repo_name
string
path
string
copies
string
size
string
content
string
license
string
Fechinator/FechdaKernel
lib/xz/xz_dec_stream.c
12733
19721
/* * .xz Stream decoder * * Author: Lasse Collin <lasse.collin@tukaani.org> * * This file has been put into the public domain. * You can do whatever you want with this file. */ #include "xz_private.h" #include "xz_stream.h" /* Hash used to validate the Index field */ struct xz_dec_hash { vli_type unpadded; vli_type uncompressed; uint32_t crc32; }; struct xz_dec { /* Position in dec_main() */ enum { SEQ_STREAM_HEADER, SEQ_BLOCK_START, SEQ_BLOCK_HEADER, SEQ_BLOCK_UNCOMPRESS, SEQ_BLOCK_PADDING, SEQ_BLOCK_CHECK, SEQ_INDEX, SEQ_INDEX_PADDING, SEQ_INDEX_CRC32, SEQ_STREAM_FOOTER } sequence; /* Position in variable-length integers and Check fields */ uint32_t pos; /* Variable-length integer decoded by dec_vli() */ vli_type vli; /* Saved in_pos and out_pos */ size_t in_start; size_t out_start; /* CRC32 value in Block or Index */ uint32_t crc32; /* Type of the integrity check calculated from uncompressed data */ enum xz_check check_type; /* Operation mode */ enum xz_mode mode; /* * True if the next call to xz_dec_run() is allowed to return * XZ_BUF_ERROR. */ bool allow_buf_error; /* Information stored in Block Header */ struct { /* * Value stored in the Compressed Size field, or * VLI_UNKNOWN if Compressed Size is not present. */ vli_type compressed; /* * Value stored in the Uncompressed Size field, or * VLI_UNKNOWN if Uncompressed Size is not present. */ vli_type uncompressed; /* Size of the Block Header field */ uint32_t size; } block_header; /* Information collected when decoding Blocks */ struct { /* Observed compressed size of the current Block */ vli_type compressed; /* Observed uncompressed size of the current Block */ vli_type uncompressed; /* Number of Blocks decoded so far */ vli_type count; /* * Hash calculated from the Block sizes. This is used to * validate the Index field. 
*/ struct xz_dec_hash hash; } block; /* Variables needed when verifying the Index field */ struct { /* Position in dec_index() */ enum { SEQ_INDEX_COUNT, SEQ_INDEX_UNPADDED, SEQ_INDEX_UNCOMPRESSED } sequence; /* Size of the Index in bytes */ vli_type size; /* Number of Records (matches block.count in valid files) */ vli_type count; /* * Hash calculated from the Records (matches block.hash in * valid files). */ struct xz_dec_hash hash; } index; /* * Temporary buffer needed to hold Stream Header, Block Header, * and Stream Footer. The Block Header is the biggest (1 KiB) * so we reserve space according to that. buf[] has to be aligned * to a multiple of four bytes; the size_t variables before it * should guarantee this. */ struct { size_t pos; size_t size; uint8_t buf[1024]; } temp; struct xz_dec_lzma2 *lzma2; #ifdef XZ_DEC_BCJ struct xz_dec_bcj *bcj; bool bcj_active; #endif }; #ifdef XZ_DEC_ANY_CHECK /* Sizes of the Check field with different Check IDs */ static const uint8_t check_sizes[16] = { 0, 4, 4, 4, 8, 8, 8, 16, 16, 16, 32, 32, 32, 64, 64, 64 }; #endif /* * Fill s->temp by copying data starting from b->in[b->in_pos]. Caller * must have set s->temp.pos to indicate how much data we are supposed * to copy into s->temp.buf. Return true once s->temp.pos has reached * s->temp.size. 
*/ static bool fill_temp(struct xz_dec *s, struct xz_buf *b) { size_t copy_size = min_t(size_t, b->in_size - b->in_pos, s->temp.size - s->temp.pos); memcpy(s->temp.buf + s->temp.pos, b->in + b->in_pos, copy_size); b->in_pos += copy_size; s->temp.pos += copy_size; if (s->temp.pos == s->temp.size) { s->temp.pos = 0; return true; } return false; } /* Decode a variable-length integer (little-endian base-128 encoding) */ static enum xz_ret dec_vli(struct xz_dec *s, const uint8_t *in, size_t *in_pos, size_t in_size) { uint8_t byte; if (s->pos == 0) s->vli = 0; while (*in_pos < in_size) { byte = in[*in_pos]; ++*in_pos; s->vli |= (vli_type)(byte & 0x7F) << s->pos; if ((byte & 0x80) == 0) { /* Don't allow non-minimal encodings. */ if (byte == 0 && s->pos != 0) return XZ_DATA_ERROR; s->pos = 0; return XZ_STREAM_END; } s->pos += 7; if (s->pos == 7 * VLI_BYTES_MAX) return XZ_DATA_ERROR; } return XZ_OK; } /* * Decode the Compressed Data field from a Block. Update and validate * the observed compressed and uncompressed sizes of the Block so that * they don't exceed the values possibly stored in the Block Header * (validation assumes that no integer overflow occurs, since vli_type * is normally uint64_t). Update the CRC32 if presence of the CRC32 * field was indicated in Stream Header. * * Once the decoding is finished, validate that the observed sizes match * the sizes possibly stored in the Block Header. Update the hash and * Block count, which are later used to validate the Index field. */ static enum xz_ret dec_block(struct xz_dec *s, struct xz_buf *b) { enum xz_ret ret; s->in_start = b->in_pos; s->out_start = b->out_pos; #ifdef XZ_DEC_BCJ if (s->bcj_active) ret = xz_dec_bcj_run(s->bcj, s->lzma2, b); else #endif ret = xz_dec_lzma2_run(s->lzma2, b); s->block.compressed += b->in_pos - s->in_start; s->block.uncompressed += b->out_pos - s->out_start; /* * There is no need to separately check for VLI_UNKNOWN, since * the observed sizes are always smaller than VLI_UNKNOWN. 
*/ if (s->block.compressed > s->block_header.compressed || s->block.uncompressed > s->block_header.uncompressed) return XZ_DATA_ERROR; if (s->check_type == XZ_CHECK_CRC32) s->crc32 = xz_crc32(b->out + s->out_start, b->out_pos - s->out_start, s->crc32); if (ret == XZ_STREAM_END) { if (s->block_header.compressed != VLI_UNKNOWN && s->block_header.compressed != s->block.compressed) return XZ_DATA_ERROR; if (s->block_header.uncompressed != VLI_UNKNOWN && s->block_header.uncompressed != s->block.uncompressed) return XZ_DATA_ERROR; s->block.hash.unpadded += s->block_header.size + s->block.compressed; #ifdef XZ_DEC_ANY_CHECK s->block.hash.unpadded += check_sizes[s->check_type]; #else if (s->check_type == XZ_CHECK_CRC32) s->block.hash.unpadded += 4; #endif s->block.hash.uncompressed += s->block.uncompressed; s->block.hash.crc32 = xz_crc32( (const uint8_t *)&s->block.hash, sizeof(s->block.hash), s->block.hash.crc32); ++s->block.count; } return ret; } /* Update the Index size and the CRC32 value. */ static void index_update(struct xz_dec *s, const struct xz_buf *b) { size_t in_used = b->in_pos - s->in_start; s->index.size += in_used; s->crc32 = xz_crc32(b->in + s->in_start, in_used, s->crc32); } /* * Decode the Number of Records, Unpadded Size, and Uncompressed Size * fields from the Index field. That is, Index Padding and CRC32 are not * decoded by this function. * * This can return XZ_OK (more input needed), XZ_STREAM_END (everything * successfully decoded), or XZ_DATA_ERROR (input is corrupt). */ static enum xz_ret dec_index(struct xz_dec *s, struct xz_buf *b) { enum xz_ret ret; do { ret = dec_vli(s, b->in, &b->in_pos, b->in_size); if (ret != XZ_STREAM_END) { index_update(s, b); return ret; } switch (s->index.sequence) { case SEQ_INDEX_COUNT: s->index.count = s->vli; /* * Validate that the Number of Records field * indicates the same number of Records as * there were Blocks in the Stream. 
*/ if (s->index.count != s->block.count) return XZ_DATA_ERROR; s->index.sequence = SEQ_INDEX_UNPADDED; break; case SEQ_INDEX_UNPADDED: s->index.hash.unpadded += s->vli; s->index.sequence = SEQ_INDEX_UNCOMPRESSED; break; case SEQ_INDEX_UNCOMPRESSED: s->index.hash.uncompressed += s->vli; s->index.hash.crc32 = xz_crc32( (const uint8_t *)&s->index.hash, sizeof(s->index.hash), s->index.hash.crc32); --s->index.count; s->index.sequence = SEQ_INDEX_UNPADDED; break; } } while (s->index.count > 0); return XZ_STREAM_END; } /* * Validate that the next four input bytes match the value of s->crc32. * s->pos must be zero when starting to validate the first byte. */ static enum xz_ret crc32_validate(struct xz_dec *s, struct xz_buf *b) { do { if (b->in_pos == b->in_size) return XZ_OK; if (((s->crc32 >> s->pos) & 0xFF) != b->in[b->in_pos++]) return XZ_DATA_ERROR; s->pos += 8; } while (s->pos < 32); s->crc32 = 0; s->pos = 0; return XZ_STREAM_END; } #ifdef XZ_DEC_ANY_CHECK /* * Skip over the Check field when the Check ID is not supported. * Returns true once the whole Check field has been skipped over. */ static bool check_skip(struct xz_dec *s, struct xz_buf *b) { while (s->pos < check_sizes[s->check_type]) { if (b->in_pos == b->in_size) return false; ++b->in_pos; ++s->pos; } s->pos = 0; return true; } #endif /* Decode the Stream Header field (the first 12 bytes of the .xz Stream). */ static enum xz_ret dec_stream_header(struct xz_dec *s) { if (!memeq(s->temp.buf, HEADER_MAGIC, HEADER_MAGIC_SIZE)) return XZ_FORMAT_ERROR; if (xz_crc32(s->temp.buf + HEADER_MAGIC_SIZE, 2, 0) != get_le32(s->temp.buf + HEADER_MAGIC_SIZE + 2)) return XZ_DATA_ERROR; if (s->temp.buf[HEADER_MAGIC_SIZE] != 0) return XZ_OPTIONS_ERROR; /* * Of integrity checks, we support only none (Check ID = 0) and * CRC32 (Check ID = 1). However, if XZ_DEC_ANY_CHECK is defined, * we will accept other check types too, but then the check won't * be verified and a warning (XZ_UNSUPPORTED_CHECK) will be given. 
*/ s->check_type = s->temp.buf[HEADER_MAGIC_SIZE + 1]; #ifdef XZ_DEC_ANY_CHECK if (s->check_type > XZ_CHECK_MAX) return XZ_OPTIONS_ERROR; if (s->check_type > XZ_CHECK_CRC32) return XZ_UNSUPPORTED_CHECK; #else if (s->check_type > XZ_CHECK_CRC32) return XZ_OPTIONS_ERROR; #endif return XZ_OK; } /* Decode the Stream Footer field (the last 12 bytes of the .xz Stream) */ static enum xz_ret dec_stream_footer(struct xz_dec *s) { if (!memeq(s->temp.buf + 10, FOOTER_MAGIC, FOOTER_MAGIC_SIZE)) return XZ_DATA_ERROR; if (xz_crc32(s->temp.buf + 4, 6, 0) != get_le32(s->temp.buf)) return XZ_DATA_ERROR; /* * Validate Backward Size. Note that we never added the size of the * Index CRC32 field to s->index.size, thus we use s->index.size / 4 * instead of s->index.size / 4 - 1. */ if ((s->index.size >> 2) != get_le32(s->temp.buf + 4)) return XZ_DATA_ERROR; if (s->temp.buf[8] != 0 || s->temp.buf[9] != s->check_type) return XZ_DATA_ERROR; /* * Use XZ_STREAM_END instead of XZ_OK to be more convenient * for the caller. */ return XZ_STREAM_END; } /* Decode the Block Header and initialize the filter chain. */ static enum xz_ret dec_block_header(struct xz_dec *s) { enum xz_ret ret; /* * Validate the CRC32. We know that the temp buffer is at least * eight bytes so this is safe. */ s->temp.size -= 4; if (xz_crc32(s->temp.buf, s->temp.size, 0) != get_le32(s->temp.buf + s->temp.size)) return XZ_DATA_ERROR; s->temp.pos = 2; /* * Catch unsupported Block Flags. We support only one or two filters * in the chain, so we catch that with the same test. 
*/ #ifdef XZ_DEC_BCJ if (s->temp.buf[1] & 0x3E) #else if (s->temp.buf[1] & 0x3F) #endif return XZ_OPTIONS_ERROR; /* Compressed Size */ if (s->temp.buf[1] & 0x40) { if (dec_vli(s, s->temp.buf, &s->temp.pos, s->temp.size) != XZ_STREAM_END) return XZ_DATA_ERROR; s->block_header.compressed = s->vli; } else { s->block_header.compressed = VLI_UNKNOWN; } /* Uncompressed Size */ if (s->temp.buf[1] & 0x80) { if (dec_vli(s, s->temp.buf, &s->temp.pos, s->temp.size) != XZ_STREAM_END) return XZ_DATA_ERROR; s->block_header.uncompressed = s->vli; } else { s->block_header.uncompressed = VLI_UNKNOWN; } #ifdef XZ_DEC_BCJ /* If there are two filters, the first one must be a BCJ filter. */ s->bcj_active = s->temp.buf[1] & 0x01; if (s->bcj_active) { if (s->temp.size - s->temp.pos < 2) return XZ_OPTIONS_ERROR; ret = xz_dec_bcj_reset(s->bcj, s->temp.buf[s->temp.pos++]); if (ret != XZ_OK) return ret; /* * We don't support custom start offset, * so Size of Properties must be zero. */ if (s->temp.buf[s->temp.pos++] != 0x00) return XZ_OPTIONS_ERROR; } #endif /* Valid Filter Flags always take at least two bytes. */ if (s->temp.size - s->temp.pos < 2) return XZ_DATA_ERROR; /* Filter ID = LZMA2 */ if (s->temp.buf[s->temp.pos++] != 0x21) return XZ_OPTIONS_ERROR; /* Size of Properties = 1-byte Filter Properties */ if (s->temp.buf[s->temp.pos++] != 0x01) return XZ_OPTIONS_ERROR; /* Filter Properties contains LZMA2 dictionary size. */ if (s->temp.size - s->temp.pos < 1) return XZ_DATA_ERROR; ret = xz_dec_lzma2_reset(s->lzma2, s->temp.buf[s->temp.pos++]); if (ret != XZ_OK) return ret; /* The rest must be Header Padding. */ while (s->temp.pos < s->temp.size) if (s->temp.buf[s->temp.pos++] != 0x00) return XZ_OPTIONS_ERROR; s->temp.pos = 0; s->block.compressed = 0; s->block.uncompressed = 0; return XZ_OK; } static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b) { enum xz_ret ret; /* * Store the start position for the case when we are in the middle * of the Index field. 
*/ s->in_start = b->in_pos; while (true) { switch (s->sequence) { case SEQ_STREAM_HEADER: /* * Stream Header is copied to s->temp, and then * decoded from there. This way if the caller * gives us only little input at a time, we can * still keep the Stream Header decoding code * simple. Similar approach is used in many places * in this file. */ if (!fill_temp(s, b)) return XZ_OK; /* * If dec_stream_header() returns * XZ_UNSUPPORTED_CHECK, it is still possible * to continue decoding if working in multi-call * mode. Thus, update s->sequence before calling * dec_stream_header(). */ s->sequence = SEQ_BLOCK_START; ret = dec_stream_header(s); if (ret != XZ_OK) return ret; case SEQ_BLOCK_START: /* We need one byte of input to continue. */ if (b->in_pos == b->in_size) return XZ_OK; /* See if this is the beginning of the Index field. */ if (b->in[b->in_pos] == 0) { s->in_start = b->in_pos++; s->sequence = SEQ_INDEX; break; } /* * Calculate the size of the Block Header and * prepare to decode it. */ s->block_header.size = ((uint32_t)b->in[b->in_pos] + 1) * 4; s->temp.size = s->block_header.size; s->temp.pos = 0; s->sequence = SEQ_BLOCK_HEADER; case SEQ_BLOCK_HEADER: if (!fill_temp(s, b)) return XZ_OK; ret = dec_block_header(s); if (ret != XZ_OK) return ret; s->sequence = SEQ_BLOCK_UNCOMPRESS; case SEQ_BLOCK_UNCOMPRESS: ret = dec_block(s, b); if (ret != XZ_STREAM_END) return ret; s->sequence = SEQ_BLOCK_PADDING; case SEQ_BLOCK_PADDING: /* * Size of Compressed Data + Block Padding * must be a multiple of four. We don't need * s->block.compressed for anything else * anymore, so we use it here to test the size * of the Block Padding field. 
*/ while (s->block.compressed & 3) { if (b->in_pos == b->in_size) return XZ_OK; if (b->in[b->in_pos++] != 0) return XZ_DATA_ERROR; ++s->block.compressed; } s->sequence = SEQ_BLOCK_CHECK; case SEQ_BLOCK_CHECK: if (s->check_type == XZ_CHECK_CRC32) { ret = crc32_validate(s, b); if (ret != XZ_STREAM_END) return ret; } #ifdef XZ_DEC_ANY_CHECK else if (!check_skip(s, b)) { return XZ_OK; } #endif s->sequence = SEQ_BLOCK_START; break; case SEQ_INDEX: ret = dec_index(s, b); if (ret != XZ_STREAM_END) return ret; s->sequence = SEQ_INDEX_PADDING; case SEQ_INDEX_PADDING: while ((s->index.size + (b->in_pos - s->in_start)) & 3) { if (b->in_pos == b->in_size) { index_update(s, b); return XZ_OK; } if (b->in[b->in_pos++] != 0) return XZ_DATA_ERROR; } /* Finish the CRC32 value and Index size. */ index_update(s, b); /* Compare the hashes to validate the Index field. */ if (!memeq(&s->block.hash, &s->index.hash, sizeof(s->block.hash))) return XZ_DATA_ERROR; s->sequence = SEQ_INDEX_CRC32; case SEQ_INDEX_CRC32: ret = crc32_validate(s, b); if (ret != XZ_STREAM_END) return ret; s->temp.size = STREAM_HEADER_SIZE; s->sequence = SEQ_STREAM_FOOTER; case SEQ_STREAM_FOOTER: if (!fill_temp(s, b)) return XZ_OK; return dec_stream_footer(s); } } /* Never reached */ } /* * xz_dec_run() is a wrapper for dec_main() to handle some special cases in * multi-call and single-call decoding. * * In multi-call mode, we must return XZ_BUF_ERROR when it seems clear that we * are not going to make any progress anymore. This is to prevent the caller * from calling us infinitely when the input file is truncated or otherwise * corrupt. Since zlib-style API allows that the caller fills the input buffer * only when the decoder doesn't produce any new output, we have to be careful * to avoid returning XZ_BUF_ERROR too easily: XZ_BUF_ERROR is returned only * after the second consecutive call to xz_dec_run() that makes no progress. 
* * In single-call mode, if we couldn't decode everything and no error * occurred, either the input is truncated or the output buffer is too small. * Since we know that the last input byte never produces any output, we know * that if all the input was consumed and decoding wasn't finished, the file * must be corrupt. Otherwise the output buffer has to be too small or the * file is corrupt in a way that decoding it produces too big output. * * If single-call decoding fails, we reset b->in_pos and b->out_pos back to * their original values. This is because with some filter chains there won't * be any valid uncompressed data in the output buffer unless the decoding * actually succeeds (that's the price to pay of using the output buffer as * the workspace). */ XZ_EXTERN enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b) { size_t in_start; size_t out_start; enum xz_ret ret; if (DEC_IS_SINGLE(s->mode)) xz_dec_reset(s); in_start = b->in_pos; out_start = b->out_pos; ret = dec_main(s, b); if (DEC_IS_SINGLE(s->mode)) { if (ret == XZ_OK) ret = b->in_pos == b->in_size ? 
XZ_DATA_ERROR : XZ_BUF_ERROR; if (ret != XZ_STREAM_END) { b->in_pos = in_start; b->out_pos = out_start; } } else if (ret == XZ_OK && in_start == b->in_pos && out_start == b->out_pos) { if (s->allow_buf_error) ret = XZ_BUF_ERROR; s->allow_buf_error = true; } else { s->allow_buf_error = false; } return ret; } XZ_EXTERN struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max) { struct xz_dec *s = kmalloc(sizeof(*s), GFP_KERNEL); if (s == NULL) return NULL; s->mode = mode; #ifdef XZ_DEC_BCJ s->bcj = xz_dec_bcj_create(DEC_IS_SINGLE(mode)); if (s->bcj == NULL) goto error_bcj; #endif s->lzma2 = xz_dec_lzma2_create(mode, dict_max); if (s->lzma2 == NULL) goto error_lzma2; xz_dec_reset(s); return s; error_lzma2: #ifdef XZ_DEC_BCJ xz_dec_bcj_end(s->bcj); error_bcj: #endif kfree(s); return NULL; } XZ_EXTERN void xz_dec_reset(struct xz_dec *s) { s->sequence = SEQ_STREAM_HEADER; s->allow_buf_error = false; s->pos = 0; s->crc32 = 0; memzero(&s->block, sizeof(s->block)); memzero(&s->index, sizeof(s->index)); s->temp.pos = 0; s->temp.size = STREAM_HEADER_SIZE; } XZ_EXTERN void xz_dec_end(struct xz_dec *s) { if (s != NULL) { xz_dec_lzma2_end(s->lzma2); #ifdef XZ_DEC_BCJ xz_dec_bcj_end(s->bcj); #endif kfree(s); } }
gpl-2.0
Fagyi/kernel_msm_hammerhead
arch/alpha/boot/tools/mkbb.c
13757
3562
/* This utility makes a bootblock suitable for the SRM console/miniloader */ /* Usage: * mkbb <device> <lxboot> * * Where <device> is the name of the device to install the bootblock on, * and <lxboot> is the name of a bootblock to merge in. This bootblock * contains the offset and size of the bootloader. It must be exactly * 512 bytes long. */ #include <fcntl.h> #include <unistd.h> #include <stdlib.h> #include <stdio.h> /* Minimal definition of disklabel, so we don't have to include * asm/disklabel.h (confuses make) */ #ifndef MAXPARTITIONS #define MAXPARTITIONS 8 /* max. # of partitions */ #endif #ifndef u8 #define u8 unsigned char #endif #ifndef u16 #define u16 unsigned short #endif #ifndef u32 #define u32 unsigned int #endif struct disklabel { u32 d_magic; /* must be DISKLABELMAGIC */ u16 d_type, d_subtype; u8 d_typename[16]; u8 d_packname[16]; u32 d_secsize; u32 d_nsectors; u32 d_ntracks; u32 d_ncylinders; u32 d_secpercyl; u32 d_secprtunit; u16 d_sparespertrack; u16 d_sparespercyl; u32 d_acylinders; u16 d_rpm, d_interleave, d_trackskew, d_cylskew; u32 d_headswitch, d_trkseek, d_flags; u32 d_drivedata[5]; u32 d_spare[5]; u32 d_magic2; /* must be DISKLABELMAGIC */ u16 d_checksum; u16 d_npartitions; u32 d_bbsize, d_sbsize; struct d_partition { u32 p_size; u32 p_offset; u32 p_fsize; u8 p_fstype; u8 p_frag; u16 p_cpg; } d_partitions[MAXPARTITIONS]; }; typedef union __bootblock { struct { char __pad1[64]; struct disklabel __label; } __u1; struct { unsigned long __pad2[63]; unsigned long __checksum; } __u2; char bootblock_bytes[512]; unsigned long bootblock_quadwords[64]; } bootblock; #define bootblock_label __u1.__label #define bootblock_checksum __u2.__checksum int main(int argc, char ** argv) { bootblock bootblock_from_disk; bootblock bootloader_image; int dev, fd; int i; int nread; /* Make sure of the arg count */ if(argc != 3) { fprintf(stderr, "Usage: %s device lxboot\n", argv[0]); exit(0); } /* First, open the device and make sure it's accessible */ dev = 
open(argv[1], O_RDWR); if(dev < 0) { perror(argv[1]); exit(0); } /* Now open the lxboot and make sure it's reasonable */ fd = open(argv[2], O_RDONLY); if(fd < 0) { perror(argv[2]); close(dev); exit(0); } /* Read in the lxboot */ nread = read(fd, &bootloader_image, sizeof(bootblock)); if(nread != sizeof(bootblock)) { perror("lxboot read"); fprintf(stderr, "expected %zd, got %d\n", sizeof(bootblock), nread); exit(0); } /* Read in the bootblock from disk. */ nread = read(dev, &bootblock_from_disk, sizeof(bootblock)); if(nread != sizeof(bootblock)) { perror("bootblock read"); fprintf(stderr, "expected %zd, got %d\n", sizeof(bootblock), nread); exit(0); } /* Swap the bootblock's disklabel into the bootloader */ bootloader_image.bootblock_label = bootblock_from_disk.bootblock_label; /* Calculate the bootblock checksum */ bootloader_image.bootblock_checksum = 0; for(i = 0; i < 63; i++) { bootloader_image.bootblock_checksum += bootloader_image.bootblock_quadwords[i]; } /* Write the whole thing out! */ lseek(dev, 0L, SEEK_SET); if(write(dev, &bootloader_image, sizeof(bootblock)) != sizeof(bootblock)) { perror("bootblock write"); exit(0); } close(fd); close(dev); exit(0); }
gpl-2.0
RonGokhale/lge-kernel-pecan
sound/oss/midibuf.c
1470
8866
/* * sound/oss/midibuf.c * * Device file manager for /dev/midi# */ /* * Copyright (C) by Hannu Savolainen 1993-1997 * * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL) * Version 2 (June 1991). See the "COPYING" file distributed with this software * for more info. */ /* * Thomas Sailer : ioctl code reworked (vmalloc/vfree removed) */ #include <linux/stddef.h> #include <linux/kmod.h> #include <linux/spinlock.h> #define MIDIBUF_C #include "sound_config.h" /* * Don't make MAX_QUEUE_SIZE larger than 4000 */ #define MAX_QUEUE_SIZE 4000 static wait_queue_head_t midi_sleeper[MAX_MIDI_DEV]; static wait_queue_head_t input_sleeper[MAX_MIDI_DEV]; struct midi_buf { int len, head, tail; unsigned char queue[MAX_QUEUE_SIZE]; }; struct midi_parms { long prech_timeout; /* * Timeout before the first ch */ }; static struct midi_buf *midi_out_buf[MAX_MIDI_DEV] = {NULL}; static struct midi_buf *midi_in_buf[MAX_MIDI_DEV] = {NULL}; static struct midi_parms parms[MAX_MIDI_DEV]; static void midi_poll(unsigned long dummy); static DEFINE_TIMER(poll_timer, midi_poll, 0, 0); static volatile int open_devs; static DEFINE_SPINLOCK(lock); #define DATA_AVAIL(q) (q->len) #define SPACE_AVAIL(q) (MAX_QUEUE_SIZE - q->len) #define QUEUE_BYTE(q, data) \ if (SPACE_AVAIL(q)) \ { \ unsigned long flags; \ spin_lock_irqsave(&lock, flags); \ q->queue[q->tail] = (data); \ q->len++; q->tail = (q->tail+1) % MAX_QUEUE_SIZE; \ spin_unlock_irqrestore(&lock, flags); \ } #define REMOVE_BYTE(q, data) \ if (DATA_AVAIL(q)) \ { \ unsigned long flags; \ spin_lock_irqsave(&lock, flags); \ data = q->queue[q->head]; \ q->len--; q->head = (q->head+1) % MAX_QUEUE_SIZE; \ spin_unlock_irqrestore(&lock, flags); \ } static void drain_midi_queue(int dev) { /* * Give the Midi driver time to drain its output queues */ if (midi_devs[dev]->buffer_status != NULL) while (!signal_pending(current) && midi_devs[dev]->buffer_status(dev)) interruptible_sleep_on_timeout(&midi_sleeper[dev], HZ/10); } static void 
midi_input_intr(int dev, unsigned char data) { if (midi_in_buf[dev] == NULL) return; if (data == 0xfe) /* * Active sensing */ return; /* * Ignore */ if (SPACE_AVAIL(midi_in_buf[dev])) { QUEUE_BYTE(midi_in_buf[dev], data); wake_up(&input_sleeper[dev]); } } static void midi_output_intr(int dev) { /* * Currently NOP */ } static void midi_poll(unsigned long dummy) { unsigned long flags; int dev; spin_lock_irqsave(&lock, flags); if (open_devs) { for (dev = 0; dev < num_midis; dev++) if (midi_devs[dev] != NULL && midi_out_buf[dev] != NULL) { while (DATA_AVAIL(midi_out_buf[dev])) { int ok; int c = midi_out_buf[dev]->queue[midi_out_buf[dev]->head]; spin_unlock_irqrestore(&lock,flags);/* Give some time to others */ ok = midi_devs[dev]->outputc(dev, c); spin_lock_irqsave(&lock, flags); if (!ok) break; midi_out_buf[dev]->head = (midi_out_buf[dev]->head + 1) % MAX_QUEUE_SIZE; midi_out_buf[dev]->len--; } if (DATA_AVAIL(midi_out_buf[dev]) < 100) wake_up(&midi_sleeper[dev]); } poll_timer.expires = (1) + jiffies; add_timer(&poll_timer); /* * Come back later */ } spin_unlock_irqrestore(&lock, flags); } int MIDIbuf_open(int dev, struct file *file) { int mode, err; dev = dev >> 4; mode = translate_mode(file); if (num_midis > MAX_MIDI_DEV) { printk(KERN_ERR "midi: Too many midi interfaces\n"); num_midis = MAX_MIDI_DEV; } if (dev < 0 || dev >= num_midis || midi_devs[dev] == NULL) return -ENXIO; /* * Interrupts disabled. 
Be careful */ module_put(midi_devs[dev]->owner); if ((err = midi_devs[dev]->open(dev, mode, midi_input_intr, midi_output_intr)) < 0) return err; parms[dev].prech_timeout = MAX_SCHEDULE_TIMEOUT; midi_in_buf[dev] = (struct midi_buf *) vmalloc(sizeof(struct midi_buf)); if (midi_in_buf[dev] == NULL) { printk(KERN_WARNING "midi: Can't allocate buffer\n"); midi_devs[dev]->close(dev); return -EIO; } midi_in_buf[dev]->len = midi_in_buf[dev]->head = midi_in_buf[dev]->tail = 0; midi_out_buf[dev] = (struct midi_buf *) vmalloc(sizeof(struct midi_buf)); if (midi_out_buf[dev] == NULL) { printk(KERN_WARNING "midi: Can't allocate buffer\n"); midi_devs[dev]->close(dev); vfree(midi_in_buf[dev]); midi_in_buf[dev] = NULL; return -EIO; } midi_out_buf[dev]->len = midi_out_buf[dev]->head = midi_out_buf[dev]->tail = 0; open_devs++; init_waitqueue_head(&midi_sleeper[dev]); init_waitqueue_head(&input_sleeper[dev]); if (open_devs < 2) /* This was first open */ { poll_timer.expires = 1 + jiffies; add_timer(&poll_timer); /* Start polling */ } return err; } void MIDIbuf_release(int dev, struct file *file) { int mode; dev = dev >> 4; mode = translate_mode(file); if (dev < 0 || dev >= num_midis || midi_devs[dev] == NULL) return; /* * Wait until the queue is empty */ if (mode != OPEN_READ) { midi_devs[dev]->outputc(dev, 0xfe); /* * Active sensing to shut the * devices */ while (!signal_pending(current) && DATA_AVAIL(midi_out_buf[dev])) interruptible_sleep_on(&midi_sleeper[dev]); /* * Sync */ drain_midi_queue(dev); /* * Ensure the output queues are empty */ } midi_devs[dev]->close(dev); open_devs--; if (open_devs == 0) del_timer_sync(&poll_timer); vfree(midi_in_buf[dev]); vfree(midi_out_buf[dev]); midi_in_buf[dev] = NULL; midi_out_buf[dev] = NULL; module_put(midi_devs[dev]->owner); } int MIDIbuf_write(int dev, struct file *file, const char __user *buf, int count) { int c, n, i; unsigned char tmp_data; dev = dev >> 4; if (!count) return 0; c = 0; while (c < count) { n = 
SPACE_AVAIL(midi_out_buf[dev]); if (n == 0) { /* * No space just now. */ if (file->f_flags & O_NONBLOCK) { c = -EAGAIN; goto out; } interruptible_sleep_on(&midi_sleeper[dev]); if (signal_pending(current)) { c = -EINTR; goto out; } n = SPACE_AVAIL(midi_out_buf[dev]); } if (n > (count - c)) n = count - c; for (i = 0; i < n; i++) { /* BROKE BROKE BROKE - CANT DO THIS WITH CLI !! */ /* yes, think the same, so I removed the cli() brackets QUEUE_BYTE is protected against interrupts */ if (copy_from_user((char *) &tmp_data, &(buf)[c], 1)) { c = -EFAULT; goto out; } QUEUE_BYTE(midi_out_buf[dev], tmp_data); c++; } } out: return c; } int MIDIbuf_read(int dev, struct file *file, char __user *buf, int count) { int n, c = 0; unsigned char tmp_data; dev = dev >> 4; if (!DATA_AVAIL(midi_in_buf[dev])) { /* * No data yet, wait */ if (file->f_flags & O_NONBLOCK) { c = -EAGAIN; goto out; } interruptible_sleep_on_timeout(&input_sleeper[dev], parms[dev].prech_timeout); if (signal_pending(current)) c = -EINTR; /* The user is getting restless */ } if (c == 0 && DATA_AVAIL(midi_in_buf[dev])) /* * Got some bytes */ { n = DATA_AVAIL(midi_in_buf[dev]); if (n > count) n = count; c = 0; while (c < n) { char *fixit; REMOVE_BYTE(midi_in_buf[dev], tmp_data); fixit = (char *) &tmp_data; /* BROKE BROKE BROKE */ /* yes removed the cli() brackets again should q->len,tail&head be atomic_t? 
*/ if (copy_to_user(&(buf)[c], fixit, 1)) { c = -EFAULT; goto out; } c++; } } out: return c; } int MIDIbuf_ioctl(int dev, struct file *file, unsigned int cmd, void __user *arg) { int val; dev = dev >> 4; if (((cmd >> 8) & 0xff) == 'C') { if (midi_devs[dev]->coproc) /* Coprocessor ioctl */ return midi_devs[dev]->coproc->ioctl(midi_devs[dev]->coproc->devc, cmd, arg, 0); /* printk("/dev/midi%d: No coprocessor for this device\n", dev);*/ return -ENXIO; } else { switch (cmd) { case SNDCTL_MIDI_PRETIME: if (get_user(val, (int __user *)arg)) return -EFAULT; if (val < 0) val = 0; val = (HZ * val) / 10; parms[dev].prech_timeout = val; return put_user(val, (int __user *)arg); default: if (!midi_devs[dev]->ioctl) return -EINVAL; return midi_devs[dev]->ioctl(dev, cmd, arg); } } } /* No kernel lock - fine */ unsigned int MIDIbuf_poll(int dev, struct file *file, poll_table * wait) { unsigned int mask = 0; dev = dev >> 4; /* input */ poll_wait(file, &input_sleeper[dev], wait); if (DATA_AVAIL(midi_in_buf[dev])) mask |= POLLIN | POLLRDNORM; /* output */ poll_wait(file, &midi_sleeper[dev], wait); if (!SPACE_AVAIL(midi_out_buf[dev])) mask |= POLLOUT | POLLWRNORM; return mask; } int MIDIbuf_avail(int dev) { if (midi_in_buf[dev]) return DATA_AVAIL (midi_in_buf[dev]); return 0; } EXPORT_SYMBOL(MIDIbuf_avail);
gpl-2.0
Entropy512/linux_kernel_galaxyplayer
arch/sh/boards/mach-systemh/io.c
1470
4057
/* * linux/arch/sh/boards/renesas/systemh/io.c * * Copyright (C) 2001 Ian da Silva, Jeremy Siegel * Based largely on io_se.c. * * I/O routine for Hitachi 7751 Systemh. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/pci.h> #include <mach/systemh7751.h> #include <asm/addrspace.h> #include <asm/io.h> #define ETHER_IOMAP(adr) (0xB3000000 + (adr)) /*map to 16bits access area of smc lan chip*/ static inline volatile __u16 * port2adr(unsigned int port) { if (port >= 0x2000) return (volatile __u16 *) (PA_MRSHPC + (port - 0x2000)); maybebadio((unsigned long)port); return (volatile __u16*)port; } /* * General outline: remap really low stuff [eventually] to SuperIO, * stuff in PCI IO space (at or above window at pci.h:PCIBIOS_MIN_IO) * is mapped through the PCI IO window. Stuff with high bits (PXSEG) * should be way beyond the window, and is used w/o translation for * compatibility. */ unsigned char sh7751systemh_inb(unsigned long port) { if (PXSEG(port)) return *(volatile unsigned char *)port; else if (port <= 0x3F1) return *(volatile unsigned char *)ETHER_IOMAP(port); else return (*port2adr(port))&0xff; } unsigned char sh7751systemh_inb_p(unsigned long port) { unsigned char v; if (PXSEG(port)) v = *(volatile unsigned char *)port; else if (port <= 0x3F1) v = *(volatile unsigned char *)ETHER_IOMAP(port); else v = (*port2adr(port))&0xff; ctrl_delay(); return v; } unsigned short sh7751systemh_inw(unsigned long port) { if (PXSEG(port)) return *(volatile unsigned short *)port; else if (port >= 0x2000) return *port2adr(port); else if (port <= 0x3F1) return *(volatile unsigned int *)ETHER_IOMAP(port); else maybebadio(port); return 0; } unsigned int sh7751systemh_inl(unsigned long port) { if (PXSEG(port)) return *(volatile unsigned long *)port; else if (port >= 0x2000) return *port2adr(port); else if (port <= 0x3F1) return *(volatile unsigned int *)ETHER_IOMAP(port); else maybebadio(port); return 0; } void sh7751systemh_outb(unsigned char value, unsigned long 
port) { if (PXSEG(port)) *(volatile unsigned char *)port = value; else if (port <= 0x3F1) *(volatile unsigned char *)ETHER_IOMAP(port) = value; else *(port2adr(port)) = value; } void sh7751systemh_outb_p(unsigned char value, unsigned long port) { if (PXSEG(port)) *(volatile unsigned char *)port = value; else if (port <= 0x3F1) *(volatile unsigned char *)ETHER_IOMAP(port) = value; else *(port2adr(port)) = value; ctrl_delay(); } void sh7751systemh_outw(unsigned short value, unsigned long port) { if (PXSEG(port)) *(volatile unsigned short *)port = value; else if (port >= 0x2000) *port2adr(port) = value; else if (port <= 0x3F1) *(volatile unsigned short *)ETHER_IOMAP(port) = value; else maybebadio(port); } void sh7751systemh_outl(unsigned int value, unsigned long port) { if (PXSEG(port)) *(volatile unsigned long *)port = value; else maybebadio(port); } void sh7751systemh_insb(unsigned long port, void *addr, unsigned long count) { unsigned char *p = addr; while (count--) *p++ = sh7751systemh_inb(port); } void sh7751systemh_insw(unsigned long port, void *addr, unsigned long count) { unsigned short *p = addr; while (count--) *p++ = sh7751systemh_inw(port); } void sh7751systemh_insl(unsigned long port, void *addr, unsigned long count) { maybebadio(port); } void sh7751systemh_outsb(unsigned long port, const void *addr, unsigned long count) { unsigned char *p = (unsigned char*)addr; while (count--) sh7751systemh_outb(*p++, port); } void sh7751systemh_outsw(unsigned long port, const void *addr, unsigned long count) { unsigned short *p = (unsigned short*)addr; while (count--) sh7751systemh_outw(*p++, port); } void sh7751systemh_outsl(unsigned long port, const void *addr, unsigned long count) { maybebadio(port); }
gpl-2.0
Cyantist-Arcane9/kernel_cyanogen_msm8916
drivers/dma/ioat/dca.c
2750
18204
/* * Intel I/OAT DMA Linux driver * Copyright(c) 2007 - 2009 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * * The full GNU General Public License is included in this distribution in * the file called "COPYING". * */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/dca.h> /* either a kernel change is needed, or we need something like this in kernel */ #ifndef CONFIG_SMP #include <asm/smp.h> #undef cpu_physical_id #define cpu_physical_id(cpu) (cpuid_ebx(1) >> 24) #endif #include "dma.h" #include "registers.h" /* * Bit 7 of a tag map entry is the "valid" bit, if it is set then bits 0:6 * contain the bit number of the APIC ID to map into the DCA tag. If the valid * bit is not set, then the value must be 0 or 1 and defines the bit in the tag. 
*/ #define DCA_TAG_MAP_VALID 0x80 #define DCA3_TAG_MAP_BIT_TO_INV 0x80 #define DCA3_TAG_MAP_BIT_TO_SEL 0x40 #define DCA3_TAG_MAP_LITERAL_VAL 0x1 #define DCA_TAG_MAP_MASK 0xDF /* expected tag map bytes for I/OAT ver.2 */ #define DCA2_TAG_MAP_BYTE0 0x80 #define DCA2_TAG_MAP_BYTE1 0x0 #define DCA2_TAG_MAP_BYTE2 0x81 #define DCA2_TAG_MAP_BYTE3 0x82 #define DCA2_TAG_MAP_BYTE4 0x82 /* verify if tag map matches expected values */ static inline int dca2_tag_map_valid(u8 *tag_map) { return ((tag_map[0] == DCA2_TAG_MAP_BYTE0) && (tag_map[1] == DCA2_TAG_MAP_BYTE1) && (tag_map[2] == DCA2_TAG_MAP_BYTE2) && (tag_map[3] == DCA2_TAG_MAP_BYTE3) && (tag_map[4] == DCA2_TAG_MAP_BYTE4)); } /* * "Legacy" DCA systems do not implement the DCA register set in the * I/OAT device. Software needs direct support for their tag mappings. */ #define APICID_BIT(x) (DCA_TAG_MAP_VALID | (x)) #define IOAT_TAG_MAP_LEN 8 static u8 ioat_tag_map_BNB[IOAT_TAG_MAP_LEN] = { 1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), }; static u8 ioat_tag_map_SCNB[IOAT_TAG_MAP_LEN] = { 1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), }; static u8 ioat_tag_map_CNB[IOAT_TAG_MAP_LEN] = { 1, APICID_BIT(1), APICID_BIT(3), APICID_BIT(4), APICID_BIT(2), }; static u8 ioat_tag_map_UNISYS[IOAT_TAG_MAP_LEN] = { 0 }; /* pack PCI B/D/F into a u16 */ static inline u16 dcaid_from_pcidev(struct pci_dev *pci) { return (pci->bus->number << 8) | pci->devfn; } static int dca_enabled_in_bios(struct pci_dev *pdev) { /* CPUID level 9 returns DCA configuration */ /* Bit 0 indicates DCA enabled by the BIOS */ unsigned long cpuid_level_9; int res; cpuid_level_9 = cpuid_eax(9); res = test_bit(0, &cpuid_level_9); if (!res) dev_dbg(&pdev->dev, "DCA is disabled in BIOS\n"); return res; } int system_has_dca_enabled(struct pci_dev *pdev) { if (boot_cpu_has(X86_FEATURE_DCA)) return dca_enabled_in_bios(pdev); dev_dbg(&pdev->dev, "boot cpu doesn't have X86_FEATURE_DCA\n"); return 0; } struct ioat_dca_slot { struct pci_dev *pdev; /* requester device */ u16 
rid; /* requester id, as used by IOAT */ }; #define IOAT_DCA_MAX_REQ 6 #define IOAT3_DCA_MAX_REQ 2 struct ioat_dca_priv { void __iomem *iobase; void __iomem *dca_base; int max_requesters; int requester_count; u8 tag_map[IOAT_TAG_MAP_LEN]; struct ioat_dca_slot req_slots[0]; }; /* 5000 series chipset DCA Port Requester ID Table Entry Format * [15:8] PCI-Express Bus Number * [7:3] PCI-Express Device Number * [2:0] PCI-Express Function Number * * 5000 series chipset DCA control register format * [7:1] Reserved (0) * [0] Ignore Function Number */ static int ioat_dca_add_requester(struct dca_provider *dca, struct device *dev) { struct ioat_dca_priv *ioatdca = dca_priv(dca); struct pci_dev *pdev; int i; u16 id; /* This implementation only supports PCI-Express */ if (dev->bus != &pci_bus_type) return -ENODEV; pdev = to_pci_dev(dev); id = dcaid_from_pcidev(pdev); if (ioatdca->requester_count == ioatdca->max_requesters) return -ENODEV; for (i = 0; i < ioatdca->max_requesters; i++) { if (ioatdca->req_slots[i].pdev == NULL) { /* found an empty slot */ ioatdca->requester_count++; ioatdca->req_slots[i].pdev = pdev; ioatdca->req_slots[i].rid = id; writew(id, ioatdca->dca_base + (i * 4)); /* make sure the ignore function bit is off */ writeb(0, ioatdca->dca_base + (i * 4) + 2); return i; } } /* Error, ioatdma->requester_count is out of whack */ return -EFAULT; } static int ioat_dca_remove_requester(struct dca_provider *dca, struct device *dev) { struct ioat_dca_priv *ioatdca = dca_priv(dca); struct pci_dev *pdev; int i; /* This implementation only supports PCI-Express */ if (dev->bus != &pci_bus_type) return -ENODEV; pdev = to_pci_dev(dev); for (i = 0; i < ioatdca->max_requesters; i++) { if (ioatdca->req_slots[i].pdev == pdev) { writew(0, ioatdca->dca_base + (i * 4)); ioatdca->req_slots[i].pdev = NULL; ioatdca->req_slots[i].rid = 0; ioatdca->requester_count--; return i; } } return -ENODEV; } static u8 ioat_dca_get_tag(struct dca_provider *dca, struct device *dev, int cpu) { struct 
ioat_dca_priv *ioatdca = dca_priv(dca); int i, apic_id, bit, value; u8 entry, tag; tag = 0; apic_id = cpu_physical_id(cpu); for (i = 0; i < IOAT_TAG_MAP_LEN; i++) { entry = ioatdca->tag_map[i]; if (entry & DCA_TAG_MAP_VALID) { bit = entry & ~DCA_TAG_MAP_VALID; value = (apic_id & (1 << bit)) ? 1 : 0; } else { value = entry ? 1 : 0; } tag |= (value << i); } return tag; } static int ioat_dca_dev_managed(struct dca_provider *dca, struct device *dev) { struct ioat_dca_priv *ioatdca = dca_priv(dca); struct pci_dev *pdev; int i; pdev = to_pci_dev(dev); for (i = 0; i < ioatdca->max_requesters; i++) { if (ioatdca->req_slots[i].pdev == pdev) return 1; } return 0; } static struct dca_ops ioat_dca_ops = { .add_requester = ioat_dca_add_requester, .remove_requester = ioat_dca_remove_requester, .get_tag = ioat_dca_get_tag, .dev_managed = ioat_dca_dev_managed, }; struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase) { struct dca_provider *dca; struct ioat_dca_priv *ioatdca; u8 *tag_map = NULL; int i; int err; u8 version; u8 max_requesters; if (!system_has_dca_enabled(pdev)) return NULL; /* I/OAT v1 systems must have a known tag_map to support DCA */ switch (pdev->vendor) { case PCI_VENDOR_ID_INTEL: switch (pdev->device) { case PCI_DEVICE_ID_INTEL_IOAT: tag_map = ioat_tag_map_BNB; break; case PCI_DEVICE_ID_INTEL_IOAT_CNB: tag_map = ioat_tag_map_CNB; break; case PCI_DEVICE_ID_INTEL_IOAT_SCNB: tag_map = ioat_tag_map_SCNB; break; } break; case PCI_VENDOR_ID_UNISYS: switch (pdev->device) { case PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR: tag_map = ioat_tag_map_UNISYS; break; } break; } if (tag_map == NULL) return NULL; version = readb(iobase + IOAT_VER_OFFSET); if (version == IOAT_VER_3_0) max_requesters = IOAT3_DCA_MAX_REQ; else max_requesters = IOAT_DCA_MAX_REQ; dca = alloc_dca_provider(&ioat_dca_ops, sizeof(*ioatdca) + (sizeof(struct ioat_dca_slot) * max_requesters)); if (!dca) return NULL; ioatdca = dca_priv(dca); ioatdca->max_requesters = max_requesters; 
ioatdca->dca_base = iobase + 0x54; /* copy over the APIC ID to DCA tag mapping */ for (i = 0; i < IOAT_TAG_MAP_LEN; i++) ioatdca->tag_map[i] = tag_map[i]; err = register_dca_provider(dca, &pdev->dev); if (err) { free_dca_provider(dca); return NULL; } return dca; } static int ioat2_dca_add_requester(struct dca_provider *dca, struct device *dev) { struct ioat_dca_priv *ioatdca = dca_priv(dca); struct pci_dev *pdev; int i; u16 id; u16 global_req_table; /* This implementation only supports PCI-Express */ if (dev->bus != &pci_bus_type) return -ENODEV; pdev = to_pci_dev(dev); id = dcaid_from_pcidev(pdev); if (ioatdca->requester_count == ioatdca->max_requesters) return -ENODEV; for (i = 0; i < ioatdca->max_requesters; i++) { if (ioatdca->req_slots[i].pdev == NULL) { /* found an empty slot */ ioatdca->requester_count++; ioatdca->req_slots[i].pdev = pdev; ioatdca->req_slots[i].rid = id; global_req_table = readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET); writel(id | IOAT_DCA_GREQID_VALID, ioatdca->iobase + global_req_table + (i * 4)); return i; } } /* Error, ioatdma->requester_count is out of whack */ return -EFAULT; } static int ioat2_dca_remove_requester(struct dca_provider *dca, struct device *dev) { struct ioat_dca_priv *ioatdca = dca_priv(dca); struct pci_dev *pdev; int i; u16 global_req_table; /* This implementation only supports PCI-Express */ if (dev->bus != &pci_bus_type) return -ENODEV; pdev = to_pci_dev(dev); for (i = 0; i < ioatdca->max_requesters; i++) { if (ioatdca->req_slots[i].pdev == pdev) { global_req_table = readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET); writel(0, ioatdca->iobase + global_req_table + (i * 4)); ioatdca->req_slots[i].pdev = NULL; ioatdca->req_slots[i].rid = 0; ioatdca->requester_count--; return i; } } return -ENODEV; } static u8 ioat2_dca_get_tag(struct dca_provider *dca, struct device *dev, int cpu) { u8 tag; tag = ioat_dca_get_tag(dca, dev, cpu); tag = (~tag) & 0x1F; return tag; } static struct dca_ops ioat2_dca_ops = { 
.add_requester = ioat2_dca_add_requester, .remove_requester = ioat2_dca_remove_requester, .get_tag = ioat2_dca_get_tag, .dev_managed = ioat_dca_dev_managed, }; static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset) { int slots = 0; u32 req; u16 global_req_table; global_req_table = readw(iobase + dca_offset + IOAT_DCA_GREQID_OFFSET); if (global_req_table == 0) return 0; do { req = readl(iobase + global_req_table + (slots * sizeof(u32))); slots++; } while ((req & IOAT_DCA_GREQID_LASTID) == 0); return slots; } struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase) { struct dca_provider *dca; struct ioat_dca_priv *ioatdca; int slots; int i; int err; u32 tag_map; u16 dca_offset; u16 csi_fsb_control; u16 pcie_control; u8 bit; if (!system_has_dca_enabled(pdev)) return NULL; dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET); if (dca_offset == 0) return NULL; slots = ioat2_dca_count_dca_slots(iobase, dca_offset); if (slots == 0) return NULL; dca = alloc_dca_provider(&ioat2_dca_ops, sizeof(*ioatdca) + (sizeof(struct ioat_dca_slot) * slots)); if (!dca) return NULL; ioatdca = dca_priv(dca); ioatdca->iobase = iobase; ioatdca->dca_base = iobase + dca_offset; ioatdca->max_requesters = slots; /* some bios might not know to turn these on */ csi_fsb_control = readw(ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET); if ((csi_fsb_control & IOAT_FSB_CAP_ENABLE_PREFETCH) == 0) { csi_fsb_control |= IOAT_FSB_CAP_ENABLE_PREFETCH; writew(csi_fsb_control, ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET); } pcie_control = readw(ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET); if ((pcie_control & IOAT_PCI_CAP_ENABLE_MEMWR) == 0) { pcie_control |= IOAT_PCI_CAP_ENABLE_MEMWR; writew(pcie_control, ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET); } /* TODO version, compatibility and configuration checks */ /* copy out the APIC to DCA tag map */ tag_map = readl(ioatdca->dca_base + IOAT_APICID_TAG_MAP_OFFSET); for (i = 0; i < 5; i++) { bit = (tag_map >> (4 * 
i)) & 0x0f; if (bit < 8) ioatdca->tag_map[i] = bit | DCA_TAG_MAP_VALID; else ioatdca->tag_map[i] = 0; } if (!dca2_tag_map_valid(ioatdca->tag_map)) { WARN_TAINT_ONCE(1, TAINT_FIRMWARE_WORKAROUND, "%s %s: APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n", dev_driver_string(&pdev->dev), dev_name(&pdev->dev)); free_dca_provider(dca); return NULL; } err = register_dca_provider(dca, &pdev->dev); if (err) { free_dca_provider(dca); return NULL; } return dca; } static int ioat3_dca_add_requester(struct dca_provider *dca, struct device *dev) { struct ioat_dca_priv *ioatdca = dca_priv(dca); struct pci_dev *pdev; int i; u16 id; u16 global_req_table; /* This implementation only supports PCI-Express */ if (dev->bus != &pci_bus_type) return -ENODEV; pdev = to_pci_dev(dev); id = dcaid_from_pcidev(pdev); if (ioatdca->requester_count == ioatdca->max_requesters) return -ENODEV; for (i = 0; i < ioatdca->max_requesters; i++) { if (ioatdca->req_slots[i].pdev == NULL) { /* found an empty slot */ ioatdca->requester_count++; ioatdca->req_slots[i].pdev = pdev; ioatdca->req_slots[i].rid = id; global_req_table = readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET); writel(id | IOAT_DCA_GREQID_VALID, ioatdca->iobase + global_req_table + (i * 4)); return i; } } /* Error, ioatdma->requester_count is out of whack */ return -EFAULT; } static int ioat3_dca_remove_requester(struct dca_provider *dca, struct device *dev) { struct ioat_dca_priv *ioatdca = dca_priv(dca); struct pci_dev *pdev; int i; u16 global_req_table; /* This implementation only supports PCI-Express */ if (dev->bus != &pci_bus_type) return -ENODEV; pdev = to_pci_dev(dev); for (i = 0; i < ioatdca->max_requesters; i++) { if (ioatdca->req_slots[i].pdev == pdev) { global_req_table = readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET); writel(0, ioatdca->iobase + global_req_table + (i * 4)); ioatdca->req_slots[i].pdev = NULL; ioatdca->req_slots[i].rid = 0; ioatdca->requester_count--; return i; } } return -ENODEV; } static u8 
ioat3_dca_get_tag(struct dca_provider *dca, struct device *dev, int cpu) { u8 tag; struct ioat_dca_priv *ioatdca = dca_priv(dca); int i, apic_id, bit, value; u8 entry; tag = 0; apic_id = cpu_physical_id(cpu); for (i = 0; i < IOAT_TAG_MAP_LEN; i++) { entry = ioatdca->tag_map[i]; if (entry & DCA3_TAG_MAP_BIT_TO_SEL) { bit = entry & ~(DCA3_TAG_MAP_BIT_TO_SEL | DCA3_TAG_MAP_BIT_TO_INV); value = (apic_id & (1 << bit)) ? 1 : 0; } else if (entry & DCA3_TAG_MAP_BIT_TO_INV) { bit = entry & ~DCA3_TAG_MAP_BIT_TO_INV; value = (apic_id & (1 << bit)) ? 0 : 1; } else { value = (entry & DCA3_TAG_MAP_LITERAL_VAL) ? 1 : 0; } tag |= (value << i); } return tag; } static struct dca_ops ioat3_dca_ops = { .add_requester = ioat3_dca_add_requester, .remove_requester = ioat3_dca_remove_requester, .get_tag = ioat3_dca_get_tag, .dev_managed = ioat_dca_dev_managed, }; static int ioat3_dca_count_dca_slots(void *iobase, u16 dca_offset) { int slots = 0; u32 req; u16 global_req_table; global_req_table = readw(iobase + dca_offset + IOAT3_DCA_GREQID_OFFSET); if (global_req_table == 0) return 0; do { req = readl(iobase + global_req_table + (slots * sizeof(u32))); slots++; } while ((req & IOAT_DCA_GREQID_LASTID) == 0); return slots; } static inline int dca3_tag_map_invalid(u8 *tag_map) { /* * If the tag map is not programmed by the BIOS the default is: * 0x80 0x80 0x80 0x80 0x80 0x00 0x00 0x00 * * This an invalid map and will result in only 2 possible tags * 0x1F and 0x00. 0x00 is an invalid DCA tag so we know that * this entire definition is invalid. 
*/ return ((tag_map[0] == DCA_TAG_MAP_VALID) && (tag_map[1] == DCA_TAG_MAP_VALID) && (tag_map[2] == DCA_TAG_MAP_VALID) && (tag_map[3] == DCA_TAG_MAP_VALID) && (tag_map[4] == DCA_TAG_MAP_VALID)); } struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase) { struct dca_provider *dca; struct ioat_dca_priv *ioatdca; int slots; int i; int err; u16 dca_offset; u16 csi_fsb_control; u16 pcie_control; u8 bit; union { u64 full; struct { u32 low; u32 high; }; } tag_map; if (!system_has_dca_enabled(pdev)) return NULL; dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET); if (dca_offset == 0) return NULL; slots = ioat3_dca_count_dca_slots(iobase, dca_offset); if (slots == 0) return NULL; dca = alloc_dca_provider(&ioat3_dca_ops, sizeof(*ioatdca) + (sizeof(struct ioat_dca_slot) * slots)); if (!dca) return NULL; ioatdca = dca_priv(dca); ioatdca->iobase = iobase; ioatdca->dca_base = iobase + dca_offset; ioatdca->max_requesters = slots; /* some bios might not know to turn these on */ csi_fsb_control = readw(ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET); if ((csi_fsb_control & IOAT3_CSI_CONTROL_PREFETCH) == 0) { csi_fsb_control |= IOAT3_CSI_CONTROL_PREFETCH; writew(csi_fsb_control, ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET); } pcie_control = readw(ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET); if ((pcie_control & IOAT3_PCI_CONTROL_MEMWR) == 0) { pcie_control |= IOAT3_PCI_CONTROL_MEMWR; writew(pcie_control, ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET); } /* TODO version, compatibility and configuration checks */ /* copy out the APIC to DCA tag map */ tag_map.low = readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_LOW); tag_map.high = readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_HIGH); for (i = 0; i < 8; i++) { bit = tag_map.full >> (8 * i); ioatdca->tag_map[i] = bit & DCA_TAG_MAP_MASK; } if (dca3_tag_map_invalid(ioatdca->tag_map)) { WARN_TAINT_ONCE(1, TAINT_FIRMWARE_WORKAROUND, "%s %s: APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n", 
dev_driver_string(&pdev->dev), dev_name(&pdev->dev)); free_dca_provider(dca); return NULL; } err = register_dca_provider(dca, &pdev->dev); if (err) { free_dca_provider(dca); return NULL; } return dca; }
gpl-2.0
s2ack/edison-kernel
drivers/dma/ioat/dca.c
2750
18204
/* * Intel I/OAT DMA Linux driver * Copyright(c) 2007 - 2009 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * * The full GNU General Public License is included in this distribution in * the file called "COPYING". * */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/dca.h> /* either a kernel change is needed, or we need something like this in kernel */ #ifndef CONFIG_SMP #include <asm/smp.h> #undef cpu_physical_id #define cpu_physical_id(cpu) (cpuid_ebx(1) >> 24) #endif #include "dma.h" #include "registers.h" /* * Bit 7 of a tag map entry is the "valid" bit, if it is set then bits 0:6 * contain the bit number of the APIC ID to map into the DCA tag. If the valid * bit is not set, then the value must be 0 or 1 and defines the bit in the tag. 
*/ #define DCA_TAG_MAP_VALID 0x80 #define DCA3_TAG_MAP_BIT_TO_INV 0x80 #define DCA3_TAG_MAP_BIT_TO_SEL 0x40 #define DCA3_TAG_MAP_LITERAL_VAL 0x1 #define DCA_TAG_MAP_MASK 0xDF /* expected tag map bytes for I/OAT ver.2 */ #define DCA2_TAG_MAP_BYTE0 0x80 #define DCA2_TAG_MAP_BYTE1 0x0 #define DCA2_TAG_MAP_BYTE2 0x81 #define DCA2_TAG_MAP_BYTE3 0x82 #define DCA2_TAG_MAP_BYTE4 0x82 /* verify if tag map matches expected values */ static inline int dca2_tag_map_valid(u8 *tag_map) { return ((tag_map[0] == DCA2_TAG_MAP_BYTE0) && (tag_map[1] == DCA2_TAG_MAP_BYTE1) && (tag_map[2] == DCA2_TAG_MAP_BYTE2) && (tag_map[3] == DCA2_TAG_MAP_BYTE3) && (tag_map[4] == DCA2_TAG_MAP_BYTE4)); } /* * "Legacy" DCA systems do not implement the DCA register set in the * I/OAT device. Software needs direct support for their tag mappings. */ #define APICID_BIT(x) (DCA_TAG_MAP_VALID | (x)) #define IOAT_TAG_MAP_LEN 8 static u8 ioat_tag_map_BNB[IOAT_TAG_MAP_LEN] = { 1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), }; static u8 ioat_tag_map_SCNB[IOAT_TAG_MAP_LEN] = { 1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), }; static u8 ioat_tag_map_CNB[IOAT_TAG_MAP_LEN] = { 1, APICID_BIT(1), APICID_BIT(3), APICID_BIT(4), APICID_BIT(2), }; static u8 ioat_tag_map_UNISYS[IOAT_TAG_MAP_LEN] = { 0 }; /* pack PCI B/D/F into a u16 */ static inline u16 dcaid_from_pcidev(struct pci_dev *pci) { return (pci->bus->number << 8) | pci->devfn; } static int dca_enabled_in_bios(struct pci_dev *pdev) { /* CPUID level 9 returns DCA configuration */ /* Bit 0 indicates DCA enabled by the BIOS */ unsigned long cpuid_level_9; int res; cpuid_level_9 = cpuid_eax(9); res = test_bit(0, &cpuid_level_9); if (!res) dev_dbg(&pdev->dev, "DCA is disabled in BIOS\n"); return res; } int system_has_dca_enabled(struct pci_dev *pdev) { if (boot_cpu_has(X86_FEATURE_DCA)) return dca_enabled_in_bios(pdev); dev_dbg(&pdev->dev, "boot cpu doesn't have X86_FEATURE_DCA\n"); return 0; } struct ioat_dca_slot { struct pci_dev *pdev; /* requester device */ u16 
rid; /* requester id, as used by IOAT */ }; #define IOAT_DCA_MAX_REQ 6 #define IOAT3_DCA_MAX_REQ 2 struct ioat_dca_priv { void __iomem *iobase; void __iomem *dca_base; int max_requesters; int requester_count; u8 tag_map[IOAT_TAG_MAP_LEN]; struct ioat_dca_slot req_slots[0]; }; /* 5000 series chipset DCA Port Requester ID Table Entry Format * [15:8] PCI-Express Bus Number * [7:3] PCI-Express Device Number * [2:0] PCI-Express Function Number * * 5000 series chipset DCA control register format * [7:1] Reserved (0) * [0] Ignore Function Number */ static int ioat_dca_add_requester(struct dca_provider *dca, struct device *dev) { struct ioat_dca_priv *ioatdca = dca_priv(dca); struct pci_dev *pdev; int i; u16 id; /* This implementation only supports PCI-Express */ if (dev->bus != &pci_bus_type) return -ENODEV; pdev = to_pci_dev(dev); id = dcaid_from_pcidev(pdev); if (ioatdca->requester_count == ioatdca->max_requesters) return -ENODEV; for (i = 0; i < ioatdca->max_requesters; i++) { if (ioatdca->req_slots[i].pdev == NULL) { /* found an empty slot */ ioatdca->requester_count++; ioatdca->req_slots[i].pdev = pdev; ioatdca->req_slots[i].rid = id; writew(id, ioatdca->dca_base + (i * 4)); /* make sure the ignore function bit is off */ writeb(0, ioatdca->dca_base + (i * 4) + 2); return i; } } /* Error, ioatdma->requester_count is out of whack */ return -EFAULT; } static int ioat_dca_remove_requester(struct dca_provider *dca, struct device *dev) { struct ioat_dca_priv *ioatdca = dca_priv(dca); struct pci_dev *pdev; int i; /* This implementation only supports PCI-Express */ if (dev->bus != &pci_bus_type) return -ENODEV; pdev = to_pci_dev(dev); for (i = 0; i < ioatdca->max_requesters; i++) { if (ioatdca->req_slots[i].pdev == pdev) { writew(0, ioatdca->dca_base + (i * 4)); ioatdca->req_slots[i].pdev = NULL; ioatdca->req_slots[i].rid = 0; ioatdca->requester_count--; return i; } } return -ENODEV; } static u8 ioat_dca_get_tag(struct dca_provider *dca, struct device *dev, int cpu) { struct 
ioat_dca_priv *ioatdca = dca_priv(dca); int i, apic_id, bit, value; u8 entry, tag; tag = 0; apic_id = cpu_physical_id(cpu); for (i = 0; i < IOAT_TAG_MAP_LEN; i++) { entry = ioatdca->tag_map[i]; if (entry & DCA_TAG_MAP_VALID) { bit = entry & ~DCA_TAG_MAP_VALID; value = (apic_id & (1 << bit)) ? 1 : 0; } else { value = entry ? 1 : 0; } tag |= (value << i); } return tag; } static int ioat_dca_dev_managed(struct dca_provider *dca, struct device *dev) { struct ioat_dca_priv *ioatdca = dca_priv(dca); struct pci_dev *pdev; int i; pdev = to_pci_dev(dev); for (i = 0; i < ioatdca->max_requesters; i++) { if (ioatdca->req_slots[i].pdev == pdev) return 1; } return 0; } static struct dca_ops ioat_dca_ops = { .add_requester = ioat_dca_add_requester, .remove_requester = ioat_dca_remove_requester, .get_tag = ioat_dca_get_tag, .dev_managed = ioat_dca_dev_managed, }; struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase) { struct dca_provider *dca; struct ioat_dca_priv *ioatdca; u8 *tag_map = NULL; int i; int err; u8 version; u8 max_requesters; if (!system_has_dca_enabled(pdev)) return NULL; /* I/OAT v1 systems must have a known tag_map to support DCA */ switch (pdev->vendor) { case PCI_VENDOR_ID_INTEL: switch (pdev->device) { case PCI_DEVICE_ID_INTEL_IOAT: tag_map = ioat_tag_map_BNB; break; case PCI_DEVICE_ID_INTEL_IOAT_CNB: tag_map = ioat_tag_map_CNB; break; case PCI_DEVICE_ID_INTEL_IOAT_SCNB: tag_map = ioat_tag_map_SCNB; break; } break; case PCI_VENDOR_ID_UNISYS: switch (pdev->device) { case PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR: tag_map = ioat_tag_map_UNISYS; break; } break; } if (tag_map == NULL) return NULL; version = readb(iobase + IOAT_VER_OFFSET); if (version == IOAT_VER_3_0) max_requesters = IOAT3_DCA_MAX_REQ; else max_requesters = IOAT_DCA_MAX_REQ; dca = alloc_dca_provider(&ioat_dca_ops, sizeof(*ioatdca) + (sizeof(struct ioat_dca_slot) * max_requesters)); if (!dca) return NULL; ioatdca = dca_priv(dca); ioatdca->max_requesters = max_requesters; 
ioatdca->dca_base = iobase + 0x54; /* copy over the APIC ID to DCA tag mapping */ for (i = 0; i < IOAT_TAG_MAP_LEN; i++) ioatdca->tag_map[i] = tag_map[i]; err = register_dca_provider(dca, &pdev->dev); if (err) { free_dca_provider(dca); return NULL; } return dca; } static int ioat2_dca_add_requester(struct dca_provider *dca, struct device *dev) { struct ioat_dca_priv *ioatdca = dca_priv(dca); struct pci_dev *pdev; int i; u16 id; u16 global_req_table; /* This implementation only supports PCI-Express */ if (dev->bus != &pci_bus_type) return -ENODEV; pdev = to_pci_dev(dev); id = dcaid_from_pcidev(pdev); if (ioatdca->requester_count == ioatdca->max_requesters) return -ENODEV; for (i = 0; i < ioatdca->max_requesters; i++) { if (ioatdca->req_slots[i].pdev == NULL) { /* found an empty slot */ ioatdca->requester_count++; ioatdca->req_slots[i].pdev = pdev; ioatdca->req_slots[i].rid = id; global_req_table = readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET); writel(id | IOAT_DCA_GREQID_VALID, ioatdca->iobase + global_req_table + (i * 4)); return i; } } /* Error, ioatdma->requester_count is out of whack */ return -EFAULT; } static int ioat2_dca_remove_requester(struct dca_provider *dca, struct device *dev) { struct ioat_dca_priv *ioatdca = dca_priv(dca); struct pci_dev *pdev; int i; u16 global_req_table; /* This implementation only supports PCI-Express */ if (dev->bus != &pci_bus_type) return -ENODEV; pdev = to_pci_dev(dev); for (i = 0; i < ioatdca->max_requesters; i++) { if (ioatdca->req_slots[i].pdev == pdev) { global_req_table = readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET); writel(0, ioatdca->iobase + global_req_table + (i * 4)); ioatdca->req_slots[i].pdev = NULL; ioatdca->req_slots[i].rid = 0; ioatdca->requester_count--; return i; } } return -ENODEV; } static u8 ioat2_dca_get_tag(struct dca_provider *dca, struct device *dev, int cpu) { u8 tag; tag = ioat_dca_get_tag(dca, dev, cpu); tag = (~tag) & 0x1F; return tag; } static struct dca_ops ioat2_dca_ops = { 
.add_requester = ioat2_dca_add_requester, .remove_requester = ioat2_dca_remove_requester, .get_tag = ioat2_dca_get_tag, .dev_managed = ioat_dca_dev_managed, }; static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset) { int slots = 0; u32 req; u16 global_req_table; global_req_table = readw(iobase + dca_offset + IOAT_DCA_GREQID_OFFSET); if (global_req_table == 0) return 0; do { req = readl(iobase + global_req_table + (slots * sizeof(u32))); slots++; } while ((req & IOAT_DCA_GREQID_LASTID) == 0); return slots; } struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase) { struct dca_provider *dca; struct ioat_dca_priv *ioatdca; int slots; int i; int err; u32 tag_map; u16 dca_offset; u16 csi_fsb_control; u16 pcie_control; u8 bit; if (!system_has_dca_enabled(pdev)) return NULL; dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET); if (dca_offset == 0) return NULL; slots = ioat2_dca_count_dca_slots(iobase, dca_offset); if (slots == 0) return NULL; dca = alloc_dca_provider(&ioat2_dca_ops, sizeof(*ioatdca) + (sizeof(struct ioat_dca_slot) * slots)); if (!dca) return NULL; ioatdca = dca_priv(dca); ioatdca->iobase = iobase; ioatdca->dca_base = iobase + dca_offset; ioatdca->max_requesters = slots; /* some bios might not know to turn these on */ csi_fsb_control = readw(ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET); if ((csi_fsb_control & IOAT_FSB_CAP_ENABLE_PREFETCH) == 0) { csi_fsb_control |= IOAT_FSB_CAP_ENABLE_PREFETCH; writew(csi_fsb_control, ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET); } pcie_control = readw(ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET); if ((pcie_control & IOAT_PCI_CAP_ENABLE_MEMWR) == 0) { pcie_control |= IOAT_PCI_CAP_ENABLE_MEMWR; writew(pcie_control, ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET); } /* TODO version, compatibility and configuration checks */ /* copy out the APIC to DCA tag map */ tag_map = readl(ioatdca->dca_base + IOAT_APICID_TAG_MAP_OFFSET); for (i = 0; i < 5; i++) { bit = (tag_map >> (4 * 
i)) & 0x0f; if (bit < 8) ioatdca->tag_map[i] = bit | DCA_TAG_MAP_VALID; else ioatdca->tag_map[i] = 0; } if (!dca2_tag_map_valid(ioatdca->tag_map)) { WARN_TAINT_ONCE(1, TAINT_FIRMWARE_WORKAROUND, "%s %s: APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n", dev_driver_string(&pdev->dev), dev_name(&pdev->dev)); free_dca_provider(dca); return NULL; } err = register_dca_provider(dca, &pdev->dev); if (err) { free_dca_provider(dca); return NULL; } return dca; } static int ioat3_dca_add_requester(struct dca_provider *dca, struct device *dev) { struct ioat_dca_priv *ioatdca = dca_priv(dca); struct pci_dev *pdev; int i; u16 id; u16 global_req_table; /* This implementation only supports PCI-Express */ if (dev->bus != &pci_bus_type) return -ENODEV; pdev = to_pci_dev(dev); id = dcaid_from_pcidev(pdev); if (ioatdca->requester_count == ioatdca->max_requesters) return -ENODEV; for (i = 0; i < ioatdca->max_requesters; i++) { if (ioatdca->req_slots[i].pdev == NULL) { /* found an empty slot */ ioatdca->requester_count++; ioatdca->req_slots[i].pdev = pdev; ioatdca->req_slots[i].rid = id; global_req_table = readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET); writel(id | IOAT_DCA_GREQID_VALID, ioatdca->iobase + global_req_table + (i * 4)); return i; } } /* Error, ioatdma->requester_count is out of whack */ return -EFAULT; } static int ioat3_dca_remove_requester(struct dca_provider *dca, struct device *dev) { struct ioat_dca_priv *ioatdca = dca_priv(dca); struct pci_dev *pdev; int i; u16 global_req_table; /* This implementation only supports PCI-Express */ if (dev->bus != &pci_bus_type) return -ENODEV; pdev = to_pci_dev(dev); for (i = 0; i < ioatdca->max_requesters; i++) { if (ioatdca->req_slots[i].pdev == pdev) { global_req_table = readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET); writel(0, ioatdca->iobase + global_req_table + (i * 4)); ioatdca->req_slots[i].pdev = NULL; ioatdca->req_slots[i].rid = 0; ioatdca->requester_count--; return i; } } return -ENODEV; } static u8 
ioat3_dca_get_tag(struct dca_provider *dca, struct device *dev, int cpu) { u8 tag; struct ioat_dca_priv *ioatdca = dca_priv(dca); int i, apic_id, bit, value; u8 entry; tag = 0; apic_id = cpu_physical_id(cpu); for (i = 0; i < IOAT_TAG_MAP_LEN; i++) { entry = ioatdca->tag_map[i]; if (entry & DCA3_TAG_MAP_BIT_TO_SEL) { bit = entry & ~(DCA3_TAG_MAP_BIT_TO_SEL | DCA3_TAG_MAP_BIT_TO_INV); value = (apic_id & (1 << bit)) ? 1 : 0; } else if (entry & DCA3_TAG_MAP_BIT_TO_INV) { bit = entry & ~DCA3_TAG_MAP_BIT_TO_INV; value = (apic_id & (1 << bit)) ? 0 : 1; } else { value = (entry & DCA3_TAG_MAP_LITERAL_VAL) ? 1 : 0; } tag |= (value << i); } return tag; } static struct dca_ops ioat3_dca_ops = { .add_requester = ioat3_dca_add_requester, .remove_requester = ioat3_dca_remove_requester, .get_tag = ioat3_dca_get_tag, .dev_managed = ioat_dca_dev_managed, }; static int ioat3_dca_count_dca_slots(void *iobase, u16 dca_offset) { int slots = 0; u32 req; u16 global_req_table; global_req_table = readw(iobase + dca_offset + IOAT3_DCA_GREQID_OFFSET); if (global_req_table == 0) return 0; do { req = readl(iobase + global_req_table + (slots * sizeof(u32))); slots++; } while ((req & IOAT_DCA_GREQID_LASTID) == 0); return slots; } static inline int dca3_tag_map_invalid(u8 *tag_map) { /* * If the tag map is not programmed by the BIOS the default is: * 0x80 0x80 0x80 0x80 0x80 0x00 0x00 0x00 * * This an invalid map and will result in only 2 possible tags * 0x1F and 0x00. 0x00 is an invalid DCA tag so we know that * this entire definition is invalid. 
*/ return ((tag_map[0] == DCA_TAG_MAP_VALID) && (tag_map[1] == DCA_TAG_MAP_VALID) && (tag_map[2] == DCA_TAG_MAP_VALID) && (tag_map[3] == DCA_TAG_MAP_VALID) && (tag_map[4] == DCA_TAG_MAP_VALID)); } struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase) { struct dca_provider *dca; struct ioat_dca_priv *ioatdca; int slots; int i; int err; u16 dca_offset; u16 csi_fsb_control; u16 pcie_control; u8 bit; union { u64 full; struct { u32 low; u32 high; }; } tag_map; if (!system_has_dca_enabled(pdev)) return NULL; dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET); if (dca_offset == 0) return NULL; slots = ioat3_dca_count_dca_slots(iobase, dca_offset); if (slots == 0) return NULL; dca = alloc_dca_provider(&ioat3_dca_ops, sizeof(*ioatdca) + (sizeof(struct ioat_dca_slot) * slots)); if (!dca) return NULL; ioatdca = dca_priv(dca); ioatdca->iobase = iobase; ioatdca->dca_base = iobase + dca_offset; ioatdca->max_requesters = slots; /* some bios might not know to turn these on */ csi_fsb_control = readw(ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET); if ((csi_fsb_control & IOAT3_CSI_CONTROL_PREFETCH) == 0) { csi_fsb_control |= IOAT3_CSI_CONTROL_PREFETCH; writew(csi_fsb_control, ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET); } pcie_control = readw(ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET); if ((pcie_control & IOAT3_PCI_CONTROL_MEMWR) == 0) { pcie_control |= IOAT3_PCI_CONTROL_MEMWR; writew(pcie_control, ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET); } /* TODO version, compatibility and configuration checks */ /* copy out the APIC to DCA tag map */ tag_map.low = readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_LOW); tag_map.high = readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_HIGH); for (i = 0; i < 8; i++) { bit = tag_map.full >> (8 * i); ioatdca->tag_map[i] = bit & DCA_TAG_MAP_MASK; } if (dca3_tag_map_invalid(ioatdca->tag_map)) { WARN_TAINT_ONCE(1, TAINT_FIRMWARE_WORKAROUND, "%s %s: APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n", 
dev_driver_string(&pdev->dev), dev_name(&pdev->dev)); free_dca_provider(dca); return NULL; } err = register_dca_provider(dca, &pdev->dev); if (err) { free_dca_provider(dca); return NULL; } return dca; }
gpl-2.0
Cl3Kener/HERC-KERNELS
drivers/i2c/busses/i2c-pca-platform.c
3518
6936
/* * i2c_pca_platform.c * * Platform driver for the PCA9564 I2C controller. * * Copyright (C) 2008 Pengutronix * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/jiffies.h> #include <linux/errno.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/i2c-algo-pca.h> #include <linux/i2c-pca-platform.h> #include <linux/gpio.h> #include <linux/io.h> #include <asm/irq.h> struct i2c_pca_pf_data { void __iomem *reg_base; int irq; /* if 0, use polling */ int gpio; wait_queue_head_t wait; struct i2c_adapter adap; struct i2c_algo_pca_data algo_data; unsigned long io_base; unsigned long io_size; }; /* Read/Write functions for different register alignments */ static int i2c_pca_pf_readbyte8(void *pd, int reg) { struct i2c_pca_pf_data *i2c = pd; return ioread8(i2c->reg_base + reg); } static int i2c_pca_pf_readbyte16(void *pd, int reg) { struct i2c_pca_pf_data *i2c = pd; return ioread8(i2c->reg_base + reg * 2); } static int i2c_pca_pf_readbyte32(void *pd, int reg) { struct i2c_pca_pf_data *i2c = pd; return ioread8(i2c->reg_base + reg * 4); } static void i2c_pca_pf_writebyte8(void *pd, int reg, int val) { struct i2c_pca_pf_data *i2c = pd; iowrite8(val, i2c->reg_base + reg); } static void i2c_pca_pf_writebyte16(void *pd, int reg, int val) { struct i2c_pca_pf_data *i2c = pd; iowrite8(val, i2c->reg_base + reg * 2); } static void i2c_pca_pf_writebyte32(void *pd, int reg, int val) { struct i2c_pca_pf_data *i2c = pd; iowrite8(val, i2c->reg_base + reg * 4); } static int i2c_pca_pf_waitforcompletion(void *pd) { struct i2c_pca_pf_data *i2c = pd; unsigned long timeout; long ret; if (i2c->irq) { ret = wait_event_timeout(i2c->wait, i2c->algo_data.read_byte(i2c, 
I2C_PCA_CON) & I2C_PCA_CON_SI, i2c->adap.timeout); } else { /* Do polling */ timeout = jiffies + i2c->adap.timeout; do { ret = time_before(jiffies, timeout); if (i2c->algo_data.read_byte(i2c, I2C_PCA_CON) & I2C_PCA_CON_SI) break; udelay(100); } while (ret); } return ret > 0; } static void i2c_pca_pf_dummyreset(void *pd) { struct i2c_pca_pf_data *i2c = pd; printk(KERN_WARNING "%s: No reset-pin found. Chip may get stuck!\n", i2c->adap.name); } static void i2c_pca_pf_resetchip(void *pd) { struct i2c_pca_pf_data *i2c = pd; gpio_set_value(i2c->gpio, 0); ndelay(100); gpio_set_value(i2c->gpio, 1); } static irqreturn_t i2c_pca_pf_handler(int this_irq, void *dev_id) { struct i2c_pca_pf_data *i2c = dev_id; if ((i2c->algo_data.read_byte(i2c, I2C_PCA_CON) & I2C_PCA_CON_SI) == 0) return IRQ_NONE; wake_up(&i2c->wait); return IRQ_HANDLED; } static int __devinit i2c_pca_pf_probe(struct platform_device *pdev) { struct i2c_pca_pf_data *i2c; struct resource *res; struct i2c_pca9564_pf_platform_data *platform_data = pdev->dev.platform_data; int ret = 0; int irq; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); /* If irq is 0, we do polling. */ if (res == NULL) { ret = -ENODEV; goto e_print; } if (!request_mem_region(res->start, resource_size(res), res->name)) { ret = -ENOMEM; goto e_print; } i2c = kzalloc(sizeof(struct i2c_pca_pf_data), GFP_KERNEL); if (!i2c) { ret = -ENOMEM; goto e_alloc; } init_waitqueue_head(&i2c->wait); i2c->reg_base = ioremap(res->start, resource_size(res)); if (!i2c->reg_base) { ret = -ENOMEM; goto e_remap; } i2c->io_base = res->start; i2c->io_size = resource_size(res); i2c->irq = irq; i2c->adap.nr = pdev->id >= 0 ? 
pdev->id : 0; i2c->adap.owner = THIS_MODULE; snprintf(i2c->adap.name, sizeof(i2c->adap.name), "PCA9564/PCA9665 at 0x%08lx", (unsigned long) res->start); i2c->adap.algo_data = &i2c->algo_data; i2c->adap.dev.parent = &pdev->dev; if (platform_data) { i2c->adap.timeout = platform_data->timeout; i2c->algo_data.i2c_clock = platform_data->i2c_clock_speed; i2c->gpio = platform_data->gpio; } else { i2c->adap.timeout = HZ; i2c->algo_data.i2c_clock = 59000; i2c->gpio = -1; } i2c->algo_data.data = i2c; i2c->algo_data.wait_for_completion = i2c_pca_pf_waitforcompletion; i2c->algo_data.reset_chip = i2c_pca_pf_dummyreset; switch (res->flags & IORESOURCE_MEM_TYPE_MASK) { case IORESOURCE_MEM_32BIT: i2c->algo_data.write_byte = i2c_pca_pf_writebyte32; i2c->algo_data.read_byte = i2c_pca_pf_readbyte32; break; case IORESOURCE_MEM_16BIT: i2c->algo_data.write_byte = i2c_pca_pf_writebyte16; i2c->algo_data.read_byte = i2c_pca_pf_readbyte16; break; case IORESOURCE_MEM_8BIT: default: i2c->algo_data.write_byte = i2c_pca_pf_writebyte8; i2c->algo_data.read_byte = i2c_pca_pf_readbyte8; break; } /* Use gpio_is_valid() when in mainline */ if (i2c->gpio > -1) { ret = gpio_request(i2c->gpio, i2c->adap.name); if (ret == 0) { gpio_direction_output(i2c->gpio, 1); i2c->algo_data.reset_chip = i2c_pca_pf_resetchip; } else { printk(KERN_WARNING "%s: Registering gpio failed!\n", i2c->adap.name); i2c->gpio = ret; } } if (irq) { ret = request_irq(irq, i2c_pca_pf_handler, IRQF_TRIGGER_FALLING, pdev->name, i2c); if (ret) goto e_reqirq; } if (i2c_pca_add_numbered_bus(&i2c->adap) < 0) { ret = -ENODEV; goto e_adapt; } platform_set_drvdata(pdev, i2c); printk(KERN_INFO "%s registered.\n", i2c->adap.name); return 0; e_adapt: if (irq) free_irq(irq, i2c); e_reqirq: if (i2c->gpio > -1) gpio_free(i2c->gpio); iounmap(i2c->reg_base); e_remap: kfree(i2c); e_alloc: release_mem_region(res->start, resource_size(res)); e_print: printk(KERN_ERR "Registering PCA9564/PCA9665 FAILED! 
(%d)\n", ret); return ret; } static int __devexit i2c_pca_pf_remove(struct platform_device *pdev) { struct i2c_pca_pf_data *i2c = platform_get_drvdata(pdev); platform_set_drvdata(pdev, NULL); i2c_del_adapter(&i2c->adap); if (i2c->irq) free_irq(i2c->irq, i2c); if (i2c->gpio > -1) gpio_free(i2c->gpio); iounmap(i2c->reg_base); release_mem_region(i2c->io_base, i2c->io_size); kfree(i2c); return 0; } static struct platform_driver i2c_pca_pf_driver = { .probe = i2c_pca_pf_probe, .remove = __devexit_p(i2c_pca_pf_remove), .driver = { .name = "i2c-pca-platform", .owner = THIS_MODULE, }, }; static int __init i2c_pca_pf_init(void) { return platform_driver_register(&i2c_pca_pf_driver); } static void __exit i2c_pca_pf_exit(void) { platform_driver_unregister(&i2c_pca_pf_driver); } MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>"); MODULE_DESCRIPTION("I2C-PCA9564/PCA9665 platform driver"); MODULE_LICENSE("GPL"); module_init(i2c_pca_pf_init); module_exit(i2c_pca_pf_exit);
gpl-2.0
EPDCenterSpain/kernel_Archos_97b_Titan
drivers/i2c/busses/i2c-pca-platform.c
3518
6936
/* * i2c_pca_platform.c * * Platform driver for the PCA9564 I2C controller. * * Copyright (C) 2008 Pengutronix * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/jiffies.h> #include <linux/errno.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/i2c-algo-pca.h> #include <linux/i2c-pca-platform.h> #include <linux/gpio.h> #include <linux/io.h> #include <asm/irq.h> struct i2c_pca_pf_data { void __iomem *reg_base; int irq; /* if 0, use polling */ int gpio; wait_queue_head_t wait; struct i2c_adapter adap; struct i2c_algo_pca_data algo_data; unsigned long io_base; unsigned long io_size; }; /* Read/Write functions for different register alignments */ static int i2c_pca_pf_readbyte8(void *pd, int reg) { struct i2c_pca_pf_data *i2c = pd; return ioread8(i2c->reg_base + reg); } static int i2c_pca_pf_readbyte16(void *pd, int reg) { struct i2c_pca_pf_data *i2c = pd; return ioread8(i2c->reg_base + reg * 2); } static int i2c_pca_pf_readbyte32(void *pd, int reg) { struct i2c_pca_pf_data *i2c = pd; return ioread8(i2c->reg_base + reg * 4); } static void i2c_pca_pf_writebyte8(void *pd, int reg, int val) { struct i2c_pca_pf_data *i2c = pd; iowrite8(val, i2c->reg_base + reg); } static void i2c_pca_pf_writebyte16(void *pd, int reg, int val) { struct i2c_pca_pf_data *i2c = pd; iowrite8(val, i2c->reg_base + reg * 2); } static void i2c_pca_pf_writebyte32(void *pd, int reg, int val) { struct i2c_pca_pf_data *i2c = pd; iowrite8(val, i2c->reg_base + reg * 4); } static int i2c_pca_pf_waitforcompletion(void *pd) { struct i2c_pca_pf_data *i2c = pd; unsigned long timeout; long ret; if (i2c->irq) { ret = wait_event_timeout(i2c->wait, i2c->algo_data.read_byte(i2c, 
I2C_PCA_CON) & I2C_PCA_CON_SI, i2c->adap.timeout); } else { /* Do polling */ timeout = jiffies + i2c->adap.timeout; do { ret = time_before(jiffies, timeout); if (i2c->algo_data.read_byte(i2c, I2C_PCA_CON) & I2C_PCA_CON_SI) break; udelay(100); } while (ret); } return ret > 0; } static void i2c_pca_pf_dummyreset(void *pd) { struct i2c_pca_pf_data *i2c = pd; printk(KERN_WARNING "%s: No reset-pin found. Chip may get stuck!\n", i2c->adap.name); } static void i2c_pca_pf_resetchip(void *pd) { struct i2c_pca_pf_data *i2c = pd; gpio_set_value(i2c->gpio, 0); ndelay(100); gpio_set_value(i2c->gpio, 1); } static irqreturn_t i2c_pca_pf_handler(int this_irq, void *dev_id) { struct i2c_pca_pf_data *i2c = dev_id; if ((i2c->algo_data.read_byte(i2c, I2C_PCA_CON) & I2C_PCA_CON_SI) == 0) return IRQ_NONE; wake_up(&i2c->wait); return IRQ_HANDLED; } static int __devinit i2c_pca_pf_probe(struct platform_device *pdev) { struct i2c_pca_pf_data *i2c; struct resource *res; struct i2c_pca9564_pf_platform_data *platform_data = pdev->dev.platform_data; int ret = 0; int irq; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); /* If irq is 0, we do polling. */ if (res == NULL) { ret = -ENODEV; goto e_print; } if (!request_mem_region(res->start, resource_size(res), res->name)) { ret = -ENOMEM; goto e_print; } i2c = kzalloc(sizeof(struct i2c_pca_pf_data), GFP_KERNEL); if (!i2c) { ret = -ENOMEM; goto e_alloc; } init_waitqueue_head(&i2c->wait); i2c->reg_base = ioremap(res->start, resource_size(res)); if (!i2c->reg_base) { ret = -ENOMEM; goto e_remap; } i2c->io_base = res->start; i2c->io_size = resource_size(res); i2c->irq = irq; i2c->adap.nr = pdev->id >= 0 ? 
pdev->id : 0; i2c->adap.owner = THIS_MODULE; snprintf(i2c->adap.name, sizeof(i2c->adap.name), "PCA9564/PCA9665 at 0x%08lx", (unsigned long) res->start); i2c->adap.algo_data = &i2c->algo_data; i2c->adap.dev.parent = &pdev->dev; if (platform_data) { i2c->adap.timeout = platform_data->timeout; i2c->algo_data.i2c_clock = platform_data->i2c_clock_speed; i2c->gpio = platform_data->gpio; } else { i2c->adap.timeout = HZ; i2c->algo_data.i2c_clock = 59000; i2c->gpio = -1; } i2c->algo_data.data = i2c; i2c->algo_data.wait_for_completion = i2c_pca_pf_waitforcompletion; i2c->algo_data.reset_chip = i2c_pca_pf_dummyreset; switch (res->flags & IORESOURCE_MEM_TYPE_MASK) { case IORESOURCE_MEM_32BIT: i2c->algo_data.write_byte = i2c_pca_pf_writebyte32; i2c->algo_data.read_byte = i2c_pca_pf_readbyte32; break; case IORESOURCE_MEM_16BIT: i2c->algo_data.write_byte = i2c_pca_pf_writebyte16; i2c->algo_data.read_byte = i2c_pca_pf_readbyte16; break; case IORESOURCE_MEM_8BIT: default: i2c->algo_data.write_byte = i2c_pca_pf_writebyte8; i2c->algo_data.read_byte = i2c_pca_pf_readbyte8; break; } /* Use gpio_is_valid() when in mainline */ if (i2c->gpio > -1) { ret = gpio_request(i2c->gpio, i2c->adap.name); if (ret == 0) { gpio_direction_output(i2c->gpio, 1); i2c->algo_data.reset_chip = i2c_pca_pf_resetchip; } else { printk(KERN_WARNING "%s: Registering gpio failed!\n", i2c->adap.name); i2c->gpio = ret; } } if (irq) { ret = request_irq(irq, i2c_pca_pf_handler, IRQF_TRIGGER_FALLING, pdev->name, i2c); if (ret) goto e_reqirq; } if (i2c_pca_add_numbered_bus(&i2c->adap) < 0) { ret = -ENODEV; goto e_adapt; } platform_set_drvdata(pdev, i2c); printk(KERN_INFO "%s registered.\n", i2c->adap.name); return 0; e_adapt: if (irq) free_irq(irq, i2c); e_reqirq: if (i2c->gpio > -1) gpio_free(i2c->gpio); iounmap(i2c->reg_base); e_remap: kfree(i2c); e_alloc: release_mem_region(res->start, resource_size(res)); e_print: printk(KERN_ERR "Registering PCA9564/PCA9665 FAILED! 
(%d)\n", ret); return ret; } static int __devexit i2c_pca_pf_remove(struct platform_device *pdev) { struct i2c_pca_pf_data *i2c = platform_get_drvdata(pdev); platform_set_drvdata(pdev, NULL); i2c_del_adapter(&i2c->adap); if (i2c->irq) free_irq(i2c->irq, i2c); if (i2c->gpio > -1) gpio_free(i2c->gpio); iounmap(i2c->reg_base); release_mem_region(i2c->io_base, i2c->io_size); kfree(i2c); return 0; } static struct platform_driver i2c_pca_pf_driver = { .probe = i2c_pca_pf_probe, .remove = __devexit_p(i2c_pca_pf_remove), .driver = { .name = "i2c-pca-platform", .owner = THIS_MODULE, }, }; static int __init i2c_pca_pf_init(void) { return platform_driver_register(&i2c_pca_pf_driver); } static void __exit i2c_pca_pf_exit(void) { platform_driver_unregister(&i2c_pca_pf_driver); } MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>"); MODULE_DESCRIPTION("I2C-PCA9564/PCA9665 platform driver"); MODULE_LICENSE("GPL"); module_init(i2c_pca_pf_init); module_exit(i2c_pca_pf_exit);
gpl-2.0
bigbiff/android_kernel_samsung_trlte
drivers/infiniband/hw/ipath/ipath_verbs.c
3518
63552
/* * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved. * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <rdma/ib_mad.h> #include <rdma/ib_user_verbs.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/utsname.h> #include <linux/rculist.h> #include "ipath_kernel.h" #include "ipath_verbs.h" #include "ipath_common.h" static unsigned int ib_ipath_qp_table_size = 251; module_param_named(qp_table_size, ib_ipath_qp_table_size, uint, S_IRUGO); MODULE_PARM_DESC(qp_table_size, "QP table size"); unsigned int ib_ipath_lkey_table_size = 12; module_param_named(lkey_table_size, ib_ipath_lkey_table_size, uint, S_IRUGO); MODULE_PARM_DESC(lkey_table_size, "LKEY table size in bits (2^n, 1 <= n <= 23)"); static unsigned int ib_ipath_max_pds = 0xFFFF; module_param_named(max_pds, ib_ipath_max_pds, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(max_pds, "Maximum number of protection domains to support"); static unsigned int ib_ipath_max_ahs = 0xFFFF; module_param_named(max_ahs, ib_ipath_max_ahs, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support"); unsigned int ib_ipath_max_cqes = 0x2FFFF; module_param_named(max_cqes, ib_ipath_max_cqes, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(max_cqes, "Maximum number of completion queue entries to support"); unsigned int ib_ipath_max_cqs = 0x1FFFF; module_param_named(max_cqs, ib_ipath_max_cqs, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support"); unsigned int ib_ipath_max_qp_wrs = 0x3FFF; module_param_named(max_qp_wrs, ib_ipath_max_qp_wrs, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support"); unsigned int ib_ipath_max_qps = 16384; module_param_named(max_qps, ib_ipath_max_qps, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support"); unsigned int ib_ipath_max_sges = 0x60; module_param_named(max_sges, ib_ipath_max_sges, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support"); unsigned int 
ib_ipath_max_mcast_grps = 16384; module_param_named(max_mcast_grps, ib_ipath_max_mcast_grps, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(max_mcast_grps, "Maximum number of multicast groups to support"); unsigned int ib_ipath_max_mcast_qp_attached = 16; module_param_named(max_mcast_qp_attached, ib_ipath_max_mcast_qp_attached, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(max_mcast_qp_attached, "Maximum number of attached QPs to support"); unsigned int ib_ipath_max_srqs = 1024; module_param_named(max_srqs, ib_ipath_max_srqs, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support"); unsigned int ib_ipath_max_srq_sges = 128; module_param_named(max_srq_sges, ib_ipath_max_srq_sges, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support"); unsigned int ib_ipath_max_srq_wrs = 0x1FFFF; module_param_named(max_srq_wrs, ib_ipath_max_srq_wrs, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs support"); static unsigned int ib_ipath_disable_sma; module_param_named(disable_sma, ib_ipath_disable_sma, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(disable_sma, "Disable the SMA"); /* * Note that it is OK to post send work requests in the SQE and ERR * states; ipath_do_send() will process them and generate error * completions as per IB 1.2 C10-96. 
*/ const int ib_ipath_state_ops[IB_QPS_ERR + 1] = { [IB_QPS_RESET] = 0, [IB_QPS_INIT] = IPATH_POST_RECV_OK, [IB_QPS_RTR] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK, [IB_QPS_RTS] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK | IPATH_POST_SEND_OK | IPATH_PROCESS_SEND_OK | IPATH_PROCESS_NEXT_SEND_OK, [IB_QPS_SQD] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK | IPATH_POST_SEND_OK | IPATH_PROCESS_SEND_OK, [IB_QPS_SQE] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK | IPATH_POST_SEND_OK | IPATH_FLUSH_SEND, [IB_QPS_ERR] = IPATH_POST_RECV_OK | IPATH_FLUSH_RECV | IPATH_POST_SEND_OK | IPATH_FLUSH_SEND, }; struct ipath_ucontext { struct ib_ucontext ibucontext; }; static inline struct ipath_ucontext *to_iucontext(struct ib_ucontext *ibucontext) { return container_of(ibucontext, struct ipath_ucontext, ibucontext); } /* * Translate ib_wr_opcode into ib_wc_opcode. */ const enum ib_wc_opcode ib_ipath_wc_opcode[] = { [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE, [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE, [IB_WR_SEND] = IB_WC_SEND, [IB_WR_SEND_WITH_IMM] = IB_WC_SEND, [IB_WR_RDMA_READ] = IB_WC_RDMA_READ, [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP, [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD }; /* * System image GUID. 
*/ static __be64 sys_image_guid; /** * ipath_copy_sge - copy data to SGE memory * @ss: the SGE state * @data: the data to copy * @length: the length of the data */ void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length) { struct ipath_sge *sge = &ss->sge; while (length) { u32 len = sge->length; if (len > length) len = length; if (len > sge->sge_length) len = sge->sge_length; BUG_ON(len == 0); memcpy(sge->vaddr, data, len); sge->vaddr += len; sge->length -= len; sge->sge_length -= len; if (sge->sge_length == 0) { if (--ss->num_sge) *sge = *ss->sg_list++; } else if (sge->length == 0 && sge->mr != NULL) { if (++sge->n >= IPATH_SEGSZ) { if (++sge->m >= sge->mr->mapsz) break; sge->n = 0; } sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr; sge->length = sge->mr->map[sge->m]->segs[sge->n].length; } data += len; length -= len; } } /** * ipath_skip_sge - skip over SGE memory - XXX almost dup of prev func * @ss: the SGE state * @length: the number of bytes to skip */ void ipath_skip_sge(struct ipath_sge_state *ss, u32 length) { struct ipath_sge *sge = &ss->sge; while (length) { u32 len = sge->length; if (len > length) len = length; if (len > sge->sge_length) len = sge->sge_length; BUG_ON(len == 0); sge->vaddr += len; sge->length -= len; sge->sge_length -= len; if (sge->sge_length == 0) { if (--ss->num_sge) *sge = *ss->sg_list++; } else if (sge->length == 0 && sge->mr != NULL) { if (++sge->n >= IPATH_SEGSZ) { if (++sge->m >= sge->mr->mapsz) break; sge->n = 0; } sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr; sge->length = sge->mr->map[sge->m]->segs[sge->n].length; } length -= len; } } /* * Count the number of DMA descriptors needed to send length bytes of data. * Don't modify the ipath_sge_state to get the count. * Return zero if any of the segments is not aligned. 
*/ static u32 ipath_count_sge(struct ipath_sge_state *ss, u32 length) { struct ipath_sge *sg_list = ss->sg_list; struct ipath_sge sge = ss->sge; u8 num_sge = ss->num_sge; u32 ndesc = 1; /* count the header */ while (length) { u32 len = sge.length; if (len > length) len = length; if (len > sge.sge_length) len = sge.sge_length; BUG_ON(len == 0); if (((long) sge.vaddr & (sizeof(u32) - 1)) || (len != length && (len & (sizeof(u32) - 1)))) { ndesc = 0; break; } ndesc++; sge.vaddr += len; sge.length -= len; sge.sge_length -= len; if (sge.sge_length == 0) { if (--num_sge) sge = *sg_list++; } else if (sge.length == 0 && sge.mr != NULL) { if (++sge.n >= IPATH_SEGSZ) { if (++sge.m >= sge.mr->mapsz) break; sge.n = 0; } sge.vaddr = sge.mr->map[sge.m]->segs[sge.n].vaddr; sge.length = sge.mr->map[sge.m]->segs[sge.n].length; } length -= len; } return ndesc; } /* * Copy from the SGEs to the data buffer. */ static void ipath_copy_from_sge(void *data, struct ipath_sge_state *ss, u32 length) { struct ipath_sge *sge = &ss->sge; while (length) { u32 len = sge->length; if (len > length) len = length; if (len > sge->sge_length) len = sge->sge_length; BUG_ON(len == 0); memcpy(data, sge->vaddr, len); sge->vaddr += len; sge->length -= len; sge->sge_length -= len; if (sge->sge_length == 0) { if (--ss->num_sge) *sge = *ss->sg_list++; } else if (sge->length == 0 && sge->mr != NULL) { if (++sge->n >= IPATH_SEGSZ) { if (++sge->m >= sge->mr->mapsz) break; sge->n = 0; } sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr; sge->length = sge->mr->map[sge->m]->segs[sge->n].length; } data += len; length -= len; } } /** * ipath_post_one_send - post one RC, UC, or UD send work request * @qp: the QP to post on * @wr: the work request to send */ static int ipath_post_one_send(struct ipath_qp *qp, struct ib_send_wr *wr) { struct ipath_swqe *wqe; u32 next; int i; int j; int acc; int ret; unsigned long flags; struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd; spin_lock_irqsave(&qp->s_lock, flags); if 
(qp->ibqp.qp_type != IB_QPT_SMI && !(dd->ipath_flags & IPATH_LINKACTIVE)) { ret = -ENETDOWN; goto bail; } /* Check that state is OK to post send. */ if (unlikely(!(ib_ipath_state_ops[qp->state] & IPATH_POST_SEND_OK))) goto bail_inval; /* IB spec says that num_sge == 0 is OK. */ if (wr->num_sge > qp->s_max_sge) goto bail_inval; /* * Don't allow RDMA reads or atomic operations on UC or * undefined operations. * Make sure buffer is large enough to hold the result for atomics. */ if (qp->ibqp.qp_type == IB_QPT_UC) { if ((unsigned) wr->opcode >= IB_WR_RDMA_READ) goto bail_inval; } else if (qp->ibqp.qp_type == IB_QPT_UD) { /* Check UD opcode */ if (wr->opcode != IB_WR_SEND && wr->opcode != IB_WR_SEND_WITH_IMM) goto bail_inval; /* Check UD destination address PD */ if (qp->ibqp.pd != wr->wr.ud.ah->pd) goto bail_inval; } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) goto bail_inval; else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP && (wr->num_sge == 0 || wr->sg_list[0].length < sizeof(u64) || wr->sg_list[0].addr & (sizeof(u64) - 1))) goto bail_inval; else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic) goto bail_inval; next = qp->s_head + 1; if (next >= qp->s_size) next = 0; if (next == qp->s_last) { ret = -ENOMEM; goto bail; } wqe = get_swqe_ptr(qp, qp->s_head); wqe->wr = *wr; wqe->length = 0; if (wr->num_sge) { acc = wr->opcode >= IB_WR_RDMA_READ ? 
IB_ACCESS_LOCAL_WRITE : 0; for (i = 0, j = 0; i < wr->num_sge; i++) { u32 length = wr->sg_list[i].length; int ok; if (length == 0) continue; ok = ipath_lkey_ok(qp, &wqe->sg_list[j], &wr->sg_list[i], acc); if (!ok) goto bail_inval; wqe->length += length; j++; } wqe->wr.num_sge = j; } if (qp->ibqp.qp_type == IB_QPT_UC || qp->ibqp.qp_type == IB_QPT_RC) { if (wqe->length > 0x80000000U) goto bail_inval; } else if (wqe->length > to_idev(qp->ibqp.device)->dd->ipath_ibmtu) goto bail_inval; wqe->ssn = qp->s_ssn++; qp->s_head = next; ret = 0; goto bail; bail_inval: ret = -EINVAL; bail: spin_unlock_irqrestore(&qp->s_lock, flags); return ret; } /** * ipath_post_send - post a send on a QP * @ibqp: the QP to post the send on * @wr: the list of work requests to post * @bad_wr: the first bad WR is put here * * This may be called from interrupt context. */ static int ipath_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, struct ib_send_wr **bad_wr) { struct ipath_qp *qp = to_iqp(ibqp); int err = 0; for (; wr; wr = wr->next) { err = ipath_post_one_send(qp, wr); if (err) { *bad_wr = wr; goto bail; } } /* Try to do the send work in the caller's context. */ ipath_do_send((unsigned long) qp); bail: return err; } /** * ipath_post_receive - post a receive on a QP * @ibqp: the QP to post the receive on * @wr: the WR to post * @bad_wr: the first bad WR is put here * * This may be called from interrupt context. */ static int ipath_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, struct ib_recv_wr **bad_wr) { struct ipath_qp *qp = to_iqp(ibqp); struct ipath_rwq *wq = qp->r_rq.wq; unsigned long flags; int ret; /* Check that state is OK to post receive. 
 */
	/*
	 * NOTE(review): this is the tail of a receive-posting routine whose
	 * head lies outside this chunk (presumably ipath_post_receive());
	 * confirm against the full file.
	 */
	if (!(ib_ipath_state_ops[qp->state] & IPATH_POST_RECV_OK) || !wq) {
		*bad_wr = wr;
		ret = -EINVAL;
		goto bail;
	}

	/* Post each work request on the circular receive queue in turn. */
	for (; wr; wr = wr->next) {
		struct ipath_rwqe *wqe;
		u32 next;
		int i;

		/* Reject a request with more SGEs than the RQ supports. */
		if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {
			*bad_wr = wr;
			ret = -EINVAL;
			goto bail;
		}

		spin_lock_irqsave(&qp->r_rq.lock, flags);
		next = wq->head + 1;
		if (next >= qp->r_rq.size)
			next = 0;
		/* head + 1 == tail means the circular queue is full. */
		if (next == wq->tail) {
			spin_unlock_irqrestore(&qp->r_rq.lock, flags);
			*bad_wr = wr;
			ret = -ENOMEM;
			goto bail;
		}

		wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
		wqe->wr_id = wr->wr_id;
		wqe->num_sge = wr->num_sge;
		for (i = 0; i < wr->num_sge; i++)
			wqe->sg_list[i] = wr->sg_list[i];
		/* Make sure queue entry is written before the head index. */
		smp_wmb();
		wq->head = next;
		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
	}
	ret = 0;

bail:
	return ret;
}

/**
 * ipath_qp_rcv - processing an incoming packet on a QP
 * @dev: the device the packet came on
 * @hdr: the packet header
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP the packet came on
 *
 * This is called from ipath_ib_rcv() to process an incoming packet
 * for the given QP.
 * Called at interrupt level.
 */
static void ipath_qp_rcv(struct ipath_ibdev *dev,
			 struct ipath_ib_header *hdr, int has_grh,
			 void *data, u32 tlen, struct ipath_qp *qp)
{
	/* Check for valid receive state. */
	if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
		dev->n_pkt_drops++;
		return;
	}

	/* Dispatch on QP type to the matching protocol handler. */
	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (ib_ipath_disable_sma)
			break;
		/* FALLTHROUGH */
	case IB_QPT_UD:
		ipath_ud_rcv(dev, hdr, has_grh, data, tlen, qp);
		break;

	case IB_QPT_RC:
		ipath_rc_rcv(dev, hdr, has_grh, data, tlen, qp);
		break;

	case IB_QPT_UC:
		ipath_uc_rcv(dev, hdr, has_grh, data, tlen, qp);
		break;

	default:
		break;
	}
}

/**
 * ipath_ib_rcv - process an incoming packet
 * @arg: the device pointer
 * @rhdr: the header of the packet
 * @data: the packet data
 * @tlen: the packet length
 *
 * This is called from ipath_kreceive() to process an incoming packet at
 * interrupt level. Tlen is the length of the header + data + CRC in bytes.
 */
void ipath_ib_rcv(struct ipath_ibdev *dev, void *rhdr, void *data,
		  u32 tlen)
{
	struct ipath_ib_header *hdr = rhdr;
	struct ipath_other_headers *ohdr;
	struct ipath_qp *qp;
	u32 qp_num;
	int lnh;
	u8 opcode;
	u16 lid;

	if (unlikely(dev == NULL))
		goto bail;

	if (unlikely(tlen < 24)) {	/* LRH+BTH+CRC */
		dev->rcv_errors++;
		goto bail;
	}

	/* Check for a valid destination LID (see ch. 7.11.1). */
	lid = be16_to_cpu(hdr->lrh[1]);
	if (lid < IPATH_MULTICAST_LID_BASE) {
		/* Mask off the LMC bits before comparing with our LID. */
		lid &= ~((1 << dev->dd->ipath_lmc) - 1);
		if (unlikely(lid != dev->dd->ipath_lid)) {
			dev->rcv_errors++;
			goto bail;
		}
	}

	/* Check for GRH */
	lnh = be16_to_cpu(hdr->lrh[0]) & 3;
	if (lnh == IPATH_LRH_BTH)
		ohdr = &hdr->u.oth;
	else if (lnh == IPATH_LRH_GRH)
		ohdr = &hdr->u.l.oth;
	else {
		dev->rcv_errors++;
		goto bail;
	}

	/* Per-opcode receive statistics. */
	opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0x7f;
	dev->opstats[opcode].n_bytes += tlen;
	dev->opstats[opcode].n_packets++;

	/* Get the destination QP number. */
	qp_num = be32_to_cpu(ohdr->bth[1]) & IPATH_QPN_MASK;
	if (qp_num == IPATH_MULTICAST_QPN) {
		struct ipath_mcast *mcast;
		struct ipath_mcast_qp *p;

		/* Multicast packets must carry a GRH. */
		if (lnh != IPATH_LRH_GRH) {
			dev->n_pkt_drops++;
			goto bail;
		}
		mcast = ipath_mcast_find(&hdr->u.l.grh.dgid);
		if (mcast == NULL) {
			dev->n_pkt_drops++;
			goto bail;
		}
		dev->n_multicast_rcv++;
		/* Deliver a copy of the packet to every attached QP. */
		list_for_each_entry_rcu(p, &mcast->qp_list, list)
			ipath_qp_rcv(dev, hdr, 1, data, tlen, p->qp);
		/*
		 * Notify ipath_multicast_detach() if it is waiting for us
		 * to finish.
		 */
		if (atomic_dec_return(&mcast->refcount) <= 1)
			wake_up(&mcast->wait);
	} else {
		qp = ipath_lookup_qpn(&dev->qp_table, qp_num);
		if (qp) {
			dev->n_unicast_rcv++;
			ipath_qp_rcv(dev, hdr, lnh == IPATH_LRH_GRH, data,
				     tlen, qp);
			/*
			 * Notify ipath_destroy_qp() if it is waiting
			 * for us to finish.
			 */
			if (atomic_dec_and_test(&qp->refcount))
				wake_up(&qp->wait);
		} else
			dev->n_pkt_drops++;
	}

bail:;
}

/**
 * ipath_ib_timer - verbs timer
 * @arg: the device pointer
 *
 * This is called from ipath_do_rcv_timer() at interrupt level to check for
 * QPs which need retransmits and to collect performance numbers.
 */
static void ipath_ib_timer(struct ipath_ibdev *dev)
{
	struct ipath_qp *resend = NULL;
	struct ipath_qp *rnr = NULL;
	struct list_head *last;
	struct ipath_qp *qp;
	unsigned long flags;

	if (dev == NULL)
		return;

	spin_lock_irqsave(&dev->pending_lock, flags);
	/* Start filling the next pending queue. */
	if (++dev->pending_index >= ARRAY_SIZE(dev->pending))
		dev->pending_index = 0;
	/* Save any requests still in the new queue, they have timed out. */
	last = &dev->pending[dev->pending_index];
	while (!list_empty(last)) {
		qp = list_entry(last->next, struct ipath_qp, timerwait);
		list_del_init(&qp->timerwait);
		/* Chain timed-out QPs on a private list for processing
		 * after the lock is dropped; hold a reference on each. */
		qp->timer_next = resend;
		resend = qp;
		atomic_inc(&qp->refcount);
	}
	last = &dev->rnrwait;
	if (!list_empty(last)) {
		qp = list_entry(last->next, struct ipath_qp, timerwait);
		if (--qp->s_rnr_timeout == 0) {
			/* Collect every QP whose RNR timeout just expired. */
			do {
				list_del_init(&qp->timerwait);
				qp->timer_next = rnr;
				rnr = qp;
				atomic_inc(&qp->refcount);
				if (list_empty(last))
					break;
				qp = list_entry(last->next, struct ipath_qp,
						timerwait);
			} while (qp->s_rnr_timeout == 0);
		}
	}
	/*
	 * We should only be in the started state if pma_sample_start != 0
	 */
	if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED &&
	    --dev->pma_sample_start == 0) {
		dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
		ipath_snapshot_counters(dev->dd, &dev->ipath_sword,
					&dev->ipath_rword,
					&dev->ipath_spkts,
					&dev->ipath_rpkts,
					&dev->ipath_xmit_wait);
	}
	if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) {
		if (dev->pma_sample_interval == 0) {
			u64 ta, tb, tc, td, te;

			/* Sample window done: report deltas vs. the
			 * snapshot taken when sampling started. */
			dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
			ipath_snapshot_counters(dev->dd, &ta, &tb,
						&tc, &td, &te);

			dev->ipath_sword = ta - dev->ipath_sword;
			dev->ipath_rword = tb - dev->ipath_rword;
			dev->ipath_spkts = tc - dev->ipath_spkts;
			dev->ipath_rpkts = td - dev->ipath_rpkts;
			dev->ipath_xmit_wait = te - dev->ipath_xmit_wait;
		} else
			dev->pma_sample_interval--;
	}
	spin_unlock_irqrestore(&dev->pending_lock, flags);

	/* XXX What if timer fires again while this is running? */
	while (resend != NULL) {
		qp = resend;
		resend = qp->timer_next;

		spin_lock_irqsave(&qp->s_lock, flags);
		if (qp->s_last != qp->s_tail &&
		    ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) {
			dev->n_timeouts++;
			ipath_restart_rc(qp, qp->s_last_psn + 1);
		}
		spin_unlock_irqrestore(&qp->s_lock, flags);

		/* Notify ipath_destroy_qp() if it is waiting. */
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
	while (rnr != NULL) {
		qp = rnr;
		rnr = qp->timer_next;

		spin_lock_irqsave(&qp->s_lock, flags);
		if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)
			ipath_schedule_send(qp);
		spin_unlock_irqrestore(&qp->s_lock, flags);

		/* Notify ipath_destroy_qp() if it is waiting. */
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}

/*
 * Advance the SGE state by @length consumed bytes, stepping to the next
 * SGE (or the next memory-region segment) when the current one is used up.
 */
static void update_sge(struct ipath_sge_state *ss, u32 length)
{
	struct ipath_sge *sge = &ss->sge;

	sge->vaddr += length;
	sge->length -= length;
	sge->sge_length -= length;
	if (sge->sge_length == 0) {
		if (--ss->num_sge)
			*sge = *ss->sg_list++;
	} else if (sge->length == 0 && sge->mr != NULL) {
		if (++sge->n >= IPATH_SEGSZ) {
			if (++sge->m >= sge->mr->mapsz)
				return;
			sge->n = 0;
		}
		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
	}
}

/*
 * Endianness-dependent helpers used by copy_io() to assemble partial
 * 32-bit words from unaligned source data.  The two variants mirror
 * each other so the callers are byte-order independent.
 */
#ifdef __LITTLE_ENDIAN
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#else
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#endif

/*
 * Copy @length bytes described by the SGE state into a PIO send buffer,
 * writing whole 32-bit words and honoring write-combining flush ordering
 * (the final "trigger" word must be written last).
 */
static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
		    u32 length, unsigned flush_wc)
{
	u32 extra = 0;
	u32 data = 0;
	u32 last;

	while (1) {
		u32 len = ss->sge.length;
		u32 off;

		/* Clamp to the remaining packet and SGE lengths. */
		if (len > length)
			len = length;
		if (len > ss->sge.sge_length)
			len = ss->sge.sge_length;
		BUG_ON(len == 0);
		/* If the source address is not aligned, try to align it.
 */
		off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
		if (off) {
			/* Unaligned source: read the containing aligned
			 * word and salvage the bytes above the offset. */
			u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
					    ~(sizeof(u32) - 1));
			u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
			u32 y;

			y = sizeof(u32) - off;
			if (len > y)
				len = y;
			if (len + extra >= sizeof(u32)) {
				data |= set_upper_bits(v, extra *
						       BITS_PER_BYTE);
				len = sizeof(u32) - extra;
				if (len == length) {
					last = data;
					break;
				}
				__raw_writel(data, piobuf);
				piobuf++;
				extra = 0;
				data = 0;
			} else {
				/* Clear unused upper bytes */
				data |= clear_upper_bytes(v, len, extra);
				if (len == length) {
					last = data;
					break;
				}
				extra += len;
			}
		} else if (extra) {
			/* Source address is aligned. */
			u32 *addr = (u32 *) ss->sge.vaddr;
			int shift = extra * BITS_PER_BYTE;
			int ushift = 32 - shift;
			u32 l = len;

			/* Carry 'extra' leftover bytes across each whole
			 * word copied. */
			while (l >= sizeof(u32)) {
				u32 v = *addr;

				data |= set_upper_bits(v, shift);
				__raw_writel(data, piobuf);
				data = get_upper_bits(v, ushift);
				piobuf++;
				addr++;
				l -= sizeof(u32);
			}
			/*
			 * We still have 'extra' number of bytes leftover.
			 */
			if (l) {
				u32 v = *addr;

				if (l + extra >= sizeof(u32)) {
					data |= set_upper_bits(v, shift);
					len -= l + extra - sizeof(u32);
					if (len == length) {
						last = data;
						break;
					}
					__raw_writel(data, piobuf);
					piobuf++;
					extra = 0;
					data = 0;
				} else {
					/* Clear unused upper bytes */
					data |= clear_upper_bytes(v, l, extra);
					if (len == length) {
						last = data;
						break;
					}
					extra += l;
				}
			} else if (len == length) {
				last = data;
				break;
			}
		} else if (len == length) {
			u32 w;

			/*
			 * Need to round up for the last dword in the
			 * packet.
			 */
			w = (len + 3) >> 2;
			__iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
			piobuf += w - 1;
			last = ((u32 *) ss->sge.vaddr)[w - 1];
			break;
		} else {
			u32 w = len >> 2;

			__iowrite32_copy(piobuf, ss->sge.vaddr, w);
			piobuf += w;

			extra = len & (sizeof(u32) - 1);
			if (extra) {
				u32 v = ((u32 *) ss->sge.vaddr)[w];

				/* Clear unused upper bytes */
				data = clear_upper_bytes(v, extra, 0);
			}
		}
		update_sge(ss, len);
		length -= len;
	}
	/* Update address before sending packet. */
	update_sge(ss, length);
	if (flush_wc) {
		/* must flush early everything before trigger word */
		ipath_flush_wc();
		__raw_writel(last, piobuf);
		/* be sure trigger word is written */
		ipath_flush_wc();
	} else
		__raw_writel(last, piobuf);
}

/*
 * Convert IB rate to delay multiplier.
 */
unsigned ipath_ib_rate_to_mult(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return 8;
	case IB_RATE_5_GBPS:   return 4;
	case IB_RATE_10_GBPS:  return 2;
	case IB_RATE_20_GBPS:  return 1;
	default:	       return 0;
	}
}

/*
 * Convert delay multiplier to IB rate
 */
static enum ib_rate ipath_mult_to_ib_rate(unsigned mult)
{
	switch (mult) {
	case 8:	 return IB_RATE_2_5_GBPS;
	case 4:	 return IB_RATE_5_GBPS;
	case 2:	 return IB_RATE_10_GBPS;
	case 1:	 return IB_RATE_20_GBPS;
	default: return IB_RATE_PORT_CURRENT;
	}
}

/* Take a free send-DMA tx request off the device free list, or NULL. */
static inline struct ipath_verbs_txreq *get_txreq(struct ipath_ibdev *dev)
{
	struct ipath_verbs_txreq *tx = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev->pending_lock, flags);
	if (!list_empty(&dev->txreq_free)) {
		struct list_head *l = dev->txreq_free.next;

		list_del(l);
		tx = list_entry(l, struct ipath_verbs_txreq, txreq.list);
	}
	spin_unlock_irqrestore(&dev->pending_lock, flags);
	return tx;
}

/* Return a send-DMA tx request to the device free list. */
static inline void put_txreq(struct ipath_ibdev *dev,
			     struct ipath_verbs_txreq *tx)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->pending_lock, flags);
	list_add(&tx->txreq.list, &dev->txreq_free);
	spin_unlock_irqrestore(&dev->pending_lock, flags);
}

/*
 * Send-DMA completion callback: report the work completion, restart the
 * send engine if it was waiting on DMA, free any copy buffer, and drop
 * the QP reference taken when the request was queued.
 */
static void sdma_complete(void *cookie, int status)
{
	struct ipath_verbs_txreq *tx = cookie;
	struct ipath_qp *qp = tx->qp;
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	unsigned long flags;
	enum ib_wc_status ibs = status == IPATH_SDMA_TXREQ_S_OK ?
		IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR;

	if (atomic_dec_and_test(&qp->s_dma_busy)) {
		spin_lock_irqsave(&qp->s_lock, flags);
		if (tx->wqe)
			ipath_send_complete(qp, tx->wqe, ibs);
		if ((ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND &&
		     qp->s_last != qp->s_head) ||
		    (qp->s_flags & IPATH_S_WAIT_DMA))
			ipath_schedule_send(qp);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		wake_up(&qp->wait_dma);
	} else if (tx->wqe) {
		spin_lock_irqsave(&qp->s_lock, flags);
		ipath_send_complete(qp, tx->wqe, ibs);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	}

	if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF)
		kfree(tx->txreq.map_addr);
	put_txreq(dev, tx);

	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}

/* Drop the QP's DMA-busy count; when it hits zero, kick the sender. */
static void decrement_dma_busy(struct ipath_qp *qp)
{
	unsigned long flags;

	if (atomic_dec_and_test(&qp->s_dma_busy)) {
		spin_lock_irqsave(&qp->s_lock, flags);
		if ((ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND &&
		     qp->s_last != qp->s_head) ||
		    (qp->s_flags & IPATH_S_WAIT_DMA))
			ipath_schedule_send(qp);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		wake_up(&qp->wait_dma);
	}
}

/*
 * Compute the number of clock cycles of delay before sending the next packet.
 * The multipliers reflect the number of clocks for the fastest rate so
 * one tick at 4xDDR is 8 ticks at 1xSDR.
 * If the destination port will take longer to receive a packet than
 * the outgoing link can send it, we need to delay sending the next packet
 * by the difference in time it takes the receiver to receive and the sender
 * to send this packet.
 * Note that this delay is always correct for UC and RC but not always
 * optimal for UD. For UD, the destination HCA can be different for each
 * packet, in which case, we could send packets to a different destination
 * while "waiting" for the delay. The overhead for doing this without
 * HW support is more than just paying the cost of delaying some packets
 * unnecessarily.
 */
static inline unsigned ipath_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult)
{
	return (rcv_mult > snd_mult) ?
		(plen * (rcv_mult - snd_mult) + 1) >> 1 : 0;
}

/*
 * Queue a verbs packet on the send-DMA engine.  Retries a previously
 * constructed packet first; otherwise builds a new tx request, either
 * DMAing straight from the SGE list or from a bounce buffer copy.
 */
static int ipath_verbs_send_dma(struct ipath_qp *qp,
				struct ipath_ib_header *hdr, u32 hdrwords,
				struct ipath_sge_state *ss, u32 len,
				u32 plen, u32 dwords)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ipath_devdata *dd = dev->dd;
	struct ipath_verbs_txreq *tx;
	u32 *piobuf;
	u32 control;
	u32 ndesc;
	int ret;

	tx = qp->s_tx;
	if (tx) {
		qp->s_tx = NULL;
		/* resend previously constructed packet */
		atomic_inc(&qp->s_dma_busy);
		ret = ipath_sdma_verbs_send(dd, tx->ss, tx->len, tx);
		if (ret) {
			qp->s_tx = tx;
			decrement_dma_busy(qp);
		}
		goto bail;
	}

	tx = get_txreq(dev);
	if (!tx) {
		ret = -EBUSY;
		goto bail;
	}

	/*
	 * Get the saved delay count we computed for the previous packet
	 * and save the delay count for this packet to be used next time
	 * we get here.
	 */
	control = qp->s_pkt_delay;
	qp->s_pkt_delay = ipath_pkt_delay(plen, dd->delay_mult, qp->s_dmult);

	tx->qp = qp;
	atomic_inc(&qp->refcount);
	tx->wqe = qp->s_wqe;
	tx->txreq.callback = sdma_complete;
	tx->txreq.callback_cookie = tx;
	tx->txreq.flags = IPATH_SDMA_TXREQ_F_HEADTOHOST |
		IPATH_SDMA_TXREQ_F_INTREQ | IPATH_SDMA_TXREQ_F_FREEDESC;
	if (plen + 1 >= IPATH_SMALLBUF_DWORDS)
		tx->txreq.flags |= IPATH_SDMA_TXREQ_F_USELARGEBUF;

	/* VL15 packets bypass credit check */
	if ((be16_to_cpu(hdr->lrh[0]) >> 12) == 15) {
		control |= 1ULL << 31;
		tx->txreq.flags |= IPATH_SDMA_TXREQ_F_VL15;
	}

	if (len) {
		/*
		 * Don't try to DMA if it takes more descriptors than
		 * the queue holds.
		 */
		ndesc = ipath_count_sge(ss, len);
		if (ndesc >= dd->ipath_sdma_descq_cnt)
			ndesc = 0;
	} else
		ndesc = 1;
	if (ndesc) {
		/* DMA directly from the SGE list; only the PBC + header
		 * live in the tx request itself. */
		tx->hdr.pbc[0] = cpu_to_le32(plen);
		tx->hdr.pbc[1] = cpu_to_le32(control);
		memcpy(&tx->hdr.hdr, hdr, hdrwords << 2);
		tx->txreq.sg_count = ndesc;
		tx->map_len = (hdrwords + 2) << 2;
		tx->txreq.map_addr = &tx->hdr;
		atomic_inc(&qp->s_dma_busy);
		ret = ipath_sdma_verbs_send(dd, ss, dwords, tx);
		if (ret) {
			/* save ss and length in dwords */
			tx->ss = ss;
			tx->len = dwords;
			qp->s_tx = tx;
			decrement_dma_busy(qp);
		}
		goto bail;
	}

	/* Allocate a buffer and copy the header and payload to it. */
	tx->map_len = (plen + 1) << 2;
	piobuf = kmalloc(tx->map_len, GFP_ATOMIC);
	if (unlikely(piobuf == NULL)) {
		ret = -EBUSY;
		goto err_tx;
	}
	tx->txreq.map_addr = piobuf;
	tx->txreq.flags |= IPATH_SDMA_TXREQ_F_FREEBUF;
	tx->txreq.sg_count = 1;

	*piobuf++ = (__force u32) cpu_to_le32(plen);
	*piobuf++ = (__force u32) cpu_to_le32(control);
	memcpy(piobuf, hdr, hdrwords << 2);
	ipath_copy_from_sge(piobuf + hdrwords, ss, len);

	atomic_inc(&qp->s_dma_busy);
	ret = ipath_sdma_verbs_send(dd, NULL, 0, tx);
	/*
	 * If we couldn't queue the DMA request, save the info
	 * and try again later rather than destroying the
	 * buffer and undoing the side effects of the copy.
	 */
	if (ret) {
		tx->ss = NULL;
		tx->len = 0;
		qp->s_tx = tx;
		decrement_dma_busy(qp);
	}
	dev->n_unaligned++;
	goto bail;

err_tx:
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
	put_txreq(dev, tx);
bail:
	return ret;
}

/*
 * Send a verbs packet through a programmed-I/O buffer, observing the
 * write-combining flush ordering the hardware requires (PBC first,
 * trigger word last).
 */
static int ipath_verbs_send_pio(struct ipath_qp *qp,
				struct ipath_ib_header *ibhdr, u32 hdrwords,
				struct ipath_sge_state *ss, u32 len,
				u32 plen, u32 dwords)
{
	struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd;
	u32 *hdr = (u32 *) ibhdr;
	u32 __iomem *piobuf;
	unsigned flush_wc;
	u32 control;
	int ret;
	unsigned long flags;

	piobuf = ipath_getpiobuf(dd, plen, NULL);
	if (unlikely(piobuf == NULL)) {
		ret = -EBUSY;
		goto bail;
	}

	/*
	 * Get the saved delay count we computed for the previous packet
	 * and save the delay count for this packet to be used next time
	 * we get here.
	 */
	control = qp->s_pkt_delay;
	qp->s_pkt_delay = ipath_pkt_delay(plen, dd->delay_mult, qp->s_dmult);

	/* VL15 packets bypass credit check */
	if ((be16_to_cpu(ibhdr->lrh[0]) >> 12) == 15)
		control |= 1ULL << 31;

	/*
	 * Write the length to the control qword plus any needed flags.
	 * We have to flush after the PBC for correctness on some cpus
	 * or WC buffer can be written out of order.
	 */
	writeq(((u64) control << 32) | plen, piobuf);
	piobuf += 2;

	flush_wc = dd->ipath_flags & IPATH_PIO_FLUSH_WC;
	if (len == 0) {
		/*
		 * If there is just the header portion, must flush before
		 * writing last word of header for correctness, and after
		 * the last header word (trigger word).
		 */
		if (flush_wc) {
			ipath_flush_wc();
			__iowrite32_copy(piobuf, hdr, hdrwords - 1);
			ipath_flush_wc();
			__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
			ipath_flush_wc();
		} else
			__iowrite32_copy(piobuf, hdr, hdrwords);
		goto done;
	}

	if (flush_wc)
		ipath_flush_wc();
	__iowrite32_copy(piobuf, hdr, hdrwords);
	piobuf += hdrwords;

	/* The common case is aligned and contained in one segment. */
	if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
		   !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
		u32 *addr = (u32 *) ss->sge.vaddr;

		/* Update address before sending packet. */
		update_sge(ss, len);
		if (flush_wc) {
			__iowrite32_copy(piobuf, addr, dwords - 1);
			/* must flush early everything before trigger word */
			ipath_flush_wc();
			__raw_writel(addr[dwords - 1], piobuf + dwords - 1);
			/* be sure trigger word is written */
			ipath_flush_wc();
		} else
			__iowrite32_copy(piobuf, addr, dwords);
		goto done;
	}
	/* Slow path: unaligned or multi-segment payload. */
	copy_io(piobuf, ss, len, flush_wc);
done:
	if (qp->s_wqe) {
		spin_lock_irqsave(&qp->s_lock, flags);
		ipath_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	}
	ret = 0;
bail:
	return ret;
}

/**
 * ipath_verbs_send - send a packet
 * @qp: the QP to send on
 * @hdr: the packet header
 * @hdrwords: the number of 32-bit words in the header
 * @ss: the SGE to send
 * @len: the length of the packet in bytes
 */
int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr,
		     u32 hdrwords, struct ipath_sge_state *ss, u32 len)
{
	struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd;
	u32 plen;
	int ret;
	u32 dwords = (len + 3) >> 2;

	/*
	 * Calculate the send buffer trigger address.
	 * The +1 counts for the pbc control dword following the pbc length.
	 */
	plen = hdrwords + dwords + 1;

	/*
	 * VL15 packets (IB_QPT_SMI) will always use PIO, so we
	 * can defer SDMA restart until link goes ACTIVE without
	 * worrying about just how we got there.
	 */
	if (qp->ibqp.qp_type == IB_QPT_SMI ||
	    !(dd->ipath_flags & IPATH_HAS_SEND_DMA))
		ret = ipath_verbs_send_pio(qp, hdr, hdrwords, ss, len,
					   plen, dwords);
	else
		ret = ipath_verbs_send_dma(qp, hdr, hdrwords, ss, len,
					   plen, dwords);

	return ret;
}

/*
 * Snapshot the raw send/receive word, packet, and transmit-wait counters.
 * Returns -EINVAL when the hardware is not initialized (no chip, freeze).
 */
int ipath_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
			    u64 *rwords, u64 *spkts, u64 *rpkts,
			    u64 *xmit_wait)
{
	int ret;

	if (!(dd->ipath_flags & IPATH_INITTED)) {
		/* no hardware, freeze, etc. */
		ret = -EINVAL;
		goto bail;
	}
	*swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
	*rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
	*spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
	*rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
	*xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt);

	ret = 0;

bail:
	return ret;
}

/**
 * ipath_get_counters - get various chip counters
 * @dd: the infinipath device
 * @cntrs: counters are placed here
 *
 * Return the counters needed by recv_pma_get_portcounters().
 */
int ipath_get_counters(struct ipath_devdata *dd,
		       struct ipath_verbs_counters *cntrs)
{
	struct ipath_cregs const *crp = dd->ipath_cregs;
	int ret;

	if (!(dd->ipath_flags & IPATH_INITTED)) {
		/* no hardware, freeze, etc. */
		ret = -EINVAL;
		goto bail;
	}
	cntrs->symbol_error_counter =
		ipath_snap_cntr(dd, crp->cr_ibsymbolerrcnt);
	cntrs->link_error_recovery_counter =
		ipath_snap_cntr(dd, crp->cr_iblinkerrrecovcnt);
	/*
	 * The link downed counter counts when the other side downs the
	 * connection. We add in the number of times we downed the link
	 * due to local link integrity errors to compensate.
	 */
	cntrs->link_downed_counter =
		ipath_snap_cntr(dd, crp->cr_iblinkdowncnt);
	cntrs->port_rcv_errors =
		ipath_snap_cntr(dd, crp->cr_rxdroppktcnt) +
		ipath_snap_cntr(dd, crp->cr_rcvovflcnt) +
		ipath_snap_cntr(dd, crp->cr_portovflcnt) +
		ipath_snap_cntr(dd, crp->cr_err_rlencnt) +
		ipath_snap_cntr(dd, crp->cr_invalidrlencnt) +
		ipath_snap_cntr(dd, crp->cr_errlinkcnt) +
		ipath_snap_cntr(dd, crp->cr_erricrccnt) +
		ipath_snap_cntr(dd, crp->cr_errvcrccnt) +
		ipath_snap_cntr(dd, crp->cr_errlpcrccnt) +
		ipath_snap_cntr(dd, crp->cr_badformatcnt) +
		dd->ipath_rxfc_unsupvl_errs;
	/* Some chips lack these registers; a zero offset means absent. */
	if (crp->cr_rxotherlocalphyerrcnt)
		cntrs->port_rcv_errors +=
			ipath_snap_cntr(dd, crp->cr_rxotherlocalphyerrcnt);
	if (crp->cr_rxvlerrcnt)
		cntrs->port_rcv_errors +=
			ipath_snap_cntr(dd, crp->cr_rxvlerrcnt);
	cntrs->port_rcv_remphys_errors =
		ipath_snap_cntr(dd, crp->cr_rcvebpcnt);
	cntrs->port_xmit_discards = ipath_snap_cntr(dd, crp->cr_unsupvlcnt);
	cntrs->port_xmit_data = ipath_snap_cntr(dd, crp->cr_wordsendcnt);
	cntrs->port_rcv_data = ipath_snap_cntr(dd, crp->cr_wordrcvcnt);
	cntrs->port_xmit_packets = ipath_snap_cntr(dd, crp->cr_pktsendcnt);
	cntrs->port_rcv_packets = ipath_snap_cntr(dd, crp->cr_pktrcvcnt);
	cntrs->local_link_integrity_errors =
		crp->cr_locallinkintegrityerrcnt ?
		ipath_snap_cntr(dd, crp->cr_locallinkintegrityerrcnt) :
		((dd->ipath_flags & IPATH_GPIO_ERRINTRS) ?
		 dd->ipath_lli_errs : dd->ipath_lli_errors);
	cntrs->excessive_buffer_overrun_errors =
		crp->cr_excessbufferovflcnt ?
		ipath_snap_cntr(dd, crp->cr_excessbufferovflcnt) :
		dd->ipath_overrun_thresh_errs;
	cntrs->vl15_dropped = crp->cr_vl15droppedpktcnt ?
		ipath_snap_cntr(dd, crp->cr_vl15droppedpktcnt) : 0;

	ret = 0;

bail:
	return ret;
}

/**
 * ipath_ib_piobufavail - callback when a PIO buffer is available
 * @arg: the device pointer
 *
 * This is called from ipath_intr() at interrupt level when a PIO buffer is
 * available after ipath_verbs_send() returned an error that no buffers were
 * available.
 * Return 1 if we consumed all the PIO buffers and we still have
 * QPs waiting for buffers (for now, just restart the send tasklet and
 * return zero).
 */
int ipath_ib_piobufavail(struct ipath_ibdev *dev)
{
	struct list_head *list;
	struct ipath_qp *qplist;
	struct ipath_qp *qp;
	unsigned long flags;

	if (dev == NULL)
		goto bail;
	list = &dev->piowait;
	qplist = NULL;

	/* Move all waiting QPs to a private list, taking a reference
	 * on each, so they can be kicked after the lock is dropped. */
	spin_lock_irqsave(&dev->pending_lock, flags);
	while (!list_empty(list)) {
		qp = list_entry(list->next, struct ipath_qp, piowait);
		list_del_init(&qp->piowait);
		qp->pio_next = qplist;
		qplist = qp;
		atomic_inc(&qp->refcount);
	}
	spin_unlock_irqrestore(&dev->pending_lock, flags);

	while (qplist != NULL) {
		qp = qplist;
		qplist = qp->pio_next;

		spin_lock_irqsave(&qp->s_lock, flags);
		if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)
			ipath_schedule_send(qp);
		spin_unlock_irqrestore(&qp->s_lock, flags);

		/* Notify ipath_destroy_qp() if it is waiting. */
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}

bail:
	return 0;
}

/* ib_device_attr query: report fixed limits and device identity. */
static int ipath_query_device(struct ib_device *ibdev,
			      struct ib_device_attr *props)
{
	struct ipath_ibdev *dev = to_idev(ibdev);

	memset(props, 0, sizeof(*props));

	props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
		IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
		IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
		IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
	props->page_size_cap = PAGE_SIZE;
	props->vendor_id =
		IPATH_SRC_OUI_1 << 16 | IPATH_SRC_OUI_2 << 8 | IPATH_SRC_OUI_3;
	props->vendor_part_id = dev->dd->ipath_deviceid;
	props->hw_ver = dev->dd->ipath_pcirev;

	props->sys_image_guid = dev->sys_image_guid;

	props->max_mr_size = ~0ull;
	props->max_qp = ib_ipath_max_qps;
	props->max_qp_wr = ib_ipath_max_qp_wrs;
	props->max_sge = ib_ipath_max_sges;
	props->max_cq = ib_ipath_max_cqs;
	props->max_ah = ib_ipath_max_ahs;
	props->max_cqe = ib_ipath_max_cqes;
	props->max_mr = dev->lk_table.max;
	props->max_fmr = dev->lk_table.max;
	props->max_map_per_fmr = 32767;
	props->max_pd = ib_ipath_max_pds;
	props->max_qp_rd_atom = IPATH_MAX_RDMA_ATOMIC;
	props->max_qp_init_rd_atom = 255;
	/* props->max_res_rd_atom */
	props->max_srq = ib_ipath_max_srqs;
	props->max_srq_wr = ib_ipath_max_srq_wrs;
	props->max_srq_sge = ib_ipath_max_srq_sges;
	/* props->local_ca_ack_delay */
	props->atomic_cap = IB_ATOMIC_GLOB;
	props->max_pkeys = ipath_get_npkeys(dev->dd);
	props->max_mcast_grp = ib_ipath_max_mcast_grps;
	props->max_mcast_qp_attach = ib_ipath_max_mcast_qp_attached;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
		props->max_mcast_grp;

	return 0;
}

/* Map chip IBCS link-training states to IB PortPhysicalState values. */
const u8 ipath_cvt_physportstate[32] = {
	[INFINIPATH_IBCS_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
	[INFINIPATH_IBCS_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
	[INFINIPATH_IBCS_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
	[INFINIPATH_IBCS_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
	[INFINIPATH_IBCS_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
	[INFINIPATH_IBCS_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
	[INFINIPATH_IBCS_LT_STATE_CFGDEBOUNCE] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[INFINIPATH_IBCS_LT_STATE_CFGRCVFCFG] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[INFINIPATH_IBCS_LT_STATE_CFGWAITRMT] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[INFINIPATH_IBCS_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN] =
		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT] =
		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[INFINIPATH_IBCS_LT_STATE_RECOVERIDLE] =
		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[0x10] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x11] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x13] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
};

/* Read the chip's bad-PKEY counter register. */
u32 ipath_get_cr_errpkey(struct ipath_devdata *dd)
{
	return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);
}

/* ib_port_attr query: translate chip state into IB port attributes. */
static int ipath_query_port(struct ib_device *ibdev,
			    u8 port, struct ib_port_attr *props)
{
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_devdata *dd = dev->dd;
	enum ib_mtu mtu;
	u16 lid = dd->ipath_lid;
	u64 ibcstat;

	memset(props, 0, sizeof(*props));
	props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE);
	props->lmc = dd->ipath_lmc;
	props->sm_lid = dev->sm_lid;
	props->sm_sl = dev->sm_sl;
	ibcstat = dd->ipath_lastibcstat;
	/* map LinkState to IB portinfo values. */
	props->state = ipath_ib_linkstate(dd, ibcstat) + 1;

	/* See phys_state_show() */
	props->phys_state = /* MEA: assumes shift == 0 */
		ipath_cvt_physportstate[dd->ipath_lastibcstat &
		dd->ibcs_lts_mask];
	props->port_cap_flags = dev->port_cap_flags;
	props->gid_tbl_len = 1;
	props->max_msg_sz = 0x80000000;
	props->pkey_tbl_len = ipath_get_npkeys(dd);
	props->bad_pkey_cntr = ipath_get_cr_errpkey(dd) -
		dev->z_pkey_violations;
	props->qkey_viol_cntr = dev->qkey_violations;
	props->active_width = dd->ipath_link_width_active;
	/* See rate_show() */
	props->active_speed = dd->ipath_link_speed_active;
	props->max_vl_num = 1;		/* VLCap = VL0 */
	props->init_type_reply = 0;

	props->max_mtu = ipath_mtu4096 ? IB_MTU_4096 : IB_MTU_2048;
	switch (dd->ipath_ibmtu) {
	case 4096:
		mtu = IB_MTU_4096;
		break;
	case 2048:
		mtu = IB_MTU_2048;
		break;
	case 1024:
		mtu = IB_MTU_1024;
		break;
	case 512:
		mtu = IB_MTU_512;
		break;
	case 256:
		mtu = IB_MTU_256;
		break;
	default:
		mtu = IB_MTU_2048;
	}
	props->active_mtu = mtu;
	props->subnet_timeout = dev->subnet_timeout;

	return 0;
}

/* Update the node description and/or system image GUID. */
static int ipath_modify_device(struct ib_device *device,
			       int device_modify_mask,
			       struct ib_device_modify *device_modify)
{
	int ret;

	if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
				   IB_DEVICE_MODIFY_NODE_DESC)) {
		ret = -EOPNOTSUPP;
		goto bail;
	}

	if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC)
		memcpy(device->node_desc, device_modify->node_desc, 64);

	if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
		to_idev(device)->sys_image_guid =
			cpu_to_be64(device_modify->sys_image_guid);

	ret = 0;

bail:
	return ret;
}

/* Apply port capability mask changes, shutdown, and QKEY counter reset. */
static int ipath_modify_port(struct ib_device *ibdev,
			     u8 port, int port_modify_mask,
			     struct ib_port_modify *props)
{
	struct ipath_ibdev *dev = to_idev(ibdev);

	dev->port_cap_flags |= props->set_port_cap_mask;
	dev->port_cap_flags &= ~props->clr_port_cap_mask;
	if (port_modify_mask & IB_PORT_SHUTDOWN)
		ipath_set_linkstate(dev->dd, IPATH_IB_LINKDOWN);
	if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
		dev->qkey_violations = 0;
	return 0;
}

/* Return the single GID (prefix + GUID); only index 0 exists. */
static int ipath_query_gid(struct ib_device *ibdev, u8 port,
			   int index, union ib_gid *gid)
{
	struct ipath_ibdev *dev = to_idev(ibdev);
	int ret;

	if (index >= 1) {
		ret = -EINVAL;
		goto bail;
	}
	gid->global.subnet_prefix = dev->gid_prefix;
	gid->global.interface_id = dev->dd->ipath_guid;

	ret = 0;

bail:
	return ret;
}

static struct ib_pd *ipath_alloc_pd(struct ib_device *ibdev,
				    struct ib_ucontext *context,
				    struct ib_udata *udata)
{
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_pd *pd;
	struct ib_pd *ret;

	/*
	 * This is actually totally arbitrary.	Some correctness tests
	 * assume there's a maximum number of PDs that can be allocated.
	 * We don't actually have this limit, but we fail the test if
	 * we allow allocations of more than we report for this value.
	 */

	pd = kmalloc(sizeof *pd, GFP_KERNEL);
	if (!pd) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	spin_lock(&dev->n_pds_lock);
	if (dev->n_pds_allocated == ib_ipath_max_pds) {
		spin_unlock(&dev->n_pds_lock);
		kfree(pd);
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	dev->n_pds_allocated++;
	spin_unlock(&dev->n_pds_lock);

	/* ib_alloc_pd() will initialize pd->ibpd. */
	pd->user = udata != NULL;

	ret = &pd->ibpd;

bail:
	return ret;
}

static int ipath_dealloc_pd(struct ib_pd *ibpd)
{
	struct ipath_pd *pd = to_ipd(ibpd);
	struct ipath_ibdev *dev = to_idev(ibpd->device);

	spin_lock(&dev->n_pds_lock);
	dev->n_pds_allocated--;
	spin_unlock(&dev->n_pds_lock);

	kfree(pd);

	return 0;
}

/**
 * ipath_create_ah - create an address handle
 * @pd: the protection domain
 * @ah_attr: the attributes of the AH
 *
 * This may be called from interrupt context.
 */
static struct ib_ah *ipath_create_ah(struct ib_pd *pd,
				     struct ib_ah_attr *ah_attr)
{
	struct ipath_ah *ah;
	struct ib_ah *ret;
	struct ipath_ibdev *dev = to_idev(pd->device);
	unsigned long flags;

	/* A multicast address requires a GRH (see ch. 8.4.1). */
	if (ah_attr->dlid >= IPATH_MULTICAST_LID_BASE &&
	    ah_attr->dlid != IPATH_PERMISSIVE_LID &&
	    !(ah_attr->ah_flags & IB_AH_GRH)) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	if (ah_attr->dlid == 0) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	if (ah_attr->port_num < 1 ||
	    ah_attr->port_num > pd->device->phys_port_cnt) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	/* GFP_ATOMIC because this may run in interrupt context. */
	ah = kmalloc(sizeof *ah, GFP_ATOMIC);
	if (!ah) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	spin_lock_irqsave(&dev->n_ahs_lock, flags);
	if (dev->n_ahs_allocated == ib_ipath_max_ahs) {
		spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
		kfree(ah);
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	dev->n_ahs_allocated++;
	spin_unlock_irqrestore(&dev->n_ahs_lock, flags);

	/* ib_create_ah() will initialize ah->ibah.
	 */
	ah->attr = *ah_attr;
	/* Store the IB rate as the driver's internal delay multiplier. */
	ah->attr.static_rate = ipath_ib_rate_to_mult(ah_attr->static_rate);

	ret = &ah->ibah;

bail:
	return ret;
}

/**
 * ipath_destroy_ah - destroy an address handle
 * @ibah: the AH to destroy
 *
 * This may be called from interrupt context.
 */
static int ipath_destroy_ah(struct ib_ah *ibah)
{
	struct ipath_ibdev *dev = to_idev(ibah->device);
	struct ipath_ah *ah = to_iah(ibah);
	unsigned long flags;

	spin_lock_irqsave(&dev->n_ahs_lock, flags);
	dev->n_ahs_allocated--;
	spin_unlock_irqrestore(&dev->n_ahs_lock, flags);

	kfree(ah);

	return 0;
}

/* Return the AH attributes, converting the stored multiplier back to
 * an IB rate. */
static int ipath_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
{
	struct ipath_ah *ah = to_iah(ibah);

	*ah_attr = ah->attr;
	ah_attr->static_rate = ipath_mult_to_ib_rate(ah->attr.static_rate);

	return 0;
}

/**
 * ipath_get_npkeys - return the size of the PKEY table for port 0
 * @dd: the infinipath device
 */
unsigned ipath_get_npkeys(struct ipath_devdata *dd)
{
	return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys);
}

/**
 * ipath_get_pkey - return the indexed PKEY from the port PKEY table
 * @dd: the infinipath device
 * @index: the PKEY index
 */
unsigned ipath_get_pkey(struct ipath_devdata *dd, unsigned index)
{
	unsigned ret;

	/* always a kernel port, no locking needed */
	if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys))
		ret = 0;
	else
		ret = dd->ipath_pd[0]->port_pkeys[index];

	return ret;
}

/* ib_device pkey query: bounds-check the index, then read the table. */
static int ipath_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			    u16 *pkey)
{
	struct ipath_ibdev *dev = to_idev(ibdev);
	int ret;

	if (index >= ipath_get_npkeys(dev->dd)) {
		ret = -EINVAL;
		goto bail;
	}
	*pkey = ipath_get_pkey(dev->dd, index);
	ret = 0;

bail:
	return ret;
}

/**
 * ipath_alloc_ucontext - allocate a ucontest
 * @ibdev: the infiniband device
 * @udata: not used by the InfiniPath driver
 */
static struct ib_ucontext *ipath_alloc_ucontext(struct ib_device *ibdev,
						struct ib_udata *udata)
{
	struct ipath_ucontext *context;
	struct ib_ucontext *ret;

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	ret = &context->ibucontext;

bail:
	return ret;
}

static int ipath_dealloc_ucontext(struct ib_ucontext *context)
{
	kfree(to_iucontext(context));
	return 0;
}

static int ipath_verbs_register_sysfs(struct ib_device *dev);

/* Periodic verbs timer body: run ipath_ib_timer() and re-arm for the
 * next jiffy. */
static void __verbs_timer(unsigned long arg)
{
	struct ipath_devdata *dd = (struct ipath_devdata *) arg;

	/* Handle verbs layer timeouts. */
	ipath_ib_timer(dd->verbs_dev);

	mod_timer(&dd->verbs_timer, jiffies + 1);
}

static int enable_timer(struct ipath_devdata *dd)
{
	/*
	 * Early chips had a design flaw where the chip and kernel idea
	 * of the tail register don't always agree, and therefore we won't
	 * get an interrupt on the next packet received.
	 * If the board supports per packet receive interrupts, use it.
	 * Otherwise, the timer function periodically checks for packets
	 * to cover this case.
	 * Either way, the timer is needed for verbs layer related
	 * processing.
	 */
	if (dd->ipath_flags & IPATH_GPIO_INTR) {
		ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
				 0x2074076542310ULL);
		/* Enable GPIO bit 2 interrupt */
		dd->ipath_gpio_mask |= (u64) (1 << IPATH_GPIO_PORT0_BIT);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
				 dd->ipath_gpio_mask);
	}

	init_timer(&dd->verbs_timer);
	dd->verbs_timer.function = __verbs_timer;
	dd->verbs_timer.data = (unsigned long)dd;
	dd->verbs_timer.expires = jiffies + 1;
	add_timer(&dd->verbs_timer);

	return 0;
}

static int disable_timer(struct ipath_devdata *dd)
{
	/* Disable GPIO bit 2 interrupt */
	if (dd->ipath_flags & IPATH_GPIO_INTR) {
		/* Disable GPIO bit 2 interrupt */
		dd->ipath_gpio_mask &= ~((u64) (1 << IPATH_GPIO_PORT0_BIT));
		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
				 dd->ipath_gpio_mask);
		/*
		 * We might want to undo changes to debugportselect,
		 * but how?
		 */
	}

	del_timer_sync(&dd->verbs_timer);

	return 0;
}

/**
 * ipath_register_ib_device - register our device with the infiniband core
 * @dd: the device data structure
 * Return the allocated ipath_ibdev pointer or NULL on error.
 */
int ipath_register_ib_device(struct ipath_devdata *dd)
{
	struct ipath_verbs_counters cntrs;
	struct ipath_ibdev *idev;
	struct ib_device *dev;
	struct ipath_verbs_txreq *tx;
	unsigned i;
	int ret;

	idev = (struct ipath_ibdev *)ib_alloc_device(sizeof *idev);
	if (idev == NULL) {
		ret = -ENOMEM;
		goto bail;
	}

	dev = &idev->ibdev;

	if (dd->ipath_sdma_descq_cnt) {
		/* One tx request per send-DMA descriptor queue entry. */
		tx = kmalloc(dd->ipath_sdma_descq_cnt * sizeof *tx,
			     GFP_KERNEL);
		if (tx == NULL) {
			ret = -ENOMEM;
			goto err_tx;
		}
	} else
		tx = NULL;
	idev->txreq_bufs = tx;

	/* Only need to initialize non-zero fields. */
	spin_lock_init(&idev->n_pds_lock);
	spin_lock_init(&idev->n_ahs_lock);
	spin_lock_init(&idev->n_cqs_lock);
	spin_lock_init(&idev->n_qps_lock);
	spin_lock_init(&idev->n_srqs_lock);
	spin_lock_init(&idev->n_mcast_grps_lock);

	spin_lock_init(&idev->qp_table.lock);
	spin_lock_init(&idev->lk_table.lock);
	idev->sm_lid = __constant_be16_to_cpu(IB_LID_PERMISSIVE);
	/* Set the prefix to the default value (see ch. 4.1.1) */
	idev->gid_prefix = __constant_cpu_to_be64(0xfe80000000000000ULL);

	ret = ipath_init_qp_table(idev, ib_ipath_qp_table_size);
	if (ret)
		goto err_qp;

	/*
	 * The top ib_ipath_lkey_table_size bits are used to index the
	 * table.  The lower 8 bits can be owned by the user (copied from
	 * the LKEY).  The remaining bits act as a generation number or tag.
	 */
	idev->lk_table.max = 1 << ib_ipath_lkey_table_size;
	idev->lk_table.table = kzalloc(idev->lk_table.max *
				       sizeof(*idev->lk_table.table),
				       GFP_KERNEL);
	if (idev->lk_table.table == NULL) {
		ret = -ENOMEM;
		goto err_lk;
	}
	INIT_LIST_HEAD(&idev->pending_mmaps);
	spin_lock_init(&idev->pending_lock);
	idev->mmap_offset = PAGE_SIZE;
	spin_lock_init(&idev->mmap_offset_lock);
	INIT_LIST_HEAD(&idev->pending[0]);
	INIT_LIST_HEAD(&idev->pending[1]);
	INIT_LIST_HEAD(&idev->pending[2]);
	INIT_LIST_HEAD(&idev->piowait);
	INIT_LIST_HEAD(&idev->rnrwait);
	INIT_LIST_HEAD(&idev->txreq_free);
	idev->pending_index = 0;
	idev->port_cap_flags =
		IB_PORT_SYS_IMAGE_GUID_SUP | IB_PORT_CLIENT_REG_SUP;
	if (dd->ipath_flags & IPATH_HAS_LINK_LATENCY)
		idev->port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
	idev->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
	idev->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
	idev->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
	idev->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
	idev->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
	/* Snapshot current HW counters to "clear" them.
*/ ipath_get_counters(dd, &cntrs); idev->z_symbol_error_counter = cntrs.symbol_error_counter; idev->z_link_error_recovery_counter = cntrs.link_error_recovery_counter; idev->z_link_downed_counter = cntrs.link_downed_counter; idev->z_port_rcv_errors = cntrs.port_rcv_errors; idev->z_port_rcv_remphys_errors = cntrs.port_rcv_remphys_errors; idev->z_port_xmit_discards = cntrs.port_xmit_discards; idev->z_port_xmit_data = cntrs.port_xmit_data; idev->z_port_rcv_data = cntrs.port_rcv_data; idev->z_port_xmit_packets = cntrs.port_xmit_packets; idev->z_port_rcv_packets = cntrs.port_rcv_packets; idev->z_local_link_integrity_errors = cntrs.local_link_integrity_errors; idev->z_excessive_buffer_overrun_errors = cntrs.excessive_buffer_overrun_errors; idev->z_vl15_dropped = cntrs.vl15_dropped; for (i = 0; i < dd->ipath_sdma_descq_cnt; i++, tx++) list_add(&tx->txreq.list, &idev->txreq_free); /* * The system image GUID is supposed to be the same for all * IB HCAs in a single system but since there can be other * device types in the system, we can't be sure this is unique. 
*/ if (!sys_image_guid) sys_image_guid = dd->ipath_guid; idev->sys_image_guid = sys_image_guid; idev->ib_unit = dd->ipath_unit; idev->dd = dd; strlcpy(dev->name, "ipath%d", IB_DEVICE_NAME_MAX); dev->owner = THIS_MODULE; dev->node_guid = dd->ipath_guid; dev->uverbs_abi_ver = IPATH_UVERBS_ABI_VERSION; dev->uverbs_cmd_mask = (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) | (1ull << IB_USER_VERBS_CMD_QUERY_PORT) | (1ull << IB_USER_VERBS_CMD_ALLOC_PD) | (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) | (1ull << IB_USER_VERBS_CMD_CREATE_AH) | (1ull << IB_USER_VERBS_CMD_DESTROY_AH) | (1ull << IB_USER_VERBS_CMD_QUERY_AH) | (1ull << IB_USER_VERBS_CMD_REG_MR) | (1ull << IB_USER_VERBS_CMD_DEREG_MR) | (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | (1ull << IB_USER_VERBS_CMD_CREATE_CQ) | (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) | (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) | (1ull << IB_USER_VERBS_CMD_POLL_CQ) | (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) | (1ull << IB_USER_VERBS_CMD_CREATE_QP) | (1ull << IB_USER_VERBS_CMD_QUERY_QP) | (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | (1ull << IB_USER_VERBS_CMD_POST_SEND) | (1ull << IB_USER_VERBS_CMD_POST_RECV) | (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) | (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) | (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) | (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) | (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) | (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) | (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV); dev->node_type = RDMA_NODE_IB_CA; dev->phys_port_cnt = 1; dev->num_comp_vectors = 1; dev->dma_device = &dd->pcidev->dev; dev->query_device = ipath_query_device; dev->modify_device = ipath_modify_device; dev->query_port = ipath_query_port; dev->modify_port = ipath_modify_port; dev->query_pkey = ipath_query_pkey; dev->query_gid = ipath_query_gid; dev->alloc_ucontext = ipath_alloc_ucontext; dev->dealloc_ucontext = ipath_dealloc_ucontext; dev->alloc_pd = ipath_alloc_pd; dev->dealloc_pd 
= ipath_dealloc_pd; dev->create_ah = ipath_create_ah; dev->destroy_ah = ipath_destroy_ah; dev->query_ah = ipath_query_ah; dev->create_srq = ipath_create_srq; dev->modify_srq = ipath_modify_srq; dev->query_srq = ipath_query_srq; dev->destroy_srq = ipath_destroy_srq; dev->create_qp = ipath_create_qp; dev->modify_qp = ipath_modify_qp; dev->query_qp = ipath_query_qp; dev->destroy_qp = ipath_destroy_qp; dev->post_send = ipath_post_send; dev->post_recv = ipath_post_receive; dev->post_srq_recv = ipath_post_srq_receive; dev->create_cq = ipath_create_cq; dev->destroy_cq = ipath_destroy_cq; dev->resize_cq = ipath_resize_cq; dev->poll_cq = ipath_poll_cq; dev->req_notify_cq = ipath_req_notify_cq; dev->get_dma_mr = ipath_get_dma_mr; dev->reg_phys_mr = ipath_reg_phys_mr; dev->reg_user_mr = ipath_reg_user_mr; dev->dereg_mr = ipath_dereg_mr; dev->alloc_fmr = ipath_alloc_fmr; dev->map_phys_fmr = ipath_map_phys_fmr; dev->unmap_fmr = ipath_unmap_fmr; dev->dealloc_fmr = ipath_dealloc_fmr; dev->attach_mcast = ipath_multicast_attach; dev->detach_mcast = ipath_multicast_detach; dev->process_mad = ipath_process_mad; dev->mmap = ipath_mmap; dev->dma_ops = &ipath_dma_mapping_ops; snprintf(dev->node_desc, sizeof(dev->node_desc), IPATH_IDSTR " %s", init_utsname()->nodename); ret = ib_register_device(dev, NULL); if (ret) goto err_reg; ret = ipath_verbs_register_sysfs(dev); if (ret) goto err_class; enable_timer(dd); goto bail; err_class: ib_unregister_device(dev); err_reg: kfree(idev->lk_table.table); err_lk: kfree(idev->qp_table.table); err_qp: kfree(idev->txreq_bufs); err_tx: ib_dealloc_device(dev); ipath_dev_err(dd, "cannot register verbs: %d!\n", -ret); idev = NULL; bail: dd->verbs_dev = idev; return ret; } void ipath_unregister_ib_device(struct ipath_ibdev *dev) { struct ib_device *ibdev = &dev->ibdev; u32 qps_inuse; ib_unregister_device(ibdev); disable_timer(dev->dd); if (!list_empty(&dev->pending[0]) || !list_empty(&dev->pending[1]) || !list_empty(&dev->pending[2])) 
ipath_dev_err(dev->dd, "pending list not empty!\n"); if (!list_empty(&dev->piowait)) ipath_dev_err(dev->dd, "piowait list not empty!\n"); if (!list_empty(&dev->rnrwait)) ipath_dev_err(dev->dd, "rnrwait list not empty!\n"); if (!ipath_mcast_tree_empty()) ipath_dev_err(dev->dd, "multicast table memory leak!\n"); /* * Note that ipath_unregister_ib_device() can be called before all * the QPs are destroyed! */ qps_inuse = ipath_free_all_qps(&dev->qp_table); if (qps_inuse) ipath_dev_err(dev->dd, "QP memory leak! %u still in use\n", qps_inuse); kfree(dev->qp_table.table); kfree(dev->lk_table.table); kfree(dev->txreq_bufs); ib_dealloc_device(ibdev); } static ssize_t show_rev(struct device *device, struct device_attribute *attr, char *buf) { struct ipath_ibdev *dev = container_of(device, struct ipath_ibdev, ibdev.dev); return sprintf(buf, "%x\n", dev->dd->ipath_pcirev); } static ssize_t show_hca(struct device *device, struct device_attribute *attr, char *buf) { struct ipath_ibdev *dev = container_of(device, struct ipath_ibdev, ibdev.dev); int ret; ret = dev->dd->ipath_f_get_boardname(dev->dd, buf, 128); if (ret < 0) goto bail; strcat(buf, "\n"); ret = strlen(buf); bail: return ret; } static ssize_t show_stats(struct device *device, struct device_attribute *attr, char *buf) { struct ipath_ibdev *dev = container_of(device, struct ipath_ibdev, ibdev.dev); int i; int len; len = sprintf(buf, "RC resends %d\n" "RC no QACK %d\n" "RC ACKs %d\n" "RC SEQ NAKs %d\n" "RC RDMA seq %d\n" "RC RNR NAKs %d\n" "RC OTH NAKs %d\n" "RC timeouts %d\n" "RC RDMA dup %d\n" "piobuf wait %d\n" "unaligned %d\n" "PKT drops %d\n" "WQE errs %d\n", dev->n_rc_resends, dev->n_rc_qacks, dev->n_rc_acks, dev->n_seq_naks, dev->n_rdma_seq, dev->n_rnr_naks, dev->n_other_naks, dev->n_timeouts, dev->n_rdma_dup_busy, dev->n_piowait, dev->n_unaligned, dev->n_pkt_drops, dev->n_wqe_errs); for (i = 0; i < ARRAY_SIZE(dev->opstats); i++) { const struct ipath_opcode_stats *si = &dev->opstats[i]; if (!si->n_packets && 
!si->n_bytes) continue; len += sprintf(buf + len, "%02x %llu/%llu\n", i, (unsigned long long) si->n_packets, (unsigned long long) si->n_bytes); } return len; } static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL); static DEVICE_ATTR(board_id, S_IRUGO, show_hca, NULL); static DEVICE_ATTR(stats, S_IRUGO, show_stats, NULL); static struct device_attribute *ipath_class_attributes[] = { &dev_attr_hw_rev, &dev_attr_hca_type, &dev_attr_board_id, &dev_attr_stats }; static int ipath_verbs_register_sysfs(struct ib_device *dev) { int i; int ret; for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i) { ret = device_create_file(&dev->dev, ipath_class_attributes[i]); if (ret) goto bail; } return 0; bail: for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i) device_remove_file(&dev->dev, ipath_class_attributes[i]); return ret; }
gpl-2.0
kbehren/android_kernel_lenovo_msm8226
fs/file_table.c
3774
12893
/* * linux/fs/file_table.c * * Copyright (C) 1991, 1992 Linus Torvalds * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) */ #include <linux/string.h> #include <linux/slab.h> #include <linux/file.h> #include <linux/fdtable.h> #include <linux/init.h> #include <linux/module.h> #include <linux/fs.h> #include <linux/security.h> #include <linux/eventpoll.h> #include <linux/rcupdate.h> #include <linux/mount.h> #include <linux/capability.h> #include <linux/cdev.h> #include <linux/fsnotify.h> #include <linux/sysctl.h> #include <linux/lglock.h> #include <linux/percpu_counter.h> #include <linux/percpu.h> #include <linux/ima.h> #include <linux/atomic.h> #include "internal.h" /* sysctl tunables... */ struct files_stat_struct files_stat = { .max_files = NR_FILE }; DECLARE_LGLOCK(files_lglock); DEFINE_LGLOCK(files_lglock); /* SLAB cache for file structures */ static struct kmem_cache *filp_cachep __read_mostly; static struct percpu_counter nr_files __cacheline_aligned_in_smp; static inline void file_free_rcu(struct rcu_head *head) { struct file *f = container_of(head, struct file, f_u.fu_rcuhead); put_cred(f->f_cred); kmem_cache_free(filp_cachep, f); } static inline void file_free(struct file *f) { percpu_counter_dec(&nr_files); file_check_state(f); call_rcu(&f->f_u.fu_rcuhead, file_free_rcu); } /* * Return the total number of open files in the system */ static long get_nr_files(void) { return percpu_counter_read_positive(&nr_files); } /* * Return the maximum number of open files in the system */ unsigned long get_max_files(void) { return files_stat.max_files; } EXPORT_SYMBOL_GPL(get_max_files); /* * Handle nr_files sysctl */ #if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS) int proc_nr_files(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { files_stat.nr_files = get_nr_files(); return proc_doulongvec_minmax(table, write, buffer, lenp, ppos); } #else int proc_nr_files(ctl_table *table, int write, void __user *buffer, size_t *lenp, 
loff_t *ppos) { return -ENOSYS; } #endif /* Find an unused file structure and return a pointer to it. * Returns NULL, if there are no more free file structures or * we run out of memory. * * Be very careful using this. You are responsible for * getting write access to any mount that you might assign * to this filp, if it is opened for write. If this is not * done, you will imbalance int the mount's writer count * and a warning at __fput() time. */ struct file *get_empty_filp(void) { const struct cred *cred = current_cred(); static long old_max; struct file * f; /* * Privileged users can go above max_files */ if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) { /* * percpu_counters are inaccurate. Do an expensive check before * we go and fail. */ if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files) goto over; } f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL); if (f == NULL) goto fail; percpu_counter_inc(&nr_files); f->f_cred = get_cred(cred); if (security_file_alloc(f)) goto fail_sec; INIT_LIST_HEAD(&f->f_u.fu_list); atomic_long_set(&f->f_count, 1); rwlock_init(&f->f_owner.lock); spin_lock_init(&f->f_lock); eventpoll_init_file(f); /* f->f_version: 0 */ return f; over: /* Ran out of filps - report that */ if (get_nr_files() > old_max) { pr_info("VFS: file-max limit %lu reached\n", get_max_files()); old_max = get_nr_files(); } goto fail; fail_sec: file_free(f); fail: return NULL; } /** * alloc_file - allocate and initialize a 'struct file' * @mnt: the vfsmount on which the file will reside * @dentry: the dentry representing the new file * @mode: the mode with which the new file will be opened * @fop: the 'struct file_operations' for the new file * * Use this instead of get_empty_filp() to get a new * 'struct file'. Do so because of the same initialization * pitfalls reasons listed for init_file(). This is a * preferred interface to using init_file(). 
* * If all the callers of init_file() are eliminated, its * code should be moved into this function. */ struct file *alloc_file(struct path *path, fmode_t mode, const struct file_operations *fop) { struct file *file; file = get_empty_filp(); if (!file) return NULL; file->f_path = *path; file->f_mapping = path->dentry->d_inode->i_mapping; file->f_mode = mode; file->f_op = fop; /* * These mounts don't really matter in practice * for r/o bind mounts. They aren't userspace- * visible. We do this for consistency, and so * that we can do debugging checks at __fput() */ if ((mode & FMODE_WRITE) && !special_file(path->dentry->d_inode->i_mode)) { file_take_write(file); WARN_ON(mnt_clone_write(path->mnt)); } if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) i_readcount_inc(path->dentry->d_inode); return file; } EXPORT_SYMBOL(alloc_file); /** * drop_file_write_access - give up ability to write to a file * @file: the file to which we will stop writing * * This is a central place which will give up the ability * to write to @file, along with access to write through * its vfsmount. */ static void drop_file_write_access(struct file *file) { struct vfsmount *mnt = file->f_path.mnt; struct dentry *dentry = file->f_path.dentry; struct inode *inode = dentry->d_inode; put_write_access(inode); if (special_file(inode->i_mode)) return; if (file_check_writeable(file) != 0) return; mnt_drop_write(mnt); file_release_write(file); } /* the real guts of fput() - releasing the last reference to file */ static void __fput(struct file *file) { struct dentry *dentry = file->f_path.dentry; struct vfsmount *mnt = file->f_path.mnt; struct inode *inode = dentry->d_inode; might_sleep(); fsnotify_close(file); /* * The function eventpoll_release() should be the first called * in the file cleanup chain. 
*/ eventpoll_release(file); locks_remove_flock(file); if (unlikely(file->f_flags & FASYNC)) { if (file->f_op && file->f_op->fasync) file->f_op->fasync(-1, file, 0); } if (file->f_op && file->f_op->release) file->f_op->release(inode, file); security_file_free(file); ima_file_free(file); if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL && !(file->f_mode & FMODE_PATH))) { cdev_put(inode->i_cdev); } fops_put(file->f_op); put_pid(file->f_owner.pid); file_sb_list_del(file); if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) i_readcount_dec(inode); if (file->f_mode & FMODE_WRITE) drop_file_write_access(file); file->f_path.dentry = NULL; file->f_path.mnt = NULL; file_free(file); dput(dentry); mntput(mnt); } void fput(struct file *file) { if (atomic_long_dec_and_test(&file->f_count)) __fput(file); } EXPORT_SYMBOL(fput); struct file *fget(unsigned int fd) { struct file *file; struct files_struct *files = current->files; rcu_read_lock(); file = fcheck_files(files, fd); if (file) { /* File object ref couldn't be taken */ if (file->f_mode & FMODE_PATH || !atomic_long_inc_not_zero(&file->f_count)) file = NULL; } rcu_read_unlock(); return file; } EXPORT_SYMBOL(fget); struct file *fget_raw(unsigned int fd) { struct file *file; struct files_struct *files = current->files; rcu_read_lock(); file = fcheck_files(files, fd); if (file) { /* File object ref couldn't be taken */ if (!atomic_long_inc_not_zero(&file->f_count)) file = NULL; } rcu_read_unlock(); return file; } EXPORT_SYMBOL(fget_raw); /* * Lightweight file lookup - no refcnt increment if fd table isn't shared. * * You can use this instead of fget if you satisfy all of the following * conditions: * 1) You must call fput_light before exiting the syscall and returning control * to userspace (i.e. you cannot remember the returned struct file * after * returning to userspace). * 2) You must not call filp_close on the returned struct file * in between * calls to fget_light and fput_light. 
* 3) You must not clone the current task in between the calls to fget_light * and fput_light. * * The fput_needed flag returned by fget_light should be passed to the * corresponding fput_light. */ struct file *fget_light(unsigned int fd, int *fput_needed) { struct file *file; struct files_struct *files = current->files; *fput_needed = 0; if (atomic_read(&files->count) == 1) { file = fcheck_files(files, fd); if (file && (file->f_mode & FMODE_PATH)) file = NULL; } else { rcu_read_lock(); file = fcheck_files(files, fd); if (file) { if (!(file->f_mode & FMODE_PATH) && atomic_long_inc_not_zero(&file->f_count)) *fput_needed = 1; else /* Didn't get the reference, someone's freed */ file = NULL; } rcu_read_unlock(); } return file; } struct file *fget_raw_light(unsigned int fd, int *fput_needed) { struct file *file; struct files_struct *files = current->files; *fput_needed = 0; if (atomic_read(&files->count) == 1) { file = fcheck_files(files, fd); } else { rcu_read_lock(); file = fcheck_files(files, fd); if (file) { if (atomic_long_inc_not_zero(&file->f_count)) *fput_needed = 1; else /* Didn't get the reference, someone's freed */ file = NULL; } rcu_read_unlock(); } return file; } void put_filp(struct file *file) { if (atomic_long_dec_and_test(&file->f_count)) { security_file_free(file); file_sb_list_del(file); file_free(file); } } static inline int file_list_cpu(struct file *file) { #ifdef CONFIG_SMP return file->f_sb_list_cpu; #else return smp_processor_id(); #endif } /* helper for file_sb_list_add to reduce ifdefs */ static inline void __file_sb_list_add(struct file *file, struct super_block *sb) { struct list_head *list; #ifdef CONFIG_SMP int cpu; cpu = smp_processor_id(); file->f_sb_list_cpu = cpu; list = per_cpu_ptr(sb->s_files, cpu); #else list = &sb->s_files; #endif list_add(&file->f_u.fu_list, list); } /** * file_sb_list_add - add a file to the sb's file list * @file: file to add * @sb: sb to add it to * * Use this function to associate a file with the superblock 
of the inode it * refers to. */ void file_sb_list_add(struct file *file, struct super_block *sb) { lg_local_lock(files_lglock); __file_sb_list_add(file, sb); lg_local_unlock(files_lglock); } /** * file_sb_list_del - remove a file from the sb's file list * @file: file to remove * @sb: sb to remove it from * * Use this function to remove a file from its superblock. */ void file_sb_list_del(struct file *file) { if (!list_empty(&file->f_u.fu_list)) { lg_local_lock_cpu(files_lglock, file_list_cpu(file)); list_del_init(&file->f_u.fu_list); lg_local_unlock_cpu(files_lglock, file_list_cpu(file)); } } #ifdef CONFIG_SMP /* * These macros iterate all files on all CPUs for a given superblock. * files_lglock must be held globally. */ #define do_file_list_for_each_entry(__sb, __file) \ { \ int i; \ for_each_possible_cpu(i) { \ struct list_head *list; \ list = per_cpu_ptr((__sb)->s_files, i); \ list_for_each_entry((__file), list, f_u.fu_list) #define while_file_list_for_each_entry \ } \ } #else #define do_file_list_for_each_entry(__sb, __file) \ { \ struct list_head *list; \ list = &(sb)->s_files; \ list_for_each_entry((__file), list, f_u.fu_list) #define while_file_list_for_each_entry \ } #endif /** * mark_files_ro - mark all files read-only * @sb: superblock in question * * All files are marked read-only. We don't care about pending * delete files so this should be used in 'force' mode only. */ void mark_files_ro(struct super_block *sb) { struct file *f; retry: lg_global_lock(files_lglock); do_file_list_for_each_entry(sb, f) { struct vfsmount *mnt; if (!S_ISREG(f->f_path.dentry->d_inode->i_mode)) continue; if (!file_count(f)) continue; if (!(f->f_mode & FMODE_WRITE)) continue; spin_lock(&f->f_lock); f->f_mode &= ~FMODE_WRITE; spin_unlock(&f->f_lock); if (file_check_writeable(f) != 0) continue; file_release_write(f); mnt = mntget(f->f_path.mnt); /* This can sleep, so we can't hold the spinlock. 
*/ lg_global_unlock(files_lglock); mnt_drop_write(mnt); mntput(mnt); goto retry; } while_file_list_for_each_entry; lg_global_unlock(files_lglock); } void __init files_init(unsigned long mempages) { unsigned long n; filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); /* * One file with associated inode and dcache is very roughly 1K. * Per default don't use more than 10% of our memory for files. */ n = (mempages * (PAGE_SIZE / 1024)) / 10; files_stat.max_files = max_t(unsigned long, n, NR_FILE); files_defer_init(); lg_lock_init(files_lglock); percpu_counter_init(&nr_files, 0); }
gpl-2.0
elbermu/cerux_kernel-touchwiz
drivers/net/wireless/b43legacy/radio.c
4286
60251
/* Broadcom B43legacy wireless driver Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>, Stefano Brivio <stefano.brivio@polimi.it> Michael Buesch <mbuesch@freenet.de> Danny van Dyk <kugelfang@gentoo.org> Andreas Jaggi <andreas.jaggi@waterwave.ch> Copyright (c) 2007 Larry Finger <Larry.Finger@lwfinger.net> Some parts of the code in this file are derived from the ipw2200 driver Copyright(c) 2003 - 2004 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; see the file COPYING. If not, write to the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, Boston, MA 02110-1301, USA. */ #include <linux/delay.h> #include "b43legacy.h" #include "main.h" #include "phy.h" #include "radio.h" #include "ilt.h" /* Table for b43legacy_radio_calibrationvalue() */ static const u16 rcc_table[16] = { 0x0002, 0x0003, 0x0001, 0x000F, 0x0006, 0x0007, 0x0005, 0x000F, 0x000A, 0x000B, 0x0009, 0x000F, 0x000E, 0x000F, 0x000D, 0x000F, }; /* Reverse the bits of a 4bit value. * Example: 1101 is flipped 1011 */ static u16 flip_4bit(u16 value) { u16 flipped = 0x0000; B43legacy_BUG_ON(!((value & ~0x000F) == 0x0000)); flipped |= (value & 0x0001) << 3; flipped |= (value & 0x0002) << 1; flipped |= (value & 0x0004) >> 1; flipped |= (value & 0x0008) >> 3; return flipped; } /* Get the freq, as it has to be written to the device. 
*/ static inline u16 channel2freq_bg(u8 channel) { /* Frequencies are given as frequencies_bg[index] + 2.4GHz * Starting with channel 1 */ static const u16 frequencies_bg[14] = { 12, 17, 22, 27, 32, 37, 42, 47, 52, 57, 62, 67, 72, 84, }; if (unlikely(channel < 1 || channel > 14)) { printk(KERN_INFO "b43legacy: Channel %d is out of range\n", channel); dump_stack(); return 2412; } return frequencies_bg[channel - 1]; } void b43legacy_radio_lock(struct b43legacy_wldev *dev) { u32 status; status = b43legacy_read32(dev, B43legacy_MMIO_MACCTL); B43legacy_WARN_ON(status & B43legacy_MACCTL_RADIOLOCK); status |= B43legacy_MACCTL_RADIOLOCK; b43legacy_write32(dev, B43legacy_MMIO_MACCTL, status); mmiowb(); udelay(10); } void b43legacy_radio_unlock(struct b43legacy_wldev *dev) { u32 status; b43legacy_read16(dev, B43legacy_MMIO_PHY_VER); /* dummy read */ status = b43legacy_read32(dev, B43legacy_MMIO_MACCTL); B43legacy_WARN_ON(!(status & B43legacy_MACCTL_RADIOLOCK)); status &= ~B43legacy_MACCTL_RADIOLOCK; b43legacy_write32(dev, B43legacy_MMIO_MACCTL, status); mmiowb(); } u16 b43legacy_radio_read16(struct b43legacy_wldev *dev, u16 offset) { struct b43legacy_phy *phy = &dev->phy; switch (phy->type) { case B43legacy_PHYTYPE_B: if (phy->radio_ver == 0x2053) { if (offset < 0x70) offset += 0x80; else if (offset < 0x80) offset += 0x70; } else if (phy->radio_ver == 0x2050) offset |= 0x80; else B43legacy_WARN_ON(1); break; case B43legacy_PHYTYPE_G: offset |= 0x80; break; default: B43legacy_BUG_ON(1); } b43legacy_write16(dev, B43legacy_MMIO_RADIO_CONTROL, offset); return b43legacy_read16(dev, B43legacy_MMIO_RADIO_DATA_LOW); } void b43legacy_radio_write16(struct b43legacy_wldev *dev, u16 offset, u16 val) { b43legacy_write16(dev, B43legacy_MMIO_RADIO_CONTROL, offset); mmiowb(); b43legacy_write16(dev, B43legacy_MMIO_RADIO_DATA_LOW, val); } static void b43legacy_set_all_gains(struct b43legacy_wldev *dev, s16 first, s16 second, s16 third) { struct b43legacy_phy *phy = &dev->phy; u16 i; u16 start 
= 0x08; u16 end = 0x18; u16 offset = 0x0400; u16 tmp; if (phy->rev <= 1) { offset = 0x5000; start = 0x10; end = 0x20; } for (i = 0; i < 4; i++) b43legacy_ilt_write(dev, offset + i, first); for (i = start; i < end; i++) b43legacy_ilt_write(dev, offset + i, second); if (third != -1) { tmp = ((u16)third << 14) | ((u16)third << 6); b43legacy_phy_write(dev, 0x04A0, (b43legacy_phy_read(dev, 0x04A0) & 0xBFBF) | tmp); b43legacy_phy_write(dev, 0x04A1, (b43legacy_phy_read(dev, 0x04A1) & 0xBFBF) | tmp); b43legacy_phy_write(dev, 0x04A2, (b43legacy_phy_read(dev, 0x04A2) & 0xBFBF) | tmp); } b43legacy_dummy_transmission(dev); } static void b43legacy_set_original_gains(struct b43legacy_wldev *dev) { struct b43legacy_phy *phy = &dev->phy; u16 i; u16 tmp; u16 offset = 0x0400; u16 start = 0x0008; u16 end = 0x0018; if (phy->rev <= 1) { offset = 0x5000; start = 0x0010; end = 0x0020; } for (i = 0; i < 4; i++) { tmp = (i & 0xFFFC); tmp |= (i & 0x0001) << 1; tmp |= (i & 0x0002) >> 1; b43legacy_ilt_write(dev, offset + i, tmp); } for (i = start; i < end; i++) b43legacy_ilt_write(dev, offset + i, i - start); b43legacy_phy_write(dev, 0x04A0, (b43legacy_phy_read(dev, 0x04A0) & 0xBFBF) | 0x4040); b43legacy_phy_write(dev, 0x04A1, (b43legacy_phy_read(dev, 0x04A1) & 0xBFBF) | 0x4040); b43legacy_phy_write(dev, 0x04A2, (b43legacy_phy_read(dev, 0x04A2) & 0xBFBF) | 0x4000); b43legacy_dummy_transmission(dev); } /* Synthetic PU workaround */ static void b43legacy_synth_pu_workaround(struct b43legacy_wldev *dev, u8 channel) { struct b43legacy_phy *phy = &dev->phy; might_sleep(); if (phy->radio_ver != 0x2050 || phy->radio_rev >= 6) /* We do not need the workaround. 
*/ return; if (channel <= 10) b43legacy_write16(dev, B43legacy_MMIO_CHANNEL, channel2freq_bg(channel + 4)); else b43legacy_write16(dev, B43legacy_MMIO_CHANNEL, channel2freq_bg(channel)); msleep(1); b43legacy_write16(dev, B43legacy_MMIO_CHANNEL, channel2freq_bg(channel)); } u8 b43legacy_radio_aci_detect(struct b43legacy_wldev *dev, u8 channel) { struct b43legacy_phy *phy = &dev->phy; u8 ret = 0; u16 saved; u16 rssi; u16 temp; int i; int j = 0; saved = b43legacy_phy_read(dev, 0x0403); b43legacy_radio_selectchannel(dev, channel, 0); b43legacy_phy_write(dev, 0x0403, (saved & 0xFFF8) | 5); if (phy->aci_hw_rssi) rssi = b43legacy_phy_read(dev, 0x048A) & 0x3F; else rssi = saved & 0x3F; /* clamp temp to signed 5bit */ if (rssi > 32) rssi -= 64; for (i = 0; i < 100; i++) { temp = (b43legacy_phy_read(dev, 0x047F) >> 8) & 0x3F; if (temp > 32) temp -= 64; if (temp < rssi) j++; if (j >= 20) ret = 1; } b43legacy_phy_write(dev, 0x0403, saved); return ret; } u8 b43legacy_radio_aci_scan(struct b43legacy_wldev *dev) { struct b43legacy_phy *phy = &dev->phy; u8 ret[13]; unsigned int channel = phy->channel; unsigned int i; unsigned int j; unsigned int start; unsigned int end; if (!((phy->type == B43legacy_PHYTYPE_G) && (phy->rev > 0))) return 0; b43legacy_phy_lock(dev); b43legacy_radio_lock(dev); b43legacy_phy_write(dev, 0x0802, b43legacy_phy_read(dev, 0x0802) & 0xFFFC); b43legacy_phy_write(dev, B43legacy_PHY_G_CRS, b43legacy_phy_read(dev, B43legacy_PHY_G_CRS) & 0x7FFF); b43legacy_set_all_gains(dev, 3, 8, 1); start = (channel - 5 > 0) ? channel - 5 : 1; end = (channel + 5 < 14) ? 
channel + 5 : 13; for (i = start; i <= end; i++) { if (abs(channel - i) > 2) ret[i-1] = b43legacy_radio_aci_detect(dev, i); } b43legacy_radio_selectchannel(dev, channel, 0); b43legacy_phy_write(dev, 0x0802, (b43legacy_phy_read(dev, 0x0802) & 0xFFFC) | 0x0003); b43legacy_phy_write(dev, 0x0403, b43legacy_phy_read(dev, 0x0403) & 0xFFF8); b43legacy_phy_write(dev, B43legacy_PHY_G_CRS, b43legacy_phy_read(dev, B43legacy_PHY_G_CRS) | 0x8000); b43legacy_set_original_gains(dev); for (i = 0; i < 13; i++) { if (!ret[i]) continue; end = (i + 5 < 13) ? i + 5 : 13; for (j = i; j < end; j++) ret[j] = 1; } b43legacy_radio_unlock(dev); b43legacy_phy_unlock(dev); return ret[channel - 1]; } /* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ void b43legacy_nrssi_hw_write(struct b43legacy_wldev *dev, u16 offset, s16 val) { b43legacy_phy_write(dev, B43legacy_PHY_NRSSILT_CTRL, offset); mmiowb(); b43legacy_phy_write(dev, B43legacy_PHY_NRSSILT_DATA, (u16)val); } /* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ s16 b43legacy_nrssi_hw_read(struct b43legacy_wldev *dev, u16 offset) { u16 val; b43legacy_phy_write(dev, B43legacy_PHY_NRSSILT_CTRL, offset); val = b43legacy_phy_read(dev, B43legacy_PHY_NRSSILT_DATA); return (s16)val; } /* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ void b43legacy_nrssi_hw_update(struct b43legacy_wldev *dev, u16 val) { u16 i; s16 tmp; for (i = 0; i < 64; i++) { tmp = b43legacy_nrssi_hw_read(dev, i); tmp -= val; tmp = clamp_val(tmp, -32, 31); b43legacy_nrssi_hw_write(dev, i, tmp); } } /* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ void b43legacy_nrssi_mem_update(struct b43legacy_wldev *dev) { struct b43legacy_phy *phy = &dev->phy; s16 i; s16 delta; s32 tmp; delta = 0x1F - phy->nrssi[0]; for (i = 0; i < 64; i++) { tmp = (i - delta) * phy->nrssislope; tmp /= 0x10000; tmp += 0x3A; tmp = clamp_val(tmp, 0, 0x3F); phy->nrssi_lt[i] = tmp; } } static void b43legacy_calc_nrssi_offset(struct b43legacy_wldev *dev) { struct b43legacy_phy *phy = 
/* Continuation of b43legacy_calc_nrssi_offset(); header is in the
 * previous chunk.  Save affected registers, force a measurement
 * configuration, binary-search radio reg 0x7B, then restore. */
			      &dev->phy;
	u16 backup[20] = { 0 };
	s16 v47F;
	u16 i;
	u16 saved = 0xFFFF;

	backup[0] = b43legacy_phy_read(dev, 0x0001);
	backup[1] = b43legacy_phy_read(dev, 0x0811);
	backup[2] = b43legacy_phy_read(dev, 0x0812);
	backup[3] = b43legacy_phy_read(dev, 0x0814);
	backup[4] = b43legacy_phy_read(dev, 0x0815);
	backup[5] = b43legacy_phy_read(dev, 0x005A);
	backup[6] = b43legacy_phy_read(dev, 0x0059);
	backup[7] = b43legacy_phy_read(dev, 0x0058);
	backup[8] = b43legacy_phy_read(dev, 0x000A);
	backup[9] = b43legacy_phy_read(dev, 0x0003);
	backup[10] = b43legacy_radio_read16(dev, 0x007A);
	backup[11] = b43legacy_radio_read16(dev, 0x0043);

	b43legacy_phy_write(dev, 0x0429,
			    b43legacy_phy_read(dev, 0x0429) & 0x7FFF);
	b43legacy_phy_write(dev, 0x0001,
			    (b43legacy_phy_read(dev, 0x0001) & 0x3FFF) | 0x4000);
	b43legacy_phy_write(dev, 0x0811,
			    b43legacy_phy_read(dev, 0x0811) | 0x000C);
	b43legacy_phy_write(dev, 0x0812,
			    (b43legacy_phy_read(dev, 0x0812) & 0xFFF3) | 0x0004);
	b43legacy_phy_write(dev, 0x0802,
			    b43legacy_phy_read(dev, 0x0802) & ~(0x1 | 0x2));
	if (phy->rev >= 6) {
		/* rev >= 6 has extra registers to save/override */
		backup[12] = b43legacy_phy_read(dev, 0x002E);
		backup[13] = b43legacy_phy_read(dev, 0x002F);
		backup[14] = b43legacy_phy_read(dev, 0x080F);
		backup[15] = b43legacy_phy_read(dev, 0x0810);
		backup[16] = b43legacy_phy_read(dev, 0x0801);
		backup[17] = b43legacy_phy_read(dev, 0x0060);
		backup[18] = b43legacy_phy_read(dev, 0x0014);
		backup[19] = b43legacy_phy_read(dev, 0x0478);

		b43legacy_phy_write(dev, 0x002E, 0);
		b43legacy_phy_write(dev, 0x002F, 0);
		b43legacy_phy_write(dev, 0x080F, 0);
		b43legacy_phy_write(dev, 0x0810, 0);
		b43legacy_phy_write(dev, 0x0478,
				    b43legacy_phy_read(dev, 0x0478) | 0x0100);
		b43legacy_phy_write(dev, 0x0801,
				    b43legacy_phy_read(dev, 0x0801) | 0x0040);
		b43legacy_phy_write(dev, 0x0060,
				    b43legacy_phy_read(dev, 0x0060) | 0x0040);
		b43legacy_phy_write(dev, 0x0014,
				    b43legacy_phy_read(dev, 0x0014) | 0x0200);
	}
	b43legacy_radio_write16(dev, 0x007A,
				b43legacy_radio_read16(dev, 0x007A) | 0x0070);
	b43legacy_radio_write16(dev, 0x007A,
				b43legacy_radio_read16(dev, 0x007A) | 0x0080);
	udelay(30);

	/* 0x47F bits 13:8 hold a signed 6-bit NRSSI sample; sign-extend. */
	v47F = (s16)((b43legacy_phy_read(dev, 0x047F) >> 8) & 0x003F);
	if (v47F >= 0x20)
		v47F -= 0x40;
	if (v47F == 31) {
		/* Saturated high: scan 0x7B downward for the first
		 * non-saturated setting. */
		for (i = 7; i >= 4; i--) {
			b43legacy_radio_write16(dev, 0x007B, i);
			udelay(20);
			v47F = (s16)((b43legacy_phy_read(dev, 0x047F) >> 8)
							 & 0x003F);
			if (v47F >= 0x20)
				v47F -= 0x40;
			if (v47F < 31 && saved == 0xFFFF)
				saved = i;
		}
		if (saved == 0xFFFF)
			saved = 4;
	} else {
		b43legacy_radio_write16(dev, 0x007A,
					b43legacy_radio_read16(dev, 0x007A)
					& 0x007F);
		b43legacy_phy_write(dev, 0x0814,
				    b43legacy_phy_read(dev, 0x0814) | 0x0001);
		b43legacy_phy_write(dev, 0x0815,
				    b43legacy_phy_read(dev, 0x0815) & 0xFFFE);
		b43legacy_phy_write(dev, 0x0811,
				    b43legacy_phy_read(dev, 0x0811) | 0x000C);
		b43legacy_phy_write(dev, 0x0812,
				    b43legacy_phy_read(dev, 0x0812) | 0x000C);
		b43legacy_phy_write(dev, 0x0811,
				    b43legacy_phy_read(dev, 0x0811) | 0x0030);
		b43legacy_phy_write(dev, 0x0812,
				    b43legacy_phy_read(dev, 0x0812) | 0x0030);
		b43legacy_phy_write(dev, 0x005A, 0x0480);
		b43legacy_phy_write(dev, 0x0059, 0x0810);
		b43legacy_phy_write(dev, 0x0058, 0x000D);
		if (phy->analog == 0)
			b43legacy_phy_write(dev, 0x0003, 0x0122);
		else
			b43legacy_phy_write(dev, 0x000A,
					    b43legacy_phy_read(dev, 0x000A)
					    | 0x2000);
		b43legacy_phy_write(dev, 0x0814,
				    b43legacy_phy_read(dev, 0x0814) | 0x0004);
		b43legacy_phy_write(dev, 0x0815,
				    b43legacy_phy_read(dev, 0x0815) & 0xFFFB);
		b43legacy_phy_write(dev, 0x0003,
				    (b43legacy_phy_read(dev, 0x0003) & 0xFF9F)
				    | 0x0040);
		b43legacy_radio_write16(dev, 0x007A,
					b43legacy_radio_read16(dev, 0x007A)
					| 0x000F);
		b43legacy_set_all_gains(dev, 3, 0, 1);
		b43legacy_radio_write16(dev, 0x0043,
					(b43legacy_radio_read16(dev, 0x0043)
					& 0x00F0) | 0x000F);
		udelay(30);
		v47F = (s16)((b43legacy_phy_read(dev, 0x047F) >> 8) & 0x003F);
		if (v47F >= 0x20)
			v47F -= 0x40;
		if (v47F == -32) {
			/* Saturated low: scan 0x7B upward. */
			for (i = 0; i < 4; i++) {
				b43legacy_radio_write16(dev, 0x007B, i);
				udelay(20);
				v47F = (s16)((b43legacy_phy_read(dev,
								 0x047F) >> 8)
								 & 0x003F);
				if (v47F >= 0x20)
					v47F -= 0x40;
				if (v47F > -31 && saved == 0xFFFF)
					saved = i;
			}
			if (saved == 0xFFFF)
				saved = 3;
		} else
			saved = 0;
	}
	b43legacy_radio_write16(dev, 0x007B, saved);

	/* Restore everything, in the reverse-ish documented order. */
	if (phy->rev >= 6) {
		b43legacy_phy_write(dev, 0x002E, backup[12]);
		b43legacy_phy_write(dev, 0x002F, backup[13]);
		b43legacy_phy_write(dev, 0x080F, backup[14]);
		b43legacy_phy_write(dev, 0x0810, backup[15]);
	}
	b43legacy_phy_write(dev, 0x0814, backup[3]);
	b43legacy_phy_write(dev, 0x0815, backup[4]);
	b43legacy_phy_write(dev, 0x005A, backup[5]);
	b43legacy_phy_write(dev, 0x0059, backup[6]);
	b43legacy_phy_write(dev, 0x0058, backup[7]);
	b43legacy_phy_write(dev, 0x000A, backup[8]);
	b43legacy_phy_write(dev, 0x0003, backup[9]);
	b43legacy_radio_write16(dev, 0x0043, backup[11]);
	b43legacy_radio_write16(dev, 0x007A, backup[10]);
	b43legacy_phy_write(dev, 0x0802,
			    b43legacy_phy_read(dev, 0x0802) | 0x1 | 0x2);
	b43legacy_phy_write(dev, 0x0429,
			    b43legacy_phy_read(dev, 0x0429) | 0x8000);
	b43legacy_set_original_gains(dev);
	if (phy->rev >= 6) {
		b43legacy_phy_write(dev, 0x0801, backup[16]);
		b43legacy_phy_write(dev, 0x0060, backup[17]);
		b43legacy_phy_write(dev, 0x0014, backup[18]);
		b43legacy_phy_write(dev, 0x0478, backup[19]);
	}
	b43legacy_phy_write(dev, 0x0001, backup[0]);
	b43legacy_phy_write(dev, 0x0812, backup[2]);
	b43legacy_phy_write(dev, 0x0811, backup[1]);
}

/* Calibrate the NRSSI slope: take two NRSSI samples in two forced
 * radio configurations and derive phy->nrssislope (and the nrssi[]
 * calibration points) from their difference.  All touched registers
 * are saved to backup[] and restored before returning. */
void b43legacy_calc_nrssi_slope(struct b43legacy_wldev *dev)
{
	struct b43legacy_phy *phy = &dev->phy;
	u16 backup[18] = { 0 };
	u16 tmp;
	s16 nrssi0;
	s16 nrssi1;

	switch (phy->type) {
	case B43legacy_PHYTYPE_B:
		backup[0] = b43legacy_radio_read16(dev, 0x007A);
		backup[1] = b43legacy_radio_read16(dev, 0x0052);
		backup[2] = b43legacy_radio_read16(dev, 0x0043);
		backup[3] = b43legacy_phy_read(dev, 0x0030);
		backup[4] = b43legacy_phy_read(dev, 0x0026);
		backup[5] = b43legacy_phy_read(dev, 0x0015);
		backup[6] = b43legacy_phy_read(dev, 0x002A);
		backup[7] = b43legacy_phy_read(dev, 0x0020);
		backup[8] = b43legacy_phy_read(dev, 0x005A);
		backup[9] = b43legacy_phy_read(dev, 0x0059);
		backup[10] = b43legacy_phy_read(dev, 0x0058);
		backup[11] = b43legacy_read16(dev, 0x03E2);
		backup[12] = b43legacy_read16(dev, 0x03E6);
		backup[13] = b43legacy_read16(dev, B43legacy_MMIO_CHANNEL_EXT);

		tmp = b43legacy_radio_read16(dev, 0x007A);
		tmp &= (phy->rev >= 5) ? 0x007F : 0x000F;
		b43legacy_radio_write16(dev, 0x007A, tmp);
		b43legacy_phy_write(dev, 0x0030, 0x00FF);
		b43legacy_write16(dev, 0x03EC, 0x7F7F);
		b43legacy_phy_write(dev, 0x0026, 0x0000);
		b43legacy_phy_write(dev, 0x0015,
				    b43legacy_phy_read(dev, 0x0015) | 0x0020);
		b43legacy_phy_write(dev, 0x002A, 0x08A3);
		b43legacy_radio_write16(dev, 0x007A,
					b43legacy_radio_read16(dev, 0x007A)
					| 0x0080);

		/* first sample */
		nrssi0 = (s16)b43legacy_phy_read(dev, 0x0027);
		b43legacy_radio_write16(dev, 0x007A,
					b43legacy_radio_read16(dev, 0x007A)
					& 0x007F);
		if (phy->analog >= 2)
			b43legacy_write16(dev, 0x03E6, 0x0040);
		else if (phy->analog == 0)
			b43legacy_write16(dev, 0x03E6, 0x0122);
		else
			b43legacy_write16(dev, B43legacy_MMIO_CHANNEL_EXT,
					  b43legacy_read16(dev,
					  B43legacy_MMIO_CHANNEL_EXT) & 0x2000);
		b43legacy_phy_write(dev, 0x0020, 0x3F3F);
		b43legacy_phy_write(dev, 0x0015, 0xF330);
		b43legacy_radio_write16(dev, 0x005A, 0x0060);
		b43legacy_radio_write16(dev, 0x0043,
					b43legacy_radio_read16(dev, 0x0043)
					& 0x00F0);
		b43legacy_phy_write(dev, 0x005A, 0x0480);
		b43legacy_phy_write(dev, 0x0059, 0x0810);
		b43legacy_phy_write(dev, 0x0058, 0x000D);
		udelay(20);

		/* second sample */
		nrssi1 = (s16)b43legacy_phy_read(dev, 0x0027);

		b43legacy_phy_write(dev, 0x0030, backup[3]);
		b43legacy_radio_write16(dev, 0x007A, backup[0]);
		b43legacy_write16(dev, 0x03E2, backup[11]);
		b43legacy_phy_write(dev, 0x0026, backup[4]);
		b43legacy_phy_write(dev, 0x0015, backup[5]);
		b43legacy_phy_write(dev, 0x002A, backup[6]);
		b43legacy_synth_pu_workaround(dev, phy->channel);
		if (phy->analog != 0)
			b43legacy_write16(dev, 0x03F4, backup[13]);

		b43legacy_phy_write(dev, 0x0020, backup[7]);
		b43legacy_phy_write(dev, 0x005A, backup[8]);
		b43legacy_phy_write(dev, 0x0059, backup[9]);
		b43legacy_phy_write(dev, 0x0058, backup[10]);
		b43legacy_radio_write16(dev, 0x0052, backup[1]);
		b43legacy_radio_write16(dev, 0x0043, backup[2]);

		/* slope in 16.16 fixed point; guard the division */
		if (nrssi0 == nrssi1)
			phy->nrssislope = 0x00010000;
		else
			phy->nrssislope = 0x00400000 / (nrssi0 - nrssi1);

		if (nrssi0 <= -4) {
			phy->nrssi[0] = nrssi0;
			phy->nrssi[1] = nrssi1;
		}
		break;
	case B43legacy_PHYTYPE_G:
		if (phy->radio_rev >= 9)
			return;
		if (phy->radio_rev == 8)
			b43legacy_calc_nrssi_offset(dev);

		b43legacy_phy_write(dev, B43legacy_PHY_G_CRS,
				    b43legacy_phy_read(dev, B43legacy_PHY_G_CRS)
				    & 0x7FFF);
		b43legacy_phy_write(dev, 0x0802,
				    b43legacy_phy_read(dev, 0x0802) & 0xFFFC);
		backup[7] = b43legacy_read16(dev, 0x03E2);
		b43legacy_write16(dev, 0x03E2,
				  b43legacy_read16(dev, 0x03E2) | 0x8000);
		backup[0] = b43legacy_radio_read16(dev, 0x007A);
		backup[1] = b43legacy_radio_read16(dev, 0x0052);
		backup[2] = b43legacy_radio_read16(dev, 0x0043);
		backup[3] = b43legacy_phy_read(dev, 0x0015);
		backup[4] = b43legacy_phy_read(dev, 0x005A);
		backup[5] = b43legacy_phy_read(dev, 0x0059);
		backup[6] = b43legacy_phy_read(dev, 0x0058);
		backup[8] = b43legacy_read16(dev, 0x03E6);
		backup[9] = b43legacy_read16(dev, B43legacy_MMIO_CHANNEL_EXT);
		if (phy->rev >= 3) {
			backup[10] = b43legacy_phy_read(dev, 0x002E);
			backup[11] = b43legacy_phy_read(dev, 0x002F);
			backup[12] = b43legacy_phy_read(dev, 0x080F);
			backup[13] = b43legacy_phy_read(dev,
						B43legacy_PHY_G_LO_CONTROL);
			backup[14] = b43legacy_phy_read(dev, 0x0801);
			backup[15] = b43legacy_phy_read(dev, 0x0060);
			backup[16] = b43legacy_phy_read(dev, 0x0014);
			backup[17] = b43legacy_phy_read(dev, 0x0478);

			b43legacy_phy_write(dev, 0x002E, 0);
			b43legacy_phy_write(dev, B43legacy_PHY_G_LO_CONTROL, 0);
			switch (phy->rev) {
			case 4: case 6: case 7:
				b43legacy_phy_write(dev, 0x0478,
						    b43legacy_phy_read(dev,
						    0x0478) | 0x0100);
				b43legacy_phy_write(dev, 0x0801,
						    b43legacy_phy_read(dev,
						    0x0801) | 0x0040);
				break;
			case 3: case 5:
				b43legacy_phy_write(dev, 0x0801,
						    b43legacy_phy_read(dev,
						    0x0801) & 0xFFBF);
				break;
			}
			b43legacy_phy_write(dev, 0x0060,
					    b43legacy_phy_read(dev, 0x0060)
					    | 0x0040);
			b43legacy_phy_write(dev, 0x0014,
					    b43legacy_phy_read(dev, 0x0014)
					    | 0x0200);
		}
		b43legacy_radio_write16(dev, 0x007A,
					b43legacy_radio_read16(dev, 0x007A)
					| 0x0070);
		b43legacy_set_all_gains(dev, 0, 8, 0);
		b43legacy_radio_write16(dev, 0x007A,
					b43legacy_radio_read16(dev, 0x007A)
					& 0x00F7);
		if (phy->rev >= 2) {
			b43legacy_phy_write(dev, 0x0811,
					    (b43legacy_phy_read(dev, 0x0811)
					    & 0xFFCF) | 0x0030);
			b43legacy_phy_write(dev, 0x0812,
					    (b43legacy_phy_read(dev, 0x0812)
					    & 0xFFCF) | 0x0010);
		}
		b43legacy_radio_write16(dev, 0x007A,
					b43legacy_radio_read16(dev, 0x007A)
					| 0x0080);
		udelay(20);

		/* first sample: signed 6-bit value in 0x47F bits 13:8 */
		nrssi0 = (s16)((b43legacy_phy_read(dev, 0x047F) >> 8) & 0x003F);
		if (nrssi0 >= 0x0020)
			nrssi0 -= 0x0040;

		b43legacy_radio_write16(dev, 0x007A,
					b43legacy_radio_read16(dev, 0x007A)
					& 0x007F);
		if (phy->analog >= 2)
			b43legacy_phy_write(dev, 0x0003,
					    (b43legacy_phy_read(dev, 0x0003)
					    & 0xFF9F) | 0x0040);

		b43legacy_write16(dev, B43legacy_MMIO_CHANNEL_EXT,
				  b43legacy_read16(dev,
				  B43legacy_MMIO_CHANNEL_EXT) | 0x2000);
		b43legacy_radio_write16(dev, 0x007A,
					b43legacy_radio_read16(dev, 0x007A)
					| 0x000F);
		b43legacy_phy_write(dev, 0x0015, 0xF330);
		if (phy->rev >= 2) {
			b43legacy_phy_write(dev, 0x0812,
					    (b43legacy_phy_read(dev, 0x0812)
					    & 0xFFCF) | 0x0020);
			b43legacy_phy_write(dev, 0x0811,
					    (b43legacy_phy_read(dev, 0x0811)
					    & 0xFFCF) | 0x0020);
		}

		b43legacy_set_all_gains(dev, 3, 0, 1);
		if (phy->radio_rev == 8)
			b43legacy_radio_write16(dev, 0x0043, 0x001F);
		else {
			tmp = b43legacy_radio_read16(dev, 0x0052) & 0xFF0F;
			b43legacy_radio_write16(dev, 0x0052, tmp | 0x0060);
			tmp = b43legacy_radio_read16(dev, 0x0043) & 0xFFF0;
			b43legacy_radio_write16(dev, 0x0043, tmp | 0x0009);
		}
		b43legacy_phy_write(dev, 0x005A, 0x0480);
		b43legacy_phy_write(dev, 0x0059, 0x0810);
		b43legacy_phy_write(dev, 0x0058, 0x000D);
		udelay(20);

		/* second sample */
		nrssi1 = (s16)((b43legacy_phy_read(dev, 0x047F) >> 8) & 0x003F);
		if (nrssi1 >= 0x0020)
			nrssi1 -= 0x0040;

		if (nrssi0 == nrssi1)
			phy->nrssislope = 0x00010000;
		else
			phy->nrssislope = 0x00400000 / (nrssi0 - nrssi1);
		if (nrssi0 >= -4) {
			phy->nrssi[0] = nrssi1;
			phy->nrssi[1] = nrssi0;
		}

		if (phy->rev >= 3) {
			b43legacy_phy_write(dev, 0x002E, backup[10]);
			b43legacy_phy_write(dev, 0x002F, backup[11]);
			b43legacy_phy_write(dev, 0x080F, backup[12]);
			b43legacy_phy_write(dev, B43legacy_PHY_G_LO_CONTROL,
					    backup[13]);
		}
		if (phy->rev >= 2) {
			b43legacy_phy_write(dev, 0x0812,
					    b43legacy_phy_read(dev, 0x0812)
					    & 0xFFCF);
			b43legacy_phy_write(dev, 0x0811,
					    b43legacy_phy_read(dev, 0x0811)
					    & 0xFFCF);
		}

		b43legacy_radio_write16(dev, 0x007A, backup[0]);
		b43legacy_radio_write16(dev, 0x0052, backup[1]);
		b43legacy_radio_write16(dev, 0x0043, backup[2]);
		b43legacy_write16(dev, 0x03E2, backup[7]);
		b43legacy_write16(dev, 0x03E6, backup[8]);
		b43legacy_write16(dev, B43legacy_MMIO_CHANNEL_EXT, backup[9]);
		b43legacy_phy_write(dev, 0x0015, backup[3]);
		b43legacy_phy_write(dev, 0x005A, backup[4]);
		b43legacy_phy_write(dev, 0x0059, backup[5]);
		b43legacy_phy_write(dev, 0x0058, backup[6]);
		b43legacy_synth_pu_workaround(dev, phy->channel);
		b43legacy_phy_write(dev, 0x0802,
				    b43legacy_phy_read(dev, 0x0802) | 0x0003);
		b43legacy_set_original_gains(dev);
		b43legacy_phy_write(dev, B43legacy_PHY_G_CRS,
				    b43legacy_phy_read(dev, B43legacy_PHY_G_CRS)
				    | 0x8000);
		if (phy->rev >= 3) {
			b43legacy_phy_write(dev, 0x0801, backup[14]);
			b43legacy_phy_write(dev, 0x0060, backup[15]);
			b43legacy_phy_write(dev, 0x0014, backup[16]);
			b43legacy_phy_write(dev, 0x0478, backup[17]);
		}
		b43legacy_nrssi_mem_update(dev);
		b43legacy_calc_nrssi_threshold(dev);
		break;
	default:
		B43legacy_BUG_ON(1);
	}
}

/* Compute and program the NRSSI (carrier-sense) threshold from the
 * calibrated nrssi[] points; the exact formula depends on PHY type,
 * radio revision and the current interference-mitigation mode. */
void b43legacy_calc_nrssi_threshold(struct b43legacy_wldev *dev)
{
	struct b43legacy_phy *phy = &dev->phy;
	s32 threshold;
	s32 a;
	s32 b;
	s16 tmp16;
	u16 tmp_u16;

	switch (phy->type) {
	case B43legacy_PHYTYPE_B: {
		if (phy->radio_ver != 0x2050)
			return;
		if (!(dev->dev->bus->sprom.boardflags_lo &
		    B43legacy_BFL_RSSI))
			return;

		if (phy->radio_rev >= 6) {
			threshold = (phy->nrssi[1] - phy->nrssi[0]) * 32;
			threshold += 20 * (phy->nrssi[0] + 1);
			threshold /= 40;
		} else
			threshold = phy->nrssi[1] - 5;

		threshold = clamp_val(threshold, 0, 0x3E);
		b43legacy_phy_read(dev, 0x0020); /* dummy read */
		b43legacy_phy_write(dev, 0x0020, (((u16)threshold) << 8)
				    | 0x001C);

		if (phy->radio_rev >= 6) {
			b43legacy_phy_write(dev, 0x0087, 0x0E0D);
			b43legacy_phy_write(dev, 0x0086, 0x0C0B);
			b43legacy_phy_write(dev, 0x0085, 0x0A09);
			b43legacy_phy_write(dev, 0x0084, 0x0808);
			b43legacy_phy_write(dev, 0x0083, 0x0808);
			b43legacy_phy_write(dev, 0x0082, 0x0604);
			b43legacy_phy_write(dev, 0x0081, 0x0302);
			b43legacy_phy_write(dev, 0x0080, 0x0100);
		}
		break;
	}
	case B43legacy_PHYTYPE_G:
		if (!phy->gmode ||
		    !(dev->dev->bus->sprom.boardflags_lo &
		    B43legacy_BFL_RSSI)) {
			/* No RSSI calibration data: use a fixed threshold
			 * based on the current hardware table entry. */
			tmp16 = b43legacy_nrssi_hw_read(dev, 0x20);
			if (tmp16 >= 0x20)
				tmp16 -= 0x40;
			if (tmp16 < 3)
				b43legacy_phy_write(dev, 0x048A,
						    (b43legacy_phy_read(dev,
						    0x048A) & 0xF000) | 0x09EB);
			else
				b43legacy_phy_write(dev, 0x048A,
						    (b43legacy_phy_read(dev,
						    0x048A) & 0xF000) | 0x0AED);
		} else {
			/* Pick the interpolation factors per mitigation
			 * mode, then map both through the nrssi[] points
			 * (6-bit fixed point with round-to-nearest). */
			if (phy->interfmode ==
			    B43legacy_RADIO_INTERFMODE_NONWLAN) {
				a = 0xE;
				b = 0xA;
			} else if (!phy->aci_wlan_automatic &&
				    phy->aci_enable) {
				a = 0x13;
				b = 0x12;
			} else {
				a = 0xE;
				b = 0x11;
			}

			a = a * (phy->nrssi[1] - phy->nrssi[0]);
			a += (phy->nrssi[0] << 6);
			if (a < 32)
				a += 31;
			else
				a += 32;
			a = a >> 6;
			a = clamp_val(a, -31, 31);

			b = b * (phy->nrssi[1] - phy->nrssi[0]);
			b += (phy->nrssi[0] << 6);
			if (b < 32)
				b += 31;
			else
				b += 32;
			b = b >> 6;
			b = clamp_val(b, -31, 31);

			tmp_u16 = b43legacy_phy_read(dev, 0x048A) & 0xF000;
			tmp_u16 |= ((u32)b & 0x0000003F);
			tmp_u16 |= (((u32)a & 0x0000003F) << 6);
			b43legacy_phy_write(dev, 0x048A, tmp_u16);
		}
		break;
	default:
		B43legacy_BUG_ON(1);
	}
}

/* Stack implementation to save/restore values from the
 * interference mitigation code.
 * It is safe to restore values in random order.
*/ static void _stack_save(u32 *_stackptr, size_t *stackidx, u8 id, u16 offset, u16 value) { u32 *stackptr = &(_stackptr[*stackidx]); B43legacy_WARN_ON(!((offset & 0xE000) == 0x0000)); B43legacy_WARN_ON(!((id & 0xF8) == 0x00)); *stackptr = offset; *stackptr |= ((u32)id) << 13; *stackptr |= ((u32)value) << 16; (*stackidx)++; B43legacy_WARN_ON(!(*stackidx < B43legacy_INTERFSTACK_SIZE)); } static u16 _stack_restore(u32 *stackptr, u8 id, u16 offset) { size_t i; B43legacy_WARN_ON(!((offset & 0xE000) == 0x0000)); B43legacy_WARN_ON(!((id & 0xF8) == 0x00)); for (i = 0; i < B43legacy_INTERFSTACK_SIZE; i++, stackptr++) { if ((*stackptr & 0x00001FFF) != offset) continue; if (((*stackptr & 0x00007000) >> 13) != id) continue; return ((*stackptr & 0xFFFF0000) >> 16); } B43legacy_BUG_ON(1); return 0; } #define phy_stacksave(offset) \ do { \ _stack_save(stack, &stackidx, 0x1, (offset), \ b43legacy_phy_read(dev, (offset))); \ } while (0) #define phy_stackrestore(offset) \ do { \ b43legacy_phy_write(dev, (offset), \ _stack_restore(stack, 0x1, \ (offset))); \ } while (0) #define radio_stacksave(offset) \ do { \ _stack_save(stack, &stackidx, 0x2, (offset), \ b43legacy_radio_read16(dev, (offset))); \ } while (0) #define radio_stackrestore(offset) \ do { \ b43legacy_radio_write16(dev, (offset), \ _stack_restore(stack, 0x2, \ (offset))); \ } while (0) #define ilt_stacksave(offset) \ do { \ _stack_save(stack, &stackidx, 0x3, (offset), \ b43legacy_ilt_read(dev, (offset))); \ } while (0) #define ilt_stackrestore(offset) \ do { \ b43legacy_ilt_write(dev, (offset), \ _stack_restore(stack, 0x3, \ (offset))); \ } while (0) static void b43legacy_radio_interference_mitigation_enable(struct b43legacy_wldev *dev, int mode) { struct b43legacy_phy *phy = &dev->phy; u16 tmp; u16 flipped; u32 tmp32; size_t stackidx = 0; u32 *stack = phy->interfstack; switch (mode) { case B43legacy_RADIO_INTERFMODE_NONWLAN: if (phy->rev != 1) { b43legacy_phy_write(dev, 0x042B, b43legacy_phy_read(dev, 0x042B) | 0x0800); 
b43legacy_phy_write(dev, B43legacy_PHY_G_CRS, b43legacy_phy_read(dev, B43legacy_PHY_G_CRS) & ~0x4000); break; } radio_stacksave(0x0078); tmp = (b43legacy_radio_read16(dev, 0x0078) & 0x001E); flipped = flip_4bit(tmp); if (flipped < 10 && flipped >= 8) flipped = 7; else if (flipped >= 10) flipped -= 3; flipped = flip_4bit(flipped); flipped = (flipped << 1) | 0x0020; b43legacy_radio_write16(dev, 0x0078, flipped); b43legacy_calc_nrssi_threshold(dev); phy_stacksave(0x0406); b43legacy_phy_write(dev, 0x0406, 0x7E28); b43legacy_phy_write(dev, 0x042B, b43legacy_phy_read(dev, 0x042B) | 0x0800); b43legacy_phy_write(dev, B43legacy_PHY_RADIO_BITFIELD, b43legacy_phy_read(dev, B43legacy_PHY_RADIO_BITFIELD) | 0x1000); phy_stacksave(0x04A0); b43legacy_phy_write(dev, 0x04A0, (b43legacy_phy_read(dev, 0x04A0) & 0xC0C0) | 0x0008); phy_stacksave(0x04A1); b43legacy_phy_write(dev, 0x04A1, (b43legacy_phy_read(dev, 0x04A1) & 0xC0C0) | 0x0605); phy_stacksave(0x04A2); b43legacy_phy_write(dev, 0x04A2, (b43legacy_phy_read(dev, 0x04A2) & 0xC0C0) | 0x0204); phy_stacksave(0x04A8); b43legacy_phy_write(dev, 0x04A8, (b43legacy_phy_read(dev, 0x04A8) & 0xC0C0) | 0x0803); phy_stacksave(0x04AB); b43legacy_phy_write(dev, 0x04AB, (b43legacy_phy_read(dev, 0x04AB) & 0xC0C0) | 0x0605); phy_stacksave(0x04A7); b43legacy_phy_write(dev, 0x04A7, 0x0002); phy_stacksave(0x04A3); b43legacy_phy_write(dev, 0x04A3, 0x287A); phy_stacksave(0x04A9); b43legacy_phy_write(dev, 0x04A9, 0x2027); phy_stacksave(0x0493); b43legacy_phy_write(dev, 0x0493, 0x32F5); phy_stacksave(0x04AA); b43legacy_phy_write(dev, 0x04AA, 0x2027); phy_stacksave(0x04AC); b43legacy_phy_write(dev, 0x04AC, 0x32F5); break; case B43legacy_RADIO_INTERFMODE_MANUALWLAN: if (b43legacy_phy_read(dev, 0x0033) & 0x0800) break; phy->aci_enable = 1; phy_stacksave(B43legacy_PHY_RADIO_BITFIELD); phy_stacksave(B43legacy_PHY_G_CRS); if (phy->rev < 2) phy_stacksave(0x0406); else { phy_stacksave(0x04C0); phy_stacksave(0x04C1); } phy_stacksave(0x0033); phy_stacksave(0x04A7); 
phy_stacksave(0x04A3); phy_stacksave(0x04A9); phy_stacksave(0x04AA); phy_stacksave(0x04AC); phy_stacksave(0x0493); phy_stacksave(0x04A1); phy_stacksave(0x04A0); phy_stacksave(0x04A2); phy_stacksave(0x048A); phy_stacksave(0x04A8); phy_stacksave(0x04AB); if (phy->rev == 2) { phy_stacksave(0x04AD); phy_stacksave(0x04AE); } else if (phy->rev >= 3) { phy_stacksave(0x04AD); phy_stacksave(0x0415); phy_stacksave(0x0416); phy_stacksave(0x0417); ilt_stacksave(0x1A00 + 0x2); ilt_stacksave(0x1A00 + 0x3); } phy_stacksave(0x042B); phy_stacksave(0x048C); b43legacy_phy_write(dev, B43legacy_PHY_RADIO_BITFIELD, b43legacy_phy_read(dev, B43legacy_PHY_RADIO_BITFIELD) & ~0x1000); b43legacy_phy_write(dev, B43legacy_PHY_G_CRS, (b43legacy_phy_read(dev, B43legacy_PHY_G_CRS) & 0xFFFC) | 0x0002); b43legacy_phy_write(dev, 0x0033, 0x0800); b43legacy_phy_write(dev, 0x04A3, 0x2027); b43legacy_phy_write(dev, 0x04A9, 0x1CA8); b43legacy_phy_write(dev, 0x0493, 0x287A); b43legacy_phy_write(dev, 0x04AA, 0x1CA8); b43legacy_phy_write(dev, 0x04AC, 0x287A); b43legacy_phy_write(dev, 0x04A0, (b43legacy_phy_read(dev, 0x04A0) & 0xFFC0) | 0x001A); b43legacy_phy_write(dev, 0x04A7, 0x000D); if (phy->rev < 2) b43legacy_phy_write(dev, 0x0406, 0xFF0D); else if (phy->rev == 2) { b43legacy_phy_write(dev, 0x04C0, 0xFFFF); b43legacy_phy_write(dev, 0x04C1, 0x00A9); } else { b43legacy_phy_write(dev, 0x04C0, 0x00C1); b43legacy_phy_write(dev, 0x04C1, 0x0059); } b43legacy_phy_write(dev, 0x04A1, (b43legacy_phy_read(dev, 0x04A1) & 0xC0FF) | 0x1800); b43legacy_phy_write(dev, 0x04A1, (b43legacy_phy_read(dev, 0x04A1) & 0xFFC0) | 0x0015); b43legacy_phy_write(dev, 0x04A8, (b43legacy_phy_read(dev, 0x04A8) & 0xCFFF) | 0x1000); b43legacy_phy_write(dev, 0x04A8, (b43legacy_phy_read(dev, 0x04A8) & 0xF0FF) | 0x0A00); b43legacy_phy_write(dev, 0x04AB, (b43legacy_phy_read(dev, 0x04AB) & 0xCFFF) | 0x1000); b43legacy_phy_write(dev, 0x04AB, (b43legacy_phy_read(dev, 0x04AB) & 0xF0FF) | 0x0800); b43legacy_phy_write(dev, 0x04AB, 
(b43legacy_phy_read(dev, 0x04AB) & 0xFFCF) | 0x0010); b43legacy_phy_write(dev, 0x04AB, (b43legacy_phy_read(dev, 0x04AB) & 0xFFF0) | 0x0005); b43legacy_phy_write(dev, 0x04A8, (b43legacy_phy_read(dev, 0x04A8) & 0xFFCF) | 0x0010); b43legacy_phy_write(dev, 0x04A8, (b43legacy_phy_read(dev, 0x04A8) & 0xFFF0) | 0x0006); b43legacy_phy_write(dev, 0x04A2, (b43legacy_phy_read(dev, 0x04A2) & 0xF0FF) | 0x0800); b43legacy_phy_write(dev, 0x04A0, (b43legacy_phy_read(dev, 0x04A0) & 0xF0FF) | 0x0500); b43legacy_phy_write(dev, 0x04A2, (b43legacy_phy_read(dev, 0x04A2) & 0xFFF0) | 0x000B); if (phy->rev >= 3) { b43legacy_phy_write(dev, 0x048A, b43legacy_phy_read(dev, 0x048A) & ~0x8000); b43legacy_phy_write(dev, 0x0415, (b43legacy_phy_read(dev, 0x0415) & 0x8000) | 0x36D8); b43legacy_phy_write(dev, 0x0416, (b43legacy_phy_read(dev, 0x0416) & 0x8000) | 0x36D8); b43legacy_phy_write(dev, 0x0417, (b43legacy_phy_read(dev, 0x0417) & 0xFE00) | 0x016D); } else { b43legacy_phy_write(dev, 0x048A, b43legacy_phy_read(dev, 0x048A) | 0x1000); b43legacy_phy_write(dev, 0x048A, (b43legacy_phy_read(dev, 0x048A) & 0x9FFF) | 0x2000); tmp32 = b43legacy_shm_read32(dev, B43legacy_SHM_SHARED, B43legacy_UCODEFLAGS_OFFSET); if (!(tmp32 & 0x800)) { tmp32 |= 0x800; b43legacy_shm_write32(dev, B43legacy_SHM_SHARED, B43legacy_UCODEFLAGS_OFFSET, tmp32); } } if (phy->rev >= 2) b43legacy_phy_write(dev, 0x042B, b43legacy_phy_read(dev, 0x042B) | 0x0800); b43legacy_phy_write(dev, 0x048C, (b43legacy_phy_read(dev, 0x048C) & 0xF0FF) | 0x0200); if (phy->rev == 2) { b43legacy_phy_write(dev, 0x04AE, (b43legacy_phy_read(dev, 0x04AE) & 0xFF00) | 0x007F); b43legacy_phy_write(dev, 0x04AD, (b43legacy_phy_read(dev, 0x04AD) & 0x00FF) | 0x1300); } else if (phy->rev >= 6) { b43legacy_ilt_write(dev, 0x1A00 + 0x3, 0x007F); b43legacy_ilt_write(dev, 0x1A00 + 0x2, 0x007F); b43legacy_phy_write(dev, 0x04AD, b43legacy_phy_read(dev, 0x04AD) & 0x00FF); } b43legacy_calc_nrssi_slope(dev); break; default: B43legacy_BUG_ON(1); } } static void 
b43legacy_radio_interference_mitigation_disable(struct b43legacy_wldev *dev, int mode) { struct b43legacy_phy *phy = &dev->phy; u32 tmp32; u32 *stack = phy->interfstack; switch (mode) { case B43legacy_RADIO_INTERFMODE_NONWLAN: if (phy->rev != 1) { b43legacy_phy_write(dev, 0x042B, b43legacy_phy_read(dev, 0x042B) & ~0x0800); b43legacy_phy_write(dev, B43legacy_PHY_G_CRS, b43legacy_phy_read(dev, B43legacy_PHY_G_CRS) | 0x4000); break; } phy_stackrestore(0x0078); b43legacy_calc_nrssi_threshold(dev); phy_stackrestore(0x0406); b43legacy_phy_write(dev, 0x042B, b43legacy_phy_read(dev, 0x042B) & ~0x0800); if (!dev->bad_frames_preempt) b43legacy_phy_write(dev, B43legacy_PHY_RADIO_BITFIELD, b43legacy_phy_read(dev, B43legacy_PHY_RADIO_BITFIELD) & ~(1 << 11)); b43legacy_phy_write(dev, B43legacy_PHY_G_CRS, b43legacy_phy_read(dev, B43legacy_PHY_G_CRS) | 0x4000); phy_stackrestore(0x04A0); phy_stackrestore(0x04A1); phy_stackrestore(0x04A2); phy_stackrestore(0x04A8); phy_stackrestore(0x04AB); phy_stackrestore(0x04A7); phy_stackrestore(0x04A3); phy_stackrestore(0x04A9); phy_stackrestore(0x0493); phy_stackrestore(0x04AA); phy_stackrestore(0x04AC); break; case B43legacy_RADIO_INTERFMODE_MANUALWLAN: if (!(b43legacy_phy_read(dev, 0x0033) & 0x0800)) break; phy->aci_enable = 0; phy_stackrestore(B43legacy_PHY_RADIO_BITFIELD); phy_stackrestore(B43legacy_PHY_G_CRS); phy_stackrestore(0x0033); phy_stackrestore(0x04A3); phy_stackrestore(0x04A9); phy_stackrestore(0x0493); phy_stackrestore(0x04AA); phy_stackrestore(0x04AC); phy_stackrestore(0x04A0); phy_stackrestore(0x04A7); if (phy->rev >= 2) { phy_stackrestore(0x04C0); phy_stackrestore(0x04C1); } else phy_stackrestore(0x0406); phy_stackrestore(0x04A1); phy_stackrestore(0x04AB); phy_stackrestore(0x04A8); if (phy->rev == 2) { phy_stackrestore(0x04AD); phy_stackrestore(0x04AE); } else if (phy->rev >= 3) { phy_stackrestore(0x04AD); phy_stackrestore(0x0415); phy_stackrestore(0x0416); phy_stackrestore(0x0417); ilt_stackrestore(0x1A00 + 0x2); 
ilt_stackrestore(0x1A00 + 0x3); } phy_stackrestore(0x04A2); phy_stackrestore(0x04A8); phy_stackrestore(0x042B); phy_stackrestore(0x048C); tmp32 = b43legacy_shm_read32(dev, B43legacy_SHM_SHARED, B43legacy_UCODEFLAGS_OFFSET); if (tmp32 & 0x800) { tmp32 &= ~0x800; b43legacy_shm_write32(dev, B43legacy_SHM_SHARED, B43legacy_UCODEFLAGS_OFFSET, tmp32); } b43legacy_calc_nrssi_slope(dev); break; default: B43legacy_BUG_ON(1); } } #undef phy_stacksave #undef phy_stackrestore #undef radio_stacksave #undef radio_stackrestore #undef ilt_stacksave #undef ilt_stackrestore int b43legacy_radio_set_interference_mitigation(struct b43legacy_wldev *dev, int mode) { struct b43legacy_phy *phy = &dev->phy; int currentmode; if ((phy->type != B43legacy_PHYTYPE_G) || (phy->rev == 0) || (!phy->gmode)) return -ENODEV; phy->aci_wlan_automatic = 0; switch (mode) { case B43legacy_RADIO_INTERFMODE_AUTOWLAN: phy->aci_wlan_automatic = 1; if (phy->aci_enable) mode = B43legacy_RADIO_INTERFMODE_MANUALWLAN; else mode = B43legacy_RADIO_INTERFMODE_NONE; break; case B43legacy_RADIO_INTERFMODE_NONE: case B43legacy_RADIO_INTERFMODE_NONWLAN: case B43legacy_RADIO_INTERFMODE_MANUALWLAN: break; default: return -EINVAL; } currentmode = phy->interfmode; if (currentmode == mode) return 0; if (currentmode != B43legacy_RADIO_INTERFMODE_NONE) b43legacy_radio_interference_mitigation_disable(dev, currentmode); if (mode == B43legacy_RADIO_INTERFMODE_NONE) { phy->aci_enable = 0; phy->aci_hw_rssi = 0; } else b43legacy_radio_interference_mitigation_enable(dev, mode); phy->interfmode = mode; return 0; } u16 b43legacy_radio_calibrationvalue(struct b43legacy_wldev *dev) { u16 reg; u16 index; u16 ret; reg = b43legacy_radio_read16(dev, 0x0060); index = (reg & 0x001E) >> 1; ret = rcc_table[index] << 1; ret |= (reg & 0x0001); ret |= 0x0020; return ret; } #define LPD(L, P, D) (((L) << 2) | ((P) << 1) | ((D) << 0)) static u16 b43legacy_get_812_value(struct b43legacy_wldev *dev, u8 lpd) { struct b43legacy_phy *phy = &dev->phy; u16 
loop_or = 0; u16 adj_loopback_gain = phy->loopback_gain[0]; u8 loop; u16 extern_lna_control; if (!phy->gmode) return 0; if (!has_loopback_gain(phy)) { if (phy->rev < 7 || !(dev->dev->bus->sprom.boardflags_lo & B43legacy_BFL_EXTLNA)) { switch (lpd) { case LPD(0, 1, 1): return 0x0FB2; case LPD(0, 0, 1): return 0x00B2; case LPD(1, 0, 1): return 0x30B2; case LPD(1, 0, 0): return 0x30B3; default: B43legacy_BUG_ON(1); } } else { switch (lpd) { case LPD(0, 1, 1): return 0x8FB2; case LPD(0, 0, 1): return 0x80B2; case LPD(1, 0, 1): return 0x20B2; case LPD(1, 0, 0): return 0x20B3; default: B43legacy_BUG_ON(1); } } } else { if (phy->radio_rev == 8) adj_loopback_gain += 0x003E; else adj_loopback_gain += 0x0026; if (adj_loopback_gain >= 0x46) { adj_loopback_gain -= 0x46; extern_lna_control = 0x3000; } else if (adj_loopback_gain >= 0x3A) { adj_loopback_gain -= 0x3A; extern_lna_control = 0x2000; } else if (adj_loopback_gain >= 0x2E) { adj_loopback_gain -= 0x2E; extern_lna_control = 0x1000; } else { adj_loopback_gain -= 0x10; extern_lna_control = 0x0000; } for (loop = 0; loop < 16; loop++) { u16 tmp = adj_loopback_gain - 6 * loop; if (tmp < 6) break; } loop_or = (loop << 8) | extern_lna_control; if (phy->rev >= 7 && dev->dev->bus->sprom.boardflags_lo & B43legacy_BFL_EXTLNA) { if (extern_lna_control) loop_or |= 0x8000; switch (lpd) { case LPD(0, 1, 1): return 0x8F92; case LPD(0, 0, 1): return (0x8092 | loop_or); case LPD(1, 0, 1): return (0x2092 | loop_or); case LPD(1, 0, 0): return (0x2093 | loop_or); default: B43legacy_BUG_ON(1); } } else { switch (lpd) { case LPD(0, 1, 1): return 0x0F92; case LPD(0, 0, 1): case LPD(1, 0, 1): return (0x0092 | loop_or); case LPD(1, 0, 0): return (0x0093 | loop_or); default: B43legacy_BUG_ON(1); } } } return 0; } u16 b43legacy_radio_init2050(struct b43legacy_wldev *dev) { struct b43legacy_phy *phy = &dev->phy; u16 backup[21] = { 0 }; u16 ret; u16 i; u16 j; u32 tmp1 = 0; u32 tmp2 = 0; backup[0] = b43legacy_radio_read16(dev, 0x0043); backup[14] = 
/* Continuation of b43legacy_radio_init2050(); header is in the
 * previous chunk.  Save state, run the two measurement loops, then
 * restore everything. */
		     b43legacy_radio_read16(dev, 0x0051);
	backup[15] = b43legacy_radio_read16(dev, 0x0052);
	backup[1] = b43legacy_phy_read(dev, 0x0015);
	backup[16] = b43legacy_phy_read(dev, 0x005A);
	backup[17] = b43legacy_phy_read(dev, 0x0059);
	backup[18] = b43legacy_phy_read(dev, 0x0058);
	if (phy->type == B43legacy_PHYTYPE_B) {
		backup[2] = b43legacy_phy_read(dev, 0x0030);
		backup[3] = b43legacy_read16(dev, 0x03EC);
		b43legacy_phy_write(dev, 0x0030, 0x00FF);
		b43legacy_write16(dev, 0x03EC, 0x3F3F);
	} else {
		if (phy->gmode) {
			backup[4] = b43legacy_phy_read(dev, 0x0811);
			backup[5] = b43legacy_phy_read(dev, 0x0812);
			backup[6] = b43legacy_phy_read(dev, 0x0814);
			backup[7] = b43legacy_phy_read(dev, 0x0815);
			backup[8] = b43legacy_phy_read(dev,
						       B43legacy_PHY_G_CRS);
			backup[9] = b43legacy_phy_read(dev, 0x0802);
			b43legacy_phy_write(dev, 0x0814,
					    (b43legacy_phy_read(dev, 0x0814)
					    | 0x0003));
			b43legacy_phy_write(dev, 0x0815,
					    (b43legacy_phy_read(dev, 0x0815)
					    & 0xFFFC));
			b43legacy_phy_write(dev, B43legacy_PHY_G_CRS,
					    (b43legacy_phy_read(dev,
					    B43legacy_PHY_G_CRS) & 0x7FFF));
			b43legacy_phy_write(dev, 0x0802,
					    (b43legacy_phy_read(dev, 0x0802)
					    & 0xFFFC));
			if (phy->rev > 1) { /* loopback gain enabled */
				backup[19] = b43legacy_phy_read(dev, 0x080F);
				backup[20] = b43legacy_phy_read(dev, 0x0810);
				if (phy->rev >= 3)
					b43legacy_phy_write(dev, 0x080F,
							    0xC020);
				else
					b43legacy_phy_write(dev, 0x080F,
							    0x8020);
				b43legacy_phy_write(dev, 0x0810, 0x0000);
			}
			b43legacy_phy_write(dev, 0x0812,
					    b43legacy_get_812_value(dev,
					    LPD(0, 1, 1)));
			if (phy->rev < 7 ||
			    !(dev->dev->bus->sprom.boardflags_lo
			    & B43legacy_BFL_EXTLNA))
				b43legacy_phy_write(dev, 0x0811, 0x01B3);
			else
				b43legacy_phy_write(dev, 0x0811, 0x09B3);
		}
	}
	b43legacy_write16(dev, B43legacy_MMIO_PHY_RADIO,
			  (b43legacy_read16(dev, B43legacy_MMIO_PHY_RADIO)
			  | 0x8000));
	backup[10] = b43legacy_phy_read(dev, 0x0035);
	b43legacy_phy_write(dev, 0x0035,
			    (b43legacy_phy_read(dev, 0x0035) & 0xFF7F));
	backup[11] = b43legacy_read16(dev, 0x03E6);
	backup[12] = b43legacy_read16(dev, B43legacy_MMIO_CHANNEL_EXT);

	/* Initialization */
	if (phy->analog == 0)
		b43legacy_write16(dev, 0x03E6, 0x0122);
	else {
		if (phy->analog >= 2)
			b43legacy_phy_write(dev, 0x0003,
					    (b43legacy_phy_read(dev, 0x0003)
					    & 0xFFBF) | 0x0040);
		b43legacy_write16(dev, B43legacy_MMIO_CHANNEL_EXT,
				  (b43legacy_read16(dev,
				  B43legacy_MMIO_CHANNEL_EXT) | 0x2000));
	}

	ret = b43legacy_radio_calibrationvalue(dev);

	if (phy->type == B43legacy_PHYTYPE_B)
		b43legacy_radio_write16(dev, 0x0078, 0x0026);

	if (phy->gmode)
		b43legacy_phy_write(dev, 0x0812,
				    b43legacy_get_812_value(dev,
				    LPD(0, 1, 1)));
	b43legacy_phy_write(dev, 0x0015, 0xBFAF);
	b43legacy_phy_write(dev, 0x002B, 0x1403);
	if (phy->gmode)
		b43legacy_phy_write(dev, 0x0812,
				    b43legacy_get_812_value(dev,
				    LPD(0, 0, 1)));
	b43legacy_phy_write(dev, 0x0015, 0xBFA0);
	b43legacy_radio_write16(dev, 0x0051,
				(b43legacy_radio_read16(dev, 0x0051)
				| 0x0004));
	if (phy->radio_rev == 8)
		b43legacy_radio_write16(dev, 0x0043, 0x001F);
	else {
		b43legacy_radio_write16(dev, 0x0052, 0x0000);
		b43legacy_radio_write16(dev, 0x0043,
					(b43legacy_radio_read16(dev, 0x0043)
					& 0xFFF0) | 0x0009);
	}
	b43legacy_phy_write(dev, 0x0058, 0x0000);

	/* Reference measurement: accumulate 16 samples into tmp1. */
	for (i = 0; i < 16; i++) {
		b43legacy_phy_write(dev, 0x005A, 0x0480);
		b43legacy_phy_write(dev, 0x0059, 0xC810);
		b43legacy_phy_write(dev, 0x0058, 0x000D);
		if (phy->gmode)
			b43legacy_phy_write(dev, 0x0812,
					    b43legacy_get_812_value(dev,
					    LPD(1, 0, 1)));
		b43legacy_phy_write(dev, 0x0015, 0xAFB0);
		udelay(10);
		if (phy->gmode)
			b43legacy_phy_write(dev, 0x0812,
					    b43legacy_get_812_value(dev,
					    LPD(1, 0, 1)));
		b43legacy_phy_write(dev, 0x0015, 0xEFB0);
		udelay(10);
		if (phy->gmode)
			b43legacy_phy_write(dev, 0x0812,
					    b43legacy_get_812_value(dev,
					    LPD(1, 0, 0)));
		b43legacy_phy_write(dev, 0x0015, 0xFFF0);
		udelay(20);
		tmp1 += b43legacy_phy_read(dev, 0x002D);
		b43legacy_phy_write(dev, 0x0058, 0x0000);
		if (phy->gmode)
			b43legacy_phy_write(dev, 0x0812,
					    b43legacy_get_812_value(dev,
					    LPD(1, 0, 1)));
		b43legacy_phy_write(dev, 0x0015, 0xAFB0);
	}

	tmp1++;
	tmp1 >>= 9;
	udelay(10);
	b43legacy_phy_write(dev, 0x0058, 0x0000);

	/* Sweep the 16 possible 0x78 settings until the measured power
	 * (tmp2) drops below the reference (tmp1). */
	for (i = 0; i < 16; i++) {
		b43legacy_radio_write16(dev, 0x0078,
					(flip_4bit(i) << 1) | 0x0020);
		backup[13] = b43legacy_radio_read16(dev, 0x0078);
		udelay(10);
		for (j = 0; j < 16; j++) {
			b43legacy_phy_write(dev, 0x005A, 0x0D80);
			b43legacy_phy_write(dev, 0x0059, 0xC810);
			b43legacy_phy_write(dev, 0x0058, 0x000D);
			if (phy->gmode)
				b43legacy_phy_write(dev, 0x0812,
						    b43legacy_get_812_value(dev,
						    LPD(1, 0, 1)));
			b43legacy_phy_write(dev, 0x0015, 0xAFB0);
			udelay(10);
			if (phy->gmode)
				b43legacy_phy_write(dev, 0x0812,
						    b43legacy_get_812_value(dev,
						    LPD(1, 0, 1)));
			b43legacy_phy_write(dev, 0x0015, 0xEFB0);
			udelay(10);
			if (phy->gmode)
				b43legacy_phy_write(dev, 0x0812,
						    b43legacy_get_812_value(dev,
						    LPD(1, 0, 0)));
			b43legacy_phy_write(dev, 0x0015, 0xFFF0);
			udelay(10);
			tmp2 += b43legacy_phy_read(dev, 0x002D);
			b43legacy_phy_write(dev, 0x0058, 0x0000);
			if (phy->gmode)
				b43legacy_phy_write(dev, 0x0812,
						    b43legacy_get_812_value(dev,
						    LPD(1, 0, 1)));
			b43legacy_phy_write(dev, 0x0015, 0xAFB0);
		}
		tmp2++;
		tmp2 >>= 8;
		if (tmp1 < tmp2)
			break;
	}

	/* Restore the registers */
	b43legacy_phy_write(dev, 0x0015, backup[1]);
	b43legacy_radio_write16(dev, 0x0051, backup[14]);
	b43legacy_radio_write16(dev, 0x0052, backup[15]);
	b43legacy_radio_write16(dev, 0x0043, backup[0]);
	b43legacy_phy_write(dev, 0x005A, backup[16]);
	b43legacy_phy_write(dev, 0x0059, backup[17]);
	b43legacy_phy_write(dev, 0x0058, backup[18]);
	b43legacy_write16(dev, 0x03E6, backup[11]);
	if (phy->analog != 0)
		b43legacy_write16(dev, B43legacy_MMIO_CHANNEL_EXT, backup[12]);
	b43legacy_phy_write(dev, 0x0035, backup[10]);
	b43legacy_radio_selectchannel(dev, phy->channel, 1);
	if (phy->type == B43legacy_PHYTYPE_B) {
		b43legacy_phy_write(dev, 0x0030, backup[2]);
		b43legacy_write16(dev, 0x03EC, backup[3]);
	} else {
		if (phy->gmode) {
			b43legacy_write16(dev, B43legacy_MMIO_PHY_RADIO,
					  (b43legacy_read16(dev,
					  B43legacy_MMIO_PHY_RADIO) & 0x7FFF));
			b43legacy_phy_write(dev, 0x0811, backup[4]);
			b43legacy_phy_write(dev, 0x0812, backup[5]);
			b43legacy_phy_write(dev, 0x0814, backup[6]);
			b43legacy_phy_write(dev, 0x0815, backup[7]);
			b43legacy_phy_write(dev, B43legacy_PHY_G_CRS,
					    backup[8]);
			b43legacy_phy_write(dev, 0x0802, backup[9]);
			if (phy->rev > 1) {
				b43legacy_phy_write(dev, 0x080F, backup[19]);
				b43legacy_phy_write(dev, 0x0810, backup[20]);
			}
		}
	}
	/* If the sweep never converged, fall back to the last raw 0x78
	 * readback instead of the calibration value. */
	if (i >= 15)
		ret = backup[13];

	return ret;
}

/* Map an (A-band) frequency to the value for radio register 0x3A. */
static inline u16 freq_r3A_value(u16 frequency)
{
	u16 value;

	if (frequency < 5091)
		value = 0x0040;
	else if (frequency < 5321)
		value = 0x0000;
	else if (frequency < 5806)
		value = 0x0080;
	else
		value = 0x0040;

	return value;
}

/* Program the TX I/Q compensation (PHY reg 0x69) by locating the
 * current radio 0x1E value in the (high | low) code tables. */
void b43legacy_radio_set_tx_iq(struct b43legacy_wldev *dev)
{
	static const u8 data_high[5] = { 0x00, 0x40, 0x80, 0x90, 0xD0 };
	static const u8 data_low[5]  = { 0x00, 0x01, 0x05, 0x06, 0x0A };
	u16 tmp = b43legacy_radio_read16(dev, 0x001E);
	int i;
	int j;

	for (i = 0; i < 5; i++) {
		for (j = 0; j < 5; j++) {
			if (tmp == (data_high[i] | data_low[j])) {
				b43legacy_phy_write(dev, 0x0069,
						    (i - j) << 8 | 0x00C0);
				return;
			}
		}
	}
}

/* Tune the radio to "channel" (0xFF selects the B/G default).
 * Handles the channel-14 Japan quirk and the synthesizer power-up
 * workaround; always returns 0. */
int b43legacy_radio_selectchannel(struct b43legacy_wldev *dev,
				  u8 channel,
				  int synthetic_pu_workaround)
{
	struct b43legacy_phy *phy = &dev->phy;

	if (channel == 0xFF) {
		switch (phy->type) {
		case B43legacy_PHYTYPE_B:
		case B43legacy_PHYTYPE_G:
			channel = B43legacy_RADIO_DEFAULT_CHANNEL_BG;
			break;
		default:
			B43legacy_WARN_ON(1);
		}
	}

	/* TODO: Check if channel is valid - return -EINVAL if not */
	if (synthetic_pu_workaround)
		b43legacy_synth_pu_workaround(dev, channel);

	b43legacy_write16(dev, B43legacy_MMIO_CHANNEL,
			  channel2freq_bg(channel));

	if (channel == 14) {
		if (dev->dev->bus->sprom.country_code == 5)   /* JAPAN */
			b43legacy_shm_write32(dev, B43legacy_SHM_SHARED,
					      B43legacy_UCODEFLAGS_OFFSET,
					      b43legacy_shm_read32(dev,
					      B43legacy_SHM_SHARED,
					      B43legacy_UCODEFLAGS_OFFSET)
					      & ~(1 << 7));
		else
			b43legacy_shm_write32(dev, B43legacy_SHM_SHARED,
					      B43legacy_UCODEFLAGS_OFFSET,
					      b43legacy_shm_read32(dev,
					      B43legacy_SHM_SHARED,
					      B43legacy_UCODEFLAGS_OFFSET)
					      | (1 << 7));
		b43legacy_write16(dev, B43legacy_MMIO_CHANNEL_EXT,
				  b43legacy_read16(dev,
				  B43legacy_MMIO_CHANNEL_EXT) | (1 << 11));
	} else
		b43legacy_write16(dev, B43legacy_MMIO_CHANNEL_EXT,
				  b43legacy_read16(dev,
				  B43legacy_MMIO_CHANNEL_EXT) & 0xF7BF);

	phy->channel = channel;
	/*XXX: Using the longer of 2 timeouts (8000 vs 2000 usecs). Specs states
	 *	that 2000 usecs might suffice. */
	msleep(8);

	return 0;
}

/* Program the TX antenna selection into the three SHM control words
 * (bits 9:8 of each). */
void b43legacy_radio_set_txantenna(struct b43legacy_wldev *dev, u32 val)
{
	u16 tmp;

	val <<= 8;
	tmp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, 0x0022) & 0xFCFF;
	b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x0022, tmp | val);
	tmp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, 0x03A8) & 0xFCFF;
	b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x03A8, tmp | val);
	tmp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, 0x0054) & 0xFCFF;
	b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x0054, tmp | val);
}

/* http://bcm-specs.sipsolutions.net/TX_Gain_Base_Band */
/* Map a 0..63 TX power value to the baseband gain code. */
static u16 b43legacy_get_txgain_base_band(u16 txpower)
{
	u16 ret;

	B43legacy_WARN_ON(txpower > 63);

	if (txpower >= 54)
		ret = 2;
	else if (txpower >= 49)
		ret = 4;
	else if (txpower >= 44)
		ret = 5;
	else
		ret = 6;

	return ret;
}

/* http://bcm-specs.sipsolutions.net/TX_Gain_Radio_Frequency_Power_Amplifier */
/* Map a 0..63 TX power value to the RF power-amplifier gain code. */
static u16 b43legacy_get_txgain_freq_power_amp(u16 txpower)
{
	u16 ret;

	B43legacy_WARN_ON(txpower > 63);

	if (txpower >= 32)
		ret = 0;
	else if (txpower >= 25)
		ret = 1;
	else if (txpower >= 20)
		ret = 2;
	else if (txpower >= 12)
		ret = 3;
	else
		ret = 4;

	return ret;
}

/* http://bcm-specs.sipsolutions.net/TX_Gain_Digital_Analog_Converter */
/* Map a 0..63 TX power value to the DAC gain code.
 * NOTE(review): definition runs past the end of this chunk; the final
 * "return" continues in the following text and is left untouched. */
static u16 b43legacy_get_txgain_dac(u16 txpower)
{
	u16 ret;

	B43legacy_WARN_ON(txpower > 63);

	if (txpower >= 54)
		ret = txpower - 53;
	else if (txpower >= 49)
		ret = txpower - 42;
	else if (txpower >= 44)
		ret = txpower - 37;
	else if (txpower >= 32)
		ret = txpower - 32;
	else if (txpower >= 25)
		ret = txpower - 20;
	else if (txpower >= 20)
		ret = txpower - 13;
	else if (txpower >= 12)
		ret = txpower - 8;
	else
		ret = txpower;
	return
ret; } void b43legacy_radio_set_txpower_a(struct b43legacy_wldev *dev, u16 txpower) { struct b43legacy_phy *phy = &dev->phy; u16 pamp; u16 base; u16 dac; u16 ilt; txpower = clamp_val(txpower, 0, 63); pamp = b43legacy_get_txgain_freq_power_amp(txpower); pamp <<= 5; pamp &= 0x00E0; b43legacy_phy_write(dev, 0x0019, pamp); base = b43legacy_get_txgain_base_band(txpower); base &= 0x000F; b43legacy_phy_write(dev, 0x0017, base | 0x0020); ilt = b43legacy_ilt_read(dev, 0x3001); ilt &= 0x0007; dac = b43legacy_get_txgain_dac(txpower); dac <<= 3; dac |= ilt; b43legacy_ilt_write(dev, 0x3001, dac); phy->txpwr_offset = txpower; /* TODO: FuncPlaceholder (Adjust BB loft cancel) */ } void b43legacy_radio_set_txpower_bg(struct b43legacy_wldev *dev, u16 baseband_attenuation, u16 radio_attenuation, u16 txpower) { struct b43legacy_phy *phy = &dev->phy; if (baseband_attenuation == 0xFFFF) baseband_attenuation = phy->bbatt; if (radio_attenuation == 0xFFFF) radio_attenuation = phy->rfatt; if (txpower == 0xFFFF) txpower = phy->txctl1; phy->bbatt = baseband_attenuation; phy->rfatt = radio_attenuation; phy->txctl1 = txpower; B43legacy_WARN_ON(baseband_attenuation > 11); if (phy->radio_rev < 6) B43legacy_WARN_ON(radio_attenuation > 9); else B43legacy_WARN_ON(radio_attenuation > 31); B43legacy_WARN_ON(txpower > 7); b43legacy_phy_set_baseband_attenuation(dev, baseband_attenuation); b43legacy_radio_write16(dev, 0x0043, radio_attenuation); b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x0064, radio_attenuation); if (phy->radio_ver == 0x2050) b43legacy_radio_write16(dev, 0x0052, (b43legacy_radio_read16(dev, 0x0052) & ~0x0070) | ((txpower << 4) & 0x0070)); /* FIXME: The spec is very weird and unclear here. 
*/ if (phy->type == B43legacy_PHYTYPE_G) b43legacy_phy_lo_adjust(dev, 0); } u16 b43legacy_default_baseband_attenuation(struct b43legacy_wldev *dev) { struct b43legacy_phy *phy = &dev->phy; if (phy->radio_ver == 0x2050 && phy->radio_rev < 6) return 0; return 2; } u16 b43legacy_default_radio_attenuation(struct b43legacy_wldev *dev) { struct b43legacy_phy *phy = &dev->phy; u16 att = 0xFFFF; switch (phy->radio_ver) { case 0x2053: switch (phy->radio_rev) { case 1: att = 6; break; } break; case 0x2050: switch (phy->radio_rev) { case 0: att = 5; break; case 1: if (phy->type == B43legacy_PHYTYPE_G) { if (is_bcm_board_vendor(dev) && dev->dev->bus->boardinfo.type == 0x421 && dev->dev->bus->boardinfo.rev >= 30) att = 3; else if (is_bcm_board_vendor(dev) && dev->dev->bus->boardinfo.type == 0x416) att = 3; else att = 1; } else { if (is_bcm_board_vendor(dev) && dev->dev->bus->boardinfo.type == 0x421 && dev->dev->bus->boardinfo.rev >= 30) att = 7; else att = 6; } break; case 2: if (phy->type == B43legacy_PHYTYPE_G) { if (is_bcm_board_vendor(dev) && dev->dev->bus->boardinfo.type == 0x421 && dev->dev->bus->boardinfo.rev >= 30) att = 3; else if (is_bcm_board_vendor(dev) && dev->dev->bus->boardinfo.type == 0x416) att = 5; else if (dev->dev->bus->chip_id == 0x4320) att = 4; else att = 3; } else att = 6; break; case 3: att = 5; break; case 4: case 5: att = 1; break; case 6: case 7: att = 5; break; case 8: att = 0x1A; break; case 9: default: att = 5; } } if (is_bcm_board_vendor(dev) && dev->dev->bus->boardinfo.type == 0x421) { if (dev->dev->bus->boardinfo.rev < 0x43) att = 2; else if (dev->dev->bus->boardinfo.rev < 0x51) att = 3; } if (att == 0xFFFF) att = 5; return att; } u16 b43legacy_default_txctl1(struct b43legacy_wldev *dev) { struct b43legacy_phy *phy = &dev->phy; if (phy->radio_ver != 0x2050) return 0; if (phy->radio_rev == 1) return 3; if (phy->radio_rev < 6) return 2; if (phy->radio_rev == 8) return 1; return 0; } void b43legacy_radio_turn_on(struct b43legacy_wldev *dev) { 
struct b43legacy_phy *phy = &dev->phy; int err; u8 channel; might_sleep(); if (phy->radio_on) return; switch (phy->type) { case B43legacy_PHYTYPE_B: case B43legacy_PHYTYPE_G: b43legacy_phy_write(dev, 0x0015, 0x8000); b43legacy_phy_write(dev, 0x0015, 0xCC00); b43legacy_phy_write(dev, 0x0015, (phy->gmode ? 0x00C0 : 0x0000)); if (phy->radio_off_context.valid) { /* Restore the RFover values. */ b43legacy_phy_write(dev, B43legacy_PHY_RFOVER, phy->radio_off_context.rfover); b43legacy_phy_write(dev, B43legacy_PHY_RFOVERVAL, phy->radio_off_context.rfoverval); phy->radio_off_context.valid = 0; } channel = phy->channel; err = b43legacy_radio_selectchannel(dev, B43legacy_RADIO_DEFAULT_CHANNEL_BG, 1); err |= b43legacy_radio_selectchannel(dev, channel, 0); B43legacy_WARN_ON(err); break; default: B43legacy_BUG_ON(1); } phy->radio_on = 1; } void b43legacy_radio_turn_off(struct b43legacy_wldev *dev, bool force) { struct b43legacy_phy *phy = &dev->phy; if (!phy->radio_on && !force) return; if (phy->type == B43legacy_PHYTYPE_G && dev->dev->id.revision >= 5) { u16 rfover, rfoverval; rfover = b43legacy_phy_read(dev, B43legacy_PHY_RFOVER); rfoverval = b43legacy_phy_read(dev, B43legacy_PHY_RFOVERVAL); if (!force) { phy->radio_off_context.rfover = rfover; phy->radio_off_context.rfoverval = rfoverval; phy->radio_off_context.valid = 1; } b43legacy_phy_write(dev, B43legacy_PHY_RFOVER, rfover | 0x008C); b43legacy_phy_write(dev, B43legacy_PHY_RFOVERVAL, rfoverval & 0xFF73); } else b43legacy_phy_write(dev, 0x0015, 0xAA00); phy->radio_on = 0; b43legacydbg(dev->wl, "Radio initialized\n"); } void b43legacy_radio_clear_tssi(struct b43legacy_wldev *dev) { struct b43legacy_phy *phy = &dev->phy; switch (phy->type) { case B43legacy_PHYTYPE_B: case B43legacy_PHYTYPE_G: b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x0058, 0x7F7F); b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x005a, 0x7F7F); b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x0070, 0x7F7F); b43legacy_shm_write16(dev, 
B43legacy_SHM_SHARED, 0x0072, 0x7F7F); break; } }
gpl-2.0
weritos666/kernel_L7_II_KK_P715
arch/powerpc/platforms/pseries/eeh_sysfs.c
4542
3471
/* * Sysfs entries for PCI Error Recovery for PAPR-compliant platform. * Copyright IBM Corporation 2007 * Copyright Linas Vepstas <linas@austin.ibm.com> 2007 * * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Send comments and feedback to Linas Vepstas <linas@austin.ibm.com> */ #include <linux/pci.h> #include <linux/stat.h> #include <asm/ppc-pci.h> #include <asm/pci-bridge.h> /** * EEH_SHOW_ATTR -- Create sysfs entry for eeh statistic * @_name: name of file in sysfs directory * @_memb: name of member in struct pci_dn to access * @_format: printf format for display * * All of the attributes look very similar, so just * auto-gen a cut-n-paste routine to display them. 
*/ #define EEH_SHOW_ATTR(_name,_memb,_format) \ static ssize_t eeh_show_##_name(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct pci_dev *pdev = to_pci_dev(dev); \ struct eeh_dev *edev = pci_dev_to_eeh_dev(pdev); \ \ if (!edev) \ return 0; \ \ return sprintf(buf, _format "\n", edev->_memb); \ } \ static DEVICE_ATTR(_name, S_IRUGO, eeh_show_##_name, NULL); EEH_SHOW_ATTR(eeh_mode, mode, "0x%x"); EEH_SHOW_ATTR(eeh_config_addr, config_addr, "0x%x"); EEH_SHOW_ATTR(eeh_pe_config_addr, pe_config_addr, "0x%x"); EEH_SHOW_ATTR(eeh_check_count, check_count, "%d" ); EEH_SHOW_ATTR(eeh_freeze_count, freeze_count, "%d" ); EEH_SHOW_ATTR(eeh_false_positives, false_positives, "%d" ); void eeh_sysfs_add_device(struct pci_dev *pdev) { int rc=0; rc += device_create_file(&pdev->dev, &dev_attr_eeh_mode); rc += device_create_file(&pdev->dev, &dev_attr_eeh_config_addr); rc += device_create_file(&pdev->dev, &dev_attr_eeh_pe_config_addr); rc += device_create_file(&pdev->dev, &dev_attr_eeh_check_count); rc += device_create_file(&pdev->dev, &dev_attr_eeh_false_positives); rc += device_create_file(&pdev->dev, &dev_attr_eeh_freeze_count); if (rc) printk(KERN_WARNING "EEH: Unable to create sysfs entries\n"); } void eeh_sysfs_remove_device(struct pci_dev *pdev) { device_remove_file(&pdev->dev, &dev_attr_eeh_mode); device_remove_file(&pdev->dev, &dev_attr_eeh_config_addr); device_remove_file(&pdev->dev, &dev_attr_eeh_pe_config_addr); device_remove_file(&pdev->dev, &dev_attr_eeh_check_count); device_remove_file(&pdev->dev, &dev_attr_eeh_false_positives); device_remove_file(&pdev->dev, &dev_attr_eeh_freeze_count); }
gpl-2.0
energycsdx/kernel-msm-taoshan
drivers/hid/hid-roccat-pyra.c
4542
19432
/*
 * Roccat Pyra driver for Linux
 *
 * Copyright (c) 2010 Stefan Achatz <erazor_de@users.sourceforge.net>
 */

/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

/*
 * Roccat Pyra is a mobile gamer mouse which comes in wired and wireless
 * variant. Wireless variant is not tested.
 * Userland tools can be found at http://sourceforge.net/projects/roccat
 */

#include <linux/device.h>
#include <linux/input.h>
#include <linux/hid.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/hid-roccat.h>
#include "hid-ids.h"
#include "hid-roccat-common.h"
#include "hid-roccat-pyra.h"

static uint profile_numbers[5] = {0, 1, 2, 3, 4};

/* pyra_class is used for creating sysfs attributes via roccat char device */
static struct class *pyra_class;

/*
 * Record @new_profile as the active profile and cache its y_cpi value.
 * NOTE: callers must guarantee new_profile < 5 (see the sysfs write
 * handlers below, which validate userspace-supplied indices).
 */
static void profile_activated(struct pyra_device *pyra,
		unsigned int new_profile)
{
	pyra->actual_profile = new_profile;
	pyra->actual_cpi = pyra->profile_settings[pyra->actual_profile].y_cpi;
}

/*
 * Send a control request to the mouse. For the per-profile requests
 * @value selects the profile and must be 0..4.
 */
static int pyra_send_control(struct usb_device *usb_dev, int value,
		enum pyra_control_requests request)
{
	struct pyra_control control;

	if ((request == PYRA_CONTROL_REQUEST_PROFILE_SETTINGS ||
			request == PYRA_CONTROL_REQUEST_PROFILE_BUTTONS) &&
			(value < 0 || value > 4))
		return -EINVAL;

	control.command = PYRA_COMMAND_CONTROL;
	control.value = value;
	control.request = request;

	return roccat_common_send(usb_dev, PYRA_COMMAND_CONTROL,
			&control, sizeof(struct pyra_control));
}

/*
 * Poll until the device reports the status of the last command.
 * -EPROTO means we asked too early, so retry after a short sleep.
 */
static int pyra_receive_control_status(struct usb_device *usb_dev)
{
	int retval;
	struct pyra_control control;

	do {
		msleep(10);
		retval = roccat_common_receive(usb_dev, PYRA_COMMAND_CONTROL,
				&control, sizeof(struct pyra_control));

		/* requested too early, try again */
	} while (retval == -EPROTO);

	if (!retval && control.command == PYRA_COMMAND_CONTROL &&
			control.request == PYRA_CONTROL_REQUEST_STATUS &&
			control.value == 1)
		return 0;
	else {
		hid_err(usb_dev, "receive control status: unknown response 0x%x 0x%x\n",
			control.request, control.value);
		return retval ? retval : -EINVAL;
	}
}

/* Fetch profile @number's settings block from the device into @buf. */
static int pyra_get_profile_settings(struct usb_device *usb_dev,
		struct pyra_profile_settings *buf, int number)
{
	int retval;

	retval = pyra_send_control(usb_dev, number,
			PYRA_CONTROL_REQUEST_PROFILE_SETTINGS);
	if (retval)
		return retval;
	return roccat_common_receive(usb_dev, PYRA_COMMAND_PROFILE_SETTINGS,
			buf, sizeof(struct pyra_profile_settings));
}

/* Fetch profile @number's button assignments from the device into @buf. */
static int pyra_get_profile_buttons(struct usb_device *usb_dev,
		struct pyra_profile_buttons *buf, int number)
{
	int retval;

	retval = pyra_send_control(usb_dev, number,
			PYRA_CONTROL_REQUEST_PROFILE_BUTTONS);
	if (retval)
		return retval;
	return roccat_common_receive(usb_dev, PYRA_COMMAND_PROFILE_BUTTONS,
			buf, sizeof(struct pyra_profile_buttons));
}

/* Read the device-global settings block. */
static int pyra_get_settings(struct usb_device *usb_dev,
		struct pyra_settings *buf)
{
	return roccat_common_receive(usb_dev, PYRA_COMMAND_SETTINGS,
			buf, sizeof(struct pyra_settings));
}

/* Read firmware/device info. */
static int pyra_get_info(struct usb_device *usb_dev, struct pyra_info *buf)
{
	return roccat_common_receive(usb_dev, PYRA_COMMAND_INFO,
			buf, sizeof(struct pyra_info));
}

/* Send a data block and wait for the device to acknowledge it. */
static int pyra_send(struct usb_device *usb_dev, uint command,
		void const *buf, uint size)
{
	int retval;

	retval = roccat_common_send(usb_dev, command, buf, size);
	if (retval)
		return retval;
	return pyra_receive_control_status(usb_dev);
}

static int pyra_set_profile_settings(struct usb_device *usb_dev,
		struct pyra_profile_settings const *settings)
{
	return pyra_send(usb_dev, PYRA_COMMAND_PROFILE_SETTINGS, settings,
			sizeof(struct pyra_profile_settings));
}

static int pyra_set_profile_buttons(struct usb_device *usb_dev,
		struct pyra_profile_buttons const *buttons)
{
	return pyra_send(usb_dev, PYRA_COMMAND_PROFILE_BUTTONS, buttons,
			sizeof(struct pyra_profile_buttons));
}

static int pyra_set_settings(struct usb_device *usb_dev,
		struct pyra_settings const *settings)
{
	return pyra_send(usb_dev, PYRA_COMMAND_SETTINGS, settings,
			sizeof(struct pyra_settings));
}

/* sysfs read of the cached settings of profile attr->private. */
static ssize_t pyra_sysfs_read_profilex_settings(struct file *fp,
		struct kobject *kobj, struct bin_attribute *attr, char *buf,
		loff_t off, size_t count)
{
	struct device *dev =
			container_of(kobj, struct device, kobj)->parent->parent;
	struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));

	if (off >= sizeof(struct pyra_profile_settings))
		return 0;

	if (off + count > sizeof(struct pyra_profile_settings))
		count = sizeof(struct pyra_profile_settings) - off;

	mutex_lock(&pyra->pyra_lock);
	memcpy(buf, ((char const *)&pyra->profile_settings[*(uint *)(attr->private)]) + off,
			count);
	mutex_unlock(&pyra->pyra_lock);

	return count;
}

/* sysfs read of the cached button map of profile attr->private. */
static ssize_t pyra_sysfs_read_profilex_buttons(struct file *fp,
		struct kobject *kobj, struct bin_attribute *attr, char *buf,
		loff_t off, size_t count)
{
	struct device *dev =
			container_of(kobj, struct device, kobj)->parent->parent;
	struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));

	if (off >= sizeof(struct pyra_profile_buttons))
		return 0;

	if (off + count > sizeof(struct pyra_profile_buttons))
		count = sizeof(struct pyra_profile_buttons) - off;

	mutex_lock(&pyra->pyra_lock);
	memcpy(buf, ((char const *)&pyra->profile_buttons[*(uint *)(attr->private)]) + off,
			count);
	mutex_unlock(&pyra->pyra_lock);

	return count;
}

/*
 * sysfs write of a complete profile settings block. The target profile
 * index is embedded in the written data and comes from userspace, so it
 * is bounds-checked before being used to index profile_settings[5].
 */
static ssize_t pyra_sysfs_write_profile_settings(struct file *fp,
		struct kobject *kobj, struct bin_attribute *attr, char *buf,
		loff_t off, size_t count)
{
	struct device *dev =
			container_of(kobj, struct device, kobj)->parent->parent;
	struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
	struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
	int retval = 0;
	int difference;
	int profile_number;
	struct pyra_profile_settings *profile_settings;

	if (off != 0 || count != sizeof(struct pyra_profile_settings))
		return -EINVAL;

	profile_number = ((struct pyra_profile_settings const *)buf)->number;
	/* index comes from userspace; reject anything outside 0..4 */
	if (profile_number < 0 || profile_number > 4)
		return -EINVAL;
	profile_settings = &pyra->profile_settings[profile_number];

	mutex_lock(&pyra->pyra_lock);
	difference = memcmp(buf, profile_settings,
			sizeof(struct pyra_profile_settings));
	if (difference) {
		retval = pyra_set_profile_settings(usb_dev,
				(struct pyra_profile_settings const *)buf);
		if (!retval)
			memcpy(profile_settings, buf,
					sizeof(struct pyra_profile_settings));
	}
	mutex_unlock(&pyra->pyra_lock);

	if (retval)
		return retval;

	return sizeof(struct pyra_profile_settings);
}

/*
 * sysfs write of a complete profile button map; same userspace-supplied
 * index situation as pyra_sysfs_write_profile_settings().
 */
static ssize_t pyra_sysfs_write_profile_buttons(struct file *fp,
		struct kobject *kobj, struct bin_attribute *attr, char *buf,
		loff_t off, size_t count)
{
	struct device *dev =
			container_of(kobj, struct device, kobj)->parent->parent;
	struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
	struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
	int retval = 0;
	int difference;
	int profile_number;
	struct pyra_profile_buttons *profile_buttons;

	if (off != 0 || count != sizeof(struct pyra_profile_buttons))
		return -EINVAL;

	profile_number = ((struct pyra_profile_buttons const *)buf)->number;
	/* index comes from userspace; reject anything outside 0..4 */
	if (profile_number < 0 || profile_number > 4)
		return -EINVAL;
	profile_buttons = &pyra->profile_buttons[profile_number];

	mutex_lock(&pyra->pyra_lock);
	difference = memcmp(buf, profile_buttons,
			sizeof(struct pyra_profile_buttons));
	if (difference) {
		retval = pyra_set_profile_buttons(usb_dev,
				(struct pyra_profile_buttons const *)buf);
		if (!retval)
			memcpy(profile_buttons, buf,
					sizeof(struct pyra_profile_buttons));
	}
	mutex_unlock(&pyra->pyra_lock);

	if (retval)
		return retval;

	return sizeof(struct pyra_profile_buttons);
}

/* sysfs read of the cached device-global settings. */
static ssize_t pyra_sysfs_read_settings(struct file *fp,
		struct kobject *kobj, struct bin_attribute *attr, char *buf,
		loff_t off, size_t count)
{
	struct device *dev =
			container_of(kobj, struct device, kobj)->parent->parent;
	struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));

	if (off >= sizeof(struct pyra_settings))
		return 0;

	if (off + count > sizeof(struct pyra_settings))
		count = sizeof(struct pyra_settings) - off;

	mutex_lock(&pyra->pyra_lock);
	memcpy(buf, ((char const *)&pyra->settings) + off, count);
	mutex_unlock(&pyra->pyra_lock);

	return count;
}

/*
 * sysfs write of the device-global settings.
 *
 * BUGFIX: settings->startup_profile is userspace-controlled and was used
 * unchecked as an index into the 5-element profile_settings[] array via
 * profile_activated() — an out-of-bounds read. It is now validated and
 * the write rejected with -EINVAL when out of range.
 */
static ssize_t pyra_sysfs_write_settings(struct file *fp,
		struct kobject *kobj, struct bin_attribute *attr, char *buf,
		loff_t off, size_t count)
{
	struct device *dev =
			container_of(kobj, struct device, kobj)->parent->parent;
	struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
	struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
	int retval = 0;
	int difference;
	struct pyra_roccat_report roccat_report;
	struct pyra_settings const *settings;

	if (off != 0 || count != sizeof(struct pyra_settings))
		return -EINVAL;

	settings = (struct pyra_settings const *)buf;
	if (settings->startup_profile >= ARRAY_SIZE(pyra->profile_settings))
		return -EINVAL;

	mutex_lock(&pyra->pyra_lock);
	difference = memcmp(buf, &pyra->settings, sizeof(struct pyra_settings));
	if (difference) {
		retval = pyra_set_settings(usb_dev, settings);
		if (retval) {
			mutex_unlock(&pyra->pyra_lock);
			return retval;
		}

		memcpy(&pyra->settings, buf, sizeof(struct pyra_settings));

		profile_activated(pyra, pyra->settings.startup_profile);

		roccat_report.type = PYRA_MOUSE_EVENT_BUTTON_TYPE_PROFILE_2;
		roccat_report.value = pyra->settings.startup_profile + 1;
		roccat_report.key = 0;
		roccat_report_event(pyra->chrdev_minor,
				(uint8_t const *)&roccat_report);
	}
	mutex_unlock(&pyra->pyra_lock);

	return sizeof(struct pyra_settings);
}

static ssize_t pyra_sysfs_show_actual_cpi(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct pyra_device *pyra =
			hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
	return snprintf(buf, PAGE_SIZE, "%d\n", pyra->actual_cpi);
}

static ssize_t pyra_sysfs_show_actual_profile(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct pyra_device *pyra =
			hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
	return snprintf(buf, PAGE_SIZE, "%d\n", pyra->actual_profile);
}

static ssize_t pyra_sysfs_show_firmware_version(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct pyra_device *pyra =
			hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
	return snprintf(buf, PAGE_SIZE, "%d\n", pyra->firmware_version);
}

static ssize_t pyra_sysfs_show_startup_profile(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct pyra_device *pyra =
			hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
	return snprintf(buf, PAGE_SIZE, "%d\n", pyra->settings.startup_profile);
}

static struct device_attribute pyra_attributes[] = {
	__ATTR(actual_cpi, 0440, pyra_sysfs_show_actual_cpi, NULL),
	__ATTR(actual_profile, 0440, pyra_sysfs_show_actual_profile, NULL),
	__ATTR(firmware_version, 0440,
			pyra_sysfs_show_firmware_version, NULL),
	__ATTR(startup_profile, 0440,
			pyra_sysfs_show_startup_profile, NULL),
	__ATTR_NULL
};

static struct bin_attribute pyra_bin_attributes[] = {
	{
		.attr = { .name = "profile_settings", .mode = 0220 },
		.size = sizeof(struct pyra_profile_settings),
		.write = pyra_sysfs_write_profile_settings
	},
	{
		.attr = { .name = "profile1_settings", .mode = 0440 },
		.size = sizeof(struct pyra_profile_settings),
		.read = pyra_sysfs_read_profilex_settings,
		.private = &profile_numbers[0]
	},
	{
		.attr = { .name = "profile2_settings", .mode = 0440 },
		.size = sizeof(struct pyra_profile_settings),
		.read = pyra_sysfs_read_profilex_settings,
		.private = &profile_numbers[1]
	},
	{
		.attr = { .name = "profile3_settings", .mode = 0440 },
		.size = sizeof(struct pyra_profile_settings),
		.read = pyra_sysfs_read_profilex_settings,
		.private = &profile_numbers[2]
	},
	{
		.attr = { .name = "profile4_settings", .mode = 0440 },
		.size = sizeof(struct pyra_profile_settings),
		.read = pyra_sysfs_read_profilex_settings,
		.private = &profile_numbers[3]
	},
	{
		.attr = { .name = "profile5_settings", .mode = 0440 },
		.size = sizeof(struct pyra_profile_settings),
		.read = pyra_sysfs_read_profilex_settings,
		.private = &profile_numbers[4]
	},
	{
		.attr = { .name = "profile_buttons", .mode = 0220 },
		.size = sizeof(struct pyra_profile_buttons),
		.write = pyra_sysfs_write_profile_buttons
	},
	{
		.attr = { .name = "profile1_buttons", .mode = 0440 },
		.size = sizeof(struct pyra_profile_buttons),
		.read = pyra_sysfs_read_profilex_buttons,
		.private = &profile_numbers[0]
	},
	{
		.attr = { .name = "profile2_buttons", .mode = 0440 },
		.size = sizeof(struct pyra_profile_buttons),
		.read = pyra_sysfs_read_profilex_buttons,
		.private = &profile_numbers[1]
	},
	{
		.attr = { .name = "profile3_buttons", .mode = 0440 },
		.size = sizeof(struct pyra_profile_buttons),
		.read = pyra_sysfs_read_profilex_buttons,
		.private = &profile_numbers[2]
	},
	{
		.attr = { .name = "profile4_buttons", .mode = 0440 },
		.size = sizeof(struct pyra_profile_buttons),
		.read = pyra_sysfs_read_profilex_buttons,
		.private = &profile_numbers[3]
	},
	{
		.attr = { .name = "profile5_buttons", .mode = 0440 },
		.size = sizeof(struct pyra_profile_buttons),
		.read = pyra_sysfs_read_profilex_buttons,
		.private = &profile_numbers[4]
	},
	{
		.attr = { .name = "settings", .mode = 0660 },
		.size = sizeof(struct pyra_settings),
		.read = pyra_sysfs_read_settings,
		.write = pyra_sysfs_write_settings
	},
	__ATTR_NULL
};

/* Populate the driver's cached state by querying the device once. */
static int pyra_init_pyra_device_struct(struct usb_device *usb_dev,
		struct pyra_device *pyra)
{
	struct pyra_info info;
	int retval, i;

	mutex_init(&pyra->pyra_lock);

	retval = pyra_get_info(usb_dev, &info);
	if (retval)
		return retval;

	pyra->firmware_version = info.firmware_version;

	retval = pyra_get_settings(usb_dev, &pyra->settings);
	if (retval)
		return retval;

	for (i = 0; i < 5; ++i) {
		retval = pyra_get_profile_settings(usb_dev,
				&pyra->profile_settings[i], i);
		if (retval)
			return retval;

		retval = pyra_get_profile_buttons(usb_dev,
				&pyra->profile_buttons[i], i);
		if (retval)
			return retval;
	}

	profile_activated(pyra, pyra->settings.startup_profile);

	return 0;
}

/*
 * Allocate driver state and register the roccat char device, but only
 * for the mouse interface; the other interfaces get no drvdata. A
 * failing roccat_connect() is deliberately non-fatal.
 */
static int pyra_init_specials(struct hid_device *hdev)
{
	struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
	struct usb_device *usb_dev = interface_to_usbdev(intf);
	struct pyra_device *pyra;
	int retval;

	if (intf->cur_altsetting->desc.bInterfaceProtocol
			== USB_INTERFACE_PROTOCOL_MOUSE) {

		pyra = kzalloc(sizeof(*pyra), GFP_KERNEL);
		if (!pyra) {
			hid_err(hdev, "can't alloc device descriptor\n");
			return -ENOMEM;
		}
		hid_set_drvdata(hdev, pyra);

		retval = pyra_init_pyra_device_struct(usb_dev, pyra);
		if (retval) {
			hid_err(hdev, "couldn't init struct pyra_device\n");
			goto exit_free;
		}

		retval = roccat_connect(pyra_class, hdev,
				sizeof(struct pyra_roccat_report));
		if (retval < 0) {
			hid_err(hdev, "couldn't init char dev\n");
		} else {
			pyra->chrdev_minor = retval;
			pyra->roccat_claimed = 1;
		}
	} else {
		hid_set_drvdata(hdev, NULL);
	}

	return 0;
exit_free:
	kfree(pyra);
	return retval;
}

static void pyra_remove_specials(struct hid_device *hdev)
{
	struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
	struct pyra_device *pyra;

	if (intf->cur_altsetting->desc.bInterfaceProtocol
			== USB_INTERFACE_PROTOCOL_MOUSE) {
		pyra = hid_get_drvdata(hdev);
		if (pyra->roccat_claimed)
			roccat_disconnect(pyra->chrdev_minor);
		kfree(hid_get_drvdata(hdev));
	}
}

static int pyra_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
	int retval;

	retval = hid_parse(hdev);
	if (retval) {
		hid_err(hdev, "parse failed\n");
		goto exit;
	}

	retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
	if (retval) {
		hid_err(hdev, "hw start failed\n");
		goto exit;
	}

	retval = pyra_init_specials(hdev);
	if (retval) {
		hid_err(hdev, "couldn't install mouse\n");
		goto exit_stop;
	}
	return 0;

exit_stop:
	hid_hw_stop(hdev);
exit:
	return retval;
}

static void pyra_remove(struct hid_device *hdev)
{
	pyra_remove_specials(hdev);
	hid_hw_stop(hdev);
}

/*
 * Mirror profile/CPI changes the mouse reports (e.g. from its hardware
 * buttons) into the cached driver state.
 */
static void pyra_keep_values_up_to_date(struct pyra_device *pyra,
		u8 const *data)
{
	struct pyra_mouse_event_button const *button_event;

	switch (data[0]) {
	case PYRA_MOUSE_REPORT_NUMBER_BUTTON:
		button_event = (struct pyra_mouse_event_button const *)data;
		switch (button_event->type) {
		case PYRA_MOUSE_EVENT_BUTTON_TYPE_PROFILE_2:
			profile_activated(pyra, button_event->data1 - 1);
			break;
		case PYRA_MOUSE_EVENT_BUTTON_TYPE_CPI:
			pyra->actual_cpi = button_event->data1;
			break;
		}
		break;
	}
}

/* Forward interesting button events to the roccat char device. */
static void pyra_report_to_chrdev(struct pyra_device const *pyra,
		u8 const *data)
{
	struct pyra_roccat_report roccat_report;
	struct pyra_mouse_event_button const *button_event;

	if (data[0] != PYRA_MOUSE_REPORT_NUMBER_BUTTON)
		return;

	button_event = (struct pyra_mouse_event_button const *)data;

	switch (button_event->type) {
	case PYRA_MOUSE_EVENT_BUTTON_TYPE_PROFILE_2:
	case PYRA_MOUSE_EVENT_BUTTON_TYPE_CPI:
		roccat_report.type = button_event->type;
		roccat_report.value = button_event->data1;
		roccat_report.key = 0;
		roccat_report_event(pyra->chrdev_minor,
				(uint8_t const *)&roccat_report);
		break;
	case PYRA_MOUSE_EVENT_BUTTON_TYPE_MACRO:
	case PYRA_MOUSE_EVENT_BUTTON_TYPE_SHORTCUT:
	case PYRA_MOUSE_EVENT_BUTTON_TYPE_QUICKLAUNCH:
		if (button_event->data2 == PYRA_MOUSE_EVENT_BUTTON_PRESS) {
			roccat_report.type = button_event->type;
			roccat_report.key = button_event->data1;
			/*
			 * pyra reports profile numbers with range 1-5.
			 * Keeping this behaviour.
			 */
			roccat_report.value = pyra->actual_profile + 1;
			roccat_report_event(pyra->chrdev_minor,
					(uint8_t const *)&roccat_report);
		}
		break;
	}
}

static int pyra_raw_event(struct hid_device *hdev, struct hid_report *report,
		u8 *data, int size)
{
	struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
	struct pyra_device *pyra = hid_get_drvdata(hdev);

	if (intf->cur_altsetting->desc.bInterfaceProtocol
			!= USB_INTERFACE_PROTOCOL_MOUSE)
		return 0;

	if (pyra == NULL)
		return 0;

	pyra_keep_values_up_to_date(pyra, data);

	if (pyra->roccat_claimed)
		pyra_report_to_chrdev(pyra, data);

	return 0;
}

static const struct hid_device_id pyra_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT,
			USB_DEVICE_ID_ROCCAT_PYRA_WIRED) },
	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT,
			USB_DEVICE_ID_ROCCAT_PYRA_WIRELESS) },
	{ }
};

MODULE_DEVICE_TABLE(hid, pyra_devices);

static struct hid_driver pyra_driver = {
		.name = "pyra",
		.id_table = pyra_devices,
		.probe = pyra_probe,
		.remove = pyra_remove,
		.raw_event = pyra_raw_event
};

static int __init pyra_init(void)
{
	int retval;

	/* class name has to be same as driver name */
	pyra_class = class_create(THIS_MODULE, "pyra");
	if (IS_ERR(pyra_class))
		return PTR_ERR(pyra_class);
	pyra_class->dev_attrs = pyra_attributes;
	pyra_class->dev_bin_attrs = pyra_bin_attributes;

	retval = hid_register_driver(&pyra_driver);
	if (retval)
		class_destroy(pyra_class);
	return retval;
}

static void __exit pyra_exit(void)
{
	hid_unregister_driver(&pyra_driver);
	class_destroy(pyra_class);
}

module_init(pyra_init);
module_exit(pyra_exit);

MODULE_AUTHOR("Stefan Achatz");
MODULE_DESCRIPTION("USB Roccat Pyra driver");
MODULE_LICENSE("GPL v2");
gpl-2.0
CarbonROM/android_kernel_htc_msm8974
drivers/scsi/mpt2sas/mpt2sas_ctl.c
4798
88347
/* * Management Module Support for MPT (Message Passing Technology) based * controllers * * This code is based on drivers/scsi/mpt2sas/mpt2_ctl.c * Copyright (C) 2007-2010 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * NO WARRANTY * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is * solely responsible for determining the appropriateness of using and * distributing the Program and assumes all risks associated with its * exercise of rights under this Agreement, including but not limited to * the risks and costs of program errors, damage to or loss of data, * programs or equipment, and unavailability or interruption of operations. 
* DISCLAIMER OF LIABILITY * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/mutex.h> #include <linux/compat.h> #include <linux/poll.h> #include <linux/io.h> #include <linux/uaccess.h> #include "mpt2sas_base.h" #include "mpt2sas_ctl.h" static DEFINE_MUTEX(_ctl_mutex); static struct fasync_struct *async_queue; static DECLARE_WAIT_QUEUE_HEAD(ctl_poll_wait); static int _ctl_send_release(struct MPT2SAS_ADAPTER *ioc, u8 buffer_type, u8 *issue_reset); /** * enum block_state - blocking state * @NON_BLOCKING: non blocking * @BLOCKING: blocking * * These states are for ioctls that need to wait for a response * from firmware, so they probably require sleep. */ enum block_state { NON_BLOCKING, BLOCKING, }; #ifdef CONFIG_SCSI_MPT2SAS_LOGGING /** * _ctl_sas_device_find_by_handle - sas device search * @ioc: per adapter object * @handle: sas device handle (assigned by firmware) * Context: Calling function should acquire ioc->sas_device_lock * * This searches for sas_device based on sas_address, then return sas_device * object. 
*/
static struct _sas_device *
_ctl_sas_device_find_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle)
{
	struct _sas_device *sas_device, *r;

	/*
	 * Note: the lookup key is the firmware device handle; the kernel-doc
	 * above says "sas_address", which is a copy/paste error.
	 */
	r = NULL;
	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
		if (sas_device->handle != handle)
			continue;
		r = sas_device;
		goto out;
	}
 out:
	return r;
}

/**
 * _ctl_display_some_debug - debug routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @calling_function_name: string pass from calling function
 * @mpi_reply: reply message frame
 * Context: none.
 *
 * Function for displaying debug info helpful when debugging issues
 * in this module.
 */
static void
_ctl_display_some_debug(struct MPT2SAS_ADAPTER *ioc, u16 smid,
	char *calling_function_name, MPI2DefaultReply_t *mpi_reply)
{
	Mpi2ConfigRequest_t *mpi_request;
	char *desc = NULL;

	/* entire routine is a no-op unless ioctl debugging is enabled */
	if (!(ioc->logging_level & MPT_DEBUG_IOCTL))
		return;

	/* map the request function code to a human-readable description */
	mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
	switch (mpi_request->Function) {
	case MPI2_FUNCTION_SCSI_IO_REQUEST:
	{
		Mpi2SCSIIORequest_t *scsi_request =
		    (Mpi2SCSIIORequest_t *)mpi_request;

		snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
		    "scsi_io, cmd(0x%02x), cdb_len(%d)",
		    scsi_request->CDB.CDB32[0],
		    le16_to_cpu(scsi_request->IoFlags) & 0xF);
		desc = ioc->tmp_string;
		break;
	}
	case MPI2_FUNCTION_SCSI_TASK_MGMT:
		desc = "task_mgmt";
		break;
	case MPI2_FUNCTION_IOC_INIT:
		desc = "ioc_init";
		break;
	case MPI2_FUNCTION_IOC_FACTS:
		desc = "ioc_facts";
		break;
	case MPI2_FUNCTION_CONFIG:
	{
		Mpi2ConfigRequest_t *config_request =
		    (Mpi2ConfigRequest_t *)mpi_request;

		snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
		    "config, type(0x%02x), ext_type(0x%02x), number(%d)",
		    (config_request->Header.PageType &
		     MPI2_CONFIG_PAGETYPE_MASK), config_request->ExtPageType,
		    config_request->Header.PageNumber);
		desc = ioc->tmp_string;
		break;
	}
	case MPI2_FUNCTION_PORT_FACTS:
		desc = "port_facts";
		break;
	case MPI2_FUNCTION_PORT_ENABLE:
		desc = "port_enable";
		break;
	case MPI2_FUNCTION_EVENT_NOTIFICATION:
		desc = "event_notification";
		break;
	case MPI2_FUNCTION_FW_DOWNLOAD:
		desc = "fw_download";
		break;
	case MPI2_FUNCTION_FW_UPLOAD:
		desc = "fw_upload";
		break;
	case MPI2_FUNCTION_RAID_ACTION:
		desc = "raid_action";
		break;
	case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
	{
		Mpi2SCSIIORequest_t *scsi_request =
		    (Mpi2SCSIIORequest_t *)mpi_request;

		snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
		    "raid_pass, cmd(0x%02x), cdb_len(%d)",
		    scsi_request->CDB.CDB32[0],
		    le16_to_cpu(scsi_request->IoFlags) & 0xF);
		desc = ioc->tmp_string;
		break;
	}
	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
		desc = "sas_iounit_cntl";
		break;
	case MPI2_FUNCTION_SATA_PASSTHROUGH:
		desc = "sata_pass";
		break;
	case MPI2_FUNCTION_DIAG_BUFFER_POST:
		desc = "diag_buffer_post";
		break;
	case MPI2_FUNCTION_DIAG_RELEASE:
		desc = "diag_release";
		break;
	case MPI2_FUNCTION_SMP_PASSTHROUGH:
		desc = "smp_passthrough";
		break;
	}

	/* unrecognized function code: nothing to print */
	if (!desc)
		return;

	printk(MPT2SAS_INFO_FMT "%s: %s, smid(%d)\n",
	    ioc->name, calling_function_name, desc, smid);

	if (!mpi_reply)
		return;

	if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo)
		printk(MPT2SAS_INFO_FMT
		    "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
		    ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
		    le32_to_cpu(mpi_reply->IOCLogInfo));

	/* for SCSI I/O replies, also dump device identity and SCSI status */
	if (mpi_request->Function ==
	    MPI2_FUNCTION_SCSI_IO_REQUEST ||
	    mpi_request->Function ==
	    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
		Mpi2SCSIIOReply_t *scsi_reply =
		    (Mpi2SCSIIOReply_t *)mpi_reply;
		struct _sas_device *sas_device = NULL;
		unsigned long flags;

		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device = _ctl_sas_device_find_by_handle(ioc,
		    le16_to_cpu(scsi_reply->DevHandle));
		if (sas_device) {
			printk(MPT2SAS_WARN_FMT "\tsas_address(0x%016llx), "
			    "phy(%d)\n", ioc->name, (unsigned long long)
			    sas_device->sas_address, sas_device->phy);
			printk(MPT2SAS_WARN_FMT
			    "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
			    ioc->name, sas_device->enclosure_logical_id,
			    sas_device->slot);
		}
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
		if (scsi_reply->SCSIState || scsi_reply->SCSIStatus)
			printk(MPT2SAS_INFO_FMT
			    "\tscsi_state(0x%02x), scsi_status"
			    "(0x%02x)\n", ioc->name,
			    scsi_reply->SCSIState,
			    scsi_reply->SCSIStatus);
	}
}
#endif

/**
 * mpt2sas_ctl_done - ctl module completion routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 * Context: none.
 *
 * The callback handler when using ioc->ctl_cb_idx.
 *
 * Return 1 meaning mf should be freed from _base_interrupt
 * 0 means the mf is freed from this function.
 */
u8
mpt2sas_ctl_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
	u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;
	Mpi2SCSIIOReply_t *scsiio_reply;
	const void *sense_data;
	u32 sz;

	/* ignore completions that do not belong to the pending ctl command */
	if (ioc->ctl_cmds.status == MPT2_CMD_NOT_USED)
		return 1;
	if (ioc->ctl_cmds.smid != smid)
		return 1;
	ioc->ctl_cmds.status |= MPT2_CMD_COMPLETE;
	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
	if (mpi_reply) {
		/* MsgLength is in dwords, hence the *4 */
		memcpy(ioc->ctl_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
		ioc->ctl_cmds.status |= MPT2_CMD_REPLY_VALID;
		/* get sense data */
		if (mpi_reply->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
		    mpi_reply->Function ==
		    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
			scsiio_reply = (Mpi2SCSIIOReply_t *)mpi_reply;
			if (scsiio_reply->SCSIState &
			    MPI2_SCSI_STATE_AUTOSENSE_VALID) {
				/* never copy more than the sense buffer holds */
				sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
				    le32_to_cpu(scsiio_reply->SenseCount));
				sense_data = mpt2sas_base_get_sense_buffer(ioc,
				    smid);
				memcpy(ioc->ctl_cmds.sense, sense_data, sz);
			}
		}
	}
#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
	_ctl_display_some_debug(ioc, smid, "ctl_done", mpi_reply);
#endif
	ioc->ctl_cmds.status &= ~MPT2_CMD_PENDING;
	complete(&ioc->ctl_cmds.done);
	return 1;
}

/**
 * _ctl_check_event_type - determines when an event needs logging
 * @ioc: per adapter object
 * @event: firmware event
 *
 * The bitmask in ioc->event_type[] indicates which events should be
 * saved in the driver event_log.  This bitmask is set by application.
 *
 * Returns non-zero when event should be captured, or zero means no match.
*/ static int _ctl_check_event_type(struct MPT2SAS_ADAPTER *ioc, u16 event) { u16 i; u32 desired_event; if (event >= 128 || !event || !ioc->event_log) return 0; desired_event = (1 << (event % 32)); if (!desired_event) desired_event = 1; i = event / 32; return desired_event & ioc->event_type[i]; } /** * mpt2sas_ctl_add_to_event_log - add event * @ioc: per adapter object * @mpi_reply: reply message frame * * Return nothing. */ void mpt2sas_ctl_add_to_event_log(struct MPT2SAS_ADAPTER *ioc, Mpi2EventNotificationReply_t *mpi_reply) { struct MPT2_IOCTL_EVENTS *event_log; u16 event; int i; u32 sz, event_data_sz; u8 send_aen = 0; if (!ioc->event_log) return; event = le16_to_cpu(mpi_reply->Event); if (_ctl_check_event_type(ioc, event)) { /* insert entry into circular event_log */ i = ioc->event_context % MPT2SAS_CTL_EVENT_LOG_SIZE; event_log = ioc->event_log; event_log[i].event = event; event_log[i].context = ioc->event_context++; event_data_sz = le16_to_cpu(mpi_reply->EventDataLength)*4; sz = min_t(u32, event_data_sz, MPT2_EVENT_DATA_SIZE); memset(event_log[i].data, 0, MPT2_EVENT_DATA_SIZE); memcpy(event_log[i].data, mpi_reply->EventData, sz); send_aen = 1; } /* This aen_event_read_flag flag is set until the * application has read the event log. * For MPI2_EVENT_LOG_ENTRY_ADDED, we always notify. */ if (event == MPI2_EVENT_LOG_ENTRY_ADDED || (send_aen && !ioc->aen_event_read_flag)) { ioc->aen_event_read_flag = 1; wake_up_interruptible(&ctl_poll_wait); if (async_queue) kill_fasync(&async_queue, SIGIO, POLL_IN); } } /** * mpt2sas_ctl_event_callback - firmware event handler (called at ISR time) * @ioc: per adapter object * @msix_index: MSIX table index supplied by the OS * @reply: reply message frame(lower 32bit addr) * Context: interrupt. * * This function merely adds a new work task into ioc->firmware_event_thread. * The tasks are worked from _firmware_event_work in user context. 
*
 * Return 1 meaning mf should be freed from _base_interrupt
 * 0 means the mf is freed from this function.
 */
u8
mpt2sas_ctl_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
	u32 reply)
{
	Mpi2EventNotificationReply_t *mpi_reply;

	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
	mpt2sas_ctl_add_to_event_log(ioc, mpi_reply);
	return 1;
}

/**
 * _ctl_verify_adapter - validates ioc_number passed from application
 * @ioc_number: ioc number (ioc->id) taken from the ioctl header
 * @iocpp: The ioc pointer is returned in this.
 *
 * Return (-1) means error, else ioc_number.
 */
static int
_ctl_verify_adapter(int ioc_number, struct MPT2SAS_ADAPTER **iocpp)
{
	struct MPT2SAS_ADAPTER *ioc;

	list_for_each_entry(ioc, &mpt2sas_ioc_list, list) {
		if (ioc->id != ioc_number)
			continue;
		*iocpp = ioc;
		return ioc_number;
	}
	/* no adapter with that id: tell the caller via both channels */
	*iocpp = NULL;
	return -1;
}

/**
 * mpt2sas_ctl_reset_handler - reset callback handler (for ctl)
 * @ioc: per adapter object
 * @reset_phase: phase
 *
 * The handler for doing any required cleanup or initialization.
 *
 * The reset phase can be MPT2_IOC_PRE_RESET, MPT2_IOC_AFTER_RESET,
 * MPT2_IOC_DONE_RESET
 */
void
mpt2sas_ctl_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
{
	int i;
	u8 issue_reset;

	switch (reset_phase) {
	case MPT2_IOC_PRE_RESET:
		dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
		    "MPT2_IOC_PRE_RESET\n", ioc->name, __func__));
		/* ask firmware to release all registered, unreleased
		 * diag buffers before the reset takes them down */
		for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
			if (!(ioc->diag_buffer_status[i] &
			    MPT2_DIAG_BUFFER_IS_REGISTERED))
				continue;
			if ((ioc->diag_buffer_status[i] &
			    MPT2_DIAG_BUFFER_IS_RELEASED))
				continue;
			_ctl_send_release(ioc, i, &issue_reset);
		}
		break;
	case MPT2_IOC_AFTER_RESET:
		dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
		    "MPT2_IOC_AFTER_RESET\n", ioc->name, __func__));
		/* fail any ctl command that was in flight when the
		 * reset hit, so its waiter can return */
		if (ioc->ctl_cmds.status & MPT2_CMD_PENDING) {
			ioc->ctl_cmds.status |= MPT2_CMD_RESET;
			mpt2sas_base_free_smid(ioc, ioc->ctl_cmds.smid);
			complete(&ioc->ctl_cmds.done);
		}
		break;
	case MPT2_IOC_DONE_RESET:
		dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
		    "MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
		/* mark surviving buffers so re-registration knows a
		 * diag reset happened in between */
		for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
			if (!(ioc->diag_buffer_status[i] &
			    MPT2_DIAG_BUFFER_IS_REGISTERED))
				continue;
			if ((ioc->diag_buffer_status[i] &
			    MPT2_DIAG_BUFFER_IS_RELEASED))
				continue;
			ioc->diag_buffer_status[i] |=
			    MPT2_DIAG_BUFFER_IS_DIAG_RESET;
		}
		break;
	}
}

/**
 * _ctl_fasync -
 * @fd -
 * @filep -
 * @mode -
 *
 * Called when application request fasyn callback handler.
 */
static int
_ctl_fasync(int fd, struct file *filep, int mode)
{
	return fasync_helper(fd, filep, mode, &async_queue);
}

/**
 * _ctl_release -
 * @inode -
 * @filep -
 *
 * Called when application releases the fasyn callback handler.
 */
static int
_ctl_release(struct inode *inode, struct file *filep)
{
	/* fd == -1 removes this filep from the async notification queue */
	return fasync_helper(-1, filep, 0, &async_queue);
}

/**
 * _ctl_poll -
 * @file -
 * @wait -
 *
 */
static unsigned int
_ctl_poll(struct file *filep, poll_table *wait)
{
	struct MPT2SAS_ADAPTER *ioc;

	poll_wait(filep, &ctl_poll_wait, wait);

	/* readable as soon as any adapter has an unread AEN event */
	/* NOTE(review): mpt2sas_ioc_list is walked without a lock here;
	 * this matches the original code -- confirm against the list's
	 * locking rules before changing. */
	list_for_each_entry(ioc, &mpt2sas_ioc_list, list) {
		if (ioc->aen_event_read_flag)
			return POLLIN | POLLRDNORM;
	}
	return 0;
}

/**
 * _ctl_set_task_mid - assign an active smid to tm request
 * @ioc: per adapter object
 * @karg - (struct mpt2_ioctl_command)
 * @tm_request - pointer to mf from user space
 *
 * Returns 0 when an smid is found, else fail.
 * during failure, the reply frame is filled.
 */
static int
_ctl_set_task_mid(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
	Mpi2SCSITaskManagementRequest_t *tm_request)
{
	u8 found = 0;
	u16 i;
	u16 handle;
	struct scsi_cmnd *scmd;
	struct MPT2SAS_DEVICE *priv_data;
	unsigned long flags;
	Mpi2SCSITaskManagementReply_t *tm_reply;
	u32 sz;
	u32 lun;
	char *desc = NULL;

	/* only abort_task and query_task carry a TaskMID to resolve */
	if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
		desc = "abort_task";
	else if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
		desc = "query_task";
	else
		return 0;

	lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN);

	handle = le16_to_cpu(tm_request->DevHandle);
	/* scan the outstanding-command lookup table for a command on the
	 * same device handle and lun; its smid becomes the TaskMID */
	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	for (i = ioc->scsiio_depth; i && !found; i--) {
		scmd = ioc->scsi_lookup[i - 1].scmd;
		if (scmd == NULL || scmd->device == NULL ||
		    scmd->device->hostdata == NULL)
			continue;
		if (lun != scmd->device->lun)
			continue;
		priv_data = scmd->device->hostdata;
		if (priv_data->sas_target == NULL)
			continue;
		if (priv_data->sas_target->handle != handle)
			continue;
		tm_request->TaskMID = cpu_to_le16(ioc->scsi_lookup[i - 1].smid);
		found = 1;
	}
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

	if (!found) {
		dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
		    "handle(0x%04x), lun(%d), no active mid!!\n", ioc->name,
		    desc, le16_to_cpu(tm_request->DevHandle), lun));
		/* no active command: hand a synthesized reply frame back to
		 * the application instead of sending the TM to firmware */
		tm_reply = ioc->ctl_cmds.reply;
		tm_reply->DevHandle = tm_request->DevHandle;
		tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
		tm_reply->TaskType = tm_request->TaskType;
		tm_reply->MsgLength = sizeof(Mpi2SCSITaskManagementReply_t)/4;
		tm_reply->VP_ID = tm_request->VP_ID;
		tm_reply->VF_ID = tm_request->VF_ID;
		sz = min_t(u32, karg->max_reply_bytes, ioc->reply_sz);
		if (copy_to_user(karg->reply_frame_buf_ptr, ioc->ctl_cmds.reply,
		    sz))
			printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__,
			    __LINE__, __func__);
		return 1;
	}

	dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
	    "handle(0x%04x), lun(%d), task_mid(%d)\n", ioc->name,
	    desc, le16_to_cpu(tm_request->DevHandle), lun,
	    le16_to_cpu(tm_request->TaskMID)));
	return 0;
}

/**
 * _ctl_do_mpt_command - main handler for MPT2COMMAND opcode
 * @ioc: per adapter object
 * @karg - (struct mpt2_ioctl_command)
 * @mf - pointer to mf in user space
 * @state - NON_BLOCKING or BLOCKING
 */
static long
_ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command karg,
	void __user *mf, enum block_state state)
{
	MPI2RequestHeader_t *mpi_request = NULL, *request;
	MPI2DefaultReply_t *mpi_reply;
	u32 ioc_state;
	u16 ioc_status;
	u16 smid;
	unsigned long timeout, timeleft;
	u8 issue_reset;
	u32 sz;
	void *psge;
	void *data_out = NULL;
	dma_addr_t data_out_dma;
	size_t data_out_sz = 0;
	void *data_in = NULL;
	dma_addr_t data_in_dma;
	size_t data_in_sz = 0;
	u32 sgl_flags;
	long ret;
	u16 wait_state_count;

	issue_reset = 0;

	/* only one ctl command may be outstanding; NON_BLOCKING callers
	 * get -EAGAIN rather than sleeping on the mutex */
	if (state == NON_BLOCKING && !mutex_trylock(&ioc->ctl_cmds.mutex))
		return -EAGAIN;
	else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex))
		return -ERESTARTSYS;

	if (ioc->ctl_cmds.status != MPT2_CMD_NOT_USED) {
		printk(MPT2SAS_ERR_FMT "%s: ctl_cmd in use\n",
		    ioc->name, __func__);
		ret = -EAGAIN;
		goto out;
	}

	/* give the ioc up to ~10 seconds to reach operational state */
	wait_state_count = 0;
	ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		if (wait_state_count++ == 10) {
			printk(MPT2SAS_ERR_FMT
			    "%s: failed due to ioc not operational\n",
			    ioc->name, __func__);
			ret = -EFAULT;
			goto out;
		}
		ssleep(1);
		ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
		printk(MPT2SAS_INFO_FMT "%s: waiting for "
		    "operational state(count=%d)\n", ioc->name,
		    __func__, wait_state_count);
	}
	if (wait_state_count)
		printk(MPT2SAS_INFO_FMT "%s: ioc is operational\n",
		    ioc->name, __func__);

	mpi_request = kzalloc(ioc->request_sz, GFP_KERNEL);
	if (!mpi_request) {
		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a memory for "
		    "mpi_request\n", ioc->name, __func__);
		ret = -ENOMEM;
		goto out;
	}

	/* Check for overflow and wraparound */
	if (karg.data_sge_offset * 4 > ioc->request_sz ||
	    karg.data_sge_offset > (UINT_MAX / 4)) {
		ret = -EINVAL;
		goto out;
	}

	/* copy in
request message frame from user */ if (copy_from_user(mpi_request, mf, karg.data_sge_offset*4)) { printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); ret = -EFAULT; goto out; } if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) { smid = mpt2sas_base_get_smid_hpr(ioc, ioc->ctl_cb_idx); if (!smid) { printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n", ioc->name, __func__); ret = -EAGAIN; goto out; } } else { smid = mpt2sas_base_get_smid_scsiio(ioc, ioc->ctl_cb_idx, NULL); if (!smid) { printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n", ioc->name, __func__); ret = -EAGAIN; goto out; } } ret = 0; ioc->ctl_cmds.status = MPT2_CMD_PENDING; memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz); request = mpt2sas_base_get_msg_frame(ioc, smid); memcpy(request, mpi_request, karg.data_sge_offset*4); ioc->ctl_cmds.smid = smid; data_out_sz = karg.data_out_size; data_in_sz = karg.data_in_size; if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST || mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) { if (!le16_to_cpu(mpi_request->FunctionDependent1) || le16_to_cpu(mpi_request->FunctionDependent1) > ioc->facts.MaxDevHandle) { ret = -EINVAL; mpt2sas_base_free_smid(ioc, smid); goto out; } } /* obtain dma-able memory for data transfer */ if (data_out_sz) /* WRITE */ { data_out = pci_alloc_consistent(ioc->pdev, data_out_sz, &data_out_dma); if (!data_out) { printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); ret = -ENOMEM; mpt2sas_base_free_smid(ioc, smid); goto out; } if (copy_from_user(data_out, karg.data_out_buf_ptr, data_out_sz)) { printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); ret = -EFAULT; mpt2sas_base_free_smid(ioc, smid); goto out; } } if (data_in_sz) /* READ */ { data_in = pci_alloc_consistent(ioc->pdev, data_in_sz, &data_in_dma); if (!data_in) { printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); ret = -ENOMEM; mpt2sas_base_free_smid(ioc, smid); goto 
out; } } /* add scatter gather elements */ psge = (void *)request + (karg.data_sge_offset*4); if (!data_out_sz && !data_in_sz) { mpt2sas_base_build_zero_len_sge(ioc, psge); } else if (data_out_sz && data_in_sz) { /* WRITE sgel first */ sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC); sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; ioc->base_add_sg_single(psge, sgl_flags | data_out_sz, data_out_dma); /* incr sgel */ psge += ioc->sge_size; /* READ sgel last */ sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT | MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST); sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; ioc->base_add_sg_single(psge, sgl_flags | data_in_sz, data_in_dma); } else if (data_out_sz) /* WRITE */ { sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT | MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC); sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; ioc->base_add_sg_single(psge, sgl_flags | data_out_sz, data_out_dma); } else if (data_in_sz) /* READ */ { sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT | MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST); sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; ioc->base_add_sg_single(psge, sgl_flags | data_in_sz, data_in_dma); } /* send command to firmware */ #ifdef CONFIG_SCSI_MPT2SAS_LOGGING _ctl_display_some_debug(ioc, smid, "ctl_request", NULL); #endif init_completion(&ioc->ctl_cmds.done); switch (mpi_request->Function) { case MPI2_FUNCTION_SCSI_IO_REQUEST: case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: { Mpi2SCSIIORequest_t *scsiio_request = (Mpi2SCSIIORequest_t *)request; scsiio_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE; scsiio_request->SenseBufferLowAddress = mpt2sas_base_get_sense_buffer_dma(ioc, smid); memset(ioc->ctl_cmds.sense, 0, SCSI_SENSE_BUFFERSIZE); if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST) 
mpt2sas_base_put_smid_scsi_io(ioc, smid, le16_to_cpu(mpi_request->FunctionDependent1)); else mpt2sas_base_put_smid_default(ioc, smid); break; } case MPI2_FUNCTION_SCSI_TASK_MGMT: { Mpi2SCSITaskManagementRequest_t *tm_request = (Mpi2SCSITaskManagementRequest_t *)request; dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "TASK_MGMT: " "handle(0x%04x), task_type(0x%02x)\n", ioc->name, le16_to_cpu(tm_request->DevHandle), tm_request->TaskType)); if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK || tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) { if (_ctl_set_task_mid(ioc, &karg, tm_request)) { mpt2sas_base_free_smid(ioc, smid); goto out; } } mpt2sas_scsih_set_tm_flag(ioc, le16_to_cpu( tm_request->DevHandle)); mpt2sas_base_put_smid_hi_priority(ioc, smid); break; } case MPI2_FUNCTION_SMP_PASSTHROUGH: { Mpi2SmpPassthroughRequest_t *smp_request = (Mpi2SmpPassthroughRequest_t *)mpi_request; u8 *data; /* ioc determines which port to use */ smp_request->PhysicalPort = 0xFF; if (smp_request->PassthroughFlags & MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE) data = (u8 *)&smp_request->SGL; else data = data_out; if (data[1] == 0x91 && (data[10] == 1 || data[10] == 2)) { ioc->ioc_link_reset_in_progress = 1; ioc->ignore_loginfos = 1; } mpt2sas_base_put_smid_default(ioc, smid); break; } case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL: { Mpi2SasIoUnitControlRequest_t *sasiounit_request = (Mpi2SasIoUnitControlRequest_t *)mpi_request; if (sasiounit_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET || sasiounit_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) { ioc->ioc_link_reset_in_progress = 1; ioc->ignore_loginfos = 1; } mpt2sas_base_put_smid_default(ioc, smid); break; } default: mpt2sas_base_put_smid_default(ioc, smid); break; } if (karg.timeout < MPT2_IOCTL_DEFAULT_TIMEOUT) timeout = MPT2_IOCTL_DEFAULT_TIMEOUT; else timeout = karg.timeout; timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done, timeout*HZ); if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) { 
Mpi2SCSITaskManagementRequest_t *tm_request = (Mpi2SCSITaskManagementRequest_t *)mpi_request; mpt2sas_scsih_clear_tm_flag(ioc, le16_to_cpu( tm_request->DevHandle)); } else if ((mpi_request->Function == MPI2_FUNCTION_SMP_PASSTHROUGH || mpi_request->Function == MPI2_FUNCTION_SAS_IO_UNIT_CONTROL) && ioc->ioc_link_reset_in_progress) { ioc->ioc_link_reset_in_progress = 0; ioc->ignore_loginfos = 0; } if (!(ioc->ctl_cmds.status & MPT2_CMD_COMPLETE)) { printk(MPT2SAS_ERR_FMT "%s: timeout\n", ioc->name, __func__); _debug_dump_mf(mpi_request, karg.data_sge_offset); if (!(ioc->ctl_cmds.status & MPT2_CMD_RESET)) issue_reset = 1; goto issue_host_reset; } mpi_reply = ioc->ctl_cmds.reply; ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; #ifdef CONFIG_SCSI_MPT2SAS_LOGGING if (mpi_reply->Function == MPI2_FUNCTION_SCSI_TASK_MGMT && (ioc->logging_level & MPT_DEBUG_TM)) { Mpi2SCSITaskManagementReply_t *tm_reply = (Mpi2SCSITaskManagementReply_t *)mpi_reply; printk(MPT2SAS_INFO_FMT "TASK_MGMT: " "IOCStatus(0x%04x), IOCLogInfo(0x%08x), " "TerminationCount(0x%08x)\n", ioc->name, le16_to_cpu(tm_reply->IOCStatus), le32_to_cpu(tm_reply->IOCLogInfo), le32_to_cpu(tm_reply->TerminationCount)); } #endif /* copy out xdata to user */ if (data_in_sz) { if (copy_to_user(karg.data_in_buf_ptr, data_in, data_in_sz)) { printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); ret = -ENODATA; goto out; } } /* copy out reply message frame to user */ if (karg.max_reply_bytes) { sz = min_t(u32, karg.max_reply_bytes, ioc->reply_sz); if (copy_to_user(karg.reply_frame_buf_ptr, ioc->ctl_cmds.reply, sz)) { printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); ret = -ENODATA; goto out; } } /* copy out sense to user */ if (karg.max_sense_bytes && (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST || mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) { sz = min_t(u32, karg.max_sense_bytes, SCSI_SENSE_BUFFERSIZE); if 
(copy_to_user(karg.sense_data_ptr, ioc->ctl_cmds.sense, sz)) { printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); ret = -ENODATA; goto out; } } issue_host_reset: if (issue_reset) { ret = -ENODATA; if ((mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST || mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) { printk(MPT2SAS_INFO_FMT "issue target reset: handle " "= (0x%04x)\n", ioc->name, le16_to_cpu(mpi_request->FunctionDependent1)); mpt2sas_halt_firmware(ioc); mpt2sas_scsih_issue_tm(ioc, le16_to_cpu(mpi_request->FunctionDependent1), 0, 0, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 10, 0, TM_MUTEX_ON); ioc->tm_cmds.status = MPT2_CMD_NOT_USED; } else mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, FORCE_BIG_HAMMER); } out: /* free memory associated with sg buffers */ if (data_in) pci_free_consistent(ioc->pdev, data_in_sz, data_in, data_in_dma); if (data_out) pci_free_consistent(ioc->pdev, data_out_sz, data_out, data_out_dma); kfree(mpi_request); ioc->ctl_cmds.status = MPT2_CMD_NOT_USED; mutex_unlock(&ioc->ctl_cmds.mutex); return ret; } /** * _ctl_getiocinfo - main handler for MPT2IOCINFO opcode * @arg - user space buffer containing ioctl content */ static long _ctl_getiocinfo(void __user *arg) { struct mpt2_ioctl_iocinfo karg; struct MPT2SAS_ADAPTER *ioc; if (copy_from_user(&karg, arg, sizeof(karg))) { printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); return -EFAULT; } if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc) return -ENODEV; dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name, __func__)); memset(&karg, 0 , sizeof(karg)); if (ioc->is_warpdrive) karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2_SSS6200; else karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2; if (ioc->pfacts) karg.port_number = ioc->pfacts[0].PortNumber; karg.hw_rev = ioc->pdev->revision; karg.pci_id = ioc->pdev->device; karg.subsystem_device = ioc->pdev->subsystem_device; karg.subsystem_vendor = 
ioc->pdev->subsystem_vendor; karg.pci_information.u.bits.bus = ioc->pdev->bus->number; karg.pci_information.u.bits.device = PCI_SLOT(ioc->pdev->devfn); karg.pci_information.u.bits.function = PCI_FUNC(ioc->pdev->devfn); karg.pci_information.segment_id = pci_domain_nr(ioc->pdev->bus); karg.firmware_version = ioc->facts.FWVersion.Word; strcpy(karg.driver_version, MPT2SAS_DRIVER_NAME); strcat(karg.driver_version, "-"); strcat(karg.driver_version, MPT2SAS_DRIVER_VERSION); karg.bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion); if (copy_to_user(arg, &karg, sizeof(karg))) { printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); return -EFAULT; } return 0; } /** * _ctl_eventquery - main handler for MPT2EVENTQUERY opcode * @arg - user space buffer containing ioctl content */ static long _ctl_eventquery(void __user *arg) { struct mpt2_ioctl_eventquery karg; struct MPT2SAS_ADAPTER *ioc; if (copy_from_user(&karg, arg, sizeof(karg))) { printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); return -EFAULT; } if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc) return -ENODEV; dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name, __func__)); karg.event_entries = MPT2SAS_CTL_EVENT_LOG_SIZE; memcpy(karg.event_types, ioc->event_type, MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32)); if (copy_to_user(arg, &karg, sizeof(karg))) { printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); return -EFAULT; } return 0; } /** * _ctl_eventenable - main handler for MPT2EVENTENABLE opcode * @arg - user space buffer containing ioctl content */ static long _ctl_eventenable(void __user *arg) { struct mpt2_ioctl_eventenable karg; struct MPT2SAS_ADAPTER *ioc; if (copy_from_user(&karg, arg, sizeof(karg))) { printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); return -EFAULT; } if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc) return -ENODEV; dctlprintk(ioc, 
printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name, __func__)); if (ioc->event_log) return 0; memcpy(ioc->event_type, karg.event_types, MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32)); mpt2sas_base_validate_event_type(ioc, ioc->event_type); /* initialize event_log */ ioc->event_context = 0; ioc->aen_event_read_flag = 0; ioc->event_log = kcalloc(MPT2SAS_CTL_EVENT_LOG_SIZE, sizeof(struct MPT2_IOCTL_EVENTS), GFP_KERNEL); if (!ioc->event_log) { printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); return -ENOMEM; } return 0; } /** * _ctl_eventreport - main handler for MPT2EVENTREPORT opcode * @arg - user space buffer containing ioctl content */ static long _ctl_eventreport(void __user *arg) { struct mpt2_ioctl_eventreport karg; struct MPT2SAS_ADAPTER *ioc; u32 number_bytes, max_events, max; struct mpt2_ioctl_eventreport __user *uarg = arg; if (copy_from_user(&karg, arg, sizeof(karg))) { printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); return -EFAULT; } if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc) return -ENODEV; dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name, __func__)); number_bytes = karg.hdr.max_data_size - sizeof(struct mpt2_ioctl_header); max_events = number_bytes/sizeof(struct MPT2_IOCTL_EVENTS); max = min_t(u32, MPT2SAS_CTL_EVENT_LOG_SIZE, max_events); /* If fewer than 1 event is requested, there must have * been some type of error. 
*/ if (!max || !ioc->event_log) return -ENODATA; number_bytes = max * sizeof(struct MPT2_IOCTL_EVENTS); if (copy_to_user(uarg->event_data, ioc->event_log, number_bytes)) { printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); return -EFAULT; } /* reset flag so SIGIO can restart */ ioc->aen_event_read_flag = 0; return 0; } /** * _ctl_do_reset - main handler for MPT2HARDRESET opcode * @arg - user space buffer containing ioctl content */ static long _ctl_do_reset(void __user *arg) { struct mpt2_ioctl_diag_reset karg; struct MPT2SAS_ADAPTER *ioc; int retval; if (copy_from_user(&karg, arg, sizeof(karg))) { printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); return -EFAULT; } if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc) return -ENODEV; if (ioc->shost_recovery || ioc->pci_error_recovery || ioc->is_driver_loading) return -EAGAIN; dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name, __func__)); retval = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, FORCE_BIG_HAMMER); printk(MPT2SAS_INFO_FMT "host reset: %s\n", ioc->name, ((!retval) ? 
"SUCCESS" : "FAILED")); return 0; } /** * _ctl_btdh_search_sas_device - searching for sas device * @ioc: per adapter object * @btdh: btdh ioctl payload */ static int _ctl_btdh_search_sas_device(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_btdh_mapping *btdh) { struct _sas_device *sas_device; unsigned long flags; int rc = 0; if (list_empty(&ioc->sas_device_list)) return rc; spin_lock_irqsave(&ioc->sas_device_lock, flags); list_for_each_entry(sas_device, &ioc->sas_device_list, list) { if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF && btdh->handle == sas_device->handle) { btdh->bus = sas_device->channel; btdh->id = sas_device->id; rc = 1; goto out; } else if (btdh->bus == sas_device->channel && btdh->id == sas_device->id && btdh->handle == 0xFFFF) { btdh->handle = sas_device->handle; rc = 1; goto out; } } out: spin_unlock_irqrestore(&ioc->sas_device_lock, flags); return rc; } /** * _ctl_btdh_search_raid_device - searching for raid device * @ioc: per adapter object * @btdh: btdh ioctl payload */ static int _ctl_btdh_search_raid_device(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_btdh_mapping *btdh) { struct _raid_device *raid_device; unsigned long flags; int rc = 0; if (list_empty(&ioc->raid_device_list)) return rc; spin_lock_irqsave(&ioc->raid_device_lock, flags); list_for_each_entry(raid_device, &ioc->raid_device_list, list) { if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF && btdh->handle == raid_device->handle) { btdh->bus = raid_device->channel; btdh->id = raid_device->id; rc = 1; goto out; } else if (btdh->bus == raid_device->channel && btdh->id == raid_device->id && btdh->handle == 0xFFFF) { btdh->handle = raid_device->handle; rc = 1; goto out; } } out: spin_unlock_irqrestore(&ioc->raid_device_lock, flags); return rc; } /** * _ctl_btdh_mapping - main handler for MPT2BTDHMAPPING opcode * @arg - user space buffer containing ioctl content */ static long _ctl_btdh_mapping(void __user *arg) { struct mpt2_ioctl_btdh_mapping karg; struct 
MPT2SAS_ADAPTER *ioc; int rc; if (copy_from_user(&karg, arg, sizeof(karg))) { printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); return -EFAULT; } if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc) return -ENODEV; dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, __func__)); rc = _ctl_btdh_search_sas_device(ioc, &karg); if (!rc) _ctl_btdh_search_raid_device(ioc, &karg); if (copy_to_user(arg, &karg, sizeof(karg))) { printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); return -EFAULT; } return 0; } /** * _ctl_diag_capability - return diag buffer capability * @ioc: per adapter object * @buffer_type: specifies either TRACE, SNAPSHOT, or EXTENDED * * returns 1 when diag buffer support is enabled in firmware */ static u8 _ctl_diag_capability(struct MPT2SAS_ADAPTER *ioc, u8 buffer_type) { u8 rc = 0; switch (buffer_type) { case MPI2_DIAG_BUF_TYPE_TRACE: if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) rc = 1; break; case MPI2_DIAG_BUF_TYPE_SNAPSHOT: if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) rc = 1; break; case MPI2_DIAG_BUF_TYPE_EXTENDED: if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) rc = 1; } return rc; } /** * _ctl_diag_register_2 - wrapper for registering diag buffer support * @ioc: per adapter object * @diag_register: the diag_register struct passed in from user space * */ static long _ctl_diag_register_2(struct MPT2SAS_ADAPTER *ioc, struct mpt2_diag_register *diag_register) { int rc, i; void *request_data = NULL; dma_addr_t request_data_dma; u32 request_data_sz = 0; Mpi2DiagBufferPostRequest_t *mpi_request; Mpi2DiagBufferPostReply_t *mpi_reply; u8 buffer_type; unsigned long timeleft; u16 smid; u16 ioc_status; u8 issue_reset = 0; dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, __func__)); if (ioc->ctl_cmds.status != MPT2_CMD_NOT_USED) { printk(MPT2SAS_ERR_FMT "%s: ctl_cmd in use\n", ioc->name, 
	    __func__);
		rc = -EAGAIN;
		goto out;
	}

	buffer_type = diag_register->buffer_type;
	if (!_ctl_diag_capability(ioc, buffer_type)) {
		printk(MPT2SAS_ERR_FMT "%s: doesn't have capability for "
		    "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type);
		return -EPERM;
	}

	if (ioc->diag_buffer_status[buffer_type] &
	    MPT2_DIAG_BUFFER_IS_REGISTERED) {
		printk(MPT2SAS_ERR_FMT "%s: already has a registered "
		    "buffer for buffer_type(0x%02x)\n", ioc->name, __func__,
		    buffer_type);
		return -EINVAL;
	}

	/* firmware requires dword-aligned buffer sizes */
	if (diag_register->requested_buffer_size % 4) {
		printk(MPT2SAS_ERR_FMT "%s: the requested_buffer_size "
		    "is not 4 byte aligned\n", ioc->name, __func__);
		return -EINVAL;
	}

	smid = mpt2sas_base_get_smid(ioc, ioc->ctl_cb_idx);
	if (!smid) {
		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
		    ioc->name, __func__);
		rc = -EAGAIN;
		goto out;
	}

	rc = 0;
	ioc->ctl_cmds.status = MPT2_CMD_PENDING;
	memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
	mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
	ioc->ctl_cmds.smid = smid;

	request_data = ioc->diag_buffer[buffer_type];
	request_data_sz = diag_register->requested_buffer_size;
	ioc->unique_id[buffer_type] = diag_register->unique_id;
	ioc->diag_buffer_status[buffer_type] = 0;
	memcpy(ioc->product_specific[buffer_type],
	    diag_register->product_specific, MPT2_PRODUCT_SPECIFIC_DWORDS);
	ioc->diagnostic_flags[buffer_type] = diag_register->diagnostic_flags;

	/* an existing buffer of a different size cannot be reused */
	if (request_data) {
		request_data_dma = ioc->diag_buffer_dma[buffer_type];
		if (request_data_sz != ioc->diag_buffer_sz[buffer_type]) {
			pci_free_consistent(ioc->pdev,
			    ioc->diag_buffer_sz[buffer_type],
			    request_data, request_data_dma);
			request_data = NULL;
		}
	}

	if (request_data == NULL) {
		ioc->diag_buffer_sz[buffer_type] = 0;
		ioc->diag_buffer_dma[buffer_type] = 0;
		request_data = pci_alloc_consistent(
			ioc->pdev, request_data_sz, &request_data_dma);
		if (request_data == NULL) {
			printk(MPT2SAS_ERR_FMT "%s: failed allocating memory"
			    " for diag buffers, requested size(%d)\n",
			    ioc->name, __func__, request_data_sz);
			/*
			 * NOTE(review): this early return leaves
			 * ioc->ctl_cmds.status at MPT2_CMD_PENDING instead
			 * of going through "out" — looks like it could wedge
			 * subsequent ctl commands; confirm against upstream.
			 */
			mpt2sas_base_free_smid(ioc, smid);
			return -ENOMEM;
		}
		ioc->diag_buffer[buffer_type] = request_data;
		ioc->diag_buffer_sz[buffer_type] = request_data_sz;
		ioc->diag_buffer_dma[buffer_type] = request_data_dma;
	}

	mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
	mpi_request->BufferType = diag_register->buffer_type;
	mpi_request->Flags = cpu_to_le32(diag_register->diagnostic_flags);
	mpi_request->BufferAddress = cpu_to_le64(request_data_dma);
	mpi_request->BufferLength = cpu_to_le32(request_data_sz);
	mpi_request->VF_ID = 0; /* TODO */
	mpi_request->VP_ID = 0;

	dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: diag_buffer(0x%p), "
	    "dma(0x%llx), sz(%d)\n", ioc->name, __func__, request_data,
	    (unsigned long long)request_data_dma,
	    le32_to_cpu(mpi_request->BufferLength)));

	for (i = 0; i < MPT2_PRODUCT_SPECIFIC_DWORDS; i++)
		mpi_request->ProductSpecific[i] =
			cpu_to_le32(ioc->product_specific[buffer_type][i]);

	init_completion(&ioc->ctl_cmds.done);
	mpt2sas_base_put_smid_default(ioc, smid);
	timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
	    MPT2_IOCTL_DEFAULT_TIMEOUT*HZ);

	if (!(ioc->ctl_cmds.status & MPT2_CMD_COMPLETE)) {
		printk(MPT2SAS_ERR_FMT "%s: timeout\n", ioc->name,
		    __func__);
		_debug_dump_mf(mpi_request,
		    sizeof(Mpi2DiagBufferPostRequest_t)/4);
		/* only reset if the timeout was not caused by a reset */
		if (!(ioc->ctl_cmds.status & MPT2_CMD_RESET))
			issue_reset = 1;
		goto issue_host_reset;
	}

	/* process the completed Reply Message Frame */
	if ((ioc->ctl_cmds.status & MPT2_CMD_REPLY_VALID) == 0) {
		printk(MPT2SAS_ERR_FMT "%s: no reply message\n",
		    ioc->name, __func__);
		rc = -EFAULT;
		goto out;
	}

	mpi_reply = ioc->ctl_cmds.reply;
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;

	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
		ioc->diag_buffer_status[buffer_type] |=
			MPT2_DIAG_BUFFER_IS_REGISTERED;
		dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: success\n",
		    ioc->name, __func__));
	} else {
		printk(MPT2SAS_INFO_FMT "%s: ioc_status(0x%04x) "
		    "log_info(0x%08x)\n", ioc->name, __func__,
		    ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
		rc = -EFAULT;
	}

 issue_host_reset:
	if (issue_reset)
		mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
		    FORCE_BIG_HAMMER);

 out:
	/* on failure the buffer is freed; firmware no longer owns it */
	if (rc && request_data)
		pci_free_consistent(ioc->pdev, request_data_sz,
		    request_data, request_data_dma);

	ioc->ctl_cmds.status = MPT2_CMD_NOT_USED;
	return rc;
}

/**
 * mpt2sas_enable_diag_buffer - enabling diag_buffers support driver load time
 * @ioc: per adapter object
 * @bits_to_register: bitwise field where trace is bit 0, and snapshot is bit 1
 *
 * This is called when command line option diag_buffer_enable is enabled
 * at driver load time.
 */
void
mpt2sas_enable_diag_buffer(struct MPT2SAS_ADAPTER *ioc, u8 bits_to_register)
{
	struct mpt2_diag_register diag_register;

	memset(&diag_register, 0, sizeof(struct mpt2_diag_register));

	if (bits_to_register & 1) {
		printk(MPT2SAS_INFO_FMT "registering trace buffer support\n",
		    ioc->name);
		diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
		/* register for 1MB buffers  */
		diag_register.requested_buffer_size = (1024 * 1024);
		diag_register.unique_id = 0x7075900;
		_ctl_diag_register_2(ioc, &diag_register);
	}

	if (bits_to_register & 2) {
		printk(MPT2SAS_INFO_FMT "registering snapshot buffer support\n",
		    ioc->name);
		diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_SNAPSHOT;
		/* register for 2MB buffers  */
		diag_register.requested_buffer_size = 2 * (1024 * 1024);
		diag_register.unique_id = 0x7075901;
		_ctl_diag_register_2(ioc, &diag_register);
	}

	if (bits_to_register & 4) {
		printk(MPT2SAS_INFO_FMT "registering extended buffer support\n",
		    ioc->name);
		diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_EXTENDED;
		/* register for 2MB buffers  */
		diag_register.requested_buffer_size = 2 * (1024 * 1024);
		/* NOTE(review): same unique_id as snapshot — confirm intended */
		diag_register.unique_id = 0x7075901;
		_ctl_diag_register_2(ioc, &diag_register);
	}
}

/**
 * _ctl_diag_register - application register with driver
 * @arg - user space buffer containing ioctl content
 * @state - NON_BLOCKING or BLOCKING
 *
 * This will allow the driver to setup any required buffers that will be
 * needed by firmware to
communicate with the driver. */ static long _ctl_diag_register(void __user *arg, enum block_state state) { struct mpt2_diag_register karg; struct MPT2SAS_ADAPTER *ioc; long rc; if (copy_from_user(&karg, arg, sizeof(karg))) { printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); return -EFAULT; } if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc) return -ENODEV; if (state == NON_BLOCKING && !mutex_trylock(&ioc->ctl_cmds.mutex)) return -EAGAIN; else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) return -ERESTARTSYS; rc = _ctl_diag_register_2(ioc, &karg); mutex_unlock(&ioc->ctl_cmds.mutex); return rc; } /** * _ctl_diag_unregister - application unregister with driver * @arg - user space buffer containing ioctl content * * This will allow the driver to cleanup any memory allocated for diag * messages and to free up any resources. */ static long _ctl_diag_unregister(void __user *arg) { struct mpt2_diag_unregister karg; struct MPT2SAS_ADAPTER *ioc; void *request_data; dma_addr_t request_data_dma; u32 request_data_sz; u8 buffer_type; if (copy_from_user(&karg, arg, sizeof(karg))) { printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); return -EFAULT; } if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc) return -ENODEV; dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, __func__)); buffer_type = karg.unique_id & 0x000000ff; if (!_ctl_diag_capability(ioc, buffer_type)) { printk(MPT2SAS_ERR_FMT "%s: doesn't have capability for " "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type); return -EPERM; } if ((ioc->diag_buffer_status[buffer_type] & MPT2_DIAG_BUFFER_IS_REGISTERED) == 0) { printk(MPT2SAS_ERR_FMT "%s: buffer_type(0x%02x) is not " "registered\n", ioc->name, __func__, buffer_type); return -EINVAL; } if ((ioc->diag_buffer_status[buffer_type] & MPT2_DIAG_BUFFER_IS_RELEASED) == 0) { printk(MPT2SAS_ERR_FMT "%s: buffer_type(0x%02x) has not been " "released\n", ioc->name, __func__, 
buffer_type); return -EINVAL; } if (karg.unique_id != ioc->unique_id[buffer_type]) { printk(MPT2SAS_ERR_FMT "%s: unique_id(0x%08x) is not " "registered\n", ioc->name, __func__, karg.unique_id); return -EINVAL; } request_data = ioc->diag_buffer[buffer_type]; if (!request_data) { printk(MPT2SAS_ERR_FMT "%s: doesn't have memory allocated for " "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type); return -ENOMEM; } request_data_sz = ioc->diag_buffer_sz[buffer_type]; request_data_dma = ioc->diag_buffer_dma[buffer_type]; pci_free_consistent(ioc->pdev, request_data_sz, request_data, request_data_dma); ioc->diag_buffer[buffer_type] = NULL; ioc->diag_buffer_status[buffer_type] = 0; return 0; } /** * _ctl_diag_query - query relevant info associated with diag buffers * @arg - user space buffer containing ioctl content * * The application will send only buffer_type and unique_id. Driver will * inspect unique_id first, if valid, fill in all the info. If unique_id is * 0x00, the driver will return info specified by Buffer Type. 
 */
static long
_ctl_diag_query(void __user *arg)
{
	struct mpt2_diag_query karg;
	struct MPT2SAS_ADAPTER *ioc;
	void *request_data;
	int i;
	u8 buffer_type;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__,
		    __LINE__, __func__);
		return -EFAULT;
	}

	if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
		return -ENODEV;

	dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
	    __func__));

	karg.application_flags = 0;
	buffer_type = karg.buffer_type;

	if (!_ctl_diag_capability(ioc, buffer_type)) {
		printk(MPT2SAS_ERR_FMT "%s: doesn't have capability for "
		    "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type);
		return -EPERM;
	}

	if ((ioc->diag_buffer_status[buffer_type] &
	    MPT2_DIAG_BUFFER_IS_REGISTERED) == 0) {
		printk(MPT2SAS_ERR_FMT "%s: buffer_type(0x%02x) is not "
		    "registered\n", ioc->name, __func__, buffer_type);
		return -EINVAL;
	}

	/* non-zero upper bytes means the caller supplied a real unique_id */
	if (karg.unique_id & 0xffffff00) {
		if (karg.unique_id != ioc->unique_id[buffer_type]) {
			printk(MPT2SAS_ERR_FMT "%s: unique_id(0x%08x) is not "
			    "registered\n", ioc->name, __func__,
			    karg.unique_id);
			return -EINVAL;
		}
	}

	request_data = ioc->diag_buffer[buffer_type];
	if (!request_data) {
		printk(MPT2SAS_ERR_FMT "%s: doesn't have buffer for "
		    "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type);
		return -ENOMEM;
	}

	/* released buffers are app-owned; otherwise firmware still writes */
	if (ioc->diag_buffer_status[buffer_type] & MPT2_DIAG_BUFFER_IS_RELEASED)
		karg.application_flags = (MPT2_APP_FLAGS_APP_OWNED |
		    MPT2_APP_FLAGS_BUFFER_VALID);
	else
		karg.application_flags = (MPT2_APP_FLAGS_APP_OWNED |
		    MPT2_APP_FLAGS_BUFFER_VALID |
		    MPT2_APP_FLAGS_FW_BUFFER_ACCESS);

	for (i = 0; i < MPT2_PRODUCT_SPECIFIC_DWORDS; i++)
		karg.product_specific[i] =
		    ioc->product_specific[buffer_type][i];

	karg.total_buffer_size = ioc->diag_buffer_sz[buffer_type];
	karg.driver_added_buffer_size = 0;
	karg.unique_id = ioc->unique_id[buffer_type];
	karg.diagnostic_flags = ioc->diagnostic_flags[buffer_type];

	if (copy_to_user(arg, &karg, sizeof(struct mpt2_diag_query))) {
		printk(MPT2SAS_ERR_FMT "%s: unable to write mpt2_diag_query "
		    "data @ %p\n", ioc->name, __func__, arg);
		return -EFAULT;
	}
	return 0;
}

/**
 * _ctl_send_release - Diag Release Message
 * @ioc: per adapter object
 * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED
 * @issue_reset - specifies whether host reset is required.
 *
 * Sends MPI2_FUNCTION_DIAG_RELEASE to firmware and waits for the
 * reply; on timeout, *issue_reset tells the caller whether a host
 * reset is needed.  Caller is expected to hold ioc->ctl_cmds.mutex.
 */
static int
_ctl_send_release(struct MPT2SAS_ADAPTER *ioc, u8 buffer_type, u8 *issue_reset)
{
	Mpi2DiagReleaseRequest_t *mpi_request;
	Mpi2DiagReleaseReply_t *mpi_reply;
	u16 smid;
	u16 ioc_status;
	u32 ioc_state;
	int rc;
	unsigned long timeleft;

	dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
	    __func__));

	rc = 0;
	*issue_reset = 0;

	/* a faulted IOC cannot process the release; skip quietly */
	ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
		    "skipping due to FAULT state\n", ioc->name,
		    __func__));
		rc = -EAGAIN;
		goto out;
	}

	if (ioc->ctl_cmds.status != MPT2_CMD_NOT_USED) {
		printk(MPT2SAS_ERR_FMT "%s: ctl_cmd in use\n", ioc->name,
		    __func__);
		rc = -EAGAIN;
		goto out;
	}

	smid = mpt2sas_base_get_smid(ioc, ioc->ctl_cb_idx);
	if (!smid) {
		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
		    ioc->name, __func__);
		rc = -EAGAIN;
		goto out;
	}

	ioc->ctl_cmds.status = MPT2_CMD_PENDING;
	memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
	mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
	ioc->ctl_cmds.smid = smid;

	mpi_request->Function = MPI2_FUNCTION_DIAG_RELEASE;
	mpi_request->BufferType = buffer_type;
	mpi_request->VF_ID = 0; /* TODO */
	mpi_request->VP_ID = 0;

	init_completion(&ioc->ctl_cmds.done);
	mpt2sas_base_put_smid_default(ioc, smid);
	timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
	    MPT2_IOCTL_DEFAULT_TIMEOUT*HZ);

	if (!(ioc->ctl_cmds.status & MPT2_CMD_COMPLETE)) {
		printk(MPT2SAS_ERR_FMT "%s: timeout\n", ioc->name,
		    __func__);
		_debug_dump_mf(mpi_request,
		    sizeof(Mpi2DiagReleaseRequest_t)/4);
		/* only ask for a reset when the timeout wasn't reset-driven */
		if (!(ioc->ctl_cmds.status & MPT2_CMD_RESET))
			*issue_reset = 1;
		rc = -EFAULT;
		goto out;
	}

	/* process the completed Reply Message Frame */
	if
((ioc->ctl_cmds.status & MPT2_CMD_REPLY_VALID) == 0) { printk(MPT2SAS_ERR_FMT "%s: no reply message\n", ioc->name, __func__); rc = -EFAULT; goto out; } mpi_reply = ioc->ctl_cmds.reply; ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { ioc->diag_buffer_status[buffer_type] |= MPT2_DIAG_BUFFER_IS_RELEASED; dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: success\n", ioc->name, __func__)); } else { printk(MPT2SAS_INFO_FMT "%s: ioc_status(0x%04x) " "log_info(0x%08x)\n", ioc->name, __func__, ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo)); rc = -EFAULT; } out: ioc->ctl_cmds.status = MPT2_CMD_NOT_USED; return rc; } /** * _ctl_diag_release - request to send Diag Release Message to firmware * @arg - user space buffer containing ioctl content * @state - NON_BLOCKING or BLOCKING * * This allows ownership of the specified buffer to returned to the driver, * allowing an application to read the buffer without fear that firmware is * overwritting information in the buffer. 
*/ static long _ctl_diag_release(void __user *arg, enum block_state state) { struct mpt2_diag_release karg; struct MPT2SAS_ADAPTER *ioc; void *request_data; int rc; u8 buffer_type; u8 issue_reset = 0; if (copy_from_user(&karg, arg, sizeof(karg))) { printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); return -EFAULT; } if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc) return -ENODEV; dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, __func__)); buffer_type = karg.unique_id & 0x000000ff; if (!_ctl_diag_capability(ioc, buffer_type)) { printk(MPT2SAS_ERR_FMT "%s: doesn't have capability for " "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type); return -EPERM; } if ((ioc->diag_buffer_status[buffer_type] & MPT2_DIAG_BUFFER_IS_REGISTERED) == 0) { printk(MPT2SAS_ERR_FMT "%s: buffer_type(0x%02x) is not " "registered\n", ioc->name, __func__, buffer_type); return -EINVAL; } if (karg.unique_id != ioc->unique_id[buffer_type]) { printk(MPT2SAS_ERR_FMT "%s: unique_id(0x%08x) is not " "registered\n", ioc->name, __func__, karg.unique_id); return -EINVAL; } if (ioc->diag_buffer_status[buffer_type] & MPT2_DIAG_BUFFER_IS_RELEASED) { printk(MPT2SAS_ERR_FMT "%s: buffer_type(0x%02x) " "is already released\n", ioc->name, __func__, buffer_type); return 0; } request_data = ioc->diag_buffer[buffer_type]; if (!request_data) { printk(MPT2SAS_ERR_FMT "%s: doesn't have memory allocated for " "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type); return -ENOMEM; } /* buffers were released by due to host reset */ if ((ioc->diag_buffer_status[buffer_type] & MPT2_DIAG_BUFFER_IS_DIAG_RESET)) { ioc->diag_buffer_status[buffer_type] |= MPT2_DIAG_BUFFER_IS_RELEASED; ioc->diag_buffer_status[buffer_type] &= ~MPT2_DIAG_BUFFER_IS_DIAG_RESET; printk(MPT2SAS_ERR_FMT "%s: buffer_type(0x%02x) " "was released due to host reset\n", ioc->name, __func__, buffer_type); return 0; } if (state == NON_BLOCKING && !mutex_trylock(&ioc->ctl_cmds.mutex)) return 
-EAGAIN; else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) return -ERESTARTSYS; rc = _ctl_send_release(ioc, buffer_type, &issue_reset); if (issue_reset) mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, FORCE_BIG_HAMMER); mutex_unlock(&ioc->ctl_cmds.mutex); return rc; } /** * _ctl_diag_read_buffer - request for copy of the diag buffer * @arg - user space buffer containing ioctl content * @state - NON_BLOCKING or BLOCKING */ static long _ctl_diag_read_buffer(void __user *arg, enum block_state state) { struct mpt2_diag_read_buffer karg; struct mpt2_diag_read_buffer __user *uarg = arg; struct MPT2SAS_ADAPTER *ioc; void *request_data, *diag_data; Mpi2DiagBufferPostRequest_t *mpi_request; Mpi2DiagBufferPostReply_t *mpi_reply; int rc, i; u8 buffer_type; unsigned long timeleft, request_size, copy_size; u16 smid; u16 ioc_status; u8 issue_reset = 0; if (copy_from_user(&karg, arg, sizeof(karg))) { printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); return -EFAULT; } if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc) return -ENODEV; dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, __func__)); buffer_type = karg.unique_id & 0x000000ff; if (!_ctl_diag_capability(ioc, buffer_type)) { printk(MPT2SAS_ERR_FMT "%s: doesn't have capability for " "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type); return -EPERM; } if (karg.unique_id != ioc->unique_id[buffer_type]) { printk(MPT2SAS_ERR_FMT "%s: unique_id(0x%08x) is not " "registered\n", ioc->name, __func__, karg.unique_id); return -EINVAL; } request_data = ioc->diag_buffer[buffer_type]; if (!request_data) { printk(MPT2SAS_ERR_FMT "%s: doesn't have buffer for " "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type); return -ENOMEM; } request_size = ioc->diag_buffer_sz[buffer_type]; if ((karg.starting_offset % 4) || (karg.bytes_to_read % 4)) { printk(MPT2SAS_ERR_FMT "%s: either the starting_offset " "or bytes_to_read are not 4 byte aligned\n", ioc->name, __func__); return 
-EINVAL; } if (karg.starting_offset > request_size) return -EINVAL; diag_data = (void *)(request_data + karg.starting_offset); dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: diag_buffer(%p), " "offset(%d), sz(%d)\n", ioc->name, __func__, diag_data, karg.starting_offset, karg.bytes_to_read)); /* Truncate data on requests that are too large */ if ((diag_data + karg.bytes_to_read < diag_data) || (diag_data + karg.bytes_to_read > request_data + request_size)) copy_size = request_size - karg.starting_offset; else copy_size = karg.bytes_to_read; if (copy_to_user((void __user *)uarg->diagnostic_data, diag_data, copy_size)) { printk(MPT2SAS_ERR_FMT "%s: Unable to write " "mpt_diag_read_buffer_t data @ %p\n", ioc->name, __func__, diag_data); return -EFAULT; } if ((karg.flags & MPT2_FLAGS_REREGISTER) == 0) return 0; dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: Reregister " "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type)); if ((ioc->diag_buffer_status[buffer_type] & MPT2_DIAG_BUFFER_IS_RELEASED) == 0) { dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: " "buffer_type(0x%02x) is still registered\n", ioc->name, __func__, buffer_type)); return 0; } /* Get a free request frame and save the message context. 
*/ if (state == NON_BLOCKING && !mutex_trylock(&ioc->ctl_cmds.mutex)) return -EAGAIN; else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) return -ERESTARTSYS; if (ioc->ctl_cmds.status != MPT2_CMD_NOT_USED) { printk(MPT2SAS_ERR_FMT "%s: ctl_cmd in use\n", ioc->name, __func__); rc = -EAGAIN; goto out; } smid = mpt2sas_base_get_smid(ioc, ioc->ctl_cb_idx); if (!smid) { printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n", ioc->name, __func__); rc = -EAGAIN; goto out; } rc = 0; ioc->ctl_cmds.status = MPT2_CMD_PENDING; memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz); mpi_request = mpt2sas_base_get_msg_frame(ioc, smid); ioc->ctl_cmds.smid = smid; mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST; mpi_request->BufferType = buffer_type; mpi_request->BufferLength = cpu_to_le32(ioc->diag_buffer_sz[buffer_type]); mpi_request->BufferAddress = cpu_to_le64(ioc->diag_buffer_dma[buffer_type]); for (i = 0; i < MPT2_PRODUCT_SPECIFIC_DWORDS; i++) mpi_request->ProductSpecific[i] = cpu_to_le32(ioc->product_specific[buffer_type][i]); mpi_request->VF_ID = 0; /* TODO */ mpi_request->VP_ID = 0; init_completion(&ioc->ctl_cmds.done); mpt2sas_base_put_smid_default(ioc, smid); timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done, MPT2_IOCTL_DEFAULT_TIMEOUT*HZ); if (!(ioc->ctl_cmds.status & MPT2_CMD_COMPLETE)) { printk(MPT2SAS_ERR_FMT "%s: timeout\n", ioc->name, __func__); _debug_dump_mf(mpi_request, sizeof(Mpi2DiagBufferPostRequest_t)/4); if (!(ioc->ctl_cmds.status & MPT2_CMD_RESET)) issue_reset = 1; goto issue_host_reset; } /* process the completed Reply Message Frame */ if ((ioc->ctl_cmds.status & MPT2_CMD_REPLY_VALID) == 0) { printk(MPT2SAS_ERR_FMT "%s: no reply message\n", ioc->name, __func__); rc = -EFAULT; goto out; } mpi_reply = ioc->ctl_cmds.reply; ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { ioc->diag_buffer_status[buffer_type] |= MPT2_DIAG_BUFFER_IS_REGISTERED; dctlprintk(ioc, printk(MPT2SAS_INFO_FMT 
"%s: success\n", ioc->name, __func__)); } else { printk(MPT2SAS_INFO_FMT "%s: ioc_status(0x%04x) " "log_info(0x%08x)\n", ioc->name, __func__, ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo)); rc = -EFAULT; } issue_host_reset: if (issue_reset) mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, FORCE_BIG_HAMMER); out: ioc->ctl_cmds.status = MPT2_CMD_NOT_USED; mutex_unlock(&ioc->ctl_cmds.mutex); return rc; } /** * _ctl_ioctl_main - main ioctl entry point * @file - (struct file) * @cmd - ioctl opcode * @arg - */ static long _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg) { enum block_state state; long ret = -EINVAL; state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING : BLOCKING; switch (cmd) { case MPT2IOCINFO: if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_iocinfo)) ret = _ctl_getiocinfo(arg); break; case MPT2COMMAND: { struct mpt2_ioctl_command karg; struct mpt2_ioctl_command __user *uarg; struct MPT2SAS_ADAPTER *ioc; if (copy_from_user(&karg, arg, sizeof(karg))) { printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); return -EFAULT; } if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc) return -ENODEV; if (ioc->shost_recovery || ioc->pci_error_recovery || ioc->is_driver_loading) return -EAGAIN; if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_command)) { uarg = arg; ret = _ctl_do_mpt_command(ioc, karg, &uarg->mf, state); } break; } case MPT2EVENTQUERY: if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_eventquery)) ret = _ctl_eventquery(arg); break; case MPT2EVENTENABLE: if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_eventenable)) ret = _ctl_eventenable(arg); break; case MPT2EVENTREPORT: ret = _ctl_eventreport(arg); break; case MPT2HARDRESET: if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_diag_reset)) ret = _ctl_do_reset(arg); break; case MPT2BTDHMAPPING: if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_btdh_mapping)) ret = _ctl_btdh_mapping(arg); break; case MPT2DIAGREGISTER: if (_IOC_SIZE(cmd) == sizeof(struct 
mpt2_diag_register)) ret = _ctl_diag_register(arg, state); break; case MPT2DIAGUNREGISTER: if (_IOC_SIZE(cmd) == sizeof(struct mpt2_diag_unregister)) ret = _ctl_diag_unregister(arg); break; case MPT2DIAGQUERY: if (_IOC_SIZE(cmd) == sizeof(struct mpt2_diag_query)) ret = _ctl_diag_query(arg); break; case MPT2DIAGRELEASE: if (_IOC_SIZE(cmd) == sizeof(struct mpt2_diag_release)) ret = _ctl_diag_release(arg, state); break; case MPT2DIAGREADBUFFER: if (_IOC_SIZE(cmd) == sizeof(struct mpt2_diag_read_buffer)) ret = _ctl_diag_read_buffer(arg, state); break; default: { struct mpt2_ioctl_command karg; struct MPT2SAS_ADAPTER *ioc; if (copy_from_user(&karg, arg, sizeof(karg))) { printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); return -EFAULT; } if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc) return -ENODEV; dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "unsupported ioctl opcode(0x%08x)\n", ioc->name, cmd)); break; } } return ret; } /** * _ctl_ioctl - main ioctl entry point (unlocked) * @file - (struct file) * @cmd - ioctl opcode * @arg - */ static long _ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { long ret; mutex_lock(&_ctl_mutex); ret = _ctl_ioctl_main(file, cmd, (void __user *)arg); mutex_unlock(&_ctl_mutex); return ret; } #ifdef CONFIG_COMPAT /** * _ctl_compat_mpt_command - convert 32bit pointers to 64bit. * @file - (struct file) * @cmd - ioctl opcode * @arg - (struct mpt2_ioctl_command32) * * MPT2COMMAND32 - Handle 32bit applications running on 64bit os. 
*/ static long _ctl_compat_mpt_command(struct file *file, unsigned cmd, unsigned long arg) { struct mpt2_ioctl_command32 karg32; struct mpt2_ioctl_command32 __user *uarg; struct mpt2_ioctl_command karg; struct MPT2SAS_ADAPTER *ioc; enum block_state state; if (_IOC_SIZE(cmd) != sizeof(struct mpt2_ioctl_command32)) return -EINVAL; uarg = (struct mpt2_ioctl_command32 __user *) arg; if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32))) { printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); return -EFAULT; } if (_ctl_verify_adapter(karg32.hdr.ioc_number, &ioc) == -1 || !ioc) return -ENODEV; if (ioc->shost_recovery || ioc->pci_error_recovery || ioc->is_driver_loading) return -EAGAIN; memset(&karg, 0, sizeof(struct mpt2_ioctl_command)); karg.hdr.ioc_number = karg32.hdr.ioc_number; karg.hdr.port_number = karg32.hdr.port_number; karg.hdr.max_data_size = karg32.hdr.max_data_size; karg.timeout = karg32.timeout; karg.max_reply_bytes = karg32.max_reply_bytes; karg.data_in_size = karg32.data_in_size; karg.data_out_size = karg32.data_out_size; karg.max_sense_bytes = karg32.max_sense_bytes; karg.data_sge_offset = karg32.data_sge_offset; karg.reply_frame_buf_ptr = compat_ptr(karg32.reply_frame_buf_ptr); karg.data_in_buf_ptr = compat_ptr(karg32.data_in_buf_ptr); karg.data_out_buf_ptr = compat_ptr(karg32.data_out_buf_ptr); karg.sense_data_ptr = compat_ptr(karg32.sense_data_ptr); state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING : BLOCKING; return _ctl_do_mpt_command(ioc, karg, &uarg->mf, state); } /** * _ctl_ioctl_compat - main ioctl entry point (compat) * @file - * @cmd - * @arg - * * This routine handles 32 bit applications in 64bit os. 
*/ static long _ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg) { long ret; mutex_lock(&_ctl_mutex); if (cmd == MPT2COMMAND32) ret = _ctl_compat_mpt_command(file, cmd, arg); else ret = _ctl_ioctl_main(file, cmd, (void __user *)arg); mutex_unlock(&_ctl_mutex); return ret; } #endif /* scsi host attributes */ /** * _ctl_version_fw_show - firmware version * @cdev - pointer to embedded class device * @buf - the buffer returned * * A sysfs 'read-only' shost attribute. */ static ssize_t _ctl_version_fw_show(struct device *cdev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(cdev); struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n", (ioc->facts.FWVersion.Word & 0xFF000000) >> 24, (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16, (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8, ioc->facts.FWVersion.Word & 0x000000FF); } static DEVICE_ATTR(version_fw, S_IRUGO, _ctl_version_fw_show, NULL); /** * _ctl_version_bios_show - bios version * @cdev - pointer to embedded class device * @buf - the buffer returned * * A sysfs 'read-only' shost attribute. */ static ssize_t _ctl_version_bios_show(struct device *cdev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(cdev); struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); u32 version = le32_to_cpu(ioc->bios_pg3.BiosVersion); return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n", (version & 0xFF000000) >> 24, (version & 0x00FF0000) >> 16, (version & 0x0000FF00) >> 8, version & 0x000000FF); } static DEVICE_ATTR(version_bios, S_IRUGO, _ctl_version_bios_show, NULL); /** * _ctl_version_mpi_show - MPI (message passing interface) version * @cdev - pointer to embedded class device * @buf - the buffer returned * * A sysfs 'read-only' shost attribute. 
*/ static ssize_t _ctl_version_mpi_show(struct device *cdev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(cdev); struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); return snprintf(buf, PAGE_SIZE, "%03x.%02x\n", ioc->facts.MsgVersion, ioc->facts.HeaderVersion >> 8); } static DEVICE_ATTR(version_mpi, S_IRUGO, _ctl_version_mpi_show, NULL); /** * _ctl_version_product_show - product name * @cdev - pointer to embedded class device * @buf - the buffer returned * * A sysfs 'read-only' shost attribute. */ static ssize_t _ctl_version_product_show(struct device *cdev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(cdev); struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); return snprintf(buf, 16, "%s\n", ioc->manu_pg0.ChipName); } static DEVICE_ATTR(version_product, S_IRUGO, _ctl_version_product_show, NULL); /** * _ctl_version_nvdata_persistent_show - ndvata persistent version * @cdev - pointer to embedded class device * @buf - the buffer returned * * A sysfs 'read-only' shost attribute. */ static ssize_t _ctl_version_nvdata_persistent_show(struct device *cdev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(cdev); struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); return snprintf(buf, PAGE_SIZE, "%08xh\n", le32_to_cpu(ioc->iounit_pg0.NvdataVersionPersistent.Word)); } static DEVICE_ATTR(version_nvdata_persistent, S_IRUGO, _ctl_version_nvdata_persistent_show, NULL); /** * _ctl_version_nvdata_default_show - nvdata default version * @cdev - pointer to embedded class device * @buf - the buffer returned * * A sysfs 'read-only' shost attribute. 
*/ static ssize_t _ctl_version_nvdata_default_show(struct device *cdev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(cdev); struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); return snprintf(buf, PAGE_SIZE, "%08xh\n", le32_to_cpu(ioc->iounit_pg0.NvdataVersionDefault.Word)); } static DEVICE_ATTR(version_nvdata_default, S_IRUGO, _ctl_version_nvdata_default_show, NULL); /** * _ctl_board_name_show - board name * @cdev - pointer to embedded class device * @buf - the buffer returned * * A sysfs 'read-only' shost attribute. */ static ssize_t _ctl_board_name_show(struct device *cdev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(cdev); struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardName); } static DEVICE_ATTR(board_name, S_IRUGO, _ctl_board_name_show, NULL); /** * _ctl_board_assembly_show - board assembly name * @cdev - pointer to embedded class device * @buf - the buffer returned * * A sysfs 'read-only' shost attribute. */ static ssize_t _ctl_board_assembly_show(struct device *cdev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(cdev); struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardAssembly); } static DEVICE_ATTR(board_assembly, S_IRUGO, _ctl_board_assembly_show, NULL); /** * _ctl_board_tracer_show - board tracer number * @cdev - pointer to embedded class device * @buf - the buffer returned * * A sysfs 'read-only' shost attribute. 
 */
static ssize_t
_ctl_board_tracer_show(struct device *cdev, struct device_attribute *attr,
    char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);

	return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardTracerNumber);
}
static DEVICE_ATTR(board_tracer, S_IRUGO, _ctl_board_tracer_show, NULL);

/**
 * _ctl_io_delay_show - io missing delay
 * @cdev: pointer to embedded class device
 * @attr: device attribute descriptor (unused)
 * @buf: the buffer returned
 *
 * This is for the firmware implementation for debouncing device
 * removal events.
 *
 * A sysfs 'read-only' shost attribute.
 */
static ssize_t
_ctl_io_delay_show(struct device *cdev, struct device_attribute *attr,
    char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);

	return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->io_missing_delay);
}
static DEVICE_ATTR(io_delay, S_IRUGO, _ctl_io_delay_show, NULL);

/**
 * _ctl_device_delay_show - device missing delay
 * @cdev: pointer to embedded class device
 * @attr: device attribute descriptor (unused)
 * @buf: the buffer returned
 *
 * This is for the firmware implementation for debouncing device
 * removal events.
 *
 * A sysfs 'read-only' shost attribute.
 */
static ssize_t
_ctl_device_delay_show(struct device *cdev, struct device_attribute *attr,
    char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);

	return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->device_missing_delay);
}
static DEVICE_ATTR(device_delay, S_IRUGO, _ctl_device_delay_show, NULL);

/**
 * _ctl_fw_queue_depth_show - global credits
 * @cdev: pointer to embedded class device
 * @buf: the buffer returned
 *
 * This is firmware queue depth limit
 *
 * A sysfs 'read-only' shost attribute.
 */
static ssize_t
_ctl_fw_queue_depth_show(struct device *cdev, struct device_attribute *attr,
    char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);

	return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->facts.RequestCredit);
}
static DEVICE_ATTR(fw_queue_depth, S_IRUGO, _ctl_fw_queue_depth_show, NULL);

/**
 * _ctl_host_sas_address_show - sas address
 * @cdev: pointer to embedded class device
 * @attr: device attribute descriptor (unused)
 * @buf: the buffer returned
 *
 * This is the controller sas address
 *
 * A sysfs 'read-only' shost attribute.
 */
static ssize_t
_ctl_host_sas_address_show(struct device *cdev, struct device_attribute *attr,
    char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);

	return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
	    (unsigned long long)ioc->sas_hba.sas_address);
}
static DEVICE_ATTR(host_sas_address, S_IRUGO, _ctl_host_sas_address_show,
    NULL);

/**
 * _ctl_logging_level_show - logging level
 * @cdev: pointer to embedded class device
 * @buf: the buffer returned
 *
 * A sysfs 'read/write' shost attribute.
 */
static ssize_t
_ctl_logging_level_show(struct device *cdev, struct device_attribute *attr,
    char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);

	return snprintf(buf, PAGE_SIZE, "%08xh\n", ioc->logging_level);
}

/* Store a new logging level; input is parsed as hex and takes effect
 * immediately. */
static ssize_t
_ctl_logging_level_store(struct device *cdev, struct device_attribute *attr,
    const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
	int val = 0;

	if (sscanf(buf, "%x", &val) != 1)
		return -EINVAL;

	ioc->logging_level = val;
	printk(MPT2SAS_INFO_FMT "logging_level=%08xh\n", ioc->name,
	    ioc->logging_level);
	return strlen(buf);
}
static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR,
    _ctl_logging_level_show, _ctl_logging_level_store);

/* device attributes */
/*
 * _ctl_fwfault_debug_show - show/store fwfault_debug
 * @cdev: pointer to embedded class device
 * @buf: the buffer returned
 *
 * mpt2sas_fwfault_debug is command line option
 * A sysfs 'read/write' shost attribute.
 */
static ssize_t
_ctl_fwfault_debug_show(struct device *cdev, struct device_attribute *attr,
    char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);

	return snprintf(buf, PAGE_SIZE, "%d\n", ioc->fwfault_debug);
}

static ssize_t
_ctl_fwfault_debug_store(struct device *cdev, struct device_attribute *attr,
    const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
	int val = 0;

	if (sscanf(buf, "%d", &val) != 1)
		return -EINVAL;

	ioc->fwfault_debug = val;
	printk(MPT2SAS_INFO_FMT "fwfault_debug=%d\n", ioc->name,
	    ioc->fwfault_debug);
	return strlen(buf);
}
static DEVICE_ATTR(fwfault_debug, S_IRUGO | S_IWUSR,
    _ctl_fwfault_debug_show, _ctl_fwfault_debug_store);

/**
 * _ctl_ioc_reset_count_show - ioc reset count
 * @cdev: pointer to embedded class device
 * @attr: device attribute descriptor (unused)
 * @buf: the buffer returned
 *
 * Number of times the controller has been reset.
 *
 * A sysfs 'read-only' shost attribute.
 */
static ssize_t
_ctl_ioc_reset_count_show(struct device *cdev, struct device_attribute *attr,
    char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);

	return snprintf(buf, PAGE_SIZE, "%08d\n", ioc->ioc_reset_count);
}
static DEVICE_ATTR(ioc_reset_count, S_IRUGO, _ctl_ioc_reset_count_show, NULL);

/**
 * _ctl_ioc_reply_queue_count_show - number of reply queues
 * @cdev: pointer to embedded class device
 * @buf: the buffer returned
 *
 * This is number of reply queues
 *
 * A sysfs 'read-only' shost attribute.
 */
static ssize_t
_ctl_ioc_reply_queue_count_show(struct device *cdev,
    struct device_attribute *attr, char *buf)
{
	u8 reply_queue_count;
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);

	/* More than one reply queue exists only when the firmware
	 * advertises MSI-X index support and MSI-X is enabled. */
	if ((ioc->facts.IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable)
		reply_queue_count = ioc->reply_queue_count;
	else
		reply_queue_count = 1;
	return snprintf(buf, PAGE_SIZE, "%d\n", reply_queue_count);
}
static DEVICE_ATTR(reply_queue_count, S_IRUGO,
    _ctl_ioc_reply_queue_count_show, NULL);

/* Header the firmware places at the start of a diag (trace) buffer. */
struct DIAG_BUFFER_START {
	__le32 Size;
	__le32 DiagVersion;
	u8 BufferType;
	u8 Reserved[3];
	__le32 Reserved1;
	__le32 Reserved2;
	__le32 Reserved3;
};

/**
 * _ctl_host_trace_buffer_size_show - host buffer size (trace only)
 * @cdev: pointer to embedded class device
 * @attr: device attribute descriptor (unused)
 * @buf: the buffer returned
 *
 * A sysfs 'read-only' shost attribute.
 */
static ssize_t
_ctl_host_trace_buffer_size_show(struct device *cdev,
    struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
	u32 size = 0;
	struct DIAG_BUFFER_START *request_data;

	if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
		printk(MPT2SAS_ERR_FMT "%s: host_trace_buffer is not "
		    "registered\n", ioc->name, __func__);
		return 0;
	}

	if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
	    MPT2_DIAG_BUFFER_IS_REGISTERED) == 0) {
		printk(MPT2SAS_ERR_FMT "%s: host_trace_buffer is not "
		    "registered\n", ioc->name, __func__);
		return 0;
	}

	request_data = (struct DIAG_BUFFER_START *)
	    ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE];
	/* Trust the size field only if the header looks valid: a known
	 * DiagVersion and the 0x4742444c signature in Reserved3. */
	if ((le32_to_cpu(request_data->DiagVersion) == 0x00000000 ||
	    le32_to_cpu(request_data->DiagVersion) == 0x01000000) &&
	    le32_to_cpu(request_data->Reserved3) == 0x4742444c)
		size = le32_to_cpu(request_data->Size);

	/* Cache the size so _ctl_host_trace_buffer_show can clamp reads. */
	ioc->ring_buffer_sz = size;
	return snprintf(buf, PAGE_SIZE, "%d\n", size);
}
static DEVICE_ATTR(host_trace_buffer_size, S_IRUGO,
    _ctl_host_trace_buffer_size_show, NULL);

/**
 *
_ctl_host_trace_buffer_show - firmware ring buffer (trace only) * @cdev - pointer to embedded class device * @buf - the buffer returned * * A sysfs 'read/write' shost attribute. * * You will only be able to read 4k bytes of ring buffer at a time. * In order to read beyond 4k bytes, you will have to write out the * offset to the same attribute, it will move the pointer. */ static ssize_t _ctl_host_trace_buffer_show(struct device *cdev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(cdev); struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); void *request_data; u32 size; if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) { printk(MPT2SAS_ERR_FMT "%s: host_trace_buffer is not " "registered\n", ioc->name, __func__); return 0; } if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & MPT2_DIAG_BUFFER_IS_REGISTERED) == 0) { printk(MPT2SAS_ERR_FMT "%s: host_trace_buffer is not " "registered\n", ioc->name, __func__); return 0; } if (ioc->ring_buffer_offset > ioc->ring_buffer_sz) return 0; size = ioc->ring_buffer_sz - ioc->ring_buffer_offset; size = (size > PAGE_SIZE) ? PAGE_SIZE : size; request_data = ioc->diag_buffer[0] + ioc->ring_buffer_offset; memcpy(buf, request_data, size); return size; } static ssize_t _ctl_host_trace_buffer_store(struct device *cdev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(cdev); struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); int val = 0; if (sscanf(buf, "%d", &val) != 1) return -EINVAL; ioc->ring_buffer_offset = val; return strlen(buf); } static DEVICE_ATTR(host_trace_buffer, S_IRUGO | S_IWUSR, _ctl_host_trace_buffer_show, _ctl_host_trace_buffer_store); /*****************************************/ /** * _ctl_host_trace_buffer_enable_show - firmware ring buffer (trace only) * @cdev - pointer to embedded class device * @buf - the buffer returned * * A sysfs 'read/write' shost attribute. 
* * This is a mechnism to post/release host_trace_buffers */ static ssize_t _ctl_host_trace_buffer_enable_show(struct device *cdev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(cdev); struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); if ((!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) || ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & MPT2_DIAG_BUFFER_IS_REGISTERED) == 0)) return snprintf(buf, PAGE_SIZE, "off\n"); else if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & MPT2_DIAG_BUFFER_IS_RELEASED)) return snprintf(buf, PAGE_SIZE, "release\n"); else return snprintf(buf, PAGE_SIZE, "post\n"); } static ssize_t _ctl_host_trace_buffer_enable_store(struct device *cdev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(cdev); struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); char str[10] = ""; struct mpt2_diag_register diag_register; u8 issue_reset = 0; if (sscanf(buf, "%s", str) != 1) return -EINVAL; if (!strcmp(str, "post")) { /* exit out if host buffers are already posted */ if ((ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) && (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & MPT2_DIAG_BUFFER_IS_REGISTERED) && ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & MPT2_DIAG_BUFFER_IS_RELEASED) == 0)) goto out; memset(&diag_register, 0, sizeof(struct mpt2_diag_register)); printk(MPT2SAS_INFO_FMT "posting host trace buffers\n", ioc->name); diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE; diag_register.requested_buffer_size = (1024 * 1024); diag_register.unique_id = 0x7075900; ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] = 0; _ctl_diag_register_2(ioc, &diag_register); } else if (!strcmp(str, "release")) { /* exit out if host buffers are already released */ if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) goto out; if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & MPT2_DIAG_BUFFER_IS_REGISTERED) == 0) goto out; if 
((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & MPT2_DIAG_BUFFER_IS_RELEASED)) goto out; printk(MPT2SAS_INFO_FMT "releasing host trace buffer\n", ioc->name); _ctl_send_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE, &issue_reset); } out: return strlen(buf); } static DEVICE_ATTR(host_trace_buffer_enable, S_IRUGO | S_IWUSR, _ctl_host_trace_buffer_enable_show, _ctl_host_trace_buffer_enable_store); struct device_attribute *mpt2sas_host_attrs[] = { &dev_attr_version_fw, &dev_attr_version_bios, &dev_attr_version_mpi, &dev_attr_version_product, &dev_attr_version_nvdata_persistent, &dev_attr_version_nvdata_default, &dev_attr_board_name, &dev_attr_board_assembly, &dev_attr_board_tracer, &dev_attr_io_delay, &dev_attr_device_delay, &dev_attr_logging_level, &dev_attr_fwfault_debug, &dev_attr_fw_queue_depth, &dev_attr_host_sas_address, &dev_attr_ioc_reset_count, &dev_attr_host_trace_buffer_size, &dev_attr_host_trace_buffer, &dev_attr_host_trace_buffer_enable, &dev_attr_reply_queue_count, NULL, }; /** * _ctl_device_sas_address_show - sas address * @cdev - pointer to embedded class device * @buf - the buffer returned * * This is the sas address for the target * * A sysfs 'read-only' shost attribute. */ static ssize_t _ctl_device_sas_address_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); struct MPT2SAS_DEVICE *sas_device_priv_data = sdev->hostdata; return snprintf(buf, PAGE_SIZE, "0x%016llx\n", (unsigned long long)sas_device_priv_data->sas_target->sas_address); } static DEVICE_ATTR(sas_address, S_IRUGO, _ctl_device_sas_address_show, NULL); /** * _ctl_device_handle_show - device handle * @cdev - pointer to embedded class device * @buf - the buffer returned * * This is the firmware assigned device handle * * A sysfs 'read-only' shost attribute. 
 */
static ssize_t
_ctl_device_handle_show(struct device *dev, struct device_attribute *attr,
    char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct MPT2SAS_DEVICE *sas_device_priv_data = sdev->hostdata;

	return snprintf(buf, PAGE_SIZE, "0x%04x\n",
	    sas_device_priv_data->sas_target->handle);
}
static DEVICE_ATTR(sas_device_handle, S_IRUGO, _ctl_device_handle_show, NULL);

/* scsi device attributes exported by this driver */
struct device_attribute *mpt2sas_dev_attrs[] = {
	&dev_attr_sas_address,
	&dev_attr_sas_device_handle,
	NULL,
};

/* file operations for the ctl misc character device */
static const struct file_operations ctl_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = _ctl_ioctl,
	.release = _ctl_release,
	.poll = _ctl_poll,
	.fasync = _ctl_fasync,
#ifdef CONFIG_COMPAT
	.compat_ioctl = _ctl_ioctl_compat,
#endif
	.llseek = noop_llseek,
};

static struct miscdevice ctl_dev = {
	.minor = MPT2SAS_MINOR,
	.name = MPT2SAS_DEV_NAME,
	.fops = &ctl_fops,
};

/**
 * mpt2sas_ctl_init - main entry point for ctl.
 *
 */
void
mpt2sas_ctl_init(void)
{
	async_queue = NULL;
	/* Registration failure is logged but not fatal; the driver can
	 * still operate without the ioctl interface. */
	if (misc_register(&ctl_dev) < 0)
		printk(KERN_ERR "%s can't register misc device [minor=%d]\n",
		    MPT2SAS_DRIVER_NAME, MPT2SAS_MINOR);

	init_waitqueue_head(&ctl_poll_wait);
}

/**
 * mpt2sas_ctl_exit - exit point for ctl
 *
 */
void
mpt2sas_ctl_exit(void)
{
	struct MPT2SAS_ADAPTER *ioc;
	int i;

	list_for_each_entry(ioc, &mpt2sas_ioc_list, list) {

		/* free memory associated to diag buffers */
		for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
			if (!ioc->diag_buffer[i])
				continue;
			pci_free_consistent(ioc->pdev, ioc->diag_buffer_sz[i],
			    ioc->diag_buffer[i], ioc->diag_buffer_dma[i]);
			/* Clear the slot so a later unload/reset path does
			 * not double-free. */
			ioc->diag_buffer[i] = NULL;
			ioc->diag_buffer_status[i] = 0;
		}

		kfree(ioc->event_log);
	}
	misc_deregister(&ctl_dev);
}
gpl-2.0
devil1210/EvilKernel
tools/perf/util/thread.c
5054
2776
#include "../perf.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "session.h"
#include "thread.h"
#include "util.h"
#include "debug.h"

/* Allocate a thread for @pid; the comm defaults to ":<pid>" until a
 * real comm event arrives. Returns NULL on allocation failure. */
static struct thread *thread__new(pid_t pid)
{
	struct thread *self = zalloc(sizeof(*self));

	if (self != NULL) {
		map_groups__init(&self->mg);
		self->pid = pid;
		self->comm = malloc(32);
		if (self->comm)
			snprintf(self->comm, 32, ":%d", self->pid);
	}

	return self;
}

void thread__delete(struct thread *self)
{
	map_groups__exit(&self->mg);
	free(self->comm);
	free(self);
}

/* Replace the thread's comm; flushes the map groups since a new comm
 * usually means an exec. Returns 0 or -ENOMEM. */
int thread__set_comm(struct thread *self, const char *comm)
{
	int err;

	if (self->comm)
		free(self->comm);
	self->comm = strdup(comm);
	err = self->comm == NULL ? -ENOMEM : 0;
	if (!err) {
		self->comm_set = true;
		map_groups__flush(&self->mg);
	}
	return err;
}

/* Length of the comm string, computed lazily and cached. */
int thread__comm_len(struct thread *self)
{
	if (!self->comm_len) {
		if (!self->comm)
			return 0;
		self->comm_len = strlen(self->comm);
	}

	return self->comm_len;
}

static size_t thread__fprintf(struct thread *self, FILE *fp)
{
	return fprintf(fp, "Thread %d %s\n", self->pid, self->comm) +
	       map_groups__fprintf(&self->mg, verbose, fp);
}

/* Look up @pid in the machine's thread rbtree, creating and inserting
 * a new thread if it is not found. */
struct thread *machine__findnew_thread(struct machine *self, pid_t pid)
{
	struct rb_node **p = &self->threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - PID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	if (self->last_match && self->last_match->pid == pid)
		return self->last_match;

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->pid == pid) {
			self->last_match = th;
			return th;
		}

		if (pid < th->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	th = thread__new(pid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &self->threads);
		self->last_match = th;
	}

	return th;
}

void thread__insert_map(struct thread *self, struct map *map)
{
	/* Trim/remove any existing maps this one overlaps before insert. */
	map_groups__fixup_overlappings(&self->mg, map, verbose, stderr);
	map_groups__insert(&self->mg, map);
}

/* Copy the parent's comm and clone its map groups into @self.
 * Returns 0 or -ENOMEM. */
int thread__fork(struct thread *self, struct thread *parent)
{
	int i;

	if (parent->comm_set) {
		if (self->comm)
			free(self->comm);
		self->comm = strdup(parent->comm);
		if (!self->comm)
			return -ENOMEM;
		self->comm_set = true;
	}

	for (i = 0; i < MAP__NR_TYPES; ++i)
		if (map_groups__clone(&self->mg, &parent->mg, i) < 0)
			return -ENOMEM;

	return 0;
}

size_t machine__fprintf(struct machine *machine, FILE *fp)
{
	size_t ret = 0;
	struct rb_node *nd;

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		struct thread *pos = rb_entry(nd, struct thread, rb_node);

		ret += thread__fprintf(pos, fp);
	}

	return ret;
}
gpl-2.0
CoolDevelopment/MoshKernel-amami
drivers/staging/speakup/speakup_decpc.c
7358
15035
/* * This is the DECtalk PC speakup driver * * Some constants from DEC's DOS driver: * Copyright (c) by Digital Equipment Corp. * * 386BSD DECtalk PC driver: * Copyright (c) 1996 Brian Buhrow <buhrow@lothlorien.nfbcal.org> * * Linux DECtalk PC driver: * Copyright (c) 1997 Nicolas Pitre <nico@cam.org> * * speakup DECtalk PC Internal driver: * Copyright (c) 2003 David Borowski <david575@golden.net> * * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/jiffies.h> #include <linux/sched.h> #include <linux/timer.h> #include <linux/kthread.h> #include "spk_priv.h" #include "speakup.h" #define MODULE_init 0x0dec /* module in boot code */ #define MODULE_self_test 0x8800 /* module in self-test */ #define MODULE_reset 0xffff /* reinit the whole module */ #define MODE_mask 0xf000 /* mode bits in high nibble */ #define MODE_null 0x0000 #define MODE_test 0x2000 /* in testing mode */ #define MODE_status 0x8000 #define STAT_int 0x0001 /* running in interrupt mode */ #define STAT_tr_char 0x0002 /* character data to transmit */ #define STAT_rr_char 0x0004 /* ready to receive char data */ #define STAT_cmd_ready 0x0008 /* ready to accept commands */ #define STAT_dma_ready 0x0010 /* dma command ready */ #define STAT_digitized 0x0020 /* spc in digitized mode */ #define STAT_new_index 0x0040 /* new last index ready */ #define STAT_new_status 0x0080 /* new status posted */ #define STAT_dma_state 0x0100 /* dma state toggle */ #define STAT_index_valid 0x0200 /* indexs are valid */ #define STAT_flushing 0x0400 /* flush in progress */ #define STAT_self_test 0x0800 /* module in self test */ #define MODE_ready 0xc000 /* module ready for next phase */ #define READY_boot 0x0000 #define READY_kernel 0x0001 #define MODE_error 0xf000 #define CMD_mask 0xf000 /* mask for command nibble */ #define CMD_null 0x0000 /* post status */ #define CMD_control 0x1000 /* hard control command */ #define CTRL_mask 0x0F00 /* mask off control nibble */ #define CTRL_data 0x00FF /* madk to get data byte */ #define CTRL_null 0x0000 /* null control */ #define CTRL_vol_up 0x0100 /* increase volume */ #define CTRL_vol_down 0x0200 /* decrease volume */ #define CTRL_vol_set 0x0300 /* set volume */ #define CTRL_pause 0x0400 /* pause spc */ #define CTRL_resume 0x0500 /* resume spc clock */ #define CTRL_resume_spc 0x0001 /* resume spc soft pause */ #define CTRL_flush 0x0600 /* flush all buffers */ #define CTRL_int_enable 0x0700 /* enable status change 
ints */ #define CTRL_buff_free 0x0800 /* buffer remain count */ #define CTRL_buff_used 0x0900 /* buffer in use */ #define CTRL_speech 0x0a00 /* immediate speech change */ #define CTRL_SP_voice 0x0001 /* voice change */ #define CTRL_SP_rate 0x0002 /* rate change */ #define CTRL_SP_comma 0x0003 /* comma pause change */ #define CTRL_SP_period 0x0004 /* period pause change */ #define CTRL_SP_rate_delta 0x0005 /* delta rate change */ #define CTRL_SP_get_param 0x0006 /* return the desired parameter */ #define CTRL_last_index 0x0b00 /* get last index spoken */ #define CTRL_io_priority 0x0c00 /* change i/o priority */ #define CTRL_free_mem 0x0d00 /* get free paragraphs on module */ #define CTRL_get_lang 0x0e00 /* return bit mask of loaded * languages */ #define CMD_test 0x2000 /* self-test request */ #define TEST_mask 0x0F00 /* isolate test field */ #define TEST_null 0x0000 /* no test requested */ #define TEST_isa_int 0x0100 /* assert isa irq */ #define TEST_echo 0x0200 /* make data in == data out */ #define TEST_seg 0x0300 /* set peek/poke segment */ #define TEST_off 0x0400 /* set peek/poke offset */ #define TEST_peek 0x0500 /* data out == *peek */ #define TEST_poke 0x0600 /* *peek == data in */ #define TEST_sub_code 0x00FF /* user defined test sub codes */ #define CMD_id 0x3000 /* return software id */ #define ID_null 0x0000 /* null id */ #define ID_kernel 0x0100 /* kernel code executing */ #define ID_boot 0x0200 /* boot code executing */ #define CMD_dma 0x4000 /* force a dma start */ #define CMD_reset 0x5000 /* reset module status */ #define CMD_sync 0x6000 /* kernel sync command */ #define CMD_char_in 0x7000 /* single character send */ #define CMD_char_out 0x8000 /* single character get */ #define CHAR_count_1 0x0100 /* one char in cmd_low */ #define CHAR_count_2 0x0200 /* the second in data_low */ #define CHAR_count_3 0x0300 /* the third in data_high */ #define CMD_spc_mode 0x9000 /* change spc mode */ #define CMD_spc_to_text 0x0100 /* set to text mode */ #define 
CMD_spc_to_digit 0x0200 /* set to digital mode */ #define CMD_spc_rate 0x0400 /* change spc data rate */ #define CMD_error 0xf000 /* severe error */ enum { PRIMARY_DIC = 0, USER_DIC, COMMAND_DIC, ABBREV_DIC }; #define DMA_single_in 0x01 #define DMA_single_out 0x02 #define DMA_buff_in 0x03 #define DMA_buff_out 0x04 #define DMA_control 0x05 #define DT_MEM_ALLOC 0x03 #define DT_SET_DIC 0x04 #define DT_START_TASK 0x05 #define DT_LOAD_MEM 0x06 #define DT_READ_MEM 0x07 #define DT_DIGITAL_IN 0x08 #define DMA_sync 0x06 #define DMA_sync_char 0x07 #define DRV_VERSION "2.12" #define PROCSPEECH 0x0b #define SYNTH_IO_EXTENT 8 static int synth_probe(struct spk_synth *synth); static void dtpc_release(void); static const char *synth_immediate(struct spk_synth *synth, const char *buf); static void do_catch_up(struct spk_synth *synth); static void synth_flush(struct spk_synth *synth); static int synth_portlist[] = { 0x340, 0x350, 0x240, 0x250, 0 }; static int in_escape, is_flushing; static int dt_stat, dma_state; static struct var_t vars[] = { { CAPS_START, .u.s = {"[:dv ap 200]" } }, { CAPS_STOP, .u.s = {"[:dv ap 100]" } }, { RATE, .u.n = {"[:ra %d]", 9, 0, 18, 150, 25, NULL } }, { PITCH, .u.n = {"[:dv ap %d]", 80, 0, 100, 20, 0, NULL } }, { VOL, .u.n = {"[:vo se %d]", 5, 0, 9, 5, 10, NULL } }, { PUNCT, .u.n = {"[:pu %c]", 0, 0, 2, 0, 0, "nsa" } }, { VOICE, .u.n = {"[:n%c]", 0, 0, 9, 0, 0, "phfdburwkv" } }, { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, V_LAST_VAR }; /* * These attributes will appear in /sys/accessibility/speakup/decpc. 
*/ static struct kobj_attribute caps_start_attribute = __ATTR(caps_start, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute caps_stop_attribute = __ATTR(caps_stop, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute pitch_attribute = __ATTR(pitch, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute punct_attribute = __ATTR(punct, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute rate_attribute = __ATTR(rate, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute voice_attribute = __ATTR(voice, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute vol_attribute = __ATTR(vol, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute delay_time_attribute = __ATTR(delay_time, ROOT_W, spk_var_show, spk_var_store); static struct kobj_attribute direct_attribute = __ATTR(direct, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute full_time_attribute = __ATTR(full_time, ROOT_W, spk_var_show, spk_var_store); static struct kobj_attribute jiffy_delta_attribute = __ATTR(jiffy_delta, ROOT_W, spk_var_show, spk_var_store); static struct kobj_attribute trigger_time_attribute = __ATTR(trigger_time, ROOT_W, spk_var_show, spk_var_store); /* * Create a group of attributes so that we can create and destroy them all * at once. 
*/ static struct attribute *synth_attrs[] = { &caps_start_attribute.attr, &caps_stop_attribute.attr, &pitch_attribute.attr, &punct_attribute.attr, &rate_attribute.attr, &voice_attribute.attr, &vol_attribute.attr, &delay_time_attribute.attr, &direct_attribute.attr, &full_time_attribute.attr, &jiffy_delta_attribute.attr, &trigger_time_attribute.attr, NULL, /* need to NULL terminate the list of attributes */ }; static struct spk_synth synth_dec_pc = { .name = "decpc", .version = DRV_VERSION, .long_name = "Dectalk PC", .init = "[:pe -380]", .procspeech = PROCSPEECH, .delay = 500, .trigger = 50, .jiffies = 50, .full = 1000, .flags = SF_DEC, .startup = SYNTH_START, .checkval = SYNTH_CHECK, .vars = vars, .probe = synth_probe, .release = dtpc_release, .synth_immediate = synth_immediate, .catch_up = do_catch_up, .flush = synth_flush, .is_alive = spk_synth_is_alive_nop, .synth_adjust = NULL, .read_buff_add = NULL, .get_index = NULL, .indexing = { .command = NULL, .lowindex = 0, .highindex = 0, .currindex = 0, }, .attributes = { .attrs = synth_attrs, .name = "decpc", }, }; static int dt_getstatus(void) { dt_stat = inb_p(speakup_info.port_tts) | (inb_p(speakup_info.port_tts + 1) << 8); return dt_stat; } static void dt_sendcmd(u_int cmd) { outb_p(cmd & 0xFF, speakup_info.port_tts); outb_p((cmd >> 8) & 0xFF, speakup_info.port_tts+1); } static int dt_waitbit(int bit) { int timeout = 100; while (--timeout > 0) { if ((dt_getstatus() & bit) == bit) return 1; udelay(50); } return 0; } static int dt_wait_dma(void) { int timeout = 100, state = dma_state; if (!dt_waitbit(STAT_dma_ready)) return 0; while (--timeout > 0) { if ((dt_getstatus()&STAT_dma_state) == state) return 1; udelay(50); } dma_state = dt_getstatus() & STAT_dma_state; return 1; } static int dt_ctrl(u_int cmd) { int timeout = 10; if (!dt_waitbit(STAT_cmd_ready)) return -1; outb_p(0, speakup_info.port_tts+2); outb_p(0, speakup_info.port_tts+3); dt_getstatus(); dt_sendcmd(CMD_control|cmd); outb_p(0, 
speakup_info.port_tts+6); while (dt_getstatus() & STAT_cmd_ready) { udelay(20); if (--timeout == 0) break; } dt_sendcmd(CMD_null); return 0; } static void synth_flush(struct spk_synth *synth) { int timeout = 10; if (is_flushing) return; is_flushing = 4; in_escape = 0; while (dt_ctrl(CTRL_flush)) { if (--timeout == 0) break; udelay(50); } for (timeout = 0; timeout < 10; timeout++) { if (dt_waitbit(STAT_dma_ready)) break; udelay(50); } outb_p(DMA_sync, speakup_info.port_tts+4); outb_p(0, speakup_info.port_tts+4); udelay(100); for (timeout = 0; timeout < 10; timeout++) { if (!(dt_getstatus() & STAT_flushing)) break; udelay(50); } dma_state = dt_getstatus() & STAT_dma_state; dma_state ^= STAT_dma_state; is_flushing = 0; } static int dt_sendchar(char ch) { if (!dt_wait_dma()) return -1; if (!(dt_stat & STAT_rr_char)) return -2; outb_p(DMA_single_in, speakup_info.port_tts+4); outb_p(ch, speakup_info.port_tts+4); dma_state ^= STAT_dma_state; return 0; } static int testkernel(void) { int status = 0; if (dt_getstatus() == 0xffff) { status = -1; goto oops; } dt_sendcmd(CMD_sync); if (!dt_waitbit(STAT_cmd_ready)) status = -2; else if (dt_stat&0x8000) return 0; else if (dt_stat == 0x0dec) pr_warn("dec_pc at 0x%x, software not loaded\n", speakup_info.port_tts); status = -3; oops: synth_release_region(speakup_info.port_tts, SYNTH_IO_EXTENT); speakup_info.port_tts = 0; return status; } static void do_catch_up(struct spk_synth *synth) { u_char ch; static u_char last; unsigned long flags; unsigned long jiff_max; struct var_t *jiffy_delta; struct var_t *delay_time; int jiffy_delta_val; int delay_time_val; jiffy_delta = get_var(JIFFY); delay_time = get_var(DELAY); spk_lock(flags); jiffy_delta_val = jiffy_delta->u.n.value; spk_unlock(flags); jiff_max = jiffies + jiffy_delta_val; while (!kthread_should_stop()) { spk_lock(flags); if (speakup_info.flushing) { speakup_info.flushing = 0; spk_unlock(flags); synth->flush(synth); continue; } if (synth_buffer_empty()) { spk_unlock(flags); 
break; } ch = synth_buffer_peek(); set_current_state(TASK_INTERRUPTIBLE); delay_time_val = delay_time->u.n.value; spk_unlock(flags); if (ch == '\n') ch = 0x0D; if (dt_sendchar(ch)) { schedule_timeout(msecs_to_jiffies(delay_time_val)); continue; } set_current_state(TASK_RUNNING); spk_lock(flags); synth_buffer_getc(); spk_unlock(flags); if (ch == '[') in_escape = 1; else if (ch == ']') in_escape = 0; else if (ch <= SPACE) { if (!in_escape && strchr(",.!?;:", last)) dt_sendchar(PROCSPEECH); if (jiffies >= jiff_max) { if (!in_escape) dt_sendchar(PROCSPEECH); spk_lock(flags); jiffy_delta_val = jiffy_delta->u.n.value; delay_time_val = delay_time->u.n.value; spk_unlock(flags); schedule_timeout(msecs_to_jiffies (delay_time_val)); jiff_max = jiffies + jiffy_delta_val; } } last = ch; ch = 0; } if (!in_escape) dt_sendchar(PROCSPEECH); } static const char *synth_immediate(struct spk_synth *synth, const char *buf) { u_char ch; while ((ch = *buf)) { if (ch == '\n') ch = PROCSPEECH; if (dt_sendchar(ch)) return buf; buf++; } return 0; } static int synth_probe(struct spk_synth *synth) { int i = 0, failed = 0; pr_info("Probing for %s.\n", synth->long_name); for (i = 0; synth_portlist[i]; i++) { if (synth_request_region(synth_portlist[i], SYNTH_IO_EXTENT)) { pr_warn("request_region: failed with 0x%x, %d\n", synth_portlist[i], SYNTH_IO_EXTENT); continue; } speakup_info.port_tts = synth_portlist[i]; failed = testkernel(); if (failed == 0) break; } if (failed) { pr_info("%s: not found\n", synth->long_name); return -ENODEV; } pr_info("%s: %03x-%03x, Driver Version %s,\n", synth->long_name, speakup_info.port_tts, speakup_info.port_tts + 7, synth->version); synth->alive = 1; return 0; } static void dtpc_release(void) { if (speakup_info.port_tts) synth_release_region(speakup_info.port_tts, SYNTH_IO_EXTENT); speakup_info.port_tts = 0; } module_param_named(start, synth_dec_pc.startup, short, S_IRUGO); MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded."); static int __init 
decpc_init(void)
{
	/* Register this synthesizer driver with the speakup core. */
	return synth_add(&synth_dec_pc);
}

static void __exit decpc_exit(void)
{
	/* Unregister on module unload. */
	synth_remove(&synth_dec_pc);
}

module_init(decpc_init);
module_exit(decpc_exit);

MODULE_AUTHOR("Kirk Reiser <kirk@braille.uwo.ca>");
MODULE_AUTHOR("David Borowski");
MODULE_DESCRIPTION("Speakup support for DECtalk PC synthesizers");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
gpl-2.0
Droid-Concepts/DC-Elite_kernel_jf
drivers/char/mmtimer.c
8126
21446
/* * Timer device implementation for SGI SN platforms. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (c) 2001-2006 Silicon Graphics, Inc. All rights reserved. * * This driver exports an API that should be supportable by any HPET or IA-PC * multimedia timer. The code below is currently specific to the SGI Altix * SHub RTC, however. * * 11/01/01 - jbarnes - initial revision * 9/10/04 - Christoph Lameter - remove interrupt support for kernel inclusion * 10/1/04 - Christoph Lameter - provide posix clock CLOCK_SGI_CYCLE * 10/13/04 - Christoph Lameter, Dimitri Sivanich - provide timer interrupt * support via the posix timer interface */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/ioctl.h> #include <linux/module.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/mm.h> #include <linux/fs.h> #include <linux/mmtimer.h> #include <linux/miscdevice.h> #include <linux/posix-timers.h> #include <linux/interrupt.h> #include <linux/time.h> #include <linux/math64.h> #include <linux/mutex.h> #include <linux/slab.h> #include <asm/uaccess.h> #include <asm/sn/addrs.h> #include <asm/sn/intr.h> #include <asm/sn/shub_mmr.h> #include <asm/sn/nodepda.h> #include <asm/sn/shubio.h> MODULE_AUTHOR("Jesse Barnes <jbarnes@sgi.com>"); MODULE_DESCRIPTION("SGI Altix RTC Timer"); MODULE_LICENSE("GPL"); /* name of the device, usually in /dev */ #define MMTIMER_NAME "mmtimer" #define MMTIMER_DESC "SGI Altix RTC Timer" #define MMTIMER_VERSION "2.1" #define RTC_BITS 55 /* 55 bits for this implementation */ static struct k_clock sgi_clock; extern unsigned long sn_rtc_cycles_per_second; #define RTC_COUNTER_ADDR ((long *)LOCAL_MMR_ADDR(SH_RTC)) #define rtc_time() (*RTC_COUNTER_ADDR) static DEFINE_MUTEX(mmtimer_mutex); static long mmtimer_ioctl(struct file *file, unsigned int cmd, unsigned long arg); static int mmtimer_mmap(struct 
file *file, struct vm_area_struct *vma); /* * Period in femtoseconds (10^-15 s) */ static unsigned long mmtimer_femtoperiod = 0; static const struct file_operations mmtimer_fops = { .owner = THIS_MODULE, .mmap = mmtimer_mmap, .unlocked_ioctl = mmtimer_ioctl, .llseek = noop_llseek, }; /* * We only have comparison registers RTC1-4 currently available per * node. RTC0 is used by SAL. */ /* Check for an RTC interrupt pending */ static int mmtimer_int_pending(int comparator) { if (HUB_L((unsigned long *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED)) & SH_EVENT_OCCURRED_RTC1_INT_MASK << comparator) return 1; else return 0; } /* Clear the RTC interrupt pending bit */ static void mmtimer_clr_int_pending(int comparator) { HUB_S((u64 *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS), SH_EVENT_OCCURRED_RTC1_INT_MASK << comparator); } /* Setup timer on comparator RTC1 */ static void mmtimer_setup_int_0(int cpu, u64 expires) { u64 val; /* Disable interrupt */ HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC1_INT_ENABLE), 0UL); /* Initialize comparator value */ HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPB), -1L); /* Clear pending bit */ mmtimer_clr_int_pending(0); val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC1_INT_CONFIG_IDX_SHFT) | ((u64)cpu_physical_id(cpu) << SH_RTC1_INT_CONFIG_PID_SHFT); /* Set configuration */ HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC1_INT_CONFIG), val); /* Enable RTC interrupts */ HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC1_INT_ENABLE), 1UL); /* Initialize comparator value */ HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPB), expires); } /* Setup timer on comparator RTC2 */ static void mmtimer_setup_int_1(int cpu, u64 expires) { u64 val; HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC2_INT_ENABLE), 0UL); HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPC), -1L); mmtimer_clr_int_pending(1); val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC2_INT_CONFIG_IDX_SHFT) | ((u64)cpu_physical_id(cpu) << SH_RTC2_INT_CONFIG_PID_SHFT); HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC2_INT_CONFIG), val); HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC2_INT_ENABLE), 1UL); HUB_S((u64 
*)LOCAL_MMR_ADDR(SH_INT_CMPC), expires); } /* Setup timer on comparator RTC3 */ static void mmtimer_setup_int_2(int cpu, u64 expires) { u64 val; HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_ENABLE), 0UL); HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPD), -1L); mmtimer_clr_int_pending(2); val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC3_INT_CONFIG_IDX_SHFT) | ((u64)cpu_physical_id(cpu) << SH_RTC3_INT_CONFIG_PID_SHFT); HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_CONFIG), val); HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_ENABLE), 1UL); HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPD), expires); } /* * This function must be called with interrupts disabled and preemption off * in order to insure that the setup succeeds in a deterministic time frame. * It will check if the interrupt setup succeeded. */ static int mmtimer_setup(int cpu, int comparator, unsigned long expires, u64 *set_completion_time) { switch (comparator) { case 0: mmtimer_setup_int_0(cpu, expires); break; case 1: mmtimer_setup_int_1(cpu, expires); break; case 2: mmtimer_setup_int_2(cpu, expires); break; } /* We might've missed our expiration time */ *set_completion_time = rtc_time(); if (*set_completion_time <= expires) return 1; /* * If an interrupt is already pending then its okay * if not then we failed */ return mmtimer_int_pending(comparator); } static int mmtimer_disable_int(long nasid, int comparator) { switch (comparator) { case 0: nasid == -1 ? HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC1_INT_ENABLE), 0UL) : REMOTE_HUB_S(nasid, SH_RTC1_INT_ENABLE, 0UL); break; case 1: nasid == -1 ? HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC2_INT_ENABLE), 0UL) : REMOTE_HUB_S(nasid, SH_RTC2_INT_ENABLE, 0UL); break; case 2: nasid == -1 ? 
HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_ENABLE), 0UL) : REMOTE_HUB_S(nasid, SH_RTC3_INT_ENABLE, 0UL); break; default: return -EFAULT; } return 0; } #define COMPARATOR 1 /* The comparator to use */ #define TIMER_OFF 0xbadcabLL /* Timer is not setup */ #define TIMER_SET 0 /* Comparator is set for this timer */ #define MMTIMER_INTERVAL_RETRY_INCREMENT_DEFAULT 40 /* There is one of these for each timer */ struct mmtimer { struct rb_node list; struct k_itimer *timer; int cpu; }; struct mmtimer_node { spinlock_t lock ____cacheline_aligned; struct rb_root timer_head; struct rb_node *next; struct tasklet_struct tasklet; }; static struct mmtimer_node *timers; static unsigned mmtimer_interval_retry_increment = MMTIMER_INTERVAL_RETRY_INCREMENT_DEFAULT; module_param(mmtimer_interval_retry_increment, uint, 0644); MODULE_PARM_DESC(mmtimer_interval_retry_increment, "RTC ticks to add to expiration on interval retry (default 40)"); /* * Add a new mmtimer struct to the node's mmtimer list. * This function assumes the struct mmtimer_node is locked. */ static void mmtimer_add_list(struct mmtimer *n) { int nodeid = n->timer->it.mmtimer.node; unsigned long expires = n->timer->it.mmtimer.expires; struct rb_node **link = &timers[nodeid].timer_head.rb_node; struct rb_node *parent = NULL; struct mmtimer *x; /* * Find the right place in the rbtree: */ while (*link) { parent = *link; x = rb_entry(parent, struct mmtimer, list); if (expires < x->timer->it.mmtimer.expires) link = &(*link)->rb_left; else link = &(*link)->rb_right; } /* * Insert the timer to the rbtree and check whether it * replaces the first pending timer */ rb_link_node(&n->list, parent, link); rb_insert_color(&n->list, &timers[nodeid].timer_head); if (!timers[nodeid].next || expires < rb_entry(timers[nodeid].next, struct mmtimer, list)->timer->it.mmtimer.expires) timers[nodeid].next = &n->list; } /* * Set the comparator for the next timer. * This function assumes the struct mmtimer_node is locked. 
*/ static void mmtimer_set_next_timer(int nodeid) { struct mmtimer_node *n = &timers[nodeid]; struct mmtimer *x; struct k_itimer *t; u64 expires, exp, set_completion_time; int i; restart: if (n->next == NULL) return; x = rb_entry(n->next, struct mmtimer, list); t = x->timer; if (!t->it.mmtimer.incr) { /* Not an interval timer */ if (!mmtimer_setup(x->cpu, COMPARATOR, t->it.mmtimer.expires, &set_completion_time)) { /* Late setup, fire now */ tasklet_schedule(&n->tasklet); } return; } /* Interval timer */ i = 0; expires = exp = t->it.mmtimer.expires; while (!mmtimer_setup(x->cpu, COMPARATOR, expires, &set_completion_time)) { int to; i++; expires = set_completion_time + mmtimer_interval_retry_increment + (1 << i); /* Calculate overruns as we go. */ to = ((u64)(expires - exp) / t->it.mmtimer.incr); if (to) { t->it_overrun += to; t->it.mmtimer.expires += t->it.mmtimer.incr * to; exp = t->it.mmtimer.expires; } if (i > 20) { printk(KERN_ALERT "mmtimer: cannot reschedule timer\n"); t->it.mmtimer.clock = TIMER_OFF; n->next = rb_next(&x->list); rb_erase(&x->list, &n->timer_head); kfree(x); goto restart; } } } /** * mmtimer_ioctl - ioctl interface for /dev/mmtimer * @file: file structure for the device * @cmd: command to execute * @arg: optional argument to command * * Executes the command specified by @cmd. Returns 0 for success, < 0 for * failure. * * Valid commands: * * %MMTIMER_GETOFFSET - Should return the offset (relative to the start * of the page where the registers are mapped) for the counter in question. * * %MMTIMER_GETRES - Returns the resolution of the clock in femto (10^-15) * seconds * * %MMTIMER_GETFREQ - Copies the frequency of the clock in Hz to the address * specified by @arg * * %MMTIMER_GETBITS - Returns the number of bits in the clock's counter * * %MMTIMER_MMAPAVAIL - Returns 1 if the registers can be mmap'd into userspace * * %MMTIMER_GETCOUNTER - Gets the current value in the counter and places it * in the address specified by @arg. 
*/ static long mmtimer_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int ret = 0; mutex_lock(&mmtimer_mutex); switch (cmd) { case MMTIMER_GETOFFSET: /* offset of the counter */ /* * SN RTC registers are on their own 64k page */ if(PAGE_SIZE <= (1 << 16)) ret = (((long)RTC_COUNTER_ADDR) & (PAGE_SIZE-1)) / 8; else ret = -ENOSYS; break; case MMTIMER_GETRES: /* resolution of the clock in 10^-15 s */ if(copy_to_user((unsigned long __user *)arg, &mmtimer_femtoperiod, sizeof(unsigned long))) ret = -EFAULT; break; case MMTIMER_GETFREQ: /* frequency in Hz */ if(copy_to_user((unsigned long __user *)arg, &sn_rtc_cycles_per_second, sizeof(unsigned long))) ret = -EFAULT; break; case MMTIMER_GETBITS: /* number of bits in the clock */ ret = RTC_BITS; break; case MMTIMER_MMAPAVAIL: /* can we mmap the clock into userspace? */ ret = (PAGE_SIZE <= (1 << 16)) ? 1 : 0; break; case MMTIMER_GETCOUNTER: if(copy_to_user((unsigned long __user *)arg, RTC_COUNTER_ADDR, sizeof(unsigned long))) ret = -EFAULT; break; default: ret = -ENOTTY; break; } mutex_unlock(&mmtimer_mutex); return ret; } /** * mmtimer_mmap - maps the clock's registers into userspace * @file: file structure for the device * @vma: VMA to map the registers into * * Calls remap_pfn_range() to map the clock's registers into * the calling process' address space. 
*/ static int mmtimer_mmap(struct file *file, struct vm_area_struct *vma) { unsigned long mmtimer_addr; if (vma->vm_end - vma->vm_start != PAGE_SIZE) return -EINVAL; if (vma->vm_flags & VM_WRITE) return -EPERM; if (PAGE_SIZE > (1 << 16)) return -ENOSYS; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); mmtimer_addr = __pa(RTC_COUNTER_ADDR); mmtimer_addr &= ~(PAGE_SIZE - 1); mmtimer_addr &= 0xfffffffffffffffUL; if (remap_pfn_range(vma, vma->vm_start, mmtimer_addr >> PAGE_SHIFT, PAGE_SIZE, vma->vm_page_prot)) { printk(KERN_ERR "remap_pfn_range failed in mmtimer.c\n"); return -EAGAIN; } return 0; } static struct miscdevice mmtimer_miscdev = { SGI_MMTIMER, MMTIMER_NAME, &mmtimer_fops }; static struct timespec sgi_clock_offset; static int sgi_clock_period; /* * Posix Timer Interface */ static struct timespec sgi_clock_offset; static int sgi_clock_period; static int sgi_clock_get(clockid_t clockid, struct timespec *tp) { u64 nsec; nsec = rtc_time() * sgi_clock_period + sgi_clock_offset.tv_nsec; *tp = ns_to_timespec(nsec); tp->tv_sec += sgi_clock_offset.tv_sec; return 0; }; static int sgi_clock_set(const clockid_t clockid, const struct timespec *tp) { u64 nsec; u32 rem; nsec = rtc_time() * sgi_clock_period; sgi_clock_offset.tv_sec = tp->tv_sec - div_u64_rem(nsec, NSEC_PER_SEC, &rem); if (rem <= tp->tv_nsec) sgi_clock_offset.tv_nsec = tp->tv_sec - rem; else { sgi_clock_offset.tv_nsec = tp->tv_sec + NSEC_PER_SEC - rem; sgi_clock_offset.tv_sec--; } return 0; } /** * mmtimer_interrupt - timer interrupt handler * @irq: irq received * @dev_id: device the irq came from * * Called when one of the comarators matches the counter, This * routine will send signals to processes that have requested * them. * * This interrupt is run in an interrupt context * by the SHUB. It is therefore safe to locally access SHub * registers. 
*/ static irqreturn_t mmtimer_interrupt(int irq, void *dev_id) { unsigned long expires = 0; int result = IRQ_NONE; unsigned indx = cpu_to_node(smp_processor_id()); struct mmtimer *base; spin_lock(&timers[indx].lock); base = rb_entry(timers[indx].next, struct mmtimer, list); if (base == NULL) { spin_unlock(&timers[indx].lock); return result; } if (base->cpu == smp_processor_id()) { if (base->timer) expires = base->timer->it.mmtimer.expires; /* expires test won't work with shared irqs */ if ((mmtimer_int_pending(COMPARATOR) > 0) || (expires && (expires <= rtc_time()))) { mmtimer_clr_int_pending(COMPARATOR); tasklet_schedule(&timers[indx].tasklet); result = IRQ_HANDLED; } } spin_unlock(&timers[indx].lock); return result; } static void mmtimer_tasklet(unsigned long data) { int nodeid = data; struct mmtimer_node *mn = &timers[nodeid]; struct mmtimer *x; struct k_itimer *t; unsigned long flags; /* Send signal and deal with periodic signals */ spin_lock_irqsave(&mn->lock, flags); if (!mn->next) goto out; x = rb_entry(mn->next, struct mmtimer, list); t = x->timer; if (t->it.mmtimer.clock == TIMER_OFF) goto out; t->it_overrun = 0; mn->next = rb_next(&x->list); rb_erase(&x->list, &mn->timer_head); if (posix_timer_event(t, 0) != 0) t->it_overrun++; if(t->it.mmtimer.incr) { t->it.mmtimer.expires += t->it.mmtimer.incr; mmtimer_add_list(x); } else { /* Ensure we don't false trigger in mmtimer_interrupt */ t->it.mmtimer.clock = TIMER_OFF; t->it.mmtimer.expires = 0; kfree(x); } /* Set comparator for next timer, if there is one */ mmtimer_set_next_timer(nodeid); t->it_overrun_last = t->it_overrun; out: spin_unlock_irqrestore(&mn->lock, flags); } static int sgi_timer_create(struct k_itimer *timer) { /* Insure that a newly created timer is off */ timer->it.mmtimer.clock = TIMER_OFF; return 0; } /* This does not really delete a timer. 
It just insures * that the timer is not active * * Assumption: it_lock is already held with irq's disabled */ static int sgi_timer_del(struct k_itimer *timr) { cnodeid_t nodeid = timr->it.mmtimer.node; unsigned long irqflags; spin_lock_irqsave(&timers[nodeid].lock, irqflags); if (timr->it.mmtimer.clock != TIMER_OFF) { unsigned long expires = timr->it.mmtimer.expires; struct rb_node *n = timers[nodeid].timer_head.rb_node; struct mmtimer *uninitialized_var(t); int r = 0; timr->it.mmtimer.clock = TIMER_OFF; timr->it.mmtimer.expires = 0; while (n) { t = rb_entry(n, struct mmtimer, list); if (t->timer == timr) break; if (expires < t->timer->it.mmtimer.expires) n = n->rb_left; else n = n->rb_right; } if (!n) { spin_unlock_irqrestore(&timers[nodeid].lock, irqflags); return 0; } if (timers[nodeid].next == n) { timers[nodeid].next = rb_next(n); r = 1; } rb_erase(n, &timers[nodeid].timer_head); kfree(t); if (r) { mmtimer_disable_int(cnodeid_to_nasid(nodeid), COMPARATOR); mmtimer_set_next_timer(nodeid); } } spin_unlock_irqrestore(&timers[nodeid].lock, irqflags); return 0; } /* Assumption: it_lock is already held with irq's disabled */ static void sgi_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting) { if (timr->it.mmtimer.clock == TIMER_OFF) { cur_setting->it_interval.tv_nsec = 0; cur_setting->it_interval.tv_sec = 0; cur_setting->it_value.tv_nsec = 0; cur_setting->it_value.tv_sec =0; return; } cur_setting->it_interval = ns_to_timespec(timr->it.mmtimer.incr * sgi_clock_period); cur_setting->it_value = ns_to_timespec((timr->it.mmtimer.expires - rtc_time()) * sgi_clock_period); } static int sgi_timer_set(struct k_itimer *timr, int flags, struct itimerspec * new_setting, struct itimerspec * old_setting) { unsigned long when, period, irqflags; int err = 0; cnodeid_t nodeid; struct mmtimer *base; struct rb_node *n; if (old_setting) sgi_timer_get(timr, old_setting); sgi_timer_del(timr); when = timespec_to_ns(&new_setting->it_value); period = 
timespec_to_ns(&new_setting->it_interval); if (when == 0) /* Clear timer */ return 0; base = kmalloc(sizeof(struct mmtimer), GFP_KERNEL); if (base == NULL) return -ENOMEM; if (flags & TIMER_ABSTIME) { struct timespec n; unsigned long now; getnstimeofday(&n); now = timespec_to_ns(&n); if (when > now) when -= now; else /* Fire the timer immediately */ when = 0; } /* * Convert to sgi clock period. Need to keep rtc_time() as near as possible * to getnstimeofday() in order to be as faithful as possible to the time * specified. */ when = (when + sgi_clock_period - 1) / sgi_clock_period + rtc_time(); period = (period + sgi_clock_period - 1) / sgi_clock_period; /* * We are allocating a local SHub comparator. If we would be moved to another * cpu then another SHub may be local to us. Prohibit that by switching off * preemption. */ preempt_disable(); nodeid = cpu_to_node(smp_processor_id()); /* Lock the node timer structure */ spin_lock_irqsave(&timers[nodeid].lock, irqflags); base->timer = timr; base->cpu = smp_processor_id(); timr->it.mmtimer.clock = TIMER_SET; timr->it.mmtimer.node = nodeid; timr->it.mmtimer.incr = period; timr->it.mmtimer.expires = when; n = timers[nodeid].next; /* Add the new struct mmtimer to node's timer list */ mmtimer_add_list(base); if (timers[nodeid].next == n) { /* No need to reprogram comparator for now */ spin_unlock_irqrestore(&timers[nodeid].lock, irqflags); preempt_enable(); return err; } /* We need to reprogram the comparator */ if (n) mmtimer_disable_int(cnodeid_to_nasid(nodeid), COMPARATOR); mmtimer_set_next_timer(nodeid); /* Unlock the node timer structure */ spin_unlock_irqrestore(&timers[nodeid].lock, irqflags); preempt_enable(); return err; } static int sgi_clock_getres(const clockid_t which_clock, struct timespec *tp) { tp->tv_sec = 0; tp->tv_nsec = sgi_clock_period; return 0; } static struct k_clock sgi_clock = { .clock_set = sgi_clock_set, .clock_get = sgi_clock_get, .clock_getres = sgi_clock_getres, .timer_create = 
sgi_timer_create, .timer_set = sgi_timer_set, .timer_del = sgi_timer_del, .timer_get = sgi_timer_get }; /** * mmtimer_init - device initialization routine * * Does initial setup for the mmtimer device. */ static int __init mmtimer_init(void) { cnodeid_t node, maxn = -1; if (!ia64_platform_is("sn2")) return 0; /* * Sanity check the cycles/sec variable */ if (sn_rtc_cycles_per_second < 100000) { printk(KERN_ERR "%s: unable to determine clock frequency\n", MMTIMER_NAME); goto out1; } mmtimer_femtoperiod = ((unsigned long)1E15 + sn_rtc_cycles_per_second / 2) / sn_rtc_cycles_per_second; if (request_irq(SGI_MMTIMER_VECTOR, mmtimer_interrupt, IRQF_PERCPU, MMTIMER_NAME, NULL)) { printk(KERN_WARNING "%s: unable to allocate interrupt.", MMTIMER_NAME); goto out1; } if (misc_register(&mmtimer_miscdev)) { printk(KERN_ERR "%s: failed to register device\n", MMTIMER_NAME); goto out2; } /* Get max numbered node, calculate slots needed */ for_each_online_node(node) { maxn = node; } maxn++; /* Allocate list of node ptrs to mmtimer_t's */ timers = kzalloc(sizeof(struct mmtimer_node)*maxn, GFP_KERNEL); if (timers == NULL) { printk(KERN_ERR "%s: failed to allocate memory for device\n", MMTIMER_NAME); goto out3; } /* Initialize struct mmtimer's for each online node */ for_each_online_node(node) { spin_lock_init(&timers[node].lock); tasklet_init(&timers[node].tasklet, mmtimer_tasklet, (unsigned long) node); } sgi_clock_period = NSEC_PER_SEC / sn_rtc_cycles_per_second; posix_timers_register_clock(CLOCK_SGI_CYCLE, &sgi_clock); printk(KERN_INFO "%s: v%s, %ld MHz\n", MMTIMER_DESC, MMTIMER_VERSION, sn_rtc_cycles_per_second/(unsigned long)1E6); return 0; out3: kfree(timers); misc_deregister(&mmtimer_miscdev); out2: free_irq(SGI_MMTIMER_VECTOR, NULL); out1: return -1; } module_init(mmtimer_init);
gpl-2.0
XPerience-AOSP-Lollipop/android_kernel_sony_lbmsm8960t
drivers/video/via/via_aux_sii164.c
9662
1505
/* * Copyright 2011 Florian Tobias Schandinat <FlorianSchandinat@gmx.de> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; * either version 2, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTIES OR REPRESENTATIONS; without even * the implied warranty of MERCHANTABILITY or FITNESS FOR * A PARTICULAR PURPOSE.See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* * driver for Silicon Image SiI 164 PanelLink Transmitter */ #include <linux/slab.h> #include "via_aux.h" static const char *name = "SiI 164 PanelLink Transmitter"; static void probe(struct via_aux_bus *bus, u8 addr) { struct via_aux_drv drv = { .bus = bus, .addr = addr, .name = name}; /* check vendor id and device id */ const u8 id[] = {0x01, 0x00, 0x06, 0x00}, len = ARRAY_SIZE(id); u8 tmp[len]; if (!via_aux_read(&drv, 0x00, tmp, len) || memcmp(id, tmp, len)) return; printk(KERN_INFO "viafb: Found %s at address 0x%x\n", name, addr); via_aux_add(&drv); } void via_aux_sii164_probe(struct via_aux_bus *bus) { u8 i; for (i = 0x38; i <= 0x3F; i++) probe(bus, i); }
gpl-2.0
TaichiN/kernel_omap_otter-common
drivers/net/chelsio/espi.c
11710
12793
/***************************************************************************** * * * File: espi.c * * $Revision: 1.14 $ * * $Date: 2005/05/14 00:59:32 $ * * Description: * * Ethernet SPI functionality. * * part of the Chelsio 10Gb Ethernet Driver. * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License, version 2, as * * published by the Free Software Foundation. * * * * You should have received a copy of the GNU General Public License along * * with this program; if not, write to the Free Software Foundation, Inc., * * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * * * * http://www.chelsio.com * * * * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * * All rights reserved. * * * * Maintainers: maintainers@chelsio.com * * * * Authors: Dimitrios Michailidis <dm@chelsio.com> * * Tina Yang <tainay@chelsio.com> * * Felix Marti <felix@chelsio.com> * * Scott Bardone <sbardone@chelsio.com> * * Kurt Ottaway <kottaway@chelsio.com> * * Frank DiMambro <frank@chelsio.com> * * * * History: * * * ****************************************************************************/ #include "common.h" #include "regs.h" #include "espi.h" struct peespi { adapter_t *adapter; struct espi_intr_counts intr_cnt; u32 misc_ctrl; spinlock_t lock; }; #define ESPI_INTR_MASK (F_DIP4ERR | F_RXDROP | F_TXDROP | F_RXOVERFLOW | \ F_RAMPARITYERR | F_DIP2PARITYERR) #define MON_MASK (V_MONITORED_PORT_NUM(3) | F_MONITORED_DIRECTION \ | F_MONITORED_INTERFACE) #define TRICN_CNFG 14 #define TRICN_CMD_READ 0x11 #define TRICN_CMD_WRITE 0x21 #define TRICN_CMD_ATTEMPTS 10 static int tricn_write(adapter_t *adapter, int bundle_addr, int module_addr, int ch_addr, int reg_offset, u32 wr_data) { int busy, attempts = 
TRICN_CMD_ATTEMPTS; writel(V_WRITE_DATA(wr_data) | V_REGISTER_OFFSET(reg_offset) | V_CHANNEL_ADDR(ch_addr) | V_MODULE_ADDR(module_addr) | V_BUNDLE_ADDR(bundle_addr) | V_SPI4_COMMAND(TRICN_CMD_WRITE), adapter->regs + A_ESPI_CMD_ADDR); writel(0, adapter->regs + A_ESPI_GOSTAT); do { busy = readl(adapter->regs + A_ESPI_GOSTAT) & F_ESPI_CMD_BUSY; } while (busy && --attempts); if (busy) pr_err("%s: TRICN write timed out\n", adapter->name); return busy; } static int tricn_init(adapter_t *adapter) { int i, sme = 1; if (!(readl(adapter->regs + A_ESPI_RX_RESET) & F_RX_CLK_STATUS)) { pr_err("%s: ESPI clock not ready\n", adapter->name); return -1; } writel(F_ESPI_RX_CORE_RST, adapter->regs + A_ESPI_RX_RESET); if (sme) { tricn_write(adapter, 0, 0, 0, TRICN_CNFG, 0x81); tricn_write(adapter, 0, 1, 0, TRICN_CNFG, 0x81); tricn_write(adapter, 0, 2, 0, TRICN_CNFG, 0x81); } for (i = 1; i <= 8; i++) tricn_write(adapter, 0, 0, i, TRICN_CNFG, 0xf1); for (i = 1; i <= 2; i++) tricn_write(adapter, 0, 1, i, TRICN_CNFG, 0xf1); for (i = 1; i <= 3; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xe1); tricn_write(adapter, 0, 2, 4, TRICN_CNFG, 0xf1); tricn_write(adapter, 0, 2, 5, TRICN_CNFG, 0xe1); tricn_write(adapter, 0, 2, 6, TRICN_CNFG, 0xf1); tricn_write(adapter, 0, 2, 7, TRICN_CNFG, 0x80); tricn_write(adapter, 0, 2, 8, TRICN_CNFG, 0xf1); writel(F_ESPI_RX_CORE_RST | F_ESPI_RX_LNK_RST, adapter->regs + A_ESPI_RX_RESET); return 0; } void t1_espi_intr_enable(struct peespi *espi) { u32 enable, pl_intr = readl(espi->adapter->regs + A_PL_ENABLE); /* * Cannot enable ESPI interrupts on T1B because HW asserts the * interrupt incorrectly, namely the driver gets ESPI interrupts * but no data is actually dropped (can verify this reading the ESPI * drop registers). Also, once the ESPI interrupt is asserted it * cannot be cleared (HW bug). */ enable = t1_is_T1B(espi->adapter) ? 
0 : ESPI_INTR_MASK; writel(enable, espi->adapter->regs + A_ESPI_INTR_ENABLE); writel(pl_intr | F_PL_INTR_ESPI, espi->adapter->regs + A_PL_ENABLE); } void t1_espi_intr_clear(struct peespi *espi) { readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT); writel(0xffffffff, espi->adapter->regs + A_ESPI_INTR_STATUS); writel(F_PL_INTR_ESPI, espi->adapter->regs + A_PL_CAUSE); } void t1_espi_intr_disable(struct peespi *espi) { u32 pl_intr = readl(espi->adapter->regs + A_PL_ENABLE); writel(0, espi->adapter->regs + A_ESPI_INTR_ENABLE); writel(pl_intr & ~F_PL_INTR_ESPI, espi->adapter->regs + A_PL_ENABLE); } int t1_espi_intr_handler(struct peespi *espi) { u32 status = readl(espi->adapter->regs + A_ESPI_INTR_STATUS); if (status & F_DIP4ERR) espi->intr_cnt.DIP4_err++; if (status & F_RXDROP) espi->intr_cnt.rx_drops++; if (status & F_TXDROP) espi->intr_cnt.tx_drops++; if (status & F_RXOVERFLOW) espi->intr_cnt.rx_ovflw++; if (status & F_RAMPARITYERR) espi->intr_cnt.parity_err++; if (status & F_DIP2PARITYERR) { espi->intr_cnt.DIP2_parity_err++; /* * Must read the error count to clear the interrupt * that it causes. */ readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT); } /* * For T1B we need to write 1 to clear ESPI interrupts. For T2+ we * write the status as is. */ if (status && t1_is_T1B(espi->adapter)) status = 1; writel(status, espi->adapter->regs + A_ESPI_INTR_STATUS); return 0; } const struct espi_intr_counts *t1_espi_get_intr_counts(struct peespi *espi) { return &espi->intr_cnt; } static void espi_setup_for_pm3393(adapter_t *adapter) { u32 wmark = t1_is_T1B(adapter) ? 
0x4000 : 0x3200; writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN0); writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN1); writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN2); writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN3); writel(0x100, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK); writel(wmark, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK); writel(3, adapter->regs + A_ESPI_CALENDAR_LENGTH); writel(0x08000008, adapter->regs + A_ESPI_TRAIN); writel(V_RX_NPORTS(1) | V_TX_NPORTS(1), adapter->regs + A_PORT_CONFIG); } static void espi_setup_for_vsc7321(adapter_t *adapter) { writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN0); writel(0x1f401f4, adapter->regs + A_ESPI_SCH_TOKEN1); writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN2); writel(0xa00, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK); writel(0x1ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK); writel(1, adapter->regs + A_ESPI_CALENDAR_LENGTH); writel(V_RX_NPORTS(4) | V_TX_NPORTS(4), adapter->regs + A_PORT_CONFIG); writel(0x08000008, adapter->regs + A_ESPI_TRAIN); } /* * Note that T1B requires at least 2 ports for IXF1010 due to a HW bug. */ static void espi_setup_for_ixf1010(adapter_t *adapter, int nports) { writel(1, adapter->regs + A_ESPI_CALENDAR_LENGTH); if (nports == 4) { if (is_T2(adapter)) { writel(0xf00, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK); writel(0x3c0, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK); } else { writel(0x7ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK); writel(0x1ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK); } } else { writel(0x1fff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK); writel(0x7ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK); } writel(V_RX_NPORTS(nports) | V_TX_NPORTS(nports), adapter->regs + A_PORT_CONFIG); } int t1_espi_init(struct peespi *espi, int mac_type, int nports) { u32 status_enable_extra = 0; adapter_t *adapter = espi->adapter; /* Disable ESPI training. 
MACs that can handle it enable it below. */ writel(0, adapter->regs + A_ESPI_TRAIN); if (is_T2(adapter)) { writel(V_OUT_OF_SYNC_COUNT(4) | V_DIP2_PARITY_ERR_THRES(3) | V_DIP4_THRES(1), adapter->regs + A_ESPI_MISC_CONTROL); writel(nports == 4 ? 0x200040 : 0x1000080, adapter->regs + A_ESPI_MAXBURST1_MAXBURST2); } else writel(0x800100, adapter->regs + A_ESPI_MAXBURST1_MAXBURST2); if (mac_type == CHBT_MAC_PM3393) espi_setup_for_pm3393(adapter); else if (mac_type == CHBT_MAC_VSC7321) espi_setup_for_vsc7321(adapter); else if (mac_type == CHBT_MAC_IXF1010) { status_enable_extra = F_INTEL1010MODE; espi_setup_for_ixf1010(adapter, nports); } else return -1; writel(status_enable_extra | F_RXSTATUSENABLE, adapter->regs + A_ESPI_FIFO_STATUS_ENABLE); if (is_T2(adapter)) { tricn_init(adapter); /* * Always position the control at the 1st port egress IN * (sop,eop) counter to reduce PIOs for T/N210 workaround. */ espi->misc_ctrl = readl(adapter->regs + A_ESPI_MISC_CONTROL); espi->misc_ctrl &= ~MON_MASK; espi->misc_ctrl |= F_MONITORED_DIRECTION; if (adapter->params.nports == 1) espi->misc_ctrl |= F_MONITORED_INTERFACE; writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL); spin_lock_init(&espi->lock); } return 0; } void t1_espi_destroy(struct peespi *espi) { kfree(espi); } struct peespi *t1_espi_create(adapter_t *adapter) { struct peespi *espi = kzalloc(sizeof(*espi), GFP_KERNEL); if (espi) espi->adapter = adapter; return espi; } #if 0 void t1_espi_set_misc_ctrl(adapter_t *adapter, u32 val) { struct peespi *espi = adapter->espi; if (!is_T2(adapter)) return; spin_lock(&espi->lock); espi->misc_ctrl = (val & ~MON_MASK) | (espi->misc_ctrl & MON_MASK); writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL); spin_unlock(&espi->lock); } #endif /* 0 */ u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait) { struct peespi *espi = adapter->espi; u32 sel; if (!is_T2(adapter)) return 0; sel = V_MONITORED_PORT_NUM((addr & 0x3c) >> 2); if (!wait) { if 
(!spin_trylock(&espi->lock)) return 0; } else spin_lock(&espi->lock); if ((sel != (espi->misc_ctrl & MON_MASK))) { writel(((espi->misc_ctrl & ~MON_MASK) | sel), adapter->regs + A_ESPI_MISC_CONTROL); sel = readl(adapter->regs + A_ESPI_SCH_TOKEN3); writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL); } else sel = readl(adapter->regs + A_ESPI_SCH_TOKEN3); spin_unlock(&espi->lock); return sel; } /* * This function is for T204 only. * compare with t1_espi_get_mon(), it reads espiInTxSop[0 ~ 3] in * one shot, since there is no per port counter on the out side. */ int t1_espi_get_mon_t204(adapter_t *adapter, u32 *valp, u8 wait) { struct peespi *espi = adapter->espi; u8 i, nport = (u8)adapter->params.nports; if (!wait) { if (!spin_trylock(&espi->lock)) return -1; } else spin_lock(&espi->lock); if ((espi->misc_ctrl & MON_MASK) != F_MONITORED_DIRECTION) { espi->misc_ctrl = (espi->misc_ctrl & ~MON_MASK) | F_MONITORED_DIRECTION; writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL); } for (i = 0 ; i < nport; i++, valp++) { if (i) { writel(espi->misc_ctrl | V_MONITORED_PORT_NUM(i), adapter->regs + A_ESPI_MISC_CONTROL); } *valp = readl(adapter->regs + A_ESPI_SCH_TOKEN3); } writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL); spin_unlock(&espi->lock); return 0; }
gpl-2.0
lycanthia/Find7-Kernel-Source-4.3
arch/mips/dec/kn02-irq.c
12222
1888
/* * DECstation 5000/200 (KN02) Control and Status Register * interrupts. * * Copyright (c) 2002, 2003, 2005 Maciej W. Rozycki * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/init.h> #include <linux/irq.h> #include <linux/types.h> #include <asm/dec/kn02.h> /* * Bits 7:0 of the Control Register are write-only -- the * corresponding bits of the Status Register have a different * meaning. Hence we use a cache. It speeds up things a bit * as well. * * There is no default value -- it has to be initialized. */ u32 cached_kn02_csr; static int kn02_irq_base; static void unmask_kn02_irq(struct irq_data *d) { volatile u32 *csr = (volatile u32 *)CKSEG1ADDR(KN02_SLOT_BASE + KN02_CSR); cached_kn02_csr |= (1 << (d->irq - kn02_irq_base + 16)); *csr = cached_kn02_csr; } static void mask_kn02_irq(struct irq_data *d) { volatile u32 *csr = (volatile u32 *)CKSEG1ADDR(KN02_SLOT_BASE + KN02_CSR); cached_kn02_csr &= ~(1 << (d->irq - kn02_irq_base + 16)); *csr = cached_kn02_csr; } static void ack_kn02_irq(struct irq_data *d) { mask_kn02_irq(d); iob(); } static struct irq_chip kn02_irq_type = { .name = "KN02-CSR", .irq_ack = ack_kn02_irq, .irq_mask = mask_kn02_irq, .irq_mask_ack = ack_kn02_irq, .irq_unmask = unmask_kn02_irq, }; void __init init_kn02_irqs(int base) { volatile u32 *csr = (volatile u32 *)CKSEG1ADDR(KN02_SLOT_BASE + KN02_CSR); int i; /* Mask interrupts. */ cached_kn02_csr &= ~KN02_CSR_IOINTEN; *csr = cached_kn02_csr; iob(); for (i = base; i < base + KN02_IRQ_LINES; i++) irq_set_chip_and_handler(i, &kn02_irq_type, handle_level_irq); kn02_irq_base = base; }
gpl-2.0
mdr78/Linux-3.8.7-galileo
sound/pci/hda/hda_jack.c
447
14772
/* * Jack-detection handling for HD-audio * * Copyright (c) 2011 Takashi Iwai <tiwai@suse.de> * * This driver is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/init.h> #include <linux/slab.h> #include <linux/export.h> #include <sound/core.h> #include <sound/control.h> #include <sound/jack.h> #include "hda_codec.h" #include "hda_local.h" #include "hda_auto_parser.h" #include "hda_jack.h" bool is_jack_detectable(struct hda_codec *codec, hda_nid_t nid) { if (codec->no_jack_detect) return false; if (!(snd_hda_query_pin_caps(codec, nid) & AC_PINCAP_PRES_DETECT)) return false; if (get_defcfg_misc(snd_hda_codec_get_pincfg(codec, nid)) & AC_DEFCFG_MISC_NO_PRESENCE) return false; if (!(get_wcaps(codec, nid) & AC_WCAP_UNSOL_CAP) && !codec->jackpoll_interval) return false; return true; } EXPORT_SYMBOL_GPL(is_jack_detectable); /* execute pin sense measurement */ static u32 read_pin_sense(struct hda_codec *codec, hda_nid_t nid) { u32 pincap; u32 val; if (!codec->no_trigger_sense) { pincap = snd_hda_query_pin_caps(codec, nid); if (pincap & AC_PINCAP_TRIG_REQ) /* need trigger? 
*/ snd_hda_codec_read(codec, nid, 0, AC_VERB_SET_PIN_SENSE, 0); } val = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_PIN_SENSE, 0); if (codec->inv_jack_detect) val ^= AC_PINSENSE_PRESENCE; return val; } /** * snd_hda_jack_tbl_get - query the jack-table entry for the given NID */ struct hda_jack_tbl * snd_hda_jack_tbl_get(struct hda_codec *codec, hda_nid_t nid) { struct hda_jack_tbl *jack = codec->jacktbl.list; int i; if (!nid || !jack) return NULL; for (i = 0; i < codec->jacktbl.used; i++, jack++) if (jack->nid == nid) return jack; return NULL; } EXPORT_SYMBOL_GPL(snd_hda_jack_tbl_get); /** * snd_hda_jack_tbl_get_from_tag - query the jack-table entry for the given tag */ struct hda_jack_tbl * snd_hda_jack_tbl_get_from_tag(struct hda_codec *codec, unsigned char tag) { struct hda_jack_tbl *jack = codec->jacktbl.list; int i; if (!tag || !jack) return NULL; for (i = 0; i < codec->jacktbl.used; i++, jack++) if (jack->tag == tag) return jack; return NULL; } EXPORT_SYMBOL_GPL(snd_hda_jack_tbl_get_from_tag); /** * snd_hda_jack_tbl_new - create a jack-table entry for the given NID */ struct hda_jack_tbl * snd_hda_jack_tbl_new(struct hda_codec *codec, hda_nid_t nid) { struct hda_jack_tbl *jack = snd_hda_jack_tbl_get(codec, nid); if (jack) return jack; jack = snd_array_new(&codec->jacktbl); if (!jack) return NULL; jack->nid = nid; jack->jack_dirty = 1; jack->tag = codec->jacktbl.used; return jack; } EXPORT_SYMBOL_GPL(snd_hda_jack_tbl_new); void snd_hda_jack_tbl_clear(struct hda_codec *codec) { #ifdef CONFIG_SND_HDA_INPUT_JACK /* free jack instances manually when clearing/reconfiguring */ if (!codec->bus->shutdown && codec->jacktbl.list) { struct hda_jack_tbl *jack = codec->jacktbl.list; int i; for (i = 0; i < codec->jacktbl.used; i++, jack++) { if (jack->jack) snd_device_free(codec->bus->card, jack->jack); } } #endif snd_array_free(&codec->jacktbl); } #define get_jack_plug_state(sense) !!(sense & AC_PINSENSE_PRESENCE) /* update the cached value and notification flag if 
needed */ static void jack_detect_update(struct hda_codec *codec, struct hda_jack_tbl *jack) { if (!jack->jack_dirty) return; if (jack->phantom_jack) jack->pin_sense = AC_PINSENSE_PRESENCE; else jack->pin_sense = read_pin_sense(codec, jack->nid); /* A gating jack indicates the jack is invalid if gating is unplugged */ if (jack->gating_jack && !snd_hda_jack_detect(codec, jack->gating_jack)) jack->pin_sense &= ~AC_PINSENSE_PRESENCE; jack->jack_dirty = 0; /* If a jack is gated by this one update it. */ if (jack->gated_jack) { struct hda_jack_tbl *gated = snd_hda_jack_tbl_get(codec, jack->gated_jack); if (gated) { gated->jack_dirty = 1; jack_detect_update(codec, gated); } } } /** * snd_hda_set_dirty_all - Mark all the cached as dirty * * This function sets the dirty flag to all entries of jack table. * It's called from the resume path in hda_codec.c. */ void snd_hda_jack_set_dirty_all(struct hda_codec *codec) { struct hda_jack_tbl *jack = codec->jacktbl.list; int i; for (i = 0; i < codec->jacktbl.used; i++, jack++) if (jack->nid) jack->jack_dirty = 1; } EXPORT_SYMBOL_GPL(snd_hda_jack_set_dirty_all); /** * snd_hda_pin_sense - execute pin sense measurement * @codec: the CODEC to sense * @nid: the pin NID to sense * * Execute necessary pin sense measurement and return its Presence Detect, * Impedance, ELD Valid etc. status bits. */ u32 snd_hda_pin_sense(struct hda_codec *codec, hda_nid_t nid) { struct hda_jack_tbl *jack = snd_hda_jack_tbl_get(codec, nid); if (jack) { jack_detect_update(codec, jack); return jack->pin_sense; } return read_pin_sense(codec, nid); } EXPORT_SYMBOL_GPL(snd_hda_pin_sense); /** * snd_hda_jack_detect_state - query pin Presence Detect status * @codec: the CODEC to sense * @nid: the pin NID to sense * * Query and return the pin's Presence Detect status, as either * HDA_JACK_NOT_PRESENT, HDA_JACK_PRESENT or HDA_JACK_PHANTOM. 
*/ int snd_hda_jack_detect_state(struct hda_codec *codec, hda_nid_t nid) { struct hda_jack_tbl *jack = snd_hda_jack_tbl_get(codec, nid); if (jack && jack->phantom_jack) return HDA_JACK_PHANTOM; else if (snd_hda_pin_sense(codec, nid) & AC_PINSENSE_PRESENCE) return HDA_JACK_PRESENT; else return HDA_JACK_NOT_PRESENT; } EXPORT_SYMBOL_GPL(snd_hda_jack_detect_state); /** * snd_hda_jack_detect_enable - enable the jack-detection */ int snd_hda_jack_detect_enable_callback(struct hda_codec *codec, hda_nid_t nid, unsigned char action, hda_jack_callback cb) { struct hda_jack_tbl *jack = snd_hda_jack_tbl_new(codec, nid); if (!jack) return -ENOMEM; if (jack->jack_detect) return 0; /* already registered */ jack->jack_detect = 1; if (action) jack->action = action; if (cb) jack->callback = cb; if (codec->jackpoll_interval > 0) return 0; /* No unsol if we're polling instead */ return snd_hda_codec_write_cache(codec, nid, 0, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | jack->tag); } EXPORT_SYMBOL_GPL(snd_hda_jack_detect_enable_callback); int snd_hda_jack_detect_enable(struct hda_codec *codec, hda_nid_t nid, unsigned char action) { return snd_hda_jack_detect_enable_callback(codec, nid, action, NULL); } EXPORT_SYMBOL_GPL(snd_hda_jack_detect_enable); /** * snd_hda_jack_set_gating_jack - Set gating jack. * * Indicates the gated jack is only valid when the gating jack is plugged. 
*/ int snd_hda_jack_set_gating_jack(struct hda_codec *codec, hda_nid_t gated_nid, hda_nid_t gating_nid) { struct hda_jack_tbl *gated = snd_hda_jack_tbl_new(codec, gated_nid); struct hda_jack_tbl *gating = snd_hda_jack_tbl_new(codec, gating_nid); if (!gated || !gating) return -EINVAL; gated->gating_jack = gating_nid; gating->gated_jack = gated_nid; return 0; } EXPORT_SYMBOL_GPL(snd_hda_jack_set_gating_jack); /** * snd_hda_jack_report_sync - sync the states of all jacks and report if changed */ void snd_hda_jack_report_sync(struct hda_codec *codec) { struct hda_jack_tbl *jack; int i, state; /* update all jacks at first */ jack = codec->jacktbl.list; for (i = 0; i < codec->jacktbl.used; i++, jack++) if (jack->nid) jack_detect_update(codec, jack); /* report the updated jacks; it's done after updating all jacks * to make sure that all gating jacks properly have been set */ jack = codec->jacktbl.list; for (i = 0; i < codec->jacktbl.used; i++, jack++) if (jack->nid) { if (!jack->kctl || jack->block_report) continue; state = get_jack_plug_state(jack->pin_sense); snd_kctl_jack_report(codec->bus->card, jack->kctl, state); #ifdef CONFIG_SND_HDA_INPUT_JACK if (jack->jack) snd_jack_report(jack->jack, state ? 
jack->type : 0); #endif } } EXPORT_SYMBOL_GPL(snd_hda_jack_report_sync); #ifdef CONFIG_SND_HDA_INPUT_JACK /* guess the jack type from the pin-config */ static int get_input_jack_type(struct hda_codec *codec, hda_nid_t nid) { unsigned int def_conf = snd_hda_codec_get_pincfg(codec, nid); switch (get_defcfg_device(def_conf)) { case AC_JACK_LINE_OUT: case AC_JACK_SPEAKER: return SND_JACK_LINEOUT; case AC_JACK_HP_OUT: return SND_JACK_HEADPHONE; case AC_JACK_SPDIF_OUT: case AC_JACK_DIG_OTHER_OUT: return SND_JACK_AVOUT; case AC_JACK_MIC_IN: return SND_JACK_MICROPHONE; default: return SND_JACK_LINEIN; } } static void hda_free_jack_priv(struct snd_jack *jack) { struct hda_jack_tbl *jacks = jack->private_data; jacks->nid = 0; jacks->jack = NULL; } #endif /** * snd_hda_jack_add_kctl - Add a kctl for the given pin * * This assigns a jack-detection kctl to the given pin. The kcontrol * will have the given name and index. */ static int __snd_hda_jack_add_kctl(struct hda_codec *codec, hda_nid_t nid, const char *name, int idx, bool phantom_jack) { struct hda_jack_tbl *jack; struct snd_kcontrol *kctl; int err, state; jack = snd_hda_jack_tbl_new(codec, nid); if (!jack) return 0; if (jack->kctl) return 0; /* already created */ kctl = snd_kctl_jack_new(name, idx, codec); if (!kctl) return -ENOMEM; err = snd_hda_ctl_add(codec, nid, kctl); if (err < 0) return err; jack->kctl = kctl; jack->phantom_jack = !!phantom_jack; state = snd_hda_jack_detect(codec, nid); snd_kctl_jack_report(codec->bus->card, kctl, state); #ifdef CONFIG_SND_HDA_INPUT_JACK if (!phantom_jack) { jack->type = get_input_jack_type(codec, nid); err = snd_jack_new(codec->bus->card, name, jack->type, &jack->jack); if (err < 0) return err; jack->jack->private_data = jack; jack->jack->private_free = hda_free_jack_priv; snd_jack_report(jack->jack, state ? 
jack->type : 0); } #endif return 0; } int snd_hda_jack_add_kctl(struct hda_codec *codec, hda_nid_t nid, const char *name, int idx) { return __snd_hda_jack_add_kctl(codec, nid, name, idx, false); } EXPORT_SYMBOL_GPL(snd_hda_jack_add_kctl); /* get the unique index number for the given kctl name */ static int get_unique_index(struct hda_codec *codec, const char *name, int idx) { struct hda_jack_tbl *jack; int i, len = strlen(name); again: jack = codec->jacktbl.list; for (i = 0; i < codec->jacktbl.used; i++, jack++) { /* jack->kctl.id contains "XXX Jack" name string with index */ if (jack->kctl && !strncmp(name, jack->kctl->id.name, len) && !strcmp(" Jack", jack->kctl->id.name + len) && jack->kctl->id.index == idx) { idx++; goto again; } } return idx; } static int add_jack_kctl(struct hda_codec *codec, hda_nid_t nid, const struct auto_pin_cfg *cfg, const char *base_name) { unsigned int def_conf, conn; char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; int idx, err; bool phantom_jack; if (!nid) return 0; def_conf = snd_hda_codec_get_pincfg(codec, nid); conn = get_defcfg_connect(def_conf); if (conn == AC_JACK_PORT_NONE) return 0; phantom_jack = (conn != AC_JACK_PORT_COMPLEX) || !is_jack_detectable(codec, nid); if (base_name) { strlcpy(name, base_name, sizeof(name)); idx = 0; } else snd_hda_get_pin_label(codec, nid, cfg, name, sizeof(name), &idx); if (phantom_jack) /* Example final name: "Internal Mic Phantom Jack" */ strncat(name, " Phantom", sizeof(name) - strlen(name) - 1); idx = get_unique_index(codec, name, idx); err = __snd_hda_jack_add_kctl(codec, nid, name, idx, phantom_jack); if (err < 0) return err; if (!phantom_jack) return snd_hda_jack_detect_enable(codec, nid, 0); return 0; } /** * snd_hda_jack_add_kctls - Add kctls for all pins included in the given pincfg */ int snd_hda_jack_add_kctls(struct hda_codec *codec, const struct auto_pin_cfg *cfg) { const hda_nid_t *p; int i, err; for (i = 0; i < cfg->num_inputs; i++) { /* If we have headphone mics; make sure they get the 
right name before grabbed by output pins */ if (cfg->inputs[i].is_headphone_mic) { if (auto_cfg_hp_outs(cfg) == 1) err = add_jack_kctl(codec, auto_cfg_hp_pins(cfg)[0], cfg, "Headphone Mic"); else err = add_jack_kctl(codec, cfg->inputs[i].pin, cfg, "Headphone Mic"); } else err = add_jack_kctl(codec, cfg->inputs[i].pin, cfg, NULL); if (err < 0) return err; } for (i = 0, p = cfg->line_out_pins; i < cfg->line_outs; i++, p++) { err = add_jack_kctl(codec, *p, cfg, NULL); if (err < 0) return err; } for (i = 0, p = cfg->hp_pins; i < cfg->hp_outs; i++, p++) { if (*p == *cfg->line_out_pins) /* might be duplicated */ break; err = add_jack_kctl(codec, *p, cfg, NULL); if (err < 0) return err; } for (i = 0, p = cfg->speaker_pins; i < cfg->speaker_outs; i++, p++) { if (*p == *cfg->line_out_pins) /* might be duplicated */ break; err = add_jack_kctl(codec, *p, cfg, NULL); if (err < 0) return err; } for (i = 0, p = cfg->dig_out_pins; i < cfg->dig_outs; i++, p++) { err = add_jack_kctl(codec, *p, cfg, NULL); if (err < 0) return err; } err = add_jack_kctl(codec, cfg->dig_in_pin, cfg, NULL); if (err < 0) return err; err = add_jack_kctl(codec, cfg->mono_out_pin, cfg, NULL); if (err < 0) return err; return 0; } EXPORT_SYMBOL_GPL(snd_hda_jack_add_kctls); static void call_jack_callback(struct hda_codec *codec, struct hda_jack_tbl *jack) { if (jack->callback) jack->callback(codec, jack); if (jack->gated_jack) { struct hda_jack_tbl *gated = snd_hda_jack_tbl_get(codec, jack->gated_jack); if (gated && gated->callback) gated->callback(codec, gated); } } void snd_hda_jack_unsol_event(struct hda_codec *codec, unsigned int res) { struct hda_jack_tbl *event; int tag = (res >> AC_UNSOL_RES_TAG_SHIFT) & 0x7f; event = snd_hda_jack_tbl_get_from_tag(codec, tag); if (!event) return; event->jack_dirty = 1; call_jack_callback(codec, event); snd_hda_jack_report_sync(codec); } EXPORT_SYMBOL_GPL(snd_hda_jack_unsol_event); void snd_hda_jack_poll_all(struct hda_codec *codec) { struct hda_jack_tbl *jack = 
codec->jacktbl.list; int i, changes = 0; for (i = 0; i < codec->jacktbl.used; i++, jack++) { unsigned int old_sense; if (!jack->nid || !jack->jack_dirty || jack->phantom_jack) continue; old_sense = get_jack_plug_state(jack->pin_sense); jack_detect_update(codec, jack); if (old_sense == get_jack_plug_state(jack->pin_sense)) continue; changes = 1; call_jack_callback(codec, jack); } if (changes) snd_hda_jack_report_sync(codec); } EXPORT_SYMBOL_GPL(snd_hda_jack_poll_all);
gpl-2.0
EZchip/linux
fs/gfs2/dentry.c
447
2796
/* * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU General Public License version 2. */ #include <linux/spinlock.h> #include <linux/completion.h> #include <linux/buffer_head.h> #include <linux/gfs2_ondisk.h> #include <linux/namei.h> #include <linux/crc32.h> #include "gfs2.h" #include "incore.h" #include "dir.h" #include "glock.h" #include "super.h" #include "util.h" #include "inode.h" /** * gfs2_drevalidate - Check directory lookup consistency * @dentry: the mapping to check * @flags: lookup flags * * Check to make sure the lookup necessary to arrive at this inode from its * parent is still good. * * Returns: 1 if the dentry is ok, 0 if it isn't */ static int gfs2_drevalidate(struct dentry *dentry, unsigned int flags) { struct dentry *parent; struct gfs2_sbd *sdp; struct gfs2_inode *dip; struct inode *inode; struct gfs2_holder d_gh; struct gfs2_inode *ip = NULL; int error; int had_lock = 0; if (flags & LOOKUP_RCU) return -ECHILD; parent = dget_parent(dentry); sdp = GFS2_SB(d_inode(parent)); dip = GFS2_I(d_inode(parent)); inode = d_inode(dentry); if (inode) { if (is_bad_inode(inode)) goto invalid; ip = GFS2_I(inode); } if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL) goto valid; had_lock = (gfs2_glock_is_locked_by_me(dip->i_gl) != NULL); if (!had_lock) { error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh); if (error) goto fail; } error = gfs2_dir_check(d_inode(parent), &dentry->d_name, ip); switch (error) { case 0: if (!inode) goto invalid_gunlock; break; case -ENOENT: if (!inode) goto valid_gunlock; goto invalid_gunlock; default: goto fail_gunlock; } valid_gunlock: if (!had_lock) gfs2_glock_dq_uninit(&d_gh); valid: dput(parent); return 1; invalid_gunlock: if (!had_lock) gfs2_glock_dq_uninit(&d_gh); invalid: 
dput(parent); return 0; fail_gunlock: gfs2_glock_dq_uninit(&d_gh); fail: dput(parent); return 0; } static int gfs2_dhash(const struct dentry *dentry, struct qstr *str) { str->hash = gfs2_disk_hash(str->name, str->len); return 0; } static int gfs2_dentry_delete(const struct dentry *dentry) { struct gfs2_inode *ginode; if (d_really_is_negative(dentry)) return 0; ginode = GFS2_I(d_inode(dentry)); if (!gfs2_holder_initialized(&ginode->i_iopen_gh)) return 0; if (test_bit(GLF_DEMOTE, &ginode->i_iopen_gh.gh_gl->gl_flags)) return 1; return 0; } const struct dentry_operations gfs2_dops = { .d_revalidate = gfs2_drevalidate, .d_hash = gfs2_dhash, .d_delete = gfs2_dentry_delete, };
gpl-2.0
timduru/kernel-asus-tf101
drivers/pnp/pnpacpi/core.c
1471
9956
/* * pnpacpi -- PnP ACPI driver * * Copyright (c) 2004 Matthieu Castet <castet.matthieu@free.fr> * Copyright (c) 2004 Li Shaohua <shaohua.li@intel.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any * later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/acpi.h> #include <linux/pnp.h> #include <linux/slab.h> #include <linux/mod_devicetable.h> #include <acpi/acpi_bus.h> #include "../base.h" #include "pnpacpi.h" static int num; /* We need only to blacklist devices that have already an acpi driver that * can't use pnp layer. We don't need to blacklist device that are directly * used by the kernel (PCI root, ...), as it is harmless and there were * already present in pnpbios. But there is an exception for devices that * have irqs (PIC, Timer) because we call acpi_register_gsi. * Finally, only devices that have a CRS method need to be in this list. 
*/ static struct acpi_device_id excluded_id_list[] __initdata = { {"PNP0C09", 0}, /* EC */ {"PNP0C0F", 0}, /* Link device */ {"PNP0000", 0}, /* PIC */ {"PNP0100", 0}, /* Timer */ {"", 0}, }; static inline int __init is_exclusive_device(struct acpi_device *dev) { return (!acpi_match_device_ids(dev, excluded_id_list)); } /* * Compatible Device IDs */ #define TEST_HEX(c) \ if (!(('0' <= (c) && (c) <= '9') || ('A' <= (c) && (c) <= 'F'))) \ return 0 #define TEST_ALPHA(c) \ if (!('@' <= (c) || (c) <= 'Z')) \ return 0 static int __init ispnpidacpi(const char *id) { TEST_ALPHA(id[0]); TEST_ALPHA(id[1]); TEST_ALPHA(id[2]); TEST_HEX(id[3]); TEST_HEX(id[4]); TEST_HEX(id[5]); TEST_HEX(id[6]); if (id[7] != '\0') return 0; return 1; } static int pnpacpi_get_resources(struct pnp_dev *dev) { pnp_dbg(&dev->dev, "get resources\n"); return pnpacpi_parse_allocated_resource(dev); } static int pnpacpi_set_resources(struct pnp_dev *dev) { struct acpi_device *acpi_dev; acpi_handle handle; struct acpi_buffer buffer; int ret; pnp_dbg(&dev->dev, "set resources\n"); handle = DEVICE_ACPI_HANDLE(&dev->dev); if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) { dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); return -ENODEV; } ret = pnpacpi_build_resource_template(dev, &buffer); if (ret) return ret; ret = pnpacpi_encode_resources(dev, &buffer); if (ret) { kfree(buffer.pointer); return ret; } if (ACPI_FAILURE(acpi_set_current_resources(handle, &buffer))) ret = -EINVAL; else if (acpi_bus_power_manageable(handle)) ret = acpi_bus_set_power(handle, ACPI_STATE_D0); kfree(buffer.pointer); return ret; } static int pnpacpi_disable_resources(struct pnp_dev *dev) { struct acpi_device *acpi_dev; acpi_handle handle; int ret; dev_dbg(&dev->dev, "disable resources\n"); handle = DEVICE_ACPI_HANDLE(&dev->dev); if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) { dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); return 0; } /* 
acpi_unregister_gsi(pnp_irq(dev, 0)); */ ret = 0; if (acpi_bus_power_manageable(handle)) acpi_bus_set_power(handle, ACPI_STATE_D3); /* continue even if acpi_bus_set_power() fails */ if (ACPI_FAILURE(acpi_evaluate_object(handle, "_DIS", NULL, NULL))) ret = -ENODEV; return ret; } #ifdef CONFIG_ACPI_SLEEP static bool pnpacpi_can_wakeup(struct pnp_dev *dev) { struct acpi_device *acpi_dev; acpi_handle handle; handle = DEVICE_ACPI_HANDLE(&dev->dev); if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) { dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); return false; } return acpi_bus_can_wakeup(handle); } static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state) { struct acpi_device *acpi_dev; acpi_handle handle; int error = 0; handle = DEVICE_ACPI_HANDLE(&dev->dev); if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) { dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); return 0; } if (device_can_wakeup(&dev->dev)) { error = acpi_pm_device_sleep_wake(&dev->dev, device_may_wakeup(&dev->dev)); if (error) return error; } if (acpi_bus_power_manageable(handle)) { int power_state = acpi_pm_device_sleep_state(&dev->dev, NULL); if (power_state < 0) power_state = (state.event == PM_EVENT_ON) ? ACPI_STATE_D0 : ACPI_STATE_D3; /* * acpi_bus_set_power() often fails (keyboard port can't be * powered-down?), and in any case, our return value is ignored * by pnp_bus_suspend(). Hence we don't revert the wakeup * setting if the set_power fails. 
*/ error = acpi_bus_set_power(handle, power_state); } return error; } static int pnpacpi_resume(struct pnp_dev *dev) { struct acpi_device *acpi_dev; acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev); int error = 0; if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) { dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); return -ENODEV; } if (device_may_wakeup(&dev->dev)) acpi_pm_device_sleep_wake(&dev->dev, false); if (acpi_bus_power_manageable(handle)) error = acpi_bus_set_power(handle, ACPI_STATE_D0); return error; } #endif struct pnp_protocol pnpacpi_protocol = { .name = "Plug and Play ACPI", .get = pnpacpi_get_resources, .set = pnpacpi_set_resources, .disable = pnpacpi_disable_resources, #ifdef CONFIG_ACPI_SLEEP .can_wakeup = pnpacpi_can_wakeup, .suspend = pnpacpi_suspend, .resume = pnpacpi_resume, #endif }; EXPORT_SYMBOL(pnpacpi_protocol); static char *__init pnpacpi_get_id(struct acpi_device *device) { struct acpi_hardware_id *id; list_for_each_entry(id, &device->pnp.ids, list) { if (ispnpidacpi(id->id)) return id->id; } return NULL; } static int __init pnpacpi_add_device(struct acpi_device *device) { acpi_handle temp = NULL; acpi_status status; struct pnp_dev *dev; char *pnpid; struct acpi_hardware_id *id; /* * If a PnPacpi device is not present , the device * driver should not be loaded. 
*/ status = acpi_get_handle(device->handle, "_CRS", &temp); if (ACPI_FAILURE(status)) return 0; pnpid = pnpacpi_get_id(device); if (!pnpid) return 0; if (is_exclusive_device(device) || !device->status.present) return 0; dev = pnp_alloc_dev(&pnpacpi_protocol, num, pnpid); if (!dev) return -ENOMEM; dev->data = device; /* .enabled means the device can decode the resources */ dev->active = device->status.enabled; status = acpi_get_handle(device->handle, "_SRS", &temp); if (ACPI_SUCCESS(status)) dev->capabilities |= PNP_CONFIGURABLE; dev->capabilities |= PNP_READ; if (device->flags.dynamic_status && (dev->capabilities & PNP_CONFIGURABLE)) dev->capabilities |= PNP_WRITE; if (device->flags.removable) dev->capabilities |= PNP_REMOVABLE; status = acpi_get_handle(device->handle, "_DIS", &temp); if (ACPI_SUCCESS(status)) dev->capabilities |= PNP_DISABLE; if (strlen(acpi_device_name(device))) strncpy(dev->name, acpi_device_name(device), sizeof(dev->name)); else strncpy(dev->name, acpi_device_bid(device), sizeof(dev->name)); if (dev->active) pnpacpi_parse_allocated_resource(dev); if (dev->capabilities & PNP_CONFIGURABLE) pnpacpi_parse_resource_option_data(dev); list_for_each_entry(id, &device->pnp.ids, list) { if (!strcmp(id->id, pnpid)) continue; if (!ispnpidacpi(id->id)) continue; pnp_add_id(dev, id->id); } /* clear out the damaged flags */ if (!dev->active) pnp_init_resources(dev); pnp_add_device(dev); num++; return AE_OK; } static acpi_status __init pnpacpi_add_device_handler(acpi_handle handle, u32 lvl, void *context, void **rv) { struct acpi_device *device; if (!acpi_bus_get_device(handle, &device)) pnpacpi_add_device(device); else return AE_CTRL_DEPTH; return AE_OK; } static int __init acpi_pnp_match(struct device *dev, void *_pnp) { struct acpi_device *acpi = to_acpi_device(dev); struct pnp_dev *pnp = _pnp; /* true means it matched */ return !acpi_get_physical_device(acpi->handle) && compare_pnp_id(pnp->id, acpi_device_hid(acpi)); } static int __init 
acpi_pnp_find_device(struct device *dev, acpi_handle * handle) { struct device *adev; struct acpi_device *acpi; adev = bus_find_device(&acpi_bus_type, NULL, to_pnp_dev(dev), acpi_pnp_match); if (!adev) return -ENODEV; acpi = to_acpi_device(adev); *handle = acpi->handle; put_device(adev); return 0; } /* complete initialization of a PNPACPI device includes having * pnpdev->dev.archdata.acpi_handle point to its ACPI sibling. */ static struct acpi_bus_type __initdata acpi_pnp_bus = { .bus = &pnp_bus_type, .find_device = acpi_pnp_find_device, }; int pnpacpi_disabled __initdata; static int __init pnpacpi_init(void) { if (acpi_disabled || pnpacpi_disabled) { printk(KERN_INFO "pnp: PnP ACPI: disabled\n"); return 0; } printk(KERN_INFO "pnp: PnP ACPI init\n"); pnp_register_protocol(&pnpacpi_protocol); register_acpi_bus_type(&acpi_pnp_bus); acpi_get_devices(NULL, pnpacpi_add_device_handler, NULL, NULL); printk(KERN_INFO "pnp: PnP ACPI: found %d devices\n", num); unregister_acpi_bus_type(&acpi_pnp_bus); pnp_platform_devices = 1; return 0; } fs_initcall(pnpacpi_init); static int __init pnpacpi_setup(char *str) { if (str == NULL) return 1; if (!strncmp(str, "off", 3)) pnpacpi_disabled = 1; return 1; } __setup("pnpacpi=", pnpacpi_setup);
gpl-2.0
myjang0507/Polaris-slte
drivers/usb/host/ohci-spear.c
1983
5561
/* * OHCI HCD (Host Controller Driver) for USB. * * Copyright (C) 2010 ST Microelectronics. * Deepak Sikri<deepak.sikri@st.com> * * Based on various ohci-*.c drivers * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/signal.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/of.h> struct spear_ohci { struct ohci_hcd ohci; struct clk *clk; }; #define to_spear_ohci(hcd) (struct spear_ohci *)hcd_to_ohci(hcd) static void spear_start_ohci(struct spear_ohci *ohci) { clk_prepare_enable(ohci->clk); } static void spear_stop_ohci(struct spear_ohci *ohci) { clk_disable_unprepare(ohci->clk); } static int ohci_spear_start(struct usb_hcd *hcd) { struct ohci_hcd *ohci = hcd_to_ohci(hcd); int ret; ret = ohci_init(ohci); if (ret < 0) return ret; ohci->regs = hcd->regs; ret = ohci_run(ohci); if (ret < 0) { dev_err(hcd->self.controller, "can't start\n"); ohci_stop(hcd); return ret; } create_debug_files(ohci); #ifdef DEBUG ohci_dump(ohci, 1); #endif return 0; } static const struct hc_driver ohci_spear_hc_driver = { .description = hcd_name, .product_desc = "SPEAr OHCI", .hcd_priv_size = sizeof(struct spear_ohci), /* generic hardware linkage */ .irq = ohci_irq, .flags = HCD_USB11 | HCD_MEMORY, /* basic lifecycle operations */ .start = ohci_spear_start, .stop = ohci_stop, .shutdown = ohci_shutdown, #ifdef CONFIG_PM .bus_suspend = ohci_bus_suspend, .bus_resume = ohci_bus_resume, #endif /* managing i/o requests and associated device resources */ .urb_enqueue = ohci_urb_enqueue, .urb_dequeue = ohci_urb_dequeue, .endpoint_disable = ohci_endpoint_disable, /* scheduling support */ .get_frame_number = ohci_get_frame, /* root hub support */ .hub_status_data = ohci_hub_status_data, .hub_control = ohci_hub_control, .start_port_reset = ohci_start_port_reset, }; static int spear_ohci_hcd_drv_probe(struct platform_device 
*pdev) { const struct hc_driver *driver = &ohci_spear_hc_driver; struct usb_hcd *hcd = NULL; struct clk *usbh_clk; struct spear_ohci *ohci_p; struct resource *res; int retval, irq; irq = platform_get_irq(pdev, 0); if (irq < 0) { retval = irq; goto fail; } /* * Right now device-tree probed devices don't get dma_mask set. * Since shared usb code relies on it, set it here for now. * Once we have dma capability bindings this can go away. */ if (!pdev->dev.dma_mask) pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; if (!pdev->dev.coherent_dma_mask) pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); usbh_clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(usbh_clk)) { dev_err(&pdev->dev, "Error getting interface clock\n"); retval = PTR_ERR(usbh_clk); goto fail; } hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev)); if (!hcd) { retval = -ENOMEM; goto fail; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { retval = -ENODEV; goto err_put_hcd; } hcd->rsrc_start = pdev->resource[0].start; hcd->rsrc_len = resource_size(res); if (!devm_request_mem_region(&pdev->dev, hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { dev_dbg(&pdev->dev, "request_mem_region failed\n"); retval = -EBUSY; goto err_put_hcd; } hcd->regs = devm_ioremap(&pdev->dev, hcd->rsrc_start, hcd->rsrc_len); if (!hcd->regs) { dev_dbg(&pdev->dev, "ioremap failed\n"); retval = -ENOMEM; goto err_put_hcd; } ohci_p = (struct spear_ohci *)hcd_to_ohci(hcd); ohci_p->clk = usbh_clk; spear_start_ohci(ohci_p); ohci_hcd_init(hcd_to_ohci(hcd)); retval = usb_add_hcd(hcd, platform_get_irq(pdev, 0), 0); if (retval == 0) return retval; spear_stop_ohci(ohci_p); err_put_hcd: usb_put_hcd(hcd); fail: dev_err(&pdev->dev, "init fail, %d\n", retval); return retval; } static int spear_ohci_hcd_drv_remove(struct platform_device *pdev) { struct usb_hcd *hcd = platform_get_drvdata(pdev); struct spear_ohci *ohci_p = to_spear_ohci(hcd); usb_remove_hcd(hcd); if (ohci_p->clk) spear_stop_ohci(ohci_p); usb_put_hcd(hcd); 
platform_set_drvdata(pdev, NULL); return 0; } #if defined(CONFIG_PM) static int spear_ohci_hcd_drv_suspend(struct platform_device *dev, pm_message_t message) { struct usb_hcd *hcd = platform_get_drvdata(dev); struct ohci_hcd *ohci = hcd_to_ohci(hcd); struct spear_ohci *ohci_p = to_spear_ohci(hcd); if (time_before(jiffies, ohci->next_statechange)) msleep(5); ohci->next_statechange = jiffies; spear_stop_ohci(ohci_p); return 0; } static int spear_ohci_hcd_drv_resume(struct platform_device *dev) { struct usb_hcd *hcd = platform_get_drvdata(dev); struct ohci_hcd *ohci = hcd_to_ohci(hcd); struct spear_ohci *ohci_p = to_spear_ohci(hcd); if (time_before(jiffies, ohci->next_statechange)) msleep(5); ohci->next_statechange = jiffies; spear_start_ohci(ohci_p); ohci_resume(hcd, false); return 0; } #endif static struct of_device_id spear_ohci_id_table[] = { { .compatible = "st,spear600-ohci", }, { }, }; /* Driver definition to register with the platform bus */ static struct platform_driver spear_ohci_hcd_driver = { .probe = spear_ohci_hcd_drv_probe, .remove = spear_ohci_hcd_drv_remove, #ifdef CONFIG_PM .suspend = spear_ohci_hcd_drv_suspend, .resume = spear_ohci_hcd_drv_resume, #endif .driver = { .owner = THIS_MODULE, .name = "spear-ohci", .of_match_table = of_match_ptr(spear_ohci_id_table), }, }; MODULE_ALIAS("platform:spear-ohci");
gpl-2.0
sebirdman/android_kernel_motorola_msm8992
kernel/async.c
2751
10176
/* * async.c: Asynchronous function calls for boot performance * * (C) Copyright 2009 Intel Corporation * Author: Arjan van de Ven <arjan@linux.intel.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; version 2 * of the License. */ /* Goals and Theory of Operation The primary goal of this feature is to reduce the kernel boot time, by doing various independent hardware delays and discovery operations decoupled and not strictly serialized. More specifically, the asynchronous function call concept allows certain operations (primarily during system boot) to happen asynchronously, out of order, while these operations still have their externally visible parts happen sequentially and in-order. (not unlike how out-of-order CPUs retire their instructions in order) Key to the asynchronous function call implementation is the concept of a "sequence cookie" (which, although it has an abstracted type, can be thought of as a monotonically incrementing number). The async core will assign each scheduled event such a sequence cookie and pass this to the called functions. The asynchronously called function should before doing a globally visible operation, such as registering device numbers, call the async_synchronize_cookie() function and pass in its own cookie. The async_synchronize_cookie() function will make sure that all asynchronous operations that were scheduled prior to the operation corresponding with the cookie have completed. Subsystem/driver initialization code that scheduled asynchronous probe functions, but which shares global resources with other drivers/subsystems that do not use the asynchronous call feature, need to do a full synchronization with the async_synchronize_full() function, before returning from their init function. This is to maintain strict ordering between the asynchronous and synchronous parts of the kernel. 
*/ #include <linux/async.h> #include <linux/atomic.h> #include <linux/ktime.h> #include <linux/export.h> #include <linux/wait.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/workqueue.h> #include "workqueue_internal.h" static async_cookie_t next_cookie = 1; #define MAX_WORK 32768 #define ASYNC_COOKIE_MAX ULLONG_MAX /* infinity cookie */ static LIST_HEAD(async_global_pending); /* pending from all registered doms */ static ASYNC_DOMAIN(async_dfl_domain); static DEFINE_SPINLOCK(async_lock); struct async_entry { struct list_head domain_list; struct list_head global_list; struct work_struct work; async_cookie_t cookie; async_func_t func; void *data; struct async_domain *domain; }; static DECLARE_WAIT_QUEUE_HEAD(async_done); static atomic_t entry_count; static async_cookie_t lowest_in_progress(struct async_domain *domain) { struct list_head *pending; async_cookie_t ret = ASYNC_COOKIE_MAX; unsigned long flags; spin_lock_irqsave(&async_lock, flags); if (domain) pending = &domain->pending; else pending = &async_global_pending; if (!list_empty(pending)) ret = list_first_entry(pending, struct async_entry, domain_list)->cookie; spin_unlock_irqrestore(&async_lock, flags); return ret; } /* * pick the first pending entry and run it */ static void async_run_entry_fn(struct work_struct *work) { struct async_entry *entry = container_of(work, struct async_entry, work); unsigned long flags; ktime_t uninitialized_var(calltime), delta, rettime; /* 1) run (and print duration) */ if (initcall_debug && system_state == SYSTEM_BOOTING) { printk(KERN_DEBUG "calling %lli_%pF @ %i\n", (long long)entry->cookie, entry->func, task_pid_nr(current)); calltime = ktime_get(); } entry->func(entry->data, entry->cookie); if (initcall_debug && system_state == SYSTEM_BOOTING) { rettime = ktime_get(); delta = ktime_sub(rettime, calltime); printk(KERN_DEBUG "initcall %lli_%pF returned 0 after %lld usecs\n", (long long)entry->cookie, entry->func, (long long)ktime_to_ns(delta) >> 10); } /* 
2) remove self from the pending queues */ spin_lock_irqsave(&async_lock, flags); list_del_init(&entry->domain_list); list_del_init(&entry->global_list); /* 3) free the entry */ kfree(entry); atomic_dec(&entry_count); spin_unlock_irqrestore(&async_lock, flags); /* 4) wake up any waiters */ wake_up(&async_done); } static async_cookie_t __async_schedule(async_func_t func, void *data, struct async_domain *domain) { struct async_entry *entry; unsigned long flags; async_cookie_t newcookie; /* allow irq-off callers */ entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC); /* * If we're out of memory or if there's too much work * pending already, we execute synchronously. */ if (!entry || atomic_read(&entry_count) > MAX_WORK) { kfree(entry); spin_lock_irqsave(&async_lock, flags); newcookie = next_cookie++; spin_unlock_irqrestore(&async_lock, flags); /* low on memory.. run synchronously */ func(data, newcookie); return newcookie; } INIT_LIST_HEAD(&entry->domain_list); INIT_LIST_HEAD(&entry->global_list); INIT_WORK(&entry->work, async_run_entry_fn); entry->func = func; entry->data = data; entry->domain = domain; spin_lock_irqsave(&async_lock, flags); /* allocate cookie and queue */ newcookie = entry->cookie = next_cookie++; list_add_tail(&entry->domain_list, &domain->pending); if (domain->registered) list_add_tail(&entry->global_list, &async_global_pending); atomic_inc(&entry_count); spin_unlock_irqrestore(&async_lock, flags); /* mark that this task has queued an async job, used by module init */ current->flags |= PF_USED_ASYNC; /* schedule for execution */ queue_work(system_unbound_wq, &entry->work); return newcookie; } /** * async_schedule - schedule a function for asynchronous execution * @func: function to execute asynchronously * @data: data pointer to pass to the function * * Returns an async_cookie_t that may be used for checkpointing later. * Note: This function may be called from atomic or non-atomic contexts. 
*/ async_cookie_t async_schedule(async_func_t func, void *data) { return __async_schedule(func, data, &async_dfl_domain); } EXPORT_SYMBOL_GPL(async_schedule); /** * async_schedule_domain - schedule a function for asynchronous execution within a certain domain * @func: function to execute asynchronously * @data: data pointer to pass to the function * @domain: the domain * * Returns an async_cookie_t that may be used for checkpointing later. * @domain may be used in the async_synchronize_*_domain() functions to * wait within a certain synchronization domain rather than globally. A * synchronization domain is specified via @domain. Note: This function * may be called from atomic or non-atomic contexts. */ async_cookie_t async_schedule_domain(async_func_t func, void *data, struct async_domain *domain) { return __async_schedule(func, data, domain); } EXPORT_SYMBOL_GPL(async_schedule_domain); /** * async_synchronize_full - synchronize all asynchronous function calls * * This function waits until all asynchronous function calls have been done. 
*/ void async_synchronize_full(void) { async_synchronize_full_domain(NULL); } EXPORT_SYMBOL_GPL(async_synchronize_full); /** * async_unregister_domain - ensure no more anonymous waiters on this domain * @domain: idle domain to flush out of any async_synchronize_full instances * * async_synchronize_{cookie|full}_domain() are not flushed since callers * of these routines should know the lifetime of @domain * * Prefer ASYNC_DOMAIN_EXCLUSIVE() declarations over flushing */ void async_unregister_domain(struct async_domain *domain) { spin_lock_irq(&async_lock); WARN_ON(!domain->registered || !list_empty(&domain->pending)); domain->registered = 0; spin_unlock_irq(&async_lock); } EXPORT_SYMBOL_GPL(async_unregister_domain); /** * async_synchronize_full_domain - synchronize all asynchronous function within a certain domain * @domain: the domain to synchronize * * This function waits until all asynchronous function calls for the * synchronization domain specified by @domain have been done. */ void async_synchronize_full_domain(struct async_domain *domain) { async_synchronize_cookie_domain(ASYNC_COOKIE_MAX, domain); } EXPORT_SYMBOL_GPL(async_synchronize_full_domain); /** * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing * @cookie: async_cookie_t to use as checkpoint * @domain: the domain to synchronize (%NULL for all registered domains) * * This function waits until all asynchronous function calls for the * synchronization domain specified by @domain submitted prior to @cookie * have been done. 
*/ void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *domain) { ktime_t uninitialized_var(starttime), delta, endtime; if (initcall_debug && system_state == SYSTEM_BOOTING) { printk(KERN_DEBUG "async_waiting @ %i\n", task_pid_nr(current)); starttime = ktime_get(); } wait_event(async_done, lowest_in_progress(domain) >= cookie); if (initcall_debug && system_state == SYSTEM_BOOTING) { endtime = ktime_get(); delta = ktime_sub(endtime, starttime); printk(KERN_DEBUG "async_continuing @ %i after %lli usec\n", task_pid_nr(current), (long long)ktime_to_ns(delta) >> 10); } } EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain); /** * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing * @cookie: async_cookie_t to use as checkpoint * * This function waits until all asynchronous function calls prior to @cookie * have been done. */ void async_synchronize_cookie(async_cookie_t cookie) { async_synchronize_cookie_domain(cookie, &async_dfl_domain); } EXPORT_SYMBOL_GPL(async_synchronize_cookie); /** * current_is_async - is %current an async worker task? * * Returns %true if %current is an async worker task. */ bool current_is_async(void) { struct worker *worker = current_wq_worker(); return worker && worker->current_func == async_run_entry_fn; }
gpl-2.0
FireLord1/android_kernel_samsung_logan2g
drivers/media/dvb/frontends/tda1004x.c
3263
39791
/* Driver for Philips tda1004xh OFDM Demodulator (c) 2003, 2004 Andrew de Quincey & Robert Schlabbach This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * This driver needs external firmware. Please use the commands * "<kerneldir>/Documentation/dvb/get_dvb_firmware tda10045", * "<kerneldir>/Documentation/dvb/get_dvb_firmware tda10046" to * download/extract them, and then copy them to /usr/lib/hotplug/firmware * or /lib/firmware (depending on configuration of firmware hotplug). */ #define TDA10045_DEFAULT_FIRMWARE "dvb-fe-tda10045.fw" #define TDA10046_DEFAULT_FIRMWARE "dvb-fe-tda10046.fw" #include <linux/init.h> #include <linux/module.h> #include <linux/device.h> #include <linux/jiffies.h> #include <linux/string.h> #include <linux/slab.h> #include "dvb_frontend.h" #include "tda1004x.h" static int debug; #define dprintk(args...) 
\ do { \ if (debug) printk(KERN_DEBUG "tda1004x: " args); \ } while (0) #define TDA1004X_CHIPID 0x00 #define TDA1004X_AUTO 0x01 #define TDA1004X_IN_CONF1 0x02 #define TDA1004X_IN_CONF2 0x03 #define TDA1004X_OUT_CONF1 0x04 #define TDA1004X_OUT_CONF2 0x05 #define TDA1004X_STATUS_CD 0x06 #define TDA1004X_CONFC4 0x07 #define TDA1004X_DSSPARE2 0x0C #define TDA10045H_CODE_IN 0x0D #define TDA10045H_FWPAGE 0x0E #define TDA1004X_SCAN_CPT 0x10 #define TDA1004X_DSP_CMD 0x11 #define TDA1004X_DSP_ARG 0x12 #define TDA1004X_DSP_DATA1 0x13 #define TDA1004X_DSP_DATA2 0x14 #define TDA1004X_CONFADC1 0x15 #define TDA1004X_CONFC1 0x16 #define TDA10045H_S_AGC 0x1a #define TDA10046H_AGC_TUN_LEVEL 0x1a #define TDA1004X_SNR 0x1c #define TDA1004X_CONF_TS1 0x1e #define TDA1004X_CONF_TS2 0x1f #define TDA1004X_CBER_RESET 0x20 #define TDA1004X_CBER_MSB 0x21 #define TDA1004X_CBER_LSB 0x22 #define TDA1004X_CVBER_LUT 0x23 #define TDA1004X_VBER_MSB 0x24 #define TDA1004X_VBER_MID 0x25 #define TDA1004X_VBER_LSB 0x26 #define TDA1004X_UNCOR 0x27 #define TDA10045H_CONFPLL_P 0x2D #define TDA10045H_CONFPLL_M_MSB 0x2E #define TDA10045H_CONFPLL_M_LSB 0x2F #define TDA10045H_CONFPLL_N 0x30 #define TDA10046H_CONFPLL1 0x2D #define TDA10046H_CONFPLL2 0x2F #define TDA10046H_CONFPLL3 0x30 #define TDA10046H_TIME_WREF1 0x31 #define TDA10046H_TIME_WREF2 0x32 #define TDA10046H_TIME_WREF3 0x33 #define TDA10046H_TIME_WREF4 0x34 #define TDA10046H_TIME_WREF5 0x35 #define TDA10045H_UNSURW_MSB 0x31 #define TDA10045H_UNSURW_LSB 0x32 #define TDA10045H_WREF_MSB 0x33 #define TDA10045H_WREF_MID 0x34 #define TDA10045H_WREF_LSB 0x35 #define TDA10045H_MUXOUT 0x36 #define TDA1004X_CONFADC2 0x37 #define TDA10045H_IOFFSET 0x38 #define TDA10046H_CONF_TRISTATE1 0x3B #define TDA10046H_CONF_TRISTATE2 0x3C #define TDA10046H_CONF_POLARITY 0x3D #define TDA10046H_FREQ_OFFSET 0x3E #define TDA10046H_GPIO_OUT_SEL 0x41 #define TDA10046H_GPIO_SELECT 0x42 #define TDA10046H_AGC_CONF 0x43 #define TDA10046H_AGC_THR 0x44 #define TDA10046H_AGC_RENORM 
0x45 #define TDA10046H_AGC_GAINS 0x46 #define TDA10046H_AGC_TUN_MIN 0x47 #define TDA10046H_AGC_TUN_MAX 0x48 #define TDA10046H_AGC_IF_MIN 0x49 #define TDA10046H_AGC_IF_MAX 0x4A #define TDA10046H_FREQ_PHY2_MSB 0x4D #define TDA10046H_FREQ_PHY2_LSB 0x4E #define TDA10046H_CVBER_CTRL 0x4F #define TDA10046H_AGC_IF_LEVEL 0x52 #define TDA10046H_CODE_CPT 0x57 #define TDA10046H_CODE_IN 0x58 static int tda1004x_write_byteI(struct tda1004x_state *state, int reg, int data) { int ret; u8 buf[] = { reg, data }; struct i2c_msg msg = { .flags = 0, .buf = buf, .len = 2 }; dprintk("%s: reg=0x%x, data=0x%x\n", __func__, reg, data); msg.addr = state->config->demod_address; ret = i2c_transfer(state->i2c, &msg, 1); if (ret != 1) dprintk("%s: error reg=0x%x, data=0x%x, ret=%i\n", __func__, reg, data, ret); dprintk("%s: success reg=0x%x, data=0x%x, ret=%i\n", __func__, reg, data, ret); return (ret != 1) ? -1 : 0; } static int tda1004x_read_byte(struct tda1004x_state *state, int reg) { int ret; u8 b0[] = { reg }; u8 b1[] = { 0 }; struct i2c_msg msg[] = {{ .flags = 0, .buf = b0, .len = 1 }, { .flags = I2C_M_RD, .buf = b1, .len = 1 }}; dprintk("%s: reg=0x%x\n", __func__, reg); msg[0].addr = state->config->demod_address; msg[1].addr = state->config->demod_address; ret = i2c_transfer(state->i2c, msg, 2); if (ret != 2) { dprintk("%s: error reg=0x%x, ret=%i\n", __func__, reg, ret); return -EINVAL; } dprintk("%s: success reg=0x%x, data=0x%x, ret=%i\n", __func__, reg, b1[0], ret); return b1[0]; } static int tda1004x_write_mask(struct tda1004x_state *state, int reg, int mask, int data) { int val; dprintk("%s: reg=0x%x, mask=0x%x, data=0x%x\n", __func__, reg, mask, data); // read a byte and check val = tda1004x_read_byte(state, reg); if (val < 0) return val; // mask if off val = val & ~mask; val |= data & 0xff; // write it out again return tda1004x_write_byteI(state, reg, val); } static int tda1004x_write_buf(struct tda1004x_state *state, int reg, unsigned char *buf, int len) { int i; int result; 
dprintk("%s: reg=0x%x, len=0x%x\n", __func__, reg, len); result = 0; for (i = 0; i < len; i++) { result = tda1004x_write_byteI(state, reg + i, buf[i]); if (result != 0) break; } return result; } static int tda1004x_enable_tuner_i2c(struct tda1004x_state *state) { int result; dprintk("%s\n", __func__); result = tda1004x_write_mask(state, TDA1004X_CONFC4, 2, 2); msleep(20); return result; } static int tda1004x_disable_tuner_i2c(struct tda1004x_state *state) { dprintk("%s\n", __func__); return tda1004x_write_mask(state, TDA1004X_CONFC4, 2, 0); } static int tda10045h_set_bandwidth(struct tda1004x_state *state, fe_bandwidth_t bandwidth) { static u8 bandwidth_6mhz[] = { 0x02, 0x00, 0x3d, 0x00, 0x60, 0x1e, 0xa7, 0x45, 0x4f }; static u8 bandwidth_7mhz[] = { 0x02, 0x00, 0x37, 0x00, 0x4a, 0x2f, 0x6d, 0x76, 0xdb }; static u8 bandwidth_8mhz[] = { 0x02, 0x00, 0x3d, 0x00, 0x48, 0x17, 0x89, 0xc7, 0x14 }; switch (bandwidth) { case BANDWIDTH_6_MHZ: tda1004x_write_buf(state, TDA10045H_CONFPLL_P, bandwidth_6mhz, sizeof(bandwidth_6mhz)); break; case BANDWIDTH_7_MHZ: tda1004x_write_buf(state, TDA10045H_CONFPLL_P, bandwidth_7mhz, sizeof(bandwidth_7mhz)); break; case BANDWIDTH_8_MHZ: tda1004x_write_buf(state, TDA10045H_CONFPLL_P, bandwidth_8mhz, sizeof(bandwidth_8mhz)); break; default: return -EINVAL; } tda1004x_write_byteI(state, TDA10045H_IOFFSET, 0); return 0; } static int tda10046h_set_bandwidth(struct tda1004x_state *state, fe_bandwidth_t bandwidth) { static u8 bandwidth_6mhz_53M[] = { 0x7b, 0x2e, 0x11, 0xf0, 0xd2 }; static u8 bandwidth_7mhz_53M[] = { 0x6a, 0x02, 0x6a, 0x43, 0x9f }; static u8 bandwidth_8mhz_53M[] = { 0x5c, 0x32, 0xc2, 0x96, 0x6d }; static u8 bandwidth_6mhz_48M[] = { 0x70, 0x02, 0x49, 0x24, 0x92 }; static u8 bandwidth_7mhz_48M[] = { 0x60, 0x02, 0xaa, 0xaa, 0xab }; static u8 bandwidth_8mhz_48M[] = { 0x54, 0x03, 0x0c, 0x30, 0xc3 }; int tda10046_clk53m; if ((state->config->if_freq == TDA10046_FREQ_045) || (state->config->if_freq == TDA10046_FREQ_052)) tda10046_clk53m = 
0; else tda10046_clk53m = 1; switch (bandwidth) { case BANDWIDTH_6_MHZ: if (tda10046_clk53m) tda1004x_write_buf(state, TDA10046H_TIME_WREF1, bandwidth_6mhz_53M, sizeof(bandwidth_6mhz_53M)); else tda1004x_write_buf(state, TDA10046H_TIME_WREF1, bandwidth_6mhz_48M, sizeof(bandwidth_6mhz_48M)); if (state->config->if_freq == TDA10046_FREQ_045) { tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_MSB, 0x0a); tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_LSB, 0xab); } break; case BANDWIDTH_7_MHZ: if (tda10046_clk53m) tda1004x_write_buf(state, TDA10046H_TIME_WREF1, bandwidth_7mhz_53M, sizeof(bandwidth_7mhz_53M)); else tda1004x_write_buf(state, TDA10046H_TIME_WREF1, bandwidth_7mhz_48M, sizeof(bandwidth_7mhz_48M)); if (state->config->if_freq == TDA10046_FREQ_045) { tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_MSB, 0x0c); tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_LSB, 0x00); } break; case BANDWIDTH_8_MHZ: if (tda10046_clk53m) tda1004x_write_buf(state, TDA10046H_TIME_WREF1, bandwidth_8mhz_53M, sizeof(bandwidth_8mhz_53M)); else tda1004x_write_buf(state, TDA10046H_TIME_WREF1, bandwidth_8mhz_48M, sizeof(bandwidth_8mhz_48M)); if (state->config->if_freq == TDA10046_FREQ_045) { tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_MSB, 0x0d); tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_LSB, 0x55); } break; default: return -EINVAL; } return 0; } static int tda1004x_do_upload(struct tda1004x_state *state, const unsigned char *mem, unsigned int len, u8 dspCodeCounterReg, u8 dspCodeInReg) { u8 buf[65]; struct i2c_msg fw_msg = { .flags = 0, .buf = buf, .len = 0 }; int tx_size; int pos = 0; /* clear code counter */ tda1004x_write_byteI(state, dspCodeCounterReg, 0); fw_msg.addr = state->config->demod_address; buf[0] = dspCodeInReg; while (pos != len) { // work out how much to send this time tx_size = len - pos; if (tx_size > 0x10) tx_size = 0x10; // send the chunk memcpy(buf + 1, mem + pos, tx_size); fw_msg.len = tx_size + 1; if (i2c_transfer(state->i2c, &fw_msg, 1) != 1) { 
printk(KERN_ERR "tda1004x: Error during firmware upload\n"); return -EIO; } pos += tx_size; dprintk("%s: fw_pos=0x%x\n", __func__, pos); } // give the DSP a chance to settle 03/10/05 Hac msleep(100); return 0; } static int tda1004x_check_upload_ok(struct tda1004x_state *state) { u8 data1, data2; unsigned long timeout; if (state->demod_type == TDA1004X_DEMOD_TDA10046) { timeout = jiffies + 2 * HZ; while(!(tda1004x_read_byte(state, TDA1004X_STATUS_CD) & 0x20)) { if (time_after(jiffies, timeout)) { printk(KERN_ERR "tda1004x: timeout waiting for DSP ready\n"); break; } msleep(1); } } else msleep(100); // check upload was OK tda1004x_write_mask(state, TDA1004X_CONFC4, 0x10, 0); // we want to read from the DSP tda1004x_write_byteI(state, TDA1004X_DSP_CMD, 0x67); data1 = tda1004x_read_byte(state, TDA1004X_DSP_DATA1); data2 = tda1004x_read_byte(state, TDA1004X_DSP_DATA2); if (data1 != 0x67 || data2 < 0x20 || data2 > 0x2e) { printk(KERN_INFO "tda1004x: found firmware revision %x -- invalid\n", data2); return -EIO; } printk(KERN_INFO "tda1004x: found firmware revision %x -- ok\n", data2); return 0; } static int tda10045_fwupload(struct dvb_frontend* fe) { struct tda1004x_state* state = fe->demodulator_priv; int ret; const struct firmware *fw; /* don't re-upload unless necessary */ if (tda1004x_check_upload_ok(state) == 0) return 0; /* request the firmware, this will block until someone uploads it */ printk(KERN_INFO "tda1004x: waiting for firmware upload (%s)...\n", TDA10045_DEFAULT_FIRMWARE); ret = state->config->request_firmware(fe, &fw, TDA10045_DEFAULT_FIRMWARE); if (ret) { printk(KERN_ERR "tda1004x: no firmware upload (timeout or file not found?)\n"); return ret; } /* reset chip */ tda1004x_write_mask(state, TDA1004X_CONFC4, 0x10, 0); tda1004x_write_mask(state, TDA1004X_CONFC4, 8, 8); tda1004x_write_mask(state, TDA1004X_CONFC4, 8, 0); msleep(10); /* set parameters */ tda10045h_set_bandwidth(state, BANDWIDTH_8_MHZ); ret = tda1004x_do_upload(state, fw->data, fw->size, 
TDA10045H_FWPAGE, TDA10045H_CODE_IN); release_firmware(fw); if (ret) return ret; printk(KERN_INFO "tda1004x: firmware upload complete\n"); /* wait for DSP to initialise */ /* DSPREADY doesn't seem to work on the TDA10045H */ msleep(100); return tda1004x_check_upload_ok(state); } static void tda10046_init_plls(struct dvb_frontend* fe) { struct tda1004x_state* state = fe->demodulator_priv; int tda10046_clk53m; if ((state->config->if_freq == TDA10046_FREQ_045) || (state->config->if_freq == TDA10046_FREQ_052)) tda10046_clk53m = 0; else tda10046_clk53m = 1; tda1004x_write_byteI(state, TDA10046H_CONFPLL1, 0xf0); if(tda10046_clk53m) { printk(KERN_INFO "tda1004x: setting up plls for 53MHz sampling clock\n"); tda1004x_write_byteI(state, TDA10046H_CONFPLL2, 0x08); // PLL M = 8 } else { printk(KERN_INFO "tda1004x: setting up plls for 48MHz sampling clock\n"); tda1004x_write_byteI(state, TDA10046H_CONFPLL2, 0x03); // PLL M = 3 } if (state->config->xtal_freq == TDA10046_XTAL_4M ) { dprintk("%s: setting up PLLs for a 4 MHz Xtal\n", __func__); tda1004x_write_byteI(state, TDA10046H_CONFPLL3, 0); // PLL P = N = 0 } else { dprintk("%s: setting up PLLs for a 16 MHz Xtal\n", __func__); tda1004x_write_byteI(state, TDA10046H_CONFPLL3, 3); // PLL P = 0, N = 3 } if(tda10046_clk53m) tda1004x_write_byteI(state, TDA10046H_FREQ_OFFSET, 0x67); else tda1004x_write_byteI(state, TDA10046H_FREQ_OFFSET, 0x72); /* Note clock frequency is handled implicitly */ switch (state->config->if_freq) { case TDA10046_FREQ_045: tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_MSB, 0x0c); tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_LSB, 0x00); break; case TDA10046_FREQ_052: tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_MSB, 0x0d); tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_LSB, 0xc7); break; case TDA10046_FREQ_3617: tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_MSB, 0xd7); tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_LSB, 0x59); break; case TDA10046_FREQ_3613: tda1004x_write_byteI(state, 
TDA10046H_FREQ_PHY2_MSB, 0xd7); tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_LSB, 0x3f); break; } tda10046h_set_bandwidth(state, BANDWIDTH_8_MHZ); // default bandwidth 8 MHz /* let the PLLs settle */ msleep(120); } static int tda10046_fwupload(struct dvb_frontend* fe) { struct tda1004x_state* state = fe->demodulator_priv; int ret, confc4; const struct firmware *fw; /* reset + wake up chip */ if (state->config->xtal_freq == TDA10046_XTAL_4M) { confc4 = 0; } else { dprintk("%s: 16MHz Xtal, reducing I2C speed\n", __func__); confc4 = 0x80; } tda1004x_write_byteI(state, TDA1004X_CONFC4, confc4); tda1004x_write_mask(state, TDA10046H_CONF_TRISTATE1, 1, 0); /* set GPIO 1 and 3 */ if (state->config->gpio_config != TDA10046_GPTRI) { tda1004x_write_byteI(state, TDA10046H_CONF_TRISTATE2, 0x33); tda1004x_write_mask(state, TDA10046H_CONF_POLARITY, 0x0f, state->config->gpio_config &0x0f); } /* let the clocks recover from sleep */ msleep(10); /* The PLLs need to be reprogrammed after sleep */ tda10046_init_plls(fe); tda1004x_write_mask(state, TDA1004X_CONFADC2, 0xc0, 0); /* don't re-upload unless necessary */ if (tda1004x_check_upload_ok(state) == 0) return 0; /* For i2c normal work, we need to slow down the bus speed. However, the slow down breaks the eeprom firmware load. So, use normal speed for eeprom booting and then restore the i2c speed after that. Tested with MSI TV @nyware A/D board, that comes with firmware version 29 inside their eeprom. It should also be noticed that no other I2C transfer should be in course while booting from eeprom, otherwise, tda10046 goes into an instable state. So, proper locking are needed at the i2c bus master. */ printk(KERN_INFO "tda1004x: trying to boot from eeprom\n"); tda1004x_write_byteI(state, TDA1004X_CONFC4, 4); msleep(300); tda1004x_write_byteI(state, TDA1004X_CONFC4, confc4); /* Checks if eeprom firmware went without troubles */ if (tda1004x_check_upload_ok(state) == 0) return 0; /* eeprom firmware didn't work. Load one manually. 
*/ if (state->config->request_firmware != NULL) { /* request the firmware, this will block until someone uploads it */ printk(KERN_INFO "tda1004x: waiting for firmware upload...\n"); ret = state->config->request_firmware(fe, &fw, TDA10046_DEFAULT_FIRMWARE); if (ret) { /* remain compatible to old bug: try to load with tda10045 image name */ ret = state->config->request_firmware(fe, &fw, TDA10045_DEFAULT_FIRMWARE); if (ret) { printk(KERN_ERR "tda1004x: no firmware upload (timeout or file not found?)\n"); return ret; } else { printk(KERN_INFO "tda1004x: please rename the firmware file to %s\n", TDA10046_DEFAULT_FIRMWARE); } } } else { printk(KERN_ERR "tda1004x: no request function defined, can't upload from file\n"); return -EIO; } tda1004x_write_mask(state, TDA1004X_CONFC4, 8, 8); // going to boot from HOST ret = tda1004x_do_upload(state, fw->data, fw->size, TDA10046H_CODE_CPT, TDA10046H_CODE_IN); release_firmware(fw); return tda1004x_check_upload_ok(state); } static int tda1004x_encode_fec(int fec) { // convert known FEC values switch (fec) { case FEC_1_2: return 0; case FEC_2_3: return 1; case FEC_3_4: return 2; case FEC_5_6: return 3; case FEC_7_8: return 4; } // unsupported return -EINVAL; } static int tda1004x_decode_fec(int tdafec) { // convert known FEC values switch (tdafec) { case 0: return FEC_1_2; case 1: return FEC_2_3; case 2: return FEC_3_4; case 3: return FEC_5_6; case 4: return FEC_7_8; } // unsupported return -1; } static int tda1004x_write(struct dvb_frontend* fe, const u8 buf[], int len) { struct tda1004x_state* state = fe->demodulator_priv; if (len != 2) return -EINVAL; return tda1004x_write_byteI(state, buf[0], buf[1]); } static int tda10045_init(struct dvb_frontend* fe) { struct tda1004x_state* state = fe->demodulator_priv; dprintk("%s\n", __func__); if (tda10045_fwupload(fe)) { printk("tda1004x: firmware upload failed\n"); return -EIO; } tda1004x_write_mask(state, TDA1004X_CONFADC1, 0x10, 0); // wake up the ADC // tda setup 
tda1004x_write_mask(state, TDA1004X_CONFC4, 0x20, 0); // disable DSP watchdog timer tda1004x_write_mask(state, TDA1004X_AUTO, 8, 0); // select HP stream tda1004x_write_mask(state, TDA1004X_CONFC1, 0x40, 0); // set polarity of VAGC signal tda1004x_write_mask(state, TDA1004X_CONFC1, 0x80, 0x80); // enable pulse killer tda1004x_write_mask(state, TDA1004X_AUTO, 0x10, 0x10); // enable auto offset tda1004x_write_mask(state, TDA1004X_IN_CONF2, 0xC0, 0x0); // no frequency offset tda1004x_write_byteI(state, TDA1004X_CONF_TS1, 0); // setup MPEG2 TS interface tda1004x_write_byteI(state, TDA1004X_CONF_TS2, 0); // setup MPEG2 TS interface tda1004x_write_mask(state, TDA1004X_VBER_MSB, 0xe0, 0xa0); // 10^6 VBER measurement bits tda1004x_write_mask(state, TDA1004X_CONFC1, 0x10, 0); // VAGC polarity tda1004x_write_byteI(state, TDA1004X_CONFADC1, 0x2e); tda1004x_write_mask(state, 0x1f, 0x01, state->config->invert_oclk); return 0; } static int tda10046_init(struct dvb_frontend* fe) { struct tda1004x_state* state = fe->demodulator_priv; dprintk("%s\n", __func__); if (tda10046_fwupload(fe)) { printk("tda1004x: firmware upload failed\n"); return -EIO; } // tda setup tda1004x_write_mask(state, TDA1004X_CONFC4, 0x20, 0); // disable DSP watchdog timer tda1004x_write_byteI(state, TDA1004X_AUTO, 0x87); // 100 ppm crystal, select HP stream tda1004x_write_byteI(state, TDA1004X_CONFC1, 0x88); // enable pulse killer switch (state->config->agc_config) { case TDA10046_AGC_DEFAULT: tda1004x_write_byteI(state, TDA10046H_AGC_CONF, 0x00); // AGC setup tda1004x_write_mask(state, TDA10046H_CONF_POLARITY, 0xf0, 0x60); // set AGC polarities break; case TDA10046_AGC_IFO_AUTO_NEG: tda1004x_write_byteI(state, TDA10046H_AGC_CONF, 0x0a); // AGC setup tda1004x_write_mask(state, TDA10046H_CONF_POLARITY, 0xf0, 0x60); // set AGC polarities break; case TDA10046_AGC_IFO_AUTO_POS: tda1004x_write_byteI(state, TDA10046H_AGC_CONF, 0x0a); // AGC setup tda1004x_write_mask(state, TDA10046H_CONF_POLARITY, 0xf0, 0x00); // 
set AGC polarities break; case TDA10046_AGC_TDA827X: tda1004x_write_byteI(state, TDA10046H_AGC_CONF, 0x02); // AGC setup tda1004x_write_byteI(state, TDA10046H_AGC_THR, 0x70); // AGC Threshold tda1004x_write_byteI(state, TDA10046H_AGC_RENORM, 0x08); // Gain Renormalize tda1004x_write_mask(state, TDA10046H_CONF_POLARITY, 0xf0, 0x60); // set AGC polarities break; } if (state->config->ts_mode == 0) { tda1004x_write_mask(state, TDA10046H_CONF_TRISTATE1, 0xc0, 0x40); tda1004x_write_mask(state, 0x3a, 0x80, state->config->invert_oclk << 7); } else { tda1004x_write_mask(state, TDA10046H_CONF_TRISTATE1, 0xc0, 0x80); tda1004x_write_mask(state, TDA10046H_CONF_POLARITY, 0x10, state->config->invert_oclk << 4); } tda1004x_write_byteI(state, TDA1004X_CONFADC2, 0x38); tda1004x_write_mask (state, TDA10046H_CONF_TRISTATE1, 0x3e, 0x38); // Turn IF AGC output on tda1004x_write_byteI(state, TDA10046H_AGC_TUN_MIN, 0); // } tda1004x_write_byteI(state, TDA10046H_AGC_TUN_MAX, 0xff); // } AGC min/max values tda1004x_write_byteI(state, TDA10046H_AGC_IF_MIN, 0); // } tda1004x_write_byteI(state, TDA10046H_AGC_IF_MAX, 0xff); // } tda1004x_write_byteI(state, TDA10046H_AGC_GAINS, 0x12); // IF gain 2, TUN gain 1 tda1004x_write_byteI(state, TDA10046H_CVBER_CTRL, 0x1a); // 10^6 VBER measurement bits tda1004x_write_byteI(state, TDA1004X_CONF_TS1, 7); // MPEG2 interface config tda1004x_write_byteI(state, TDA1004X_CONF_TS2, 0xc0); // MPEG2 interface config // tda1004x_write_mask(state, 0x50, 0x80, 0x80); // handle out of guard echoes return 0; } static int tda1004x_set_fe(struct dvb_frontend* fe, struct dvb_frontend_parameters *fe_params) { struct tda1004x_state* state = fe->demodulator_priv; int tmp; int inversion; dprintk("%s\n", __func__); if (state->demod_type == TDA1004X_DEMOD_TDA10046) { // setup auto offset tda1004x_write_mask(state, TDA1004X_AUTO, 0x10, 0x10); tda1004x_write_mask(state, TDA1004X_IN_CONF1, 0x80, 0); tda1004x_write_mask(state, TDA1004X_IN_CONF2, 0xC0, 0); // disable agc_conf[2] 
tda1004x_write_mask(state, TDA10046H_AGC_CONF, 4, 0); } // set frequency if (fe->ops.tuner_ops.set_params) { fe->ops.tuner_ops.set_params(fe, fe_params); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); } // Hardcoded to use auto as much as possible on the TDA10045 as it // is very unreliable if AUTO mode is _not_ used. if (state->demod_type == TDA1004X_DEMOD_TDA10045) { fe_params->u.ofdm.code_rate_HP = FEC_AUTO; fe_params->u.ofdm.guard_interval = GUARD_INTERVAL_AUTO; fe_params->u.ofdm.transmission_mode = TRANSMISSION_MODE_AUTO; } // Set standard params.. or put them to auto if ((fe_params->u.ofdm.code_rate_HP == FEC_AUTO) || (fe_params->u.ofdm.code_rate_LP == FEC_AUTO) || (fe_params->u.ofdm.constellation == QAM_AUTO) || (fe_params->u.ofdm.hierarchy_information == HIERARCHY_AUTO)) { tda1004x_write_mask(state, TDA1004X_AUTO, 1, 1); // enable auto tda1004x_write_mask(state, TDA1004X_IN_CONF1, 0x03, 0); // turn off constellation bits tda1004x_write_mask(state, TDA1004X_IN_CONF1, 0x60, 0); // turn off hierarchy bits tda1004x_write_mask(state, TDA1004X_IN_CONF2, 0x3f, 0); // turn off FEC bits } else { tda1004x_write_mask(state, TDA1004X_AUTO, 1, 0); // disable auto // set HP FEC tmp = tda1004x_encode_fec(fe_params->u.ofdm.code_rate_HP); if (tmp < 0) return tmp; tda1004x_write_mask(state, TDA1004X_IN_CONF2, 7, tmp); // set LP FEC tmp = tda1004x_encode_fec(fe_params->u.ofdm.code_rate_LP); if (tmp < 0) return tmp; tda1004x_write_mask(state, TDA1004X_IN_CONF2, 0x38, tmp << 3); // set constellation switch (fe_params->u.ofdm.constellation) { case QPSK: tda1004x_write_mask(state, TDA1004X_IN_CONF1, 3, 0); break; case QAM_16: tda1004x_write_mask(state, TDA1004X_IN_CONF1, 3, 1); break; case QAM_64: tda1004x_write_mask(state, TDA1004X_IN_CONF1, 3, 2); break; default: return -EINVAL; } // set hierarchy switch (fe_params->u.ofdm.hierarchy_information) { case HIERARCHY_NONE: tda1004x_write_mask(state, TDA1004X_IN_CONF1, 0x60, 0 << 5); break; case HIERARCHY_1: 
tda1004x_write_mask(state, TDA1004X_IN_CONF1, 0x60, 1 << 5); break; case HIERARCHY_2: tda1004x_write_mask(state, TDA1004X_IN_CONF1, 0x60, 2 << 5); break; case HIERARCHY_4: tda1004x_write_mask(state, TDA1004X_IN_CONF1, 0x60, 3 << 5); break; default: return -EINVAL; } } // set bandwidth switch (state->demod_type) { case TDA1004X_DEMOD_TDA10045: tda10045h_set_bandwidth(state, fe_params->u.ofdm.bandwidth); break; case TDA1004X_DEMOD_TDA10046: tda10046h_set_bandwidth(state, fe_params->u.ofdm.bandwidth); break; } // set inversion inversion = fe_params->inversion; if (state->config->invert) inversion = inversion ? INVERSION_OFF : INVERSION_ON; switch (inversion) { case INVERSION_OFF: tda1004x_write_mask(state, TDA1004X_CONFC1, 0x20, 0); break; case INVERSION_ON: tda1004x_write_mask(state, TDA1004X_CONFC1, 0x20, 0x20); break; default: return -EINVAL; } // set guard interval switch (fe_params->u.ofdm.guard_interval) { case GUARD_INTERVAL_1_32: tda1004x_write_mask(state, TDA1004X_AUTO, 2, 0); tda1004x_write_mask(state, TDA1004X_IN_CONF1, 0x0c, 0 << 2); break; case GUARD_INTERVAL_1_16: tda1004x_write_mask(state, TDA1004X_AUTO, 2, 0); tda1004x_write_mask(state, TDA1004X_IN_CONF1, 0x0c, 1 << 2); break; case GUARD_INTERVAL_1_8: tda1004x_write_mask(state, TDA1004X_AUTO, 2, 0); tda1004x_write_mask(state, TDA1004X_IN_CONF1, 0x0c, 2 << 2); break; case GUARD_INTERVAL_1_4: tda1004x_write_mask(state, TDA1004X_AUTO, 2, 0); tda1004x_write_mask(state, TDA1004X_IN_CONF1, 0x0c, 3 << 2); break; case GUARD_INTERVAL_AUTO: tda1004x_write_mask(state, TDA1004X_AUTO, 2, 2); tda1004x_write_mask(state, TDA1004X_IN_CONF1, 0x0c, 0 << 2); break; default: return -EINVAL; } // set transmission mode switch (fe_params->u.ofdm.transmission_mode) { case TRANSMISSION_MODE_2K: tda1004x_write_mask(state, TDA1004X_AUTO, 4, 0); tda1004x_write_mask(state, TDA1004X_IN_CONF1, 0x10, 0 << 4); break; case TRANSMISSION_MODE_8K: tda1004x_write_mask(state, TDA1004X_AUTO, 4, 0); tda1004x_write_mask(state, 
TDA1004X_IN_CONF1, 0x10, 1 << 4); break; case TRANSMISSION_MODE_AUTO: tda1004x_write_mask(state, TDA1004X_AUTO, 4, 4); tda1004x_write_mask(state, TDA1004X_IN_CONF1, 0x10, 0); break; default: return -EINVAL; } // start the lock switch (state->demod_type) { case TDA1004X_DEMOD_TDA10045: tda1004x_write_mask(state, TDA1004X_CONFC4, 8, 8); tda1004x_write_mask(state, TDA1004X_CONFC4, 8, 0); break; case TDA1004X_DEMOD_TDA10046: tda1004x_write_mask(state, TDA1004X_AUTO, 0x40, 0x40); msleep(1); tda1004x_write_mask(state, TDA10046H_AGC_CONF, 4, 1); break; } msleep(10); return 0; } static int tda1004x_get_fe(struct dvb_frontend* fe, struct dvb_frontend_parameters *fe_params) { struct tda1004x_state* state = fe->demodulator_priv; dprintk("%s\n", __func__); // inversion status fe_params->inversion = INVERSION_OFF; if (tda1004x_read_byte(state, TDA1004X_CONFC1) & 0x20) fe_params->inversion = INVERSION_ON; if (state->config->invert) fe_params->inversion = fe_params->inversion ? INVERSION_OFF : INVERSION_ON; // bandwidth switch (state->demod_type) { case TDA1004X_DEMOD_TDA10045: switch (tda1004x_read_byte(state, TDA10045H_WREF_LSB)) { case 0x14: fe_params->u.ofdm.bandwidth = BANDWIDTH_8_MHZ; break; case 0xdb: fe_params->u.ofdm.bandwidth = BANDWIDTH_7_MHZ; break; case 0x4f: fe_params->u.ofdm.bandwidth = BANDWIDTH_6_MHZ; break; } break; case TDA1004X_DEMOD_TDA10046: switch (tda1004x_read_byte(state, TDA10046H_TIME_WREF1)) { case 0x5c: case 0x54: fe_params->u.ofdm.bandwidth = BANDWIDTH_8_MHZ; break; case 0x6a: case 0x60: fe_params->u.ofdm.bandwidth = BANDWIDTH_7_MHZ; break; case 0x7b: case 0x70: fe_params->u.ofdm.bandwidth = BANDWIDTH_6_MHZ; break; } break; } // FEC fe_params->u.ofdm.code_rate_HP = tda1004x_decode_fec(tda1004x_read_byte(state, TDA1004X_OUT_CONF2) & 7); fe_params->u.ofdm.code_rate_LP = tda1004x_decode_fec((tda1004x_read_byte(state, TDA1004X_OUT_CONF2) >> 3) & 7); // constellation switch (tda1004x_read_byte(state, TDA1004X_OUT_CONF1) & 3) { case 0: 
fe_params->u.ofdm.constellation = QPSK; break; case 1: fe_params->u.ofdm.constellation = QAM_16; break; case 2: fe_params->u.ofdm.constellation = QAM_64; break; } // transmission mode fe_params->u.ofdm.transmission_mode = TRANSMISSION_MODE_2K; if (tda1004x_read_byte(state, TDA1004X_OUT_CONF1) & 0x10) fe_params->u.ofdm.transmission_mode = TRANSMISSION_MODE_8K; // guard interval switch ((tda1004x_read_byte(state, TDA1004X_OUT_CONF1) & 0x0c) >> 2) { case 0: fe_params->u.ofdm.guard_interval = GUARD_INTERVAL_1_32; break; case 1: fe_params->u.ofdm.guard_interval = GUARD_INTERVAL_1_16; break; case 2: fe_params->u.ofdm.guard_interval = GUARD_INTERVAL_1_8; break; case 3: fe_params->u.ofdm.guard_interval = GUARD_INTERVAL_1_4; break; } // hierarchy switch ((tda1004x_read_byte(state, TDA1004X_OUT_CONF1) & 0x60) >> 5) { case 0: fe_params->u.ofdm.hierarchy_information = HIERARCHY_NONE; break; case 1: fe_params->u.ofdm.hierarchy_information = HIERARCHY_1; break; case 2: fe_params->u.ofdm.hierarchy_information = HIERARCHY_2; break; case 3: fe_params->u.ofdm.hierarchy_information = HIERARCHY_4; break; } return 0; } static int tda1004x_read_status(struct dvb_frontend* fe, fe_status_t * fe_status) { struct tda1004x_state* state = fe->demodulator_priv; int status; int cber; int vber; dprintk("%s\n", __func__); // read status status = tda1004x_read_byte(state, TDA1004X_STATUS_CD); if (status == -1) return -EIO; // decode *fe_status = 0; if (status & 4) *fe_status |= FE_HAS_SIGNAL; if (status & 2) *fe_status |= FE_HAS_CARRIER; if (status & 8) *fe_status |= FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK; // if we don't already have VITERBI (i.e. 
not LOCKED), see if the viterbi // is getting anything valid if (!(*fe_status & FE_HAS_VITERBI)) { // read the CBER cber = tda1004x_read_byte(state, TDA1004X_CBER_LSB); if (cber == -1) return -EIO; status = tda1004x_read_byte(state, TDA1004X_CBER_MSB); if (status == -1) return -EIO; cber |= (status << 8); // The address 0x20 should be read to cope with a TDA10046 bug tda1004x_read_byte(state, TDA1004X_CBER_RESET); if (cber != 65535) *fe_status |= FE_HAS_VITERBI; } // if we DO have some valid VITERBI output, but don't already have SYNC // bytes (i.e. not LOCKED), see if the RS decoder is getting anything valid. if ((*fe_status & FE_HAS_VITERBI) && (!(*fe_status & FE_HAS_SYNC))) { // read the VBER vber = tda1004x_read_byte(state, TDA1004X_VBER_LSB); if (vber == -1) return -EIO; status = tda1004x_read_byte(state, TDA1004X_VBER_MID); if (status == -1) return -EIO; vber |= (status << 8); status = tda1004x_read_byte(state, TDA1004X_VBER_MSB); if (status == -1) return -EIO; vber |= (status & 0x0f) << 16; // The CVBER_LUT should be read to cope with TDA10046 hardware bug tda1004x_read_byte(state, TDA1004X_CVBER_LUT); // if RS has passed some valid TS packets, then we must be // getting some SYNC bytes if (vber < 16632) *fe_status |= FE_HAS_SYNC; } // success dprintk("%s: fe_status=0x%x\n", __func__, *fe_status); return 0; } static int tda1004x_read_signal_strength(struct dvb_frontend* fe, u16 * signal) { struct tda1004x_state* state = fe->demodulator_priv; int tmp; int reg = 0; dprintk("%s\n", __func__); // determine the register to use switch (state->demod_type) { case TDA1004X_DEMOD_TDA10045: reg = TDA10045H_S_AGC; break; case TDA1004X_DEMOD_TDA10046: reg = TDA10046H_AGC_IF_LEVEL; break; } // read it tmp = tda1004x_read_byte(state, reg); if (tmp < 0) return -EIO; *signal = (tmp << 8) | tmp; dprintk("%s: signal=0x%x\n", __func__, *signal); return 0; } static int tda1004x_read_snr(struct dvb_frontend* fe, u16 * snr) { struct tda1004x_state* state = fe->demodulator_priv; 
int tmp; dprintk("%s\n", __func__); // read it tmp = tda1004x_read_byte(state, TDA1004X_SNR); if (tmp < 0) return -EIO; tmp = 255 - tmp; *snr = ((tmp << 8) | tmp); dprintk("%s: snr=0x%x\n", __func__, *snr); return 0; } static int tda1004x_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks) { struct tda1004x_state* state = fe->demodulator_priv; int tmp; int tmp2; int counter; dprintk("%s\n", __func__); // read the UCBLOCKS and reset counter = 0; tmp = tda1004x_read_byte(state, TDA1004X_UNCOR); if (tmp < 0) return -EIO; tmp &= 0x7f; while (counter++ < 5) { tda1004x_write_mask(state, TDA1004X_UNCOR, 0x80, 0); tda1004x_write_mask(state, TDA1004X_UNCOR, 0x80, 0); tda1004x_write_mask(state, TDA1004X_UNCOR, 0x80, 0); tmp2 = tda1004x_read_byte(state, TDA1004X_UNCOR); if (tmp2 < 0) return -EIO; tmp2 &= 0x7f; if ((tmp2 < tmp) || (tmp2 == 0)) break; } if (tmp != 0x7f) *ucblocks = tmp; else *ucblocks = 0xffffffff; dprintk("%s: ucblocks=0x%x\n", __func__, *ucblocks); return 0; } static int tda1004x_read_ber(struct dvb_frontend* fe, u32* ber) { struct tda1004x_state* state = fe->demodulator_priv; int tmp; dprintk("%s\n", __func__); // read it in tmp = tda1004x_read_byte(state, TDA1004X_CBER_LSB); if (tmp < 0) return -EIO; *ber = tmp << 1; tmp = tda1004x_read_byte(state, TDA1004X_CBER_MSB); if (tmp < 0) return -EIO; *ber |= (tmp << 9); // The address 0x20 should be read to cope with a TDA10046 bug tda1004x_read_byte(state, TDA1004X_CBER_RESET); dprintk("%s: ber=0x%x\n", __func__, *ber); return 0; } static int tda1004x_sleep(struct dvb_frontend* fe) { struct tda1004x_state* state = fe->demodulator_priv; int gpio_conf; switch (state->demod_type) { case TDA1004X_DEMOD_TDA10045: tda1004x_write_mask(state, TDA1004X_CONFADC1, 0x10, 0x10); break; case TDA1004X_DEMOD_TDA10046: /* set outputs to tristate */ tda1004x_write_byteI(state, TDA10046H_CONF_TRISTATE1, 0xff); /* invert GPIO 1 and 3 if desired*/ gpio_conf = state->config->gpio_config; if (gpio_conf >= TDA10046_GP00_I) 
tda1004x_write_mask(state, TDA10046H_CONF_POLARITY, 0x0f, (gpio_conf & 0x0f) ^ 0x0a); tda1004x_write_mask(state, TDA1004X_CONFADC2, 0xc0, 0xc0); tda1004x_write_mask(state, TDA1004X_CONFC4, 1, 1); break; } return 0; } static int tda1004x_i2c_gate_ctrl(struct dvb_frontend* fe, int enable) { struct tda1004x_state* state = fe->demodulator_priv; if (enable) { return tda1004x_enable_tuner_i2c(state); } else { return tda1004x_disable_tuner_i2c(state); } } static int tda1004x_get_tune_settings(struct dvb_frontend* fe, struct dvb_frontend_tune_settings* fesettings) { fesettings->min_delay_ms = 800; /* Drift compensation makes no sense for DVB-T */ fesettings->step_size = 0; fesettings->max_drift = 0; return 0; } static void tda1004x_release(struct dvb_frontend* fe) { struct tda1004x_state *state = fe->demodulator_priv; kfree(state); } static struct dvb_frontend_ops tda10045_ops = { .info = { .name = "Philips TDA10045H DVB-T", .type = FE_OFDM, .frequency_min = 51000000, .frequency_max = 858000000, .frequency_stepsize = 166667, .caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO | FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO | FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_GUARD_INTERVAL_AUTO }, .release = tda1004x_release, .init = tda10045_init, .sleep = tda1004x_sleep, .write = tda1004x_write, .i2c_gate_ctrl = tda1004x_i2c_gate_ctrl, .set_frontend = tda1004x_set_fe, .get_frontend = tda1004x_get_fe, .get_tune_settings = tda1004x_get_tune_settings, .read_status = tda1004x_read_status, .read_ber = tda1004x_read_ber, .read_signal_strength = tda1004x_read_signal_strength, .read_snr = tda1004x_read_snr, .read_ucblocks = tda1004x_read_ucblocks, }; struct dvb_frontend* tda10045_attach(const struct tda1004x_config* config, struct i2c_adapter* i2c) { struct tda1004x_state *state; int id; /* allocate memory for the internal state */ state = kzalloc(sizeof(struct tda1004x_state), GFP_KERNEL); if (!state) { 
printk(KERN_ERR "Can't alocate memory for tda10045 state\n"); return NULL; } /* setup the state */ state->config = config; state->i2c = i2c; state->demod_type = TDA1004X_DEMOD_TDA10045; /* check if the demod is there */ id = tda1004x_read_byte(state, TDA1004X_CHIPID); if (id < 0) { printk(KERN_ERR "tda10045: chip is not answering. Giving up.\n"); kfree(state); return NULL; } if (id != 0x25) { printk(KERN_ERR "Invalid tda1004x ID = 0x%02x. Can't proceed\n", id); kfree(state); return NULL; } /* create dvb_frontend */ memcpy(&state->frontend.ops, &tda10045_ops, sizeof(struct dvb_frontend_ops)); state->frontend.demodulator_priv = state; return &state->frontend; } static struct dvb_frontend_ops tda10046_ops = { .info = { .name = "Philips TDA10046H DVB-T", .type = FE_OFDM, .frequency_min = 51000000, .frequency_max = 858000000, .frequency_stepsize = 166667, .caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO | FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO | FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_GUARD_INTERVAL_AUTO }, .release = tda1004x_release, .init = tda10046_init, .sleep = tda1004x_sleep, .write = tda1004x_write, .i2c_gate_ctrl = tda1004x_i2c_gate_ctrl, .set_frontend = tda1004x_set_fe, .get_frontend = tda1004x_get_fe, .get_tune_settings = tda1004x_get_tune_settings, .read_status = tda1004x_read_status, .read_ber = tda1004x_read_ber, .read_signal_strength = tda1004x_read_signal_strength, .read_snr = tda1004x_read_snr, .read_ucblocks = tda1004x_read_ucblocks, }; struct dvb_frontend* tda10046_attach(const struct tda1004x_config* config, struct i2c_adapter* i2c) { struct tda1004x_state *state; int id; /* allocate memory for the internal state */ state = kzalloc(sizeof(struct tda1004x_state), GFP_KERNEL); if (!state) { printk(KERN_ERR "Can't alocate memory for tda10046 state\n"); return NULL; } /* setup the state */ state->config = config; state->i2c = i2c; state->demod_type = 
TDA1004X_DEMOD_TDA10046; /* check if the demod is there */ id = tda1004x_read_byte(state, TDA1004X_CHIPID); if (id < 0) { printk(KERN_ERR "tda10046: chip is not answering. Giving up.\n"); kfree(state); return NULL; } if (id != 0x46) { printk(KERN_ERR "Invalid tda1004x ID = 0x%02x. Can't proceed\n", id); kfree(state); return NULL; } /* create dvb_frontend */ memcpy(&state->frontend.ops, &tda10046_ops, sizeof(struct dvb_frontend_ops)); state->frontend.demodulator_priv = state; return &state->frontend; } module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off)."); MODULE_DESCRIPTION("Philips TDA10045H & TDA10046H DVB-T Demodulator"); MODULE_AUTHOR("Andrew de Quincey & Robert Schlabbach"); MODULE_LICENSE("GPL"); EXPORT_SYMBOL(tda10045_attach); EXPORT_SYMBOL(tda10046_attach);
gpl-2.0
rickardholmberg/linux-sunxi
drivers/staging/iio/kfifo_buf.c
4031
3369
#include <linux/slab.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/device.h> #include <linux/workqueue.h> #include <linux/kfifo.h> #include <linux/mutex.h> #include "kfifo_buf.h" struct iio_kfifo { struct iio_buffer buffer; struct kfifo kf; int update_needed; }; #define iio_to_kfifo(r) container_of(r, struct iio_kfifo, buffer) static inline int __iio_allocate_kfifo(struct iio_kfifo *buf, int bytes_per_datum, int length) { if ((length == 0) || (bytes_per_datum == 0)) return -EINVAL; __iio_update_buffer(&buf->buffer, bytes_per_datum, length); return kfifo_alloc(&buf->kf, bytes_per_datum*length, GFP_KERNEL); } static int iio_request_update_kfifo(struct iio_buffer *r) { int ret = 0; struct iio_kfifo *buf = iio_to_kfifo(r); if (!buf->update_needed) goto error_ret; kfifo_free(&buf->kf); ret = __iio_allocate_kfifo(buf, buf->buffer.bytes_per_datum, buf->buffer.length); error_ret: return ret; } static int iio_get_length_kfifo(struct iio_buffer *r) { return r->length; } static IIO_BUFFER_ENABLE_ATTR; static IIO_BUFFER_LENGTH_ATTR; static struct attribute *iio_kfifo_attributes[] = { &dev_attr_length.attr, &dev_attr_enable.attr, NULL, }; static struct attribute_group iio_kfifo_attribute_group = { .attrs = iio_kfifo_attributes, .name = "buffer", }; static int iio_get_bytes_per_datum_kfifo(struct iio_buffer *r) { return r->bytes_per_datum; } static int iio_mark_update_needed_kfifo(struct iio_buffer *r) { struct iio_kfifo *kf = iio_to_kfifo(r); kf->update_needed = true; return 0; } static int iio_set_bytes_per_datum_kfifo(struct iio_buffer *r, size_t bpd) { if (r->bytes_per_datum != bpd) { r->bytes_per_datum = bpd; iio_mark_update_needed_kfifo(r); } return 0; } static int iio_set_length_kfifo(struct iio_buffer *r, int length) { if (r->length != length) { r->length = length; iio_mark_update_needed_kfifo(r); } return 0; } static int iio_store_to_kfifo(struct iio_buffer *r, u8 *data, s64 timestamp) { int ret; struct iio_kfifo *kf = iio_to_kfifo(r); ret = 
kfifo_in(&kf->kf, data, r->bytes_per_datum); if (ret != r->bytes_per_datum) return -EBUSY; return 0; } static int iio_read_first_n_kfifo(struct iio_buffer *r, size_t n, char __user *buf) { int ret, copied; struct iio_kfifo *kf = iio_to_kfifo(r); if (n < r->bytes_per_datum) return -EINVAL; n = rounddown(n, r->bytes_per_datum); ret = kfifo_to_user(&kf->kf, buf, n, &copied); return copied; } static const struct iio_buffer_access_funcs kfifo_access_funcs = { .store_to = &iio_store_to_kfifo, .read_first_n = &iio_read_first_n_kfifo, .request_update = &iio_request_update_kfifo, .get_bytes_per_datum = &iio_get_bytes_per_datum_kfifo, .set_bytes_per_datum = &iio_set_bytes_per_datum_kfifo, .get_length = &iio_get_length_kfifo, .set_length = &iio_set_length_kfifo, }; struct iio_buffer *iio_kfifo_allocate(struct iio_dev *indio_dev) { struct iio_kfifo *kf; kf = kzalloc(sizeof *kf, GFP_KERNEL); if (!kf) return NULL; kf->update_needed = true; iio_buffer_init(&kf->buffer); kf->buffer.attrs = &iio_kfifo_attribute_group; kf->buffer.access = &kfifo_access_funcs; return &kf->buffer; } EXPORT_SYMBOL(iio_kfifo_allocate); void iio_kfifo_free(struct iio_buffer *r) { kfree(iio_to_kfifo(r)); } EXPORT_SYMBOL(iio_kfifo_free); MODULE_LICENSE("GPL");
gpl-2.0
brymaster5000/m7-501
drivers/usb/renesas_usbhs/mod_gadget.c
4287
24486
/* * Renesas USB driver * * Copyright (C) 2011 Renesas Solutions Corp. * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/io.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/usb/ch9.h> #include <linux/usb/gadget.h> #include "common.h" /* * struct */ struct usbhsg_request { struct usb_request req; struct usbhs_pkt pkt; }; #define EP_NAME_SIZE 8 struct usbhsg_gpriv; struct usbhsg_uep { struct usb_ep ep; struct usbhs_pipe *pipe; char ep_name[EP_NAME_SIZE]; struct usbhsg_gpriv *gpriv; }; struct usbhsg_gpriv { struct usb_gadget gadget; struct usbhs_mod mod; struct usbhsg_uep *uep; int uep_size; struct usb_gadget_driver *driver; u32 status; #define USBHSG_STATUS_STARTED (1 << 0) #define USBHSG_STATUS_REGISTERD (1 << 1) #define USBHSG_STATUS_WEDGE (1 << 2) }; struct usbhsg_recip_handle { char *name; int (*device)(struct usbhs_priv *priv, struct usbhsg_uep *uep, struct usb_ctrlrequest *ctrl); int (*interface)(struct usbhs_priv *priv, struct usbhsg_uep *uep, struct usb_ctrlrequest *ctrl); int (*endpoint)(struct usbhs_priv *priv, struct usbhsg_uep *uep, struct usb_ctrlrequest *ctrl); }; /* * macro */ #define usbhsg_priv_to_gpriv(priv) \ container_of( \ usbhs_mod_get(priv, USBHS_GADGET), \ struct usbhsg_gpriv, mod) #define __usbhsg_for_each_uep(start, pos, g, i) \ for (i = start, pos = (g)->uep + i; \ i < (g)->uep_size; \ i++, pos = (g)->uep + i) #define usbhsg_for_each_uep(pos, gpriv, i) \ __usbhsg_for_each_uep(1, pos, 
gpriv, i) #define usbhsg_for_each_uep_with_dcp(pos, gpriv, i) \ __usbhsg_for_each_uep(0, pos, gpriv, i) #define usbhsg_gadget_to_gpriv(g)\ container_of(g, struct usbhsg_gpriv, gadget) #define usbhsg_req_to_ureq(r)\ container_of(r, struct usbhsg_request, req) #define usbhsg_ep_to_uep(e) container_of(e, struct usbhsg_uep, ep) #define usbhsg_gpriv_to_dev(gp) usbhs_priv_to_dev((gp)->mod.priv) #define usbhsg_gpriv_to_priv(gp) ((gp)->mod.priv) #define usbhsg_gpriv_to_dcp(gp) ((gp)->uep) #define usbhsg_gpriv_to_nth_uep(gp, i) ((gp)->uep + i) #define usbhsg_uep_to_gpriv(u) ((u)->gpriv) #define usbhsg_uep_to_pipe(u) ((u)->pipe) #define usbhsg_pipe_to_uep(p) ((p)->mod_private) #define usbhsg_is_dcp(u) ((u) == usbhsg_gpriv_to_dcp((u)->gpriv)) #define usbhsg_ureq_to_pkt(u) (&(u)->pkt) #define usbhsg_pkt_to_ureq(i) \ container_of(i, struct usbhsg_request, pkt) #define usbhsg_is_not_connected(gp) ((gp)->gadget.speed == USB_SPEED_UNKNOWN) /* status */ #define usbhsg_status_init(gp) do {(gp)->status = 0; } while (0) #define usbhsg_status_set(gp, b) (gp->status |= b) #define usbhsg_status_clr(gp, b) (gp->status &= ~b) #define usbhsg_status_has(gp, b) (gp->status & b) /* * queue push/pop */ static void usbhsg_queue_pop(struct usbhsg_uep *uep, struct usbhsg_request *ureq, int status) { struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep); struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep); struct device *dev = usbhsg_gpriv_to_dev(gpriv); dev_dbg(dev, "pipe %d : queue pop\n", usbhs_pipe_number(pipe)); ureq->req.status = status; ureq->req.complete(&uep->ep, &ureq->req); } static void usbhsg_queue_done(struct usbhs_priv *priv, struct usbhs_pkt *pkt) { struct usbhs_pipe *pipe = pkt->pipe; struct usbhsg_uep *uep = usbhsg_pipe_to_uep(pipe); struct usbhsg_request *ureq = usbhsg_pkt_to_ureq(pkt); ureq->req.actual = pkt->actual; usbhsg_queue_pop(uep, ureq, 0); } static void usbhsg_queue_push(struct usbhsg_uep *uep, struct usbhsg_request *ureq) { struct usbhsg_gpriv *gpriv = 
usbhsg_uep_to_gpriv(uep); struct device *dev = usbhsg_gpriv_to_dev(gpriv); struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep); struct usbhs_pkt *pkt = usbhsg_ureq_to_pkt(ureq); struct usb_request *req = &ureq->req; req->actual = 0; req->status = -EINPROGRESS; usbhs_pkt_push(pipe, pkt, usbhsg_queue_done, req->buf, req->length, req->zero, -1); usbhs_pkt_start(pipe); dev_dbg(dev, "pipe %d : queue push (%d)\n", usbhs_pipe_number(pipe), req->length); } /* * dma map/unmap */ static int usbhsg_dma_map_ctrl(struct usbhs_pkt *pkt, int map) { struct usbhsg_request *ureq = usbhsg_pkt_to_ureq(pkt); struct usb_request *req = &ureq->req; struct usbhs_pipe *pipe = pkt->pipe; struct usbhsg_uep *uep = usbhsg_pipe_to_uep(pipe); struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep); enum dma_data_direction dir; int ret = 0; dir = usbhs_pipe_is_dir_host(pipe); if (map) { /* it can not use scatter/gather */ WARN_ON(req->num_sgs); ret = usb_gadget_map_request(&gpriv->gadget, req, dir); if (ret < 0) return ret; pkt->dma = req->dma; } else { usb_gadget_unmap_request(&gpriv->gadget, req, dir); } return ret; } /* * USB_TYPE_STANDARD / clear feature functions */ static int usbhsg_recip_handler_std_control_done(struct usbhs_priv *priv, struct usbhsg_uep *uep, struct usb_ctrlrequest *ctrl) { struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv); struct usbhsg_uep *dcp = usbhsg_gpriv_to_dcp(gpriv); struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(dcp); usbhs_dcp_control_transfer_done(pipe); return 0; } static int usbhsg_recip_handler_std_clear_endpoint(struct usbhs_priv *priv, struct usbhsg_uep *uep, struct usb_ctrlrequest *ctrl) { struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep); struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep); if (!usbhsg_status_has(gpriv, USBHSG_STATUS_WEDGE)) { usbhs_pipe_disable(pipe); usbhs_pipe_sequence_data0(pipe); usbhs_pipe_enable(pipe); } usbhsg_recip_handler_std_control_done(priv, uep, ctrl); usbhs_pkt_start(pipe); return 0; } struct usbhsg_recip_handle 
req_clear_feature = { .name = "clear feature", .device = usbhsg_recip_handler_std_control_done, .interface = usbhsg_recip_handler_std_control_done, .endpoint = usbhsg_recip_handler_std_clear_endpoint, }; /* * USB_TYPE_STANDARD / set feature functions */ static int usbhsg_recip_handler_std_set_device(struct usbhs_priv *priv, struct usbhsg_uep *uep, struct usb_ctrlrequest *ctrl) { switch (le16_to_cpu(ctrl->wValue)) { case USB_DEVICE_TEST_MODE: usbhsg_recip_handler_std_control_done(priv, uep, ctrl); udelay(100); usbhs_sys_set_test_mode(priv, le16_to_cpu(ctrl->wIndex >> 8)); break; default: usbhsg_recip_handler_std_control_done(priv, uep, ctrl); break; } return 0; } static int usbhsg_recip_handler_std_set_endpoint(struct usbhs_priv *priv, struct usbhsg_uep *uep, struct usb_ctrlrequest *ctrl) { struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep); usbhs_pipe_stall(pipe); usbhsg_recip_handler_std_control_done(priv, uep, ctrl); return 0; } struct usbhsg_recip_handle req_set_feature = { .name = "set feature", .device = usbhsg_recip_handler_std_set_device, .interface = usbhsg_recip_handler_std_control_done, .endpoint = usbhsg_recip_handler_std_set_endpoint, }; /* * USB_TYPE_STANDARD / get status functions */ static void __usbhsg_recip_send_complete(struct usb_ep *ep, struct usb_request *req) { struct usbhsg_request *ureq = usbhsg_req_to_ureq(req); /* free allocated recip-buffer/usb_request */ kfree(ureq->pkt.buf); usb_ep_free_request(ep, req); } static void __usbhsg_recip_send_status(struct usbhsg_gpriv *gpriv, unsigned short status) { struct usbhsg_uep *dcp = usbhsg_gpriv_to_dcp(gpriv); struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(dcp); struct device *dev = usbhsg_gpriv_to_dev(gpriv); struct usb_request *req; unsigned short *buf; /* alloc new usb_request for recip */ req = usb_ep_alloc_request(&dcp->ep, GFP_ATOMIC); if (!req) { dev_err(dev, "recip request allocation fail\n"); return; } /* alloc recip data buffer */ buf = kmalloc(sizeof(*buf), GFP_ATOMIC); if (!buf) { 
usb_ep_free_request(&dcp->ep, req); dev_err(dev, "recip data allocation fail\n"); return; } /* recip data is status */ *buf = cpu_to_le16(status); /* allocated usb_request/buffer will be freed */ req->complete = __usbhsg_recip_send_complete; req->buf = buf; req->length = sizeof(*buf); req->zero = 0; /* push packet */ pipe->handler = &usbhs_fifo_pio_push_handler; usbhsg_queue_push(dcp, usbhsg_req_to_ureq(req)); } static int usbhsg_recip_handler_std_get_device(struct usbhs_priv *priv, struct usbhsg_uep *uep, struct usb_ctrlrequest *ctrl) { struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep); unsigned short status = 1 << USB_DEVICE_SELF_POWERED; __usbhsg_recip_send_status(gpriv, status); return 0; } static int usbhsg_recip_handler_std_get_interface(struct usbhs_priv *priv, struct usbhsg_uep *uep, struct usb_ctrlrequest *ctrl) { struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep); unsigned short status = 0; __usbhsg_recip_send_status(gpriv, status); return 0; } static int usbhsg_recip_handler_std_get_endpoint(struct usbhs_priv *priv, struct usbhsg_uep *uep, struct usb_ctrlrequest *ctrl) { struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep); struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep); unsigned short status = 0; if (usbhs_pipe_is_stall(pipe)) status = 1 << USB_ENDPOINT_HALT; __usbhsg_recip_send_status(gpriv, status); return 0; } struct usbhsg_recip_handle req_get_status = { .name = "get status", .device = usbhsg_recip_handler_std_get_device, .interface = usbhsg_recip_handler_std_get_interface, .endpoint = usbhsg_recip_handler_std_get_endpoint, }; /* * USB_TYPE handler */ static int usbhsg_recip_run_handle(struct usbhs_priv *priv, struct usbhsg_recip_handle *handler, struct usb_ctrlrequest *ctrl) { struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv); struct device *dev = usbhsg_gpriv_to_dev(gpriv); struct usbhsg_uep *uep; struct usbhs_pipe *pipe; int recip = ctrl->bRequestType & USB_RECIP_MASK; int nth = le16_to_cpu(ctrl->wIndex) & 
USB_ENDPOINT_NUMBER_MASK; int ret = 0; int (*func)(struct usbhs_priv *priv, struct usbhsg_uep *uep, struct usb_ctrlrequest *ctrl); char *msg; uep = usbhsg_gpriv_to_nth_uep(gpriv, nth); pipe = usbhsg_uep_to_pipe(uep); if (!pipe) { dev_err(dev, "wrong recip request\n"); return -EINVAL; } switch (recip) { case USB_RECIP_DEVICE: msg = "DEVICE"; func = handler->device; break; case USB_RECIP_INTERFACE: msg = "INTERFACE"; func = handler->interface; break; case USB_RECIP_ENDPOINT: msg = "ENDPOINT"; func = handler->endpoint; break; default: dev_warn(dev, "unsupported RECIP(%d)\n", recip); func = NULL; ret = -EINVAL; } if (func) { dev_dbg(dev, "%s (pipe %d :%s)\n", handler->name, nth, msg); ret = func(priv, uep, ctrl); } return ret; } /* * irq functions * * it will be called from usbhs_interrupt */ static int usbhsg_irq_dev_state(struct usbhs_priv *priv, struct usbhs_irq_state *irq_state) { struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv); struct device *dev = usbhsg_gpriv_to_dev(gpriv); gpriv->gadget.speed = usbhs_bus_get_speed(priv); dev_dbg(dev, "state = %x : speed : %d\n", usbhs_status_get_device_state(irq_state), gpriv->gadget.speed); return 0; } static int usbhsg_irq_ctrl_stage(struct usbhs_priv *priv, struct usbhs_irq_state *irq_state) { struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv); struct usbhsg_uep *dcp = usbhsg_gpriv_to_dcp(gpriv); struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(dcp); struct device *dev = usbhsg_gpriv_to_dev(gpriv); struct usb_ctrlrequest ctrl; struct usbhsg_recip_handle *recip_handler = NULL; int stage = usbhs_status_get_ctrl_stage(irq_state); int ret = 0; dev_dbg(dev, "stage = %d\n", stage); /* * see Manual * * "Operation" * - "Interrupt Function" * - "Control Transfer Stage Transition Interrupt" * - Fig. 
"Control Transfer Stage Transitions" */ switch (stage) { case READ_DATA_STAGE: pipe->handler = &usbhs_fifo_pio_push_handler; break; case WRITE_DATA_STAGE: pipe->handler = &usbhs_fifo_pio_pop_handler; break; case NODATA_STATUS_STAGE: pipe->handler = &usbhs_ctrl_stage_end_handler; break; default: return ret; } /* * get usb request */ usbhs_usbreq_get_val(priv, &ctrl); switch (ctrl.bRequestType & USB_TYPE_MASK) { case USB_TYPE_STANDARD: switch (ctrl.bRequest) { case USB_REQ_CLEAR_FEATURE: recip_handler = &req_clear_feature; break; case USB_REQ_SET_FEATURE: recip_handler = &req_set_feature; break; case USB_REQ_GET_STATUS: recip_handler = &req_get_status; break; } } /* * setup stage / run recip */ if (recip_handler) ret = usbhsg_recip_run_handle(priv, recip_handler, &ctrl); else ret = gpriv->driver->setup(&gpriv->gadget, &ctrl); if (ret < 0) usbhs_pipe_stall(pipe); return ret; } /* * * usb_dcp_ops * */ static int usbhsg_pipe_disable(struct usbhsg_uep *uep) { struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep); struct usbhs_pkt *pkt; while (1) { pkt = usbhs_pkt_pop(pipe, NULL); if (!pkt) break; usbhsg_queue_pop(uep, usbhsg_pkt_to_ureq(pkt), -ECONNRESET); } usbhs_pipe_disable(pipe); return 0; } static void usbhsg_uep_init(struct usbhsg_gpriv *gpriv) { int i; struct usbhsg_uep *uep; usbhsg_for_each_uep_with_dcp(uep, gpriv, i) uep->pipe = NULL; } /* * * usb_ep_ops * */ static int usbhsg_ep_enable(struct usb_ep *ep, const struct usb_endpoint_descriptor *desc) { struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep); struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep); struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv); struct usbhs_pipe *pipe; int ret = -EIO; /* * if it already have pipe, * nothing to do */ if (uep->pipe) { usbhs_pipe_clear(uep->pipe); usbhs_pipe_sequence_data0(uep->pipe); return 0; } pipe = usbhs_pipe_malloc(priv, usb_endpoint_type(desc), usb_endpoint_dir_in(desc)); if (pipe) { uep->pipe = pipe; pipe->mod_private = uep; /* set epnum / maxp */ 
usbhs_pipe_config_update(pipe, 0, usb_endpoint_num(desc), usb_endpoint_maxp(desc)); /* * usbhs_fifo_dma_push/pop_handler try to * use dmaengine if possible. * It will use pio handler if impossible. */ if (usb_endpoint_dir_in(desc)) pipe->handler = &usbhs_fifo_dma_push_handler; else pipe->handler = &usbhs_fifo_dma_pop_handler; ret = 0; } return ret; } static int usbhsg_ep_disable(struct usb_ep *ep) { struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep); return usbhsg_pipe_disable(uep); } static struct usb_request *usbhsg_ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags) { struct usbhsg_request *ureq; ureq = kzalloc(sizeof *ureq, gfp_flags); if (!ureq) return NULL; usbhs_pkt_init(usbhsg_ureq_to_pkt(ureq)); return &ureq->req; } static void usbhsg_ep_free_request(struct usb_ep *ep, struct usb_request *req) { struct usbhsg_request *ureq = usbhsg_req_to_ureq(req); WARN_ON(!list_empty(&ureq->pkt.node)); kfree(ureq); } static int usbhsg_ep_queue(struct usb_ep *ep, struct usb_request *req, gfp_t gfp_flags) { struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep); struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep); struct usbhsg_request *ureq = usbhsg_req_to_ureq(req); struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep); /* param check */ if (usbhsg_is_not_connected(gpriv) || unlikely(!gpriv->driver) || unlikely(!pipe)) return -ESHUTDOWN; usbhsg_queue_push(uep, ureq); return 0; } static int usbhsg_ep_dequeue(struct usb_ep *ep, struct usb_request *req) { struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep); struct usbhsg_request *ureq = usbhsg_req_to_ureq(req); struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep); usbhs_pkt_pop(pipe, usbhsg_ureq_to_pkt(ureq)); usbhsg_queue_pop(uep, ureq, -ECONNRESET); return 0; } static int __usbhsg_ep_set_halt_wedge(struct usb_ep *ep, int halt, int wedge) { struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep); struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep); struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep); struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv); 
struct device *dev = usbhsg_gpriv_to_dev(gpriv); unsigned long flags; usbhsg_pipe_disable(uep); dev_dbg(dev, "set halt %d (pipe %d)\n", halt, usbhs_pipe_number(pipe)); /******************** spin lock ********************/ usbhs_lock(priv, flags); if (halt) usbhs_pipe_stall(pipe); else usbhs_pipe_disable(pipe); if (halt && wedge) usbhsg_status_set(gpriv, USBHSG_STATUS_WEDGE); else usbhsg_status_clr(gpriv, USBHSG_STATUS_WEDGE); usbhs_unlock(priv, flags); /******************** spin unlock ******************/ return 0; } static int usbhsg_ep_set_halt(struct usb_ep *ep, int value) { return __usbhsg_ep_set_halt_wedge(ep, value, 0); } static int usbhsg_ep_set_wedge(struct usb_ep *ep) { return __usbhsg_ep_set_halt_wedge(ep, 1, 1); } static struct usb_ep_ops usbhsg_ep_ops = { .enable = usbhsg_ep_enable, .disable = usbhsg_ep_disable, .alloc_request = usbhsg_ep_alloc_request, .free_request = usbhsg_ep_free_request, .queue = usbhsg_ep_queue, .dequeue = usbhsg_ep_dequeue, .set_halt = usbhsg_ep_set_halt, .set_wedge = usbhsg_ep_set_wedge, }; /* * usb module start/end */ static int usbhsg_try_start(struct usbhs_priv *priv, u32 status) { struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv); struct usbhsg_uep *dcp = usbhsg_gpriv_to_dcp(gpriv); struct usbhs_mod *mod = usbhs_mod_get_current(priv); struct device *dev = usbhs_priv_to_dev(priv); unsigned long flags; int ret = 0; /******************** spin lock ********************/ usbhs_lock(priv, flags); usbhsg_status_set(gpriv, status); if (!(usbhsg_status_has(gpriv, USBHSG_STATUS_STARTED) && usbhsg_status_has(gpriv, USBHSG_STATUS_REGISTERD))) ret = -1; /* not ready */ usbhs_unlock(priv, flags); /******************** spin unlock ********************/ if (ret < 0) return 0; /* not ready is not error */ /* * enable interrupt and systems if ready */ dev_dbg(dev, "start gadget\n"); /* * pipe initialize and enable DCP */ usbhs_pipe_init(priv, usbhsg_dma_map_ctrl); usbhs_fifo_init(priv); usbhsg_uep_init(gpriv); /* dcp init */ dcp->pipe = 
usbhs_dcp_malloc(priv); dcp->pipe->mod_private = dcp; usbhs_pipe_config_update(dcp->pipe, 0, 0, 64); /* * system config enble * - HI speed * - function * - usb module */ usbhs_sys_function_ctrl(priv, 1); /* * enable irq callback */ mod->irq_dev_state = usbhsg_irq_dev_state; mod->irq_ctrl_stage = usbhsg_irq_ctrl_stage; usbhs_irq_callback_update(priv, mod); return 0; } static int usbhsg_try_stop(struct usbhs_priv *priv, u32 status) { struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv); struct usbhs_mod *mod = usbhs_mod_get_current(priv); struct usbhsg_uep *dcp = usbhsg_gpriv_to_dcp(gpriv); struct device *dev = usbhs_priv_to_dev(priv); unsigned long flags; int ret = 0; /******************** spin lock ********************/ usbhs_lock(priv, flags); usbhsg_status_clr(gpriv, status); if (!usbhsg_status_has(gpriv, USBHSG_STATUS_STARTED) && !usbhsg_status_has(gpriv, USBHSG_STATUS_REGISTERD)) ret = -1; /* already done */ usbhs_unlock(priv, flags); /******************** spin unlock ********************/ if (ret < 0) return 0; /* already done is not error */ /* * disable interrupt and systems if 1st try */ usbhs_fifo_quit(priv); /* disable all irq */ mod->irq_dev_state = NULL; mod->irq_ctrl_stage = NULL; usbhs_irq_callback_update(priv, mod); gpriv->gadget.speed = USB_SPEED_UNKNOWN; /* disable sys */ usbhs_sys_set_test_mode(priv, 0); usbhs_sys_function_ctrl(priv, 0); usbhsg_pipe_disable(dcp); dev_dbg(dev, "stop gadget\n"); return 0; } /* * * linux usb function * */ static int usbhsg_gadget_start(struct usb_gadget *gadget, struct usb_gadget_driver *driver) { struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget); struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv); if (!driver || !driver->setup || driver->max_speed < USB_SPEED_FULL) return -EINVAL; /* first hook up the driver ... 
*/ gpriv->driver = driver; gpriv->gadget.dev.driver = &driver->driver; return usbhsg_try_start(priv, USBHSG_STATUS_REGISTERD); } static int usbhsg_gadget_stop(struct usb_gadget *gadget, struct usb_gadget_driver *driver) { struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget); struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv); if (!driver || !driver->unbind) return -EINVAL; usbhsg_try_stop(priv, USBHSG_STATUS_REGISTERD); gpriv->gadget.dev.driver = NULL; gpriv->driver = NULL; return 0; } /* * usb gadget ops */ static int usbhsg_get_frame(struct usb_gadget *gadget) { struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget); struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv); return usbhs_frame_get_num(priv); } static struct usb_gadget_ops usbhsg_gadget_ops = { .get_frame = usbhsg_get_frame, .udc_start = usbhsg_gadget_start, .udc_stop = usbhsg_gadget_stop, }; static int usbhsg_start(struct usbhs_priv *priv) { return usbhsg_try_start(priv, USBHSG_STATUS_STARTED); } static int usbhsg_stop(struct usbhs_priv *priv) { struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv); /* cable disconnect */ if (gpriv->driver && gpriv->driver->disconnect) gpriv->driver->disconnect(&gpriv->gadget); return usbhsg_try_stop(priv, USBHSG_STATUS_STARTED); } static void usbhs_mod_gadget_release(struct device *pdev) { /* do nothing */ } int usbhs_mod_gadget_probe(struct usbhs_priv *priv) { struct usbhsg_gpriv *gpriv; struct usbhsg_uep *uep; struct device *dev = usbhs_priv_to_dev(priv); int pipe_size = usbhs_get_dparam(priv, pipe_size); int i; int ret; gpriv = kzalloc(sizeof(struct usbhsg_gpriv), GFP_KERNEL); if (!gpriv) { dev_err(dev, "Could not allocate gadget priv\n"); return -ENOMEM; } uep = kzalloc(sizeof(struct usbhsg_uep) * pipe_size, GFP_KERNEL); if (!uep) { dev_err(dev, "Could not allocate ep\n"); ret = -ENOMEM; goto usbhs_mod_gadget_probe_err_gpriv; } /* * CAUTION * * There is no guarantee that it is possible to access usb module here. * Don't accesses to it. 
* The accesse will be enable after "usbhsg_start" */ /* * register itself */ usbhs_mod_register(priv, &gpriv->mod, USBHS_GADGET); /* init gpriv */ gpriv->mod.name = "gadget"; gpriv->mod.start = usbhsg_start; gpriv->mod.stop = usbhsg_stop; gpriv->uep = uep; gpriv->uep_size = pipe_size; usbhsg_status_init(gpriv); /* * init gadget */ dev_set_name(&gpriv->gadget.dev, "gadget"); gpriv->gadget.dev.parent = dev; gpriv->gadget.dev.release = usbhs_mod_gadget_release; gpriv->gadget.name = "renesas_usbhs_udc"; gpriv->gadget.ops = &usbhsg_gadget_ops; gpriv->gadget.max_speed = USB_SPEED_HIGH; ret = device_register(&gpriv->gadget.dev); if (ret < 0) goto err_add_udc; INIT_LIST_HEAD(&gpriv->gadget.ep_list); /* * init usb_ep */ usbhsg_for_each_uep_with_dcp(uep, gpriv, i) { uep->gpriv = gpriv; snprintf(uep->ep_name, EP_NAME_SIZE, "ep%d", i); uep->ep.name = uep->ep_name; uep->ep.ops = &usbhsg_ep_ops; INIT_LIST_HEAD(&uep->ep.ep_list); /* init DCP */ if (usbhsg_is_dcp(uep)) { gpriv->gadget.ep0 = &uep->ep; uep->ep.maxpacket = 64; } /* init normal pipe */ else { uep->ep.maxpacket = 512; list_add_tail(&uep->ep.ep_list, &gpriv->gadget.ep_list); } } ret = usb_add_gadget_udc(dev, &gpriv->gadget); if (ret) goto err_register; dev_info(dev, "gadget probed\n"); return 0; err_register: device_unregister(&gpriv->gadget.dev); err_add_udc: kfree(gpriv->uep); usbhs_mod_gadget_probe_err_gpriv: kfree(gpriv); return ret; } void usbhs_mod_gadget_remove(struct usbhs_priv *priv) { struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv); usb_del_gadget_udc(&gpriv->gadget); device_unregister(&gpriv->gadget.dev); kfree(gpriv->uep); kfree(gpriv); }
gpl-2.0
t3ksin/efficientkernel
fs/btrfs/xattr.c
4543
11030
/* * Copyright (C) 2007 Red Hat. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License v2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/init.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/rwsem.h> #include <linux/xattr.h> #include <linux/security.h> #include "ctree.h" #include "btrfs_inode.h" #include "transaction.h" #include "xattr.h" #include "disk-io.h" ssize_t __btrfs_getxattr(struct inode *inode, const char *name, void *buffer, size_t size) { struct btrfs_dir_item *di; struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_path *path; struct extent_buffer *leaf; int ret = 0; unsigned long data_ptr; path = btrfs_alloc_path(); if (!path) return -ENOMEM; /* lookup the xattr by name */ di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode), name, strlen(name), 0); if (!di) { ret = -ENODATA; goto out; } else if (IS_ERR(di)) { ret = PTR_ERR(di); goto out; } leaf = path->nodes[0]; /* if size is 0, that means we want the size of the attr */ if (!size) { ret = btrfs_dir_data_len(leaf, di); goto out; } /* now get the data out of our dir_item */ if (btrfs_dir_data_len(leaf, di) > size) { ret = -ERANGE; goto out; } /* * The way things are packed into the leaf is like this * |struct btrfs_dir_item|name|data| * where name is the xattr name, so security.foo, and data is the * content of the xattr. 
data_ptr points to the location in memory * where the data starts in the in memory leaf */ data_ptr = (unsigned long)((char *)(di + 1) + btrfs_dir_name_len(leaf, di)); read_extent_buffer(leaf, buffer, data_ptr, btrfs_dir_data_len(leaf, di)); ret = btrfs_dir_data_len(leaf, di); out: btrfs_free_path(path); return ret; } static int do_setxattr(struct btrfs_trans_handle *trans, struct inode *inode, const char *name, const void *value, size_t size, int flags) { struct btrfs_dir_item *di; struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_path *path; size_t name_len = strlen(name); int ret = 0; if (name_len + size > BTRFS_MAX_XATTR_SIZE(root)) return -ENOSPC; path = btrfs_alloc_path(); if (!path) return -ENOMEM; if (flags & XATTR_REPLACE) { di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode), name, name_len, -1); if (IS_ERR(di)) { ret = PTR_ERR(di); goto out; } else if (!di) { ret = -ENODATA; goto out; } ret = btrfs_delete_one_dir_name(trans, root, path, di); if (ret) goto out; btrfs_release_path(path); /* * remove the attribute */ if (!value) goto out; } again: ret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode), name, name_len, value, size); /* * If we're setting an xattr to a new value but the new value is say * exactly BTRFS_MAX_XATTR_SIZE, we could end up with EOVERFLOW getting * back from split_leaf. This is because it thinks we'll be extending * the existing item size, but we're asking for enough space to add the * item itself. So if we get EOVERFLOW just set ret to EEXIST and let * the rest of the function figure it out. */ if (ret == -EOVERFLOW) ret = -EEXIST; if (ret == -EEXIST) { if (flags & XATTR_CREATE) goto out; /* * We can't use the path we already have since we won't have the * proper locking for a delete, so release the path and * re-lookup to delete the thing. 
*/ btrfs_release_path(path); di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode), name, name_len, -1); if (IS_ERR(di)) { ret = PTR_ERR(di); goto out; } else if (!di) { /* Shouldn't happen but just in case... */ btrfs_release_path(path); goto again; } ret = btrfs_delete_one_dir_name(trans, root, path, di); if (ret) goto out; /* * We have a value to set, so go back and try to insert it now. */ if (value) { btrfs_release_path(path); goto again; } } out: btrfs_free_path(path); return ret; } /* * @value: "" makes the attribute to empty, NULL removes it */ int __btrfs_setxattr(struct btrfs_trans_handle *trans, struct inode *inode, const char *name, const void *value, size_t size, int flags) { struct btrfs_root *root = BTRFS_I(inode)->root; int ret; if (trans) return do_setxattr(trans, inode, name, value, size, flags); trans = btrfs_start_transaction(root, 2); if (IS_ERR(trans)) return PTR_ERR(trans); ret = do_setxattr(trans, inode, name, value, size, flags); if (ret) goto out; inode->i_ctime = CURRENT_TIME; ret = btrfs_update_inode(trans, root, inode); BUG_ON(ret); out: btrfs_end_transaction(trans, root); return ret; } ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size) { struct btrfs_key key, found_key; struct inode *inode = dentry->d_inode; struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_path *path; struct extent_buffer *leaf; struct btrfs_dir_item *di; int ret = 0, slot; size_t total_size = 0, size_left = size; unsigned long name_ptr; size_t name_len; /* * ok we want all objects associated with this id. 
* NOTE: we set key.offset = 0; because we want to start with the * first xattr that we find and walk forward */ key.objectid = btrfs_ino(inode); btrfs_set_key_type(&key, BTRFS_XATTR_ITEM_KEY); key.offset = 0; path = btrfs_alloc_path(); if (!path) return -ENOMEM; path->reada = 2; /* search for our xattrs */ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto err; while (1) { leaf = path->nodes[0]; slot = path->slots[0]; /* this is where we start walking through the path */ if (slot >= btrfs_header_nritems(leaf)) { /* * if we've reached the last slot in this leaf we need * to go to the next leaf and reset everything */ ret = btrfs_next_leaf(root, path); if (ret < 0) goto err; else if (ret > 0) break; continue; } btrfs_item_key_to_cpu(leaf, &found_key, slot); /* check to make sure this item is what we want */ if (found_key.objectid != key.objectid) break; if (btrfs_key_type(&found_key) != BTRFS_XATTR_ITEM_KEY) break; di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item); if (verify_dir_item(root, leaf, di)) continue; name_len = btrfs_dir_name_len(leaf, di); total_size += name_len + 1; /* we are just looking for how big our buffer needs to be */ if (!size) goto next; if (!buffer || (name_len + 1) > size_left) { ret = -ERANGE; goto err; } name_ptr = (unsigned long)(di + 1); read_extent_buffer(leaf, buffer, name_ptr, name_len); buffer[name_len] = '\0'; size_left -= name_len + 1; buffer += name_len + 1; next: path->slots[0]++; } ret = total_size; err: btrfs_free_path(path); return ret; } /* * List of handlers for synthetic system.* attributes. All real ondisk * attributes are handled directly. */ const struct xattr_handler *btrfs_xattr_handlers[] = { #ifdef CONFIG_BTRFS_FS_POSIX_ACL &btrfs_xattr_acl_access_handler, &btrfs_xattr_acl_default_handler, #endif NULL, }; /* * Check if the attribute is in a supported namespace. * * This applied after the check for the synthetic attributes in the system * namespace. 
*/ static bool btrfs_is_valid_xattr(const char *name) { return !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) || !strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) || !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) || !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN); } ssize_t btrfs_getxattr(struct dentry *dentry, const char *name, void *buffer, size_t size) { /* * If this is a request for a synthetic attribute in the system.* * namespace use the generic infrastructure to resolve a handler * for it via sb->s_xattr. */ if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) return generic_getxattr(dentry, name, buffer, size); if (!btrfs_is_valid_xattr(name)) return -EOPNOTSUPP; return __btrfs_getxattr(dentry->d_inode, name, buffer, size); } int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags) { struct btrfs_root *root = BTRFS_I(dentry->d_inode)->root; /* * The permission on security.* and system.* is not checked * in permission(). */ if (btrfs_root_readonly(root)) return -EROFS; /* * If this is a request for a synthetic attribute in the system.* * namespace use the generic infrastructure to resolve a handler * for it via sb->s_xattr. */ if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) return generic_setxattr(dentry, name, value, size, flags); if (!btrfs_is_valid_xattr(name)) return -EOPNOTSUPP; if (size == 0) value = ""; /* empty EA, do not remove */ return __btrfs_setxattr(NULL, dentry->d_inode, name, value, size, flags); } int btrfs_removexattr(struct dentry *dentry, const char *name) { struct btrfs_root *root = BTRFS_I(dentry->d_inode)->root; /* * The permission on security.* and system.* is not checked * in permission(). */ if (btrfs_root_readonly(root)) return -EROFS; /* * If this is a request for a synthetic attribute in the system.* * namespace use the generic infrastructure to resolve a handler * for it via sb->s_xattr. 
*/ if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) return generic_removexattr(dentry, name); if (!btrfs_is_valid_xattr(name)) return -EOPNOTSUPP; return __btrfs_setxattr(NULL, dentry->d_inode, name, NULL, 0, XATTR_REPLACE); } int btrfs_initxattrs(struct inode *inode, const struct xattr *xattr_array, void *fs_info) { const struct xattr *xattr; struct btrfs_trans_handle *trans = fs_info; char *name; int err = 0; for (xattr = xattr_array; xattr->name != NULL; xattr++) { name = kmalloc(XATTR_SECURITY_PREFIX_LEN + strlen(xattr->name) + 1, GFP_NOFS); if (!name) { err = -ENOMEM; break; } strcpy(name, XATTR_SECURITY_PREFIX); strcpy(name + XATTR_SECURITY_PREFIX_LEN, xattr->name); err = __btrfs_setxattr(trans, inode, name, xattr->value, xattr->value_len, 0); kfree(name); if (err < 0) break; } return err; } int btrfs_xattr_security_init(struct btrfs_trans_handle *trans, struct inode *inode, struct inode *dir, const struct qstr *qstr) { return security_inode_init_security(inode, dir, qstr, &btrfs_initxattrs, trans); }
gpl-2.0
RoyMcBaster/kernel_hammerhead
drivers/staging/wlags49_h2/wl_util.c
4799
42019
/******************************************************************************* * Agere Systems Inc. * Wireless device driver for Linux (wlags49). * * Copyright (c) 1998-2003 Agere Systems Inc. * All rights reserved. * http://www.agere.com * * Initially developed by TriplePoint, Inc. * http://www.triplepoint.com * *------------------------------------------------------------------------------ * * This file defines misc utility functions. * *------------------------------------------------------------------------------ * * SOFTWARE LICENSE * * This software is provided subject to the following terms and conditions, * which you should read carefully before using the software. Using this * software indicates your acceptance of these terms and conditions. If you do * not agree with these terms and conditions, do not use the software. * * Copyright © 2003 Agere Systems Inc. * All rights reserved. * * Redistribution and use in source or binary forms, with or without * modifications, are permitted provided that the following conditions are met: * * . Redistributions of source code must retain the above copyright notice, this * list of conditions and the following Disclaimer as comments in the code as * well as in the documentation and/or other materials provided with the * distribution. * * . Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following Disclaimer in the documentation * and/or other materials provided with the distribution. * * . Neither the name of Agere Systems Inc. nor the names of the contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * Disclaimer * * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES, * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
ANY * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. * ******************************************************************************/ /******************************************************************************* * include files ******************************************************************************/ #include <wl_version.h> #include <linux/kernel.h> // #include <linux/sched.h> // #include <linux/ptrace.h> #include <linux/ctype.h> // #include <linux/string.h> // #include <linux/timer.h> // #include <linux/interrupt.h> // #include <linux/in.h> // #include <linux/delay.h> // #include <asm/io.h> // // #include <asm/bitops.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> // #include <linux/skbuff.h> // #include <linux/if_arp.h> // #include <linux/ioport.h> #include <debug.h> #include <hcf.h> // #include <hcfdef.h> #include <wl_if.h> #include <wl_internal.h> #include <wl_util.h> #include <wl_wext.h> #include <wl_main.h> /******************************************************************************* * global variables ******************************************************************************/ /* A matrix which maps channels to frequencies */ #define MAX_CHAN_FREQ_MAP_ENTRIES 50 static const long chan_freq_list[][MAX_CHAN_FREQ_MAP_ENTRIES] = { {1,2412}, {2,2417}, {3,2422}, {4,2427}, {5,2432}, {6,2437}, {7,2442}, {8,2447}, {9,2452}, {10,2457}, {11,2462}, {12,2467}, 
{13,2472}, {14,2484}, {36,5180}, {40,5200}, {44,5220}, {48,5240}, {52,5260}, {56,5280}, {60,5300}, {64,5320}, {149,5745}, {153,5765}, {157,5785}, {161,5805} }; #if DBG extern dbg_info_t *DbgInfo; #endif /* DBG */ /******************************************************************************* * dbm() ******************************************************************************* * * DESCRIPTION: * * Return an energy value in dBm. * * PARAMETERS: * * value - the energy value to be converted * * RETURNS: * * the value in dBm * ******************************************************************************/ int dbm( int value ) { /* Truncate the value to be between min and max. */ if( value < HCF_MIN_SIGNAL_LEVEL ) value = HCF_MIN_SIGNAL_LEVEL; if( value > HCF_MAX_SIGNAL_LEVEL ) value = HCF_MAX_SIGNAL_LEVEL; /* Return the energy value in dBm. */ return ( value - HCF_0DBM_OFFSET ); } // dbm /*============================================================================*/ /******************************************************************************* * percent() ******************************************************************************* * * DESCRIPTION: * * Return a value as a percentage of min to max. * * PARAMETERS: * * value - the value in question * min - the minimum range value * max - the maximum range value * * RETURNS: * * the percentage value * ******************************************************************************/ int percent( int value, int min, int max ) { /* Truncate the value to be between min and max. */ if( value < min ) value = min; if( value > max ) value = max; /* Return the value as a percentage of min to max. 
*/ return ((( value - min ) * 100 ) / ( max - min )); } // percent /*============================================================================*/ /******************************************************************************* * is_valid_key_string() ******************************************************************************* * * DESCRIPTION: * * Checks to determine if the WEP key string is valid * * PARAMETERS: * * s - the string in question * * RETURNS: * * non-zero if the string contains a valid key * ******************************************************************************/ int is_valid_key_string( char *s ) { int l; int i; /*------------------------------------------------------------------------*/ l = strlen( s ); /* 0x followed by 5 or 13 hexadecimal digit pairs is valid */ if( s[0] == '0' && ( s[1] == 'x' || s[1] == 'X' )) { if( l == 12 || l == 28 ) { for( i = 2; i < l; i++ ) { if( !isxdigit( s[i] )) return 0; } return 1; } else { return 0; } } /* string with 0, 5, or 13 characters is valid */ else { return( l == 0 || l == 5 || l == 13 ); } } // is_valid_key_string /*============================================================================*/ /******************************************************************************* * key_string2key() ******************************************************************************* * * DESCRIPTION: * * Converts a key_string to a key, Assumes the key_string is validated with * is_valid_key_string(). * * PARAMETERS: * * ks - the valid key string * key - a pointer to a KEY_STRUCT where the converted key information will * be stored. 
* * RETURNS: * * N/A * ******************************************************************************/ void key_string2key( char *ks, KEY_STRCT *key ) { int l,i,n; char *p; /*------------------------------------------------------------------------*/ l = strlen( ks ); /* 0x followed by hexadecimal digit pairs */ if( ks[0] == '0' && ( ks[1] == 'x' || ks[1] == 'X' )) { n = 0; p = (char *)key->key; for( i = 2; i < l; i+=2 ) { *p++ = (hex_to_bin(ks[i]) << 4) + hex_to_bin(ks[i+1]); n++; } /* Note that endian translation of the length field is not needed here because it's performed in wl_put_ltv() */ key->len = n; } /* character string */ else { strcpy( (char *)key->key, ks ); key->len = l; } return; } // key_string2key /*============================================================================*/ /******************************************************************************* * wl_has_wep() ******************************************************************************* * * DESCRIPTION: * * Checks to see if the device supports WEP * * PARAMETERS: * * ifbp - the IFB pointer of the device in question * * RETURNS: * * 1 if WEP is known enabled, else 0 * ******************************************************************************/ int wl_has_wep (IFBP ifbp) { CFG_PRIVACY_OPT_IMPLEMENTED_STRCT ltv; int rc, privacy; /*------------------------------------------------------------------------*/ /* This function allows us to distiguish bronze cards from other types, to know if WEP exists. Does not distinguish (because there's no way to) between silver and gold cards. */ ltv.len = 2; ltv.typ = CFG_PRIVACY_OPT_IMPLEMENTED; rc = hcf_get_info( ifbp, (LTVP) &ltv ); privacy = CNV_LITTLE_TO_INT( ltv.privacy_opt_implemented ); //return rc ? 
0 : privacy; return 1; } // wl_has_wep /*============================================================================*/ /******************************************************************************* * wl_hcf_error() ******************************************************************************* * * DESCRIPTION: * * Report the type of HCF error message * * PARAMETERS: * * none * * RETURNS: * * A descriptive string indicating the error, quiet otherwise. * ******************************************************************************/ void wl_hcf_error( struct net_device *dev, int hcfStatus ) { char buffer[64], *pMsg; /*------------------------------------------------------------------------*/ if( hcfStatus != HCF_SUCCESS ) { switch( hcfStatus ) { case HCF_ERR_TIME_OUT: pMsg = "Expected adapter event did not occur in expected time"; break; case HCF_ERR_NO_NIC: pMsg = "Card not found (ejected unexpectedly)"; break; case HCF_ERR_LEN: pMsg = "Command buffer size insufficient"; break; case HCF_ERR_INCOMP_PRI: pMsg = "Primary functions are not compatible"; break; case HCF_ERR_INCOMP_FW: pMsg = "Primary functions are compatible, " "station/ap functions are not"; break; case HCF_ERR_BUSY: pMsg = "Inquire cmd while another Inquire in progress"; break; //case HCF_ERR_SEQ_BUG: // pMsg = "Unexpected command completed"; // break; case HCF_ERR_DEFUNCT_AUX: pMsg = "Timeout on ack for enable/disable of AUX registers"; break; case HCF_ERR_DEFUNCT_TIMER: pMsg = "Timeout on timer calibration during initialization process"; break; case HCF_ERR_DEFUNCT_TIME_OUT: pMsg = "Timeout on Busy bit drop during BAP setup"; break; case HCF_ERR_DEFUNCT_CMD_SEQ: pMsg = "Hermes and HCF are out of sync"; break; default: sprintf( buffer, "Error code %d", hcfStatus ); pMsg = buffer; break; } printk( KERN_INFO "%s: Wireless, HCF failure: \"%s\"\n", dev->name, pMsg ); } } // wl_hcf_error /*============================================================================*/ 
/******************************************************************************* * wl_endian_translate_event() ******************************************************************************* * * DESCRIPTION: * * Determines what type of data is in the mailbox and performs the proper * endian translation. * * PARAMETERS: * * pLtv - an LTV pointer * * RETURNS: * * N/A * ******************************************************************************/ void wl_endian_translate_event( ltv_t *pLtv ) { DBG_FUNC( "wl_endian_translate_event" ); DBG_ENTER( DbgInfo ); switch( pLtv->typ ) { case CFG_TALLIES: break; case CFG_SCAN: { int numAPs; SCAN_RS_STRCT *pAps = (SCAN_RS_STRCT*)&pLtv->u.u8[0]; numAPs = (hcf_16)(( (size_t)( pLtv->len - 1 ) * 2 ) / (sizeof( SCAN_RS_STRCT ))); while( numAPs >= 1 ) { numAPs--; pAps[numAPs].channel_id = CNV_LITTLE_TO_INT( pAps[numAPs].channel_id ); pAps[numAPs].noise_level = CNV_LITTLE_TO_INT( pAps[numAPs].noise_level ); pAps[numAPs].signal_level = CNV_LITTLE_TO_INT( pAps[numAPs].signal_level ); pAps[numAPs].beacon_interval_time = CNV_LITTLE_TO_INT( pAps[numAPs].beacon_interval_time ); pAps[numAPs].capability = CNV_LITTLE_TO_INT( pAps[numAPs].capability ); pAps[numAPs].ssid_len = CNV_LITTLE_TO_INT( pAps[numAPs].ssid_len ); pAps[numAPs].ssid_val[pAps[numAPs].ssid_len] = 0; } } break; case CFG_ACS_SCAN: { PROBE_RESP *probe_resp = (PROBE_RESP *)pLtv; probe_resp->frameControl = CNV_LITTLE_TO_INT( probe_resp->frameControl ); probe_resp->durID = CNV_LITTLE_TO_INT( probe_resp->durID ); probe_resp->sequence = CNV_LITTLE_TO_INT( probe_resp->sequence ); probe_resp->dataLength = CNV_LITTLE_TO_INT( probe_resp->dataLength ); #ifndef WARP probe_resp->lenType = CNV_LITTLE_TO_INT( probe_resp->lenType ); #endif // WARP probe_resp->beaconInterval = CNV_LITTLE_TO_INT( probe_resp->beaconInterval ); probe_resp->capability = CNV_LITTLE_TO_INT( probe_resp->capability ); probe_resp->flags = CNV_LITTLE_TO_INT( probe_resp->flags ); } break; case CFG_LINK_STAT: #define ls 
((LINK_STATUS_STRCT *)pLtv) ls->linkStatus = CNV_LITTLE_TO_INT( ls->linkStatus ); break; #undef ls case CFG_ASSOC_STAT: { ASSOC_STATUS_STRCT *pAs = (ASSOC_STATUS_STRCT *)pLtv; pAs->assocStatus = CNV_LITTLE_TO_INT( pAs->assocStatus ); } break; case CFG_SECURITY_STAT: { SECURITY_STATUS_STRCT *pSs = (SECURITY_STATUS_STRCT *)pLtv; pSs->securityStatus = CNV_LITTLE_TO_INT( pSs->securityStatus ); pSs->reason = CNV_LITTLE_TO_INT( pSs->reason ); } break; case CFG_WMP: break; case CFG_NULL: break; default: break; } DBG_LEAVE( DbgInfo ); return; } // wl_endian_translate_event /*============================================================================*/ /******************************************************************************* * msf_assert() ******************************************************************************* * * DESCRIPTION: * * Print statement used to display asserts from within the HCF. Only called * when asserts in the HCF are turned on. See hcfcfg.h for more information. * * PARAMETERS: * * file_namep - the filename in which the assert occurred. * line_number - the line number on which the assert occurred. * trace - a comment associated with the assert. * qual - return code or other value related to the assert * * RETURNS: * * N/A * ******************************************************************************/ void msf_assert( unsigned int line_number, hcf_16 trace, hcf_32 qual ) { DBG_PRINT( "HCF ASSERT: Line %d, VAL: 0x%.8x\n", line_number, /*;?*/(u32)qual ); } // msf_assert /*============================================================================*/ /******************************************************************************* * wl_parse_ds_ie() ******************************************************************************* * * DESCRIPTION: * * This function parses the Direct Sequence Parameter Set IE, used to * determine channel/frequency information. 
* * PARAMETERS: * * probe_rsp - a pointer to a PROBE_RESP structure containing the probe * response. * * RETURNS: * * The channel on which the BSS represented by this probe response is * transmitting. * ******************************************************************************/ hcf_8 wl_parse_ds_ie( PROBE_RESP *probe_rsp ) { int i; int ie_length = 0; hcf_8 *buf; hcf_8 buf_size; /*------------------------------------------------------------------------*/ if( probe_rsp == NULL ) { return 0; } buf = probe_rsp->rawData; buf_size = sizeof( probe_rsp->rawData ); for( i = 0; i < buf_size; i++ ) { if( buf[i] == DS_INFO_ELEM ) { /* Increment by 1 to get the length, and test it; in a DS element, length should always be 1 */ i++; ie_length = buf[i]; if( buf[i] == 1 ) { /* Get the channel information */ i++; return buf[i]; } } } /* If we get here, we didn't find a DS-IE, which is strange */ return 0; } // wl_parse_ds_ie /******************************************************************************* * wl_parse_wpa_ie() ******************************************************************************* * * DESCRIPTION: * * This function parses the Probe Response for a valid WPA-IE. * * PARAMETERS: * * probe_rsp - a pointer to a PROBE_RESP structure containing the probe * response * length - a pointer to an hcf_16 in which the size of the WPA-IE will * be stored (if found). * * RETURNS: * * A pointer to the location in the probe response buffer where a valid * WPA-IE lives. The length of this IE is written back to the 'length' * argument passed to the function. 
* ******************************************************************************/ hcf_8 * wl_parse_wpa_ie( PROBE_RESP *probe_rsp, hcf_16 *length ) { int i; int ie_length = 0; hcf_8 *buf; hcf_8 buf_size; hcf_8 wpa_oui[] = WPA_OUI_TYPE; /*------------------------------------------------------------------------*/ if( probe_rsp == NULL || length == NULL ) { return NULL; } buf = probe_rsp->rawData; buf_size = sizeof( probe_rsp->rawData ); *length = 0; for( i = 0; i < buf_size; i++ ) { if( buf[i] == GENERIC_INFO_ELEM ) { /* Increment by one to get the IE length */ i++; ie_length = probe_rsp->rawData[i]; /* Increment by one to point to the IE payload */ i++; /* Does the IE contain a WPA OUI? If not, it's a proprietary IE */ if( memcmp( &buf[i], &wpa_oui, WPA_SELECTOR_LEN ) == 0 ) { /* Pass back length and return a pointer to the WPA-IE */ /* NOTE: Length contained in the WPA-IE is only the length of the payload. The entire WPA-IE, including the IE identifier and the length, is 2 bytes larger */ *length = ie_length + 2; /* Back up the pointer 2 bytes to include the IE identifier and the length in the buffer returned */ i -= 2; return &buf[i]; } /* Increment past this non-WPA IE and continue looking */ i += ( ie_length - 1 ); } } /* If we're here, we didn't find a WPA-IE in the buffer */ return NULL; } // wl_parse_wpa_ie /******************************************************************************* * wl_print_wpa_ie() ******************************************************************************* * * DESCRIPTION: * * Function used to take a WPA Information Element (WPA-IE) buffer and * display it in a readable format. * * PARAMETERS: * * buffer - the byte buffer containing the WPA-IE * length - the length of the above buffer * * RETURNS: * * A pointer to the formatted WPA-IE string. Note that the format used is * byte-by-byte printing as %02x hex values with no spaces. This is * required for proper operation with some WPA supplicants. 
* ******************************************************************************/ hcf_8 * wl_print_wpa_ie( hcf_8 *buffer, int length ) { int count; int rows; int remainder; int rowsize = 4; hcf_8 row_buf[64]; static hcf_8 output[512]; /*------------------------------------------------------------------------*/ memset( output, 0, sizeof( output )); memset( row_buf, 0, sizeof( row_buf )); /* Determine how many rows will be needed, and the remainder */ rows = length / rowsize; remainder = length % rowsize; /* Format the rows */ for( count = 0; count < rows; count++ ) { sprintf( row_buf, "%02x%02x%02x%02x", buffer[count*rowsize], buffer[count*rowsize+1], buffer[count*rowsize+2], buffer[count*rowsize+3]); strcat( output, row_buf ); } memset( row_buf, 0, sizeof( row_buf )); /* Format the remainder */ for( count = 0; count < remainder; count++ ) { sprintf( row_buf, "%02x", buffer[(rows*rowsize)+count]); strcat( output, row_buf ); } return output; } // wl_print_wpa_ie /*============================================================================*/ /******************************************************************************* * wl_is_a_valid_chan() ******************************************************************************* * * DESCRIPTION: * * Checks if a given channel is valid * * PARAMETERS: * * channel - the channel * * RETURNS: * * 1 if TRUE * 0 if FALSE * ******************************************************************************/ int wl_is_a_valid_chan( int channel ) { int i; /*------------------------------------------------------------------------*/ /* Strip out the high bit set by the FW for 802.11a channels */ if( channel & 0x100 ) { channel = channel & 0x0FF; } /* Iterate through the matrix and retrieve the frequency */ for( i = 0; i < MAX_CHAN_FREQ_MAP_ENTRIES; i++ ) { if( chan_freq_list[i][0] == channel ) { return 1; } } return 0; } // wl_is_a_valid_chan /*============================================================================*/ 
/******************************************************************************* * wl_get_chan_from_freq() ******************************************************************************* * * DESCRIPTION: * * Checks if a given frequency is valid * * PARAMETERS: * * freq - the frequency * * RETURNS: * * 1 if TRUE * 0 if FALSE * ******************************************************************************/ int wl_is_a_valid_freq( long frequency ) { int i; /*------------------------------------------------------------------------*/ /* Iterate through the matrix and retrieve the channel */ for( i = 0; i < MAX_CHAN_FREQ_MAP_ENTRIES; i++ ) { if( chan_freq_list[i][1] == frequency ) { return 1; } } return 0; } // wl_is_a_valid_freq /*============================================================================*/ /******************************************************************************* * wl_get_freq_from_chan() ******************************************************************************* * * DESCRIPTION: * * Function used to look up the frequency for a given channel on which the * adapter is Tx/Rx. 
* * PARAMETERS: * * channel - the channel * * RETURNS: * * The corresponding frequency * ******************************************************************************/ long wl_get_freq_from_chan( int channel ) { int i; /*------------------------------------------------------------------------*/ /* Strip out the high bit set by the FW for 802.11a channels */ if( channel & 0x100 ) { channel = channel & 0x0FF; } /* Iterate through the matrix and retrieve the frequency */ for( i = 0; i < MAX_CHAN_FREQ_MAP_ENTRIES; i++ ) { if( chan_freq_list[i][0] == channel ) { return chan_freq_list[i][1]; } } return 0; } // wl_get_freq_from_chan /*============================================================================*/ /******************************************************************************* * wl_get_chan_from_freq() ******************************************************************************* * * DESCRIPTION: * * Function used to look up the channel for a given frequency on which the * adapter is Tx/Rx. * * PARAMETERS: * * frequency - the frequency * * RETURNS: * * The corresponding channel * ******************************************************************************/ int wl_get_chan_from_freq( long frequency ) { int i; /*------------------------------------------------------------------------*/ /* Iterate through the matrix and retrieve the channel */ for( i = 0; i < MAX_CHAN_FREQ_MAP_ENTRIES; i++ ) { if( chan_freq_list[i][1] == frequency ) { return chan_freq_list[i][0]; } } return 0; } // wl_get_chan_from_freq /*============================================================================*/ /******************************************************************************* * wl_process_link_status() ******************************************************************************* * * DESCRIPTION: * * Process the link status message signaled by the device. 
* * PARAMETERS: * * lp - a pointer to the device's private structure * * RETURNS: * * N/A * ******************************************************************************/ void wl_process_link_status( struct wl_private *lp ) { hcf_16 link_stat; /*------------------------------------------------------------------------*/ DBG_FUNC( "wl_process_link_status" ); DBG_ENTER( DbgInfo ); if( lp != NULL ) { //link_stat = lp->hcfCtx.IFB_DSLinkStat & CFG_LINK_STAT_FW; link_stat = lp->hcfCtx.IFB_LinkStat & CFG_LINK_STAT_FW; switch( link_stat ) { case 1: DBG_TRACE( DbgInfo, "Link Status : Connected\n" ); wl_wext_event_ap( lp->dev ); break; case 2: DBG_TRACE( DbgInfo, "Link Status : Disconnected\n" ); break; case 3: DBG_TRACE( DbgInfo, "Link Status : Access Point Change\n" ); break; case 4: DBG_TRACE( DbgInfo, "Link Status : Access Point Out of Range\n" ); break; case 5: DBG_TRACE( DbgInfo, "Link Status : Access Point In Range\n" ); break; default: DBG_TRACE( DbgInfo, "Link Status : UNKNOWN (0x%04x)\n", link_stat ); break; } } DBG_LEAVE( DbgInfo ); return; } // wl_process_link_status /*============================================================================*/ /******************************************************************************* * wl_process_probe_response() ******************************************************************************* * * DESCRIPTION: * * Process the probe responses retunred by the device as a result of an * active scan. 
* * PARAMETERS: * * lp - a pointer to the device's private structure * * RETURNS: * * N/A * ******************************************************************************/ void wl_process_probe_response( struct wl_private *lp ) { PROBE_RESP *probe_rsp; hcf_8 *wpa_ie = NULL; hcf_16 wpa_ie_len = 0; /*------------------------------------------------------------------------*/ DBG_FUNC( "wl_process_probe_response" ); DBG_ENTER( DbgInfo ); if( lp != NULL ) { probe_rsp = (PROBE_RESP *)&lp->ProbeResp; wl_endian_translate_event( (ltv_t *)probe_rsp ); DBG_TRACE( DbgInfo, "(%s) =========================\n", lp->dev->name ); DBG_TRACE( DbgInfo, "(%s) length : 0x%04x.\n", lp->dev->name, probe_rsp->length ); if( probe_rsp->length > 1 ) { DBG_TRACE( DbgInfo, "(%s) infoType : 0x%04x.\n", lp->dev->name, probe_rsp->infoType ); DBG_TRACE( DbgInfo, "(%s) signal : 0x%02x.\n", lp->dev->name, probe_rsp->signal ); DBG_TRACE( DbgInfo, "(%s) silence : 0x%02x.\n", lp->dev->name, probe_rsp->silence ); DBG_TRACE( DbgInfo, "(%s) rxFlow : 0x%02x.\n", lp->dev->name, probe_rsp->rxFlow ); DBG_TRACE( DbgInfo, "(%s) rate : 0x%02x.\n", lp->dev->name, probe_rsp->rate ); DBG_TRACE( DbgInfo, "(%s) frame cntl : 0x%04x.\n", lp->dev->name, probe_rsp->frameControl ); DBG_TRACE( DbgInfo, "(%s) durID : 0x%04x.\n", lp->dev->name, probe_rsp->durID ); DBG_TRACE(DbgInfo, "(%s) address1 : %pM\n", lp->dev->name, probe_rsp->address1); DBG_TRACE(DbgInfo, "(%s) address2 : %pM\n", lp->dev->name, probe_rsp->address2); DBG_TRACE(DbgInfo, "(%s) BSSID : %pM\n", lp->dev->name, probe_rsp->BSSID); DBG_TRACE( DbgInfo, "(%s) sequence : 0x%04x.\n", lp->dev->name, probe_rsp->sequence ); DBG_TRACE(DbgInfo, "(%s) address4 : %pM\n", lp->dev->name, probe_rsp->address4); DBG_TRACE( DbgInfo, "(%s) datalength : 0x%04x.\n", lp->dev->name, probe_rsp->dataLength ); DBG_TRACE(DbgInfo, "(%s) DA : %pM\n", lp->dev->name, probe_rsp->DA); DBG_TRACE(DbgInfo, "(%s) SA : %pM\n", lp->dev->name, probe_rsp->SA); #ifdef WARP DBG_TRACE( DbgInfo, "(%s) 
channel : %d\n", lp->dev->name, probe_rsp->channel ); DBG_TRACE( DbgInfo, "(%s) band : %d\n", lp->dev->name, probe_rsp->band ); #else DBG_TRACE( DbgInfo, "(%s) lenType : 0x%04x.\n", lp->dev->name, probe_rsp->lenType ); #endif // WARP DBG_TRACE( DbgInfo, "(%s) timeStamp : %d.%d.%d.%d.%d.%d.%d.%d\n", lp->dev->name, probe_rsp->timeStamp[0], probe_rsp->timeStamp[1], probe_rsp->timeStamp[2], probe_rsp->timeStamp[3], probe_rsp->timeStamp[4], probe_rsp->timeStamp[5], probe_rsp->timeStamp[6], probe_rsp->timeStamp[7]); DBG_TRACE( DbgInfo, "(%s) beaconInt : 0x%04x.\n", lp->dev->name, probe_rsp->beaconInterval ); DBG_TRACE( DbgInfo, "(%s) capability : 0x%04x.\n", lp->dev->name, probe_rsp->capability ); DBG_TRACE( DbgInfo, "(%s) SSID len : 0x%04x.\n", lp->dev->name, probe_rsp->rawData[1] ); if( probe_rsp->rawData[1] > 0 ) { char ssid[HCF_MAX_NAME_LEN]; memset( ssid, 0, sizeof( ssid )); strncpy( ssid, &probe_rsp->rawData[2], probe_rsp->rawData[1] ); DBG_TRACE( DbgInfo, "(%s) SSID : %s\n", lp->dev->name, ssid ); } /* Parse out the WPA-IE, if one exists */ wpa_ie = wl_parse_wpa_ie( probe_rsp, &wpa_ie_len ); if( wpa_ie != NULL ) { DBG_TRACE( DbgInfo, "(%s) WPA-IE : %s\n", lp->dev->name, wl_print_wpa_ie( wpa_ie, wpa_ie_len )); } DBG_TRACE( DbgInfo, "(%s) flags : 0x%04x.\n", lp->dev->name, probe_rsp->flags ); } DBG_TRACE( DbgInfo, "\n" ); /* If probe response length is 1, then the scan is complete */ if( probe_rsp->length == 1 ) { DBG_TRACE( DbgInfo, "SCAN COMPLETE\n" ); lp->probe_results.num_aps = lp->probe_num_aps; lp->probe_results.scan_complete = TRUE; /* Reset the counter for the next scan request */ lp->probe_num_aps = 0; /* Send a wireless extensions event that the scan completed */ wl_wext_event_scan_complete( lp->dev ); } else { /* Only copy to the table if the entry is unique; APs sometimes respond more than once to a probe */ if( lp->probe_num_aps == 0 ) { /* Copy the info to the ScanResult structure in the private adapter struct */ memcpy( &( 
lp->probe_results.ProbeTable[lp->probe_num_aps] ), probe_rsp, sizeof( PROBE_RESP )); /* Increment the number of APs detected */ lp->probe_num_aps++; } else { int count; int unique = 1; for( count = 0; count < lp->probe_num_aps; count++ ) { if( memcmp( &( probe_rsp->BSSID ), lp->probe_results.ProbeTable[count].BSSID, ETH_ALEN ) == 0 ) { unique = 0; } } if( unique ) { /* Copy the info to the ScanResult structure in the private adapter struct. Only copy if there's room in the table */ if( lp->probe_num_aps < MAX_NAPS ) { memcpy( &( lp->probe_results.ProbeTable[lp->probe_num_aps] ), probe_rsp, sizeof( PROBE_RESP )); } else { DBG_WARNING( DbgInfo, "Num of scan results exceeds storage, truncating\n" ); } /* Increment the number of APs detected. Note I do this here even when I don't copy the probe response to the buffer in order to detect the overflow condition */ lp->probe_num_aps++; } } } } DBG_LEAVE( DbgInfo ); return; } // wl_process_probe_response /*============================================================================*/ /******************************************************************************* * wl_process_updated_record() ******************************************************************************* * * DESCRIPTION: * * Process the updated information record message signaled by the device. 
* * PARAMETERS: * * lp - a pointer to the device's private structure * * RETURNS: * * N/A * ******************************************************************************/ void wl_process_updated_record( struct wl_private *lp ) { DBG_FUNC( "wl_process_updated_record" ); DBG_ENTER( DbgInfo ); if( lp != NULL ) { lp->updatedRecord.u.u16[0] = CNV_LITTLE_TO_INT( lp->updatedRecord.u.u16[0] ); switch( lp->updatedRecord.u.u16[0] ) { case CFG_CUR_COUNTRY_INFO: DBG_TRACE( DbgInfo, "Updated Record: CFG_CUR_COUNTRY_INFO\n" ); wl_connect( lp ); break; case CFG_PORT_STAT: DBG_TRACE( DbgInfo, "Updated Record: WAIT_FOR_CONNECT (0xFD40)\n" ); //wl_connect( lp ); break; default: DBG_TRACE( DbgInfo, "UNKNOWN: 0x%04x\n", lp->updatedRecord.u.u16[0] ); } } DBG_LEAVE( DbgInfo ); return; } // wl_process_updated_record /*============================================================================*/ /******************************************************************************* * wl_process_assoc_status() ******************************************************************************* * * DESCRIPTION: * * Process the association status event signaled by the device. 
* * PARAMETERS: * * lp - a pointer to the device's private structure * * RETURNS: * * N/A * ******************************************************************************/ void wl_process_assoc_status( struct wl_private *lp ) { ASSOC_STATUS_STRCT *assoc_stat; /*------------------------------------------------------------------------*/ DBG_FUNC( "wl_process_assoc_status" ); DBG_ENTER( DbgInfo ); if( lp != NULL ) { assoc_stat = (ASSOC_STATUS_STRCT *)&lp->assoc_stat; wl_endian_translate_event( (ltv_t *)assoc_stat ); switch( assoc_stat->assocStatus ) { case 1: DBG_TRACE( DbgInfo, "Association Status : STA Associated\n" ); break; case 2: DBG_TRACE( DbgInfo, "Association Status : STA Reassociated\n" ); break; case 3: DBG_TRACE( DbgInfo, "Association Status : STA Disassociated\n" ); break; default: DBG_TRACE( DbgInfo, "Association Status : UNKNOWN (0x%04x)\n", assoc_stat->assocStatus ); break; } DBG_TRACE(DbgInfo, "STA Address : %pM\n", assoc_stat->staAddr); if(( assoc_stat->assocStatus == 2 ) && ( assoc_stat->len == 8 )) { DBG_TRACE(DbgInfo, "Old AP Address : %pM\n", assoc_stat->oldApAddr); } } DBG_LEAVE( DbgInfo ); return; } // wl_process_assoc_status /*============================================================================*/ /******************************************************************************* * wl_process_security_status() ******************************************************************************* * * DESCRIPTION: * * Process the security status message signaled by the device. 
* * PARAMETERS: * * lp - a pointer to the device's private structure * * RETURNS: * * N/A * ******************************************************************************/ void wl_process_security_status( struct wl_private *lp ) { SECURITY_STATUS_STRCT *sec_stat; /*------------------------------------------------------------------------*/ DBG_FUNC( "wl_process_security_status" ); DBG_ENTER( DbgInfo ); if( lp != NULL ) { sec_stat = (SECURITY_STATUS_STRCT *)&lp->sec_stat; wl_endian_translate_event( (ltv_t *)sec_stat ); switch( sec_stat->securityStatus ) { case 1: DBG_TRACE( DbgInfo, "Security Status : Dissassociate [AP]\n" ); break; case 2: DBG_TRACE( DbgInfo, "Security Status : Deauthenticate [AP]\n" ); break; case 3: DBG_TRACE( DbgInfo, "Security Status : Authenticate Fail [STA] or [AP]\n" ); break; case 4: DBG_TRACE( DbgInfo, "Security Status : MIC Fail\n" ); break; case 5: DBG_TRACE( DbgInfo, "Security Status : Associate Fail\n" ); break; default: DBG_TRACE( DbgInfo, "Security Status : UNKNOWN (0x%04x)\n", sec_stat->securityStatus ); break; } DBG_TRACE(DbgInfo, "STA Address : %pM\n", sec_stat->staAddr); DBG_TRACE(DbgInfo, "Reason : 0x%04x\n", sec_stat->reason); } DBG_LEAVE( DbgInfo ); return; } // wl_process_security_status /*============================================================================*/ int wl_get_tallies(struct wl_private *lp, CFG_HERMES_TALLIES_STRCT *tallies) { int ret = 0; int status; CFG_HERMES_TALLIES_STRCT *pTallies; DBG_FUNC( "wl_get_tallies" ); DBG_ENTER(DbgInfo); /* Get the current tallies from the adapter */ lp->ltvRecord.len = 1 + HCF_TOT_TAL_CNT * sizeof(hcf_16); lp->ltvRecord.typ = CFG_TALLIES; status = hcf_get_info(&(lp->hcfCtx), (LTVP)&(lp->ltvRecord)); if( status == HCF_SUCCESS ) { pTallies = (CFG_HERMES_TALLIES_STRCT *)&(lp->ltvRecord.u.u32); memcpy(tallies, pTallies, sizeof(*tallies)); DBG_TRACE( DbgInfo, "Get tallies okay, dixe: %d\n", sizeof(*tallies) ); } else { DBG_TRACE( DbgInfo, "Get tallies failed\n" ); ret = -EFAULT; } 
DBG_LEAVE( DbgInfo ); return ret; }
gpl-2.0
kashifmin/BLU_LIFE_ONE
drivers/net/hippi/rrunner.c
4799
42522
/* * rrunner.c: Linux driver for the Essential RoadRunner HIPPI board. * * Copyright (C) 1998-2002 by Jes Sorensen, <jes@wildopensource.com>. * * Thanks to Essential Communication for providing us with hardware * and very comprehensive documentation without which I would not have * been able to write this driver. A special thank you to John Gibbon * for sorting out the legal issues, with the NDA, allowing the code to * be released under the GPL. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Thanks to Jayaram Bhat from ODS/Essential for fixing some of the * stupid bugs in my code. * * Softnet support and various other patches from Val Henson of * ODS/Essential. * * PCI DMA mapping code partly based on work by Francois Romieu. */ #define DEBUG 1 #define RX_DMA_SKBUFF 1 #define PKT_COPY_THRESHOLD 512 #include <linux/module.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/hippidevice.h> #include <linux/skbuff.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/mm.h> #include <linux/slab.h> #include <net/sock.h> #include <asm/cache.h> #include <asm/byteorder.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/uaccess.h> #define rr_if_busy(dev) netif_queue_stopped(dev) #define rr_if_running(dev) netif_running(dev) #include "rrunner.h" #define RUN_AT(x) (jiffies + (x)) MODULE_AUTHOR("Jes Sorensen <jes@wildopensource.com>"); MODULE_DESCRIPTION("Essential RoadRunner HIPPI driver"); MODULE_LICENSE("GPL"); static char version[] __devinitdata = "rrunner.c: v0.50 11/11/2002 Jes Sorensen (jes@wildopensource.com)\n"; static const struct net_device_ops rr_netdev_ops = { .ndo_open = rr_open, .ndo_stop = rr_close, .ndo_do_ioctl = 
rr_ioctl, .ndo_start_xmit = rr_start_xmit, .ndo_change_mtu = hippi_change_mtu, .ndo_set_mac_address = hippi_mac_addr, }; /* * Implementation notes: * * The DMA engine only allows for DMA within physical 64KB chunks of * memory. The current approach of the driver (and stack) is to use * linear blocks of memory for the skbuffs. However, as the data block * is always the first part of the skb and skbs are 2^n aligned so we * are guarantted to get the whole block within one 64KB align 64KB * chunk. * * On the long term, relying on being able to allocate 64KB linear * chunks of memory is not feasible and the skb handling code and the * stack will need to know about I/O vectors or something similar. */ static int __devinit rr_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *dev; static int version_disp; u8 pci_latency; struct rr_private *rrpriv; void *tmpptr; dma_addr_t ring_dma; int ret = -ENOMEM; dev = alloc_hippi_dev(sizeof(struct rr_private)); if (!dev) goto out3; ret = pci_enable_device(pdev); if (ret) { ret = -ENODEV; goto out2; } rrpriv = netdev_priv(dev); SET_NETDEV_DEV(dev, &pdev->dev); if (pci_request_regions(pdev, "rrunner")) { ret = -EIO; goto out; } pci_set_drvdata(pdev, dev); rrpriv->pci_dev = pdev; spin_lock_init(&rrpriv->lock); dev->irq = pdev->irq; dev->netdev_ops = &rr_netdev_ops; dev->base_addr = pci_resource_start(pdev, 0); /* display version info if adapter is found */ if (!version_disp) { /* set display flag to TRUE so that */ /* we only display this string ONCE */ version_disp = 1; printk(version); } pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency); if (pci_latency <= 0x58){ pci_latency = 0x58; pci_write_config_byte(pdev, PCI_LATENCY_TIMER, pci_latency); } pci_set_master(pdev); printk(KERN_INFO "%s: Essential RoadRunner serial HIPPI " "at 0x%08lx, irq %i, PCI latency %i\n", dev->name, dev->base_addr, dev->irq, pci_latency); /* * Remap the regs into kernel space. 
*/ rrpriv->regs = ioremap(dev->base_addr, 0x1000); if (!rrpriv->regs){ printk(KERN_ERR "%s: Unable to map I/O register, " "RoadRunner will be disabled.\n", dev->name); ret = -EIO; goto out; } tmpptr = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma); rrpriv->tx_ring = tmpptr; rrpriv->tx_ring_dma = ring_dma; if (!tmpptr) { ret = -ENOMEM; goto out; } tmpptr = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma); rrpriv->rx_ring = tmpptr; rrpriv->rx_ring_dma = ring_dma; if (!tmpptr) { ret = -ENOMEM; goto out; } tmpptr = pci_alloc_consistent(pdev, EVT_RING_SIZE, &ring_dma); rrpriv->evt_ring = tmpptr; rrpriv->evt_ring_dma = ring_dma; if (!tmpptr) { ret = -ENOMEM; goto out; } /* * Don't access any register before this point! */ #ifdef __BIG_ENDIAN writel(readl(&rrpriv->regs->HostCtrl) | NO_SWAP, &rrpriv->regs->HostCtrl); #endif /* * Need to add a case for little-endian 64-bit hosts here. */ rr_init(dev); dev->base_addr = 0; ret = register_netdev(dev); if (ret) goto out; return 0; out: if (rrpriv->rx_ring) pci_free_consistent(pdev, RX_TOTAL_SIZE, rrpriv->rx_ring, rrpriv->rx_ring_dma); if (rrpriv->tx_ring) pci_free_consistent(pdev, TX_TOTAL_SIZE, rrpriv->tx_ring, rrpriv->tx_ring_dma); if (rrpriv->regs) iounmap(rrpriv->regs); if (pdev) { pci_release_regions(pdev); pci_set_drvdata(pdev, NULL); } out2: free_netdev(dev); out3: return ret; } static void __devexit rr_remove_one (struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); if (dev) { struct rr_private *rr = netdev_priv(dev); if (!(readl(&rr->regs->HostCtrl) & NIC_HALTED)){ printk(KERN_ERR "%s: trying to unload running NIC\n", dev->name); writel(HALT_NIC, &rr->regs->HostCtrl); } pci_free_consistent(pdev, EVT_RING_SIZE, rr->evt_ring, rr->evt_ring_dma); pci_free_consistent(pdev, RX_TOTAL_SIZE, rr->rx_ring, rr->rx_ring_dma); pci_free_consistent(pdev, TX_TOTAL_SIZE, rr->tx_ring, rr->tx_ring_dma); unregister_netdev(dev); iounmap(rr->regs); free_netdev(dev); pci_release_regions(pdev); 
pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); } } /* * Commands are considered to be slow, thus there is no reason to * inline this. */ static void rr_issue_cmd(struct rr_private *rrpriv, struct cmd *cmd) { struct rr_regs __iomem *regs; u32 idx; regs = rrpriv->regs; /* * This is temporary - it will go away in the final version. * We probably also want to make this function inline. */ if (readl(&regs->HostCtrl) & NIC_HALTED){ printk("issuing command for halted NIC, code 0x%x, " "HostCtrl %08x\n", cmd->code, readl(&regs->HostCtrl)); if (readl(&regs->Mode) & FATAL_ERR) printk("error codes Fail1 %02x, Fail2 %02x\n", readl(&regs->Fail1), readl(&regs->Fail2)); } idx = rrpriv->info->cmd_ctrl.pi; writel(*(u32*)(cmd), &regs->CmdRing[idx]); wmb(); idx = (idx - 1) % CMD_RING_ENTRIES; rrpriv->info->cmd_ctrl.pi = idx; wmb(); if (readl(&regs->Mode) & FATAL_ERR) printk("error code %02x\n", readl(&regs->Fail1)); } /* * Reset the board in a sensible manner. The NIC is already halted * when we get here and a spin-lock is held. 
*/ static int rr_reset(struct net_device *dev) { struct rr_private *rrpriv; struct rr_regs __iomem *regs; u32 start_pc; int i; rrpriv = netdev_priv(dev); regs = rrpriv->regs; rr_load_firmware(dev); writel(0x01000000, &regs->TX_state); writel(0xff800000, &regs->RX_state); writel(0, &regs->AssistState); writel(CLEAR_INTA, &regs->LocalCtrl); writel(0x01, &regs->BrkPt); writel(0, &regs->Timer); writel(0, &regs->TimerRef); writel(RESET_DMA, &regs->DmaReadState); writel(RESET_DMA, &regs->DmaWriteState); writel(0, &regs->DmaWriteHostHi); writel(0, &regs->DmaWriteHostLo); writel(0, &regs->DmaReadHostHi); writel(0, &regs->DmaReadHostLo); writel(0, &regs->DmaReadLen); writel(0, &regs->DmaWriteLen); writel(0, &regs->DmaWriteLcl); writel(0, &regs->DmaWriteIPchecksum); writel(0, &regs->DmaReadLcl); writel(0, &regs->DmaReadIPchecksum); writel(0, &regs->PciState); #if (BITS_PER_LONG == 64) && defined __LITTLE_ENDIAN writel(SWAP_DATA | PTR64BIT | PTR_WD_SWAP, &regs->Mode); #elif (BITS_PER_LONG == 64) writel(SWAP_DATA | PTR64BIT | PTR_WD_NOSWAP, &regs->Mode); #else writel(SWAP_DATA | PTR32BIT | PTR_WD_NOSWAP, &regs->Mode); #endif #if 0 /* * Don't worry, this is just black magic. 
*/ writel(0xdf000, &regs->RxBase); writel(0xdf000, &regs->RxPrd); writel(0xdf000, &regs->RxCon); writel(0xce000, &regs->TxBase); writel(0xce000, &regs->TxPrd); writel(0xce000, &regs->TxCon); writel(0, &regs->RxIndPro); writel(0, &regs->RxIndCon); writel(0, &regs->RxIndRef); writel(0, &regs->TxIndPro); writel(0, &regs->TxIndCon); writel(0, &regs->TxIndRef); writel(0xcc000, &regs->pad10[0]); writel(0, &regs->DrCmndPro); writel(0, &regs->DrCmndCon); writel(0, &regs->DwCmndPro); writel(0, &regs->DwCmndCon); writel(0, &regs->DwCmndRef); writel(0, &regs->DrDataPro); writel(0, &regs->DrDataCon); writel(0, &regs->DrDataRef); writel(0, &regs->DwDataPro); writel(0, &regs->DwDataCon); writel(0, &regs->DwDataRef); #endif writel(0xffffffff, &regs->MbEvent); writel(0, &regs->Event); writel(0, &regs->TxPi); writel(0, &regs->IpRxPi); writel(0, &regs->EvtCon); writel(0, &regs->EvtPrd); rrpriv->info->evt_ctrl.pi = 0; for (i = 0; i < CMD_RING_ENTRIES; i++) writel(0, &regs->CmdRing[i]); /* * Why 32 ? is this not cache line size dependent? */ writel(RBURST_64|WBURST_64, &regs->PciState); wmb(); start_pc = rr_read_eeprom_word(rrpriv, offsetof(struct eeprom, rncd_info.FwStart)); #if (DEBUG > 1) printk("%s: Executing firmware at address 0x%06x\n", dev->name, start_pc); #endif writel(start_pc + 0x800, &regs->Pc); wmb(); udelay(5); writel(start_pc, &regs->Pc); wmb(); return 0; } /* * Read a string from the EEPROM. 
*/ static unsigned int rr_read_eeprom(struct rr_private *rrpriv, unsigned long offset, unsigned char *buf, unsigned long length) { struct rr_regs __iomem *regs = rrpriv->regs; u32 misc, io, host, i; io = readl(&regs->ExtIo); writel(0, &regs->ExtIo); misc = readl(&regs->LocalCtrl); writel(0, &regs->LocalCtrl); host = readl(&regs->HostCtrl); writel(host | HALT_NIC, &regs->HostCtrl); mb(); for (i = 0; i < length; i++){ writel((EEPROM_BASE + ((offset+i) << 3)), &regs->WinBase); mb(); buf[i] = (readl(&regs->WinData) >> 24) & 0xff; mb(); } writel(host, &regs->HostCtrl); writel(misc, &regs->LocalCtrl); writel(io, &regs->ExtIo); mb(); return i; } /* * Shortcut to read one word (4 bytes) out of the EEPROM and convert * it to our CPU byte-order. */ static u32 rr_read_eeprom_word(struct rr_private *rrpriv, size_t offset) { __be32 word; if ((rr_read_eeprom(rrpriv, offset, (unsigned char *)&word, 4) == 4)) return be32_to_cpu(word); return 0; } /* * Write a string to the EEPROM. * * This is only called when the firmware is not running. */ static unsigned int write_eeprom(struct rr_private *rrpriv, unsigned long offset, unsigned char *buf, unsigned long length) { struct rr_regs __iomem *regs = rrpriv->regs; u32 misc, io, data, i, j, ready, error = 0; io = readl(&regs->ExtIo); writel(0, &regs->ExtIo); misc = readl(&regs->LocalCtrl); writel(ENABLE_EEPROM_WRITE, &regs->LocalCtrl); mb(); for (i = 0; i < length; i++){ writel((EEPROM_BASE + ((offset+i) << 3)), &regs->WinBase); mb(); data = buf[i] << 24; /* * Only try to write the data if it is not the same * value already. 
*/ if ((readl(&regs->WinData) & 0xff000000) != data){ writel(data, &regs->WinData); ready = 0; j = 0; mb(); while(!ready){ udelay(20); if ((readl(&regs->WinData) & 0xff000000) == data) ready = 1; mb(); if (j++ > 5000){ printk("data mismatch: %08x, " "WinData %08x\n", data, readl(&regs->WinData)); ready = 1; error = 1; } } } } writel(misc, &regs->LocalCtrl); writel(io, &regs->ExtIo); mb(); return error; } static int __devinit rr_init(struct net_device *dev) { struct rr_private *rrpriv; struct rr_regs __iomem *regs; u32 sram_size, rev; rrpriv = netdev_priv(dev); regs = rrpriv->regs; rev = readl(&regs->FwRev); rrpriv->fw_rev = rev; if (rev > 0x00020024) printk(" Firmware revision: %i.%i.%i\n", (rev >> 16), ((rev >> 8) & 0xff), (rev & 0xff)); else if (rev >= 0x00020000) { printk(" Firmware revision: %i.%i.%i (2.0.37 or " "later is recommended)\n", (rev >> 16), ((rev >> 8) & 0xff), (rev & 0xff)); }else{ printk(" Firmware revision too old: %i.%i.%i, please " "upgrade to 2.0.37 or later.\n", (rev >> 16), ((rev >> 8) & 0xff), (rev & 0xff)); } #if (DEBUG > 2) printk(" Maximum receive rings %i\n", readl(&regs->MaxRxRng)); #endif /* * Read the hardware address from the eeprom. The HW address * is not really necessary for HIPPI but awfully convenient. * The pointer arithmetic to put it in dev_addr is ugly, but * Donald Becker does it this way for the GigE version of this * card and it's shorter and more portable than any * other method I've seen. 
-VAL */ *(__be16 *)(dev->dev_addr) = htons(rr_read_eeprom_word(rrpriv, offsetof(struct eeprom, manf.BoardULA))); *(__be32 *)(dev->dev_addr+2) = htonl(rr_read_eeprom_word(rrpriv, offsetof(struct eeprom, manf.BoardULA[4]))); printk(" MAC: %pM\n", dev->dev_addr); sram_size = rr_read_eeprom_word(rrpriv, 8); printk(" SRAM size 0x%06x\n", sram_size); return 0; } static int rr_init1(struct net_device *dev) { struct rr_private *rrpriv; struct rr_regs __iomem *regs; unsigned long myjif, flags; struct cmd cmd; u32 hostctrl; int ecode = 0; short i; rrpriv = netdev_priv(dev); regs = rrpriv->regs; spin_lock_irqsave(&rrpriv->lock, flags); hostctrl = readl(&regs->HostCtrl); writel(hostctrl | HALT_NIC | RR_CLEAR_INT, &regs->HostCtrl); wmb(); if (hostctrl & PARITY_ERR){ printk("%s: Parity error halting NIC - this is serious!\n", dev->name); spin_unlock_irqrestore(&rrpriv->lock, flags); ecode = -EFAULT; goto error; } set_rxaddr(regs, rrpriv->rx_ctrl_dma); set_infoaddr(regs, rrpriv->info_dma); rrpriv->info->evt_ctrl.entry_size = sizeof(struct event); rrpriv->info->evt_ctrl.entries = EVT_RING_ENTRIES; rrpriv->info->evt_ctrl.mode = 0; rrpriv->info->evt_ctrl.pi = 0; set_rraddr(&rrpriv->info->evt_ctrl.rngptr, rrpriv->evt_ring_dma); rrpriv->info->cmd_ctrl.entry_size = sizeof(struct cmd); rrpriv->info->cmd_ctrl.entries = CMD_RING_ENTRIES; rrpriv->info->cmd_ctrl.mode = 0; rrpriv->info->cmd_ctrl.pi = 15; for (i = 0; i < CMD_RING_ENTRIES; i++) { writel(0, &regs->CmdRing[i]); } for (i = 0; i < TX_RING_ENTRIES; i++) { rrpriv->tx_ring[i].size = 0; set_rraddr(&rrpriv->tx_ring[i].addr, 0); rrpriv->tx_skbuff[i] = NULL; } rrpriv->info->tx_ctrl.entry_size = sizeof(struct tx_desc); rrpriv->info->tx_ctrl.entries = TX_RING_ENTRIES; rrpriv->info->tx_ctrl.mode = 0; rrpriv->info->tx_ctrl.pi = 0; set_rraddr(&rrpriv->info->tx_ctrl.rngptr, rrpriv->tx_ring_dma); /* * Set dirty_tx before we start receiving interrupts, otherwise * the interrupt handler might think it is supposed to process * tx ints before we 
are up and running, which may cause a null * pointer access in the int handler. */ rrpriv->tx_full = 0; rrpriv->cur_rx = 0; rrpriv->dirty_rx = rrpriv->dirty_tx = 0; rr_reset(dev); /* Tuning values */ writel(0x5000, &regs->ConRetry); writel(0x100, &regs->ConRetryTmr); writel(0x500000, &regs->ConTmout); writel(0x60, &regs->IntrTmr); writel(0x500000, &regs->TxDataMvTimeout); writel(0x200000, &regs->RxDataMvTimeout); writel(0x80, &regs->WriteDmaThresh); writel(0x80, &regs->ReadDmaThresh); rrpriv->fw_running = 0; wmb(); hostctrl &= ~(HALT_NIC | INVALID_INST_B | PARITY_ERR); writel(hostctrl, &regs->HostCtrl); wmb(); spin_unlock_irqrestore(&rrpriv->lock, flags); for (i = 0; i < RX_RING_ENTRIES; i++) { struct sk_buff *skb; dma_addr_t addr; rrpriv->rx_ring[i].mode = 0; skb = alloc_skb(dev->mtu + HIPPI_HLEN, GFP_ATOMIC); if (!skb) { printk(KERN_WARNING "%s: Unable to allocate memory " "for receive ring - halting NIC\n", dev->name); ecode = -ENOMEM; goto error; } rrpriv->rx_skbuff[i] = skb; addr = pci_map_single(rrpriv->pci_dev, skb->data, dev->mtu + HIPPI_HLEN, PCI_DMA_FROMDEVICE); /* * Sanity test to see if we conflict with the DMA * limitations of the Roadrunner. */ if ((((unsigned long)skb->data) & 0xfff) > ~65320) printk("skb alloc error\n"); set_rraddr(&rrpriv->rx_ring[i].addr, addr); rrpriv->rx_ring[i].size = dev->mtu + HIPPI_HLEN; } rrpriv->rx_ctrl[4].entry_size = sizeof(struct rx_desc); rrpriv->rx_ctrl[4].entries = RX_RING_ENTRIES; rrpriv->rx_ctrl[4].mode = 8; rrpriv->rx_ctrl[4].pi = 0; wmb(); set_rraddr(&rrpriv->rx_ctrl[4].rngptr, rrpriv->rx_ring_dma); udelay(1000); /* * Now start the FirmWare. */ cmd.code = C_START_FW; cmd.ring = 0; cmd.index = 0; rr_issue_cmd(rrpriv, &cmd); /* * Give the FirmWare time to chew on the `get running' command. 
*/ myjif = jiffies + 5 * HZ; while (time_before(jiffies, myjif) && !rrpriv->fw_running) cpu_relax(); netif_start_queue(dev); return ecode; error: /* * We might have gotten here because we are out of memory, * make sure we release everything we allocated before failing */ for (i = 0; i < RX_RING_ENTRIES; i++) { struct sk_buff *skb = rrpriv->rx_skbuff[i]; if (skb) { pci_unmap_single(rrpriv->pci_dev, rrpriv->rx_ring[i].addr.addrlo, dev->mtu + HIPPI_HLEN, PCI_DMA_FROMDEVICE); rrpriv->rx_ring[i].size = 0; set_rraddr(&rrpriv->rx_ring[i].addr, 0); dev_kfree_skb(skb); rrpriv->rx_skbuff[i] = NULL; } } return ecode; } /* * All events are considered to be slow (RX/TX ints do not generate * events) and are handled here, outside the main interrupt handler, * to reduce the size of the handler. */ static u32 rr_handle_event(struct net_device *dev, u32 prodidx, u32 eidx) { struct rr_private *rrpriv; struct rr_regs __iomem *regs; u32 tmp; rrpriv = netdev_priv(dev); regs = rrpriv->regs; while (prodidx != eidx){ switch (rrpriv->evt_ring[eidx].code){ case E_NIC_UP: tmp = readl(&regs->FwRev); printk(KERN_INFO "%s: Firmware revision %i.%i.%i " "up and running\n", dev->name, (tmp >> 16), ((tmp >> 8) & 0xff), (tmp & 0xff)); rrpriv->fw_running = 1; writel(RX_RING_ENTRIES - 1, &regs->IpRxPi); wmb(); break; case E_LINK_ON: printk(KERN_INFO "%s: Optical link ON\n", dev->name); break; case E_LINK_OFF: printk(KERN_INFO "%s: Optical link OFF\n", dev->name); break; case E_RX_IDLE: printk(KERN_WARNING "%s: RX data not moving\n", dev->name); goto drop; case E_WATCHDOG: printk(KERN_INFO "%s: The watchdog is here to see " "us\n", dev->name); break; case E_INTERN_ERR: printk(KERN_ERR "%s: HIPPI Internal NIC error\n", dev->name); writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl); wmb(); break; case E_HOST_ERR: printk(KERN_ERR "%s: Host software error\n", dev->name); writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl); wmb(); break; /* * TX events. 
*/ case E_CON_REJ: printk(KERN_WARNING "%s: Connection rejected\n", dev->name); dev->stats.tx_aborted_errors++; break; case E_CON_TMOUT: printk(KERN_WARNING "%s: Connection timeout\n", dev->name); break; case E_DISC_ERR: printk(KERN_WARNING "%s: HIPPI disconnect error\n", dev->name); dev->stats.tx_aborted_errors++; break; case E_INT_PRTY: printk(KERN_ERR "%s: HIPPI Internal Parity error\n", dev->name); writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl); wmb(); break; case E_TX_IDLE: printk(KERN_WARNING "%s: Transmitter idle\n", dev->name); break; case E_TX_LINK_DROP: printk(KERN_WARNING "%s: Link lost during transmit\n", dev->name); dev->stats.tx_aborted_errors++; writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl); wmb(); break; case E_TX_INV_RNG: printk(KERN_ERR "%s: Invalid send ring block\n", dev->name); writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl); wmb(); break; case E_TX_INV_BUF: printk(KERN_ERR "%s: Invalid send buffer address\n", dev->name); writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl); wmb(); break; case E_TX_INV_DSC: printk(KERN_ERR "%s: Invalid descriptor address\n", dev->name); writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl); wmb(); break; /* * RX events. */ case E_RX_RNG_OUT: printk(KERN_INFO "%s: Receive ring full\n", dev->name); break; case E_RX_PAR_ERR: printk(KERN_WARNING "%s: Receive parity error\n", dev->name); goto drop; case E_RX_LLRC_ERR: printk(KERN_WARNING "%s: Receive LLRC error\n", dev->name); goto drop; case E_PKT_LN_ERR: printk(KERN_WARNING "%s: Receive packet length " "error\n", dev->name); goto drop; case E_DTA_CKSM_ERR: printk(KERN_WARNING "%s: Data checksum error\n", dev->name); goto drop; case E_SHT_BST: printk(KERN_WARNING "%s: Unexpected short burst " "error\n", dev->name); goto drop; case E_STATE_ERR: printk(KERN_WARNING "%s: Recv. 
state transition" " error\n", dev->name); goto drop; case E_UNEXP_DATA: printk(KERN_WARNING "%s: Unexpected data error\n", dev->name); goto drop; case E_LST_LNK_ERR: printk(KERN_WARNING "%s: Link lost error\n", dev->name); goto drop; case E_FRM_ERR: printk(KERN_WARNING "%s: Framming Error\n", dev->name); goto drop; case E_FLG_SYN_ERR: printk(KERN_WARNING "%s: Flag sync. lost during " "packet\n", dev->name); goto drop; case E_RX_INV_BUF: printk(KERN_ERR "%s: Invalid receive buffer " "address\n", dev->name); writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl); wmb(); break; case E_RX_INV_DSC: printk(KERN_ERR "%s: Invalid receive descriptor " "address\n", dev->name); writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl); wmb(); break; case E_RNG_BLK: printk(KERN_ERR "%s: Invalid ring block\n", dev->name); writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl); wmb(); break; drop: /* Label packet to be dropped. * Actual dropping occurs in rx * handling. * * The index of packet we get to drop is * the index of the packet following * the bad packet. 
-kbf */ { u16 index = rrpriv->evt_ring[eidx].index; index = (index + (RX_RING_ENTRIES - 1)) % RX_RING_ENTRIES; rrpriv->rx_ring[index].mode |= (PACKET_BAD | PACKET_END); } break; default: printk(KERN_WARNING "%s: Unhandled event 0x%02x\n", dev->name, rrpriv->evt_ring[eidx].code); } eidx = (eidx + 1) % EVT_RING_ENTRIES; } rrpriv->info->evt_ctrl.pi = eidx; wmb(); return eidx; } static void rx_int(struct net_device *dev, u32 rxlimit, u32 index) { struct rr_private *rrpriv = netdev_priv(dev); struct rr_regs __iomem *regs = rrpriv->regs; do { struct rx_desc *desc; u32 pkt_len; desc = &(rrpriv->rx_ring[index]); pkt_len = desc->size; #if (DEBUG > 2) printk("index %i, rxlimit %i\n", index, rxlimit); printk("len %x, mode %x\n", pkt_len, desc->mode); #endif if ( (rrpriv->rx_ring[index].mode & PACKET_BAD) == PACKET_BAD){ dev->stats.rx_dropped++; goto defer; } if (pkt_len > 0){ struct sk_buff *skb, *rx_skb; rx_skb = rrpriv->rx_skbuff[index]; if (pkt_len < PKT_COPY_THRESHOLD) { skb = alloc_skb(pkt_len, GFP_ATOMIC); if (skb == NULL){ printk(KERN_WARNING "%s: Unable to allocate skb (%i bytes), deferring packet\n", dev->name, pkt_len); dev->stats.rx_dropped++; goto defer; } else { pci_dma_sync_single_for_cpu(rrpriv->pci_dev, desc->addr.addrlo, pkt_len, PCI_DMA_FROMDEVICE); memcpy(skb_put(skb, pkt_len), rx_skb->data, pkt_len); pci_dma_sync_single_for_device(rrpriv->pci_dev, desc->addr.addrlo, pkt_len, PCI_DMA_FROMDEVICE); } }else{ struct sk_buff *newskb; newskb = alloc_skb(dev->mtu + HIPPI_HLEN, GFP_ATOMIC); if (newskb){ dma_addr_t addr; pci_unmap_single(rrpriv->pci_dev, desc->addr.addrlo, dev->mtu + HIPPI_HLEN, PCI_DMA_FROMDEVICE); skb = rx_skb; skb_put(skb, pkt_len); rrpriv->rx_skbuff[index] = newskb; addr = pci_map_single(rrpriv->pci_dev, newskb->data, dev->mtu + HIPPI_HLEN, PCI_DMA_FROMDEVICE); set_rraddr(&desc->addr, addr); } else { printk("%s: Out of memory, deferring " "packet\n", dev->name); dev->stats.rx_dropped++; goto defer; } } skb->protocol = hippi_type_trans(skb, dev); 
netif_rx(skb); /* send it up */ dev->stats.rx_packets++; dev->stats.rx_bytes += pkt_len; } defer: desc->mode = 0; desc->size = dev->mtu + HIPPI_HLEN; if ((index & 7) == 7) writel(index, &regs->IpRxPi); index = (index + 1) % RX_RING_ENTRIES; } while(index != rxlimit); rrpriv->cur_rx = index; wmb(); } static irqreturn_t rr_interrupt(int irq, void *dev_id) { struct rr_private *rrpriv; struct rr_regs __iomem *regs; struct net_device *dev = (struct net_device *)dev_id; u32 prodidx, rxindex, eidx, txcsmr, rxlimit, txcon; rrpriv = netdev_priv(dev); regs = rrpriv->regs; if (!(readl(&regs->HostCtrl) & RR_INT)) return IRQ_NONE; spin_lock(&rrpriv->lock); prodidx = readl(&regs->EvtPrd); txcsmr = (prodidx >> 8) & 0xff; rxlimit = (prodidx >> 16) & 0xff; prodidx &= 0xff; #if (DEBUG > 2) printk("%s: interrupt, prodidx = %i, eidx = %i\n", dev->name, prodidx, rrpriv->info->evt_ctrl.pi); #endif /* * Order here is important. We must handle events * before doing anything else in order to catch * such things as LLRC errors, etc -kbf */ eidx = rrpriv->info->evt_ctrl.pi; if (prodidx != eidx) eidx = rr_handle_event(dev, prodidx, eidx); rxindex = rrpriv->cur_rx; if (rxindex != rxlimit) rx_int(dev, rxlimit, rxindex); txcon = rrpriv->dirty_tx; if (txcsmr != txcon) { do { /* Due to occational firmware TX producer/consumer out * of sync. 
error need to check entry in ring -kbf */ if(rrpriv->tx_skbuff[txcon]){ struct tx_desc *desc; struct sk_buff *skb; desc = &(rrpriv->tx_ring[txcon]); skb = rrpriv->tx_skbuff[txcon]; dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; pci_unmap_single(rrpriv->pci_dev, desc->addr.addrlo, skb->len, PCI_DMA_TODEVICE); dev_kfree_skb_irq(skb); rrpriv->tx_skbuff[txcon] = NULL; desc->size = 0; set_rraddr(&rrpriv->tx_ring[txcon].addr, 0); desc->mode = 0; } txcon = (txcon + 1) % TX_RING_ENTRIES; } while (txcsmr != txcon); wmb(); rrpriv->dirty_tx = txcon; if (rrpriv->tx_full && rr_if_busy(dev) && (((rrpriv->info->tx_ctrl.pi + 1) % TX_RING_ENTRIES) != rrpriv->dirty_tx)){ rrpriv->tx_full = 0; netif_wake_queue(dev); } } eidx |= ((txcsmr << 8) | (rxlimit << 16)); writel(eidx, &regs->EvtCon); wmb(); spin_unlock(&rrpriv->lock); return IRQ_HANDLED; } static inline void rr_raz_tx(struct rr_private *rrpriv, struct net_device *dev) { int i; for (i = 0; i < TX_RING_ENTRIES; i++) { struct sk_buff *skb = rrpriv->tx_skbuff[i]; if (skb) { struct tx_desc *desc = &(rrpriv->tx_ring[i]); pci_unmap_single(rrpriv->pci_dev, desc->addr.addrlo, skb->len, PCI_DMA_TODEVICE); desc->size = 0; set_rraddr(&desc->addr, 0); dev_kfree_skb(skb); rrpriv->tx_skbuff[i] = NULL; } } } static inline void rr_raz_rx(struct rr_private *rrpriv, struct net_device *dev) { int i; for (i = 0; i < RX_RING_ENTRIES; i++) { struct sk_buff *skb = rrpriv->rx_skbuff[i]; if (skb) { struct rx_desc *desc = &(rrpriv->rx_ring[i]); pci_unmap_single(rrpriv->pci_dev, desc->addr.addrlo, dev->mtu + HIPPI_HLEN, PCI_DMA_FROMDEVICE); desc->size = 0; set_rraddr(&desc->addr, 0); dev_kfree_skb(skb); rrpriv->rx_skbuff[i] = NULL; } } } static void rr_timer(unsigned long data) { struct net_device *dev = (struct net_device *)data; struct rr_private *rrpriv = netdev_priv(dev); struct rr_regs __iomem *regs = rrpriv->regs; unsigned long flags; if (readl(&regs->HostCtrl) & NIC_HALTED){ printk("%s: Restarting nic\n", dev->name); 
memset(rrpriv->rx_ctrl, 0, 256 * sizeof(struct ring_ctrl)); memset(rrpriv->info, 0, sizeof(struct rr_info)); wmb(); rr_raz_tx(rrpriv, dev); rr_raz_rx(rrpriv, dev); if (rr_init1(dev)) { spin_lock_irqsave(&rrpriv->lock, flags); writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl); spin_unlock_irqrestore(&rrpriv->lock, flags); } } rrpriv->timer.expires = RUN_AT(5*HZ); add_timer(&rrpriv->timer); } static int rr_open(struct net_device *dev) { struct rr_private *rrpriv = netdev_priv(dev); struct pci_dev *pdev = rrpriv->pci_dev; struct rr_regs __iomem *regs; int ecode = 0; unsigned long flags; dma_addr_t dma_addr; regs = rrpriv->regs; if (rrpriv->fw_rev < 0x00020000) { printk(KERN_WARNING "%s: trying to configure device with " "obsolete firmware\n", dev->name); ecode = -EBUSY; goto error; } rrpriv->rx_ctrl = pci_alloc_consistent(pdev, 256 * sizeof(struct ring_ctrl), &dma_addr); if (!rrpriv->rx_ctrl) { ecode = -ENOMEM; goto error; } rrpriv->rx_ctrl_dma = dma_addr; memset(rrpriv->rx_ctrl, 0, 256*sizeof(struct ring_ctrl)); rrpriv->info = pci_alloc_consistent(pdev, sizeof(struct rr_info), &dma_addr); if (!rrpriv->info) { ecode = -ENOMEM; goto error; } rrpriv->info_dma = dma_addr; memset(rrpriv->info, 0, sizeof(struct rr_info)); wmb(); spin_lock_irqsave(&rrpriv->lock, flags); writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl); readl(&regs->HostCtrl); spin_unlock_irqrestore(&rrpriv->lock, flags); if (request_irq(dev->irq, rr_interrupt, IRQF_SHARED, dev->name, dev)) { printk(KERN_WARNING "%s: Requested IRQ %d is busy\n", dev->name, dev->irq); ecode = -EAGAIN; goto error; } if ((ecode = rr_init1(dev))) goto error; /* Set the timer to switch to check for link beat and perhaps switch to an alternate media type. */ init_timer(&rrpriv->timer); rrpriv->timer.expires = RUN_AT(5*HZ); /* 5 sec. 
watchdog */ rrpriv->timer.data = (unsigned long)dev; rrpriv->timer.function = rr_timer; /* timer handler */ add_timer(&rrpriv->timer); netif_start_queue(dev); return ecode; error: spin_lock_irqsave(&rrpriv->lock, flags); writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl); spin_unlock_irqrestore(&rrpriv->lock, flags); if (rrpriv->info) { pci_free_consistent(pdev, sizeof(struct rr_info), rrpriv->info, rrpriv->info_dma); rrpriv->info = NULL; } if (rrpriv->rx_ctrl) { pci_free_consistent(pdev, sizeof(struct ring_ctrl), rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma); rrpriv->rx_ctrl = NULL; } netif_stop_queue(dev); return ecode; } static void rr_dump(struct net_device *dev) { struct rr_private *rrpriv; struct rr_regs __iomem *regs; u32 index, cons; short i; int len; rrpriv = netdev_priv(dev); regs = rrpriv->regs; printk("%s: dumping NIC TX rings\n", dev->name); printk("RxPrd %08x, TxPrd %02x, EvtPrd %08x, TxPi %02x, TxCtrlPi %02x\n", readl(&regs->RxPrd), readl(&regs->TxPrd), readl(&regs->EvtPrd), readl(&regs->TxPi), rrpriv->info->tx_ctrl.pi); printk("Error code 0x%x\n", readl(&regs->Fail1)); index = (((readl(&regs->EvtPrd) >> 8) & 0xff) - 1) % TX_RING_ENTRIES; cons = rrpriv->dirty_tx; printk("TX ring index %i, TX consumer %i\n", index, cons); if (rrpriv->tx_skbuff[index]){ len = min_t(int, 0x80, rrpriv->tx_skbuff[index]->len); printk("skbuff for index %i is valid - dumping data (0x%x bytes - DMA len 0x%x)\n", index, len, rrpriv->tx_ring[index].size); for (i = 0; i < len; i++){ if (!(i & 7)) printk("\n"); printk("%02x ", (unsigned char) rrpriv->tx_skbuff[index]->data[i]); } printk("\n"); } if (rrpriv->tx_skbuff[cons]){ len = min_t(int, 0x80, rrpriv->tx_skbuff[cons]->len); printk("skbuff for cons %i is valid - dumping data (0x%x bytes - skbuff len 0x%x)\n", cons, len, rrpriv->tx_skbuff[cons]->len); printk("mode 0x%x, size 0x%x,\n phys %08Lx, skbuff-addr %08lx, truesize 0x%x\n", rrpriv->tx_ring[cons].mode, rrpriv->tx_ring[cons].size, (unsigned long long) 
rrpriv->tx_ring[cons].addr.addrlo, (unsigned long)rrpriv->tx_skbuff[cons]->data, (unsigned int)rrpriv->tx_skbuff[cons]->truesize); for (i = 0; i < len; i++){ if (!(i & 7)) printk("\n"); printk("%02x ", (unsigned char)rrpriv->tx_ring[cons].size); } printk("\n"); } printk("dumping TX ring info:\n"); for (i = 0; i < TX_RING_ENTRIES; i++) printk("mode 0x%x, size 0x%x, phys-addr %08Lx\n", rrpriv->tx_ring[i].mode, rrpriv->tx_ring[i].size, (unsigned long long) rrpriv->tx_ring[i].addr.addrlo); } static int rr_close(struct net_device *dev) { struct rr_private *rrpriv; struct rr_regs __iomem *regs; unsigned long flags; u32 tmp; short i; netif_stop_queue(dev); rrpriv = netdev_priv(dev); regs = rrpriv->regs; /* * Lock to make sure we are not cleaning up while another CPU * is handling interrupts. */ spin_lock_irqsave(&rrpriv->lock, flags); tmp = readl(&regs->HostCtrl); if (tmp & NIC_HALTED){ printk("%s: NIC already halted\n", dev->name); rr_dump(dev); }else{ tmp |= HALT_NIC | RR_CLEAR_INT; writel(tmp, &regs->HostCtrl); readl(&regs->HostCtrl); } rrpriv->fw_running = 0; del_timer_sync(&rrpriv->timer); writel(0, &regs->TxPi); writel(0, &regs->IpRxPi); writel(0, &regs->EvtCon); writel(0, &regs->EvtPrd); for (i = 0; i < CMD_RING_ENTRIES; i++) writel(0, &regs->CmdRing[i]); rrpriv->info->tx_ctrl.entries = 0; rrpriv->info->cmd_ctrl.pi = 0; rrpriv->info->evt_ctrl.pi = 0; rrpriv->rx_ctrl[4].entries = 0; rr_raz_tx(rrpriv, dev); rr_raz_rx(rrpriv, dev); pci_free_consistent(rrpriv->pci_dev, 256 * sizeof(struct ring_ctrl), rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma); rrpriv->rx_ctrl = NULL; pci_free_consistent(rrpriv->pci_dev, sizeof(struct rr_info), rrpriv->info, rrpriv->info_dma); rrpriv->info = NULL; free_irq(dev->irq, dev); spin_unlock_irqrestore(&rrpriv->lock, flags); return 0; } static netdev_tx_t rr_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct rr_private *rrpriv = netdev_priv(dev); struct rr_regs __iomem *regs = rrpriv->regs; struct hippi_cb *hcb = (struct hippi_cb *) 
skb->cb; struct ring_ctrl *txctrl; unsigned long flags; u32 index, len = skb->len; u32 *ifield; struct sk_buff *new_skb; if (readl(&regs->Mode) & FATAL_ERR) printk("error codes Fail1 %02x, Fail2 %02x\n", readl(&regs->Fail1), readl(&regs->Fail2)); /* * We probably need to deal with tbusy here to prevent overruns. */ if (skb_headroom(skb) < 8){ printk("incoming skb too small - reallocating\n"); if (!(new_skb = dev_alloc_skb(len + 8))) { dev_kfree_skb(skb); netif_wake_queue(dev); return NETDEV_TX_OK; } skb_reserve(new_skb, 8); skb_put(new_skb, len); skb_copy_from_linear_data(skb, new_skb->data, len); dev_kfree_skb(skb); skb = new_skb; } ifield = (u32 *)skb_push(skb, 8); ifield[0] = 0; ifield[1] = hcb->ifield; /* * We don't need the lock before we are actually going to start * fiddling with the control blocks. */ spin_lock_irqsave(&rrpriv->lock, flags); txctrl = &rrpriv->info->tx_ctrl; index = txctrl->pi; rrpriv->tx_skbuff[index] = skb; set_rraddr(&rrpriv->tx_ring[index].addr, pci_map_single( rrpriv->pci_dev, skb->data, len + 8, PCI_DMA_TODEVICE)); rrpriv->tx_ring[index].size = len + 8; /* include IFIELD */ rrpriv->tx_ring[index].mode = PACKET_START | PACKET_END; txctrl->pi = (index + 1) % TX_RING_ENTRIES; wmb(); writel(txctrl->pi, &regs->TxPi); if (txctrl->pi == rrpriv->dirty_tx){ rrpriv->tx_full = 1; netif_stop_queue(dev); } spin_unlock_irqrestore(&rrpriv->lock, flags); return NETDEV_TX_OK; } /* * Read the firmware out of the EEPROM and put it into the SRAM * (or from user space - later) * * This operation requires the NIC to be halted and is performed with * interrupts disabled and with the spinlock hold. 
*/ static int rr_load_firmware(struct net_device *dev) { struct rr_private *rrpriv; struct rr_regs __iomem *regs; size_t eptr, segptr; int i, j; u32 localctrl, sptr, len, tmp; u32 p2len, p2size, nr_seg, revision, io, sram_size; rrpriv = netdev_priv(dev); regs = rrpriv->regs; if (dev->flags & IFF_UP) return -EBUSY; if (!(readl(&regs->HostCtrl) & NIC_HALTED)){ printk("%s: Trying to load firmware to a running NIC.\n", dev->name); return -EBUSY; } localctrl = readl(&regs->LocalCtrl); writel(0, &regs->LocalCtrl); writel(0, &regs->EvtPrd); writel(0, &regs->RxPrd); writel(0, &regs->TxPrd); /* * First wipe the entire SRAM, otherwise we might run into all * kinds of trouble ... sigh, this took almost all afternoon * to track down ;-( */ io = readl(&regs->ExtIo); writel(0, &regs->ExtIo); sram_size = rr_read_eeprom_word(rrpriv, 8); for (i = 200; i < sram_size / 4; i++){ writel(i * 4, &regs->WinBase); mb(); writel(0, &regs->WinData); mb(); } writel(io, &regs->ExtIo); mb(); eptr = rr_read_eeprom_word(rrpriv, offsetof(struct eeprom, rncd_info.AddrRunCodeSegs)); eptr = ((eptr & 0x1fffff) >> 3); p2len = rr_read_eeprom_word(rrpriv, 0x83*4); p2len = (p2len << 2); p2size = rr_read_eeprom_word(rrpriv, 0x84*4); p2size = ((p2size & 0x1fffff) >> 3); if ((eptr < p2size) || (eptr > (p2size + p2len))){ printk("%s: eptr is invalid\n", dev->name); goto out; } revision = rr_read_eeprom_word(rrpriv, offsetof(struct eeprom, manf.HeaderFmt)); if (revision != 1){ printk("%s: invalid firmware format (%i)\n", dev->name, revision); goto out; } nr_seg = rr_read_eeprom_word(rrpriv, eptr); eptr +=4; #if (DEBUG > 1) printk("%s: nr_seg %i\n", dev->name, nr_seg); #endif for (i = 0; i < nr_seg; i++){ sptr = rr_read_eeprom_word(rrpriv, eptr); eptr += 4; len = rr_read_eeprom_word(rrpriv, eptr); eptr += 4; segptr = rr_read_eeprom_word(rrpriv, eptr); segptr = ((segptr & 0x1fffff) >> 3); eptr += 4; #if (DEBUG > 1) printk("%s: segment %i, sram address %06x, length %04x, segptr %06x\n", dev->name, i, sptr, len, 
segptr); #endif for (j = 0; j < len; j++){ tmp = rr_read_eeprom_word(rrpriv, segptr); writel(sptr, &regs->WinBase); mb(); writel(tmp, &regs->WinData); mb(); segptr += 4; sptr += 4; } } out: writel(localctrl, &regs->LocalCtrl); mb(); return 0; } static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct rr_private *rrpriv; unsigned char *image, *oldimage; unsigned long flags; unsigned int i; int error = -EOPNOTSUPP; rrpriv = netdev_priv(dev); switch(cmd){ case SIOCRRGFW: if (!capable(CAP_SYS_RAWIO)){ return -EPERM; } image = kmalloc(EEPROM_WORDS * sizeof(u32), GFP_KERNEL); if (!image) return -ENOMEM; if (rrpriv->fw_running){ printk("%s: Firmware already running\n", dev->name); error = -EPERM; goto gf_out; } spin_lock_irqsave(&rrpriv->lock, flags); i = rr_read_eeprom(rrpriv, 0, image, EEPROM_BYTES); spin_unlock_irqrestore(&rrpriv->lock, flags); if (i != EEPROM_BYTES){ printk(KERN_ERR "%s: Error reading EEPROM\n", dev->name); error = -EFAULT; goto gf_out; } error = copy_to_user(rq->ifr_data, image, EEPROM_BYTES); if (error) error = -EFAULT; gf_out: kfree(image); return error; case SIOCRRPFW: if (!capable(CAP_SYS_RAWIO)){ return -EPERM; } image = kmalloc(EEPROM_WORDS * sizeof(u32), GFP_KERNEL); oldimage = kmalloc(EEPROM_WORDS * sizeof(u32), GFP_KERNEL); if (!image || !oldimage) { error = -ENOMEM; goto wf_out; } error = copy_from_user(image, rq->ifr_data, EEPROM_BYTES); if (error) { error = -EFAULT; goto wf_out; } if (rrpriv->fw_running){ printk("%s: Firmware already running\n", dev->name); error = -EPERM; goto wf_out; } printk("%s: Updating EEPROM firmware\n", dev->name); spin_lock_irqsave(&rrpriv->lock, flags); error = write_eeprom(rrpriv, 0, image, EEPROM_BYTES); if (error) printk(KERN_ERR "%s: Error writing EEPROM\n", dev->name); i = rr_read_eeprom(rrpriv, 0, oldimage, EEPROM_BYTES); spin_unlock_irqrestore(&rrpriv->lock, flags); if (i != EEPROM_BYTES) printk(KERN_ERR "%s: Error reading back EEPROM " "image\n", dev->name); error = memcmp(image, 
oldimage, EEPROM_BYTES); if (error){ printk(KERN_ERR "%s: Error verifying EEPROM image\n", dev->name); error = -EFAULT; } wf_out: kfree(oldimage); kfree(image); return error; case SIOCRRID: return put_user(0x52523032, (int __user *)rq->ifr_data); default: return error; } } static DEFINE_PCI_DEVICE_TABLE(rr_pci_tbl) = { { PCI_VENDOR_ID_ESSENTIAL, PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER, PCI_ANY_ID, PCI_ANY_ID, }, { 0,} }; MODULE_DEVICE_TABLE(pci, rr_pci_tbl); static struct pci_driver rr_driver = { .name = "rrunner", .id_table = rr_pci_tbl, .probe = rr_init_one, .remove = __devexit_p(rr_remove_one), }; static int __init rr_init_module(void) { return pci_register_driver(&rr_driver); } static void __exit rr_cleanup_module(void) { pci_unregister_driver(&rr_driver); } module_init(rr_init_module); module_exit(rr_cleanup_module);
gpl-2.0
mzhou/lge-kernel-p880-cyanogenmod
drivers/net/wireless/prism54/islpci_hotplug.c
8127
9553
/* * Copyright (C) 2002 Intersil Americas Inc. * Copyright (C) 2003 Herbert Valerio Riedel <hvr@gnu.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/interrupt.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/init.h> /* For __init, __exit */ #include <linux/dma-mapping.h> #include "prismcompat.h" #include "islpci_dev.h" #include "islpci_mgt.h" /* for pc_debug */ #include "isl_oid.h" MODULE_AUTHOR("[Intersil] R.Bastings and W.Termorshuizen, The prism54.org Development Team <prism54-devel@prism54.org>"); MODULE_DESCRIPTION("The Prism54 802.11 Wireless LAN adapter"); MODULE_LICENSE("GPL"); static int init_pcitm = 0; module_param(init_pcitm, int, 0); /* In this order: vendor, device, subvendor, subdevice, class, class_mask, * driver_data * If you have an update for this please contact prism54-devel@prism54.org * The latest list can be found at http://wireless.kernel.org/en/users/Drivers/p54 */ static DEFINE_PCI_DEVICE_TABLE(prism54_id_tbl) = { /* Intersil PRISM Duette/Prism GT Wireless LAN adapter */ { 0x1260, 0x3890, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* 3COM 3CRWE154G72 Wireless LAN adapter */ { PCI_VDEVICE(3COM, 0x6001), 0 }, /* Intersil PRISM Indigo Wireless LAN adapter */ { 0x1260, 0x3877, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* Intersil PRISM Javelin/Xbow Wireless LAN adapter */ { 0x1260, 0x3886, 
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* End of list */ {0,0,0,0,0,0,0} }; /* register the device with the Hotplug facilities of the kernel */ MODULE_DEVICE_TABLE(pci, prism54_id_tbl); static int prism54_probe(struct pci_dev *, const struct pci_device_id *); static void prism54_remove(struct pci_dev *); static int prism54_suspend(struct pci_dev *, pm_message_t state); static int prism54_resume(struct pci_dev *); static struct pci_driver prism54_driver = { .name = DRV_NAME, .id_table = prism54_id_tbl, .probe = prism54_probe, .remove = prism54_remove, .suspend = prism54_suspend, .resume = prism54_resume, }; /****************************************************************************** Module initialization functions ******************************************************************************/ static int prism54_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct net_device *ndev; u8 latency_tmr; u32 mem_addr; islpci_private *priv; int rvalue; /* Enable the pci device */ if (pci_enable_device(pdev)) { printk(KERN_ERR "%s: pci_enable_device() failed.\n", DRV_NAME); return -ENODEV; } /* check whether the latency timer is set correctly */ pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency_tmr); #if VERBOSE > SHOW_ERROR_MESSAGES DEBUG(SHOW_TRACING, "latency timer: %x\n", latency_tmr); #endif if (latency_tmr < PCIDEVICE_LATENCY_TIMER_MIN) { /* set the latency timer */ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, PCIDEVICE_LATENCY_TIMER_VAL); } /* enable PCI DMA */ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { printk(KERN_ERR "%s: 32-bit PCI DMA not supported", DRV_NAME); goto do_pci_disable_device; } /* 0x40 is the programmable timer to configure the response timeout (TRDY_TIMEOUT) * 0x41 is the programmable timer to configure the retry timeout (RETRY_TIMEOUT) * The RETRY_TIMEOUT is used to set the number of retries that the core, as a * Master, will perform before abandoning a cycle. 
The default value for * RETRY_TIMEOUT is 0x80, which far exceeds the PCI 2.1 requirement for new * devices. A write of zero to the RETRY_TIMEOUT register disables this * function to allow use with any non-compliant legacy devices that may * execute more retries. * * Writing zero to both these two registers will disable both timeouts and * *can* solve problems caused by devices that are slow to respond. * Make this configurable - MSW */ if ( init_pcitm >= 0 ) { pci_write_config_byte(pdev, 0x40, (u8)init_pcitm); pci_write_config_byte(pdev, 0x41, (u8)init_pcitm); } else { printk(KERN_INFO "PCI TRDY/RETRY unchanged\n"); } /* request the pci device I/O regions */ rvalue = pci_request_regions(pdev, DRV_NAME); if (rvalue) { printk(KERN_ERR "%s: pci_request_regions failure (rc=%d)\n", DRV_NAME, rvalue); goto do_pci_disable_device; } /* check if the memory window is indeed set */ rvalue = pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &mem_addr); if (rvalue || !mem_addr) { printk(KERN_ERR "%s: PCI device memory region not configured; fix your BIOS or CardBus bridge/drivers\n", DRV_NAME); goto do_pci_release_regions; } /* enable PCI bus-mastering */ DEBUG(SHOW_TRACING, "%s: pci_set_master(pdev)\n", DRV_NAME); pci_set_master(pdev); /* enable MWI */ pci_try_set_mwi(pdev); /* setup the network device interface and its structure */ if (!(ndev = islpci_setup(pdev))) { /* error configuring the driver as a network device */ printk(KERN_ERR "%s: could not configure network device\n", DRV_NAME); goto do_pci_clear_mwi; } priv = netdev_priv(ndev); islpci_set_state(priv, PRV_STATE_PREBOOT); /* we are attempting to boot */ /* card is in unknown state yet, might have some interrupts pending */ isl38xx_disable_interrupts(priv->device_base); /* request for the interrupt before uploading the firmware */ rvalue = request_irq(pdev->irq, islpci_interrupt, IRQF_SHARED, ndev->name, priv); if (rvalue) { /* error, could not hook the handler to the irq */ printk(KERN_ERR "%s: could not install IRQ 
handler\n", ndev->name); goto do_unregister_netdev; } /* firmware upload is triggered in islpci_open */ return 0; do_unregister_netdev: unregister_netdev(ndev); islpci_free_memory(priv); pci_set_drvdata(pdev, NULL); free_netdev(ndev); priv = NULL; do_pci_clear_mwi: pci_clear_mwi(pdev); do_pci_release_regions: pci_release_regions(pdev); do_pci_disable_device: pci_disable_device(pdev); return -EIO; } /* set by cleanup_module */ static volatile int __in_cleanup_module = 0; /* this one removes one(!!) instance only */ static void prism54_remove(struct pci_dev *pdev) { struct net_device *ndev = pci_get_drvdata(pdev); islpci_private *priv = ndev ? netdev_priv(ndev) : NULL; BUG_ON(!priv); if (!__in_cleanup_module) { printk(KERN_DEBUG "%s: hot unplug detected\n", ndev->name); islpci_set_state(priv, PRV_STATE_OFF); } printk(KERN_DEBUG "%s: removing device\n", ndev->name); unregister_netdev(ndev); /* free the interrupt request */ if (islpci_get_state(priv) != PRV_STATE_OFF) { isl38xx_disable_interrupts(priv->device_base); islpci_set_state(priv, PRV_STATE_OFF); /* This bellow causes a lockup at rmmod time. It might be * because some interrupts still linger after rmmod time, * see bug #17 */ /* pci_set_power_state(pdev, 3);*/ /* try to power-off */ } free_irq(pdev->irq, priv); /* free the PCI memory and unmap the remapped page */ islpci_free_memory(priv); pci_set_drvdata(pdev, NULL); free_netdev(ndev); priv = NULL; pci_clear_mwi(pdev); pci_release_regions(pdev); pci_disable_device(pdev); } static int prism54_suspend(struct pci_dev *pdev, pm_message_t state) { struct net_device *ndev = pci_get_drvdata(pdev); islpci_private *priv = ndev ? netdev_priv(ndev) : NULL; BUG_ON(!priv); pci_save_state(pdev); /* tell the device not to trigger interrupts for now... 
*/ isl38xx_disable_interrupts(priv->device_base); /* from now on assume the hardware was already powered down and don't touch it anymore */ islpci_set_state(priv, PRV_STATE_OFF); netif_stop_queue(ndev); netif_device_detach(ndev); return 0; } static int prism54_resume(struct pci_dev *pdev) { struct net_device *ndev = pci_get_drvdata(pdev); islpci_private *priv = ndev ? netdev_priv(ndev) : NULL; int err; BUG_ON(!priv); printk(KERN_NOTICE "%s: got resume request\n", ndev->name); err = pci_enable_device(pdev); if (err) { printk(KERN_ERR "%s: pci_enable_device failed on resume\n", ndev->name); return err; } pci_restore_state(pdev); /* alright let's go into the PREBOOT state */ islpci_reset(priv, 1); netif_device_attach(ndev); netif_start_queue(ndev); return 0; } static int __init prism54_module_init(void) { printk(KERN_INFO "Loaded %s driver, version %s\n", DRV_NAME, DRV_VERSION); __bug_on_wrong_struct_sizes (); return pci_register_driver(&prism54_driver); } /* by the time prism54_module_exit() terminates, as a postcondition * all instances will have been destroyed by calls to * prism54_remove() */ static void __exit prism54_module_exit(void) { __in_cleanup_module = 1; pci_unregister_driver(&prism54_driver); printk(KERN_INFO "Unloaded %s driver\n", DRV_NAME); __in_cleanup_module = 0; } /* register entry points */ module_init(prism54_module_init); module_exit(prism54_module_exit); /* EOF */
gpl-2.0
littlelerroyy/android_kernel_htc_pyramid
arch/h8300/kernel/h8300_ksyms.c
9407
2653
/*
 * h8300_ksyms.c -- exports of arch-provided symbols for loadable modules.
 *
 * Everything here is EXPORT_SYMBOL bookkeeping: string/memory helpers
 * implemented in arch assembly, checksum helpers, IRQ control, the
 * libgcc intrinsics the compiler emits calls to, and the h8300 GPIO
 * reservation API.  There is no executable logic in this file.
 */
#include <linux/module.h>
#include <linux/linkage.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/user.h>
#include <linux/elfcore.h>
#include <linux/in6.h>
#include <linux/interrupt.h>

#include <asm/setup.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/checksum.h>
#include <asm/current.h>
#include <asm/gpio.h>

//asmlinkage long long __ashrdi3 (long long, int);
//asmlinkage long long __lshrdi3 (long long, int);
extern char h8300_debug_device[];

/* platform dependent support -- arch-optimized string/mem primitives,
 * checksumming, thread creation and IRQ enable/disable. */
EXPORT_SYMBOL(strnlen);
EXPORT_SYMBOL(strrchr);
EXPORT_SYMBOL(strstr);
EXPORT_SYMBOL(strchr);
EXPORT_SYMBOL(strcat);
EXPORT_SYMBOL(strlen);
EXPORT_SYMBOL(strcmp);
EXPORT_SYMBOL(strncmp);

EXPORT_SYMBOL(ip_fast_csum);

EXPORT_SYMBOL(kernel_thread);
EXPORT_SYMBOL(enable_irq);
EXPORT_SYMBOL(disable_irq);

/* Networking helper routines. */
EXPORT_SYMBOL(csum_partial_copy_nocheck);

/* The following are special because they're not called
   explicitly (the C compiler generates them).  Fortunately,
   their interface isn't gonna change any time soon now, so
   it's OK to leave it out of version control.  */
//EXPORT_SYMBOL(__ashrdi3);
//EXPORT_SYMBOL(__lshrdi3);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL(memscan);
EXPORT_SYMBOL(memmove);

/*
 * libgcc functions - functions that are used internally by the
 * compiler...  (prototypes are not correct though, but that
 * doesn't really matter since they're not versioned).
 */
extern void __gcc_bcmp(void);
extern void __ashldi3(void);
extern void __ashrdi3(void);
extern void __cmpdi2(void);
extern void __divdi3(void);
extern void __divsi3(void);
extern void __lshrdi3(void);
extern void __moddi3(void);
extern void __modsi3(void);
extern void __muldi3(void);
extern void __mulsi3(void);
extern void __negdi2(void);
extern void __ucmpdi2(void);
extern void __udivdi3(void);
extern void __udivmoddi4(void);
extern void __udivsi3(void);
extern void __umoddi3(void);
extern void __umodsi3(void);

/* gcc lib functions */
EXPORT_SYMBOL(__gcc_bcmp);
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__ashrdi3);
EXPORT_SYMBOL(__cmpdi2);
EXPORT_SYMBOL(__divdi3);
EXPORT_SYMBOL(__divsi3);
EXPORT_SYMBOL(__lshrdi3);
EXPORT_SYMBOL(__moddi3);
EXPORT_SYMBOL(__modsi3);
EXPORT_SYMBOL(__muldi3);
EXPORT_SYMBOL(__mulsi3);
EXPORT_SYMBOL(__negdi2);
EXPORT_SYMBOL(__ucmpdi2);
EXPORT_SYMBOL(__udivdi3);
EXPORT_SYMBOL(__udivmoddi4);
EXPORT_SYMBOL(__udivsi3);
EXPORT_SYMBOL(__umoddi3);
EXPORT_SYMBOL(__umodsi3);

/* h8300 GPIO reservation API (see asm/gpio.h). */
EXPORT_SYMBOL(h8300_reserved_gpio);
EXPORT_SYMBOL(h8300_free_gpio);
EXPORT_SYMBOL(h8300_set_gpio_dir);
gpl-2.0
daniabo/kernel-HuaweiP2-6011.3.0.8
arch/powerpc/boot/simpleboot.c
11967
2884
/*
 * The simple platform -- for booting when firmware doesn't supply a device
 * tree or any platform configuration information.
 * All data is extracted from an embedded device tree
 * blob.
 *
 * Authors: Scott Wood <scottwood@freescale.com>
 *          Grant Likely <grant.likely@secretlab.ca>
 *
 * Copyright (c) 2007 Freescale Semiconductor, Inc.
 * Copyright (c) 2008 Secret Lab Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include "ops.h"
#include "types.h"
#include "io.h"
#include "stdio.h"
#include <libfdt.h>

BSS_STACK(4*1024);

extern int platform_specific_init(void) __attribute__((weak));

/*
 * Bootwrapper entry point: extract memory size and timebase frequency
 * from the embedded device tree blob, initialize the heap and console.
 * r3..r7 are the register values handed over by the previous boot stage;
 * they are unused here but the entry ABI requires the signature.
 */
void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
		   unsigned long r6, unsigned long r7)
{
	const u32 *na, *ns, *reg, *timebase;
	u64 memsize64;
	int node, size, i;

	/* Make sure FDT blob is sane */
	if (fdt_check_header(_dtb_start) != 0)
		fatal("Invalid device tree blob\n");

	/* Find the #address-cells and #size-cells properties */
	node = fdt_path_offset(_dtb_start, "/");
	if (node < 0)
		fatal("Cannot find root node\n");
	na = fdt_getprop(_dtb_start, node, "#address-cells", &size);
	if (!na || (size != 4))
		fatal("Cannot find #address-cells property");

	ns = fdt_getprop(_dtb_start, node, "#size-cells", &size);
	if (!ns || (size != 4))
		fatal("Cannot find #size-cells property");

	/* Find the memory range */
	node = fdt_node_offset_by_prop_value(_dtb_start, -1, "device_type",
					     "memory", sizeof("memory"));
	if (node < 0)
		fatal("Cannot find memory node\n");
	reg = fdt_getprop(_dtb_start, node, "reg", &size);
	if (size < (*na+*ns) * sizeof(u32))
		fatal("cannot get memory range\n");

	/* Only interested in memory based at 0 */
	for (i = 0; i < *na; i++)
		if (*reg++ != 0)
			fatal("Memory range is not based at address 0\n");

	/* get the memsize and truncate it to under 4G on 32 bit machines */
	memsize64 = 0;
	for (i = 0; i < *ns; i++)
		memsize64 = (memsize64 << 32) | *reg++;
	if (sizeof(void *) == 4 && memsize64 >= 0x100000000ULL)
		memsize64 = 0xffffffff;

	/* finally, setup the timebase */
	node = fdt_node_offset_by_prop_value(_dtb_start, -1, "device_type",
					     "cpu", sizeof("cpu"));
	/*
	 * FIX: libfdt signals "not found" with a *negative* offset; offset 0
	 * is the root node.  The previous "if (!node)" check could never
	 * catch a lookup failure and would pass a negative offset on to
	 * fdt_getprop().
	 */
	if (node < 0)
		fatal("Cannot find cpu node\n");
	timebase = fdt_getprop(_dtb_start, node, "timebase-frequency", &size);
	if (timebase && (size == 4))
		timebase_period_ns = 1000000000 / *timebase;

	/* Now we have the memory size; initialize the heap */
	simple_alloc_init(_end, memsize64 - (unsigned long)_end, 32, 64);

	/* prepare the device tree and find the console */
	fdt_init(_dtb_start);

	/* weak symbol: only present on platforms that need extra setup */
	if (platform_specific_init)
		platform_specific_init();

	serial_console_init();
}
gpl-2.0
pcamarillor/linux
arch/blackfin/mach-bf537/dma.c
12223
1994
/* * Copyright 2007-2009 Analog Devices Inc. * * Licensed under the GPL-2 or later. * * This file contains the simple DMA Implementation for Blackfin */ #include <linux/module.h> #include <asm/blackfin.h> #include <asm/dma.h> struct dma_register * const dma_io_base_addr[MAX_DMA_CHANNELS] = { (struct dma_register *) DMA0_NEXT_DESC_PTR, (struct dma_register *) DMA1_NEXT_DESC_PTR, (struct dma_register *) DMA2_NEXT_DESC_PTR, (struct dma_register *) DMA3_NEXT_DESC_PTR, (struct dma_register *) DMA4_NEXT_DESC_PTR, (struct dma_register *) DMA5_NEXT_DESC_PTR, (struct dma_register *) DMA6_NEXT_DESC_PTR, (struct dma_register *) DMA7_NEXT_DESC_PTR, (struct dma_register *) DMA8_NEXT_DESC_PTR, (struct dma_register *) DMA9_NEXT_DESC_PTR, (struct dma_register *) DMA10_NEXT_DESC_PTR, (struct dma_register *) DMA11_NEXT_DESC_PTR, (struct dma_register *) MDMA_D0_NEXT_DESC_PTR, (struct dma_register *) MDMA_S0_NEXT_DESC_PTR, (struct dma_register *) MDMA_D1_NEXT_DESC_PTR, (struct dma_register *) MDMA_S1_NEXT_DESC_PTR, }; EXPORT_SYMBOL(dma_io_base_addr); int channel2irq(unsigned int channel) { int ret_irq = -1; switch (channel) { case CH_PPI: ret_irq = IRQ_PPI; break; case CH_EMAC_RX: ret_irq = IRQ_MAC_RX; break; case CH_EMAC_TX: ret_irq = IRQ_MAC_TX; break; case CH_UART1_RX: ret_irq = IRQ_UART1_RX; break; case CH_UART1_TX: ret_irq = IRQ_UART1_TX; break; case CH_SPORT0_RX: ret_irq = IRQ_SPORT0_RX; break; case CH_SPORT0_TX: ret_irq = IRQ_SPORT0_TX; break; case CH_SPORT1_RX: ret_irq = IRQ_SPORT1_RX; break; case CH_SPORT1_TX: ret_irq = IRQ_SPORT1_TX; break; case CH_SPI: ret_irq = IRQ_SPI; break; case CH_UART0_RX: ret_irq = IRQ_UART0_RX; break; case CH_UART0_TX: ret_irq = IRQ_UART0_TX; break; case CH_MEM_STREAM0_SRC: case CH_MEM_STREAM0_DEST: ret_irq = IRQ_MEM_DMA0; break; case CH_MEM_STREAM1_SRC: case CH_MEM_STREAM1_DEST: ret_irq = IRQ_MEM_DMA1; break; } return ret_irq; }
gpl-2.0
MingquanLiang/linux
net/wimax/debugfs.c
14527
2283
/*
 * Linux WiMAX
 * Debugfs support
 *
 *
 * Copyright (C) 2005-2006 Intel Corporation <linux-wimax@intel.com>
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
#include <linux/debugfs.h>
#include <linux/wimax.h>
#include "wimax-internal.h"

#define D_SUBMODULE debugfs
#include "debug-levels.h"

/*
 * Register one debug-level knob under @parent; relies on the locals
 * 'result' and the 'error' label existing in the calling function.
 * On failure it jumps to 'error' so all knobs registered so far are
 * torn down in one place.
 */
#define __debugfs_register(prefix, name, parent)			\
do {									\
	result = d_level_register_debugfs(prefix, name, parent);	\
	if (result < 0)							\
		goto error;						\
} while (0)

/*
 * Create the per-device debugfs directory ("wimax:<ifname>") and
 * populate it with the debug-level controls for each stack submodule.
 *
 * Returns 0 on success (also when the kernel has no debugfs support,
 * which is deliberately not treated as an error) or a negative errno.
 * On partial failure the whole directory is removed recursively.
 */
int wimax_debugfs_add(struct wimax_dev *wimax_dev)
{
	int result;
	struct net_device *net_dev = wimax_dev->net_dev;
	struct device *dev = net_dev->dev.parent;
	struct dentry *dentry;
	char buf[128];

	snprintf(buf, sizeof(buf), "wimax:%s", net_dev->name);
	dentry = debugfs_create_dir(buf, NULL);
	result = PTR_ERR(dentry);
	if (IS_ERR(dentry)) {
		if (result == -ENODEV)
			result = 0;	/* No debugfs support */
		else
			dev_err(dev, "Can't create debugfs dentry: %d\n",
				result);
		goto out;
	}
	wimax_dev->debugfs_dentry = dentry;
	/* One debug-level knob per submodule of the WiMAX stack. */
	__debugfs_register("wimax_dl_", debugfs, dentry);
	__debugfs_register("wimax_dl_", id_table, dentry);
	__debugfs_register("wimax_dl_", op_msg, dentry);
	__debugfs_register("wimax_dl_", op_reset, dentry);
	__debugfs_register("wimax_dl_", op_rfkill, dentry);
	__debugfs_register("wimax_dl_", op_state_get, dentry);
	__debugfs_register("wimax_dl_", stack, dentry);
	result = 0;
out:
	return result;

error:
	/* Reached only from __debugfs_register(); undo everything. */
	debugfs_remove_recursive(wimax_dev->debugfs_dentry);
	return result;
}

/* Tear down the device's debugfs tree (safe on a NULL dentry). */
void wimax_debugfs_rm(struct wimax_dev *wimax_dev)
{
	debugfs_remove_recursive(wimax_dev->debugfs_dentry);
}
gpl-2.0
wjn740/linux
sound/soc/codecs/sirf-audio-codec.c
192
19075
/* * SiRF audio codec driver * * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company. * * Licensed under GPLv2 or later. */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/regmap.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/initval.h> #include <sound/tlv.h> #include <sound/soc.h> #include <sound/dmaengine_pcm.h> #include "sirf-audio-codec.h" struct sirf_audio_codec { struct clk *clk; struct regmap *regmap; u32 reg_ctrl0, reg_ctrl1; }; static const char * const input_mode_mux[] = {"Single-ended", "Differential"}; static const struct soc_enum input_mode_mux_enum = SOC_ENUM_SINGLE(AUDIO_IC_CODEC_CTRL1, 4, 2, input_mode_mux); static const struct snd_kcontrol_new sirf_audio_codec_input_mode_control = SOC_DAPM_ENUM("Route", input_mode_mux_enum); static const DECLARE_TLV_DB_SCALE(playback_vol_tlv, -12400, 100, 0); static const DECLARE_TLV_DB_SCALE(capture_vol_tlv_prima2, 500, 100, 0); static const DECLARE_TLV_DB_RANGE(capture_vol_tlv_atlas6, 0, 7, TLV_DB_SCALE_ITEM(-100, 100, 0), 0x22, 0x3F, TLV_DB_SCALE_ITEM(700, 100, 0), ); static struct snd_kcontrol_new volume_controls_atlas6[] = { SOC_DOUBLE_TLV("Playback Volume", AUDIO_IC_CODEC_CTRL0, 21, 14, 0x7F, 0, playback_vol_tlv), SOC_DOUBLE_TLV("Capture Volume", AUDIO_IC_CODEC_CTRL1, 16, 10, 0x3F, 0, capture_vol_tlv_atlas6), }; static struct snd_kcontrol_new volume_controls_prima2[] = { SOC_DOUBLE_TLV("Speaker Volume", AUDIO_IC_CODEC_CTRL0, 21, 14, 0x7F, 0, playback_vol_tlv), SOC_DOUBLE_TLV("Capture Volume", AUDIO_IC_CODEC_CTRL1, 15, 10, 0x1F, 0, capture_vol_tlv_prima2), }; static struct snd_kcontrol_new left_input_path_controls[] = { SOC_DAPM_SINGLE("Line Left Switch", AUDIO_IC_CODEC_CTRL1, 6, 1, 0), SOC_DAPM_SINGLE("Mic Left Switch", AUDIO_IC_CODEC_CTRL1, 3, 1, 0), }; static 
struct snd_kcontrol_new right_input_path_controls[] = { SOC_DAPM_SINGLE("Line Right Switch", AUDIO_IC_CODEC_CTRL1, 5, 1, 0), SOC_DAPM_SINGLE("Mic Right Switch", AUDIO_IC_CODEC_CTRL1, 2, 1, 0), }; static struct snd_kcontrol_new left_dac_to_hp_left_amp_switch_control = SOC_DAPM_SINGLE("Switch", AUDIO_IC_CODEC_CTRL0, 9, 1, 0); static struct snd_kcontrol_new left_dac_to_hp_right_amp_switch_control = SOC_DAPM_SINGLE("Switch", AUDIO_IC_CODEC_CTRL0, 8, 1, 0); static struct snd_kcontrol_new right_dac_to_hp_left_amp_switch_control = SOC_DAPM_SINGLE("Switch", AUDIO_IC_CODEC_CTRL0, 7, 1, 0); static struct snd_kcontrol_new right_dac_to_hp_right_amp_switch_control = SOC_DAPM_SINGLE("Switch", AUDIO_IC_CODEC_CTRL0, 6, 1, 0); static struct snd_kcontrol_new left_dac_to_speaker_lineout_switch_control = SOC_DAPM_SINGLE("Switch", AUDIO_IC_CODEC_CTRL0, 11, 1, 0); static struct snd_kcontrol_new right_dac_to_speaker_lineout_switch_control = SOC_DAPM_SINGLE("Switch", AUDIO_IC_CODEC_CTRL0, 10, 1, 0); /* After enable adc, Delay 200ms to avoid pop noise */ static int adc_enable_delay_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { switch (event) { case SND_SOC_DAPM_POST_PMU: msleep(200); break; default: break; } return 0; } static void enable_and_reset_codec(struct regmap *regmap, u32 codec_enable_bits, u32 codec_reset_bits) { regmap_update_bits(regmap, AUDIO_IC_CODEC_CTRL1, codec_enable_bits | codec_reset_bits, codec_enable_bits); msleep(20); regmap_update_bits(regmap, AUDIO_IC_CODEC_CTRL1, codec_reset_bits, codec_reset_bits); } static int atlas6_codec_enable_and_reset_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { #define ATLAS6_CODEC_ENABLE_BITS (1 << 29) #define ATLAS6_CODEC_RESET_BITS (1 << 28) struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); struct sirf_audio_codec *sirf_audio_codec = snd_soc_codec_get_drvdata(codec); switch (event) { case SND_SOC_DAPM_PRE_PMU: 
enable_and_reset_codec(sirf_audio_codec->regmap, ATLAS6_CODEC_ENABLE_BITS, ATLAS6_CODEC_RESET_BITS); break; case SND_SOC_DAPM_POST_PMD: regmap_update_bits(sirf_audio_codec->regmap, AUDIO_IC_CODEC_CTRL1, ATLAS6_CODEC_ENABLE_BITS, 0); break; default: break; } return 0; } static int prima2_codec_enable_and_reset_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { #define PRIMA2_CODEC_ENABLE_BITS (1 << 27) #define PRIMA2_CODEC_RESET_BITS (1 << 26) struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); struct sirf_audio_codec *sirf_audio_codec = snd_soc_codec_get_drvdata(codec); switch (event) { case SND_SOC_DAPM_POST_PMU: enable_and_reset_codec(sirf_audio_codec->regmap, PRIMA2_CODEC_ENABLE_BITS, PRIMA2_CODEC_RESET_BITS); break; case SND_SOC_DAPM_POST_PMD: regmap_update_bits(sirf_audio_codec->regmap, AUDIO_IC_CODEC_CTRL1, PRIMA2_CODEC_ENABLE_BITS, 0); break; default: break; } return 0; } static const struct snd_soc_dapm_widget atlas6_output_driver_dapm_widgets[] = { SND_SOC_DAPM_OUT_DRV("HP Left Driver", AUDIO_IC_CODEC_CTRL1, 25, 0, NULL, 0), SND_SOC_DAPM_OUT_DRV("HP Right Driver", AUDIO_IC_CODEC_CTRL1, 26, 0, NULL, 0), SND_SOC_DAPM_OUT_DRV("Speaker Driver", AUDIO_IC_CODEC_CTRL1, 27, 0, NULL, 0), }; static const struct snd_soc_dapm_widget prima2_output_driver_dapm_widgets[] = { SND_SOC_DAPM_OUT_DRV("HP Left Driver", AUDIO_IC_CODEC_CTRL1, 23, 0, NULL, 0), SND_SOC_DAPM_OUT_DRV("HP Right Driver", AUDIO_IC_CODEC_CTRL1, 24, 0, NULL, 0), SND_SOC_DAPM_OUT_DRV("Speaker Driver", AUDIO_IC_CODEC_CTRL1, 25, 0, NULL, 0), }; static const struct snd_soc_dapm_widget atlas6_codec_clock_dapm_widget = SND_SOC_DAPM_SUPPLY("codecclk", SND_SOC_NOPM, 0, 0, atlas6_codec_enable_and_reset_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD); static const struct snd_soc_dapm_widget prima2_codec_clock_dapm_widget = SND_SOC_DAPM_SUPPLY("codecclk", SND_SOC_NOPM, 0, 0, prima2_codec_enable_and_reset_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD); static const 
struct snd_soc_dapm_widget sirf_audio_codec_dapm_widgets[] = { SND_SOC_DAPM_DAC("DAC left", NULL, AUDIO_IC_CODEC_CTRL0, 1, 0), SND_SOC_DAPM_DAC("DAC right", NULL, AUDIO_IC_CODEC_CTRL0, 0, 0), SND_SOC_DAPM_SWITCH("Left dac to hp left amp", SND_SOC_NOPM, 0, 0, &left_dac_to_hp_left_amp_switch_control), SND_SOC_DAPM_SWITCH("Left dac to hp right amp", SND_SOC_NOPM, 0, 0, &left_dac_to_hp_right_amp_switch_control), SND_SOC_DAPM_SWITCH("Right dac to hp left amp", SND_SOC_NOPM, 0, 0, &right_dac_to_hp_left_amp_switch_control), SND_SOC_DAPM_SWITCH("Right dac to hp right amp", SND_SOC_NOPM, 0, 0, &right_dac_to_hp_right_amp_switch_control), SND_SOC_DAPM_OUT_DRV("HP amp left driver", AUDIO_IC_CODEC_CTRL0, 3, 0, NULL, 0), SND_SOC_DAPM_OUT_DRV("HP amp right driver", AUDIO_IC_CODEC_CTRL0, 3, 0, NULL, 0), SND_SOC_DAPM_SWITCH("Left dac to speaker lineout", SND_SOC_NOPM, 0, 0, &left_dac_to_speaker_lineout_switch_control), SND_SOC_DAPM_SWITCH("Right dac to speaker lineout", SND_SOC_NOPM, 0, 0, &right_dac_to_speaker_lineout_switch_control), SND_SOC_DAPM_OUT_DRV("Speaker amp driver", AUDIO_IC_CODEC_CTRL0, 4, 0, NULL, 0), SND_SOC_DAPM_OUTPUT("HPOUTL"), SND_SOC_DAPM_OUTPUT("HPOUTR"), SND_SOC_DAPM_OUTPUT("SPKOUT"), SND_SOC_DAPM_ADC_E("ADC left", NULL, AUDIO_IC_CODEC_CTRL1, 8, 0, adc_enable_delay_event, SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_ADC_E("ADC right", NULL, AUDIO_IC_CODEC_CTRL1, 7, 0, adc_enable_delay_event, SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_MIXER("Left PGA mixer", AUDIO_IC_CODEC_CTRL1, 1, 0, &left_input_path_controls[0], ARRAY_SIZE(left_input_path_controls)), SND_SOC_DAPM_MIXER("Right PGA mixer", AUDIO_IC_CODEC_CTRL1, 0, 0, &right_input_path_controls[0], ARRAY_SIZE(right_input_path_controls)), SND_SOC_DAPM_MUX("Mic input mode mux", SND_SOC_NOPM, 0, 0, &sirf_audio_codec_input_mode_control), SND_SOC_DAPM_MICBIAS("Mic Bias", AUDIO_IC_CODEC_PWR, 3, 0), SND_SOC_DAPM_INPUT("MICIN1"), SND_SOC_DAPM_INPUT("MICIN2"), SND_SOC_DAPM_INPUT("LINEIN1"), SND_SOC_DAPM_INPUT("LINEIN2"), 
SND_SOC_DAPM_SUPPLY("HSL Phase Opposite", AUDIO_IC_CODEC_CTRL0, 30, 0, NULL, 0), }; static const struct snd_soc_dapm_route sirf_audio_codec_map[] = { {"SPKOUT", NULL, "Speaker Driver"}, {"Speaker Driver", NULL, "Speaker amp driver"}, {"Speaker amp driver", NULL, "Left dac to speaker lineout"}, {"Speaker amp driver", NULL, "Right dac to speaker lineout"}, {"Left dac to speaker lineout", "Switch", "DAC left"}, {"Right dac to speaker lineout", "Switch", "DAC right"}, {"HPOUTL", NULL, "HP Left Driver"}, {"HPOUTR", NULL, "HP Right Driver"}, {"HP Left Driver", NULL, "HP amp left driver"}, {"HP Right Driver", NULL, "HP amp right driver"}, {"HP amp left driver", NULL, "Right dac to hp left amp"}, {"HP amp right driver", NULL , "Right dac to hp right amp"}, {"HP amp left driver", NULL, "Left dac to hp left amp"}, {"HP amp right driver", NULL , "Right dac to hp right amp"}, {"Right dac to hp left amp", "Switch", "DAC left"}, {"Right dac to hp right amp", "Switch", "DAC right"}, {"Left dac to hp left amp", "Switch", "DAC left"}, {"Left dac to hp right amp", "Switch", "DAC right"}, {"DAC left", NULL, "codecclk"}, {"DAC right", NULL, "codecclk"}, {"DAC left", NULL, "Playback"}, {"DAC right", NULL, "Playback"}, {"DAC left", NULL, "HSL Phase Opposite"}, {"DAC right", NULL, "HSL Phase Opposite"}, {"Capture", NULL, "ADC left"}, {"Capture", NULL, "ADC right"}, {"ADC left", NULL, "codecclk"}, {"ADC right", NULL, "codecclk"}, {"ADC left", NULL, "Left PGA mixer"}, {"ADC right", NULL, "Right PGA mixer"}, {"Left PGA mixer", "Line Left Switch", "LINEIN2"}, {"Right PGA mixer", "Line Right Switch", "LINEIN1"}, {"Left PGA mixer", "Mic Left Switch", "MICIN2"}, {"Right PGA mixer", "Mic Right Switch", "Mic input mode mux"}, {"Mic input mode mux", "Single-ended", "MICIN1"}, {"Mic input mode mux", "Differential", "MICIN1"}, }; static void sirf_audio_codec_tx_enable(struct sirf_audio_codec *sirf_audio_codec) { regmap_update_bits(sirf_audio_codec->regmap, AUDIO_PORT_IC_TXFIFO_OP, AUDIO_FIFO_RESET, 
AUDIO_FIFO_RESET); regmap_update_bits(sirf_audio_codec->regmap, AUDIO_PORT_IC_TXFIFO_OP, AUDIO_FIFO_RESET, ~AUDIO_FIFO_RESET); regmap_write(sirf_audio_codec->regmap, AUDIO_PORT_IC_TXFIFO_INT_MSK, 0); regmap_write(sirf_audio_codec->regmap, AUDIO_PORT_IC_TXFIFO_OP, 0); regmap_update_bits(sirf_audio_codec->regmap, AUDIO_PORT_IC_TXFIFO_OP, AUDIO_FIFO_START, AUDIO_FIFO_START); regmap_update_bits(sirf_audio_codec->regmap, AUDIO_PORT_IC_CODEC_TX_CTRL, IC_TX_ENABLE, IC_TX_ENABLE); } static void sirf_audio_codec_tx_disable(struct sirf_audio_codec *sirf_audio_codec) { regmap_write(sirf_audio_codec->regmap, AUDIO_PORT_IC_TXFIFO_OP, 0); regmap_update_bits(sirf_audio_codec->regmap, AUDIO_PORT_IC_CODEC_TX_CTRL, IC_TX_ENABLE, ~IC_TX_ENABLE); } static void sirf_audio_codec_rx_enable(struct sirf_audio_codec *sirf_audio_codec, int channels) { regmap_update_bits(sirf_audio_codec->regmap, AUDIO_PORT_IC_RXFIFO_OP, AUDIO_FIFO_RESET, AUDIO_FIFO_RESET); regmap_update_bits(sirf_audio_codec->regmap, AUDIO_PORT_IC_RXFIFO_OP, AUDIO_FIFO_RESET, ~AUDIO_FIFO_RESET); regmap_write(sirf_audio_codec->regmap, AUDIO_PORT_IC_RXFIFO_INT_MSK, 0); regmap_write(sirf_audio_codec->regmap, AUDIO_PORT_IC_RXFIFO_OP, 0); regmap_update_bits(sirf_audio_codec->regmap, AUDIO_PORT_IC_RXFIFO_OP, AUDIO_FIFO_START, AUDIO_FIFO_START); if (channels == 1) regmap_update_bits(sirf_audio_codec->regmap, AUDIO_PORT_IC_CODEC_RX_CTRL, IC_RX_ENABLE_MONO, IC_RX_ENABLE_MONO); else regmap_update_bits(sirf_audio_codec->regmap, AUDIO_PORT_IC_CODEC_RX_CTRL, IC_RX_ENABLE_STEREO, IC_RX_ENABLE_STEREO); } static void sirf_audio_codec_rx_disable(struct sirf_audio_codec *sirf_audio_codec) { regmap_update_bits(sirf_audio_codec->regmap, AUDIO_PORT_IC_CODEC_RX_CTRL, IC_RX_ENABLE_STEREO, ~IC_RX_ENABLE_STEREO); } static int sirf_audio_codec_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai) { struct snd_soc_codec *codec = dai->codec; struct sirf_audio_codec *sirf_audio_codec = snd_soc_codec_get_drvdata(codec); int 
playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK; /* * This is a workaround, When stop playback, * need disable HP amp, avoid the current noise. */ switch (cmd) { case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: if (playback) { snd_soc_update_bits(codec, AUDIO_IC_CODEC_CTRL0, IC_HSLEN | IC_HSREN, 0); sirf_audio_codec_tx_disable(sirf_audio_codec); } else sirf_audio_codec_rx_disable(sirf_audio_codec); break; case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: if (playback) { sirf_audio_codec_tx_enable(sirf_audio_codec); snd_soc_update_bits(codec, AUDIO_IC_CODEC_CTRL0, IC_HSLEN | IC_HSREN, IC_HSLEN | IC_HSREN); } else sirf_audio_codec_rx_enable(sirf_audio_codec, substream->runtime->channels); break; default: return -EINVAL; } return 0; } struct snd_soc_dai_ops sirf_audio_codec_dai_ops = { .trigger = sirf_audio_codec_trigger, }; struct snd_soc_dai_driver sirf_audio_codec_dai = { .name = "sirf-audio-codec", .playback = { .stream_name = "Playback", .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_48000, .formats = SNDRV_PCM_FMTBIT_S16_LE, }, .capture = { .stream_name = "Capture", .channels_min = 1, .channels_max = 2, .rates = SNDRV_PCM_RATE_48000, .formats = SNDRV_PCM_FMTBIT_S16_LE, }, .ops = &sirf_audio_codec_dai_ops, }; static int sirf_audio_codec_probe(struct snd_soc_codec *codec) { struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec); pm_runtime_enable(codec->dev); if (of_device_is_compatible(codec->dev->of_node, "sirf,prima2-audio-codec")) { snd_soc_dapm_new_controls(dapm, prima2_output_driver_dapm_widgets, ARRAY_SIZE(prima2_output_driver_dapm_widgets)); snd_soc_dapm_new_controls(dapm, &prima2_codec_clock_dapm_widget, 1); return snd_soc_add_codec_controls(codec, volume_controls_prima2, ARRAY_SIZE(volume_controls_prima2)); } if (of_device_is_compatible(codec->dev->of_node, "sirf,atlas6-audio-codec")) { snd_soc_dapm_new_controls(dapm, 
atlas6_output_driver_dapm_widgets, ARRAY_SIZE(atlas6_output_driver_dapm_widgets)); snd_soc_dapm_new_controls(dapm, &atlas6_codec_clock_dapm_widget, 1); return snd_soc_add_codec_controls(codec, volume_controls_atlas6, ARRAY_SIZE(volume_controls_atlas6)); } return -EINVAL; } static int sirf_audio_codec_remove(struct snd_soc_codec *codec) { pm_runtime_disable(codec->dev); return 0; } static struct snd_soc_codec_driver soc_codec_device_sirf_audio_codec = { .probe = sirf_audio_codec_probe, .remove = sirf_audio_codec_remove, .dapm_widgets = sirf_audio_codec_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(sirf_audio_codec_dapm_widgets), .dapm_routes = sirf_audio_codec_map, .num_dapm_routes = ARRAY_SIZE(sirf_audio_codec_map), .idle_bias_off = true, }; static const struct of_device_id sirf_audio_codec_of_match[] = { { .compatible = "sirf,prima2-audio-codec" }, { .compatible = "sirf,atlas6-audio-codec" }, {} }; MODULE_DEVICE_TABLE(of, sirf_audio_codec_of_match); static const struct regmap_config sirf_audio_codec_regmap_config = { .reg_bits = 32, .reg_stride = 4, .val_bits = 32, .max_register = AUDIO_PORT_IC_RXFIFO_INT_MSK, .cache_type = REGCACHE_NONE, }; static int sirf_audio_codec_driver_probe(struct platform_device *pdev) { int ret; struct sirf_audio_codec *sirf_audio_codec; void __iomem *base; struct resource *mem_res; const struct of_device_id *match; match = of_match_node(sirf_audio_codec_of_match, pdev->dev.of_node); sirf_audio_codec = devm_kzalloc(&pdev->dev, sizeof(struct sirf_audio_codec), GFP_KERNEL); if (!sirf_audio_codec) return -ENOMEM; platform_set_drvdata(pdev, sirf_audio_codec); mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); base = devm_ioremap_resource(&pdev->dev, mem_res); if (IS_ERR(base)) return PTR_ERR(base); sirf_audio_codec->regmap = devm_regmap_init_mmio(&pdev->dev, base, &sirf_audio_codec_regmap_config); if (IS_ERR(sirf_audio_codec->regmap)) return PTR_ERR(sirf_audio_codec->regmap); sirf_audio_codec->clk = devm_clk_get(&pdev->dev, NULL); if 
(IS_ERR(sirf_audio_codec->clk)) { dev_err(&pdev->dev, "Get clock failed.\n"); return PTR_ERR(sirf_audio_codec->clk); } ret = clk_prepare_enable(sirf_audio_codec->clk); if (ret) { dev_err(&pdev->dev, "Enable clock failed.\n"); return ret; } ret = snd_soc_register_codec(&(pdev->dev), &soc_codec_device_sirf_audio_codec, &sirf_audio_codec_dai, 1); if (ret) { dev_err(&pdev->dev, "Register Audio Codec dai failed.\n"); goto err_clk_put; } /* * Always open charge pump, if not, when the charge pump closed the * adc will not stable */ regmap_update_bits(sirf_audio_codec->regmap, AUDIO_IC_CODEC_CTRL0, IC_CPFREQ, IC_CPFREQ); if (of_device_is_compatible(pdev->dev.of_node, "sirf,atlas6-audio-codec")) regmap_update_bits(sirf_audio_codec->regmap, AUDIO_IC_CODEC_CTRL0, IC_CPEN, IC_CPEN); return 0; err_clk_put: clk_disable_unprepare(sirf_audio_codec->clk); return ret; } static int sirf_audio_codec_driver_remove(struct platform_device *pdev) { struct sirf_audio_codec *sirf_audio_codec = platform_get_drvdata(pdev); clk_disable_unprepare(sirf_audio_codec->clk); snd_soc_unregister_codec(&(pdev->dev)); return 0; } #ifdef CONFIG_PM_SLEEP static int sirf_audio_codec_suspend(struct device *dev) { struct sirf_audio_codec *sirf_audio_codec = dev_get_drvdata(dev); regmap_read(sirf_audio_codec->regmap, AUDIO_IC_CODEC_CTRL0, &sirf_audio_codec->reg_ctrl0); regmap_read(sirf_audio_codec->regmap, AUDIO_IC_CODEC_CTRL1, &sirf_audio_codec->reg_ctrl1); clk_disable_unprepare(sirf_audio_codec->clk); return 0; } static int sirf_audio_codec_resume(struct device *dev) { struct sirf_audio_codec *sirf_audio_codec = dev_get_drvdata(dev); int ret; ret = clk_prepare_enable(sirf_audio_codec->clk); if (ret) return ret; regmap_write(sirf_audio_codec->regmap, AUDIO_IC_CODEC_CTRL0, sirf_audio_codec->reg_ctrl0); regmap_write(sirf_audio_codec->regmap, AUDIO_IC_CODEC_CTRL1, sirf_audio_codec->reg_ctrl1); return 0; } #endif static const struct dev_pm_ops sirf_audio_codec_pm_ops = { 
SET_SYSTEM_SLEEP_PM_OPS(sirf_audio_codec_suspend, sirf_audio_codec_resume) }; static struct platform_driver sirf_audio_codec_driver = { .driver = { .name = "sirf-audio-codec", .of_match_table = sirf_audio_codec_of_match, .pm = &sirf_audio_codec_pm_ops, }, .probe = sirf_audio_codec_driver_probe, .remove = sirf_audio_codec_driver_remove, }; module_platform_driver(sirf_audio_codec_driver); MODULE_DESCRIPTION("SiRF audio codec driver"); MODULE_AUTHOR("RongJun Ying <Rongjun.Ying@csr.com>"); MODULE_LICENSE("GPL v2");
gpl-2.0
cattleprod/samsung-kernel-gt-i9100
drivers/scsi/megaraid/megaraid_sas.c
704
111504
/* * * Linux MegaRAID driver for SAS based RAID controllers * * Copyright (c) 2003-2005 LSI Corporation. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * FILE : megaraid_sas.c * Version : v00.00.04.17.1-rc1 * * Authors: * (email-id : megaraidlinux@lsi.com) * Sreenivas Bagalkote * Sumant Patro * Bo Yang * * List of supported controllers * * OEM Product Name VID DID SSVID SSID * --- ------------ --- --- ---- ---- */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/list.h> #include <linux/moduleparam.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/smp_lock.h> #include <linux/uio.h> #include <linux/slab.h> #include <asm/uaccess.h> #include <linux/fs.h> #include <linux/compat.h> #include <linux/blkdev.h> #include <linux/mutex.h> #include <linux/poll.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include "megaraid_sas.h" /* * poll_mode_io:1- schedule complete completion from q cmd */ static unsigned int poll_mode_io; module_param_named(poll_mode_io, poll_mode_io, int, 0); MODULE_PARM_DESC(poll_mode_io, "Complete cmds from IO path, (default=0)"); MODULE_LICENSE("GPL"); MODULE_VERSION(MEGASAS_VERSION); MODULE_AUTHOR("megaraidlinux@lsi.com"); MODULE_DESCRIPTION("LSI MegaRAID SAS Driver"); /* * PCI ID table for all supported controllers */ static struct pci_device_id megasas_pci_table[] = { {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)}, /* xscale IOP */ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)}, /* ppc IOP */ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)}, /* ppc IOP */ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, 
PCI_DEVICE_ID_LSI_SAS1078GEN2)}, /* gen2*/ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)}, /* gen2*/ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)}, /* skinny*/ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)}, /* skinny*/ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)}, /* xscale IOP, vega */ {PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)}, /* xscale IOP */ {} }; MODULE_DEVICE_TABLE(pci, megasas_pci_table); static int megasas_mgmt_majorno; static struct megasas_mgmt_info megasas_mgmt_info; static struct fasync_struct *megasas_async_queue; static DEFINE_MUTEX(megasas_async_queue_mutex); static int megasas_poll_wait_aen; static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait); static u32 support_poll_for_event; static u32 megasas_dbg_lvl; /* define lock for aen poll */ spinlock_t poll_aen_lock; static void megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, u8 alt_status); /** * megasas_get_cmd - Get a command from the free pool * @instance: Adapter soft state * * Returns a free command from the pool */ static struct megasas_cmd *megasas_get_cmd(struct megasas_instance *instance) { unsigned long flags; struct megasas_cmd *cmd = NULL; spin_lock_irqsave(&instance->cmd_pool_lock, flags); if (!list_empty(&instance->cmd_pool)) { cmd = list_entry((&instance->cmd_pool)->next, struct megasas_cmd, list); list_del_init(&cmd->list); } else { printk(KERN_ERR "megasas: Command pool empty!\n"); } spin_unlock_irqrestore(&instance->cmd_pool_lock, flags); return cmd; } /** * megasas_return_cmd - Return a cmd to free command pool * @instance: Adapter soft state * @cmd: Command packet to be returned to free command pool */ static inline void megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) { unsigned long flags; spin_lock_irqsave(&instance->cmd_pool_lock, flags); cmd->scmd = NULL; list_add_tail(&cmd->list, &instance->cmd_pool); 
spin_unlock_irqrestore(&instance->cmd_pool_lock, flags); } /** * The following functions are defined for xscale * (deviceid : 1064R, PERC5) controllers */ /** * megasas_enable_intr_xscale - Enables interrupts * @regs: MFI register set */ static inline void megasas_enable_intr_xscale(struct megasas_register_set __iomem * regs) { writel(1, &(regs)->outbound_intr_mask); /* Dummy readl to force pci flush */ readl(&regs->outbound_intr_mask); } /** * megasas_disable_intr_xscale -Disables interrupt * @regs: MFI register set */ static inline void megasas_disable_intr_xscale(struct megasas_register_set __iomem * regs) { u32 mask = 0x1f; writel(mask, &regs->outbound_intr_mask); /* Dummy readl to force pci flush */ readl(&regs->outbound_intr_mask); } /** * megasas_read_fw_status_reg_xscale - returns the current FW status value * @regs: MFI register set */ static u32 megasas_read_fw_status_reg_xscale(struct megasas_register_set __iomem * regs) { return readl(&(regs)->outbound_msg_0); } /** * megasas_clear_interrupt_xscale - Check & clear interrupt * @regs: MFI register set */ static int megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs) { u32 status; /* * Check if it is our interrupt */ status = readl(&regs->outbound_intr_status); if (!(status & MFI_OB_INTR_STATUS_MASK)) { return 1; } /* * Clear the interrupt by writing back the same value */ writel(status, &regs->outbound_intr_status); /* Dummy readl to force pci flush */ readl(&regs->outbound_intr_status); return 0; } /** * megasas_fire_cmd_xscale - Sends command to the FW * @frame_phys_addr : Physical address of cmd * @frame_count : Number of frames for the command * @regs : MFI register set */ static inline void megasas_fire_cmd_xscale(struct megasas_instance *instance, dma_addr_t frame_phys_addr, u32 frame_count, struct megasas_register_set __iomem *regs) { writel((frame_phys_addr >> 3)|(frame_count), &(regs)->inbound_queue_port); } static struct megasas_instance_template 
megasas_instance_template_xscale = { .fire_cmd = megasas_fire_cmd_xscale, .enable_intr = megasas_enable_intr_xscale, .disable_intr = megasas_disable_intr_xscale, .clear_intr = megasas_clear_intr_xscale, .read_fw_status_reg = megasas_read_fw_status_reg_xscale, }; /** * This is the end of set of functions & definitions specific * to xscale (deviceid : 1064R, PERC5) controllers */ /** * The following functions are defined for ppc (deviceid : 0x60) * controllers */ /** * megasas_enable_intr_ppc - Enables interrupts * @regs: MFI register set */ static inline void megasas_enable_intr_ppc(struct megasas_register_set __iomem * regs) { writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear); writel(~0x80000004, &(regs)->outbound_intr_mask); /* Dummy readl to force pci flush */ readl(&regs->outbound_intr_mask); } /** * megasas_disable_intr_ppc - Disable interrupt * @regs: MFI register set */ static inline void megasas_disable_intr_ppc(struct megasas_register_set __iomem * regs) { u32 mask = 0xFFFFFFFF; writel(mask, &regs->outbound_intr_mask); /* Dummy readl to force pci flush */ readl(&regs->outbound_intr_mask); } /** * megasas_read_fw_status_reg_ppc - returns the current FW status value * @regs: MFI register set */ static u32 megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem * regs) { return readl(&(regs)->outbound_scratch_pad); } /** * megasas_clear_interrupt_ppc - Check & clear interrupt * @regs: MFI register set */ static int megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs) { u32 status; /* * Check if it is our interrupt */ status = readl(&regs->outbound_intr_status); if (!(status & MFI_REPLY_1078_MESSAGE_INTERRUPT)) { return 1; } /* * Clear the interrupt by writing back the same value */ writel(status, &regs->outbound_doorbell_clear); /* Dummy readl to force pci flush */ readl(&regs->outbound_doorbell_clear); return 0; } /** * megasas_fire_cmd_ppc - Sends command to the FW * @frame_phys_addr : Physical address of cmd * @frame_count : 
Number of frames for the command * @regs : MFI register set */ static inline void megasas_fire_cmd_ppc(struct megasas_instance *instance, dma_addr_t frame_phys_addr, u32 frame_count, struct megasas_register_set __iomem *regs) { writel((frame_phys_addr | (frame_count<<1))|1, &(regs)->inbound_queue_port); } static struct megasas_instance_template megasas_instance_template_ppc = { .fire_cmd = megasas_fire_cmd_ppc, .enable_intr = megasas_enable_intr_ppc, .disable_intr = megasas_disable_intr_ppc, .clear_intr = megasas_clear_intr_ppc, .read_fw_status_reg = megasas_read_fw_status_reg_ppc, }; /** * megasas_enable_intr_skinny - Enables interrupts * @regs: MFI register set */ static inline void megasas_enable_intr_skinny(struct megasas_register_set __iomem *regs) { writel(0xFFFFFFFF, &(regs)->outbound_intr_mask); writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask); /* Dummy readl to force pci flush */ readl(&regs->outbound_intr_mask); } /** * megasas_disable_intr_skinny - Disables interrupt * @regs: MFI register set */ static inline void megasas_disable_intr_skinny(struct megasas_register_set __iomem *regs) { u32 mask = 0xFFFFFFFF; writel(mask, &regs->outbound_intr_mask); /* Dummy readl to force pci flush */ readl(&regs->outbound_intr_mask); } /** * megasas_read_fw_status_reg_skinny - returns the current FW status value * @regs: MFI register set */ static u32 megasas_read_fw_status_reg_skinny(struct megasas_register_set __iomem *regs) { return readl(&(regs)->outbound_scratch_pad); } /** * megasas_clear_interrupt_skinny - Check & clear interrupt * @regs: MFI register set */ static int megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs) { u32 status; /* * Check if it is our interrupt */ status = readl(&regs->outbound_intr_status); if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) { return 1; } /* * Clear the interrupt by writing back the same value */ writel(status, &regs->outbound_intr_status); /* * dummy read to flush PCI */ 
readl(&regs->outbound_intr_status); return 0; } /** * megasas_fire_cmd_skinny - Sends command to the FW * @frame_phys_addr : Physical address of cmd * @frame_count : Number of frames for the command * @regs : MFI register set */ static inline void megasas_fire_cmd_skinny(struct megasas_instance *instance, dma_addr_t frame_phys_addr, u32 frame_count, struct megasas_register_set __iomem *regs) { unsigned long flags; spin_lock_irqsave(&instance->fire_lock, flags); writel(0, &(regs)->inbound_high_queue_port); writel((frame_phys_addr | (frame_count<<1))|1, &(regs)->inbound_low_queue_port); spin_unlock_irqrestore(&instance->fire_lock, flags); } static struct megasas_instance_template megasas_instance_template_skinny = { .fire_cmd = megasas_fire_cmd_skinny, .enable_intr = megasas_enable_intr_skinny, .disable_intr = megasas_disable_intr_skinny, .clear_intr = megasas_clear_intr_skinny, .read_fw_status_reg = megasas_read_fw_status_reg_skinny, }; /** * The following functions are defined for gen2 (deviceid : 0x78 0x79) * controllers */ /** * megasas_enable_intr_gen2 - Enables interrupts * @regs: MFI register set */ static inline void megasas_enable_intr_gen2(struct megasas_register_set __iomem *regs) { writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear); /* write ~0x00000005 (4 & 1) to the intr mask*/ writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask); /* Dummy readl to force pci flush */ readl(&regs->outbound_intr_mask); } /** * megasas_disable_intr_gen2 - Disables interrupt * @regs: MFI register set */ static inline void megasas_disable_intr_gen2(struct megasas_register_set __iomem *regs) { u32 mask = 0xFFFFFFFF; writel(mask, &regs->outbound_intr_mask); /* Dummy readl to force pci flush */ readl(&regs->outbound_intr_mask); } /** * megasas_read_fw_status_reg_gen2 - returns the current FW status value * @regs: MFI register set */ static u32 megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs) { return 
readl(&(regs)->outbound_scratch_pad); } /** * megasas_clear_interrupt_gen2 - Check & clear interrupt * @regs: MFI register set */ static int megasas_clear_intr_gen2(struct megasas_register_set __iomem *regs) { u32 status; /* * Check if it is our interrupt */ status = readl(&regs->outbound_intr_status); if (!(status & MFI_GEN2_ENABLE_INTERRUPT_MASK)) return 1; /* * Clear the interrupt by writing back the same value */ writel(status, &regs->outbound_doorbell_clear); /* Dummy readl to force pci flush */ readl(&regs->outbound_intr_status); return 0; } /** * megasas_fire_cmd_gen2 - Sends command to the FW * @frame_phys_addr : Physical address of cmd * @frame_count : Number of frames for the command * @regs : MFI register set */ static inline void megasas_fire_cmd_gen2(struct megasas_instance *instance, dma_addr_t frame_phys_addr, u32 frame_count, struct megasas_register_set __iomem *regs) { writel((frame_phys_addr | (frame_count<<1))|1, &(regs)->inbound_queue_port); } static struct megasas_instance_template megasas_instance_template_gen2 = { .fire_cmd = megasas_fire_cmd_gen2, .enable_intr = megasas_enable_intr_gen2, .disable_intr = megasas_disable_intr_gen2, .clear_intr = megasas_clear_intr_gen2, .read_fw_status_reg = megasas_read_fw_status_reg_gen2, }; /** * This is the end of set of functions & definitions * specific to ppc (deviceid : 0x60) controllers */ /** * megasas_issue_polled - Issues a polling command * @instance: Adapter soft state * @cmd: Command packet to be issued * * For polling, MFI requires the cmd_status to be set to 0xFF before posting. 
*/ static int megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd) { int i; u32 msecs = MFI_POLL_TIMEOUT_SECS * 1000; struct megasas_header *frame_hdr = &cmd->frame->hdr; frame_hdr->cmd_status = 0xFF; frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; /* * Issue the frame using inbound queue port */ instance->instancet->fire_cmd(instance, cmd->frame_phys_addr, 0, instance->reg_set); /* * Wait for cmd_status to change */ for (i = 0; (i < msecs) && (frame_hdr->cmd_status == 0xff); i++) { rmb(); msleep(1); } if (frame_hdr->cmd_status == 0xff) return -ETIME; return 0; } /** * megasas_issue_blocked_cmd - Synchronous wrapper around regular FW cmds * @instance: Adapter soft state * @cmd: Command to be issued * * This function waits on an event for the command to be returned from ISR. * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs * Used to issue ioctl commands. */ static int megasas_issue_blocked_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) { cmd->cmd_status = ENODATA; instance->instancet->fire_cmd(instance, cmd->frame_phys_addr, 0, instance->reg_set); wait_event_timeout(instance->int_cmd_wait_q, (cmd->cmd_status != ENODATA), MEGASAS_INTERNAL_CMD_WAIT_TIME*HZ); return 0; } /** * megasas_issue_blocked_abort_cmd - Aborts previously issued cmd * @instance: Adapter soft state * @cmd_to_abort: Previously issued cmd to be aborted * * MFI firmware can abort previously issued AEN comamnd (automatic event * notification). The megasas_issue_blocked_abort_cmd() issues such abort * cmd and waits for return status. 
* Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs */ static int megasas_issue_blocked_abort_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd_to_abort) { struct megasas_cmd *cmd; struct megasas_abort_frame *abort_fr; cmd = megasas_get_cmd(instance); if (!cmd) return -1; abort_fr = &cmd->frame->abort; /* * Prepare and issue the abort frame */ abort_fr->cmd = MFI_CMD_ABORT; abort_fr->cmd_status = 0xFF; abort_fr->flags = 0; abort_fr->abort_context = cmd_to_abort->index; abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr; abort_fr->abort_mfi_phys_addr_hi = 0; cmd->sync_cmd = 1; cmd->cmd_status = 0xFF; instance->instancet->fire_cmd(instance, cmd->frame_phys_addr, 0, instance->reg_set); /* * Wait for this cmd to complete */ wait_event_timeout(instance->abort_cmd_wait_q, (cmd->cmd_status != 0xFF), MEGASAS_INTERNAL_CMD_WAIT_TIME*HZ); megasas_return_cmd(instance, cmd); return 0; } /** * megasas_make_sgl32 - Prepares 32-bit SGL * @instance: Adapter soft state * @scp: SCSI command from the mid-layer * @mfi_sgl: SGL to be filled in * * If successful, this function returns the number of SG elements. Otherwise, * it returnes -1. */ static int megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl) { int i; int sge_count; struct scatterlist *os_sgl; sge_count = scsi_dma_map(scp); BUG_ON(sge_count < 0); if (sge_count) { scsi_for_each_sg(scp, os_sgl, sge_count, i) { mfi_sgl->sge32[i].length = sg_dma_len(os_sgl); mfi_sgl->sge32[i].phys_addr = sg_dma_address(os_sgl); } } return sge_count; } /** * megasas_make_sgl64 - Prepares 64-bit SGL * @instance: Adapter soft state * @scp: SCSI command from the mid-layer * @mfi_sgl: SGL to be filled in * * If successful, this function returns the number of SG elements. Otherwise, * it returnes -1. 
*/ static int megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl) { int i; int sge_count; struct scatterlist *os_sgl; sge_count = scsi_dma_map(scp); BUG_ON(sge_count < 0); if (sge_count) { scsi_for_each_sg(scp, os_sgl, sge_count, i) { mfi_sgl->sge64[i].length = sg_dma_len(os_sgl); mfi_sgl->sge64[i].phys_addr = sg_dma_address(os_sgl); } } return sge_count; } /** * megasas_make_sgl_skinny - Prepares IEEE SGL * @instance: Adapter soft state * @scp: SCSI command from the mid-layer * @mfi_sgl: SGL to be filled in * * If successful, this function returns the number of SG elements. Otherwise, * it returnes -1. */ static int megasas_make_sgl_skinny(struct megasas_instance *instance, struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl) { int i; int sge_count; struct scatterlist *os_sgl; sge_count = scsi_dma_map(scp); if (sge_count) { scsi_for_each_sg(scp, os_sgl, sge_count, i) { mfi_sgl->sge_skinny[i].length = sg_dma_len(os_sgl); mfi_sgl->sge_skinny[i].phys_addr = sg_dma_address(os_sgl); } } return sge_count; } /** * megasas_get_frame_count - Computes the number of frames * @frame_type : type of frame- io or pthru frame * @sge_count : number of sg elements * * Returns the number of frames required for numnber of sge's (sge_count) */ static u32 megasas_get_frame_count(struct megasas_instance *instance, u8 sge_count, u8 frame_type) { int num_cnt; int sge_bytes; u32 sge_sz; u32 frame_count=0; sge_sz = (IS_DMA64) ? 
sizeof(struct megasas_sge64) : sizeof(struct megasas_sge32); if (instance->flag_ieee) { sge_sz = sizeof(struct megasas_sge_skinny); } /* * Main frame can contain 2 SGEs for 64-bit SGLs and * 3 SGEs for 32-bit SGLs for ldio & * 1 SGEs for 64-bit SGLs and * 2 SGEs for 32-bit SGLs for pthru frame */ if (unlikely(frame_type == PTHRU_FRAME)) { if (instance->flag_ieee == 1) { num_cnt = sge_count - 1; } else if (IS_DMA64) num_cnt = sge_count - 1; else num_cnt = sge_count - 2; } else { if (instance->flag_ieee == 1) { num_cnt = sge_count - 1; } else if (IS_DMA64) num_cnt = sge_count - 2; else num_cnt = sge_count - 3; } if(num_cnt>0){ sge_bytes = sge_sz * num_cnt; frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) + ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ; } /* Main frame */ frame_count +=1; if (frame_count > 7) frame_count = 8; return frame_count; } /** * megasas_build_dcdb - Prepares a direct cdb (DCDB) command * @instance: Adapter soft state * @scp: SCSI command * @cmd: Command to be prepared in * * This function prepares CDB commands. These are typcially pass-through * commands to the devices. */ static int megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp, struct megasas_cmd *cmd) { u32 is_logical; u32 device_id; u16 flags = 0; struct megasas_pthru_frame *pthru; is_logical = MEGASAS_IS_LOGICAL(scp); device_id = MEGASAS_DEV_INDEX(instance, scp); pthru = (struct megasas_pthru_frame *)cmd->frame; if (scp->sc_data_direction == PCI_DMA_TODEVICE) flags = MFI_FRAME_DIR_WRITE; else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) flags = MFI_FRAME_DIR_READ; else if (scp->sc_data_direction == PCI_DMA_NONE) flags = MFI_FRAME_DIR_NONE; if (instance->flag_ieee == 1) { flags |= MFI_FRAME_IEEE; } /* * Prepare the DCDB frame */ pthru->cmd = (is_logical) ? 
MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO; pthru->cmd_status = 0x0; pthru->scsi_status = 0x0; pthru->target_id = device_id; pthru->lun = scp->device->lun; pthru->cdb_len = scp->cmd_len; pthru->timeout = 0; pthru->pad_0 = 0; pthru->flags = flags; pthru->data_xfer_len = scsi_bufflen(scp); memcpy(pthru->cdb, scp->cmnd, scp->cmd_len); /* * If the command is for the tape device, set the * pthru timeout to the os layer timeout value. */ if (scp->device->type == TYPE_TAPE) { if ((scp->request->timeout / HZ) > 0xFFFF) pthru->timeout = 0xFFFF; else pthru->timeout = scp->request->timeout / HZ; } /* * Construct SGL */ if (instance->flag_ieee == 1) { pthru->flags |= MFI_FRAME_SGL64; pthru->sge_count = megasas_make_sgl_skinny(instance, scp, &pthru->sgl); } else if (IS_DMA64) { pthru->flags |= MFI_FRAME_SGL64; pthru->sge_count = megasas_make_sgl64(instance, scp, &pthru->sgl); } else pthru->sge_count = megasas_make_sgl32(instance, scp, &pthru->sgl); if (pthru->sge_count > instance->max_num_sge) { printk(KERN_ERR "megasas: DCDB two many SGE NUM=%x\n", pthru->sge_count); return 0; } /* * Sense info specific */ pthru->sense_len = SCSI_SENSE_BUFFERSIZE; pthru->sense_buf_phys_addr_hi = 0; pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr; /* * Compute the total number of frames this command consumes. FW uses * this number to pull sufficient number of frames from host memory. */ cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count, PTHRU_FRAME); return cmd->frame_count; } /** * megasas_build_ldio - Prepares IOs to logical devices * @instance: Adapter soft state * @scp: SCSI command * @cmd: Command to be prepared * * Frames (and accompanying SGLs) for regular SCSI IOs use this function. 
*/
static int
megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
		   struct megasas_cmd *cmd)
{
	u32 device_id;
	u8 sc = scp->cmnd[0];
	u16 flags = 0;
	struct megasas_io_frame *ldio;

	device_id = MEGASAS_DEV_INDEX(instance, scp);
	ldio = (struct megasas_io_frame *)cmd->frame;

	if (scp->sc_data_direction == PCI_DMA_TODEVICE)
		flags = MFI_FRAME_DIR_WRITE;
	else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
		flags = MFI_FRAME_DIR_READ;

	if (instance->flag_ieee == 1) {
		flags |= MFI_FRAME_IEEE;
	}

	/*
	 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
	 */
	ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
	ldio->cmd_status = 0x0;
	ldio->scsi_status = 0x0;
	ldio->target_id = device_id;
	ldio->timeout = 0;
	ldio->reserved_0 = 0;
	ldio->pad_0 = 0;
	ldio->flags = flags;
	ldio->start_lba_hi = 0;
	ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;

	/*
	 * 6-byte READ(0x08) or WRITE(0x0A) cdb
	 */
	if (scp->cmd_len == 6) {
		ldio->lba_count = (u32) scp->cmnd[4];
		ldio->start_lba_lo = ((u32) scp->cmnd[1] << 16) |
		    ((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3];

		ldio->start_lba_lo &= 0x1FFFFF;
	}

	/*
	 * 10-byte READ(0x28) or WRITE(0x2A) cdb
	 */
	else if (scp->cmd_len == 10) {
		ldio->lba_count = (u32) scp->cmnd[8] |
		    ((u32) scp->cmnd[7] << 8);
		ldio->start_lba_lo = ((u32) scp->cmnd[2] << 24) |
		    ((u32) scp->cmnd[3] << 16) |
		    ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
	}

	/*
	 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
	 */
	else if (scp->cmd_len == 12) {
		ldio->lba_count = ((u32) scp->cmnd[6] << 24) |
		    ((u32) scp->cmnd[7] << 16) |
		    ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];

		ldio->start_lba_lo = ((u32) scp->cmnd[2] << 24) |
		    ((u32) scp->cmnd[3] << 16) |
		    ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
	}

	/*
	 * 16-byte READ(0x88) or WRITE(0x8A) cdb
	 */
	else if (scp->cmd_len == 16) {
		ldio->lba_count = ((u32) scp->cmnd[10] << 24) |
		    ((u32) scp->cmnd[11] << 16) |
		    ((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13];

		ldio->start_lba_lo = ((u32) scp->cmnd[6] << 24) | ((u32)
scp->cmnd[7] << 16) | ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9]; ldio->start_lba_hi = ((u32) scp->cmnd[2] << 24) | ((u32) scp->cmnd[3] << 16) | ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; } /* * Construct SGL */ if (instance->flag_ieee) { ldio->flags |= MFI_FRAME_SGL64; ldio->sge_count = megasas_make_sgl_skinny(instance, scp, &ldio->sgl); } else if (IS_DMA64) { ldio->flags |= MFI_FRAME_SGL64; ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl); } else ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl); if (ldio->sge_count > instance->max_num_sge) { printk(KERN_ERR "megasas: build_ld_io: sge_count = %x\n", ldio->sge_count); return 0; } /* * Sense info specific */ ldio->sense_len = SCSI_SENSE_BUFFERSIZE; ldio->sense_buf_phys_addr_hi = 0; ldio->sense_buf_phys_addr_lo = cmd->sense_phys_addr; /* * Compute the total number of frames this command consumes. FW uses * this number to pull sufficient number of frames from host memory. */ cmd->frame_count = megasas_get_frame_count(instance, ldio->sge_count, IO_FRAME); return cmd->frame_count; } /** * megasas_is_ldio - Checks if the cmd is for logical drive * @scmd: SCSI command * * Called by megasas_queue_command to find out if the command to be queued * is a logical drive command */ static inline int megasas_is_ldio(struct scsi_cmnd *cmd) { if (!MEGASAS_IS_LOGICAL(cmd)) return 0; switch (cmd->cmnd[0]) { case READ_10: case WRITE_10: case READ_12: case WRITE_12: case READ_6: case WRITE_6: case READ_16: case WRITE_16: return 1; default: return 0; } } /** * megasas_dump_pending_frames - Dumps the frame address of all pending cmds * in FW * @instance: Adapter soft state */ static inline void megasas_dump_pending_frames(struct megasas_instance *instance) { struct megasas_cmd *cmd; int i,n; union megasas_sgl *mfi_sgl; struct megasas_io_frame *ldio; struct megasas_pthru_frame *pthru; u32 sgcount; u32 max_cmd = instance->max_fw_cmds; printk(KERN_ERR "\nmegasas[%d]: Dumping Frame Phys Address of all 
pending cmds in FW\n",instance->host->host_no);
	/* Summary line: how many commands the OS believes are with the FW */
	printk(KERN_ERR "megasas[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding));
	if (IS_DMA64)
		printk(KERN_ERR "\nmegasas[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no);
	else
		printk(KERN_ERR "\nmegasas[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no);

	printk(KERN_ERR "megasas[%d]: Pending OS cmds in FW : \n",instance->host->host_no);
	/* Walk the whole command list; entries with a SCSI cmd attached are
	 * OS-issued commands currently owned by the firmware. */
	for (i = 0; i < max_cmd; i++) {
		cmd = instance->cmd_list[i];
		if(!cmd->scmd)
			continue;
		printk(KERN_ERR "megasas[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr);
		if (megasas_is_ldio(cmd->scmd)){
			/* Logical-drive IO frame: dump LBA/target details */
			ldio = (struct megasas_io_frame *)cmd->frame;
			mfi_sgl = &ldio->sgl;
			sgcount = ldio->sge_count;
			printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",instance->host->host_no, cmd->frame_count,ldio->cmd,ldio->target_id, ldio->start_lba_lo,ldio->start_lba_hi,ldio->sense_buf_phys_addr_lo,sgcount);
		}
		else {
			/* Pass-through frame: dump CDB/transfer details */
			pthru = (struct megasas_pthru_frame *) cmd->frame;
			mfi_sgl = &pthru->sgl;
			sgcount = pthru->sge_count;
			printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",instance->host->host_no,cmd->frame_count,pthru->cmd,pthru->target_id,pthru->lun,pthru->cdb_len , pthru->data_xfer_len,pthru->sense_buf_phys_addr_lo,sgcount);
		}
		/* Optionally dump every SG element when debug level asks */
		if(megasas_dbg_lvl & MEGASAS_DBG_LVL){
			for (n = 0; n < sgcount; n++){
				if (IS_DMA64)
					printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%08lx ",mfi_sgl->sge64[n].length , (unsigned long)mfi_sgl->sge64[n].phys_addr) ;
				else
					printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%x ",mfi_sgl->sge32[n].length , mfi_sgl->sge32[n].phys_addr) ;
			}
		}
		printk(KERN_ERR "\n");
	} /*for max_cmd*/
	printk(KERN_ERR "\nmegasas[%d]: Pending Internal cmds in FW : \n",instance->host->host_no);
for (i = 0; i < max_cmd; i++) { cmd = instance->cmd_list[i]; if(cmd->sync_cmd == 1){ printk(KERN_ERR "0x%08lx : ", (unsigned long)cmd->frame_phys_addr); } } printk(KERN_ERR "megasas[%d]: Dumping Done.\n\n",instance->host->host_no); } /** * megasas_queue_command - Queue entry point * @scmd: SCSI command to be queued * @done: Callback entry point */ static int megasas_queue_command(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *)) { u32 frame_count; struct megasas_cmd *cmd; struct megasas_instance *instance; instance = (struct megasas_instance *) scmd->device->host->hostdata; /* Don't process if we have already declared adapter dead */ if (instance->hw_crit_error) return SCSI_MLQUEUE_HOST_BUSY; scmd->scsi_done = done; scmd->result = 0; if (MEGASAS_IS_LOGICAL(scmd) && (scmd->device->id >= MEGASAS_MAX_LD || scmd->device->lun)) { scmd->result = DID_BAD_TARGET << 16; goto out_done; } switch (scmd->cmnd[0]) { case SYNCHRONIZE_CACHE: /* * FW takes care of flush cache on its own * No need to send it down */ scmd->result = DID_OK << 16; goto out_done; default: break; } cmd = megasas_get_cmd(instance); if (!cmd) return SCSI_MLQUEUE_HOST_BUSY; /* * Logical drive command */ if (megasas_is_ldio(scmd)) frame_count = megasas_build_ldio(instance, scmd, cmd); else frame_count = megasas_build_dcdb(instance, scmd, cmd); if (!frame_count) goto out_return_cmd; cmd->scmd = scmd; scmd->SCp.ptr = (char *)cmd; /* * Issue the command to the FW */ atomic_inc(&instance->fw_outstanding); instance->instancet->fire_cmd(instance, cmd->frame_phys_addr, cmd->frame_count-1, instance->reg_set); /* * Check if we have pend cmds to be completed */ if (poll_mode_io && atomic_read(&instance->fw_outstanding)) tasklet_schedule(&instance->isr_tasklet); return 0; out_return_cmd: megasas_return_cmd(instance, cmd); out_done: done(scmd); return 0; } static struct megasas_instance *megasas_lookup_instance(u16 host_no) { int i; for (i = 0; i < megasas_mgmt_info.max_index; i++) { if 
((megasas_mgmt_info.instance[i]) && (megasas_mgmt_info.instance[i]->host->host_no == host_no)) return megasas_mgmt_info.instance[i]; } return NULL; } static int megasas_slave_configure(struct scsi_device *sdev) { u16 pd_index = 0; struct megasas_instance *instance ; instance = megasas_lookup_instance(sdev->host->host_no); /* * Don't export physical disk devices to the disk driver. * * FIXME: Currently we don't export them to the midlayer at all. * That will be fixed once LSI engineers have audited the * firmware for possible issues. */ if (sdev->channel < MEGASAS_MAX_PD_CHANNELS && sdev->type == TYPE_DISK) { pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id; if (instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) { blk_queue_rq_timeout(sdev->request_queue, MEGASAS_DEFAULT_CMD_TIMEOUT * HZ); return 0; } return -ENXIO; } /* * The RAID firmware may require extended timeouts. */ blk_queue_rq_timeout(sdev->request_queue, MEGASAS_DEFAULT_CMD_TIMEOUT * HZ); return 0; } static int megasas_slave_alloc(struct scsi_device *sdev) { u16 pd_index = 0; struct megasas_instance *instance ; instance = megasas_lookup_instance(sdev->host->host_no); if ((sdev->channel < MEGASAS_MAX_PD_CHANNELS) && (sdev->type == TYPE_DISK)) { /* * Open the OS scan to the SYSTEM PD */ pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id; if ((instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) && (instance->pd_list[pd_index].driveType == TYPE_DISK)) { return 0; } return -ENXIO; } return 0; } /** * megasas_complete_cmd_dpc - Returns FW's controller structure * @instance_addr: Address of adapter soft state * * Tasklet to complete cmds */ static void megasas_complete_cmd_dpc(unsigned long instance_addr) { u32 producer; u32 consumer; u32 context; struct megasas_cmd *cmd; struct megasas_instance *instance = (struct megasas_instance *)instance_addr; unsigned long flags; /* If we have already declared adapter dead, donot complete cmds */ if 
(instance->hw_crit_error) return; spin_lock_irqsave(&instance->completion_lock, flags); producer = *instance->producer; consumer = *instance->consumer; while (consumer != producer) { context = instance->reply_queue[consumer]; cmd = instance->cmd_list[context]; megasas_complete_cmd(instance, cmd, DID_OK); consumer++; if (consumer == (instance->max_fw_cmds + 1)) { consumer = 0; } } *instance->consumer = producer; spin_unlock_irqrestore(&instance->completion_lock, flags); /* * Check if we can restore can_queue */ if (instance->flag & MEGASAS_FW_BUSY && time_after(jiffies, instance->last_time + 5 * HZ) && atomic_read(&instance->fw_outstanding) < 17) { spin_lock_irqsave(instance->host->host_lock, flags); instance->flag &= ~MEGASAS_FW_BUSY; if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { instance->host->can_queue = instance->max_fw_cmds - MEGASAS_SKINNY_INT_CMDS; } else instance->host->can_queue = instance->max_fw_cmds - MEGASAS_INT_CMDS; spin_unlock_irqrestore(instance->host->host_lock, flags); } } /** * megasas_wait_for_outstanding - Wait for all outstanding cmds * @instance: Adapter soft state * * This function waits for upto MEGASAS_RESET_WAIT_TIME seconds for FW to * complete all its outstanding commands. Returns error if one or more IOs * are pending after this time period. It also marks the controller dead. */ static int megasas_wait_for_outstanding(struct megasas_instance *instance) { int i; u32 wait_time = MEGASAS_RESET_WAIT_TIME; for (i = 0; i < wait_time; i++) { int outstanding = atomic_read(&instance->fw_outstanding); if (!outstanding) break; if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) { printk(KERN_NOTICE "megasas: [%2d]waiting for %d " "commands to complete\n",i,outstanding); /* * Call cmd completion routine. Cmd to be * be completed directly without depending on isr. 
*/ megasas_complete_cmd_dpc((unsigned long)instance); } msleep(1000); } if (atomic_read(&instance->fw_outstanding)) { /* * Send signal to FW to stop processing any pending cmds. * The controller will be taken offline by the OS now. */ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { writel(MFI_STOP_ADP, &instance->reg_set->reserved_0[0]); } else { writel(MFI_STOP_ADP, &instance->reg_set->inbound_doorbell); } megasas_dump_pending_frames(instance); instance->hw_crit_error = 1; return FAILED; } return SUCCESS; } /** * megasas_generic_reset - Generic reset routine * @scmd: Mid-layer SCSI command * * This routine implements a generic reset handler for device, bus and host * reset requests. Device, bus and host specific reset handlers can use this * function after they do their specific tasks. */ static int megasas_generic_reset(struct scsi_cmnd *scmd) { int ret_val; struct megasas_instance *instance; instance = (struct megasas_instance *)scmd->device->host->hostdata; scmd_printk(KERN_NOTICE, scmd, "megasas: RESET -%ld cmd=%x retries=%x\n", scmd->serial_number, scmd->cmnd[0], scmd->retries); if (instance->hw_crit_error) { printk(KERN_ERR "megasas: cannot recover from previous reset " "failures\n"); return FAILED; } ret_val = megasas_wait_for_outstanding(instance); if (ret_val == SUCCESS) printk(KERN_NOTICE "megasas: reset successful \n"); else printk(KERN_ERR "megasas: failed to do reset\n"); return ret_val; } /** * megasas_reset_timer - quiesce the adapter if required * @scmd: scsi cmnd * * Sets the FW busy flag and reduces the host->can_queue if the * cmd has not been completed within the timeout period. 
*/ static enum blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd) { struct megasas_cmd *cmd = (struct megasas_cmd *)scmd->SCp.ptr; struct megasas_instance *instance; unsigned long flags; if (time_after(jiffies, scmd->jiffies_at_alloc + (MEGASAS_DEFAULT_CMD_TIMEOUT * 2) * HZ)) { return BLK_EH_NOT_HANDLED; } instance = cmd->instance; if (!(instance->flag & MEGASAS_FW_BUSY)) { /* FW is busy, throttle IO */ spin_lock_irqsave(instance->host->host_lock, flags); instance->host->can_queue = 16; instance->last_time = jiffies; instance->flag |= MEGASAS_FW_BUSY; spin_unlock_irqrestore(instance->host->host_lock, flags); } return BLK_EH_RESET_TIMER; } /** * megasas_reset_device - Device reset handler entry point */ static int megasas_reset_device(struct scsi_cmnd *scmd) { int ret; /* * First wait for all commands to complete */ ret = megasas_generic_reset(scmd); return ret; } /** * megasas_reset_bus_host - Bus & host reset handler entry point */ static int megasas_reset_bus_host(struct scsi_cmnd *scmd) { int ret; /* * First wait for all commands to complete */ ret = megasas_generic_reset(scmd); return ret; } /** * megasas_bios_param - Returns disk geometry for a disk * @sdev: device handle * @bdev: block device * @capacity: drive capacity * @geom: geometry parameters */ static int megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[]) { int heads; int sectors; sector_t cylinders; unsigned long tmp; /* Default heads (64) & sectors (32) */ heads = 64; sectors = 32; tmp = heads * sectors; cylinders = capacity; sector_div(cylinders, tmp); /* * Handle extended translation size for logical drives > 1Gb */ if (capacity >= 0x200000) { heads = 255; sectors = 63; tmp = heads*sectors; cylinders = capacity; sector_div(cylinders, tmp); } geom[0] = heads; geom[1] = sectors; geom[2] = cylinders; return 0; } static void megasas_aen_polling(struct work_struct *work); /** * megasas_service_aen - Processes an event notification * 
@instance:			Adapter soft state
 * @cmd:			AEN command completed by the ISR
 *
 * For AEN, driver sends a command down to FW that is held by the FW till an
 * event occurs. When an event of interest occurs, FW completes the command
 * that it was previously holding.
 *
 * This routines sends SIGIO signal to processes that have registered with the
 * driver for AEN.
 */
static void
megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
	unsigned long flags;
	/*
	 * Don't signal app if it is just an aborted previously registered aen
	 */
	if ((!cmd->abort_aen) && (instance->unload == 0)) {
		spin_lock_irqsave(&poll_aen_lock, flags);
		megasas_poll_wait_aen = 1;
		spin_unlock_irqrestore(&poll_aen_lock, flags);
		wake_up(&megasas_poll_wait);
		kill_fasync(&megasas_async_queue, SIGIO, POLL_IN);
	}
	else
		cmd->abort_aen = 0;

	/* The held AEN command is done with; release it before rescanning. */
	instance->aen_cmd = NULL;
	megasas_return_cmd(instance, cmd);

	if (instance->unload == 0) {
		struct megasas_aen_event *ev;
		/* GFP_ATOMIC: may be called from the completion tasklet. */
		ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
		if (!ev) {
			printk(KERN_ERR "megasas_service_aen: out of memory\n");
		} else {
			ev->instance = instance;
			instance->ev = ev;
			INIT_WORK(&ev->hotplug_work, megasas_aen_polling);
			/*
			 * NOTE(review): casting a work_struct embedded in the
			 * event to delayed_work relies on hotplug_work being
			 * the first member of a delayed_work-compatible layout
			 * — confirm against struct megasas_aen_event.
			 */
			schedule_delayed_work(
				(struct delayed_work *)&ev->hotplug_work, 0);
		}
	}
}

/*
 * Scsi host template for megaraid_sas driver
 */
static struct scsi_host_template megasas_template = {

	.module = THIS_MODULE,
	.name = "LSI SAS based MegaRAID driver",
	.proc_name = "megaraid_sas",
	.slave_configure = megasas_slave_configure,
	.slave_alloc = megasas_slave_alloc,
	.queuecommand = megasas_queue_command,
	.eh_device_reset_handler = megasas_reset_device,
	.eh_bus_reset_handler = megasas_reset_bus_host,
	.eh_host_reset_handler = megasas_reset_bus_host,
	.eh_timed_out = megasas_reset_timer,
	.bios_param = megasas_bios_param,
	.use_clustering = ENABLE_CLUSTERING,
};

/**
 * megasas_complete_int_cmd -	Completes an internal command
 * @instance:			Adapter soft state
 * @cmd:			Command to be completed
 *
 * The megasas_issue_blocked_cmd() function waits for a command to
complete
 * after it issues a command. This function wakes up that waiting routine by
 * calling wake_up() on the wait queue.
 */
static void
megasas_complete_int_cmd(struct megasas_instance *instance,
			 struct megasas_cmd *cmd)
{
	cmd->cmd_status = cmd->frame->io.cmd_status;

	if (cmd->cmd_status == ENODATA) {
		cmd->cmd_status = 0;
	}
	wake_up(&instance->int_cmd_wait_q);
}

/**
 * megasas_complete_abort -	Completes aborting a command
 * @instance:			Adapter soft state
 * @cmd:			Cmd that was issued to abort another cmd
 *
 * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q
 * after it issues an abort on a previously issued command. This function
 * wakes up all functions waiting on the same wait queue.
 */
static void
megasas_complete_abort(struct megasas_instance *instance,
		       struct megasas_cmd *cmd)
{
	if (cmd->sync_cmd) {
		cmd->sync_cmd = 0;
		cmd->cmd_status = 0;
		wake_up(&instance->abort_cmd_wait_q);
	}

	return;
}

/**
 * megasas_complete_cmd -	Completes a command
 * @instance:			Adapter soft state
 * @cmd:			Command to be completed
 * @alt_status:			If non-zero, use this value as status to
 * 				SCSI mid-layer instead of the value returned
 * 				by the FW. This should be used if caller wants
 * 				an alternate status (as in the case of aborted
 * 				commands)
 */
static void
megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
		     u8 alt_status)
{
	int exception = 0;
	struct megasas_header *hdr = &cmd->frame->hdr;
	unsigned long flags;

	if (cmd->scmd)
		cmd->scmd->SCp.ptr = NULL;

	switch (hdr->cmd) {

	case MFI_CMD_PD_SCSI_IO:
	case MFI_CMD_LD_SCSI_IO:

		/*
		 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
		 * issued either through an IO path or an IOCTL path. If it
		 * was via IOCTL, we will send it to internal completion.
		 */
		if (cmd->sync_cmd) {
			cmd->sync_cmd = 0;
			megasas_complete_int_cmd(instance, cmd);
			break;
		}
		/* fallthrough - IO-path SCSI cmds share the LD R/W completion */

	case MFI_CMD_LD_READ:
	case MFI_CMD_LD_WRITE:

		if (alt_status) {
			cmd->scmd->result = alt_status << 16;
			exception = 1;
		}

		if (exception) {

			atomic_dec(&instance->fw_outstanding);

			scsi_dma_unmap(cmd->scmd);
			cmd->scmd->scsi_done(cmd->scmd);
			megasas_return_cmd(instance, cmd);

			break;
		}

		/* Translate FW completion status into a SCSI mid-layer result. */
		switch (hdr->cmd_status) {

		case MFI_STAT_OK:
			cmd->scmd->result = DID_OK << 16;
			break;

		case MFI_STAT_SCSI_IO_FAILED:
		case MFI_STAT_LD_INIT_IN_PROGRESS:
			cmd->scmd->result =
			    (DID_ERROR << 16) | hdr->scsi_status;
			break;

		case MFI_STAT_SCSI_DONE_WITH_ERROR:

			cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status;

			if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) {
				/* Hand the FW-provided sense data to the mid-layer. */
				memset(cmd->scmd->sense_buffer, 0,
				       SCSI_SENSE_BUFFERSIZE);
				memcpy(cmd->scmd->sense_buffer, cmd->sense,
				       hdr->sense_len);

				cmd->scmd->result |= DRIVER_SENSE << 24;
			}

			break;

		case MFI_STAT_LD_OFFLINE:
		case MFI_STAT_DEVICE_NOT_FOUND:
			cmd->scmd->result = DID_BAD_TARGET << 16;
			break;

		default:
			printk(KERN_DEBUG "megasas: MFI FW status %#x\n",
			       hdr->cmd_status);
			cmd->scmd->result = DID_ERROR << 16;
			break;
		}

		atomic_dec(&instance->fw_outstanding);

		scsi_dma_unmap(cmd->scmd);
		cmd->scmd->scsi_done(cmd->scmd);
		megasas_return_cmd(instance, cmd);

		break;

	case MFI_CMD_SMP:
	case MFI_CMD_STP:
	case MFI_CMD_DCMD:

		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
			cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
			spin_lock_irqsave(&poll_aen_lock, flags);
			megasas_poll_wait_aen = 0;
			spin_unlock_irqrestore(&poll_aen_lock, flags);
		}

		/*
		 * See if got an event notification
		 */
		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
			megasas_service_aen(instance, cmd);
		else
			megasas_complete_int_cmd(instance, cmd);

		break;

	case MFI_CMD_ABORT:
		/*
		 * Cmd issued to abort another cmd returned
		 */
		megasas_complete_abort(instance, cmd);
		break;

	default:
		printk("megasas: Unknown command completed! [0x%X]\n",
		       hdr->cmd);
		break;
	}
}

/**
 * megasas_deplete_reply_queue -	Processes all completed commands
 * @instance:				Adapter soft state
 * @alt_status:				Alternate status to be returned to
 * 					SCSI mid-layer instead of the status
 * 					returned by the FW
 */
static int
megasas_deplete_reply_queue(struct megasas_instance *instance, u8 alt_status)
{
	/*
	 * Check if it is our interrupt
	 * Clear the interrupt
	 */
	if(instance->instancet->clear_intr(instance->reg_set))
		return IRQ_NONE;

	if (instance->hw_crit_error)
		goto out_done;
	/*
	 * Schedule the tasklet for cmd completion
	 */
	tasklet_schedule(&instance->isr_tasklet);
out_done:
	return IRQ_HANDLED;
}

/**
 * megasas_isr - isr entry point
 *
 * Thin wrapper: delegates to megasas_deplete_reply_queue() with DID_OK.
 */
static irqreturn_t megasas_isr(int irq, void *devp)
{
	return megasas_deplete_reply_queue((struct megasas_instance *)devp,
					   DID_OK);
}

/**
 * megasas_transition_to_ready -	Move the FW to READY state
 * @instance:				Adapter soft state
 *
 * During the initialization, FW passes can potentially be in any one of
 * several possible states. If the FW in operational, waiting-for-handshake
 * states, driver must take steps to bring it to ready state. Otherwise, it
 * has to wait for the ready state.
 */
static int
megasas_transition_to_ready(struct megasas_instance* instance)
{
	int i;
	u8 max_wait;
	u32 fw_state;
	u32 cur_state;
	u32 abs_state, curr_abs_state;

	fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;

	if (fw_state != MFI_STATE_READY)
		printk(KERN_INFO "megasas: Waiting for FW to come to ready"
		       " state\n");

	/* Drive the FW state machine: nudge each state, then poll for change. */
	while (fw_state != MFI_STATE_READY) {

		/* Full (unmasked) status word; used to detect any change below. */
		abs_state =
		instance->instancet->read_fw_status_reg(instance->reg_set);

		switch (fw_state) {

		case MFI_STATE_FAULT:

			printk(KERN_DEBUG "megasas: FW in FAULT state!!\n");
			return -ENODEV;

		case MFI_STATE_WAIT_HANDSHAKE:
			/*
			 * Set the CLR bit in inbound doorbell
			 */
			if ((instance->pdev->device ==
				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
				(instance->pdev->device ==
				PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {

				writel(
				  MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
				  &instance->reg_set->reserved_0[0]);
			} else {
				writel(
				    MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
					&instance->reg_set->inbound_doorbell);
			}

			max_wait = MEGASAS_RESET_WAIT_TIME;
			cur_state = MFI_STATE_WAIT_HANDSHAKE;
			break;

		case MFI_STATE_BOOT_MESSAGE_PENDING:
			if ((instance->pdev->device ==
				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
			(instance->pdev->device ==
				PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
				writel(MFI_INIT_HOTPLUG,
				&instance->reg_set->reserved_0[0]);
			} else
				writel(MFI_INIT_HOTPLUG,
					&instance->reg_set->inbound_doorbell);

			max_wait = MEGASAS_RESET_WAIT_TIME;
			cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
			break;

		case MFI_STATE_OPERATIONAL:
			/*
			 * Bring it to READY state; assuming max wait 10 secs
			 */
			instance->instancet->disable_intr(instance->reg_set);
			if ((instance->pdev->device ==
				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
				(instance->pdev->device ==
				PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
				writel(MFI_RESET_FLAGS,
					&instance->reg_set->reserved_0[0]);
			} else
				writel(MFI_RESET_FLAGS,
					&instance->reg_set->inbound_doorbell);

			max_wait = MEGASAS_RESET_WAIT_TIME;
			cur_state = MFI_STATE_OPERATIONAL;
			break;

		case MFI_STATE_UNDEFINED:
			/*
			 * This state should not last for more than 2 seconds
			 */
			max_wait = MEGASAS_RESET_WAIT_TIME;
			cur_state = MFI_STATE_UNDEFINED;
			break;

		case MFI_STATE_BB_INIT:
			max_wait = MEGASAS_RESET_WAIT_TIME;
			cur_state = MFI_STATE_BB_INIT;
			break;

		case MFI_STATE_FW_INIT:
			max_wait = MEGASAS_RESET_WAIT_TIME;
			cur_state = MFI_STATE_FW_INIT;
			break;

		case MFI_STATE_FW_INIT_2:
			max_wait = MEGASAS_RESET_WAIT_TIME;
			cur_state = MFI_STATE_FW_INIT_2;
			break;

		case MFI_STATE_DEVICE_SCAN:
			max_wait = MEGASAS_RESET_WAIT_TIME;
			cur_state = MFI_STATE_DEVICE_SCAN;
			break;

		case MFI_STATE_FLUSH_CACHE:
			max_wait = MEGASAS_RESET_WAIT_TIME;
			cur_state = MFI_STATE_FLUSH_CACHE;
			break;

		default:
			printk(KERN_DEBUG "megasas: Unknown state 0x%x\n",
			       fw_state);
			return -ENODEV;
		}

		/*
		 * The cur_state should not last for more than max_wait secs
		 */
		for (i = 0; i < (max_wait * 1000); i++) {
			fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) &
					MFI_STATE_MASK ;
		curr_abs_state =
		instance->instancet->read_fw_status_reg(instance->reg_set);

			if (abs_state == curr_abs_state) {
				msleep(1);
			} else
				break;
		}

		/*
		 * Return error if fw_state hasn't changed after max_wait
		 */
		if (curr_abs_state == abs_state) {
			printk(KERN_DEBUG "FW state [%d] hasn't changed "
			       "in %d secs\n", fw_state, max_wait);
			return -ENODEV;
		}
	};

	printk(KERN_INFO "megasas: FW now in Ready state\n");

	return 0;
}

/**
 * megasas_teardown_frame_pool -	Destroy the cmd frame DMA pool
 * @instance:				Adapter soft state
 *
 * Returns each command's frame and sense buffer to their DMA pools, then
 * destroys both pools. Safe to call when the pool was never created.
 */
static void megasas_teardown_frame_pool(struct megasas_instance *instance)
{
	int i;
	u32 max_cmd = instance->max_fw_cmds;
	struct megasas_cmd *cmd;

	if (!instance->frame_dma_pool)
		return;

	/*
	 * Return all frames to pool
	 */
	for (i = 0; i < max_cmd; i++) {

		cmd = instance->cmd_list[i];

		if (cmd->frame)
			pci_pool_free(instance->frame_dma_pool, cmd->frame,
				      cmd->frame_phys_addr);

		if (cmd->sense)
			pci_pool_free(instance->sense_dma_pool, cmd->sense,
				      cmd->sense_phys_addr);
	}

	/*
	 * Now destroy the pool itself
	 */
	pci_pool_destroy(instance->frame_dma_pool);
	pci_pool_destroy(instance->sense_dma_pool);

	instance->frame_dma_pool = NULL;
	instance->sense_dma_pool = NULL;
}

/**
 * megasas_create_frame_pool -	Creates DMA pool for cmd frames
 * @instance:			Adapter soft state
 *
 * Each command packet has an embedded DMA memory buffer that is used for
 * filling MFI frame and the SG list that immediately follows the frame. This
 * function creates those DMA memory buffers for each command packet by using
 * PCI pool facility.
 */
static int megasas_create_frame_pool(struct megasas_instance *instance)
{
	int i;
	u32 max_cmd;
	u32 sge_sz;
	u32 sgl_sz;
	u32 total_sz;
	u32 frame_count;
	struct megasas_cmd *cmd;

	max_cmd = instance->max_fw_cmds;

	/*
	 * Size of our frame is 64 bytes for MFI frame, followed by max SG
	 * elements and finally SCSI_SENSE_BUFFERSIZE bytes for sense buffer
	 */
	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
	    sizeof(struct megasas_sge32);

	if (instance->flag_ieee) {
		sge_sz = sizeof(struct megasas_sge_skinny);
	}

	/*
	 * Calculated the number of 64byte frames required for SGL
	 */
	sgl_sz = sge_sz * instance->max_num_sge;
	frame_count = (sgl_sz + MEGAMFI_FRAME_SIZE - 1) / MEGAMFI_FRAME_SIZE;

	/*
	 * We need one extra frame for the MFI command
	 */
	frame_count++;

	total_sz = MEGAMFI_FRAME_SIZE * frame_count;
	/*
	 * Use DMA pool facility provided by PCI layer
	 */
	instance->frame_dma_pool = pci_pool_create("megasas frame pool",
						   instance->pdev, total_sz, 64,
						   0);

	if (!instance->frame_dma_pool) {
		printk(KERN_DEBUG "megasas: failed to setup frame pool\n");
		return -ENOMEM;
	}

	instance->sense_dma_pool = pci_pool_create("megasas sense pool",
						   instance->pdev, 128, 4, 0);

	if (!instance->sense_dma_pool) {
		printk(KERN_DEBUG "megasas: failed to setup sense pool\n");

		pci_pool_destroy(instance->frame_dma_pool);
		instance->frame_dma_pool = NULL;

		return -ENOMEM;
	}

	/*
	 * Allocate and attach a frame to each of the commands in cmd_list.
	 * By making cmd->index as the context instead of the &cmd, we can
	 * always use 32bit context regardless of the architecture
	 */
	for (i = 0; i < max_cmd; i++) {

		cmd = instance->cmd_list[i];

		cmd->frame = pci_pool_alloc(instance->frame_dma_pool,
					    GFP_KERNEL, &cmd->frame_phys_addr);

		cmd->sense = pci_pool_alloc(instance->sense_dma_pool,
					    GFP_KERNEL, &cmd->sense_phys_addr);

		/*
		 * megasas_teardown_frame_pool() takes care of freeing
		 * whatever has been allocated
		 */
		if (!cmd->frame || !cmd->sense) {
			printk(KERN_DEBUG "megasas: pci_pool_alloc failed \n");
			megasas_teardown_frame_pool(instance);
			return -ENOMEM;
		}

		cmd->frame->io.context = cmd->index;
		cmd->frame->io.pad_0 = 0;
	}

	return 0;
}

/**
 * megasas_free_cmds -	Free all the cmds in the free cmd pool
 * @instance:		Adapter soft state
 */
static void megasas_free_cmds(struct megasas_instance *instance)
{
	int i;

	/* First free the MFI frame pool */
	megasas_teardown_frame_pool(instance);

	/* Free all the commands in the cmd_list */
	for (i = 0; i < instance->max_fw_cmds; i++)
		kfree(instance->cmd_list[i]);

	/* Free the cmd_list buffer itself */
	kfree(instance->cmd_list);
	instance->cmd_list = NULL;

	INIT_LIST_HEAD(&instance->cmd_pool);
}

/**
 * megasas_alloc_cmds -	Allocates the command packets
 * @instance:		Adapter soft state
 *
 * Each command that is issued to the FW, whether IO commands from the OS or
 * internal commands like IOCTLs, are wrapped in local data structure called
 * megasas_cmd. The frame embedded in this megasas_cmd is actually issued to
 * the FW.
 *
 * Each frame has a 32-bit field called context (tag). This context is used
 * to get back the megasas_cmd from the frame when a frame gets completed in
 * the ISR. Typically the address of the megasas_cmd itself would be used as
 * the context. But we wanted to keep the differences between 32 and 64 bit
 * systems to the minimum. We always use 32 bit integers for the context. In
 * this driver, the 32 bit values are the indices into an array cmd_list.
* This array is used only to look up the megasas_cmd given the context. The * free commands themselves are maintained in a linked list called cmd_pool. */ static int megasas_alloc_cmds(struct megasas_instance *instance) { int i; int j; u32 max_cmd; struct megasas_cmd *cmd; max_cmd = instance->max_fw_cmds; /* * instance->cmd_list is an array of struct megasas_cmd pointers. * Allocate the dynamic array first and then allocate individual * commands. */ instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd*), GFP_KERNEL); if (!instance->cmd_list) { printk(KERN_DEBUG "megasas: out of memory\n"); return -ENOMEM; } for (i = 0; i < max_cmd; i++) { instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd), GFP_KERNEL); if (!instance->cmd_list[i]) { for (j = 0; j < i; j++) kfree(instance->cmd_list[j]); kfree(instance->cmd_list); instance->cmd_list = NULL; return -ENOMEM; } } /* * Add all the commands to command pool (instance->cmd_pool) */ for (i = 0; i < max_cmd; i++) { cmd = instance->cmd_list[i]; memset(cmd, 0, sizeof(struct megasas_cmd)); cmd->index = i; cmd->instance = instance; list_add_tail(&cmd->list, &instance->cmd_pool); } /* * Create a frame pool and assign one frame to each cmd */ if (megasas_create_frame_pool(instance)) { printk(KERN_DEBUG "megasas: Error creating frame DMA pool\n"); megasas_free_cmds(instance); } return 0; } /* * megasas_get_pd_list_info - Returns FW's pd_list structure * @instance: Adapter soft state * @pd_list: pd_list structure * * Issues an internal command (DCMD) to get the FW's controller PD * list structure. This information is mainly used to find out SYSTEM * supported by the FW. 
 */
static int
megasas_get_pd_list(struct megasas_instance *instance)
{
	int ret = 0, pd_index = 0;
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;
	struct MR_PD_LIST *ci;
	struct MR_PD_ADDRESS *pd_addr;
	dma_addr_t ci_h = 0;

	cmd = megasas_get_cmd(instance);

	if (!cmd) {
		printk(KERN_DEBUG "megasas (get_pd_list): Failed to get cmd\n");
		return -ENOMEM;
	}

	dcmd = &cmd->frame->dcmd;

	/*
	 * NOTE(review): allocation and data_xfer_len use
	 * MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST) while the memset below
	 * clears only sizeof(*ci) — verify the intended buffer size.
	 */
	ci = pci_alloc_consistent(instance->pdev,
		  MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), &ci_h);

	if (!ci) {
		printk(KERN_DEBUG "Failed to alloc mem for pd_list\n");
		megasas_return_cmd(instance, cmd);
		return -ENOMEM;
	}

	memset(ci, 0, sizeof(*ci));
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
	dcmd->mbox.b[1] = 0;
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST);
	dcmd->opcode = MR_DCMD_PD_LIST_QUERY;
	dcmd->sgl.sge32[0].phys_addr = ci_h;
	dcmd->sgl.sge32[0].length = MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST);

	if (!megasas_issue_polled(instance, cmd)) {
		ret = 0;
	} else {
		ret = -1;
	}

	/*
	 * the following function will get the instance PD LIST.
	 */

	pd_addr = ci->addr;

	if ( ret == 0 &&
		(ci->count <
		  (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) {

		memset(instance->pd_list, 0,
			MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));

		/* Record each FW-reported PD, keyed by its device ID. */
		for (pd_index = 0; pd_index < ci->count; pd_index++) {

			instance->pd_list[pd_addr->deviceId].tid	=
							pd_addr->deviceId;
			instance->pd_list[pd_addr->deviceId].driveType	=
							pd_addr->scsiDevType;
			instance->pd_list[pd_addr->deviceId].driveState	=
							MR_PD_STATE_SYSTEM;
			pd_addr++;
		}
	}

	pci_free_consistent(instance->pdev,
				MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
				ci, ci_h);
	megasas_return_cmd(instance, cmd);

	return ret;
}

/*
 * megasas_get_ld_list_info -	Returns FW's ld_list structure
 * @instance:			Adapter soft state
 * @ld_list:			ld_list structure
 *
 * Issues an internal command (DCMD) to get the FW's controller PD
 * list structure. This information is mainly used to find out SYSTEM
 * supported by the FW.
 */
static int
megasas_get_ld_list(struct megasas_instance *instance)
{
	int ret = 0, ld_index = 0, ids = 0;
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;
	struct MR_LD_LIST *ci;
	dma_addr_t ci_h = 0;

	cmd = megasas_get_cmd(instance);

	if (!cmd) {
		printk(KERN_DEBUG "megasas_get_ld_list: Failed to get cmd\n");
		return -ENOMEM;
	}

	dcmd = &cmd->frame->dcmd;

	ci = pci_alloc_consistent(instance->pdev,
				sizeof(struct MR_LD_LIST),
				&ci_h);

	if (!ci) {
		printk(KERN_DEBUG "Failed to alloc mem in get_ld_list\n");
		megasas_return_cmd(instance, cmd);
		return -ENOMEM;
	}

	memset(ci, 0, sizeof(*ci));
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->data_xfer_len = sizeof(struct MR_LD_LIST);
	dcmd->opcode = MR_DCMD_LD_GET_LIST;
	dcmd->sgl.sge32[0].phys_addr = ci_h;
	dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
	dcmd->pad_0  = 0;

	if (!megasas_issue_polled(instance, cmd)) {
		ret = 0;
	} else {
		ret = -1;
	}

	/* the following function will get the instance PD LIST */

	if ((ret == 0) && (ci->ldCount < MAX_LOGICAL_DRIVES)) {
		memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);

		/* Mark target IDs of LDs the FW reports as non-deleted. */
		for (ld_index = 0; ld_index < ci->ldCount; ld_index++) {
			if (ci->ldList[ld_index].state != 0) {
				ids = ci->ldList[ld_index].ref.targetId;
				instance->ld_ids[ids] =
					ci->ldList[ld_index].ref.targetId;
			}
		}
	}

	pci_free_consistent(instance->pdev,
				sizeof(struct MR_LD_LIST),
				ci,
				ci_h);

	megasas_return_cmd(instance, cmd);
	return ret;
}

/**
 * megasas_get_controller_info -	Returns FW's controller structure
 * @instance:				Adapter soft state
 * @ctrl_info:				Controller information structure
 *
 * Issues an internal command (DCMD) to get the FW's controller structure.
 * This information is mainly used to find out the maximum IO transfer per
 * command supported by the FW.
 */
static int
megasas_get_ctrl_info(struct megasas_instance *instance,
		      struct megasas_ctrl_info *ctrl_info)
{
	int ret = 0;
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;
	struct megasas_ctrl_info *ci;
	dma_addr_t ci_h = 0;

	cmd = megasas_get_cmd(instance);

	if (!cmd) {
		printk(KERN_DEBUG "megasas: Failed to get a free cmd\n");
		return -ENOMEM;
	}

	dcmd = &cmd->frame->dcmd;

	ci = pci_alloc_consistent(instance->pdev,
				  sizeof(struct megasas_ctrl_info), &ci_h);

	if (!ci) {
		printk(KERN_DEBUG "Failed to alloc mem for ctrl info\n");
		megasas_return_cmd(instance, cmd);
		return -ENOMEM;
	}

	memset(ci, 0, sizeof(*ci));
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sizeof(struct megasas_ctrl_info);
	dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
	dcmd->sgl.sge32[0].phys_addr = ci_h;
	dcmd->sgl.sge32[0].length = sizeof(struct megasas_ctrl_info);

	if (!megasas_issue_polled(instance, cmd)) {
		ret = 0;
		/* Copy out of the DMA buffer into the caller's structure. */
		memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info));
	} else {
		ret = -1;
	}

	pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info),
			    ci, ci_h);

	megasas_return_cmd(instance, cmd);
	return ret;
}

/**
 *
megasas_issue_init_mfi -	Initializes the FW
 * @instance:		Adapter soft state
 *
 * Issues the INIT MFI cmd
 *
 * Returns 0 on success, -EINVAL if the polled INIT frame fails.
 */
static int
megasas_issue_init_mfi(struct megasas_instance *instance)
{
	u32 context;

	struct megasas_cmd *cmd;

	struct megasas_init_frame *init_frame;
	struct megasas_init_queue_info *initq_info;
	dma_addr_t init_frame_h;
	dma_addr_t initq_info_h;

	/*
	 * Prepare a init frame. Note the init frame points to queue info
	 * structure. Each frame has SGL allocated after first 64 bytes. For
	 * this frame - since we don't need any SGL - we use SGL's space as
	 * queue info structure
	 *
	 * We will not get a NULL command below. We just created the pool.
	 */
	cmd = megasas_get_cmd(instance);

	init_frame = (struct megasas_init_frame *)cmd->frame;
	initq_info = (struct megasas_init_queue_info *)
		((unsigned long)init_frame + 64);

	init_frame_h = cmd->frame_phys_addr;
	initq_info_h = init_frame_h + 64;

	/* Preserve the frame's context across the memset below. */
	context = init_frame->context;
	memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
	memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
	init_frame->context = context;

	initq_info->reply_queue_entries = instance->max_fw_cmds + 1;
	initq_info->reply_queue_start_phys_addr_lo = instance->reply_queue_h;

	initq_info->producer_index_phys_addr_lo = instance->producer_h;
	initq_info->consumer_index_phys_addr_lo = instance->consumer_h;

	init_frame->cmd = MFI_CMD_INIT;
	init_frame->cmd_status = 0xFF;
	init_frame->queue_info_new_phys_addr_lo = initq_info_h;

	init_frame->data_xfer_len = sizeof(struct megasas_init_queue_info);

	/*
	 * disable the intr before firing the init frame to FW
	 */
	instance->instancet->disable_intr(instance->reg_set);

	/*
	 * Issue the init frame in polled mode
	 */

	if (megasas_issue_polled(instance, cmd)) {
		printk(KERN_ERR "megasas: Failed to init firmware\n");
		megasas_return_cmd(instance, cmd);
		goto fail_fw_init;
	}

	megasas_return_cmd(instance, cmd);

	return 0;

fail_fw_init:
	return -EINVAL;
}

/**
 * megasas_start_timer - Initializes a timer object
 * @instance:		Adapter soft state
 * @timer:		timer object
to be initialized * @fn: timer function * @interval: time interval between timer function call */ static inline void megasas_start_timer(struct megasas_instance *instance, struct timer_list *timer, void *fn, unsigned long interval) { init_timer(timer); timer->expires = jiffies + interval; timer->data = (unsigned long)instance; timer->function = fn; add_timer(timer); } /** * megasas_io_completion_timer - Timer fn * @instance_addr: Address of adapter soft state * * Schedules tasklet for cmd completion * if poll_mode_io is set */ static void megasas_io_completion_timer(unsigned long instance_addr) { struct megasas_instance *instance = (struct megasas_instance *)instance_addr; if (atomic_read(&instance->fw_outstanding)) tasklet_schedule(&instance->isr_tasklet); /* Restart timer */ if (poll_mode_io) mod_timer(&instance->io_completion_timer, jiffies + MEGASAS_COMPLETION_TIMER_INTERVAL); } /** * megasas_init_mfi - Initializes the FW * @instance: Adapter soft state * * This is the main function for initializing MFI firmware. 
 */
static int megasas_init_mfi(struct megasas_instance *instance)
{
	u32 context_sz;
	u32 reply_q_sz;
	u32 max_sectors_1;
	u32 max_sectors_2;
	u32 tmp_sectors;
	struct megasas_register_set __iomem *reg_set;
	struct megasas_ctrl_info *ctrl_info;
	/*
	 * Map the message registers
	 */
	/* Gen2 and skinny controllers expose the registers on BAR 1, not 0. */
	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0079GEN2)) {
		instance->base_addr = pci_resource_start(instance->pdev, 1);
	} else {
		instance->base_addr = pci_resource_start(instance->pdev, 0);
	}

	if (pci_request_selected_regions(instance->pdev,
		pci_select_bars(instance->pdev, IORESOURCE_MEM),
		"megasas: LSI")) {
		printk(KERN_DEBUG "megasas: IO memory region busy!\n");
		return -EBUSY;
	}

	instance->reg_set = ioremap_nocache(instance->base_addr, 8192);

	if (!instance->reg_set) {
		printk(KERN_DEBUG "megasas: Failed to map IO mem\n");
		goto fail_ioremap;
	}

	reg_set = instance->reg_set;

	/* Pick the register-access template matching the controller family. */
	switch(instance->pdev->device)
	{
		case PCI_DEVICE_ID_LSI_SAS1078R:
		case PCI_DEVICE_ID_LSI_SAS1078DE:
			instance->instancet = &megasas_instance_template_ppc;
			break;
		case PCI_DEVICE_ID_LSI_SAS1078GEN2:
		case PCI_DEVICE_ID_LSI_SAS0079GEN2:
			instance->instancet = &megasas_instance_template_gen2;
			break;
		case PCI_DEVICE_ID_LSI_SAS0073SKINNY:
		case PCI_DEVICE_ID_LSI_SAS0071SKINNY:
			instance->instancet = &megasas_instance_template_skinny;
			break;
		case PCI_DEVICE_ID_LSI_SAS1064R:
		case PCI_DEVICE_ID_DELL_PERC5:
		default:
			instance->instancet = &megasas_instance_template_xscale;
			break;
	}

	/*
	 * We expect the FW state to be READY
	 */
	if (megasas_transition_to_ready(instance))
		goto fail_ready_state;

	/*
	 * Get various operational parameters from status register
	 */
	instance->max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
	/*
	 * Reduce the max supported cmds by 1. This is to ensure that the
	 * reply_q_sz (1 more than the max cmd that driver may send)
	 * does not exceed max cmds that the FW can support
	 */
	instance->max_fw_cmds = instance->max_fw_cmds-1;
	instance->max_num_sge = (instance->instancet->read_fw_status_reg(reg_set) & 0xFF0000) >>
					0x10;
	/*
	 * Create a pool of commands
	 */
	if (megasas_alloc_cmds(instance))
		goto fail_alloc_cmds;

	/*
	 * Allocate memory for reply queue. Length of reply queue should
	 * be _one_ more than the maximum commands handled by the firmware.
	 *
	 * Note: When FW completes commands, it places corresponding contex
	 * values in this circular reply queue. This circular queue is a fairly
	 * typical producer-consumer queue. FW is the producer (of completed
	 * commands) and the driver is the consumer.
	 */
	context_sz = sizeof(u32);
	reply_q_sz = context_sz * (instance->max_fw_cmds + 1);

	instance->reply_queue = pci_alloc_consistent(instance->pdev,
						     reply_q_sz,
						     &instance->reply_queue_h);

	if (!instance->reply_queue) {
		printk(KERN_DEBUG "megasas: Out of DMA mem for reply queue\n");
		goto fail_reply_queue;
	}

	if (megasas_issue_init_mfi(instance))
		goto fail_fw_init;

	/* Discovery results are best-effort; failures here are not fatal. */
	memset(instance->pd_list, 0 ,
		(MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
	megasas_get_pd_list(instance);

	memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
	megasas_get_ld_list(instance);

	ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL);

	/*
	 * Compute the max allowed sectors per IO: The controller info has two
	 * limits on max sectors. Driver should use the minimum of these two.
	 *
	 * 1 << stripe_sz_ops.min = max sectors per strip
	 *
	 * Note that older firmwares ( < FW ver 30) didn't report information
	 * to calculate max_sectors_1. So the number ended up as zero always.
	 */
	tmp_sectors = 0;
	/* NULL ctrl_info (kmalloc failure) just skips the FW-reported limits. */
	if (ctrl_info && !megasas_get_ctrl_info(instance, ctrl_info)) {

		max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
		    ctrl_info->max_strips_per_io;
		max_sectors_2 = ctrl_info->max_request_size;

		tmp_sectors = min_t(u32, max_sectors_1 , max_sectors_2);
	}

	instance->max_sectors_per_req = instance->max_num_sge *
						PAGE_SIZE / 512;
	if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
		instance->max_sectors_per_req = tmp_sectors;

	kfree(ctrl_info);

	/*
	 * Setup tasklet for cmd completion
	 */

	tasklet_init(&instance->isr_tasklet, megasas_complete_cmd_dpc,
		(unsigned long)instance);

	/* Initialize the cmd completion timer */
	if (poll_mode_io)
		megasas_start_timer(instance, &instance->io_completion_timer,
				megasas_io_completion_timer,
				MEGASAS_COMPLETION_TIMER_INTERVAL);
	return 0;

fail_fw_init:

	pci_free_consistent(instance->pdev, reply_q_sz, instance->reply_queue,
			    instance->reply_queue_h);

fail_reply_queue:
	megasas_free_cmds(instance);

fail_alloc_cmds:
fail_ready_state:
	iounmap(instance->reg_set);

fail_ioremap:
	pci_release_selected_regions(instance->pdev,
		pci_select_bars(instance->pdev, IORESOURCE_MEM));

	return -EINVAL;
}

/**
 * megasas_release_mfi -	Reverses the FW initialization
 * @intance:			Adapter soft state
 */
static void megasas_release_mfi(struct megasas_instance *instance)
{
	u32 reply_q_sz = sizeof(u32) * (instance->max_fw_cmds + 1);

	pci_free_consistent(instance->pdev, reply_q_sz,
			    instance->reply_queue, instance->reply_queue_h);

	megasas_free_cmds(instance);

	iounmap(instance->reg_set);

	pci_release_selected_regions(instance->pdev,
		pci_select_bars(instance->pdev, IORESOURCE_MEM));
}

/**
 * megasas_get_seq_num -	Gets latest event sequence numbers
 * @instance:			Adapter soft state
 * @eli:			FW event log sequence numbers information
 *
 * FW maintains a log of all events in a non-volatile area. Upper layers would
 * usually find out the latest sequence number of the events, the seq number at
 * the boot etc.
They would "read" all the events below the latest seq number
 * by issuing a direct fw cmd (DCMD). For the future events (beyond latest seq
 * number), they would subsribe to AEN (asynchronous event notification) and
 * wait for the events to happen.
 */
static int
megasas_get_seq_num(struct megasas_instance *instance,
		    struct megasas_evt_log_info *eli)
{
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;
	struct megasas_evt_log_info *el_info;
	dma_addr_t el_info_h = 0;

	cmd = megasas_get_cmd(instance);

	if (!cmd) {
		return -ENOMEM;
	}

	dcmd = &cmd->frame->dcmd;
	el_info = pci_alloc_consistent(instance->pdev,
				       sizeof(struct megasas_evt_log_info),
				       &el_info_h);

	if (!el_info) {
		megasas_return_cmd(instance, cmd);
		return -ENOMEM;
	}

	memset(el_info, 0, sizeof(*el_info));
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sizeof(struct megasas_evt_log_info);
	dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
	dcmd->sgl.sge32[0].phys_addr = el_info_h;
	dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_log_info);

	/*
	 * NOTE(review): issued blocking (not polled) and the result is not
	 * checked; eli is copied out regardless — confirm the FW always
	 * completes this DCMD.
	 */
	megasas_issue_blocked_cmd(instance, cmd);

	/*
	 * Copy the data back into callers buffer
	 */
	memcpy(eli, el_info, sizeof(struct megasas_evt_log_info));

	pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
			    el_info, el_info_h);

	megasas_return_cmd(instance, cmd);

	return 0;
}

/**
 * megasas_register_aen -	Registers for asynchronous event notification
 * @instance:			Adapter soft state
 * @seq_num:			The starting sequence number
 * @class_locale:		Class of the event
 *
 * This function subscribes for AEN for events beyond the @seq_num.
It requests
 * to be notified if and only if the event is of type @class_locale
 */
static int
megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
		     u32 class_locale_word)
{
	int ret_val;
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;
	union megasas_evt_class_locale curr_aen;
	union megasas_evt_class_locale prev_aen;

	/*
	 * If there an AEN pending already (aen_cmd), check if the
	 * class_locale of that pending AEN is inclusive of the new
	 * AEN request we currently have. If it is, then we don't have
	 * to do anything. In other words, whichever events the current
	 * AEN request is subscribing to, have already been subscribed
	 * to.
	 *
	 * If the old_cmd is _not_ inclusive, then we have to abort
	 * that command, form a class_locale that is superset of both
	 * old and current and re-issue to the FW
	 */

	curr_aen.word = class_locale_word;

	if (instance->aen_cmd) {

		prev_aen.word = instance->aen_cmd->frame->dcmd.mbox.w[1];

		/*
		 * A class whose enum value is smaller is inclusive of all
		 * higher values. If a PROGRESS (= -1) was previously
		 * registered, then a new registration requests for higher
		 * classes need not be sent to FW. They are automatically
		 * included.
		 *
		 * Locale numbers don't have such hierarchy. They are bitmap
		 * values
		 */
		if ((prev_aen.members.class <= curr_aen.members.class) &&
		    !((prev_aen.members.locale & curr_aen.members.locale) ^
		      curr_aen.members.locale)) {
			/*
			 * Previously issued event registration includes
			 * current request. Nothing to do.
			 */
			return 0;
		} else {
			/* Widen to the superset of old and new subscriptions. */
			curr_aen.members.locale |= prev_aen.members.locale;

			if (prev_aen.members.class < curr_aen.members.class)
				curr_aen.members.class = prev_aen.members.class;

			instance->aen_cmd->abort_aen = 1;
			ret_val = megasas_issue_blocked_abort_cmd(instance,
								  instance->
								  aen_cmd);

			if (ret_val) {
				printk(KERN_DEBUG "megasas: Failed to abort "
				       "previous AEN command\n");
				return ret_val;
			}
		}
	}

	cmd = megasas_get_cmd(instance);

	if (!cmd)
		return -ENOMEM;

	dcmd = &cmd->frame->dcmd;

	memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail));

	/*
	 * Prepare DCMD for aen registration
	 */
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sizeof(struct megasas_evt_detail);
	dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
	dcmd->mbox.w[0] = seq_num;
	dcmd->mbox.w[1] = curr_aen.word;
	dcmd->sgl.sge32[0].phys_addr = (u32) instance->evt_detail_h;
	dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_detail);

	/* Raced with another registration while aborting: back out quietly. */
	if (instance->aen_cmd != NULL) {
		megasas_return_cmd(instance, cmd);
		return 0;
	}

	/*
	 * Store reference to the cmd used to register for AEN. When an
	 * application wants us to register for AEN, we have to abort this
	 * cmd and re-register with a new EVENT LOCALE supplied by that app
	 */
	instance->aen_cmd = cmd;

	/*
	 * Issue the aen registration frame
	 */
	instance->instancet->fire_cmd(instance,
			cmd->frame_phys_addr, 0, instance->reg_set);

	return 0;
}

/**
 * megasas_start_aen -	Subscribes to AEN during driver load time
 * @instance:		Adapter soft state
 */
static int megasas_start_aen(struct megasas_instance *instance)
{
	struct megasas_evt_log_info eli;
	union megasas_evt_class_locale class_locale;

	/*
	 * Get the latest sequence number from FW
	 */
	memset(&eli, 0, sizeof(eli));

	if (megasas_get_seq_num(instance, &eli))
		return -1;

	/*
	 * Register AEN with FW for latest sequence number plus 1
	 */
	class_locale.members.reserved = 0;
	class_locale.members.locale = MR_EVT_LOCALE_ALL;
	class_locale.members.class = MR_EVT_CLASS_DEBUG;

	return megasas_register_aen(instance, eli.newest_seq_num + 1,
				    class_locale.word);
}

/**
 * megasas_io_attach -	Attaches this driver to SCSI mid-layer
 * @instance:		Adapter soft state
 */
static int megasas_io_attach(struct megasas_instance *instance)
{
	struct Scsi_Host *host = instance->host;

	/*
	 * Export parameters required by SCSI mid-layer
	 */
	host->irq = instance->pdev->irq;
	host->unique_id = instance->unique_id;
	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
		host->can_queue =
			instance->max_fw_cmds - MEGASAS_SKINNY_INT_CMDS;
	} else
		host->can_queue = instance->max_fw_cmds - MEGASAS_INT_CMDS;
	host->this_id = instance->init_id;
	host->sg_tablesize = instance->max_num_sge;
	host->max_sectors = instance->max_sectors_per_req;
	host->cmd_per_lun = 128;
	host->max_channel = MEGASAS_MAX_CHANNELS - 1;
	host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL;
	host->max_lun = MEGASAS_MAX_LUN;
	host->max_cmd_len = 16;

	/*
	 * Notify the mid-layer about the new controller
	 */
	if (scsi_add_host(host, &instance->pdev->dev)) {
		printk(KERN_DEBUG "megasas: 
scsi_add_host failed\n"); return -ENODEV; } /* * Trigger SCSI to scan our drives */ scsi_scan_host(host); return 0; } static int megasas_set_dma_mask(struct pci_dev *pdev) { /* * All our contollers are capable of performing 64-bit DMA */ if (IS_DMA64) { if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) { if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) goto fail_set_dma_mask; } } else { if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) goto fail_set_dma_mask; } return 0; fail_set_dma_mask: return 1; } /** * megasas_probe_one - PCI hotplug entry point * @pdev: PCI device structure * @id: PCI ids of supported hotplugged adapter */ static int __devinit megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) { int rval; struct Scsi_Host *host; struct megasas_instance *instance; /* * Announce PCI information */ printk(KERN_INFO "megasas: %#4.04x:%#4.04x:%#4.04x:%#4.04x: ", pdev->vendor, pdev->device, pdev->subsystem_vendor, pdev->subsystem_device); printk("bus %d:slot %d:func %d\n", pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); /* * PCI prepping: enable device set bus mastering and dma mask */ rval = pci_enable_device_mem(pdev); if (rval) { return rval; } pci_set_master(pdev); if (megasas_set_dma_mask(pdev)) goto fail_set_dma_mask; host = scsi_host_alloc(&megasas_template, sizeof(struct megasas_instance)); if (!host) { printk(KERN_DEBUG "megasas: scsi_host_alloc failed\n"); goto fail_alloc_instance; } instance = (struct megasas_instance *)host->hostdata; memset(instance, 0, sizeof(*instance)); instance->producer = pci_alloc_consistent(pdev, sizeof(u32), &instance->producer_h); instance->consumer = pci_alloc_consistent(pdev, sizeof(u32), &instance->consumer_h); if (!instance->producer || !instance->consumer) { printk(KERN_DEBUG "megasas: Failed to allocate memory for " "producer, consumer\n"); goto fail_alloc_dma_buf; } *instance->producer = 0; *instance->consumer = 0; megasas_poll_wait_aen = 0; instance->flag_ieee = 0; 
instance->ev = NULL; instance->evt_detail = pci_alloc_consistent(pdev, sizeof(struct megasas_evt_detail), &instance->evt_detail_h); if (!instance->evt_detail) { printk(KERN_DEBUG "megasas: Failed to allocate memory for " "event detail structure\n"); goto fail_alloc_dma_buf; } /* * Initialize locks and queues */ INIT_LIST_HEAD(&instance->cmd_pool); atomic_set(&instance->fw_outstanding,0); init_waitqueue_head(&instance->int_cmd_wait_q); init_waitqueue_head(&instance->abort_cmd_wait_q); spin_lock_init(&instance->cmd_pool_lock); spin_lock_init(&instance->fire_lock); spin_lock_init(&instance->completion_lock); spin_lock_init(&poll_aen_lock); mutex_init(&instance->aen_mutex); /* * Initialize PCI related and misc parameters */ instance->pdev = pdev; instance->host = host; instance->unique_id = pdev->bus->number << 8 | pdev->devfn; instance->init_id = MEGASAS_DEFAULT_INIT_ID; if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { instance->flag_ieee = 1; sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS); } else sema_init(&instance->ioctl_sem, MEGASAS_INT_CMDS); megasas_dbg_lvl = 0; instance->flag = 0; instance->unload = 1; instance->last_time = 0; /* * Initialize MFI Firmware */ if (megasas_init_mfi(instance)) goto fail_init_mfi; /* * Register IRQ */ if (request_irq(pdev->irq, megasas_isr, IRQF_SHARED, "megasas", instance)) { printk(KERN_DEBUG "megasas: Failed to register IRQ\n"); goto fail_irq; } instance->instancet->enable_intr(instance->reg_set); /* * Store instance in PCI softstate */ pci_set_drvdata(pdev, instance); /* * Add this controller to megasas_mgmt_info structure so that it * can be exported to management applications */ megasas_mgmt_info.count++; megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance; megasas_mgmt_info.max_index++; /* * Initiate AEN (Asynchronous Event Notification) */ if (megasas_start_aen(instance)) { printk(KERN_DEBUG "megasas: start aen 
failed\n"); goto fail_start_aen; } /* * Register with SCSI mid-layer */ if (megasas_io_attach(instance)) goto fail_io_attach; instance->unload = 0; return 0; fail_start_aen: fail_io_attach: megasas_mgmt_info.count--; megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL; megasas_mgmt_info.max_index--; pci_set_drvdata(pdev, NULL); instance->instancet->disable_intr(instance->reg_set); free_irq(instance->pdev->irq, instance); megasas_release_mfi(instance); fail_irq: fail_init_mfi: fail_alloc_dma_buf: if (instance->evt_detail) pci_free_consistent(pdev, sizeof(struct megasas_evt_detail), instance->evt_detail, instance->evt_detail_h); if (instance->producer) pci_free_consistent(pdev, sizeof(u32), instance->producer, instance->producer_h); if (instance->consumer) pci_free_consistent(pdev, sizeof(u32), instance->consumer, instance->consumer_h); scsi_host_put(host); fail_alloc_instance: fail_set_dma_mask: pci_disable_device(pdev); return -ENODEV; } /** * megasas_flush_cache - Requests FW to flush all its caches * @instance: Adapter soft state */ static void megasas_flush_cache(struct megasas_instance *instance) { struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; cmd = megasas_get_cmd(instance); if (!cmd) return; dcmd = &cmd->frame->dcmd; memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0x0; dcmd->sge_count = 0; dcmd->flags = MFI_FRAME_DIR_NONE; dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = 0; dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH; dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; megasas_issue_blocked_cmd(instance, cmd); megasas_return_cmd(instance, cmd); return; } /** * megasas_shutdown_controller - Instructs FW to shutdown the controller * @instance: Adapter soft state * @opcode: Shutdown/Hibernate */ static void megasas_shutdown_controller(struct megasas_instance *instance, u32 opcode) { struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; cmd = megasas_get_cmd(instance); if (!cmd) return; 
if (instance->aen_cmd) megasas_issue_blocked_abort_cmd(instance, instance->aen_cmd); dcmd = &cmd->frame->dcmd; memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0x0; dcmd->sge_count = 0; dcmd->flags = MFI_FRAME_DIR_NONE; dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = 0; dcmd->opcode = opcode; megasas_issue_blocked_cmd(instance, cmd); megasas_return_cmd(instance, cmd); return; } #ifdef CONFIG_PM /** * megasas_suspend - driver suspend entry point * @pdev: PCI device structure * @state: PCI power state to suspend routine */ static int megasas_suspend(struct pci_dev *pdev, pm_message_t state) { struct Scsi_Host *host; struct megasas_instance *instance; instance = pci_get_drvdata(pdev); host = instance->host; instance->unload = 1; if (poll_mode_io) del_timer_sync(&instance->io_completion_timer); megasas_flush_cache(instance); megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN); /* cancel the delayed work if this work still in queue */ if (instance->ev != NULL) { struct megasas_aen_event *ev = instance->ev; cancel_delayed_work( (struct delayed_work *)&ev->hotplug_work); flush_scheduled_work(); instance->ev = NULL; } tasklet_kill(&instance->isr_tasklet); pci_set_drvdata(instance->pdev, instance); instance->instancet->disable_intr(instance->reg_set); free_irq(instance->pdev->irq, instance); pci_save_state(pdev); pci_disable_device(pdev); pci_set_power_state(pdev, pci_choose_state(pdev, state)); return 0; } /** * megasas_resume- driver resume entry point * @pdev: PCI device structure */ static int megasas_resume(struct pci_dev *pdev) { int rval; struct Scsi_Host *host; struct megasas_instance *instance; instance = pci_get_drvdata(pdev); host = instance->host; pci_set_power_state(pdev, PCI_D0); pci_enable_wake(pdev, PCI_D0, 0); pci_restore_state(pdev); /* * PCI prepping: enable device set bus mastering and dma mask */ rval = pci_enable_device_mem(pdev); if (rval) { printk(KERN_ERR "megasas: Enable device failed\n"); 
return rval; } pci_set_master(pdev); if (megasas_set_dma_mask(pdev)) goto fail_set_dma_mask; /* * Initialize MFI Firmware */ *instance->producer = 0; *instance->consumer = 0; atomic_set(&instance->fw_outstanding, 0); /* * We expect the FW state to be READY */ if (megasas_transition_to_ready(instance)) goto fail_ready_state; if (megasas_issue_init_mfi(instance)) goto fail_init_mfi; tasklet_init(&instance->isr_tasklet, megasas_complete_cmd_dpc, (unsigned long)instance); /* * Register IRQ */ if (request_irq(pdev->irq, megasas_isr, IRQF_SHARED, "megasas", instance)) { printk(KERN_ERR "megasas: Failed to register IRQ\n"); goto fail_irq; } instance->instancet->enable_intr(instance->reg_set); /* * Initiate AEN (Asynchronous Event Notification) */ if (megasas_start_aen(instance)) printk(KERN_ERR "megasas: Start AEN failed\n"); /* Initialize the cmd completion timer */ if (poll_mode_io) megasas_start_timer(instance, &instance->io_completion_timer, megasas_io_completion_timer, MEGASAS_COMPLETION_TIMER_INTERVAL); instance->unload = 0; return 0; fail_irq: fail_init_mfi: if (instance->evt_detail) pci_free_consistent(pdev, sizeof(struct megasas_evt_detail), instance->evt_detail, instance->evt_detail_h); if (instance->producer) pci_free_consistent(pdev, sizeof(u32), instance->producer, instance->producer_h); if (instance->consumer) pci_free_consistent(pdev, sizeof(u32), instance->consumer, instance->consumer_h); scsi_host_put(host); fail_set_dma_mask: fail_ready_state: pci_disable_device(pdev); return -ENODEV; } #else #define megasas_suspend NULL #define megasas_resume NULL #endif /** * megasas_detach_one - PCI hot"un"plug entry point * @pdev: PCI device structure */ static void __devexit megasas_detach_one(struct pci_dev *pdev) { int i; struct Scsi_Host *host; struct megasas_instance *instance; instance = pci_get_drvdata(pdev); instance->unload = 1; host = instance->host; if (poll_mode_io) del_timer_sync(&instance->io_completion_timer); scsi_remove_host(instance->host); 
megasas_flush_cache(instance); megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); /* cancel the delayed work if this work still in queue*/ if (instance->ev != NULL) { struct megasas_aen_event *ev = instance->ev; cancel_delayed_work( (struct delayed_work *)&ev->hotplug_work); flush_scheduled_work(); instance->ev = NULL; } tasklet_kill(&instance->isr_tasklet); /* * Take the instance off the instance array. Note that we will not * decrement the max_index. We let this array be sparse array */ for (i = 0; i < megasas_mgmt_info.max_index; i++) { if (megasas_mgmt_info.instance[i] == instance) { megasas_mgmt_info.count--; megasas_mgmt_info.instance[i] = NULL; break; } } pci_set_drvdata(instance->pdev, NULL); instance->instancet->disable_intr(instance->reg_set); free_irq(instance->pdev->irq, instance); megasas_release_mfi(instance); pci_free_consistent(pdev, sizeof(struct megasas_evt_detail), instance->evt_detail, instance->evt_detail_h); pci_free_consistent(pdev, sizeof(u32), instance->producer, instance->producer_h); pci_free_consistent(pdev, sizeof(u32), instance->consumer, instance->consumer_h); scsi_host_put(host); pci_set_drvdata(pdev, NULL); pci_disable_device(pdev); return; } /** * megasas_shutdown - Shutdown entry point * @device: Generic device structure */ static void megasas_shutdown(struct pci_dev *pdev) { struct megasas_instance *instance = pci_get_drvdata(pdev); instance->unload = 1; megasas_flush_cache(instance); megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); } /** * megasas_mgmt_open - char node "open" entry point */ static int megasas_mgmt_open(struct inode *inode, struct file *filep) { cycle_kernel_lock(); /* * Allow only those users with admin rights */ if (!capable(CAP_SYS_ADMIN)) return -EACCES; return 0; } /** * megasas_mgmt_fasync - Async notifier registration from applications * * This function adds the calling process to a driver global queue. When an * event occurs, SIGIO will be sent to all processes in this queue. 
*/
static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
{
	int rc;

	/* Serialize updates to the driver-global async notification queue */
	mutex_lock(&megasas_async_queue_mutex);

	rc = fasync_helper(fd, filep, mode, &megasas_async_queue);

	mutex_unlock(&megasas_async_queue_mutex);

	if (rc >= 0) {
		/* For sanity check when we get ioctl */
		filep->private_data = filep;
		return 0;
	}

	printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc);

	return rc;
}

/**
 * megasas_mgmt_poll -	char node "poll" entry point
 * */
static unsigned int megasas_mgmt_poll(struct file *file, poll_table *wait)
{
	unsigned int mask;
	unsigned long flags;

	poll_wait(file, &megasas_poll_wait, wait);
	/* poll_aen_lock guards megasas_poll_wait_aen (also set in IRQ path) */
	spin_lock_irqsave(&poll_aen_lock, flags);
	if (megasas_poll_wait_aen)
		/* An AEN is pending: report the node as readable */
		mask = (POLLIN | POLLRDNORM);
	else
		mask = 0;
	spin_unlock_irqrestore(&poll_aen_lock, flags);
	return mask;
}

/**
 * megasas_mgmt_fw_ioctl -	Issues management ioctls to FW
 * @instance:			Adapter soft state
 * @argp:			User's ioctl packet
 */
static int
megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
		      struct megasas_iocpacket __user * user_ioc,
		      struct megasas_iocpacket *ioc)
{
	struct megasas_sge32 *kern_sge32;
	struct megasas_cmd *cmd;
	void *kbuff_arr[MAX_IOCTL_SGE];
	dma_addr_t buf_handle = 0;
	int error = 0, i;
	void *sense = NULL;
	dma_addr_t sense_handle;
	unsigned long *sense_ptr;

	memset(kbuff_arr, 0, sizeof(kbuff_arr));

	/* Reject packets with more scatter/gather entries than we mirror */
	if (ioc->sge_count > MAX_IOCTL_SGE) {
		printk(KERN_DEBUG "megasas: SGE count [%d] > max limit [%d]\n",
		       ioc->sge_count, MAX_IOCTL_SGE);
		return -EINVAL;
	}

	cmd = megasas_get_cmd(instance);
	if (!cmd) {
		printk(KERN_DEBUG "megasas: Failed to get a cmd packet\n");
		return -ENOMEM;
	}

	/*
	 * User's IOCTL packet has 2 frames (maximum). Copy those two
	 * frames into our cmd's frames. cmd->frame's context will get
	 * overwritten when we copy from user's frames.
So set that value * alone separately */ memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE); cmd->frame->hdr.context = cmd->index; cmd->frame->hdr.pad_0 = 0; /* * The management interface between applications and the fw uses * MFI frames. E.g, RAID configuration changes, LD property changes * etc are accomplishes through different kinds of MFI frames. The * driver needs to care only about substituting user buffers with * kernel buffers in SGLs. The location of SGL is embedded in the * struct iocpacket itself. */ kern_sge32 = (struct megasas_sge32 *) ((unsigned long)cmd->frame + ioc->sgl_off); /* * For each user buffer, create a mirror buffer and copy in */ for (i = 0; i < ioc->sge_count; i++) { kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev, ioc->sgl[i].iov_len, &buf_handle, GFP_KERNEL); if (!kbuff_arr[i]) { printk(KERN_DEBUG "megasas: Failed to alloc " "kernel SGL buffer for IOCTL \n"); error = -ENOMEM; goto out; } /* * We don't change the dma_coherent_mask, so * pci_alloc_consistent only returns 32bit addresses */ kern_sge32[i].phys_addr = (u32) buf_handle; kern_sge32[i].length = ioc->sgl[i].iov_len; /* * We created a kernel buffer corresponding to the * user buffer. 
Now copy in from the user buffer */ if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base, (u32) (ioc->sgl[i].iov_len))) { error = -EFAULT; goto out; } } if (ioc->sense_len) { sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len, &sense_handle, GFP_KERNEL); if (!sense) { error = -ENOMEM; goto out; } sense_ptr = (unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off); *sense_ptr = sense_handle; } /* * Set the sync_cmd flag so that the ISR knows not to complete this * cmd to the SCSI mid-layer */ cmd->sync_cmd = 1; megasas_issue_blocked_cmd(instance, cmd); cmd->sync_cmd = 0; /* * copy out the kernel buffers to user buffers */ for (i = 0; i < ioc->sge_count; i++) { if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i], ioc->sgl[i].iov_len)) { error = -EFAULT; goto out; } } /* * copy out the sense */ if (ioc->sense_len) { /* * sense_ptr points to the location that has the user * sense buffer address */ sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw + ioc->sense_off); if (copy_to_user((void __user *)((unsigned long)(*sense_ptr)), sense, ioc->sense_len)) { printk(KERN_ERR "megasas: Failed to copy out to user " "sense data\n"); error = -EFAULT; goto out; } } /* * copy the status codes returned by the fw */ if (copy_to_user(&user_ioc->frame.hdr.cmd_status, &cmd->frame->hdr.cmd_status, sizeof(u8))) { printk(KERN_DEBUG "megasas: Error copying out cmd_status\n"); error = -EFAULT; } out: if (sense) { dma_free_coherent(&instance->pdev->dev, ioc->sense_len, sense, sense_handle); } for (i = 0; i < ioc->sge_count && kbuff_arr[i]; i++) { dma_free_coherent(&instance->pdev->dev, kern_sge32[i].length, kbuff_arr[i], kern_sge32[i].phys_addr); } megasas_return_cmd(instance, cmd); return error; } static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg) { struct megasas_iocpacket __user *user_ioc = (struct megasas_iocpacket __user *)arg; struct megasas_iocpacket *ioc; struct megasas_instance *instance; int error; ioc = kmalloc(sizeof(*ioc), 
GFP_KERNEL); if (!ioc) return -ENOMEM; if (copy_from_user(ioc, user_ioc, sizeof(*ioc))) { error = -EFAULT; goto out_kfree_ioc; } instance = megasas_lookup_instance(ioc->host_no); if (!instance) { error = -ENODEV; goto out_kfree_ioc; } if (instance->hw_crit_error == 1) { printk(KERN_DEBUG "Controller in Crit ERROR\n"); error = -ENODEV; goto out_kfree_ioc; } if (instance->unload == 1) { error = -ENODEV; goto out_kfree_ioc; } /* * We will allow only MEGASAS_INT_CMDS number of parallel ioctl cmds */ if (down_interruptible(&instance->ioctl_sem)) { error = -ERESTARTSYS; goto out_kfree_ioc; } error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc); up(&instance->ioctl_sem); out_kfree_ioc: kfree(ioc); return error; } static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg) { struct megasas_instance *instance; struct megasas_aen aen; int error; if (file->private_data != file) { printk(KERN_DEBUG "megasas: fasync_helper was not " "called first\n"); return -EINVAL; } if (copy_from_user(&aen, (void __user *)arg, sizeof(aen))) return -EFAULT; instance = megasas_lookup_instance(aen.host_no); if (!instance) return -ENODEV; if (instance->hw_crit_error == 1) { error = -ENODEV; } if (instance->unload == 1) { return -ENODEV; } mutex_lock(&instance->aen_mutex); error = megasas_register_aen(instance, aen.seq_num, aen.class_locale_word); mutex_unlock(&instance->aen_mutex); return error; } /** * megasas_mgmt_ioctl - char node ioctl entry point */ static long megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { switch (cmd) { case MEGASAS_IOC_FIRMWARE: return megasas_mgmt_ioctl_fw(file, arg); case MEGASAS_IOC_GET_AEN: return megasas_mgmt_ioctl_aen(file, arg); } return -ENOTTY; } #ifdef CONFIG_COMPAT static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg) { struct compat_megasas_iocpacket __user *cioc = (struct compat_megasas_iocpacket __user *)arg; struct megasas_iocpacket __user *ioc = compat_alloc_user_space(sizeof(struct 
megasas_iocpacket)); int i; int error = 0; compat_uptr_t ptr; if (clear_user(ioc, sizeof(*ioc))) return -EFAULT; if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) || copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) || copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) || copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) || copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) || copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32))) return -EFAULT; /* * The sense_ptr is used in megasas_mgmt_fw_ioctl only when * sense_len is not null, so prepare the 64bit value under * the same condition. */ if (ioc->sense_len) { void __user **sense_ioc_ptr = (void __user **)(ioc->frame.raw + ioc->sense_off); compat_uptr_t *sense_cioc_ptr = (compat_uptr_t *)(cioc->frame.raw + cioc->sense_off); if (get_user(ptr, sense_cioc_ptr) || put_user(compat_ptr(ptr), sense_ioc_ptr)) return -EFAULT; } for (i = 0; i < MAX_IOCTL_SGE; i++) { if (get_user(ptr, &cioc->sgl[i].iov_base) || put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) || copy_in_user(&ioc->sgl[i].iov_len, &cioc->sgl[i].iov_len, sizeof(compat_size_t))) return -EFAULT; } error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc); if (copy_in_user(&cioc->frame.hdr.cmd_status, &ioc->frame.hdr.cmd_status, sizeof(u8))) { printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n"); return -EFAULT; } return error; } static long megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { switch (cmd) { case MEGASAS_IOC_FIRMWARE32: return megasas_mgmt_compat_ioctl_fw(file, arg); case MEGASAS_IOC_GET_AEN: return megasas_mgmt_ioctl_aen(file, arg); } return -ENOTTY; } #endif /* * File operations structure for management interface */ static const struct file_operations megasas_mgmt_fops = { .owner = THIS_MODULE, .open = megasas_mgmt_open, .fasync = megasas_mgmt_fasync, .unlocked_ioctl = megasas_mgmt_ioctl, .poll = megasas_mgmt_poll, #ifdef CONFIG_COMPAT .compat_ioctl = 
megasas_mgmt_compat_ioctl, #endif }; /* * PCI hotplug support registration structure */ static struct pci_driver megasas_pci_driver = { .name = "megaraid_sas", .id_table = megasas_pci_table, .probe = megasas_probe_one, .remove = __devexit_p(megasas_detach_one), .suspend = megasas_suspend, .resume = megasas_resume, .shutdown = megasas_shutdown, }; /* * Sysfs driver attributes */ static ssize_t megasas_sysfs_show_version(struct device_driver *dd, char *buf) { return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n", MEGASAS_VERSION); } static DRIVER_ATTR(version, S_IRUGO, megasas_sysfs_show_version, NULL); static ssize_t megasas_sysfs_show_release_date(struct device_driver *dd, char *buf) { return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n", MEGASAS_RELDATE); } static DRIVER_ATTR(release_date, S_IRUGO, megasas_sysfs_show_release_date, NULL); static ssize_t megasas_sysfs_show_support_poll_for_event(struct device_driver *dd, char *buf) { return sprintf(buf, "%u\n", support_poll_for_event); } static DRIVER_ATTR(support_poll_for_event, S_IRUGO, megasas_sysfs_show_support_poll_for_event, NULL); static ssize_t megasas_sysfs_show_dbg_lvl(struct device_driver *dd, char *buf) { return sprintf(buf, "%u\n", megasas_dbg_lvl); } static ssize_t megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t count) { int retval = count; if(sscanf(buf,"%u",&megasas_dbg_lvl)<1){ printk(KERN_ERR "megasas: could not set dbg_lvl\n"); retval = -EINVAL; } return retval; } static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUSR, megasas_sysfs_show_dbg_lvl, megasas_sysfs_set_dbg_lvl); static ssize_t megasas_sysfs_show_poll_mode_io(struct device_driver *dd, char *buf) { return sprintf(buf, "%u\n", poll_mode_io); } static ssize_t megasas_sysfs_set_poll_mode_io(struct device_driver *dd, const char *buf, size_t count) { int retval = count; int tmp = poll_mode_io; int i; struct megasas_instance *instance; if (sscanf(buf, "%u", &poll_mode_io) < 1) { printk(KERN_ERR "megasas: could not set 
poll_mode_io\n"); retval = -EINVAL; } /* * Check if poll_mode_io is already set or is same as previous value */ if ((tmp && poll_mode_io) || (tmp == poll_mode_io)) goto out; if (poll_mode_io) { /* * Start timers for all adapters */ for (i = 0; i < megasas_mgmt_info.max_index; i++) { instance = megasas_mgmt_info.instance[i]; if (instance) { megasas_start_timer(instance, &instance->io_completion_timer, megasas_io_completion_timer, MEGASAS_COMPLETION_TIMER_INTERVAL); } } } else { /* * Delete timers for all adapters */ for (i = 0; i < megasas_mgmt_info.max_index; i++) { instance = megasas_mgmt_info.instance[i]; if (instance) del_timer_sync(&instance->io_completion_timer); } } out: return retval; } static void megasas_aen_polling(struct work_struct *work) { struct megasas_aen_event *ev = container_of(work, struct megasas_aen_event, hotplug_work); struct megasas_instance *instance = ev->instance; union megasas_evt_class_locale class_locale; struct Scsi_Host *host; struct scsi_device *sdev1; u16 pd_index = 0; u16 ld_index = 0; int i, j, doscan = 0; u32 seq_num; int error; if (!instance) { printk(KERN_ERR "invalid instance!\n"); kfree(ev); return; } instance->ev = NULL; host = instance->host; if (instance->evt_detail) { switch (instance->evt_detail->code) { case MR_EVT_PD_INSERTED: if (megasas_get_pd_list(instance) == 0) { for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) { for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { pd_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; sdev1 = scsi_device_lookup(host, i, j, 0); if (instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) { if (!sdev1) { scsi_add_device(host, i, j, 0); } if (sdev1) scsi_device_put(sdev1); } } } } doscan = 0; break; case MR_EVT_PD_REMOVED: if (megasas_get_pd_list(instance) == 0) { megasas_get_pd_list(instance); for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) { for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { pd_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; sdev1 = scsi_device_lookup(host, i, j, 0); 
if (instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) { if (sdev1) { scsi_device_put(sdev1); } } else { if (sdev1) { scsi_remove_device(sdev1); scsi_device_put(sdev1); } } } } } doscan = 0; break; case MR_EVT_LD_OFFLINE: case MR_EVT_LD_DELETED: megasas_get_ld_list(instance); for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; sdev1 = scsi_device_lookup(host, i + MEGASAS_MAX_LD_CHANNELS, j, 0); if (instance->ld_ids[ld_index] != 0xff) { if (sdev1) { scsi_device_put(sdev1); } } else { if (sdev1) { scsi_remove_device(sdev1); scsi_device_put(sdev1); } } } } doscan = 0; break; case MR_EVT_LD_CREATED: megasas_get_ld_list(instance); for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; sdev1 = scsi_device_lookup(host, i+MEGASAS_MAX_LD_CHANNELS, j, 0); if (instance->ld_ids[ld_index] != 0xff) { if (!sdev1) { scsi_add_device(host, i + 2, j, 0); } } if (sdev1) { scsi_device_put(sdev1); } } } doscan = 0; break; case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED: case MR_EVT_FOREIGN_CFG_IMPORTED: doscan = 1; break; default: doscan = 0; break; } } else { printk(KERN_ERR "invalid evt_detail!\n"); kfree(ev); return; } if (doscan) { printk(KERN_INFO "scanning ...\n"); megasas_get_pd_list(instance); for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) { for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j; sdev1 = scsi_device_lookup(host, i, j, 0); if (instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) { if (!sdev1) { scsi_add_device(host, i, j, 0); } if (sdev1) scsi_device_put(sdev1); } else { if (sdev1) { scsi_remove_device(sdev1); scsi_device_put(sdev1); } } } } megasas_get_ld_list(instance); for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; 
sdev1 = scsi_device_lookup(host, i+MEGASAS_MAX_LD_CHANNELS, j, 0); if (instance->ld_ids[ld_index] != 0xff) { if (!sdev1) { scsi_add_device(host, i+2, j, 0); } else { scsi_device_put(sdev1); } } else { if (sdev1) { scsi_remove_device(sdev1); scsi_device_put(sdev1); } } } } } if ( instance->aen_cmd != NULL ) { kfree(ev); return ; } seq_num = instance->evt_detail->seq_num + 1; /* Register AEN with FW for latest sequence number plus 1 */ class_locale.members.reserved = 0; class_locale.members.locale = MR_EVT_LOCALE_ALL; class_locale.members.class = MR_EVT_CLASS_DEBUG; mutex_lock(&instance->aen_mutex); error = megasas_register_aen(instance, seq_num, class_locale.word); mutex_unlock(&instance->aen_mutex); if (error) printk(KERN_ERR "register aen failed error %x\n", error); kfree(ev); } static DRIVER_ATTR(poll_mode_io, S_IRUGO|S_IWUSR, megasas_sysfs_show_poll_mode_io, megasas_sysfs_set_poll_mode_io); /** * megasas_init - Driver load entry point */ static int __init megasas_init(void) { int rval; /* * Announce driver version and other information */ printk(KERN_INFO "megasas: %s %s\n", MEGASAS_VERSION, MEGASAS_EXT_VERSION); support_poll_for_event = 2; memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info)); /* * Register character device node */ rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops); if (rval < 0) { printk(KERN_DEBUG "megasas: failed to open device node\n"); return rval; } megasas_mgmt_majorno = rval; /* * Register ourselves as PCI hotplug module */ rval = pci_register_driver(&megasas_pci_driver); if (rval) { printk(KERN_DEBUG "megasas: PCI hotplug regisration failed \n"); goto err_pcidrv; } rval = driver_create_file(&megasas_pci_driver.driver, &driver_attr_version); if (rval) goto err_dcf_attr_ver; rval = driver_create_file(&megasas_pci_driver.driver, &driver_attr_release_date); if (rval) goto err_dcf_rel_date; rval = driver_create_file(&megasas_pci_driver.driver, &driver_attr_support_poll_for_event); if (rval) goto 
err_dcf_support_poll_for_event; rval = driver_create_file(&megasas_pci_driver.driver, &driver_attr_dbg_lvl); if (rval) goto err_dcf_dbg_lvl; rval = driver_create_file(&megasas_pci_driver.driver, &driver_attr_poll_mode_io); if (rval) goto err_dcf_poll_mode_io; return rval; err_dcf_poll_mode_io: driver_remove_file(&megasas_pci_driver.driver, &driver_attr_dbg_lvl); err_dcf_dbg_lvl: driver_remove_file(&megasas_pci_driver.driver, &driver_attr_support_poll_for_event); err_dcf_support_poll_for_event: driver_remove_file(&megasas_pci_driver.driver, &driver_attr_release_date); err_dcf_rel_date: driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version); err_dcf_attr_ver: pci_unregister_driver(&megasas_pci_driver); err_pcidrv: unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl"); return rval; } /** * megasas_exit - Driver unload entry point */ static void __exit megasas_exit(void) { driver_remove_file(&megasas_pci_driver.driver, &driver_attr_poll_mode_io); driver_remove_file(&megasas_pci_driver.driver, &driver_attr_dbg_lvl); driver_remove_file(&megasas_pci_driver.driver, &driver_attr_release_date); driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version); pci_unregister_driver(&megasas_pci_driver); unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl"); } module_init(megasas_init); module_exit(megasas_exit);
gpl-2.0
mingyaaaa/linux
sound/pci/echoaudio/mona.c
1472
4277
/* * ALSA driver for Echoaudio soundcards. * Copyright (C) 2003-2004 Giuliano Pochini <pochini@shiny.it> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #define ECHO24_FAMILY #define ECHOCARD_MONA #define ECHOCARD_NAME "Mona" #define ECHOCARD_HAS_MONITOR #define ECHOCARD_HAS_ASIC #define ECHOCARD_HAS_SUPER_INTERLEAVE #define ECHOCARD_HAS_DIGITAL_IO #define ECHOCARD_HAS_DIGITAL_IN_AUTOMUTE #define ECHOCARD_HAS_DIGITAL_MODE_SWITCH #define ECHOCARD_HAS_EXTERNAL_CLOCK #define ECHOCARD_HAS_ADAT 6 #define ECHOCARD_HAS_STEREO_BIG_ENDIAN32 /* Pipe indexes */ #define PX_ANALOG_OUT 0 /* 6 */ #define PX_DIGITAL_OUT 6 /* 8 */ #define PX_ANALOG_IN 14 /* 4 */ #define PX_DIGITAL_IN 18 /* 8 */ #define PX_NUM 26 /* Bus indexes */ #define BX_ANALOG_OUT 0 /* 6 */ #define BX_DIGITAL_OUT 6 /* 8 */ #define BX_ANALOG_IN 14 /* 4 */ #define BX_DIGITAL_IN 18 /* 8 */ #define BX_NUM 26 #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/module.h> #include <linux/firmware.h> #include <linux/slab.h> #include <linux/io.h> #include <sound/core.h> #include <sound/info.h> #include <sound/control.h> #include <sound/tlv.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/asoundef.h> #include <sound/initval.h> #include <linux/atomic.h> #include "echoaudio.h" MODULE_FIRMWARE("ea/loader_dsp.fw"); 
MODULE_FIRMWARE("ea/mona_301_dsp.fw"); MODULE_FIRMWARE("ea/mona_361_dsp.fw"); MODULE_FIRMWARE("ea/mona_301_1_asic_48.fw"); MODULE_FIRMWARE("ea/mona_301_1_asic_96.fw"); MODULE_FIRMWARE("ea/mona_361_1_asic_48.fw"); MODULE_FIRMWARE("ea/mona_361_1_asic_96.fw"); MODULE_FIRMWARE("ea/mona_2_asic.fw"); #define FW_361_LOADER 0 #define FW_MONA_301_DSP 1 #define FW_MONA_361_DSP 2 #define FW_MONA_301_1_ASIC48 3 #define FW_MONA_301_1_ASIC96 4 #define FW_MONA_361_1_ASIC48 5 #define FW_MONA_361_1_ASIC96 6 #define FW_MONA_2_ASIC 7 static const struct firmware card_fw[] = { {0, "loader_dsp.fw"}, {0, "mona_301_dsp.fw"}, {0, "mona_361_dsp.fw"}, {0, "mona_301_1_asic_48.fw"}, {0, "mona_301_1_asic_96.fw"}, {0, "mona_361_1_asic_48.fw"}, {0, "mona_361_1_asic_96.fw"}, {0, "mona_2_asic.fw"} }; static const struct pci_device_id snd_echo_ids[] = { {0x1057, 0x1801, 0xECC0, 0x0070, 0, 0, 0}, /* DSP 56301 Mona rev.0 */ {0x1057, 0x1801, 0xECC0, 0x0071, 0, 0, 0}, /* DSP 56301 Mona rev.1 */ {0x1057, 0x1801, 0xECC0, 0x0072, 0, 0, 0}, /* DSP 56301 Mona rev.2 */ {0x1057, 0x3410, 0xECC0, 0x0070, 0, 0, 0}, /* DSP 56361 Mona rev.0 */ {0x1057, 0x3410, 0xECC0, 0x0071, 0, 0, 0}, /* DSP 56361 Mona rev.1 */ {0x1057, 0x3410, 0xECC0, 0x0072, 0, 0, 0}, /* DSP 56361 Mona rev.2 */ {0,} }; static struct snd_pcm_hardware pcm_hardware_skel = { .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_SYNC_START, .formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S32_BE, .rates = SNDRV_PCM_RATE_8000_48000 | SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000, .rate_min = 8000, .rate_max = 96000, .channels_min = 1, .channels_max = 8, .buffer_bytes_max = 262144, .period_bytes_min = 32, .period_bytes_max = 131072, .periods_min = 2, .periods_max = 220, /* One page (4k) contains 512 instructions. I don't know if the hw supports lists longer than this. 
In this case periods_max=220 is a safe limit to make sure the list never exceeds 512 instructions. */ }; #include "mona_dsp.c" #include "echoaudio_dsp.c" #include "echoaudio_gml.c" #include "echoaudio.c"
gpl-2.0
v1ron/linux-mainline
drivers/net/wireless/st/cw1200/debug.c
1984
11978
/* * mac80211 glue code for mac80211 ST-Ericsson CW1200 drivers * DebugFS code * * Copyright (c) 2010, ST-Ericsson * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include "cw1200.h" #include "debug.h" #include "fwio.h" /* join_status */ static const char * const cw1200_debug_join_status[] = { "passive", "monitor", "station (joining)", "station (not authenticated yet)", "station", "adhoc", "access point", }; /* WSM_JOIN_PREAMBLE_... */ static const char * const cw1200_debug_preamble[] = { "long", "short", "long on 1 and 2 Mbps", }; static const char * const cw1200_debug_link_id[] = { "OFF", "REQ", "SOFT", "HARD", "RESET", "RESET_REMAP", }; static const char *cw1200_debug_mode(int mode) { switch (mode) { case NL80211_IFTYPE_UNSPECIFIED: return "unspecified"; case NL80211_IFTYPE_MONITOR: return "monitor"; case NL80211_IFTYPE_STATION: return "station"; case NL80211_IFTYPE_ADHOC: return "adhoc"; case NL80211_IFTYPE_MESH_POINT: return "mesh point"; case NL80211_IFTYPE_AP: return "access point"; case NL80211_IFTYPE_P2P_CLIENT: return "p2p client"; case NL80211_IFTYPE_P2P_GO: return "p2p go"; default: return "unsupported"; } } static void cw1200_queue_status_show(struct seq_file *seq, struct cw1200_queue *q) { int i; seq_printf(seq, "Queue %d:\n", q->queue_id); seq_printf(seq, " capacity: %zu\n", q->capacity); seq_printf(seq, " queued: %zu\n", q->num_queued); seq_printf(seq, " pending: %zu\n", q->num_pending); seq_printf(seq, " sent: %zu\n", q->num_sent); seq_printf(seq, " locked: %s\n", q->tx_locked_cnt ? "yes" : "no"); seq_printf(seq, " overfull: %s\n", q->overfull ? 
"yes" : "no"); seq_puts(seq, " link map: 0-> "); for (i = 0; i < q->stats->map_capacity; ++i) seq_printf(seq, "%.2d ", q->link_map_cache[i]); seq_printf(seq, "<-%zu\n", q->stats->map_capacity); } static void cw1200_debug_print_map(struct seq_file *seq, struct cw1200_common *priv, const char *label, u32 map) { int i; seq_printf(seq, "%s0-> ", label); for (i = 0; i < priv->tx_queue_stats.map_capacity; ++i) seq_printf(seq, "%s ", (map & BIT(i)) ? "**" : ".."); seq_printf(seq, "<-%zu\n", priv->tx_queue_stats.map_capacity - 1); } static int cw1200_status_show(struct seq_file *seq, void *v) { int i; struct list_head *item; struct cw1200_common *priv = seq->private; struct cw1200_debug_priv *d = priv->debug; seq_puts(seq, "CW1200 Wireless LAN driver status\n"); seq_printf(seq, "Hardware: %d.%d\n", priv->wsm_caps.hw_id, priv->wsm_caps.hw_subid); seq_printf(seq, "Firmware: %s %d.%d\n", cw1200_fw_types[priv->wsm_caps.fw_type], priv->wsm_caps.fw_ver, priv->wsm_caps.fw_build); seq_printf(seq, "FW API: %d\n", priv->wsm_caps.fw_api); seq_printf(seq, "FW caps: 0x%.4X\n", priv->wsm_caps.fw_cap); seq_printf(seq, "FW label: '%s'\n", priv->wsm_caps.fw_label); seq_printf(seq, "Mode: %s%s\n", cw1200_debug_mode(priv->mode), priv->listening ? " (listening)" : ""); seq_printf(seq, "Join state: %s\n", cw1200_debug_join_status[priv->join_status]); if (priv->channel) seq_printf(seq, "Channel: %d%s\n", priv->channel->hw_value, priv->channel_switch_in_progress ? " (switching)" : ""); if (priv->rx_filter.promiscuous) seq_puts(seq, "Filter: promisc\n"); else if (priv->rx_filter.fcs) seq_puts(seq, "Filter: fcs\n"); if (priv->rx_filter.bssid) seq_puts(seq, "Filter: bssid\n"); if (!priv->disable_beacon_filter) seq_puts(seq, "Filter: beacons\n"); if (priv->enable_beacon || priv->mode == NL80211_IFTYPE_AP || priv->mode == NL80211_IFTYPE_ADHOC || priv->mode == NL80211_IFTYPE_MESH_POINT || priv->mode == NL80211_IFTYPE_P2P_GO) seq_printf(seq, "Beaconing: %s\n", priv->enable_beacon ? 
"enabled" : "disabled"); for (i = 0; i < 4; ++i) seq_printf(seq, "EDCA(%d): %d, %d, %d, %d, %d\n", i, priv->edca.params[i].cwmin, priv->edca.params[i].cwmax, priv->edca.params[i].aifns, priv->edca.params[i].txop_limit, priv->edca.params[i].max_rx_lifetime); if (priv->join_status == CW1200_JOIN_STATUS_STA) { static const char *pm_mode = "unknown"; switch (priv->powersave_mode.mode) { case WSM_PSM_ACTIVE: pm_mode = "off"; break; case WSM_PSM_PS: pm_mode = "on"; break; case WSM_PSM_FAST_PS: pm_mode = "dynamic"; break; } seq_printf(seq, "Preamble: %s\n", cw1200_debug_preamble[priv->association_mode.preamble]); seq_printf(seq, "AMPDU spcn: %d\n", priv->association_mode.mpdu_start_spacing); seq_printf(seq, "Basic rate: 0x%.8X\n", le32_to_cpu(priv->association_mode.basic_rate_set)); seq_printf(seq, "Bss lost: %d beacons\n", priv->bss_params.beacon_lost_count); seq_printf(seq, "AID: %d\n", priv->bss_params.aid); seq_printf(seq, "Rates: 0x%.8X\n", priv->bss_params.operational_rate_set); seq_printf(seq, "Powersave: %s\n", pm_mode); } seq_printf(seq, "HT: %s\n", cw1200_is_ht(&priv->ht_info) ? "on" : "off"); if (cw1200_is_ht(&priv->ht_info)) { seq_printf(seq, "Greenfield: %s\n", cw1200_ht_greenfield(&priv->ht_info) ? 
"yes" : "no"); seq_printf(seq, "AMPDU dens: %d\n", cw1200_ht_ampdu_density(&priv->ht_info)); } seq_printf(seq, "RSSI thold: %d\n", priv->cqm_rssi_thold); seq_printf(seq, "RSSI hyst: %d\n", priv->cqm_rssi_hyst); seq_printf(seq, "Long retr: %d\n", priv->long_frame_max_tx_count); seq_printf(seq, "Short retr: %d\n", priv->short_frame_max_tx_count); spin_lock_bh(&priv->tx_policy_cache.lock); i = 0; list_for_each(item, &priv->tx_policy_cache.used) ++i; spin_unlock_bh(&priv->tx_policy_cache.lock); seq_printf(seq, "RC in use: %d\n", i); seq_puts(seq, "\n"); for (i = 0; i < 4; ++i) { cw1200_queue_status_show(seq, &priv->tx_queue[i]); seq_puts(seq, "\n"); } cw1200_debug_print_map(seq, priv, "Link map: ", priv->link_id_map); cw1200_debug_print_map(seq, priv, "Asleep map: ", priv->sta_asleep_mask); cw1200_debug_print_map(seq, priv, "PSPOLL map: ", priv->pspoll_mask); seq_puts(seq, "\n"); for (i = 0; i < CW1200_MAX_STA_IN_AP_MODE; ++i) { if (priv->link_id_db[i].status) { seq_printf(seq, "Link %d: %s, %pM\n", i + 1, cw1200_debug_link_id[priv->link_id_db[i].status], priv->link_id_db[i].mac); } } seq_puts(seq, "\n"); seq_printf(seq, "BH status: %s\n", atomic_read(&priv->bh_term) ? "terminated" : "alive"); seq_printf(seq, "Pending RX: %d\n", atomic_read(&priv->bh_rx)); seq_printf(seq, "Pending TX: %d\n", atomic_read(&priv->bh_tx)); if (priv->bh_error) seq_printf(seq, "BH errcode: %d\n", priv->bh_error); seq_printf(seq, "TX bufs: %d x %d bytes\n", priv->wsm_caps.input_buffers, priv->wsm_caps.input_buffer_size); seq_printf(seq, "Used bufs: %d\n", priv->hw_bufs_used); seq_printf(seq, "Powermgmt: %s\n", priv->powersave_enabled ? "on" : "off"); seq_printf(seq, "Device: %s\n", priv->device_can_sleep ? "asleep" : "awake"); spin_lock(&priv->wsm_cmd.lock); seq_printf(seq, "WSM status: %s\n", priv->wsm_cmd.done ? 
"idle" : "active"); seq_printf(seq, "WSM cmd: 0x%.4X (%td bytes)\n", priv->wsm_cmd.cmd, priv->wsm_cmd.len); seq_printf(seq, "WSM retval: %d\n", priv->wsm_cmd.ret); spin_unlock(&priv->wsm_cmd.lock); seq_printf(seq, "Datapath: %s\n", atomic_read(&priv->tx_lock) ? "locked" : "unlocked"); if (atomic_read(&priv->tx_lock)) seq_printf(seq, "TXlock cnt: %d\n", atomic_read(&priv->tx_lock)); seq_printf(seq, "TXed: %d\n", d->tx); seq_printf(seq, "AGG TXed: %d\n", d->tx_agg); seq_printf(seq, "MULTI TXed: %d (%d)\n", d->tx_multi, d->tx_multi_frames); seq_printf(seq, "RXed: %d\n", d->rx); seq_printf(seq, "AGG RXed: %d\n", d->rx_agg); seq_printf(seq, "TX miss: %d\n", d->tx_cache_miss); seq_printf(seq, "TX align: %d\n", d->tx_align); seq_printf(seq, "TX burst: %d\n", d->tx_burst); seq_printf(seq, "TX TTL: %d\n", d->tx_ttl); seq_printf(seq, "Scan: %s\n", atomic_read(&priv->scan.in_progress) ? "active" : "idle"); return 0; } static int cw1200_status_open(struct inode *inode, struct file *file) { return single_open(file, &cw1200_status_show, inode->i_private); } static const struct file_operations fops_status = { .open = cw1200_status_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .owner = THIS_MODULE, }; static int cw1200_counters_show(struct seq_file *seq, void *v) { int ret; struct cw1200_common *priv = seq->private; struct wsm_mib_counters_table counters; ret = wsm_get_counters_table(priv, &counters); if (ret) return ret; #define PUT_COUNTER(tab, name) \ seq_printf(seq, "%s:" tab "%d\n", #name, \ __le32_to_cpu(counters.name)) PUT_COUNTER("\t\t", plcp_errors); PUT_COUNTER("\t\t", fcs_errors); PUT_COUNTER("\t\t", tx_packets); PUT_COUNTER("\t\t", rx_packets); PUT_COUNTER("\t\t", rx_packet_errors); PUT_COUNTER("\t", rx_decryption_failures); PUT_COUNTER("\t\t", rx_mic_failures); PUT_COUNTER("\t", rx_no_key_failures); PUT_COUNTER("\t", tx_multicast_frames); PUT_COUNTER("\t", tx_frames_success); PUT_COUNTER("\t", tx_frame_failures); PUT_COUNTER("\t", 
tx_frames_retried); PUT_COUNTER("\t", tx_frames_multi_retried); PUT_COUNTER("\t", rx_frame_duplicates); PUT_COUNTER("\t\t", rts_success); PUT_COUNTER("\t\t", rts_failures); PUT_COUNTER("\t\t", ack_failures); PUT_COUNTER("\t", rx_multicast_frames); PUT_COUNTER("\t", rx_frames_success); PUT_COUNTER("\t", rx_cmac_icv_errors); PUT_COUNTER("\t\t", rx_cmac_replays); PUT_COUNTER("\t", rx_mgmt_ccmp_replays); #undef PUT_COUNTER return 0; } static int cw1200_counters_open(struct inode *inode, struct file *file) { return single_open(file, &cw1200_counters_show, inode->i_private); } static const struct file_operations fops_counters = { .open = cw1200_counters_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .owner = THIS_MODULE, }; static ssize_t cw1200_wsm_dumps(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct cw1200_common *priv = file->private_data; char buf[1]; if (!count) return -EINVAL; if (copy_from_user(buf, user_buf, 1)) return -EFAULT; if (buf[0] == '1') priv->wsm_enable_wsm_dumps = 1; else priv->wsm_enable_wsm_dumps = 0; return count; } static const struct file_operations fops_wsm_dumps = { .open = simple_open, .write = cw1200_wsm_dumps, .llseek = default_llseek, }; int cw1200_debug_init(struct cw1200_common *priv) { int ret = -ENOMEM; struct cw1200_debug_priv *d = kzalloc(sizeof(struct cw1200_debug_priv), GFP_KERNEL); priv->debug = d; if (!d) return ret; d->debugfs_phy = debugfs_create_dir("cw1200", priv->hw->wiphy->debugfsdir); if (!d->debugfs_phy) goto err; if (!debugfs_create_file("status", S_IRUSR, d->debugfs_phy, priv, &fops_status)) goto err; if (!debugfs_create_file("counters", S_IRUSR, d->debugfs_phy, priv, &fops_counters)) goto err; if (!debugfs_create_file("wsm_dumps", S_IWUSR, d->debugfs_phy, priv, &fops_wsm_dumps)) goto err; return 0; err: priv->debug = NULL; debugfs_remove_recursive(d->debugfs_phy); kfree(d); return ret; } void cw1200_debug_release(struct cw1200_common *priv) { struct 
cw1200_debug_priv *d = priv->debug; if (d) { debugfs_remove_recursive(d->debugfs_phy); priv->debug = NULL; kfree(d); } }
gpl-2.0
s0be/android_kernel_letv_msm8994
sound/pci/vx222/vx222.c
2240
7575
/* * Driver for Digigram VX222 V2/Mic PCI soundcards * * Copyright (c) 2002 by Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/module.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/tlv.h> #include "vx222.h" #define CARD_NAME "VX222" MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>"); MODULE_DESCRIPTION("Digigram VX222 V2/Mic"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{Digigram," CARD_NAME "}}"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable this card */ static bool mic[SNDRV_CARDS]; /* microphone */ static int ibl[SNDRV_CARDS]; /* microphone */ module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for Digigram " CARD_NAME " soundcard."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for Digigram " CARD_NAME " soundcard."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable Digigram " CARD_NAME " soundcard."); module_param_array(mic, bool, NULL, 0444); MODULE_PARM_DESC(mic, "Enable Microphone."); 
module_param_array(ibl, int, NULL, 0444); MODULE_PARM_DESC(ibl, "Capture IBL size."); /* */ enum { VX_PCI_VX222_OLD, VX_PCI_VX222_NEW }; static DEFINE_PCI_DEVICE_TABLE(snd_vx222_ids) = { { 0x10b5, 0x9050, 0x1369, PCI_ANY_ID, 0, 0, VX_PCI_VX222_OLD, }, /* PLX */ { 0x10b5, 0x9030, 0x1369, PCI_ANY_ID, 0, 0, VX_PCI_VX222_NEW, }, /* PLX */ { 0, } }; MODULE_DEVICE_TABLE(pci, snd_vx222_ids); /* */ static const DECLARE_TLV_DB_SCALE(db_scale_old_vol, -11350, 50, 0); static const DECLARE_TLV_DB_SCALE(db_scale_akm, -7350, 50, 0); static struct snd_vx_hardware vx222_old_hw = { .name = "VX222/Old", .type = VX_TYPE_BOARD, /* hw specs */ .num_codecs = 1, .num_ins = 1, .num_outs = 1, .output_level_max = VX_ANALOG_OUT_LEVEL_MAX, .output_level_db_scale = db_scale_old_vol, }; static struct snd_vx_hardware vx222_v2_hw = { .name = "VX222/v2", .type = VX_TYPE_V2, /* hw specs */ .num_codecs = 1, .num_ins = 1, .num_outs = 1, .output_level_max = VX2_AKM_LEVEL_MAX, .output_level_db_scale = db_scale_akm, }; static struct snd_vx_hardware vx222_mic_hw = { .name = "VX222/Mic", .type = VX_TYPE_MIC, /* hw specs */ .num_codecs = 1, .num_ins = 1, .num_outs = 1, .output_level_max = VX2_AKM_LEVEL_MAX, .output_level_db_scale = db_scale_akm, }; /* */ static int snd_vx222_free(struct vx_core *chip) { struct snd_vx222 *vx = (struct snd_vx222 *)chip; if (chip->irq >= 0) free_irq(chip->irq, (void*)chip); if (vx->port[0]) pci_release_regions(vx->pci); pci_disable_device(vx->pci); kfree(chip); return 0; } static int snd_vx222_dev_free(struct snd_device *device) { struct vx_core *chip = device->device_data; return snd_vx222_free(chip); } static int snd_vx222_create(struct snd_card *card, struct pci_dev *pci, struct snd_vx_hardware *hw, struct snd_vx222 **rchip) { struct vx_core *chip; struct snd_vx222 *vx; int i, err; static struct snd_device_ops ops = { .dev_free = snd_vx222_dev_free, }; struct snd_vx_ops *vx_ops; /* enable PCI device */ if ((err = pci_enable_device(pci)) < 0) return err; 
pci_set_master(pci); vx_ops = hw->type == VX_TYPE_BOARD ? &vx222_old_ops : &vx222_ops; chip = snd_vx_create(card, hw, vx_ops, sizeof(struct snd_vx222) - sizeof(struct vx_core)); if (! chip) { pci_disable_device(pci); return -ENOMEM; } vx = (struct snd_vx222 *)chip; vx->pci = pci; if ((err = pci_request_regions(pci, CARD_NAME)) < 0) { snd_vx222_free(chip); return err; } for (i = 0; i < 2; i++) vx->port[i] = pci_resource_start(pci, i + 1); if (request_irq(pci->irq, snd_vx_irq_handler, IRQF_SHARED, KBUILD_MODNAME, chip)) { snd_printk(KERN_ERR "unable to grab IRQ %d\n", pci->irq); snd_vx222_free(chip); return -EBUSY; } chip->irq = pci->irq; if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) { snd_vx222_free(chip); return err; } snd_card_set_dev(card, &pci->dev); *rchip = vx; return 0; } static int snd_vx222_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { static int dev; struct snd_card *card; struct snd_vx_hardware *hw; struct snd_vx222 *vx; int err; if (dev >= SNDRV_CARDS) return -ENODEV; if (!enable[dev]) { dev++; return -ENOENT; } err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card); if (err < 0) return err; switch ((int)pci_id->driver_data) { case VX_PCI_VX222_OLD: hw = &vx222_old_hw; break; case VX_PCI_VX222_NEW: default: if (mic[dev]) hw = &vx222_mic_hw; else hw = &vx222_v2_hw; break; } if ((err = snd_vx222_create(card, pci, hw, &vx)) < 0) { snd_card_free(card); return err; } card->private_data = vx; vx->core.ibl.size = ibl[dev]; sprintf(card->longname, "%s at 0x%lx & 0x%lx, irq %i", card->shortname, vx->port[0], vx->port[1], vx->core.irq); snd_printdd("%s at 0x%lx & 0x%lx, irq %i\n", card->shortname, vx->port[0], vx->port[1], vx->core.irq); #ifdef SND_VX_FW_LOADER vx->core.dev = &pci->dev; #endif if ((err = snd_vx_setup_firmware(&vx->core)) < 0) { snd_card_free(card); return err; } if ((err = snd_card_register(card)) < 0) { snd_card_free(card); return err; } pci_set_drvdata(pci, card); dev++; return 0; } static 
void snd_vx222_remove(struct pci_dev *pci) { snd_card_free(pci_get_drvdata(pci)); pci_set_drvdata(pci, NULL); } #ifdef CONFIG_PM_SLEEP static int snd_vx222_suspend(struct device *dev) { struct pci_dev *pci = to_pci_dev(dev); struct snd_card *card = dev_get_drvdata(dev); struct snd_vx222 *vx = card->private_data; int err; err = snd_vx_suspend(&vx->core); pci_disable_device(pci); pci_save_state(pci); pci_set_power_state(pci, PCI_D3hot); return err; } static int snd_vx222_resume(struct device *dev) { struct pci_dev *pci = to_pci_dev(dev); struct snd_card *card = dev_get_drvdata(dev); struct snd_vx222 *vx = card->private_data; pci_set_power_state(pci, PCI_D0); pci_restore_state(pci); if (pci_enable_device(pci) < 0) { printk(KERN_ERR "vx222: pci_enable_device failed, " "disabling device\n"); snd_card_disconnect(card); return -EIO; } pci_set_master(pci); return snd_vx_resume(&vx->core); } static SIMPLE_DEV_PM_OPS(snd_vx222_pm, snd_vx222_suspend, snd_vx222_resume); #define SND_VX222_PM_OPS &snd_vx222_pm #else #define SND_VX222_PM_OPS NULL #endif static struct pci_driver vx222_driver = { .name = KBUILD_MODNAME, .id_table = snd_vx222_ids, .probe = snd_vx222_probe, .remove = snd_vx222_remove, .driver = { .pm = SND_VX222_PM_OPS, }, }; module_pci_driver(vx222_driver);
gpl-2.0
ArolWright/android_kernel_motorola_msm8916
sound/drivers/mpu401/mpu401.c
2240
7666
/* * Driver for generic MPU-401 boards (UART mode only) * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * Copyright (c) 2004 by Castet Matthieu <castet.matthieu@free.fr> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/pnp.h> #include <linux/err.h> #include <linux/platform_device.h> #include <linux/module.h> #include <sound/core.h> #include <sound/mpu401.h> #include <sound/initval.h> MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); MODULE_DESCRIPTION("MPU-401 UART"); MODULE_LICENSE("GPL"); static int index[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = -2}; /* exclude the first card */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE; /* Enable this card */ #ifdef CONFIG_PNP static bool pnp[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 1}; #endif static long port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* MPU-401 port number */ static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; /* MPU-401 IRQ */ static bool uart_enter[SNDRV_CARDS] = {[0 ... 
(SNDRV_CARDS - 1)] = 1}; module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for MPU-401 device."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for MPU-401 device."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable MPU-401 device."); #ifdef CONFIG_PNP module_param_array(pnp, bool, NULL, 0444); MODULE_PARM_DESC(pnp, "PnP detection for MPU-401 device."); #endif module_param_array(port, long, NULL, 0444); MODULE_PARM_DESC(port, "Port # for MPU-401 device."); module_param_array(irq, int, NULL, 0444); MODULE_PARM_DESC(irq, "IRQ # for MPU-401 device."); module_param_array(uart_enter, bool, NULL, 0444); MODULE_PARM_DESC(uart_enter, "Issue UART_ENTER command at open."); static struct platform_device *platform_devices[SNDRV_CARDS]; static int pnp_registered; static unsigned int snd_mpu401_devices; static int snd_mpu401_create(int dev, struct snd_card **rcard) { struct snd_card *card; int err; if (!uart_enter[dev]) snd_printk(KERN_ERR "the uart_enter option is obsolete; remove it\n"); *rcard = NULL; err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card); if (err < 0) return err; strcpy(card->driver, "MPU-401 UART"); strcpy(card->shortname, card->driver); sprintf(card->longname, "%s at %#lx, ", card->shortname, port[dev]); if (irq[dev] >= 0) { sprintf(card->longname + strlen(card->longname), "irq %d", irq[dev]); } else { strcat(card->longname, "polled"); } err = snd_mpu401_uart_new(card, 0, MPU401_HW_MPU401, port[dev], 0, irq[dev], NULL); if (err < 0) { printk(KERN_ERR "MPU401 not detected at 0x%lx\n", port[dev]); goto _err; } *rcard = card; return 0; _err: snd_card_free(card); return err; } static int snd_mpu401_probe(struct platform_device *devptr) { int dev = devptr->id; int err; struct snd_card *card; if (port[dev] == SNDRV_AUTO_PORT) { snd_printk(KERN_ERR "specify port\n"); return -EINVAL; } if (irq[dev] == SNDRV_AUTO_IRQ) { snd_printk(KERN_ERR "specify or disable 
IRQ\n"); return -EINVAL; } err = snd_mpu401_create(dev, &card); if (err < 0) return err; snd_card_set_dev(card, &devptr->dev); if ((err = snd_card_register(card)) < 0) { snd_card_free(card); return err; } platform_set_drvdata(devptr, card); return 0; } static int snd_mpu401_remove(struct platform_device *devptr) { snd_card_free(platform_get_drvdata(devptr)); platform_set_drvdata(devptr, NULL); return 0; } #define SND_MPU401_DRIVER "snd_mpu401" static struct platform_driver snd_mpu401_driver = { .probe = snd_mpu401_probe, .remove = snd_mpu401_remove, .driver = { .name = SND_MPU401_DRIVER, .owner = THIS_MODULE, }, }; #ifdef CONFIG_PNP #define IO_EXTENT 2 static struct pnp_device_id snd_mpu401_pnpids[] = { { .id = "PNPb006" }, { .id = "" } }; MODULE_DEVICE_TABLE(pnp, snd_mpu401_pnpids); static int snd_mpu401_pnp(int dev, struct pnp_dev *device, const struct pnp_device_id *id) { if (!pnp_port_valid(device, 0) || pnp_port_flags(device, 0) & IORESOURCE_DISABLED) { snd_printk(KERN_ERR "no PnP port\n"); return -ENODEV; } if (pnp_port_len(device, 0) < IO_EXTENT) { snd_printk(KERN_ERR "PnP port length is %llu, expected %d\n", (unsigned long long)pnp_port_len(device, 0), IO_EXTENT); return -ENODEV; } port[dev] = pnp_port_start(device, 0); if (!pnp_irq_valid(device, 0) || pnp_irq_flags(device, 0) & IORESOURCE_DISABLED) { snd_printk(KERN_WARNING "no PnP irq, using polling\n"); irq[dev] = -1; } else { irq[dev] = pnp_irq(device, 0); } return 0; } static int snd_mpu401_pnp_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id) { static int dev; struct snd_card *card; int err; for ( ; dev < SNDRV_CARDS; ++dev) { if (!enable[dev] || !pnp[dev]) continue; err = snd_mpu401_pnp(dev, pnp_dev, id); if (err < 0) return err; err = snd_mpu401_create(dev, &card); if (err < 0) return err; if ((err = snd_card_register(card)) < 0) { snd_card_free(card); return err; } snd_card_set_dev(card, &pnp_dev->dev); pnp_set_drvdata(pnp_dev, card); snd_mpu401_devices++; ++dev; return 0; } return 
-ENODEV; } static void snd_mpu401_pnp_remove(struct pnp_dev *dev) { struct snd_card *card = (struct snd_card *) pnp_get_drvdata(dev); snd_card_disconnect(card); snd_card_free_when_closed(card); } static struct pnp_driver snd_mpu401_pnp_driver = { .name = "mpu401", .id_table = snd_mpu401_pnpids, .probe = snd_mpu401_pnp_probe, .remove = snd_mpu401_pnp_remove, }; #else static struct pnp_driver snd_mpu401_pnp_driver; #endif static void snd_mpu401_unregister_all(void) { int i; if (pnp_registered) pnp_unregister_driver(&snd_mpu401_pnp_driver); for (i = 0; i < ARRAY_SIZE(platform_devices); ++i) platform_device_unregister(platform_devices[i]); platform_driver_unregister(&snd_mpu401_driver); } static int __init alsa_card_mpu401_init(void) { int i, err; if ((err = platform_driver_register(&snd_mpu401_driver)) < 0) return err; for (i = 0; i < SNDRV_CARDS; i++) { struct platform_device *device; if (! enable[i]) continue; #ifdef CONFIG_PNP if (pnp[i]) continue; #endif device = platform_device_register_simple(SND_MPU401_DRIVER, i, NULL, 0); if (IS_ERR(device)) continue; if (!platform_get_drvdata(device)) { platform_device_unregister(device); continue; } platform_devices[i] = device; snd_mpu401_devices++; } err = pnp_register_driver(&snd_mpu401_pnp_driver); if (!err) pnp_registered = 1; if (!snd_mpu401_devices) { #ifdef MODULE printk(KERN_ERR "MPU-401 device not found or device busy\n"); #endif snd_mpu401_unregister_all(); return -ENODEV; } return 0; } static void __exit alsa_card_mpu401_exit(void) { snd_mpu401_unregister_all(); } module_init(alsa_card_mpu401_init) module_exit(alsa_card_mpu401_exit)
gpl-2.0
MoKee/android_kernel_samsung_sc03e
arch/mips/pci/pci-xlr.c
2752
5867
/* * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights * reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the NetLogic * license below: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/console.h>

#include <asm/io.h>

#include <asm/netlogic/interrupt.h>
#include <asm/netlogic/xlr/iomap.h>
#include <asm/netlogic/xlr/pic.h>
#include <asm/netlogic/xlr/xlr.h>

/* Virtual base of the memory-mapped PCI config space (set in pcibios_init). */
static void *pci_config_base;

/* ECAM-style offset: bus in bits 23:16, devfn in 15:8, register in 7:0. */
#define	pci_cfg_addr(bus, devfn, off) (((bus) << 16) | ((devfn) << 8) | (off))

/* PCI ops */

/*
 * Read one aligned 32-bit word of config space.
 * NOTE(review): the return path uses cpu_to_le32() on a value that was
 * just *read* from the device; on big-endian MIPS cpu_to_le32 and
 * le32_to_cpu are the same byte swap so this works, but le32_to_cpu
 * would express the intent — confirm before "fixing".
 */
static inline u32 pci_cfg_read_32bit(struct pci_bus *bus, unsigned int devfn,
	int where)
{
	u32 data;
	u32 *cfgaddr;

	cfgaddr = (u32 *)(pci_config_base +
			pci_cfg_addr(bus->number, devfn, where & ~3));
	data = *cfgaddr;
	return cpu_to_le32(data);
}

/* Write one aligned 32-bit word of config space (little-endian on the bus). */
static inline void pci_cfg_write_32bit(struct pci_bus *bus, unsigned int devfn,
	int where, u32 data)
{
	u32 *cfgaddr;

	cfgaddr = (u32 *)(pci_config_base +
			pci_cfg_addr(bus->number, devfn, where & ~3));
	*cfgaddr = cpu_to_le32(data);
}

/*
 * pci_ops.read: validate alignment for the requested width, then read the
 * containing 32-bit word and shift/mask out the 8- or 16-bit sub-field.
 */
static int nlm_pcibios_read(struct pci_bus *bus, unsigned int devfn,
	int where, int size, u32 *val)
{
	u32 data;

	if ((size == 2) && (where & 1))
		return PCIBIOS_BAD_REGISTER_NUMBER;
	else if ((size == 4) && (where & 3))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	data = pci_cfg_read_32bit(bus, devfn, where);

	if (size == 1)
		*val = (data >> ((where & 3) << 3)) & 0xff;
	else if (size == 2)
		*val = (data >> ((where & 3) << 3)) & 0xffff;
	else
		*val = data;

	return PCIBIOS_SUCCESSFUL;
}

/*
 * pci_ops.write: read-modify-write the containing 32-bit word so that
 * sub-word writes do not clobber neighbouring registers.
 */
static int nlm_pcibios_write(struct pci_bus *bus, unsigned int devfn,
		int where, int size, u32 val)
{
	u32 data;

	if ((size == 2) && (where & 1))
		return PCIBIOS_BAD_REGISTER_NUMBER;
	else if ((size == 4) && (where & 3))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	data = pci_cfg_read_32bit(bus, devfn, where);

	if (size == 1)
		data = (data & ~(0xff << ((where & 3) << 3))) |
			(val << ((where & 3) << 3));
	else if (size == 2)
		data = (data & ~(0xffff << ((where & 3) << 3))) |
			(val << ((where & 3) << 3));
	else
		data = val;

	pci_cfg_write_32bit(bus, devfn, where, data);

	return PCIBIOS_SUCCESSFUL;
}

struct pci_ops nlm_pci_ops = {
	.read  = nlm_pcibios_read,
	.write = nlm_pcibios_write
};

static struct resource nlm_pci_mem_resource = {
	.name		= "XLR PCI MEM",
	.start		= 0xd0000000UL,	/* 256MB PCI mem @ 0xd000_0000 */
	.end		= 0xdfffffffUL,
	.flags		= IORESOURCE_MEM,
};

static struct resource nlm_pci_io_resource = {
	.name		= "XLR IO MEM",
	.start		= 0x10000000UL,	/* 16MB PCI IO @ 0x1000_0000 */
	.end		= 0x100fffffUL,
	.flags		= IORESOURCE_IO,
};

struct pci_controller nlm_pci_controller = {
	.index		= 0,
	.pci_ops	= &nlm_pci_ops,
	.mem_resource	= &nlm_pci_mem_resource,
	.mem_offset	= 0x00000000UL,
	.io_resource	= &nlm_pci_io_resource,
	.io_offset	= 0x00000000UL,
};

/*
 * Map a device to its interrupt line.  XLR has a single shared PCI-X IRQ;
 * XLS PCIe assigns one IRQ per link, identified by the devfn of the
 * bridge the device sits behind.
 */
int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	if (!nlm_chip_is_xls())
		return	PIC_PCIX_IRQ;	/* for XLR just one IRQ*/

	/*
	 * For XLS PCIe, there is an IRQ per Link, find out which
	 * link the device is on to assign interrupts
	 */
	if (dev->bus->self == NULL)
		return 0;

	switch	(dev->bus->self->devfn) {
	case 0x0:
		return	PIC_PCIE_LINK0_IRQ;

	case 0x8:
		return	PIC_PCIE_LINK1_IRQ;

	case 0x10:
		if (nlm_chip_is_xls_b())
			return	PIC_PCIE_XLSB0_LINK2_IRQ;
		else
			return	PIC_PCIE_LINK2_IRQ;

	case 0x18:
		if (nlm_chip_is_xls_b())
			return	PIC_PCIE_XLSB0_LINK3_IRQ;
		else
			return	PIC_PCIE_LINK3_IRQ;
	}
	WARN(1, "Unexpected devfn %d\n", dev->bus->self->devfn);
	return 0;
}

/* Do platform specific device initialization at pci_enable_device() time */
int pcibios_plat_dev_init(struct pci_dev *dev)
{
	return 0;
}

/*
 * Arch init: map the config space window, extend the I/O port range for
 * memory-mapped I/O, and register the controller with the PCI core.
 */
static int __init pcibios_init(void)
{
	/* PSB assigns PCI resources */
	pci_probe_only = 1;
	pci_config_base = ioremap(DEFAULT_PCI_CONFIG_BASE, 16 << 20);

	/* Extend IO port for memory mapped io */
	ioport_resource.start =  0;
	ioport_resource.end   = ~0;

	set_io_port_base(CKSEG1);
	nlm_pci_controller.io_map_base = CKSEG1;

	pr_info("Registering XLR/XLS PCIX/PCIE Controller.\n");
	register_pci_controller(&nlm_pci_controller);

	return 0;
}

arch_initcall(pcibios_init);

struct pci_fixup pcibios_fixups[] = {
	{0}
};
gpl-2.0
kendling/android_kernel_google_dragon
arch/x86/kernel/asm-offsets_64.c
3008
1900
#include <asm/ia32.h>

/*
 * Build syscall presence tables: each generated header expands one of the
 * __SYSCALL_* macros per syscall, so sizeof(the array) == highest nr + 1.
 */
#define __SYSCALL_64(nr, sym, compat) [nr] = 1,
#define __SYSCALL_COMMON(nr, sym, compat) [nr] = 1,
#ifdef CONFIG_X86_X32_ABI
# define __SYSCALL_X32(nr, sym, compat) [nr] = 1,
#else
# define __SYSCALL_X32(nr, sym, compat) /* nothing */
#endif
static char syscalls_64[] = {
#include <asm/syscalls_64.h>
};
#define __SYSCALL_I386(nr, sym, compat) [nr] = 1,
static char syscalls_ia32[] = {
#include <asm/syscalls_32.h>
};

/*
 * asm-offsets generator: every OFFSET()/DEFINE() below is turned into a
 * #define in the generated asm-offsets.h consumed by assembly code.
 * This never runs; it is only compiled and its asm output scraped.
 */
int main(void)
{
#ifdef CONFIG_PARAVIRT
	OFFSET(PV_IRQ_adjust_exception_frame, pv_irq_ops, adjust_exception_frame);
	OFFSET(PV_CPU_usergs_sysret32, pv_cpu_ops, usergs_sysret32);
	OFFSET(PV_CPU_usergs_sysret64, pv_cpu_ops, usergs_sysret64);
	OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
	BLANK();
#endif

#ifdef CONFIG_IA32_EMULATION
	OFFSET(TI_sysenter_return, thread_info, sysenter_return);
	BLANK();

	/* Offsets into the 32-bit compat sigcontext layout. */
#define ENTRY(entry) OFFSET(IA32_SIGCONTEXT_ ## entry, sigcontext_ia32, entry)
	ENTRY(ax);
	ENTRY(bx);
	ENTRY(cx);
	ENTRY(dx);
	ENTRY(si);
	ENTRY(di);
	ENTRY(bp);
	ENTRY(sp);
	ENTRY(ip);
	BLANK();
#undef ENTRY

	OFFSET(IA32_RT_SIGFRAME_sigcontext, rt_sigframe_ia32, uc.uc_mcontext);
	BLANK();
#endif

	/*
	 * Offsets into struct pt_regs.
	 * Fix: the original listed ENTRY(bx) twice; the duplicate emitted a
	 * redundant identical #define into asm-offsets.h and is removed.
	 */
#define ENTRY(entry) OFFSET(pt_regs_ ## entry, pt_regs, entry)
	ENTRY(bx);
	ENTRY(cx);
	ENTRY(dx);
	ENTRY(sp);
	ENTRY(bp);
	ENTRY(si);
	ENTRY(di);
	ENTRY(r8);
	ENTRY(r9);
	ENTRY(r10);
	ENTRY(r11);
	ENTRY(r12);
	ENTRY(r13);
	ENTRY(r14);
	ENTRY(r15);
	ENTRY(flags);
	BLANK();
#undef ENTRY

	/* Offsets into the suspend/resume saved_context. */
#define ENTRY(entry) OFFSET(saved_context_ ## entry, saved_context, entry)
	ENTRY(cr0);
	ENTRY(cr2);
	ENTRY(cr3);
	ENTRY(cr4);
	ENTRY(cr8);
	ENTRY(gdt_desc);
	BLANK();
#undef ENTRY

	OFFSET(TSS_ist, tss_struct, x86_tss.ist);
	BLANK();

	DEFINE(__NR_syscall_max, sizeof(syscalls_64) - 1);
	DEFINE(NR_syscalls, sizeof(syscalls_64));

	DEFINE(__NR_ia32_syscall_max, sizeof(syscalls_ia32) - 1);
	DEFINE(IA32_NR_syscalls, sizeof(syscalls_ia32));

	return 0;
}
gpl-2.0
nmenon/linux-omap-ti-pm
lib/sha1.c
4032
2469
/* * SHA transform algorithm, originally taken from code written by * Peter Gutmann, and placed in the public domain. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/cryptohash.h> /* The SHA f()-functions. */ #define f1(x,y,z) (z ^ (x & (y ^ z))) /* x ? y : z */ #define f2(x,y,z) (x ^ y ^ z) /* XOR */ #define f3(x,y,z) ((x & y) + (z & (x ^ y))) /* majority */ /* The SHA Mysterious Constants */ #define K1 0x5A827999L /* Rounds 0-19: sqrt(2) * 2^30 */ #define K2 0x6ED9EBA1L /* Rounds 20-39: sqrt(3) * 2^30 */ #define K3 0x8F1BBCDCL /* Rounds 40-59: sqrt(5) * 2^30 */ #define K4 0xCA62C1D6L /* Rounds 60-79: sqrt(10) * 2^30 */ /** * sha_transform - single block SHA1 transform * * @digest: 160 bit digest to update * @data: 512 bits of data to hash * @W: 80 words of workspace (see note) * * This function generates a SHA1 digest for a single 512-bit block. * Be warned, it does not handle padding and message digest, do not * confuse it with the full FIPS 180-1 digest algorithm for variable * length messages. * * Note: If the hash is security sensitive, the caller should be sure * to clear the workspace. This is left to the caller to avoid * unnecessary clears between chained hashing operations. 
*/

void sha_transform(__u32 *digest, const char *in, __u32 *W)
{
	__u32 a, b, c, d, e, t, i;

	/* Load the 16 big-endian 32-bit input words... */
	for (i = 0; i < 16; i++)
		W[i] = be32_to_cpu(((const __be32 *)in)[i]);

	/* ...then expand the message schedule to 80 words (FIPS 180-1:
	 * W[t] = ROL1(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16])). */
	for (i = 0; i < 64; i++)
		W[i+16] = rol32(W[i+13] ^ W[i+8] ^ W[i+2] ^ W[i], 1);

	a = digest[0];
	b = digest[1];
	c = digest[2];
	d = digest[3];
	e = digest[4];

	/* Rounds 0-19: "choose" function f1 with constant K1. */
	for (i = 0; i < 20; i++) {
		t = f1(b, c, d) + K1 + rol32(a, 5) + e + W[i];
		e = d; d = c; c = rol32(b, 30); b = a; a = t;
	}

	/* Rounds 20-39: parity function f2 with constant K2. */
	for (; i < 40; i ++) {
		t = f2(b, c, d) + K2 + rol32(a, 5) + e + W[i];
		e = d; d = c; c = rol32(b, 30); b = a; a = t;
	}

	/* Rounds 40-59: majority function f3 with constant K3. */
	for (; i < 60; i ++) {
		t = f3(b, c, d) + K3 + rol32(a, 5) + e + W[i];
		e = d; d = c; c = rol32(b, 30); b = a; a = t;
	}

	/* Rounds 60-79: parity again (f2) with constant K4. */
	for (; i < 80; i ++) {
		t = f2(b, c, d) + K4 + rol32(a, 5) + e + W[i];
		e = d; d = c; c = rol32(b, 30); b = a; a = t;
	}

	/* Fold the block's result into the running digest. */
	digest[0] += a;
	digest[1] += b;
	digest[2] += c;
	digest[3] += d;
	digest[4] += e;
}
EXPORT_SYMBOL(sha_transform);

/**
 * sha_init - initialize the vectors for a SHA1 digest
 * @buf: vector to initialize
 *
 * The five constants are the standard SHA-1 initial hash values (H0-H4).
 */
void sha_init(__u32 *buf)
{
	buf[0] = 0x67452301;
	buf[1] = 0xefcdab89;
	buf[2] = 0x98badcfe;
	buf[3] = 0x10325476;
	buf[4] = 0xc3d2e1f0;
}
gpl-2.0
KyleCo76/FIK
drivers/char/agp/sworks-agp.c
4544
15544
/* * Serverworks AGPGART routines. */ #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/agp_backend.h> #include "agp.h" #define SVWRKS_COMMAND 0x04 #define SVWRKS_APSIZE 0x10 #define SVWRKS_MMBASE 0x14 #define SVWRKS_CACHING 0x4b #define SVWRKS_AGP_ENABLE 0x60 #define SVWRKS_FEATURE 0x68 #define SVWRKS_SIZE_MASK 0xfe000000 /* Memory mapped registers */ #define SVWRKS_GART_CACHE 0x02 #define SVWRKS_GATTBASE 0x04 #define SVWRKS_TLBFLUSH 0x10 #define SVWRKS_POSTFLUSH 0x14 #define SVWRKS_DIRFLUSH 0x0c struct serverworks_page_map { unsigned long *real; unsigned long __iomem *remapped; }; static struct _serverworks_private { struct pci_dev *svrwrks_dev; /* device one */ volatile u8 __iomem *registers; struct serverworks_page_map **gatt_pages; int num_tables; struct serverworks_page_map scratch_dir; int gart_addr_ofs; int mm_addr_ofs; } serverworks_private; static int serverworks_create_page_map(struct serverworks_page_map *page_map) { int i; page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL); if (page_map->real == NULL) { return -ENOMEM; } set_memory_uc((unsigned long)page_map->real, 1); page_map->remapped = page_map->real; for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) writel(agp_bridge->scratch_page, page_map->remapped+i); /* Red Pen: Everyone else does pci posting flush here */ return 0; } static void serverworks_free_page_map(struct serverworks_page_map *page_map) { set_memory_wb((unsigned long)page_map->real, 1); free_page((unsigned long) page_map->real); } static void serverworks_free_gatt_pages(void) { int i; struct serverworks_page_map **tables; struct serverworks_page_map *entry; tables = serverworks_private.gatt_pages; for (i = 0; i < serverworks_private.num_tables; i++) { entry = tables[i]; if (entry != NULL) { if (entry->real != NULL) { serverworks_free_page_map(entry); } kfree(entry); } } kfree(tables); } static int 
serverworks_create_gatt_pages(int nr_tables) { struct serverworks_page_map **tables; struct serverworks_page_map *entry; int retval = 0; int i; tables = kzalloc((nr_tables + 1) * sizeof(struct serverworks_page_map *), GFP_KERNEL); if (tables == NULL) return -ENOMEM; for (i = 0; i < nr_tables; i++) { entry = kzalloc(sizeof(struct serverworks_page_map), GFP_KERNEL); if (entry == NULL) { retval = -ENOMEM; break; } tables[i] = entry; retval = serverworks_create_page_map(entry); if (retval != 0) break; } serverworks_private.num_tables = nr_tables; serverworks_private.gatt_pages = tables; if (retval != 0) serverworks_free_gatt_pages(); return retval; } #define SVRWRKS_GET_GATT(addr) (serverworks_private.gatt_pages[\ GET_PAGE_DIR_IDX(addr)]->remapped) #ifndef GET_PAGE_DIR_OFF #define GET_PAGE_DIR_OFF(addr) (addr >> 22) #endif #ifndef GET_PAGE_DIR_IDX #define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \ GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr)) #endif #ifndef GET_GATT_OFF #define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12) #endif static int serverworks_create_gatt_table(struct agp_bridge_data *bridge) { struct aper_size_info_lvl2 *value; struct serverworks_page_map page_dir; int retval; u32 temp; int i; value = A_SIZE_LVL2(agp_bridge->current_size); retval = serverworks_create_page_map(&page_dir); if (retval != 0) { return retval; } retval = serverworks_create_page_map(&serverworks_private.scratch_dir); if (retval != 0) { serverworks_free_page_map(&page_dir); return retval; } /* Create a fake scratch directory */ for (i = 0; i < 1024; i++) { writel(agp_bridge->scratch_page, serverworks_private.scratch_dir.remapped+i); writel(virt_to_phys(serverworks_private.scratch_dir.real) | 1, page_dir.remapped+i); } retval = serverworks_create_gatt_pages(value->num_entries / 1024); if (retval != 0) { serverworks_free_page_map(&page_dir); serverworks_free_page_map(&serverworks_private.scratch_dir); return retval; } agp_bridge->gatt_table_real = (u32 *)page_dir.real; 
agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped; agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real); /* Get the address for the gart region. * This is a bus address even on the alpha, b/c its * used to program the agp master not the cpu */ pci_read_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,&temp); agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); /* Calculate the agp offset */ for (i = 0; i < value->num_entries / 1024; i++) writel(virt_to_phys(serverworks_private.gatt_pages[i]->real)|1, page_dir.remapped+i); return 0; } static int serverworks_free_gatt_table(struct agp_bridge_data *bridge) { struct serverworks_page_map page_dir; page_dir.real = (unsigned long *)agp_bridge->gatt_table_real; page_dir.remapped = (unsigned long __iomem *)agp_bridge->gatt_table; serverworks_free_gatt_pages(); serverworks_free_page_map(&page_dir); serverworks_free_page_map(&serverworks_private.scratch_dir); return 0; } static int serverworks_fetch_size(void) { int i; u32 temp; u32 temp2; struct aper_size_info_lvl2 *values; values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes); pci_read_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,&temp); pci_write_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs, SVWRKS_SIZE_MASK); pci_read_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,&temp2); pci_write_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,temp); temp2 &= SVWRKS_SIZE_MASK; for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { if (temp2 == values[i].size_value) { agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + i); agp_bridge->aperture_size_idx = i; return values[i].size; } } return 0; } /* * This routine could be implemented by taking the addresses * written to the GATT, and flushing them individually. However * currently it just flushes the whole table. 
Which is probably * more efficient, since agp_memory blocks can be a large number of * entries. */ static void serverworks_tlbflush(struct agp_memory *temp) { unsigned long timeout; writeb(1, serverworks_private.registers+SVWRKS_POSTFLUSH); timeout = jiffies + 3*HZ; while (readb(serverworks_private.registers+SVWRKS_POSTFLUSH) == 1) { cpu_relax(); if (time_after(jiffies, timeout)) { dev_err(&serverworks_private.svrwrks_dev->dev, "TLB post flush took more than 3 seconds\n"); break; } } writel(1, serverworks_private.registers+SVWRKS_DIRFLUSH); timeout = jiffies + 3*HZ; while (readl(serverworks_private.registers+SVWRKS_DIRFLUSH) == 1) { cpu_relax(); if (time_after(jiffies, timeout)) { dev_err(&serverworks_private.svrwrks_dev->dev, "TLB Dir flush took more than 3 seconds\n"); break; } } } static int serverworks_configure(void) { struct aper_size_info_lvl2 *current_size; u32 temp; u8 enable_reg; u16 cap_reg; current_size = A_SIZE_LVL2(agp_bridge->current_size); /* Get the memory mapped registers */ pci_read_config_dword(agp_bridge->dev, serverworks_private.mm_addr_ofs, &temp); temp = (temp & PCI_BASE_ADDRESS_MEM_MASK); serverworks_private.registers = (volatile u8 __iomem *) ioremap(temp, 4096); if (!serverworks_private.registers) { dev_err(&agp_bridge->dev->dev, "can't ioremap(%#x)\n", temp); return -ENOMEM; } writeb(0xA, serverworks_private.registers+SVWRKS_GART_CACHE); readb(serverworks_private.registers+SVWRKS_GART_CACHE); /* PCI Posting. */ writel(agp_bridge->gatt_bus_addr, serverworks_private.registers+SVWRKS_GATTBASE); readl(serverworks_private.registers+SVWRKS_GATTBASE); /* PCI Posting. 
*/ cap_reg = readw(serverworks_private.registers+SVWRKS_COMMAND); cap_reg &= ~0x0007; cap_reg |= 0x4; writew(cap_reg, serverworks_private.registers+SVWRKS_COMMAND); readw(serverworks_private.registers+SVWRKS_COMMAND); pci_read_config_byte(serverworks_private.svrwrks_dev,SVWRKS_AGP_ENABLE, &enable_reg); enable_reg |= 0x1; /* Agp Enable bit */ pci_write_config_byte(serverworks_private.svrwrks_dev,SVWRKS_AGP_ENABLE, enable_reg); serverworks_tlbflush(NULL); agp_bridge->capndx = pci_find_capability(serverworks_private.svrwrks_dev, PCI_CAP_ID_AGP); /* Fill in the mode register */ pci_read_config_dword(serverworks_private.svrwrks_dev, agp_bridge->capndx+PCI_AGP_STATUS, &agp_bridge->mode); pci_read_config_byte(agp_bridge->dev, SVWRKS_CACHING, &enable_reg); enable_reg &= ~0x3; pci_write_config_byte(agp_bridge->dev, SVWRKS_CACHING, enable_reg); pci_read_config_byte(agp_bridge->dev, SVWRKS_FEATURE, &enable_reg); enable_reg |= (1<<6); pci_write_config_byte(agp_bridge->dev,SVWRKS_FEATURE, enable_reg); return 0; } static void serverworks_cleanup(void) { iounmap((void __iomem *) serverworks_private.registers); } static int serverworks_insert_memory(struct agp_memory *mem, off_t pg_start, int type) { int i, j, num_entries; unsigned long __iomem *cur_gatt; unsigned long addr; num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries; if (type != 0 || mem->type != 0) { return -EINVAL; } if ((pg_start + mem->page_count) > num_entries) { return -EINVAL; } j = pg_start; while (j < (pg_start + mem->page_count)) { addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr; cur_gatt = SVRWRKS_GET_GATT(addr); if (!PGE_EMPTY(agp_bridge, readl(cur_gatt+GET_GATT_OFF(addr)))) return -EBUSY; j++; } if (!mem->is_flushed) { global_cache_flush(); mem->is_flushed = true; } for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr; cur_gatt = SVRWRKS_GET_GATT(addr); writel(agp_bridge->driver->mask_memory(agp_bridge, page_to_phys(mem->pages[i]), 
mem->type), cur_gatt+GET_GATT_OFF(addr)); } serverworks_tlbflush(mem); return 0; } static int serverworks_remove_memory(struct agp_memory *mem, off_t pg_start, int type) { int i; unsigned long __iomem *cur_gatt; unsigned long addr; if (type != 0 || mem->type != 0) { return -EINVAL; } global_cache_flush(); serverworks_tlbflush(mem); for (i = pg_start; i < (mem->page_count + pg_start); i++) { addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr; cur_gatt = SVRWRKS_GET_GATT(addr); writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr)); } serverworks_tlbflush(mem); return 0; } static const struct gatt_mask serverworks_masks[] = { {.mask = 1, .type = 0} }; static const struct aper_size_info_lvl2 serverworks_sizes[7] = { {2048, 524288, 0x80000000}, {1024, 262144, 0xc0000000}, {512, 131072, 0xe0000000}, {256, 65536, 0xf0000000}, {128, 32768, 0xf8000000}, {64, 16384, 0xfc000000}, {32, 8192, 0xfe000000} }; static void serverworks_agp_enable(struct agp_bridge_data *bridge, u32 mode) { u32 command; pci_read_config_dword(serverworks_private.svrwrks_dev, bridge->capndx + PCI_AGP_STATUS, &command); command = agp_collect_device_status(bridge, mode, command); command &= ~0x10; /* disable FW */ command &= ~0x08; command |= 0x100; pci_write_config_dword(serverworks_private.svrwrks_dev, bridge->capndx + PCI_AGP_COMMAND, command); agp_device_command(command, false); } static const struct agp_bridge_driver sworks_driver = { .owner = THIS_MODULE, .aperture_sizes = serverworks_sizes, .size_type = LVL2_APER_SIZE, .num_aperture_sizes = 7, .configure = serverworks_configure, .fetch_size = serverworks_fetch_size, .cleanup = serverworks_cleanup, .tlb_flush = serverworks_tlbflush, .mask_memory = agp_generic_mask_memory, .masks = serverworks_masks, .agp_enable = serverworks_agp_enable, .cache_flush = global_cache_flush, .create_gatt_table = serverworks_create_gatt_table, .free_gatt_table = serverworks_free_gatt_table, .insert_memory = serverworks_insert_memory, .remove_memory = 
serverworks_remove_memory, .alloc_by_type = agp_generic_alloc_by_type, .free_by_type = agp_generic_free_by_type, .agp_alloc_page = agp_generic_alloc_page, .agp_alloc_pages = agp_generic_alloc_pages, .agp_destroy_page = agp_generic_destroy_page, .agp_destroy_pages = agp_generic_destroy_pages, .agp_type_to_mask_type = agp_generic_type_to_mask_type, }; static int agp_serverworks_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct agp_bridge_data *bridge; struct pci_dev *bridge_dev; u32 temp, temp2; u8 cap_ptr = 0; cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); switch (pdev->device) { case 0x0006: dev_err(&pdev->dev, "ServerWorks CNB20HE is unsupported due to lack of documentation\n"); return -ENODEV; case PCI_DEVICE_ID_SERVERWORKS_HE: case PCI_DEVICE_ID_SERVERWORKS_LE: case 0x0007: break; default: if (cap_ptr) dev_err(&pdev->dev, "unsupported Serverworks chipset " "[%04x/%04x]\n", pdev->vendor, pdev->device); return -ENODEV; } /* Everything is on func 1 here so we are hardcoding function one */ bridge_dev = pci_get_bus_and_slot((unsigned int)pdev->bus->number, PCI_DEVFN(0, 1)); if (!bridge_dev) { dev_info(&pdev->dev, "can't find secondary device\n"); return -ENODEV; } serverworks_private.svrwrks_dev = bridge_dev; serverworks_private.gart_addr_ofs = 0x10; pci_read_config_dword(pdev, SVWRKS_APSIZE, &temp); if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) { pci_read_config_dword(pdev, SVWRKS_APSIZE + 4, &temp2); if (temp2 != 0) { dev_info(&pdev->dev, "64 bit aperture address, " "but top bits are not zero; disabling AGP\n"); return -ENODEV; } serverworks_private.mm_addr_ofs = 0x18; } else serverworks_private.mm_addr_ofs = 0x14; pci_read_config_dword(pdev, serverworks_private.mm_addr_ofs, &temp); if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) { pci_read_config_dword(pdev, serverworks_private.mm_addr_ofs + 4, &temp2); if (temp2 != 0) { dev_info(&pdev->dev, "64 bit MMIO address, but top " "bits are not zero; disabling AGP\n"); return -ENODEV; } } bridge = 
agp_alloc_bridge(); if (!bridge) return -ENOMEM; bridge->driver = &sworks_driver; bridge->dev_private_data = &serverworks_private, bridge->dev = pci_dev_get(pdev); pci_set_drvdata(pdev, bridge); return agp_add_bridge(bridge); } static void agp_serverworks_remove(struct pci_dev *pdev) { struct agp_bridge_data *bridge = pci_get_drvdata(pdev); pci_dev_put(bridge->dev); agp_remove_bridge(bridge); agp_put_bridge(bridge); pci_dev_put(serverworks_private.svrwrks_dev); serverworks_private.svrwrks_dev = NULL; } static struct pci_device_id agp_serverworks_pci_table[] = { { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SERVERWORKS, .device = PCI_ANY_ID, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { } }; MODULE_DEVICE_TABLE(pci, agp_serverworks_pci_table); static struct pci_driver agp_serverworks_pci_driver = { .name = "agpgart-serverworks", .id_table = agp_serverworks_pci_table, .probe = agp_serverworks_probe, .remove = agp_serverworks_remove, }; static int __init agp_serverworks_init(void) { if (agp_off) return -EINVAL; return pci_register_driver(&agp_serverworks_pci_driver); } static void __exit agp_serverworks_cleanup(void) { pci_unregister_driver(&agp_serverworks_pci_driver); } module_init(agp_serverworks_init); module_exit(agp_serverworks_cleanup); MODULE_LICENSE("GPL and additional rights");
gpl-2.0
CyanogenMod/android_kernel_xiaomi_cancro
arch/powerpc/platforms/86xx/sbc8641d.c
4544
2707
/*
 * SBC8641D board specific routines
 *
 * Copyright 2008 Wind River Systems Inc.
 *
 * By Paul Gortmaker (see MAINTAINERS for contact information)
 *
 * Based largely on the 8641 HPCN support by Freescale Semiconductor Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/of_platform.h>

#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/prom.h>
#include <mm/mmu_decl.h>
#include <asm/udbg.h>

#include <asm/mpic.h>

#include <sysdev/fsl_pci.h>
#include <sysdev/fsl_soc.h>

#include "mpc86xx.h"

/*
 * Board setup: register each PCIe bridge found in the device tree and
 * bring up the secondary core(s) when SMP is configured.
 */
static void __init
sbc8641_setup_arch(void)
{
#ifdef CONFIG_PCI
	struct device_node *np;
#endif

	if (ppc_md.progress)
		ppc_md.progress("sbc8641_setup_arch()", 0);

#ifdef CONFIG_PCI
	for_each_compatible_node(np, "pci", "fsl,mpc8641-pcie")
		fsl_add_bridge(np, 0);
#endif

	printk("SBC8641 board from Wind River\n");

#ifdef CONFIG_SMP
	mpc86xx_smp_init();
#endif
}

/* /proc/cpuinfo extras: vendor string and the System Version Register. */
static void
sbc8641_show_cpuinfo(struct seq_file *m)
{
	uint svid = mfspr(SPRN_SVR);

	seq_printf(m, "Vendor\t\t: Wind River Systems\n");

	seq_printf(m, "SVR\t\t: 0x%x\n", svid);
}

/*
 * Called very early, device-tree isn't unflattened
 */
static int __init sbc8641_probe(void)
{
	unsigned long root = of_get_flat_dt_root();

	if (of_flat_dt_is_compatible(root, "wind,sbc8641"))
		return 1;	/* Looks good */

	return 0;
}

/* Zero the timebase and enable it via HID0 (must precede decrementer use). */
static long __init
mpc86xx_time_init(void)
{
	unsigned int temp;

	/* Set the time base to zero */
	mtspr(SPRN_TBWL, 0);
	mtspr(SPRN_TBWU, 0);

	temp = mfspr(SPRN_HID0);
	temp |= HID0_TBEN;
	mtspr(SPRN_HID0, temp);
	/* isync: make sure the HID0 update is visible before continuing */
	asm volatile("isync");

	return 0;
}

/* Bus types probed for platform devices at machine_device_initcall time. */
static __initdata struct of_device_id of_bus_ids[] = {
	{ .compatible = "simple-bus", },
	{ .compatible = "gianfar", },
	{},
};

static int __init declare_of_platform_devices(void)
{
	of_platform_bus_probe(NULL, of_bus_ids, NULL);

	return 0;
}
machine_device_initcall(sbc8641, declare_of_platform_devices);

define_machine(sbc8641) {
	.name			= "SBC8641D",
	.probe			= sbc8641_probe,
	.setup_arch		= sbc8641_setup_arch,
	.init_IRQ		= mpc86xx_init_irq,
	.show_cpuinfo		= sbc8641_show_cpuinfo,
	.get_irq		= mpic_get_irq,
	.restart		= fsl_rstcr_restart,
	.time_init		= mpc86xx_time_init,
	.calibrate_decr		= generic_calibrate_decr,
	.progress		= udbg_progress,
#ifdef CONFIG_PCI
	.pcibios_fixup_bus	= fsl_pcibios_fixup_bus,
#endif
};
gpl-2.0
armani-dev/android_kernel_xiaomi_armani
drivers/regulator/mc13892-regulator.c
4800
19810
/* * Regulator Driver for Freescale MC13892 PMIC * * Copyright 2010 Yong Shen <yong.shen@linaro.org> * * Based on draft driver from Arnaud Patard <arnaud.patard@rtp-net.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/mfd/mc13892.h> #include <linux/regulator/machine.h> #include <linux/regulator/driver.h> #include <linux/platform_device.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/err.h> #include <linux/module.h> #include "mc13xxx.h" #define MC13892_REVISION 7 #define MC13892_POWERCTL0 13 #define MC13892_POWERCTL0_USEROFFSPI 3 #define MC13892_POWERCTL0_VCOINCELLVSEL 20 #define MC13892_POWERCTL0_VCOINCELLVSEL_M (7<<20) #define MC13892_POWERCTL0_VCOINCELLEN (1<<23) #define MC13892_SWITCHERS0_SWxHI (1<<23) #define MC13892_SWITCHERS0 24 #define MC13892_SWITCHERS0_SW1VSEL 0 #define MC13892_SWITCHERS0_SW1VSEL_M (0x1f<<0) #define MC13892_SWITCHERS0_SW1HI (1<<23) #define MC13892_SWITCHERS0_SW1EN 0 #define MC13892_SWITCHERS1 25 #define MC13892_SWITCHERS1_SW2VSEL 0 #define MC13892_SWITCHERS1_SW2VSEL_M (0x1f<<0) #define MC13892_SWITCHERS1_SW2HI (1<<23) #define MC13892_SWITCHERS1_SW2EN 0 #define MC13892_SWITCHERS2 26 #define MC13892_SWITCHERS2_SW3VSEL 0 #define MC13892_SWITCHERS2_SW3VSEL_M (0x1f<<0) #define MC13892_SWITCHERS2_SW3HI (1<<23) #define MC13892_SWITCHERS2_SW3EN 0 #define MC13892_SWITCHERS3 27 #define MC13892_SWITCHERS3_SW4VSEL 0 #define MC13892_SWITCHERS3_SW4VSEL_M (0x1f<<0) #define MC13892_SWITCHERS3_SW4HI (1<<23) #define MC13892_SWITCHERS3_SW4EN 0 #define MC13892_SWITCHERS4 28 #define MC13892_SWITCHERS4_SW1MODE 0 #define MC13892_SWITCHERS4_SW1MODE_AUTO (8<<0) #define MC13892_SWITCHERS4_SW1MODE_M (0xf<<0) #define MC13892_SWITCHERS4_SW2MODE 10 #define MC13892_SWITCHERS4_SW2MODE_AUTO (8<<10) #define MC13892_SWITCHERS4_SW2MODE_M (0xf<<10) #define 
MC13892_SWITCHERS5 29 #define MC13892_SWITCHERS5_SW3MODE 0 #define MC13892_SWITCHERS5_SW3MODE_AUTO (8<<0) #define MC13892_SWITCHERS5_SW3MODE_M (0xf<<0) #define MC13892_SWITCHERS5_SW4MODE 8 #define MC13892_SWITCHERS5_SW4MODE_AUTO (8<<8) #define MC13892_SWITCHERS5_SW4MODE_M (0xf<<8) #define MC13892_SWITCHERS5_SWBSTEN (1<<20) #define MC13892_REGULATORSETTING0 30 #define MC13892_REGULATORSETTING0_VGEN1VSEL 0 #define MC13892_REGULATORSETTING0_VDIGVSEL 4 #define MC13892_REGULATORSETTING0_VGEN2VSEL 6 #define MC13892_REGULATORSETTING0_VPLLVSEL 9 #define MC13892_REGULATORSETTING0_VUSB2VSEL 11 #define MC13892_REGULATORSETTING0_VGEN3VSEL 14 #define MC13892_REGULATORSETTING0_VCAMVSEL 16 #define MC13892_REGULATORSETTING0_VGEN1VSEL_M (3<<0) #define MC13892_REGULATORSETTING0_VDIGVSEL_M (3<<4) #define MC13892_REGULATORSETTING0_VGEN2VSEL_M (7<<6) #define MC13892_REGULATORSETTING0_VPLLVSEL_M (3<<9) #define MC13892_REGULATORSETTING0_VUSB2VSEL_M (3<<11) #define MC13892_REGULATORSETTING0_VGEN3VSEL_M (1<<14) #define MC13892_REGULATORSETTING0_VCAMVSEL_M (3<<16) #define MC13892_REGULATORSETTING1 31 #define MC13892_REGULATORSETTING1_VVIDEOVSEL 2 #define MC13892_REGULATORSETTING1_VAUDIOVSEL 4 #define MC13892_REGULATORSETTING1_VSDVSEL 6 #define MC13892_REGULATORSETTING1_VVIDEOVSEL_M (3<<2) #define MC13892_REGULATORSETTING1_VAUDIOVSEL_M (3<<4) #define MC13892_REGULATORSETTING1_VSDVSEL_M (7<<6) #define MC13892_REGULATORMODE0 32 #define MC13892_REGULATORMODE0_VGEN1EN (1<<0) #define MC13892_REGULATORMODE0_VGEN1STDBY (1<<1) #define MC13892_REGULATORMODE0_VGEN1MODE (1<<2) #define MC13892_REGULATORMODE0_VIOHIEN (1<<3) #define MC13892_REGULATORMODE0_VIOHISTDBY (1<<4) #define MC13892_REGULATORMODE0_VIOHIMODE (1<<5) #define MC13892_REGULATORMODE0_VDIGEN (1<<9) #define MC13892_REGULATORMODE0_VDIGSTDBY (1<<10) #define MC13892_REGULATORMODE0_VDIGMODE (1<<11) #define MC13892_REGULATORMODE0_VGEN2EN (1<<12) #define MC13892_REGULATORMODE0_VGEN2STDBY (1<<13) #define MC13892_REGULATORMODE0_VGEN2MODE (1<<14) 
#define MC13892_REGULATORMODE0_VPLLEN (1<<15) #define MC13892_REGULATORMODE0_VPLLSTDBY (1<<16) #define MC13892_REGULATORMODE0_VPLLMODE (1<<17) #define MC13892_REGULATORMODE0_VUSB2EN (1<<18) #define MC13892_REGULATORMODE0_VUSB2STDBY (1<<19) #define MC13892_REGULATORMODE0_VUSB2MODE (1<<20) #define MC13892_REGULATORMODE1 33 #define MC13892_REGULATORMODE1_VGEN3EN (1<<0) #define MC13892_REGULATORMODE1_VGEN3STDBY (1<<1) #define MC13892_REGULATORMODE1_VGEN3MODE (1<<2) #define MC13892_REGULATORMODE1_VCAMEN (1<<6) #define MC13892_REGULATORMODE1_VCAMSTDBY (1<<7) #define MC13892_REGULATORMODE1_VCAMMODE (1<<8) #define MC13892_REGULATORMODE1_VCAMCONFIGEN (1<<9) #define MC13892_REGULATORMODE1_VVIDEOEN (1<<12) #define MC13892_REGULATORMODE1_VVIDEOSTDBY (1<<13) #define MC13892_REGULATORMODE1_VVIDEOMODE (1<<14) #define MC13892_REGULATORMODE1_VAUDIOEN (1<<15) #define MC13892_REGULATORMODE1_VAUDIOSTDBY (1<<16) #define MC13892_REGULATORMODE1_VAUDIOMODE (1<<17) #define MC13892_REGULATORMODE1_VSDEN (1<<18) #define MC13892_REGULATORMODE1_VSDSTDBY (1<<19) #define MC13892_REGULATORMODE1_VSDMODE (1<<20) #define MC13892_POWERMISC 34 #define MC13892_POWERMISC_GPO1EN (1<<6) #define MC13892_POWERMISC_GPO2EN (1<<8) #define MC13892_POWERMISC_GPO3EN (1<<10) #define MC13892_POWERMISC_GPO4EN (1<<12) #define MC13892_POWERMISC_PWGT1SPIEN (1<<15) #define MC13892_POWERMISC_PWGT2SPIEN (1<<16) #define MC13892_POWERMISC_GPO4ADINEN (1<<21) #define MC13892_POWERMISC_PWGTSPI_M (3 << 15) #define MC13892_USB1 50 #define MC13892_USB1_VUSBEN (1<<3) static const int mc13892_vcoincell[] = { 2500000, 2700000, 2800000, 2900000, 3000000, 3100000, 3200000, 3300000, }; static const int mc13892_sw1[] = { 600000, 625000, 650000, 675000, 700000, 725000, 750000, 775000, 800000, 825000, 850000, 875000, 900000, 925000, 950000, 975000, 1000000, 1025000, 1050000, 1075000, 1100000, 1125000, 1150000, 1175000, 1200000, 1225000, 1250000, 1275000, 1300000, 1325000, 1350000, 1375000 }; static const int mc13892_sw[] = { 600000, 
625000, 650000, 675000, 700000, 725000, 750000, 775000, 800000, 825000, 850000, 875000, 900000, 925000, 950000, 975000, 1000000, 1025000, 1050000, 1075000, 1100000, 1125000, 1150000, 1175000, 1200000, 1225000, 1250000, 1275000, 1300000, 1325000, 1350000, 1375000, 1400000, 1425000, 1450000, 1475000, 1500000, 1525000, 1550000, 1575000, 1600000, 1625000, 1650000, 1675000, 1700000, 1725000, 1750000, 1775000, 1800000, 1825000, 1850000, 1875000 }; static const int mc13892_swbst[] = { 5000000, }; static const int mc13892_viohi[] = { 2775000, }; static const int mc13892_vpll[] = { 1050000, 1250000, 1650000, 1800000, }; static const int mc13892_vdig[] = { 1050000, 1250000, 1650000, 1800000, }; static const int mc13892_vsd[] = { 1800000, 2000000, 2600000, 2700000, 2800000, 2900000, 3000000, 3150000, }; static const int mc13892_vusb2[] = { 2400000, 2600000, 2700000, 2775000, }; static const int mc13892_vvideo[] = { 2700000, 2775000, 2500000, 2600000, }; static const int mc13892_vaudio[] = { 2300000, 2500000, 2775000, 3000000, }; static const int mc13892_vcam[] = { 2500000, 2600000, 2750000, 3000000, }; static const int mc13892_vgen1[] = { 1200000, 1500000, 2775000, 3150000, }; static const int mc13892_vgen2[] = { 1200000, 1500000, 1600000, 1800000, 2700000, 2800000, 3000000, 3150000, }; static const int mc13892_vgen3[] = { 1800000, 2900000, }; static const int mc13892_vusb[] = { 3300000, }; static const int mc13892_gpo[] = { 2750000, }; static const int mc13892_pwgtdrv[] = { 5000000, }; static struct regulator_ops mc13892_gpo_regulator_ops; /* sw regulators need special care due to the "hi bit" */ static struct regulator_ops mc13892_sw_regulator_ops; #define MC13892_FIXED_DEFINE(name, reg, voltages) \ MC13xxx_FIXED_DEFINE(MC13892_, name, reg, voltages, \ mc13xxx_fixed_regulator_ops) #define MC13892_GPO_DEFINE(name, reg, voltages) \ MC13xxx_GPO_DEFINE(MC13892_, name, reg, voltages, \ mc13892_gpo_regulator_ops) #define MC13892_SW_DEFINE(name, reg, vsel_reg, voltages) \ 
MC13xxx_DEFINE(MC13892_, name, reg, vsel_reg, voltages, \ mc13892_sw_regulator_ops) #define MC13892_DEFINE_REGU(name, reg, vsel_reg, voltages) \ MC13xxx_DEFINE(MC13892_, name, reg, vsel_reg, voltages, \ mc13xxx_regulator_ops) static struct mc13xxx_regulator mc13892_regulators[] = { MC13892_DEFINE_REGU(VCOINCELL, POWERCTL0, POWERCTL0, mc13892_vcoincell), MC13892_SW_DEFINE(SW1, SWITCHERS0, SWITCHERS0, mc13892_sw1), MC13892_SW_DEFINE(SW2, SWITCHERS1, SWITCHERS1, mc13892_sw), MC13892_SW_DEFINE(SW3, SWITCHERS2, SWITCHERS2, mc13892_sw), MC13892_SW_DEFINE(SW4, SWITCHERS3, SWITCHERS3, mc13892_sw), MC13892_FIXED_DEFINE(SWBST, SWITCHERS5, mc13892_swbst), MC13892_FIXED_DEFINE(VIOHI, REGULATORMODE0, mc13892_viohi), MC13892_DEFINE_REGU(VPLL, REGULATORMODE0, REGULATORSETTING0, \ mc13892_vpll), MC13892_DEFINE_REGU(VDIG, REGULATORMODE0, REGULATORSETTING0, \ mc13892_vdig), MC13892_DEFINE_REGU(VSD, REGULATORMODE1, REGULATORSETTING1, \ mc13892_vsd), MC13892_DEFINE_REGU(VUSB2, REGULATORMODE0, REGULATORSETTING0, \ mc13892_vusb2), MC13892_DEFINE_REGU(VVIDEO, REGULATORMODE1, REGULATORSETTING1, \ mc13892_vvideo), MC13892_DEFINE_REGU(VAUDIO, REGULATORMODE1, REGULATORSETTING1, \ mc13892_vaudio), MC13892_DEFINE_REGU(VCAM, REGULATORMODE1, REGULATORSETTING0, \ mc13892_vcam), MC13892_DEFINE_REGU(VGEN1, REGULATORMODE0, REGULATORSETTING0, \ mc13892_vgen1), MC13892_DEFINE_REGU(VGEN2, REGULATORMODE0, REGULATORSETTING0, \ mc13892_vgen2), MC13892_DEFINE_REGU(VGEN3, REGULATORMODE1, REGULATORSETTING0, \ mc13892_vgen3), MC13892_FIXED_DEFINE(VUSB, USB1, mc13892_vusb), MC13892_GPO_DEFINE(GPO1, POWERMISC, mc13892_gpo), MC13892_GPO_DEFINE(GPO2, POWERMISC, mc13892_gpo), MC13892_GPO_DEFINE(GPO3, POWERMISC, mc13892_gpo), MC13892_GPO_DEFINE(GPO4, POWERMISC, mc13892_gpo), MC13892_GPO_DEFINE(PWGT1SPI, POWERMISC, mc13892_pwgtdrv), MC13892_GPO_DEFINE(PWGT2SPI, POWERMISC, mc13892_pwgtdrv), }; static int mc13892_powermisc_rmw(struct mc13xxx_regulator_priv *priv, u32 mask, u32 val) { struct mc13xxx *mc13892 = 
priv->mc13xxx; int ret; u32 valread; BUG_ON(val & ~mask); ret = mc13xxx_reg_read(mc13892, MC13892_POWERMISC, &valread); if (ret) return ret; /* Update the stored state for Power Gates. */ priv->powermisc_pwgt_state = (priv->powermisc_pwgt_state & ~mask) | val; priv->powermisc_pwgt_state &= MC13892_POWERMISC_PWGTSPI_M; /* Construct the new register value */ valread = (valread & ~mask) | val; /* Overwrite the PWGTxEN with the stored version */ valread = (valread & ~MC13892_POWERMISC_PWGTSPI_M) | priv->powermisc_pwgt_state; return mc13xxx_reg_write(mc13892, MC13892_POWERMISC, valread); } static int mc13892_gpo_regulator_enable(struct regulator_dev *rdev) { struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev); int id = rdev_get_id(rdev); int ret; u32 en_val = mc13892_regulators[id].enable_bit; u32 mask = mc13892_regulators[id].enable_bit; dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id); /* Power Gate enable value is 0 */ if (id == MC13892_PWGT1SPI || id == MC13892_PWGT2SPI) en_val = 0; if (id == MC13892_GPO4) mask |= MC13892_POWERMISC_GPO4ADINEN; mc13xxx_lock(priv->mc13xxx); ret = mc13892_powermisc_rmw(priv, mask, en_val); mc13xxx_unlock(priv->mc13xxx); return ret; } static int mc13892_gpo_regulator_disable(struct regulator_dev *rdev) { struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev); int id = rdev_get_id(rdev); int ret; u32 dis_val = 0; dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id); /* Power Gate disable value is 1 */ if (id == MC13892_PWGT1SPI || id == MC13892_PWGT2SPI) dis_val = mc13892_regulators[id].enable_bit; mc13xxx_lock(priv->mc13xxx); ret = mc13892_powermisc_rmw(priv, mc13892_regulators[id].enable_bit, dis_val); mc13xxx_unlock(priv->mc13xxx); return ret; } static int mc13892_gpo_regulator_is_enabled(struct regulator_dev *rdev) { struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev); int ret, id = rdev_get_id(rdev); unsigned int val; mc13xxx_lock(priv->mc13xxx); ret = mc13xxx_reg_read(priv->mc13xxx, 
mc13892_regulators[id].reg, &val); mc13xxx_unlock(priv->mc13xxx); if (ret) return ret; /* Power Gates state is stored in powermisc_pwgt_state * where the meaning of bits is negated */ val = (val & ~MC13892_POWERMISC_PWGTSPI_M) | (priv->powermisc_pwgt_state ^ MC13892_POWERMISC_PWGTSPI_M); return (val & mc13892_regulators[id].enable_bit) != 0; } static struct regulator_ops mc13892_gpo_regulator_ops = { .enable = mc13892_gpo_regulator_enable, .disable = mc13892_gpo_regulator_disable, .is_enabled = mc13892_gpo_regulator_is_enabled, .list_voltage = mc13xxx_regulator_list_voltage, .set_voltage = mc13xxx_fixed_regulator_set_voltage, .get_voltage = mc13xxx_fixed_regulator_get_voltage, }; static int mc13892_sw_regulator_get_voltage(struct regulator_dev *rdev) { struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev); int ret, id = rdev_get_id(rdev); unsigned int val, hi; dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id); mc13xxx_lock(priv->mc13xxx); ret = mc13xxx_reg_read(priv->mc13xxx, mc13892_regulators[id].vsel_reg, &val); mc13xxx_unlock(priv->mc13xxx); if (ret) return ret; hi = val & MC13892_SWITCHERS0_SWxHI; val = (val & mc13892_regulators[id].vsel_mask) >> mc13892_regulators[id].vsel_shift; dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val); if (hi) val = (25000 * val) + 1100000; else val = (25000 * val) + 600000; return val; } static int mc13892_sw_regulator_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV, unsigned *selector) { struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev); int hi, value, mask, id = rdev_get_id(rdev); u32 valread; int ret; dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n", __func__, id, min_uV, max_uV); /* Find the best index */ value = mc13xxx_get_best_voltage_index(rdev, min_uV, max_uV); dev_dbg(rdev_get_dev(rdev), "%s best value: %d\n", __func__, value); if (value < 0) return value; value = mc13892_regulators[id].voltages[value]; mc13xxx_lock(priv->mc13xxx); ret = 
mc13xxx_reg_read(priv->mc13xxx, mc13892_regulators[id].vsel_reg, &valread); if (ret) goto err; if (value > 1375000) hi = 1; else if (value < 1100000) hi = 0; else hi = valread & MC13892_SWITCHERS0_SWxHI; if (hi) { value = (value - 1100000) / 25000; value |= MC13892_SWITCHERS0_SWxHI; } else value = (value - 600000) / 25000; mask = mc13892_regulators[id].vsel_mask | MC13892_SWITCHERS0_SWxHI; valread = (valread & ~mask) | (value << mc13892_regulators[id].vsel_shift); ret = mc13xxx_reg_write(priv->mc13xxx, mc13892_regulators[id].vsel_reg, valread); err: mc13xxx_unlock(priv->mc13xxx); return ret; } static struct regulator_ops mc13892_sw_regulator_ops = { .is_enabled = mc13xxx_sw_regulator_is_enabled, .list_voltage = mc13xxx_regulator_list_voltage, .set_voltage = mc13892_sw_regulator_set_voltage, .get_voltage = mc13892_sw_regulator_get_voltage, }; static int mc13892_vcam_set_mode(struct regulator_dev *rdev, unsigned int mode) { unsigned int en_val = 0; struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev); int ret, id = rdev_get_id(rdev); if (mode == REGULATOR_MODE_FAST) en_val = MC13892_REGULATORMODE1_VCAMCONFIGEN; mc13xxx_lock(priv->mc13xxx); ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13892_regulators[id].reg, MC13892_REGULATORMODE1_VCAMCONFIGEN, en_val); mc13xxx_unlock(priv->mc13xxx); return ret; } static unsigned int mc13892_vcam_get_mode(struct regulator_dev *rdev) { struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev); int ret, id = rdev_get_id(rdev); unsigned int val; mc13xxx_lock(priv->mc13xxx); ret = mc13xxx_reg_read(priv->mc13xxx, mc13892_regulators[id].reg, &val); mc13xxx_unlock(priv->mc13xxx); if (ret) return ret; if (val & MC13892_REGULATORMODE1_VCAMCONFIGEN) return REGULATOR_MODE_FAST; return REGULATOR_MODE_NORMAL; } static int __devinit mc13892_regulator_probe(struct platform_device *pdev) { struct mc13xxx_regulator_priv *priv; struct mc13xxx *mc13892 = dev_get_drvdata(pdev->dev.parent); struct mc13xxx_regulator_platform_data *pdata = 
dev_get_platdata(&pdev->dev); struct mc13xxx_regulator_init_data *mc13xxx_data; int i, ret; int num_regulators = 0; u32 val; num_regulators = mc13xxx_get_num_regulators_dt(pdev); if (num_regulators <= 0 && pdata) num_regulators = pdata->num_regulators; if (num_regulators <= 0) return -EINVAL; priv = devm_kzalloc(&pdev->dev, sizeof(*priv) + num_regulators * sizeof(priv->regulators[0]), GFP_KERNEL); if (!priv) return -ENOMEM; priv->num_regulators = num_regulators; priv->mc13xxx_regulators = mc13892_regulators; priv->mc13xxx = mc13892; platform_set_drvdata(pdev, priv); mc13xxx_lock(mc13892); ret = mc13xxx_reg_read(mc13892, MC13892_REVISION, &val); if (ret) goto err_unlock; /* enable switch auto mode */ if ((val & 0x0000FFFF) == 0x45d0) { ret = mc13xxx_reg_rmw(mc13892, MC13892_SWITCHERS4, MC13892_SWITCHERS4_SW1MODE_M | MC13892_SWITCHERS4_SW2MODE_M, MC13892_SWITCHERS4_SW1MODE_AUTO | MC13892_SWITCHERS4_SW2MODE_AUTO); if (ret) goto err_unlock; ret = mc13xxx_reg_rmw(mc13892, MC13892_SWITCHERS5, MC13892_SWITCHERS5_SW3MODE_M | MC13892_SWITCHERS5_SW4MODE_M, MC13892_SWITCHERS5_SW3MODE_AUTO | MC13892_SWITCHERS5_SW4MODE_AUTO); if (ret) goto err_unlock; } mc13xxx_unlock(mc13892); mc13892_regulators[MC13892_VCAM].desc.ops->set_mode = mc13892_vcam_set_mode; mc13892_regulators[MC13892_VCAM].desc.ops->get_mode = mc13892_vcam_get_mode; mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators, ARRAY_SIZE(mc13892_regulators)); for (i = 0; i < num_regulators; i++) { struct regulator_init_data *init_data; struct regulator_desc *desc; struct device_node *node = NULL; int id; if (mc13xxx_data) { id = mc13xxx_data[i].id; init_data = mc13xxx_data[i].init_data; node = mc13xxx_data[i].node; } else { id = pdata->regulators[i].id; init_data = pdata->regulators[i].init_data; } desc = &mc13892_regulators[id].desc; priv->regulators[i] = regulator_register( desc, &pdev->dev, init_data, priv, node); if (IS_ERR(priv->regulators[i])) { dev_err(&pdev->dev, "failed to register regulator %s\n", 
mc13892_regulators[i].desc.name); ret = PTR_ERR(priv->regulators[i]); goto err; } } return 0; err: while (--i >= 0) regulator_unregister(priv->regulators[i]); return ret; err_unlock: mc13xxx_unlock(mc13892); return ret; } static int __devexit mc13892_regulator_remove(struct platform_device *pdev) { struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev); int i; platform_set_drvdata(pdev, NULL); for (i = 0; i < priv->num_regulators; i++) regulator_unregister(priv->regulators[i]); return 0; } static struct platform_driver mc13892_regulator_driver = { .driver = { .name = "mc13892-regulator", .owner = THIS_MODULE, }, .remove = __devexit_p(mc13892_regulator_remove), .probe = mc13892_regulator_probe, }; static int __init mc13892_regulator_init(void) { return platform_driver_register(&mc13892_regulator_driver); } subsys_initcall(mc13892_regulator_init); static void __exit mc13892_regulator_exit(void) { platform_driver_unregister(&mc13892_regulator_driver); } module_exit(mc13892_regulator_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Yong Shen <yong.shen@linaro.org>"); MODULE_DESCRIPTION("Regulator Driver for Freescale MC13892 PMIC"); MODULE_ALIAS("platform:mc13892-regulator");
gpl-2.0
CyanogenMod/android_kernel_samsung_hlte
drivers/isdn/mISDN/stack.c
4800
16836
/* * * Author Karsten Keil <kkeil@novell.com> * * Copyright 2008 by Karsten Keil <kkeil@novell.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/slab.h> #include <linux/mISDNif.h> #include <linux/kthread.h> #include "core.h" static u_int *debug; static inline void _queue_message(struct mISDNstack *st, struct sk_buff *skb) { struct mISDNhead *hh = mISDN_HEAD_P(skb); if (*debug & DEBUG_QUEUE_FUNC) printk(KERN_DEBUG "%s prim(%x) id(%x) %p\n", __func__, hh->prim, hh->id, skb); skb_queue_tail(&st->msgq, skb); if (likely(!test_bit(mISDN_STACK_STOPPED, &st->status))) { test_and_set_bit(mISDN_STACK_WORK, &st->status); wake_up_interruptible(&st->workq); } } static int mISDN_queue_message(struct mISDNchannel *ch, struct sk_buff *skb) { _queue_message(ch->st, skb); return 0; } static struct mISDNchannel * get_channel4id(struct mISDNstack *st, u_int id) { struct mISDNchannel *ch; mutex_lock(&st->lmutex); list_for_each_entry(ch, &st->layer2, list) { if (id == ch->nr) goto unlock; } ch = NULL; unlock: mutex_unlock(&st->lmutex); return ch; } static void send_socklist(struct mISDN_sock_list *sl, struct sk_buff *skb) { struct hlist_node *node; struct sock *sk; struct sk_buff *cskb = NULL; read_lock(&sl->lock); sk_for_each(sk, node, &sl->head) { if (sk->sk_state != MISDN_BOUND) continue; if (!cskb) cskb = skb_copy(skb, GFP_KERNEL); if (!cskb) { printk(KERN_WARNING "%s no skb\n", __func__); break; } if (!sock_queue_rcv_skb(sk, cskb)) cskb = NULL; } read_unlock(&sl->lock); if (cskb) dev_kfree_skb(cskb); } static void send_layer2(struct mISDNstack *st, struct sk_buff *skb) { struct 
sk_buff *cskb; struct mISDNhead *hh = mISDN_HEAD_P(skb); struct mISDNchannel *ch; int ret; if (!st) return; mutex_lock(&st->lmutex); if ((hh->id & MISDN_ID_ADDR_MASK) == MISDN_ID_ANY) { /* L2 for all */ list_for_each_entry(ch, &st->layer2, list) { if (list_is_last(&ch->list, &st->layer2)) { cskb = skb; skb = NULL; } else { cskb = skb_copy(skb, GFP_KERNEL); } if (cskb) { ret = ch->send(ch, cskb); if (ret) { if (*debug & DEBUG_SEND_ERR) printk(KERN_DEBUG "%s ch%d prim(%x) addr(%x)" " err %d\n", __func__, ch->nr, hh->prim, ch->addr, ret); dev_kfree_skb(cskb); } } else { printk(KERN_WARNING "%s ch%d addr %x no mem\n", __func__, ch->nr, ch->addr); goto out; } } } else { list_for_each_entry(ch, &st->layer2, list) { if ((hh->id & MISDN_ID_ADDR_MASK) == ch->addr) { ret = ch->send(ch, skb); if (!ret) skb = NULL; goto out; } } ret = st->dev->teimgr->ctrl(st->dev->teimgr, CHECK_DATA, skb); if (!ret) skb = NULL; else if (*debug & DEBUG_SEND_ERR) printk(KERN_DEBUG "%s ch%d mgr prim(%x) addr(%x) err %d\n", __func__, ch->nr, hh->prim, ch->addr, ret); } out: mutex_unlock(&st->lmutex); if (skb) dev_kfree_skb(skb); } static inline int send_msg_to_layer(struct mISDNstack *st, struct sk_buff *skb) { struct mISDNhead *hh = mISDN_HEAD_P(skb); struct mISDNchannel *ch; int lm; lm = hh->prim & MISDN_LAYERMASK; if (*debug & DEBUG_QUEUE_FUNC) printk(KERN_DEBUG "%s prim(%x) id(%x) %p\n", __func__, hh->prim, hh->id, skb); if (lm == 0x1) { if (!hlist_empty(&st->l1sock.head)) { __net_timestamp(skb); send_socklist(&st->l1sock, skb); } return st->layer1->send(st->layer1, skb); } else if (lm == 0x2) { if (!hlist_empty(&st->l1sock.head)) send_socklist(&st->l1sock, skb); send_layer2(st, skb); return 0; } else if (lm == 0x4) { ch = get_channel4id(st, hh->id); if (ch) return ch->send(ch, skb); else printk(KERN_WARNING "%s: dev(%s) prim(%x) id(%x) no channel\n", __func__, dev_name(&st->dev->dev), hh->prim, hh->id); } else if (lm == 0x8) { WARN_ON(lm == 0x8); ch = get_channel4id(st, hh->id); if (ch) 
return ch->send(ch, skb); else printk(KERN_WARNING "%s: dev(%s) prim(%x) id(%x) no channel\n", __func__, dev_name(&st->dev->dev), hh->prim, hh->id); } else { /* broadcast not handled yet */ printk(KERN_WARNING "%s: dev(%s) prim %x not delivered\n", __func__, dev_name(&st->dev->dev), hh->prim); } return -ESRCH; } static void do_clear_stack(struct mISDNstack *st) { } static int mISDNStackd(void *data) { struct mISDNstack *st = data; int err = 0; sigfillset(&current->blocked); if (*debug & DEBUG_MSG_THREAD) printk(KERN_DEBUG "mISDNStackd %s started\n", dev_name(&st->dev->dev)); if (st->notify != NULL) { complete(st->notify); st->notify = NULL; } for (;;) { struct sk_buff *skb; if (unlikely(test_bit(mISDN_STACK_STOPPED, &st->status))) { test_and_clear_bit(mISDN_STACK_WORK, &st->status); test_and_clear_bit(mISDN_STACK_RUNNING, &st->status); } else test_and_set_bit(mISDN_STACK_RUNNING, &st->status); while (test_bit(mISDN_STACK_WORK, &st->status)) { skb = skb_dequeue(&st->msgq); if (!skb) { test_and_clear_bit(mISDN_STACK_WORK, &st->status); /* test if a race happens */ skb = skb_dequeue(&st->msgq); if (!skb) continue; test_and_set_bit(mISDN_STACK_WORK, &st->status); } #ifdef MISDN_MSG_STATS st->msg_cnt++; #endif err = send_msg_to_layer(st, skb); if (unlikely(err)) { if (*debug & DEBUG_SEND_ERR) printk(KERN_DEBUG "%s: %s prim(%x) id(%x) " "send call(%d)\n", __func__, dev_name(&st->dev->dev), mISDN_HEAD_PRIM(skb), mISDN_HEAD_ID(skb), err); dev_kfree_skb(skb); continue; } if (unlikely(test_bit(mISDN_STACK_STOPPED, &st->status))) { test_and_clear_bit(mISDN_STACK_WORK, &st->status); test_and_clear_bit(mISDN_STACK_RUNNING, &st->status); break; } } if (test_bit(mISDN_STACK_CLEARING, &st->status)) { test_and_set_bit(mISDN_STACK_STOPPED, &st->status); test_and_clear_bit(mISDN_STACK_RUNNING, &st->status); do_clear_stack(st); test_and_clear_bit(mISDN_STACK_CLEARING, &st->status); test_and_set_bit(mISDN_STACK_RESTART, &st->status); } if (test_and_clear_bit(mISDN_STACK_RESTART, 
&st->status)) { test_and_clear_bit(mISDN_STACK_STOPPED, &st->status); test_and_set_bit(mISDN_STACK_RUNNING, &st->status); if (!skb_queue_empty(&st->msgq)) test_and_set_bit(mISDN_STACK_WORK, &st->status); } if (test_bit(mISDN_STACK_ABORT, &st->status)) break; if (st->notify != NULL) { complete(st->notify); st->notify = NULL; } #ifdef MISDN_MSG_STATS st->sleep_cnt++; #endif test_and_clear_bit(mISDN_STACK_ACTIVE, &st->status); wait_event_interruptible(st->workq, (st->status & mISDN_STACK_ACTION_MASK)); if (*debug & DEBUG_MSG_THREAD) printk(KERN_DEBUG "%s: %s wake status %08lx\n", __func__, dev_name(&st->dev->dev), st->status); test_and_set_bit(mISDN_STACK_ACTIVE, &st->status); test_and_clear_bit(mISDN_STACK_WAKEUP, &st->status); if (test_bit(mISDN_STACK_STOPPED, &st->status)) { test_and_clear_bit(mISDN_STACK_RUNNING, &st->status); #ifdef MISDN_MSG_STATS st->stopped_cnt++; #endif } } #ifdef MISDN_MSG_STATS printk(KERN_DEBUG "mISDNStackd daemon for %s proceed %d " "msg %d sleep %d stopped\n", dev_name(&st->dev->dev), st->msg_cnt, st->sleep_cnt, st->stopped_cnt); printk(KERN_DEBUG "mISDNStackd daemon for %s utime(%ld) stime(%ld)\n", dev_name(&st->dev->dev), st->thread->utime, st->thread->stime); printk(KERN_DEBUG "mISDNStackd daemon for %s nvcsw(%ld) nivcsw(%ld)\n", dev_name(&st->dev->dev), st->thread->nvcsw, st->thread->nivcsw); printk(KERN_DEBUG "mISDNStackd daemon for %s killed now\n", dev_name(&st->dev->dev)); #endif test_and_set_bit(mISDN_STACK_KILLED, &st->status); test_and_clear_bit(mISDN_STACK_RUNNING, &st->status); test_and_clear_bit(mISDN_STACK_ACTIVE, &st->status); test_and_clear_bit(mISDN_STACK_ABORT, &st->status); skb_queue_purge(&st->msgq); st->thread = NULL; if (st->notify != NULL) { complete(st->notify); st->notify = NULL; } return 0; } static int l1_receive(struct mISDNchannel *ch, struct sk_buff *skb) { if (!ch->st) return -ENODEV; __net_timestamp(skb); _queue_message(ch->st, skb); return 0; } void set_channel_address(struct mISDNchannel *ch, u_int 
sapi, u_int tei) { ch->addr = sapi | (tei << 8); } void __add_layer2(struct mISDNchannel *ch, struct mISDNstack *st) { list_add_tail(&ch->list, &st->layer2); } void add_layer2(struct mISDNchannel *ch, struct mISDNstack *st) { mutex_lock(&st->lmutex); __add_layer2(ch, st); mutex_unlock(&st->lmutex); } static int st_own_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg) { if (!ch->st || !ch->st->layer1) return -EINVAL; return ch->st->layer1->ctrl(ch->st->layer1, cmd, arg); } int create_stack(struct mISDNdevice *dev) { struct mISDNstack *newst; int err; DECLARE_COMPLETION_ONSTACK(done); newst = kzalloc(sizeof(struct mISDNstack), GFP_KERNEL); if (!newst) { printk(KERN_ERR "kmalloc mISDN_stack failed\n"); return -ENOMEM; } newst->dev = dev; INIT_LIST_HEAD(&newst->layer2); INIT_HLIST_HEAD(&newst->l1sock.head); rwlock_init(&newst->l1sock.lock); init_waitqueue_head(&newst->workq); skb_queue_head_init(&newst->msgq); mutex_init(&newst->lmutex); dev->D.st = newst; err = create_teimanager(dev); if (err) { printk(KERN_ERR "kmalloc teimanager failed\n"); kfree(newst); return err; } dev->teimgr->peer = &newst->own; dev->teimgr->recv = mISDN_queue_message; dev->teimgr->st = newst; newst->layer1 = &dev->D; dev->D.recv = l1_receive; dev->D.peer = &newst->own; newst->own.st = newst; newst->own.ctrl = st_own_ctrl; newst->own.send = mISDN_queue_message; newst->own.recv = mISDN_queue_message; if (*debug & DEBUG_CORE_FUNC) printk(KERN_DEBUG "%s: st(%s)\n", __func__, dev_name(&newst->dev->dev)); newst->notify = &done; newst->thread = kthread_run(mISDNStackd, (void *)newst, "mISDN_%s", dev_name(&newst->dev->dev)); if (IS_ERR(newst->thread)) { err = PTR_ERR(newst->thread); printk(KERN_ERR "mISDN:cannot create kernel thread for %s (%d)\n", dev_name(&newst->dev->dev), err); delete_teimanager(dev->teimgr); kfree(newst); } else wait_for_completion(&done); return err; } int connect_layer1(struct mISDNdevice *dev, struct mISDNchannel *ch, u_int protocol, struct sockaddr_mISDN *adr) { struct 
mISDN_sock *msk = container_of(ch, struct mISDN_sock, ch); struct channel_req rq; int err; if (*debug & DEBUG_CORE_FUNC) printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n", __func__, dev_name(&dev->dev), protocol, adr->dev, adr->channel, adr->sapi, adr->tei); switch (protocol) { case ISDN_P_NT_S0: case ISDN_P_NT_E1: case ISDN_P_TE_S0: case ISDN_P_TE_E1: ch->recv = mISDN_queue_message; ch->peer = &dev->D.st->own; ch->st = dev->D.st; rq.protocol = protocol; rq.adr.channel = adr->channel; err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq); printk(KERN_DEBUG "%s: ret %d (dev %d)\n", __func__, err, dev->id); if (err) return err; write_lock_bh(&dev->D.st->l1sock.lock); sk_add_node(&msk->sk, &dev->D.st->l1sock.head); write_unlock_bh(&dev->D.st->l1sock.lock); break; default: return -ENOPROTOOPT; } return 0; } int connect_Bstack(struct mISDNdevice *dev, struct mISDNchannel *ch, u_int protocol, struct sockaddr_mISDN *adr) { struct channel_req rq, rq2; int pmask, err; struct Bprotocol *bp; if (*debug & DEBUG_CORE_FUNC) printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n", __func__, dev_name(&dev->dev), protocol, adr->dev, adr->channel, adr->sapi, adr->tei); ch->st = dev->D.st; pmask = 1 << (protocol & ISDN_P_B_MASK); if (pmask & dev->Bprotocols) { rq.protocol = protocol; rq.adr = *adr; err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq); if (err) return err; ch->recv = rq.ch->send; ch->peer = rq.ch; rq.ch->recv = ch->send; rq.ch->peer = ch; rq.ch->st = dev->D.st; } else { bp = get_Bprotocol4mask(pmask); if (!bp) return -ENOPROTOOPT; rq2.protocol = protocol; rq2.adr = *adr; rq2.ch = ch; err = bp->create(&rq2); if (err) return err; ch->recv = rq2.ch->send; ch->peer = rq2.ch; rq2.ch->st = dev->D.st; rq.protocol = rq2.protocol; rq.adr = *adr; err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq); if (err) { rq2.ch->ctrl(rq2.ch, CLOSE_CHANNEL, NULL); return err; } rq2.ch->recv = rq.ch->send; rq2.ch->peer = rq.ch; rq.ch->recv = rq2.ch->send; rq.ch->peer = rq2.ch; rq.ch->st = dev->D.st; } 
ch->protocol = protocol; ch->nr = rq.ch->nr; return 0; } int create_l2entity(struct mISDNdevice *dev, struct mISDNchannel *ch, u_int protocol, struct sockaddr_mISDN *adr) { struct channel_req rq; int err; if (*debug & DEBUG_CORE_FUNC) printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n", __func__, dev_name(&dev->dev), protocol, adr->dev, adr->channel, adr->sapi, adr->tei); rq.protocol = ISDN_P_TE_S0; if (dev->Dprotocols & (1 << ISDN_P_TE_E1)) rq.protocol = ISDN_P_TE_E1; switch (protocol) { case ISDN_P_LAPD_NT: rq.protocol = ISDN_P_NT_S0; if (dev->Dprotocols & (1 << ISDN_P_NT_E1)) rq.protocol = ISDN_P_NT_E1; case ISDN_P_LAPD_TE: ch->recv = mISDN_queue_message; ch->peer = &dev->D.st->own; ch->st = dev->D.st; rq.adr.channel = 0; err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq); printk(KERN_DEBUG "%s: ret 1 %d\n", __func__, err); if (err) break; rq.protocol = protocol; rq.adr = *adr; rq.ch = ch; err = dev->teimgr->ctrl(dev->teimgr, OPEN_CHANNEL, &rq); printk(KERN_DEBUG "%s: ret 2 %d\n", __func__, err); if (!err) { if ((protocol == ISDN_P_LAPD_NT) && !rq.ch) break; add_layer2(rq.ch, dev->D.st); rq.ch->recv = mISDN_queue_message; rq.ch->peer = &dev->D.st->own; rq.ch->ctrl(rq.ch, OPEN_CHANNEL, NULL); /* can't fail */ } break; default: err = -EPROTONOSUPPORT; } return err; } void delete_channel(struct mISDNchannel *ch) { struct mISDN_sock *msk = container_of(ch, struct mISDN_sock, ch); struct mISDNchannel *pch; if (!ch->st) { printk(KERN_WARNING "%s: no stack\n", __func__); return; } if (*debug & DEBUG_CORE_FUNC) printk(KERN_DEBUG "%s: st(%s) protocol(%x)\n", __func__, dev_name(&ch->st->dev->dev), ch->protocol); if (ch->protocol >= ISDN_P_B_START) { if (ch->peer) { ch->peer->ctrl(ch->peer, CLOSE_CHANNEL, NULL); ch->peer = NULL; } return; } switch (ch->protocol) { case ISDN_P_NT_S0: case ISDN_P_TE_S0: case ISDN_P_NT_E1: case ISDN_P_TE_E1: write_lock_bh(&ch->st->l1sock.lock); sk_del_node_init(&msk->sk); write_unlock_bh(&ch->st->l1sock.lock); 
ch->st->dev->D.ctrl(&ch->st->dev->D, CLOSE_CHANNEL, NULL); break; case ISDN_P_LAPD_TE: pch = get_channel4id(ch->st, ch->nr); if (pch) { mutex_lock(&ch->st->lmutex); list_del(&pch->list); mutex_unlock(&ch->st->lmutex); pch->ctrl(pch, CLOSE_CHANNEL, NULL); pch = ch->st->dev->teimgr; pch->ctrl(pch, CLOSE_CHANNEL, NULL); } else printk(KERN_WARNING "%s: no l2 channel\n", __func__); break; case ISDN_P_LAPD_NT: pch = ch->st->dev->teimgr; if (pch) { pch->ctrl(pch, CLOSE_CHANNEL, NULL); } else printk(KERN_WARNING "%s: no l2 channel\n", __func__); break; default: break; } return; } void delete_stack(struct mISDNdevice *dev) { struct mISDNstack *st = dev->D.st; DECLARE_COMPLETION_ONSTACK(done); if (*debug & DEBUG_CORE_FUNC) printk(KERN_DEBUG "%s: st(%s)\n", __func__, dev_name(&st->dev->dev)); if (dev->teimgr) delete_teimanager(dev->teimgr); if (st->thread) { if (st->notify) { printk(KERN_WARNING "%s: notifier in use\n", __func__); complete(st->notify); } st->notify = &done; test_and_set_bit(mISDN_STACK_ABORT, &st->status); test_and_set_bit(mISDN_STACK_WAKEUP, &st->status); wake_up_interruptible(&st->workq); wait_for_completion(&done); } if (!list_empty(&st->layer2)) printk(KERN_WARNING "%s: layer2 list not empty\n", __func__); if (!hlist_empty(&st->l1sock.head)) printk(KERN_WARNING "%s: layer1 list not empty\n", __func__); kfree(st); } void mISDN_initstack(u_int *dp) { debug = dp; }
gpl-2.0
tkawajir/android_kernel_lg_l04e
drivers/net/ethernet/8390/etherh.c
4800
20536
/* * linux/drivers/acorn/net/etherh.c * * Copyright (C) 2000-2002 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * NS8390 I-cubed EtherH and ANT EtherM specific driver * Thanks to I-Cubed for information on their cards. * EtherM conversion (C) 1999 Chris Kemp and Tim Watterton * EtherM integration (C) 2000 Aleph One Ltd (Tak-Shing Chan) * EtherM integration re-engineered by Russell King. * * Changelog: * 08-12-1996 RMK 1.00 Created * RMK 1.03 Added support for EtherLan500 cards * 23-11-1997 RMK 1.04 Added media autodetection * 16-04-1998 RMK 1.05 Improved media autodetection * 10-02-2000 RMK 1.06 Updated for 2.3.43 * 13-05-2000 RMK 1.07 Updated for 2.3.99-pre8 * 12-10-1999 CK/TEW EtherM driver first release * 21-12-2000 TTC EtherH/EtherM integration * 25-12-2000 RMK 1.08 Clean integration of EtherM into this driver. * 03-01-2002 RMK 1.09 Always enable IRQs if we're in the nic slot. 
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/jiffies.h>

#include <asm/ecard.h>
#include <asm/io.h>
#include <asm/system_info.h>

/*
 * Register accessors used by the shared lib8390.c core: all 8390
 * registers on these cards are memory-mapped, so port "I/O" is done
 * with readb/writeb through the remapped MEMC space.
 */
#define EI_SHIFT(x) (ei_local->reg_offset[x])
#define ei_inb(_p) readb((void __iomem *)_p)
#define ei_outb(_v,_p) writeb(_v,(void __iomem *)_p)
#define ei_inb_p(_p) readb((void __iomem *)_p)
#define ei_outb_p(_v,_p) writeb(_v,(void __iomem *)_p)

#define NET_DEBUG 0
#define DEBUG_INIT 2

#define DRV_NAME "etherh"
#define DRV_VERSION "1.11"

static char version[] __initdata =
    "EtherH/EtherM Driver (c) 2002-2004 Russell King " DRV_VERSION "\n";

#include "lib8390.c"

static unsigned int net_debug = NET_DEBUG;

/* Per-card private state, stored after the ei_device in netdev_priv(). */
struct etherh_priv {
    void __iomem *ioc_fast;     /* mapped IOC-fast region (EtherH 500 only) */
    void __iomem *memc;         /* mapped MEMC region (8390 + data port) */
    void __iomem *dma_base;     /* data port used for packet transfers */
    unsigned int id;            /* expansion card product id */
    void __iomem *ctrl_port;    /* control/status register */
    unsigned char ctrl;         /* soft copy of the write-only control reg */
    u32 supported;              /* ethtool SUPPORTED_* mask */
};

/* Static per-variant description, referenced from the ecard id table. */
struct etherh_data {
    unsigned long ns8390_offset;    /* 8390 registers, offset into MEMC */
    unsigned long dataport_offset;  /* data port, offset into MEMC */
    unsigned long ctrlport_offset;  /* control port offset */
    int ctrl_ioc;                   /* control port lives in IOC space */
    const char name[16];
    u32 supported;
    unsigned char tx_start_page;
    unsigned char stop_page;
};

MODULE_AUTHOR("Russell King");
MODULE_DESCRIPTION("EtherH/EtherM driver");
MODULE_LICENSE("GPL");

#define ETHERH500_DATAPORT 0x800    /* MEMC */
#define ETHERH500_NS8390 0x000      /* MEMC */
#define ETHERH500_CTRLPORT 0x800    /* IOC */

#define ETHERH600_DATAPORT 0x040    /* MEMC */
#define ETHERH600_NS8390 0x800      /* MEMC */
#define ETHERH600_CTRLPORT 0x200    /* MEMC */

#define ETHERH_CP_IE 1
#define ETHERH_CP_IF 2
#define ETHERH_CP_HEARTBEAT 2

#define ETHERH_TX_START_PAGE 1
#define ETHERH_STOP_PAGE 127

/*
 * These came from CK/TEW
 */
#define ETHERM_DATAPORT 0x200       /* MEMC */
#define ETHERM_NS8390 0x800         /* MEMC */
#define ETHERM_CTRLPORT 0x23c       /* MEMC */

#define ETHERM_TX_START_PAGE 64
#define ETHERM_STOP_PAGE 127

/* ------------------------------------------------------------------------ */

/* The etherh_priv area follows the ei_device in the netdev private data. */
#define etherh_priv(dev) \
 ((struct etherh_priv *)(((char *)netdev_priv(dev)) + sizeof(struct ei_device)))

/* Set bits in the (write-only) control register, keeping the soft copy. */
static inline void etherh_set_ctrl(struct etherh_priv *eh, unsigned char mask)
{
    unsigned char ctrl = eh->ctrl | mask;
    eh->ctrl = ctrl;
    writeb(ctrl, eh->ctrl_port);
}

/* Clear bits in the control register, keeping the soft copy. */
static inline void etherh_clr_ctrl(struct etherh_priv *eh, unsigned char mask)
{
    unsigned char ctrl = eh->ctrl & ~mask;
    eh->ctrl = ctrl;
    writeb(ctrl, eh->ctrl_port);
}

/* Read the card status register. */
static inline unsigned int etherh_get_stat(struct etherh_priv *eh)
{
    return readb(eh->ctrl_port);
}

static void etherh_irq_enable(ecard_t *ec, int irqnr)
{
    struct etherh_priv *eh = ec->irq_data;

    etherh_set_ctrl(eh, ETHERH_CP_IE);
}

static void etherh_irq_disable(ecard_t *ec, int irqnr)
{
    struct etherh_priv *eh = ec->irq_data;

    etherh_clr_ctrl(eh, ETHERH_CP_IE);
}

static expansioncard_ops_t etherh_ops = {
    .irqenable = etherh_irq_enable,
    .irqdisable = etherh_irq_disable,
};

/*
 * Select the media port (10base2 or 10baseT) according to dev->if_port.
 * The 600-series cards use bits in the EN0_RCNTHI register; the 500
 * uses the separate control port.
 */
static void etherh_setif(struct net_device *dev)
{
    struct ei_device *ei_local = netdev_priv(dev);
    unsigned long flags;
    void __iomem *addr;

    local_irq_save(flags);

    /* set the interface type */
    switch (etherh_priv(dev)->id) {
    case PROD_I3_ETHERLAN600:
    case PROD_I3_ETHERLAN600A:
        addr = (void __iomem *)dev->base_addr + EN0_RCNTHI;
        switch (dev->if_port) {
        case IF_PORT_10BASE2:
            writeb((readb(addr) & 0xf8) | 1, addr);
            break;
        case IF_PORT_10BASET:
            writeb((readb(addr) & 0xf8), addr);
            break;
        }
        break;

    case PROD_I3_ETHERLAN500:
        switch (dev->if_port) {
        case IF_PORT_10BASE2:
            etherh_clr_ctrl(etherh_priv(dev), ETHERH_CP_IF);
            break;
        case IF_PORT_10BASET:
            etherh_set_ctrl(etherh_priv(dev), ETHERH_CP_IF);
            break;
        }
        break;

    default:
        break;
    }

    local_irq_restore(flags);
}

/*
 * Return non-zero if the currently selected media appears to have link.
 * 10base2 is always reported as present; 10baseT is probed via a status
 * bit (600-series) or the heartbeat bit (500).
 */
static int etherh_getifstat(struct net_device *dev)
{
    struct ei_device *ei_local = netdev_priv(dev);
    void __iomem *addr;
    int stat = 0;

    switch (etherh_priv(dev)->id) {
    case PROD_I3_ETHERLAN600:
    case PROD_I3_ETHERLAN600A:
        addr = (void __iomem *)dev->base_addr + EN0_RCNTHI;
        switch (dev->if_port) {
        case IF_PORT_10BASE2:
            stat = 1;
            break;
        case IF_PORT_10BASET:
            stat = readb(addr) & 4;
            break;
        }
        break;

    case PROD_I3_ETHERLAN500:
        switch (dev->if_port) {
        case IF_PORT_10BASE2:
            stat = 1;
            break;
        case IF_PORT_10BASET:
            stat = etherh_get_stat(etherh_priv(dev)) & ETHERH_CP_HEARTBEAT;
            break;
        }
        break;

    default:
        stat = 0;
        break;
    }

    return stat != 0;
}

/*
 * Configure the interface.  Note that we ignore the other
 * parts of ifmap, since its mostly meaningless for this driver.
 */
static int etherh_set_config(struct net_device *dev, struct ifmap *map)
{
    switch (map->port) {
    case IF_PORT_10BASE2:
    case IF_PORT_10BASET:
        /*
         * If the user explicitly sets the interface
         * media type, turn off automedia detection.
         */
        dev->flags &= ~IFF_AUTOMEDIA;
        dev->if_port = map->port;
        break;

    default:
        return -EINVAL;
    }

    etherh_setif(dev);

    return 0;
}

/*
 * Reset the 8390 (hard reset).  Note that we can't actually do this.
 */
static void etherh_reset(struct net_device *dev)
{
    struct ei_device *ei_local = netdev_priv(dev);
    void __iomem *addr = (void __iomem *)dev->base_addr;

    writeb(E8390_NODMA+E8390_PAGE0+E8390_STOP, addr);

    /*
     * See if we need to change the interface type.
     * Note that we use 'interface_num' as a flag
     * to indicate that we need to change the media.
     */
    if (dev->flags & IFF_AUTOMEDIA && ei_local->interface_num) {
        ei_local->interface_num = 0;
        if (dev->if_port == IF_PORT_10BASET)
            dev->if_port = IF_PORT_10BASE2;
        else
            dev->if_port = IF_PORT_10BASET;
        etherh_setif(dev);
    }
}

/*
 * Write a block of data out to the 8390
 */
static void etherh_block_output (struct net_device *dev, int count, const unsigned char *buf, int start_page)
{
    struct ei_device *ei_local = netdev_priv(dev);
    unsigned long dma_start;
    void __iomem *dma_base, *addr;

    if (ei_local->dmaing) {
        printk(KERN_ERR "%s: DMAing conflict in etherh_block_input: "
            " DMAstat %d irqlock %d\n", dev->name,
            ei_local->dmaing, ei_local->irqlock);
        return;
    }

    /*
     * Make sure we have a round number of bytes if we're in word mode.
     */
    if (count & 1 && ei_local->word16)
        count++;

    ei_local->dmaing = 1;

    addr = (void __iomem *)dev->base_addr;
    dma_base = etherh_priv(dev)->dma_base;

    count = (count + 1) & ~1;
    writeb (E8390_NODMA | E8390_PAGE0 | E8390_START, addr + E8390_CMD);

    /* Dummy remote read sequence before the write (8390 RDC quirk). */
    writeb (0x42, addr + EN0_RCNTLO);
    writeb (0x00, addr + EN0_RCNTHI);
    writeb (0x42, addr + EN0_RSARLO);
    writeb (0x00, addr + EN0_RSARHI);
    writeb (E8390_RREAD | E8390_START, addr + E8390_CMD);

    udelay (1);

    writeb (ENISR_RDC, addr + EN0_ISR);
    writeb (count, addr + EN0_RCNTLO);
    writeb (count >> 8, addr + EN0_RCNTHI);
    writeb (0, addr + EN0_RSARLO);
    writeb (start_page, addr + EN0_RSARHI);
    writeb (E8390_RWRITE | E8390_START, addr + E8390_CMD);

    if (ei_local->word16)
        writesw (dma_base, buf, count >> 1);
    else
        writesb (dma_base, buf, count);

    dma_start = jiffies;

    /* Wait for remote DMA complete, bounded by a 20ms timeout. */
    while ((readb (addr + EN0_ISR) & ENISR_RDC) == 0)
        if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */
            printk(KERN_ERR "%s: timeout waiting for TX RDC\n",
                dev->name);
            etherh_reset (dev);
            __NS8390_init (dev, 1);
            break;
        }

    writeb (ENISR_RDC, addr + EN0_ISR);
    ei_local->dmaing = 0;
}

/*
 * Read a block of data from the 8390
 */
static void etherh_block_input (struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
{
    struct ei_device *ei_local = netdev_priv(dev);
    unsigned char *buf;
    void __iomem *dma_base, *addr;

    if (ei_local->dmaing) {
        printk(KERN_ERR "%s: DMAing conflict in etherh_block_input: "
            " DMAstat %d irqlock %d\n", dev->name,
            ei_local->dmaing, ei_local->irqlock);
        return;
    }

    ei_local->dmaing = 1;

    addr = (void __iomem *)dev->base_addr;
    dma_base = etherh_priv(dev)->dma_base;

    buf = skb->data;
    writeb (E8390_NODMA | E8390_PAGE0 | E8390_START, addr + E8390_CMD);
    writeb (count, addr + EN0_RCNTLO);
    writeb (count >> 8, addr + EN0_RCNTHI);
    writeb (ring_offset, addr + EN0_RSARLO);
    writeb (ring_offset >> 8, addr + EN0_RSARHI);
    writeb (E8390_RREAD | E8390_START, addr + E8390_CMD);

    if (ei_local->word16) {
        readsw (dma_base, buf, count >> 1);
        /* Pick up the trailing odd byte separately in word mode. */
        if (count & 1)
            buf[count - 1] = readb (dma_base);
    } else
        readsb (dma_base, buf, count);

    writeb (ENISR_RDC, addr + EN0_ISR);
    ei_local->dmaing = 0;
}

/*
 * Read a header from the 8390
 */
static void etherh_get_header (struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
{
    struct ei_device *ei_local = netdev_priv(dev);
    void __iomem *dma_base, *addr;

    if (ei_local->dmaing) {
        printk(KERN_ERR "%s: DMAing conflict in etherh_get_header: "
            " DMAstat %d irqlock %d\n", dev->name,
            ei_local->dmaing, ei_local->irqlock);
        return;
    }

    ei_local->dmaing = 1;

    addr = (void __iomem *)dev->base_addr;
    dma_base = etherh_priv(dev)->dma_base;

    writeb (E8390_NODMA | E8390_PAGE0 | E8390_START, addr + E8390_CMD);
    writeb (sizeof (*hdr), addr + EN0_RCNTLO);
    writeb (0, addr + EN0_RCNTHI);
    writeb (0, addr + EN0_RSARLO);
    writeb (ring_page, addr + EN0_RSARHI);
    writeb (E8390_RREAD | E8390_START, addr + E8390_CMD);

    if (ei_local->word16)
        readsw (dma_base, hdr, sizeof (*hdr) >> 1);
    else
        readsb (dma_base, hdr, sizeof (*hdr));

    writeb (ENISR_RDC, addr + EN0_ISR);
    ei_local->dmaing = 0;
}

/*
 * Open/initialize the board.  This is called (in the current kernel)
 * sometime after booting when the 'ifconfig' program is run.
 *
 * This routine should set everything up anew at each open, even
 * registers that "should" only need to be set once at boot, so that
 * there is non-reboot way to recover if something goes wrong.
 */
static int etherh_open(struct net_device *dev)
{
    struct ei_device *ei_local = netdev_priv(dev);

    if (!is_valid_ether_addr(dev->dev_addr)) {
        printk(KERN_WARNING "%s: invalid ethernet MAC address\n",
            dev->name);
        return -EINVAL;
    }

    if (request_irq(dev->irq, __ei_interrupt, 0, dev->name, dev))
        return -EAGAIN;

    /*
     * Make sure that we aren't going to change the
     * media type on the next reset - we are about to
     * do automedia manually now.
     */
    ei_local->interface_num = 0;

    /*
     * If we are doing automedia detection, do it now.
     * This is more reliable than the 8390's detection.
     */
    if (dev->flags & IFF_AUTOMEDIA) {
        dev->if_port = IF_PORT_10BASET;
        etherh_setif(dev);
        mdelay(1);
        if (!etherh_getifstat(dev)) {
            dev->if_port = IF_PORT_10BASE2;
            etherh_setif(dev);
        }
    } else
        etherh_setif(dev);

    etherh_reset(dev);
    __ei_open(dev);

    return 0;
}

/*
 * The inverse routine to etherh_open().
 */
static int etherh_close(struct net_device *dev)
{
    __ei_close (dev);
    free_irq (dev->irq, dev);
    return 0;
}

/*
 * Initialisation
 */

static void __init etherh_banner(void)
{
    static int version_printed;

    if (net_debug && version_printed++ == 0)
        printk(KERN_INFO "%s", version);
}

/*
 * Read the ethernet address string from the on board rom.
 * This is an ascii string...
 */
static int __devinit etherh_addr(char *addr, struct expansion_card *ec)
{
    struct in_chunk_dir cd;
    char *s;

    if (!ecard_readchunk(&cd, ec, 0xf5, 0)) {
        printk(KERN_ERR "%s: unable to read podule description string\n",
            dev_name(&ec->dev));
        goto no_addr;
    }

    /* Address is embedded as six hex bytes in parentheses, e.g. "(xx:...)" */
    s = strchr(cd.d.string, '(');
    if (s) {
        int i;

        for (i = 0; i < 6; i++) {
            addr[i] = simple_strtoul(s + 1, &s, 0x10);
            if (*s != (i == 5? ')' : ':'))
                break;
        }

        if (i == 6)
            return 0;
    }

    printk(KERN_ERR "%s: unable to parse MAC address: %s\n",
        dev_name(&ec->dev), cd.d.string);

 no_addr:
    return -ENODEV;
}

/*
 * Create an ethernet address from the system serial number.
 */
static int __init etherm_addr(char *addr)
{
    unsigned int serial;

    if (system_serial_low == 0 && system_serial_high == 0)
        return -ENODEV;

    serial = system_serial_low | system_serial_high;

    addr[0] = 0;
    addr[1] = 0;
    addr[2] = 0xa4;
    addr[3] = 0x10 + (serial >> 24);
    addr[4] = serial >> 16;
    addr[5] = serial >> 8;
    return 0;
}

static void etherh_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
    strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
    strlcpy(info->version, DRV_VERSION, sizeof(info->version));
    strlcpy(info->bus_info, dev_name(dev->dev.parent),
        sizeof(info->bus_info));
}

static int etherh_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
    cmd->supported = etherh_priv(dev)->supported;
    ethtool_cmd_speed_set(cmd, SPEED_10);
    cmd->duplex = DUPLEX_HALF;
    cmd->port = dev->if_port == IF_PORT_10BASET ? PORT_TP : PORT_BNC;
    cmd->autoneg = (dev->flags & IFF_AUTOMEDIA ? AUTONEG_ENABLE :
                            AUTONEG_DISABLE);
    return 0;
}

static int etherh_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
    switch (cmd->autoneg) {
    case AUTONEG_ENABLE:
        dev->flags |= IFF_AUTOMEDIA;
        break;

    case AUTONEG_DISABLE:
        switch (cmd->port) {
        case PORT_TP:
            dev->if_port = IF_PORT_10BASET;
            break;

        case PORT_BNC:
            dev->if_port = IF_PORT_10BASE2;
            break;

        default:
            return -EINVAL;
        }
        dev->flags &= ~IFF_AUTOMEDIA;
        break;

    default:
        return -EINVAL;
    }

    etherh_setif(dev);

    return 0;
}

static const struct ethtool_ops etherh_ethtool_ops = {
    .get_settings = etherh_get_settings,
    .set_settings = etherh_set_settings,
    .get_drvinfo = etherh_get_drvinfo,
};

static const struct net_device_ops etherh_netdev_ops = {
    .ndo_open = etherh_open,
    .ndo_stop = etherh_close,
    .ndo_set_config = etherh_set_config,
    .ndo_start_xmit = __ei_start_xmit,
    .ndo_tx_timeout = __ei_tx_timeout,
    .ndo_get_stats = __ei_get_stats,
    .ndo_set_rx_mode = __ei_set_multicast_list,
    .ndo_validate_addr = eth_validate_addr,
    .ndo_set_mac_address = eth_mac_addr,
    .ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
    .ndo_poll_controller = __ei_poll,
#endif
};

/* Register spacing differs between EtherH (<<2) and EtherM (<<5) cards. */
static u32 etherh_regoffsets[16];
static u32 etherm_regoffsets[16];

static int __devinit etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
{
    const struct etherh_data *data = id->data;
    struct ei_device *ei_local;
    struct net_device *dev;
    struct etherh_priv *eh;
    int ret;

    etherh_banner();

    ret = ecard_request_resources(ec);
    if (ret)
        goto out;

    dev = ____alloc_ei_netdev(sizeof(struct etherh_priv));
    if (!dev) {
        ret = -ENOMEM;
        goto release;
    }

    SET_NETDEV_DEV(dev, &ec->dev);

    dev->netdev_ops = &etherh_netdev_ops;
    dev->irq = ec->irq;
    dev->ethtool_ops = &etherh_ethtool_ops;

    if (data->supported & SUPPORTED_Autoneg)
        dev->flags |= IFF_AUTOMEDIA;
    if (data->supported & SUPPORTED_TP) {
        dev->flags |= IFF_PORTSEL;
        dev->if_port = IF_PORT_10BASET;
    } else if (data->supported & SUPPORTED_BNC) {
        dev->flags |= IFF_PORTSEL;
        dev->if_port = IF_PORT_10BASE2;
    } else
        dev->if_port = IF_PORT_UNKNOWN;

    eh = etherh_priv(dev);
    eh->supported = data->supported;
    eh->ctrl = 0;
    eh->id = ec->cid.product;
    eh->memc = ecardm_iomap(ec, ECARD_RES_MEMC, 0, PAGE_SIZE);
    if (!eh->memc) {
        ret = -ENOMEM;
        goto free;
    }

    eh->ctrl_port = eh->memc;
    if (data->ctrl_ioc) {
        eh->ioc_fast = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, PAGE_SIZE);
        if (!eh->ioc_fast) {
            ret = -ENOMEM;
            goto free;
        }
        eh->ctrl_port = eh->ioc_fast;
    }

    dev->base_addr = (unsigned long)eh->memc + data->ns8390_offset;
    eh->dma_base = eh->memc + data->dataport_offset;
    eh->ctrl_port += data->ctrlport_offset;

    /*
     * IRQ and control port handling - only for non-NIC slot cards.
     */
    if (ec->slot_no != 8) {
        ecard_setirq(ec, &etherh_ops, eh);
    } else {
        /*
         * If we're in the NIC slot, make sure the IRQ is enabled
         */
        etherh_set_ctrl(eh, ETHERH_CP_IE);
    }

    ei_local = netdev_priv(dev);
    spin_lock_init(&ei_local->page_lock);

    if (ec->cid.product == PROD_ANT_ETHERM) {
        etherm_addr(dev->dev_addr);
        ei_local->reg_offset = etherm_regoffsets;
    } else {
        etherh_addr(dev->dev_addr, ec);
        ei_local->reg_offset = etherh_regoffsets;
    }

    ei_local->name = dev->name;
    ei_local->word16 = 1;
    ei_local->tx_start_page = data->tx_start_page;
    ei_local->rx_start_page = ei_local->tx_start_page + TX_PAGES;
    ei_local->stop_page = data->stop_page;
    ei_local->reset_8390 = etherh_reset;
    ei_local->block_input = etherh_block_input;
    ei_local->block_output = etherh_block_output;
    ei_local->get_8390_hdr = etherh_get_header;
    ei_local->interface_num = 0;

    etherh_reset(dev);
    __NS8390_init(dev, 0);

    ret = register_netdev(dev);
    if (ret)
        goto free;

    printk(KERN_INFO "%s: %s in slot %d, %pM\n",
        dev->name, data->name, ec->slot_no, dev->dev_addr);

    ecard_set_drvdata(ec, dev);

    return 0;

 free:
    free_netdev(dev);
 release:
    ecard_release_resources(ec);
 out:
    return ret;
}

static void __devexit etherh_remove(struct expansion_card *ec)
{
    struct net_device *dev = ecard_get_drvdata(ec);

    ecard_set_drvdata(ec, NULL);

    unregister_netdev(dev);
    free_netdev(dev);

    ecard_release_resources(ec);
}
/* Per-variant card descriptions, matched via the ecard id table below. */
static struct etherh_data etherm_data = {
    .ns8390_offset = ETHERM_NS8390,
    .dataport_offset = ETHERM_NS8390 + ETHERM_DATAPORT,
    .ctrlport_offset = ETHERM_NS8390 + ETHERM_CTRLPORT,
    .name = "ANT EtherM",
    .supported = SUPPORTED_10baseT_Half,
    .tx_start_page = ETHERM_TX_START_PAGE,
    .stop_page = ETHERM_STOP_PAGE,
};

static struct etherh_data etherlan500_data = {
    .ns8390_offset = ETHERH500_NS8390,
    .dataport_offset = ETHERH500_NS8390 + ETHERH500_DATAPORT,
    .ctrlport_offset = ETHERH500_CTRLPORT,
    .ctrl_ioc = 1,  /* control port is in IOC-fast space on the 500 */
    .name = "i3 EtherH 500",
    .supported = SUPPORTED_10baseT_Half,
    .tx_start_page = ETHERH_TX_START_PAGE,
    .stop_page = ETHERH_STOP_PAGE,
};

static struct etherh_data etherlan600_data = {
    .ns8390_offset = ETHERH600_NS8390,
    .dataport_offset = ETHERH600_NS8390 + ETHERH600_DATAPORT,
    .ctrlport_offset = ETHERH600_NS8390 + ETHERH600_CTRLPORT,
    .name = "i3 EtherH 600",
    .supported = SUPPORTED_10baseT_Half | SUPPORTED_TP | SUPPORTED_BNC | SUPPORTED_Autoneg,
    .tx_start_page = ETHERH_TX_START_PAGE,
    .stop_page = ETHERH_STOP_PAGE,
};

static struct etherh_data etherlan600a_data = {
    .ns8390_offset = ETHERH600_NS8390,
    .dataport_offset = ETHERH600_NS8390 + ETHERH600_DATAPORT,
    .ctrlport_offset = ETHERH600_NS8390 + ETHERH600_CTRLPORT,
    .name = "i3 EtherH 600A",
    .supported = SUPPORTED_10baseT_Half | SUPPORTED_TP | SUPPORTED_BNC | SUPPORTED_Autoneg,
    .tx_start_page = ETHERH_TX_START_PAGE,
    .stop_page = ETHERH_STOP_PAGE,
};

static const struct ecard_id etherh_ids[] = {
    { MANU_ANT, PROD_ANT_ETHERM, &etherm_data },
    { MANU_I3, PROD_I3_ETHERLAN500, &etherlan500_data },
    { MANU_I3, PROD_I3_ETHERLAN600, &etherlan600_data },
    { MANU_I3, PROD_I3_ETHERLAN600A, &etherlan600a_data },
    { 0xffff, 0xffff }  /* terminator */
};

static struct ecard_driver etherh_driver = {
    .probe = etherh_probe,
    .remove = __devexit_p(etherh_remove),
    .id_table = etherh_ids,
    .drv = {
        .name = DRV_NAME,
    },
};

static int __init etherh_init(void)
{
    int i;

    /* Precompute register strides: EtherH spaces regs by 4, EtherM by 32. */
    for (i = 0; i < 16; i++) {
        etherh_regoffsets[i] = i << 2;
        etherm_regoffsets[i] = i << 5;
    }

    return ecard_register_driver(&etherh_driver);
}

static void __exit etherh_exit(void)
{
    ecard_remove_driver(&etherh_driver);
}

module_init(etherh_init);
module_exit(etherh_exit);
gpl-2.0
redglasses/android_kernel_lge_g3-V20f
drivers/video/backlight/kb3886_bl.c
8128
4950
/* * Backlight Driver for the KB3886 Backlight * * Copyright (c) 2007-2008 Claudio Nieder * * Based on corgi_bl.c by Richard Purdie and kb3886 driver by Robert Woerle * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/mutex.h> #include <linux/fb.h> #include <linux/backlight.h> #include <linux/delay.h> #include <linux/dmi.h> #define KB3886_PARENT 0x64 #define KB3886_IO 0x60 #define KB3886_ADC_DAC_PWM 0xC4 #define KB3886_PWM0_WRITE 0x81 #define KB3886_PWM0_READ 0x41 static DEFINE_MUTEX(bl_mutex); static void kb3886_bl_set_intensity(int intensity) { mutex_lock(&bl_mutex); intensity = intensity&0xff; outb(KB3886_ADC_DAC_PWM, KB3886_PARENT); msleep(10); outb(KB3886_PWM0_WRITE, KB3886_IO); msleep(10); outb(intensity, KB3886_IO); mutex_unlock(&bl_mutex); } struct kb3886bl_machinfo { int max_intensity; int default_intensity; int limit_mask; void (*set_bl_intensity)(int intensity); }; static struct kb3886bl_machinfo kb3886_bl_machinfo = { .max_intensity = 0xff, .default_intensity = 0xa0, .limit_mask = 0x7f, .set_bl_intensity = kb3886_bl_set_intensity, }; static struct platform_device kb3886bl_device = { .name = "kb3886-bl", .dev = { .platform_data = &kb3886_bl_machinfo, }, .id = -1, }; static struct platform_device *devices[] __initdata = { &kb3886bl_device, }; /* * Back to driver */ static int kb3886bl_intensity; static struct backlight_device *kb3886_backlight_device; static struct kb3886bl_machinfo *bl_machinfo; static unsigned long kb3886bl_flags; #define KB3886BL_SUSPENDED 0x01 static struct dmi_system_id __initdata kb3886bl_device_table[] = { { .ident = "Sahara Touch-iT", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "SDV"), DMI_MATCH(DMI_PRODUCT_NAME, "iTouch T201"), }, }, { } }; static int 
kb3886bl_send_intensity(struct backlight_device *bd) { int intensity = bd->props.brightness; if (bd->props.power != FB_BLANK_UNBLANK) intensity = 0; if (bd->props.fb_blank != FB_BLANK_UNBLANK) intensity = 0; if (kb3886bl_flags & KB3886BL_SUSPENDED) intensity = 0; bl_machinfo->set_bl_intensity(intensity); kb3886bl_intensity = intensity; return 0; } #ifdef CONFIG_PM static int kb3886bl_suspend(struct platform_device *pdev, pm_message_t state) { struct backlight_device *bd = platform_get_drvdata(pdev); kb3886bl_flags |= KB3886BL_SUSPENDED; backlight_update_status(bd); return 0; } static int kb3886bl_resume(struct platform_device *pdev) { struct backlight_device *bd = platform_get_drvdata(pdev); kb3886bl_flags &= ~KB3886BL_SUSPENDED; backlight_update_status(bd); return 0; } #else #define kb3886bl_suspend NULL #define kb3886bl_resume NULL #endif static int kb3886bl_get_intensity(struct backlight_device *bd) { return kb3886bl_intensity; } static const struct backlight_ops kb3886bl_ops = { .get_brightness = kb3886bl_get_intensity, .update_status = kb3886bl_send_intensity, }; static int kb3886bl_probe(struct platform_device *pdev) { struct backlight_properties props; struct kb3886bl_machinfo *machinfo = pdev->dev.platform_data; bl_machinfo = machinfo; if (!machinfo->limit_mask) machinfo->limit_mask = -1; memset(&props, 0, sizeof(struct backlight_properties)); props.type = BACKLIGHT_RAW; props.max_brightness = machinfo->max_intensity; kb3886_backlight_device = backlight_device_register("kb3886-bl", &pdev->dev, NULL, &kb3886bl_ops, &props); if (IS_ERR(kb3886_backlight_device)) return PTR_ERR(kb3886_backlight_device); platform_set_drvdata(pdev, kb3886_backlight_device); kb3886_backlight_device->props.power = FB_BLANK_UNBLANK; kb3886_backlight_device->props.brightness = machinfo->default_intensity; backlight_update_status(kb3886_backlight_device); return 0; } static int kb3886bl_remove(struct platform_device *pdev) { struct backlight_device *bd = platform_get_drvdata(pdev); 
backlight_device_unregister(bd); return 0; } static struct platform_driver kb3886bl_driver = { .probe = kb3886bl_probe, .remove = kb3886bl_remove, .suspend = kb3886bl_suspend, .resume = kb3886bl_resume, .driver = { .name = "kb3886-bl", }, }; static int __init kb3886_init(void) { if (!dmi_check_system(kb3886bl_device_table)) return -ENODEV; platform_add_devices(devices, ARRAY_SIZE(devices)); return platform_driver_register(&kb3886bl_driver); } static void __exit kb3886_exit(void) { platform_driver_unregister(&kb3886bl_driver); } module_init(kb3886_init); module_exit(kb3886_exit); MODULE_AUTHOR("Claudio Nieder <private@claudio.ch>"); MODULE_DESCRIPTION("Tabletkiosk Sahara Touch-iT Backlight Driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("dmi:*:svnSDV:pniTouchT201:*");
gpl-2.0
nunogil/lge-kernel-sniper
net/netrom/sysctl_net_netrom.c
9152
4319
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Copyright (C) 1996 Mike Shaver (shaver@zeroknowledge.com)
 */
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <net/ax25.h>
#include <net/netrom.h>

/*
 * Values taken from NET/ROM documentation.
 */
/* Clamp ranges for the proc_dointvec_minmax handlers below. */
static int min_quality[] = {0}, max_quality[] = {255};
static int min_obs[] = {0}, max_obs[] = {255};
static int min_ttl[] = {0}, max_ttl[] = {255};
static int min_t1[] = {5 * HZ};
static int max_t1[] = {600 * HZ};
static int min_n2[] = {2}, max_n2[] = {127};
static int min_t2[] = {1 * HZ};
static int max_t2[] = {60 * HZ};
static int min_t4[] = {1 * HZ};
static int max_t4[] = {1000 * HZ};
static int min_window[] = {1}, max_window[] = {127};
static int min_idle[] = {0 * HZ};
static int max_idle[] = {65535 * HZ};
static int min_route[] = {0}, max_route[] = {1};
static int min_fails[] = {1}, max_fails[] = {10};
static int min_reset[] = {0}, max_reset[] = {1};

static struct ctl_table_header *nr_table_header;

/* /proc/sys/net/netrom/* tunables, all range-checked integers. */
static ctl_table nr_table[] = {
    {
        .procname = "default_path_quality",
        .data = &sysctl_netrom_default_path_quality,
        .maxlen = sizeof(int),
        .mode = 0644,
        .proc_handler = proc_dointvec_minmax,
        .extra1 = &min_quality,
        .extra2 = &max_quality
    },
    {
        .procname = "obsolescence_count_initialiser",
        .data = &sysctl_netrom_obsolescence_count_initialiser,
        .maxlen = sizeof(int),
        .mode = 0644,
        .proc_handler = proc_dointvec_minmax,
        .extra1 = &min_obs,
        .extra2 = &max_obs
    },
    {
        .procname = "network_ttl_initialiser",
        .data = &sysctl_netrom_network_ttl_initialiser,
        .maxlen = sizeof(int),
        .mode = 0644,
        .proc_handler = proc_dointvec_minmax,
        .extra1 = &min_ttl,
        .extra2 = &max_ttl
    },
    {
        .procname = "transport_timeout",
        .data = &sysctl_netrom_transport_timeout,
        .maxlen = sizeof(int),
        .mode = 0644,
        .proc_handler = proc_dointvec_minmax,
        .extra1 = &min_t1,
        .extra2 = &max_t1
    },
    {
        .procname = "transport_maximum_tries",
        .data = &sysctl_netrom_transport_maximum_tries,
        .maxlen = sizeof(int),
        .mode = 0644,
        .proc_handler = proc_dointvec_minmax,
        .extra1 = &min_n2,
        .extra2 = &max_n2
    },
    {
        .procname = "transport_acknowledge_delay",
        .data = &sysctl_netrom_transport_acknowledge_delay,
        .maxlen = sizeof(int),
        .mode = 0644,
        .proc_handler = proc_dointvec_minmax,
        .extra1 = &min_t2,
        .extra2 = &max_t2
    },
    {
        .procname = "transport_busy_delay",
        .data = &sysctl_netrom_transport_busy_delay,
        .maxlen = sizeof(int),
        .mode = 0644,
        .proc_handler = proc_dointvec_minmax,
        .extra1 = &min_t4,
        .extra2 = &max_t4
    },
    {
        .procname = "transport_requested_window_size",
        .data = &sysctl_netrom_transport_requested_window_size,
        .maxlen = sizeof(int),
        .mode = 0644,
        .proc_handler = proc_dointvec_minmax,
        .extra1 = &min_window,
        .extra2 = &max_window
    },
    {
        .procname = "transport_no_activity_timeout",
        .data = &sysctl_netrom_transport_no_activity_timeout,
        .maxlen = sizeof(int),
        .mode = 0644,
        .proc_handler = proc_dointvec_minmax,
        .extra1 = &min_idle,
        .extra2 = &max_idle
    },
    {
        .procname = "routing_control",
        .data = &sysctl_netrom_routing_control,
        .maxlen = sizeof(int),
        .mode = 0644,
        .proc_handler = proc_dointvec_minmax,
        .extra1 = &min_route,
        .extra2 = &max_route
    },
    {
        .procname = "link_fails_count",
        .data = &sysctl_netrom_link_fails_count,
        .maxlen = sizeof(int),
        .mode = 0644,
        .proc_handler = proc_dointvec_minmax,
        .extra1 = &min_fails,
        .extra2 = &max_fails
    },
    {
        .procname = "reset",
        .data = &sysctl_netrom_reset_circuit,
        .maxlen = sizeof(int),
        .mode = 0644,
        .proc_handler = proc_dointvec_minmax,
        .extra1 = &min_reset,
        .extra2 = &max_reset
    },
    { }
};

static struct ctl_path nr_path[] = {
    { .procname = "net", },
    { .procname = "netrom", },
    { }
};

void __init nr_register_sysctl(void)
{
    nr_table_header = register_sysctl_paths(nr_path, nr_table);
}

void nr_unregister_sysctl(void)
{
    unregister_sysctl_table(nr_table_header);
}
gpl-2.0
andr7e/android_kernel_elephone_p6000
kernel/arch/blackfin/mach-bf561/dma.c
12224
3134
/* * the simple DMA Implementation for Blackfin * * Copyright 2007-2008 Analog Devices Inc. * * Licensed under the GPL-2 or later. */ #include <linux/module.h> #include <asm/blackfin.h> #include <asm/dma.h> struct dma_register * const dma_io_base_addr[MAX_DMA_CHANNELS] = { (struct dma_register *) DMA1_0_NEXT_DESC_PTR, (struct dma_register *) DMA1_1_NEXT_DESC_PTR, (struct dma_register *) DMA1_2_NEXT_DESC_PTR, (struct dma_register *) DMA1_3_NEXT_DESC_PTR, (struct dma_register *) DMA1_4_NEXT_DESC_PTR, (struct dma_register *) DMA1_5_NEXT_DESC_PTR, (struct dma_register *) DMA1_6_NEXT_DESC_PTR, (struct dma_register *) DMA1_7_NEXT_DESC_PTR, (struct dma_register *) DMA1_8_NEXT_DESC_PTR, (struct dma_register *) DMA1_9_NEXT_DESC_PTR, (struct dma_register *) DMA1_10_NEXT_DESC_PTR, (struct dma_register *) DMA1_11_NEXT_DESC_PTR, (struct dma_register *) DMA2_0_NEXT_DESC_PTR, (struct dma_register *) DMA2_1_NEXT_DESC_PTR, (struct dma_register *) DMA2_2_NEXT_DESC_PTR, (struct dma_register *) DMA2_3_NEXT_DESC_PTR, (struct dma_register *) DMA2_4_NEXT_DESC_PTR, (struct dma_register *) DMA2_5_NEXT_DESC_PTR, (struct dma_register *) DMA2_6_NEXT_DESC_PTR, (struct dma_register *) DMA2_7_NEXT_DESC_PTR, (struct dma_register *) DMA2_8_NEXT_DESC_PTR, (struct dma_register *) DMA2_9_NEXT_DESC_PTR, (struct dma_register *) DMA2_10_NEXT_DESC_PTR, (struct dma_register *) DMA2_11_NEXT_DESC_PTR, (struct dma_register *) MDMA_D0_NEXT_DESC_PTR, (struct dma_register *) MDMA_S0_NEXT_DESC_PTR, (struct dma_register *) MDMA_D1_NEXT_DESC_PTR, (struct dma_register *) MDMA_S1_NEXT_DESC_PTR, (struct dma_register *) MDMA_D2_NEXT_DESC_PTR, (struct dma_register *) MDMA_S2_NEXT_DESC_PTR, (struct dma_register *) MDMA_D3_NEXT_DESC_PTR, (struct dma_register *) MDMA_S3_NEXT_DESC_PTR, (struct dma_register *) IMDMA_D0_NEXT_DESC_PTR, (struct dma_register *) IMDMA_S0_NEXT_DESC_PTR, (struct dma_register *) IMDMA_D1_NEXT_DESC_PTR, (struct dma_register *) IMDMA_S1_NEXT_DESC_PTR, }; EXPORT_SYMBOL(dma_io_base_addr); int 
channel2irq(unsigned int channel) { int ret_irq = -1; switch (channel) { case CH_PPI0: ret_irq = IRQ_PPI0; break; case CH_PPI1: ret_irq = IRQ_PPI1; break; case CH_SPORT0_RX: ret_irq = IRQ_SPORT0_RX; break; case CH_SPORT0_TX: ret_irq = IRQ_SPORT0_TX; break; case CH_SPORT1_RX: ret_irq = IRQ_SPORT1_RX; break; case CH_SPORT1_TX: ret_irq = IRQ_SPORT1_TX; break; case CH_SPI: ret_irq = IRQ_SPI; break; case CH_UART_RX: ret_irq = IRQ_UART_RX; break; case CH_UART_TX: ret_irq = IRQ_UART_TX; break; case CH_MEM_STREAM0_SRC: case CH_MEM_STREAM0_DEST: ret_irq = IRQ_MEM_DMA0; break; case CH_MEM_STREAM1_SRC: case CH_MEM_STREAM1_DEST: ret_irq = IRQ_MEM_DMA1; break; case CH_MEM_STREAM2_SRC: case CH_MEM_STREAM2_DEST: ret_irq = IRQ_MEM_DMA2; break; case CH_MEM_STREAM3_SRC: case CH_MEM_STREAM3_DEST: ret_irq = IRQ_MEM_DMA3; break; case CH_IMEM_STREAM0_SRC: case CH_IMEM_STREAM0_DEST: ret_irq = IRQ_IMEM_DMA0; break; case CH_IMEM_STREAM1_SRC: case CH_IMEM_STREAM1_DEST: ret_irq = IRQ_IMEM_DMA1; break; } return ret_irq; }
gpl-2.0
varunchitre15/thunderzap-sa
drivers/block/paride/frpw.c
15552
7429
/*
	frpw.c	(c) 1996-8  Grant R. Guenther <grant@torque.net>
		            Under the terms of the GNU General Public License

	frpw.c is a low-level protocol driver for the Freecom "Power"
	parallel port IDE adapter.

	Some applications of this adapter may require a "printer" reset
	prior to loading the driver.  This can be done by loading and
	unloading the "lp" driver, or it can be done by this driver
	if you define FRPW_HARD_RESET.  The latter is not recommended
	as it may upset devices on other ports.

*/

/* Changes:

	1.01	GRG 1998.05.06 init_proto, release_proto
			       fix chip detect
			       added EPP-16 and EPP-32
	1.02    GRG 1998.09.23 added hard reset to initialisation process
	1.03    GRG 1998.12.14 made hard reset conditional

*/

#define FRPW_VERSION "1.03"

#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <asm/io.h>

#include "paride.h"

/* Control-port clock sequence used after writing a register address. */
#define cec4 w2(0xc);w2(0xe);w2(0xe);w2(0xc);w2(4);w2(4);w2(4);
/* Join two nibbles (low nibble of l, high nibble of h) into one byte. */
#define j44(l,h) (((l>>4)&0x0f)|(h&0xf0))

/* cont = 0 - access the IDE register file
   cont = 1 - access the IDE command set
*/

static int cont_map[2] = { 0x08, 0x10 };

/* Read one IDE register in 4-bit (nibble) mode. */
static int frpw_read_regr( PIA *pi, int cont, int regr )

{	int	h,l,r;

	r = regr + cont_map[cont];

	w2(4);
	w0(r); cec4;
	w2(6); l = r1();
	w2(4); h = r1();
	w2(4);

	return j44(l,h);

}

/* Write one IDE register. */
static void frpw_write_regr( PIA *pi, int cont, int regr, int val)

{	int r;

	r = regr + cont_map[cont];

	w2(4); w0(r); cec4;
	w0(val);
	w2(5);w2(7);w2(5);w2(4);
}

/*
 * Bulk read in the transfer mode selected by pi->mode:
 *   0 = 4-bit, 1 = 8-bit, 2 = EPP, 3 = EPP-8, 4 = EPP-16, 5 = EPP-32.
 * The EPP variants finish the last bytes one at a time so the adapter
 * can be switched out of burst mode cleanly.
 */
static void frpw_read_block_int( PIA *pi, char * buf, int count, int regr )

{       int     h, l, k, ph;

        switch(pi->mode) {

        case 0: w2(4); w0(regr); cec4;
                for (k=0;k<count;k++) {
                        w2(6); l = r1();
                        w2(4); h = r1();
                        buf[k] = j44(l,h);
                }
                w2(4);
                break;

        case 1: ph = 2;
                w2(4); w0(regr + 0xc0); cec4;
                w0(0xff);
                for (k=0;k<count;k++) {
                        w2(0xa4 + ph);
                        buf[k] = r0();
                        ph = 2 - ph;
                }
                w2(0xac); w2(0xa4); w2(4);
                break;

        case 2: w2(4); w0(regr + 0x80); cec4;
                for (k=0;k<count;k++) buf[k] = r4();
                w2(0xac); w2(0xa4);
                w2(4);
                break;

        case 3: w2(4); w0(regr + 0x80); cec4;
                for (k=0;k<count-2;k++) buf[k] = r4();
                w2(0xac); w2(0xa4);
                buf[count-2] = r4();
                buf[count-1] = r4();
                w2(4);
                break;

        case 4: w2(4); w0(regr + 0x80); cec4;
                for (k=0;k<(count/2)-1;k++) ((u16 *)buf)[k] = r4w();
                w2(0xac); w2(0xa4);
                buf[count-2] = r4();
                buf[count-1] = r4();
                w2(4);
                break;

        case 5: w2(4); w0(regr + 0x80); cec4;
                for (k=0;k<(count/4)-1;k++) ((u32 *)buf)[k] = r4l();
                buf[count-4] = r4();
                buf[count-3] = r4();
                w2(0xac); w2(0xa4);
                buf[count-2] = r4();
                buf[count-1] = r4();
                w2(4);
                break;

        }
}

static void frpw_read_block( PIA *pi, char * buf, int count)

{	frpw_read_block_int(pi,buf,count,0x08);
}

/* Bulk write to the IDE data register in the selected transfer mode. */
static void frpw_write_block( PIA *pi, char * buf, int count )

{	int	k;

	switch(pi->mode) {

	case 0:
	case 1:
	case 2: w2(4); w0(8); cec4; w2(5);
        	for (k=0;k<count;k++) {
			w0(buf[k]);
			w2(7);w2(5);
		}
		w2(4);
		break;

	case 3: w2(4); w0(0xc8); cec4; w2(5);
		for (k=0;k<count;k++) w4(buf[k]);
		w2(4);
		break;

        case 4: w2(4); w0(0xc8); cec4; w2(5);
                for (k=0;k<count/2;k++) w4w(((u16 *)buf)[k]);
                w2(4);
                break;

        case 5: w2(4); w0(0xc8); cec4; w2(5);
                for (k=0;k<count/4;k++) w4l(((u32 *)buf)[k]);
                w2(4);
                break;

	}
}

/* Save the host port state and put the adapter into a known state. */
static void frpw_connect ( PIA *pi  )

{       pi->saved_r0 = r0();
        pi->saved_r2 = r2();
	w2(4);
}

/* Deselect the adapter and restore the saved host port state. */
static void frpw_disconnect ( PIA *pi )

{       w2(4); w0(0x20); cec4;
	w0(pi->saved_r0);
        w2(pi->saved_r2);
}

/* Stub logic to see if PNP string is available - used to distinguish
   between the Xilinx and ASIC implementations of the Freecom adapter.
*/

static int frpw_test_pnp ( PIA *pi )

/*  returns chip_type:   0 = Xilinx, 1 = ASIC   */

{	int olddelay, a, b;

#ifdef FRPW_HARD_RESET
        w0(0); w2(8); udelay(50); w2(0xc);   /* parallel bus reset */
        mdelay(1500);
#endif

	olddelay = pi->delay;
	pi->delay = 10;

	pi->saved_r0 = r0();
        pi->saved_r2 = r2();

	w2(4); w0(4); w2(6); w2(7);
	a = r1() & 0xff; w2(4); b = r1() & 0xff;
	w2(0xc); w2(0xe); w2(4);

	pi->delay = olddelay;
        w0(pi->saved_r0);
        w2(pi->saved_r2);

	return ((~a&0x40) && (b&0x40));
}

/* We use the pi->private to remember the result of the PNP test.
   To make this work, private = port*2 + chip.  Yes, I know it's
   a hack :-(
*/

static int frpw_test_proto( PIA *pi, char * scratch, int verbose )

{       int     j, k, r;
	int	e[2] = {0,0};

	/* Run the chip-type probe once per port and cache the result. */
	if ((pi->private>>1) != pi->port)
		pi->private = frpw_test_pnp(pi) + 2*pi->port;

	if (((pi->private%2) == 0) && (pi->mode > 2)) {
		if (verbose)
			printk("%s: frpw: Xilinx does not support mode %d\n",
				pi->device, pi->mode);
		return 1;
	}

	if (((pi->private%2) == 1) && (pi->mode == 2)) {
		if (verbose)
			printk("%s: frpw: ASIC does not support mode 2\n",
				pi->device);
		return 1;
	}

	/* Register read/write check on both drive selects. */
	frpw_connect(pi);
	for (j=0;j<2;j++) {
		frpw_write_regr(pi,0,6,0xa0+j*0x10);
		for (k=0;k<256;k++) {
			frpw_write_regr(pi,0,2,k^0xaa);
			frpw_write_regr(pi,0,3,k^0x55);
			if (frpw_read_regr(pi,0,2) != (k^0xaa)) e[j]++;
		}
	}
	frpw_disconnect(pi);

	/* Block-read check against the adapter's counting test pattern. */
	frpw_connect(pi);
	frpw_read_block_int(pi,scratch,512,0x10);
	r = 0;
	for (k=0;k<128;k++) if (scratch[k] != k) r++;
	frpw_disconnect(pi);

	if (verbose)  {
		printk("%s: frpw: port 0x%x, chip %ld, mode %d, test=(%d,%d,%d)\n",
			pi->device,pi->port,(pi->private%2),pi->mode,e[0],e[1],r);
	}

	return (r || (e[0] && e[1]));
}


static void frpw_log_adapter( PIA *pi, char * scratch, int verbose )

{       char    *mode_string[6] = {"4-bit","8-bit","EPP",
				   "EPP-8","EPP-16","EPP-32"};

        printk("%s: frpw %s, Freecom (%s) adapter at 0x%x, ", pi->device,
		FRPW_VERSION,((pi->private%2) == 0)?"Xilinx":"ASIC",pi->port);
	printk("mode %d (%s), delay %d\n",pi->mode,
		mode_string[pi->mode],pi->delay);

}

static struct pi_protocol frpw = {
	.owner		= THIS_MODULE,
	.name		= "frpw",
	.max_mode	= 6,
	.epp_first	= 2,
	.default_delay	= 2,
	.max_units	= 1,
	.write_regr	= frpw_write_regr,
	.read_regr	= frpw_read_regr,
	.write_block	= frpw_write_block,
	.read_block	= frpw_read_block,
	.connect	= frpw_connect,
	.disconnect	= frpw_disconnect,
	.test_proto	= frpw_test_proto,
	.log_adapter	= frpw_log_adapter,
};

static int __init frpw_init(void)
{
	return paride_register(&frpw);
}

static void __exit frpw_exit(void)
{
	paride_unregister(&frpw);
}

MODULE_LICENSE("GPL");
module_init(frpw_init)
module_exit(frpw_exit)
gpl-2.0
Alexpux/GCC
gcc/testsuite/gcc.dg/inline-20.c
193
1535
/* Test -fno-gnu89-extern-inline.  */
/* { dg-do compile } */
/* { dg-options "-std=c99 -fno-gnu89-inline" } */
/* { dg-final { scan-assembler-not "dontgenerate" } } */
/* { dg-final { scan-assembler "func1" } } */
/* { dg-final { scan-assembler "func2" } } */
/* { dg-final { scan-assembler "func3" } } */
/* { dg-final { scan-assembler "func4" } } */
/* { dg-final { scan-assembler "func5" } } */
/* { dg-final { scan-assembler "func6" } } */
/* { dg-final { scan-assembler "func7" } } */
/* { dg-final { scan-assembler "func8" } } */
/* { dg-final { scan-assembler "func9" } } */

/* With -fno-gnu89-inline the compiler must follow C99 inline semantics:
   a plain "inline" definition provides no external definition, while any
   declaration with "extern" (or a non-inline declaration) forces one.
   The dontgenerate* functions must therefore emit no symbol, and every
   func* must.  */

/* C99 inline semantics imply __GNUC_STDC_INLINE__, not __GNUC_GNU_INLINE__.  */
#ifdef __GNUC_GNU_INLINE__
#error __GNUC_GNU_INLINE__ is defined
#endif

#ifndef __GNUC_STDC_INLINE__
#error __GNUC_STDC_INLINE__ is not defined
#endif

/* Every declaration uses "inline" without "extern": inline definition
   only, no symbol emitted.  */
inline int dontgenerate1 (void)
{
  return 1;
}

inline int dontgenerate2 (void);
inline int dontgenerate2 (void)
{
  return 2;
}

inline int dontgenerate3 (void)
{
  return 3;
}
inline int dontgenerate3 (void);

/* Some declaration includes "extern" (or omits "inline"): an external
   definition must be emitted, regardless of declaration order.  */
extern inline int func1 (void)
{
  return 1;
}

extern inline int func2 (void);
inline int func2 (void)
{
  return 2;
}

inline int func3 (void)
{
  return 3;
}
extern inline int func3 (void);

inline int func4 (void);
extern inline int func4 (void)
{
  return 4;
}

extern inline int func5 (void)
{
  return 5;
}
inline int func5 (void);

extern int func6 (void);
inline int func6 (void)
{
  return 6;
}

inline int func7 (void)
{
  return 7;
}
extern int func7 (void);

inline int func8 (void);
extern int func8 (void)
{
  return 8;
}

extern int func9 (void)
{
  return 9;
}
inline int func9 (void);
gpl-2.0
RadiumBot/Radium_jflte
kernel/ptrace.c
193
28829
/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/capability.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/audit.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <linux/cn_proc.h>


/* wait_on_bit() action: just schedule until JOBCTL_TRAPPING clears. */
static int ptrace_trapping_sleep_fn(void *flags)
{
	schedule();
	return 0;
}

/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
	BUG_ON(!list_empty(&child->ptrace_entry));
	list_add(&child->ptrace_entry, &new_parent->ptraced);
	child->parent = new_parent;
}

/**
 * __ptrace_unlink - unlink ptracee and restore its execution state
 * @child: ptracee to be unlinked
 *
 * Remove @child from the ptrace list, move it back to the original parent,
 * and restore the execution state so that it conforms to the group stop
 * state.
 *
 * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
 * exiting.  For PTRACE_DETACH, unless the ptracee has been killed between
 * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
 * If the ptracer is exiting, the ptracee can be in any state.
 *
 * After detach, the ptracee should be in a state which conforms to the
 * group stop.  If the group is stopped or in the process of stopping, the
 * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
 * up from TASK_TRACED.
 *
 * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
 * it goes through TRACED -> RUNNING -> STOPPED transition which is similar
 * to but in the opposite direction of what happens while attaching to a
 * stopped task.  However, in this direction, the intermediate RUNNING
 * state is not hidden even from the current ptracer and if it immediately
 * re-attaches and performs a WNOHANG wait(2), it may fail.
 *
 * CONTEXT:
 * write_lock_irq(tasklist_lock)
 */
void __ptrace_unlink(struct task_struct *child)
{
	BUG_ON(!child->ptrace);

	child->ptrace = 0;
	child->parent = child->real_parent;
	list_del_init(&child->ptrace_entry);

	spin_lock(&child->sighand->siglock);

	/*
	 * Clear all pending traps and TRAPPING.  TRAPPING should be
	 * cleared regardless of JOBCTL_STOP_PENDING.  Do it explicitly.
	 */
	task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
	task_clear_jobctl_trapping(child);

	/*
	 * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
	 * @child isn't dead.
	 */
	if (!(child->flags & PF_EXITING) &&
	    (child->signal->flags & SIGNAL_STOP_STOPPED ||
	     child->signal->group_stop_count)) {
		child->jobctl |= JOBCTL_STOP_PENDING;

		/*
		 * This is only possible if this thread was cloned by the
		 * traced task running in the stopped group, set the signal
		 * for the future reports.
		 * FIXME: we should change ptrace_init_task() to handle this
		 * case.
		 */
		if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
			child->jobctl |= SIGSTOP;
	}

	/*
	 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
	 * @child in the butt.  Note that @resume should be used iff @child
	 * is in TASK_TRACED; otherwise, we might unduly disrupt
	 * TASK_KILLABLE sleeps.
	 */
	if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
		ptrace_signal_wake_up(child, true);

	spin_unlock(&child->sighand->siglock);
}

/* Ensure that nothing can wake it up, even SIGKILL */
static bool ptrace_freeze_traced(struct task_struct *task)
{
	bool ret = false;

	/* Lockless, nobody but us can set this flag */
	if (task->jobctl & JOBCTL_LISTENING)
		return ret;

	spin_lock_irq(&task->sighand->siglock);
	if (task_is_traced(task) && !__fatal_signal_pending(task)) {
		task->state = __TASK_TRACED;
		ret = true;
	}
	spin_unlock_irq(&task->sighand->siglock);

	return ret;
}

/* Undo ptrace_freeze_traced(): put the tracee back into TASK_TRACED,
 * or wake it if a fatal signal arrived while it was frozen. */
static void ptrace_unfreeze_traced(struct task_struct *task)
{
	if (task->state != __TASK_TRACED)
		return;

	WARN_ON(!task->ptrace || task->parent != current);

	spin_lock_irq(&task->sighand->siglock);
	if (__fatal_signal_pending(task))
		wake_up_state(task, __TASK_TRACED);
	else
		task->state = TASK_TRACED;
	spin_unlock_irq(&task->sighand->siglock);
}

/**
 * ptrace_check_attach - check whether ptracee is ready for ptrace operation
 * @child: ptracee to check for
 * @ignore_state: don't check whether @child is currently %TASK_TRACED
 *
 * Check whether @child is being ptraced by %current and ready for further
 * ptrace operations.  If @ignore_state is %false, @child also should be in
 * %TASK_TRACED state and on return the child is guaranteed to be traced
 * and not executing.  If @ignore_state is %true, @child can be in any
 * state.
 *
 * CONTEXT:
 * Grabs and releases tasklist_lock and @child->sighand->siglock.
 *
 * RETURNS:
 * 0 on success, -ESRCH if %child is not ready.
 */
int ptrace_check_attach(struct task_struct *child, bool ignore_state)
{
	int ret = -ESRCH;

	/*
	 * We take the read lock around doing both checks to close a
	 * possible race where someone else was tracing our child and
	 * detached between these two checks.  After this locked check,
	 * we are sure that this is our traced child and that can only
	 * be changed by us so it's not changing right after this.
	 */
	read_lock(&tasklist_lock);
	if (child->ptrace && child->parent == current) {
		WARN_ON(child->state == __TASK_TRACED);
		/*
		 * child->sighand can't be NULL, release_task()
		 * does ptrace_unlink() before __exit_signal().
		 */
		if (ignore_state || ptrace_freeze_traced(child))
			ret = 0;
	}
	read_unlock(&tasklist_lock);

	if (!ret && !ignore_state) {
		if (!wait_task_inactive(child, __TASK_TRACED)) {
			/*
			 * This can only happen if may_ptrace_stop() fails and
			 * ptrace_stop() changes ->state back to TASK_RUNNING,
			 * so we should not worry about leaking __TASK_TRACED.
			 */
			WARN_ON(child->state == __TASK_TRACED);
			ret = -ESRCH;
		}
	}

	return ret;
}

/* Does the current task have CAP_SYS_PTRACE in @ns?  Skips the audit
 * record when the caller asked for PTRACE_MODE_NOAUDIT. */
static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
{
	if (mode & PTRACE_MODE_NOAUDIT)
		return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE);
	else
		return has_ns_capability(current, ns, CAP_SYS_PTRACE);
}

/* Core permission check for ptrace attach and /proc introspection.
 * Caller must hold task_lock(task).  Returns 0 if access is allowed. */
int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	const struct cred *cred = current_cred(), *tcred;

	/* May we inspect the given task?
	 * This check is used both for attaching with ptrace
	 * and for allowing access to sensitive information in /proc.
	 *
	 * ptrace_attach denies several cases that /proc allows
	 * because setting up the necessary parent/child relationship
	 * or halting the specified task is impossible.
	 */
	int dumpable = 0;
	/* Don't let security modules deny introspection */
	if (same_thread_group(task, current))
		return 0;
	rcu_read_lock();
	tcred = __task_cred(task);
	if (cred->user->user_ns == tcred->user->user_ns &&
	    (cred->uid == tcred->euid &&
	     cred->uid == tcred->suid &&
	     cred->uid == tcred->uid  &&
	     cred->gid == tcred->egid &&
	     cred->gid == tcred->sgid &&
	     cred->gid == tcred->gid))
		goto ok;
	if (ptrace_has_cap(tcred->user->user_ns, mode))
		goto ok;
	rcu_read_unlock();
	return -EPERM;
ok:
	rcu_read_unlock();
	smp_rmb();
	if (task->mm)
		dumpable = get_dumpable(task->mm);
	if (dumpable != SUID_DUMP_USER &&
	    !ptrace_has_cap(task_user_ns(task), mode))
		return -EPERM;

	/* Finally let any LSM veto the access. */
	return security_ptrace_access_check(task, mode);
}

/* Locked wrapper around __ptrace_may_access(); returns true if allowed. */
bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	int err;
	task_lock(task);
	err = __ptrace_may_access(task, mode);
	task_unlock(task);
	return !err;
}

/* Implement PTRACE_ATTACH and PTRACE_SEIZE: validate the request, take
 * the cred_guard_mutex and tasklist_lock, link the tracee to us, and
 * (for ATTACH) send SIGSTOP to trap it.  Returns 0 or -errno. */
static int ptrace_attach(struct task_struct *task, long request,
			 unsigned long addr,
			 unsigned long flags)
{
	bool seize = (request == PTRACE_SEIZE);
	int retval;

	retval = -EIO;
	if (seize) {
		if (addr != 0)
			goto out;
		if (flags & ~(unsigned long)PTRACE_O_MASK)
			goto out;
		flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT);
	} else {
		flags = PT_PTRACED;
	}

	audit_ptrace(task);

	retval = -EPERM;
	if (unlikely(task->flags & PF_KTHREAD))
		goto out;
	if (same_thread_group(task, current))
		goto out;

	/*
	 * Protect exec's credential calculations against our interference;
	 * SUID, SGID and LSM creds get determined differently
	 * under ptrace.
	 */
	retval = -ERESTARTNOINTR;
	if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
		goto out;

	task_lock(task);
	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
	task_unlock(task);
	if (retval)
		goto unlock_creds;

	write_lock_irq(&tasklist_lock);
	retval = -EPERM;
	if (unlikely(task->exit_state))
		goto unlock_tasklist;
	if (task->ptrace)
		goto unlock_tasklist;

	if (seize)
		flags |= PT_SEIZED;
	if (ns_capable(task_user_ns(task), CAP_SYS_PTRACE))
		flags |= PT_PTRACE_CAP;
	task->ptrace = flags;

	__ptrace_link(task, current);

	/* SEIZE doesn't trap tracee on attach */
	if (!seize)
		send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);

	spin_lock(&task->sighand->siglock);

	/*
	 * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
	 * TRAPPING, and kick it so that it transits to TRACED.  TRAPPING
	 * will be cleared if the child completes the transition or any
	 * event which clears the group stop states happens.  We'll wait
	 * for the transition to complete before returning from this
	 * function.
	 *
	 * This hides STOPPED -> RUNNING -> TRACED transition from the
	 * attaching thread but a different thread in the same group can
	 * still observe the transient RUNNING state.  IOW, if another
	 * thread's WNOHANG wait(2) on the stopped tracee races against
	 * ATTACH, the wait(2) may fail due to the transient RUNNING.
	 *
	 * The following task_is_stopped() test is safe as both transitions
	 * in and out of STOPPED are protected by siglock.
	 */
	if (task_is_stopped(task) &&
	    task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
		signal_wake_up_state(task, __TASK_STOPPED);

	spin_unlock(&task->sighand->siglock);

	retval = 0;
unlock_tasklist:
	write_unlock_irq(&tasklist_lock);
unlock_creds:
	mutex_unlock(&task->signal->cred_guard_mutex);
out:
	if (!retval) {
		/* Wait for the STOPPED->TRACED transition to finish. */
		wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT,
			    ptrace_trapping_sleep_fn, TASK_UNINTERRUPTIBLE);
		proc_ptrace_connector(task, PTRACE_ATTACH);
	}

	return retval;
}

/**
 * ptrace_traceme  --  helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
static int ptrace_traceme(void)
{
	int ret = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Are we already being traced? */
	if (!current->ptrace) {
		ret = security_ptrace_traceme(current->parent);
		/*
		 * Check PF_EXITING to ensure ->real_parent has not passed
		 * exit_ptrace().  Otherwise we don't report the error but
		 * pretend ->real_parent untraces us right after return.
		 */
		if (!ret && !(current->real_parent->flags & PF_EXITING)) {
			current->ptrace = PT_PTRACED;
			__ptrace_link(current, current->real_parent);
		}
	}
	write_unlock_irq(&tasklist_lock);

	return ret;
}

/*
 * Called with irqs disabled, returns true if childs should reap themselves.
 */
static int ignoring_children(struct sighand_struct *sigh)
{
	int ret;
	spin_lock(&sigh->siglock);
	ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
	      (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
	spin_unlock(&sigh->siglock);
	return ret;
}

/*
 * Called with tasklist_lock held for writing.
 * Unlink a traced task, and clean it up if it was a traced zombie.
 * Return true if it needs to be reaped with release_task().
 * (We can't call release_task() here because we already hold tasklist_lock.)
 *
 * If it's a zombie, our attachedness prevented normal parent notification
 * or self-reaping.  Do notification now if it would have happened earlier.
 * If it should reap itself, return true.
 *
 * If it's our own child, there is no notification to do. But if our normal
 * children self-reap, then this child was prevented by ptrace and we must
 * reap it now, in that case we must also wake up sub-threads sleeping in
 * do_wait().
 */
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
	bool dead;

	__ptrace_unlink(p);

	if (p->exit_state != EXIT_ZOMBIE)
		return false;

	dead = !thread_group_leader(p);

	if (!dead && thread_group_empty(p)) {
		if (!same_thread_group(p->real_parent, tracer))
			dead = do_notify_parent(p, p->exit_signal);
		else if (ignoring_children(tracer->sighand)) {
			__wake_up_parent(p, tracer);
			dead = true;
		}
	}
	/* Mark it as in the process of being reaped. */
	if (dead)
		p->exit_state = EXIT_DEAD;
	return dead;
}

/* Implement PTRACE_DETACH: disable arch hooks, unlink the tracee, and
 * reap it if __ptrace_detach() says it became EXIT_DEAD. */
static int ptrace_detach(struct task_struct *child, unsigned int data)
{
	bool dead = false;

	if (!valid_signal(data))
		return -EIO;

	/* Architecture-specific hardware disable .. */
	ptrace_disable(child);
	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

	write_lock_irq(&tasklist_lock);
	/*
	 * This child can be already killed. Make sure de_thread() or
	 * our sub-thread doing do_wait() didn't do release_task() yet.
	 */
	if (child->ptrace) {
		child->exit_code = data;
		dead = __ptrace_detach(current, child);
	}
	write_unlock_irq(&tasklist_lock);

	proc_ptrace_connector(child, PTRACE_DETACH);
	if (unlikely(dead))
		release_task(child);

	return 0;
}

/*
 * Detach all tasks we were using ptrace on. Called with tasklist held
 * for writing, and returns with it held too. But note it can release
 * and reacquire the lock.
 */
void exit_ptrace(struct task_struct *tracer)
	__releases(&tasklist_lock)
	__acquires(&tasklist_lock)
{
	struct task_struct *p, *n;
	LIST_HEAD(ptrace_dead);

	if (likely(list_empty(&tracer->ptraced)))
		return;

	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
		if (__ptrace_detach(tracer, p))
			list_add(&p->ptrace_entry, &ptrace_dead);
	}

	/* Drop the lock while release_task() runs on the dead ones. */
	write_unlock_irq(&tasklist_lock);
	BUG_ON(!list_empty(&tracer->ptraced));

	list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);
	}

	write_lock_irq(&tasklist_lock);
}

/* Copy @len bytes from tracee address @src to userspace @dst in 128-byte
 * chunks.  Returns bytes copied, or -EIO/-EFAULT on first-chunk failure. */
int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		retval = access_process_vm(tsk, src, buf, this_len, 0);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		if (copy_to_user(dst, buf, retval))
			return -EFAULT;
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

/* Copy @len bytes from userspace @src into tracee address @dst in
 * 128-byte chunks.  Returns bytes copied, or -EIO/-EFAULT. */
int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		if (copy_from_user(buf, src, this_len))
			return -EFAULT;
		retval = access_process_vm(tsk, dst, buf, this_len, 1);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

/* Implement PTRACE_SETOPTIONS: install the PTRACE_O_* option bits. */
static int ptrace_setoptions(struct task_struct *child, unsigned long data)
{
	unsigned flags;

	if (data & ~(unsigned long)PTRACE_O_MASK)
		return -EINVAL;

	/* Avoid intermediate state when all opts are cleared */
	flags = child->ptrace;
	flags &= ~(PTRACE_O_MASK << PT_OPT_FLAG_SHIFT);
	flags |= (data << PT_OPT_FLAG_SHIFT);
	child->ptrace = flags;

	return 0;
}

/* Copy the tracee's pending siginfo (if any) into *info under siglock. */
static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			*info = *child->last_siginfo;
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}

/* Overwrite the tracee's pending siginfo (if any) from *info under siglock. */
static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			*child->last_siginfo = *info;
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}


/* Not every architecture defines these requests; fold the missing ones
 * to constant 0 so the tests below compile away. */
#ifdef PTRACE_SINGLESTEP
#define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)
#else
#define is_singlestep(request)		0
#endif

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)		((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)		0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)	0
#endif

/* Common helper for the resuming requests (CONT, SYSCALL, single-step,
 * sysemu, KILL): program the stepping hardware/flags, record the signal
 * to deliver in ->exit_code, and wake the tracee. */
static int ptrace_resume(struct task_struct *child, long request,
			 unsigned long data)
{
	bool need_siglock;

	if (!valid_signal(data))
		return -EIO;

	if (request == PTRACE_SYSCALL)
		set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

#ifdef TIF_SYSCALL_EMU
	if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
		set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif

	if (is_singleblock(request)) {
		if (unlikely(!arch_has_block_step()))
			return -EIO;
		user_enable_block_step(child);
	} else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
		if (unlikely(!arch_has_single_step()))
			return -EIO;
		user_enable_single_step(child);
	} else {
		user_disable_single_step(child);
	}

	/*
	 * Change ->exit_code and ->state under siglock to avoid the race
	 * with wait_task_stopped() in between; a non-zero ->exit_code will
	 * wrongly look like another report from tracee.
	 *
	 * Note that we need siglock even if ->exit_code == data and/or this
	 * status was not reported yet, the new status must not be cleared by
	 * wait_task_stopped() after resume.
	 *
	 * If data == 0 we do not care if wait_task_stopped() reports the old
	 * status and clears the code too; this can't race with the tracee, it
	 * takes siglock after resume.
	 */
	need_siglock = data && !thread_group_empty(current);
	if (need_siglock)
		spin_lock_irq(&child->sighand->siglock);
	child->exit_code = data;
	wake_up_state(child, __TASK_TRACED);
	if (need_siglock)
		spin_unlock_irq(&child->sighand->siglock);

	return 0;
}

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK

/* Find the regset with core-note type @type in @view, or NULL. */
static const struct user_regset *
find_regset(const struct user_regset_view *view, unsigned int type)
{
	const struct user_regset *regset;
	int n;

	for (n = 0; n < view->n; ++n) {
		regset = view->regsets + n;
		if (regset->core_note_type == type)
			return regset;
	}

	return NULL;
}

/* Implement PTRACE_GETREGSET/PTRACE_SETREGSET: locate the regset named
 * by @type and copy (a size-clamped view of) it to/from user memory. */
static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
			 struct iovec *kiov)
{
	const struct user_regset_view *view = task_user_regset_view(task);
	const struct user_regset *regset = find_regset(view, type);
	int regset_no;

	if (!regset || (kiov->iov_len % regset->size) != 0)
		return -EINVAL;

	regset_no = regset - view->regsets;
	kiov->iov_len = min(kiov->iov_len,
			    (__kernel_size_t) (regset->n * regset->size));

	if (req == PTRACE_GETREGSET)
		return copy_regset_to_user(task, view, regset_no, 0,
					   kiov->iov_len, kiov->iov_base);
	else
		return copy_regset_from_user(task, view, regset_no, 0,
					     kiov->iov_len, kiov->iov_base);
}

#endif

/* Generic dispatcher for the architecture-independent ptrace requests;
 * arch_ptrace() falls back to this for requests it does not handle. */
int ptrace_request(struct task_struct *child, long request,
		   unsigned long addr, unsigned long data)
{
	bool seized = child->ptrace & PT_SEIZED;
	int ret = -EIO;
	siginfo_t siginfo, *si;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = datavp;
	unsigned long flags;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		return generic_ptrace_peekdata(child, addr, data);
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
	case PTRACE_OLDSETOPTIONS:
#endif
	case PTRACE_SETOPTIONS:
		ret = ptrace_setoptions(child, data);
		break;
	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message, datalp);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user(datavp, &siginfo);
		break;

	case PTRACE_SETSIGINFO:
		if (copy_from_user(&siginfo, datavp, sizeof siginfo))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;

	case PTRACE_INTERRUPT:
		/*
		 * Stop tracee without any side-effect on signal or job
		 * control.  At least one trap is guaranteed to happen
		 * after this request.  If @child is already trapped, the
		 * current trap is not disturbed and another trap will
		 * happen after the current trap is ended with PTRACE_CONT.
		 *
		 * The actual trap might not be PTRACE_EVENT_STOP trap but
		 * the pending condition is cleared regardless.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		/*
		 * INTERRUPT doesn't disturb existing trap sans one
		 * exception.  If ptracer issued LISTEN for the current
		 * STOP, this INTERRUPT should clear LISTEN and re-trap
		 * tracee into STOP.
		 */
		if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
			ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);

		unlock_task_sighand(child, &flags);
		ret = 0;
		break;

	case PTRACE_LISTEN:
		/*
		 * Listen for events.  Tracee must be in STOP.  It's not
		 * resumed per-se but is not considered to be in TRACED by
		 * wait(2) or ptrace(2).  If an async event (e.g. group
		 * stop state change) happens, tracee will enter STOP trap
		 * again.  Alternatively, ptracer can issue INTERRUPT to
		 * finish listening and re-trap tracee into STOP.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		si = child->last_siginfo;
		if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
			child->jobctl |= JOBCTL_LISTENING;
			/*
			 * If NOTIFY is set, it means event happened between
			 * start of this trap and now.  Trigger re-trap.
			 */
			if (child->jobctl & JOBCTL_TRAP_NOTIFY)
				ptrace_signal_wake_up(child, true);
			ret = 0;
		}
		unlock_task_sighand(child, &flags);
		break;

	case PTRACE_DETACH:	 /* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		break;

#ifdef CONFIG_BINFMT_ELF_FDPIC
	case PTRACE_GETFDPIC: {
		struct mm_struct *mm = get_task_mm(child);
		unsigned long tmp = 0;

		ret = -ESRCH;
		if (!mm)
			break;

		switch (addr) {
		case PTRACE_GETFDPIC_EXEC:
			tmp = mm->context.exec_fdpic_loadmap;
			break;
		case PTRACE_GETFDPIC_INTERP:
			tmp = mm->context.interp_fdpic_loadmap;
			break;
		default:
			break;
		}
		mmput(mm);

		ret = put_user(tmp, datalp);
		break;
	}
#endif

#ifdef PTRACE_SINGLESTEP
	case PTRACE_SINGLESTEP:
#endif
#ifdef PTRACE_SINGLEBLOCK
	case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
	case PTRACE_SYSEMU:
	case PTRACE_SYSEMU_SINGLESTEP:
#endif
	case PTRACE_SYSCALL:
	case PTRACE_CONT:
		return ptrace_resume(child, request, data);

	case PTRACE_KILL:
		if (child->exit_state)	/* already dead */
			return 0;
		return ptrace_resume(child, request, SIGKILL);

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET:
	{
		struct iovec kiov;
		struct iovec __user *uiov = datavp;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(kiov.iov_base, &uiov->iov_base) ||
		    __get_user(kiov.iov_len, &uiov->iov_len))
			return -EFAULT;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif
	default:
		break;
	}

	return ret;
}

/* Look up the task for @pid in the caller's pid namespace and take a
 * reference; returns ERR_PTR(-ESRCH) if it does not exist. */
static struct task_struct *ptrace_get_task_struct(pid_t pid)
{
	struct task_struct *child;

	rcu_read_lock();
	child = find_task_by_vpid(pid);
	if (child)
		get_task_struct(child);
	rcu_read_unlock();

	if (!child)
		return ERR_PTR(-ESRCH);
	return child;
}

#ifndef arch_ptrace_attach
#define arch_ptrace_attach(child)	do { } while (0)
#endif

/* The ptrace(2) system call entry point. */
SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
		unsigned long, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		if (!ret)
			arch_ptrace_attach(current);
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (ret < 0)
		goto out_put_task_struct;

	ret = arch_ptrace(child, request, addr, data);
	if (ret || request != PTRACE_DETACH)
		ptrace_unfreeze_traced(child);

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}

/* Read one word from the tracee at @addr and store it at user @data. */
int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	unsigned long tmp;
	int copied;

	copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
	if (copied != sizeof(tmp))
		return -EIO;
	return put_user(tmp, (unsigned long __user *)data);
}

/* Write the word @data into the tracee at @addr. */
int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	int copied;

	copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);
	return (copied == sizeof(data)) ? 0 : -EIO;
}

#if defined CONFIG_COMPAT
#include <linux/compat.h>

/* Compat (32-bit on 64-bit kernel) counterpart of ptrace_request();
 * handles the requests whose user-visible layout differs, and falls
 * through to ptrace_request() for everything else. */
int compat_ptrace_request(struct task_struct *child, compat_long_t request,
			  compat_ulong_t addr, compat_ulong_t data)
{
	compat_ulong_t __user *datap = compat_ptr(data);
	compat_ulong_t word;
	siginfo_t siginfo;
	int ret;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		ret = access_process_vm(child, addr, &word, sizeof(word), 0);
		if (ret != sizeof(word))
			ret = -EIO;
		else
			ret = put_user(word, datap);
		break;

	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		ret = access_process_vm(child, addr, &data, sizeof(data), 1);
		ret = (ret != sizeof(data) ? -EIO : 0);
		break;

	case PTRACE_GETEVENTMSG:
		ret = put_user((compat_ulong_t) child->ptrace_message, datap);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user32(
				(struct compat_siginfo __user *) datap,
				&siginfo);
		break;

	case PTRACE_SETSIGINFO:
		memset(&siginfo, 0, sizeof siginfo);
		if (copy_siginfo_from_user32(
			    &siginfo, (struct compat_siginfo __user *) datap))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET:
	{
		struct iovec kiov;
		struct compat_iovec __user *uiov =
			(struct compat_iovec __user *) datap;
		compat_uptr_t ptr;
		compat_size_t len;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(ptr, &uiov->iov_base) ||
		    __get_user(len, &uiov->iov_len))
			return -EFAULT;

		kiov.iov_base = compat_ptr(ptr);
		kiov.iov_len = len;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
	}

	return ret;
}

/* The compat ptrace(2) system call entry point. */
asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
				  compat_long_t addr, compat_long_t data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (!ret) {
		ret = compat_arch_ptrace(child, request, addr, data);
		if (ret || request != PTRACE_DETACH)
			ptrace_unfreeze_traced(child);
	}

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}
#endif	/* CONFIG_COMPAT */

#ifdef CONFIG_HAVE_HW_BREAKPOINT
/* Take a reference on the task's hardware-breakpoint state; fails (-1)
 * once the refcount has dropped to zero. */
int ptrace_get_breakpoints(struct task_struct *tsk)
{
	if (atomic_inc_not_zero(&tsk->ptrace_bp_refcnt))
		return 0;

	return -1;
}

/* Drop a reference; flush the HW breakpoints on the final put. */
void ptrace_put_breakpoints(struct task_struct *tsk)
{
	if (atomic_dec_and_test(&tsk->ptrace_bp_refcnt))
		flush_ptrace_hw_breakpoint(tsk);
}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
gpl-2.0
cile/pyramid-kernel
drivers/staging/dream/camera/mt9t013_reg.c
1473
6811
/* * Copyright (C) 2009 QUALCOMM Incorporated. */ #include "mt9t013.h" #include <linux/kernel.h> struct reg_struct const mt9t013_reg_pat[2] = { { /* Preview 2x2 binning 20fps, pclk MHz, MCLK 24MHz */ /* vt_pix_clk_div:REG=0x0300 update get_snapshot_fps * if this change */ 8, /* vt_sys_clk_div: REG=0x0302 update get_snapshot_fps * if this change */ 1, /* pre_pll_clk_div REG=0x0304 update get_snapshot_fps * if this change */ 2, /* pll_multiplier REG=0x0306 60 for 30fps preview, 40 * for 20fps preview * 46 for 30fps preview, try 47/48 to increase further */ 46, /* op_pix_clk_div REG=0x0308 */ 8, /* op_sys_clk_div REG=0x030A */ 1, /* scale_m REG=0x0404 */ 16, /* row_speed REG=0x3016 */ 0x0111, /* x_addr_start REG=0x3004 */ 8, /* x_addr_end REG=0x3008 */ 2053, /* y_addr_start REG=0x3002 */ 8, /* y_addr_end REG=0x3006 */ 1541, /* read_mode REG=0x3040 */ 0x046C, /* x_output_size REG=0x034C */ 1024, /* y_output_size REG=0x034E */ 768, /* line_length_pck REG=0x300C */ 2616, /* frame_length_lines REG=0x300A */ 916, /* coarse_int_time REG=0x3012 */ 16, /* fine_int_time REG=0x3014 */ 1461 }, { /*Snapshot */ /* vt_pix_clk_div REG=0x0300 update get_snapshot_fps * if this change */ 8, /* vt_sys_clk_div REG=0x0302 update get_snapshot_fps * if this change */ 1, /* pre_pll_clk_div REG=0x0304 update get_snapshot_fps * if this change */ 2, /* pll_multiplier REG=0x0306 50 for 15fps snapshot, * 40 for 10fps snapshot * 46 for 30fps snapshot, try 47/48 to increase further */ 46, /* op_pix_clk_div REG=0x0308 */ 8, /* op_sys_clk_div REG=0x030A */ 1, /* scale_m REG=0x0404 */ 16, /* row_speed REG=0x3016 */ 0x0111, /* x_addr_start REG=0x3004 */ 8, /* x_addr_end REG=0x3008 */ 2071, /* y_addr_start REG=0x3002 */ 8, /* y_addr_end REG=0x3006 */ 1551, /* read_mode REG=0x3040 */ 0x0024, /* x_output_size REG=0x034C */ 2064, /* y_output_size REG=0x034E */ 1544, /* line_length_pck REG=0x300C */ 2952, /* frame_length_lines REG=0x300A */ 1629, /* coarse_int_time REG=0x3012 */ 16, /* fine_int_time 
REG=0x3014 */ 733 } }; struct mt9t013_i2c_reg_conf mt9t013_test_tbl[] = { { 0x3044, 0x0544 & 0xFBFF }, { 0x30CA, 0x0004 | 0x0001 }, { 0x30D4, 0x9020 & 0x7FFF }, { 0x31E0, 0x0003 & 0xFFFE }, { 0x3180, 0x91FF & 0x7FFF }, { 0x301A, (0x10CC | 0x8000) & 0xFFF7 }, { 0x301E, 0x0000 }, { 0x3780, 0x0000 }, }; /* [Lens shading 85 Percent TL84] */ struct mt9t013_i2c_reg_conf mt9t013_lc_tbl[] = { { 0x360A, 0x0290 }, /* P_RD_P0Q0 */ { 0x360C, 0xC92D }, /* P_RD_P0Q1 */ { 0x360E, 0x0771 }, /* P_RD_P0Q2 */ { 0x3610, 0xE38C }, /* P_RD_P0Q3 */ { 0x3612, 0xD74F }, /* P_RD_P0Q4 */ { 0x364A, 0x168C }, /* P_RD_P1Q0 */ { 0x364C, 0xCACB }, /* P_RD_P1Q1 */ { 0x364E, 0x8C4C }, /* P_RD_P1Q2 */ { 0x3650, 0x0BEA }, /* P_RD_P1Q3 */ { 0x3652, 0xDC0F }, /* P_RD_P1Q4 */ { 0x368A, 0x70B0 }, /* P_RD_P2Q0 */ { 0x368C, 0x200B }, /* P_RD_P2Q1 */ { 0x368E, 0x30B2 }, /* P_RD_P2Q2 */ { 0x3690, 0xD04F }, /* P_RD_P2Q3 */ { 0x3692, 0xACF5 }, /* P_RD_P2Q4 */ { 0x36CA, 0xF7C9 }, /* P_RD_P3Q0 */ { 0x36CC, 0x2AED }, /* P_RD_P3Q1 */ { 0x36CE, 0xA652 }, /* P_RD_P3Q2 */ { 0x36D0, 0x8192 }, /* P_RD_P3Q3 */ { 0x36D2, 0x3A15 }, /* P_RD_P3Q4 */ { 0x370A, 0xDA30 }, /* P_RD_P4Q0 */ { 0x370C, 0x2E2F }, /* P_RD_P4Q1 */ { 0x370E, 0xBB56 }, /* P_RD_P4Q2 */ { 0x3710, 0x8195 }, /* P_RD_P4Q3 */ { 0x3712, 0x02F9 }, /* P_RD_P4Q4 */ { 0x3600, 0x0230 }, /* P_GR_P0Q0 */ { 0x3602, 0x58AD }, /* P_GR_P0Q1 */ { 0x3604, 0x18D1 }, /* P_GR_P0Q2 */ { 0x3606, 0x260D }, /* P_GR_P0Q3 */ { 0x3608, 0xF530 }, /* P_GR_P0Q4 */ { 0x3640, 0x17EB }, /* P_GR_P1Q0 */ { 0x3642, 0x3CAB }, /* P_GR_P1Q1 */ { 0x3644, 0x87CE }, /* P_GR_P1Q2 */ { 0x3646, 0xC02E }, /* P_GR_P1Q3 */ { 0x3648, 0xF48F }, /* P_GR_P1Q4 */ { 0x3680, 0x5350 }, /* P_GR_P2Q0 */ { 0x3682, 0x7EAF }, /* P_GR_P2Q1 */ { 0x3684, 0x4312 }, /* P_GR_P2Q2 */ { 0x3686, 0xC652 }, /* P_GR_P2Q3 */ { 0x3688, 0xBC15 }, /* P_GR_P2Q4 */ { 0x36C0, 0xB8AD }, /* P_GR_P3Q0 */ { 0x36C2, 0xBDCD }, /* P_GR_P3Q1 */ { 0x36C4, 0xE4B2 }, /* P_GR_P3Q2 */ { 0x36C6, 0xB50F }, /* P_GR_P3Q3 */ { 0x36C8, 0x5B95 }, /* 
P_GR_P3Q4 */ { 0x3700, 0xFC90 }, /* P_GR_P4Q0 */ { 0x3702, 0x8C51 }, /* P_GR_P4Q1 */ { 0x3704, 0xCED6 }, /* P_GR_P4Q2 */ { 0x3706, 0xB594 }, /* P_GR_P4Q3 */ { 0x3708, 0x0A39 }, /* P_GR_P4Q4 */ { 0x3614, 0x0230 }, /* P_BL_P0Q0 */ { 0x3616, 0x160D }, /* P_BL_P0Q1 */ { 0x3618, 0x08D1 }, /* P_BL_P0Q2 */ { 0x361A, 0x98AB }, /* P_BL_P0Q3 */ { 0x361C, 0xEA50 }, /* P_BL_P0Q4 */ { 0x3654, 0xB4EA }, /* P_BL_P1Q0 */ { 0x3656, 0xEA6C }, /* P_BL_P1Q1 */ { 0x3658, 0xFE08 }, /* P_BL_P1Q2 */ { 0x365A, 0x2C6E }, /* P_BL_P1Q3 */ { 0x365C, 0xEB0E }, /* P_BL_P1Q4 */ { 0x3694, 0x6DF0 }, /* P_BL_P2Q0 */ { 0x3696, 0x3ACF }, /* P_BL_P2Q1 */ { 0x3698, 0x3E0F }, /* P_BL_P2Q2 */ { 0x369A, 0xB2B1 }, /* P_BL_P2Q3 */ { 0x369C, 0xC374 }, /* P_BL_P2Q4 */ { 0x36D4, 0xF2AA }, /* P_BL_P3Q0 */ { 0x36D6, 0x8CCC }, /* P_BL_P3Q1 */ { 0x36D8, 0xDEF2 }, /* P_BL_P3Q2 */ { 0x36DA, 0xFA11 }, /* P_BL_P3Q3 */ { 0x36DC, 0x42F5 }, /* P_BL_P3Q4 */ { 0x3714, 0xF4F1 }, /* P_BL_P4Q0 */ { 0x3716, 0xF6F0 }, /* P_BL_P4Q1 */ { 0x3718, 0x8FD6 }, /* P_BL_P4Q2 */ { 0x371A, 0xEA14 }, /* P_BL_P4Q3 */ { 0x371C, 0x6338 }, /* P_BL_P4Q4 */ { 0x361E, 0x0350 }, /* P_GB_P0Q0 */ { 0x3620, 0x91AE }, /* P_GB_P0Q1 */ { 0x3622, 0x0571 }, /* P_GB_P0Q2 */ { 0x3624, 0x100D }, /* P_GB_P0Q3 */ { 0x3626, 0xCA70 }, /* P_GB_P0Q4 */ { 0x365E, 0xE6CB }, /* P_GB_P1Q0 */ { 0x3660, 0x50ED }, /* P_GB_P1Q1 */ { 0x3662, 0x3DAE }, /* P_GB_P1Q2 */ { 0x3664, 0xAA4F }, /* P_GB_P1Q3 */ { 0x3666, 0xDC50 }, /* P_GB_P1Q4 */ { 0x369E, 0x5470 }, /* P_GB_P2Q0 */ { 0x36A0, 0x1F6E }, /* P_GB_P2Q1 */ { 0x36A2, 0x6671 }, /* P_GB_P2Q2 */ { 0x36A4, 0xC010 }, /* P_GB_P2Q3 */ { 0x36A6, 0x8DF5 }, /* P_GB_P2Q4 */ { 0x36DE, 0x0B0C }, /* P_GB_P3Q0 */ { 0x36E0, 0x84CE }, /* P_GB_P3Q1 */ { 0x36E2, 0x8493 }, /* P_GB_P3Q2 */ { 0x36E4, 0xA610 }, /* P_GB_P3Q3 */ { 0x36E6, 0x50B5 }, /* P_GB_P3Q4 */ { 0x371E, 0x9651 }, /* P_GB_P4Q0 */ { 0x3720, 0x1EAB }, /* P_GB_P4Q1 */ { 0x3722, 0xAF76 }, /* P_GB_P4Q2 */ { 0x3724, 0xE4F4 }, /* P_GB_P4Q3 */ { 0x3726, 0x79F8 }, /* P_GB_P4Q4 */ { 
0x3782, 0x0410 }, /* POLY_ORIGIN_C */ { 0x3784, 0x0320 }, /* POLY_ORIGIN_R */ { 0x3780, 0x8000 } /* POLY_SC_ENABLE */ }; struct mt9t013_reg mt9t013_regs = { .reg_pat = &mt9t013_reg_pat[0], .reg_pat_size = ARRAY_SIZE(mt9t013_reg_pat), .ttbl = &mt9t013_test_tbl[0], .ttbl_size = ARRAY_SIZE(mt9t013_test_tbl), .lctbl = &mt9t013_lc_tbl[0], .lctbl_size = ARRAY_SIZE(mt9t013_lc_tbl), .rftbl = &mt9t013_lc_tbl[0], /* &mt9t013_rolloff_tbl[0], */ .rftbl_size = ARRAY_SIZE(mt9t013_lc_tbl) };
gpl-2.0
bbedward/ZenKernel_Flounder
arch/powerpc/platforms/pseries/iommu.c
1729
38799
/* * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation * * Rewrite, cleanup: * * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation * Copyright (C) 2006 Olof Johansson <olof@lixom.net> * * Dynamic DMA mapping support, pSeries-specific parts, both SMP and LPAR. * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/memblock.h> #include <linux/spinlock.h> #include <linux/sched.h> /* for show_stack */ #include <linux/string.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/crash_dump.h> #include <linux/memory.h> #include <linux/of.h> #include <asm/io.h> #include <asm/prom.h> #include <asm/rtas.h> #include <asm/iommu.h> #include <asm/pci-bridge.h> #include <asm/machdep.h> #include <asm/firmware.h> #include <asm/tce.h> #include <asm/ppc-pci.h> #include <asm/udbg.h> #include <asm/mmzone.h> #include "plpar_wrappers.h" static void tce_invalidate_pSeries_sw(struct iommu_table *tbl, u64 *startp, u64 *endp) { u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index; unsigned long start, end, inc; start = __pa(startp); end = __pa(endp); inc = L1_CACHE_BYTES; /* invalidate a cacheline of TCEs at a time */ /* If this is non-zero, change the format. 
We shift the * address and or in the magic from the device tree. */ if (tbl->it_busno) { start <<= 12; end <<= 12; inc <<= 12; start |= tbl->it_busno; end |= tbl->it_busno; } end |= inc - 1; /* round up end to be different than start */ mb(); /* Make sure TCEs in memory are written */ while (start <= end) { out_be64(invalidate, start); start += inc; } } static int tce_build_pSeries(struct iommu_table *tbl, long index, long npages, unsigned long uaddr, enum dma_data_direction direction, struct dma_attrs *attrs) { u64 proto_tce; u64 *tcep, *tces; u64 rpn; proto_tce = TCE_PCI_READ; // Read allowed if (direction != DMA_TO_DEVICE) proto_tce |= TCE_PCI_WRITE; tces = tcep = ((u64 *)tbl->it_base) + index; while (npages--) { /* can't move this out since we might cross MEMBLOCK boundary */ rpn = __pa(uaddr) >> TCE_SHIFT; *tcep = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT; uaddr += TCE_PAGE_SIZE; tcep++; } if (tbl->it_type & TCE_PCI_SWINV_CREATE) tce_invalidate_pSeries_sw(tbl, tces, tcep - 1); return 0; } static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages) { u64 *tcep, *tces; tces = tcep = ((u64 *)tbl->it_base) + index; while (npages--) *(tcep++) = 0; if (tbl->it_type & TCE_PCI_SWINV_FREE) tce_invalidate_pSeries_sw(tbl, tces, tcep - 1); } static unsigned long tce_get_pseries(struct iommu_table *tbl, long index) { u64 *tcep; tcep = ((u64 *)tbl->it_base) + index; return *tcep; } static void tce_free_pSeriesLP(struct iommu_table*, long, long); static void tce_freemulti_pSeriesLP(struct iommu_table*, long, long); static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages, unsigned long uaddr, enum dma_data_direction direction, struct dma_attrs *attrs) { u64 rc = 0; u64 proto_tce, tce; u64 rpn; int ret = 0; long tcenum_start = tcenum, npages_start = npages; rpn = __pa(uaddr) >> TCE_SHIFT; proto_tce = TCE_PCI_READ; if (direction != DMA_TO_DEVICE) proto_tce |= TCE_PCI_WRITE; while (npages--) { tce = proto_tce | (rpn & 
TCE_RPN_MASK) << TCE_RPN_SHIFT; rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, tce); if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) { ret = (int)rc; tce_free_pSeriesLP(tbl, tcenum_start, (npages_start - (npages + 1))); break; } if (rc && printk_ratelimit()) { printk("tce_build_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc); printk("\tindex = 0x%llx\n", (u64)tbl->it_index); printk("\ttcenum = 0x%llx\n", (u64)tcenum); printk("\ttce val = 0x%llx\n", tce ); show_stack(current, (unsigned long *)__get_SP()); } tcenum++; rpn++; } return ret; } static DEFINE_PER_CPU(u64 *, tce_page); static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages, unsigned long uaddr, enum dma_data_direction direction, struct dma_attrs *attrs) { u64 rc = 0; u64 proto_tce; u64 *tcep; u64 rpn; long l, limit; long tcenum_start = tcenum, npages_start = npages; int ret = 0; unsigned long flags; if (npages == 1) { return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr, direction, attrs); } local_irq_save(flags); /* to protect tcep and the page behind it */ tcep = __get_cpu_var(tce_page); /* This is safe to do since interrupts are off when we're called * from iommu_alloc{,_sg}() */ if (!tcep) { tcep = (u64 *)__get_free_page(GFP_ATOMIC); /* If allocation fails, fall back to the loop implementation */ if (!tcep) { local_irq_restore(flags); return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr, direction, attrs); } __get_cpu_var(tce_page) = tcep; } rpn = __pa(uaddr) >> TCE_SHIFT; proto_tce = TCE_PCI_READ; if (direction != DMA_TO_DEVICE) proto_tce |= TCE_PCI_WRITE; /* We can map max one pageful of TCEs at a time */ do { /* * Set up the page with TCE data, looping through and setting * the values. 
*/ limit = min_t(long, npages, 4096/TCE_ENTRY_SIZE); for (l = 0; l < limit; l++) { tcep[l] = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT; rpn++; } rc = plpar_tce_put_indirect((u64)tbl->it_index, (u64)tcenum << 12, (u64)__pa(tcep), limit); npages -= limit; tcenum += limit; } while (npages > 0 && !rc); local_irq_restore(flags); if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) { ret = (int)rc; tce_freemulti_pSeriesLP(tbl, tcenum_start, (npages_start - (npages + limit))); return ret; } if (rc && printk_ratelimit()) { printk("tce_buildmulti_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc); printk("\tindex = 0x%llx\n", (u64)tbl->it_index); printk("\tnpages = 0x%llx\n", (u64)npages); printk("\ttce[0] val = 0x%llx\n", tcep[0]); show_stack(current, (unsigned long *)__get_SP()); } return ret; } static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages) { u64 rc; while (npages--) { rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, 0); if (rc && printk_ratelimit()) { printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc); printk("\tindex = 0x%llx\n", (u64)tbl->it_index); printk("\ttcenum = 0x%llx\n", (u64)tcenum); show_stack(current, (unsigned long *)__get_SP()); } tcenum++; } } static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages) { u64 rc; rc = plpar_tce_stuff((u64)tbl->it_index, (u64)tcenum << 12, 0, npages); if (rc && printk_ratelimit()) { printk("tce_freemulti_pSeriesLP: plpar_tce_stuff failed\n"); printk("\trc = %lld\n", rc); printk("\tindex = 0x%llx\n", (u64)tbl->it_index); printk("\tnpages = 0x%llx\n", (u64)npages); show_stack(current, (unsigned long *)__get_SP()); } } static unsigned long tce_get_pSeriesLP(struct iommu_table *tbl, long tcenum) { u64 rc; unsigned long tce_ret; rc = plpar_tce_get((u64)tbl->it_index, (u64)tcenum << 12, &tce_ret); if (rc && printk_ratelimit()) { printk("tce_get_pSeriesLP: plpar_tce_get failed. 
rc=%lld\n", rc); printk("\tindex = 0x%llx\n", (u64)tbl->it_index); printk("\ttcenum = 0x%llx\n", (u64)tcenum); show_stack(current, (unsigned long *)__get_SP()); } return tce_ret; } /* this is compatible with cells for the device tree property */ struct dynamic_dma_window_prop { __be32 liobn; /* tce table number */ __be64 dma_base; /* address hi,lo */ __be32 tce_shift; /* ilog2(tce_page_size) */ __be32 window_shift; /* ilog2(tce_window_size) */ }; struct direct_window { struct device_node *device; const struct dynamic_dma_window_prop *prop; struct list_head list; }; /* Dynamic DMA Window support */ struct ddw_query_response { u32 windows_available; u32 largest_available_block; u32 page_size; u32 migration_capable; }; struct ddw_create_response { u32 liobn; u32 addr_hi; u32 addr_lo; }; static LIST_HEAD(direct_window_list); /* prevents races between memory on/offline and window creation */ static DEFINE_SPINLOCK(direct_window_list_lock); /* protects initializing window twice for same device */ static DEFINE_MUTEX(direct_window_init_mutex); #define DIRECT64_PROPNAME "linux,direct64-ddr-window-info" static int tce_clearrange_multi_pSeriesLP(unsigned long start_pfn, unsigned long num_pfn, const void *arg) { const struct dynamic_dma_window_prop *maprange = arg; int rc; u64 tce_size, num_tce, dma_offset, next; u32 tce_shift; long limit; tce_shift = be32_to_cpu(maprange->tce_shift); tce_size = 1ULL << tce_shift; next = start_pfn << PAGE_SHIFT; num_tce = num_pfn << PAGE_SHIFT; /* round back to the beginning of the tce page size */ num_tce += next & (tce_size - 1); next &= ~(tce_size - 1); /* covert to number of tces */ num_tce |= tce_size - 1; num_tce >>= tce_shift; do { /* * Set up the page with TCE data, looping through and setting * the values. 
*/ limit = min_t(long, num_tce, 512); dma_offset = next + be64_to_cpu(maprange->dma_base); rc = plpar_tce_stuff((u64)be32_to_cpu(maprange->liobn), dma_offset, 0, limit); next += limit * tce_size; num_tce -= limit; } while (num_tce > 0 && !rc); return rc; } static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn, unsigned long num_pfn, const void *arg) { const struct dynamic_dma_window_prop *maprange = arg; u64 *tcep, tce_size, num_tce, dma_offset, next, proto_tce, liobn; u32 tce_shift; u64 rc = 0; long l, limit; local_irq_disable(); /* to protect tcep and the page behind it */ tcep = __get_cpu_var(tce_page); if (!tcep) { tcep = (u64 *)__get_free_page(GFP_ATOMIC); if (!tcep) { local_irq_enable(); return -ENOMEM; } __get_cpu_var(tce_page) = tcep; } proto_tce = TCE_PCI_READ | TCE_PCI_WRITE; liobn = (u64)be32_to_cpu(maprange->liobn); tce_shift = be32_to_cpu(maprange->tce_shift); tce_size = 1ULL << tce_shift; next = start_pfn << PAGE_SHIFT; num_tce = num_pfn << PAGE_SHIFT; /* round back to the beginning of the tce page size */ num_tce += next & (tce_size - 1); next &= ~(tce_size - 1); /* covert to number of tces */ num_tce |= tce_size - 1; num_tce >>= tce_shift; /* We can map max one pageful of TCEs at a time */ do { /* * Set up the page with TCE data, looping through and setting * the values. 
*/ limit = min_t(long, num_tce, 4096/TCE_ENTRY_SIZE); dma_offset = next + be64_to_cpu(maprange->dma_base); for (l = 0; l < limit; l++) { tcep[l] = proto_tce | next; next += tce_size; } rc = plpar_tce_put_indirect(liobn, dma_offset, (u64)__pa(tcep), limit); num_tce -= limit; } while (num_tce > 0 && !rc); /* error cleanup: caller will clear whole range */ local_irq_enable(); return rc; } static int tce_setrange_multi_pSeriesLP_walk(unsigned long start_pfn, unsigned long num_pfn, void *arg) { return tce_setrange_multi_pSeriesLP(start_pfn, num_pfn, arg); } #ifdef CONFIG_PCI static void iommu_table_setparms(struct pci_controller *phb, struct device_node *dn, struct iommu_table *tbl) { struct device_node *node; const unsigned long *basep, *sw_inval; const u32 *sizep; node = phb->dn; basep = of_get_property(node, "linux,tce-base", NULL); sizep = of_get_property(node, "linux,tce-size", NULL); if (basep == NULL || sizep == NULL) { printk(KERN_ERR "PCI_DMA: iommu_table_setparms: %s has " "missing tce entries !\n", dn->full_name); return; } tbl->it_base = (unsigned long)__va(*basep); if (!is_kdump_kernel()) memset((void *)tbl->it_base, 0, *sizep); tbl->it_busno = phb->bus->number; /* Units of tce entries */ tbl->it_offset = phb->dma_window_base_cur >> IOMMU_PAGE_SHIFT; /* Test if we are going over 2GB of DMA space */ if (phb->dma_window_base_cur + phb->dma_window_size > 0x80000000ul) { udbg_printf("PCI_DMA: Unexpected number of IOAs under this PHB.\n"); panic("PCI_DMA: Unexpected number of IOAs under this PHB.\n"); } phb->dma_window_base_cur += phb->dma_window_size; /* Set the tce table size - measured in entries */ tbl->it_size = phb->dma_window_size >> IOMMU_PAGE_SHIFT; tbl->it_index = 0; tbl->it_blocksize = 16; tbl->it_type = TCE_PCI; sw_inval = of_get_property(node, "linux,tce-sw-invalidate-info", NULL); if (sw_inval) { /* * This property contains information on how to * invalidate the TCE entry. The first property is * the base MMIO address used to invalidate entries. 
* The second property tells us the format of the TCE * invalidate (whether it needs to be shifted) and * some magic routing info to add to our invalidate * command. */ tbl->it_index = (unsigned long) ioremap(sw_inval[0], 8); tbl->it_busno = sw_inval[1]; /* overload this with magic */ tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE; } } /* * iommu_table_setparms_lpar * * Function: On pSeries LPAR systems, return TCE table info, given a pci bus. */ static void iommu_table_setparms_lpar(struct pci_controller *phb, struct device_node *dn, struct iommu_table *tbl, const void *dma_window) { unsigned long offset, size; of_parse_dma_window(dn, dma_window, &tbl->it_index, &offset, &size); tbl->it_busno = phb->bus->number; tbl->it_base = 0; tbl->it_blocksize = 16; tbl->it_type = TCE_PCI; tbl->it_offset = offset >> IOMMU_PAGE_SHIFT; tbl->it_size = size >> IOMMU_PAGE_SHIFT; } static void pci_dma_bus_setup_pSeries(struct pci_bus *bus) { struct device_node *dn; struct iommu_table *tbl; struct device_node *isa_dn, *isa_dn_orig; struct device_node *tmp; struct pci_dn *pci; int children; dn = pci_bus_to_OF_node(bus); pr_debug("pci_dma_bus_setup_pSeries: setting up bus %s\n", dn->full_name); if (bus->self) { /* This is not a root bus, any setup will be done for the * device-side of the bridge in iommu_dev_setup_pSeries(). */ return; } pci = PCI_DN(dn); /* Check if the ISA bus on the system is under * this PHB. */ isa_dn = isa_dn_orig = of_find_node_by_type(NULL, "isa"); while (isa_dn && isa_dn != dn) isa_dn = isa_dn->parent; if (isa_dn_orig) of_node_put(isa_dn_orig); /* Count number of direct PCI children of the PHB. */ for (children = 0, tmp = dn->child; tmp; tmp = tmp->sibling) children++; pr_debug("Children: %d\n", children); /* Calculate amount of DMA window per slot. Each window must be * a power of two (due to pci_alloc_consistent requirements). * * Keep 256MB aside for PHBs with ISA. 
*/ if (!isa_dn) { /* No ISA/IDE - just set window size and return */ pci->phb->dma_window_size = 0x80000000ul; /* To be divided */ while (pci->phb->dma_window_size * children > 0x80000000ul) pci->phb->dma_window_size >>= 1; pr_debug("No ISA/IDE, window size is 0x%llx\n", pci->phb->dma_window_size); pci->phb->dma_window_base_cur = 0; return; } /* If we have ISA, then we probably have an IDE * controller too. Allocate a 128MB table but * skip the first 128MB to avoid stepping on ISA * space. */ pci->phb->dma_window_size = 0x8000000ul; pci->phb->dma_window_base_cur = 0x8000000ul; tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, pci->phb->node); iommu_table_setparms(pci->phb, dn, tbl); pci->iommu_table = iommu_init_table(tbl, pci->phb->node); /* Divide the rest (1.75GB) among the children */ pci->phb->dma_window_size = 0x80000000ul; while (pci->phb->dma_window_size * children > 0x70000000ul) pci->phb->dma_window_size >>= 1; pr_debug("ISA/IDE, window size is 0x%llx\n", pci->phb->dma_window_size); } static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus) { struct iommu_table *tbl; struct device_node *dn, *pdn; struct pci_dn *ppci; const void *dma_window = NULL; dn = pci_bus_to_OF_node(bus); pr_debug("pci_dma_bus_setup_pSeriesLP: setting up bus %s\n", dn->full_name); /* Find nearest ibm,dma-window, walking up the device tree */ for (pdn = dn; pdn != NULL; pdn = pdn->parent) { dma_window = of_get_property(pdn, "ibm,dma-window", NULL); if (dma_window != NULL) break; } if (dma_window == NULL) { pr_debug(" no ibm,dma-window property !\n"); return; } ppci = PCI_DN(pdn); pr_debug(" parent is %s, iommu_table: 0x%p\n", pdn->full_name, ppci->iommu_table); if (!ppci->iommu_table) { tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, ppci->phb->node); iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window); ppci->iommu_table = iommu_init_table(tbl, ppci->phb->node); pr_debug(" created table: %p\n", ppci->iommu_table); } } static void 
pci_dma_dev_setup_pSeries(struct pci_dev *dev) { struct device_node *dn; struct iommu_table *tbl; pr_debug("pci_dma_dev_setup_pSeries: %s\n", pci_name(dev)); dn = dev->dev.of_node; /* If we're the direct child of a root bus, then we need to allocate * an iommu table ourselves. The bus setup code should have setup * the window sizes already. */ if (!dev->bus->self) { struct pci_controller *phb = PCI_DN(dn)->phb; pr_debug(" --> first child, no bridge. Allocating iommu table.\n"); tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, phb->node); iommu_table_setparms(phb, dn, tbl); PCI_DN(dn)->iommu_table = iommu_init_table(tbl, phb->node); set_iommu_table_base(&dev->dev, PCI_DN(dn)->iommu_table); return; } /* If this device is further down the bus tree, search upwards until * an already allocated iommu table is found and use that. */ while (dn && PCI_DN(dn) && PCI_DN(dn)->iommu_table == NULL) dn = dn->parent; if (dn && PCI_DN(dn)) set_iommu_table_base(&dev->dev, PCI_DN(dn)->iommu_table); else printk(KERN_WARNING "iommu: Device %s has no iommu table\n", pci_name(dev)); } static int __read_mostly disable_ddw; static int __init disable_ddw_setup(char *str) { disable_ddw = 1; printk(KERN_INFO "ppc iommu: disabling ddw.\n"); return 0; } early_param("disable_ddw", disable_ddw_setup); static inline void __remove_ddw(struct device_node *np, const u32 *ddw_avail, u64 liobn) { int ret; ret = rtas_call(ddw_avail[2], 1, 1, NULL, liobn); if (ret) pr_warning("%s: failed to remove DMA window: rtas returned " "%d to ibm,remove-pe-dma-window(%x) %llx\n", np->full_name, ret, ddw_avail[2], liobn); else pr_debug("%s: successfully removed DMA window: rtas returned " "%d to ibm,remove-pe-dma-window(%x) %llx\n", np->full_name, ret, ddw_avail[2], liobn); } static void remove_ddw(struct device_node *np) { struct dynamic_dma_window_prop *dwp; struct property *win64; const u32 *ddw_avail; u64 liobn; int len, ret; ddw_avail = of_get_property(np, "ibm,ddw-applicable", &len); win64 = 
of_find_property(np, DIRECT64_PROPNAME, NULL); if (!win64) return; if (!ddw_avail || len < 3 * sizeof(u32) || win64->length < sizeof(*dwp)) goto delprop; dwp = win64->value; liobn = (u64)be32_to_cpu(dwp->liobn); /* clear the whole window, note the arg is in kernel pages */ ret = tce_clearrange_multi_pSeriesLP(0, 1ULL << (be32_to_cpu(dwp->window_shift) - PAGE_SHIFT), dwp); if (ret) pr_warning("%s failed to clear tces in window.\n", np->full_name); else pr_debug("%s successfully cleared tces in window.\n", np->full_name); __remove_ddw(np, ddw_avail, liobn); delprop: ret = of_remove_property(np, win64); if (ret) pr_warning("%s: failed to remove direct window property: %d\n", np->full_name, ret); } static u64 find_existing_ddw(struct device_node *pdn) { struct direct_window *window; const struct dynamic_dma_window_prop *direct64; u64 dma_addr = 0; spin_lock(&direct_window_list_lock); /* check if we already created a window and dupe that config if so */ list_for_each_entry(window, &direct_window_list, list) { if (window->device == pdn) { direct64 = window->prop; dma_addr = direct64->dma_base; break; } } spin_unlock(&direct_window_list_lock); return dma_addr; } static void __restore_default_window(struct eeh_dev *edev, u32 ddw_restore_token) { u32 cfg_addr; u64 buid; int ret; /* * Get the config address and phb buid of the PE window. * Rely on eeh to retrieve this for us. 
* Retrieve them from the pci device, not the node with the * dma-window property */ cfg_addr = edev->config_addr; if (edev->pe_config_addr) cfg_addr = edev->pe_config_addr; buid = edev->phb->buid; do { ret = rtas_call(ddw_restore_token, 3, 1, NULL, cfg_addr, BUID_HI(buid), BUID_LO(buid)); } while (rtas_busy_delay(ret)); pr_info("ibm,reset-pe-dma-windows(%x) %x %x %x returned %d\n", ddw_restore_token, cfg_addr, BUID_HI(buid), BUID_LO(buid), ret); } static int find_existing_ddw_windows(void) { struct device_node *pdn; const struct dynamic_dma_window_prop *direct64; const u32 *ddw_extensions; if (!firmware_has_feature(FW_FEATURE_LPAR)) return 0; for_each_node_with_property(pdn, DIRECT64_PROPNAME) { direct64 = of_get_property(pdn, DIRECT64_PROPNAME, NULL); if (!direct64) continue; /* * We need to ensure the IOMMU table is active when we * return from the IOMMU setup so that the common code * can clear the table or find the holes. To that end, * first, remove any existing DDW configuration. */ remove_ddw(pdn); /* * Second, if we are running on a new enough level of * firmware where the restore API is present, use it to * restore the 32-bit window, which was removed in * create_ddw. * If the API is not present, then create_ddw couldn't * have removed the 32-bit window in the first place, so * removing the DDW configuration should be sufficient. */ ddw_extensions = of_get_property(pdn, "ibm,ddw-extensions", NULL); if (ddw_extensions && ddw_extensions[0] > 0) __restore_default_window(of_node_to_eeh_dev(pdn), ddw_extensions[1]); } return 0; } machine_arch_initcall(pseries, find_existing_ddw_windows); static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail, struct ddw_query_response *query) { struct eeh_dev *edev; u32 cfg_addr; u64 buid; int ret; /* * Get the config address and phb buid of the PE window. * Rely on eeh to retrieve this for us. 
* Retrieve them from the pci device, not the node with the * dma-window property */ edev = pci_dev_to_eeh_dev(dev); cfg_addr = edev->config_addr; if (edev->pe_config_addr) cfg_addr = edev->pe_config_addr; buid = edev->phb->buid; ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query, cfg_addr, BUID_HI(buid), BUID_LO(buid)); dev_info(&dev->dev, "ibm,query-pe-dma-windows(%x) %x %x %x" " returned %d\n", ddw_avail[0], cfg_addr, BUID_HI(buid), BUID_LO(buid), ret); return ret; } static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail, struct ddw_create_response *create, int page_shift, int window_shift) { struct eeh_dev *edev; u32 cfg_addr; u64 buid; int ret; /* * Get the config address and phb buid of the PE window. * Rely on eeh to retrieve this for us. * Retrieve them from the pci device, not the node with the * dma-window property */ edev = pci_dev_to_eeh_dev(dev); cfg_addr = edev->config_addr; if (edev->pe_config_addr) cfg_addr = edev->pe_config_addr; buid = edev->phb->buid; do { /* extra outputs are LIOBN and dma-addr (hi, lo) */ ret = rtas_call(ddw_avail[1], 5, 4, (u32 *)create, cfg_addr, BUID_HI(buid), BUID_LO(buid), page_shift, window_shift); } while (rtas_busy_delay(ret)); dev_info(&dev->dev, "ibm,create-pe-dma-window(%x) %x %x %x %x %x returned %d " "(liobn = 0x%x starting addr = %x %x)\n", ddw_avail[1], cfg_addr, BUID_HI(buid), BUID_LO(buid), page_shift, window_shift, ret, create->liobn, create->addr_hi, create->addr_lo); return ret; } static void restore_default_window(struct pci_dev *dev, u32 ddw_restore_token) { __restore_default_window(pci_dev_to_eeh_dev(dev), ddw_restore_token); } struct failed_ddw_pdn { struct device_node *pdn; struct list_head list; }; static LIST_HEAD(failed_ddw_pdn_list); /* * If the PE supports dynamic dma windows, and there is space for a table * that can map all pages in a linear offset, then setup such a table, * and record the dma-offset in the struct device. 
* * dev: the pci device we are checking * pdn: the parent pe node with the ibm,dma_window property * Future: also check if we can remap the base window for our base page size * * returns the dma offset for use by dma_set_mask */ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn) { int len, ret; struct ddw_query_response query; struct ddw_create_response create; int page_shift; u64 dma_addr, max_addr; struct device_node *dn; const u32 *uninitialized_var(ddw_avail); const u32 *uninitialized_var(ddw_extensions); u32 ddw_restore_token = 0; struct direct_window *window; struct property *win64; struct dynamic_dma_window_prop *ddwprop; const void *dma_window = NULL; unsigned long liobn, offset, size; struct failed_ddw_pdn *fpdn; mutex_lock(&direct_window_init_mutex); dma_addr = find_existing_ddw(pdn); if (dma_addr != 0) goto out_unlock; /* * If we already went through this for a previous function of * the same device and failed, we don't want to muck with the * DMA window again, as it will race with in-flight operations * and can lead to EEHs. The above mutex protects access to the * list. */ list_for_each_entry(fpdn, &failed_ddw_pdn_list, list) { if (!strcmp(fpdn->pdn->full_name, pdn->full_name)) goto out_unlock; } /* * the ibm,ddw-applicable property holds the tokens for: * ibm,query-pe-dma-window * ibm,create-pe-dma-window * ibm,remove-pe-dma-window * for the given node in that order. 
* the property is actually in the parent, not the PE */ ddw_avail = of_get_property(pdn, "ibm,ddw-applicable", &len); if (!ddw_avail || len < 3 * sizeof(u32)) goto out_unlock; /* * the extensions property is only required to exist in certain * levels of firmware and later * the ibm,ddw-extensions property is a list with the first * element containing the number of extensions and each * subsequent entry is a value corresponding to that extension */ ddw_extensions = of_get_property(pdn, "ibm,ddw-extensions", &len); if (ddw_extensions) { /* * each new defined extension length should be added to * the top of the switch so the "earlier" entries also * get picked up */ switch (ddw_extensions[0]) { /* ibm,reset-pe-dma-windows */ case 1: ddw_restore_token = ddw_extensions[1]; break; } } /* * Only remove the existing DMA window if we can restore back to * the default state. Removing the existing window maximizes the * resources available to firmware for dynamic window creation. */ if (ddw_restore_token) { dma_window = of_get_property(pdn, "ibm,dma-window", NULL); of_parse_dma_window(pdn, dma_window, &liobn, &offset, &size); __remove_ddw(pdn, ddw_avail, liobn); } /* * Query if there is a second window of size to map the * whole partition. Query returns number of windows, largest * block assigned to PE (partition endpoint), and two bitmasks * of page sizes: supported and supported for migrate-dma. */ dn = pci_device_to_OF_node(dev); ret = query_ddw(dev, ddw_avail, &query); if (ret != 0) goto out_restore_window; if (query.windows_available == 0) { /* * no additional windows are available for this device. * We might be able to reallocate the existing window, * trading in for a larger page size. 
*/ dev_dbg(&dev->dev, "no free dynamic windows"); goto out_restore_window; } if (query.page_size & 4) { page_shift = 24; /* 16MB */ } else if (query.page_size & 2) { page_shift = 16; /* 64kB */ } else if (query.page_size & 1) { page_shift = 12; /* 4kB */ } else { dev_dbg(&dev->dev, "no supported direct page size in mask %x", query.page_size); goto out_restore_window; } /* verify the window * number of ptes will map the partition */ /* check largest block * page size > max memory hotplug addr */ max_addr = memory_hotplug_max(); if (query.largest_available_block < (max_addr >> page_shift)) { dev_dbg(&dev->dev, "can't map partiton max 0x%llx with %u " "%llu-sized pages\n", max_addr, query.largest_available_block, 1ULL << page_shift); goto out_restore_window; } len = order_base_2(max_addr); win64 = kzalloc(sizeof(struct property), GFP_KERNEL); if (!win64) { dev_info(&dev->dev, "couldn't allocate property for 64bit dma window\n"); goto out_restore_window; } win64->name = kstrdup(DIRECT64_PROPNAME, GFP_KERNEL); win64->value = ddwprop = kmalloc(sizeof(*ddwprop), GFP_KERNEL); win64->length = sizeof(*ddwprop); if (!win64->name || !win64->value) { dev_info(&dev->dev, "couldn't allocate property name and value\n"); goto out_free_prop; } ret = create_ddw(dev, ddw_avail, &create, page_shift, len); if (ret != 0) goto out_free_prop; ddwprop->liobn = cpu_to_be32(create.liobn); ddwprop->dma_base = cpu_to_be64(of_read_number(&create.addr_hi, 2)); ddwprop->tce_shift = cpu_to_be32(page_shift); ddwprop->window_shift = cpu_to_be32(len); dev_dbg(&dev->dev, "created tce table LIOBN 0x%x for %s\n", create.liobn, dn->full_name); window = kzalloc(sizeof(*window), GFP_KERNEL); if (!window) goto out_clear_window; ret = walk_system_ram_range(0, memblock_end_of_DRAM() >> PAGE_SHIFT, win64->value, tce_setrange_multi_pSeriesLP_walk); if (ret) { dev_info(&dev->dev, "failed to map direct window for %s: %d\n", dn->full_name, ret); goto out_free_window; } ret = of_add_property(pdn, win64); if (ret) { 
dev_err(&dev->dev, "unable to add dma window property for %s: %d", pdn->full_name, ret); goto out_free_window; } window->device = pdn; window->prop = ddwprop; spin_lock(&direct_window_list_lock); list_add(&window->list, &direct_window_list); spin_unlock(&direct_window_list_lock); dma_addr = of_read_number(&create.addr_hi, 2); goto out_unlock; out_free_window: kfree(window); out_clear_window: remove_ddw(pdn); out_free_prop: kfree(win64->name); kfree(win64->value); kfree(win64); out_restore_window: if (ddw_restore_token) restore_default_window(dev, ddw_restore_token); fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL); if (!fpdn) goto out_unlock; fpdn->pdn = pdn; list_add(&fpdn->list, &failed_ddw_pdn_list); out_unlock: mutex_unlock(&direct_window_init_mutex); return dma_addr; } static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev) { struct device_node *pdn, *dn; struct iommu_table *tbl; const void *dma_window = NULL; struct pci_dn *pci; pr_debug("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev)); /* dev setup for LPAR is a little tricky, since the device tree might * contain the dma-window properties per-device and not necessarily * for the bus. So we need to search upwards in the tree until we * either hit a dma-window property, OR find a parent with a table * already allocated. 
*/ dn = pci_device_to_OF_node(dev); pr_debug(" node is %s\n", dn->full_name); for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->iommu_table; pdn = pdn->parent) { dma_window = of_get_property(pdn, "ibm,dma-window", NULL); if (dma_window) break; } if (!pdn || !PCI_DN(pdn)) { printk(KERN_WARNING "pci_dma_dev_setup_pSeriesLP: " "no DMA window found for pci dev=%s dn=%s\n", pci_name(dev), of_node_full_name(dn)); return; } pr_debug(" parent is %s\n", pdn->full_name); pci = PCI_DN(pdn); if (!pci->iommu_table) { tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, pci->phb->node); iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window); pci->iommu_table = iommu_init_table(tbl, pci->phb->node); pr_debug(" created table: %p\n", pci->iommu_table); } else { pr_debug(" found DMA window, table: %p\n", pci->iommu_table); } set_iommu_table_base(&dev->dev, pci->iommu_table); } static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask) { bool ddw_enabled = false; struct device_node *pdn, *dn; struct pci_dev *pdev; const void *dma_window = NULL; u64 dma_offset; if (!dev->dma_mask) return -EIO; if (!dev_is_pci(dev)) goto check_mask; pdev = to_pci_dev(dev); /* only attempt to use a new window if 64-bit DMA is requested */ if (!disable_ddw && dma_mask == DMA_BIT_MASK(64)) { dn = pci_device_to_OF_node(pdev); dev_dbg(dev, "node is %s\n", dn->full_name); /* * the device tree might contain the dma-window properties * per-device and not necessarily for the bus. So we need to * search upwards in the tree until we either hit a dma-window * property, OR find a parent with a table already allocated. 
*/ for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->iommu_table; pdn = pdn->parent) { dma_window = of_get_property(pdn, "ibm,dma-window", NULL); if (dma_window) break; } if (pdn && PCI_DN(pdn)) { dma_offset = enable_ddw(pdev, pdn); if (dma_offset != 0) { dev_info(dev, "Using 64-bit direct DMA at offset %llx\n", dma_offset); set_dma_offset(dev, dma_offset); set_dma_ops(dev, &dma_direct_ops); ddw_enabled = true; } } } /* fall back on iommu ops, restore table pointer with ops */ if (!ddw_enabled && get_dma_ops(dev) != &dma_iommu_ops) { dev_info(dev, "Restoring 32-bit DMA via iommu\n"); set_dma_ops(dev, &dma_iommu_ops); pci_dma_dev_setup_pSeriesLP(pdev); } check_mask: if (!dma_supported(dev, dma_mask)) return -EIO; *dev->dma_mask = dma_mask; return 0; } static u64 dma_get_required_mask_pSeriesLP(struct device *dev) { if (!dev->dma_mask) return 0; if (!disable_ddw && dev_is_pci(dev)) { struct pci_dev *pdev = to_pci_dev(dev); struct device_node *dn; dn = pci_device_to_OF_node(pdev); /* search upwards for ibm,dma-window */ for (; dn && PCI_DN(dn) && !PCI_DN(dn)->iommu_table; dn = dn->parent) if (of_get_property(dn, "ibm,dma-window", NULL)) break; /* if there is a ibm,ddw-applicable property require 64 bits */ if (dn && PCI_DN(dn) && of_get_property(dn, "ibm,ddw-applicable", NULL)) return DMA_BIT_MASK(64); } return dma_iommu_ops.get_required_mask(dev); } #else /* CONFIG_PCI */ #define pci_dma_bus_setup_pSeries NULL #define pci_dma_dev_setup_pSeries NULL #define pci_dma_bus_setup_pSeriesLP NULL #define pci_dma_dev_setup_pSeriesLP NULL #define dma_set_mask_pSeriesLP NULL #define dma_get_required_mask_pSeriesLP NULL #endif /* !CONFIG_PCI */ static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action, void *data) { struct direct_window *window; struct memory_notify *arg = data; int ret = 0; switch (action) { case MEM_GOING_ONLINE: spin_lock(&direct_window_list_lock); list_for_each_entry(window, &direct_window_list, list) { ret |= 
tce_setrange_multi_pSeriesLP(arg->start_pfn, arg->nr_pages, window->prop); /* XXX log error */ } spin_unlock(&direct_window_list_lock); break; case MEM_CANCEL_ONLINE: case MEM_OFFLINE: spin_lock(&direct_window_list_lock); list_for_each_entry(window, &direct_window_list, list) { ret |= tce_clearrange_multi_pSeriesLP(arg->start_pfn, arg->nr_pages, window->prop); /* XXX log error */ } spin_unlock(&direct_window_list_lock); break; default: break; } if (ret && action != MEM_CANCEL_ONLINE) return NOTIFY_BAD; return NOTIFY_OK; } static struct notifier_block iommu_mem_nb = { .notifier_call = iommu_mem_notifier, }; static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *node) { int err = NOTIFY_OK; struct device_node *np = node; struct pci_dn *pci = PCI_DN(np); struct direct_window *window; switch (action) { case OF_RECONFIG_DETACH_NODE: remove_ddw(np); if (pci && pci->iommu_table) iommu_free_table(pci->iommu_table, np->full_name); spin_lock(&direct_window_list_lock); list_for_each_entry(window, &direct_window_list, list) { if (window->device == np) { list_del(&window->list); kfree(window); break; } } spin_unlock(&direct_window_list_lock); break; default: err = NOTIFY_DONE; break; } return err; } static struct notifier_block iommu_reconfig_nb = { .notifier_call = iommu_reconfig_notifier, }; /* These are called very early. 
*/ void iommu_init_early_pSeries(void) { if (of_chosen && of_get_property(of_chosen, "linux,iommu-off", NULL)) return; if (firmware_has_feature(FW_FEATURE_LPAR)) { if (firmware_has_feature(FW_FEATURE_MULTITCE)) { ppc_md.tce_build = tce_buildmulti_pSeriesLP; ppc_md.tce_free = tce_freemulti_pSeriesLP; } else { ppc_md.tce_build = tce_build_pSeriesLP; ppc_md.tce_free = tce_free_pSeriesLP; } ppc_md.tce_get = tce_get_pSeriesLP; ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeriesLP; ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeriesLP; ppc_md.dma_set_mask = dma_set_mask_pSeriesLP; ppc_md.dma_get_required_mask = dma_get_required_mask_pSeriesLP; } else { ppc_md.tce_build = tce_build_pSeries; ppc_md.tce_free = tce_free_pSeries; ppc_md.tce_get = tce_get_pseries; ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeries; ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeries; } of_reconfig_notifier_register(&iommu_reconfig_nb); register_memory_notifier(&iommu_mem_nb); set_pci_dma_ops(&dma_iommu_ops); } static int __init disable_multitce(char *str) { if (strcmp(str, "off") == 0 && firmware_has_feature(FW_FEATURE_LPAR) && firmware_has_feature(FW_FEATURE_MULTITCE)) { printk(KERN_INFO "Disabling MULTITCE firmware feature\n"); ppc_md.tce_build = tce_build_pSeriesLP; ppc_md.tce_free = tce_free_pSeriesLP; powerpc_firmware_features &= ~FW_FEATURE_MULTITCE; } return 1; } __setup("multitce=", disable_multitce);
gpl-2.0
PureNexusProject/android_kernel_htc_flounder
net/ieee802154/netlink.c
2241
3016
/* * Netlink inteface for IEEE 802.15.4 stack * * Copyright 2007, 2008 Siemens AG * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * Written by: * Sergey Lapin <slapin@ossfans.org> * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> * Maxim Osipov <maxim.osipov@siemens.com> */ #include <linux/kernel.h> #include <linux/gfp.h> #include <net/genetlink.h> #include <linux/nl802154.h> #include "ieee802154.h" static unsigned int ieee802154_seq_num; static DEFINE_SPINLOCK(ieee802154_seq_lock); struct genl_family nl802154_family = { .id = GENL_ID_GENERATE, .hdrsize = 0, .name = IEEE802154_NL_NAME, .version = 1, .maxattr = IEEE802154_ATTR_MAX, }; /* Requests to userspace */ struct sk_buff *ieee802154_nl_create(int flags, u8 req) { void *hdr; struct sk_buff *msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); unsigned long f; if (!msg) return NULL; spin_lock_irqsave(&ieee802154_seq_lock, f); hdr = genlmsg_put(msg, 0, ieee802154_seq_num++, &nl802154_family, flags, req); spin_unlock_irqrestore(&ieee802154_seq_lock, f); if (!hdr) { nlmsg_free(msg); return NULL; } return msg; } int ieee802154_nl_mcast(struct sk_buff *msg, unsigned int group) { struct nlmsghdr *nlh = nlmsg_hdr(msg); void *hdr = genlmsg_data(nlmsg_data(nlh)); if (genlmsg_end(msg, hdr) < 0) goto out; return genlmsg_multicast(msg, 0, group, GFP_ATOMIC); out: nlmsg_free(msg); return -ENOBUFS; } struct sk_buff *ieee802154_nl_new_reply(struct 
genl_info *info, int flags, u8 req) { void *hdr; struct sk_buff *msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!msg) return NULL; hdr = genlmsg_put_reply(msg, info, &nl802154_family, flags, req); if (!hdr) { nlmsg_free(msg); return NULL; } return msg; } int ieee802154_nl_reply(struct sk_buff *msg, struct genl_info *info) { struct nlmsghdr *nlh = nlmsg_hdr(msg); void *hdr = genlmsg_data(nlmsg_data(nlh)); if (genlmsg_end(msg, hdr) < 0) goto out; return genlmsg_reply(msg, info); out: nlmsg_free(msg); return -ENOBUFS; } int __init ieee802154_nl_init(void) { int rc; rc = genl_register_family(&nl802154_family); if (rc) goto fail; rc = nl802154_mac_register(); if (rc) goto fail; rc = nl802154_phy_register(); if (rc) goto fail; return 0; fail: genl_unregister_family(&nl802154_family); return rc; } void __exit ieee802154_nl_exit(void) { genl_unregister_family(&nl802154_family); }
gpl-2.0
Pafcholini/kernel-msm-3.10
tools/perf/ui/gtk/annotate.c
2241
5800
#include "gtk.h" #include "util/debug.h" #include "util/annotate.h" #include "util/evsel.h" #include "ui/helpline.h" enum { ANN_COL__PERCENT, ANN_COL__OFFSET, ANN_COL__LINE, MAX_ANN_COLS }; static const char *const col_names[] = { "Overhead", "Offset", "Line" }; static int perf_gtk__get_percent(char *buf, size_t size, struct symbol *sym, struct disasm_line *dl, int evidx) { struct sym_hist *symhist; double percent = 0.0; const char *markup; int ret = 0; strcpy(buf, ""); if (dl->offset == (s64) -1) return 0; symhist = annotation__histogram(symbol__annotation(sym), evidx); if (!symbol_conf.event_group && !symhist->addr[dl->offset]) return 0; percent = 100.0 * symhist->addr[dl->offset] / symhist->sum; markup = perf_gtk__get_percent_color(percent); if (markup) ret += scnprintf(buf, size, "%s", markup); ret += scnprintf(buf + ret, size - ret, "%6.2f%%", percent); if (markup) ret += scnprintf(buf + ret, size - ret, "</span>"); return ret; } static int perf_gtk__get_offset(char *buf, size_t size, struct symbol *sym, struct map *map, struct disasm_line *dl) { u64 start = map__rip_2objdump(map, sym->start); strcpy(buf, ""); if (dl->offset == (s64) -1) return 0; return scnprintf(buf, size, "%"PRIx64, start + dl->offset); } static int perf_gtk__get_line(char *buf, size_t size, struct disasm_line *dl) { int ret = 0; char *line = g_markup_escape_text(dl->line, -1); const char *markup = "<span fgcolor='gray'>"; strcpy(buf, ""); if (!line) return 0; if (dl->offset != (s64) -1) markup = NULL; if (markup) ret += scnprintf(buf, size, "%s", markup); ret += scnprintf(buf + ret, size - ret, "%s", line); if (markup) ret += scnprintf(buf + ret, size - ret, "</span>"); g_free(line); return ret; } static int perf_gtk__annotate_symbol(GtkWidget *window, struct symbol *sym, struct map *map, struct perf_evsel *evsel, struct hist_browser_timer *hbt __maybe_unused) { struct disasm_line *pos, *n; struct annotation *notes; GType col_types[MAX_ANN_COLS]; GtkCellRenderer *renderer; GtkListStore 
*store; GtkWidget *view; int i; char s[512]; notes = symbol__annotation(sym); for (i = 0; i < MAX_ANN_COLS; i++) { col_types[i] = G_TYPE_STRING; } store = gtk_list_store_newv(MAX_ANN_COLS, col_types); view = gtk_tree_view_new(); renderer = gtk_cell_renderer_text_new(); for (i = 0; i < MAX_ANN_COLS; i++) { gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view), -1, col_names[i], renderer, "markup", i, NULL); } gtk_tree_view_set_model(GTK_TREE_VIEW(view), GTK_TREE_MODEL(store)); g_object_unref(GTK_TREE_MODEL(store)); list_for_each_entry(pos, &notes->src->source, node) { GtkTreeIter iter; int ret = 0; gtk_list_store_append(store, &iter); if (perf_evsel__is_group_event(evsel)) { for (i = 0; i < evsel->nr_members; i++) { ret += perf_gtk__get_percent(s + ret, sizeof(s) - ret, sym, pos, evsel->idx + i); ret += scnprintf(s + ret, sizeof(s) - ret, " "); } } else { ret = perf_gtk__get_percent(s, sizeof(s), sym, pos, evsel->idx); } if (ret) gtk_list_store_set(store, &iter, ANN_COL__PERCENT, s, -1); if (perf_gtk__get_offset(s, sizeof(s), sym, map, pos)) gtk_list_store_set(store, &iter, ANN_COL__OFFSET, s, -1); if (perf_gtk__get_line(s, sizeof(s), pos)) gtk_list_store_set(store, &iter, ANN_COL__LINE, s, -1); } gtk_container_add(GTK_CONTAINER(window), view); list_for_each_entry_safe(pos, n, &notes->src->source, node) { list_del(&pos->node); disasm_line__free(pos); } return 0; } int symbol__gtk_annotate(struct symbol *sym, struct map *map, struct perf_evsel *evsel, struct hist_browser_timer *hbt) { GtkWidget *window; GtkWidget *notebook; GtkWidget *scrolled_window; GtkWidget *tab_label; if (map->dso->annotate_warned) return -1; if (symbol__annotate(sym, map, 0) < 0) { ui__error("%s", ui_helpline__current); return -1; } if (perf_gtk__is_active_context(pgctx)) { window = pgctx->main_window; notebook = pgctx->notebook; } else { GtkWidget *vbox; GtkWidget *infobar; GtkWidget *statbar; signal(SIGSEGV, perf_gtk__signal); signal(SIGFPE, perf_gtk__signal); signal(SIGINT, 
perf_gtk__signal); signal(SIGQUIT, perf_gtk__signal); signal(SIGTERM, perf_gtk__signal); window = gtk_window_new(GTK_WINDOW_TOPLEVEL); gtk_window_set_title(GTK_WINDOW(window), "perf annotate"); g_signal_connect(window, "delete_event", gtk_main_quit, NULL); pgctx = perf_gtk__activate_context(window); if (!pgctx) return -1; vbox = gtk_vbox_new(FALSE, 0); notebook = gtk_notebook_new(); pgctx->notebook = notebook; gtk_box_pack_start(GTK_BOX(vbox), notebook, TRUE, TRUE, 0); infobar = perf_gtk__setup_info_bar(); if (infobar) { gtk_box_pack_start(GTK_BOX(vbox), infobar, FALSE, FALSE, 0); } statbar = perf_gtk__setup_statusbar(); gtk_box_pack_start(GTK_BOX(vbox), statbar, FALSE, FALSE, 0); gtk_container_add(GTK_CONTAINER(window), vbox); } scrolled_window = gtk_scrolled_window_new(NULL, NULL); tab_label = gtk_label_new(sym->name); gtk_scrolled_window_set_policy(GTK_SCROLLED_WINDOW(scrolled_window), GTK_POLICY_AUTOMATIC, GTK_POLICY_AUTOMATIC); gtk_notebook_append_page(GTK_NOTEBOOK(notebook), scrolled_window, tab_label); perf_gtk__annotate_symbol(scrolled_window, sym, map, evsel, hbt); return 0; } void perf_gtk__show_annotations(void) { GtkWidget *window; if (!perf_gtk__is_active_context(pgctx)) return; window = pgctx->main_window; gtk_widget_show_all(window); perf_gtk__resize_window(window); gtk_window_set_position(GTK_WINDOW(window), GTK_WIN_POS_CENTER); gtk_main(); perf_gtk__deactivate_context(&pgctx); }
gpl-2.0
mythos234/SimplKernel-LL-N910G
drivers/mmc/host/bfin_sdh.c
2241
16898
/* * bfin_sdh.c - Analog Devices Blackfin SDH Controller * * Copyright (C) 2007-2009 Analog Device Inc. * * Licensed under the GPL-2 or later. */ #define DRIVER_NAME "bfin-sdh" #include <linux/module.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <linux/mmc/host.h> #include <linux/proc_fs.h> #include <linux/gfp.h> #include <asm/cacheflush.h> #include <asm/dma.h> #include <asm/portmux.h> #include <asm/bfin_sdh.h> #if defined(CONFIG_BF51x) || defined(__ADSPBF60x__) #define bfin_read_SDH_CLK_CTL bfin_read_RSI_CLK_CTL #define bfin_write_SDH_CLK_CTL bfin_write_RSI_CLK_CTL #define bfin_write_SDH_ARGUMENT bfin_write_RSI_ARGUMENT #define bfin_write_SDH_COMMAND bfin_write_RSI_COMMAND #define bfin_write_SDH_DATA_TIMER bfin_write_RSI_DATA_TIMER #define bfin_read_SDH_RESPONSE0 bfin_read_RSI_RESPONSE0 #define bfin_read_SDH_RESPONSE1 bfin_read_RSI_RESPONSE1 #define bfin_read_SDH_RESPONSE2 bfin_read_RSI_RESPONSE2 #define bfin_read_SDH_RESPONSE3 bfin_read_RSI_RESPONSE3 #define bfin_write_SDH_DATA_LGTH bfin_write_RSI_DATA_LGTH #define bfin_read_SDH_DATA_CTL bfin_read_RSI_DATA_CTL #define bfin_write_SDH_DATA_CTL bfin_write_RSI_DATA_CTL #define bfin_read_SDH_DATA_CNT bfin_read_RSI_DATA_CNT #define bfin_write_SDH_STATUS_CLR bfin_write_RSI_STATUS_CLR #define bfin_read_SDH_E_STATUS bfin_read_RSI_E_STATUS #define bfin_write_SDH_E_STATUS bfin_write_RSI_E_STATUS #define bfin_read_SDH_STATUS bfin_read_RSI_STATUS #define bfin_write_SDH_MASK0 bfin_write_RSI_MASK0 #define bfin_write_SDH_E_MASK bfin_write_RSI_E_MASK #define bfin_read_SDH_CFG bfin_read_RSI_CFG #define bfin_write_SDH_CFG bfin_write_RSI_CFG # if defined(__ADSPBF60x__) # define bfin_read_SDH_BLK_SIZE bfin_read_RSI_BLKSZ # define bfin_write_SDH_BLK_SIZE bfin_write_RSI_BLKSZ # else # define bfin_read_SDH_PWR_CTL bfin_read_RSI_PWR_CTL # define bfin_write_SDH_PWR_CTL bfin_write_RSI_PWR_CTL # endif 
#endif struct sdh_host { struct mmc_host *mmc; spinlock_t lock; struct resource *res; void __iomem *base; int irq; int stat_irq; int dma_ch; int dma_dir; struct dma_desc_array *sg_cpu; dma_addr_t sg_dma; int dma_len; unsigned long sclk; unsigned int imask; unsigned int power_mode; unsigned int clk_div; struct mmc_request *mrq; struct mmc_command *cmd; struct mmc_data *data; }; static struct bfin_sd_host *get_sdh_data(struct platform_device *pdev) { return pdev->dev.platform_data; } static void sdh_stop_clock(struct sdh_host *host) { bfin_write_SDH_CLK_CTL(bfin_read_SDH_CLK_CTL() & ~CLK_E); SSYNC(); } static void sdh_enable_stat_irq(struct sdh_host *host, unsigned int mask) { unsigned long flags; spin_lock_irqsave(&host->lock, flags); host->imask |= mask; bfin_write_SDH_MASK0(mask); SSYNC(); spin_unlock_irqrestore(&host->lock, flags); } static void sdh_disable_stat_irq(struct sdh_host *host, unsigned int mask) { unsigned long flags; spin_lock_irqsave(&host->lock, flags); host->imask &= ~mask; bfin_write_SDH_MASK0(host->imask); SSYNC(); spin_unlock_irqrestore(&host->lock, flags); } static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data) { unsigned int length; unsigned int data_ctl; unsigned int dma_cfg; unsigned int cycle_ns, timeout; dev_dbg(mmc_dev(host->mmc), "%s enter flags: 0x%x\n", __func__, data->flags); host->data = data; data_ctl = 0; dma_cfg = 0; length = data->blksz * data->blocks; bfin_write_SDH_DATA_LGTH(length); if (data->flags & MMC_DATA_STREAM) data_ctl |= DTX_MODE; if (data->flags & MMC_DATA_READ) data_ctl |= DTX_DIR; /* Only supports power-of-2 block size */ if (data->blksz & (data->blksz - 1)) return -EINVAL; #ifndef RSI_BLKSZ data_ctl |= ((ffs(data->blksz) - 1) << 4); #else bfin_write_SDH_BLK_SIZE(data->blksz); #endif bfin_write_SDH_DATA_CTL(data_ctl); /* the time of a host clock period in ns */ cycle_ns = 1000000000 / (host->sclk / (2 * (host->clk_div + 1))); timeout = data->timeout_ns / cycle_ns; timeout += data->timeout_clks; 
bfin_write_SDH_DATA_TIMER(timeout); SSYNC(); if (data->flags & MMC_DATA_READ) { host->dma_dir = DMA_FROM_DEVICE; dma_cfg |= WNR; } else host->dma_dir = DMA_TO_DEVICE; sdh_enable_stat_irq(host, (DAT_CRC_FAIL | DAT_TIME_OUT | DAT_END)); host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma_dir); #if defined(CONFIG_BF54x) || defined(CONFIG_BF60x) dma_cfg |= DMAFLOW_ARRAY | RESTART | WDSIZE_32 | DMAEN; # ifdef RSI_BLKSZ dma_cfg |= PSIZE_32 | NDSIZE_3; # else dma_cfg |= NDSIZE_5; # endif { struct scatterlist *sg; int i; for_each_sg(data->sg, sg, host->dma_len, i) { host->sg_cpu[i].start_addr = sg_dma_address(sg); host->sg_cpu[i].cfg = dma_cfg; host->sg_cpu[i].x_count = sg_dma_len(sg) / 4; host->sg_cpu[i].x_modify = 4; dev_dbg(mmc_dev(host->mmc), "%d: start_addr:0x%lx, " "cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n", i, host->sg_cpu[i].start_addr, host->sg_cpu[i].cfg, host->sg_cpu[i].x_count, host->sg_cpu[i].x_modify); } } flush_dcache_range((unsigned int)host->sg_cpu, (unsigned int)host->sg_cpu + host->dma_len * sizeof(struct dma_desc_array)); /* Set the last descriptor to stop mode */ host->sg_cpu[host->dma_len - 1].cfg &= ~(DMAFLOW | NDSIZE); host->sg_cpu[host->dma_len - 1].cfg |= DI_EN; set_dma_curr_desc_addr(host->dma_ch, (unsigned long *)host->sg_dma); set_dma_x_count(host->dma_ch, 0); set_dma_x_modify(host->dma_ch, 0); SSYNC(); set_dma_config(host->dma_ch, dma_cfg); #elif defined(CONFIG_BF51x) /* RSI DMA doesn't work in array mode */ dma_cfg |= WDSIZE_32 | DMAEN; set_dma_start_addr(host->dma_ch, sg_dma_address(&data->sg[0])); set_dma_x_count(host->dma_ch, length / 4); set_dma_x_modify(host->dma_ch, 4); SSYNC(); set_dma_config(host->dma_ch, dma_cfg); #endif bfin_write_SDH_DATA_CTL(bfin_read_SDH_DATA_CTL() | DTX_DMA_E | DTX_E); SSYNC(); dev_dbg(mmc_dev(host->mmc), "%s exit\n", __func__); return 0; } static void sdh_start_cmd(struct sdh_host *host, struct mmc_command *cmd) { unsigned int sdh_cmd; unsigned int stat_mask; 
dev_dbg(mmc_dev(host->mmc), "%s enter cmd: 0x%p\n", __func__, cmd); WARN_ON(host->cmd != NULL); host->cmd = cmd; sdh_cmd = 0; stat_mask = 0; sdh_cmd |= cmd->opcode; if (cmd->flags & MMC_RSP_PRESENT) { sdh_cmd |= CMD_RSP; stat_mask |= CMD_RESP_END; } else { stat_mask |= CMD_SENT; } if (cmd->flags & MMC_RSP_136) sdh_cmd |= CMD_L_RSP; stat_mask |= CMD_CRC_FAIL | CMD_TIME_OUT; sdh_enable_stat_irq(host, stat_mask); bfin_write_SDH_ARGUMENT(cmd->arg); bfin_write_SDH_COMMAND(sdh_cmd | CMD_E); bfin_write_SDH_CLK_CTL(bfin_read_SDH_CLK_CTL() | CLK_E); SSYNC(); } static void sdh_finish_request(struct sdh_host *host, struct mmc_request *mrq) { dev_dbg(mmc_dev(host->mmc), "%s enter\n", __func__); host->mrq = NULL; host->cmd = NULL; host->data = NULL; mmc_request_done(host->mmc, mrq); } static int sdh_cmd_done(struct sdh_host *host, unsigned int stat) { struct mmc_command *cmd = host->cmd; int ret = 0; dev_dbg(mmc_dev(host->mmc), "%s enter cmd: %p\n", __func__, cmd); if (!cmd) return 0; host->cmd = NULL; if (cmd->flags & MMC_RSP_PRESENT) { cmd->resp[0] = bfin_read_SDH_RESPONSE0(); if (cmd->flags & MMC_RSP_136) { cmd->resp[1] = bfin_read_SDH_RESPONSE1(); cmd->resp[2] = bfin_read_SDH_RESPONSE2(); cmd->resp[3] = bfin_read_SDH_RESPONSE3(); } } if (stat & CMD_TIME_OUT) cmd->error = -ETIMEDOUT; else if (stat & CMD_CRC_FAIL && cmd->flags & MMC_RSP_CRC) cmd->error = -EILSEQ; sdh_disable_stat_irq(host, (CMD_SENT | CMD_RESP_END | CMD_TIME_OUT | CMD_CRC_FAIL)); if (host->data && !cmd->error) { if (host->data->flags & MMC_DATA_WRITE) { ret = sdh_setup_data(host, host->data); if (ret) return 0; } sdh_enable_stat_irq(host, DAT_END | RX_OVERRUN | TX_UNDERRUN | DAT_TIME_OUT); } else sdh_finish_request(host, host->mrq); return 1; } static int sdh_data_done(struct sdh_host *host, unsigned int stat) { struct mmc_data *data = host->data; dev_dbg(mmc_dev(host->mmc), "%s enter stat: 0x%x\n", __func__, stat); if (!data) return 0; disable_dma(host->dma_ch); dma_unmap_sg(mmc_dev(host->mmc), data->sg, 
data->sg_len, host->dma_dir); if (stat & DAT_TIME_OUT) data->error = -ETIMEDOUT; else if (stat & DAT_CRC_FAIL) data->error = -EILSEQ; else if (stat & (RX_OVERRUN | TX_UNDERRUN)) data->error = -EIO; if (!data->error) data->bytes_xfered = data->blocks * data->blksz; else data->bytes_xfered = 0; bfin_write_SDH_STATUS_CLR(DAT_END_STAT | DAT_TIMEOUT_STAT | \ DAT_CRC_FAIL_STAT | DAT_BLK_END_STAT | RX_OVERRUN | TX_UNDERRUN); bfin_write_SDH_DATA_CTL(0); SSYNC(); host->data = NULL; if (host->mrq->stop) { sdh_stop_clock(host); sdh_start_cmd(host, host->mrq->stop); } else { sdh_finish_request(host, host->mrq); } return 1; } static void sdh_request(struct mmc_host *mmc, struct mmc_request *mrq) { struct sdh_host *host = mmc_priv(mmc); int ret = 0; dev_dbg(mmc_dev(host->mmc), "%s enter, mrp:%p, cmd:%p\n", __func__, mrq, mrq->cmd); WARN_ON(host->mrq != NULL); spin_lock(&host->lock); host->mrq = mrq; host->data = mrq->data; if (mrq->data && mrq->data->flags & MMC_DATA_READ) { ret = sdh_setup_data(host, mrq->data); if (ret) goto data_err; } sdh_start_cmd(host, mrq->cmd); data_err: spin_unlock(&host->lock); } static void sdh_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { struct sdh_host *host; u16 clk_ctl = 0; #ifndef RSI_BLKSZ u16 pwr_ctl = 0; #endif u16 cfg; host = mmc_priv(mmc); spin_lock(&host->lock); cfg = bfin_read_SDH_CFG(); cfg |= MWE; switch (ios->bus_width) { case MMC_BUS_WIDTH_4: #ifndef RSI_BLKSZ cfg &= ~PD_SDDAT3; #endif cfg |= PUP_SDDAT3; /* Enable 4 bit SDIO */ cfg |= SD4E; clk_ctl |= WIDE_BUS_4; break; case MMC_BUS_WIDTH_8: #ifndef RSI_BLKSZ cfg &= ~PD_SDDAT3; #endif cfg |= PUP_SDDAT3; /* Disable 4 bit SDIO */ cfg &= ~SD4E; clk_ctl |= BYTE_BUS_8; break; default: cfg &= ~PUP_SDDAT3; /* Disable 4 bit SDIO */ cfg &= ~SD4E; } host->power_mode = ios->power_mode; #ifndef RSI_BLKSZ if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) { pwr_ctl |= ROD_CTL; # ifndef CONFIG_SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND pwr_ctl |= SD_CMD_OD; # endif } if (ios->power_mode != 
MMC_POWER_OFF) pwr_ctl |= PWR_ON; else pwr_ctl &= ~PWR_ON; bfin_write_SDH_PWR_CTL(pwr_ctl); #else # ifndef CONFIG_SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) cfg |= SD_CMD_OD; else cfg &= ~SD_CMD_OD; # endif if (ios->power_mode != MMC_POWER_OFF) cfg |= PWR_ON; else cfg &= ~PWR_ON; bfin_write_SDH_CFG(cfg); #endif SSYNC(); if (ios->power_mode == MMC_POWER_ON && ios->clock) { unsigned char clk_div; clk_div = (get_sclk() / ios->clock - 1) / 2; clk_div = min_t(unsigned char, clk_div, 0xFF); clk_ctl |= clk_div; clk_ctl |= CLK_E; host->clk_div = clk_div; bfin_write_SDH_CLK_CTL(clk_ctl); } else sdh_stop_clock(host); /* set up sdh interrupt mask*/ if (ios->power_mode == MMC_POWER_ON) bfin_write_SDH_MASK0(DAT_END | DAT_TIME_OUT | DAT_CRC_FAIL | RX_OVERRUN | TX_UNDERRUN | CMD_SENT | CMD_RESP_END | CMD_TIME_OUT | CMD_CRC_FAIL); else bfin_write_SDH_MASK0(0); SSYNC(); spin_unlock(&host->lock); dev_dbg(mmc_dev(host->mmc), "SDH: clk_div = 0x%x actual clock:%ld expected clock:%d\n", host->clk_div, host->clk_div ? 
get_sclk() / (2 * (host->clk_div + 1)) : 0, ios->clock); } static const struct mmc_host_ops sdh_ops = { .request = sdh_request, .set_ios = sdh_set_ios, }; static irqreturn_t sdh_dma_irq(int irq, void *devid) { struct sdh_host *host = devid; dev_dbg(mmc_dev(host->mmc), "%s enter, irq_stat: 0x%04lx\n", __func__, get_dma_curr_irqstat(host->dma_ch)); clear_dma_irqstat(host->dma_ch); SSYNC(); return IRQ_HANDLED; } static irqreturn_t sdh_stat_irq(int irq, void *devid) { struct sdh_host *host = devid; unsigned int status; int handled = 0; dev_dbg(mmc_dev(host->mmc), "%s enter\n", __func__); spin_lock(&host->lock); status = bfin_read_SDH_E_STATUS(); if (status & SD_CARD_DET) { mmc_detect_change(host->mmc, 0); bfin_write_SDH_E_STATUS(SD_CARD_DET); } status = bfin_read_SDH_STATUS(); if (status & (CMD_SENT | CMD_RESP_END | CMD_TIME_OUT | CMD_CRC_FAIL)) { handled |= sdh_cmd_done(host, status); bfin_write_SDH_STATUS_CLR(CMD_SENT_STAT | CMD_RESP_END_STAT | \ CMD_TIMEOUT_STAT | CMD_CRC_FAIL_STAT); SSYNC(); } status = bfin_read_SDH_STATUS(); if (status & (DAT_END | DAT_TIME_OUT | DAT_CRC_FAIL | RX_OVERRUN | TX_UNDERRUN)) handled |= sdh_data_done(host, status); spin_unlock(&host->lock); dev_dbg(mmc_dev(host->mmc), "%s exit\n\n", __func__); return IRQ_RETVAL(handled); } static void sdh_reset(void) { #if defined(CONFIG_BF54x) /* Secure Digital Host shares DMA with Nand controller */ bfin_write_DMAC1_PERIMUX(bfin_read_DMAC1_PERIMUX() | 0x1); #endif bfin_write_SDH_CFG(bfin_read_SDH_CFG() | CLKS_EN); SSYNC(); /* Disable card inserting detection pin. set MMC_CAP_NEEDS_POLL, and * mmc stack will do the detection. 
*/ bfin_write_SDH_CFG((bfin_read_SDH_CFG() & 0x1F) | (PUP_SDDAT | PUP_SDDAT3)); SSYNC(); } static int sdh_probe(struct platform_device *pdev) { struct mmc_host *mmc; struct sdh_host *host; struct bfin_sd_host *drv_data = get_sdh_data(pdev); int ret; if (!drv_data) { dev_err(&pdev->dev, "missing platform driver data\n"); ret = -EINVAL; goto out; } mmc = mmc_alloc_host(sizeof(struct sdh_host), &pdev->dev); if (!mmc) { ret = -ENOMEM; goto out; } mmc->ops = &sdh_ops; #if defined(CONFIG_BF51x) mmc->max_segs = 1; #else mmc->max_segs = PAGE_SIZE / sizeof(struct dma_desc_array); #endif #ifdef RSI_BLKSZ mmc->max_seg_size = -1; #else mmc->max_seg_size = 1 << 16; #endif mmc->max_blk_size = 1 << 11; mmc->max_blk_count = 1 << 11; mmc->max_req_size = PAGE_SIZE; mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; mmc->f_max = get_sclk(); mmc->f_min = mmc->f_max >> 9; mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_NEEDS_POLL; host = mmc_priv(mmc); host->mmc = mmc; host->sclk = get_sclk(); spin_lock_init(&host->lock); host->irq = drv_data->irq_int0; host->dma_ch = drv_data->dma_chan; ret = request_dma(host->dma_ch, DRIVER_NAME "DMA"); if (ret) { dev_err(&pdev->dev, "unable to request DMA channel\n"); goto out1; } ret = set_dma_callback(host->dma_ch, sdh_dma_irq, host); if (ret) { dev_err(&pdev->dev, "unable to request DMA irq\n"); goto out2; } host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &host->sg_dma, GFP_KERNEL); if (host->sg_cpu == NULL) { ret = -ENOMEM; goto out2; } platform_set_drvdata(pdev, mmc); ret = request_irq(host->irq, sdh_stat_irq, 0, "SDH Status IRQ", host); if (ret) { dev_err(&pdev->dev, "unable to request status irq\n"); goto out3; } ret = peripheral_request_list(drv_data->pin_req, DRIVER_NAME); if (ret) { dev_err(&pdev->dev, "unable to request peripheral pins\n"); goto out4; } sdh_reset(); mmc_add_host(mmc); return 0; out4: free_irq(host->irq, host); out3: mmc_remove_host(mmc); dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma); out2: 
free_dma(host->dma_ch); out1: mmc_free_host(mmc); out: return ret; } static int sdh_remove(struct platform_device *pdev) { struct mmc_host *mmc = platform_get_drvdata(pdev); platform_set_drvdata(pdev, NULL); if (mmc) { struct sdh_host *host = mmc_priv(mmc); mmc_remove_host(mmc); sdh_stop_clock(host); free_irq(host->irq, host); free_dma(host->dma_ch); dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma); mmc_free_host(mmc); } return 0; } #ifdef CONFIG_PM static int sdh_suspend(struct platform_device *dev, pm_message_t state) { struct mmc_host *mmc = platform_get_drvdata(dev); struct bfin_sd_host *drv_data = get_sdh_data(dev); int ret = 0; if (mmc) ret = mmc_suspend_host(mmc); peripheral_free_list(drv_data->pin_req); return ret; } static int sdh_resume(struct platform_device *dev) { struct mmc_host *mmc = platform_get_drvdata(dev); struct bfin_sd_host *drv_data = get_sdh_data(dev); int ret = 0; ret = peripheral_request_list(drv_data->pin_req, DRIVER_NAME); if (ret) { dev_err(&dev->dev, "unable to request peripheral pins\n"); return ret; } sdh_reset(); if (mmc) ret = mmc_resume_host(mmc); return ret; } #else # define sdh_suspend NULL # define sdh_resume NULL #endif static struct platform_driver sdh_driver = { .probe = sdh_probe, .remove = sdh_remove, .suspend = sdh_suspend, .resume = sdh_resume, .driver = { .name = DRIVER_NAME, }, }; module_platform_driver(sdh_driver); MODULE_DESCRIPTION("Blackfin Secure Digital Host Driver"); MODULE_AUTHOR("Cliff Cai, Roy Huang"); MODULE_LICENSE("GPL");
gpl-2.0
shazzl/sa77
fs/cifs/cifsfs.c
2753
32505
/* * fs/cifs/cifsfs.c * * Copyright (C) International Business Machines Corp., 2002,2008 * Author(s): Steve French (sfrench@us.ibm.com) * * Common Internet FileSystem (CIFS) client * * This library is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation; either version 2.1 of the License, or * (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* Note that BB means BUGBUG (ie something to fix eventually) */ #include <linux/module.h> #include <linux/fs.h> #include <linux/mount.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/list.h> #include <linux/seq_file.h> #include <linux/vfs.h> #include <linux/mempool.h> #include <linux/delay.h> #include <linux/kthread.h> #include <linux/freezer.h> #include <linux/namei.h> #include <net/ipv6.h> #include "cifsfs.h" #include "cifspdu.h" #define DECLARE_GLOBALS_HERE #include "cifsglob.h" #include "cifsproto.h" #include "cifs_debug.h" #include "cifs_fs_sb.h" #include <linux/mm.h> #include <linux/key-type.h> #include "cifs_spnego.h" #include "fscache.h" #define CIFS_MAGIC_NUMBER 0xFF534D42 /* the first four bytes of SMB PDUs */ int cifsFYI = 0; int cifsERROR = 1; int traceSMB = 0; bool enable_oplocks = true; unsigned int linuxExtEnabled = 1; unsigned int lookupCacheEnabled = 1; unsigned int multiuser_mount = 0; unsigned int global_secflags = CIFSSEC_DEF; /* unsigned int ntlmv2_support = 0; */ unsigned int sign_CIFS_PDUs = 1; static const struct super_operations 
cifs_super_ops; unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE; module_param(CIFSMaxBufSize, int, 0); MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header). " "Default: 16384 Range: 8192 to 130048"); unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL; module_param(cifs_min_rcv, int, 0); MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: " "1 to 64"); unsigned int cifs_min_small = 30; module_param(cifs_min_small, int, 0); MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 " "Range: 2 to 256"); unsigned int cifs_max_pending = CIFS_MAX_REQ; module_param(cifs_max_pending, int, 0444); MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. " "Default: 32767 Range: 2 to 32767."); module_param(enable_oplocks, bool, 0644); MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks (bool). Default:" "y/Y/1"); extern mempool_t *cifs_sm_req_poolp; extern mempool_t *cifs_req_poolp; extern mempool_t *cifs_mid_poolp; struct workqueue_struct *cifsiod_wq; static int cifs_read_super(struct super_block *sb) { struct inode *inode; struct cifs_sb_info *cifs_sb; int rc = 0; cifs_sb = CIFS_SB(sb); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL) sb->s_flags |= MS_POSIXACL; if (cifs_sb_master_tcon(cifs_sb)->ses->capabilities & CAP_LARGE_FILES) sb->s_maxbytes = MAX_LFS_FILESIZE; else sb->s_maxbytes = MAX_NON_LFS; /* BB FIXME fix time_gran to be larger for LANMAN sessions */ sb->s_time_gran = 100; sb->s_magic = CIFS_MAGIC_NUMBER; sb->s_op = &cifs_super_ops; sb->s_bdi = &cifs_sb->bdi; sb->s_blocksize = CIFS_MAX_MSGSIZE; sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */ inode = cifs_root_iget(sb); if (IS_ERR(inode)) { rc = PTR_ERR(inode); goto out_no_root; } sb->s_root = d_make_root(inode); if (!sb->s_root) { rc = -ENOMEM; goto out_no_root; } /* do that *after* d_alloc_root() - we want NULL ->d_op for root here */ if (cifs_sb_master_tcon(cifs_sb)->nocase) sb->s_d_op = &cifs_ci_dentry_ops; else 
sb->s_d_op = &cifs_dentry_ops; #ifdef CONFIG_CIFS_NFSD_EXPORT if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) { cFYI(1, "export ops supported"); sb->s_export_op = &cifs_export_ops; } #endif /* CONFIG_CIFS_NFSD_EXPORT */ return 0; out_no_root: cERROR(1, "cifs_read_super: get root inode failed"); return rc; } static void cifs_kill_sb(struct super_block *sb) { struct cifs_sb_info *cifs_sb = CIFS_SB(sb); kill_anon_super(sb); cifs_umount(cifs_sb); } static int cifs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_sb; struct cifs_sb_info *cifs_sb = CIFS_SB(sb); struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); int rc = -EOPNOTSUPP; int xid; xid = GetXid(); buf->f_type = CIFS_MAGIC_NUMBER; /* * PATH_MAX may be too long - it would presumably be total path, * but note that some servers (includinng Samba 3) have a shorter * maximum path. * * Instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO. */ buf->f_namelen = PATH_MAX; buf->f_files = 0; /* undefined */ buf->f_ffree = 0; /* unlimited */ /* * We could add a second check for a QFS Unix capability bit */ if ((tcon->ses->capabilities & CAP_UNIX) && (CIFS_POSIX_EXTENSIONS & le64_to_cpu(tcon->fsUnixInfo.Capability))) rc = CIFSSMBQFSPosixInfo(xid, tcon, buf); /* * Only need to call the old QFSInfo if failed on newer one, * e.g. by OS/2. 
**/ if (rc && (tcon->ses->capabilities & CAP_NT_SMBS)) rc = CIFSSMBQFSInfo(xid, tcon, buf); /* * Some old Windows servers also do not support level 103, retry with * older level one if old server failed the previous call or we * bypassed it because we detected that this was an older LANMAN sess */ if (rc) rc = SMBOldQFSInfo(xid, tcon, buf); FreeXid(xid); return 0; } static int cifs_permission(struct inode *inode, int mask) { struct cifs_sb_info *cifs_sb; cifs_sb = CIFS_SB(inode->i_sb); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) { if ((mask & MAY_EXEC) && !execute_ok(inode)) return -EACCES; else return 0; } else /* file mode might have been restricted at mount time on the client (above and beyond ACL on servers) for servers which do not support setting and viewing mode bits, so allowing client to check permissions is useful */ return generic_permission(inode, mask); } static struct kmem_cache *cifs_inode_cachep; static struct kmem_cache *cifs_req_cachep; static struct kmem_cache *cifs_mid_cachep; static struct kmem_cache *cifs_sm_req_cachep; mempool_t *cifs_sm_req_poolp; mempool_t *cifs_req_poolp; mempool_t *cifs_mid_poolp; static struct inode * cifs_alloc_inode(struct super_block *sb) { struct cifsInodeInfo *cifs_inode; cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL); if (!cifs_inode) return NULL; cifs_inode->cifsAttrs = 0x20; /* default */ cifs_inode->time = 0; /* Until the file is open and we have gotten oplock info back from the server, can not assume caching of file data or metadata */ cifs_set_oplock_level(cifs_inode, 0); cifs_inode->delete_pending = false; cifs_inode->invalid_mapping = false; cifs_inode->vfs_inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */ cifs_inode->server_eof = 0; cifs_inode->uniqueid = 0; cifs_inode->createtime = 0; /* Can not set i_flags here - they get immediately overwritten to zero by the VFS */ /* cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME;*/ INIT_LIST_HEAD(&cifs_inode->openFileList); return 
&cifs_inode->vfs_inode; } static void cifs_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); INIT_LIST_HEAD(&inode->i_dentry); kmem_cache_free(cifs_inode_cachep, CIFS_I(inode)); } static void cifs_destroy_inode(struct inode *inode) { call_rcu(&inode->i_rcu, cifs_i_callback); } static void cifs_evict_inode(struct inode *inode) { truncate_inode_pages(&inode->i_data, 0); end_writeback(inode); cifs_fscache_release_inode_cookie(inode); } static void cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server) { struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr; struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr; seq_printf(s, ",addr="); switch (server->dstaddr.ss_family) { case AF_INET: seq_printf(s, "%pI4", &sa->sin_addr.s_addr); break; case AF_INET6: seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr); if (sa6->sin6_scope_id) seq_printf(s, "%%%u", sa6->sin6_scope_id); break; default: seq_printf(s, "(unknown)"); } } static void cifs_show_security(struct seq_file *s, struct TCP_Server_Info *server) { seq_printf(s, ",sec="); switch (server->secType) { case LANMAN: seq_printf(s, "lanman"); break; case NTLMv2: seq_printf(s, "ntlmv2"); break; case NTLM: seq_printf(s, "ntlm"); break; case Kerberos: seq_printf(s, "krb5"); break; case RawNTLMSSP: seq_printf(s, "ntlmssp"); break; default: /* shouldn't ever happen */ seq_printf(s, "unknown"); break; } if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) seq_printf(s, "i"); } /* * cifs_show_options() is for displaying mount options in /proc/mounts. * Not all settable options are displayed but most of the important * ones are. 
*/ static int cifs_show_options(struct seq_file *s, struct dentry *root) { struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb); struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); struct sockaddr *srcaddr; srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr; cifs_show_security(s, tcon->ses->server); seq_printf(s, ",unc=%s", tcon->treeName); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER) seq_printf(s, ",multiuser"); else if (tcon->ses->user_name) seq_printf(s, ",username=%s", tcon->ses->user_name); if (tcon->ses->domainName) seq_printf(s, ",domain=%s", tcon->ses->domainName); if (srcaddr->sa_family != AF_UNSPEC) { struct sockaddr_in *saddr4; struct sockaddr_in6 *saddr6; saddr4 = (struct sockaddr_in *)srcaddr; saddr6 = (struct sockaddr_in6 *)srcaddr; if (srcaddr->sa_family == AF_INET6) seq_printf(s, ",srcaddr=%pI6c", &saddr6->sin6_addr); else if (srcaddr->sa_family == AF_INET) seq_printf(s, ",srcaddr=%pI4", &saddr4->sin_addr.s_addr); else seq_printf(s, ",srcaddr=BAD-AF:%i", (int)(srcaddr->sa_family)); } seq_printf(s, ",uid=%u", cifs_sb->mnt_uid); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) seq_printf(s, ",forceuid"); else seq_printf(s, ",noforceuid"); seq_printf(s, ",gid=%u", cifs_sb->mnt_gid); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) seq_printf(s, ",forcegid"); else seq_printf(s, ",noforcegid"); cifs_show_address(s, tcon->ses->server); if (!tcon->unix_ext) seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho", cifs_sb->mnt_file_mode, cifs_sb->mnt_dir_mode); if (tcon->seal) seq_printf(s, ",seal"); if (tcon->nocase) seq_printf(s, ",nocase"); if (tcon->retry) seq_printf(s, ",hard"); if (tcon->unix_ext) seq_printf(s, ",unix"); else seq_printf(s, ",nounix"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) seq_printf(s, ",posixpaths"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) seq_printf(s, ",setuids"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) seq_printf(s, ",serverino"); if (cifs_sb->mnt_cifs_flags & 
CIFS_MOUNT_RWPIDFORWARD) seq_printf(s, ",rwpidforward"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) seq_printf(s, ",forcemand"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) seq_printf(s, ",directio"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR) seq_printf(s, ",nouser_xattr"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR) seq_printf(s, ",mapchars"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) seq_printf(s, ",sfu"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL) seq_printf(s, ",nobrl"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) seq_printf(s, ",cifsacl"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM) seq_printf(s, ",dynperm"); if (root->d_sb->s_flags & MS_POSIXACL) seq_printf(s, ",acl"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) seq_printf(s, ",mfsymlinks"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE) seq_printf(s, ",fsc"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC) seq_printf(s, ",nostrictsync"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) seq_printf(s, ",noperm"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) seq_printf(s, ",strictcache"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID) seq_printf(s, ",backupuid=%u", cifs_sb->mnt_backupuid); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) seq_printf(s, ",backupgid=%u", cifs_sb->mnt_backupgid); seq_printf(s, ",rsize=%u", cifs_sb->rsize); seq_printf(s, ",wsize=%u", cifs_sb->wsize); /* convert actimeo and display it in seconds */ seq_printf(s, ",actimeo=%lu", cifs_sb->actimeo / HZ); return 0; } static void cifs_umount_begin(struct super_block *sb) { struct cifs_sb_info *cifs_sb = CIFS_SB(sb); struct cifs_tcon *tcon; if (cifs_sb == NULL) return; tcon = cifs_sb_master_tcon(cifs_sb); spin_lock(&cifs_tcp_ses_lock); if ((tcon->tc_count > 1) || (tcon->tidStatus == CifsExiting)) { /* we have other mounts to same share or we have already tried to force umount this and woken up all waiting network requests, nothing 
to do */ spin_unlock(&cifs_tcp_ses_lock); return; } else if (tcon->tc_count == 1) tcon->tidStatus = CifsExiting; spin_unlock(&cifs_tcp_ses_lock); /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */ /* cancel_notify_requests(tcon); */ if (tcon->ses && tcon->ses->server) { cFYI(1, "wake up tasks now - umount begin not complete"); wake_up_all(&tcon->ses->server->request_q); wake_up_all(&tcon->ses->server->response_q); msleep(1); /* yield */ /* we have to kick the requests once more */ wake_up_all(&tcon->ses->server->response_q); msleep(1); } return; } #ifdef CONFIG_CIFS_STATS2 static int cifs_show_stats(struct seq_file *s, struct dentry *root) { /* BB FIXME */ return 0; } #endif static int cifs_remount(struct super_block *sb, int *flags, char *data) { *flags |= MS_NODIRATIME; return 0; } static int cifs_drop_inode(struct inode *inode) { struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); /* no serverino => unconditional eviction */ return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) || generic_drop_inode(inode); } static const struct super_operations cifs_super_ops = { .statfs = cifs_statfs, .alloc_inode = cifs_alloc_inode, .destroy_inode = cifs_destroy_inode, .drop_inode = cifs_drop_inode, .evict_inode = cifs_evict_inode, /* .delete_inode = cifs_delete_inode, */ /* Do not need above function unless later we add lazy close of inodes or unless the kernel forgets to call us with the same number of releases (closes) as opens */ .show_options = cifs_show_options, .umount_begin = cifs_umount_begin, .remount_fs = cifs_remount, #ifdef CONFIG_CIFS_STATS2 .show_stats = cifs_show_stats, #endif }; /* * Get root dentry from superblock according to prefix path mount option. * Return dentry with refcount + 1 on success and NULL otherwise. 
*/ static struct dentry * cifs_get_root(struct smb_vol *vol, struct super_block *sb) { struct dentry *dentry; struct cifs_sb_info *cifs_sb = CIFS_SB(sb); char *full_path = NULL; char *s, *p; char sep; full_path = cifs_build_path_to_root(vol, cifs_sb, cifs_sb_master_tcon(cifs_sb)); if (full_path == NULL) return ERR_PTR(-ENOMEM); cFYI(1, "Get root dentry for %s", full_path); sep = CIFS_DIR_SEP(cifs_sb); dentry = dget(sb->s_root); p = s = full_path; do { struct inode *dir = dentry->d_inode; struct dentry *child; if (!dir) { dput(dentry); dentry = ERR_PTR(-ENOENT); break; } /* skip separators */ while (*s == sep) s++; if (!*s) break; p = s++; /* next separator */ while (*s && *s != sep) s++; mutex_lock(&dir->i_mutex); child = lookup_one_len(p, dentry, s - p); mutex_unlock(&dir->i_mutex); dput(dentry); dentry = child; } while (!IS_ERR(dentry)); kfree(full_path); return dentry; } static int cifs_set_super(struct super_block *sb, void *data) { struct cifs_mnt_data *mnt_data = data; sb->s_fs_info = mnt_data->cifs_sb; return set_anon_super(sb, NULL); } static struct dentry * cifs_do_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { int rc; struct super_block *sb; struct cifs_sb_info *cifs_sb; struct smb_vol *volume_info; struct cifs_mnt_data mnt_data; struct dentry *root; cFYI(1, "Devname: %s flags: %d ", dev_name, flags); volume_info = cifs_get_volume_info((char *)data, dev_name); if (IS_ERR(volume_info)) return ERR_CAST(volume_info); cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL); if (cifs_sb == NULL) { root = ERR_PTR(-ENOMEM); goto out_nls; } cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL); if (cifs_sb->mountdata == NULL) { root = ERR_PTR(-ENOMEM); goto out_cifs_sb; } cifs_setup_cifs_sb(volume_info, cifs_sb); rc = cifs_mount(cifs_sb, volume_info); if (rc) { if (!(flags & MS_SILENT)) cERROR(1, "cifs_mount failed w/return code = %d", rc); root = ERR_PTR(rc); goto out_mountdata; } mnt_data.vol = volume_info; 
mnt_data.cifs_sb = cifs_sb; mnt_data.flags = flags; sb = sget(fs_type, cifs_match_super, cifs_set_super, &mnt_data); if (IS_ERR(sb)) { root = ERR_CAST(sb); cifs_umount(cifs_sb); goto out; } if (sb->s_root) { cFYI(1, "Use existing superblock"); cifs_umount(cifs_sb); } else { sb->s_flags = flags; /* BB should we make this contingent on mount parm? */ sb->s_flags |= MS_NODIRATIME | MS_NOATIME; rc = cifs_read_super(sb); if (rc) { root = ERR_PTR(rc); goto out_super; } sb->s_flags |= MS_ACTIVE; } root = cifs_get_root(volume_info, sb); if (IS_ERR(root)) goto out_super; cFYI(1, "dentry root is: %p", root); goto out; out_super: deactivate_locked_super(sb); out: cifs_cleanup_volume_info(volume_info); return root; out_mountdata: kfree(cifs_sb->mountdata); out_cifs_sb: kfree(cifs_sb); out_nls: unload_nls(volume_info->local_nls); goto out; } static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos) { struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode; ssize_t written; int rc; written = generic_file_aio_write(iocb, iov, nr_segs, pos); if (CIFS_I(inode)->clientCanCacheAll) return written; rc = filemap_fdatawrite(inode->i_mapping); if (rc) cFYI(1, "cifs_file_aio_write: %d rc on %p inode", rc, inode); return written; } static loff_t cifs_llseek(struct file *file, loff_t offset, int origin) { /* * origin == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate * the cached file length */ if (origin != SEEK_SET && origin != SEEK_CUR) { int rc; struct inode *inode = file->f_path.dentry->d_inode; /* * We need to be sure that all dirty pages are written and the * server has the newest file length. 
*/ if (!CIFS_I(inode)->clientCanCacheRead && inode->i_mapping && inode->i_mapping->nrpages != 0) { rc = filemap_fdatawait(inode->i_mapping); if (rc) { mapping_set_error(inode->i_mapping, rc); return rc; } } /* * Some applications poll for the file length in this strange * way so we must seek to end on non-oplocked files by * setting the revalidate time to zero. */ CIFS_I(inode)->time = 0; rc = cifs_revalidate_file_attr(file); if (rc < 0) return (loff_t)rc; } return generic_file_llseek(file, offset, origin); } static int cifs_setlease(struct file *file, long arg, struct file_lock **lease) { /* note that this is called by vfs setlease with lock_flocks held to protect *lease from going away */ struct inode *inode = file->f_path.dentry->d_inode; struct cifsFileInfo *cfile = file->private_data; if (!(S_ISREG(inode->i_mode))) return -EINVAL; /* check if file is oplocked */ if (((arg == F_RDLCK) && (CIFS_I(inode)->clientCanCacheRead)) || ((arg == F_WRLCK) && (CIFS_I(inode)->clientCanCacheAll))) return generic_setlease(file, arg, lease); else if (tlink_tcon(cfile->tlink)->local_lease && !CIFS_I(inode)->clientCanCacheRead) /* If the server claims to support oplock on this file, then we still need to check oplock even if the local_lease mount option is set, but there are servers which do not support oplock for which this mount option may be useful if the user knows that the file won't be changed on the server by anyone else */ return generic_setlease(file, arg, lease); else return -EAGAIN; } struct file_system_type cifs_fs_type = { .owner = THIS_MODULE, .name = "cifs", .mount = cifs_do_mount, .kill_sb = cifs_kill_sb, /* .fs_flags */ }; const struct inode_operations cifs_dir_inode_ops = { .create = cifs_create, .lookup = cifs_lookup, .getattr = cifs_getattr, .unlink = cifs_unlink, .link = cifs_hardlink, .mkdir = cifs_mkdir, .rmdir = cifs_rmdir, .rename = cifs_rename, .permission = cifs_permission, /* revalidate:cifs_revalidate, */ .setattr = cifs_setattr, .symlink = 
cifs_symlink, .mknod = cifs_mknod, #ifdef CONFIG_CIFS_XATTR .setxattr = cifs_setxattr, .getxattr = cifs_getxattr, .listxattr = cifs_listxattr, .removexattr = cifs_removexattr, #endif }; const struct inode_operations cifs_file_inode_ops = { /* revalidate:cifs_revalidate, */ .setattr = cifs_setattr, .getattr = cifs_getattr, /* do we need this anymore? */ .rename = cifs_rename, .permission = cifs_permission, #ifdef CONFIG_CIFS_XATTR .setxattr = cifs_setxattr, .getxattr = cifs_getxattr, .listxattr = cifs_listxattr, .removexattr = cifs_removexattr, #endif }; const struct inode_operations cifs_symlink_inode_ops = { .readlink = generic_readlink, .follow_link = cifs_follow_link, .put_link = cifs_put_link, .permission = cifs_permission, /* BB add the following two eventually */ /* revalidate: cifs_revalidate, setattr: cifs_notify_change, *//* BB do we need notify change */ #ifdef CONFIG_CIFS_XATTR .setxattr = cifs_setxattr, .getxattr = cifs_getxattr, .listxattr = cifs_listxattr, .removexattr = cifs_removexattr, #endif }; const struct file_operations cifs_file_ops = { .read = do_sync_read, .write = do_sync_write, .aio_read = generic_file_aio_read, .aio_write = cifs_file_aio_write, .open = cifs_open, .release = cifs_close, .lock = cifs_lock, .fsync = cifs_fsync, .flush = cifs_flush, .mmap = cifs_file_mmap, .splice_read = generic_file_splice_read, .llseek = cifs_llseek, #ifdef CONFIG_CIFS_POSIX .unlocked_ioctl = cifs_ioctl, #endif /* CONFIG_CIFS_POSIX */ .setlease = cifs_setlease, }; const struct file_operations cifs_file_strict_ops = { .read = do_sync_read, .write = do_sync_write, .aio_read = cifs_strict_readv, .aio_write = cifs_strict_writev, .open = cifs_open, .release = cifs_close, .lock = cifs_lock, .fsync = cifs_strict_fsync, .flush = cifs_flush, .mmap = cifs_file_strict_mmap, .splice_read = generic_file_splice_read, .llseek = cifs_llseek, #ifdef CONFIG_CIFS_POSIX .unlocked_ioctl = cifs_ioctl, #endif /* CONFIG_CIFS_POSIX */ .setlease = cifs_setlease, }; const struct 
file_operations cifs_file_direct_ops = { /* BB reevaluate whether they can be done with directio, no cache */ .read = do_sync_read, .write = do_sync_write, .aio_read = cifs_user_readv, .aio_write = cifs_user_writev, .open = cifs_open, .release = cifs_close, .lock = cifs_lock, .fsync = cifs_fsync, .flush = cifs_flush, .mmap = cifs_file_mmap, .splice_read = generic_file_splice_read, #ifdef CONFIG_CIFS_POSIX .unlocked_ioctl = cifs_ioctl, #endif /* CONFIG_CIFS_POSIX */ .llseek = cifs_llseek, .setlease = cifs_setlease, }; const struct file_operations cifs_file_nobrl_ops = { .read = do_sync_read, .write = do_sync_write, .aio_read = generic_file_aio_read, .aio_write = cifs_file_aio_write, .open = cifs_open, .release = cifs_close, .fsync = cifs_fsync, .flush = cifs_flush, .mmap = cifs_file_mmap, .splice_read = generic_file_splice_read, .llseek = cifs_llseek, #ifdef CONFIG_CIFS_POSIX .unlocked_ioctl = cifs_ioctl, #endif /* CONFIG_CIFS_POSIX */ .setlease = cifs_setlease, }; const struct file_operations cifs_file_strict_nobrl_ops = { .read = do_sync_read, .write = do_sync_write, .aio_read = cifs_strict_readv, .aio_write = cifs_strict_writev, .open = cifs_open, .release = cifs_close, .fsync = cifs_strict_fsync, .flush = cifs_flush, .mmap = cifs_file_strict_mmap, .splice_read = generic_file_splice_read, .llseek = cifs_llseek, #ifdef CONFIG_CIFS_POSIX .unlocked_ioctl = cifs_ioctl, #endif /* CONFIG_CIFS_POSIX */ .setlease = cifs_setlease, }; const struct file_operations cifs_file_direct_nobrl_ops = { /* BB reevaluate whether they can be done with directio, no cache */ .read = do_sync_read, .write = do_sync_write, .aio_read = cifs_user_readv, .aio_write = cifs_user_writev, .open = cifs_open, .release = cifs_close, .fsync = cifs_fsync, .flush = cifs_flush, .mmap = cifs_file_mmap, .splice_read = generic_file_splice_read, #ifdef CONFIG_CIFS_POSIX .unlocked_ioctl = cifs_ioctl, #endif /* CONFIG_CIFS_POSIX */ .llseek = cifs_llseek, .setlease = cifs_setlease, }; const struct 
file_operations cifs_dir_ops = { .readdir = cifs_readdir, .release = cifs_closedir, .read = generic_read_dir, .unlocked_ioctl = cifs_ioctl, .llseek = generic_file_llseek, }; static void cifs_init_once(void *inode) { struct cifsInodeInfo *cifsi = inode; inode_init_once(&cifsi->vfs_inode); INIT_LIST_HEAD(&cifsi->llist); mutex_init(&cifsi->lock_mutex); } static int cifs_init_inodecache(void) { cifs_inode_cachep = kmem_cache_create("cifs_inode_cache", sizeof(struct cifsInodeInfo), 0, (SLAB_RECLAIM_ACCOUNT| SLAB_MEM_SPREAD), cifs_init_once); if (cifs_inode_cachep == NULL) return -ENOMEM; return 0; } static void cifs_destroy_inodecache(void) { kmem_cache_destroy(cifs_inode_cachep); } static int cifs_init_request_bufs(void) { if (CIFSMaxBufSize < 8192) { /* Buffer size can not be smaller than 2 * PATH_MAX since maximum Unicode path name has to fit in any SMB/CIFS path based frames */ CIFSMaxBufSize = 8192; } else if (CIFSMaxBufSize > 1024*127) { CIFSMaxBufSize = 1024 * 127; } else { CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/ } /* cERROR(1, "CIFSMaxBufSize %d 0x%x",CIFSMaxBufSize,CIFSMaxBufSize); */ cifs_req_cachep = kmem_cache_create("cifs_request", CIFSMaxBufSize + MAX_CIFS_HDR_SIZE, 0, SLAB_HWCACHE_ALIGN, NULL); if (cifs_req_cachep == NULL) return -ENOMEM; if (cifs_min_rcv < 1) cifs_min_rcv = 1; else if (cifs_min_rcv > 64) { cifs_min_rcv = 64; cERROR(1, "cifs_min_rcv set to maximum (64)"); } cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv, cifs_req_cachep); if (cifs_req_poolp == NULL) { kmem_cache_destroy(cifs_req_cachep); return -ENOMEM; } /* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and almost all handle based requests (but not write response, nor is it sufficient for path based requests). 
A smaller size would have been more efficient (compacting multiple slab items on one 4k page) for the case in which debug was on, but this larger size allows more SMBs to use small buffer alloc and is still much more efficient to alloc 1 per page off the slab compared to 17K (5page) alloc of large cifs buffers even when page debugging is on */ cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq", MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN, NULL); if (cifs_sm_req_cachep == NULL) { mempool_destroy(cifs_req_poolp); kmem_cache_destroy(cifs_req_cachep); return -ENOMEM; } if (cifs_min_small < 2) cifs_min_small = 2; else if (cifs_min_small > 256) { cifs_min_small = 256; cFYI(1, "cifs_min_small set to maximum (256)"); } cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small, cifs_sm_req_cachep); if (cifs_sm_req_poolp == NULL) { mempool_destroy(cifs_req_poolp); kmem_cache_destroy(cifs_req_cachep); kmem_cache_destroy(cifs_sm_req_cachep); return -ENOMEM; } return 0; } static void cifs_destroy_request_bufs(void) { mempool_destroy(cifs_req_poolp); kmem_cache_destroy(cifs_req_cachep); mempool_destroy(cifs_sm_req_poolp); kmem_cache_destroy(cifs_sm_req_cachep); } static int cifs_init_mids(void) { cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids", sizeof(struct mid_q_entry), 0, SLAB_HWCACHE_ALIGN, NULL); if (cifs_mid_cachep == NULL) return -ENOMEM; /* 3 is a reasonable minimum number of simultaneous operations */ cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep); if (cifs_mid_poolp == NULL) { kmem_cache_destroy(cifs_mid_cachep); return -ENOMEM; } return 0; } static void cifs_destroy_mids(void) { mempool_destroy(cifs_mid_poolp); kmem_cache_destroy(cifs_mid_cachep); } static int __init init_cifs(void) { int rc = 0; cifs_proc_init(); INIT_LIST_HEAD(&cifs_tcp_ses_list); #ifdef CONFIG_CIFS_DNOTIFY_EXPERIMENTAL /* unused temporarily */ INIT_LIST_HEAD(&GlobalDnotifyReqList); INIT_LIST_HEAD(&GlobalDnotifyRsp_Q); #endif /* was needed for dnotify, and will be 
needed for inotify when VFS fix */ /* * Initialize Global counters */ atomic_set(&sesInfoAllocCount, 0); atomic_set(&tconInfoAllocCount, 0); atomic_set(&tcpSesAllocCount, 0); atomic_set(&tcpSesReconnectCount, 0); atomic_set(&tconInfoReconnectCount, 0); atomic_set(&bufAllocCount, 0); atomic_set(&smBufAllocCount, 0); #ifdef CONFIG_CIFS_STATS2 atomic_set(&totBufAllocCount, 0); atomic_set(&totSmBufAllocCount, 0); #endif /* CONFIG_CIFS_STATS2 */ atomic_set(&midCount, 0); GlobalCurrentXid = 0; GlobalTotalActiveXid = 0; GlobalMaxActiveXid = 0; spin_lock_init(&cifs_tcp_ses_lock); spin_lock_init(&cifs_file_list_lock); spin_lock_init(&GlobalMid_Lock); if (cifs_max_pending < 2) { cifs_max_pending = 2; cFYI(1, "cifs_max_pending set to min of 2"); } else if (cifs_max_pending > CIFS_MAX_REQ) { cifs_max_pending = CIFS_MAX_REQ; cFYI(1, "cifs_max_pending set to max of %u", CIFS_MAX_REQ); } cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0); if (!cifsiod_wq) { rc = -ENOMEM; goto out_clean_proc; } rc = cifs_fscache_register(); if (rc) goto out_destroy_wq; rc = cifs_init_inodecache(); if (rc) goto out_unreg_fscache; rc = cifs_init_mids(); if (rc) goto out_destroy_inodecache; rc = cifs_init_request_bufs(); if (rc) goto out_destroy_mids; #ifdef CONFIG_CIFS_UPCALL rc = register_key_type(&cifs_spnego_key_type); if (rc) goto out_destroy_request_bufs; #endif /* CONFIG_CIFS_UPCALL */ #ifdef CONFIG_CIFS_ACL rc = init_cifs_idmap(); if (rc) goto out_register_key_type; #endif /* CONFIG_CIFS_ACL */ rc = register_filesystem(&cifs_fs_type); if (rc) goto out_init_cifs_idmap; return 0; out_init_cifs_idmap: #ifdef CONFIG_CIFS_ACL exit_cifs_idmap(); out_register_key_type: #endif #ifdef CONFIG_CIFS_UPCALL unregister_key_type(&cifs_spnego_key_type); out_destroy_request_bufs: #endif cifs_destroy_request_bufs(); out_destroy_mids: cifs_destroy_mids(); out_destroy_inodecache: cifs_destroy_inodecache(); out_unreg_fscache: cifs_fscache_unregister(); out_destroy_wq: 
destroy_workqueue(cifsiod_wq); out_clean_proc: cifs_proc_clean(); return rc; } static void __exit exit_cifs(void) { cFYI(DBG2, "exit_cifs"); unregister_filesystem(&cifs_fs_type); cifs_dfs_release_automount_timer(); #ifdef CONFIG_CIFS_ACL cifs_destroy_idmaptrees(); exit_cifs_idmap(); #endif #ifdef CONFIG_CIFS_UPCALL unregister_key_type(&cifs_spnego_key_type); #endif cifs_destroy_request_bufs(); cifs_destroy_mids(); cifs_destroy_inodecache(); cifs_fscache_unregister(); destroy_workqueue(cifsiod_wq); cifs_proc_clean(); } MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>"); MODULE_LICENSE("GPL"); /* combination of LGPL + GPL source behaves as GPL */ MODULE_DESCRIPTION ("VFS to access servers complying with the SNIA CIFS Specification " "e.g. Samba and Windows"); MODULE_VERSION(CIFS_VERSION); module_init(init_cifs) module_exit(exit_cifs)
gpl-2.0
Buckmarble/Elite_Lunar_kernel
arch/powerpc/platforms/85xx/p4080_ds.c
4545
2211
/* * P4080 DS Setup * * Maintained by Kumar Gala (see MAINTAINERS for contact information) * * Copyright 2009 Freescale Semiconductor Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/kdev_t.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> #include <mm/mmu_decl.h> #include <asm/prom.h> #include <asm/udbg.h> #include <asm/mpic.h> #include <linux/of_platform.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> #include <asm/ehv_pic.h> #include "corenet_ds.h" /* * Called very early, device-tree isn't unflattened */ static int __init p4080_ds_probe(void) { unsigned long root = of_get_flat_dt_root(); #ifdef CONFIG_SMP extern struct smp_ops_t smp_85xx_ops; #endif if (of_flat_dt_is_compatible(root, "fsl,P4080DS")) return 1; /* Check if we're running under the Freescale hypervisor */ if (of_flat_dt_is_compatible(root, "fsl,P4080DS-hv")) { ppc_md.init_IRQ = ehv_pic_init; ppc_md.get_irq = ehv_pic_get_irq; ppc_md.restart = fsl_hv_restart; ppc_md.power_off = fsl_hv_halt; ppc_md.halt = fsl_hv_halt; #ifdef CONFIG_SMP /* * Disable the timebase sync operations because we can't write * to the timebase registers under the hypervisor. 
*/ smp_85xx_ops.give_timebase = NULL; smp_85xx_ops.take_timebase = NULL; #endif return 1; } return 0; } define_machine(p4080_ds) { .name = "P4080 DS", .probe = p4080_ds_probe, .setup_arch = corenet_ds_setup_arch, .init_IRQ = corenet_ds_pic_init, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, #endif .get_irq = mpic_get_coreint_irq, .restart = fsl_rstcr_restart, .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, .power_save = e500_idle, }; machine_device_initcall(p4080_ds, corenet_ds_publish_devices); #ifdef CONFIG_SWIOTLB machine_arch_initcall(p4080_ds, swiotlb_setup_bus_notifier); #endif
gpl-2.0
flar2/flo-ElementalX
drivers/net/wireless/mwl8k.c
4801
145308
/* * drivers/net/wireless/mwl8k.c * Driver for Marvell TOPDOG 802.11 Wireless cards * * Copyright (C) 2008, 2009, 2010 Marvell Semiconductor Inc. * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/list.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/completion.h> #include <linux/etherdevice.h> #include <linux/slab.h> #include <net/mac80211.h> #include <linux/moduleparam.h> #include <linux/firmware.h> #include <linux/workqueue.h> #define MWL8K_DESC "Marvell TOPDOG(R) 802.11 Wireless Network Driver" #define MWL8K_NAME KBUILD_MODNAME #define MWL8K_VERSION "0.13" /* Module parameters */ static bool ap_mode_default; module_param(ap_mode_default, bool, 0); MODULE_PARM_DESC(ap_mode_default, "Set to 1 to make ap mode the default instead of sta mode"); /* Register definitions */ #define MWL8K_HIU_GEN_PTR 0x00000c10 #define MWL8K_MODE_STA 0x0000005a #define MWL8K_MODE_AP 0x000000a5 #define MWL8K_HIU_INT_CODE 0x00000c14 #define MWL8K_FWSTA_READY 0xf0f1f2f4 #define MWL8K_FWAP_READY 0xf1f2f4a5 #define MWL8K_INT_CODE_CMD_FINISHED 0x00000005 #define MWL8K_HIU_SCRATCH 0x00000c40 /* Host->device communications */ #define MWL8K_HIU_H2A_INTERRUPT_EVENTS 0x00000c18 #define MWL8K_HIU_H2A_INTERRUPT_STATUS 0x00000c1c #define MWL8K_HIU_H2A_INTERRUPT_MASK 0x00000c20 #define MWL8K_HIU_H2A_INTERRUPT_CLEAR_SEL 0x00000c24 #define MWL8K_HIU_H2A_INTERRUPT_STATUS_MASK 0x00000c28 #define MWL8K_H2A_INT_DUMMY (1 << 20) #define MWL8K_H2A_INT_RESET (1 << 15) #define MWL8K_H2A_INT_DOORBELL (1 << 1) #define MWL8K_H2A_INT_PPA_READY (1 << 0) /* Device->host communications */ #define MWL8K_HIU_A2H_INTERRUPT_EVENTS 0x00000c2c #define MWL8K_HIU_A2H_INTERRUPT_STATUS 0x00000c30 #define 
MWL8K_HIU_A2H_INTERRUPT_MASK 0x00000c34 #define MWL8K_HIU_A2H_INTERRUPT_CLEAR_SEL 0x00000c38 #define MWL8K_HIU_A2H_INTERRUPT_STATUS_MASK 0x00000c3c #define MWL8K_A2H_INT_DUMMY (1 << 20) #define MWL8K_A2H_INT_BA_WATCHDOG (1 << 14) #define MWL8K_A2H_INT_CHNL_SWITCHED (1 << 11) #define MWL8K_A2H_INT_QUEUE_EMPTY (1 << 10) #define MWL8K_A2H_INT_RADAR_DETECT (1 << 7) #define MWL8K_A2H_INT_RADIO_ON (1 << 6) #define MWL8K_A2H_INT_RADIO_OFF (1 << 5) #define MWL8K_A2H_INT_MAC_EVENT (1 << 3) #define MWL8K_A2H_INT_OPC_DONE (1 << 2) #define MWL8K_A2H_INT_RX_READY (1 << 1) #define MWL8K_A2H_INT_TX_DONE (1 << 0) /* HW micro second timer register * located at offset 0xA600. This * will be used to timestamp tx * packets. */ #define MWL8K_HW_TIMER_REGISTER 0x0000a600 #define MWL8K_A2H_EVENTS (MWL8K_A2H_INT_DUMMY | \ MWL8K_A2H_INT_CHNL_SWITCHED | \ MWL8K_A2H_INT_QUEUE_EMPTY | \ MWL8K_A2H_INT_RADAR_DETECT | \ MWL8K_A2H_INT_RADIO_ON | \ MWL8K_A2H_INT_RADIO_OFF | \ MWL8K_A2H_INT_MAC_EVENT | \ MWL8K_A2H_INT_OPC_DONE | \ MWL8K_A2H_INT_RX_READY | \ MWL8K_A2H_INT_TX_DONE | \ MWL8K_A2H_INT_BA_WATCHDOG) #define MWL8K_RX_QUEUES 1 #define MWL8K_TX_WMM_QUEUES 4 #define MWL8K_MAX_AMPDU_QUEUES 8 #define MWL8K_MAX_TX_QUEUES (MWL8K_TX_WMM_QUEUES + MWL8K_MAX_AMPDU_QUEUES) #define mwl8k_tx_queues(priv) (MWL8K_TX_WMM_QUEUES + (priv)->num_ampdu_queues) struct rxd_ops { int rxd_size; void (*rxd_init)(void *rxd, dma_addr_t next_dma_addr); void (*rxd_refill)(void *rxd, dma_addr_t addr, int len); int (*rxd_process)(void *rxd, struct ieee80211_rx_status *status, __le16 *qos, s8 *noise); }; struct mwl8k_device_info { char *part_name; char *helper_image; char *fw_image_sta; char *fw_image_ap; struct rxd_ops *ap_rxd_ops; u32 fw_api_ap; }; struct mwl8k_rx_queue { int rxd_count; /* hw receives here */ int head; /* refill descs here */ int tail; void *rxd; dma_addr_t rxd_dma; struct { struct sk_buff *skb; DEFINE_DMA_UNMAP_ADDR(dma); } *buf; }; struct mwl8k_tx_queue { /* hw transmits here */ int head; /* sw appends 
here */ int tail; unsigned int len; struct mwl8k_tx_desc *txd; dma_addr_t txd_dma; struct sk_buff **skb; }; enum { AMPDU_NO_STREAM, AMPDU_STREAM_NEW, AMPDU_STREAM_IN_PROGRESS, AMPDU_STREAM_ACTIVE, }; struct mwl8k_ampdu_stream { struct ieee80211_sta *sta; u8 tid; u8 state; u8 idx; u8 txq_idx; /* index of this stream in priv->txq */ }; struct mwl8k_priv { struct ieee80211_hw *hw; struct pci_dev *pdev; int irq; struct mwl8k_device_info *device_info; void __iomem *sram; void __iomem *regs; /* firmware */ const struct firmware *fw_helper; const struct firmware *fw_ucode; /* hardware/firmware parameters */ bool ap_fw; struct rxd_ops *rxd_ops; struct ieee80211_supported_band band_24; struct ieee80211_channel channels_24[14]; struct ieee80211_rate rates_24[14]; struct ieee80211_supported_band band_50; struct ieee80211_channel channels_50[4]; struct ieee80211_rate rates_50[9]; u32 ap_macids_supported; u32 sta_macids_supported; /* Ampdu stream information */ u8 num_ampdu_queues; spinlock_t stream_lock; struct mwl8k_ampdu_stream ampdu[MWL8K_MAX_AMPDU_QUEUES]; struct work_struct watchdog_ba_handle; /* firmware access */ struct mutex fw_mutex; struct task_struct *fw_mutex_owner; struct task_struct *hw_restart_owner; int fw_mutex_depth; struct completion *hostcmd_wait; /* lock held over TX and TX reap */ spinlock_t tx_lock; /* TX quiesce completion, protected by fw_mutex and tx_lock */ struct completion *tx_wait; /* List of interfaces. */ u32 macids_used; struct list_head vif_list; /* power management status cookie from firmware */ u32 *cookie; dma_addr_t cookie_dma; u16 num_mcaddrs; u8 hw_rev; u32 fw_rev; /* * Running count of TX packets in flight, to avoid * iterating over the transmit rings each time. 
*/ int pending_tx_pkts; struct mwl8k_rx_queue rxq[MWL8K_RX_QUEUES]; struct mwl8k_tx_queue txq[MWL8K_MAX_TX_QUEUES]; u32 txq_offset[MWL8K_MAX_TX_QUEUES]; bool radio_on; bool radio_short_preamble; bool sniffer_enabled; bool wmm_enabled; /* XXX need to convert this to handle multiple interfaces */ bool capture_beacon; u8 capture_bssid[ETH_ALEN]; struct sk_buff *beacon_skb; /* * This FJ worker has to be global as it is scheduled from the * RX handler. At this point we don't know which interface it * belongs to until the list of bssids waiting to complete join * is checked. */ struct work_struct finalize_join_worker; /* Tasklet to perform TX reclaim. */ struct tasklet_struct poll_tx_task; /* Tasklet to perform RX. */ struct tasklet_struct poll_rx_task; /* Most recently reported noise in dBm */ s8 noise; /* * preserve the queue configurations so they can be restored if/when * the firmware image is swapped. */ struct ieee80211_tx_queue_params wmm_params[MWL8K_TX_WMM_QUEUES]; /* To perform the task of reloading the firmware */ struct work_struct fw_reload; bool hw_restart_in_progress; /* async firmware loading state */ unsigned fw_state; char *fw_pref; char *fw_alt; struct completion firmware_loading_complete; }; #define MAX_WEP_KEY_LEN 13 #define NUM_WEP_KEYS 4 /* Per interface specific private data */ struct mwl8k_vif { struct list_head list; struct ieee80211_vif *vif; /* Firmware macid for this vif. */ int macid; /* Non AMPDU sequence number assigned by driver. 
*/ u16 seqno; /* Saved WEP keys */ struct { u8 enabled; u8 key[sizeof(struct ieee80211_key_conf) + MAX_WEP_KEY_LEN]; } wep_key_conf[NUM_WEP_KEYS]; /* BSSID */ u8 bssid[ETH_ALEN]; /* A flag to indicate is HW crypto is enabled for this bssid */ bool is_hw_crypto_enabled; }; #define MWL8K_VIF(_vif) ((struct mwl8k_vif *)&((_vif)->drv_priv)) #define IEEE80211_KEY_CONF(_u8) ((struct ieee80211_key_conf *)(_u8)) struct tx_traffic_info { u32 start_time; u32 pkts; }; #define MWL8K_MAX_TID 8 struct mwl8k_sta { /* Index into station database. Returned by UPDATE_STADB. */ u8 peer_id; u8 is_ampdu_allowed; struct tx_traffic_info tx_stats[MWL8K_MAX_TID]; }; #define MWL8K_STA(_sta) ((struct mwl8k_sta *)&((_sta)->drv_priv)) static const struct ieee80211_channel mwl8k_channels_24[] = { { .center_freq = 2412, .hw_value = 1, }, { .center_freq = 2417, .hw_value = 2, }, { .center_freq = 2422, .hw_value = 3, }, { .center_freq = 2427, .hw_value = 4, }, { .center_freq = 2432, .hw_value = 5, }, { .center_freq = 2437, .hw_value = 6, }, { .center_freq = 2442, .hw_value = 7, }, { .center_freq = 2447, .hw_value = 8, }, { .center_freq = 2452, .hw_value = 9, }, { .center_freq = 2457, .hw_value = 10, }, { .center_freq = 2462, .hw_value = 11, }, { .center_freq = 2467, .hw_value = 12, }, { .center_freq = 2472, .hw_value = 13, }, { .center_freq = 2484, .hw_value = 14, }, }; static const struct ieee80211_rate mwl8k_rates_24[] = { { .bitrate = 10, .hw_value = 2, }, { .bitrate = 20, .hw_value = 4, }, { .bitrate = 55, .hw_value = 11, }, { .bitrate = 110, .hw_value = 22, }, { .bitrate = 220, .hw_value = 44, }, { .bitrate = 60, .hw_value = 12, }, { .bitrate = 90, .hw_value = 18, }, { .bitrate = 120, .hw_value = 24, }, { .bitrate = 180, .hw_value = 36, }, { .bitrate = 240, .hw_value = 48, }, { .bitrate = 360, .hw_value = 72, }, { .bitrate = 480, .hw_value = 96, }, { .bitrate = 540, .hw_value = 108, }, { .bitrate = 720, .hw_value = 144, }, }; static const struct ieee80211_channel mwl8k_channels_50[] = { { 
.center_freq = 5180, .hw_value = 36, }, { .center_freq = 5200, .hw_value = 40, }, { .center_freq = 5220, .hw_value = 44, }, { .center_freq = 5240, .hw_value = 48, }, }; static const struct ieee80211_rate mwl8k_rates_50[] = { { .bitrate = 60, .hw_value = 12, }, { .bitrate = 90, .hw_value = 18, }, { .bitrate = 120, .hw_value = 24, }, { .bitrate = 180, .hw_value = 36, }, { .bitrate = 240, .hw_value = 48, }, { .bitrate = 360, .hw_value = 72, }, { .bitrate = 480, .hw_value = 96, }, { .bitrate = 540, .hw_value = 108, }, { .bitrate = 720, .hw_value = 144, }, }; /* Set or get info from Firmware */ #define MWL8K_CMD_GET 0x0000 #define MWL8K_CMD_SET 0x0001 #define MWL8K_CMD_SET_LIST 0x0002 /* Firmware command codes */ #define MWL8K_CMD_CODE_DNLD 0x0001 #define MWL8K_CMD_GET_HW_SPEC 0x0003 #define MWL8K_CMD_SET_HW_SPEC 0x0004 #define MWL8K_CMD_MAC_MULTICAST_ADR 0x0010 #define MWL8K_CMD_GET_STAT 0x0014 #define MWL8K_CMD_RADIO_CONTROL 0x001c #define MWL8K_CMD_RF_TX_POWER 0x001e #define MWL8K_CMD_TX_POWER 0x001f #define MWL8K_CMD_RF_ANTENNA 0x0020 #define MWL8K_CMD_SET_BEACON 0x0100 /* per-vif */ #define MWL8K_CMD_SET_PRE_SCAN 0x0107 #define MWL8K_CMD_SET_POST_SCAN 0x0108 #define MWL8K_CMD_SET_RF_CHANNEL 0x010a #define MWL8K_CMD_SET_AID 0x010d #define MWL8K_CMD_SET_RATE 0x0110 #define MWL8K_CMD_SET_FINALIZE_JOIN 0x0111 #define MWL8K_CMD_RTS_THRESHOLD 0x0113 #define MWL8K_CMD_SET_SLOT 0x0114 #define MWL8K_CMD_SET_EDCA_PARAMS 0x0115 #define MWL8K_CMD_SET_WMM_MODE 0x0123 #define MWL8K_CMD_MIMO_CONFIG 0x0125 #define MWL8K_CMD_USE_FIXED_RATE 0x0126 #define MWL8K_CMD_ENABLE_SNIFFER 0x0150 #define MWL8K_CMD_SET_MAC_ADDR 0x0202 /* per-vif */ #define MWL8K_CMD_SET_RATEADAPT_MODE 0x0203 #define MWL8K_CMD_GET_WATCHDOG_BITMAP 0x0205 #define MWL8K_CMD_DEL_MAC_ADDR 0x0206 /* per-vif */ #define MWL8K_CMD_BSS_START 0x1100 /* per-vif */ #define MWL8K_CMD_SET_NEW_STN 0x1111 /* per-vif */ #define MWL8K_CMD_UPDATE_ENCRYPTION 0x1122 /* per-vif */ #define MWL8K_CMD_UPDATE_STADB 0x1123 #define 
MWL8K_CMD_BASTREAM 0x1125 static const char *mwl8k_cmd_name(__le16 cmd, char *buf, int bufsize) { u16 command = le16_to_cpu(cmd); #define MWL8K_CMDNAME(x) case MWL8K_CMD_##x: do {\ snprintf(buf, bufsize, "%s", #x);\ return buf;\ } while (0) switch (command & ~0x8000) { MWL8K_CMDNAME(CODE_DNLD); MWL8K_CMDNAME(GET_HW_SPEC); MWL8K_CMDNAME(SET_HW_SPEC); MWL8K_CMDNAME(MAC_MULTICAST_ADR); MWL8K_CMDNAME(GET_STAT); MWL8K_CMDNAME(RADIO_CONTROL); MWL8K_CMDNAME(RF_TX_POWER); MWL8K_CMDNAME(TX_POWER); MWL8K_CMDNAME(RF_ANTENNA); MWL8K_CMDNAME(SET_BEACON); MWL8K_CMDNAME(SET_PRE_SCAN); MWL8K_CMDNAME(SET_POST_SCAN); MWL8K_CMDNAME(SET_RF_CHANNEL); MWL8K_CMDNAME(SET_AID); MWL8K_CMDNAME(SET_RATE); MWL8K_CMDNAME(SET_FINALIZE_JOIN); MWL8K_CMDNAME(RTS_THRESHOLD); MWL8K_CMDNAME(SET_SLOT); MWL8K_CMDNAME(SET_EDCA_PARAMS); MWL8K_CMDNAME(SET_WMM_MODE); MWL8K_CMDNAME(MIMO_CONFIG); MWL8K_CMDNAME(USE_FIXED_RATE); MWL8K_CMDNAME(ENABLE_SNIFFER); MWL8K_CMDNAME(SET_MAC_ADDR); MWL8K_CMDNAME(SET_RATEADAPT_MODE); MWL8K_CMDNAME(BSS_START); MWL8K_CMDNAME(SET_NEW_STN); MWL8K_CMDNAME(UPDATE_ENCRYPTION); MWL8K_CMDNAME(UPDATE_STADB); MWL8K_CMDNAME(BASTREAM); MWL8K_CMDNAME(GET_WATCHDOG_BITMAP); default: snprintf(buf, bufsize, "0x%x", cmd); } #undef MWL8K_CMDNAME return buf; } /* Hardware and firmware reset */ static void mwl8k_hw_reset(struct mwl8k_priv *priv) { iowrite32(MWL8K_H2A_INT_RESET, priv->regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS); iowrite32(MWL8K_H2A_INT_RESET, priv->regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS); msleep(20); } /* Release fw image */ static void mwl8k_release_fw(const struct firmware **fw) { if (*fw == NULL) return; release_firmware(*fw); *fw = NULL; } static void mwl8k_release_firmware(struct mwl8k_priv *priv) { mwl8k_release_fw(&priv->fw_ucode); mwl8k_release_fw(&priv->fw_helper); } /* states for asynchronous f/w loading */ static void mwl8k_fw_state_machine(const struct firmware *fw, void *context); enum { FW_STATE_INIT = 0, FW_STATE_LOADING_PREF, FW_STATE_LOADING_ALT, FW_STATE_ERROR, }; /* 
Request fw image */ static int mwl8k_request_fw(struct mwl8k_priv *priv, const char *fname, const struct firmware **fw, bool nowait) { /* release current image */ if (*fw != NULL) mwl8k_release_fw(fw); if (nowait) return request_firmware_nowait(THIS_MODULE, 1, fname, &priv->pdev->dev, GFP_KERNEL, priv, mwl8k_fw_state_machine); else return request_firmware(fw, fname, &priv->pdev->dev); } static int mwl8k_request_firmware(struct mwl8k_priv *priv, char *fw_image, bool nowait) { struct mwl8k_device_info *di = priv->device_info; int rc; if (di->helper_image != NULL) { if (nowait) rc = mwl8k_request_fw(priv, di->helper_image, &priv->fw_helper, true); else rc = mwl8k_request_fw(priv, di->helper_image, &priv->fw_helper, false); if (rc) printk(KERN_ERR "%s: Error requesting helper fw %s\n", pci_name(priv->pdev), di->helper_image); if (rc || nowait) return rc; } if (nowait) { /* * if we get here, no helper image is needed. Skip the * FW_STATE_INIT state. */ priv->fw_state = FW_STATE_LOADING_PREF; rc = mwl8k_request_fw(priv, fw_image, &priv->fw_ucode, true); } else rc = mwl8k_request_fw(priv, fw_image, &priv->fw_ucode, false); if (rc) { printk(KERN_ERR "%s: Error requesting firmware file %s\n", pci_name(priv->pdev), fw_image); mwl8k_release_fw(&priv->fw_helper); return rc; } return 0; } struct mwl8k_cmd_pkt { __le16 code; __le16 length; __u8 seq_num; __u8 macid; __le16 result; char payload[0]; } __packed; /* * Firmware loading. 
*/ static int mwl8k_send_fw_load_cmd(struct mwl8k_priv *priv, void *data, int length) { void __iomem *regs = priv->regs; dma_addr_t dma_addr; int loops; dma_addr = pci_map_single(priv->pdev, data, length, PCI_DMA_TODEVICE); if (pci_dma_mapping_error(priv->pdev, dma_addr)) return -ENOMEM; iowrite32(dma_addr, regs + MWL8K_HIU_GEN_PTR); iowrite32(0, regs + MWL8K_HIU_INT_CODE); iowrite32(MWL8K_H2A_INT_DOORBELL, regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS); iowrite32(MWL8K_H2A_INT_DUMMY, regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS); loops = 1000; do { u32 int_code; int_code = ioread32(regs + MWL8K_HIU_INT_CODE); if (int_code == MWL8K_INT_CODE_CMD_FINISHED) { iowrite32(0, regs + MWL8K_HIU_INT_CODE); break; } cond_resched(); udelay(1); } while (--loops); pci_unmap_single(priv->pdev, dma_addr, length, PCI_DMA_TODEVICE); return loops ? 0 : -ETIMEDOUT; } static int mwl8k_load_fw_image(struct mwl8k_priv *priv, const u8 *data, size_t length) { struct mwl8k_cmd_pkt *cmd; int done; int rc = 0; cmd = kmalloc(sizeof(*cmd) + 256, GFP_KERNEL); if (cmd == NULL) return -ENOMEM; cmd->code = cpu_to_le16(MWL8K_CMD_CODE_DNLD); cmd->seq_num = 0; cmd->macid = 0; cmd->result = 0; done = 0; while (length) { int block_size = length > 256 ? 
256 : length; memcpy(cmd->payload, data + done, block_size); cmd->length = cpu_to_le16(block_size); rc = mwl8k_send_fw_load_cmd(priv, cmd, sizeof(*cmd) + block_size); if (rc) break; done += block_size; length -= block_size; } if (!rc) { cmd->length = 0; rc = mwl8k_send_fw_load_cmd(priv, cmd, sizeof(*cmd)); } kfree(cmd); return rc; } static int mwl8k_feed_fw_image(struct mwl8k_priv *priv, const u8 *data, size_t length) { unsigned char *buffer; int may_continue, rc = 0; u32 done, prev_block_size; buffer = kmalloc(1024, GFP_KERNEL); if (buffer == NULL) return -ENOMEM; done = 0; prev_block_size = 0; may_continue = 1000; while (may_continue > 0) { u32 block_size; block_size = ioread32(priv->regs + MWL8K_HIU_SCRATCH); if (block_size & 1) { block_size &= ~1; may_continue--; } else { done += prev_block_size; length -= prev_block_size; } if (block_size > 1024 || block_size > length) { rc = -EOVERFLOW; break; } if (length == 0) { rc = 0; break; } if (block_size == 0) { rc = -EPROTO; may_continue--; udelay(1); continue; } prev_block_size = block_size; memcpy(buffer, data + done, block_size); rc = mwl8k_send_fw_load_cmd(priv, buffer, block_size); if (rc) break; } if (!rc && length != 0) rc = -EREMOTEIO; kfree(buffer); return rc; } static int mwl8k_load_firmware(struct ieee80211_hw *hw) { struct mwl8k_priv *priv = hw->priv; const struct firmware *fw = priv->fw_ucode; int rc; int loops; if (!memcmp(fw->data, "\x01\x00\x00\x00", 4)) { const struct firmware *helper = priv->fw_helper; if (helper == NULL) { printk(KERN_ERR "%s: helper image needed but none " "given\n", pci_name(priv->pdev)); return -EINVAL; } rc = mwl8k_load_fw_image(priv, helper->data, helper->size); if (rc) { printk(KERN_ERR "%s: unable to load firmware " "helper image\n", pci_name(priv->pdev)); return rc; } msleep(20); rc = mwl8k_feed_fw_image(priv, fw->data, fw->size); } else { rc = mwl8k_load_fw_image(priv, fw->data, fw->size); } if (rc) { printk(KERN_ERR "%s: unable to load firmware image\n", 
pci_name(priv->pdev)); return rc; } iowrite32(MWL8K_MODE_STA, priv->regs + MWL8K_HIU_GEN_PTR); loops = 500000; do { u32 ready_code; ready_code = ioread32(priv->regs + MWL8K_HIU_INT_CODE); if (ready_code == MWL8K_FWAP_READY) { priv->ap_fw = true; break; } else if (ready_code == MWL8K_FWSTA_READY) { priv->ap_fw = false; break; } cond_resched(); udelay(1); } while (--loops); return loops ? 0 : -ETIMEDOUT; } /* DMA header used by firmware and hardware. */ struct mwl8k_dma_data { __le16 fwlen; struct ieee80211_hdr wh; char data[0]; } __packed; /* Routines to add/remove DMA header from skb. */ static inline void mwl8k_remove_dma_header(struct sk_buff *skb, __le16 qos) { struct mwl8k_dma_data *tr; int hdrlen; tr = (struct mwl8k_dma_data *)skb->data; hdrlen = ieee80211_hdrlen(tr->wh.frame_control); if (hdrlen != sizeof(tr->wh)) { if (ieee80211_is_data_qos(tr->wh.frame_control)) { memmove(tr->data - hdrlen, &tr->wh, hdrlen - 2); *((__le16 *)(tr->data - 2)) = qos; } else { memmove(tr->data - hdrlen, &tr->wh, hdrlen); } } if (hdrlen != sizeof(*tr)) skb_pull(skb, sizeof(*tr) - hdrlen); } #define REDUCED_TX_HEADROOM 8 static void mwl8k_add_dma_header(struct mwl8k_priv *priv, struct sk_buff *skb, int head_pad, int tail_pad) { struct ieee80211_hdr *wh; int hdrlen; int reqd_hdrlen; struct mwl8k_dma_data *tr; /* * Add a firmware DMA header; the firmware requires that we * present a 2-byte payload length followed by a 4-address * header (without QoS field), followed (optionally) by any * WEP/ExtIV header (but only filled in for CCMP). */ wh = (struct ieee80211_hdr *)skb->data; hdrlen = ieee80211_hdrlen(wh->frame_control); /* * Check if skb_resize is required because of * tx_headroom adjustment. 
*/ if (priv->ap_fw && (hdrlen < (sizeof(struct ieee80211_cts) + REDUCED_TX_HEADROOM))) { if (pskb_expand_head(skb, REDUCED_TX_HEADROOM, 0, GFP_ATOMIC)) { wiphy_err(priv->hw->wiphy, "Failed to reallocate TX buffer\n"); return; } skb->truesize += REDUCED_TX_HEADROOM; } reqd_hdrlen = sizeof(*tr) + head_pad; if (hdrlen != reqd_hdrlen) skb_push(skb, reqd_hdrlen - hdrlen); if (ieee80211_is_data_qos(wh->frame_control)) hdrlen -= IEEE80211_QOS_CTL_LEN; tr = (struct mwl8k_dma_data *)skb->data; if (wh != &tr->wh) memmove(&tr->wh, wh, hdrlen); if (hdrlen != sizeof(tr->wh)) memset(((void *)&tr->wh) + hdrlen, 0, sizeof(tr->wh) - hdrlen); /* * Firmware length is the length of the fully formed "802.11 * payload". That is, everything except for the 802.11 header. * This includes all crypto material including the MIC. */ tr->fwlen = cpu_to_le16(skb->len - sizeof(*tr) + tail_pad); } static void mwl8k_encapsulate_tx_frame(struct mwl8k_priv *priv, struct sk_buff *skb) { struct ieee80211_hdr *wh; struct ieee80211_tx_info *tx_info; struct ieee80211_key_conf *key_conf; int data_pad; int head_pad = 0; wh = (struct ieee80211_hdr *)skb->data; tx_info = IEEE80211_SKB_CB(skb); key_conf = NULL; if (ieee80211_is_data(wh->frame_control)) key_conf = tx_info->control.hw_key; /* * Make sure the packet header is in the DMA header format (4-address * without QoS), and add head & tail padding when HW crypto is enabled. * * We have the following trailer padding requirements: * - WEP: 4 trailer bytes (ICV) * - TKIP: 12 trailer bytes (8 MIC + 4 ICV) * - CCMP: 8 trailer bytes (MIC) */ data_pad = 0; if (key_conf != NULL) { head_pad = key_conf->iv_len; switch (key_conf->cipher) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: data_pad = 4; break; case WLAN_CIPHER_SUITE_TKIP: data_pad = 12; break; case WLAN_CIPHER_SUITE_CCMP: data_pad = 8; break; } } mwl8k_add_dma_header(priv, skb, head_pad, data_pad); } /* * Packet reception for 88w8366 AP firmware. 
*/ struct mwl8k_rxd_8366_ap { __le16 pkt_len; __u8 sq2; __u8 rate; __le32 pkt_phys_addr; __le32 next_rxd_phys_addr; __le16 qos_control; __le16 htsig2; __le32 hw_rssi_info; __le32 hw_noise_floor_info; __u8 noise_floor; __u8 pad0[3]; __u8 rssi; __u8 rx_status; __u8 channel; __u8 rx_ctrl; } __packed; #define MWL8K_8366_AP_RATE_INFO_MCS_FORMAT 0x80 #define MWL8K_8366_AP_RATE_INFO_40MHZ 0x40 #define MWL8K_8366_AP_RATE_INFO_RATEID(x) ((x) & 0x3f) #define MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST 0x80 /* 8366 AP rx_status bits */ #define MWL8K_8366_AP_RXSTAT_DECRYPT_ERR_MASK 0x80 #define MWL8K_8366_AP_RXSTAT_GENERAL_DECRYPT_ERR 0xFF #define MWL8K_8366_AP_RXSTAT_TKIP_DECRYPT_MIC_ERR 0x02 #define MWL8K_8366_AP_RXSTAT_WEP_DECRYPT_ICV_ERR 0x04 #define MWL8K_8366_AP_RXSTAT_TKIP_DECRYPT_ICV_ERR 0x08 static void mwl8k_rxd_8366_ap_init(void *_rxd, dma_addr_t next_dma_addr) { struct mwl8k_rxd_8366_ap *rxd = _rxd; rxd->next_rxd_phys_addr = cpu_to_le32(next_dma_addr); rxd->rx_ctrl = MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST; } static void mwl8k_rxd_8366_ap_refill(void *_rxd, dma_addr_t addr, int len) { struct mwl8k_rxd_8366_ap *rxd = _rxd; rxd->pkt_len = cpu_to_le16(len); rxd->pkt_phys_addr = cpu_to_le32(addr); wmb(); rxd->rx_ctrl = 0; } static int mwl8k_rxd_8366_ap_process(void *_rxd, struct ieee80211_rx_status *status, __le16 *qos, s8 *noise) { struct mwl8k_rxd_8366_ap *rxd = _rxd; if (!(rxd->rx_ctrl & MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST)) return -1; rmb(); memset(status, 0, sizeof(*status)); status->signal = -rxd->rssi; *noise = -rxd->noise_floor; if (rxd->rate & MWL8K_8366_AP_RATE_INFO_MCS_FORMAT) { status->flag |= RX_FLAG_HT; if (rxd->rate & MWL8K_8366_AP_RATE_INFO_40MHZ) status->flag |= RX_FLAG_40MHZ; status->rate_idx = MWL8K_8366_AP_RATE_INFO_RATEID(rxd->rate); } else { int i; for (i = 0; i < ARRAY_SIZE(mwl8k_rates_24); i++) { if (mwl8k_rates_24[i].hw_value == rxd->rate) { status->rate_idx = i; break; } } } if (rxd->channel > 14) { status->band = IEEE80211_BAND_5GHZ; if (!(status->flag & 
RX_FLAG_HT)) status->rate_idx -= 5; } else { status->band = IEEE80211_BAND_2GHZ; } status->freq = ieee80211_channel_to_frequency(rxd->channel, status->band); *qos = rxd->qos_control; if ((rxd->rx_status != MWL8K_8366_AP_RXSTAT_GENERAL_DECRYPT_ERR) && (rxd->rx_status & MWL8K_8366_AP_RXSTAT_DECRYPT_ERR_MASK) && (rxd->rx_status & MWL8K_8366_AP_RXSTAT_TKIP_DECRYPT_MIC_ERR)) status->flag |= RX_FLAG_MMIC_ERROR; return le16_to_cpu(rxd->pkt_len); } static struct rxd_ops rxd_8366_ap_ops = { .rxd_size = sizeof(struct mwl8k_rxd_8366_ap), .rxd_init = mwl8k_rxd_8366_ap_init, .rxd_refill = mwl8k_rxd_8366_ap_refill, .rxd_process = mwl8k_rxd_8366_ap_process, }; /* * Packet reception for STA firmware. */ struct mwl8k_rxd_sta { __le16 pkt_len; __u8 link_quality; __u8 noise_level; __le32 pkt_phys_addr; __le32 next_rxd_phys_addr; __le16 qos_control; __le16 rate_info; __le32 pad0[4]; __u8 rssi; __u8 channel; __le16 pad1; __u8 rx_ctrl; __u8 rx_status; __u8 pad2[2]; } __packed; #define MWL8K_STA_RATE_INFO_SHORTPRE 0x8000 #define MWL8K_STA_RATE_INFO_ANTSELECT(x) (((x) >> 11) & 0x3) #define MWL8K_STA_RATE_INFO_RATEID(x) (((x) >> 3) & 0x3f) #define MWL8K_STA_RATE_INFO_40MHZ 0x0004 #define MWL8K_STA_RATE_INFO_SHORTGI 0x0002 #define MWL8K_STA_RATE_INFO_MCS_FORMAT 0x0001 #define MWL8K_STA_RX_CTRL_OWNED_BY_HOST 0x02 #define MWL8K_STA_RX_CTRL_DECRYPT_ERROR 0x04 /* ICV=0 or MIC=1 */ #define MWL8K_STA_RX_CTRL_DEC_ERR_TYPE 0x08 /* Key is uploaded only in failure case */ #define MWL8K_STA_RX_CTRL_KEY_INDEX 0x30 static void mwl8k_rxd_sta_init(void *_rxd, dma_addr_t next_dma_addr) { struct mwl8k_rxd_sta *rxd = _rxd; rxd->next_rxd_phys_addr = cpu_to_le32(next_dma_addr); rxd->rx_ctrl = MWL8K_STA_RX_CTRL_OWNED_BY_HOST; } static void mwl8k_rxd_sta_refill(void *_rxd, dma_addr_t addr, int len) { struct mwl8k_rxd_sta *rxd = _rxd; rxd->pkt_len = cpu_to_le16(len); rxd->pkt_phys_addr = cpu_to_le32(addr); wmb(); rxd->rx_ctrl = 0; } static int mwl8k_rxd_sta_process(void *_rxd, struct ieee80211_rx_status *status, 
__le16 *qos, s8 *noise) { struct mwl8k_rxd_sta *rxd = _rxd; u16 rate_info; if (!(rxd->rx_ctrl & MWL8K_STA_RX_CTRL_OWNED_BY_HOST)) return -1; rmb(); rate_info = le16_to_cpu(rxd->rate_info); memset(status, 0, sizeof(*status)); status->signal = -rxd->rssi; *noise = -rxd->noise_level; status->antenna = MWL8K_STA_RATE_INFO_ANTSELECT(rate_info); status->rate_idx = MWL8K_STA_RATE_INFO_RATEID(rate_info); if (rate_info & MWL8K_STA_RATE_INFO_SHORTPRE) status->flag |= RX_FLAG_SHORTPRE; if (rate_info & MWL8K_STA_RATE_INFO_40MHZ) status->flag |= RX_FLAG_40MHZ; if (rate_info & MWL8K_STA_RATE_INFO_SHORTGI) status->flag |= RX_FLAG_SHORT_GI; if (rate_info & MWL8K_STA_RATE_INFO_MCS_FORMAT) status->flag |= RX_FLAG_HT; if (rxd->channel > 14) { status->band = IEEE80211_BAND_5GHZ; if (!(status->flag & RX_FLAG_HT)) status->rate_idx -= 5; } else { status->band = IEEE80211_BAND_2GHZ; } status->freq = ieee80211_channel_to_frequency(rxd->channel, status->band); *qos = rxd->qos_control; if ((rxd->rx_ctrl & MWL8K_STA_RX_CTRL_DECRYPT_ERROR) && (rxd->rx_ctrl & MWL8K_STA_RX_CTRL_DEC_ERR_TYPE)) status->flag |= RX_FLAG_MMIC_ERROR; return le16_to_cpu(rxd->pkt_len); } static struct rxd_ops rxd_sta_ops = { .rxd_size = sizeof(struct mwl8k_rxd_sta), .rxd_init = mwl8k_rxd_sta_init, .rxd_refill = mwl8k_rxd_sta_refill, .rxd_process = mwl8k_rxd_sta_process, }; #define MWL8K_RX_DESCS 256 #define MWL8K_RX_MAXSZ 3800 static int mwl8k_rxq_init(struct ieee80211_hw *hw, int index) { struct mwl8k_priv *priv = hw->priv; struct mwl8k_rx_queue *rxq = priv->rxq + index; int size; int i; rxq->rxd_count = 0; rxq->head = 0; rxq->tail = 0; size = MWL8K_RX_DESCS * priv->rxd_ops->rxd_size; rxq->rxd = pci_alloc_consistent(priv->pdev, size, &rxq->rxd_dma); if (rxq->rxd == NULL) { wiphy_err(hw->wiphy, "failed to alloc RX descriptors\n"); return -ENOMEM; } memset(rxq->rxd, 0, size); rxq->buf = kcalloc(MWL8K_RX_DESCS, sizeof(*rxq->buf), GFP_KERNEL); if (rxq->buf == NULL) { wiphy_err(hw->wiphy, "failed to alloc RX skbuff 
list\n"); pci_free_consistent(priv->pdev, size, rxq->rxd, rxq->rxd_dma); return -ENOMEM; } for (i = 0; i < MWL8K_RX_DESCS; i++) { int desc_size; void *rxd; int nexti; dma_addr_t next_dma_addr; desc_size = priv->rxd_ops->rxd_size; rxd = rxq->rxd + (i * priv->rxd_ops->rxd_size); nexti = i + 1; if (nexti == MWL8K_RX_DESCS) nexti = 0; next_dma_addr = rxq->rxd_dma + (nexti * desc_size); priv->rxd_ops->rxd_init(rxd, next_dma_addr); } return 0; } static int rxq_refill(struct ieee80211_hw *hw, int index, int limit) { struct mwl8k_priv *priv = hw->priv; struct mwl8k_rx_queue *rxq = priv->rxq + index; int refilled; refilled = 0; while (rxq->rxd_count < MWL8K_RX_DESCS && limit--) { struct sk_buff *skb; dma_addr_t addr; int rx; void *rxd; skb = dev_alloc_skb(MWL8K_RX_MAXSZ); if (skb == NULL) break; addr = pci_map_single(priv->pdev, skb->data, MWL8K_RX_MAXSZ, DMA_FROM_DEVICE); rxq->rxd_count++; rx = rxq->tail++; if (rxq->tail == MWL8K_RX_DESCS) rxq->tail = 0; rxq->buf[rx].skb = skb; dma_unmap_addr_set(&rxq->buf[rx], dma, addr); rxd = rxq->rxd + (rx * priv->rxd_ops->rxd_size); priv->rxd_ops->rxd_refill(rxd, addr, MWL8K_RX_MAXSZ); refilled++; } return refilled; } /* Must be called only when the card's reception is completely halted */ static void mwl8k_rxq_deinit(struct ieee80211_hw *hw, int index) { struct mwl8k_priv *priv = hw->priv; struct mwl8k_rx_queue *rxq = priv->rxq + index; int i; if (rxq->rxd == NULL) return; for (i = 0; i < MWL8K_RX_DESCS; i++) { if (rxq->buf[i].skb != NULL) { pci_unmap_single(priv->pdev, dma_unmap_addr(&rxq->buf[i], dma), MWL8K_RX_MAXSZ, PCI_DMA_FROMDEVICE); dma_unmap_addr_set(&rxq->buf[i], dma, 0); kfree_skb(rxq->buf[i].skb); rxq->buf[i].skb = NULL; } } kfree(rxq->buf); rxq->buf = NULL; pci_free_consistent(priv->pdev, MWL8K_RX_DESCS * priv->rxd_ops->rxd_size, rxq->rxd, rxq->rxd_dma); rxq->rxd = NULL; } /* * Scan a list of BSSIDs to process for finalize join. * Allows for extension to process multiple BSSIDs. 
*/ static inline int mwl8k_capture_bssid(struct mwl8k_priv *priv, struct ieee80211_hdr *wh) { return priv->capture_beacon && ieee80211_is_beacon(wh->frame_control) && !compare_ether_addr(wh->addr3, priv->capture_bssid); } static inline void mwl8k_save_beacon(struct ieee80211_hw *hw, struct sk_buff *skb) { struct mwl8k_priv *priv = hw->priv; priv->capture_beacon = false; memset(priv->capture_bssid, 0, ETH_ALEN); /* * Use GFP_ATOMIC as rxq_process is called from * the primary interrupt handler, memory allocation call * must not sleep. */ priv->beacon_skb = skb_copy(skb, GFP_ATOMIC); if (priv->beacon_skb != NULL) ieee80211_queue_work(hw, &priv->finalize_join_worker); } static inline struct mwl8k_vif *mwl8k_find_vif_bss(struct list_head *vif_list, u8 *bssid) { struct mwl8k_vif *mwl8k_vif; list_for_each_entry(mwl8k_vif, vif_list, list) { if (memcmp(bssid, mwl8k_vif->bssid, ETH_ALEN) == 0) return mwl8k_vif; } return NULL; } static int rxq_process(struct ieee80211_hw *hw, int index, int limit) { struct mwl8k_priv *priv = hw->priv; struct mwl8k_vif *mwl8k_vif = NULL; struct mwl8k_rx_queue *rxq = priv->rxq + index; int processed; processed = 0; while (rxq->rxd_count && limit--) { struct sk_buff *skb; void *rxd; int pkt_len; struct ieee80211_rx_status status; struct ieee80211_hdr *wh; __le16 qos; skb = rxq->buf[rxq->head].skb; if (skb == NULL) break; rxd = rxq->rxd + (rxq->head * priv->rxd_ops->rxd_size); pkt_len = priv->rxd_ops->rxd_process(rxd, &status, &qos, &priv->noise); if (pkt_len < 0) break; rxq->buf[rxq->head].skb = NULL; pci_unmap_single(priv->pdev, dma_unmap_addr(&rxq->buf[rxq->head], dma), MWL8K_RX_MAXSZ, PCI_DMA_FROMDEVICE); dma_unmap_addr_set(&rxq->buf[rxq->head], dma, 0); rxq->head++; if (rxq->head == MWL8K_RX_DESCS) rxq->head = 0; rxq->rxd_count--; wh = &((struct mwl8k_dma_data *)skb->data)->wh; /* * Check for a pending join operation. Save a * copy of the beacon and schedule a tasklet to * send a FINALIZE_JOIN command to the firmware. 
*/ if (mwl8k_capture_bssid(priv, (void *)skb->data)) mwl8k_save_beacon(hw, skb); if (ieee80211_has_protected(wh->frame_control)) { /* Check if hw crypto has been enabled for * this bss. If yes, set the status flags * accordingly */ mwl8k_vif = mwl8k_find_vif_bss(&priv->vif_list, wh->addr1); if (mwl8k_vif != NULL && mwl8k_vif->is_hw_crypto_enabled) { /* * When MMIC ERROR is encountered * by the firmware, payload is * dropped and only 32 bytes of * mwl8k Firmware header is sent * to the host. * * We need to add four bytes of * key information. In it * MAC80211 expects keyidx set to * 0 for triggering Counter * Measure of MMIC failure. */ if (status.flag & RX_FLAG_MMIC_ERROR) { struct mwl8k_dma_data *tr; tr = (struct mwl8k_dma_data *)skb->data; memset((void *)&(tr->data), 0, 4); pkt_len += 4; } if (!ieee80211_is_auth(wh->frame_control)) status.flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED; } } skb_put(skb, pkt_len); mwl8k_remove_dma_header(skb, qos); memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); ieee80211_rx_irqsafe(hw, skb); processed++; } return processed; } /* * Packet transmission. 
*/ #define MWL8K_TXD_STATUS_OK 0x00000001 #define MWL8K_TXD_STATUS_OK_RETRY 0x00000002 #define MWL8K_TXD_STATUS_OK_MORE_RETRY 0x00000004 #define MWL8K_TXD_STATUS_MULTICAST_TX 0x00000008 #define MWL8K_TXD_STATUS_FW_OWNED 0x80000000 #define MWL8K_QOS_QLEN_UNSPEC 0xff00 #define MWL8K_QOS_ACK_POLICY_MASK 0x0060 #define MWL8K_QOS_ACK_POLICY_NORMAL 0x0000 #define MWL8K_QOS_ACK_POLICY_BLOCKACK 0x0060 #define MWL8K_QOS_EOSP 0x0010 struct mwl8k_tx_desc { __le32 status; __u8 data_rate; __u8 tx_priority; __le16 qos_control; __le32 pkt_phys_addr; __le16 pkt_len; __u8 dest_MAC_addr[ETH_ALEN]; __le32 next_txd_phys_addr; __le32 timestamp; __le16 rate_info; __u8 peer_id; __u8 tx_frag_cnt; } __packed; #define MWL8K_TX_DESCS 128 static int mwl8k_txq_init(struct ieee80211_hw *hw, int index) { struct mwl8k_priv *priv = hw->priv; struct mwl8k_tx_queue *txq = priv->txq + index; int size; int i; txq->len = 0; txq->head = 0; txq->tail = 0; size = MWL8K_TX_DESCS * sizeof(struct mwl8k_tx_desc); txq->txd = pci_alloc_consistent(priv->pdev, size, &txq->txd_dma); if (txq->txd == NULL) { wiphy_err(hw->wiphy, "failed to alloc TX descriptors\n"); return -ENOMEM; } memset(txq->txd, 0, size); txq->skb = kcalloc(MWL8K_TX_DESCS, sizeof(*txq->skb), GFP_KERNEL); if (txq->skb == NULL) { wiphy_err(hw->wiphy, "failed to alloc TX skbuff list\n"); pci_free_consistent(priv->pdev, size, txq->txd, txq->txd_dma); return -ENOMEM; } for (i = 0; i < MWL8K_TX_DESCS; i++) { struct mwl8k_tx_desc *tx_desc; int nexti; tx_desc = txq->txd + i; nexti = (i + 1) % MWL8K_TX_DESCS; tx_desc->status = 0; tx_desc->next_txd_phys_addr = cpu_to_le32(txq->txd_dma + nexti * sizeof(*tx_desc)); } return 0; } static inline void mwl8k_tx_start(struct mwl8k_priv *priv) { iowrite32(MWL8K_H2A_INT_PPA_READY, priv->regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS); iowrite32(MWL8K_H2A_INT_DUMMY, priv->regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS); ioread32(priv->regs + MWL8K_HIU_INT_CODE); } static void mwl8k_dump_tx_rings(struct ieee80211_hw *hw) { struct 
mwl8k_priv *priv = hw->priv; int i; for (i = 0; i < mwl8k_tx_queues(priv); i++) { struct mwl8k_tx_queue *txq = priv->txq + i; int fw_owned = 0; int drv_owned = 0; int unused = 0; int desc; for (desc = 0; desc < MWL8K_TX_DESCS; desc++) { struct mwl8k_tx_desc *tx_desc = txq->txd + desc; u32 status; status = le32_to_cpu(tx_desc->status); if (status & MWL8K_TXD_STATUS_FW_OWNED) fw_owned++; else drv_owned++; if (tx_desc->pkt_len == 0) unused++; } wiphy_err(hw->wiphy, "txq[%d] len=%d head=%d tail=%d " "fw_owned=%d drv_owned=%d unused=%d\n", i, txq->len, txq->head, txq->tail, fw_owned, drv_owned, unused); } } /* * Must be called with priv->fw_mutex held and tx queues stopped. */ #define MWL8K_TX_WAIT_TIMEOUT_MS 5000 static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw) { struct mwl8k_priv *priv = hw->priv; DECLARE_COMPLETION_ONSTACK(tx_wait); int retry; int rc; might_sleep(); /* Since fw restart is in progress, allow only the firmware * commands from the restart code and block the other * commands since they are going to fail in any case since * the firmware has crashed */ if (priv->hw_restart_in_progress) { if (priv->hw_restart_owner == current) return 0; else return -EBUSY; } /* * The TX queues are stopped at this point, so this test * doesn't need to take ->tx_lock. 
*/ if (!priv->pending_tx_pkts) return 0; retry = 0; rc = 0; spin_lock_bh(&priv->tx_lock); priv->tx_wait = &tx_wait; while (!rc) { int oldcount; unsigned long timeout; oldcount = priv->pending_tx_pkts; spin_unlock_bh(&priv->tx_lock); timeout = wait_for_completion_timeout(&tx_wait, msecs_to_jiffies(MWL8K_TX_WAIT_TIMEOUT_MS)); spin_lock_bh(&priv->tx_lock); if (timeout) { WARN_ON(priv->pending_tx_pkts); if (retry) wiphy_notice(hw->wiphy, "tx rings drained\n"); break; } if (priv->pending_tx_pkts < oldcount) { wiphy_notice(hw->wiphy, "waiting for tx rings to drain (%d -> %d pkts)\n", oldcount, priv->pending_tx_pkts); retry = 1; continue; } priv->tx_wait = NULL; wiphy_err(hw->wiphy, "tx rings stuck for %d ms\n", MWL8K_TX_WAIT_TIMEOUT_MS); mwl8k_dump_tx_rings(hw); priv->hw_restart_in_progress = true; ieee80211_queue_work(hw, &priv->fw_reload); rc = -ETIMEDOUT; } spin_unlock_bh(&priv->tx_lock); return rc; } #define MWL8K_TXD_SUCCESS(status) \ ((status) & (MWL8K_TXD_STATUS_OK | \ MWL8K_TXD_STATUS_OK_RETRY | \ MWL8K_TXD_STATUS_OK_MORE_RETRY)) static int mwl8k_tid_queue_mapping(u8 tid) { BUG_ON(tid > 7); switch (tid) { case 0: case 3: return IEEE80211_AC_BE; break; case 1: case 2: return IEEE80211_AC_BK; break; case 4: case 5: return IEEE80211_AC_VI; break; case 6: case 7: return IEEE80211_AC_VO; break; default: return -1; break; } } /* The firmware will fill in the rate information * for each packet that gets queued in the hardware * and these macros will interpret that info. 
*/ #define RI_FORMAT(a) (a & 0x0001) #define RI_RATE_ID_MCS(a) ((a & 0x01f8) >> 3) static int mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int limit, int force) { struct mwl8k_priv *priv = hw->priv; struct mwl8k_tx_queue *txq = priv->txq + index; int processed; processed = 0; while (txq->len > 0 && limit--) { int tx; struct mwl8k_tx_desc *tx_desc; unsigned long addr; int size; struct sk_buff *skb; struct ieee80211_tx_info *info; u32 status; struct ieee80211_sta *sta; struct mwl8k_sta *sta_info = NULL; u16 rate_info; struct ieee80211_hdr *wh; tx = txq->head; tx_desc = txq->txd + tx; status = le32_to_cpu(tx_desc->status); if (status & MWL8K_TXD_STATUS_FW_OWNED) { if (!force) break; tx_desc->status &= ~cpu_to_le32(MWL8K_TXD_STATUS_FW_OWNED); } txq->head = (tx + 1) % MWL8K_TX_DESCS; BUG_ON(txq->len == 0); txq->len--; priv->pending_tx_pkts--; addr = le32_to_cpu(tx_desc->pkt_phys_addr); size = le16_to_cpu(tx_desc->pkt_len); skb = txq->skb[tx]; txq->skb[tx] = NULL; BUG_ON(skb == NULL); pci_unmap_single(priv->pdev, addr, size, PCI_DMA_TODEVICE); mwl8k_remove_dma_header(skb, tx_desc->qos_control); wh = (struct ieee80211_hdr *) skb->data; /* Mark descriptor as unused */ tx_desc->pkt_phys_addr = 0; tx_desc->pkt_len = 0; info = IEEE80211_SKB_CB(skb); if (ieee80211_is_data(wh->frame_control)) { sta = info->control.sta; if (sta) { sta_info = MWL8K_STA(sta); BUG_ON(sta_info == NULL); rate_info = le16_to_cpu(tx_desc->rate_info); /* If rate is < 6.5 Mpbs for an ht station * do not form an ampdu. If the station is a * legacy station (format = 0), do not form an * ampdu */ if (RI_RATE_ID_MCS(rate_info) < 1 || RI_FORMAT(rate_info) == 0) { sta_info->is_ampdu_allowed = false; } else { sta_info->is_ampdu_allowed = true; } } } ieee80211_tx_info_clear_status(info); /* Rate control is happening in the firmware. * Ensure no tx rate is being reported. 
*/ info->status.rates[0].idx = -1; info->status.rates[0].count = 1; if (MWL8K_TXD_SUCCESS(status)) info->flags |= IEEE80211_TX_STAT_ACK; ieee80211_tx_status_irqsafe(hw, skb); processed++; } return processed; } /* must be called only when the card's transmit is completely halted */ static void mwl8k_txq_deinit(struct ieee80211_hw *hw, int index) { struct mwl8k_priv *priv = hw->priv; struct mwl8k_tx_queue *txq = priv->txq + index; if (txq->txd == NULL) return; mwl8k_txq_reclaim(hw, index, INT_MAX, 1); kfree(txq->skb); txq->skb = NULL; pci_free_consistent(priv->pdev, MWL8K_TX_DESCS * sizeof(struct mwl8k_tx_desc), txq->txd, txq->txd_dma); txq->txd = NULL; } /* caller must hold priv->stream_lock when calling the stream functions */ static struct mwl8k_ampdu_stream * mwl8k_add_stream(struct ieee80211_hw *hw, struct ieee80211_sta *sta, u8 tid) { struct mwl8k_ampdu_stream *stream; struct mwl8k_priv *priv = hw->priv; int i; for (i = 0; i < priv->num_ampdu_queues; i++) { stream = &priv->ampdu[i]; if (stream->state == AMPDU_NO_STREAM) { stream->sta = sta; stream->state = AMPDU_STREAM_NEW; stream->tid = tid; stream->idx = i; stream->txq_idx = MWL8K_TX_WMM_QUEUES + i; wiphy_debug(hw->wiphy, "Added a new stream for %pM %d", sta->addr, tid); return stream; } } return NULL; } static int mwl8k_start_stream(struct ieee80211_hw *hw, struct mwl8k_ampdu_stream *stream) { int ret; /* if the stream has already been started, don't start it again */ if (stream->state != AMPDU_STREAM_NEW) return 0; ret = ieee80211_start_tx_ba_session(stream->sta, stream->tid, 0); if (ret) wiphy_debug(hw->wiphy, "Failed to start stream for %pM %d: " "%d\n", stream->sta->addr, stream->tid, ret); else wiphy_debug(hw->wiphy, "Started stream for %pM %d\n", stream->sta->addr, stream->tid); return ret; } static void mwl8k_remove_stream(struct ieee80211_hw *hw, struct mwl8k_ampdu_stream *stream) { wiphy_debug(hw->wiphy, "Remove stream for %pM %d\n", stream->sta->addr, stream->tid); memset(stream, 0, 
sizeof(*stream)); } static struct mwl8k_ampdu_stream * mwl8k_lookup_stream(struct ieee80211_hw *hw, u8 *addr, u8 tid) { struct mwl8k_priv *priv = hw->priv; int i; for (i = 0 ; i < priv->num_ampdu_queues; i++) { struct mwl8k_ampdu_stream *stream; stream = &priv->ampdu[i]; if (stream->state == AMPDU_NO_STREAM) continue; if (!memcmp(stream->sta->addr, addr, ETH_ALEN) && stream->tid == tid) return stream; } return NULL; } #define MWL8K_AMPDU_PACKET_THRESHOLD 64 static inline bool mwl8k_ampdu_allowed(struct ieee80211_sta *sta, u8 tid) { struct mwl8k_sta *sta_info = MWL8K_STA(sta); struct tx_traffic_info *tx_stats; BUG_ON(tid >= MWL8K_MAX_TID); tx_stats = &sta_info->tx_stats[tid]; return sta_info->is_ampdu_allowed && tx_stats->pkts > MWL8K_AMPDU_PACKET_THRESHOLD; } static inline void mwl8k_tx_count_packet(struct ieee80211_sta *sta, u8 tid) { struct mwl8k_sta *sta_info = MWL8K_STA(sta); struct tx_traffic_info *tx_stats; BUG_ON(tid >= MWL8K_MAX_TID); tx_stats = &sta_info->tx_stats[tid]; if (tx_stats->start_time == 0) tx_stats->start_time = jiffies; /* reset the packet count after each second elapses. If the number of * packets ever exceeds the ampdu_min_traffic threshold, we will allow * an ampdu stream to be started. 
*/ if (jiffies - tx_stats->start_time > HZ) { tx_stats->pkts = 0; tx_stats->start_time = 0; } else tx_stats->pkts++; } static void mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb) { struct mwl8k_priv *priv = hw->priv; struct ieee80211_tx_info *tx_info; struct mwl8k_vif *mwl8k_vif; struct ieee80211_sta *sta; struct ieee80211_hdr *wh; struct mwl8k_tx_queue *txq; struct mwl8k_tx_desc *tx; dma_addr_t dma; u32 txstatus; u8 txdatarate; u16 qos; int txpriority; u8 tid = 0; struct mwl8k_ampdu_stream *stream = NULL; bool start_ba_session = false; bool mgmtframe = false; struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; wh = (struct ieee80211_hdr *)skb->data; if (ieee80211_is_data_qos(wh->frame_control)) qos = le16_to_cpu(*((__le16 *)ieee80211_get_qos_ctl(wh))); else qos = 0; if (ieee80211_is_mgmt(wh->frame_control)) mgmtframe = true; if (priv->ap_fw) mwl8k_encapsulate_tx_frame(priv, skb); else mwl8k_add_dma_header(priv, skb, 0, 0); wh = &((struct mwl8k_dma_data *)skb->data)->wh; tx_info = IEEE80211_SKB_CB(skb); sta = tx_info->control.sta; mwl8k_vif = MWL8K_VIF(tx_info->control.vif); if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { wh->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); wh->seq_ctrl |= cpu_to_le16(mwl8k_vif->seqno); mwl8k_vif->seqno += 0x10; } /* Setup firmware control bit fields for each frame type. */ txstatus = 0; txdatarate = 0; if (ieee80211_is_mgmt(wh->frame_control) || ieee80211_is_ctl(wh->frame_control)) { txdatarate = 0; qos |= MWL8K_QOS_QLEN_UNSPEC | MWL8K_QOS_EOSP; } else if (ieee80211_is_data(wh->frame_control)) { txdatarate = 1; if (is_multicast_ether_addr(wh->addr1)) txstatus |= MWL8K_TXD_STATUS_MULTICAST_TX; qos &= ~MWL8K_QOS_ACK_POLICY_MASK; if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) qos |= MWL8K_QOS_ACK_POLICY_BLOCKACK; else qos |= MWL8K_QOS_ACK_POLICY_NORMAL; } /* Queue ADDBA request in the respective data queue. 
While setting up * the ampdu stream, mac80211 queues further packets for that * particular ra/tid pair. However, packets piled up in the hardware * for that ra/tid pair will still go out. ADDBA request and the * related data packets going out from different queues asynchronously * will cause a shift in the receiver window which might result in * ampdu packets getting dropped at the receiver after the stream has * been setup. */ if (unlikely(ieee80211_is_action(wh->frame_control) && mgmt->u.action.category == WLAN_CATEGORY_BACK && mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ && priv->ap_fw)) { u16 capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab); tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; index = mwl8k_tid_queue_mapping(tid); } txpriority = index; if (priv->ap_fw && sta && sta->ht_cap.ht_supported && skb->protocol != cpu_to_be16(ETH_P_PAE) && ieee80211_is_data_qos(wh->frame_control)) { tid = qos & 0xf; mwl8k_tx_count_packet(sta, tid); spin_lock(&priv->stream_lock); stream = mwl8k_lookup_stream(hw, sta->addr, tid); if (stream != NULL) { if (stream->state == AMPDU_STREAM_ACTIVE) { txpriority = stream->txq_idx; index = stream->txq_idx; } else if (stream->state == AMPDU_STREAM_NEW) { /* We get here if the driver sends us packets * after we've initiated a stream, but before * our ampdu_action routine has been called * with IEEE80211_AMPDU_TX_START to get the SSN * for the ADDBA request. So this packet can * go out with no risk of sequence number * mismatch. No special handling is required. */ } else { /* Drop packets that would go out after the * ADDBA request was sent but before the ADDBA * response is received. If we don't do this, * the recipient would probably receive it * after the ADDBA request with SSN 0. This * will cause the recipient's BA receive window * to shift, which would cause the subsequent * packets in the BA stream to be discarded. 
* mac80211 queues our packets for us in this * case, so this is really just a safety check. */ wiphy_warn(hw->wiphy, "Cannot send packet while ADDBA " "dialog is underway.\n"); spin_unlock(&priv->stream_lock); dev_kfree_skb(skb); return; } } else { /* Defer calling mwl8k_start_stream so that the current * skb can go out before the ADDBA request. This * prevents sequence number mismatch at the recepient * as described above. */ if (mwl8k_ampdu_allowed(sta, tid)) { stream = mwl8k_add_stream(hw, sta, tid); if (stream != NULL) start_ba_session = true; } } spin_unlock(&priv->stream_lock); } dma = pci_map_single(priv->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); if (pci_dma_mapping_error(priv->pdev, dma)) { wiphy_debug(hw->wiphy, "failed to dma map skb, dropping TX frame.\n"); if (start_ba_session) { spin_lock(&priv->stream_lock); mwl8k_remove_stream(hw, stream); spin_unlock(&priv->stream_lock); } dev_kfree_skb(skb); return; } spin_lock_bh(&priv->tx_lock); txq = priv->txq + index; /* Mgmt frames that go out frequently are probe * responses. Other mgmt frames got out relatively * infrequently. Hence reserve 2 buffers so that * other mgmt frames do not get dropped due to an * already queued probe response in one of the * reserved buffers. 
*/ if (txq->len >= MWL8K_TX_DESCS - 2) { if (!mgmtframe || txq->len == MWL8K_TX_DESCS) { if (start_ba_session) { spin_lock(&priv->stream_lock); mwl8k_remove_stream(hw, stream); spin_unlock(&priv->stream_lock); } spin_unlock_bh(&priv->tx_lock); dev_kfree_skb(skb); return; } } BUG_ON(txq->skb[txq->tail] != NULL); txq->skb[txq->tail] = skb; tx = txq->txd + txq->tail; tx->data_rate = txdatarate; tx->tx_priority = txpriority; tx->qos_control = cpu_to_le16(qos); tx->pkt_phys_addr = cpu_to_le32(dma); tx->pkt_len = cpu_to_le16(skb->len); tx->rate_info = 0; if (!priv->ap_fw && tx_info->control.sta != NULL) tx->peer_id = MWL8K_STA(tx_info->control.sta)->peer_id; else tx->peer_id = 0; if (priv->ap_fw) tx->timestamp = cpu_to_le32(ioread32(priv->regs + MWL8K_HW_TIMER_REGISTER)); wmb(); tx->status = cpu_to_le32(MWL8K_TXD_STATUS_FW_OWNED | txstatus); txq->len++; priv->pending_tx_pkts++; txq->tail++; if (txq->tail == MWL8K_TX_DESCS) txq->tail = 0; mwl8k_tx_start(priv); spin_unlock_bh(&priv->tx_lock); /* Initiate the ampdu session here */ if (start_ba_session) { spin_lock(&priv->stream_lock); if (mwl8k_start_stream(hw, stream)) mwl8k_remove_stream(hw, stream); spin_unlock(&priv->stream_lock); } } /* * Firmware access. * * We have the following requirements for issuing firmware commands: * - Some commands require that the packet transmit path is idle when * the command is issued. (For simplicity, we'll just quiesce the * transmit path for every command.) * - There are certain sequences of commands that need to be issued to * the hardware sequentially, with no other intervening commands. * * This leads to an implementation of a "firmware lock" as a mutex that * can be taken recursively, and which is taken by both the low-level * command submission function (mwl8k_post_cmd) as well as any users of * that function that require issuing of an atomic sequence of commands, * and quiesces the transmit path whenever it's taken. 
*/ static int mwl8k_fw_lock(struct ieee80211_hw *hw) { struct mwl8k_priv *priv = hw->priv; if (priv->fw_mutex_owner != current) { int rc; mutex_lock(&priv->fw_mutex); ieee80211_stop_queues(hw); rc = mwl8k_tx_wait_empty(hw); if (rc) { if (!priv->hw_restart_in_progress) ieee80211_wake_queues(hw); mutex_unlock(&priv->fw_mutex); return rc; } priv->fw_mutex_owner = current; } priv->fw_mutex_depth++; return 0; } static void mwl8k_fw_unlock(struct ieee80211_hw *hw) { struct mwl8k_priv *priv = hw->priv; if (!--priv->fw_mutex_depth) { if (!priv->hw_restart_in_progress) ieee80211_wake_queues(hw); priv->fw_mutex_owner = NULL; mutex_unlock(&priv->fw_mutex); } } /* * Command processing. */ /* Timeout firmware commands after 10s */ #define MWL8K_CMD_TIMEOUT_MS 10000 static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd) { DECLARE_COMPLETION_ONSTACK(cmd_wait); struct mwl8k_priv *priv = hw->priv; void __iomem *regs = priv->regs; dma_addr_t dma_addr; unsigned int dma_size; int rc; unsigned long timeout = 0; u8 buf[32]; cmd->result = (__force __le16) 0xffff; dma_size = le16_to_cpu(cmd->length); dma_addr = pci_map_single(priv->pdev, cmd, dma_size, PCI_DMA_BIDIRECTIONAL); if (pci_dma_mapping_error(priv->pdev, dma_addr)) return -ENOMEM; rc = mwl8k_fw_lock(hw); if (rc) { pci_unmap_single(priv->pdev, dma_addr, dma_size, PCI_DMA_BIDIRECTIONAL); return rc; } priv->hostcmd_wait = &cmd_wait; iowrite32(dma_addr, regs + MWL8K_HIU_GEN_PTR); iowrite32(MWL8K_H2A_INT_DOORBELL, regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS); iowrite32(MWL8K_H2A_INT_DUMMY, regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS); timeout = wait_for_completion_timeout(&cmd_wait, msecs_to_jiffies(MWL8K_CMD_TIMEOUT_MS)); priv->hostcmd_wait = NULL; mwl8k_fw_unlock(hw); pci_unmap_single(priv->pdev, dma_addr, dma_size, PCI_DMA_BIDIRECTIONAL); if (!timeout) { wiphy_err(hw->wiphy, "Command %s timeout after %u ms\n", mwl8k_cmd_name(cmd->code, buf, sizeof(buf)), MWL8K_CMD_TIMEOUT_MS); rc = -ETIMEDOUT; } else { int ms; ms = 
MWL8K_CMD_TIMEOUT_MS - jiffies_to_msecs(timeout); rc = cmd->result ? -EINVAL : 0; if (rc) wiphy_err(hw->wiphy, "Command %s error 0x%x\n", mwl8k_cmd_name(cmd->code, buf, sizeof(buf)), le16_to_cpu(cmd->result)); else if (ms > 2000) wiphy_notice(hw->wiphy, "Command %s took %d ms\n", mwl8k_cmd_name(cmd->code, buf, sizeof(buf)), ms); } return rc; } static int mwl8k_post_pervif_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct mwl8k_cmd_pkt *cmd) { if (vif != NULL) cmd->macid = MWL8K_VIF(vif)->macid; return mwl8k_post_cmd(hw, cmd); } /* * Setup code shared between STA and AP firmware images. */ static void mwl8k_setup_2ghz_band(struct ieee80211_hw *hw) { struct mwl8k_priv *priv = hw->priv; BUILD_BUG_ON(sizeof(priv->channels_24) != sizeof(mwl8k_channels_24)); memcpy(priv->channels_24, mwl8k_channels_24, sizeof(mwl8k_channels_24)); BUILD_BUG_ON(sizeof(priv->rates_24) != sizeof(mwl8k_rates_24)); memcpy(priv->rates_24, mwl8k_rates_24, sizeof(mwl8k_rates_24)); priv->band_24.band = IEEE80211_BAND_2GHZ; priv->band_24.channels = priv->channels_24; priv->band_24.n_channels = ARRAY_SIZE(mwl8k_channels_24); priv->band_24.bitrates = priv->rates_24; priv->band_24.n_bitrates = ARRAY_SIZE(mwl8k_rates_24); hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band_24; } static void mwl8k_setup_5ghz_band(struct ieee80211_hw *hw) { struct mwl8k_priv *priv = hw->priv; BUILD_BUG_ON(sizeof(priv->channels_50) != sizeof(mwl8k_channels_50)); memcpy(priv->channels_50, mwl8k_channels_50, sizeof(mwl8k_channels_50)); BUILD_BUG_ON(sizeof(priv->rates_50) != sizeof(mwl8k_rates_50)); memcpy(priv->rates_50, mwl8k_rates_50, sizeof(mwl8k_rates_50)); priv->band_50.band = IEEE80211_BAND_5GHZ; priv->band_50.channels = priv->channels_50; priv->band_50.n_channels = ARRAY_SIZE(mwl8k_channels_50); priv->band_50.bitrates = priv->rates_50; priv->band_50.n_bitrates = ARRAY_SIZE(mwl8k_rates_50); hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &priv->band_50; } /* * CMD_GET_HW_SPEC (STA version). 
*/ struct mwl8k_cmd_get_hw_spec_sta { struct mwl8k_cmd_pkt header; __u8 hw_rev; __u8 host_interface; __le16 num_mcaddrs; __u8 perm_addr[ETH_ALEN]; __le16 region_code; __le32 fw_rev; __le32 ps_cookie; __le32 caps; __u8 mcs_bitmap[16]; __le32 rx_queue_ptr; __le32 num_tx_queues; __le32 tx_queue_ptrs[MWL8K_TX_WMM_QUEUES]; __le32 caps2; __le32 num_tx_desc_per_queue; __le32 total_rxd; } __packed; #define MWL8K_CAP_MAX_AMSDU 0x20000000 #define MWL8K_CAP_GREENFIELD 0x08000000 #define MWL8K_CAP_AMPDU 0x04000000 #define MWL8K_CAP_RX_STBC 0x01000000 #define MWL8K_CAP_TX_STBC 0x00800000 #define MWL8K_CAP_SHORTGI_40MHZ 0x00400000 #define MWL8K_CAP_SHORTGI_20MHZ 0x00200000 #define MWL8K_CAP_RX_ANTENNA_MASK 0x000e0000 #define MWL8K_CAP_TX_ANTENNA_MASK 0x0001c000 #define MWL8K_CAP_DELAY_BA 0x00003000 #define MWL8K_CAP_MIMO 0x00000200 #define MWL8K_CAP_40MHZ 0x00000100 #define MWL8K_CAP_BAND_MASK 0x00000007 #define MWL8K_CAP_5GHZ 0x00000004 #define MWL8K_CAP_2GHZ4 0x00000001 static void mwl8k_set_ht_caps(struct ieee80211_hw *hw, struct ieee80211_supported_band *band, u32 cap) { int rx_streams; int tx_streams; band->ht_cap.ht_supported = 1; if (cap & MWL8K_CAP_MAX_AMSDU) band->ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU; if (cap & MWL8K_CAP_GREENFIELD) band->ht_cap.cap |= IEEE80211_HT_CAP_GRN_FLD; if (cap & MWL8K_CAP_AMPDU) { hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION; band->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; band->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE; } if (cap & MWL8K_CAP_RX_STBC) band->ht_cap.cap |= IEEE80211_HT_CAP_RX_STBC; if (cap & MWL8K_CAP_TX_STBC) band->ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC; if (cap & MWL8K_CAP_SHORTGI_40MHZ) band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40; if (cap & MWL8K_CAP_SHORTGI_20MHZ) band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_20; if (cap & MWL8K_CAP_DELAY_BA) band->ht_cap.cap |= IEEE80211_HT_CAP_DELAY_BA; if (cap & MWL8K_CAP_40MHZ) band->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; rx_streams = hweight32(cap & 
MWL8K_CAP_RX_ANTENNA_MASK); tx_streams = hweight32(cap & MWL8K_CAP_TX_ANTENNA_MASK); band->ht_cap.mcs.rx_mask[0] = 0xff; if (rx_streams >= 2) band->ht_cap.mcs.rx_mask[1] = 0xff; if (rx_streams >= 3) band->ht_cap.mcs.rx_mask[2] = 0xff; band->ht_cap.mcs.rx_mask[4] = 0x01; band->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; if (rx_streams != tx_streams) { band->ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF; band->ht_cap.mcs.tx_params |= (tx_streams - 1) << IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT; } } static void mwl8k_set_caps(struct ieee80211_hw *hw, u32 caps) { struct mwl8k_priv *priv = hw->priv; if ((caps & MWL8K_CAP_2GHZ4) || !(caps & MWL8K_CAP_BAND_MASK)) { mwl8k_setup_2ghz_band(hw); if (caps & MWL8K_CAP_MIMO) mwl8k_set_ht_caps(hw, &priv->band_24, caps); } if (caps & MWL8K_CAP_5GHZ) { mwl8k_setup_5ghz_band(hw); if (caps & MWL8K_CAP_MIMO) mwl8k_set_ht_caps(hw, &priv->band_50, caps); } } static int mwl8k_cmd_get_hw_spec_sta(struct ieee80211_hw *hw) { struct mwl8k_priv *priv = hw->priv; struct mwl8k_cmd_get_hw_spec_sta *cmd; int rc; int i; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (cmd == NULL) return -ENOMEM; cmd->header.code = cpu_to_le16(MWL8K_CMD_GET_HW_SPEC); cmd->header.length = cpu_to_le16(sizeof(*cmd)); memset(cmd->perm_addr, 0xff, sizeof(cmd->perm_addr)); cmd->ps_cookie = cpu_to_le32(priv->cookie_dma); cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rxd_dma); cmd->num_tx_queues = cpu_to_le32(mwl8k_tx_queues(priv)); for (i = 0; i < mwl8k_tx_queues(priv); i++) cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[i].txd_dma); cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS); cmd->total_rxd = cpu_to_le32(MWL8K_RX_DESCS); rc = mwl8k_post_cmd(hw, &cmd->header); if (!rc) { SET_IEEE80211_PERM_ADDR(hw, cmd->perm_addr); priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs); priv->fw_rev = le32_to_cpu(cmd->fw_rev); priv->hw_rev = cmd->hw_rev; mwl8k_set_caps(hw, le32_to_cpu(cmd->caps)); priv->ap_macids_supported = 0x00000000; priv->sta_macids_supported = 
0x00000001; } kfree(cmd); return rc; } /* * CMD_GET_HW_SPEC (AP version). */ struct mwl8k_cmd_get_hw_spec_ap { struct mwl8k_cmd_pkt header; __u8 hw_rev; __u8 host_interface; __le16 num_wcb; __le16 num_mcaddrs; __u8 perm_addr[ETH_ALEN]; __le16 region_code; __le16 num_antenna; __le32 fw_rev; __le32 wcbbase0; __le32 rxwrptr; __le32 rxrdptr; __le32 ps_cookie; __le32 wcbbase1; __le32 wcbbase2; __le32 wcbbase3; __le32 fw_api_version; __le32 caps; __le32 num_of_ampdu_queues; __le32 wcbbase_ampdu[MWL8K_MAX_AMPDU_QUEUES]; } __packed; static int mwl8k_cmd_get_hw_spec_ap(struct ieee80211_hw *hw) { struct mwl8k_priv *priv = hw->priv; struct mwl8k_cmd_get_hw_spec_ap *cmd; int rc, i; u32 api_version; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (cmd == NULL) return -ENOMEM; cmd->header.code = cpu_to_le16(MWL8K_CMD_GET_HW_SPEC); cmd->header.length = cpu_to_le16(sizeof(*cmd)); memset(cmd->perm_addr, 0xff, sizeof(cmd->perm_addr)); cmd->ps_cookie = cpu_to_le32(priv->cookie_dma); rc = mwl8k_post_cmd(hw, &cmd->header); if (!rc) { int off; api_version = le32_to_cpu(cmd->fw_api_version); if (priv->device_info->fw_api_ap != api_version) { printk(KERN_ERR "%s: Unsupported fw API version for %s." 
" Expected %d got %d.\n", MWL8K_NAME, priv->device_info->part_name, priv->device_info->fw_api_ap, api_version); rc = -EINVAL; goto done; } SET_IEEE80211_PERM_ADDR(hw, cmd->perm_addr); priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs); priv->fw_rev = le32_to_cpu(cmd->fw_rev); priv->hw_rev = cmd->hw_rev; mwl8k_set_caps(hw, le32_to_cpu(cmd->caps)); priv->ap_macids_supported = 0x000000ff; priv->sta_macids_supported = 0x00000000; priv->num_ampdu_queues = le32_to_cpu(cmd->num_of_ampdu_queues); if (priv->num_ampdu_queues > MWL8K_MAX_AMPDU_QUEUES) { wiphy_warn(hw->wiphy, "fw reported %d ampdu queues" " but we only support %d.\n", priv->num_ampdu_queues, MWL8K_MAX_AMPDU_QUEUES); priv->num_ampdu_queues = MWL8K_MAX_AMPDU_QUEUES; } off = le32_to_cpu(cmd->rxwrptr) & 0xffff; iowrite32(priv->rxq[0].rxd_dma, priv->sram + off); off = le32_to_cpu(cmd->rxrdptr) & 0xffff; iowrite32(priv->rxq[0].rxd_dma, priv->sram + off); priv->txq_offset[0] = le32_to_cpu(cmd->wcbbase0) & 0xffff; priv->txq_offset[1] = le32_to_cpu(cmd->wcbbase1) & 0xffff; priv->txq_offset[2] = le32_to_cpu(cmd->wcbbase2) & 0xffff; priv->txq_offset[3] = le32_to_cpu(cmd->wcbbase3) & 0xffff; for (i = 0; i < priv->num_ampdu_queues; i++) priv->txq_offset[i + MWL8K_TX_WMM_QUEUES] = le32_to_cpu(cmd->wcbbase_ampdu[i]) & 0xffff; } done: kfree(cmd); return rc; } /* * CMD_SET_HW_SPEC. */ struct mwl8k_cmd_set_hw_spec { struct mwl8k_cmd_pkt header; __u8 hw_rev; __u8 host_interface; __le16 num_mcaddrs; __u8 perm_addr[ETH_ALEN]; __le16 region_code; __le32 fw_rev; __le32 ps_cookie; __le32 caps; __le32 rx_queue_ptr; __le32 num_tx_queues; __le32 tx_queue_ptrs[MWL8K_MAX_TX_QUEUES]; __le32 flags; __le32 num_tx_desc_per_queue; __le32 total_rxd; } __packed; /* If enabled, MWL8K_SET_HW_SPEC_FLAG_ENABLE_LIFE_TIME_EXPIRY will cause * packets to expire 500 ms after the timestamp in the tx descriptor. That is, * the packets that are queued for more than 500ms, will be dropped in the * hardware. 
This helps minimizing the issues caused due to head-of-line * blocking where a slow client can hog the bandwidth and affect traffic to a * faster client. */ #define MWL8K_SET_HW_SPEC_FLAG_ENABLE_LIFE_TIME_EXPIRY 0x00000400 #define MWL8K_SET_HW_SPEC_FLAG_GENERATE_CCMP_HDR 0x00000200 #define MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT 0x00000080 #define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP 0x00000020 #define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON 0x00000010 static int mwl8k_cmd_set_hw_spec(struct ieee80211_hw *hw) { struct mwl8k_priv *priv = hw->priv; struct mwl8k_cmd_set_hw_spec *cmd; int rc; int i; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (cmd == NULL) return -ENOMEM; cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_HW_SPEC); cmd->header.length = cpu_to_le16(sizeof(*cmd)); cmd->ps_cookie = cpu_to_le32(priv->cookie_dma); cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rxd_dma); cmd->num_tx_queues = cpu_to_le32(mwl8k_tx_queues(priv)); /* * Mac80211 stack has Q0 as highest priority and Q3 as lowest in * that order. Firmware has Q3 as highest priority and Q0 as lowest * in that order. Map Q3 of mac80211 to Q0 of firmware so that the * priority is interpreted the right way in firmware. */ for (i = 0; i < mwl8k_tx_queues(priv); i++) { int j = mwl8k_tx_queues(priv) - 1 - i; cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[j].txd_dma); } cmd->flags = cpu_to_le32(MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT | MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP | MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON | MWL8K_SET_HW_SPEC_FLAG_ENABLE_LIFE_TIME_EXPIRY | MWL8K_SET_HW_SPEC_FLAG_GENERATE_CCMP_HDR); cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS); cmd->total_rxd = cpu_to_le32(MWL8K_RX_DESCS); rc = mwl8k_post_cmd(hw, &cmd->header); kfree(cmd); return rc; } /* * CMD_MAC_MULTICAST_ADR. 
*/ struct mwl8k_cmd_mac_multicast_adr { struct mwl8k_cmd_pkt header; __le16 action; __le16 numaddr; __u8 addr[0][ETH_ALEN]; }; #define MWL8K_ENABLE_RX_DIRECTED 0x0001 #define MWL8K_ENABLE_RX_MULTICAST 0x0002 #define MWL8K_ENABLE_RX_ALL_MULTICAST 0x0004 #define MWL8K_ENABLE_RX_BROADCAST 0x0008 static struct mwl8k_cmd_pkt * __mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti, struct netdev_hw_addr_list *mc_list) { struct mwl8k_priv *priv = hw->priv; struct mwl8k_cmd_mac_multicast_adr *cmd; int size; int mc_count = 0; if (mc_list) mc_count = netdev_hw_addr_list_count(mc_list); if (allmulti || mc_count > priv->num_mcaddrs) { allmulti = 1; mc_count = 0; } size = sizeof(*cmd) + mc_count * ETH_ALEN; cmd = kzalloc(size, GFP_ATOMIC); if (cmd == NULL) return NULL; cmd->header.code = cpu_to_le16(MWL8K_CMD_MAC_MULTICAST_ADR); cmd->header.length = cpu_to_le16(size); cmd->action = cpu_to_le16(MWL8K_ENABLE_RX_DIRECTED | MWL8K_ENABLE_RX_BROADCAST); if (allmulti) { cmd->action |= cpu_to_le16(MWL8K_ENABLE_RX_ALL_MULTICAST); } else if (mc_count) { struct netdev_hw_addr *ha; int i = 0; cmd->action |= cpu_to_le16(MWL8K_ENABLE_RX_MULTICAST); cmd->numaddr = cpu_to_le16(mc_count); netdev_hw_addr_list_for_each(ha, mc_list) { memcpy(cmd->addr[i], ha->addr, ETH_ALEN); } } return &cmd->header; } /* * CMD_GET_STAT. 
*/
struct mwl8k_cmd_get_stat {
	struct mwl8k_cmd_pkt header;
	__le32 stats[64];
} __packed;

/* Indices into stats[] for the counters mac80211 asks for. */
#define MWL8K_STAT_ACK_FAILURE	9
#define MWL8K_STAT_RTS_FAILURE	12
#define MWL8K_STAT_FCS_ERROR	24
#define MWL8K_STAT_RTS_SUCCESS	11

/*
 * CMD_GET_STAT: fetch the firmware counter block and fill in the four
 * dot11 counters mac80211 reports.  The firmware writes its reply back
 * into the posted command buffer, which is why cmd->stats is read after
 * mwl8k_post_cmd() succeeds.
 */
static int mwl8k_cmd_get_stat(struct ieee80211_hw *hw,
			      struct ieee80211_low_level_stats *stats)
{
	struct mwl8k_cmd_get_stat *cmd;
	int rc;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_GET_STAT);
	cmd->header.length = cpu_to_le16(sizeof(*cmd));

	rc = mwl8k_post_cmd(hw, &cmd->header);
	if (!rc) {
		stats->dot11ACKFailureCount =
			le32_to_cpu(cmd->stats[MWL8K_STAT_ACK_FAILURE]);
		stats->dot11RTSFailureCount =
			le32_to_cpu(cmd->stats[MWL8K_STAT_RTS_FAILURE]);
		stats->dot11FCSErrorCount =
			le32_to_cpu(cmd->stats[MWL8K_STAT_FCS_ERROR]);
		stats->dot11RTSSuccessCount =
			le32_to_cpu(cmd->stats[MWL8K_STAT_RTS_SUCCESS]);
	}
	kfree(cmd);

	return rc;
}

/*
 * CMD_RADIO_CONTROL.
 */
struct mwl8k_cmd_radio_control {
	struct mwl8k_cmd_pkt header;
	__le16 action;
	__le16 control;
	__le16 radio_on;
} __packed;

/*
 * Turn the radio on or off.  A no-op when the radio is already in the
 * requested state unless @force is set (used to re-push the preamble
 * setting).  control: 3 = short preamble, 1 = long preamble.  On success
 * the cached priv->radio_on state is updated.
 */
static int
mwl8k_cmd_radio_control(struct ieee80211_hw *hw, bool enable, bool force)
{
	struct mwl8k_priv *priv = hw->priv;
	struct mwl8k_cmd_radio_control *cmd;
	int rc;

	if (enable == priv->radio_on && !force)
		return 0;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_RADIO_CONTROL);
	cmd->header.length = cpu_to_le16(sizeof(*cmd));
	cmd->action = cpu_to_le16(MWL8K_CMD_SET);
	cmd->control = cpu_to_le16(priv->radio_short_preamble ? 3 : 1);
	cmd->radio_on = cpu_to_le16(enable ? 0x0001 : 0x0000);

	rc = mwl8k_post_cmd(hw, &cmd->header);
	kfree(cmd);

	if (!rc)
		priv->radio_on = enable;

	return rc;
}

/* Convenience wrapper: radio off, no force. */
static int mwl8k_cmd_radio_disable(struct ieee80211_hw *hw)
{
	return mwl8k_cmd_radio_control(hw, 0, 0);
}

/* Convenience wrapper: radio on, no force. */
static int mwl8k_cmd_radio_enable(struct ieee80211_hw *hw)
{
	return mwl8k_cmd_radio_control(hw, 1, 0);
}

/*
 * Cache the preamble preference and force a RADIO_CONTROL command so the
 * new control value reaches the firmware even if the radio is already on.
 */
static int
mwl8k_set_radio_preamble(struct ieee80211_hw *hw, bool short_preamble)
{
	struct mwl8k_priv *priv = hw->priv;

	priv->radio_short_preamble = short_preamble;

	return mwl8k_cmd_radio_control(hw, 1, 1);
}

/*
 * CMD_RF_TX_POWER.
 */
#define MWL8K_RF_TX_POWER_LEVEL_TOTAL	8

struct mwl8k_cmd_rf_tx_power {
	struct mwl8k_cmd_pkt header;
	__le16 action;
	__le16 support_level;
	__le16 current_level;
	__le16 reserved;
	__le16 power_level_list[MWL8K_RF_TX_POWER_LEVEL_TOTAL];
} __packed;

/* Set the RF TX power level (in dBm) via CMD_RF_TX_POWER. */
static int mwl8k_cmd_rf_tx_power(struct ieee80211_hw *hw, int dBm)
{
	struct mwl8k_cmd_rf_tx_power *cmd;
	int rc;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_RF_TX_POWER);
	cmd->header.length = cpu_to_le16(sizeof(*cmd));
	cmd->action = cpu_to_le16(MWL8K_CMD_SET);
	cmd->support_level = cpu_to_le16(dBm);

	rc = mwl8k_post_cmd(hw, &cmd->header);
	kfree(cmd);

	return rc;
}

/*
 * CMD_TX_POWER.
*/
#define MWL8K_TX_POWER_LEVEL_TOTAL      12

struct mwl8k_cmd_tx_power {
	struct mwl8k_cmd_pkt header;
	__le16 action;
	__le16 band;
	__le16 channel;
	__le16 bw;
	__le16 sub_ch;
	__le16 power_level_list[MWL8K_TX_POWER_LEVEL_TOTAL];
} __packed;

/*
 * CMD_TX_POWER: program per-channel TX power.  Band/bandwidth/secondary-
 * channel encodings are firmware-defined magic values (band 0x1 = 2.4 GHz,
 * 0x4 = 5 GHz; bw 0x2 = 20 MHz, 0x4 = 40 MHz; sub_ch 0x3 = secondary
 * below, 0x1 = secondary above).  The same @pwr is applied to every
 * entry in the power level list.
 */
static int mwl8k_cmd_tx_power(struct ieee80211_hw *hw,
			      struct ieee80211_conf *conf,
			      unsigned short pwr)
{
	struct ieee80211_channel *channel = conf->channel;
	struct mwl8k_cmd_tx_power *cmd;
	int rc;
	int i;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_TX_POWER);
	cmd->header.length = cpu_to_le16(sizeof(*cmd));
	cmd->action = cpu_to_le16(MWL8K_CMD_SET_LIST);

	if (channel->band == IEEE80211_BAND_2GHZ)
		cmd->band = cpu_to_le16(0x1);
	else if (channel->band == IEEE80211_BAND_5GHZ)
		cmd->band = cpu_to_le16(0x4);

	cmd->channel = cpu_to_le16(channel->hw_value);

	if (conf->channel_type == NL80211_CHAN_NO_HT ||
	    conf->channel_type == NL80211_CHAN_HT20) {
		cmd->bw = cpu_to_le16(0x2);
	} else {
		cmd->bw = cpu_to_le16(0x4);
		if (conf->channel_type == NL80211_CHAN_HT40MINUS)
			cmd->sub_ch = cpu_to_le16(0x3);
		else if (conf->channel_type == NL80211_CHAN_HT40PLUS)
			cmd->sub_ch = cpu_to_le16(0x1);
	}

	for (i = 0; i < MWL8K_TX_POWER_LEVEL_TOTAL; i++)
		cmd->power_level_list[i] = cpu_to_le16(pwr);

	rc = mwl8k_post_cmd(hw, &cmd->header);
	kfree(cmd);

	return rc;
}

/*
 * CMD_RF_ANTENNA.
 */
struct mwl8k_cmd_rf_antenna {
	struct mwl8k_cmd_pkt header;
	__le16 antenna;
	__le16 mode;
} __packed;

/* Selector values for the @antenna argument below. */
#define MWL8K_RF_ANTENNA_RX	1
#define MWL8K_RF_ANTENNA_TX	2

/* CMD_RF_ANTENNA: set the RX or TX antenna bitmask. */
static int
mwl8k_cmd_rf_antenna(struct ieee80211_hw *hw, int antenna, int mask)
{
	struct mwl8k_cmd_rf_antenna *cmd;
	int rc;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_RF_ANTENNA);
	cmd->header.length = cpu_to_le16(sizeof(*cmd));
	cmd->antenna = cpu_to_le16(antenna);
	cmd->mode = cpu_to_le16(mask);

	rc = mwl8k_post_cmd(hw, &cmd->header);
	kfree(cmd);

	return rc;
}

/*
 * CMD_SET_BEACON.
*/
struct mwl8k_cmd_set_beacon {
	struct mwl8k_cmd_pkt header;
	__le16 beacon_len;
	/* Variable-length beacon frame follows the header. */
	__u8 beacon[0];
};

/*
 * CMD_SET_BEACON: hand a host-built beacon template of @len bytes to the
 * firmware for the given vif.
 */
static int mwl8k_cmd_set_beacon(struct ieee80211_hw *hw,
				struct ieee80211_vif *vif, u8 *beacon, int len)
{
	struct mwl8k_cmd_set_beacon *cmd;
	int rc;

	cmd = kzalloc(sizeof(*cmd) + len, GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_BEACON);
	cmd->header.length = cpu_to_le16(sizeof(*cmd) + len);
	cmd->beacon_len = cpu_to_le16(len);
	memcpy(cmd->beacon, beacon, len);

	rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
	kfree(cmd);

	return rc;
}

/*
 * CMD_SET_PRE_SCAN.
 */
struct mwl8k_cmd_set_pre_scan {
	struct mwl8k_cmd_pkt header;
} __packed;

/* CMD_SET_PRE_SCAN: notify firmware that a scan is about to start. */
static int mwl8k_cmd_set_pre_scan(struct ieee80211_hw *hw)
{
	struct mwl8k_cmd_set_pre_scan *cmd;
	int rc;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_PRE_SCAN);
	cmd->header.length = cpu_to_le16(sizeof(*cmd));

	rc = mwl8k_post_cmd(hw, &cmd->header);
	kfree(cmd);

	return rc;
}

/*
 * CMD_SET_POST_SCAN.
 */
struct mwl8k_cmd_set_post_scan {
	struct mwl8k_cmd_pkt header;
	__le32 isibss;
	__u8 bssid[ETH_ALEN];
} __packed;

/*
 * CMD_SET_POST_SCAN: notify firmware that the scan has finished and
 * supply the BSSID to return to (isibss is always 0 here).
 */
static int
mwl8k_cmd_set_post_scan(struct ieee80211_hw *hw, const __u8 *mac)
{
	struct mwl8k_cmd_set_post_scan *cmd;
	int rc;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_POST_SCAN);
	cmd->header.length = cpu_to_le16(sizeof(*cmd));
	cmd->isibss = 0;
	memcpy(cmd->bssid, mac, ETH_ALEN);

	rc = mwl8k_post_cmd(hw, &cmd->header);
	kfree(cmd);

	return rc;
}

/*
 * CMD_SET_RF_CHANNEL.
*/
struct mwl8k_cmd_set_rf_channel {
	struct mwl8k_cmd_pkt header;
	__le16 action;
	__u8 current_channel;
	__le32 channel_flags;
} __packed;

/*
 * CMD_SET_RF_CHANNEL: tune to the configured channel.  channel_flags
 * encodes band and HT bandwidth with firmware-defined bit patterns.
 * Note the 9-digit literals below (0x000001900 == 0x1900,
 * 0x000000900 == 0x900): the extra leading zero is cosmetic only and
 * does not change the value.
 */
static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
				    struct ieee80211_conf *conf)
{
	struct ieee80211_channel *channel = conf->channel;
	struct mwl8k_cmd_set_rf_channel *cmd;
	int rc;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RF_CHANNEL);
	cmd->header.length = cpu_to_le16(sizeof(*cmd));
	cmd->action = cpu_to_le16(MWL8K_CMD_SET);
	cmd->current_channel = channel->hw_value;

	if (channel->band == IEEE80211_BAND_2GHZ)
		cmd->channel_flags |= cpu_to_le32(0x00000001);
	else if (channel->band == IEEE80211_BAND_5GHZ)
		cmd->channel_flags |= cpu_to_le32(0x00000004);

	if (conf->channel_type == NL80211_CHAN_NO_HT ||
	    conf->channel_type == NL80211_CHAN_HT20)
		cmd->channel_flags |= cpu_to_le32(0x00000080);
	else if (conf->channel_type == NL80211_CHAN_HT40MINUS)
		cmd->channel_flags |= cpu_to_le32(0x000001900);
	else if (conf->channel_type == NL80211_CHAN_HT40PLUS)
		cmd->channel_flags |= cpu_to_le32(0x000000900);

	rc = mwl8k_post_cmd(hw, &cmd->header);
	kfree(cmd);

	return rc;
}

/*
 * CMD_SET_AID.
 */
#define MWL8K_FRAME_PROT_DISABLED			0x00
#define MWL8K_FRAME_PROT_11G				0x07
#define MWL8K_FRAME_PROT_11N_HT_40MHZ_ONLY		0x02
#define MWL8K_FRAME_PROT_11N_HT_ALL			0x06

struct mwl8k_cmd_update_set_aid {
	struct mwl8k_cmd_pkt header;
	__le16 aid;

	/* AP's MAC address (BSSID) */
	__u8 bssid[ETH_ALEN];
	__le16 protection_mode;
	__u8 supp_rates[14];
} __packed;

/*
 * Convert a legacy (2.4 GHz) rate bitmask into the firmware's packed
 * array of hardware rate values.  @rates must have room for up to 13
 * entries (14 bits minus the one cleared below).
 */
static void legacy_rate_mask_to_array(u8 *rates, u32 mask)
{
	int i;
	int j;

	/*
	 * Clear nonstandard rates 4 and 13.
	 */
	mask &= 0x1fef;

	for (i = 0, j = 0; i < 14; i++) {
		if (mask & (1 << i))
			rates[j++] = mwl8k_rates_24[i].hw_value;
	}
}

/*
 * CMD_SET_AID: give the firmware our association ID, the BSSID, the
 * protection mode derived from the BSS's CTS/HT operation settings, and
 * the supported legacy rate set.
 */
static int mwl8k_cmd_set_aid(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif, u32 legacy_rate_mask)
{
	struct mwl8k_cmd_update_set_aid *cmd;
	u16 prot_mode;
	int rc;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_AID);
	cmd->header.length = cpu_to_le16(sizeof(*cmd));
	cmd->aid = cpu_to_le16(vif->bss_conf.aid);
	memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);

	if (vif->bss_conf.use_cts_prot) {
		prot_mode = MWL8K_FRAME_PROT_11G;
	} else {
		switch (vif->bss_conf.ht_operation_mode &
			IEEE80211_HT_OP_MODE_PROTECTION) {
		case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
			prot_mode = MWL8K_FRAME_PROT_11N_HT_40MHZ_ONLY;
			break;
		case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
			prot_mode = MWL8K_FRAME_PROT_11N_HT_ALL;
			break;
		default:
			prot_mode = MWL8K_FRAME_PROT_DISABLED;
			break;
		}
	}
	cmd->protection_mode = cpu_to_le16(prot_mode);

	legacy_rate_mask_to_array(cmd->supp_rates, legacy_rate_mask);

	rc = mwl8k_post_cmd(hw, &cmd->header);
	kfree(cmd);

	return rc;
}

/*
 * CMD_SET_RATE.
 */
struct mwl8k_cmd_set_rate {
	struct mwl8k_cmd_pkt header;
	__u8 legacy_rates[14];

	/* Bitmap for supported MCS codes.  */
	__u8 mcs_set[16];
	__u8 reserved[16];
} __packed;

/*
 * CMD_SET_RATE: program the legacy rate table and the 16-byte HT MCS
 * bitmap for this vif.
 */
static int
mwl8k_cmd_set_rate(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   u32 legacy_rate_mask, u8 *mcs_rates)
{
	struct mwl8k_cmd_set_rate *cmd;
	int rc;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RATE);
	cmd->header.length = cpu_to_le16(sizeof(*cmd));
	legacy_rate_mask_to_array(cmd->legacy_rates, legacy_rate_mask);
	memcpy(cmd->mcs_set, mcs_rates, 16);

	rc = mwl8k_post_cmd(hw, &cmd->header);
	kfree(cmd);

	return rc;
}

/*
 * CMD_FINALIZE_JOIN.
*/
#define MWL8K_FJ_BEACON_MAXLEN	128

struct mwl8k_cmd_finalize_join {
	struct mwl8k_cmd_pkt header;
	__le32 sleep_interval;	/* Number of beacon periods to sleep */
	__u8 beacon_data[MWL8K_FJ_BEACON_MAXLEN];
} __packed;

/*
 * CMD_SET_FINALIZE_JOIN: pass (a clamped copy of) the AP's beacon payload
 * to the firmware after association.  The 802.11 header is stripped; the
 * payload is clamped to [0, MWL8K_FJ_BEACON_MAXLEN].  A @dtim of 0 is
 * treated as 1 beacon period.
 */
static int mwl8k_cmd_finalize_join(struct ieee80211_hw *hw, void *frame,
				   int framelen, int dtim)
{
	struct mwl8k_cmd_finalize_join *cmd;
	struct ieee80211_mgmt *payload = frame;
	int payload_len;
	int rc;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_FINALIZE_JOIN);
	cmd->header.length = cpu_to_le16(sizeof(*cmd));
	cmd->sleep_interval = cpu_to_le32(dtim ? dtim : 1);

	payload_len = framelen - ieee80211_hdrlen(payload->frame_control);
	if (payload_len < 0)
		payload_len = 0;
	else if (payload_len > MWL8K_FJ_BEACON_MAXLEN)
		payload_len = MWL8K_FJ_BEACON_MAXLEN;

	memcpy(cmd->beacon_data, &payload->u.beacon, payload_len);

	rc = mwl8k_post_cmd(hw, &cmd->header);
	kfree(cmd);

	return rc;
}

/*
 * CMD_SET_RTS_THRESHOLD.
 */
struct mwl8k_cmd_set_rts_threshold {
	struct mwl8k_cmd_pkt header;
	__le16 action;
	__le16 threshold;
} __packed;

/* CMD_RTS_THRESHOLD: set the RTS threshold in bytes. */
static int
mwl8k_cmd_set_rts_threshold(struct ieee80211_hw *hw, int rts_thresh)
{
	struct mwl8k_cmd_set_rts_threshold *cmd;
	int rc;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_RTS_THRESHOLD);
	cmd->header.length = cpu_to_le16(sizeof(*cmd));
	cmd->action = cpu_to_le16(MWL8K_CMD_SET);
	cmd->threshold = cpu_to_le16(rts_thresh);

	rc = mwl8k_post_cmd(hw, &cmd->header);
	kfree(cmd);

	return rc;
}

/*
 * CMD_SET_SLOT.
*/
struct mwl8k_cmd_set_slot {
	struct mwl8k_cmd_pkt header;
	__le16 action;
	__u8 short_slot;
} __packed;

/* CMD_SET_SLOT: select short (true) or long (false) slot time. */
static int mwl8k_cmd_set_slot(struct ieee80211_hw *hw, bool short_slot_time)
{
	struct mwl8k_cmd_set_slot *cmd;
	int rc;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_SLOT);
	cmd->header.length = cpu_to_le16(sizeof(*cmd));
	cmd->action = cpu_to_le16(MWL8K_CMD_SET);
	cmd->short_slot = short_slot_time;

	rc = mwl8k_post_cmd(hw, &cmd->header);
	kfree(cmd);

	return rc;
}

/*
 * CMD_SET_EDCA_PARAMS.
 */
struct mwl8k_cmd_set_edca_params {
	struct mwl8k_cmd_pkt header;

	/* See MWL8K_SET_EDCA_XXX below */
	__le16 action;

	/* TX opportunity in units of 32 us */
	__le16 txop;

	/* AP and STA firmware lay out the remaining fields differently. */
	union {
		struct {
			/* Log exponent of max contention period: 0...15 */
			__le32 log_cw_max;

			/* Log exponent of min contention period: 0...15 */
			__le32 log_cw_min;

			/* Adaptive interframe spacing in units of 32us */
			__u8 aifs;

			/* TX queue to configure */
			__u8 txq;
		} ap;
		struct {
			/* Log exponent of max contention period: 0...15 */
			__u8 log_cw_max;

			/* Log exponent of min contention period: 0...15 */
			__u8 log_cw_min;

			/* Adaptive interframe spacing in units of 32us */
			__u8 aifs;

			/* TX queue to configure */
			__u8 txq;
		} sta;
	};
} __packed;

#define MWL8K_SET_EDCA_CW	0x01
#define MWL8K_SET_EDCA_TXOP	0x02
#define MWL8K_SET_EDCA_AIFS	0x04

#define MWL8K_SET_EDCA_ALL	(MWL8K_SET_EDCA_CW | \
				 MWL8K_SET_EDCA_TXOP | \
				 MWL8K_SET_EDCA_AIFS)

/*
 * CMD_SET_EDCA_PARAMS: configure one TX queue's EDCA parameters.
 * mac80211 supplies cw values of the form 2^n - 1, so ilog2(cw + 1)
 * recovers the exponent the firmware expects.  The ap/sta union arm is
 * chosen by the firmware flavor in priv->ap_fw.
 */
static int
mwl8k_cmd_set_edca_params(struct ieee80211_hw *hw, __u8 qnum,
		__u16 cw_min, __u16 cw_max,
		__u8 aifs, __u16 txop)
{
	struct mwl8k_priv *priv = hw->priv;
	struct mwl8k_cmd_set_edca_params *cmd;
	int rc;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_EDCA_PARAMS);
	cmd->header.length = cpu_to_le16(sizeof(*cmd));
	cmd->action = cpu_to_le16(MWL8K_SET_EDCA_ALL);
	cmd->txop = cpu_to_le16(txop);
	if (priv->ap_fw) {
		cmd->ap.log_cw_max = cpu_to_le32(ilog2(cw_max + 1));
		cmd->ap.log_cw_min = cpu_to_le32(ilog2(cw_min + 1));
		cmd->ap.aifs = aifs;
		cmd->ap.txq = qnum;
	} else {
		cmd->sta.log_cw_max = (u8)ilog2(cw_max + 1);
		cmd->sta.log_cw_min = (u8)ilog2(cw_min + 1);
		cmd->sta.aifs = aifs;
		cmd->sta.txq = qnum;
	}

	rc = mwl8k_post_cmd(hw, &cmd->header);
	kfree(cmd);

	return rc;
}

/*
 * CMD_SET_WMM_MODE.
 */
struct mwl8k_cmd_set_wmm_mode {
	struct mwl8k_cmd_pkt header;
	__le16 action;
} __packed;

/*
 * CMD_SET_WMM_MODE: enable/disable WMM.  On success the cached
 * priv->wmm_enabled state is updated to match.
 */
static int mwl8k_cmd_set_wmm_mode(struct ieee80211_hw *hw, bool enable)
{
	struct mwl8k_priv *priv = hw->priv;
	struct mwl8k_cmd_set_wmm_mode *cmd;
	int rc;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_WMM_MODE);
	cmd->header.length = cpu_to_le16(sizeof(*cmd));
	cmd->action = cpu_to_le16(!!enable);

	rc = mwl8k_post_cmd(hw, &cmd->header);
	kfree(cmd);

	if (!rc)
		priv->wmm_enabled = enable;

	return rc;
}

/*
 * CMD_MIMO_CONFIG.
 */
struct mwl8k_cmd_mimo_config {
	struct mwl8k_cmd_pkt header;
	__le32 action;
	__u8 rx_antenna_map;
	__u8 tx_antenna_map;
} __packed;

/* CMD_MIMO_CONFIG: set the RX and TX antenna maps. */
static int mwl8k_cmd_mimo_config(struct ieee80211_hw *hw, __u8 rx, __u8 tx)
{
	struct mwl8k_cmd_mimo_config *cmd;
	int rc;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_MIMO_CONFIG);
	cmd->header.length = cpu_to_le16(sizeof(*cmd));
	cmd->action = cpu_to_le32((u32)MWL8K_CMD_SET);
	cmd->rx_antenna_map = rx;
	cmd->tx_antenna_map = tx;

	rc = mwl8k_post_cmd(hw, &cmd->header);
	kfree(cmd);

	return rc;
}

/*
 * CMD_USE_FIXED_RATE (STA version).
*/
struct mwl8k_cmd_use_fixed_rate_sta {
	struct mwl8k_cmd_pkt header;
	__le32 action;
	__le32 allow_rate_drop;
	__le32 num_rates;
	struct {
		__le32 is_ht_rate;
		__le32 enable_retry;
		__le32 rate;
		__le32 retry_count;
	} rate_entry[8];
	__le32 rate_type;
	__le32 reserved1;
	__le32 reserved2;
} __packed;

#define MWL8K_USE_AUTO_RATE	0x0002
#define MWL8K_UCAST_RATE	0

/*
 * CMD_USE_FIXED_RATE (STA firmware): select automatic rate adaptation
 * for unicast traffic (no fixed-rate entries are programmed here).
 */
static int mwl8k_cmd_use_fixed_rate_sta(struct ieee80211_hw *hw)
{
	struct mwl8k_cmd_use_fixed_rate_sta *cmd;
	int rc;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_USE_FIXED_RATE);
	cmd->header.length = cpu_to_le16(sizeof(*cmd));
	cmd->action = cpu_to_le32(MWL8K_USE_AUTO_RATE);
	cmd->rate_type = cpu_to_le32(MWL8K_UCAST_RATE);

	rc = mwl8k_post_cmd(hw, &cmd->header);
	kfree(cmd);

	return rc;
}

/*
 * CMD_USE_FIXED_RATE (AP version).
 */
struct mwl8k_cmd_use_fixed_rate_ap {
	struct mwl8k_cmd_pkt header;
	__le32 action;
	__le32 allow_rate_drop;
	__le32 num_rates;
	struct mwl8k_rate_entry_ap {
		__le32 is_ht_rate;
		__le32 enable_retry;
		__le32 rate;
		__le32 retry_count;
	} rate_entry[4];
	u8 multicast_rate;
	u8 multicast_rate_type;
	u8 management_rate;
} __packed;

/*
 * CMD_USE_FIXED_RATE (AP firmware): auto rate for data, but pin the
 * multicast and management frame rates to the given hardware rate values.
 */
static int
mwl8k_cmd_use_fixed_rate_ap(struct ieee80211_hw *hw, int mcast, int mgmt)
{
	struct mwl8k_cmd_use_fixed_rate_ap *cmd;
	int rc;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_USE_FIXED_RATE);
	cmd->header.length = cpu_to_le16(sizeof(*cmd));
	cmd->action = cpu_to_le32(MWL8K_USE_AUTO_RATE);
	cmd->multicast_rate = mcast;
	cmd->management_rate = mgmt;

	rc = mwl8k_post_cmd(hw, &cmd->header);
	kfree(cmd);

	return rc;
}

/*
 * CMD_ENABLE_SNIFFER.
*/
struct mwl8k_cmd_enable_sniffer {
	struct mwl8k_cmd_pkt header;
	__le32 action;
} __packed;

/* CMD_ENABLE_SNIFFER: enable (1) or disable (0) promiscuous sniffing. */
static int mwl8k_cmd_enable_sniffer(struct ieee80211_hw *hw, bool enable)
{
	struct mwl8k_cmd_enable_sniffer *cmd;
	int rc;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_ENABLE_SNIFFER);
	cmd->header.length = cpu_to_le16(sizeof(*cmd));
	cmd->action = cpu_to_le32(!!enable);

	rc = mwl8k_post_cmd(hw, &cmd->header);
	kfree(cmd);

	return rc;
}

/*
 * SET/DEL MAC address command payload.  AP firmware ("mbss" layout)
 * additionally wants a mac_type discriminator; STA firmware takes the
 * bare address.
 */
struct mwl8k_cmd_update_mac_addr {
	struct mwl8k_cmd_pkt header;
	union {
		struct {
			__le16 mac_type;
			__u8 mac_addr[ETH_ALEN];
		} mbss;
		__u8 mac_addr[ETH_ALEN];
	};
} __packed;

#define MWL8K_MAC_TYPE_PRIMARY_CLIENT		0
#define MWL8K_MAC_TYPE_SECONDARY_CLIENT		1
#define MWL8K_MAC_TYPE_PRIMARY_AP		2
#define MWL8K_MAC_TYPE_SECONDARY_AP		3

/*
 * Add (@set) or delete a MAC address for @vif.  The mac_type is derived
 * from the vif type and from whether this vif's macid is the first bit
 * set in the corresponding supported-macids mask (primary vs secondary).
 */
static int mwl8k_cmd_update_mac_addr(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif, u8 *mac, bool set)
{
	struct mwl8k_priv *priv = hw->priv;
	struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
	struct mwl8k_cmd_update_mac_addr *cmd;
	int mac_type;
	int rc;

	mac_type = MWL8K_MAC_TYPE_PRIMARY_AP;
	if (vif != NULL && vif->type == NL80211_IFTYPE_STATION) {
		if (mwl8k_vif->macid + 1 == ffs(priv->sta_macids_supported))
			mac_type = MWL8K_MAC_TYPE_PRIMARY_CLIENT;
		else
			mac_type = MWL8K_MAC_TYPE_SECONDARY_CLIENT;
	} else if (vif != NULL && vif->type == NL80211_IFTYPE_AP) {
		if (mwl8k_vif->macid + 1 == ffs(priv->ap_macids_supported))
			mac_type = MWL8K_MAC_TYPE_PRIMARY_AP;
		else
			mac_type = MWL8K_MAC_TYPE_SECONDARY_AP;
	}

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	if (set)
		cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_MAC_ADDR);
	else
		cmd->header.code = cpu_to_le16(MWL8K_CMD_DEL_MAC_ADDR);

	cmd->header.length = cpu_to_le16(sizeof(*cmd));
	if (priv->ap_fw) {
		cmd->mbss.mac_type = cpu_to_le16(mac_type);
		memcpy(cmd->mbss.mac_addr, mac, ETH_ALEN);
	} else {
		memcpy(cmd->mac_addr, mac, ETH_ALEN);
	}

	rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
	kfree(cmd);

	return rc;
}

/*
 * MWL8K_CMD_SET_MAC_ADDR.
 */
static inline int mwl8k_cmd_set_mac_addr(struct ieee80211_hw *hw,
					 struct ieee80211_vif *vif, u8 *mac)
{
	return mwl8k_cmd_update_mac_addr(hw, vif, mac, true);
}

/*
 * MWL8K_CMD_DEL_MAC_ADDR.
 */
static inline int mwl8k_cmd_del_mac_addr(struct ieee80211_hw *hw,
					 struct ieee80211_vif *vif, u8 *mac)
{
	return mwl8k_cmd_update_mac_addr(hw, vif, mac, false);
}

/*
 * CMD_SET_RATEADAPT_MODE.
 */
struct mwl8k_cmd_set_rate_adapt_mode {
	struct mwl8k_cmd_pkt header;
	__le16 action;
	__le16 mode;
} __packed;

/* CMD_SET_RATEADAPT_MODE: select the firmware rate-adaptation mode. */
static int mwl8k_cmd_set_rateadapt_mode(struct ieee80211_hw *hw, __u16 mode)
{
	struct mwl8k_cmd_set_rate_adapt_mode *cmd;
	int rc;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RATEADAPT_MODE);
	cmd->header.length = cpu_to_le16(sizeof(*cmd));
	cmd->action = cpu_to_le16(MWL8K_CMD_SET);
	cmd->mode = cpu_to_le16(mode);

	rc = mwl8k_post_cmd(hw, &cmd->header);
	kfree(cmd);

	return rc;
}

/*
 * CMD_GET_WATCHDOG_BITMAP.
 */
struct mwl8k_cmd_get_watchdog_bitmap {
	struct mwl8k_cmd_pkt header;
	u8	bitmap;
} __packed;

/*
 * Query the firmware's BA watchdog bitmap; the reply is written back
 * into the command buffer and copied out on success.
 */
static int mwl8k_cmd_get_watchdog_bitmap(struct ieee80211_hw *hw, u8 *bitmap)
{
	struct mwl8k_cmd_get_watchdog_bitmap *cmd;
	int rc;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_GET_WATCHDOG_BITMAP);
	cmd->header.length = cpu_to_le16(sizeof(*cmd));

	rc = mwl8k_post_cmd(hw, &cmd->header);
	if (!rc)
		*bitmap = cmd->bitmap;

	kfree(cmd);

	return rc;
}

/* Sentinel "no stream expired" value returned by the watchdog query. */
#define INVALID_BA	0xAA

/*
 * Work handler: ask the firmware which BA stream its watchdog flagged
 * and tear down the corresponding mac80211 TX BA session if active.
 * NOTE(review): bitmap values below MWL8K_TX_WMM_QUEUES would underflow
 * stream_index (u8) and trip the BUG_ON — presumably the firmware never
 * reports such values; confirm against the firmware interface spec.
 */
static void mwl8k_watchdog_ba_events(struct work_struct *work)
{
	int rc;
	u8 bitmap = 0, stream_index;
	struct mwl8k_ampdu_stream *streams;
	struct mwl8k_priv *priv =
		container_of(work, struct mwl8k_priv, watchdog_ba_handle);

	rc = mwl8k_cmd_get_watchdog_bitmap(priv->hw, &bitmap);
	if (rc)
		return;

	if (bitmap == INVALID_BA)
		return;

	/* the bitmap is the hw queue number.  Map it to the ampdu queue. */
	stream_index = bitmap - MWL8K_TX_WMM_QUEUES;

	BUG_ON(stream_index >= priv->num_ampdu_queues);

	streams = &priv->ampdu[stream_index];

	if (streams->state == AMPDU_STREAM_ACTIVE)
		ieee80211_stop_tx_ba_session(streams->sta, streams->tid);

	return;
}

/*
 * CMD_BSS_START.
 */
struct mwl8k_cmd_bss_start {
	struct mwl8k_cmd_pkt header;
	__le32 enable;
} __packed;

/* CMD_BSS_START: start (non-zero) or stop (0) BSS operation on @vif. */
static int mwl8k_cmd_bss_start(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif, int enable)
{
	struct mwl8k_cmd_bss_start *cmd;
	int rc;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_BSS_START);
	cmd->header.length = cpu_to_le16(sizeof(*cmd));
	cmd->enable = cpu_to_le32(enable);

	rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
	kfree(cmd);

	return rc;
}

/*
 * CMD_BASTREAM.
 */

/*
 * UPSTREAM is tx direction
 */
#define BASTREAM_FLAG_DIRECTION_UPSTREAM	0x00
#define BASTREAM_FLAG_IMMEDIATE_TYPE		0x01

enum ba_stream_action_type {
	MWL8K_BA_CREATE,
	MWL8K_BA_UPDATE,
	MWL8K_BA_DESTROY,
	MWL8K_BA_FLUSH,
	MWL8K_BA_CHECK,
};

struct mwl8k_create_ba_stream {
	__le32	flags;
	__le32	idle_thrs;
	__le32	bar_thrs;
	__le32	window_size;
	u8	peer_mac_addr[6];
	u8	dialog_token;
	u8	tid;
	u8	queue_id;
	u8	param_info;
	__le32	ba_context;
	u8	reset_seq_no_flag;
	__le16	curr_seq_no;
	u8	sta_src_mac_addr[6];
} __packed;

struct mwl8k_destroy_ba_stream {
	__le32	flags;
	__le32	ba_context;
} __packed;

struct mwl8k_cmd_bastream {
	struct mwl8k_cmd_pkt	header;
	__le32	action;
	union {
		struct mwl8k_create_ba_stream	create_params;
		struct mwl8k_destroy_ba_stream	destroy_params;
	};
} __packed;

/*
 * CMD_BASTREAM / MWL8K_BA_CHECK: ask the firmware whether a BA stream
 * can be created for this peer/TID/queue before actually creating it.
 */
static int
mwl8k_check_ba(struct ieee80211_hw *hw, struct mwl8k_ampdu_stream *stream)
{
	struct mwl8k_cmd_bastream *cmd;
	int rc;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_BASTREAM);
	cmd->header.length = cpu_to_le16(sizeof(*cmd));

	cmd->action = cpu_to_le32(MWL8K_BA_CHECK);

	cmd->create_params.queue_id = stream->idx;
	memcpy(&cmd->create_params.peer_mac_addr[0], stream->sta->addr,
	       ETH_ALEN);
	cmd->create_params.tid = stream->tid;

	cmd->create_params.flags =
		cpu_to_le32(BASTREAM_FLAG_IMMEDIATE_TYPE) |
		cpu_to_le32(BASTREAM_FLAG_DIRECTION_UPSTREAM);

	rc = mwl8k_post_cmd(hw, &cmd->header);

	kfree(cmd);

	return rc;
}

/*
 * CMD_BASTREAM / MWL8K_BA_CREATE: create a TX BA stream with BAR
 * threshold and reorder window both set to @buf_size, sequence numbering
 * reset to 0, and A-MPDU factor/density taken from the peer's HT caps.
 * NOTE(review): the "Created a BA stream" debug line is emitted even
 * when the command failed (rc is not checked before logging).
 */
static int mwl8k_create_ba(struct ieee80211_hw *hw,
			   struct mwl8k_ampdu_stream *stream,
			   u8 buf_size)
{
	struct mwl8k_cmd_bastream *cmd;
	int rc;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_BASTREAM);
	cmd->header.length = cpu_to_le16(sizeof(*cmd));

	cmd->action = cpu_to_le32(MWL8K_BA_CREATE);

	cmd->create_params.bar_thrs = cpu_to_le32((u32)buf_size);
	cmd->create_params.window_size = cpu_to_le32((u32)buf_size);
	cmd->create_params.queue_id = stream->idx;

	memcpy(cmd->create_params.peer_mac_addr, stream->sta->addr, ETH_ALEN);
	cmd->create_params.tid = stream->tid;

	cmd->create_params.curr_seq_no = cpu_to_le16(0);
	cmd->create_params.reset_seq_no_flag = 1;

	cmd->create_params.param_info =
		(stream->sta->ht_cap.ampdu_factor &
		 IEEE80211_HT_AMPDU_PARM_FACTOR) |
		((stream->sta->ht_cap.ampdu_density << 2) &
		 IEEE80211_HT_AMPDU_PARM_DENSITY);

	cmd->create_params.flags =
		cpu_to_le32(BASTREAM_FLAG_IMMEDIATE_TYPE |
					BASTREAM_FLAG_DIRECTION_UPSTREAM);

	rc = mwl8k_post_cmd(hw, &cmd->header);

	wiphy_debug(hw->wiphy, "Created a BA stream for %pM : tid %d\n",
		stream->sta->addr, stream->tid);

	kfree(cmd);

	return rc;
}

/*
 * CMD_BASTREAM / MWL8K_BA_DESTROY: tear down the BA stream identified by
 * its index.  Errors from the post are ignored (best-effort teardown).
 */
static void mwl8k_destroy_ba(struct ieee80211_hw *hw,
			     struct mwl8k_ampdu_stream *stream)
{
	struct mwl8k_cmd_bastream *cmd;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_BASTREAM);
	cmd->header.length = cpu_to_le16(sizeof(*cmd));
	cmd->action = cpu_to_le32(MWL8K_BA_DESTROY);

	cmd->destroy_params.ba_context = cpu_to_le32(stream->idx);
	mwl8k_post_cmd(hw, &cmd->header);

	wiphy_debug(hw->wiphy, "Deleted BA stream index %d\n", stream->idx);

	kfree(cmd);
}

/*
 * CMD_SET_NEW_STN.
*/
struct mwl8k_cmd_set_new_stn {
	struct mwl8k_cmd_pkt header;
	__le16 aid;
	__u8 mac_addr[6];
	__le16 stn_id;
	__le16 action;
	__le16 rsvd;
	__le32 legacy_rates;
	__u8 ht_rates[4];
	__le16 cap_info;
	__le16 ht_capabilities_info;
	__u8 mac_ht_param_info;
	__u8 rev;
	__u8 control_channel;
	__u8 add_channel;
	__le16 op_mode;
	__le16 stbc;
	__u8 add_qos_info;
	__u8 is_qos_sta;
	__le32 fw_sta_ptr;
} __packed;

#define MWL8K_STA_ACTION_ADD		0
#define MWL8K_STA_ACTION_REMOVE		2

/*
 * CMD_SET_NEW_STN (add): register a peer station with the firmware,
 * including its AID, legacy rate set (5 GHz rates are shifted past the
 * five 2.4 GHz-only rates) and, when present, its HT capabilities.
 */
static int mwl8k_cmd_set_new_stn_add(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif,
				     struct ieee80211_sta *sta)
{
	struct mwl8k_cmd_set_new_stn *cmd;
	u32 rates;
	int rc;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_NEW_STN);
	cmd->header.length = cpu_to_le16(sizeof(*cmd));
	cmd->aid = cpu_to_le16(sta->aid);
	memcpy(cmd->mac_addr, sta->addr, ETH_ALEN);
	cmd->stn_id = cpu_to_le16(sta->aid);
	cmd->action = cpu_to_le16(MWL8K_STA_ACTION_ADD);
	if (hw->conf.channel->band == IEEE80211_BAND_2GHZ)
		rates = sta->supp_rates[IEEE80211_BAND_2GHZ];
	else
		rates = sta->supp_rates[IEEE80211_BAND_5GHZ] << 5;
	cmd->legacy_rates = cpu_to_le32(rates);
	if (sta->ht_cap.ht_supported) {
		cmd->ht_rates[0] = sta->ht_cap.mcs.rx_mask[0];
		cmd->ht_rates[1] = sta->ht_cap.mcs.rx_mask[1];
		cmd->ht_rates[2] = sta->ht_cap.mcs.rx_mask[2];
		cmd->ht_rates[3] = sta->ht_cap.mcs.rx_mask[3];
		cmd->ht_capabilities_info = cpu_to_le16(sta->ht_cap.cap);
		cmd->mac_ht_param_info = (sta->ht_cap.ampdu_factor & 3) |
			((sta->ht_cap.ampdu_density & 7) << 2);
		cmd->is_qos_sta = 1;
	}

	rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
	kfree(cmd);

	return rc;
}

/*
 * CMD_SET_NEW_STN (add self): register the vif's own MAC address.
 * action is left at 0 (== MWL8K_STA_ACTION_ADD) from kzalloc().
 */
static int mwl8k_cmd_set_new_stn_add_self(struct ieee80211_hw *hw,
					  struct ieee80211_vif *vif)
{
	struct mwl8k_cmd_set_new_stn *cmd;
	int rc;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_NEW_STN);
	cmd->header.length = cpu_to_le16(sizeof(*cmd));
	memcpy(cmd->mac_addr, vif->addr, ETH_ALEN);

	rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
	kfree(cmd);

	return rc;
}

/* CMD_SET_NEW_STN (remove): deregister the station with address @addr. */
static int mwl8k_cmd_set_new_stn_del(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif, u8 *addr)
{
	struct mwl8k_cmd_set_new_stn *cmd;
	int rc;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_NEW_STN);
	cmd->header.length = cpu_to_le16(sizeof(*cmd));
	memcpy(cmd->mac_addr, addr, ETH_ALEN);
	cmd->action = cpu_to_le16(MWL8K_STA_ACTION_REMOVE);

	rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
	kfree(cmd);

	return rc;
}

/*
 * CMD_UPDATE_ENCRYPTION.
 */

#define MAX_ENCR_KEY_LENGTH	16
#define MIC_KEY_LENGTH		8

struct mwl8k_cmd_update_encryption {
	struct mwl8k_cmd_pkt header;

	__le32 action;
	__le32 reserved;
	__u8 mac_addr[6];
	__u8 encr_type;

} __packed;

struct mwl8k_cmd_set_key {
	struct mwl8k_cmd_pkt header;

	__le32 action;
	__le32 reserved;
	__le16 length;
	__le16 key_type_id;
	__le32 key_info;
	__le32 key_id;
	__le16 key_len;
	__u8 key_material[MAX_ENCR_KEY_LENGTH];
	__u8 tkip_tx_mic_key[MIC_KEY_LENGTH];
	__u8 tkip_rx_mic_key[MIC_KEY_LENGTH];
	__le16 tkip_rsc_low;
	__le32 tkip_rsc_high;
	__le16 tkip_tsc_low;
	__le32 tkip_tsc_high;
	__u8 mac_addr[6];
} __packed;

/* Values for mwl8k_cmd_update_encryption/set_key action field. */
enum {
	MWL8K_ENCR_ENABLE,
	MWL8K_ENCR_SET_KEY,
	MWL8K_ENCR_REMOVE_KEY,
	MWL8K_ENCR_SET_GROUP_KEY,
};

#define MWL8K_UPDATE_ENCRYPTION_TYPE_WEP	0
#define MWL8K_UPDATE_ENCRYPTION_TYPE_DISABLE	1
#define MWL8K_UPDATE_ENCRYPTION_TYPE_TKIP	4
#define MWL8K_UPDATE_ENCRYPTION_TYPE_MIXED	7
#define MWL8K_UPDATE_ENCRYPTION_TYPE_AES	8

/* Firmware cipher algorithm identifiers (key_type_id). */
enum {
	MWL8K_ALG_WEP,
	MWL8K_ALG_TKIP,
	MWL8K_ALG_CCMP,
};

#define MWL8K_KEY_FLAG_TXGROUPKEY	0x00000004
#define MWL8K_KEY_FLAG_PAIRWISE		0x00000008
#define MWL8K_KEY_FLAG_TSC_VALID	0x00000040
#define MWL8K_KEY_FLAG_WEP_TXKEY	0x01000000
#define MWL8K_KEY_FLAG_MICKEY_VALID	0x02000000

/*
 * CMD_UPDATE_ENCRYPTION / MWL8K_ENCR_ENABLE: enable hardware encryption
 * of type @encr_type (MWL8K_UPDATE_ENCRYPTION_TYPE_*) for @addr.
 */
static int mwl8k_cmd_update_encryption_enable(struct ieee80211_hw *hw,
					      struct ieee80211_vif *vif,
					      u8 *addr,
					      u8 encr_type)
{
	struct mwl8k_cmd_update_encryption *cmd;
	int rc;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_ENCRYPTION);
	cmd->header.length = cpu_to_le16(sizeof(*cmd));
	cmd->action = cpu_to_le32(MWL8K_ENCR_ENABLE);
	memcpy(cmd->mac_addr, addr, ETH_ALEN);
	cmd->encr_type = encr_type;

	rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
	kfree(cmd);

	return rc;
}

/*
 * Fill in the cipher-independent fields of a SET_KEY command and map the
 * mac80211 cipher suite to the firmware's key_type_id/key_info flags.
 * Returns -ENOTSUPP for ciphers the firmware has no mapping for.
 */
static int mwl8k_encryption_set_cmd_info(struct mwl8k_cmd_set_key *cmd,
						u8 *addr,
						struct ieee80211_key_conf *key)
{
	cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_ENCRYPTION);
	cmd->header.length = cpu_to_le16(sizeof(*cmd));
	cmd->length = cpu_to_le16(sizeof(*cmd) -
				offsetof(struct mwl8k_cmd_set_key, length));
	cmd->key_id = cpu_to_le32(key->keyidx);
	cmd->key_len = cpu_to_le16(key->keylen);
	memcpy(cmd->mac_addr, addr, ETH_ALEN);

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		cmd->key_type_id = cpu_to_le16(MWL8K_ALG_WEP);
		if (key->keyidx == 0)
			cmd->key_info =	cpu_to_le32(MWL8K_KEY_FLAG_WEP_TXKEY);
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		cmd->key_type_id = cpu_to_le16(MWL8K_ALG_TKIP);
		cmd->key_info =	(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
			? cpu_to_le32(MWL8K_KEY_FLAG_PAIRWISE)
			: cpu_to_le32(MWL8K_KEY_FLAG_TXGROUPKEY);
		cmd->key_info |= cpu_to_le32(MWL8K_KEY_FLAG_MICKEY_VALID
				| MWL8K_KEY_FLAG_TSC_VALID);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		cmd->key_type_id = cpu_to_le16(MWL8K_ALG_CCMP);
		cmd->key_info =	(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
			? cpu_to_le32(MWL8K_KEY_FLAG_PAIRWISE)
			: cpu_to_le32(MWL8K_KEY_FLAG_TXGROUPKEY);
		break;
	default:
		return -ENOTSUPP;
	}

	return 0;
}

/*
 * CMD_UPDATE_ENCRYPTION (set key): install @key for @addr.  WEP keys are
 * additionally cached in the vif's wep_key_conf (the whole key_conf plus
 * key material) the first time they are seen.  For TKIP the key material
 * length includes both MIC keys, which sit directly after the key in
 * struct ieee80211_key_conf.
 */
static int mwl8k_cmd_encryption_set_key(struct ieee80211_hw *hw,
						struct ieee80211_vif *vif,
						u8 *addr,
						struct ieee80211_key_conf *key)
{
	struct mwl8k_cmd_set_key *cmd;
	int rc;
	int keymlen;
	u32 action;
	u8 idx;
	struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	rc = mwl8k_encryption_set_cmd_info(cmd, addr, key);
	if (rc < 0)
		goto done;

	idx = key->keyidx;

	if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
		action = MWL8K_ENCR_SET_KEY;
	else
		action = MWL8K_ENCR_SET_GROUP_KEY;

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		if (!mwl8k_vif->wep_key_conf[idx].enabled) {
			memcpy(mwl8k_vif->wep_key_conf[idx].key, key,
						sizeof(*key) + key->keylen);
			mwl8k_vif->wep_key_conf[idx].enabled = 1;
		}

		keymlen = key->keylen;
		action = MWL8K_ENCR_SET_KEY;
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		keymlen = MAX_ENCR_KEY_LENGTH + 2 * MIC_KEY_LENGTH;
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		keymlen = key->keylen;
		break;
	default:
		rc = -ENOTSUPP;
		goto done;
	}

	memcpy(cmd->key_material, key->key, keymlen);
	cmd->action = cpu_to_le32(action);

	rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
done:
	kfree(cmd);

	return rc;
}

/*
 * CMD_UPDATE_ENCRYPTION (remove key): delete @key for @addr and drop any
 * cached WEP copy for that key index.
 */
static int mwl8k_cmd_encryption_remove_key(struct ieee80211_hw *hw,
						struct ieee80211_vif *vif,
						u8 *addr,
						struct ieee80211_key_conf *key)
{
	struct mwl8k_cmd_set_key *cmd;
	int rc;
	struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	rc = mwl8k_encryption_set_cmd_info(cmd, addr, key);
	if (rc < 0)
		goto done;

	if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
			key->cipher == WLAN_CIPHER_SUITE_WEP104)
		mwl8k_vif->wep_key_conf[key->keyidx].enabled = 0;

	cmd->action = cpu_to_le32(MWL8K_ENCR_REMOVE_KEY);

	rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
done:
	kfree(cmd);

	return rc;
}

/*
 * mac80211 set_key callback.  Hardware crypto is refused for STA vifs
 * (-EOPNOTSUPP).  On SET_KEY the key is installed and the matching
 * encryption type (WEP vs mixed TKIP/CCMP) enabled; on removal the key
 * is simply deleted.
 */
static int mwl8k_set_key(struct ieee80211_hw *hw,
			 enum set_key_cmd cmd_param,
			 struct ieee80211_vif *vif,
			 struct ieee80211_sta *sta,
			 struct ieee80211_key_conf *key)
{
	int rc = 0;
	u8 encr_type;
	u8 *addr;
	struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);

	if (vif->type == NL80211_IFTYPE_STATION)
		return -EOPNOTSUPP;

	if (sta == NULL)
		addr = vif->addr;
	else
		addr = sta->addr;

	if (cmd_param == SET_KEY) {
		rc = mwl8k_cmd_encryption_set_key(hw, vif, addr, key);
		if (rc)
			goto out;

		if ((key->cipher == WLAN_CIPHER_SUITE_WEP40)
				|| (key->cipher == WLAN_CIPHER_SUITE_WEP104))
			encr_type = MWL8K_UPDATE_ENCRYPTION_TYPE_WEP;
		else
			encr_type = MWL8K_UPDATE_ENCRYPTION_TYPE_MIXED;

		rc = mwl8k_cmd_update_encryption_enable(hw, vif, addr,
							encr_type);
		if (rc)
			goto out;

		mwl8k_vif->is_hw_crypto_enabled = true;

	} else {
		rc = mwl8k_cmd_encryption_remove_key(hw, vif, addr, key);

		if (rc)
			goto out;
	}
out:
	return rc;
}

/*
 * CMD_UPDATE_STADB.
 */

struct ewc_ht_info {
	__le16	control1;
	__le16	control2;
	__le16	control3;
} __packed;

struct peer_capability_info {
	/* Peer type - AP vs. STA.  */
	__u8	peer_type;

	/* Basic 802.11 capabilities from assoc resp.  */
	__le16	basic_caps;

	/* Set if peer supports 802.11n high throughput (HT).  */
	__u8	ht_support;

	/* Valid if HT is supported.  */
	__le16	ht_caps;
	__u8	extended_ht_caps;
	struct ewc_ht_info	ewc_info;

	/* Legacy rate table. Intersection of our rates and peer rates.  */
	__u8	legacy_rates[12];

	/* HT rate table. Intersection of our rates and peer rates.  */
	__u8	ht_rates[16];
	__u8	pad[16];

	/* If set, interoperability mode, no proprietary extensions.  */
	__u8	interop;
	__u8	pad2;
	__u8	station_id;
	__le16	amsdu_enabled;
} __packed;

struct mwl8k_cmd_update_stadb {
	struct mwl8k_cmd_pkt header;

	/* See STADB_ACTION_TYPE */
	__le32	action;

	/* Peer MAC address */
	__u8	peer_addr[ETH_ALEN];

	__le32	reserved;

	/* Peer info - valid during add/update.  */
	struct peer_capability_info	peer_info;
} __packed;

#define MWL8K_STA_DB_MODIFY_ENTRY	1
#define MWL8K_STA_DB_DEL_ENTRY		2

/* Peer Entry flags - used to define the type of the peer node */
#define MWL8K_PEER_TYPE_ACCESSPOINT	2

/*
 * CMD_UPDATE_STADB (add/modify): describe the peer AP's capabilities to
 * the firmware.  On success, returns the station id the firmware wrote
 * back into the command buffer's peer_info; on failure, the negative rc.
 */
static int mwl8k_cmd_update_stadb_add(struct ieee80211_hw *hw,
				      struct ieee80211_vif *vif,
				      struct ieee80211_sta *sta)
{
	struct mwl8k_cmd_update_stadb *cmd;
	struct peer_capability_info *p;
	u32 rates;
	int rc;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_STADB);
	cmd->header.length = cpu_to_le16(sizeof(*cmd));
	cmd->action = cpu_to_le32(MWL8K_STA_DB_MODIFY_ENTRY);
	memcpy(cmd->peer_addr, sta->addr, ETH_ALEN);

	p = &cmd->peer_info;
	p->peer_type = MWL8K_PEER_TYPE_ACCESSPOINT;
	p->basic_caps = cpu_to_le16(vif->bss_conf.assoc_capability);
	p->ht_support = sta->ht_cap.ht_supported;
	p->ht_caps = cpu_to_le16(sta->ht_cap.cap);
	p->extended_ht_caps = (sta->ht_cap.ampdu_factor & 3) |
		((sta->ht_cap.ampdu_density & 7) << 2);
	if (hw->conf.channel->band == IEEE80211_BAND_2GHZ)
		rates = sta->supp_rates[IEEE80211_BAND_2GHZ];
	else
		rates = sta->supp_rates[IEEE80211_BAND_5GHZ] << 5;
	legacy_rate_mask_to_array(p->legacy_rates, rates);
	memcpy(p->ht_rates, sta->ht_cap.mcs.rx_mask, 16);
	p->interop = 1;
	p->amsdu_enabled = 0;

	rc = mwl8k_post_cmd(hw, &cmd->header);
	kfree(cmd);

	return rc ? rc : p->station_id;
}

/* CMD_UPDATE_STADB (delete): remove the entry for @addr. */
static int mwl8k_cmd_update_stadb_del(struct ieee80211_hw *hw,
				      struct ieee80211_vif *vif, u8 *addr)
{
	struct mwl8k_cmd_update_stadb *cmd;
	int rc;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_STADB);
	cmd->header.length = cpu_to_le16(sizeof(*cmd));
	cmd->action = cpu_to_le32(MWL8K_STA_DB_DEL_ENTRY);
	memcpy(cmd->peer_addr, addr, ETH_ALEN);

	rc = mwl8k_post_cmd(hw, &cmd->header);
	kfree(cmd);

	return rc;
}

/*
 * Interrupt handling.
*/ static irqreturn_t mwl8k_interrupt(int irq, void *dev_id) { struct ieee80211_hw *hw = dev_id; struct mwl8k_priv *priv = hw->priv; u32 status; status = ioread32(priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS); if (!status) return IRQ_NONE; if (status & MWL8K_A2H_INT_TX_DONE) { status &= ~MWL8K_A2H_INT_TX_DONE; tasklet_schedule(&priv->poll_tx_task); } if (status & MWL8K_A2H_INT_RX_READY) { status &= ~MWL8K_A2H_INT_RX_READY; tasklet_schedule(&priv->poll_rx_task); } if (status & MWL8K_A2H_INT_BA_WATCHDOG) { status &= ~MWL8K_A2H_INT_BA_WATCHDOG; ieee80211_queue_work(hw, &priv->watchdog_ba_handle); } if (status) iowrite32(~status, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS); if (status & MWL8K_A2H_INT_OPC_DONE) { if (priv->hostcmd_wait != NULL) complete(priv->hostcmd_wait); } if (status & MWL8K_A2H_INT_QUEUE_EMPTY) { if (!mutex_is_locked(&priv->fw_mutex) && priv->radio_on && priv->pending_tx_pkts) mwl8k_tx_start(priv); } return IRQ_HANDLED; } static void mwl8k_tx_poll(unsigned long data) { struct ieee80211_hw *hw = (struct ieee80211_hw *)data; struct mwl8k_priv *priv = hw->priv; int limit; int i; limit = 32; spin_lock_bh(&priv->tx_lock); for (i = 0; i < mwl8k_tx_queues(priv); i++) limit -= mwl8k_txq_reclaim(hw, i, limit, 0); if (!priv->pending_tx_pkts && priv->tx_wait != NULL) { complete(priv->tx_wait); priv->tx_wait = NULL; } spin_unlock_bh(&priv->tx_lock); if (limit) { writel(~MWL8K_A2H_INT_TX_DONE, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS); } else { tasklet_schedule(&priv->poll_tx_task); } } static void mwl8k_rx_poll(unsigned long data) { struct ieee80211_hw *hw = (struct ieee80211_hw *)data; struct mwl8k_priv *priv = hw->priv; int limit; limit = 32; limit -= rxq_process(hw, 0, limit); limit -= rxq_refill(hw, 0, limit); if (limit) { writel(~MWL8K_A2H_INT_RX_READY, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS); } else { tasklet_schedule(&priv->poll_rx_task); } } /* * Core driver operations. 
*/ static void mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb) { struct mwl8k_priv *priv = hw->priv; int index = skb_get_queue_mapping(skb); if (!priv->radio_on) { wiphy_debug(hw->wiphy, "dropped TX frame since radio disabled\n"); dev_kfree_skb(skb); return; } mwl8k_txq_xmit(hw, index, skb); } static int mwl8k_start(struct ieee80211_hw *hw) { struct mwl8k_priv *priv = hw->priv; int rc; rc = request_irq(priv->pdev->irq, mwl8k_interrupt, IRQF_SHARED, MWL8K_NAME, hw); if (rc) { priv->irq = -1; wiphy_err(hw->wiphy, "failed to register IRQ handler\n"); return -EIO; } priv->irq = priv->pdev->irq; /* Enable TX reclaim and RX tasklets. */ tasklet_enable(&priv->poll_tx_task); tasklet_enable(&priv->poll_rx_task); /* Enable interrupts */ iowrite32(MWL8K_A2H_EVENTS, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK); iowrite32(MWL8K_A2H_EVENTS, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS_MASK); rc = mwl8k_fw_lock(hw); if (!rc) { rc = mwl8k_cmd_radio_enable(hw); if (!priv->ap_fw) { if (!rc) rc = mwl8k_cmd_enable_sniffer(hw, 0); if (!rc) rc = mwl8k_cmd_set_pre_scan(hw); if (!rc) rc = mwl8k_cmd_set_post_scan(hw, "\x00\x00\x00\x00\x00\x00"); } if (!rc) rc = mwl8k_cmd_set_rateadapt_mode(hw, 0); if (!rc) rc = mwl8k_cmd_set_wmm_mode(hw, 0); mwl8k_fw_unlock(hw); } if (rc) { iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK); free_irq(priv->pdev->irq, hw); priv->irq = -1; tasklet_disable(&priv->poll_tx_task); tasklet_disable(&priv->poll_rx_task); } return rc; } static void mwl8k_stop(struct ieee80211_hw *hw) { struct mwl8k_priv *priv = hw->priv; int i; if (!priv->hw_restart_in_progress) mwl8k_cmd_radio_disable(hw); ieee80211_stop_queues(hw); /* Disable interrupts */ iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK); if (priv->irq != -1) { free_irq(priv->pdev->irq, hw); priv->irq = -1; } /* Stop finalize join worker */ cancel_work_sync(&priv->finalize_join_worker); cancel_work_sync(&priv->watchdog_ba_handle); if (priv->beacon_skb != NULL) dev_kfree_skb(priv->beacon_skb); /* 
Stop TX reclaim and RX tasklets. */ tasklet_disable(&priv->poll_tx_task); tasklet_disable(&priv->poll_rx_task); /* Return all skbs to mac80211 */ for (i = 0; i < mwl8k_tx_queues(priv); i++) mwl8k_txq_reclaim(hw, i, INT_MAX, 1); } static int mwl8k_reload_firmware(struct ieee80211_hw *hw, char *fw_image); static int mwl8k_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct mwl8k_priv *priv = hw->priv; struct mwl8k_vif *mwl8k_vif; u32 macids_supported; int macid, rc; struct mwl8k_device_info *di; /* * Reject interface creation if sniffer mode is active, as * STA operation is mutually exclusive with hardware sniffer * mode. (Sniffer mode is only used on STA firmware.) */ if (priv->sniffer_enabled) { wiphy_info(hw->wiphy, "unable to create STA interface because sniffer mode is enabled\n"); return -EINVAL; } di = priv->device_info; switch (vif->type) { case NL80211_IFTYPE_AP: if (!priv->ap_fw && di->fw_image_ap) { /* we must load the ap fw to meet this request */ if (!list_empty(&priv->vif_list)) return -EBUSY; rc = mwl8k_reload_firmware(hw, di->fw_image_ap); if (rc) return rc; } macids_supported = priv->ap_macids_supported; break; case NL80211_IFTYPE_STATION: if (priv->ap_fw && di->fw_image_sta) { /* we must load the sta fw to meet this request */ if (!list_empty(&priv->vif_list)) return -EBUSY; rc = mwl8k_reload_firmware(hw, di->fw_image_sta); if (rc) return rc; } macids_supported = priv->sta_macids_supported; break; default: return -EINVAL; } macid = ffs(macids_supported & ~priv->macids_used); if (!macid--) return -EBUSY; /* Setup driver private area. */ mwl8k_vif = MWL8K_VIF(vif); memset(mwl8k_vif, 0, sizeof(*mwl8k_vif)); mwl8k_vif->vif = vif; mwl8k_vif->macid = macid; mwl8k_vif->seqno = 0; memcpy(mwl8k_vif->bssid, vif->addr, ETH_ALEN); mwl8k_vif->is_hw_crypto_enabled = false; /* Set the mac address. 
*/
	mwl8k_cmd_set_mac_addr(hw, vif, vif->addr);

	/* AP firmware needs an explicit "self" station entry */
	if (priv->ap_fw)
		mwl8k_cmd_set_new_stn_add_self(hw, vif);

	priv->macids_used |= 1 << mwl8k_vif->macid;
	list_add_tail(&mwl8k_vif->list, &priv->vif_list);

	return 0;
}

/* Release a vif's macid and unlink it from the driver's vif list. */
static void mwl8k_remove_vif(struct mwl8k_priv *priv, struct mwl8k_vif *vif)
{
	/* Has ieee80211_restart_hw re-added the removed interfaces? */
	if (!priv->macids_used)
		return;

	priv->macids_used &= ~(1 << vif->macid);
	list_del(&vif->list);
}

/* mac80211 ->remove_interface callback. */
static void mwl8k_remove_interface(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif)
{
	struct mwl8k_priv *priv = hw->priv;
	struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);

	if (priv->ap_fw)
		mwl8k_cmd_set_new_stn_del(hw, vif, vif->addr);

	mwl8k_cmd_del_mac_addr(hw, vif, vif->addr);

	mwl8k_remove_vif(priv, mwl8k_vif);
}

/*
 * Worker that reloads firmware after a firmware hang, then asks
 * mac80211 to re-run its reconfiguration via ieee80211_restart_hw().
 */
static void mwl8k_hw_restart_work(struct work_struct *work)
{
	struct mwl8k_priv *priv =
		container_of(work, struct mwl8k_priv, fw_reload);
	struct ieee80211_hw *hw = priv->hw;
	struct mwl8k_device_info *di;
	int rc;

	/* If some command is waiting for a response, clear it */
	if (priv->hostcmd_wait != NULL) {
		complete(priv->hostcmd_wait);
		priv->hostcmd_wait = NULL;
	}

	priv->hw_restart_owner = current;
	di = priv->device_info;
	mwl8k_fw_lock(hw);

	if (priv->ap_fw)
		rc = mwl8k_reload_firmware(hw, di->fw_image_ap);
	else
		rc = mwl8k_reload_firmware(hw, di->fw_image_sta);

	if (rc)
		goto fail;

	priv->hw_restart_owner = NULL;
	priv->hw_restart_in_progress = false;

	/*
	 * This unlock will wake up the queues and
	 * also opens the command path for other
	 * commands
	 */
	mwl8k_fw_unlock(hw);

	ieee80211_restart_hw(hw);

	wiphy_err(hw->wiphy, "Firmware restarted successfully\n");

	return;
fail:
	mwl8k_fw_unlock(hw);

	wiphy_err(hw->wiphy, "Firmware restart failed\n");
}

/*
 * mac80211 ->config callback: radio on/off, channel, TX power and
 * antenna configuration.
 */
static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
{
	struct ieee80211_conf *conf = &hw->conf;
	struct mwl8k_priv *priv = hw->priv;
	int rc;

	if (conf->flags & IEEE80211_CONF_IDLE) {
		mwl8k_cmd_radio_disable(hw);
		return 0;
	}

	rc = mwl8k_fw_lock(hw);
	if (rc)
		return rc;

	rc = mwl8k_cmd_radio_enable(hw);
	if (rc)
		goto out;

	rc = mwl8k_cmd_set_rf_channel(hw, conf);
	if (rc)
		goto out;

	/* firmware limit: cap TX power at 18 dBm */
	if (conf->power_level > 18)
		conf->power_level = 18;

	if (priv->ap_fw) {
		/*
		 * NOTE(review): IEEE80211_CONF_CHANGE_POWER is normally a
		 * bit of the "changed" argument, not of conf->flags --
		 * verify this test is intentional.
		 */
		if (conf->flags & IEEE80211_CONF_CHANGE_POWER) {
			rc = mwl8k_cmd_tx_power(hw, conf, conf->power_level);
			if (rc)
				goto out;
		}

		rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_RX, 0x3);
		if (rc)
			wiphy_warn(hw->wiphy, "failed to set # of RX antennas");
		rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_TX, 0x7);
		if (rc)
			wiphy_warn(hw->wiphy, "failed to set # of TX antennas");

	} else {
		rc = mwl8k_cmd_rf_tx_power(hw, conf->power_level);
		if (rc)
			goto out;

		rc = mwl8k_cmd_mimo_config(hw, 0x7, 0x7);
	}

out:
	mwl8k_fw_unlock(hw);

	return rc;
}

/*
 * STA-firmware handler for mac80211 ->bss_info_changed (continues on
 * the next chunk).
 */
static void
mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			   struct ieee80211_bss_conf *info, u32 changed)
{
	struct mwl8k_priv *priv = hw->priv;
	u32 ap_legacy_rates = 0;
	u8 ap_mcs_rates[16];
	int rc;

	if (mwl8k_fw_lock(hw))
		return;

	/*
	 * No need to capture a beacon if we're no longer associated.
	 */
	if ((changed & BSS_CHANGED_ASSOC) && !vif->bss_conf.assoc)
		priv->capture_beacon = false;

	/*
	 * Get the AP's legacy and MCS rates.
*/
	if (vif->bss_conf.assoc) {
		struct ieee80211_sta *ap;

		/* RCU-protected lookup of the AP's station entry */
		rcu_read_lock();

		ap = ieee80211_find_sta(vif, vif->bss_conf.bssid);
		if (ap == NULL) {
			rcu_read_unlock();
			goto out;
		}

		/* 5 GHz rates are offset past the 2.4 GHz-only rates */
		if (hw->conf.channel->band == IEEE80211_BAND_2GHZ) {
			ap_legacy_rates = ap->supp_rates[IEEE80211_BAND_2GHZ];
		} else {
			ap_legacy_rates =
				ap->supp_rates[IEEE80211_BAND_5GHZ] << 5;
		}
		memcpy(ap_mcs_rates, ap->ht_cap.mcs.rx_mask, 16);

		rcu_read_unlock();
	}

	if ((changed & BSS_CHANGED_ASSOC) && vif->bss_conf.assoc) {
		rc = mwl8k_cmd_set_rate(hw, vif, ap_legacy_rates, ap_mcs_rates);
		if (rc)
			goto out;

		rc = mwl8k_cmd_use_fixed_rate_sta(hw);
		if (rc)
			goto out;
	}

	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
		rc = mwl8k_set_radio_preamble(hw,
				vif->bss_conf.use_short_preamble);
		if (rc)
			goto out;
	}

	if (changed & BSS_CHANGED_ERP_SLOT) {
		rc = mwl8k_cmd_set_slot(hw, vif->bss_conf.use_short_slot);
		if (rc)
			goto out;
	}

	if (vif->bss_conf.assoc &&
	    (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_ERP_CTS_PROT |
			BSS_CHANGED_HT))) {
		rc = mwl8k_cmd_set_aid(hw, vif, ap_legacy_rates);
		if (rc)
			goto out;
	}

	if (vif->bss_conf.assoc &&
	    (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BEACON_INT))) {
		/*
		 * Finalize the join.  Tell rx handler to process
		 * next beacon from our BSSID.
		 */
		memcpy(priv->capture_bssid, vif->bss_conf.bssid, ETH_ALEN);
		priv->capture_beacon = true;
	}

out:
	mwl8k_fw_unlock(hw);
}

/* AP-firmware handler for mac80211 ->bss_info_changed. */
static void
mwl8k_bss_info_changed_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			  struct ieee80211_bss_conf *info, u32 changed)
{
	int rc;

	if (mwl8k_fw_lock(hw))
		return;

	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
		rc = mwl8k_set_radio_preamble(hw,
				vif->bss_conf.use_short_preamble);
		if (rc)
			goto out;
	}

	if (changed & BSS_CHANGED_BASIC_RATES) {
		int idx;
		int rate;

		/*
		 * Use lowest supported basic rate for multicasts
		 * and management frames (such as probe responses --
		 * beacons will always go out at 1 Mb/s).
		 */
		idx = ffs(vif->bss_conf.basic_rates);
		if (idx)
			idx--;

		if (hw->conf.channel->band == IEEE80211_BAND_2GHZ)
			rate = mwl8k_rates_24[idx].hw_value;
		else
			rate = mwl8k_rates_50[idx].hw_value;

		mwl8k_cmd_use_fixed_rate_ap(hw, rate, rate);
	}

	if (changed & (BSS_CHANGED_BEACON_INT | BSS_CHANGED_BEACON)) {
		struct sk_buff *skb;

		skb = ieee80211_beacon_get(hw, vif);
		if (skb != NULL) {
			mwl8k_cmd_set_beacon(hw, vif, skb->data, skb->len);
			kfree_skb(skb);
		}
	}

	if (changed & BSS_CHANGED_BEACON_ENABLED)
		mwl8k_cmd_bss_start(hw, vif, info->enable_beacon);

out:
	mwl8k_fw_unlock(hw);
}

/* Dispatch ->bss_info_changed to the STA or AP variant. */
static void
mwl8k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		       struct ieee80211_bss_conf *info, u32 changed)
{
	struct mwl8k_priv *priv = hw->priv;

	if (!priv->ap_fw)
		mwl8k_bss_info_changed_sta(hw, vif, info, changed);
	else
		mwl8k_bss_info_changed_ap(hw, vif, info, changed);
}

static u64 mwl8k_prepare_multicast(struct ieee80211_hw *hw,
				   struct netdev_hw_addr_list *mc_list)
{
	struct mwl8k_cmd_pkt *cmd;

	/*
	 * Synthesize and return a command packet that programs the
	 * hardware multicast address filter.  At this point we don't
	 * know whether FIF_ALLMULTI is being requested, but if it is,
	 * we'll end up throwing this packet away and creating a new
	 * one in mwl8k_configure_filter().
	 */
	cmd = __mwl8k_cmd_mac_multicast_adr(hw, 0, mc_list);

	return (unsigned long)cmd;
}

/*
 * Enable hardware sniffer mode when requested; returns nonzero if the
 * filter request was fully handled here (continues on the next chunk).
 */
static int
mwl8k_configure_filter_sniffer(struct ieee80211_hw *hw,
			       unsigned int changed_flags,
			       unsigned int *total_flags)
{
	struct mwl8k_priv *priv = hw->priv;

	/*
	 * Hardware sniffer mode is mutually exclusive with STA
	 * operation, so refuse to enable sniffer mode if a STA
	 * interface is active.
*/
	if (!list_empty(&priv->vif_list)) {
		if (net_ratelimit())
			wiphy_info(hw->wiphy,
				   "not enabling sniffer mode because STA interface is active\n");
		return 0;
	}

	if (!priv->sniffer_enabled) {
		if (mwl8k_cmd_enable_sniffer(hw, 1))
			return 0;
		priv->sniffer_enabled = true;
	}

	*total_flags &=	FIF_PROMISC_IN_BSS | FIF_ALLMULTI |
			FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL |
			FIF_OTHER_BSS;

	return 1;
}

/* First vif on the driver's list, or NULL if none exist. */
static struct mwl8k_vif *mwl8k_first_vif(struct mwl8k_priv *priv)
{
	if (!list_empty(&priv->vif_list))
		return list_entry(priv->vif_list.next, struct mwl8k_vif, list);

	return NULL;
}

/*
 * mac80211 ->configure_filter callback.  "multicast" carries the
 * command packet built by mwl8k_prepare_multicast(); it is consumed
 * (posted or kfree'd) on every path out of this function.
 */
static void mwl8k_configure_filter(struct ieee80211_hw *hw,
				   unsigned int changed_flags,
				   unsigned int *total_flags,
				   u64 multicast)
{
	struct mwl8k_priv *priv = hw->priv;
	struct mwl8k_cmd_pkt *cmd = (void *)(unsigned long)multicast;

	/*
	 * AP firmware doesn't allow fine-grained control over
	 * the receive filter.
	 */
	if (priv->ap_fw) {
		*total_flags &= FIF_ALLMULTI | FIF_BCN_PRBRESP_PROMISC;
		kfree(cmd);
		return;
	}

	/*
	 * Enable hardware sniffer mode if FIF_CONTROL or
	 * FIF_OTHER_BSS is requested.
	 */
	if (*total_flags & (FIF_CONTROL | FIF_OTHER_BSS) &&
	    mwl8k_configure_filter_sniffer(hw, changed_flags, total_flags)) {
		kfree(cmd);
		return;
	}

	/* Clear unsupported feature flags */
	*total_flags &= FIF_ALLMULTI | FIF_BCN_PRBRESP_PROMISC;

	if (mwl8k_fw_lock(hw)) {
		kfree(cmd);
		return;
	}

	if (priv->sniffer_enabled) {
		mwl8k_cmd_enable_sniffer(hw, 0);
		priv->sniffer_enabled = false;
	}

	if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
		if (*total_flags & FIF_BCN_PRBRESP_PROMISC) {
			/*
			 * Disable the BSS filter.
			 */
			mwl8k_cmd_set_pre_scan(hw);
		} else {
			struct mwl8k_vif *mwl8k_vif;
			const u8 *bssid;

			/*
			 * Enable the BSS filter.
			 *
			 * If there is an active STA interface, use that
			 * interface's BSSID, otherwise use a dummy one
			 * (where the OUI part needs to be nonzero for
			 * the BSSID to be accepted by POST_SCAN).
			 */
			mwl8k_vif = mwl8k_first_vif(priv);
			if (mwl8k_vif != NULL)
				bssid = mwl8k_vif->vif->bss_conf.bssid;
			else
				bssid = "\x01\x00\x00\x00\x00\x00";

			mwl8k_cmd_set_post_scan(hw, bssid);
		}
	}

	/*
	 * If FIF_ALLMULTI is being requested, throw away the command
	 * packet that ->prepare_multicast() built and replace it with
	 * a command packet that enables reception of all multicast
	 * packets.
	 */
	if (*total_flags & FIF_ALLMULTI) {
		kfree(cmd);
		cmd = __mwl8k_cmd_mac_multicast_adr(hw, 1, NULL);
	}

	if (cmd != NULL) {
		mwl8k_post_cmd(hw, cmd);
		kfree(cmd);
	}

	mwl8k_fw_unlock(hw);
}

static int mwl8k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
	return mwl8k_cmd_set_rts_threshold(hw, value);
}

/* mac80211 ->sta_remove callback: AP fw and STA fw use different cmds. */
static int mwl8k_sta_remove(struct ieee80211_hw *hw,
			    struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta)
{
	struct mwl8k_priv *priv = hw->priv;

	if (priv->ap_fw)
		return mwl8k_cmd_set_new_stn_del(hw, vif, sta->addr);
	else
		return mwl8k_cmd_update_stadb_del(hw, vif, sta->addr);
}

/*
 * mac80211 ->sta_add callback: registers the peer with the firmware
 * and re-programs any cached per-vif WEP keys for the new station.
 */
static int mwl8k_sta_add(struct ieee80211_hw *hw,
			 struct ieee80211_vif *vif,
			 struct ieee80211_sta *sta)
{
	struct mwl8k_priv *priv = hw->priv;
	int ret;
	int i;
	struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
	struct ieee80211_key_conf *key;

	if (!priv->ap_fw) {
		/* STA fw returns the peer id on success */
		ret = mwl8k_cmd_update_stadb_add(hw, vif, sta);
		if (ret >= 0) {
			MWL8K_STA(sta)->peer_id = ret;
			if (sta->ht_cap.ht_supported)
				MWL8K_STA(sta)->is_ampdu_allowed = true;
			ret = 0;
		}

	} else {
		ret = mwl8k_cmd_set_new_stn_add(hw, vif, sta);
	}

	for (i = 0; i < NUM_WEP_KEYS; i++) {
		key = IEEE80211_KEY_CONF(mwl8k_vif->wep_key_conf[i].key);
		if (mwl8k_vif->wep_key_conf[i].enabled)
			mwl8k_set_key(hw, SET_KEY, vif, sta, key);
	}
	return ret;
}

/*
 * mac80211 ->conf_tx callback: cache the WMM parameters and push them
 * to the firmware (continues on the next chunk).
 */
static int mwl8k_conf_tx(struct ieee80211_hw *hw,
			 struct ieee80211_vif *vif, u16 queue,
			 const struct ieee80211_tx_queue_params *params)
{
	struct mwl8k_priv *priv = hw->priv;
	int rc;

	rc = mwl8k_fw_lock(hw);
	if (!rc) {
		BUG_ON(queue > MWL8K_TX_WMM_QUEUES - 1);
		memcpy(&priv->wmm_params[queue], params, sizeof(*params));

		if (!priv->wmm_enabled)
			rc =
mwl8k_cmd_set_wmm_mode(hw, 1);

		if (!rc) {
			/* firmware queue numbering is reversed vs mac80211 */
			int q = MWL8K_TX_WMM_QUEUES - 1 - queue;
			rc = mwl8k_cmd_set_edca_params(hw, q,
						       params->cw_min,
						       params->cw_max,
						       params->aifs,
						       params->txop);
		}

		mwl8k_fw_unlock(hw);
	}

	return rc;
}

static int mwl8k_get_stats(struct ieee80211_hw *hw,
			   struct ieee80211_low_level_stats *stats)
{
	return mwl8k_cmd_get_stat(hw, stats);
}

/* mac80211 ->get_survey callback: single entry, noise only. */
static int mwl8k_get_survey(struct ieee80211_hw *hw, int idx,
				struct survey_info *survey)
{
	struct mwl8k_priv *priv = hw->priv;
	struct ieee80211_conf *conf = &hw->conf;

	if (idx != 0)
		return -ENOENT;

	survey->channel = conf->channel;
	survey->filled = SURVEY_INFO_NOISE_DBM;
	survey->noise = priv->noise;

	return 0;
}

#define MAX_AMPDU_ATTEMPTS 5

/*
 * mac80211 ->ampdu_action callback.  stream_lock is deliberately
 * dropped around the slow firmware commands (check/create/destroy BA)
 * and re-taken afterwards.
 */
static int
mwl8k_ampdu_action(struct ieee80211_hw *hw,
		   struct ieee80211_vif *vif,
		   enum ieee80211_ampdu_mlme_action action,
		   struct ieee80211_sta *sta, u16 tid, u16 *ssn,
		   u8 buf_size)
{

	int i, rc = 0;
	struct mwl8k_priv *priv = hw->priv;
	struct mwl8k_ampdu_stream *stream;
	u8 *addr = sta->addr;

	if (!(hw->flags & IEEE80211_HW_AMPDU_AGGREGATION))
		return -ENOTSUPP;

	spin_lock(&priv->stream_lock);
	stream = mwl8k_lookup_stream(hw, addr, tid);

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
	case IEEE80211_AMPDU_RX_STOP:
		break;
	case IEEE80211_AMPDU_TX_START:
		/* By the time we get here the hw queues may contain outgoing
		 * packets for this RA/TID that are not part of this BA
		 * session.  The hw will assign sequence numbers to these
		 * packets as they go out.  So if we query the hw for its next
		 * sequence number and use that for the SSN here, it may end up
		 * being wrong, which will lead to sequence number mismatch at
		 * the recipient.  To avoid this, we reset the sequence number
		 * to O for the first MPDU in this BA stream.
		 */
		*ssn = 0;
		if (stream == NULL) {
			/* This means that somebody outside this driver called
			 * ieee80211_start_tx_ba_session.  This is unexpected
			 * because we do our own rate control.  Just warn and
			 * move on.
			 */
			wiphy_warn(hw->wiphy, "Unexpected call to %s. "
				   "Proceeding anyway.\n", __func__);
			stream = mwl8k_add_stream(hw, sta, tid);
		}
		if (stream == NULL) {
			wiphy_debug(hw->wiphy, "no free AMPDU streams\n");
			rc = -EBUSY;
			break;
		}
		stream->state = AMPDU_STREAM_IN_PROGRESS;

		/* Release the lock before we do the time consuming stuff */
		spin_unlock(&priv->stream_lock);
		for (i = 0; i < MAX_AMPDU_ATTEMPTS; i++) {
			rc = mwl8k_check_ba(hw, stream);

			/* If HW restart is in progress mwl8k_post_cmd will
			 * return -EBUSY. Avoid retrying mwl8k_check_ba in
			 * such cases
			 */
			if (!rc || rc == -EBUSY)
				break;
			/*
			 * HW queues take time to be flushed, give them
			 * sufficient time
			 */

			msleep(1000);
		}
		spin_lock(&priv->stream_lock);
		if (rc) {
			wiphy_err(hw->wiphy, "Stream for tid %d busy after %d"
				" attempts\n", tid, MAX_AMPDU_ATTEMPTS);
			mwl8k_remove_stream(hw, stream);
			rc = -EBUSY;
			break;
		}
		ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
		break;
	case IEEE80211_AMPDU_TX_STOP:
		if (stream) {
			if (stream->state == AMPDU_STREAM_ACTIVE) {
				spin_unlock(&priv->stream_lock);
				mwl8k_destroy_ba(hw, stream);
				spin_lock(&priv->stream_lock);
			}
			mwl8k_remove_stream(hw, stream);
		}
		ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid);
		break;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		BUG_ON(stream == NULL);
		BUG_ON(stream->state != AMPDU_STREAM_IN_PROGRESS);
		spin_unlock(&priv->stream_lock);
		rc = mwl8k_create_ba(hw, stream, buf_size);
		spin_lock(&priv->stream_lock);
		if (!rc)
			stream->state = AMPDU_STREAM_ACTIVE;
		else {
			spin_unlock(&priv->stream_lock);
			mwl8k_destroy_ba(hw, stream);
			spin_lock(&priv->stream_lock);
			wiphy_debug(hw->wiphy,
				"Failed adding stream for sta %pM tid %d\n",
				addr, tid);
			mwl8k_remove_stream(hw, stream);
		}
		break;

	default:
		rc = -ENOTSUPP;
	}

	spin_unlock(&priv->stream_lock);
	return rc;
}

/* mac80211 callback table for this driver. */
static const struct ieee80211_ops mwl8k_ops = {
	.tx			= mwl8k_tx,
	.start			= mwl8k_start,
	.stop			= mwl8k_stop,
	.add_interface		= mwl8k_add_interface,
	.remove_interface	= mwl8k_remove_interface,
	.config			= mwl8k_config,
	.bss_info_changed	= mwl8k_bss_info_changed,
	.prepare_multicast	=
mwl8k_prepare_multicast, .configure_filter = mwl8k_configure_filter, .set_key = mwl8k_set_key, .set_rts_threshold = mwl8k_set_rts_threshold, .sta_add = mwl8k_sta_add, .sta_remove = mwl8k_sta_remove, .conf_tx = mwl8k_conf_tx, .get_stats = mwl8k_get_stats, .get_survey = mwl8k_get_survey, .ampdu_action = mwl8k_ampdu_action, }; static void mwl8k_finalize_join_worker(struct work_struct *work) { struct mwl8k_priv *priv = container_of(work, struct mwl8k_priv, finalize_join_worker); struct sk_buff *skb = priv->beacon_skb; struct ieee80211_mgmt *mgmt = (void *)skb->data; int len = skb->len - offsetof(struct ieee80211_mgmt, u.beacon.variable); const u8 *tim = cfg80211_find_ie(WLAN_EID_TIM, mgmt->u.beacon.variable, len); int dtim_period = 1; if (tim && tim[1] >= 2) dtim_period = tim[3]; mwl8k_cmd_finalize_join(priv->hw, skb->data, skb->len, dtim_period); dev_kfree_skb(skb); priv->beacon_skb = NULL; } enum { MWL8363 = 0, MWL8687, MWL8366, }; #define MWL8K_8366_AP_FW_API 2 #define _MWL8K_8366_AP_FW(api) "mwl8k/fmimage_8366_ap-" #api ".fw" #define MWL8K_8366_AP_FW(api) _MWL8K_8366_AP_FW(api) static struct mwl8k_device_info mwl8k_info_tbl[] __devinitdata = { [MWL8363] = { .part_name = "88w8363", .helper_image = "mwl8k/helper_8363.fw", .fw_image_sta = "mwl8k/fmimage_8363.fw", }, [MWL8687] = { .part_name = "88w8687", .helper_image = "mwl8k/helper_8687.fw", .fw_image_sta = "mwl8k/fmimage_8687.fw", }, [MWL8366] = { .part_name = "88w8366", .helper_image = "mwl8k/helper_8366.fw", .fw_image_sta = "mwl8k/fmimage_8366.fw", .fw_image_ap = MWL8K_8366_AP_FW(MWL8K_8366_AP_FW_API), .fw_api_ap = MWL8K_8366_AP_FW_API, .ap_rxd_ops = &rxd_8366_ap_ops, }, }; MODULE_FIRMWARE("mwl8k/helper_8363.fw"); MODULE_FIRMWARE("mwl8k/fmimage_8363.fw"); MODULE_FIRMWARE("mwl8k/helper_8687.fw"); MODULE_FIRMWARE("mwl8k/fmimage_8687.fw"); MODULE_FIRMWARE("mwl8k/helper_8366.fw"); MODULE_FIRMWARE("mwl8k/fmimage_8366.fw"); MODULE_FIRMWARE(MWL8K_8366_AP_FW(MWL8K_8366_AP_FW_API)); static 
DEFINE_PCI_DEVICE_TABLE(mwl8k_pci_id_table) = { { PCI_VDEVICE(MARVELL, 0x2a0a), .driver_data = MWL8363, }, { PCI_VDEVICE(MARVELL, 0x2a0c), .driver_data = MWL8363, }, { PCI_VDEVICE(MARVELL, 0x2a24), .driver_data = MWL8363, }, { PCI_VDEVICE(MARVELL, 0x2a2b), .driver_data = MWL8687, }, { PCI_VDEVICE(MARVELL, 0x2a30), .driver_data = MWL8687, }, { PCI_VDEVICE(MARVELL, 0x2a40), .driver_data = MWL8366, }, { PCI_VDEVICE(MARVELL, 0x2a43), .driver_data = MWL8366, }, { }, }; MODULE_DEVICE_TABLE(pci, mwl8k_pci_id_table); static int mwl8k_request_alt_fw(struct mwl8k_priv *priv) { int rc; printk(KERN_ERR "%s: Error requesting preferred fw %s.\n" "Trying alternative firmware %s\n", pci_name(priv->pdev), priv->fw_pref, priv->fw_alt); rc = mwl8k_request_fw(priv, priv->fw_alt, &priv->fw_ucode, true); if (rc) { printk(KERN_ERR "%s: Error requesting alt fw %s\n", pci_name(priv->pdev), priv->fw_alt); return rc; } return 0; } static int mwl8k_firmware_load_success(struct mwl8k_priv *priv); static void mwl8k_fw_state_machine(const struct firmware *fw, void *context) { struct mwl8k_priv *priv = context; struct mwl8k_device_info *di = priv->device_info; int rc; switch (priv->fw_state) { case FW_STATE_INIT: if (!fw) { printk(KERN_ERR "%s: Error requesting helper fw %s\n", pci_name(priv->pdev), di->helper_image); goto fail; } priv->fw_helper = fw; rc = mwl8k_request_fw(priv, priv->fw_pref, &priv->fw_ucode, true); if (rc && priv->fw_alt) { rc = mwl8k_request_alt_fw(priv); if (rc) goto fail; priv->fw_state = FW_STATE_LOADING_ALT; } else if (rc) goto fail; else priv->fw_state = FW_STATE_LOADING_PREF; break; case FW_STATE_LOADING_PREF: if (!fw) { if (priv->fw_alt) { rc = mwl8k_request_alt_fw(priv); if (rc) goto fail; priv->fw_state = FW_STATE_LOADING_ALT; } else goto fail; } else { priv->fw_ucode = fw; rc = mwl8k_firmware_load_success(priv); if (rc) goto fail; else complete(&priv->firmware_loading_complete); } break; case FW_STATE_LOADING_ALT: if (!fw) { printk(KERN_ERR "%s: Error requesting alt 
fw %s\n", pci_name(priv->pdev), di->helper_image); goto fail; } priv->fw_ucode = fw; rc = mwl8k_firmware_load_success(priv); if (rc) goto fail; else complete(&priv->firmware_loading_complete); break; default: printk(KERN_ERR "%s: Unexpected firmware loading state: %d\n", MWL8K_NAME, priv->fw_state); BUG_ON(1); } return; fail: priv->fw_state = FW_STATE_ERROR; complete(&priv->firmware_loading_complete); device_release_driver(&priv->pdev->dev); mwl8k_release_firmware(priv); } #define MAX_RESTART_ATTEMPTS 1 static int mwl8k_init_firmware(struct ieee80211_hw *hw, char *fw_image, bool nowait) { struct mwl8k_priv *priv = hw->priv; int rc; int count = MAX_RESTART_ATTEMPTS; retry: /* Reset firmware and hardware */ mwl8k_hw_reset(priv); /* Ask userland hotplug daemon for the device firmware */ rc = mwl8k_request_firmware(priv, fw_image, nowait); if (rc) { wiphy_err(hw->wiphy, "Firmware files not found\n"); return rc; } if (nowait) return rc; /* Load firmware into hardware */ rc = mwl8k_load_firmware(hw); if (rc) wiphy_err(hw->wiphy, "Cannot start firmware\n"); /* Reclaim memory once firmware is successfully loaded */ mwl8k_release_firmware(priv); if (rc && count) { /* FW did not start successfully; * lets try one more time */ count--; wiphy_err(hw->wiphy, "Trying to reload the firmware again\n"); msleep(20); goto retry; } return rc; } static int mwl8k_init_txqs(struct ieee80211_hw *hw) { struct mwl8k_priv *priv = hw->priv; int rc = 0; int i; for (i = 0; i < mwl8k_tx_queues(priv); i++) { rc = mwl8k_txq_init(hw, i); if (rc) break; if (priv->ap_fw) iowrite32(priv->txq[i].txd_dma, priv->sram + priv->txq_offset[i]); } return rc; } /* initialize hw after successfully loading a firmware image */ static int mwl8k_probe_hw(struct ieee80211_hw *hw) { struct mwl8k_priv *priv = hw->priv; int rc = 0; int i; if (priv->ap_fw) { priv->rxd_ops = priv->device_info->ap_rxd_ops; if (priv->rxd_ops == NULL) { wiphy_err(hw->wiphy, "Driver does not have AP firmware image support for this 
hardware\n");
			goto err_stop_firmware;
		}
	} else {
		priv->rxd_ops = &rxd_sta_ops;
	}

	priv->sniffer_enabled = false;
	priv->wmm_enabled = false;
	priv->pending_tx_pkts = 0;

	rc = mwl8k_rxq_init(hw, 0);
	if (rc)
		goto err_stop_firmware;
	rxq_refill(hw, 0, INT_MAX);

	/* For the sta firmware, we need to know the dma addresses of tx queues
	 * before sending MWL8K_CMD_GET_HW_SPEC.  So we must initialize them
	 * prior to issuing this command.  But for the AP case, we learn the
	 * total number of queues from the result CMD_GET_HW_SPEC, so for this
	 * case we must initialize the tx queues after.
	 */
	priv->num_ampdu_queues = 0;
	if (!priv->ap_fw) {
		rc = mwl8k_init_txqs(hw);
		if (rc)
			goto err_free_queues;
	}

	iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
	iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
	/* TX_DONE/RX_READY/BA_WATCHDOG are cleared selectively (tasklets) */
	iowrite32(MWL8K_A2H_INT_TX_DONE|MWL8K_A2H_INT_RX_READY|
		  MWL8K_A2H_INT_BA_WATCHDOG,
		  priv->regs + MWL8K_HIU_A2H_INTERRUPT_CLEAR_SEL);
	iowrite32(MWL8K_A2H_INT_OPC_DONE,
		  priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS_MASK);

	rc = request_irq(priv->pdev->irq, mwl8k_interrupt,
			 IRQF_SHARED, MWL8K_NAME, hw);
	if (rc) {
		wiphy_err(hw->wiphy, "failed to register IRQ handler\n");
		goto err_free_queues;
	}

	/*
	 * When hw restart is requested,
	 * mac80211 will take care of clearing
	 * the ampdu streams, so do not clear
	 * the ampdu state here
	 */
	if (!priv->hw_restart_in_progress)
		memset(priv->ampdu, 0, sizeof(priv->ampdu));

	/*
	 * Temporarily enable interrupts.  Initial firmware host
	 * commands use interrupts and avoid polling.  Disable
	 * interrupts when done.
	 */
	iowrite32(MWL8K_A2H_EVENTS, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);

	/* Get config data, mac addrs etc */
	if (priv->ap_fw) {
		rc = mwl8k_cmd_get_hw_spec_ap(hw);
		if (!rc)
			rc = mwl8k_init_txqs(hw);
		if (!rc)
			rc = mwl8k_cmd_set_hw_spec(hw);
	} else {
		rc = mwl8k_cmd_get_hw_spec_sta(hw);
	}
	if (rc) {
		wiphy_err(hw->wiphy, "Cannot initialise firmware\n");
		goto err_free_irq;
	}

	/* Turn radio off */
	rc = mwl8k_cmd_radio_disable(hw);
	if (rc) {
		wiphy_err(hw->wiphy, "Cannot disable\n");
		goto err_free_irq;
	}

	/* Clear MAC address */
	rc = mwl8k_cmd_set_mac_addr(hw, NULL, "\x00\x00\x00\x00\x00\x00");
	if (rc) {
		wiphy_err(hw->wiphy, "Cannot clear MAC address\n");
		goto err_free_irq;
	}

	/* Disable interrupts */
	iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
	free_irq(priv->pdev->irq, hw);

	wiphy_info(hw->wiphy, "%s v%d, %pm, %s firmware %u.%u.%u.%u\n",
		   priv->device_info->part_name,
		   priv->hw_rev, hw->wiphy->perm_addr,
		   priv->ap_fw ? "AP" : "STA",
		   (priv->fw_rev >> 24) & 0xff, (priv->fw_rev >> 16) & 0xff,
		   (priv->fw_rev >> 8) & 0xff, priv->fw_rev & 0xff);

	return 0;

err_free_irq:
	iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
	free_irq(priv->pdev->irq, hw);

err_free_queues:
	for (i = 0; i < mwl8k_tx_queues(priv); i++)
		mwl8k_txq_deinit(hw, i);
	mwl8k_rxq_deinit(hw, 0);

err_stop_firmware:
	mwl8k_hw_reset(priv);

	return rc;
}

/*
 * invoke mwl8k_reload_firmware to change the firmware image after the device
 * has already been registered
 */
static int mwl8k_reload_firmware(struct ieee80211_hw *hw, char *fw_image)
{
	int i, rc = 0;
	struct mwl8k_priv *priv = hw->priv;
	struct mwl8k_vif *vif, *tmp_vif;

	mwl8k_stop(hw);
	mwl8k_rxq_deinit(hw, 0);

	/*
	 * All the existing interfaces are re-added by the ieee80211_reconfig;
	 * which means driver should remove existing interfaces before calling
	 * ieee80211_restart_hw
	 */
	if (priv->hw_restart_in_progress)
		list_for_each_entry_safe(vif, tmp_vif, &priv->vif_list, list)
			mwl8k_remove_vif(priv, vif);

	for (i = 0; i < mwl8k_tx_queues(priv); i++)
mwl8k_txq_deinit(hw, i); rc = mwl8k_init_firmware(hw, fw_image, false); if (rc) goto fail; rc = mwl8k_probe_hw(hw); if (rc) goto fail; if (priv->hw_restart_in_progress) return rc; rc = mwl8k_start(hw); if (rc) goto fail; rc = mwl8k_config(hw, ~0); if (rc) goto fail; for (i = 0; i < MWL8K_TX_WMM_QUEUES; i++) { rc = mwl8k_conf_tx(hw, NULL, i, &priv->wmm_params[i]); if (rc) goto fail; } return rc; fail: printk(KERN_WARNING "mwl8k: Failed to reload firmware image.\n"); return rc; } static int mwl8k_firmware_load_success(struct mwl8k_priv *priv) { struct ieee80211_hw *hw = priv->hw; int i, rc; rc = mwl8k_load_firmware(hw); mwl8k_release_firmware(priv); if (rc) { wiphy_err(hw->wiphy, "Cannot start firmware\n"); return rc; } /* * Extra headroom is the size of the required DMA header * minus the size of the smallest 802.11 frame (CTS frame). */ hw->extra_tx_headroom = sizeof(struct mwl8k_dma_data) - sizeof(struct ieee80211_cts); hw->extra_tx_headroom -= priv->ap_fw ? REDUCED_TX_HEADROOM : 0; hw->channel_change_time = 10; hw->queues = MWL8K_TX_WMM_QUEUES; /* Set rssi values to dBm */ hw->flags |= IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_HAS_RATE_CONTROL; /* * Ask mac80211 to not to trigger PS mode * based on PM bit of incoming frames. */ if (priv->ap_fw) hw->flags |= IEEE80211_HW_AP_LINK_PS; hw->vif_data_size = sizeof(struct mwl8k_vif); hw->sta_data_size = sizeof(struct mwl8k_sta); priv->macids_used = 0; INIT_LIST_HEAD(&priv->vif_list); /* Set default radio state and preamble */ priv->radio_on = false; priv->radio_short_preamble = false; /* Finalize join worker */ INIT_WORK(&priv->finalize_join_worker, mwl8k_finalize_join_worker); /* Handle watchdog ba events */ INIT_WORK(&priv->watchdog_ba_handle, mwl8k_watchdog_ba_events); /* To reload the firmware if it crashes */ INIT_WORK(&priv->fw_reload, mwl8k_hw_restart_work); /* TX reclaim and RX tasklets. 
*/ tasklet_init(&priv->poll_tx_task, mwl8k_tx_poll, (unsigned long)hw); tasklet_disable(&priv->poll_tx_task); tasklet_init(&priv->poll_rx_task, mwl8k_rx_poll, (unsigned long)hw); tasklet_disable(&priv->poll_rx_task); /* Power management cookie */ priv->cookie = pci_alloc_consistent(priv->pdev, 4, &priv->cookie_dma); if (priv->cookie == NULL) return -ENOMEM; mutex_init(&priv->fw_mutex); priv->fw_mutex_owner = NULL; priv->fw_mutex_depth = 0; priv->hostcmd_wait = NULL; spin_lock_init(&priv->tx_lock); spin_lock_init(&priv->stream_lock); priv->tx_wait = NULL; rc = mwl8k_probe_hw(hw); if (rc) goto err_free_cookie; hw->wiphy->interface_modes = 0; if (priv->ap_macids_supported || priv->device_info->fw_image_ap) hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP); if (priv->sta_macids_supported || priv->device_info->fw_image_sta) hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_STATION); rc = ieee80211_register_hw(hw); if (rc) { wiphy_err(hw->wiphy, "Cannot register device\n"); goto err_unprobe_hw; } return 0; err_unprobe_hw: for (i = 0; i < mwl8k_tx_queues(priv); i++) mwl8k_txq_deinit(hw, i); mwl8k_rxq_deinit(hw, 0); err_free_cookie: if (priv->cookie != NULL) pci_free_consistent(priv->pdev, 4, priv->cookie, priv->cookie_dma); return rc; } static int __devinit mwl8k_probe(struct pci_dev *pdev, const struct pci_device_id *id) { static int printed_version; struct ieee80211_hw *hw; struct mwl8k_priv *priv; struct mwl8k_device_info *di; int rc; if (!printed_version) { printk(KERN_INFO "%s version %s\n", MWL8K_DESC, MWL8K_VERSION); printed_version = 1; } rc = pci_enable_device(pdev); if (rc) { printk(KERN_ERR "%s: Cannot enable new PCI device\n", MWL8K_NAME); return rc; } rc = pci_request_regions(pdev, MWL8K_NAME); if (rc) { printk(KERN_ERR "%s: Cannot obtain PCI resources\n", MWL8K_NAME); goto err_disable_device; } pci_set_master(pdev); hw = ieee80211_alloc_hw(sizeof(*priv), &mwl8k_ops); if (hw == NULL) { printk(KERN_ERR "%s: ieee80211 alloc failed\n", MWL8K_NAME); rc = 
-ENOMEM; goto err_free_reg; } SET_IEEE80211_DEV(hw, &pdev->dev); pci_set_drvdata(pdev, hw); priv = hw->priv; priv->hw = hw; priv->pdev = pdev; priv->device_info = &mwl8k_info_tbl[id->driver_data]; priv->sram = pci_iomap(pdev, 0, 0x10000); if (priv->sram == NULL) { wiphy_err(hw->wiphy, "Cannot map device SRAM\n"); goto err_iounmap; } /* * If BAR0 is a 32 bit BAR, the register BAR will be BAR1. * If BAR0 is a 64 bit BAR, the register BAR will be BAR2. */ priv->regs = pci_iomap(pdev, 1, 0x10000); if (priv->regs == NULL) { priv->regs = pci_iomap(pdev, 2, 0x10000); if (priv->regs == NULL) { wiphy_err(hw->wiphy, "Cannot map device registers\n"); goto err_iounmap; } } /* * Choose the initial fw image depending on user input. If a second * image is available, make it the alternative image that will be * loaded if the first one fails. */ init_completion(&priv->firmware_loading_complete); di = priv->device_info; if (ap_mode_default && di->fw_image_ap) { priv->fw_pref = di->fw_image_ap; priv->fw_alt = di->fw_image_sta; } else if (!ap_mode_default && di->fw_image_sta) { priv->fw_pref = di->fw_image_sta; priv->fw_alt = di->fw_image_ap; } else if (ap_mode_default && !di->fw_image_ap && di->fw_image_sta) { printk(KERN_WARNING "AP fw is unavailable. Using STA fw."); priv->fw_pref = di->fw_image_sta; } else if (!ap_mode_default && !di->fw_image_sta && di->fw_image_ap) { printk(KERN_WARNING "STA fw is unavailable. 
Using AP fw."); priv->fw_pref = di->fw_image_ap; } rc = mwl8k_init_firmware(hw, priv->fw_pref, true); if (rc) goto err_stop_firmware; priv->hw_restart_in_progress = false; return rc; err_stop_firmware: mwl8k_hw_reset(priv); err_iounmap: if (priv->regs != NULL) pci_iounmap(pdev, priv->regs); if (priv->sram != NULL) pci_iounmap(pdev, priv->sram); pci_set_drvdata(pdev, NULL); ieee80211_free_hw(hw); err_free_reg: pci_release_regions(pdev); err_disable_device: pci_disable_device(pdev); return rc; } static void __devexit mwl8k_shutdown(struct pci_dev *pdev) { printk(KERN_ERR "===>%s(%u)\n", __func__, __LINE__); } static void __devexit mwl8k_remove(struct pci_dev *pdev) { struct ieee80211_hw *hw = pci_get_drvdata(pdev); struct mwl8k_priv *priv; int i; if (hw == NULL) return; priv = hw->priv; wait_for_completion(&priv->firmware_loading_complete); if (priv->fw_state == FW_STATE_ERROR) { mwl8k_hw_reset(priv); goto unmap; } ieee80211_stop_queues(hw); ieee80211_unregister_hw(hw); /* Remove TX reclaim and RX tasklets. 
*/ tasklet_kill(&priv->poll_tx_task); tasklet_kill(&priv->poll_rx_task); /* Stop hardware */ mwl8k_hw_reset(priv); /* Return all skbs to mac80211 */ for (i = 0; i < mwl8k_tx_queues(priv); i++) mwl8k_txq_reclaim(hw, i, INT_MAX, 1); for (i = 0; i < mwl8k_tx_queues(priv); i++) mwl8k_txq_deinit(hw, i); mwl8k_rxq_deinit(hw, 0); pci_free_consistent(priv->pdev, 4, priv->cookie, priv->cookie_dma); unmap: pci_iounmap(pdev, priv->regs); pci_iounmap(pdev, priv->sram); pci_set_drvdata(pdev, NULL); ieee80211_free_hw(hw); pci_release_regions(pdev); pci_disable_device(pdev); } static struct pci_driver mwl8k_driver = { .name = MWL8K_NAME, .id_table = mwl8k_pci_id_table, .probe = mwl8k_probe, .remove = __devexit_p(mwl8k_remove), .shutdown = __devexit_p(mwl8k_shutdown), }; static int __init mwl8k_init(void) { return pci_register_driver(&mwl8k_driver); } static void __exit mwl8k_exit(void) { pci_unregister_driver(&mwl8k_driver); } module_init(mwl8k_init); module_exit(mwl8k_exit); MODULE_DESCRIPTION(MWL8K_DESC); MODULE_VERSION(MWL8K_VERSION); MODULE_AUTHOR("Lennert Buytenhek <buytenh@marvell.com>"); MODULE_LICENSE("GPL");
gpl-2.0
zipperX/android_kernel_oneplus_msm8974
lib/raid6/algos.c
5057
3744
/* -*- linux-c -*- ------------------------------------------------------- *
 *
 *   Copyright 2002 H. Peter Anvin - All Rights Reserved
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation, Inc., 53 Temple Place Ste 330,
 *   Boston MA 02111-1307, USA; either version 2 of the License, or
 *   (at your option) any later version; incorporated herein by reference.
 *
 * ----------------------------------------------------------------------- */

/*
 * raid6/algos.c
 *
 * Algorithm list and algorithm selection for RAID-6
 */

#include <linux/raid/pq.h>
#include <linux/module.h>
#ifndef __KERNEL__
#include <sys/mman.h>
#include <stdio.h>
#else
#include <linux/gfp.h>
#if !RAID6_USE_EMPTY_ZERO_PAGE
/* In .bss so it's zeroed */
const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
EXPORT_SYMBOL(raid6_empty_zero_page);
#endif
#endif

/*
 * The gen_syndrome implementation chosen at boot; filled in by
 * raid6_select_algo() below with the fastest valid candidate.
 */
struct raid6_calls raid6_call;
EXPORT_SYMBOL_GPL(raid6_call);

/*
 * Candidate implementations, NULL-terminated.  Generic integer versions
 * first, then per-architecture SIMD variants gated by config/arch macros.
 */
const struct raid6_calls * const raid6_algos[] = {
	&raid6_intx1,
	&raid6_intx2,
	&raid6_intx4,
	&raid6_intx8,
#if defined(__ia64__)
	&raid6_intx16,
	&raid6_intx32,
#endif
#if defined(__i386__) && !defined(__arch_um__)
	&raid6_mmxx1,
	&raid6_mmxx2,
	&raid6_sse1x1,
	&raid6_sse1x2,
	&raid6_sse2x1,
	&raid6_sse2x2,
#endif
#if defined(__x86_64__) && !defined(__arch_um__)
	&raid6_sse2x1,
	&raid6_sse2x2,
	&raid6_sse2x4,
#endif
#ifdef CONFIG_ALTIVEC
	&raid6_altivec1,
	&raid6_altivec2,
	&raid6_altivec4,
	&raid6_altivec8,
#endif
	NULL
};

/* log2 of the benchmark window, in jiffies */
#ifdef __KERNEL__
#define RAID6_TIME_JIFFIES_LG2	4
#else
/* Need more time to be stable in userspace */
#define RAID6_TIME_JIFFIES_LG2	9
#define time_before(x, y) ((x) < (y))
#endif

/* Try to pick the best algorithm */
/* This code uses the gfmul table as convenient data set to abuse */

/*
 * Benchmark every valid candidate in raid6_algos[] by counting how many
 * syndrome generations each completes in a fixed jiffies window, then
 * install the winner in raid6_call.
 *
 * Returns 0 on success, -ENOMEM if the scratch pages could not be
 * allocated, -EINVAL if no candidate was valid.
 */
int __init raid6_select_algo(void)
{
	const struct raid6_calls * const * algo;
	const struct raid6_calls * best;
	char *syndromes;
	void *dptrs[(65536/PAGE_SIZE)+2];
	int i, disks;
	unsigned long perf, bestperf;
	int bestprefer;
	unsigned long j0, j1;

	/* "disks" = data pages carved from raid6_gfmul plus P and Q. */
	disks = (65536/PAGE_SIZE)+2;
	for ( i = 0 ; i < disks-2 ; i++ ) {
		dptrs[i] = ((char *)raid6_gfmul) + PAGE_SIZE*i;
	}

	/* Normal code - use a 2-page allocation to avoid D$ conflict */
	syndromes = (void *) __get_free_pages(GFP_KERNEL, 1);

	if ( !syndromes ) {
		printk("raid6: Yikes! No memory available.\n");
		return -ENOMEM;
	}

	/* The last two "disks" are the P and Q destination pages. */
	dptrs[disks-2] = syndromes;
	dptrs[disks-1] = syndromes + PAGE_SIZE;

	bestperf = 0; bestprefer = 0; best = NULL;

	for ( algo = raid6_algos ; *algo ; algo++ ) {
		/* A NULL ->valid hook means "always usable on this CPU". */
		if ( !(*algo)->valid || (*algo)->valid() ) {
			perf = 0;

			/*
			 * Time with preemption off; spin to a fresh jiffies
			 * edge first so every candidate gets a full window.
			 */
			preempt_disable();
			j0 = jiffies;
			while ( (j1 = jiffies) == j0 )
				cpu_relax();
			while (time_before(jiffies,
				j1 + (1<<RAID6_TIME_JIFFIES_LG2))) {
				(*algo)->gen_syndrome(disks, PAGE_SIZE, dptrs);
				perf++;
			}
			preempt_enable();

			/*
			 * Higher ->prefer always wins; raw iteration count
			 * only breaks ties within the same preference level.
			 */
			if ( (*algo)->prefer > bestprefer ||
			     ((*algo)->prefer == bestprefer &&
			      perf > bestperf) ) {
				best = *algo;
				bestprefer = best->prefer;
				bestperf = perf;
			}
			printk("raid6: %-8s %5ld MB/s\n", (*algo)->name,
			       (perf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2));
		}
	}

	if (best) {
		printk("raid6: using algorithm %s (%ld MB/s)\n",
		       best->name,
		       (bestperf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2));
		raid6_call = *best;
	} else
		printk("raid6: Yikes! No algorithm found!\n");

	free_pages((unsigned long)syndromes, 1);

	return best ? 0 : -EINVAL;
}

/* Nothing to tear down; present only to satisfy module_exit(). */
static void raid6_exit(void)
{
	do { } while (0);
}

subsys_initcall(raid6_select_algo);
module_exit(raid6_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID6 Q-syndrome calculations");
gpl-2.0
kozmikkick/tripndroid-endeavoru-3.5.7
arch/sparc/prom/bootstr_32.c
7873
1215
/* * bootstr.c: Boot string/argument acquisition from the PROM. * * Copyright(C) 1995 David S. Miller (davem@caip.rutgers.edu) */ #include <linux/string.h> #include <asm/oplib.h> #include <linux/init.h> #define BARG_LEN 256 static char barg_buf[BARG_LEN] = { 0 }; static char fetched __initdata = 0; char * __init prom_getbootargs(void) { int iter; char *cp, *arg; /* This check saves us from a panic when bootfd patches args. */ if (fetched) { return barg_buf; } switch(prom_vers) { case PROM_V0: cp = barg_buf; /* Start from 1 and go over fd(0,0,0)kernel */ for(iter = 1; iter < 8; iter++) { arg = (*(romvec->pv_v0bootargs))->argv[iter]; if (arg == NULL) break; while(*arg != 0) { /* Leave place for space and null. */ if(cp >= barg_buf + BARG_LEN-2){ /* We might issue a warning here. */ break; } *cp++ = *arg++; } *cp++ = ' '; } *cp = 0; break; case PROM_V2: case PROM_V3: /* * V3 PROM cannot supply as with more than 128 bytes * of an argument. But a smart bootstrap loader can. */ strlcpy(barg_buf, *romvec->pv_v2bootargs.bootargs, sizeof(barg_buf)); break; default: break; } fetched = 1; return barg_buf; }
gpl-2.0
SaberMod/android_kernel_lge_hammerhead-sts
arch/cris/arch-v10/kernel/setup.c
7873
2998
/*
 *
 *  linux/arch/cris/arch-v10/kernel/setup.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (c) 2001-2002  Axis Communications AB
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/delay.h>
#include <linux/param.h>
#include <arch/system.h>

#ifdef CONFIG_PROC_FS
/* Feature bits for cpu_info[].flags below. */
#define HAS_FPU		0x0001
#define HAS_MMU		0x0002
#define HAS_ETHERNET100	0x0004
#define HAS_TOKENRING	0x0008
#define HAS_SCSI	0x0010
#define HAS_ATA		0x0020
#define HAS_USB		0x0040
#define HAS_IRQ_BUG	0x0080
#define HAS_MMU_BUG	0x0100

/*
 * Per-revision CPU description table, indexed by the value read from
 * the version register (rdvr()).  cache is in kB.
 */
static struct cpu_info {
	char *model;
	unsigned short cache;
	unsigned short flags;
} cpu_info[] = {
	/* The first four models will never ever run this code and are
	   only here for display.  */
	{ "ETRAX 1", 0, 0 },
	{ "ETRAX 2", 0, 0 },
	{ "ETRAX 3", 0, HAS_TOKENRING },
	{ "ETRAX 4", 0, HAS_TOKENRING | HAS_SCSI },
	{ "Unknown", 0, 0 },
	{ "Unknown", 0, 0 },
	{ "Unknown", 0, 0 },
	{ "Simulator", 8, HAS_ETHERNET100 | HAS_SCSI | HAS_ATA },
	{ "ETRAX 100", 8, HAS_ETHERNET100 | HAS_SCSI | HAS_ATA | HAS_IRQ_BUG },
	{ "ETRAX 100", 8, HAS_ETHERNET100 | HAS_SCSI | HAS_ATA },
	{ "ETRAX 100LX", 8, HAS_ETHERNET100 | HAS_SCSI | HAS_ATA | HAS_USB
			  | HAS_MMU | HAS_MMU_BUG },
	{ "ETRAX 100LX v2", 8, HAS_ETHERNET100 | HAS_SCSI | HAS_ATA | HAS_USB
			     | HAS_MMU },
	{ "Unknown", 0, 0 }	/* This entry MUST be the last */
};

/*
 * /proc/cpuinfo backend: print one record describing the (single) CPU.
 * An out-of-range version register falls back to the final "Unknown"
 * table entry.  Returns seq_printf()'s result directly — NOTE(review):
 * assumes seq_printf returns int in this kernel version; confirm if
 * forward-porting (it became void in later kernels).
 */
int show_cpuinfo(struct seq_file *m, void *v)
{
	unsigned long revision;
	struct cpu_info *info;

	/* read the version register in the CPU and print some stuff */
	revision = rdvr();

	if (revision >= ARRAY_SIZE(cpu_info))
		info = &cpu_info[ARRAY_SIZE(cpu_info) - 1];
	else
		info = &cpu_info[revision];

	return seq_printf(m,
		"processor\t: 0\n"
		"cpu\t\t: CRIS\n"
		"cpu revision\t: %lu\n"
		"cpu model\t: %s\n"
		"cache size\t: %d kB\n"
		"fpu\t\t: %s\n"
		"mmu\t\t: %s\n"
		"mmu DMA bug\t: %s\n"
		"ethernet\t: %s Mbps\n"
		"token ring\t: %s\n"
		"scsi\t\t: %s\n"
		"ata\t\t: %s\n"
		"usb\t\t: %s\n"
		"bogomips\t: %lu.%02lu\n",
		revision,
		info->model,
		info->cache,
		info->flags & HAS_FPU ? "yes" : "no",
		info->flags & HAS_MMU ? "yes" : "no",
		info->flags & HAS_MMU_BUG ? "yes" : "no",
		info->flags & HAS_ETHERNET100 ? "10/100" : "10",
		info->flags & HAS_TOKENRING ? "4/16 Mbps" : "no",
		info->flags & HAS_SCSI ? "yes" : "no",
		info->flags & HAS_ATA ? "yes" : "no",
		info->flags & HAS_USB ? "yes" : "no",
		(loops_per_jiffy * HZ + 500) / 500000,
		((loops_per_jiffy * HZ + 500) / 5000) % 100);
}

#endif /* CONFIG_PROC_FS */

/* Print the boot-time copyright banner. */
void show_etrax_copyright(void)
{
	printk(KERN_INFO "Linux/CRIS port on ETRAX 100LX (c) 2001 Axis Communications AB\n");
}
gpl-2.0
epic4g/samsung-kernel-c1spr-EK02
arch/ia64/kernel/sys_ia64.c
8897
5254
/* * This file contains various system calls that have different calling * conventions on different platforms. * * Copyright (C) 1999-2000, 2002-2003, 2005 Hewlett-Packard Co * David Mosberger-Tang <davidm@hpl.hp.com> */ #include <linux/errno.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/mman.h> #include <linux/sched.h> #include <linux/shm.h> #include <linux/file.h> /* doh, must come after sched.h... */ #include <linux/smp.h> #include <linux/syscalls.h> #include <linux/highuid.h> #include <linux/hugetlb.h> #include <asm/shmparam.h> #include <asm/uaccess.h> unsigned long arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { long map_shared = (flags & MAP_SHARED); unsigned long start_addr, align_mask = PAGE_SIZE - 1; struct mm_struct *mm = current->mm; struct vm_area_struct *vma; if (len > RGN_MAP_LIMIT) return -ENOMEM; /* handle fixed mapping: prevent overlap with huge pages */ if (flags & MAP_FIXED) { if (is_hugepage_only_range(mm, addr, len)) return -EINVAL; return addr; } #ifdef CONFIG_HUGETLB_PAGE if (REGION_NUMBER(addr) == RGN_HPAGE) addr = 0; #endif if (!addr) addr = mm->free_area_cache; if (map_shared && (TASK_SIZE > 0xfffffffful)) /* * For 64-bit tasks, align shared segments to 1MB to avoid potential * performance penalty due to virtual aliasing (see ASDM). For 32-bit * tasks, we prefer to avoid exhausting the address space too quickly by * limiting alignment to a single page. */ align_mask = SHMLBA - 1; full_search: start_addr = addr = (addr + align_mask) & ~align_mask; for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { /* At this point: (!vma || addr < vma->vm_end). */ if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) { if (start_addr != TASK_UNMAPPED_BASE) { /* Start a new search --- just in case we missed some holes. 
*/ addr = TASK_UNMAPPED_BASE; goto full_search; } return -ENOMEM; } if (!vma || addr + len <= vma->vm_start) { /* Remember the address where we stopped this search: */ mm->free_area_cache = addr + len; return addr; } addr = (vma->vm_end + align_mask) & ~align_mask; } } asmlinkage long ia64_getpriority (int which, int who) { long prio; prio = sys_getpriority(which, who); if (prio >= 0) { force_successful_syscall_return(); prio = 20 - prio; } return prio; } /* XXX obsolete, but leave it here until the old libc is gone... */ asmlinkage unsigned long sys_getpagesize (void) { return PAGE_SIZE; } asmlinkage unsigned long ia64_brk (unsigned long brk) { unsigned long retval = sys_brk(brk); force_successful_syscall_return(); return retval; } /* * On IA-64, we return the two file descriptors in ret0 and ret1 (r8 * and r9) as this is faster than doing a copy_to_user(). */ asmlinkage long sys_ia64_pipe (void) { struct pt_regs *regs = task_pt_regs(current); int fd[2]; int retval; retval = do_pipe_flags(fd, 0); if (retval) goto out; retval = fd[0]; regs->r9 = fd[1]; out: return retval; } int ia64_mmap_check(unsigned long addr, unsigned long len, unsigned long flags) { unsigned long roff; /* * Don't permit mappings into unmapped space, the virtual page table * of a region, or across a region boundary. Note: RGN_MAP_LIMIT is * equal to 2^n-PAGE_SIZE (for some integer n <= 61) and len > 0. */ roff = REGION_OFFSET(addr); if ((len > RGN_MAP_LIMIT) || (roff > (RGN_MAP_LIMIT - len))) return -EINVAL; return 0; } /* * mmap2() is like mmap() except that the offset is expressed in units * of PAGE_SIZE (instead of bytes). This allows to mmap2() (pieces * of) files that are larger than the address space of the CPU. 
*/ asmlinkage unsigned long sys_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, long pgoff) { addr = sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff); if (!IS_ERR((void *) addr)) force_successful_syscall_return(); return addr; } asmlinkage unsigned long sys_mmap (unsigned long addr, unsigned long len, int prot, int flags, int fd, long off) { if (offset_in_page(off) != 0) return -EINVAL; addr = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT); if (!IS_ERR((void *) addr)) force_successful_syscall_return(); return addr; } asmlinkage unsigned long ia64_mremap (unsigned long addr, unsigned long old_len, unsigned long new_len, unsigned long flags, unsigned long new_addr) { extern unsigned long do_mremap (unsigned long addr, unsigned long old_len, unsigned long new_len, unsigned long flags, unsigned long new_addr); down_write(&current->mm->mmap_sem); { addr = do_mremap(addr, old_len, new_len, flags, new_addr); } up_write(&current->mm->mmap_sem); if (IS_ERR((void *) addr)) return addr; force_successful_syscall_return(); return addr; } #ifndef CONFIG_PCI asmlinkage long sys_pciconfig_read (unsigned long bus, unsigned long dfn, unsigned long off, unsigned long len, void *buf) { return -ENOSYS; } asmlinkage long sys_pciconfig_write (unsigned long bus, unsigned long dfn, unsigned long off, unsigned long len, void *buf) { return -ENOSYS; } #endif /* CONFIG_PCI */
gpl-2.0
Renzo-Olivares/android_kernel_htc_m7-gpe
drivers/video/console/font_8x16.c
14785
95976
/**********************************************/ /* */ /* Font file generated by cpi2fnt */ /* */ /**********************************************/ #include <linux/font.h> #include <linux/module.h> #define FONTDATAMAX 4096 static const unsigned char fontdata_8x16[FONTDATAMAX] = { /* 0 0x00 '^@' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 1 0x01 '^A' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x81, /* 10000001 */ 0xa5, /* 10100101 */ 0x81, /* 10000001 */ 0x81, /* 10000001 */ 0xbd, /* 10111101 */ 0x99, /* 10011001 */ 0x81, /* 10000001 */ 0x81, /* 10000001 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 2 0x02 '^B' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0xff, /* 11111111 */ 0xdb, /* 11011011 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xc3, /* 11000011 */ 0xe7, /* 11100111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 3 0x03 '^C' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x6c, /* 01101100 */ 0xfe, /* 11111110 */ 0xfe, /* 11111110 */ 0xfe, /* 11111110 */ 0xfe, /* 11111110 */ 0x7c, /* 01111100 */ 0x38, /* 00111000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 4 0x04 '^D' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x7c, /* 01111100 */ 0xfe, /* 11111110 */ 0x7c, /* 01111100 */ 0x38, /* 00111000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 
00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 5 0x05 '^E' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x3c, /* 00111100 */ 0xe7, /* 11100111 */ 0xe7, /* 11100111 */ 0xe7, /* 11100111 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 6 0x06 '^F' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x7e, /* 01111110 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 7 0x07 '^G' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 8 0x08 '^H' */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xe7, /* 11100111 */ 0xc3, /* 11000011 */ 0xc3, /* 11000011 */ 0xe7, /* 11100111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ /* 9 0x09 '^I' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3c, /* 00111100 */ 0x66, /* 01100110 */ 0x42, /* 01000010 */ 0x42, /* 01000010 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 10 0x0a '^J' */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xc3, /* 11000011 */ 0x99, /* 10011001 */ 0xbd, /* 
10111101 */ 0xbd, /* 10111101 */ 0x99, /* 10011001 */ 0xc3, /* 11000011 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ /* 11 0x0b '^K' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x1e, /* 00011110 */ 0x0e, /* 00001110 */ 0x1a, /* 00011010 */ 0x32, /* 00110010 */ 0x78, /* 01111000 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x78, /* 01111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 12 0x0c '^L' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3c, /* 00111100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 13 0x0d '^M' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3f, /* 00111111 */ 0x33, /* 00110011 */ 0x3f, /* 00111111 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x70, /* 01110000 */ 0xf0, /* 11110000 */ 0xe0, /* 11100000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 14 0x0e '^N' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7f, /* 01111111 */ 0x63, /* 01100011 */ 0x7f, /* 01111111 */ 0x63, /* 01100011 */ 0x63, /* 01100011 */ 0x63, /* 01100011 */ 0x63, /* 01100011 */ 0x67, /* 01100111 */ 0xe7, /* 11100111 */ 0xe6, /* 11100110 */ 0xc0, /* 11000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 15 0x0f '^O' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xdb, /* 11011011 */ 0x3c, /* 00111100 */ 0xe7, /* 11100111 */ 0x3c, /* 00111100 */ 0xdb, /* 11011011 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 16 0x10 '^P' */ 0x00, /* 00000000 */ 
0x80, /* 10000000 */ 0xc0, /* 11000000 */ 0xe0, /* 11100000 */ 0xf0, /* 11110000 */ 0xf8, /* 11111000 */ 0xfe, /* 11111110 */ 0xf8, /* 11111000 */ 0xf0, /* 11110000 */ 0xe0, /* 11100000 */ 0xc0, /* 11000000 */ 0x80, /* 10000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 17 0x11 '^Q' */ 0x00, /* 00000000 */ 0x02, /* 00000010 */ 0x06, /* 00000110 */ 0x0e, /* 00001110 */ 0x1e, /* 00011110 */ 0x3e, /* 00111110 */ 0xfe, /* 11111110 */ 0x3e, /* 00111110 */ 0x1e, /* 00011110 */ 0x0e, /* 00001110 */ 0x06, /* 00000110 */ 0x02, /* 00000010 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 18 0x12 '^R' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 19 0x13 '^S' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x00, /* 00000000 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 20 0x14 '^T' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7f, /* 01111111 */ 0xdb, /* 11011011 */ 0xdb, /* 11011011 */ 0xdb, /* 11011011 */ 0x7b, /* 01111011 */ 0x1b, /* 00011011 */ 0x1b, /* 00011011 */ 0x1b, /* 00011011 */ 0x1b, /* 00011011 */ 0x1b, /* 00011011 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 21 0x15 '^U' */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0x60, /* 01100000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x0c, /* 00001100 */ 0xc6, /* 
11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 22 0x16 '^V' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0xfe, /* 11111110 */ 0xfe, /* 11111110 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 23 0x17 '^W' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 24 0x18 '^X' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 25 0x19 '^Y' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 26 0x1a '^Z' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0xfe, /* 11111110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 27 0x1b '^[' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 
0x30, /* 00110000 */ 0x60, /* 01100000 */ 0xfe, /* 11111110 */ 0x60, /* 01100000 */ 0x30, /* 00110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 28 0x1c '^\' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 29 0x1d '^]' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x28, /* 00101000 */ 0x6c, /* 01101100 */ 0xfe, /* 11111110 */ 0x6c, /* 01101100 */ 0x28, /* 00101000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 30 0x1e '^^' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x38, /* 00111000 */ 0x7c, /* 01111100 */ 0x7c, /* 01111100 */ 0xfe, /* 11111110 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 31 0x1f '^_' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0xfe, /* 11111110 */ 0x7c, /* 01111100 */ 0x7c, /* 01111100 */ 0x38, /* 00111000 */ 0x38, /* 00111000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 32 0x20 ' ' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 
00000000 */ /* 33 0x21 '!' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x3c, /* 00111100 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 34 0x22 '"' */ 0x00, /* 00000000 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x24, /* 00100100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 35 0x23 '#' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0xfe, /* 11111110 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0xfe, /* 11111110 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 36 0x24 '$' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc2, /* 11000010 */ 0xc0, /* 11000000 */ 0x7c, /* 01111100 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x86, /* 10000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 37 0x25 '%' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc2, /* 11000010 */ 0xc6, /* 11000110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0xc6, /* 11000110 */ 0x86, /* 10000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 38 0x26 '&' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x76, /* 01110110 */ 0xdc, /* 11011100 */ 0xcc, /* 11001100 */ 0xcc, /* 
11001100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 39 0x27 ''' */ 0x00, /* 00000000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 40 0x28 '(' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 41 0x29 ')' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 42 0x2a '*' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 0xff, /* 11111111 */ 0x3c, /* 00111100 */ 0x66, /* 01100110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 43 0x2b '+' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 44 0x2c ',' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 
00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 45 0x2d '-' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 46 0x2e '.' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 47 0x2f '/' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x02, /* 00000010 */ 0x06, /* 00000110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0xc0, /* 11000000 */ 0x80, /* 10000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 48 0x30 '0' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xd6, /* 11010110 */ 0xd6, /* 11010110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 49 0x31 '1' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x38, /* 00111000 */ 0x78, /* 01111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, 
/* 00000000 */ 0x00, /* 00000000 */ /* 50 0x32 '2' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0x06, /* 00000110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0xc0, /* 11000000 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 51 0x33 '3' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x3c, /* 00111100 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 52 0x34 '4' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x0c, /* 00001100 */ 0x1c, /* 00011100 */ 0x3c, /* 00111100 */ 0x6c, /* 01101100 */ 0xcc, /* 11001100 */ 0xfe, /* 11111110 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x1e, /* 00011110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 53 0x35 '5' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xfc, /* 11111100 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 54 0x36 '6' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x60, /* 01100000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xfc, /* 11111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 55 0x37 '7' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0xc6, /* 11000110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, 
/* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 56 0x38 '8' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 57 0x39 '9' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7e, /* 01111110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x0c, /* 00001100 */ 0x78, /* 01111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 58 0x3a ':' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 59 0x3b ';' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 60 0x3c '<' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x06, /* 00000110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0x06, /* 00000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 61 0x3d '=' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, 
/* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 62 0x3e '>' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x60, /* 01100000 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0x06, /* 00000110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 63 0x3f '?' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 64 0x40 '@' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xde, /* 11011110 */ 0xde, /* 11011110 */ 0xde, /* 11011110 */ 0xdc, /* 11011100 */ 0xc0, /* 11000000 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 65 0x41 'A' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 66 0x42 'B' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfc, /* 11111100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x7c, /* 01111100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0xfc, /* 11111100 */ 0x00, /* 00000000 */ 
0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 67 0x43 'C' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3c, /* 00111100 */ 0x66, /* 01100110 */ 0xc2, /* 11000010 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc2, /* 11000010 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 68 0x44 'D' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xf8, /* 11111000 */ 0x6c, /* 01101100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x6c, /* 01101100 */ 0xf8, /* 11111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 69 0x45 'E' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x66, /* 01100110 */ 0x62, /* 01100010 */ 0x68, /* 01101000 */ 0x78, /* 01111000 */ 0x68, /* 01101000 */ 0x60, /* 01100000 */ 0x62, /* 01100010 */ 0x66, /* 01100110 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 70 0x46 'F' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x66, /* 01100110 */ 0x62, /* 01100010 */ 0x68, /* 01101000 */ 0x78, /* 01111000 */ 0x68, /* 01101000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0xf0, /* 11110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 71 0x47 'G' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3c, /* 00111100 */ 0x66, /* 01100110 */ 0xc2, /* 11000010 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xde, /* 11011110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x66, /* 01100110 */ 0x3a, /* 00111010 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 72 0x48 'H' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 
0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 73 0x49 'I' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 74 0x4a 'J' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x1e, /* 00011110 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x78, /* 01111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 75 0x4b 'K' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xe6, /* 11100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x6c, /* 01101100 */ 0x78, /* 01111000 */ 0x78, /* 01111000 */ 0x6c, /* 01101100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0xe6, /* 11100110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 76 0x4c 'L' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xf0, /* 11110000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x62, /* 01100010 */ 0x66, /* 01100110 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 77 0x4d 'M' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xee, /* 11101110 */ 0xfe, /* 11111110 */ 0xfe, /* 11111110 */ 0xd6, /* 11010110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 78 0x4e 'N' */ 0x00, /* 00000000 */ 
0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xe6, /* 11100110 */ 0xf6, /* 11110110 */ 0xfe, /* 11111110 */ 0xde, /* 11011110 */ 0xce, /* 11001110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 79 0x4f 'O' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 80 0x50 'P' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfc, /* 11111100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x7c, /* 01111100 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0xf0, /* 11110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 81 0x51 'Q' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xd6, /* 11010110 */ 0xde, /* 11011110 */ 0x7c, /* 01111100 */ 0x0c, /* 00001100 */ 0x0e, /* 00001110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 82 0x52 'R' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfc, /* 11111100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x7c, /* 01111100 */ 0x6c, /* 01101100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0xe6, /* 11100110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 83 0x53 'S' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x60, /* 01100000 */ 0x38, /* 00111000 */ 0x0c, /* 00001100 */ 0x06, /* 00000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 
*/ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 84 0x54 'T' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x7e, /* 01111110 */ 0x5a, /* 01011010 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 85 0x55 'U' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 86 0x56 'V' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 87 0x57 'W' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xd6, /* 11010110 */ 0xd6, /* 11010110 */ 0xd6, /* 11010110 */ 0xfe, /* 11111110 */ 0xee, /* 11101110 */ 0x6c, /* 01101100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 88 0x58 'X' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x6c, /* 01101100 */ 0x7c, /* 01111100 */ 0x38, /* 00111000 */ 0x38, /* 00111000 */ 0x7c, /* 01111100 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 89 0x59 'Y' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 
*/ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 90 0x5a 'Z' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0xc6, /* 11000110 */ 0x86, /* 10000110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0xc2, /* 11000010 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 91 0x5b '[' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3c, /* 00111100 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 92 0x5c '\' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x80, /* 10000000 */ 0xc0, /* 11000000 */ 0xe0, /* 11100000 */ 0x70, /* 01110000 */ 0x38, /* 00111000 */ 0x1c, /* 00011100 */ 0x0e, /* 00001110 */ 0x06, /* 00000110 */ 0x02, /* 00000010 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 93 0x5d ']' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3c, /* 00111100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 94 0x5e '^' */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 95 0x5f '_' 
*/ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 96 0x60 '`' */ 0x00, /* 00000000 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 97 0x61 'a' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x78, /* 01111000 */ 0x0c, /* 00001100 */ 0x7c, /* 01111100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 98 0x62 'b' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xe0, /* 11100000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x78, /* 01111000 */ 0x6c, /* 01101100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 99 0x63 'c' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 100 0x64 'd' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x1c, /* 00011100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x3c, /* 00111100 */ 0x6c, /* 01101100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 
11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 101 0x65 'e' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 102 0x66 'f' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x1c, /* 00011100 */ 0x36, /* 00110110 */ 0x32, /* 00110010 */ 0x30, /* 00110000 */ 0x78, /* 01111000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x78, /* 01111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 103 0x67 'g' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x76, /* 01110110 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x7c, /* 01111100 */ 0x0c, /* 00001100 */ 0xcc, /* 11001100 */ 0x78, /* 01111000 */ 0x00, /* 00000000 */ /* 104 0x68 'h' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xe0, /* 11100000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x6c, /* 01101100 */ 0x76, /* 01110110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0xe6, /* 11100110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 105 0x69 'i' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 106 0x6a 'j' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 
0x00, /* 00000000 */ 0x0e, /* 00001110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ /* 107 0x6b 'k' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xe0, /* 11100000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x66, /* 01100110 */ 0x6c, /* 01101100 */ 0x78, /* 01111000 */ 0x78, /* 01111000 */ 0x6c, /* 01101100 */ 0x66, /* 01100110 */ 0xe6, /* 11100110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 108 0x6c 'l' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 109 0x6d 'm' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xec, /* 11101100 */ 0xfe, /* 11111110 */ 0xd6, /* 11010110 */ 0xd6, /* 11010110 */ 0xd6, /* 11010110 */ 0xd6, /* 11010110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 110 0x6e 'n' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xdc, /* 11011100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 111 0x6f 'o' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 
00000000 */ 0x00, /* 00000000 */ /* 112 0x70 'p' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xdc, /* 11011100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x7c, /* 01111100 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0xf0, /* 11110000 */ 0x00, /* 00000000 */ /* 113 0x71 'q' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x76, /* 01110110 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x7c, /* 01111100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x1e, /* 00011110 */ 0x00, /* 00000000 */ /* 114 0x72 'r' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xdc, /* 11011100 */ 0x76, /* 01110110 */ 0x66, /* 01100110 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0xf0, /* 11110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 115 0x73 's' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0x60, /* 01100000 */ 0x38, /* 00111000 */ 0x0c, /* 00001100 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 116 0x74 't' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0xfc, /* 11111100 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x36, /* 00110110 */ 0x1c, /* 00011100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 117 0x75 'u' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 
0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 118 0x76 'v' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 119 0x77 'w' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xd6, /* 11010110 */ 0xd6, /* 11010110 */ 0xd6, /* 11010110 */ 0xfe, /* 11111110 */ 0x6c, /* 01101100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 120 0x78 'x' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x38, /* 00111000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 121 0x79 'y' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7e, /* 01111110 */ 0x06, /* 00000110 */ 0x0c, /* 00001100 */ 0xf8, /* 11111000 */ 0x00, /* 00000000 */ /* 122 0x7a 'z' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0xcc, /* 11001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 123 0x7b '{' */ 0x00, /* 00000000 */ 0x00, /* 
00000000 */ 0x0e, /* 00001110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x70, /* 01110000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x0e, /* 00001110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 124 0x7c '|' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 125 0x7d '}' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x70, /* 01110000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x0e, /* 00001110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x70, /* 01110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 126 0x7e '~' */ 0x00, /* 00000000 */ 0x76, /* 01110110 */ 0xdc, /* 11011100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 127 0x7f '' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 128 0x80 '€' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3c, /* 00111100 */ 0x66, /* 01100110 */ 0xc2, /* 11000010 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc2, /* 11000010 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 
0x18, /* 00011000 */ 0x70, /* 01110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 129 0x81 '' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xcc, /* 11001100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 130 0x82 '‚' */ 0x00, /* 00000000 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 131 0x83 'ƒ' */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0x00, /* 00000000 */ 0x78, /* 01111000 */ 0x0c, /* 00001100 */ 0x7c, /* 01111100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 132 0x84 '„' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xcc, /* 11001100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x78, /* 01111000 */ 0x0c, /* 00001100 */ 0x7c, /* 01111100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 133 0x85 '…' */ 0x00, /* 00000000 */ 0x60, /* 01100000 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x78, /* 01111000 */ 0x0c, /* 00001100 */ 0x7c, /* 01111100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 134 0x86 '†' */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x78, /* 
01111000 */ 0x0c, /* 00001100 */ 0x7c, /* 01111100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 135 0x87 '‡' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x18, /* 00011000 */ 0x70, /* 01110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 136 0x88 'ˆ' */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 137 0x89 '‰' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 138 0x8a 'Š' */ 0x00, /* 00000000 */ 0x60, /* 01100000 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 139 0x8b '‹' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x66, /* 01100110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 
140 0x8c 'Œ' */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x66, /* 01100110 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 141 0x8d '' */ 0x00, /* 00000000 */ 0x60, /* 01100000 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 142 0x8e 'Ž' */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 143 0x8f '' */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 144 0x90 '' */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x66, /* 01100110 */ 0x62, /* 01100010 */ 0x68, /* 01101000 */ 0x78, /* 01111000 */ 0x68, /* 01101000 */ 0x62, /* 01100010 */ 0x66, /* 01100110 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 145 0x91 '‘' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xec, /* 11101100 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x7e, /* 01111110 */ 0xd8, /* 11011000 
*/ 0xd8, /* 11011000 */ 0x6e, /* 01101110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 146 0x92 '’' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3e, /* 00111110 */ 0x6c, /* 01101100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xfe, /* 11111110 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xce, /* 11001110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 147 0x93 '“' */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 148 0x94 '”' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 149 0x95 '•' */ 0x00, /* 00000000 */ 0x60, /* 01100000 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 150 0x96 '–' */ 0x00, /* 00000000 */ 0x30, /* 00110000 */ 0x78, /* 01111000 */ 0xcc, /* 11001100 */ 0x00, /* 00000000 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 151 0x97 '—' */ 0x00, /* 00000000 */ 0x60, /* 01100000 */ 0x30, /* 00110000 */ 0x18, /* 
00011000 */ 0x00, /* 00000000 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 152 0x98 '˜' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7e, /* 01111110 */ 0x06, /* 00000110 */ 0x0c, /* 00001100 */ 0x78, /* 01111000 */ 0x00, /* 00000000 */ /* 153 0x99 '™' */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 154 0x9a 'š' */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 155 0x9b '›' */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 156 0x9c 'œ' */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0x64, /* 01100100 */ 0x60, /* 01100000 */ 0xf0, /* 11110000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0xe6, /* 11100110 */ 0xfc, /* 11111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 
0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 157 0x9d '' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 158 0x9e 'ž' */ 0x00, /* 00000000 */ 0xf8, /* 11111000 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xf8, /* 11111000 */ 0xc4, /* 11000100 */ 0xcc, /* 11001100 */ 0xde, /* 11011110 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 159 0x9f 'Ÿ' */ 0x00, /* 00000000 */ 0x0e, /* 00001110 */ 0x1b, /* 00011011 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xd8, /* 11011000 */ 0x70, /* 01110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 160 0xa0 ' ' */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0x00, /* 00000000 */ 0x78, /* 01111000 */ 0x0c, /* 00001100 */ 0x7c, /* 01111100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 161 0xa1 '¡' */ 0x00, /* 00000000 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 162 0xa2 '¢' */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 
11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 163 0xa3 '£' */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0x00, /* 00000000 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 164 0xa4 '¤' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x76, /* 01110110 */ 0xdc, /* 11011100 */ 0x00, /* 00000000 */ 0xdc, /* 11011100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 165 0xa5 '¥' */ 0x76, /* 01110110 */ 0xdc, /* 11011100 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xe6, /* 11100110 */ 0xf6, /* 11110110 */ 0xfe, /* 11111110 */ 0xde, /* 11011110 */ 0xce, /* 11001110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 166 0xa6 '¦' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3c, /* 00111100 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x3e, /* 00111110 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 167 0xa7 '§' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 168 0xa8 '¨' */ 0x00, /* 00000000 */ 
0x00, /* 00000000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x00, /* 00000000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0xc0, /* 11000000 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 169 0xa9 '©' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 170 0xaa 'ª' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 171 0xab '«' */ 0x00, /* 00000000 */ 0x60, /* 01100000 */ 0xe0, /* 11100000 */ 0x62, /* 01100010 */ 0x66, /* 01100110 */ 0x6c, /* 01101100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0xdc, /* 11011100 */ 0x86, /* 10000110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x3e, /* 00111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 172 0xac '¬' */ 0x00, /* 00000000 */ 0x60, /* 01100000 */ 0xe0, /* 11100000 */ 0x62, /* 01100010 */ 0x66, /* 01100110 */ 0x6c, /* 01101100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x66, /* 01100110 */ 0xce, /* 11001110 */ 0x9a, /* 10011010 */ 0x3f, /* 00111111 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 173 0xad '­' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x3c, /* 00111100 */ 0x3c, /* 00111100 */ 0x18, /* 
00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 174 0xae '®' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x36, /* 00110110 */ 0x6c, /* 01101100 */ 0xd8, /* 11011000 */ 0x6c, /* 01101100 */ 0x36, /* 00110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 175 0xaf '¯' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xd8, /* 11011000 */ 0x6c, /* 01101100 */ 0x36, /* 00110110 */ 0x6c, /* 01101100 */ 0xd8, /* 11011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 176 0xb0 '°' */ 0x11, /* 00010001 */ 0x44, /* 01000100 */ 0x11, /* 00010001 */ 0x44, /* 01000100 */ 0x11, /* 00010001 */ 0x44, /* 01000100 */ 0x11, /* 00010001 */ 0x44, /* 01000100 */ 0x11, /* 00010001 */ 0x44, /* 01000100 */ 0x11, /* 00010001 */ 0x44, /* 01000100 */ 0x11, /* 00010001 */ 0x44, /* 01000100 */ 0x11, /* 00010001 */ 0x44, /* 01000100 */ /* 177 0xb1 '±' */ 0x55, /* 01010101 */ 0xaa, /* 10101010 */ 0x55, /* 01010101 */ 0xaa, /* 10101010 */ 0x55, /* 01010101 */ 0xaa, /* 10101010 */ 0x55, /* 01010101 */ 0xaa, /* 10101010 */ 0x55, /* 01010101 */ 0xaa, /* 10101010 */ 0x55, /* 01010101 */ 0xaa, /* 10101010 */ 0x55, /* 01010101 */ 0xaa, /* 10101010 */ 0x55, /* 01010101 */ 0xaa, /* 10101010 */ /* 178 0xb2 '²' */ 0xdd, /* 11011101 */ 0x77, /* 01110111 */ 0xdd, /* 11011101 */ 0x77, /* 01110111 */ 0xdd, /* 11011101 */ 0x77, /* 01110111 */ 0xdd, /* 11011101 */ 0x77, /* 01110111 */ 0xdd, /* 11011101 */ 0x77, /* 01110111 */ 0xdd, /* 11011101 */ 0x77, /* 01110111 */ 0xdd, /* 11011101 */ 0x77, /* 01110111 */ 0xdd, /* 11011101 */ 0x77, /* 01110111 */ /* 179 0xb3 '³' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 
0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 180 0xb4 '´' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xf8, /* 11111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 181 0xb5 'µ' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xf8, /* 11111000 */ 0x18, /* 00011000 */ 0xf8, /* 11111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 182 0xb6 '¶' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0xf6, /* 11110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 183 0xb7 '·' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 184 0xb8 '¸' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xf8, /* 11111000 */ 0x18, /* 00011000 */ 0xf8, /* 11111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 
00011000 */ /* 185 0xb9 '¹' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0xf6, /* 11110110 */ 0x06, /* 00000110 */ 0xf6, /* 11110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 186 0xba 'º' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 187 0xbb '»' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x06, /* 00000110 */ 0xf6, /* 11110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 188 0xbc '¼' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0xf6, /* 11110110 */ 0x06, /* 00000110 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 189 0xbd '½' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 190 0xbe '¾' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xf8, /* 11111000 */ 0x18, /* 00011000 */ 0xf8, /* 11111000 */ 0x00, /* 00000000 */ 
0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 191 0xbf '¿' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xf8, /* 11111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 192 0xc0 'À' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x1f, /* 00011111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 193 0xc1 'Á' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 194 0xc2 'Â' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 195 0xc3 'Ã' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x1f, /* 00011111 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 196 0xc4 'Ä' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 
00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 197 0xc5 'Å' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xff, /* 11111111 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 198 0xc6 'Æ' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x1f, /* 00011111 */ 0x18, /* 00011000 */ 0x1f, /* 00011111 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 199 0xc7 'Ç' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x37, /* 00110111 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 200 0xc8 'È' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x37, /* 00110111 */ 0x30, /* 00110000 */ 0x3f, /* 00111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 201 0xc9 'É' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3f, /* 00111111 */ 0x30, /* 00110000 */ 0x37, /* 00110111 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 
0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 202 0xca 'Ê' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0xf7, /* 11110111 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 203 0xcb 'Ë' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0xf7, /* 11110111 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 204 0xcc 'Ì' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x37, /* 00110111 */ 0x30, /* 00110000 */ 0x37, /* 00110111 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 205 0xcd 'Í' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 206 0xce 'Î' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0xf7, /* 11110111 */ 0x00, /* 00000000 */ 0xf7, /* 11110111 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 207 0xcf 'Ï' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xff, /* 11111111 */ 0x00, /* 
00000000 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 208 0xd0 'Ð' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 209 0xd1 'Ñ' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 210 0xd2 'Ò' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 211 0xd3 'Ó' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x3f, /* 00111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 212 0xd4 'Ô' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x1f, /* 00011111 */ 0x18, /* 00011000 */ 0x1f, /* 00011111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 213 0xd5 'Õ' */ 
0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x1f, /* 00011111 */ 0x18, /* 00011000 */ 0x1f, /* 00011111 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 214 0xd6 'Ö' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3f, /* 00111111 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 215 0xd7 '×' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0xff, /* 11111111 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 216 0xd8 'Ø' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xff, /* 11111111 */ 0x18, /* 00011000 */ 0xff, /* 11111111 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 217 0xd9 'Ù' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xf8, /* 11111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 218 0xda 'Ú' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x1f, /* 00011111 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 
00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 219 0xdb 'Û' */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ /* 220 0xdc 'Ü' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ /* 221 0xdd 'Ý' */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ /* 222 0xde 'Þ' */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ /* 223 0xdf 'ß' */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 224 0xe0 'à' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 
0x00, /* 00000000 */ 0x76, /* 01110110 */ 0xdc, /* 11011100 */ 0xd8, /* 11011000 */ 0xd8, /* 11011000 */ 0xd8, /* 11011000 */ 0xdc, /* 11011100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 225 0xe1 'á' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x78, /* 01111000 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xd8, /* 11011000 */ 0xcc, /* 11001100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xcc, /* 11001100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 226 0xe2 'â' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 227 0xe3 'ã' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 228 0xe4 'ä' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0xc6, /* 11000110 */ 0x60, /* 01100000 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 229 0xe5 'å' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0xd8, /* 11011000 */ 0xd8, /* 11011000 */ 0xd8, /* 11011000 */ 0xd8, /* 11011000 */ 0xd8, /* 11011000 */ 0x70, /* 01110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 
00000000 */ 0x00, /* 00000000 */ /* 230 0xe6 'æ' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x7c, /* 01111100 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0xc0, /* 11000000 */ 0x00, /* 00000000 */ /* 231 0xe7 'ç' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x76, /* 01110110 */ 0xdc, /* 11011100 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 232 0xe8 'è' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 233 0xe9 'é' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 234 0xea 'ê' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0xee, /* 11101110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 235 0xeb 'ë' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x1e, /* 00011110 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0x3e, /* 00111110 */ 0x66, /* 01100110 */ 
0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 236 0xec 'ì' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0xdb, /* 11011011 */ 0xdb, /* 11011011 */ 0xdb, /* 11011011 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 237 0xed 'í' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x03, /* 00000011 */ 0x06, /* 00000110 */ 0x7e, /* 01111110 */ 0xdb, /* 11011011 */ 0xdb, /* 11011011 */ 0xf3, /* 11110011 */ 0x7e, /* 01111110 */ 0x60, /* 01100000 */ 0xc0, /* 11000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 238 0xee 'î' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x1c, /* 00011100 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x7c, /* 01111100 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x30, /* 00110000 */ 0x1c, /* 00011100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 239 0xef 'ï' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 240 0xf0 'ð' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 241 0xf1 'ñ' */ 0x00, /* 00000000 */ 0x00, /* 
00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 242 0xf2 'ò' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0x06, /* 00000110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 243 0xf3 'ó' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 244 0xf4 'ô' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x0e, /* 00001110 */ 0x1b, /* 00011011 */ 0x1b, /* 00011011 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 245 0xf5 'õ' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xd8, /* 11011000 */ 0xd8, /* 11011000 */ 0xd8, /* 11011000 */ 0x70, /* 01110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 246 0xf6 'ö' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 
0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 247 0xf7 '÷' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x76, /* 01110110 */ 0xdc, /* 11011100 */ 0x00, /* 00000000 */ 0x76, /* 01110110 */ 0xdc, /* 11011100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 248 0xf8 'ø' */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 249 0xf9 'ù' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 250 0xfa 'ú' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 251 0xfb 'û' */ 0x00, /* 00000000 */ 0x0f, /* 00001111 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0xec, /* 11101100 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x3c, /* 00111100 */ 0x1c, /* 00011100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 252 0xfc 'ü' */ 0x00, /* 00000000 */ 0x6c, /* 01101100 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 
00110110 */ 0x36, /* 00110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 253 0xfd 'ý' */ 0x00, /* 00000000 */ 0x3c, /* 00111100 */ 0x66, /* 01100110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x32, /* 00110010 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 254 0xfe 'þ' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x7e, /* 01111110 */ 0x7e, /* 01111110 */ 0x7e, /* 01111110 */ 0x7e, /* 01111110 */ 0x7e, /* 01111110 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 255 0xff 'ÿ' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ }; const struct font_desc font_vga_8x16 = { .idx = VGA8x16_IDX, .name = "VGA8x16", .width = 8, .height = 16, .data = fontdata_8x16, .pref = 0, }; EXPORT_SYMBOL(font_vga_8x16);
gpl-2.0
Frontier314/kernel_s702hf
drivers/block/paride/friq.c
15553
6365
/* friq.c (c) 1998 Grant R. Guenther <grant@torque.net> Under the terms of the GNU General Public License friq.c is a low-level protocol driver for the Freecom "IQ" parallel port IDE adapter. Early versions of this adapter use the 'frpw' protocol. Freecom uses this adapter in a battery powered external CD-ROM drive. It is also used in LS-120 drives by Maxell and Panasonic, and other devices. The battery powered drive requires software support to control the power to the drive. This module enables the drive power when the high level driver (pcd) is loaded and disables it when the module is unloaded. Note, if the friq module is built in to the kernel, the power will never be switched off, so other means should be used to conserve battery power. */ /* Changes: 1.01 GRG 1998.12.20 Added support for soft power switch */ #define FRIQ_VERSION "1.01" #include <linux/module.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/wait.h> #include <asm/io.h> #include "paride.h" #define CMD(x) w2(4);w0(0xff);w0(0xff);w0(0x73);w0(0x73);\ w0(0xc9);w0(0xc9);w0(0x26);w0(0x26);w0(x);w0(x); #define j44(l,h) (((l>>4)&0x0f)|(h&0xf0)) /* cont = 0 - access the IDE register file cont = 1 - access the IDE command set */ static int cont_map[2] = { 0x08, 0x10 }; static int friq_read_regr( PIA *pi, int cont, int regr ) { int h,l,r; r = regr + cont_map[cont]; CMD(r); w2(6); l = r1(); w2(4); h = r1(); w2(4); return j44(l,h); } static void friq_write_regr( PIA *pi, int cont, int regr, int val) { int r; r = regr + cont_map[cont]; CMD(r); w0(val); w2(5);w2(7);w2(5);w2(4); } static void friq_read_block_int( PIA *pi, char * buf, int count, int regr ) { int h, l, k, ph; switch(pi->mode) { case 0: CMD(regr); for (k=0;k<count;k++) { w2(6); l = r1(); w2(4); h = r1(); buf[k] = j44(l,h); } w2(4); break; case 1: ph = 2; CMD(regr+0xc0); w0(0xff); for (k=0;k<count;k++) { w2(0xa4 + ph); buf[k] = r0(); ph = 2 - ph; } w2(0xac); w2(0xa4); w2(4); 
break; case 2: CMD(regr+0x80); for (k=0;k<count-2;k++) buf[k] = r4(); w2(0xac); w2(0xa4); buf[count-2] = r4(); buf[count-1] = r4(); w2(4); break; case 3: CMD(regr+0x80); for (k=0;k<(count/2)-1;k++) ((u16 *)buf)[k] = r4w(); w2(0xac); w2(0xa4); buf[count-2] = r4(); buf[count-1] = r4(); w2(4); break; case 4: CMD(regr+0x80); for (k=0;k<(count/4)-1;k++) ((u32 *)buf)[k] = r4l(); buf[count-4] = r4(); buf[count-3] = r4(); w2(0xac); w2(0xa4); buf[count-2] = r4(); buf[count-1] = r4(); w2(4); break; } } static void friq_read_block( PIA *pi, char * buf, int count) { friq_read_block_int(pi,buf,count,0x08); } static void friq_write_block( PIA *pi, char * buf, int count ) { int k; switch(pi->mode) { case 0: case 1: CMD(8); w2(5); for (k=0;k<count;k++) { w0(buf[k]); w2(7);w2(5); } w2(4); break; case 2: CMD(0xc8); w2(5); for (k=0;k<count;k++) w4(buf[k]); w2(4); break; case 3: CMD(0xc8); w2(5); for (k=0;k<count/2;k++) w4w(((u16 *)buf)[k]); w2(4); break; case 4: CMD(0xc8); w2(5); for (k=0;k<count/4;k++) w4l(((u32 *)buf)[k]); w2(4); break; } } static void friq_connect ( PIA *pi ) { pi->saved_r0 = r0(); pi->saved_r2 = r2(); w2(4); } static void friq_disconnect ( PIA *pi ) { CMD(0x20); w0(pi->saved_r0); w2(pi->saved_r2); } static int friq_test_proto( PIA *pi, char * scratch, int verbose ) { int j, k, r; int e[2] = {0,0}; pi->saved_r0 = r0(); w0(0xff); udelay(20); CMD(0x3d); /* turn the power on */ udelay(500); w0(pi->saved_r0); friq_connect(pi); for (j=0;j<2;j++) { friq_write_regr(pi,0,6,0xa0+j*0x10); for (k=0;k<256;k++) { friq_write_regr(pi,0,2,k^0xaa); friq_write_regr(pi,0,3,k^0x55); if (friq_read_regr(pi,0,2) != (k^0xaa)) e[j]++; } } friq_disconnect(pi); friq_connect(pi); friq_read_block_int(pi,scratch,512,0x10); r = 0; for (k=0;k<128;k++) if (scratch[k] != k) r++; friq_disconnect(pi); if (verbose) { printk("%s: friq: port 0x%x, mode %d, test=(%d,%d,%d)\n", pi->device,pi->port,pi->mode,e[0],e[1],r); } return (r || (e[0] && e[1])); } static void friq_log_adapter( PIA *pi, char * 
scratch, int verbose ) { char *mode_string[6] = {"4-bit","8-bit", "EPP-8","EPP-16","EPP-32"}; printk("%s: friq %s, Freecom IQ ASIC-2 adapter at 0x%x, ", pi->device, FRIQ_VERSION,pi->port); printk("mode %d (%s), delay %d\n",pi->mode, mode_string[pi->mode],pi->delay); pi->private = 1; friq_connect(pi); CMD(0x9e); /* disable sleep timer */ friq_disconnect(pi); } static void friq_release_proto( PIA *pi) { if (pi->private) { /* turn off the power */ friq_connect(pi); CMD(0x1d); CMD(0x1e); friq_disconnect(pi); pi->private = 0; } } static struct pi_protocol friq = { .owner = THIS_MODULE, .name = "friq", .max_mode = 5, .epp_first = 2, .default_delay = 1, .max_units = 1, .write_regr = friq_write_regr, .read_regr = friq_read_regr, .write_block = friq_write_block, .read_block = friq_read_block, .connect = friq_connect, .disconnect = friq_disconnect, .test_proto = friq_test_proto, .log_adapter = friq_log_adapter, .release_proto = friq_release_proto, }; static int __init friq_init(void) { return paride_register(&friq); } static void __exit friq_exit(void) { paride_unregister(&friq); } MODULE_LICENSE("GPL"); module_init(friq_init) module_exit(friq_exit)
gpl-2.0
omnirom/android_kernel_huawei_angler
drivers/video/fb_sys_fops.c
15553
2075
/* * linux/drivers/video/fb_sys_read.c - Generic file operations where * framebuffer is in system RAM * * Copyright (C) 2007 Antonino Daplas <adaplas@pol.net> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. * */ #include <linux/fb.h> #include <linux/module.h> #include <linux/uaccess.h> ssize_t fb_sys_read(struct fb_info *info, char __user *buf, size_t count, loff_t *ppos) { unsigned long p = *ppos; void *src; int err = 0; unsigned long total_size; if (info->state != FBINFO_STATE_RUNNING) return -EPERM; total_size = info->screen_size; if (total_size == 0) total_size = info->fix.smem_len; if (p >= total_size) return 0; if (count >= total_size) count = total_size; if (count + p > total_size) count = total_size - p; src = (void __force *)(info->screen_base + p); if (info->fbops->fb_sync) info->fbops->fb_sync(info); if (copy_to_user(buf, src, count)) err = -EFAULT; if (!err) *ppos += count; return (err) ? err : count; } EXPORT_SYMBOL_GPL(fb_sys_read); ssize_t fb_sys_write(struct fb_info *info, const char __user *buf, size_t count, loff_t *ppos) { unsigned long p = *ppos; void *dst; int err = 0; unsigned long total_size; if (info->state != FBINFO_STATE_RUNNING) return -EPERM; total_size = info->screen_size; if (total_size == 0) total_size = info->fix.smem_len; if (p > total_size) return -EFBIG; if (count > total_size) { err = -EFBIG; count = total_size; } if (count + p > total_size) { if (!err) err = -ENOSPC; count = total_size - p; } dst = (void __force *) (info->screen_base + p); if (info->fbops->fb_sync) info->fbops->fb_sync(info); if (copy_from_user(dst, buf, count)) err = -EFAULT; if (!err) *ppos += count; return (err) ? err : count; } EXPORT_SYMBOL_GPL(fb_sys_write); MODULE_AUTHOR("Antonino Daplas <adaplas@pol.net>"); MODULE_DESCRIPTION("Generic file read (fb in system RAM)"); MODULE_LICENSE("GPL");
gpl-2.0
01org/edison-linux
net/mac802154/llsec.c
194
25634
/* * Copyright (C) 2014 Fraunhofer ITWM * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * Written by: * Phoebe Buckheister <phoebe.buckheister@itwm.fraunhofer.de> */ #include <linux/err.h> #include <linux/bug.h> #include <linux/completion.h> #include <linux/ieee802154.h> #include <crypto/algapi.h> #include "ieee802154_i.h" #include "llsec.h" static void llsec_key_put(struct mac802154_llsec_key *key); static bool llsec_key_id_equal(const struct ieee802154_llsec_key_id *a, const struct ieee802154_llsec_key_id *b); static void llsec_dev_free(struct mac802154_llsec_device *dev); void mac802154_llsec_init(struct mac802154_llsec *sec) { memset(sec, 0, sizeof(*sec)); memset(&sec->params.default_key_source, 0xFF, IEEE802154_ADDR_LEN); INIT_LIST_HEAD(&sec->table.security_levels); INIT_LIST_HEAD(&sec->table.devices); INIT_LIST_HEAD(&sec->table.keys); hash_init(sec->devices_short); hash_init(sec->devices_hw); rwlock_init(&sec->lock); } void mac802154_llsec_destroy(struct mac802154_llsec *sec) { struct ieee802154_llsec_seclevel *sl, *sn; struct ieee802154_llsec_device *dev, *dn; struct ieee802154_llsec_key_entry *key, *kn; list_for_each_entry_safe(sl, sn, &sec->table.security_levels, list) { struct mac802154_llsec_seclevel *msl; msl = container_of(sl, struct mac802154_llsec_seclevel, level); list_del(&sl->list); kfree(msl); } list_for_each_entry_safe(dev, dn, &sec->table.devices, list) { struct mac802154_llsec_device *mdev; mdev = container_of(dev, struct mac802154_llsec_device, dev); list_del(&dev->list); llsec_dev_free(mdev); } list_for_each_entry_safe(key, kn, &sec->table.keys, list) { struct 
mac802154_llsec_key *mkey; mkey = container_of(key->key, struct mac802154_llsec_key, key); list_del(&key->list); llsec_key_put(mkey); kfree(key); } } int mac802154_llsec_get_params(struct mac802154_llsec *sec, struct ieee802154_llsec_params *params) { read_lock_bh(&sec->lock); *params = sec->params; read_unlock_bh(&sec->lock); return 0; } int mac802154_llsec_set_params(struct mac802154_llsec *sec, const struct ieee802154_llsec_params *params, int changed) { write_lock_bh(&sec->lock); if (changed & IEEE802154_LLSEC_PARAM_ENABLED) sec->params.enabled = params->enabled; if (changed & IEEE802154_LLSEC_PARAM_FRAME_COUNTER) sec->params.frame_counter = params->frame_counter; if (changed & IEEE802154_LLSEC_PARAM_OUT_LEVEL) sec->params.out_level = params->out_level; if (changed & IEEE802154_LLSEC_PARAM_OUT_KEY) sec->params.out_key = params->out_key; if (changed & IEEE802154_LLSEC_PARAM_KEY_SOURCE) sec->params.default_key_source = params->default_key_source; if (changed & IEEE802154_LLSEC_PARAM_PAN_ID) sec->params.pan_id = params->pan_id; if (changed & IEEE802154_LLSEC_PARAM_HWADDR) sec->params.hwaddr = params->hwaddr; if (changed & IEEE802154_LLSEC_PARAM_COORD_HWADDR) sec->params.coord_hwaddr = params->coord_hwaddr; if (changed & IEEE802154_LLSEC_PARAM_COORD_SHORTADDR) sec->params.coord_shortaddr = params->coord_shortaddr; write_unlock_bh(&sec->lock); return 0; } static struct mac802154_llsec_key* llsec_key_alloc(const struct ieee802154_llsec_key *template) { const int authsizes[3] = { 4, 8, 16 }; struct mac802154_llsec_key *key; int i; key = kzalloc(sizeof(*key), GFP_KERNEL); if (!key) return NULL; kref_init(&key->ref); key->key = *template; BUILD_BUG_ON(ARRAY_SIZE(authsizes) != ARRAY_SIZE(key->tfm)); for (i = 0; i < ARRAY_SIZE(key->tfm); i++) { key->tfm[i] = crypto_alloc_aead("ccm(aes)", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(key->tfm[i])) goto err_tfm; if (crypto_aead_setkey(key->tfm[i], template->key, IEEE802154_LLSEC_KEY_SIZE)) goto err_tfm; if 
(crypto_aead_setauthsize(key->tfm[i], authsizes[i])) goto err_tfm; } key->tfm0 = crypto_alloc_blkcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(key->tfm0)) goto err_tfm; if (crypto_blkcipher_setkey(key->tfm0, template->key, IEEE802154_LLSEC_KEY_SIZE)) goto err_tfm0; return key; err_tfm0: crypto_free_blkcipher(key->tfm0); err_tfm: for (i = 0; i < ARRAY_SIZE(key->tfm); i++) if (key->tfm[i]) crypto_free_aead(key->tfm[i]); kfree(key); return NULL; } static void llsec_key_release(struct kref *ref) { struct mac802154_llsec_key *key; int i; key = container_of(ref, struct mac802154_llsec_key, ref); for (i = 0; i < ARRAY_SIZE(key->tfm); i++) crypto_free_aead(key->tfm[i]); crypto_free_blkcipher(key->tfm0); kfree(key); } static struct mac802154_llsec_key* llsec_key_get(struct mac802154_llsec_key *key) { kref_get(&key->ref); return key; } static void llsec_key_put(struct mac802154_llsec_key *key) { kref_put(&key->ref, llsec_key_release); } static bool llsec_key_id_equal(const struct ieee802154_llsec_key_id *a, const struct ieee802154_llsec_key_id *b) { if (a->mode != b->mode) return false; if (a->mode == IEEE802154_SCF_KEY_IMPLICIT) return ieee802154_addr_equal(&a->device_addr, &b->device_addr); if (a->id != b->id) return false; switch (a->mode) { case IEEE802154_SCF_KEY_INDEX: return true; case IEEE802154_SCF_KEY_SHORT_INDEX: return a->short_source == b->short_source; case IEEE802154_SCF_KEY_HW_INDEX: return a->extended_source == b->extended_source; } return false; } int mac802154_llsec_key_add(struct mac802154_llsec *sec, const struct ieee802154_llsec_key_id *id, const struct ieee802154_llsec_key *key) { struct mac802154_llsec_key *mkey = NULL; struct ieee802154_llsec_key_entry *pos, *new; if (!(key->frame_types & (1 << IEEE802154_FC_TYPE_MAC_CMD)) && key->cmd_frame_ids) return -EINVAL; list_for_each_entry(pos, &sec->table.keys, list) { if (llsec_key_id_equal(&pos->id, id)) return -EEXIST; if (memcmp(pos->key->key, key->key, IEEE802154_LLSEC_KEY_SIZE)) continue; mkey = 
container_of(pos->key, struct mac802154_llsec_key, key); /* Don't allow multiple instances of the same AES key to have * different allowed frame types/command frame ids, as this is * not possible in the 802.15.4 PIB. */ if (pos->key->frame_types != key->frame_types || pos->key->cmd_frame_ids != key->cmd_frame_ids) return -EEXIST; break; } new = kzalloc(sizeof(*new), GFP_KERNEL); if (!new) return -ENOMEM; if (!mkey) mkey = llsec_key_alloc(key); else mkey = llsec_key_get(mkey); if (!mkey) goto fail; new->id = *id; new->key = &mkey->key; list_add_rcu(&new->list, &sec->table.keys); return 0; fail: kfree(new); return -ENOMEM; } int mac802154_llsec_key_del(struct mac802154_llsec *sec, const struct ieee802154_llsec_key_id *key) { struct ieee802154_llsec_key_entry *pos; list_for_each_entry(pos, &sec->table.keys, list) { struct mac802154_llsec_key *mkey; mkey = container_of(pos->key, struct mac802154_llsec_key, key); if (llsec_key_id_equal(&pos->id, key)) { list_del_rcu(&pos->list); llsec_key_put(mkey); return 0; } } return -ENOENT; } static bool llsec_dev_use_shortaddr(__le16 short_addr) { return short_addr != cpu_to_le16(IEEE802154_ADDR_UNDEF) && short_addr != cpu_to_le16(0xffff); } static u32 llsec_dev_hash_short(__le16 short_addr, __le16 pan_id) { return ((__force u16)short_addr) << 16 | (__force u16)pan_id; } static u64 llsec_dev_hash_long(__le64 hwaddr) { return (__force u64)hwaddr; } static struct mac802154_llsec_device* llsec_dev_find_short(struct mac802154_llsec *sec, __le16 short_addr, __le16 pan_id) { struct mac802154_llsec_device *dev; u32 key = llsec_dev_hash_short(short_addr, pan_id); hash_for_each_possible_rcu(sec->devices_short, dev, bucket_s, key) { if (dev->dev.short_addr == short_addr && dev->dev.pan_id == pan_id) return dev; } return NULL; } static struct mac802154_llsec_device* llsec_dev_find_long(struct mac802154_llsec *sec, __le64 hwaddr) { struct mac802154_llsec_device *dev; u64 key = llsec_dev_hash_long(hwaddr); 
hash_for_each_possible_rcu(sec->devices_hw, dev, bucket_hw, key) { if (dev->dev.hwaddr == hwaddr) return dev; } return NULL; } static void llsec_dev_free(struct mac802154_llsec_device *dev) { struct ieee802154_llsec_device_key *pos, *pn; struct mac802154_llsec_device_key *devkey; list_for_each_entry_safe(pos, pn, &dev->dev.keys, list) { devkey = container_of(pos, struct mac802154_llsec_device_key, devkey); list_del(&pos->list); kfree(devkey); } kfree(dev); } int mac802154_llsec_dev_add(struct mac802154_llsec *sec, const struct ieee802154_llsec_device *dev) { struct mac802154_llsec_device *entry; u32 skey = llsec_dev_hash_short(dev->short_addr, dev->pan_id); u64 hwkey = llsec_dev_hash_long(dev->hwaddr); BUILD_BUG_ON(sizeof(hwkey) != IEEE802154_ADDR_LEN); if ((llsec_dev_use_shortaddr(dev->short_addr) && llsec_dev_find_short(sec, dev->short_addr, dev->pan_id)) || llsec_dev_find_long(sec, dev->hwaddr)) return -EEXIST; entry = kmalloc(sizeof(*entry), GFP_KERNEL); if (!entry) return -ENOMEM; entry->dev = *dev; spin_lock_init(&entry->lock); INIT_LIST_HEAD(&entry->dev.keys); if (llsec_dev_use_shortaddr(dev->short_addr)) hash_add_rcu(sec->devices_short, &entry->bucket_s, skey); else INIT_HLIST_NODE(&entry->bucket_s); hash_add_rcu(sec->devices_hw, &entry->bucket_hw, hwkey); list_add_tail_rcu(&entry->dev.list, &sec->table.devices); return 0; } static void llsec_dev_free_rcu(struct rcu_head *rcu) { llsec_dev_free(container_of(rcu, struct mac802154_llsec_device, rcu)); } int mac802154_llsec_dev_del(struct mac802154_llsec *sec, __le64 device_addr) { struct mac802154_llsec_device *pos; pos = llsec_dev_find_long(sec, device_addr); if (!pos) return -ENOENT; hash_del_rcu(&pos->bucket_s); hash_del_rcu(&pos->bucket_hw); call_rcu(&pos->rcu, llsec_dev_free_rcu); return 0; } static struct mac802154_llsec_device_key* llsec_devkey_find(struct mac802154_llsec_device *dev, const struct ieee802154_llsec_key_id *key) { struct ieee802154_llsec_device_key *devkey; list_for_each_entry_rcu(devkey, 
&dev->dev.keys, list) { if (!llsec_key_id_equal(key, &devkey->key_id)) continue; return container_of(devkey, struct mac802154_llsec_device_key, devkey); } return NULL; } int mac802154_llsec_devkey_add(struct mac802154_llsec *sec, __le64 dev_addr, const struct ieee802154_llsec_device_key *key) { struct mac802154_llsec_device *dev; struct mac802154_llsec_device_key *devkey; dev = llsec_dev_find_long(sec, dev_addr); if (!dev) return -ENOENT; if (llsec_devkey_find(dev, &key->key_id)) return -EEXIST; devkey = kmalloc(sizeof(*devkey), GFP_KERNEL); if (!devkey) return -ENOMEM; devkey->devkey = *key; list_add_tail_rcu(&devkey->devkey.list, &dev->dev.keys); return 0; } int mac802154_llsec_devkey_del(struct mac802154_llsec *sec, __le64 dev_addr, const struct ieee802154_llsec_device_key *key) { struct mac802154_llsec_device *dev; struct mac802154_llsec_device_key *devkey; dev = llsec_dev_find_long(sec, dev_addr); if (!dev) return -ENOENT; devkey = llsec_devkey_find(dev, &key->key_id); if (!devkey) return -ENOENT; list_del_rcu(&devkey->devkey.list); kfree_rcu(devkey, rcu); return 0; } static struct mac802154_llsec_seclevel* llsec_find_seclevel(const struct mac802154_llsec *sec, const struct ieee802154_llsec_seclevel *sl) { struct ieee802154_llsec_seclevel *pos; list_for_each_entry(pos, &sec->table.security_levels, list) { if (pos->frame_type != sl->frame_type || (pos->frame_type == IEEE802154_FC_TYPE_MAC_CMD && pos->cmd_frame_id != sl->cmd_frame_id) || pos->device_override != sl->device_override || pos->sec_levels != sl->sec_levels) continue; return container_of(pos, struct mac802154_llsec_seclevel, level); } return NULL; } int mac802154_llsec_seclevel_add(struct mac802154_llsec *sec, const struct ieee802154_llsec_seclevel *sl) { struct mac802154_llsec_seclevel *entry; if (llsec_find_seclevel(sec, sl)) return -EEXIST; entry = kmalloc(sizeof(*entry), GFP_KERNEL); if (!entry) return -ENOMEM; entry->level = *sl; list_add_tail_rcu(&entry->level.list, &sec->table.security_levels); 
return 0; } int mac802154_llsec_seclevel_del(struct mac802154_llsec *sec, const struct ieee802154_llsec_seclevel *sl) { struct mac802154_llsec_seclevel *pos; pos = llsec_find_seclevel(sec, sl); if (!pos) return -ENOENT; list_del_rcu(&pos->level.list); kfree_rcu(pos, rcu); return 0; } static int llsec_recover_addr(struct mac802154_llsec *sec, struct ieee802154_addr *addr) { __le16 caddr = sec->params.coord_shortaddr; addr->pan_id = sec->params.pan_id; if (caddr == cpu_to_le16(IEEE802154_ADDR_BROADCAST)) { return -EINVAL; } else if (caddr == cpu_to_le16(IEEE802154_ADDR_UNDEF)) { addr->extended_addr = sec->params.coord_hwaddr; addr->mode = IEEE802154_ADDR_LONG; } else { addr->short_addr = sec->params.coord_shortaddr; addr->mode = IEEE802154_ADDR_SHORT; } return 0; } static struct mac802154_llsec_key* llsec_lookup_key(struct mac802154_llsec *sec, const struct ieee802154_hdr *hdr, const struct ieee802154_addr *addr, struct ieee802154_llsec_key_id *key_id) { struct ieee802154_addr devaddr = *addr; u8 key_id_mode = hdr->sec.key_id_mode; struct ieee802154_llsec_key_entry *key_entry; struct mac802154_llsec_key *key; if (key_id_mode == IEEE802154_SCF_KEY_IMPLICIT && devaddr.mode == IEEE802154_ADDR_NONE) { if (hdr->fc.type == IEEE802154_FC_TYPE_BEACON) { devaddr.extended_addr = sec->params.coord_hwaddr; devaddr.mode = IEEE802154_ADDR_LONG; } else if (llsec_recover_addr(sec, &devaddr) < 0) { return NULL; } } list_for_each_entry_rcu(key_entry, &sec->table.keys, list) { const struct ieee802154_llsec_key_id *id = &key_entry->id; if (!(key_entry->key->frame_types & BIT(hdr->fc.type))) continue; if (id->mode != key_id_mode) continue; if (key_id_mode == IEEE802154_SCF_KEY_IMPLICIT) { if (ieee802154_addr_equal(&devaddr, &id->device_addr)) goto found; } else { if (id->id != hdr->sec.key_id) continue; if ((key_id_mode == IEEE802154_SCF_KEY_INDEX) || (key_id_mode == IEEE802154_SCF_KEY_SHORT_INDEX && id->short_source == hdr->sec.short_src) || (key_id_mode == IEEE802154_SCF_KEY_HW_INDEX 
&& id->extended_source == hdr->sec.extended_src)) goto found; } } return NULL; found: key = container_of(key_entry->key, struct mac802154_llsec_key, key); if (key_id) *key_id = key_entry->id; return llsec_key_get(key); } static void llsec_geniv(u8 iv[16], __le64 addr, const struct ieee802154_sechdr *sec) { __be64 addr_bytes = (__force __be64) swab64((__force u64) addr); __be32 frame_counter = (__force __be32) swab32((__force u32) sec->frame_counter); iv[0] = 1; /* L' = L - 1 = 1 */ memcpy(iv + 1, &addr_bytes, sizeof(addr_bytes)); memcpy(iv + 9, &frame_counter, sizeof(frame_counter)); iv[13] = sec->level; iv[14] = 0; iv[15] = 1; } static int llsec_do_encrypt_unauth(struct sk_buff *skb, const struct mac802154_llsec *sec, const struct ieee802154_hdr *hdr, struct mac802154_llsec_key *key) { u8 iv[16]; struct scatterlist src; struct blkcipher_desc req = { .tfm = key->tfm0, .info = iv, .flags = 0, }; llsec_geniv(iv, sec->params.hwaddr, &hdr->sec); sg_init_one(&src, skb->data, skb->len); return crypto_blkcipher_encrypt_iv(&req, &src, &src, skb->len); } static struct crypto_aead* llsec_tfm_by_len(struct mac802154_llsec_key *key, int authlen) { int i; for (i = 0; i < ARRAY_SIZE(key->tfm); i++) if (crypto_aead_authsize(key->tfm[i]) == authlen) return key->tfm[i]; BUG(); } static int llsec_do_encrypt_auth(struct sk_buff *skb, const struct mac802154_llsec *sec, const struct ieee802154_hdr *hdr, struct mac802154_llsec_key *key) { u8 iv[16]; unsigned char *data; int authlen, assoclen, datalen, rc; struct scatterlist src, assoc[2], dst[2]; struct aead_request *req; authlen = ieee802154_sechdr_authtag_len(&hdr->sec); llsec_geniv(iv, sec->params.hwaddr, &hdr->sec); req = aead_request_alloc(llsec_tfm_by_len(key, authlen), GFP_ATOMIC); if (!req) return -ENOMEM; sg_init_table(assoc, 2); sg_set_buf(&assoc[0], skb_mac_header(skb), skb->mac_len); assoclen = skb->mac_len; data = skb_mac_header(skb) + skb->mac_len; datalen = skb_tail_pointer(skb) - data; if (hdr->sec.level & 
IEEE802154_SCF_SECLEVEL_ENC) { sg_set_buf(&assoc[1], data, 0); } else { sg_set_buf(&assoc[1], data, datalen); assoclen += datalen; datalen = 0; } sg_init_one(&src, data, datalen); sg_init_table(dst, 2); sg_set_buf(&dst[0], data, datalen); sg_set_buf(&dst[1], skb_put(skb, authlen), authlen); aead_request_set_callback(req, 0, NULL, NULL); aead_request_set_assoc(req, assoc, assoclen); aead_request_set_crypt(req, &src, dst, datalen, iv); rc = crypto_aead_encrypt(req); kfree(req); return rc; } static int llsec_do_encrypt(struct sk_buff *skb, const struct mac802154_llsec *sec, const struct ieee802154_hdr *hdr, struct mac802154_llsec_key *key) { if (hdr->sec.level == IEEE802154_SCF_SECLEVEL_ENC) return llsec_do_encrypt_unauth(skb, sec, hdr, key); else return llsec_do_encrypt_auth(skb, sec, hdr, key); } int mac802154_llsec_encrypt(struct mac802154_llsec *sec, struct sk_buff *skb) { struct ieee802154_hdr hdr; int rc, authlen, hlen; struct mac802154_llsec_key *key; u32 frame_ctr; hlen = ieee802154_hdr_pull(skb, &hdr); if (hlen < 0 || hdr.fc.type != IEEE802154_FC_TYPE_DATA) return -EINVAL; if (!hdr.fc.security_enabled || hdr.sec.level == 0) { skb_push(skb, hlen); return 0; } authlen = ieee802154_sechdr_authtag_len(&hdr.sec); if (skb->len + hlen + authlen + IEEE802154_MFR_SIZE > IEEE802154_MTU) return -EMSGSIZE; rcu_read_lock(); read_lock_bh(&sec->lock); if (!sec->params.enabled) { rc = -EINVAL; goto fail_read; } key = llsec_lookup_key(sec, &hdr, &hdr.dest, NULL); if (!key) { rc = -ENOKEY; goto fail_read; } read_unlock_bh(&sec->lock); write_lock_bh(&sec->lock); frame_ctr = be32_to_cpu(sec->params.frame_counter); hdr.sec.frame_counter = cpu_to_le32(frame_ctr); if (frame_ctr == 0xFFFFFFFF) { write_unlock_bh(&sec->lock); llsec_key_put(key); rc = -EOVERFLOW; goto fail; } sec->params.frame_counter = cpu_to_be32(frame_ctr + 1); write_unlock_bh(&sec->lock); rcu_read_unlock(); skb->mac_len = ieee802154_hdr_push(skb, &hdr); skb_reset_mac_header(skb); rc = llsec_do_encrypt(skb, sec, 
&hdr, key); llsec_key_put(key); return rc; fail_read: read_unlock_bh(&sec->lock); fail: rcu_read_unlock(); return rc; } static struct mac802154_llsec_device* llsec_lookup_dev(struct mac802154_llsec *sec, const struct ieee802154_addr *addr) { struct ieee802154_addr devaddr = *addr; struct mac802154_llsec_device *dev = NULL; if (devaddr.mode == IEEE802154_ADDR_NONE && llsec_recover_addr(sec, &devaddr) < 0) return NULL; if (devaddr.mode == IEEE802154_ADDR_SHORT) { u32 key = llsec_dev_hash_short(devaddr.short_addr, devaddr.pan_id); hash_for_each_possible_rcu(sec->devices_short, dev, bucket_s, key) { if (dev->dev.pan_id == devaddr.pan_id && dev->dev.short_addr == devaddr.short_addr) return dev; } } else { u64 key = llsec_dev_hash_long(devaddr.extended_addr); hash_for_each_possible_rcu(sec->devices_hw, dev, bucket_hw, key) { if (dev->dev.hwaddr == devaddr.extended_addr) return dev; } } return NULL; } static int llsec_lookup_seclevel(const struct mac802154_llsec *sec, u8 frame_type, u8 cmd_frame_id, struct ieee802154_llsec_seclevel *rlevel) { struct ieee802154_llsec_seclevel *level; list_for_each_entry_rcu(level, &sec->table.security_levels, list) { if (level->frame_type == frame_type && (frame_type != IEEE802154_FC_TYPE_MAC_CMD || level->cmd_frame_id == cmd_frame_id)) { *rlevel = *level; return 0; } } return -EINVAL; } static int llsec_do_decrypt_unauth(struct sk_buff *skb, const struct mac802154_llsec *sec, const struct ieee802154_hdr *hdr, struct mac802154_llsec_key *key, __le64 dev_addr) { u8 iv[16]; unsigned char *data; int datalen; struct scatterlist src; struct blkcipher_desc req = { .tfm = key->tfm0, .info = iv, .flags = 0, }; llsec_geniv(iv, dev_addr, &hdr->sec); data = skb_mac_header(skb) + skb->mac_len; datalen = skb_tail_pointer(skb) - data; sg_init_one(&src, data, datalen); return crypto_blkcipher_decrypt_iv(&req, &src, &src, datalen); } static int llsec_do_decrypt_auth(struct sk_buff *skb, const struct mac802154_llsec *sec, const struct ieee802154_hdr *hdr, 
struct mac802154_llsec_key *key, __le64 dev_addr) { u8 iv[16]; unsigned char *data; int authlen, datalen, assoclen, rc; struct scatterlist src, assoc[2]; struct aead_request *req; authlen = ieee802154_sechdr_authtag_len(&hdr->sec); llsec_geniv(iv, dev_addr, &hdr->sec); req = aead_request_alloc(llsec_tfm_by_len(key, authlen), GFP_ATOMIC); if (!req) return -ENOMEM; sg_init_table(assoc, 2); sg_set_buf(&assoc[0], skb_mac_header(skb), skb->mac_len); assoclen = skb->mac_len; data = skb_mac_header(skb) + skb->mac_len; datalen = skb_tail_pointer(skb) - data; if (hdr->sec.level & IEEE802154_SCF_SECLEVEL_ENC) { sg_set_buf(&assoc[1], data, 0); } else { sg_set_buf(&assoc[1], data, datalen - authlen); assoclen += datalen - authlen; data += datalen - authlen; datalen = authlen; } sg_init_one(&src, data, datalen); aead_request_set_callback(req, 0, NULL, NULL); aead_request_set_assoc(req, assoc, assoclen); aead_request_set_crypt(req, &src, &src, datalen, iv); rc = crypto_aead_decrypt(req); kfree(req); skb_trim(skb, skb->len - authlen); return rc; } static int llsec_do_decrypt(struct sk_buff *skb, const struct mac802154_llsec *sec, const struct ieee802154_hdr *hdr, struct mac802154_llsec_key *key, __le64 dev_addr) { if (hdr->sec.level == IEEE802154_SCF_SECLEVEL_ENC) return llsec_do_decrypt_unauth(skb, sec, hdr, key, dev_addr); else return llsec_do_decrypt_auth(skb, sec, hdr, key, dev_addr); } static int llsec_update_devkey_record(struct mac802154_llsec_device *dev, const struct ieee802154_llsec_key_id *in_key) { struct mac802154_llsec_device_key *devkey; devkey = llsec_devkey_find(dev, in_key); if (!devkey) { struct mac802154_llsec_device_key *next; next = kzalloc(sizeof(*devkey), GFP_ATOMIC); if (!next) return -ENOMEM; next->devkey.key_id = *in_key; spin_lock_bh(&dev->lock); devkey = llsec_devkey_find(dev, in_key); if (!devkey) list_add_rcu(&next->devkey.list, &dev->dev.keys); else kfree(next); spin_unlock_bh(&dev->lock); } return 0; } static int llsec_update_devkey_info(struct 
mac802154_llsec_device *dev, const struct ieee802154_llsec_key_id *in_key, u32 frame_counter) { struct mac802154_llsec_device_key *devkey = NULL; if (dev->dev.key_mode == IEEE802154_LLSEC_DEVKEY_RESTRICT) { devkey = llsec_devkey_find(dev, in_key); if (!devkey) return -ENOENT; } if (dev->dev.key_mode == IEEE802154_LLSEC_DEVKEY_RECORD) { int rc = llsec_update_devkey_record(dev, in_key); if (rc < 0) return rc; } spin_lock_bh(&dev->lock); if ((!devkey && frame_counter < dev->dev.frame_counter) || (devkey && frame_counter < devkey->devkey.frame_counter)) { spin_unlock_bh(&dev->lock); return -EINVAL; } if (devkey) devkey->devkey.frame_counter = frame_counter + 1; else dev->dev.frame_counter = frame_counter + 1; spin_unlock_bh(&dev->lock); return 0; } int mac802154_llsec_decrypt(struct mac802154_llsec *sec, struct sk_buff *skb) { struct ieee802154_hdr hdr; struct mac802154_llsec_key *key; struct ieee802154_llsec_key_id key_id; struct mac802154_llsec_device *dev; struct ieee802154_llsec_seclevel seclevel; int err; __le64 dev_addr; u32 frame_ctr; if (ieee802154_hdr_peek(skb, &hdr) < 0) return -EINVAL; if (!hdr.fc.security_enabled) return 0; if (hdr.fc.version == 0) return -EINVAL; read_lock_bh(&sec->lock); if (!sec->params.enabled) { read_unlock_bh(&sec->lock); return -EINVAL; } read_unlock_bh(&sec->lock); rcu_read_lock(); key = llsec_lookup_key(sec, &hdr, &hdr.source, &key_id); if (!key) { err = -ENOKEY; goto fail; } dev = llsec_lookup_dev(sec, &hdr.source); if (!dev) { err = -EINVAL; goto fail_dev; } if (llsec_lookup_seclevel(sec, hdr.fc.type, 0, &seclevel) < 0) { err = -EINVAL; goto fail_dev; } if (!(seclevel.sec_levels & BIT(hdr.sec.level)) && (hdr.sec.level == 0 && seclevel.device_override && !dev->dev.seclevel_exempt)) { err = -EINVAL; goto fail_dev; } frame_ctr = le32_to_cpu(hdr.sec.frame_counter); if (frame_ctr == 0xffffffff) { err = -EOVERFLOW; goto fail_dev; } err = llsec_update_devkey_info(dev, &key_id, frame_ctr); if (err) goto fail_dev; dev_addr = 
dev->dev.hwaddr; rcu_read_unlock(); err = llsec_do_decrypt(skb, sec, &hdr, key, dev_addr); llsec_key_put(key); return err; fail_dev: llsec_key_put(key); fail: rcu_read_unlock(); return err; }
gpl-2.0
markyzq/kernel-drm-rockchip
drivers/media/dvb-frontends/cx22700.c
450
11120
/* Conexant cx22700 DVB OFDM demodulator driver Copyright (C) 2001-2002 Convergence Integrated Media GmbH Holger Waechtler <holger@convergence.de> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/string.h> #include <linux/slab.h> #include "dvb_frontend.h" #include "cx22700.h" struct cx22700_state { struct i2c_adapter* i2c; const struct cx22700_config* config; struct dvb_frontend frontend; }; static int debug; #define dprintk(args...) \ do { \ if (debug) printk(KERN_DEBUG "cx22700: " args); \ } while (0) static u8 init_tab [] = { 0x04, 0x10, 0x05, 0x09, 0x06, 0x00, 0x08, 0x04, 0x09, 0x00, 0x0a, 0x01, 0x15, 0x40, 0x16, 0x10, 0x17, 0x87, 0x18, 0x17, 0x1a, 0x10, 0x25, 0x04, 0x2e, 0x00, 0x39, 0x00, 0x3a, 0x04, 0x45, 0x08, 0x46, 0x02, 0x47, 0x05, }; static int cx22700_writereg (struct cx22700_state* state, u8 reg, u8 data) { int ret; u8 buf [] = { reg, data }; struct i2c_msg msg = { .addr = state->config->demod_address, .flags = 0, .buf = buf, .len = 2 }; dprintk ("%s\n", __func__); ret = i2c_transfer (state->i2c, &msg, 1); if (ret != 1) printk("%s: writereg error (reg == 0x%02x, val == 0x%02x, ret == %i)\n", __func__, reg, data, ret); return (ret != 1) ? 
-1 : 0; } static int cx22700_readreg (struct cx22700_state* state, u8 reg) { int ret; u8 b0 [] = { reg }; u8 b1 [] = { 0 }; struct i2c_msg msg [] = { { .addr = state->config->demod_address, .flags = 0, .buf = b0, .len = 1 }, { .addr = state->config->demod_address, .flags = I2C_M_RD, .buf = b1, .len = 1 } }; dprintk ("%s\n", __func__); ret = i2c_transfer (state->i2c, msg, 2); if (ret != 2) return -EIO; return b1[0]; } static int cx22700_set_inversion (struct cx22700_state* state, int inversion) { u8 val; dprintk ("%s\n", __func__); switch (inversion) { case INVERSION_AUTO: return -EOPNOTSUPP; case INVERSION_ON: val = cx22700_readreg (state, 0x09); return cx22700_writereg (state, 0x09, val | 0x01); case INVERSION_OFF: val = cx22700_readreg (state, 0x09); return cx22700_writereg (state, 0x09, val & 0xfe); default: return -EINVAL; } } static int cx22700_set_tps(struct cx22700_state *state, struct dtv_frontend_properties *p) { static const u8 qam_tab [4] = { 0, 1, 0, 2 }; static const u8 fec_tab [6] = { 0, 1, 2, 0, 3, 4 }; u8 val; dprintk ("%s\n", __func__); if (p->code_rate_HP < FEC_1_2 || p->code_rate_HP > FEC_7_8) return -EINVAL; if (p->code_rate_LP < FEC_1_2 || p->code_rate_LP > FEC_7_8) return -EINVAL; if (p->code_rate_HP == FEC_4_5 || p->code_rate_LP == FEC_4_5) return -EINVAL; if ((int)p->guard_interval < GUARD_INTERVAL_1_32 || p->guard_interval > GUARD_INTERVAL_1_4) return -EINVAL; if (p->transmission_mode != TRANSMISSION_MODE_2K && p->transmission_mode != TRANSMISSION_MODE_8K) return -EINVAL; if (p->modulation != QPSK && p->modulation != QAM_16 && p->modulation != QAM_64) return -EINVAL; if ((int)p->hierarchy < HIERARCHY_NONE || p->hierarchy > HIERARCHY_4) return -EINVAL; if (p->bandwidth_hz > 8000000 || p->bandwidth_hz < 6000000) return -EINVAL; if (p->bandwidth_hz == 7000000) cx22700_writereg (state, 0x09, cx22700_readreg (state, 0x09 | 0x10)); else cx22700_writereg (state, 0x09, cx22700_readreg (state, 0x09 & ~0x10)); val = qam_tab[p->modulation - QPSK]; val 
|= p->hierarchy - HIERARCHY_NONE; cx22700_writereg (state, 0x04, val); if (p->code_rate_HP - FEC_1_2 >= sizeof(fec_tab) || p->code_rate_LP - FEC_1_2 >= sizeof(fec_tab)) return -EINVAL; val = fec_tab[p->code_rate_HP - FEC_1_2] << 3; val |= fec_tab[p->code_rate_LP - FEC_1_2]; cx22700_writereg (state, 0x05, val); val = (p->guard_interval - GUARD_INTERVAL_1_32) << 2; val |= p->transmission_mode - TRANSMISSION_MODE_2K; cx22700_writereg (state, 0x06, val); cx22700_writereg (state, 0x08, 0x04 | 0x02); /* use user tps parameters */ cx22700_writereg (state, 0x08, 0x04); /* restart acquisition */ return 0; } static int cx22700_get_tps(struct cx22700_state *state, struct dtv_frontend_properties *p) { static const fe_modulation_t qam_tab [3] = { QPSK, QAM_16, QAM_64 }; static const fe_code_rate_t fec_tab [5] = { FEC_1_2, FEC_2_3, FEC_3_4, FEC_5_6, FEC_7_8 }; u8 val; dprintk ("%s\n", __func__); if (!(cx22700_readreg(state, 0x07) & 0x20)) /* tps valid? */ return -EAGAIN; val = cx22700_readreg (state, 0x01); if ((val & 0x7) > 4) p->hierarchy = HIERARCHY_AUTO; else p->hierarchy = HIERARCHY_NONE + (val & 0x7); if (((val >> 3) & 0x3) > 2) p->modulation = QAM_AUTO; else p->modulation = qam_tab[(val >> 3) & 0x3]; val = cx22700_readreg (state, 0x02); if (((val >> 3) & 0x07) > 4) p->code_rate_HP = FEC_AUTO; else p->code_rate_HP = fec_tab[(val >> 3) & 0x07]; if ((val & 0x07) > 4) p->code_rate_LP = FEC_AUTO; else p->code_rate_LP = fec_tab[val & 0x07]; val = cx22700_readreg (state, 0x03); p->guard_interval = GUARD_INTERVAL_1_32 + ((val >> 6) & 0x3); p->transmission_mode = TRANSMISSION_MODE_2K + ((val >> 5) & 0x1); return 0; } static int cx22700_init (struct dvb_frontend* fe) { struct cx22700_state* state = fe->demodulator_priv; int i; dprintk("cx22700_init: init chip\n"); cx22700_writereg (state, 0x00, 0x02); /* soft reset */ cx22700_writereg (state, 0x00, 0x00); msleep(10); for (i=0; i<sizeof(init_tab); i+=2) cx22700_writereg (state, init_tab[i], init_tab[i+1]); cx22700_writereg (state, 
0x00, 0x01); return 0; } static int cx22700_read_status(struct dvb_frontend* fe, fe_status_t* status) { struct cx22700_state* state = fe->demodulator_priv; u16 rs_ber = (cx22700_readreg (state, 0x0d) << 9) | (cx22700_readreg (state, 0x0e) << 1); u8 sync = cx22700_readreg (state, 0x07); *status = 0; if (rs_ber < 0xff00) *status |= FE_HAS_SIGNAL; if (sync & 0x20) *status |= FE_HAS_CARRIER; if (sync & 0x10) *status |= FE_HAS_VITERBI; if (sync & 0x10) *status |= FE_HAS_SYNC; if (*status == 0x0f) *status |= FE_HAS_LOCK; return 0; } static int cx22700_read_ber(struct dvb_frontend* fe, u32* ber) { struct cx22700_state* state = fe->demodulator_priv; *ber = cx22700_readreg (state, 0x0c) & 0x7f; cx22700_writereg (state, 0x0c, 0x00); return 0; } static int cx22700_read_signal_strength(struct dvb_frontend* fe, u16* signal_strength) { struct cx22700_state* state = fe->demodulator_priv; u16 rs_ber = (cx22700_readreg (state, 0x0d) << 9) | (cx22700_readreg (state, 0x0e) << 1); *signal_strength = ~rs_ber; return 0; } static int cx22700_read_snr(struct dvb_frontend* fe, u16* snr) { struct cx22700_state* state = fe->demodulator_priv; u16 rs_ber = (cx22700_readreg (state, 0x0d) << 9) | (cx22700_readreg (state, 0x0e) << 1); *snr = ~rs_ber; return 0; } static int cx22700_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks) { struct cx22700_state* state = fe->demodulator_priv; *ucblocks = cx22700_readreg (state, 0x0f); cx22700_writereg (state, 0x0f, 0x00); return 0; } static int cx22700_set_frontend(struct dvb_frontend *fe) { struct dtv_frontend_properties *c = &fe->dtv_property_cache; struct cx22700_state* state = fe->demodulator_priv; cx22700_writereg (state, 0x00, 0x02); /* XXX CHECKME: soft reset*/ cx22700_writereg (state, 0x00, 0x00); if (fe->ops.tuner_ops.set_params) { fe->ops.tuner_ops.set_params(fe); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); } cx22700_set_inversion(state, c->inversion); cx22700_set_tps(state, c); cx22700_writereg (state, 0x37, 0x01); /* PAL loop 
filter off */ cx22700_writereg (state, 0x00, 0x01); /* restart acquire */ return 0; } static int cx22700_get_frontend(struct dvb_frontend *fe) { struct dtv_frontend_properties *c = &fe->dtv_property_cache; struct cx22700_state* state = fe->demodulator_priv; u8 reg09 = cx22700_readreg (state, 0x09); c->inversion = reg09 & 0x1 ? INVERSION_ON : INVERSION_OFF; return cx22700_get_tps(state, c); } static int cx22700_i2c_gate_ctrl(struct dvb_frontend* fe, int enable) { struct cx22700_state* state = fe->demodulator_priv; if (enable) { return cx22700_writereg(state, 0x0a, 0x00); } else { return cx22700_writereg(state, 0x0a, 0x01); } } static int cx22700_get_tune_settings(struct dvb_frontend* fe, struct dvb_frontend_tune_settings* fesettings) { fesettings->min_delay_ms = 150; fesettings->step_size = 166667; fesettings->max_drift = 166667*2; return 0; } static void cx22700_release(struct dvb_frontend* fe) { struct cx22700_state* state = fe->demodulator_priv; kfree(state); } static struct dvb_frontend_ops cx22700_ops; struct dvb_frontend* cx22700_attach(const struct cx22700_config* config, struct i2c_adapter* i2c) { struct cx22700_state* state = NULL; /* allocate memory for the internal state */ state = kzalloc(sizeof(struct cx22700_state), GFP_KERNEL); if (state == NULL) goto error; /* setup the state */ state->config = config; state->i2c = i2c; /* check if the demod is there */ if (cx22700_readreg(state, 0x07) < 0) goto error; /* create dvb_frontend */ memcpy(&state->frontend.ops, &cx22700_ops, sizeof(struct dvb_frontend_ops)); state->frontend.demodulator_priv = state; return &state->frontend; error: kfree(state); return NULL; } static struct dvb_frontend_ops cx22700_ops = { .delsys = { SYS_DVBT }, .info = { .name = "Conexant CX22700 DVB-T", .frequency_min = 470000000, .frequency_max = 860000000, .frequency_stepsize = 166667, .caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO | FE_CAN_QPSK | FE_CAN_QAM_16 | 
FE_CAN_QAM_64 | FE_CAN_RECOVER }, .release = cx22700_release, .init = cx22700_init, .i2c_gate_ctrl = cx22700_i2c_gate_ctrl, .set_frontend = cx22700_set_frontend, .get_frontend = cx22700_get_frontend, .get_tune_settings = cx22700_get_tune_settings, .read_status = cx22700_read_status, .read_ber = cx22700_read_ber, .read_signal_strength = cx22700_read_signal_strength, .read_snr = cx22700_read_snr, .read_ucblocks = cx22700_read_ucblocks, }; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off)."); MODULE_DESCRIPTION("Conexant CX22700 DVB-T Demodulator driver"); MODULE_AUTHOR("Holger Waechtler"); MODULE_LICENSE("GPL"); EXPORT_SYMBOL(cx22700_attach);
gpl-2.0
Pafcholini/kernel-msm-3.10
drivers/base/power/wakeup.c
706
24559
/* * drivers/base/power/wakeup.c - System wakeup events framework * * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. * * This file is released under the GPLv2. */ #include <linux/device.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/capability.h> #include <linux/export.h> #include <linux/suspend.h> #include <linux/seq_file.h> #include <linux/debugfs.h> #include <trace/events/power.h> #include "power.h" /* * If set, the suspend/hibernate code will abort transitions to a sleep state * if wakeup events are registered during or immediately before the transition. */ bool events_check_enabled __read_mostly; /* * Combined counters of registered wakeup events and wakeup events in progress. * They need to be modified together atomically, so it's better to use one * atomic variable to hold them both. */ static atomic_t combined_event_count = ATOMIC_INIT(0); #define IN_PROGRESS_BITS (sizeof(int) * 4) #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1) static void split_counters(unsigned int *cnt, unsigned int *inpr) { unsigned int comb = atomic_read(&combined_event_count); *cnt = (comb >> IN_PROGRESS_BITS); *inpr = comb & MAX_IN_PROGRESS; } /* A preserved old value of the events counter. */ static unsigned int saved_count; static DEFINE_SPINLOCK(events_lock); static void pm_wakeup_timer_fn(unsigned long data); static LIST_HEAD(wakeup_sources); static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue); /** * wakeup_source_prepare - Prepare a new wakeup source for initialization. * @ws: Wakeup source to prepare. * @name: Pointer to the name of the new wakeup source. * * Callers must ensure that the @name string won't be freed when @ws is still in * use. */ void wakeup_source_prepare(struct wakeup_source *ws, const char *name) { if (ws) { memset(ws, 0, sizeof(*ws)); ws->name = name; } } EXPORT_SYMBOL_GPL(wakeup_source_prepare); /** * wakeup_source_create - Create a struct wakeup_source object. * @name: Name of the new wakeup source. 
*/ struct wakeup_source *wakeup_source_create(const char *name) { struct wakeup_source *ws; ws = kmalloc(sizeof(*ws), GFP_KERNEL); if (!ws) return NULL; wakeup_source_prepare(ws, name ? kstrdup(name, GFP_KERNEL) : NULL); return ws; } EXPORT_SYMBOL_GPL(wakeup_source_create); /** * wakeup_source_drop - Prepare a struct wakeup_source object for destruction. * @ws: Wakeup source to prepare for destruction. * * Callers must ensure that __pm_stay_awake() or __pm_wakeup_event() will never * be run in parallel with this function for the same wakeup source object. */ void wakeup_source_drop(struct wakeup_source *ws) { if (!ws) return; del_timer_sync(&ws->timer); __pm_relax(ws); } EXPORT_SYMBOL_GPL(wakeup_source_drop); /** * wakeup_source_destroy - Destroy a struct wakeup_source object. * @ws: Wakeup source to destroy. * * Use only for wakeup source objects created with wakeup_source_create(). */ void wakeup_source_destroy(struct wakeup_source *ws) { if (!ws) return; wakeup_source_drop(ws); kfree(ws->name); kfree(ws); } EXPORT_SYMBOL_GPL(wakeup_source_destroy); /** * wakeup_source_add - Add given object to the list of wakeup sources. * @ws: Wakeup source object to add to the list. */ void wakeup_source_add(struct wakeup_source *ws) { unsigned long flags; if (WARN_ON(!ws)) return; spin_lock_init(&ws->lock); setup_timer(&ws->timer, pm_wakeup_timer_fn, (unsigned long)ws); ws->active = false; ws->last_time = ktime_get(); spin_lock_irqsave(&events_lock, flags); list_add_rcu(&ws->entry, &wakeup_sources); spin_unlock_irqrestore(&events_lock, flags); } EXPORT_SYMBOL_GPL(wakeup_source_add); /** * wakeup_source_remove - Remove given object from the wakeup sources list. * @ws: Wakeup source object to remove from the list. 
*/ void wakeup_source_remove(struct wakeup_source *ws) { unsigned long flags; if (WARN_ON(!ws)) return; spin_lock_irqsave(&events_lock, flags); list_del_rcu(&ws->entry); spin_unlock_irqrestore(&events_lock, flags); synchronize_rcu(); } EXPORT_SYMBOL_GPL(wakeup_source_remove); /** * wakeup_source_register - Create wakeup source and add it to the list. * @name: Name of the wakeup source to register. */ struct wakeup_source *wakeup_source_register(const char *name) { struct wakeup_source *ws; ws = wakeup_source_create(name); if (ws) wakeup_source_add(ws); return ws; } EXPORT_SYMBOL_GPL(wakeup_source_register); /** * wakeup_source_unregister - Remove wakeup source from the list and remove it. * @ws: Wakeup source object to unregister. */ void wakeup_source_unregister(struct wakeup_source *ws) { if (ws) { wakeup_source_remove(ws); wakeup_source_destroy(ws); } } EXPORT_SYMBOL_GPL(wakeup_source_unregister); /** * device_wakeup_attach - Attach a wakeup source object to a device object. * @dev: Device to handle. * @ws: Wakeup source object to attach to @dev. * * This causes @dev to be treated as a wakeup device. */ static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws) { spin_lock_irq(&dev->power.lock); if (dev->power.wakeup) { spin_unlock_irq(&dev->power.lock); return -EEXIST; } dev->power.wakeup = ws; spin_unlock_irq(&dev->power.lock); return 0; } /** * device_wakeup_enable - Enable given device to be a wakeup source. * @dev: Device to handle. * * Create a wakeup source object, register it and attach it to @dev. */ int device_wakeup_enable(struct device *dev) { struct wakeup_source *ws; int ret; if (!dev || !dev->power.can_wakeup) return -EINVAL; ws = wakeup_source_register(dev_name(dev)); if (!ws) return -ENOMEM; ret = device_wakeup_attach(dev, ws); if (ret) wakeup_source_unregister(ws); return ret; } EXPORT_SYMBOL_GPL(device_wakeup_enable); /** * device_wakeup_detach - Detach a device's wakeup source object from it. 
* @dev: Device to detach the wakeup source object from. * * After it returns, @dev will not be treated as a wakeup device any more. */ static struct wakeup_source *device_wakeup_detach(struct device *dev) { struct wakeup_source *ws; spin_lock_irq(&dev->power.lock); ws = dev->power.wakeup; dev->power.wakeup = NULL; spin_unlock_irq(&dev->power.lock); return ws; } /** * device_wakeup_disable - Do not regard a device as a wakeup source any more. * @dev: Device to handle. * * Detach the @dev's wakeup source object from it, unregister this wakeup source * object and destroy it. */ int device_wakeup_disable(struct device *dev) { struct wakeup_source *ws; if (!dev || !dev->power.can_wakeup) return -EINVAL; ws = device_wakeup_detach(dev); if (ws) wakeup_source_unregister(ws); return 0; } EXPORT_SYMBOL_GPL(device_wakeup_disable); /** * device_set_wakeup_capable - Set/reset device wakeup capability flag. * @dev: Device to handle. * @capable: Whether or not @dev is capable of waking up the system from sleep. * * If @capable is set, set the @dev's power.can_wakeup flag and add its * wakeup-related attributes to sysfs. Otherwise, unset the @dev's * power.can_wakeup flag and remove its wakeup-related attributes from sysfs. * * This function may sleep and it can't be called from any context where * sleeping is not allowed. */ void device_set_wakeup_capable(struct device *dev, bool capable) { if (!!dev->power.can_wakeup == !!capable) return; if (device_is_registered(dev) && !list_empty(&dev->power.entry)) { if (capable) { if (wakeup_sysfs_add(dev)) return; } else { wakeup_sysfs_remove(dev); } } dev->power.can_wakeup = capable; } EXPORT_SYMBOL_GPL(device_set_wakeup_capable); /** * device_init_wakeup - Device wakeup initialization. * @dev: Device to handle. * @enable: Whether or not to enable @dev as a wakeup device. * * By default, most devices should leave wakeup disabled. 
The exceptions are * devices that everyone expects to be wakeup sources: keyboards, power buttons, * possibly network interfaces, etc. Also, devices that don't generate their * own wakeup requests but merely forward requests from one bus to another * (like PCI bridges) should have wakeup enabled by default. */ int device_init_wakeup(struct device *dev, bool enable) { int ret = 0; if (enable) { device_set_wakeup_capable(dev, true); ret = device_wakeup_enable(dev); } else { device_set_wakeup_capable(dev, false); } return ret; } EXPORT_SYMBOL_GPL(device_init_wakeup); /** * device_set_wakeup_enable - Enable or disable a device to wake up the system. * @dev: Device to handle. */ int device_set_wakeup_enable(struct device *dev, bool enable) { if (!dev || !dev->power.can_wakeup) return -EINVAL; return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev); } EXPORT_SYMBOL_GPL(device_set_wakeup_enable); /* * The functions below use the observation that each wakeup event starts a * period in which the system should not be suspended. The moment this period * will end depends on how the wakeup event is going to be processed after being * detected and all of the possible cases can be divided into two distinct * groups. * * First, a wakeup event may be detected by the same functional unit that will * carry out the entire processing of it and possibly will pass it to user space * for further processing. In that case the functional unit that has detected * the event may later "close" the "no suspend" period associated with it * directly as soon as it has been dealt with. The pair of pm_stay_awake() and * pm_relax(), balanced with each other, is supposed to be used in such * situations. * * Second, a wakeup event may be detected by one functional unit and processed * by another one. 
In that case the unit that has detected it cannot really * "close" the "no suspend" period associated with it, unless it knows in * advance what's going to happen to the event during processing. This * knowledge, however, may not be available to it, so it can simply specify time * to wait before the system can be suspended and pass it as the second * argument of pm_wakeup_event(). * * It is valid to call pm_relax() after pm_wakeup_event(), in which case the * "no suspend" period will be ended either by the pm_relax(), or by the timer * function executed when the timer expires, whichever comes first. */ /** * wakup_source_activate - Mark given wakeup source as active. * @ws: Wakeup source to handle. * * Update the @ws' statistics and, if @ws has just been activated, notify the PM * core of the event by incrementing the counter of of wakeup events being * processed. */ static void wakeup_source_activate(struct wakeup_source *ws) { unsigned int cec; /* * active wakeup source should bring the system * out of PM_SUSPEND_FREEZE state */ freeze_wake(); ws->active = true; ws->active_count++; ws->last_time = ktime_get(); if (ws->autosleep_enabled) ws->start_prevent_time = ws->last_time; /* Increment the counter of events in progress. */ cec = atomic_inc_return(&combined_event_count); trace_wakeup_source_activate(ws->name, cec); } /** * wakeup_source_report_event - Report wakeup event using the given source. * @ws: Wakeup source to report the event for. */ static void wakeup_source_report_event(struct wakeup_source *ws) { ws->event_count++; /* This is racy, but the counter is approximate anyway. */ if (events_check_enabled) ws->wakeup_count++; if (!ws->active) wakeup_source_activate(ws); } /** * __pm_stay_awake - Notify the PM core of a wakeup event. * @ws: Wakeup source object associated with the source of the event. * * It is safe to call this function from interrupt context. 
*/ void __pm_stay_awake(struct wakeup_source *ws) { unsigned long flags; if (!ws) return; spin_lock_irqsave(&ws->lock, flags); wakeup_source_report_event(ws); del_timer(&ws->timer); ws->timer_expires = 0; spin_unlock_irqrestore(&ws->lock, flags); } EXPORT_SYMBOL_GPL(__pm_stay_awake); /** * pm_stay_awake - Notify the PM core that a wakeup event is being processed. * @dev: Device the wakeup event is related to. * * Notify the PM core of a wakeup event (signaled by @dev) by calling * __pm_stay_awake for the @dev's wakeup source object. * * Call this function after detecting of a wakeup event if pm_relax() is going * to be called directly after processing the event (and possibly passing it to * user space for further processing). */ void pm_stay_awake(struct device *dev) { unsigned long flags; if (!dev) return; spin_lock_irqsave(&dev->power.lock, flags); __pm_stay_awake(dev->power.wakeup); spin_unlock_irqrestore(&dev->power.lock, flags); } EXPORT_SYMBOL_GPL(pm_stay_awake); #ifdef CONFIG_PM_AUTOSLEEP static void update_prevent_sleep_time(struct wakeup_source *ws, ktime_t now) { ktime_t delta = ktime_sub(now, ws->start_prevent_time); ws->prevent_sleep_time = ktime_add(ws->prevent_sleep_time, delta); } #else static inline void update_prevent_sleep_time(struct wakeup_source *ws, ktime_t now) {} #endif /** * wakup_source_deactivate - Mark given wakeup source as inactive. * @ws: Wakeup source to handle. * * Update the @ws' statistics and notify the PM core that the wakeup source has * become inactive by decrementing the counter of wakeup events being processed * and incrementing the counter of registered wakeup events. */ static void wakeup_source_deactivate(struct wakeup_source *ws) { unsigned int cnt, inpr, cec; ktime_t duration; ktime_t now; ws->relax_count++; /* * __pm_relax() may be called directly or from a timer function. 
* If it is called directly right after the timer function has been * started, but before the timer function calls __pm_relax(), it is * possible that __pm_stay_awake() will be called in the meantime and * will set ws->active. Then, ws->active may be cleared immediately * by the __pm_relax() called from the timer function, but in such a * case ws->relax_count will be different from ws->active_count. */ if (ws->relax_count != ws->active_count) { ws->relax_count--; return; } ws->active = false; now = ktime_get(); duration = ktime_sub(now, ws->last_time); ws->total_time = ktime_add(ws->total_time, duration); if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time)) ws->max_time = duration; ws->last_time = now; del_timer(&ws->timer); ws->timer_expires = 0; if (ws->autosleep_enabled) update_prevent_sleep_time(ws, now); /* * Increment the counter of registered wakeup events and decrement the * couter of wakeup events in progress simultaneously. */ cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count); trace_wakeup_source_deactivate(ws->name, cec); split_counters(&cnt, &inpr); if (!inpr && waitqueue_active(&wakeup_count_wait_queue)) wake_up(&wakeup_count_wait_queue); } /** * __pm_relax - Notify the PM core that processing of a wakeup event has ended. * @ws: Wakeup source object associated with the source of the event. * * Call this function for wakeup events whose processing started with calling * __pm_stay_awake(). * * It is safe to call it from interrupt context. */ void __pm_relax(struct wakeup_source *ws) { unsigned long flags; if (!ws) return; spin_lock_irqsave(&ws->lock, flags); if (ws->active) wakeup_source_deactivate(ws); spin_unlock_irqrestore(&ws->lock, flags); } EXPORT_SYMBOL_GPL(__pm_relax); /** * pm_relax - Notify the PM core that processing of a wakeup event has ended. * @dev: Device that signaled the event. * * Execute __pm_relax() for the @dev's wakeup source object. 
*/ void pm_relax(struct device *dev) { unsigned long flags; if (!dev) return; spin_lock_irqsave(&dev->power.lock, flags); __pm_relax(dev->power.wakeup); spin_unlock_irqrestore(&dev->power.lock, flags); } EXPORT_SYMBOL_GPL(pm_relax); /** * pm_wakeup_timer_fn - Delayed finalization of a wakeup event. * @data: Address of the wakeup source object associated with the event source. * * Call wakeup_source_deactivate() for the wakeup source whose address is stored * in @data if it is currently active and its timer has not been canceled and * the expiration time of the timer is not in future. */ static void pm_wakeup_timer_fn(unsigned long data) { struct wakeup_source *ws = (struct wakeup_source *)data; unsigned long flags; spin_lock_irqsave(&ws->lock, flags); if (ws->active && ws->timer_expires && time_after_eq(jiffies, ws->timer_expires)) { wakeup_source_deactivate(ws); ws->expire_count++; } spin_unlock_irqrestore(&ws->lock, flags); } /** * __pm_wakeup_event - Notify the PM core of a wakeup event. * @ws: Wakeup source object associated with the event source. * @msec: Anticipated event processing time (in milliseconds). * * Notify the PM core of a wakeup event whose source is @ws that will take * approximately @msec milliseconds to be processed by the kernel. If @ws is * not active, activate it. If @msec is nonzero, set up the @ws' timer to * execute pm_wakeup_timer_fn() in future. * * It is safe to call this function from interrupt context. 
*/ void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec) { unsigned long flags; unsigned long expires; if (!ws) return; spin_lock_irqsave(&ws->lock, flags); wakeup_source_report_event(ws); if (!msec) { wakeup_source_deactivate(ws); goto unlock; } expires = jiffies + msecs_to_jiffies(msec); if (!expires) expires = 1; if (!ws->timer_expires || time_after(expires, ws->timer_expires)) { mod_timer(&ws->timer, expires); ws->timer_expires = expires; } unlock: spin_unlock_irqrestore(&ws->lock, flags); } EXPORT_SYMBOL_GPL(__pm_wakeup_event); /** * pm_wakeup_event - Notify the PM core of a wakeup event. * @dev: Device the wakeup event is related to. * @msec: Anticipated event processing time (in milliseconds). * * Call __pm_wakeup_event() for the @dev's wakeup source object. */ void pm_wakeup_event(struct device *dev, unsigned int msec) { unsigned long flags; if (!dev) return; spin_lock_irqsave(&dev->power.lock, flags); __pm_wakeup_event(dev->power.wakeup, msec); spin_unlock_irqrestore(&dev->power.lock, flags); } EXPORT_SYMBOL_GPL(pm_wakeup_event); static void print_active_wakeup_sources(void) { struct wakeup_source *ws; int active = 0; struct wakeup_source *last_activity_ws = NULL; rcu_read_lock(); list_for_each_entry_rcu(ws, &wakeup_sources, entry) { if (ws->active) { pr_info("active wakeup source: %s\n", ws->name); active = 1; } else if (!active && (!last_activity_ws || ktime_to_ns(ws->last_time) > ktime_to_ns(last_activity_ws->last_time))) { last_activity_ws = ws; } } if (!active && last_activity_ws) pr_info("last active wakeup source: %s\n", last_activity_ws->name); rcu_read_unlock(); } /** * pm_wakeup_pending - Check if power transition in progress should be aborted. * * Compare the current number of registered wakeup events with its preserved * value from the past and return true if new wakeup events have been registered * since the old value was stored. Also return true if the current number of * wakeup events being processed is different from zero. 
*/ bool pm_wakeup_pending(void) { unsigned long flags; bool ret = false; spin_lock_irqsave(&events_lock, flags); if (events_check_enabled) { unsigned int cnt, inpr; split_counters(&cnt, &inpr); ret = (cnt != saved_count || inpr > 0); events_check_enabled = !ret; } spin_unlock_irqrestore(&events_lock, flags); if (ret) print_active_wakeup_sources(); return ret; } /** * pm_get_wakeup_count - Read the number of registered wakeup events. * @count: Address to store the value at. * @block: Whether or not to block. * * Store the number of registered wakeup events at the address in @count. If * @block is set, block until the current number of wakeup events being * processed is zero. * * Return 'false' if the current number of wakeup events being processed is * nonzero. Otherwise return 'true'. */ bool pm_get_wakeup_count(unsigned int *count, bool block) { unsigned int cnt, inpr; if (block) { DEFINE_WAIT(wait); for (;;) { prepare_to_wait(&wakeup_count_wait_queue, &wait, TASK_INTERRUPTIBLE); split_counters(&cnt, &inpr); if (inpr == 0 || signal_pending(current)) break; schedule(); } finish_wait(&wakeup_count_wait_queue, &wait); } split_counters(&cnt, &inpr); *count = cnt; return !inpr; } /** * pm_save_wakeup_count - Save the current number of registered wakeup events. * @count: Value to compare with the current number of registered wakeup events. * * If @count is equal to the current number of registered wakeup events and the * current number of wakeup events being processed is zero, store @count as the * old number of registered wakeup events for pm_check_wakeup_events(), enable * wakeup events detection and return 'true'. Otherwise disable wakeup events * detection and return 'false'. 
*/ bool pm_save_wakeup_count(unsigned int count) { unsigned int cnt, inpr; unsigned long flags; events_check_enabled = false; spin_lock_irqsave(&events_lock, flags); split_counters(&cnt, &inpr); if (cnt == count && inpr == 0) { saved_count = count; events_check_enabled = true; } spin_unlock_irqrestore(&events_lock, flags); return events_check_enabled; } #ifdef CONFIG_PM_AUTOSLEEP /** * pm_wakep_autosleep_enabled - Modify autosleep_enabled for all wakeup sources. * @enabled: Whether to set or to clear the autosleep_enabled flags. */ void pm_wakep_autosleep_enabled(bool set) { struct wakeup_source *ws; ktime_t now = ktime_get(); rcu_read_lock(); list_for_each_entry_rcu(ws, &wakeup_sources, entry) { spin_lock_irq(&ws->lock); if (ws->autosleep_enabled != set) { ws->autosleep_enabled = set; if (ws->active) { if (set) ws->start_prevent_time = now; else update_prevent_sleep_time(ws, now); } } spin_unlock_irq(&ws->lock); } rcu_read_unlock(); } #endif /* CONFIG_PM_AUTOSLEEP */ static struct dentry *wakeup_sources_stats_dentry; /** * print_wakeup_source_stats - Print wakeup source statistics information. * @m: seq_file to print the statistics into. * @ws: Wakeup source object to print the statistics for. 
*/ static int print_wakeup_source_stats(struct seq_file *m, struct wakeup_source *ws) { unsigned long flags; ktime_t total_time; ktime_t max_time; unsigned long active_count; ktime_t active_time; ktime_t prevent_sleep_time; int ret; spin_lock_irqsave(&ws->lock, flags); total_time = ws->total_time; max_time = ws->max_time; prevent_sleep_time = ws->prevent_sleep_time; active_count = ws->active_count; if (ws->active) { ktime_t now = ktime_get(); active_time = ktime_sub(now, ws->last_time); total_time = ktime_add(total_time, active_time); if (active_time.tv64 > max_time.tv64) max_time = active_time; if (ws->autosleep_enabled) prevent_sleep_time = ktime_add(prevent_sleep_time, ktime_sub(now, ws->start_prevent_time)); } else { active_time = ktime_set(0, 0); } ret = seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t" "%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n", ws->name, active_count, ws->event_count, ws->wakeup_count, ws->expire_count, ktime_to_ms(active_time), ktime_to_ms(total_time), ktime_to_ms(max_time), ktime_to_ms(ws->last_time), ktime_to_ms(prevent_sleep_time)); spin_unlock_irqrestore(&ws->lock, flags); return ret; } /** * wakeup_sources_stats_show - Print wakeup sources statistics information. * @m: seq_file to print the statistics into. 
 */
static int wakeup_sources_stats_show(struct seq_file *m, void *unused)
{
	struct wakeup_source *ws;

	/* Column headers for the per-source statistics rows printed by
	 * print_wakeup_source_stats() below. */
	seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t"
		"expire_count\tactive_since\ttotal_time\tmax_time\t"
		"last_change\tprevent_suspend_time\n");

	/* Walk the wakeup source list under RCU; no reference is held across
	 * iterations, each entry takes its own lock inside the helper. */
	rcu_read_lock();
	list_for_each_entry_rcu(ws, &wakeup_sources, entry)
		print_wakeup_source_stats(m, ws);
	rcu_read_unlock();

	return 0;
}

/* seq_file open hook: single_open() because the table is one logical record. */
static int wakeup_sources_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, wakeup_sources_stats_show, NULL);
}

static const struct file_operations wakeup_sources_stats_fops = {
	.owner = THIS_MODULE,
	.open = wakeup_sources_stats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* Create a read-only "wakeup_sources" statistics file in the debugfs root
 * (conventionally mounted at /sys/kernel/debug). */
static int __init wakeup_sources_debugfs_init(void)
{
	wakeup_sources_stats_dentry = debugfs_create_file("wakeup_sources",
			S_IRUGO, NULL, NULL, &wakeup_sources_stats_fops);
	return 0;
}

postcore_initcall(wakeup_sources_debugfs_init);
gpl-2.0
jumpstarter-io/linux
net/wireless/wext-sme.c
962
9438
/* * cfg80211 wext compat for managed mode. * * Copyright 2009 Johannes Berg <johannes@sipsolutions.net> * Copyright (C) 2009 Intel Corporation. All rights reserved. */ #include <linux/export.h> #include <linux/etherdevice.h> #include <linux/if_arp.h> #include <linux/slab.h> #include <net/cfg80211.h> #include <net/cfg80211-wext.h> #include "wext-compat.h" #include "nl80211.h" int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev) { struct cfg80211_cached_keys *ck = NULL; const u8 *prev_bssid = NULL; int err, i; ASSERT_RTNL(); ASSERT_WDEV_LOCK(wdev); if (!netif_running(wdev->netdev)) return 0; wdev->wext.connect.ie = wdev->wext.ie; wdev->wext.connect.ie_len = wdev->wext.ie_len; /* Use default background scan period */ wdev->wext.connect.bg_scan_period = -1; if (wdev->wext.keys) { wdev->wext.keys->def = wdev->wext.default_key; wdev->wext.keys->defmgmt = wdev->wext.default_mgmt_key; if (wdev->wext.default_key != -1) wdev->wext.connect.privacy = true; } if (!wdev->wext.connect.ssid_len) return 0; if (wdev->wext.keys) { ck = kmemdup(wdev->wext.keys, sizeof(*ck), GFP_KERNEL); if (!ck) return -ENOMEM; for (i = 0; i < 6; i++) ck->params[i].key = ck->data[i]; } if (wdev->wext.prev_bssid_valid) prev_bssid = wdev->wext.prev_bssid; err = cfg80211_connect(rdev, wdev->netdev, &wdev->wext.connect, ck, prev_bssid); if (err) kzfree(ck); return err; } int cfg80211_mgd_wext_siwfreq(struct net_device *dev, struct iw_request_info *info, struct iw_freq *wextfreq, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct ieee80211_channel *chan = NULL; int err, freq; /* call only for station! 
*/ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) return -EINVAL; freq = cfg80211_wext_freq(wextfreq); if (freq < 0) return freq; if (freq) { chan = ieee80211_get_channel(wdev->wiphy, freq); if (!chan) return -EINVAL; if (chan->flags & IEEE80211_CHAN_DISABLED) return -EINVAL; } wdev_lock(wdev); if (wdev->conn) { bool event = true; if (wdev->wext.connect.channel == chan) { err = 0; goto out; } /* if SSID set, we'll try right again, avoid event */ if (wdev->wext.connect.ssid_len) event = false; err = cfg80211_disconnect(rdev, dev, WLAN_REASON_DEAUTH_LEAVING, event); if (err) goto out; } wdev->wext.connect.channel = chan; /* * SSID is not set, we just want to switch monitor channel, * this is really just backward compatibility, if the SSID * is set then we use the channel to select the BSS to use * to connect to instead. If we were connected on another * channel we disconnected above and reconnect below. */ if (chan && !wdev->wext.connect.ssid_len) { struct cfg80211_chan_def chandef = { .width = NL80211_CHAN_WIDTH_20_NOHT, .center_freq1 = freq, }; chandef.chan = ieee80211_get_channel(&rdev->wiphy, freq); if (chandef.chan) err = cfg80211_set_monitor_channel(rdev, &chandef); else err = -EINVAL; goto out; } err = cfg80211_mgd_wext_connect(rdev, wdev); out: wdev_unlock(wdev); return err; } int cfg80211_mgd_wext_giwfreq(struct net_device *dev, struct iw_request_info *info, struct iw_freq *freq, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct ieee80211_channel *chan = NULL; /* call only for station! 
*/ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) return -EINVAL; wdev_lock(wdev); if (wdev->current_bss) chan = wdev->current_bss->pub.channel; else if (wdev->wext.connect.channel) chan = wdev->wext.connect.channel; wdev_unlock(wdev); if (chan) { freq->m = chan->center_freq; freq->e = 6; return 0; } /* no channel if not joining */ return -EINVAL; } int cfg80211_mgd_wext_siwessid(struct net_device *dev, struct iw_request_info *info, struct iw_point *data, char *ssid) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); size_t len = data->length; int err; /* call only for station! */ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) return -EINVAL; if (!data->flags) len = 0; /* iwconfig uses nul termination in SSID.. */ if (len > 0 && ssid[len - 1] == '\0') len--; wdev_lock(wdev); err = 0; if (wdev->conn) { bool event = true; if (wdev->wext.connect.ssid && len && len == wdev->wext.connect.ssid_len && memcmp(wdev->wext.connect.ssid, ssid, len) == 0) goto out; /* if SSID set now, we'll try to connect, avoid event */ if (len) event = false; err = cfg80211_disconnect(rdev, dev, WLAN_REASON_DEAUTH_LEAVING, event); if (err) goto out; } wdev->wext.prev_bssid_valid = false; wdev->wext.connect.ssid = wdev->wext.ssid; memcpy(wdev->wext.ssid, ssid, len); wdev->wext.connect.ssid_len = len; wdev->wext.connect.crypto.control_port = false; wdev->wext.connect.crypto.control_port_ethertype = cpu_to_be16(ETH_P_PAE); err = cfg80211_mgd_wext_connect(rdev, wdev); out: wdev_unlock(wdev); return err; } int cfg80211_mgd_wext_giwessid(struct net_device *dev, struct iw_request_info *info, struct iw_point *data, char *ssid) { struct wireless_dev *wdev = dev->ieee80211_ptr; /* call only for station! 
*/ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) return -EINVAL; data->flags = 0; wdev_lock(wdev); if (wdev->current_bss) { const u8 *ie; rcu_read_lock(); ie = ieee80211_bss_get_ie(&wdev->current_bss->pub, WLAN_EID_SSID); if (ie) { data->flags = 1; data->length = ie[1]; memcpy(ssid, ie + 2, data->length); } rcu_read_unlock(); } else if (wdev->wext.connect.ssid && wdev->wext.connect.ssid_len) { data->flags = 1; data->length = wdev->wext.connect.ssid_len; memcpy(ssid, wdev->wext.connect.ssid, data->length); } wdev_unlock(wdev); return 0; } int cfg80211_mgd_wext_siwap(struct net_device *dev, struct iw_request_info *info, struct sockaddr *ap_addr, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); u8 *bssid = ap_addr->sa_data; int err; /* call only for station! */ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) return -EINVAL; if (ap_addr->sa_family != ARPHRD_ETHER) return -EINVAL; /* automatic mode */ if (is_zero_ether_addr(bssid) || is_broadcast_ether_addr(bssid)) bssid = NULL; wdev_lock(wdev); if (wdev->conn) { err = 0; /* both automatic */ if (!bssid && !wdev->wext.connect.bssid) goto out; /* fixed already - and no change */ if (wdev->wext.connect.bssid && bssid && ether_addr_equal(bssid, wdev->wext.connect.bssid)) goto out; err = cfg80211_disconnect(rdev, dev, WLAN_REASON_DEAUTH_LEAVING, false); if (err) goto out; } if (bssid) { memcpy(wdev->wext.bssid, bssid, ETH_ALEN); wdev->wext.connect.bssid = wdev->wext.bssid; } else wdev->wext.connect.bssid = NULL; err = cfg80211_mgd_wext_connect(rdev, wdev); out: wdev_unlock(wdev); return err; } int cfg80211_mgd_wext_giwap(struct net_device *dev, struct iw_request_info *info, struct sockaddr *ap_addr, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; /* call only for station! 
*/ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) return -EINVAL; ap_addr->sa_family = ARPHRD_ETHER; wdev_lock(wdev); if (wdev->current_bss) memcpy(ap_addr->sa_data, wdev->current_bss->pub.bssid, ETH_ALEN); else eth_zero_addr(ap_addr->sa_data); wdev_unlock(wdev); return 0; } int cfg80211_wext_siwgenie(struct net_device *dev, struct iw_request_info *info, struct iw_point *data, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); u8 *ie = extra; int ie_len = data->length, err; if (wdev->iftype != NL80211_IFTYPE_STATION) return -EOPNOTSUPP; if (!ie_len) ie = NULL; wdev_lock(wdev); /* no change */ err = 0; if (wdev->wext.ie_len == ie_len && memcmp(wdev->wext.ie, ie, ie_len) == 0) goto out; if (ie_len) { ie = kmemdup(extra, ie_len, GFP_KERNEL); if (!ie) { err = -ENOMEM; goto out; } } else ie = NULL; kfree(wdev->wext.ie); wdev->wext.ie = ie; wdev->wext.ie_len = ie_len; if (wdev->conn) { err = cfg80211_disconnect(rdev, dev, WLAN_REASON_DEAUTH_LEAVING, false); if (err) goto out; } /* userspace better not think we'll reconnect */ err = 0; out: wdev_unlock(wdev); return err; } int cfg80211_wext_siwmlme(struct net_device *dev, struct iw_request_info *info, struct iw_point *data, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct iw_mlme *mlme = (struct iw_mlme *)extra; struct cfg80211_registered_device *rdev; int err; if (!wdev) return -EOPNOTSUPP; rdev = wiphy_to_rdev(wdev->wiphy); if (wdev->iftype != NL80211_IFTYPE_STATION) return -EINVAL; if (mlme->addr.sa_family != ARPHRD_ETHER) return -EINVAL; wdev_lock(wdev); switch (mlme->cmd) { case IW_MLME_DEAUTH: case IW_MLME_DISASSOC: err = cfg80211_disconnect(rdev, dev, mlme->reason_code, true); break; default: err = -EOPNOTSUPP; break; } wdev_unlock(wdev); return err; }
gpl-2.0
acheron1502/android_kernel_BLU_BLU_PURE_XL
fs/btrfs/ctree.c
1474
151571
/* * Copyright (C) 2007,2008 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License v2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/sched.h> #include <linux/slab.h> #include <linux/rbtree.h> #include "ctree.h" #include "disk-io.h" #include "transaction.h" #include "print-tree.h" #include "locking.h" static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int level); static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_key *ins_key, struct btrfs_path *path, int data_size, int extend); static int push_node_left(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *dst, struct extent_buffer *src, int empty); static int balance_node_right(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *dst_buf, struct extent_buffer *src_buf); static void del_ptr(struct btrfs_root *root, struct btrfs_path *path, int level, int slot); static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb); static int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path); struct btrfs_path *btrfs_alloc_path(void) { struct btrfs_path *path; path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS); return path; } /* * set all locked nodes in the path to blocking locks. 
This should * be done before scheduling */ noinline void btrfs_set_path_blocking(struct btrfs_path *p) { int i; for (i = 0; i < BTRFS_MAX_LEVEL; i++) { if (!p->nodes[i] || !p->locks[i]) continue; btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]); if (p->locks[i] == BTRFS_READ_LOCK) p->locks[i] = BTRFS_READ_LOCK_BLOCKING; else if (p->locks[i] == BTRFS_WRITE_LOCK) p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING; } } /* * reset all the locked nodes in the patch to spinning locks. * * held is used to keep lockdep happy, when lockdep is enabled * we set held to a blocking lock before we go around and * retake all the spinlocks in the path. You can safely use NULL * for held */ noinline void btrfs_clear_path_blocking(struct btrfs_path *p, struct extent_buffer *held, int held_rw) { int i; #ifdef CONFIG_DEBUG_LOCK_ALLOC /* lockdep really cares that we take all of these spinlocks * in the right order. If any of the locks in the path are not * currently blocking, it is going to complain. So, make really * really sure by forcing the path to blocking before we clear * the path blocking. 
*/ if (held) { btrfs_set_lock_blocking_rw(held, held_rw); if (held_rw == BTRFS_WRITE_LOCK) held_rw = BTRFS_WRITE_LOCK_BLOCKING; else if (held_rw == BTRFS_READ_LOCK) held_rw = BTRFS_READ_LOCK_BLOCKING; } btrfs_set_path_blocking(p); #endif for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) { if (p->nodes[i] && p->locks[i]) { btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]); if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING) p->locks[i] = BTRFS_WRITE_LOCK; else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING) p->locks[i] = BTRFS_READ_LOCK; } } #ifdef CONFIG_DEBUG_LOCK_ALLOC if (held) btrfs_clear_lock_blocking_rw(held, held_rw); #endif } /* this also releases the path */ void btrfs_free_path(struct btrfs_path *p) { if (!p) return; btrfs_release_path(p); kmem_cache_free(btrfs_path_cachep, p); } /* * path release drops references on the extent buffers in the path * and it drops any locks held by this path * * It is safe to call this on paths that no locks or extent buffers held. */ noinline void btrfs_release_path(struct btrfs_path *p) { int i; for (i = 0; i < BTRFS_MAX_LEVEL; i++) { p->slots[i] = 0; if (!p->nodes[i]) continue; if (p->locks[i]) { btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]); p->locks[i] = 0; } free_extent_buffer(p->nodes[i]); p->nodes[i] = NULL; } } /* * safely gets a reference on the root node of a tree. A lock * is not taken, so a concurrent writer may put a different node * at the root of the tree. See btrfs_lock_root_node for the * looping required. * * The extent buffer returned by this has a reference taken, so * it won't disappear. It may stop being the root of the tree * at any time because there are no locks held. 
*/ struct extent_buffer *btrfs_root_node(struct btrfs_root *root) { struct extent_buffer *eb; while (1) { rcu_read_lock(); eb = rcu_dereference(root->node); /* * RCU really hurts here, we could free up the root node because * it was cow'ed but we may not get the new root node yet so do * the inc_not_zero dance and if it doesn't work then * synchronize_rcu and try again. */ if (atomic_inc_not_zero(&eb->refs)) { rcu_read_unlock(); break; } rcu_read_unlock(); synchronize_rcu(); } return eb; } /* loop around taking references on and locking the root node of the * tree until you end up with a lock on the root. A locked buffer * is returned, with a reference held. */ struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root) { struct extent_buffer *eb; while (1) { eb = btrfs_root_node(root); btrfs_tree_lock(eb); if (eb == root->node) break; btrfs_tree_unlock(eb); free_extent_buffer(eb); } return eb; } /* loop around taking references on and locking the root node of the * tree until you end up with a lock on the root. A locked buffer * is returned, with a reference held. */ static struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root) { struct extent_buffer *eb; while (1) { eb = btrfs_root_node(root); btrfs_tree_read_lock(eb); if (eb == root->node) break; btrfs_tree_read_unlock(eb); free_extent_buffer(eb); } return eb; } /* cowonly root (everything not a reference counted cow subvolume), just get * put onto a simple dirty list. transaction.c walks this to make sure they * get properly updated on disk. */ static void add_root_to_dirty_list(struct btrfs_root *root) { spin_lock(&root->fs_info->trans_lock); if (root->track_dirty && list_empty(&root->dirty_list)) { list_add(&root->dirty_list, &root->fs_info->dirty_cowonly_roots); } spin_unlock(&root->fs_info->trans_lock); } /* * used by snapshot creation to make a copy of a root for a tree with * a given objectid. 
The buffer with the new root node is returned in * cow_ret, and this func returns zero on success or a negative error code. */ int btrfs_copy_root(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *buf, struct extent_buffer **cow_ret, u64 new_root_objectid) { struct extent_buffer *cow; int ret = 0; int level; struct btrfs_disk_key disk_key; WARN_ON(root->ref_cows && trans->transid != root->fs_info->running_transaction->transid); WARN_ON(root->ref_cows && trans->transid != root->last_trans); level = btrfs_header_level(buf); if (level == 0) btrfs_item_key(buf, &disk_key, 0); else btrfs_node_key(buf, &disk_key, 0); cow = btrfs_alloc_free_block(trans, root, buf->len, 0, new_root_objectid, &disk_key, level, buf->start, 0); if (IS_ERR(cow)) return PTR_ERR(cow); copy_extent_buffer(cow, buf, 0, 0, cow->len); btrfs_set_header_bytenr(cow, cow->start); btrfs_set_header_generation(cow, trans->transid); btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV); btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN | BTRFS_HEADER_FLAG_RELOC); if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID) btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC); else btrfs_set_header_owner(cow, new_root_objectid); write_extent_buffer(cow, root->fs_info->fsid, (unsigned long)btrfs_header_fsid(cow), BTRFS_FSID_SIZE); WARN_ON(btrfs_header_generation(buf) > trans->transid); if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID) ret = btrfs_inc_ref(trans, root, cow, 1, 1); else ret = btrfs_inc_ref(trans, root, cow, 0, 1); if (ret) return ret; btrfs_mark_buffer_dirty(cow); *cow_ret = cow; return 0; } enum mod_log_op { MOD_LOG_KEY_REPLACE, MOD_LOG_KEY_ADD, MOD_LOG_KEY_REMOVE, MOD_LOG_KEY_REMOVE_WHILE_FREEING, MOD_LOG_KEY_REMOVE_WHILE_MOVING, MOD_LOG_MOVE_KEYS, MOD_LOG_ROOT_REPLACE, }; struct tree_mod_move { int dst_slot; int nr_items; }; struct tree_mod_root { u64 logical; u8 level; }; struct tree_mod_elem { struct rb_node node; u64 index; /* shifted logical */ u64 seq; 
	enum mod_log_op op;

	/* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
	int slot;

	/* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
	u64 generation;

	/* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
	struct btrfs_disk_key key;
	u64 blockptr;

	/* this is used for op == MOD_LOG_MOVE_KEYS */
	struct tree_mod_move move;

	/* this is used for op == MOD_LOG_ROOT_REPLACE */
	struct tree_mod_root old_root;
};

/* thin wrappers around the rwlock protecting the tree mod log rb tree */
static inline void tree_mod_log_read_lock(struct btrfs_fs_info *fs_info)
{
	read_lock(&fs_info->tree_mod_log_lock);
}

static inline void tree_mod_log_read_unlock(struct btrfs_fs_info *fs_info)
{
	read_unlock(&fs_info->tree_mod_log_lock);
}

static inline void tree_mod_log_write_lock(struct btrfs_fs_info *fs_info)
{
	write_lock(&fs_info->tree_mod_log_lock);
}

static inline void tree_mod_log_write_unlock(struct btrfs_fs_info *fs_info)
{
	write_unlock(&fs_info->tree_mod_log_lock);
}

/*
 * Increment the upper half of tree_mod_seq, set lower half zero.
 *
 * Must be called with fs_info->tree_mod_seq_lock held.
 */
static inline u64 btrfs_inc_tree_mod_seq_major(struct btrfs_fs_info *fs_info)
{
	u64 seq = atomic64_read(&fs_info->tree_mod_seq);
	seq &= 0xffffffff00000000ull;
	seq += 1ull << 32;
	atomic64_set(&fs_info->tree_mod_seq, seq);
	return seq;
}

/*
 * Increment the lower half of tree_mod_seq.
 *
 * Must be called with fs_info->tree_mod_seq_lock held. The way major numbers
 * are generated should not technically require a spin lock here. (Rationale:
 * incrementing the minor while incrementing the major seq number is between its
 * atomic64_read and atomic64_set calls doesn't duplicate sequence numbers, it
 * just returns a unique sequence number as usual.) We have decided to leave
 * that requirement in here and rethink it once we notice it really imposes a
 * problem on some workload.
 */
static inline u64 btrfs_inc_tree_mod_seq_minor(struct btrfs_fs_info *fs_info)
{
	return atomic64_inc_return(&fs_info->tree_mod_seq);
}

/*
 * return the last minor in the previous major tree_mod_seq number
 */
u64 btrfs_tree_mod_seq_prev(u64 seq)
{
	return (seq & 0xffffffff00000000ull) - 1ull;
}

/*
 * This adds a new blocker to the tree mod log's blocker list if the @elem
 * passed does not already have a sequence number set. So when a caller expects
 * to record tree modifications, it should ensure to set elem->seq to zero
 * before calling btrfs_get_tree_mod_seq.
 * Returns a fresh, unused tree log modification sequence number, even if no new
 * blocker was added.
 */
u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
			   struct seq_list *elem)
{
	u64 seq;

	tree_mod_log_write_lock(fs_info);
	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!elem->seq) {
		elem->seq = btrfs_inc_tree_mod_seq_major(fs_info);
		list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
	}
	seq = btrfs_inc_tree_mod_seq_minor(fs_info);
	spin_unlock(&fs_info->tree_mod_seq_lock);
	tree_mod_log_write_unlock(fs_info);

	return seq;
}

/*
 * Drop the blocker @elem and garbage-collect every tree mod log entry that is
 * no longer protected by any remaining blocker.
 */
void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
			    struct seq_list *elem)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct rb_node *next;
	struct seq_list *cur_elem;
	struct tree_mod_elem *tm;
	u64 min_seq = (u64)-1;
	u64 seq_putting = elem->seq;

	if (!seq_putting)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	list_del(&elem->list);
	elem->seq = 0;

	/* find the lowest sequence number still blocked by someone else */
	list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
		if (cur_elem->seq < min_seq) {
			if (seq_putting > cur_elem->seq) {
				/*
				 * blocker with lower sequence number exists, we
				 * cannot remove anything from the log
				 */
				spin_unlock(&fs_info->tree_mod_seq_lock);
				return;
			}
			min_seq = cur_elem->seq;
		}
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	/*
	 * anything that's lower than the lowest existing (read: blocked)
	 * sequence number can be removed from the tree.
	 */
	tree_mod_log_write_lock(fs_info);
	tm_root = &fs_info->tree_mod_log;
	for (node = rb_first(tm_root); node; node = next) {
		next = rb_next(node);
		tm = container_of(node, struct tree_mod_elem, node);
		if (tm->seq > min_seq)
			continue;
		rb_erase(node, tm_root);
		kfree(tm);
	}
	tree_mod_log_write_unlock(fs_info);
}

/*
 * key order of the log:
 *       index -> sequence
 *
 * the index is the shifted logical of the *new* root node for root replace
 * operations, or the shifted logical of the affected block for all other
 * operations.
 */
static noinline int
__tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
{
	struct rb_root *tm_root;
	struct rb_node **new;
	struct rb_node *parent = NULL;
	struct tree_mod_elem *cur;

	BUG_ON(!tm || !tm->seq);

	tm_root = &fs_info->tree_mod_log;
	new = &tm_root->rb_node;
	while (*new) {
		cur = container_of(*new, struct tree_mod_elem, node);
		parent = *new;
		if (cur->index < tm->index)
			new = &((*new)->rb_left);
		else if (cur->index > tm->index)
			new = &((*new)->rb_right);
		else if (cur->seq < tm->seq)
			new = &((*new)->rb_left);
		else if (cur->seq > tm->seq)
			new = &((*new)->rb_right);
		else {
			/* duplicate (index, seq): drop the new element */
			kfree(tm);
			return -EEXIST;
		}
	}

	rb_link_node(&tm->node, parent, new);
	rb_insert_color(&tm->node, tm_root);
	return 0;
}

/*
 * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
 * returns zero with the tree_mod_log_lock acquired. The caller must hold
 * this until all tree mod log insertions are recorded in the rb tree and then
 * call tree_mod_log_write_unlock() to release.
 */
static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb)
{
	smp_mb();
	if (list_empty(&(fs_info)->tree_mod_seq_list))
		return 1;
	if (eb && btrfs_header_level(eb) == 0)
		return 1;

	tree_mod_log_write_lock(fs_info);
	if (list_empty(&fs_info->tree_mod_seq_list)) {
		/*
		 * someone emptied the list while we were waiting for the lock.
		 * we must not add to the list when no blocker exists.
		 */
		tree_mod_log_write_unlock(fs_info);
		return 1;
	}

	return 0;
}

/*
 * This allocates memory and gets a tree modification sequence number.
 *
 * Returns <0 on error.
 * Returns >0 (the added sequence number) on success.
 */
static inline int tree_mod_alloc(struct btrfs_fs_info *fs_info, gfp_t flags,
				 struct tree_mod_elem **tm_ret)
{
	struct tree_mod_elem *tm;

	/*
	 * once we switch from spin locks to something different, we should
	 * honor the flags parameter here.
	 */
	tm = *tm_ret = kzalloc(sizeof(*tm), GFP_ATOMIC);
	if (!tm)
		return -ENOMEM;

	spin_lock(&fs_info->tree_mod_seq_lock);
	tm->seq = btrfs_inc_tree_mod_seq_minor(fs_info);
	spin_unlock(&fs_info->tree_mod_seq_lock);

	return tm->seq;
}

/*
 * Record a single key operation on @eb/@slot in the mod log.  Caller must
 * hold the tree_mod_log write lock (see tree_mod_dont_log()).
 */
static inline int
__tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
			  struct extent_buffer *eb, int slot,
			  enum mod_log_op op, gfp_t flags)
{
	int ret;
	struct tree_mod_elem *tm;

	ret = tree_mod_alloc(fs_info, flags, &tm);
	if (ret < 0)
		return ret;

	tm->index = eb->start >> PAGE_CACHE_SHIFT;
	if (op != MOD_LOG_KEY_ADD) {
		/* record what is being overwritten/removed so it can be
		 * restored on rewind */
		btrfs_node_key(eb, &tm->key, slot);
		tm->blockptr = btrfs_node_blockptr(eb, slot);
	}
	tm->op = op;
	tm->slot = slot;
	tm->generation = btrfs_node_ptr_generation(eb, slot);

	return __tree_mod_log_insert(fs_info, tm);
}

/* log a key operation, taking and dropping the mod log lock internally */
static noinline int
tree_mod_log_insert_key_mask(struct btrfs_fs_info *fs_info,
			     struct extent_buffer *eb, int slot,
			     enum mod_log_op op, gfp_t flags)
{
	int ret;

	if (tree_mod_dont_log(fs_info, eb))
		return 0;

	ret = __tree_mod_log_insert_key(fs_info, eb, slot, op, flags);

	tree_mod_log_write_unlock(fs_info);
	return ret;
}

static noinline int
tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
			struct extent_buffer *eb, int slot,
			enum mod_log_op op)
{
	return tree_mod_log_insert_key_mask(fs_info, eb, slot, op, GFP_NOFS);
}

/* like tree_mod_log_insert_key(), but the caller already holds the mod log
 * write lock */
static noinline int
tree_mod_log_insert_key_locked(struct btrfs_fs_info *fs_info,
			       struct extent_buffer *eb, int slot,
			       enum mod_log_op op)
{
	return __tree_mod_log_insert_key(fs_info, eb, slot, op, GFP_NOFS);
}

static noinline int
tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *eb, int dst_slot, int src_slot,
			 int nr_items, gfp_t flags)
{
	struct tree_mod_elem *tm;
	int ret;
	int i;

	if (tree_mod_dont_log(fs_info, eb))
		return 0;

	/*
	 * When we override something during the move, we log these removals.
	 * This can only happen when we move towards the beginning of the
	 * buffer, i.e. dst_slot < src_slot.
	 */
	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		ret = tree_mod_log_insert_key_locked(fs_info, eb, i + dst_slot,
				MOD_LOG_KEY_REMOVE_WHILE_MOVING);
		BUG_ON(ret < 0);
	}

	ret = tree_mod_alloc(fs_info, flags, &tm);
	if (ret < 0)
		goto out;

	tm->index = eb->start >> PAGE_CACHE_SHIFT;
	tm->slot = src_slot;
	tm->move.dst_slot = dst_slot;
	tm->move.nr_items = nr_items;
	tm->op = MOD_LOG_MOVE_KEYS;

	ret = __tree_mod_log_insert(fs_info, tm);
out:
	tree_mod_log_write_unlock(fs_info);
	return ret;
}

/*
 * Log the removal of every key pointer in @eb, from last to first, so a
 * rewind can rebuild the node.  Caller holds the mod log write lock.
 * Leaves (level 0) are not logged.
 */
static inline void
__tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
{
	int i;
	u32 nritems;
	int ret;

	if (btrfs_header_level(eb) == 0)
		return;

	nritems = btrfs_header_nritems(eb);
	for (i = nritems - 1; i >= 0; i--) {
		ret = tree_mod_log_insert_key_locked(fs_info, eb, i,
				MOD_LOG_KEY_REMOVE_WHILE_FREEING);
		BUG_ON(ret < 0);
	}
}

/*
 * Log the replacement of @old_root by @new_root; if @log_removal is set, the
 * contents of the old root are logged as removed first.
 */
static noinline int
tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *old_root,
			 struct extent_buffer *new_root, gfp_t flags,
			 int log_removal)
{
	struct tree_mod_elem *tm;
	int ret;

	if (tree_mod_dont_log(fs_info, NULL))
		return 0;

	if (log_removal)
		__tree_mod_log_free_eb(fs_info, old_root);

	ret = tree_mod_alloc(fs_info, flags, &tm);
	if (ret < 0)
		goto out;

	tm->index = new_root->start >> PAGE_CACHE_SHIFT;
	tm->old_root.logical = old_root->start;
	tm->old_root.level = btrfs_header_level(old_root);
	tm->generation = btrfs_header_generation(old_root);
	tm->op = MOD_LOG_ROOT_REPLACE;

	ret = __tree_mod_log_insert(fs_info, tm);
out:
	tree_mod_log_write_unlock(fs_info);
	return ret;
}

static struct tree_mod_elem *
__tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start,
		      u64 min_seq, int smallest)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct tree_mod_elem *cur = NULL;
	struct tree_mod_elem *found = NULL;
	u64 index = start >> PAGE_CACHE_SHIFT;

	tree_mod_log_read_lock(fs_info);
	tm_root = &fs_info->tree_mod_log;
	node = tm_root->rb_node;
	while (node) {
		cur = container_of(node, struct tree_mod_elem, node);
		if (cur->index < index) {
			node = node->rb_left;
		} else if (cur->index > index) {
			node = node->rb_right;
		} else if (cur->seq < min_seq) {
			/* entries below min_seq are ignored entirely */
			node = node->rb_left;
		} else if (!smallest) {
			/* we want the node with the highest seq */
			if (found)
				BUG_ON(found->seq > cur->seq);
			found = cur;
			node = node->rb_left;
		} else if (cur->seq > min_seq) {
			/* we want the node with the smallest seq */
			if (found)
				BUG_ON(found->seq < cur->seq);
			found = cur;
			node = node->rb_right;
		} else {
			/* exact match on (index, min_seq) */
			found = cur;
			break;
		}
	}
	tree_mod_log_read_unlock(fs_info);

	return found;
}

/*
 * this returns the element from the log with the smallest time sequence
 * value that's in the log (the oldest log item). any element with a time
 * sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
			   u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 1);
}

/*
 * this returns the element from the log with the largest time sequence
 * value that's in the log (the most recent log item). any element with
 * a time sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 0);
}

/*
 * Log the copy of @nr_items key pointers from @src to @dst as a sequence of
 * remove (from src) + add (to dst) operations.  Skipped entirely when both
 * buffers are leaves.
 */
static noinline void
tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     struct extent_buffer *src, unsigned long dst_offset,
		     unsigned long src_offset, int nr_items)
{
	int ret;
	int i;

	if (tree_mod_dont_log(fs_info, NULL))
		return;

	if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0) {
		tree_mod_log_write_unlock(fs_info);
		return;
	}

	for (i = 0; i < nr_items; i++) {
		ret = tree_mod_log_insert_key_locked(fs_info, src,
						     i + src_offset,
						     MOD_LOG_KEY_REMOVE);
		BUG_ON(ret < 0);
		ret = tree_mod_log_insert_key_locked(fs_info, dst,
						     i + dst_offset,
						     MOD_LOG_KEY_ADD);
		BUG_ON(ret < 0);
	}

	tree_mod_log_write_unlock(fs_info);
}

/* log a move of key pointers within a single buffer */
static inline void
tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     int dst_offset, int src_offset, int nr_items)
{
	int ret;
	ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
				       nr_items, GFP_NOFS);
	BUG_ON(ret < 0);
}

/* log the in-place replacement of the key in @slot of @eb */
static noinline void
tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
			  struct extent_buffer *eb, int slot, int atomic)
{
	int ret;

	ret = tree_mod_log_insert_key_mask(fs_info, eb, slot,
					   MOD_LOG_KEY_REPLACE,
					   atomic ?
					   GFP_ATOMIC : GFP_NOFS);
	BUG_ON(ret < 0);
}

/* log the removal of all key pointers of @eb prior to freeing it */
static noinline void
tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
{
	if (tree_mod_dont_log(fs_info, eb))
		return;

	__tree_mod_log_free_eb(fs_info, eb);

	tree_mod_log_write_unlock(fs_info);
}

/* log the replacement of root->node by @new_root_node */
static noinline void
tree_mod_log_set_root_pointer(struct btrfs_root *root,
			      struct extent_buffer *new_root_node,
			      int log_removal)
{
	int ret;
	ret = tree_mod_log_insert_root(root->fs_info, root->node,
				       new_root_node, GFP_NOFS, log_removal);
	BUG_ON(ret < 0);
}

/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf)
{
	/*
	 * Tree blocks not in reference counted trees and tree roots
	 * are never shared. If a block was allocated after the last
	 * snapshot and the block was not allocated by tree relocation,
	 * we know the block is not shared.
	 */
	if (root->ref_cows &&
	    buf != root->node && buf != root->commit_root &&
	    (btrfs_header_generation(buf) <=
	     btrfs_root_last_snapshot(&root->root_item) ||
	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 1;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (root->ref_cows &&
	    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
		return 1;
#endif
	return 0;
}

/*
 * Adjust extent backrefs when @buf is being replaced by its cow copy @cow.
 * Sets *last_ref when the original block loses its last reference.
 */
static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	u64 refs;
	u64 owner;
	u64 flags;
	u64 new_flags = 0;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree block
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in tree block.
	 * The reason for this is some operations (such as drop tree)
	 * are only allowed for blocks that use full backrefs.
	 */

	if (btrfs_block_can_be_shared(root, buf)) {
		ret = btrfs_lookup_extent_info(trans, root, buf->start,
					       btrfs_header_level(buf), 1,
					       &refs, &flags);
		if (ret)
			return ret;
		if (refs == 0) {
			/* a shareable block with zero refs is corruption */
			ret = -EROFS;
			btrfs_std_error(root->fs_info, ret);
			return ret;
		}
	} else {
		refs = 1;
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

	if (refs > 1) {
		if ((owner == root->root_key.objectid ||
		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			ret = btrfs_inc_ref(trans, root, buf, 1, 1);
			BUG_ON(ret); /* -ENOMEM */

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0, 1);
				BUG_ON(ret); /* -ENOMEM */
				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
				BUG_ON(ret); /* -ENOMEM */
			}
			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		} else {

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0, 1);
			BUG_ON(ret); /* -ENOMEM */
		}
		if (new_flags != 0) {
			int level = btrfs_header_level(buf);

			ret = btrfs_set_disk_extent_flags(trans, root,
							  buf->start,
							  buf->len,
							  new_flags, level, 0);
			if (ret)
				return ret;
		}
	} else {
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0, 1);
			BUG_ON(ret); /* -ENOMEM */
			ret = btrfs_dec_ref(trans, root, buf, 1, 1);
			BUG_ON(ret); /* -ENOMEM */
		}
		clean_tree_block(trans, root, buf);
		*last_ref = 1;
	}
	return 0;
}

/*
 * does the dirty work in cow of a single block.
 The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
			     struct extent_buffer **cow_ret,
			     u64 search_start, u64 empty_size)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *cow;
	int level, ret;
	int last_ref = 0;
	int unlock_orig = 0;
	u64 parent_start;

	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_locked(buf);

	WARN_ON(root->ref_cows && trans->transid !=
		root->fs_info->running_transaction->transid);
	WARN_ON(root->ref_cows && trans->transid != root->last_trans);

	level = btrfs_header_level(buf);

	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	/* reloc trees hint the allocator with the parent's location */
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (parent)
			parent_start = parent->start;
		else
			parent_start = 0;
	} else
		parent_start = 0;

	cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
				     root->root_key.objectid, &disk_key,
				     level, search_start, empty_size);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow,
				       root->root_key.objectid);

	write_extent_buffer(cow, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(cow),
			    BTRFS_FSID_SIZE);

	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	}

	if (root->ref_cows)
		btrfs_reloc_cow_block(trans, root, buf, cow);

	if (buf == root->node) {
		/* cowing the root: swap the root pointer under RCU */
		WARN_ON(parent && parent != buf);
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;
		else
			parent_start = 0;

		extent_buffer_get(cow);
		tree_mod_log_set_root_pointer(root, cow, 1);
		rcu_assign_pointer(root->node, cow);

		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
	} else {
		/* non-root: redirect the parent's pointer to the copy */
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
			parent_start = parent->start;
		else
			parent_start = 0;

		WARN_ON(trans->transid != btrfs_header_generation(parent));
		tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
					MOD_LOG_KEY_REPLACE);
		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(parent);
		if (last_ref)
			tree_mod_log_free_eb(root->fs_info, buf);
		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer_stale(buf);
	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}

/*
 * returns the logical address of the oldest predecessor of the given root.
 * entries older than time_seq are ignored.
 */
static struct tree_mod_elem *
__tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
			   struct extent_buffer *eb_root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	struct tree_mod_elem *found = NULL;
	u64 root_logical = eb_root->start;
	int looped = 0;

	if (!time_seq)
		return 0;

	/*
	 * the very last operation that's logged for a root is the replacement
	 * operation (if it is replaced at all).
 this has the index of the *new*
	 * root, making it the very first operation that's logged for this root.
	 */
	while (1) {
		tm = tree_mod_log_search_oldest(fs_info, root_logical,
						time_seq);
		if (!looped && !tm)
			return 0;
		/*
		 * if there are no tree operation for the oldest root, we simply
		 * return it. this should only happen if that (old) root is at
		 * level 0.
		 */
		if (!tm)
			break;

		/*
		 * if there's an operation that's not a root replacement, we
		 * found the oldest version of our root. normally, we'll find a
		 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
		 */
		if (tm->op != MOD_LOG_ROOT_REPLACE)
			break;

		/* follow the replacement chain one root further back */
		found = tm;
		root_logical = tm->old_root.logical;
		looped = 1;
	}

	/* if there's no old root to return, return what we found instead */
	if (!found)
		found = tm;

	return found;
}

/*
 * tm is a pointer to the first operation to rewind within eb. then, all
 * previous operations will be rewinded (until we reach something older than
 * time_seq).
 */
static void
__tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
		      u64 time_seq, struct tree_mod_elem *first_tm)
{
	u32 n;
	struct rb_node *next;
	struct tree_mod_elem *tm = first_tm;
	unsigned long o_dst;
	unsigned long o_src;
	unsigned long p_size = sizeof(struct btrfs_key_ptr);

	n = btrfs_header_nritems(eb);
	tree_mod_log_read_lock(fs_info);
	while (tm && tm->seq >= time_seq) {
		/*
		 * all the operations are recorded with the operator used for
		 * the modification. as we're going backwards, we do the
		 * opposite of each operation here.
		 */
		switch (tm->op) {
		case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
			BUG_ON(tm->slot < n);
			/* Fallthrough */
		case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
		case MOD_LOG_KEY_REMOVE:
			/* undo a removal: restore the saved key pointer */
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			n++;
			break;
		case MOD_LOG_KEY_REPLACE:
			BUG_ON(tm->slot >= n);
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			break;
		case MOD_LOG_KEY_ADD:
			/* if a move operation is needed it's in the log */
			n--;
			break;
		case MOD_LOG_MOVE_KEYS:
			/* undo the move by copying back the other way */
			o_dst = btrfs_node_key_ptr_offset(tm->slot);
			o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
			memmove_extent_buffer(eb, o_dst, o_src,
					      tm->move.nr_items * p_size);
			break;
		case MOD_LOG_ROOT_REPLACE:
			/*
			 * this operation is special. for roots, this must be
			 * handled explicitly before rewinding.
			 * for non-roots, this operation may exist if the node
			 * was a root: root A -> child B; then A gets empty and
			 * B is promoted to the new root. in the mod log, we'll
			 * have a root-replace operation for B, a tree block
			 * that is no root. we simply ignore that operation.
			 */
			break;
		}
		next = rb_next(&tm->node);
		if (!next)
			break;
		tm = container_of(next, struct tree_mod_elem, node);
		if (tm->index != first_tm->index)
			break;
	}
	tree_mod_log_read_unlock(fs_info);
	btrfs_set_header_nritems(eb, n);
}

/*
 * Called with eb read locked. If the buffer cannot be rewinded, the same buffer
 * is returned. If rewind operations happen, a fresh buffer is returned. The
 * returned buffer is always read-locked. If the returned buffer is not the
 * input buffer, the lock on the input buffer is released and the input buffer
 * is freed (its refcount is decremented).
*/ static struct extent_buffer * tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb, u64 time_seq) { struct extent_buffer *eb_rewin; struct tree_mod_elem *tm; if (!time_seq) return eb; if (btrfs_header_level(eb) == 0) return eb; tm = tree_mod_log_search(fs_info, eb->start, time_seq); if (!tm) return eb; if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) { BUG_ON(tm->slot != 0); eb_rewin = alloc_dummy_extent_buffer(eb->start, fs_info->tree_root->nodesize); BUG_ON(!eb_rewin); btrfs_set_header_bytenr(eb_rewin, eb->start); btrfs_set_header_backref_rev(eb_rewin, btrfs_header_backref_rev(eb)); btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb)); btrfs_set_header_level(eb_rewin, btrfs_header_level(eb)); } else { eb_rewin = btrfs_clone_extent_buffer(eb); BUG_ON(!eb_rewin); } extent_buffer_get(eb_rewin); btrfs_tree_read_unlock(eb); free_extent_buffer(eb); extent_buffer_get(eb_rewin); btrfs_tree_read_lock(eb_rewin); __tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm); WARN_ON(btrfs_header_nritems(eb_rewin) > BTRFS_NODEPTRS_PER_BLOCK(fs_info->tree_root)); return eb_rewin; } /* * get_old_root() rewinds the state of @root's root node to the given @time_seq * value. If there are no changes, the current root->root_node is returned. If * anything changed in between, there's a fresh buffer allocated on which the * rewind operations are done. In any case, the returned buffer is read locked. * Returns NULL on error (with no locks held). 
 */
static inline struct extent_buffer *
get_old_root(struct btrfs_root *root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	struct extent_buffer *eb = NULL;
	struct extent_buffer *eb_root;
	struct extent_buffer *old;
	struct tree_mod_root *old_root = NULL;
	u64 old_generation = 0;
	u64 logical;
	u32 blocksize;

	eb_root = btrfs_read_lock_root_node(root);
	tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
	if (!tm)
		return eb_root;

	if (tm->op == MOD_LOG_ROOT_REPLACE) {
		old_root = &tm->old_root;
		old_generation = tm->generation;
		logical = old_root->logical;
	} else {
		logical = eb_root->start;
	}

	tm = tree_mod_log_search(root->fs_info, logical, time_seq);
	if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		/* the old root still exists on disk; read and clone it */
		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);
		blocksize = btrfs_level_size(root, old_root->level);
		old = read_tree_block(root, logical, blocksize, 0);
		if (!old || !extent_buffer_uptodate(old)) {
			free_extent_buffer(old);
			pr_warn("btrfs: failed to read tree block %llu from get_old_root\n",
				logical);
			WARN_ON(1);
		} else {
			eb = btrfs_clone_extent_buffer(old);
			free_extent_buffer(old);
		}
	} else if (old_root) {
		/* the old root was fully logged; rebuild it on a dummy */
		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);
		eb = alloc_dummy_extent_buffer(logical, root->nodesize);
	} else {
		eb = btrfs_clone_extent_buffer(eb_root);
		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);
	}

	if (!eb)
		return NULL;
	extent_buffer_get(eb);
	btrfs_tree_read_lock(eb);
	if (old_root) {
		/*
		 * NOTE(review): in both old_root paths above, eb_root has
		 * already been freed, yet btrfs_header_owner(eb_root) is read
		 * here — this looks like a use-after-free; verify against
		 * upstream, which later reordered this.
		 */
		btrfs_set_header_bytenr(eb, eb->start);
		btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
		btrfs_set_header_owner(eb, btrfs_header_owner(eb_root));
		btrfs_set_header_level(eb, old_root->level);
		btrfs_set_header_generation(eb, old_generation);
	}
	if (tm)
		__tree_mod_log_rewind(root->fs_info, eb, time_seq, tm);
	else
		WARN_ON(btrfs_header_level(eb) != 0);
	WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(root));

	return eb;
}

/*
 * Return the level @root's root node had at @time_seq, consulting the mod
 * log for root replacements.
 */
int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
{
	struct tree_mod_elem
		*tm;
	int level;
	struct extent_buffer *eb_root = btrfs_root_node(root);

	tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
	if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
		level = tm->old_root.level;
	} else {
		level = btrfs_header_level(eb_root);
	}
	free_extent_buffer(eb_root);

	return level;
}

/* decide whether @buf must be cow'ed before it can be modified in @trans */
static inline int should_cow_block(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct extent_buffer *buf)
{
	/* ensure we can see the force_cow */
	smp_rmb();

	/*
	 * We do not need to cow a block if
	 * 1) this block is not created or changed in this transaction;
	 * 2) this block does not belong to TREE_RELOC tree;
	 * 3) the root is not forced COW.
	 *
	 * What is forced COW:
	 *    when we create a snapshot during committing the transaction,
	 *    after we've finished copying the src root, we must COW the shared
	 *    block to ensure the metadata consistency.
	 */
	if (btrfs_header_generation(buf) == trans->transid &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
	    !root->force_cow)
		return 0;
	return 1;
}

/*
 * cows a single block, see __btrfs_cow_block for the real work.
 * This version of it has extra checks so that a block isn't cow'd more than
 * once per transaction, as long as it hasn't been written yet
 */
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret)
{
	u64 search_start;
	int ret;

	if (trans->transaction != root->fs_info->running_transaction)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		     (unsigned long long)trans->transid,
		     (unsigned long long)
		     root->fs_info->running_transaction->transid);

	if (trans->transid != root->fs_info->generation)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		     (unsigned long long)trans->transid,
		     (unsigned long long)root->fs_info->generation);

	if (!should_cow_block(trans, root, buf)) {
		*cow_ret = buf;
		return 0;
	}

	/* hint the allocator to stay within the same 1GiB chunk */
	search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);

	if (parent)
		btrfs_set_lock_blocking(parent);
	btrfs_set_lock_blocking(buf);

	ret = __btrfs_cow_block(trans, root, buf, parent,
				 parent_slot, cow_ret, search_start, 0);

	trace_btrfs_cow_block(root, buf, *cow_ret);

	return ret;
}

/*
 * helper function for defrag to decide if two blocks pointed to by a
 * node are actually close by
 */
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
	if (blocknr < other && other - (blocknr + blocksize) < 32768)
		return 1;
	if (blocknr > other && blocknr - (other + blocksize) < 32768)
		return 1;
	return 0;
}

/*
 * compare two keys in a memcmp fashion
 */
static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	return btrfs_comp_cpu_keys(&k1, k2);
}

/*
 * same as comp_keys only with two btrfs_key's
 */
int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
{
	if (k1->objectid > k2->objectid)
		return 1;
	if (k1->objectid < k2->objectid)
		return -1;
	if (k1->type > k2->type)
		return 1;
	if (k1->type < k2->type)
		return -1;
	if (k1->offset > k2->offset)
		return 1;
	if (k1->offset < k2->offset)
		return -1;
	return 0;
}

/*
 * this is used by the defrag code to go through all the
 * leaves pointed to by a node and reallocate them so that
 * disk order is close to key order
 */
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
		       int start_slot, u64 *last_ret,
		       struct btrfs_key *progress)
{
	struct extent_buffer *cur;
	u64 blocknr;
	u64 gen;
	u64 search_start = *last_ret;
	u64 last_block = 0;
	u64 other;
	u32 parent_nritems;
	int end_slot;
	int i;
	int err = 0;
	int parent_level;
	int uptodate;
	u32 blocksize;
	int progress_passed = 0;
	struct btrfs_disk_key disk_key;

	parent_level = btrfs_header_level(parent);

	WARN_ON(trans->transaction != root->fs_info->running_transaction);
	WARN_ON(trans->transid != root->fs_info->generation);

	parent_nritems = btrfs_header_nritems(parent);
	blocksize = btrfs_level_size(root, parent_level - 1);
	end_slot = parent_nritems;

	if (parent_nritems == 1)
		return 0;

	btrfs_set_lock_blocking(parent);

	for (i = start_slot; i < end_slot; i++) {
		int close = 1;

		/* skip children before the caller-supplied progress key */
		btrfs_node_key(parent, &disk_key, i);
		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
			continue;

		progress_passed = 1;
		blocknr = btrfs_node_blockptr(parent, i);
		gen = btrfs_node_ptr_generation(parent, i);
		if (last_block == 0)
			last_block = blocknr;

		/* children already close to a neighbor need no relocation */
		if (i > 0) {
			other = btrfs_node_blockptr(parent, i - 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (!close && i < end_slot - 2) {
			other = btrfs_node_blockptr(parent, i + 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (close) {
			last_block = blocknr;
			continue;
		}

		cur = btrfs_find_tree_block(root, blocknr, blocksize);
		if (cur)
			uptodate = btrfs_buffer_uptodate(cur, gen, 0);
		else
			uptodate = 0;
		if (!cur || !uptodate) {
			if (!cur) {
				cur = read_tree_block(root, blocknr,
							 blocksize, gen);
				if (!cur || !extent_buffer_uptodate(cur)) {
					free_extent_buffer(cur);
					return -EIO;
				}
			} else if (!uptodate) {
				err = btrfs_read_buffer(cur, gen);
				if (err) {
					free_extent_buffer(cur);
					return err;
				}
			}
		}
		if (search_start == 0)
			search_start = last_block;

		/* cow the child into a better location near search_start */
		btrfs_tree_lock(cur);
		btrfs_set_lock_blocking(cur);
		err = __btrfs_cow_block(trans, root, cur, parent, i,
					&cur, search_start,
					min(16 * blocksize,
					    (end_slot - i) * blocksize));
		if (err) {
			btrfs_tree_unlock(cur);
			free_extent_buffer(cur);
			break;
		}
		search_start = cur->start;
		last_block = cur->start;
		*last_ret = search_start;
		btrfs_tree_unlock(cur);
		free_extent_buffer(cur);
	}
	return err;
}

/*
 * The leaf data grows from end-to-front in the node.
 * this returns the address of the start of the last item,
 * which is the stop of the leaf data stack
 */
static inline unsigned int leaf_data_end(struct btrfs_root *root,
					 struct extent_buffer *leaf)
{
	u32 nr = btrfs_header_nritems(leaf);
	if (nr == 0)
		return BTRFS_LEAF_DATA_SIZE(root);
	return btrfs_item_offset_nr(leaf, nr - 1);
}

/*
 * search for key in the extent_buffer.  The items start at offset p,
 * and they are item_size apart.  There are 'max' items in p.
 *
 * the slot in the array is returned via slot, and it points to
 * the place where you would insert key if it is not found in
 * the array.
 *
 * slot may point to max if the key is bigger than all of the keys
 */
static noinline int generic_bin_search(struct extent_buffer *eb,
				       unsigned long p,
				       int item_size, struct btrfs_key *key,
				       int max, int *slot)
{
	int low = 0;
	int high = max;
	int mid;
	int ret;
	struct btrfs_disk_key *tmp = NULL;
	struct btrfs_disk_key unaligned;
	unsigned long offset;
	char *kaddr = NULL;
	unsigned long map_start = 0;
	unsigned long map_len = 0;
	int err;

	/* classic binary search: returns 0 on exact match, 1 otherwise */
	while (low < high) {
		mid = (low + high) / 2;
		offset = p + mid * item_size;

		/* remap when the key falls outside the currently mapped span */
		if (!kaddr || offset < map_start ||
		    (offset + sizeof(struct btrfs_disk_key)) >
		    map_start + map_len) {
			err = map_private_extent_buffer(eb, offset,
						sizeof(struct btrfs_disk_key),
						&kaddr, &map_start, &map_len);

			if (!err) {
				tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
			} else {
				/*
				 * key couldn't be mapped contiguously
				 * (e.g. it straddles a page); copy it
				 * into an aligned stack buffer instead
				 */
				read_extent_buffer(eb, &unaligned,
						   offset, sizeof(unaligned));
				tmp = &unaligned;
			}

		} else {
			tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
		}
		ret = comp_keys(tmp, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			return 0;
		}
	}
	/* not found: low is the insertion point */
	*slot = low;
	return 1;
}

/*
 * simple bin_search frontend that does the right thing for
 * leaves vs nodes
 */
static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		      int level, int *slot)
{
	if (level == 0)
		return generic_bin_search(eb,
					  offsetof(struct btrfs_leaf, items),
					  sizeof(struct btrfs_item),
					  key, btrfs_header_nritems(eb),
					  slot);
	else
		return generic_bin_search(eb,
					  offsetof(struct btrfs_node, ptrs),
					  sizeof(struct btrfs_key_ptr),
					  key, btrfs_header_nritems(eb),
					  slot);
}

/* exported wrapper around bin_search() */
int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		     int level, int *slot)
{
	return bin_search(eb, key, level, slot);
}

/* account 'size' more bytes as used in the root item, under accounting_lock */
static void root_add_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) + size);
	spin_unlock(&root->accounting_lock);
}

/* release 'size' bytes of usage from the root item, under accounting_lock */
static void root_sub_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) - size);
	spin_unlock(&root->accounting_lock);
}

/* given a node and slot number, this reads the blocks it points to.  The
 * extent buffer is returned with a reference taken (but unlocked).
 * NULL is returned on error.
 */
static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
				   struct extent_buffer *parent, int slot)
{
	int level = btrfs_header_level(parent);
	struct extent_buffer *eb;

	/* out-of-range slots are treated as "no sibling", not an error */
	if (slot < 0)
		return NULL;
	if (slot >= btrfs_header_nritems(parent))
		return NULL;

	/* leaves have no child blocks to read */
	BUG_ON(level == 0);

	eb = read_tree_block(root, btrfs_node_blockptr(parent, slot),
			     btrfs_level_size(root, level - 1),
			     btrfs_node_ptr_generation(parent, slot));
	if (eb && !extent_buffer_uptodate(eb)) {
		free_extent_buffer(eb);
		eb = NULL;
	}

	return eb;
}

/*
 * node level balancing, used to make sure nodes are in proper order for
 * item deletion.  We balance from the top down, so we have to make sure
 * that a deletion won't leave an node completely empty later on.
 */
static noinline int balance_level(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path, int level)
{
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];
	u64 orig_ptr;

	if (level == 0)
		return 0;

	mid = path->nodes[level];

	/* mid must already be write locked and COWed in this transaction */
	WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
		path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	orig_ptr = btrfs_node_blockptr(mid, orig_slot);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	/*
	 * deal with the case where there is only one pointer in the root
	 * by promoting the node below to a root
	 */
	if (!parent) {
		struct extent_buffer *child;

		if (btrfs_header_nritems(mid) != 1)
			return 0;

		/* promote the child to a root */
		child = read_node_slot(root, mid, 0);
		if (!child) {
			ret = -EROFS;
			btrfs_std_error(root->fs_info, ret);
			goto enospc;
		}

		btrfs_tree_lock(child);
		btrfs_set_lock_blocking(child);
		ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
		if (ret) {
			btrfs_tree_unlock(child);
			free_extent_buffer(child);
			goto enospc;
		}

		tree_mod_log_set_root_pointer(root, child, 1);
		rcu_assign_pointer(root->node, child);

		add_root_to_dirty_list(root);
		btrfs_tree_unlock(child);

		path->locks[level] = 0;
		path->nodes[level] = NULL;
		clean_tree_block(trans, root, mid);
		btrfs_tree_unlock(mid);
		/* once for the path */
		free_extent_buffer(mid);

		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, root, mid, 0, 1);
		/* once for the root ptr */
		free_extent_buffer_stale(mid);
		return 0;
	}
	/* more than 1/4 full: no balancing needed */
	if (btrfs_header_nritems(mid) >
	    BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
		return 0;

	left = read_node_slot(root, parent, pslot - 1);
	if (left) {
		btrfs_tree_lock(left);
		btrfs_set_lock_blocking(left);
		wret = btrfs_cow_block(trans, root, left,
				       parent, pslot - 1, &left);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}
	right = read_node_slot(root, parent, pslot + 1);
	if (right) {
		btrfs_tree_lock(right);
		btrfs_set_lock_blocking(right);
		wret = btrfs_cow_block(trans, root, right,
				       parent, pslot + 1, &right);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}

	/* first, try to make some room in the middle buffer */
	if (left) {
		orig_slot += btrfs_header_nritems(left);
		wret = push_node_left(trans, root, left, mid, 1);
		if (wret < 0)
			ret = wret;
	}

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		wret = push_node_left(trans, root, mid, right, 1);
		if (wret < 0 && wret != -ENOSPC)
			ret = wret;
		if (btrfs_header_nritems(right) == 0) {
			/* right is now empty: delete it from the tree */
			clean_tree_block(trans, root, right);
			btrfs_tree_unlock(right);
			del_ptr(root, path, level + 1, pslot + 1);
			root_sub_used(root, right->len);
			btrfs_free_tree_block(trans, root, right, 0, 1);
			free_extent_buffer_stale(right);
			right = NULL;
		} else {
			struct btrfs_disk_key right_key;
			btrfs_node_key(right, &right_key, 0);
			tree_mod_log_set_node_key(root->fs_info, parent,
						  pslot + 1, 0);
			btrfs_set_node_key(parent, &right_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);
		}
	}
	if (btrfs_header_nritems(mid) == 1) {
		/*
		 * we're not allowed to leave a node with one item in the
		 * tree during a delete.  A deletion from lower in the tree
		 * could try to delete the only pointer in this node.
		 * So, pull some keys from the left.
		 * There has to be a left pointer at this point because
		 * otherwise we would have pulled some pointers from the
		 * right
		 */
		if (!left) {
			ret = -EROFS;
			btrfs_std_error(root->fs_info, ret);
			goto enospc;
		}
		wret = balance_node_right(trans, root, mid, left);
		if (wret < 0) {
			ret = wret;
			goto enospc;
		}
		if (wret == 1) {
			wret = push_node_left(trans, root, left, mid, 1);
			if (wret < 0)
				ret = wret;
		}
		BUG_ON(wret == 1);
	}
	if (btrfs_header_nritems(mid) == 0) {
		/* mid was fully drained into its siblings: remove it */
		clean_tree_block(trans, root, mid);
		btrfs_tree_unlock(mid);
		del_ptr(root, path, level + 1, pslot);
		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, root, mid, 0, 1);
		free_extent_buffer_stale(mid);
		mid = NULL;
	} else {
		/* update the parent key to reflect our changes */
		struct btrfs_disk_key mid_key;
		btrfs_node_key(mid, &mid_key, 0);
		tree_mod_log_set_node_key(root->fs_info, parent,
					  pslot, 0);
		btrfs_set_node_key(parent, &mid_key, pslot);
		btrfs_mark_buffer_dirty(parent);
	}

	/* update the path */
	if (left) {
		if (btrfs_header_nritems(left) > orig_slot) {
			/* our slot migrated into the left sibling */
			extent_buffer_get(left);
			/* left was locked after cow */
			path->nodes[level] = left;
			path->slots[level + 1] -= 1;
			path->slots[level] = orig_slot;
			if (mid) {
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			}
		} else {
			orig_slot -= btrfs_header_nritems(left);
			path->slots[level] = orig_slot;
		}
	}
	/* double check we haven't messed things up */
	if (orig_ptr !=
	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
		BUG();
enospc:
	/* general error/exit path: drop locks and references we took */
	if (right) {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	if (left) {
		if (path->nodes[level] != left)
			btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	return ret;
}

/* Node balancing for insertion.  Here we only split or push nodes around
 * when they are completely full.  This is also done top down, so we
 * have to be pessimistic.
 */
static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path, int level)
{
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];

	if (level == 0)
		return 1;

	mid = path->nodes[level];
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	/* no parent means mid is the root: nothing to push into */
	if (!parent)
		return 1;

	left = read_node_slot(root, parent, pslot - 1);

	/* first, try to make some room in the middle buffer */
	if (left) {
		u32 left_nr;

		btrfs_tree_lock(left);
		btrfs_set_lock_blocking(left);

		left_nr = btrfs_header_nritems(left);
		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
			/* left is (nearly) full, can't push */
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, left, parent,
					      pslot - 1, &left);
			if (ret)
				wret = 1;
			else {
				wret = push_node_left(trans, root,
						      left, mid, 0);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			/* items moved left: fix up the parent key and path */
			struct btrfs_disk_key disk_key;
			orig_slot += left_nr;
			btrfs_node_key(mid, &disk_key, 0);
			tree_mod_log_set_node_key(root->fs_info, parent,
						  pslot, 0);
			btrfs_set_node_key(parent, &disk_key, pslot);
			btrfs_mark_buffer_dirty(parent);
			if (btrfs_header_nritems(left) > orig_slot) {
				path->nodes[level] = left;
				path->slots[level + 1] -= 1;
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				orig_slot -=
					btrfs_header_nritems(left);
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(left);
				free_extent_buffer(left);
			}
			return 0;
		}
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	right = read_node_slot(root, parent, pslot + 1);

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		u32 right_nr;

		btrfs_tree_lock(right);
		btrfs_set_lock_blocking(right);

		right_nr = btrfs_header_nritems(right);
		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
			/* right is (nearly) full, can't push */
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, right,
					      parent, pslot + 1,
					      &right);
			if (ret)
				wret = 1;
			else {
				wret = balance_node_right(trans, root,
							  right, mid);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			/* items moved right: fix up the parent key and path */
			struct btrfs_disk_key disk_key;

			btrfs_node_key(right, &disk_key, 0);
			tree_mod_log_set_node_key(root->fs_info, parent,
						  pslot + 1, 0);
			btrfs_set_node_key(parent, &disk_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);

			if (btrfs_header_nritems(mid) <= orig_slot) {
				path->nodes[level] = right;
				path->slots[level + 1] += 1;
				path->slots[level] = orig_slot -
					btrfs_header_nritems(mid);
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				btrfs_tree_unlock(right);
				free_extent_buffer(right);
			}
			return 0;
		}
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	return 1;
}

/*
 * readahead one full node of leaves, finding things that are close
 * to the block in 'slot', and triggering ra on them.
 */
static void reada_for_search(struct btrfs_root *root,
			     struct btrfs_path *path,
			     int level, int slot, u64 objectid)
{
	struct extent_buffer *node;
	struct btrfs_disk_key disk_key;
	u32 nritems;
	u64 search;
	u64 target;
	u64 nread = 0;
	u64 gen;
	/* path->reada encodes direction: < 0 backward, > 0 forward */
	int direction = path->reada;
	struct extent_buffer *eb;
	u32 nr;
	u32 blocksize;
	u32 nscan = 0;

	/* only readahead leaves (children of a level-1 node) */
	if (level != 1)
		return;

	if (!path->nodes[level])
		return;

	node = path->nodes[level];

	search = btrfs_node_blockptr(node, slot);
	blocksize = btrfs_level_size(root, level - 1);
	eb = btrfs_find_tree_block(root, search, blocksize);
	if (eb) {
		/* target block already cached: nothing to do */
		free_extent_buffer(eb);
		return;
	}

	target = search;

	nritems = btrfs_header_nritems(node);
	nr = slot;

	while (1) {
		if (direction < 0) {
			if (nr == 0)
				break;
			nr--;
		} else if (direction > 0) {
			nr++;
			if (nr >= nritems)
				break;
		}
		if (path->reada < 0 && objectid) {
			/* walking backward: stop at a different objectid */
			btrfs_node_key(node, &disk_key, nr);
			if (btrfs_disk_key_objectid(&disk_key) != objectid)
				break;
		}
		search = btrfs_node_blockptr(node, nr);
		/* only readahead blocks within 64KiB of the target on disk */
		if ((search <= target && target - search <= 65536) ||
		    (search > target && search - target <= 65536)) {
			gen = btrfs_node_ptr_generation(node, nr);
			readahead_tree_block(root, search, blocksize, gen);
			nread += blocksize;
		}
		nscan++;
		if ((nread > 65536 || nscan > 32))
			break;
	}
}

/*
 * returns -EAGAIN if it had to drop the path, or zero if everything was in
 * cache
 */
static noinline int reada_for_balance(struct btrfs_root *root,
				      struct btrfs_path *path, int level)
{
	int slot;
	int nritems;
	struct extent_buffer *parent;
	struct extent_buffer *eb;
	u64 gen;
	u64 block1 = 0;
	u64 block2 = 0;
	int ret = 0;
	int blocksize;

	parent = path->nodes[level + 1];
	if (!parent)
		return 0;

	nritems = btrfs_header_nritems(parent);
	slot = path->slots[level + 1];
	blocksize = btrfs_level_size(root, level);

	if (slot > 0) {
		block1 = btrfs_node_blockptr(parent, slot - 1);
		gen = btrfs_node_ptr_generation(parent, slot - 1);
		eb = btrfs_find_tree_block(root, block1, blocksize);
		/*
		 * if we get -eagain from btrfs_buffer_uptodate, we
		 * don't want to return eagain here.  That will loop
		 * forever
		 */
		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
			block1 = 0;
		free_extent_buffer(eb);
	}
	if (slot + 1 < nritems) {
		block2 = btrfs_node_blockptr(parent, slot + 1);
		gen = btrfs_node_ptr_generation(parent, slot + 1);
		eb = btrfs_find_tree_block(root, block2, blocksize);
		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
			block2 = 0;
		free_extent_buffer(eb);
	}
	if (block1 || block2) {
		ret = -EAGAIN;

		/* release the whole path */
		btrfs_release_path(path);

		/* read the blocks */
		if (block1)
			readahead_tree_block(root, block1, blocksize, 0);
		if (block2)
			readahead_tree_block(root, block2, blocksize, 0);

		/* wait for the reads by taking and dropping a reference */
		if (block1) {
			eb = read_tree_block(root, block1, blocksize, 0);
			free_extent_buffer(eb);
		}
		if (block2) {
			eb = read_tree_block(root, block2, blocksize, 0);
			free_extent_buffer(eb);
		}
	}
	return ret;
}

/*
 * when we walk down the tree, it is usually safe to unlock the higher layers
 * in the tree.  The exceptions are when our path goes through slot 0, because
 * operations on the tree might require changing key pointers higher up in the
 * tree.
 *
 * callers might also have set path->keep_locks, which tells this code to keep
 * the lock if the path points to the last slot in the block.  This is part of
 * walking through the tree, and selecting the next slot in the higher block.
 *
 * lowest_unlock sets the lowest level in the tree we're allowed to unlock.  so
 * if lowest_unlock is 1, level 0 won't be unlocked
 */
static noinline void unlock_up(struct btrfs_path *path, int level,
			       int lowest_unlock, int min_write_lock_level,
			       int *write_lock_level)
{
	int i;
	int skip_level = level;
	int no_skips = 0;
	struct extent_buffer *t;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			break;
		if (!path->locks[i])
			break;
		/* slot 0: a key change here may propagate upward, keep lock */
		if (!no_skips && path->slots[i] == 0) {
			skip_level = i + 1;
			continue;
		}
		/* keep_locks: hold the lock when pointing at the last slot */
		if (!no_skips && path->keep_locks) {
			u32 nritems;
			t = path->nodes[i];
			nritems = btrfs_header_nritems(t);
			if (nritems < 1 || path->slots[i] >= nritems - 1) {
				skip_level = i + 1;
				continue;
			}
		}
		if (skip_level < i && i >= lowest_unlock)
			no_skips = 1;

		t = path->nodes[i];
		if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
			btrfs_tree_unlock_rw(t, path->locks[i]);
			path->locks[i] = 0;
			/* lower the write-lock watermark as we release */
			if (write_lock_level &&
			    i > min_write_lock_level &&
			    i <= *write_lock_level) {
				*write_lock_level = i - 1;
			}
		}
	}
}

/*
 * This releases any locks held in the path starting at level and
 * going all the way up to the root.
 *
 * btrfs_search_slot will keep the lock held on higher nodes in a few
 * corner cases, such as COW of the block at slot zero in the node.  This
 * ignores those rules, and it should only be called when there are no
 * more updates to be done higher up in the tree.
 */
noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
{
	int i;

	if (path->keep_locks)
		return;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			continue;
		if (!path->locks[i])
			continue;
		btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
		path->locks[i] = 0;
	}
}

/*
 * helper function for btrfs_search_slot.  The goal is to find a block
 * in cache without setting the path to blocking.  If we find the block
 * we return zero and the path is unchanged.
 *
 * If we can't find the block, we set the path blocking and do some
 * reada.  -EAGAIN is returned and the search must be repeated.
 */
static int read_block_for_search(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *p,
				 struct extent_buffer **eb_ret, int level,
				 int slot, struct btrfs_key *key,
				 u64 time_seq)
{
	u64 blocknr;
	u64 gen;
	u32 blocksize;
	struct extent_buffer *b = *eb_ret;
	struct extent_buffer *tmp;
	int ret;

	blocknr = btrfs_node_blockptr(b, slot);
	gen = btrfs_node_ptr_generation(b, slot);
	blocksize = btrfs_level_size(root, level - 1);

	tmp = btrfs_find_tree_block(root, blocknr, blocksize);
	if (tmp) {
		/* first we do an atomic uptodate check */
		if (btrfs_buffer_uptodate(tmp, 0, 1) > 0) {
			if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
				/*
				 * we found an up to date block without
				 * sleeping, return
				 * right away
				 */
				*eb_ret = tmp;
				return 0;
			}
			/* the pages were up to date, but we failed
			 * the generation number check.  Do a full
			 * read for the generation number that is correct.
			 * We must do this without dropping locks so
			 * we can trust our generation number
			 */
			free_extent_buffer(tmp);
			btrfs_set_path_blocking(p);

			/* now we're allowed to do a blocking uptodate check */
			tmp = read_tree_block(root, blocknr, blocksize, gen);
			if (tmp && btrfs_buffer_uptodate(tmp, gen, 0) > 0) {
				*eb_ret = tmp;
				return 0;
			}
			free_extent_buffer(tmp);
			btrfs_release_path(p);
			return -EIO;
		}
	}

	/*
	 * reduce lock contention at high levels
	 * of the btree by dropping locks before
	 * we read.  Don't release the lock on the current
	 * level because we need to walk this node to figure
	 * out which blocks to read.
	 */
	btrfs_unlock_up_safe(p, level + 1);
	btrfs_set_path_blocking(p);

	/* tmp may be NULL or not-uptodate here; both are safe to free */
	free_extent_buffer(tmp);
	if (p->reada)
		reada_for_search(root, p, level, slot, key->objectid);

	btrfs_release_path(p);

	ret = -EAGAIN;
	tmp = read_tree_block(root, blocknr, blocksize, 0);
	if (tmp) {
		/*
		 * If the read above didn't mark this buffer up to date,
		 * it will never end up being up to date.  Set ret to EIO now
		 * and give up so that our caller doesn't loop forever
		 * on our EAGAINs.
		 */
		if (!btrfs_buffer_uptodate(tmp, 0, 0))
			ret = -EIO;
		free_extent_buffer(tmp);
	}
	return ret;
}

/*
 * helper function for btrfs_search_slot.  This does all of the checks
 * for node-level blocks and does any balancing required based on
 * the ins_len.
 *
 * If no extra work was required, zero is returned.  If we had to
 * drop the path, -EAGAIN is returned and btrfs_search_slot must
 * start over
 */
static int setup_nodes_for_search(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *p,
				  struct extent_buffer *b, int level,
				  int ins_len, int *write_lock_level)
{
	int ret;

	/* nearly full node on an insert path: split it */
	if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
	    BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
		int sret;

		if (*write_lock_level < level + 1) {
			*write_lock_level = level + 1;
			btrfs_release_path(p);
			goto again;
		}

		sret = reada_for_balance(root, p, level);
		if (sret)
			goto again;

		btrfs_set_path_blocking(p);
		sret = split_node(trans, root, p, level);
		btrfs_clear_path_blocking(p, NULL, 0);

		BUG_ON(sret > 0);
		if (sret) {
			ret = sret;
			goto done;
		}
		b = p->nodes[level];
	} else if (ins_len < 0 && btrfs_header_nritems(b) <
		   BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
		/* under half full node on a delete path: rebalance it */
		int sret;

		if (*write_lock_level < level + 1) {
			*write_lock_level = level + 1;
			btrfs_release_path(p);
			goto again;
		}

		sret = reada_for_balance(root, p, level);
		if (sret)
			goto again;

		btrfs_set_path_blocking(p);
		sret = balance_level(trans, root, p, level);
		btrfs_clear_path_blocking(p, NULL, 0);

		if (sret) {
			ret = sret;
			goto done;
		}
		b = p->nodes[level];
		if (!b) {
			btrfs_release_path(p);
			goto
again;
		}
		BUG_ON(btrfs_header_nritems(b) == 1);
	}
	return 0;

again:
	ret = -EAGAIN;
done:
	return ret;
}

/*
 * look for key in the tree.  path is filled in with nodes along the way
 * if key is found, we return zero and you can find the item in the leaf
 * level of the path (level 0)
 *
 * If the key isn't found, the path points to the slot where it should
 * be inserted, and 1 is returned.  If there are other errors during the
 * search a negative error number is returned.
 *
 * if ins_len > 0, nodes and leaves will be split as we walk down the
 * tree.  if ins_len < 0, nodes will be merged as we walk down the tree (if
 * possible)
 */
int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *key, struct btrfs_path *p, int
		      ins_len, int cow)
{
	struct extent_buffer *b;
	int slot;
	int ret;
	int err;
	int level;
	int lowest_unlock = 1;
	int root_lock;
	/* everything at write_lock_level or lower must be write locked */
	int write_lock_level = 0;
	u8 lowest_level = 0;
	int min_write_lock_level;

	lowest_level = p->lowest_level;
	WARN_ON(lowest_level && ins_len > 0);
	WARN_ON(p->nodes[0] != NULL);

	if (ins_len < 0) {
		lowest_unlock = 2;

		/* when we are removing items, we might have to go up to level
		 * two as we update tree pointers  Make sure we keep write
		 * for those levels as well
		 */
		write_lock_level = 2;
	} else if (ins_len > 0) {
		/*
		 * for inserting items, make sure we have a write lock on
		 * level 1 so we can update keys
		 */
		write_lock_level = 1;
	}

	if (!cow)
		write_lock_level = -1;

	if (cow && (p->keep_locks || p->lowest_level))
		write_lock_level = BTRFS_MAX_LEVEL;

	min_write_lock_level = write_lock_level;

again:
	/*
	 * we try very hard to do read locks on the root
	 */
	root_lock = BTRFS_READ_LOCK;
	level = 0;
	if (p->search_commit_root) {
		/*
		 * the commit roots are read only
		 * so we always do read locks
		 */
		b = root->commit_root;
		extent_buffer_get(b);
		level = btrfs_header_level(b);
		if (!p->skip_locking)
			btrfs_tree_read_lock(b);
	} else {
		if (p->skip_locking) {
			b = btrfs_root_node(root);
			level = btrfs_header_level(b);
		} else {
			/* we don't know the level of the root node
			 * until we actually have it read locked
			 */
			b = btrfs_read_lock_root_node(root);
			level = btrfs_header_level(b);
			if (level <= write_lock_level) {
				/* whoops, must trade for write lock */
				btrfs_tree_read_unlock(b);
				free_extent_buffer(b);
				b = btrfs_lock_root_node(root);
				root_lock = BTRFS_WRITE_LOCK;

				/* the level might have changed, check again */
				level = btrfs_header_level(b);
			}
		}
	}
	p->nodes[level] = b;
	if (!p->skip_locking)
		p->locks[level] = root_lock;

	while (b) {
		level = btrfs_header_level(b);

		/*
		 * setup the path here so we can release it under lock
		 * contention with the cow code
		 */
		if (cow) {
			/*
			 * if we don't really need to cow this block
			 * then we don't want to set the path blocking,
			 * so we test it here
			 */
			if (!should_cow_block(trans, root, b))
				goto cow_done;

			btrfs_set_path_blocking(p);

			/*
			 * must have write locks on this node and the
			 * parent
			 */
			if (level > write_lock_level ||
			    (level + 1 > write_lock_level &&
			    level + 1 < BTRFS_MAX_LEVEL &&
			    p->nodes[level + 1])) {
				write_lock_level = level + 1;
				btrfs_release_path(p);
				goto again;
			}

			err = btrfs_cow_block(trans, root, b,
					      p->nodes[level + 1],
					      p->slots[level + 1], &b);
			if (err) {
				ret = err;
				goto done;
			}
		}
cow_done:
		BUG_ON(!cow && ins_len);

		p->nodes[level] = b;
		btrfs_clear_path_blocking(p, NULL, 0);

		/*
		 * we have a lock on b and as long as we aren't changing
		 * the tree, there is no way to for the items in b to change.
		 * It is safe to drop the lock on our parent before we
		 * go through the expensive btree search on b.
		 *
		 * If cow is true, then we might be changing slot zero,
		 * which may require changing the parent.  So, we can't
		 * drop the lock until after we know which slot we're
		 * operating on.
		 */
		if (!cow)
			btrfs_unlock_up_safe(p, level + 1);

		ret = bin_search(b, key, level, &slot);

		if (level != 0) {
			int dec = 0;
			/*
			 * not an exact match: descend through the slot
			 * whose key is the largest one <= the search key
			 */
			if (ret && slot > 0) {
				dec = 1;
				slot -= 1;
			}
			p->slots[level] = slot;
			err = setup_nodes_for_search(trans, root, p, b, level,
						     ins_len, &write_lock_level);
			if (err == -EAGAIN)
				goto again;
			if (err) {
				ret = err;
				goto done;
			}
			b = p->nodes[level];
			slot = p->slots[level];

			/*
			 * slot 0 is special, if we change the key
			 * we have to update the parent pointer
			 * which means we must have a write lock
			 * on the parent
			 */
			if (slot == 0 && cow &&
			    write_lock_level < level + 1) {
				write_lock_level = level + 1;
				btrfs_release_path(p);
				goto again;
			}

			unlock_up(p, level, lowest_unlock,
				  min_write_lock_level, &write_lock_level);

			if (level == lowest_level) {
				if (dec)
					p->slots[level]++;
				goto done;
			}

			err = read_block_for_search(trans, root, p,
						    &b, level, slot, key, 0);
			if (err == -EAGAIN)
				goto again;
			if (err) {
				ret = err;
				goto done;
			}

			if (!p->skip_locking) {
				level = btrfs_header_level(b);
				if (level <= write_lock_level) {
					/* try the fast lock before blocking */
					err = btrfs_try_tree_write_lock(b);
					if (!err) {
						btrfs_set_path_blocking(p);
						btrfs_tree_lock(b);
						btrfs_clear_path_blocking(p, b,
								  BTRFS_WRITE_LOCK);
					}
					p->locks[level] = BTRFS_WRITE_LOCK;
				} else {
					err = btrfs_try_tree_read_lock(b);
					if (!err) {
						btrfs_set_path_blocking(p);
						btrfs_tree_read_lock(b);
						btrfs_clear_path_blocking(p, b,
								  BTRFS_READ_LOCK);
					}
					p->locks[level] = BTRFS_READ_LOCK;
				}
				p->nodes[level] = b;
			}
		} else {
			p->slots[level] = slot;
			if (ins_len > 0 &&
			    btrfs_leaf_free_space(root, b) < ins_len) {
				if (write_lock_level < 1) {
					write_lock_level = 1;
					btrfs_release_path(p);
					goto again;
				}

				btrfs_set_path_blocking(p);
				err = split_leaf(trans, root, key,
						 p, ins_len, ret == 0);
				btrfs_clear_path_blocking(p, NULL, 0);

				BUG_ON(err > 0);
				if (err) {
					ret = err;
					goto done;
				}
			}
			if (!p->search_for_split)
				unlock_up(p, level, lowest_unlock,
					  min_write_lock_level, &write_lock_level);
			goto done;
		}
	}
	ret = 1;
done:
	/*
	 * we don't really know what they plan on doing with the path
	 * from here on, so for now just mark it as blocking
	 */
	if (!p->leave_spinning)
		btrfs_set_path_blocking(p);
	if (ret < 0)
		btrfs_release_path(p);
	return ret;
}

/*
 * Like btrfs_search_slot, this looks for a key in the given tree.  It uses
 * the current state of the tree together with the operations recorded in the
 * tree modification log to search for the key in a previous version of this
 * tree, as denoted by the time_seq parameter.
 *
 * Naturally, there is no support for insert, delete or cow operations.
 *
 * The resulting path and return value will be set up as if we called
 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
 */
int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
			  struct btrfs_path *p, u64 time_seq)
{
	struct extent_buffer *b;
	int slot;
	int ret;
	int err;
	int level;
	int lowest_unlock = 1;
	u8 lowest_level = 0;

	lowest_level = p->lowest_level;
	WARN_ON(p->nodes[0] != NULL);

	if (p->search_commit_root) {
		/* the commit root is already a fixed point in time */
		BUG_ON(time_seq);
		return btrfs_search_slot(NULL, root, key, p, 0, 0);
	}

again:
	b = get_old_root(root, time_seq);
	level = btrfs_header_level(b);
	p->locks[level] = BTRFS_READ_LOCK;

	while (b) {
		level = btrfs_header_level(b);
		p->nodes[level] = b;
		btrfs_clear_path_blocking(p, NULL, 0);

		/*
		 * we have a lock on b and as long as we aren't changing
		 * the tree, there is no way to for the items in b to change.
		 * It is safe to drop the lock on our parent before we
		 * go through the expensive btree search on b.
		 */
		btrfs_unlock_up_safe(p, level + 1);

		ret = bin_search(b, key, level, &slot);

		if (level != 0) {
			int dec = 0;
			/* no exact match: follow the largest key <= ours */
			if (ret && slot > 0) {
				dec = 1;
				slot -= 1;
			}
			p->slots[level] = slot;
			unlock_up(p, level, lowest_unlock, 0, NULL);

			if (level == lowest_level) {
				if (dec)
					p->slots[level]++;
				goto done;
			}

			err = read_block_for_search(NULL, root, p, &b, level,
						    slot, key, time_seq);
			if (err == -EAGAIN)
				goto again;
			if (err) {
				ret = err;
				goto done;
			}

			level = btrfs_header_level(b);
			err = btrfs_try_tree_read_lock(b);
			if (!err) {
				btrfs_set_path_blocking(p);
				btrfs_tree_read_lock(b);
				btrfs_clear_path_blocking(p, b,
							  BTRFS_READ_LOCK);
			}
			/* replay the mod log to view the block at time_seq */
			b = tree_mod_log_rewind(root->fs_info, b, time_seq);
			p->locks[level] = BTRFS_READ_LOCK;
			p->nodes[level] = b;
		} else {
			p->slots[level] = slot;
			unlock_up(p, level, lowest_unlock, 0, NULL);
			goto done;
		}
	}
	ret = 1;
done:
	if (!p->leave_spinning)
		btrfs_set_path_blocking(p);
	if (ret < 0)
		btrfs_release_path(p);

	return ret;
}

/*
 * helper to use instead of search slot if no exact match is needed but
 * instead the next or previous item should be returned.
 * When find_higher is true, the next higher item is returned, the next lower
 * otherwise.
 * When return_any and find_higher are both true, and no higher item is found,
 * return the next lower instead.
 * When return_any is true and find_higher is false, and no lower item is
 * found, return the next higher instead.
 * It returns 0 if any item is found, 1 if none is found (tree empty), and
 * < 0 on error
 */
int btrfs_search_slot_for_read(struct btrfs_root *root,
			       struct btrfs_key *key,
			       struct btrfs_path *p, int find_higher,
			       int return_any)
{
	int ret;
	struct extent_buffer *leaf;

again:
	ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
	if (ret <= 0)
		return ret;
	/*
	 * a return value of 1 means the path is at the position where the
	 * item should be inserted.  Normally this is the next bigger item,
	 * but in case the previous item is the last in a leaf, path points
	 * to the first free slot in the previous leaf, i.e. at an invalid
	 * item.
	 */
	leaf = p->nodes[0];

	if (find_higher) {
		if (p->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, p);
			if (ret <= 0)
				return ret;
			if (!return_any)
				return 1;
			/*
			 * no higher item found, return the next
			 * lower instead
			 */
			return_any = 0;
			find_higher = 0;
			btrfs_release_path(p);
			goto again;
		}
	} else {
		if (p->slots[0] == 0) {
			ret = btrfs_prev_leaf(root, p);
			if (ret < 0)
				return ret;
			if (!ret) {
				/* point at the last item of the prev leaf */
				p->slots[0] = btrfs_header_nritems(leaf) - 1;
				return 0;
			}
			if (!return_any)
				return 1;
			/*
			 * no lower item found, return the next
			 * higher instead
			 */
			return_any = 0;
			find_higher = 1;
			btrfs_release_path(p);
			goto again;
		} else {
			--p->slots[0];
		}
	}
	return 0;
}

/*
 * adjust the pointers going up the tree, starting at level
 * making sure the right key of each node is points to 'key'.
 * This is used after shifting pointers to the left, so it stops
 * fixing up pointers when a given leaf/node is not in slot 0 of the
 * higher levels
 *
 */
static void fixup_low_keys(struct btrfs_root *root, struct btrfs_path *path,
			   struct btrfs_disk_key *key, int level)
{
	int i;
	struct extent_buffer *t;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		int tslot = path->slots[i];
		if (!path->nodes[i])
			break;
		t = path->nodes[i];
		tree_mod_log_set_node_key(root->fs_info, t, tslot, 1);
		btrfs_set_node_key(t, key, tslot);
		btrfs_mark_buffer_dirty(path->nodes[i]);
		/* only slot 0 keys propagate to the parent */
		if (tslot != 0)
			break;
	}
}

/*
 * update item key.
 *
 * This function isn't completely safe.
It's the caller's responsibility * that the new key won't break the order */ void btrfs_set_item_key_safe(struct btrfs_root *root, struct btrfs_path *path, struct btrfs_key *new_key) { struct btrfs_disk_key disk_key; struct extent_buffer *eb; int slot; eb = path->nodes[0]; slot = path->slots[0]; if (slot > 0) { btrfs_item_key(eb, &disk_key, slot - 1); BUG_ON(comp_keys(&disk_key, new_key) >= 0); } if (slot < btrfs_header_nritems(eb) - 1) { btrfs_item_key(eb, &disk_key, slot + 1); BUG_ON(comp_keys(&disk_key, new_key) <= 0); } btrfs_cpu_key_to_disk(&disk_key, new_key); btrfs_set_item_key(eb, &disk_key, slot); btrfs_mark_buffer_dirty(eb); if (slot == 0) fixup_low_keys(root, path, &disk_key, 1); } /* * try to push data from one node into the next node left in the * tree. * * returns 0 if some ptrs were pushed left, < 0 if there was some horrible * error, and > 0 if there was no room in the left hand block. */ static int push_node_left(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *dst, struct extent_buffer *src, int empty) { int push_items = 0; int src_nritems; int dst_nritems; int ret = 0; src_nritems = btrfs_header_nritems(src); dst_nritems = btrfs_header_nritems(dst); push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems; WARN_ON(btrfs_header_generation(src) != trans->transid); WARN_ON(btrfs_header_generation(dst) != trans->transid); if (!empty && src_nritems <= 8) return 1; if (push_items <= 0) return 1; if (empty) { push_items = min(src_nritems, push_items); if (push_items < src_nritems) { /* leave at least 8 pointers in the node if * we aren't going to empty it */ if (src_nritems - push_items < 8) { if (push_items <= 8) return 1; push_items -= 8; } } } else push_items = min(src_nritems - 8, push_items); tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0, push_items); copy_extent_buffer(dst, src, btrfs_node_key_ptr_offset(dst_nritems), btrfs_node_key_ptr_offset(0), push_items * sizeof(struct btrfs_key_ptr)); if 
(push_items < src_nritems) {
		/*
		 * don't call tree_mod_log_eb_move here, key removal was already
		 * fully logged by tree_mod_log_eb_copy above.
		 */
		memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
				      btrfs_node_key_ptr_offset(push_items),
				      (src_nritems - push_items) *
				      sizeof(struct btrfs_key_ptr));
	}
	btrfs_set_header_nritems(src, src_nritems - push_items);
	btrfs_set_header_nritems(dst, dst_nritems + push_items);
	btrfs_mark_buffer_dirty(src);
	btrfs_mark_buffer_dirty(dst);

	return ret;
}

/*
 * try to push data from one node into the next node right in the
 * tree.
 *
 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
 * error, and > 0 if there was no room in the right hand block.
 *
 * this will only push up to 1/2 the contents of the left node over
 */
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst,
			      struct extent_buffer *src)
{
	int push_items = 0;
	int max_push;
	int src_nritems;
	int dst_nritems;
	int ret = 0;

	WARN_ON(btrfs_header_generation(src) != trans->transid);
	WARN_ON(btrfs_header_generation(dst) != trans->transid);

	src_nritems = btrfs_header_nritems(src);
	dst_nritems = btrfs_header_nritems(dst);
	push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
	if (push_items <= 0)
		return 1;

	if (src_nritems < 4)
		return 1;

	max_push = src_nritems / 2 + 1;
	/* don't try to empty the node */
	if (max_push >= src_nritems)
		return 1;

	if (max_push < push_items)
		push_items = max_push;

	tree_mod_log_eb_move(root->fs_info, dst, push_items, 0, dst_nritems);
	/* shift dst's existing pointers right to make room at slot 0 */
	memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
				      btrfs_node_key_ptr_offset(0),
				      (dst_nritems) *
				      sizeof(struct btrfs_key_ptr));

	tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
			     src_nritems - push_items, push_items);
	copy_extent_buffer(dst, src,
			   btrfs_node_key_ptr_offset(0),
			   btrfs_node_key_ptr_offset(src_nritems - push_items),
			   push_items * sizeof(struct btrfs_key_ptr));

	btrfs_set_header_nritems(src, src_nritems - push_items);
	btrfs_set_header_nritems(dst, dst_nritems + push_items);
	btrfs_mark_buffer_dirty(src);
	btrfs_mark_buffer_dirty(dst);

	return ret;
}

/*
 * helper function to insert a new root level in the tree.
 * A new node is allocated, and a single item is inserted to
 * point to the existing root
 *
 * returns zero on success or < 0 on failure.
 */
static noinline int insert_new_root(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_path *path, int level, int log_removal)
{
	u64 lower_gen;
	struct extent_buffer *lower;
	struct extent_buffer *c;
	struct extent_buffer *old;
	struct btrfs_disk_key lower_key;

	BUG_ON(path->nodes[level]);
	BUG_ON(path->nodes[level-1] != root->node);

	lower = path->nodes[level-1];
	if (level == 1)
		btrfs_item_key(lower, &lower_key, 0);
	else
		btrfs_node_key(lower, &lower_key, 0);

	c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
				   root->root_key.objectid, &lower_key,
				   level, root->node->start, 0);
	if (IS_ERR(c))
		return PTR_ERR(c);

	root_add_used(root, root->nodesize);

	memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
	/* the new root starts with exactly one pointer: the old root */
	btrfs_set_header_nritems(c, 1);
	btrfs_set_header_level(c, level);
	btrfs_set_header_bytenr(c, c->start);
	btrfs_set_header_generation(c, trans->transid);
	btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(c, root->root_key.objectid);

	write_extent_buffer(c, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(c),
			    BTRFS_FSID_SIZE);

	write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
			    (unsigned long)btrfs_header_chunk_tree_uuid(c),
			    BTRFS_UUID_SIZE);

	btrfs_set_node_key(c, &lower_key, 0);
	btrfs_set_node_blockptr(c, 0, lower->start);
	lower_gen = btrfs_header_generation(lower);
	WARN_ON(lower_gen != trans->transid);

	btrfs_set_node_ptr_generation(c, 0, lower_gen);

	btrfs_mark_buffer_dirty(c);

	old = root->node;
	tree_mod_log_set_root_pointer(root, c, log_removal);
	rcu_assign_pointer(root->node, c);

	/* the super has an extra ref to root->node */
	free_extent_buffer(old);

	add_root_to_dirty_list(root);
	extent_buffer_get(c);
	path->nodes[level] = c;
	path->locks[level] = BTRFS_WRITE_LOCK;
	path->slots[level] = 0;
	return 0;
}

/*
 * worker function to insert a single pointer in a node.
 * the node should have enough room for the pointer already
 *
 * slot and level indicate where you want the key to go, and
 * blocknr is the block the key points to.
 */
static void insert_ptr(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *path,
		       struct btrfs_disk_key *key, u64 bytenr,
		       int slot, int level)
{
	struct extent_buffer *lower;
	int nritems;
	int ret;

	BUG_ON(!path->nodes[level]);
	btrfs_assert_tree_locked(path->nodes[level]);
	lower = path->nodes[level];
	nritems = btrfs_header_nritems(lower);
	BUG_ON(slot > nritems);
	BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
	if (slot != nritems) {
		/* tree-mod-log only tracks internal nodes (level != 0) */
		if (level)
			tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
					     slot, nritems - slot);
		memmove_extent_buffer(lower,
			      btrfs_node_key_ptr_offset(slot + 1),
			      btrfs_node_key_ptr_offset(slot),
			      (nritems - slot) * sizeof(struct btrfs_key_ptr));
	}
	if (level) {
		ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
					      MOD_LOG_KEY_ADD);
		BUG_ON(ret < 0);
	}
	btrfs_set_node_key(lower, key, slot);
	btrfs_set_node_blockptr(lower, slot, bytenr);
	WARN_ON(trans->transid == 0);
	btrfs_set_node_ptr_generation(lower, slot, trans->transid);
	btrfs_set_header_nritems(lower, nritems + 1);
	btrfs_mark_buffer_dirty(lower);
}

/*
 * split the node at the specified level in path in two.
 * The path is corrected to point to the appropriate node after the split
 *
 * Before splitting this tries to make some room in the node by pushing
 * left and right, if either one works, it returns right away.
*
 * returns 0 on success and < 0 on failure
 */
static noinline int split_node(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_path *path, int level)
{
	struct extent_buffer *c;
	struct extent_buffer *split;
	struct btrfs_disk_key disk_key;
	int mid;
	int ret;
	u32 c_nritems;

	c = path->nodes[level];
	WARN_ON(btrfs_header_generation(c) != trans->transid);
	if (c == root->node) {
		/*
		 * trying to split the root, lets make a new one
		 *
		 * tree mod log: We pass 0 as log_removal parameter to
		 * insert_new_root, because that root buffer will be kept as a
		 * normal node. We are going to log removal of half of the
		 * elements below with tree_mod_log_eb_copy. We're holding a
		 * tree lock on the buffer, which is why we cannot race with
		 * other tree_mod_log users.
		 */
		ret = insert_new_root(trans, root, path, level + 1, 0);
		if (ret)
			return ret;
	} else {
		ret = push_nodes_for_insert(trans, root, path, level);
		c = path->nodes[level];
		if (!ret && btrfs_header_nritems(c) <
		    BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
			return 0;
		if (ret < 0)
			return ret;
	}

	/* split point: the second half [mid, c_nritems) moves to 'split' */
	c_nritems = btrfs_header_nritems(c);
	mid = (c_nritems + 1) / 2;
	btrfs_node_key(c, &disk_key, mid);

	split = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
					root->root_key.objectid,
					&disk_key, level, c->start, 0);
	if (IS_ERR(split))
		return PTR_ERR(split);

	root_add_used(root, root->nodesize);

	memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_level(split, btrfs_header_level(c));
	btrfs_set_header_bytenr(split, split->start);
	btrfs_set_header_generation(split, trans->transid);
	btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(split, root->root_key.objectid);
	write_extent_buffer(split, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(split),
			    BTRFS_FSID_SIZE);
	write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
			    (unsigned long)btrfs_header_chunk_tree_uuid(split),
			    BTRFS_UUID_SIZE);

	tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid);
	copy_extent_buffer(split, c,
			   btrfs_node_key_ptr_offset(0),
			   btrfs_node_key_ptr_offset(mid),
			   (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
	btrfs_set_header_nritems(split, c_nritems - mid);
	btrfs_set_header_nritems(c, mid);
	ret = 0;

	btrfs_mark_buffer_dirty(c);
	btrfs_mark_buffer_dirty(split);

	insert_ptr(trans, root, path, &disk_key, split->start,
		   path->slots[level + 1] + 1, level + 1);

	/* keep the path pointing at the half that holds our slot */
	if (path->slots[level] >= mid) {
		path->slots[level] -= mid;
		btrfs_tree_unlock(c);
		free_extent_buffer(c);
		path->nodes[level] = split;
		path->slots[level + 1] += 1;
	} else {
		btrfs_tree_unlock(split);
		free_extent_buffer(split);
	}
	return ret;
}

/*
 * how many bytes are required to store the items in a leaf.  start
 * and nr indicate which items in the leaf to check.  This totals up the
 * space used both by the item structs and the item data
 */
static int leaf_space_used(struct extent_buffer *l, int start, int nr)
{
	struct btrfs_item *start_item;
	struct btrfs_item *end_item;
	struct btrfs_map_token token;
	int data_len;
	int nritems = btrfs_header_nritems(l);
	int end = min(nritems, start + nr) - 1;

	if (!nr)
		return 0;
	btrfs_init_map_token(&token);
	start_item = btrfs_item_nr(l, start);
	end_item = btrfs_item_nr(l, end);
	data_len = btrfs_token_item_offset(l, start_item, &token) +
		btrfs_token_item_size(l, start_item, &token);
	data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
	data_len += sizeof(struct btrfs_item) * nr;
	WARN_ON(data_len < 0);
	return data_len;
}

/*
 * The space between the end of the leaf items and
 * the start of the leaf data.
IOW, how much room
 * the leaf has left for both items and data
 */
noinline int btrfs_leaf_free_space(struct btrfs_root *root,
				   struct extent_buffer *leaf)
{
	int nritems = btrfs_header_nritems(leaf);
	int ret;
	ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
	if (ret < 0) {
		/* negative free space means the leaf is corrupt */
		printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, "
		       "used %d nritems %d\n",
		       ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
		       leaf_space_used(leaf, 0, nritems), nritems);
	}
	return ret;
}

/*
 * min slot controls the lowest index we're willing to push to the
 * right.  We'll push up to and including min_slot, but no lower
 */
static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      int data_size, int empty,
				      struct extent_buffer *right,
				      int free_space, u32 left_nritems,
				      u32 min_slot)
{
	struct extent_buffer *left = path->nodes[0];
	struct extent_buffer *upper = path->nodes[1];
	struct btrfs_map_token token;
	struct btrfs_disk_key disk_key;
	int slot;
	u32 i;
	int push_space = 0;
	int push_items = 0;
	struct btrfs_item *item;
	u32 nr;
	u32 right_nritems;
	u32 data_end;
	u32 this_item_size;

	btrfs_init_map_token(&token);

	if (empty)
		nr = 0;
	else
		nr = max_t(u32, 1, min_slot);

	if (path->slots[0] >= left_nritems)
		push_space += data_size;

	slot = path->slots[1];
	/* walk from the tail; stop once an item no longer fits in 'right' */
	i = left_nritems - 1;
	while (i >= nr) {
		item = btrfs_item_nr(left, i);

		if (!empty && push_items > 0) {
			if (path->slots[0] > i)
				break;
			if (path->slots[0] == i) {
				int space = btrfs_leaf_free_space(root, left);
				if (space + push_space * 2 > free_space)
					break;
			}
		}

		if (path->slots[0] == i)
			push_space += data_size;

		this_item_size = btrfs_item_size(left, item);
		if (this_item_size + sizeof(*item) + push_space > free_space)
			break;

		push_items++;
		push_space += this_item_size + sizeof(*item);
		if (i == 0)
			break;
		i--;
	}

	if (push_items == 0)
		goto out_unlock;

	WARN_ON(!empty && push_items == left_nritems);

	/* push left to right */
	right_nritems = btrfs_header_nritems(right);

	push_space = btrfs_item_end_nr(left, left_nritems - push_items);
	push_space -= leaf_data_end(root, left);

	/* make room in the right data area */
	data_end = leaf_data_end(root, right);
	memmove_extent_buffer(right,
			      btrfs_leaf_data(right) + data_end - push_space,
			      btrfs_leaf_data(right) + data_end,
			      BTRFS_LEAF_DATA_SIZE(root) - data_end);

	/* copy from the left data area */
	copy_extent_buffer(right, left, btrfs_leaf_data(right) +
		     BTRFS_LEAF_DATA_SIZE(root) - push_space,
		     btrfs_leaf_data(left) + leaf_data_end(root, left),
		     push_space);

	memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
			      btrfs_item_nr_offset(0),
			      right_nritems * sizeof(struct btrfs_item));

	/* copy the items from left to right */
	copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
		   btrfs_item_nr_offset(left_nritems - push_items),
		   push_items * sizeof(struct btrfs_item));

	/* update the item pointers */
	right_nritems += push_items;
	btrfs_set_header_nritems(right, right_nritems);
	push_space = BTRFS_LEAF_DATA_SIZE(root);
	for (i = 0; i < right_nritems; i++) {
		item = btrfs_item_nr(right, i);
		push_space -= btrfs_token_item_size(right, item, &token);
		btrfs_set_token_item_offset(right, item, push_space, &token);
	}

	left_nritems -= push_items;
	btrfs_set_header_nritems(left, left_nritems);

	if (left_nritems)
		btrfs_mark_buffer_dirty(left);
	else
		clean_tree_block(trans, root, left);

	btrfs_mark_buffer_dirty(right);

	btrfs_item_key(right, &disk_key, 0);
	btrfs_set_node_key(upper, &disk_key, slot + 1);
	btrfs_mark_buffer_dirty(upper);

	/* then fixup the leaf pointer in the path */
	if (path->slots[0] >= left_nritems) {
		path->slots[0] -= left_nritems;
		if (btrfs_header_nritems(path->nodes[0]) == 0)
			clean_tree_block(trans, root, path->nodes[0]);
		btrfs_tree_unlock(path->nodes[0]);
		free_extent_buffer(path->nodes[0]);
		path->nodes[0] = right;
		path->slots[1] += 1;
	} else {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	return 0;

out_unlock:
	btrfs_tree_unlock(right);
	free_extent_buffer(right);
	return 1;
}

/*
 * push some data in the
path leaf to the right, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 *
 * returns 1 if the push failed because the other node didn't have enough
 * room, 0 if everything worked out and < 0 if there were major errors.
 *
 * this will push starting from min_slot to the end of the leaf.  It won't
 * push any slot lower than min_slot
 */
static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
			   *root, struct btrfs_path *path,
			   int min_data_size, int data_size,
			   int empty, u32 min_slot)
{
	struct extent_buffer *left = path->nodes[0];
	struct extent_buffer *right;
	struct extent_buffer *upper;
	int slot;
	int free_space;
	u32 left_nritems;
	int ret;

	if (!path->nodes[1])
		return 1;

	/* a right sibling exists only if the parent has a slot past ours */
	slot = path->slots[1];
	upper = path->nodes[1];
	if (slot >= btrfs_header_nritems(upper) - 1)
		return 1;

	btrfs_assert_tree_locked(path->nodes[1]);

	right = read_node_slot(root, upper, slot + 1);
	if (right == NULL)
		return 1;

	btrfs_tree_lock(right);
	btrfs_set_lock_blocking(right);

	free_space = btrfs_leaf_free_space(root, right);
	if (free_space < data_size)
		goto out_unlock;

	/* cow and double check */
	ret = btrfs_cow_block(trans, root, right, upper,
			      slot + 1, &right);
	if (ret)
		goto out_unlock;

	free_space = btrfs_leaf_free_space(root, right);
	if (free_space < data_size)
		goto out_unlock;

	left_nritems = btrfs_header_nritems(left);
	if (left_nritems == 0)
		goto out_unlock;

	/* __push_leaf_right owns the unlock/free of 'right' from here on */
	return __push_leaf_right(trans, root, path, min_data_size, empty,
				right, free_space, left_nritems, min_slot);
out_unlock:
	btrfs_tree_unlock(right);
	free_extent_buffer(right);
	return 1;
}

/*
 * push some data in the path leaf to the left, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 *
 * max_slot can put a limit on how far into the leaf we'll push items.  The
 * item at 'max_slot' won't be touched.
Use (u32)-1 to make us do all the
 * items
 */
static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path, int data_size,
				     int empty, struct extent_buffer *left,
				     int free_space, u32 right_nritems,
				     u32 max_slot)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *right = path->nodes[0];
	int i;
	int push_space = 0;
	int push_items = 0;
	struct btrfs_item *item;
	u32 old_left_nritems;
	u32 nr;
	int ret = 0;
	u32 this_item_size;
	u32 old_left_item_size;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	if (empty)
		nr = min(right_nritems, max_slot);
	else
		nr = min(right_nritems - 1, max_slot);

	/* walk from the head; stop once an item no longer fits in 'left' */
	for (i = 0; i < nr; i++) {
		item = btrfs_item_nr(right, i);

		if (!empty && push_items > 0) {
			if (path->slots[0] < i)
				break;
			if (path->slots[0] == i) {
				int space = btrfs_leaf_free_space(root, right);
				if (space + push_space * 2 > free_space)
					break;
			}
		}

		if (path->slots[0] == i)
			push_space += data_size;

		this_item_size = btrfs_item_size(right, item);
		if (this_item_size + sizeof(*item) + push_space > free_space)
			break;

		push_items++;
		push_space += this_item_size + sizeof(*item);
	}

	if (push_items == 0) {
		ret = 1;
		goto out;
	}
	if (!empty && push_items == btrfs_header_nritems(right))
		WARN_ON(1);

	/* push data from right to left */
	copy_extent_buffer(left, right,
			   btrfs_item_nr_offset(btrfs_header_nritems(left)),
			   btrfs_item_nr_offset(0),
			   push_items * sizeof(struct btrfs_item));

	push_space = BTRFS_LEAF_DATA_SIZE(root) -
		     btrfs_item_offset_nr(right, push_items - 1);

	copy_extent_buffer(left, right, btrfs_leaf_data(left) +
		     leaf_data_end(root, left) - push_space,
		     btrfs_leaf_data(right) +
		     btrfs_item_offset_nr(right, push_items - 1),
		     push_space);
	old_left_nritems = btrfs_header_nritems(left);
	BUG_ON(old_left_nritems <= 0);

	/* rebase the copied item offsets into left's data area */
	old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
	for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
		u32 ioff;

		item = btrfs_item_nr(left, i);

		ioff = btrfs_token_item_offset(left, item, &token);
		btrfs_set_token_item_offset(left, item,
		      ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size),
		      &token);
	}
	btrfs_set_header_nritems(left, old_left_nritems + push_items);

	/* fixup right node */
	if (push_items > right_nritems)
		WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
		       right_nritems);

	if (push_items < right_nritems) {
		push_space = btrfs_item_offset_nr(right, push_items - 1) -
						  leaf_data_end(root, right);
		memmove_extent_buffer(right, btrfs_leaf_data(right) +
				      BTRFS_LEAF_DATA_SIZE(root) - push_space,
				      btrfs_leaf_data(right) +
				      leaf_data_end(root, right), push_space);

		memmove_extent_buffer(right, btrfs_item_nr_offset(0),
			      btrfs_item_nr_offset(push_items),
			     (btrfs_header_nritems(right) - push_items) *
			     sizeof(struct btrfs_item));
	}
	right_nritems -= push_items;
	btrfs_set_header_nritems(right, right_nritems);
	push_space = BTRFS_LEAF_DATA_SIZE(root);
	for (i = 0; i < right_nritems; i++) {
		item = btrfs_item_nr(right, i);

		push_space = push_space - btrfs_token_item_size(right,
								item, &token);
		btrfs_set_token_item_offset(right, item, push_space, &token);
	}

	btrfs_mark_buffer_dirty(left);
	if (right_nritems)
		btrfs_mark_buffer_dirty(right);
	else
		clean_tree_block(trans, root, right);

	btrfs_item_key(right, &disk_key, 0);
	fixup_low_keys(root, path, &disk_key, 1);

	/* then fixup the leaf pointer in the path */
	if (path->slots[0] < push_items) {
		path->slots[0] += old_left_nritems;
		btrfs_tree_unlock(path->nodes[0]);
		free_extent_buffer(path->nodes[0]);
		path->nodes[0] = left;
		path->slots[1] -= 1;
	} else {
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
		path->slots[0] -= push_items;
	}
	BUG_ON(path->slots[0] < 0);
	return ret;
out:
	btrfs_tree_unlock(left);
	free_extent_buffer(left);
	return ret;
}

/*
 * push some data in the path leaf to the left, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 *
 * max_slot can put a limit on how far into the leaf we'll push items.  The
 * item at 'max_slot' won't be touched.
Use (u32)-1 to make us push all the
 * items
 */
static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
			  *root, struct btrfs_path *path, int min_data_size,
			  int data_size, int empty, u32 max_slot)
{
	struct extent_buffer *right = path->nodes[0];
	struct extent_buffer *left;
	int slot;
	int free_space;
	u32 right_nritems;
	int ret = 0;

	/* there is no left sibling when we're slot 0 of the parent */
	slot = path->slots[1];
	if (slot == 0)
		return 1;
	if (!path->nodes[1])
		return 1;

	right_nritems = btrfs_header_nritems(right);
	if (right_nritems == 0)
		return 1;

	btrfs_assert_tree_locked(path->nodes[1]);

	left = read_node_slot(root, path->nodes[1], slot - 1);
	if (left == NULL)
		return 1;

	btrfs_tree_lock(left);
	btrfs_set_lock_blocking(left);

	free_space = btrfs_leaf_free_space(root, left);
	if (free_space < data_size) {
		ret = 1;
		goto out;
	}

	/* cow and double check */
	ret = btrfs_cow_block(trans, root, left,
			      path->nodes[1], slot - 1, &left);
	if (ret) {
		/* we hit -ENOSPC, but it isn't fatal here */
		if (ret == -ENOSPC)
			ret = 1;
		goto out;
	}

	free_space = btrfs_leaf_free_space(root, left);
	if (free_space < data_size) {
		ret = 1;
		goto out;
	}

	/* __push_leaf_left owns the unlock/free of 'left' from here on */
	return __push_leaf_left(trans, root, path, min_data_size,
			       empty, left, free_space, right_nritems,
			       max_slot);
out:
	btrfs_tree_unlock(left);
	free_extent_buffer(left);
	return ret;
}

/*
 * split the path's leaf in two, making sure there is at least data_size
 * available for the resulting leaf level of the path.
 */
static noinline void copy_for_split(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *l,
				    struct extent_buffer *right,
				    int slot, int mid, int nritems)
{
	int data_copy_size;
	int rt_data_off;
	int i;
	struct btrfs_disk_key disk_key;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	/* items [mid, nritems) and their data move to 'right' */
	nritems = nritems - mid;
	btrfs_set_header_nritems(right, nritems);
	data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);

	copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
			   btrfs_item_nr_offset(mid),
			   nritems * sizeof(struct btrfs_item));

	copy_extent_buffer(right, l,
		     btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
		     data_copy_size, btrfs_leaf_data(l) +
		     leaf_data_end(root, l), data_copy_size);

	/* rebase the copied item offsets into right's data area */
	rt_data_off = BTRFS_LEAF_DATA_SIZE(root) - btrfs_item_end_nr(l, mid);

	for (i = 0; i < nritems; i++) {
		struct btrfs_item *item = btrfs_item_nr(right, i);
		u32 ioff;

		ioff = btrfs_token_item_offset(right, item, &token);
		btrfs_set_token_item_offset(right, item,
					    ioff + rt_data_off, &token);
	}

	btrfs_set_header_nritems(l, mid);
	btrfs_item_key(right, &disk_key, 0);
	insert_ptr(trans, root, path, &disk_key, right->start,
		   path->slots[1] + 1, 1);

	btrfs_mark_buffer_dirty(right);
	btrfs_mark_buffer_dirty(l);
	BUG_ON(path->slots[0] != slot);

	if (mid <= slot) {
		btrfs_tree_unlock(path->nodes[0]);
		free_extent_buffer(path->nodes[0]);
		path->nodes[0] = right;
		path->slots[0] -= mid;
		path->slots[1] += 1;
	} else {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}

	BUG_ON(path->slots[0] < 0);
}

/*
 * double splits happen when we need to insert a big item in the middle
 * of a leaf.   A double split can leave us with 3 mostly empty leaves:
 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
 *          A                 B                 C
 *
 * We avoid this by trying to push the items on either side of our target
 * into the adjacent leaves.  If all goes well we can avoid the double split
 * completely.
 */
static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  int data_size)
{
	int ret;
	int progress = 0;
	int slot;
	u32 nritems;

	slot = path->slots[0];

	/*
	 * try to push all the items after our slot into the
	 * right leaf
	 */
	ret = push_leaf_right(trans, root, path, 1, data_size, 0, slot);
	if (ret < 0)
		return ret;

	if (ret == 0)
		progress++;

	nritems = btrfs_header_nritems(path->nodes[0]);
	/*
	 * our goal is to get our slot at the start or end of a leaf.  If
	 * we've done so we're done
	 */
	if (path->slots[0] == 0 || path->slots[0] == nritems)
		return 0;

	if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
		return 0;

	/* try to push all the items before our slot into the next leaf */
	slot = path->slots[0];
	ret = push_leaf_left(trans, root, path, 1, data_size, 0, slot);
	if (ret < 0)
		return ret;

	if (ret == 0)
		progress++;

	if (progress)
		return 0;
	return 1;
}

/*
 * split the path's leaf in two, making sure there is at least data_size
 * available for the resulting leaf level of the path.
 *
 * returns 0 if all went well and < 0 on failure.
 */
static noinline int split_leaf(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_key *ins_key,
			       struct btrfs_path *path, int data_size,
			       int extend)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *l;
	u32 nritems;
	int mid;
	int slot;
	struct extent_buffer *right;
	int ret = 0;
	int wret;
	int split;
	int num_doubles = 0;
	int tried_avoid_double = 0;

	l = path->nodes[0];
	slot = path->slots[0];
	if (extend && data_size + btrfs_item_size_nr(l, slot) +
	    sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
		return -EOVERFLOW;

	/* first try to make some room by pushing left and right */
	if (data_size) {
		wret = push_leaf_right(trans, root, path, data_size,
				       data_size, 0, 0);
		if (wret < 0)
			return wret;
		if (wret) {
			wret = push_leaf_left(trans, root, path, data_size,
					      data_size, 0, (u32)-1);
			if (wret < 0)
				return wret;
		}
		l = path->nodes[0];

		/* did the pushes work? */
		if (btrfs_leaf_free_space(root, l) >= data_size)
			return 0;
	}

	if (!path->nodes[1]) {
		ret = insert_new_root(trans, root, path, 1, 1);
		if (ret)
			return ret;
	}
again:
	/*
	 * split = 0: create an empty new leaf for the insert,
	 * split = 1: single split at mid,
	 * split = 2: double split (retried at most once, see BUG_ON below)
	 */
	split = 1;
	l = path->nodes[0];
	slot = path->slots[0];
	nritems = btrfs_header_nritems(l);
	mid = (nritems + 1) / 2;

	if (mid <= slot) {
		if (nritems == 1 ||
		    leaf_space_used(l, mid, nritems - mid) + data_size >
			BTRFS_LEAF_DATA_SIZE(root)) {
			if (slot >= nritems) {
				split = 0;
			} else {
				mid = slot;
				if (mid != nritems &&
				    leaf_space_used(l, mid, nritems - mid) +
				    data_size > BTRFS_LEAF_DATA_SIZE(root)) {
					if (data_size && !tried_avoid_double)
						goto push_for_double;
					split = 2;
				}
			}
		}
	} else {
		if (leaf_space_used(l, 0, mid) + data_size >
			BTRFS_LEAF_DATA_SIZE(root)) {
			if (!extend && data_size && slot == 0) {
				split = 0;
			} else if ((extend || !data_size) && slot == 0) {
				mid = 1;
			} else {
				mid = slot;
				if (mid != nritems &&
				    leaf_space_used(l, mid, nritems - mid) +
				    data_size > BTRFS_LEAF_DATA_SIZE(root)) {
					if (data_size && !tried_avoid_double)
						goto push_for_double;
					split = 2 ;
				}
			}
		}
	}

	if (split == 0)
		btrfs_cpu_key_to_disk(&disk_key, ins_key);
	else
		btrfs_item_key(l, &disk_key, mid);

	right = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
					root->root_key.objectid,
					&disk_key, 0, l->start, 0);
	if (IS_ERR(right))
		return PTR_ERR(right);

	root_add_used(root, root->leafsize);

	memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_bytenr(right, right->start);
	btrfs_set_header_generation(right, trans->transid);
	btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(right, root->root_key.objectid);
	btrfs_set_header_level(right, 0);
	write_extent_buffer(right, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(right),
			    BTRFS_FSID_SIZE);

	write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
			    (unsigned long)btrfs_header_chunk_tree_uuid(right),
			    BTRFS_UUID_SIZE);

	if (split == 0) {
		/* nothing moves: the new empty leaf receives the insert */
		if (mid <= slot) {
			btrfs_set_header_nritems(right, 0);
			insert_ptr(trans, root, path, &disk_key,
				   right->start, path->slots[1] + 1, 1);
			btrfs_tree_unlock(path->nodes[0]);
			free_extent_buffer(path->nodes[0]);
			path->nodes[0] = right;
			path->slots[0] = 0;
			path->slots[1] += 1;
		} else {
			btrfs_set_header_nritems(right, 0);
			insert_ptr(trans, root, path, &disk_key,
				   right->start, path->slots[1], 1);
			btrfs_tree_unlock(path->nodes[0]);
			free_extent_buffer(path->nodes[0]);
			path->nodes[0] = right;
			path->slots[0] = 0;
			if (path->slots[1] == 0)
				fixup_low_keys(root, path, &disk_key, 1);
		}
		btrfs_mark_buffer_dirty(right);
		return ret;
	}

	copy_for_split(trans, root, path, l, right, slot, mid, nritems);

	if (split == 2) {
		BUG_ON(num_doubles != 0);
		num_doubles++;
		goto again;
	}

	return 0;

push_for_double:
	push_for_double_split(trans, root, path, data_size);
	tried_avoid_double = 1;
	if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
		return 0;
	goto again;
}

static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 struct btrfs_path *path, int ins_len)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	u64 extent_len = 0;
	u32 item_size;
	int ret;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
	       key.type != BTRFS_EXTENT_CSUM_KEY);

	if (btrfs_leaf_free_space(root, leaf) >= ins_len)
		return 0;

	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	if (key.type == BTRFS_EXTENT_DATA_KEY) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_len = btrfs_file_extent_num_bytes(leaf, fi);
	}
	btrfs_release_path(path);

	path->keep_locks = 1;
	path->search_for_split = 1;
	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	path->search_for_split = 0;
	if (ret < 0)
		goto err;

	ret = -EAGAIN;
	leaf = path->nodes[0];
	/* if our item isn't there or got smaller, return now */
	/*
	 * NOTE(review): the 'ret > 0' test below is dead -- ret was just
	 * overwritten with -EAGAIN; the size comparison does the real check
	 */
	if (ret > 0 || item_size != btrfs_item_size_nr(leaf, path->slots[0]))
		goto err;

	/* the leaf has changed, it now has room.
 return now */
	if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
		goto err;

	if (key.type == BTRFS_EXTENT_DATA_KEY) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
			goto err;
	}

	btrfs_set_path_blocking(path);
	ret = split_leaf(trans, root, &key, path, ins_len, 1);
	if (ret)
		goto err;

	path->keep_locks = 0;
	btrfs_unlock_up_safe(path, 1);
	return 0;
err:
	path->keep_locks = 0;
	return ret;
}

static noinline int split_item(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_path *path,
			       struct btrfs_key *new_key,
			       unsigned long split_offset)
{
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	struct btrfs_item *new_item;
	int slot;
	char *buf;
	u32 nritems;
	u32 item_size;
	u32 orig_offset;
	struct btrfs_disk_key disk_key;

	leaf = path->nodes[0];
	BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));

	btrfs_set_path_blocking(path);

	item = btrfs_item_nr(leaf, path->slots[0]);
	orig_offset = btrfs_item_offset(leaf, item);
	item_size = btrfs_item_size(leaf, item);

	/* stash the whole payload; it is rewritten as two items below */
	buf = kmalloc(item_size, GFP_NOFS);
	if (!buf)
		return -ENOMEM;

	read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
			    path->slots[0]), item_size);

	slot = path->slots[0] + 1;
	nritems = btrfs_header_nritems(leaf);
	if (slot != nritems) {
		/* shift the items */
		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
				btrfs_item_nr_offset(slot),
				(nritems - slot) * sizeof(struct btrfs_item));
	}

	btrfs_cpu_key_to_disk(&disk_key, new_key);
	btrfs_set_item_key(leaf, &disk_key, slot);

	new_item = btrfs_item_nr(leaf, slot);

	btrfs_set_item_offset(leaf, new_item, orig_offset);
	btrfs_set_item_size(leaf, new_item, item_size - split_offset);

	btrfs_set_item_offset(leaf, item,
			      orig_offset + item_size - split_offset);
	btrfs_set_item_size(leaf, item, split_offset);

	btrfs_set_header_nritems(leaf, nritems + 1);

	/* write the data for the start of the original item */
	write_extent_buffer(leaf, buf,
			    btrfs_item_ptr_offset(leaf, path->slots[0]),
			    split_offset);

	/* write the data for the new item */
	write_extent_buffer(leaf, buf + split_offset,
			    btrfs_item_ptr_offset(leaf, slot),
			    item_size - split_offset);
	btrfs_mark_buffer_dirty(leaf);

	BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
	kfree(buf);
	return 0;
}

/*
 * This function splits a single item into two items,
 * giving 'new_key' to the new item and splitting the
 * old one at split_offset (from the start of the item).
 *
 * The path may be released by this operation.  After
 * the split, the path is pointing to the old item.  The
 * new item is going to be in the same node as the old one.
 *
 * Note, the item being split must be smaller enough to live alone on
 * a tree block with room for one extra struct btrfs_item
 *
 * This allows us to split the item in place, keeping a lock on the
 * leaf the entire time.
 */
int btrfs_split_item(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_path *path,
		     struct btrfs_key *new_key,
		     unsigned long split_offset)
{
	int ret;
	ret = setup_leaf_for_split(trans, root, path,
				   sizeof(struct btrfs_item));
	if (ret)
		return ret;

	ret = split_item(trans, root, path, new_key, split_offset);
	return ret;
}

/*
 * This function duplicate a item, giving 'new_key' to the new item.
 * It guarantees both items live in the same tree leaf and the new item
 * is contiguous with the original item.
 *
 * This allows us to split file extent in place, keeping a lock on the
 * leaf the entire time.
 */
int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path,
			 struct btrfs_key *new_key)
{
	struct extent_buffer *leaf;
	int ret;
	u32 item_size;

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	ret = setup_leaf_for_split(trans, root, path,
				   item_size + sizeof(struct btrfs_item));
	if (ret)
		return ret;

	path->slots[0]++;
	setup_items_for_insert(root, path, new_key, &item_size,
			       item_size, item_size +
			       sizeof(struct btrfs_item), 1);
	leaf = path->nodes[0];
	/* copy the old item's payload into the freshly inserted slot */
	memcpy_extent_buffer(leaf,
			     btrfs_item_ptr_offset(leaf, path->slots[0]),
			     btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
			     item_size);
	return 0;
}

/*
 * make the item pointed to by the path smaller.  new_size indicates
 * how small to make it, and from_end tells us if we just chop bytes
 * off the end of the item or if we shift the item to chop bytes off
 * the front.
 */
void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
			 u32 new_size, int from_end)
{
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	u32 nritems;
	unsigned int data_end;
	unsigned int old_data_start;
	unsigned int old_size;
	unsigned int size_diff;
	int i;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	leaf = path->nodes[0];
	slot = path->slots[0];

	old_size = btrfs_item_size_nr(leaf, slot);
	if (old_size == new_size)
		return;

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	old_data_start = btrfs_item_offset_nr(leaf, slot);

	size_diff = old_size - new_size;

	BUG_ON(slot < 0);
	BUG_ON(slot >= nritems);

	/*
	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
	 */
	/* first correct the data pointers */
	for (i = slot; i < nritems; i++) {
		u32 ioff;
		item = btrfs_item_nr(leaf, i);

		ioff = btrfs_token_item_offset(leaf, item, &token);
		btrfs_set_token_item_offset(leaf, item,
					    ioff + size_diff, &token);
	}

	/* shift the data */
	if (from_end) {
		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end + size_diff, btrfs_leaf_data(leaf) +
			      data_end, old_data_start + new_size - data_end);
	} else {
		struct btrfs_disk_key disk_key;
		u64 offset;

		btrfs_item_key(leaf, &disk_key, slot);

		if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
			unsigned long ptr;
			struct btrfs_file_extent_item *fi;

			fi = btrfs_item_ptr(leaf, slot,
					    struct btrfs_file_extent_item);
			fi = (struct btrfs_file_extent_item *)(
			     (unsigned long)fi - size_diff);

			if (btrfs_file_extent_type(leaf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE) {
				ptr = btrfs_item_ptr_offset(leaf, slot);
				memmove_extent_buffer(leaf, ptr,
				      (unsigned long)fi,
				      offsetof(struct btrfs_file_extent_item,
						 disk_bytenr));
			}
		}

		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end + size_diff, btrfs_leaf_data(leaf) +
			      data_end, old_data_start - data_end);

		/* chopping from the front: the key offset grows by size_diff */
		offset = btrfs_disk_key_offset(&disk_key);
		btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
		btrfs_set_item_key(leaf, &disk_key, slot);
		if (slot == 0)
			fixup_low_keys(root, path, &disk_key, 1);
	}

	item = btrfs_item_nr(leaf, slot);
	btrfs_set_item_size(leaf, item, new_size);
	btrfs_mark_buffer_dirty(leaf);

	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
}

/*
 * make the item pointed to by the path bigger, data_size is the new size.
*/ void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path, u32 data_size) { int slot; struct extent_buffer *leaf; struct btrfs_item *item; u32 nritems; unsigned int data_end; unsigned int old_data; unsigned int old_size; int i; struct btrfs_map_token token; btrfs_init_map_token(&token); leaf = path->nodes[0]; nritems = btrfs_header_nritems(leaf); data_end = leaf_data_end(root, leaf); if (btrfs_leaf_free_space(root, leaf) < data_size) { btrfs_print_leaf(root, leaf); BUG(); } slot = path->slots[0]; old_data = btrfs_item_end_nr(leaf, slot); BUG_ON(slot < 0); if (slot >= nritems) { btrfs_print_leaf(root, leaf); printk(KERN_CRIT "slot %d too large, nritems %d\n", slot, nritems); BUG_ON(1); } /* * item0..itemN ... dataN.offset..dataN.size .. data0.size */ /* first correct the data pointers */ for (i = slot; i < nritems; i++) { u32 ioff; item = btrfs_item_nr(leaf, i); ioff = btrfs_token_item_offset(leaf, item, &token); btrfs_set_token_item_offset(leaf, item, ioff - data_size, &token); } /* shift the data */ memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) + data_end - data_size, btrfs_leaf_data(leaf) + data_end, old_data - data_end); data_end = old_data; old_size = btrfs_item_size_nr(leaf, slot); item = btrfs_item_nr(leaf, slot); btrfs_set_item_size(leaf, item, old_size + data_size); btrfs_mark_buffer_dirty(leaf); if (btrfs_leaf_free_space(root, leaf) < 0) { btrfs_print_leaf(root, leaf); BUG(); } } /* * this is a helper for btrfs_insert_empty_items, the main goal here is * to save stack depth by doing the bulk of the work in a function * that doesn't call btrfs_search_slot */ void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path, struct btrfs_key *cpu_key, u32 *data_size, u32 total_data, u32 total_size, int nr) { struct btrfs_item *item; int i; u32 nritems; unsigned int data_end; struct btrfs_disk_key disk_key; struct extent_buffer *leaf; int slot; struct btrfs_map_token token; btrfs_init_map_token(&token); leaf = path->nodes[0]; 
slot = path->slots[0]; nritems = btrfs_header_nritems(leaf); data_end = leaf_data_end(root, leaf); if (btrfs_leaf_free_space(root, leaf) < total_size) { btrfs_print_leaf(root, leaf); printk(KERN_CRIT "not enough freespace need %u have %d\n", total_size, btrfs_leaf_free_space(root, leaf)); BUG(); } if (slot != nritems) { unsigned int old_data = btrfs_item_end_nr(leaf, slot); if (old_data < data_end) { btrfs_print_leaf(root, leaf); printk(KERN_CRIT "slot %d old_data %d data_end %d\n", slot, old_data, data_end); BUG_ON(1); } /* * item0..itemN ... dataN.offset..dataN.size .. data0.size */ /* first correct the data pointers */ for (i = slot; i < nritems; i++) { u32 ioff; item = btrfs_item_nr(leaf, i); ioff = btrfs_token_item_offset(leaf, item, &token); btrfs_set_token_item_offset(leaf, item, ioff - total_data, &token); } /* shift the items */ memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr), btrfs_item_nr_offset(slot), (nritems - slot) * sizeof(struct btrfs_item)); /* shift the data */ memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) + data_end - total_data, btrfs_leaf_data(leaf) + data_end, old_data - data_end); data_end = old_data; } /* setup the item for the new data */ for (i = 0; i < nr; i++) { btrfs_cpu_key_to_disk(&disk_key, cpu_key + i); btrfs_set_item_key(leaf, &disk_key, slot + i); item = btrfs_item_nr(leaf, slot + i); btrfs_set_token_item_offset(leaf, item, data_end - data_size[i], &token); data_end -= data_size[i]; btrfs_set_token_item_size(leaf, item, data_size[i], &token); } btrfs_set_header_nritems(leaf, nritems + nr); if (slot == 0) { btrfs_cpu_key_to_disk(&disk_key, cpu_key); fixup_low_keys(root, path, &disk_key, 1); } btrfs_unlock_up_safe(path, 1); btrfs_mark_buffer_dirty(leaf); if (btrfs_leaf_free_space(root, leaf) < 0) { btrfs_print_leaf(root, leaf); BUG(); } } /* * Given a key and some data, insert items into the tree. * This does all the path init required, making room in the tree if needed. 
*/ int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_key *cpu_key, u32 *data_size, int nr) { int ret = 0; int slot; int i; u32 total_size = 0; u32 total_data = 0; for (i = 0; i < nr; i++) total_data += data_size[i]; total_size = total_data + (nr * sizeof(struct btrfs_item)); ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1); if (ret == 0) return -EEXIST; if (ret < 0) return ret; slot = path->slots[0]; BUG_ON(slot < 0); setup_items_for_insert(root, path, cpu_key, data_size, total_data, total_size, nr); return 0; } /* * Given a key and some data, insert an item into the tree. * This does all the path init required, making room in the tree if needed. */ int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_key *cpu_key, void *data, u32 data_size) { int ret = 0; struct btrfs_path *path; struct extent_buffer *leaf; unsigned long ptr; path = btrfs_alloc_path(); if (!path) return -ENOMEM; ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size); if (!ret) { leaf = path->nodes[0]; ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); write_extent_buffer(leaf, data, ptr, data_size); btrfs_mark_buffer_dirty(leaf); } btrfs_free_path(path); return ret; } /* * delete the pointer from a given node. * * the tree should have been previously balanced so the deletion does not * empty a node. 
*/ static void del_ptr(struct btrfs_root *root, struct btrfs_path *path, int level, int slot) { struct extent_buffer *parent = path->nodes[level]; u32 nritems; int ret; nritems = btrfs_header_nritems(parent); if (slot != nritems - 1) { if (level) tree_mod_log_eb_move(root->fs_info, parent, slot, slot + 1, nritems - slot - 1); memmove_extent_buffer(parent, btrfs_node_key_ptr_offset(slot), btrfs_node_key_ptr_offset(slot + 1), sizeof(struct btrfs_key_ptr) * (nritems - slot - 1)); } else if (level) { ret = tree_mod_log_insert_key(root->fs_info, parent, slot, MOD_LOG_KEY_REMOVE); BUG_ON(ret < 0); } nritems--; btrfs_set_header_nritems(parent, nritems); if (nritems == 0 && parent == root->node) { BUG_ON(btrfs_header_level(root->node) != 1); /* just turn the root into a leaf and break */ btrfs_set_header_level(root->node, 0); } else if (slot == 0) { struct btrfs_disk_key disk_key; btrfs_node_key(parent, &disk_key, 0); fixup_low_keys(root, path, &disk_key, level + 1); } btrfs_mark_buffer_dirty(parent); } /* * a helper function to delete the leaf pointed to by path->slots[1] and * path->nodes[1]. * * This deletes the pointer in path->nodes[1] and frees the leaf * block extent. zero is returned if it all worked out, < 0 otherwise. * * The path must have already been setup for deleting the leaf, including * all the proper balancing. path->nodes[1] must be locked. */ static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct extent_buffer *leaf) { WARN_ON(btrfs_header_generation(leaf) != trans->transid); del_ptr(root, path, 1, path->slots[1]); /* * btrfs_free_extent is expensive, we want to make sure we * aren't holding any locks when we call it */ btrfs_unlock_up_safe(path, 0); root_sub_used(root, leaf->len); extent_buffer_get(leaf); btrfs_free_tree_block(trans, root, leaf, 0, 1); free_extent_buffer_stale(leaf); } /* * delete the item at the leaf level in path. 
If that empties * the leaf, remove it from the tree */ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int slot, int nr) { struct extent_buffer *leaf; struct btrfs_item *item; int last_off; int dsize = 0; int ret = 0; int wret; int i; u32 nritems; struct btrfs_map_token token; btrfs_init_map_token(&token); leaf = path->nodes[0]; last_off = btrfs_item_offset_nr(leaf, slot + nr - 1); for (i = 0; i < nr; i++) dsize += btrfs_item_size_nr(leaf, slot + i); nritems = btrfs_header_nritems(leaf); if (slot + nr != nritems) { int data_end = leaf_data_end(root, leaf); memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) + data_end + dsize, btrfs_leaf_data(leaf) + data_end, last_off - data_end); for (i = slot + nr; i < nritems; i++) { u32 ioff; item = btrfs_item_nr(leaf, i); ioff = btrfs_token_item_offset(leaf, item, &token); btrfs_set_token_item_offset(leaf, item, ioff + dsize, &token); } memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot), btrfs_item_nr_offset(slot + nr), sizeof(struct btrfs_item) * (nritems - slot - nr)); } btrfs_set_header_nritems(leaf, nritems - nr); nritems -= nr; /* delete the leaf if we've emptied it */ if (nritems == 0) { if (leaf == root->node) { btrfs_set_header_level(leaf, 0); } else { btrfs_set_path_blocking(path); clean_tree_block(trans, root, leaf); btrfs_del_leaf(trans, root, path, leaf); } } else { int used = leaf_space_used(leaf, 0, nritems); if (slot == 0) { struct btrfs_disk_key disk_key; btrfs_item_key(leaf, &disk_key, 0); fixup_low_keys(root, path, &disk_key, 1); } /* delete the leaf if it is mostly empty */ if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) { /* push_leaf_left fixes the path. 
* make sure the path still points to our leaf * for possible call to del_ptr below */ slot = path->slots[1]; extent_buffer_get(leaf); btrfs_set_path_blocking(path); wret = push_leaf_left(trans, root, path, 1, 1, 1, (u32)-1); if (wret < 0 && wret != -ENOSPC) ret = wret; if (path->nodes[0] == leaf && btrfs_header_nritems(leaf)) { wret = push_leaf_right(trans, root, path, 1, 1, 1, 0); if (wret < 0 && wret != -ENOSPC) ret = wret; } if (btrfs_header_nritems(leaf) == 0) { path->slots[1] = slot; btrfs_del_leaf(trans, root, path, leaf); free_extent_buffer(leaf); ret = 0; } else { /* if we're still in the path, make sure * we're dirty. Otherwise, one of the * push_leaf functions must have already * dirtied this buffer */ if (path->nodes[0] == leaf) btrfs_mark_buffer_dirty(leaf); free_extent_buffer(leaf); } } else { btrfs_mark_buffer_dirty(leaf); } } return ret; } /* * search the tree again to find a leaf with lesser keys * returns 0 if it found something or 1 if there are no lesser leaves. * returns < 0 on io errors. * * This may release the path, and so you may lose any locks held at the * time you call it. */ int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path) { struct btrfs_key key; struct btrfs_disk_key found_key; int ret; btrfs_item_key_to_cpu(path->nodes[0], &key, 0); if (key.offset > 0) key.offset--; else if (key.type > 0) key.type--; else if (key.objectid > 0) key.objectid--; else return 1; btrfs_release_path(path); ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) return ret; btrfs_item_key(path->nodes[0], &found_key, 0); ret = comp_keys(&found_key, &key); if (ret < 0) return 0; return 1; } /* * A helper function to walk down the tree starting at min_key, and looking * for nodes or leaves that are have a minimum transaction id. 
* This is used by the btree defrag code, and tree logging * * This does not cow, but it does stuff the starting key it finds back * into min_key, so you can call btrfs_search_slot with cow=1 on the * key and get a writable path. * * This does lock as it descends, and path->keep_locks should be set * to 1 by the caller. * * This honors path->lowest_level to prevent descent past a given level * of the tree. * * min_trans indicates the oldest transaction that you are interested * in walking through. Any nodes or leaves older than min_trans are * skipped over (without reading them). * * returns zero if something useful was found, < 0 on error and 1 if there * was nothing in the tree that matched the search criteria. */ int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key, struct btrfs_key *max_key, struct btrfs_path *path, u64 min_trans) { struct extent_buffer *cur; struct btrfs_key found_key; int slot; int sret; u32 nritems; int level; int ret = 1; WARN_ON(!path->keep_locks); again: cur = btrfs_read_lock_root_node(root); level = btrfs_header_level(cur); WARN_ON(path->nodes[level]); path->nodes[level] = cur; path->locks[level] = BTRFS_READ_LOCK; if (btrfs_header_generation(cur) < min_trans) { ret = 1; goto out; } while (1) { nritems = btrfs_header_nritems(cur); level = btrfs_header_level(cur); sret = bin_search(cur, min_key, level, &slot); /* at the lowest level, we're done, setup the path and exit */ if (level == path->lowest_level) { if (slot >= nritems) goto find_next_key; ret = 0; path->slots[level] = slot; btrfs_item_key_to_cpu(cur, &found_key, slot); goto out; } if (sret && slot > 0) slot--; /* * check this node pointer against the min_trans parameters. * If it is too old, old, skip to the next one. 
*/ while (slot < nritems) { u64 blockptr; u64 gen; blockptr = btrfs_node_blockptr(cur, slot); gen = btrfs_node_ptr_generation(cur, slot); if (gen < min_trans) { slot++; continue; } break; } find_next_key: /* * we didn't find a candidate key in this node, walk forward * and find another one */ if (slot >= nritems) { path->slots[level] = slot; btrfs_set_path_blocking(path); sret = btrfs_find_next_key(root, path, min_key, level, min_trans); if (sret == 0) { btrfs_release_path(path); goto again; } else { goto out; } } /* save our key for returning back */ btrfs_node_key_to_cpu(cur, &found_key, slot); path->slots[level] = slot; if (level == path->lowest_level) { ret = 0; unlock_up(path, level, 1, 0, NULL); goto out; } btrfs_set_path_blocking(path); cur = read_node_slot(root, cur, slot); BUG_ON(!cur); /* -ENOMEM */ btrfs_tree_read_lock(cur); path->locks[level - 1] = BTRFS_READ_LOCK; path->nodes[level - 1] = cur; unlock_up(path, level, 1, 0, NULL); btrfs_clear_path_blocking(path, NULL, 0); } out: if (ret == 0) memcpy(min_key, &found_key, sizeof(found_key)); btrfs_set_path_blocking(path); return ret; } static void tree_move_down(struct btrfs_root *root, struct btrfs_path *path, int *level, int root_level) { BUG_ON(*level == 0); path->nodes[*level - 1] = read_node_slot(root, path->nodes[*level], path->slots[*level]); path->slots[*level - 1] = 0; (*level)--; } static int tree_move_next_or_upnext(struct btrfs_root *root, struct btrfs_path *path, int *level, int root_level) { int ret = 0; int nritems; nritems = btrfs_header_nritems(path->nodes[*level]); path->slots[*level]++; while (path->slots[*level] >= nritems) { if (*level == root_level) return -1; /* move upnext */ path->slots[*level] = 0; free_extent_buffer(path->nodes[*level]); path->nodes[*level] = NULL; (*level)++; path->slots[*level]++; nritems = btrfs_header_nritems(path->nodes[*level]); ret = 1; } return ret; } /* * Returns 1 if it had to move up and next. 0 is returned if it moved only next * or down. 
*/ static int tree_advance(struct btrfs_root *root, struct btrfs_path *path, int *level, int root_level, int allow_down, struct btrfs_key *key) { int ret; if (*level == 0 || !allow_down) { ret = tree_move_next_or_upnext(root, path, level, root_level); } else { tree_move_down(root, path, level, root_level); ret = 0; } if (ret >= 0) { if (*level == 0) btrfs_item_key_to_cpu(path->nodes[*level], key, path->slots[*level]); else btrfs_node_key_to_cpu(path->nodes[*level], key, path->slots[*level]); } return ret; } static int tree_compare_item(struct btrfs_root *left_root, struct btrfs_path *left_path, struct btrfs_path *right_path, char *tmp_buf) { int cmp; int len1, len2; unsigned long off1, off2; len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]); len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]); if (len1 != len2) return 1; off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]); off2 = btrfs_item_ptr_offset(right_path->nodes[0], right_path->slots[0]); read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1); cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1); if (cmp) return 1; return 0; } #define ADVANCE 1 #define ADVANCE_ONLY_NEXT -1 /* * This function compares two trees and calls the provided callback for * every changed/new/deleted item it finds. * If shared tree blocks are encountered, whole subtrees are skipped, making * the compare pretty fast on snapshotted subvolumes. * * This currently works on commit roots only. As commit roots are read only, * we don't do any locking. The commit roots are protected with transactions. * Transactions are ended and rejoined when a commit is tried in between. * * This function checks for modifications done to the trees while comparing. * If it detects a change, it aborts immediately. 
*/ int btrfs_compare_trees(struct btrfs_root *left_root, struct btrfs_root *right_root, btrfs_changed_cb_t changed_cb, void *ctx) { int ret; int cmp; struct btrfs_trans_handle *trans = NULL; struct btrfs_path *left_path = NULL; struct btrfs_path *right_path = NULL; struct btrfs_key left_key; struct btrfs_key right_key; char *tmp_buf = NULL; int left_root_level; int right_root_level; int left_level; int right_level; int left_end_reached; int right_end_reached; int advance_left; int advance_right; u64 left_blockptr; u64 right_blockptr; u64 left_start_ctransid; u64 right_start_ctransid; u64 ctransid; left_path = btrfs_alloc_path(); if (!left_path) { ret = -ENOMEM; goto out; } right_path = btrfs_alloc_path(); if (!right_path) { ret = -ENOMEM; goto out; } tmp_buf = kmalloc(left_root->leafsize, GFP_NOFS); if (!tmp_buf) { ret = -ENOMEM; goto out; } left_path->search_commit_root = 1; left_path->skip_locking = 1; right_path->search_commit_root = 1; right_path->skip_locking = 1; spin_lock(&left_root->root_item_lock); left_start_ctransid = btrfs_root_ctransid(&left_root->root_item); spin_unlock(&left_root->root_item_lock); spin_lock(&right_root->root_item_lock); right_start_ctransid = btrfs_root_ctransid(&right_root->root_item); spin_unlock(&right_root->root_item_lock); trans = btrfs_join_transaction(left_root); if (IS_ERR(trans)) { ret = PTR_ERR(trans); trans = NULL; goto out; } /* * Strategy: Go to the first items of both trees. 
Then do * * If both trees are at level 0 * Compare keys of current items * If left < right treat left item as new, advance left tree * and repeat * If left > right treat right item as deleted, advance right tree * and repeat * If left == right do deep compare of items, treat as changed if * needed, advance both trees and repeat * If both trees are at the same level but not at level 0 * Compare keys of current nodes/leafs * If left < right advance left tree and repeat * If left > right advance right tree and repeat * If left == right compare blockptrs of the next nodes/leafs * If they match advance both trees but stay at the same level * and repeat * If they don't match advance both trees while allowing to go * deeper and repeat * If tree levels are different * Advance the tree that needs it and repeat * * Advancing a tree means: * If we are at level 0, try to go to the next slot. If that's not * possible, go one level up and repeat. Stop when we found a level * where we could go to the next slot. We may at this point be on a * node or a leaf. * * If we are not at level 0 and not on shared tree blocks, go one * level deeper. * * If we are not at level 0 and on shared tree blocks, go one slot to * the right if possible or go up and right. 
*/ left_level = btrfs_header_level(left_root->commit_root); left_root_level = left_level; left_path->nodes[left_level] = left_root->commit_root; extent_buffer_get(left_path->nodes[left_level]); right_level = btrfs_header_level(right_root->commit_root); right_root_level = right_level; right_path->nodes[right_level] = right_root->commit_root; extent_buffer_get(right_path->nodes[right_level]); if (left_level == 0) btrfs_item_key_to_cpu(left_path->nodes[left_level], &left_key, left_path->slots[left_level]); else btrfs_node_key_to_cpu(left_path->nodes[left_level], &left_key, left_path->slots[left_level]); if (right_level == 0) btrfs_item_key_to_cpu(right_path->nodes[right_level], &right_key, right_path->slots[right_level]); else btrfs_node_key_to_cpu(right_path->nodes[right_level], &right_key, right_path->slots[right_level]); left_end_reached = right_end_reached = 0; advance_left = advance_right = 0; while (1) { /* * We need to make sure the transaction does not get committed * while we do anything on commit roots. This means, we need to * join and leave transactions for every item that we process. 
*/ if (trans && btrfs_should_end_transaction(trans, left_root)) { btrfs_release_path(left_path); btrfs_release_path(right_path); ret = btrfs_end_transaction(trans, left_root); trans = NULL; if (ret < 0) goto out; } /* now rejoin the transaction */ if (!trans) { trans = btrfs_join_transaction(left_root); if (IS_ERR(trans)) { ret = PTR_ERR(trans); trans = NULL; goto out; } spin_lock(&left_root->root_item_lock); ctransid = btrfs_root_ctransid(&left_root->root_item); spin_unlock(&left_root->root_item_lock); if (ctransid != left_start_ctransid) left_start_ctransid = 0; spin_lock(&right_root->root_item_lock); ctransid = btrfs_root_ctransid(&right_root->root_item); spin_unlock(&right_root->root_item_lock); if (ctransid != right_start_ctransid) right_start_ctransid = 0; if (!left_start_ctransid || !right_start_ctransid) { WARN(1, KERN_WARNING "btrfs: btrfs_compare_tree detected " "a change in one of the trees while " "iterating. This is probably a " "bug.\n"); ret = -EIO; goto out; } /* * the commit root may have changed, so start again * where we stopped */ left_path->lowest_level = left_level; right_path->lowest_level = right_level; ret = btrfs_search_slot(NULL, left_root, &left_key, left_path, 0, 0); if (ret < 0) goto out; ret = btrfs_search_slot(NULL, right_root, &right_key, right_path, 0, 0); if (ret < 0) goto out; } if (advance_left && !left_end_reached) { ret = tree_advance(left_root, left_path, &left_level, left_root_level, advance_left != ADVANCE_ONLY_NEXT, &left_key); if (ret < 0) left_end_reached = ADVANCE; advance_left = 0; } if (advance_right && !right_end_reached) { ret = tree_advance(right_root, right_path, &right_level, right_root_level, advance_right != ADVANCE_ONLY_NEXT, &right_key); if (ret < 0) right_end_reached = ADVANCE; advance_right = 0; } if (left_end_reached && right_end_reached) { ret = 0; goto out; } else if (left_end_reached) { if (right_level == 0) { ret = changed_cb(left_root, right_root, left_path, right_path, &right_key, 
BTRFS_COMPARE_TREE_DELETED, ctx); if (ret < 0) goto out; } advance_right = ADVANCE; continue; } else if (right_end_reached) { if (left_level == 0) { ret = changed_cb(left_root, right_root, left_path, right_path, &left_key, BTRFS_COMPARE_TREE_NEW, ctx); if (ret < 0) goto out; } advance_left = ADVANCE; continue; } if (left_level == 0 && right_level == 0) { cmp = btrfs_comp_cpu_keys(&left_key, &right_key); if (cmp < 0) { ret = changed_cb(left_root, right_root, left_path, right_path, &left_key, BTRFS_COMPARE_TREE_NEW, ctx); if (ret < 0) goto out; advance_left = ADVANCE; } else if (cmp > 0) { ret = changed_cb(left_root, right_root, left_path, right_path, &right_key, BTRFS_COMPARE_TREE_DELETED, ctx); if (ret < 0) goto out; advance_right = ADVANCE; } else { WARN_ON(!extent_buffer_uptodate(left_path->nodes[0])); ret = tree_compare_item(left_root, left_path, right_path, tmp_buf); if (ret) { WARN_ON(!extent_buffer_uptodate(left_path->nodes[0])); ret = changed_cb(left_root, right_root, left_path, right_path, &left_key, BTRFS_COMPARE_TREE_CHANGED, ctx); if (ret < 0) goto out; } advance_left = ADVANCE; advance_right = ADVANCE; } } else if (left_level == right_level) { cmp = btrfs_comp_cpu_keys(&left_key, &right_key); if (cmp < 0) { advance_left = ADVANCE; } else if (cmp > 0) { advance_right = ADVANCE; } else { left_blockptr = btrfs_node_blockptr( left_path->nodes[left_level], left_path->slots[left_level]); right_blockptr = btrfs_node_blockptr( right_path->nodes[right_level], right_path->slots[right_level]); if (left_blockptr == right_blockptr) { /* * As we're on a shared block, don't * allow to go deeper. 
*/ advance_left = ADVANCE_ONLY_NEXT; advance_right = ADVANCE_ONLY_NEXT; } else { advance_left = ADVANCE; advance_right = ADVANCE; } } } else if (left_level < right_level) { advance_right = ADVANCE; } else { advance_left = ADVANCE; } } out: btrfs_free_path(left_path); btrfs_free_path(right_path); kfree(tmp_buf); if (trans) { if (!ret) ret = btrfs_end_transaction(trans, left_root); else btrfs_end_transaction(trans, left_root); } return ret; } /* * this is similar to btrfs_next_leaf, but does not try to preserve * and fixup the path. It looks for and returns the next key in the * tree based on the current path and the min_trans parameters. * * 0 is returned if another key is found, < 0 if there are any errors * and 1 is returned if there are no higher keys in the tree * * path->keep_locks should be set to 1 on the search made before * calling this function. */ int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, struct btrfs_key *key, int level, u64 min_trans) { int slot; struct extent_buffer *c; WARN_ON(!path->keep_locks); while (level < BTRFS_MAX_LEVEL) { if (!path->nodes[level]) return 1; slot = path->slots[level] + 1; c = path->nodes[level]; next: if (slot >= btrfs_header_nritems(c)) { int ret; int orig_lowest; struct btrfs_key cur_key; if (level + 1 >= BTRFS_MAX_LEVEL || !path->nodes[level + 1]) return 1; if (path->locks[level + 1]) { level++; continue; } slot = btrfs_header_nritems(c) - 1; if (level == 0) btrfs_item_key_to_cpu(c, &cur_key, slot); else btrfs_node_key_to_cpu(c, &cur_key, slot); orig_lowest = path->lowest_level; btrfs_release_path(path); path->lowest_level = level; ret = btrfs_search_slot(NULL, root, &cur_key, path, 0, 0); path->lowest_level = orig_lowest; if (ret < 0) return ret; c = path->nodes[level]; slot = path->slots[level]; if (ret == 0) slot++; goto next; } if (level == 0) btrfs_item_key_to_cpu(c, key, slot); else { u64 gen = btrfs_node_ptr_generation(c, slot); if (gen < min_trans) { slot++; goto next; } 
btrfs_node_key_to_cpu(c, key, slot); } return 0; } return 1; } /* * search the tree again to find a leaf with greater keys * returns 0 if it found something or 1 if there are no greater leaves. * returns < 0 on io errors. */ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path) { return btrfs_next_old_leaf(root, path, 0); } int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path, u64 time_seq) { int slot; int level; struct extent_buffer *c; struct extent_buffer *next; struct btrfs_key key; u32 nritems; int ret; int old_spinning = path->leave_spinning; int next_rw_lock = 0; nritems = btrfs_header_nritems(path->nodes[0]); if (nritems == 0) return 1; btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1); again: level = 1; next = NULL; next_rw_lock = 0; btrfs_release_path(path); path->keep_locks = 1; path->leave_spinning = 1; if (time_seq) ret = btrfs_search_old_slot(root, &key, path, time_seq); else ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); path->keep_locks = 0; if (ret < 0) return ret; nritems = btrfs_header_nritems(path->nodes[0]); /* * by releasing the path above we dropped all our locks. A balance * could have added more items next to the key that used to be * at the very end of the block. So, check again here and * advance the path if there are now more items available. 
*/ if (nritems > 0 && path->slots[0] < nritems - 1) { if (ret == 0) path->slots[0]++; ret = 0; goto done; } while (level < BTRFS_MAX_LEVEL) { if (!path->nodes[level]) { ret = 1; goto done; } slot = path->slots[level] + 1; c = path->nodes[level]; if (slot >= btrfs_header_nritems(c)) { level++; if (level == BTRFS_MAX_LEVEL) { ret = 1; goto done; } continue; } if (next) { btrfs_tree_unlock_rw(next, next_rw_lock); free_extent_buffer(next); } next = c; next_rw_lock = path->locks[level]; ret = read_block_for_search(NULL, root, path, &next, level, slot, &key, 0); if (ret == -EAGAIN) goto again; if (ret < 0) { btrfs_release_path(path); goto done; } if (!path->skip_locking) { ret = btrfs_try_tree_read_lock(next); if (!ret && time_seq) { /* * If we don't get the lock, we may be racing * with push_leaf_left, holding that lock while * itself waiting for the leaf we've currently * locked. To solve this situation, we give up * on our lock and cycle. */ free_extent_buffer(next); btrfs_release_path(path); cond_resched(); goto again; } if (!ret) { btrfs_set_path_blocking(path); btrfs_tree_read_lock(next); btrfs_clear_path_blocking(path, next, BTRFS_READ_LOCK); } next_rw_lock = BTRFS_READ_LOCK; } break; } path->slots[level] = slot; while (1) { level--; c = path->nodes[level]; if (path->locks[level]) btrfs_tree_unlock_rw(c, path->locks[level]); free_extent_buffer(c); path->nodes[level] = next; path->slots[level] = 0; if (!path->skip_locking) path->locks[level] = next_rw_lock; if (!level) break; ret = read_block_for_search(NULL, root, path, &next, level, 0, &key, 0); if (ret == -EAGAIN) goto again; if (ret < 0) { btrfs_release_path(path); goto done; } if (!path->skip_locking) { ret = btrfs_try_tree_read_lock(next); if (!ret) { btrfs_set_path_blocking(path); btrfs_tree_read_lock(next); btrfs_clear_path_blocking(path, next, BTRFS_READ_LOCK); } next_rw_lock = BTRFS_READ_LOCK; } } ret = 0; done: unlock_up(path, 0, 1, 0, NULL); path->leave_spinning = old_spinning; if (!old_spinning) 
btrfs_set_path_blocking(path); return ret; } /* * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps * searching until it gets past min_objectid or finds an item of 'type' * * returns 0 if something is found, 1 if nothing was found and < 0 on error */ int btrfs_previous_item(struct btrfs_root *root, struct btrfs_path *path, u64 min_objectid, int type) { struct btrfs_key found_key; struct extent_buffer *leaf; u32 nritems; int ret; while (1) { if (path->slots[0] == 0) { btrfs_set_path_blocking(path); ret = btrfs_prev_leaf(root, path); if (ret != 0) return ret; } else { path->slots[0]--; } leaf = path->nodes[0]; nritems = btrfs_header_nritems(leaf); if (nritems == 0) return 1; if (path->slots[0] == nritems) path->slots[0]--; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); if (found_key.objectid < min_objectid) break; if (found_key.type == type) return 0; if (found_key.objectid == min_objectid && found_key.type < type) break; } return 1; }
gpl-2.0
joeisgood99/Z5C-Copyleft-Kernel
drivers/usb/phy/phy-nop.c
1986
6813
/* * drivers/usb/otg/nop-usb-xceiv.c * * NOP USB transceiver for all USB transceiver which are either built-in * into USB IP or which are mostly autonomous. * * Copyright (C) 2009 Texas Instruments Inc * Author: Ajay Kumar Gupta <ajay.gupta@ti.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Current status: * This provides a "nop" transceiver for PHYs which are * autonomous such as isp1504, isp1707, etc. 
*/ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/usb/otg.h> #include <linux/usb/nop-usb-xceiv.h> #include <linux/slab.h> #include <linux/clk.h> #include <linux/regulator/consumer.h> #include <linux/of.h> struct nop_usb_xceiv { struct usb_phy phy; struct device *dev; struct clk *clk; struct regulator *vcc; struct regulator *reset; }; static struct platform_device *pd; void usb_nop_xceiv_register(void) { if (pd) return; pd = platform_device_register_simple("nop_usb_xceiv", -1, NULL, 0); if (!pd) { printk(KERN_ERR "Unable to register usb nop transceiver\n"); return; } } EXPORT_SYMBOL(usb_nop_xceiv_register); void usb_nop_xceiv_unregister(void) { platform_device_unregister(pd); pd = NULL; } EXPORT_SYMBOL(usb_nop_xceiv_unregister); static int nop_set_suspend(struct usb_phy *x, int suspend) { return 0; } static int nop_init(struct usb_phy *phy) { struct nop_usb_xceiv *nop = dev_get_drvdata(phy->dev); if (!IS_ERR(nop->vcc)) { if (regulator_enable(nop->vcc)) dev_err(phy->dev, "Failed to enable power\n"); } if (!IS_ERR(nop->clk)) clk_enable(nop->clk); if (!IS_ERR(nop->reset)) { /* De-assert RESET */ if (regulator_enable(nop->reset)) dev_err(phy->dev, "Failed to de-assert reset\n"); } return 0; } static void nop_shutdown(struct usb_phy *phy) { struct nop_usb_xceiv *nop = dev_get_drvdata(phy->dev); if (!IS_ERR(nop->reset)) { /* Assert RESET */ if (regulator_disable(nop->reset)) dev_err(phy->dev, "Failed to assert reset\n"); } if (!IS_ERR(nop->clk)) clk_disable(nop->clk); if (!IS_ERR(nop->vcc)) { if (regulator_disable(nop->vcc)) dev_err(phy->dev, "Failed to disable power\n"); } } static int nop_set_peripheral(struct usb_otg *otg, struct usb_gadget *gadget) { if (!otg) return -ENODEV; if (!gadget) { otg->gadget = NULL; return -ENODEV; } otg->gadget = gadget; otg->phy->state = OTG_STATE_B_IDLE; return 0; } static int nop_set_host(struct usb_otg *otg, struct usb_bus *host) { if (!otg) return -ENODEV; if (!host) { 
otg->host = NULL; return -ENODEV; } otg->host = host; return 0; } static int nop_usb_xceiv_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct nop_usb_xceiv_platform_data *pdata = pdev->dev.platform_data; struct nop_usb_xceiv *nop; enum usb_phy_type type = USB_PHY_TYPE_USB2; int err; u32 clk_rate = 0; bool needs_vcc = false; bool needs_reset = false; nop = devm_kzalloc(&pdev->dev, sizeof(*nop), GFP_KERNEL); if (!nop) return -ENOMEM; nop->phy.otg = devm_kzalloc(&pdev->dev, sizeof(*nop->phy.otg), GFP_KERNEL); if (!nop->phy.otg) return -ENOMEM; if (dev->of_node) { struct device_node *node = dev->of_node; if (of_property_read_u32(node, "clock-frequency", &clk_rate)) clk_rate = 0; needs_vcc = of_property_read_bool(node, "vcc-supply"); needs_reset = of_property_read_bool(node, "reset-supply"); } else if (pdata) { type = pdata->type; clk_rate = pdata->clk_rate; needs_vcc = pdata->needs_vcc; needs_reset = pdata->needs_reset; } nop->clk = devm_clk_get(&pdev->dev, "main_clk"); if (IS_ERR(nop->clk)) { dev_dbg(&pdev->dev, "Can't get phy clock: %ld\n", PTR_ERR(nop->clk)); } if (!IS_ERR(nop->clk) && clk_rate) { err = clk_set_rate(nop->clk, clk_rate); if (err) { dev_err(&pdev->dev, "Error setting clock rate\n"); return err; } } if (!IS_ERR(nop->clk)) { err = clk_prepare(nop->clk); if (err) { dev_err(&pdev->dev, "Error preparing clock\n"); return err; } } nop->vcc = devm_regulator_get(&pdev->dev, "vcc"); if (IS_ERR(nop->vcc)) { dev_dbg(&pdev->dev, "Error getting vcc regulator: %ld\n", PTR_ERR(nop->vcc)); if (needs_vcc) return -EPROBE_DEFER; } nop->reset = devm_regulator_get(&pdev->dev, "reset"); if (IS_ERR(nop->reset)) { dev_dbg(&pdev->dev, "Error getting reset regulator: %ld\n", PTR_ERR(nop->reset)); if (needs_reset) return -EPROBE_DEFER; } nop->dev = &pdev->dev; nop->phy.dev = nop->dev; nop->phy.label = "nop-xceiv"; nop->phy.set_suspend = nop_set_suspend; nop->phy.init = nop_init; nop->phy.shutdown = nop_shutdown; nop->phy.state = OTG_STATE_UNDEFINED; 
nop->phy.type = type; nop->phy.otg->phy = &nop->phy; nop->phy.otg->set_host = nop_set_host; nop->phy.otg->set_peripheral = nop_set_peripheral; err = usb_add_phy_dev(&nop->phy); if (err) { dev_err(&pdev->dev, "can't register transceiver, err: %d\n", err); goto err_add; } platform_set_drvdata(pdev, nop); ATOMIC_INIT_NOTIFIER_HEAD(&nop->phy.notifier); return 0; err_add: if (!IS_ERR(nop->clk)) clk_unprepare(nop->clk); return err; } static int nop_usb_xceiv_remove(struct platform_device *pdev) { struct nop_usb_xceiv *nop = platform_get_drvdata(pdev); if (!IS_ERR(nop->clk)) clk_unprepare(nop->clk); usb_remove_phy(&nop->phy); return 0; } static const struct of_device_id nop_xceiv_dt_ids[] = { { .compatible = "usb-nop-xceiv" }, { } }; MODULE_DEVICE_TABLE(of, nop_xceiv_dt_ids); static struct platform_driver nop_usb_xceiv_driver = { .probe = nop_usb_xceiv_probe, .remove = nop_usb_xceiv_remove, .driver = { .name = "nop_usb_xceiv", .owner = THIS_MODULE, .of_match_table = of_match_ptr(nop_xceiv_dt_ids), }, }; static int __init nop_usb_xceiv_init(void) { return platform_driver_register(&nop_usb_xceiv_driver); } subsys_initcall(nop_usb_xceiv_init); static void __exit nop_usb_xceiv_exit(void) { platform_driver_unregister(&nop_usb_xceiv_driver); } module_exit(nop_usb_xceiv_exit); MODULE_ALIAS("platform:nop_usb_xceiv"); MODULE_AUTHOR("Texas Instruments Inc"); MODULE_DESCRIPTION("NOP USB Transceiver driver"); MODULE_LICENSE("GPL");
gpl-2.0
MinimalOS-AOSP/kernel_huawei_angler
net/ipv6/ip6_offload.c
1986
6546
/* * IPV6 GSO/GRO offload support * Linux INET6 implementation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/socket.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/printk.h> #include <net/protocol.h> #include <net/ipv6.h> #include "ip6_offload.h" static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto) { const struct net_offload *ops = NULL; for (;;) { struct ipv6_opt_hdr *opth; int len; if (proto != NEXTHDR_HOP) { ops = rcu_dereference(inet6_offloads[proto]); if (unlikely(!ops)) break; if (!(ops->flags & INET6_PROTO_GSO_EXTHDR)) break; } if (unlikely(!pskb_may_pull(skb, 8))) break; opth = (void *)skb->data; len = ipv6_optlen(opth); if (unlikely(!pskb_may_pull(skb, len))) break; proto = opth->nexthdr; __skb_pull(skb, len); } return proto; } static int ipv6_gso_send_check(struct sk_buff *skb) { const struct ipv6hdr *ipv6h; const struct net_offload *ops; int err = -EINVAL; if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) goto out; ipv6h = ipv6_hdr(skb); __skb_pull(skb, sizeof(*ipv6h)); err = -EPROTONOSUPPORT; rcu_read_lock(); ops = rcu_dereference(inet6_offloads[ ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr)]); if (likely(ops && ops->callbacks.gso_send_check)) { skb_reset_transport_header(skb); err = ops->callbacks.gso_send_check(skb); } rcu_read_unlock(); out: return err; } static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, netdev_features_t features) { struct sk_buff *segs = ERR_PTR(-EINVAL); struct ipv6hdr *ipv6h; const struct net_offload *ops; int proto; struct frag_hdr *fptr; unsigned int unfrag_ip6hlen; u8 *prevhdr; int offset = 0; if (unlikely(skb_shinfo(skb)->gso_type & ~(SKB_GSO_UDP | SKB_GSO_DODGY | SKB_GSO_TCP_ECN | SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL | SKB_GSO_TCPV6 | 
0))) goto out; if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) goto out; ipv6h = ipv6_hdr(skb); __skb_pull(skb, sizeof(*ipv6h)); segs = ERR_PTR(-EPROTONOSUPPORT); proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr); rcu_read_lock(); ops = rcu_dereference(inet6_offloads[proto]); if (likely(ops && ops->callbacks.gso_segment)) { skb_reset_transport_header(skb); segs = ops->callbacks.gso_segment(skb, features); } rcu_read_unlock(); if (IS_ERR(segs)) goto out; for (skb = segs; skb; skb = skb->next) { ipv6h = ipv6_hdr(skb); ipv6h->payload_len = htons(skb->len - skb->mac_len - sizeof(*ipv6h)); if (proto == IPPROTO_UDP) { unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr); fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen); fptr->frag_off = htons(offset); if (skb->next != NULL) fptr->frag_off |= htons(IP6_MF); offset += (ntohs(ipv6h->payload_len) - sizeof(struct frag_hdr)); } } out: return segs; } static struct sk_buff **ipv6_gro_receive(struct sk_buff **head, struct sk_buff *skb) { const struct net_offload *ops; struct sk_buff **pp = NULL; struct sk_buff *p; struct ipv6hdr *iph; unsigned int nlen; unsigned int hlen; unsigned int off; int flush = 1; int proto; __wsum csum; off = skb_gro_offset(skb); hlen = off + sizeof(*iph); iph = skb_gro_header_fast(skb, off); if (skb_gro_header_hard(skb, hlen)) { iph = skb_gro_header_slow(skb, hlen, off); if (unlikely(!iph)) goto out; } skb_gro_pull(skb, sizeof(*iph)); skb_set_transport_header(skb, skb_gro_offset(skb)); flush += ntohs(iph->payload_len) != skb_gro_len(skb); rcu_read_lock(); proto = iph->nexthdr; ops = rcu_dereference(inet6_offloads[proto]); if (!ops || !ops->callbacks.gro_receive) { __pskb_pull(skb, skb_gro_offset(skb)); proto = ipv6_gso_pull_exthdrs(skb, proto); skb_gro_pull(skb, -skb_transport_offset(skb)); skb_reset_transport_header(skb); __skb_push(skb, skb_gro_offset(skb)); ops = rcu_dereference(inet6_offloads[proto]); if (!ops || !ops->callbacks.gro_receive) goto out_unlock; iph = 
ipv6_hdr(skb); } NAPI_GRO_CB(skb)->proto = proto; flush--; nlen = skb_network_header_len(skb); for (p = *head; p; p = p->next) { const struct ipv6hdr *iph2; __be32 first_word; /* <Version:4><Traffic_Class:8><Flow_Label:20> */ if (!NAPI_GRO_CB(p)->same_flow) continue; iph2 = ipv6_hdr(p); first_word = *(__be32 *)iph ^ *(__be32 *)iph2 ; /* All fields must match except length and Traffic Class. */ if (nlen != skb_network_header_len(p) || (first_word & htonl(0xF00FFFFF)) || memcmp(&iph->nexthdr, &iph2->nexthdr, nlen - offsetof(struct ipv6hdr, nexthdr))) { NAPI_GRO_CB(p)->same_flow = 0; continue; } /* flush if Traffic Class fields are different */ NAPI_GRO_CB(p)->flush |= !!(first_word & htonl(0x0FF00000)); NAPI_GRO_CB(p)->flush |= flush; } NAPI_GRO_CB(skb)->flush |= flush; csum = skb->csum; skb_postpull_rcsum(skb, iph, skb_network_header_len(skb)); pp = ops->callbacks.gro_receive(head, skb); skb->csum = csum; out_unlock: rcu_read_unlock(); out: NAPI_GRO_CB(skb)->flush |= flush; return pp; } static int ipv6_gro_complete(struct sk_buff *skb) { const struct net_offload *ops; struct ipv6hdr *iph = ipv6_hdr(skb); int err = -ENOSYS; iph->payload_len = htons(skb->len - skb_network_offset(skb) - sizeof(*iph)); rcu_read_lock(); ops = rcu_dereference(inet6_offloads[NAPI_GRO_CB(skb)->proto]); if (WARN_ON(!ops || !ops->callbacks.gro_complete)) goto out_unlock; err = ops->callbacks.gro_complete(skb); out_unlock: rcu_read_unlock(); return err; } static struct packet_offload ipv6_packet_offload __read_mostly = { .type = cpu_to_be16(ETH_P_IPV6), .callbacks = { .gso_send_check = ipv6_gso_send_check, .gso_segment = ipv6_gso_segment, .gro_receive = ipv6_gro_receive, .gro_complete = ipv6_gro_complete, }, }; static int __init ipv6_offload_init(void) { if (tcpv6_offload_init() < 0) pr_crit("%s: Cannot add TCP protocol offload\n", __func__); if (udp_offload_init() < 0) pr_crit("%s: Cannot add UDP protocol offload\n", __func__); if (ipv6_exthdrs_offload_init() < 0) pr_crit("%s: Cannot add 
EXTHDRS protocol offload\n", __func__); dev_add_offload(&ipv6_packet_offload); return 0; } fs_initcall(ipv6_offload_init);
gpl-2.0
jmztaylor/android_kernel_amazon_ariel
sound/soc/sh/siu_dai.c
2242
21134
/* * siu_dai.c - ALSA SoC driver for Renesas SH7343, SH7722 SIU peripheral. * * Copyright (C) 2009-2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de> * Copyright (C) 2006 Carlos Munoz <carlos@kenati.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/delay.h> #include <linux/firmware.h> #include <linux/pm_runtime.h> #include <linux/slab.h> #include <linux/module.h> #include <asm/clock.h> #include <asm/siu.h> #include <sound/control.h> #include <sound/soc.h> #include "siu.h" /* Board specifics */ #if defined(CONFIG_CPU_SUBTYPE_SH7722) # define SIU_MAX_VOLUME 0x1000 #else # define SIU_MAX_VOLUME 0x7fff #endif #define PRAM_SIZE 0x2000 #define XRAM_SIZE 0x800 #define YRAM_SIZE 0x800 #define XRAM_OFFSET 0x4000 #define YRAM_OFFSET 0x6000 #define REG_OFFSET 0xc000 #define PLAYBACK_ENABLED 1 #define CAPTURE_ENABLED 2 #define VOLUME_CAPTURE 0 #define VOLUME_PLAYBACK 1 #define DFLT_VOLUME_LEVEL 0x08000800 /* * SPDIF is only available on port A and on some SIU implementations it is only * available for input. 
Due to the lack of hardware to test it, SPDIF is left * disabled in this driver version */ struct format_flag { u32 i2s; u32 pcm; u32 spdif; u32 mask; }; struct port_flag { struct format_flag playback; struct format_flag capture; }; struct siu_info *siu_i2s_data; static struct port_flag siu_flags[SIU_PORT_NUM] = { [SIU_PORT_A] = { .playback = { .i2s = 0x50000000, .pcm = 0x40000000, .spdif = 0x80000000, /* not on all SIU versions */ .mask = 0xd0000000, }, .capture = { .i2s = 0x05000000, .pcm = 0x04000000, .spdif = 0x08000000, .mask = 0x0d000000, }, }, [SIU_PORT_B] = { .playback = { .i2s = 0x00500000, .pcm = 0x00400000, .spdif = 0, /* impossible - turn off */ .mask = 0x00500000, }, .capture = { .i2s = 0x00050000, .pcm = 0x00040000, .spdif = 0, /* impossible - turn off */ .mask = 0x00050000, }, }, }; static void siu_dai_start(struct siu_port *port_info) { struct siu_info *info = siu_i2s_data; u32 __iomem *base = info->reg; dev_dbg(port_info->pcm->card->dev, "%s\n", __func__); /* Issue software reset to siu */ siu_write32(base + SIU_SRCTL, 0); /* Wait for the reset to take effect */ udelay(1); port_info->stfifo = 0; port_info->trdat = 0; /* portA, portB, SIU operate */ siu_write32(base + SIU_SRCTL, 0x301); /* portA=256fs, portB=256fs */ siu_write32(base + SIU_CKCTL, 0x40400000); /* portA's BRG does not divide SIUCKA */ siu_write32(base + SIU_BRGASEL, 0); siu_write32(base + SIU_BRRA, 0); /* portB's BRG divides SIUCKB by half */ siu_write32(base + SIU_BRGBSEL, 1); siu_write32(base + SIU_BRRB, 0); siu_write32(base + SIU_IFCTL, 0x44440000); /* portA: 32 bit/fs, master; portB: 32 bit/fs, master */ siu_write32(base + SIU_SFORM, 0x0c0c0000); /* * Volume levels: looks like the DSP firmware implements volume controls * differently from what's described in the datasheet */ siu_write32(base + SIU_SBDVCA, port_info->playback.volume); siu_write32(base + SIU_SBDVCB, port_info->capture.volume); } static void siu_dai_stop(struct siu_port *port_info) { struct siu_info *info = 
siu_i2s_data; u32 __iomem *base = info->reg; /* SIU software reset */ siu_write32(base + SIU_SRCTL, 0); } static void siu_dai_spbAselect(struct siu_port *port_info) { struct siu_info *info = siu_i2s_data; struct siu_firmware *fw = &info->fw; u32 *ydef = fw->yram0; u32 idx; /* path A use */ if (!info->port_id) idx = 1; /* portA */ else idx = 2; /* portB */ ydef[0] = (fw->spbpar[idx].ab1a << 16) | (fw->spbpar[idx].ab0a << 8) | (fw->spbpar[idx].dir << 7) | 3; ydef[1] = fw->yram0[1]; /* 0x03000300 */ ydef[2] = (16 / 2) << 24; ydef[3] = fw->yram0[3]; /* 0 */ ydef[4] = fw->yram0[4]; /* 0 */ ydef[7] = fw->spbpar[idx].event; port_info->stfifo |= fw->spbpar[idx].stfifo; port_info->trdat |= fw->spbpar[idx].trdat; } static void siu_dai_spbBselect(struct siu_port *port_info) { struct siu_info *info = siu_i2s_data; struct siu_firmware *fw = &info->fw; u32 *ydef = fw->yram0; u32 idx; /* path B use */ if (!info->port_id) idx = 7; /* portA */ else idx = 8; /* portB */ ydef[5] = (fw->spbpar[idx].ab1a << 16) | (fw->spbpar[idx].ab0a << 8) | 1; ydef[6] = fw->spbpar[idx].event; port_info->stfifo |= fw->spbpar[idx].stfifo; port_info->trdat |= fw->spbpar[idx].trdat; } static void siu_dai_open(struct siu_stream *siu_stream) { struct siu_info *info = siu_i2s_data; u32 __iomem *base = info->reg; u32 srctl, ifctl; srctl = siu_read32(base + SIU_SRCTL); ifctl = siu_read32(base + SIU_IFCTL); switch (info->port_id) { case SIU_PORT_A: /* portA operates */ srctl |= 0x200; ifctl &= ~0xc2; break; case SIU_PORT_B: /* portB operates */ srctl |= 0x100; ifctl &= ~0x31; break; } siu_write32(base + SIU_SRCTL, srctl); /* Unmute and configure portA */ siu_write32(base + SIU_IFCTL, ifctl); } /* * At the moment only fixed Left-upper, Left-lower, Right-upper, Right-lower * packing is supported */ static void siu_dai_pcmdatapack(struct siu_stream *siu_stream) { struct siu_info *info = siu_i2s_data; u32 __iomem *base = info->reg; u32 dpak; dpak = siu_read32(base + SIU_DPAK); switch (info->port_id) { case 
SIU_PORT_A: dpak &= ~0xc0000000; break; case SIU_PORT_B: dpak &= ~0x00c00000; break; } siu_write32(base + SIU_DPAK, dpak); } static int siu_dai_spbstart(struct siu_port *port_info) { struct siu_info *info = siu_i2s_data; u32 __iomem *base = info->reg; struct siu_firmware *fw = &info->fw; u32 *ydef = fw->yram0; int cnt; u32 __iomem *add; u32 *ptr; /* Load SPB Program in PRAM */ ptr = fw->pram0; add = info->pram; for (cnt = 0; cnt < PRAM0_SIZE; cnt++, add++, ptr++) siu_write32(add, *ptr); ptr = fw->pram1; add = info->pram + (0x0100 / sizeof(u32)); for (cnt = 0; cnt < PRAM1_SIZE; cnt++, add++, ptr++) siu_write32(add, *ptr); /* XRAM initialization */ add = info->xram; for (cnt = 0; cnt < XRAM0_SIZE + XRAM1_SIZE + XRAM2_SIZE; cnt++, add++) siu_write32(add, 0); /* YRAM variable area initialization */ add = info->yram; for (cnt = 0; cnt < YRAM_DEF_SIZE; cnt++, add++) siu_write32(add, ydef[cnt]); /* YRAM FIR coefficient area initialization */ add = info->yram + (0x0200 / sizeof(u32)); for (cnt = 0; cnt < YRAM_FIR_SIZE; cnt++, add++) siu_write32(add, fw->yram_fir_coeff[cnt]); /* YRAM IIR coefficient area initialization */ add = info->yram + (0x0600 / sizeof(u32)); for (cnt = 0; cnt < YRAM_IIR_SIZE; cnt++, add++) siu_write32(add, 0); siu_write32(base + SIU_TRDAT, port_info->trdat); port_info->trdat = 0x0; /* SPB start condition: software */ siu_write32(base + SIU_SBACTIV, 0); /* Start SPB */ siu_write32(base + SIU_SBCTL, 0xc0000000); /* Wait for program to halt */ cnt = 0x10000; while (--cnt && siu_read32(base + SIU_SBCTL) != 0x80000000) cpu_relax(); if (!cnt) return -EBUSY; /* SPB program start address setting */ siu_write32(base + SIU_SBPSET, 0x00400000); /* SPB hardware start(FIFOCTL source) */ siu_write32(base + SIU_SBACTIV, 0xc0000000); return 0; } static void siu_dai_spbstop(struct siu_port *port_info) { struct siu_info *info = siu_i2s_data; u32 __iomem *base = info->reg; siu_write32(base + SIU_SBACTIV, 0); /* SPB stop */ siu_write32(base + SIU_SBCTL, 0); 
port_info->stfifo = 0; } /* API functions */ /* Playback and capture hardware properties are identical */ static struct snd_pcm_hardware siu_dai_pcm_hw = { .info = SNDRV_PCM_INFO_INTERLEAVED, .formats = SNDRV_PCM_FMTBIT_S16, .rates = SNDRV_PCM_RATE_8000_48000, .rate_min = 8000, .rate_max = 48000, .channels_min = 2, .channels_max = 2, .buffer_bytes_max = SIU_BUFFER_BYTES_MAX, .period_bytes_min = SIU_PERIOD_BYTES_MIN, .period_bytes_max = SIU_PERIOD_BYTES_MAX, .periods_min = SIU_PERIODS_MIN, .periods_max = SIU_PERIODS_MAX, }; static int siu_dai_info_volume(struct snd_kcontrol *kctrl, struct snd_ctl_elem_info *uinfo) { struct siu_port *port_info = snd_kcontrol_chip(kctrl); dev_dbg(port_info->pcm->card->dev, "%s\n", __func__); uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; uinfo->value.integer.max = SIU_MAX_VOLUME; return 0; } static int siu_dai_get_volume(struct snd_kcontrol *kctrl, struct snd_ctl_elem_value *ucontrol) { struct siu_port *port_info = snd_kcontrol_chip(kctrl); struct device *dev = port_info->pcm->card->dev; u32 vol; dev_dbg(dev, "%s\n", __func__); switch (kctrl->private_value) { case VOLUME_PLAYBACK: /* Playback is always on port 0 */ vol = port_info->playback.volume; ucontrol->value.integer.value[0] = vol & 0xffff; ucontrol->value.integer.value[1] = vol >> 16 & 0xffff; break; case VOLUME_CAPTURE: /* Capture is always on port 1 */ vol = port_info->capture.volume; ucontrol->value.integer.value[0] = vol & 0xffff; ucontrol->value.integer.value[1] = vol >> 16 & 0xffff; break; default: dev_err(dev, "%s() invalid private_value=%ld\n", __func__, kctrl->private_value); return -EINVAL; } return 0; } static int siu_dai_put_volume(struct snd_kcontrol *kctrl, struct snd_ctl_elem_value *ucontrol) { struct siu_port *port_info = snd_kcontrol_chip(kctrl); struct device *dev = port_info->pcm->card->dev; struct siu_info *info = siu_i2s_data; u32 __iomem *base = info->reg; u32 new_vol; u32 cur_vol; dev_dbg(dev, "%s\n", __func__); 
if (ucontrol->value.integer.value[0] < 0 || ucontrol->value.integer.value[0] > SIU_MAX_VOLUME || ucontrol->value.integer.value[1] < 0 || ucontrol->value.integer.value[1] > SIU_MAX_VOLUME) return -EINVAL; new_vol = ucontrol->value.integer.value[0] | ucontrol->value.integer.value[1] << 16; /* See comment above - DSP firmware implementation */ switch (kctrl->private_value) { case VOLUME_PLAYBACK: /* Playback is always on port 0 */ cur_vol = port_info->playback.volume; siu_write32(base + SIU_SBDVCA, new_vol); port_info->playback.volume = new_vol; break; case VOLUME_CAPTURE: /* Capture is always on port 1 */ cur_vol = port_info->capture.volume; siu_write32(base + SIU_SBDVCB, new_vol); port_info->capture.volume = new_vol; break; default: dev_err(dev, "%s() invalid private_value=%ld\n", __func__, kctrl->private_value); return -EINVAL; } if (cur_vol != new_vol) return 1; return 0; } static struct snd_kcontrol_new playback_controls = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "PCM Playback Volume", .index = 0, .info = siu_dai_info_volume, .get = siu_dai_get_volume, .put = siu_dai_put_volume, .private_value = VOLUME_PLAYBACK, }; static struct snd_kcontrol_new capture_controls = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "PCM Capture Volume", .index = 0, .info = siu_dai_info_volume, .get = siu_dai_get_volume, .put = siu_dai_put_volume, .private_value = VOLUME_CAPTURE, }; int siu_init_port(int port, struct siu_port **port_info, struct snd_card *card) { struct device *dev = card->dev; struct snd_kcontrol *kctrl; int ret; *port_info = kzalloc(sizeof(**port_info), GFP_KERNEL); if (!*port_info) return -ENOMEM; dev_dbg(dev, "%s: port #%d@%p\n", __func__, port, *port_info); (*port_info)->playback.volume = DFLT_VOLUME_LEVEL; (*port_info)->capture.volume = DFLT_VOLUME_LEVEL; /* * Add mixer support. The SPB is used to change the volume. Both * ports use the same SPB. Therefore, we only register one * control instance since it will be used by both channels. 
* In error case we continue without controls. */ kctrl = snd_ctl_new1(&playback_controls, *port_info); ret = snd_ctl_add(card, kctrl); if (ret < 0) dev_err(dev, "failed to add playback controls %p port=%d err=%d\n", kctrl, port, ret); kctrl = snd_ctl_new1(&capture_controls, *port_info); ret = snd_ctl_add(card, kctrl); if (ret < 0) dev_err(dev, "failed to add capture controls %p port=%d err=%d\n", kctrl, port, ret); return 0; } void siu_free_port(struct siu_port *port_info) { kfree(port_info); } static int siu_dai_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct siu_info *info = snd_soc_dai_get_drvdata(dai); struct snd_pcm_runtime *rt = substream->runtime; struct siu_port *port_info = siu_port_info(substream); int ret; dev_dbg(substream->pcm->card->dev, "%s: port=%d@%p\n", __func__, info->port_id, port_info); snd_soc_set_runtime_hwparams(substream, &siu_dai_pcm_hw); ret = snd_pcm_hw_constraint_integer(rt, SNDRV_PCM_HW_PARAM_PERIODS); if (unlikely(ret < 0)) return ret; siu_dai_start(port_info); return 0; } static void siu_dai_shutdown(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct siu_info *info = snd_soc_dai_get_drvdata(dai); struct siu_port *port_info = siu_port_info(substream); dev_dbg(substream->pcm->card->dev, "%s: port=%d@%p\n", __func__, info->port_id, port_info); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) port_info->play_cap &= ~PLAYBACK_ENABLED; else port_info->play_cap &= ~CAPTURE_ENABLED; /* Stop the siu if the other stream is not using it */ if (!port_info->play_cap) { /* during stmread or stmwrite ? 
*/ BUG_ON(port_info->playback.rw_flg || port_info->capture.rw_flg); siu_dai_spbstop(port_info); siu_dai_stop(port_info); } } /* PCM part of siu_dai_playback_prepare() / siu_dai_capture_prepare() */ static int siu_dai_prepare(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct siu_info *info = snd_soc_dai_get_drvdata(dai); struct snd_pcm_runtime *rt = substream->runtime; struct siu_port *port_info = siu_port_info(substream); struct siu_stream *siu_stream; int self, ret; dev_dbg(substream->pcm->card->dev, "%s: port %d, active streams %lx, %d channels\n", __func__, info->port_id, port_info->play_cap, rt->channels); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { self = PLAYBACK_ENABLED; siu_stream = &port_info->playback; } else { self = CAPTURE_ENABLED; siu_stream = &port_info->capture; } /* Set up the siu if not already done */ if (!port_info->play_cap) { siu_stream->rw_flg = 0; /* stream-data transfer flag */ siu_dai_spbAselect(port_info); siu_dai_spbBselect(port_info); siu_dai_open(siu_stream); siu_dai_pcmdatapack(siu_stream); ret = siu_dai_spbstart(port_info); if (ret < 0) goto fail; } else { ret = 0; } port_info->play_cap |= self; fail: return ret; } /* * SIU can set bus format to I2S / PCM / SPDIF independently for playback and * capture, however, the current API sets the bus format globally for a DAI. 
*/ static int siu_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) { struct siu_info *info = snd_soc_dai_get_drvdata(dai); u32 __iomem *base = info->reg; u32 ifctl; dev_dbg(dai->dev, "%s: fmt 0x%x on port %d\n", __func__, fmt, info->port_id); if (info->port_id < 0) return -ENODEV; /* Here select between I2S / PCM / SPDIF */ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: ifctl = siu_flags[info->port_id].playback.i2s | siu_flags[info->port_id].capture.i2s; break; case SND_SOC_DAIFMT_LEFT_J: ifctl = siu_flags[info->port_id].playback.pcm | siu_flags[info->port_id].capture.pcm; break; /* SPDIF disabled - see comment at the top */ default: return -EINVAL; } ifctl |= ~(siu_flags[info->port_id].playback.mask | siu_flags[info->port_id].capture.mask) & siu_read32(base + SIU_IFCTL); siu_write32(base + SIU_IFCTL, ifctl); return 0; } static int siu_dai_set_sysclk(struct snd_soc_dai *dai, int clk_id, unsigned int freq, int dir) { struct clk *siu_clk, *parent_clk; char *siu_name, *parent_name; int ret; if (dir != SND_SOC_CLOCK_IN) return -EINVAL; dev_dbg(dai->dev, "%s: using clock %d\n", __func__, clk_id); switch (clk_id) { case SIU_CLKA_PLL: siu_name = "siua_clk"; parent_name = "pll_clk"; break; case SIU_CLKA_EXT: siu_name = "siua_clk"; parent_name = "siumcka_clk"; break; case SIU_CLKB_PLL: siu_name = "siub_clk"; parent_name = "pll_clk"; break; case SIU_CLKB_EXT: siu_name = "siub_clk"; parent_name = "siumckb_clk"; break; default: return -EINVAL; } siu_clk = clk_get(dai->dev, siu_name); if (IS_ERR(siu_clk)) { dev_err(dai->dev, "%s: cannot get a SIU clock: %ld\n", __func__, PTR_ERR(siu_clk)); return PTR_ERR(siu_clk); } parent_clk = clk_get(dai->dev, parent_name); if (IS_ERR(parent_clk)) { ret = PTR_ERR(parent_clk); dev_err(dai->dev, "cannot get a SIU clock parent: %d\n", ret); goto epclkget; } ret = clk_set_parent(siu_clk, parent_clk); if (ret < 0) { dev_err(dai->dev, "cannot reparent the SIU clock: %d\n", ret); goto eclksetp; } ret = 
clk_set_rate(siu_clk, freq); if (ret < 0) dev_err(dai->dev, "cannot set SIU clock rate: %d\n", ret); /* TODO: when clkdev gets reference counting we'll move these to siu_dai_shutdown() */ eclksetp: clk_put(parent_clk); epclkget: clk_put(siu_clk); return ret; } static const struct snd_soc_dai_ops siu_dai_ops = { .startup = siu_dai_startup, .shutdown = siu_dai_shutdown, .prepare = siu_dai_prepare, .set_sysclk = siu_dai_set_sysclk, .set_fmt = siu_dai_set_fmt, }; static struct snd_soc_dai_driver siu_i2s_dai = { .name = "siu-i2s-dai", .playback = { .channels_min = 2, .channels_max = 2, .formats = SNDRV_PCM_FMTBIT_S16, .rates = SNDRV_PCM_RATE_8000_48000, }, .capture = { .channels_min = 2, .channels_max = 2, .formats = SNDRV_PCM_FMTBIT_S16, .rates = SNDRV_PCM_RATE_8000_48000, }, .ops = &siu_dai_ops, }; static const struct snd_soc_component_driver siu_i2s_component = { .name = "siu-i2s", }; static int siu_probe(struct platform_device *pdev) { const struct firmware *fw_entry; struct resource *res, *region; struct siu_info *info; int ret; info = kmalloc(sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; siu_i2s_data = info; info->dev = &pdev->dev; ret = request_firmware(&fw_entry, "siu_spb.bin", &pdev->dev); if (ret) goto ereqfw; /* * Loaded firmware is "const" - read only, but we have to modify it in * snd_siu_sh7343_spbAselect() and snd_siu_sh7343_spbBselect() */ memcpy(&info->fw, fw_entry->data, fw_entry->size); release_firmware(fw_entry); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { ret = -ENODEV; goto egetres; } region = request_mem_region(res->start, resource_size(res), pdev->name); if (!region) { dev_err(&pdev->dev, "SIU region already claimed\n"); ret = -EBUSY; goto ereqmemreg; } ret = -ENOMEM; info->pram = ioremap(res->start, PRAM_SIZE); if (!info->pram) goto emappram; info->xram = ioremap(res->start + XRAM_OFFSET, XRAM_SIZE); if (!info->xram) goto emapxram; info->yram = ioremap(res->start + YRAM_OFFSET, YRAM_SIZE); if (!info->yram) goto 
emapyram; info->reg = ioremap(res->start + REG_OFFSET, resource_size(res) - REG_OFFSET); if (!info->reg) goto emapreg; dev_set_drvdata(&pdev->dev, info); /* register using ARRAY version so we can keep dai name */ ret = snd_soc_register_component(&pdev->dev, &siu_i2s_component, &siu_i2s_dai, 1); if (ret < 0) goto edaiinit; ret = snd_soc_register_platform(&pdev->dev, &siu_platform); if (ret < 0) goto esocregp; pm_runtime_enable(&pdev->dev); return ret; esocregp: snd_soc_unregister_component(&pdev->dev); edaiinit: iounmap(info->reg); emapreg: iounmap(info->yram); emapyram: iounmap(info->xram); emapxram: iounmap(info->pram); emappram: release_mem_region(res->start, resource_size(res)); ereqmemreg: egetres: ereqfw: kfree(info); return ret; } static int siu_remove(struct platform_device *pdev) { struct siu_info *info = dev_get_drvdata(&pdev->dev); struct resource *res; pm_runtime_disable(&pdev->dev); snd_soc_unregister_platform(&pdev->dev); snd_soc_unregister_component(&pdev->dev); iounmap(info->reg); iounmap(info->yram); iounmap(info->xram); iounmap(info->pram); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res) release_mem_region(res->start, resource_size(res)); kfree(info); return 0; } static struct platform_driver siu_driver = { .driver = { .owner = THIS_MODULE, .name = "siu-pcm-audio", }, .probe = siu_probe, .remove = siu_remove, }; module_platform_driver(siu_driver); MODULE_AUTHOR("Carlos Munoz <carlos@kenati.com>"); MODULE_DESCRIPTION("ALSA SoC SH7722 SIU driver"); MODULE_LICENSE("GPL");
gpl-2.0
hash07/Apollo_X
arch/microblaze/kernel/dma.c
2498
5382
/* * Copyright (C) 2009-2010 PetaLogix * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation * * Provide default implementations of the DMA mapping callbacks for * directly mapped busses. */ #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/gfp.h> #include <linux/dma-debug.h> #include <linux/export.h> #include <linux/bug.h> /* * Generic direct DMA implementation * * This implementation supports a per-device offset that can be applied if * the address at which memory is visible to devices is not 0. Platform code * can set archdata.dma_data to an unsigned long holding the offset. By * default the offset is PCI_DRAM_OFFSET. */ static unsigned long get_dma_direct_offset(struct device *dev) { if (likely(dev)) return (unsigned long)dev->archdata.dma_data; return PCI_DRAM_OFFSET; /* FIXME Not sure if is correct */ } #define NOT_COHERENT_CACHE static void *dma_direct_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs) { #ifdef NOT_COHERENT_CACHE return consistent_alloc(flag, size, dma_handle); #else void *ret; struct page *page; int node = dev_to_node(dev); /* ignore region specifiers */ flag &= ~(__GFP_HIGHMEM); page = alloc_pages_node(node, flag, get_order(size)); if (page == NULL) return NULL; ret = page_address(page); memset(ret, 0, size); *dma_handle = virt_to_phys(ret) + get_dma_direct_offset(dev); return ret; #endif } static void dma_direct_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs) { #ifdef NOT_COHERENT_CACHE consistent_free(size, vaddr); #else free_pages((unsigned long)vaddr, get_order(size)); #endif } static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction direction, struct dma_attrs *attrs) { struct scatterlist *sg; int i; /* FIXME this part of code is untested */ for_each_sg(sgl, sg, nents, i) { sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev); 
__dma_sync(page_to_phys(sg_page(sg)) + sg->offset, sg->length, direction); } return nents; } static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction, struct dma_attrs *attrs) { } static int dma_direct_dma_supported(struct device *dev, u64 mask) { return 1; } static inline dma_addr_t dma_direct_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction, struct dma_attrs *attrs) { __dma_sync(page_to_phys(page) + offset, size, direction); return page_to_phys(page) + offset + get_dma_direct_offset(dev); } static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, enum dma_data_direction direction, struct dma_attrs *attrs) { /* There is not necessary to do cache cleanup * * phys_to_virt is here because in __dma_sync_page is __virt_to_phys and * dma_address is physical address */ __dma_sync(dma_address, size, direction); } static inline void dma_direct_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction) { /* * It's pointless to flush the cache as the memory segment * is given to the CPU */ if (direction == DMA_FROM_DEVICE) __dma_sync(dma_handle, size, direction); } static inline void dma_direct_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction) { /* * It's pointless to invalidate the cache if the device isn't * supposed to write to the relevant region */ if (direction == DMA_TO_DEVICE) __dma_sync(dma_handle, size, direction); } static inline void dma_direct_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction direction) { struct scatterlist *sg; int i; /* FIXME this part of code is untested */ if (direction == DMA_FROM_DEVICE) for_each_sg(sgl, sg, nents, i) __dma_sync(sg->dma_address, sg->length, direction); } static inline void 
dma_direct_sync_sg_for_device(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction direction) { struct scatterlist *sg; int i; /* FIXME this part of code is untested */ if (direction == DMA_TO_DEVICE) for_each_sg(sgl, sg, nents, i) __dma_sync(sg->dma_address, sg->length, direction); } struct dma_map_ops dma_direct_ops = { .alloc = dma_direct_alloc_coherent, .free = dma_direct_free_coherent, .map_sg = dma_direct_map_sg, .unmap_sg = dma_direct_unmap_sg, .dma_supported = dma_direct_dma_supported, .map_page = dma_direct_map_page, .unmap_page = dma_direct_unmap_page, .sync_single_for_cpu = dma_direct_sync_single_for_cpu, .sync_single_for_device = dma_direct_sync_single_for_device, .sync_sg_for_cpu = dma_direct_sync_sg_for_cpu, .sync_sg_for_device = dma_direct_sync_sg_for_device, }; EXPORT_SYMBOL(dma_direct_ops); /* Number of entries preallocated for DMA-API debugging */ #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) static int __init dma_init(void) { dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); return 0; } fs_initcall(dma_init);
gpl-2.0
sultanxda/android_kernel_asus_grouper-old
drivers/scsi/fcoe/fcoe_transport.c
2754
18866
/* * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * * Maintained at www.Open-FCoE.org */ #include <linux/types.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/netdevice.h> #include <linux/errno.h> #include <linux/crc32.h> #include <scsi/libfcoe.h> #include "libfcoe.h" MODULE_AUTHOR("Open-FCoE.org"); MODULE_DESCRIPTION("FIP discovery protocol and FCoE transport for FCoE HBAs"); MODULE_LICENSE("GPL v2"); static int fcoe_transport_create(const char *, struct kernel_param *); static int fcoe_transport_destroy(const char *, struct kernel_param *); static int fcoe_transport_show(char *buffer, const struct kernel_param *kp); static struct fcoe_transport *fcoe_transport_lookup(struct net_device *device); static struct fcoe_transport *fcoe_netdev_map_lookup(struct net_device *device); static int fcoe_transport_enable(const char *, struct kernel_param *); static int fcoe_transport_disable(const char *, struct kernel_param *); static int libfcoe_device_notification(struct notifier_block *notifier, ulong event, void *ptr); static LIST_HEAD(fcoe_transports); static DEFINE_MUTEX(ft_mutex); static LIST_HEAD(fcoe_netdevs); static DEFINE_MUTEX(fn_mutex); unsigned int libfcoe_debug_logging; module_param_named(debug_logging, libfcoe_debug_logging, int, S_IRUGO|S_IWUSR); 
MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); module_param_call(show, NULL, fcoe_transport_show, NULL, S_IRUSR); __MODULE_PARM_TYPE(show, "string"); MODULE_PARM_DESC(show, " Show attached FCoE transports"); module_param_call(create, fcoe_transport_create, NULL, (void *)FIP_MODE_FABRIC, S_IWUSR); __MODULE_PARM_TYPE(create, "string"); MODULE_PARM_DESC(create, " Creates fcoe instance on a ethernet interface"); module_param_call(create_vn2vn, fcoe_transport_create, NULL, (void *)FIP_MODE_VN2VN, S_IWUSR); __MODULE_PARM_TYPE(create_vn2vn, "string"); MODULE_PARM_DESC(create_vn2vn, " Creates a VN_node to VN_node FCoE instance " "on an Ethernet interface"); module_param_call(destroy, fcoe_transport_destroy, NULL, NULL, S_IWUSR); __MODULE_PARM_TYPE(destroy, "string"); MODULE_PARM_DESC(destroy, " Destroys fcoe instance on a ethernet interface"); module_param_call(enable, fcoe_transport_enable, NULL, NULL, S_IWUSR); __MODULE_PARM_TYPE(enable, "string"); MODULE_PARM_DESC(enable, " Enables fcoe on a ethernet interface."); module_param_call(disable, fcoe_transport_disable, NULL, NULL, S_IWUSR); __MODULE_PARM_TYPE(disable, "string"); MODULE_PARM_DESC(disable, " Disables fcoe on a ethernet interface."); /* notification function for packets from net device */ static struct notifier_block libfcoe_notifier = { .notifier_call = libfcoe_device_notification, }; /** * fcoe_fc_crc() - Calculates the CRC for a given frame * @fp: The frame to be checksumed * * This uses crc32() routine to calculate the CRC for a frame * * Return: The 32 bit CRC value */ u32 fcoe_fc_crc(struct fc_frame *fp) { struct sk_buff *skb = fp_skb(fp); struct skb_frag_struct *frag; unsigned char *data; unsigned long off, len, clen; u32 crc; unsigned i; crc = crc32(~0, skb->data, skb_headlen(skb)); for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { frag = &skb_shinfo(skb)->frags[i]; off = frag->page_offset; len = frag->size; while (len > 0) { clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK)); data = 
kmap_atomic(frag->page + (off >> PAGE_SHIFT), KM_SKB_DATA_SOFTIRQ); crc = crc32(crc, data + (off & ~PAGE_MASK), clen); kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ); off += clen; len -= clen; } } return crc; } EXPORT_SYMBOL_GPL(fcoe_fc_crc); /** * fcoe_start_io() - Start FCoE I/O * @skb: The packet to be transmitted * * This routine is called from the net device to start transmitting * FCoE packets. * * Returns: 0 for success */ int fcoe_start_io(struct sk_buff *skb) { struct sk_buff *nskb; int rc; nskb = skb_clone(skb, GFP_ATOMIC); if (!nskb) return -ENOMEM; rc = dev_queue_xmit(nskb); if (rc != 0) return rc; kfree_skb(skb); return 0; } EXPORT_SYMBOL_GPL(fcoe_start_io); /** * fcoe_clean_pending_queue() - Dequeue a skb and free it * @lport: The local port to dequeue a skb on */ void fcoe_clean_pending_queue(struct fc_lport *lport) { struct fcoe_port *port = lport_priv(lport); struct sk_buff *skb; spin_lock_bh(&port->fcoe_pending_queue.lock); while ((skb = __skb_dequeue(&port->fcoe_pending_queue)) != NULL) { spin_unlock_bh(&port->fcoe_pending_queue.lock); kfree_skb(skb); spin_lock_bh(&port->fcoe_pending_queue.lock); } spin_unlock_bh(&port->fcoe_pending_queue.lock); } EXPORT_SYMBOL_GPL(fcoe_clean_pending_queue); /** * fcoe_check_wait_queue() - Attempt to clear the transmit backlog * @lport: The local port whose backlog is to be cleared * * This empties the wait_queue, dequeues the head of the wait_queue queue * and calls fcoe_start_io() for each packet. If all skb have been * transmitted it returns the qlen. If an error occurs it restores * wait_queue (to try again later) and returns -1. * * The wait_queue is used when the skb transmit fails. The failed skb * will go in the wait_queue which will be emptied by the timer function or * by the next skb transmit. 
*/ void fcoe_check_wait_queue(struct fc_lport *lport, struct sk_buff *skb) { struct fcoe_port *port = lport_priv(lport); int rc; spin_lock_bh(&port->fcoe_pending_queue.lock); if (skb) __skb_queue_tail(&port->fcoe_pending_queue, skb); if (port->fcoe_pending_queue_active) goto out; port->fcoe_pending_queue_active = 1; while (port->fcoe_pending_queue.qlen) { /* keep qlen > 0 until fcoe_start_io succeeds */ port->fcoe_pending_queue.qlen++; skb = __skb_dequeue(&port->fcoe_pending_queue); spin_unlock_bh(&port->fcoe_pending_queue.lock); rc = fcoe_start_io(skb); spin_lock_bh(&port->fcoe_pending_queue.lock); if (rc) { __skb_queue_head(&port->fcoe_pending_queue, skb); /* undo temporary increment above */ port->fcoe_pending_queue.qlen--; break; } /* undo temporary increment above */ port->fcoe_pending_queue.qlen--; } if (port->fcoe_pending_queue.qlen < port->min_queue_depth) lport->qfull = 0; if (port->fcoe_pending_queue.qlen && !timer_pending(&port->timer)) mod_timer(&port->timer, jiffies + 2); port->fcoe_pending_queue_active = 0; out: if (port->fcoe_pending_queue.qlen > port->max_queue_depth) lport->qfull = 1; spin_unlock_bh(&port->fcoe_pending_queue.lock); } EXPORT_SYMBOL_GPL(fcoe_check_wait_queue); /** * fcoe_queue_timer() - The fcoe queue timer * @lport: The local port * * Calls fcoe_check_wait_queue on timeout */ void fcoe_queue_timer(ulong lport) { fcoe_check_wait_queue((struct fc_lport *)lport, NULL); } EXPORT_SYMBOL_GPL(fcoe_queue_timer); /** * fcoe_get_paged_crc_eof() - Allocate a page to be used for the trailer CRC * @skb: The packet to be transmitted * @tlen: The total length of the trailer * @fps: The fcoe context * * This routine allocates a page for frame trailers. The page is re-used if * there is enough room left on it for the current trailer. If there isn't * enough buffer left a new page is allocated for the trailer. 
Reference to * the page from this function as well as the skbs using the page fragments * ensure that the page is freed at the appropriate time. * * Returns: 0 for success */ int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen, struct fcoe_percpu_s *fps) { struct page *page; page = fps->crc_eof_page; if (!page) { page = alloc_page(GFP_ATOMIC); if (!page) return -ENOMEM; fps->crc_eof_page = page; fps->crc_eof_offset = 0; } get_page(page); skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, fps->crc_eof_offset, tlen); skb->len += tlen; skb->data_len += tlen; skb->truesize += tlen; fps->crc_eof_offset += sizeof(struct fcoe_crc_eof); if (fps->crc_eof_offset >= PAGE_SIZE) { fps->crc_eof_page = NULL; fps->crc_eof_offset = 0; put_page(page); } return 0; } EXPORT_SYMBOL_GPL(fcoe_get_paged_crc_eof); /** * fcoe_transport_lookup - find an fcoe transport that matches a netdev * @netdev: The netdev to look for from all attached transports * * Returns : ptr to the fcoe transport that supports this netdev or NULL * if not found. 
* * The ft_mutex should be held when this is called */ static struct fcoe_transport *fcoe_transport_lookup(struct net_device *netdev) { struct fcoe_transport *ft = NULL; list_for_each_entry(ft, &fcoe_transports, list) if (ft->match && ft->match(netdev)) return ft; return NULL; } /** * fcoe_transport_attach - Attaches an FCoE transport * @ft: The fcoe transport to be attached * * Returns : 0 for success */ int fcoe_transport_attach(struct fcoe_transport *ft) { int rc = 0; mutex_lock(&ft_mutex); if (ft->attached) { LIBFCOE_TRANSPORT_DBG("transport %s already attached\n", ft->name); rc = -EEXIST; goto out_attach; } /* Add default transport to the tail */ if (strcmp(ft->name, FCOE_TRANSPORT_DEFAULT)) list_add(&ft->list, &fcoe_transports); else list_add_tail(&ft->list, &fcoe_transports); ft->attached = true; LIBFCOE_TRANSPORT_DBG("attaching transport %s\n", ft->name); out_attach: mutex_unlock(&ft_mutex); return rc; } EXPORT_SYMBOL(fcoe_transport_attach); /** * fcoe_transport_detach - Detaches an FCoE transport * @ft: The fcoe transport to be attached * * Returns : 0 for success */ int fcoe_transport_detach(struct fcoe_transport *ft) { int rc = 0; struct fcoe_netdev_mapping *nm = NULL, *tmp; mutex_lock(&ft_mutex); if (!ft->attached) { LIBFCOE_TRANSPORT_DBG("transport %s already detached\n", ft->name); rc = -ENODEV; goto out_attach; } /* remove netdev mapping for this transport as it is going away */ mutex_lock(&fn_mutex); list_for_each_entry_safe(nm, tmp, &fcoe_netdevs, list) { if (nm->ft == ft) { LIBFCOE_TRANSPORT_DBG("transport %s going away, " "remove its netdev mapping for %s\n", ft->name, nm->netdev->name); list_del(&nm->list); kfree(nm); } } mutex_unlock(&fn_mutex); list_del(&ft->list); ft->attached = false; LIBFCOE_TRANSPORT_DBG("detaching transport %s\n", ft->name); out_attach: mutex_unlock(&ft_mutex); return rc; } EXPORT_SYMBOL(fcoe_transport_detach); static int fcoe_transport_show(char *buffer, const struct kernel_param *kp) { int i, j; struct fcoe_transport 
*ft = NULL; i = j = sprintf(buffer, "Attached FCoE transports:"); mutex_lock(&ft_mutex); list_for_each_entry(ft, &fcoe_transports, list) { if (i >= PAGE_SIZE - IFNAMSIZ) break; i += snprintf(&buffer[i], IFNAMSIZ, "%s ", ft->name); } mutex_unlock(&ft_mutex); if (i == j) i += snprintf(&buffer[i], IFNAMSIZ, "none"); return i; } static int __init fcoe_transport_init(void) { register_netdevice_notifier(&libfcoe_notifier); return 0; } static int __exit fcoe_transport_exit(void) { struct fcoe_transport *ft; unregister_netdevice_notifier(&libfcoe_notifier); mutex_lock(&ft_mutex); list_for_each_entry(ft, &fcoe_transports, list) printk(KERN_ERR "FCoE transport %s is still attached!\n", ft->name); mutex_unlock(&ft_mutex); return 0; } static int fcoe_add_netdev_mapping(struct net_device *netdev, struct fcoe_transport *ft) { struct fcoe_netdev_mapping *nm; nm = kmalloc(sizeof(*nm), GFP_KERNEL); if (!nm) { printk(KERN_ERR "Unable to allocate netdev_mapping"); return -ENOMEM; } nm->netdev = netdev; nm->ft = ft; mutex_lock(&fn_mutex); list_add(&nm->list, &fcoe_netdevs); mutex_unlock(&fn_mutex); return 0; } static void fcoe_del_netdev_mapping(struct net_device *netdev) { struct fcoe_netdev_mapping *nm = NULL, *tmp; mutex_lock(&fn_mutex); list_for_each_entry_safe(nm, tmp, &fcoe_netdevs, list) { if (nm->netdev == netdev) { list_del(&nm->list); kfree(nm); mutex_unlock(&fn_mutex); return; } } mutex_unlock(&fn_mutex); } /** * fcoe_netdev_map_lookup - find the fcoe transport that matches the netdev on which * it was created * * Returns : ptr to the fcoe transport that supports this netdev or NULL * if not found. 
* * The ft_mutex should be held when this is called */ static struct fcoe_transport *fcoe_netdev_map_lookup(struct net_device *netdev) { struct fcoe_transport *ft = NULL; struct fcoe_netdev_mapping *nm; mutex_lock(&fn_mutex); list_for_each_entry(nm, &fcoe_netdevs, list) { if (netdev == nm->netdev) { ft = nm->ft; mutex_unlock(&fn_mutex); return ft; } } mutex_unlock(&fn_mutex); return NULL; } /** * fcoe_if_to_netdev() - Parse a name buffer to get a net device * @buffer: The name of the net device * * Returns: NULL or a ptr to net_device */ static struct net_device *fcoe_if_to_netdev(const char *buffer) { char *cp; char ifname[IFNAMSIZ + 2]; if (buffer) { strlcpy(ifname, buffer, IFNAMSIZ); cp = ifname + strlen(ifname); while (--cp >= ifname && *cp == '\n') *cp = '\0'; return dev_get_by_name(&init_net, ifname); } return NULL; } /** * libfcoe_device_notification() - Handler for net device events * @notifier: The context of the notification * @event: The type of event * @ptr: The net device that the event was on * * This function is called by the Ethernet driver in case of link change event. * * Returns: 0 for success */ static int libfcoe_device_notification(struct notifier_block *notifier, ulong event, void *ptr) { struct net_device *netdev = ptr; switch (event) { case NETDEV_UNREGISTER: printk(KERN_ERR "libfcoe_device_notification: NETDEV_UNREGISTER %s\n", netdev->name); fcoe_del_netdev_mapping(netdev); break; } return NOTIFY_OK; } /** * fcoe_transport_create() - Create a fcoe interface * @buffer: The name of the Ethernet interface to create on * @kp: The associated kernel param * * Called from sysfs. This holds the ft_mutex while calling the * registered fcoe transport's create function. 
* * Returns: 0 for success */ static int fcoe_transport_create(const char *buffer, struct kernel_param *kp) { int rc = -ENODEV; struct net_device *netdev = NULL; struct fcoe_transport *ft = NULL; enum fip_state fip_mode = (enum fip_state)(long)kp->arg; mutex_lock(&ft_mutex); netdev = fcoe_if_to_netdev(buffer); if (!netdev) { LIBFCOE_TRANSPORT_DBG("Invalid device %s.\n", buffer); goto out_nodev; } ft = fcoe_netdev_map_lookup(netdev); if (ft) { LIBFCOE_TRANSPORT_DBG("transport %s already has existing " "FCoE instance on %s.\n", ft->name, netdev->name); rc = -EEXIST; goto out_putdev; } ft = fcoe_transport_lookup(netdev); if (!ft) { LIBFCOE_TRANSPORT_DBG("no FCoE transport found for %s.\n", netdev->name); goto out_putdev; } rc = fcoe_add_netdev_mapping(netdev, ft); if (rc) { LIBFCOE_TRANSPORT_DBG("failed to add new netdev mapping " "for FCoE transport %s for %s.\n", ft->name, netdev->name); goto out_putdev; } /* pass to transport create */ rc = ft->create ? ft->create(netdev, fip_mode) : -ENODEV; if (rc) fcoe_del_netdev_mapping(netdev); LIBFCOE_TRANSPORT_DBG("transport %s %s to create fcoe on %s.\n", ft->name, (rc) ? "failed" : "succeeded", netdev->name); out_putdev: dev_put(netdev); out_nodev: mutex_unlock(&ft_mutex); return rc; } /** * fcoe_transport_destroy() - Destroy a FCoE interface * @buffer: The name of the Ethernet interface to be destroyed * @kp: The associated kernel parameter * * Called from sysfs. This holds the ft_mutex while calling the * registered fcoe transport's destroy function. 
* * Returns: 0 for success */ static int fcoe_transport_destroy(const char *buffer, struct kernel_param *kp) { int rc = -ENODEV; struct net_device *netdev = NULL; struct fcoe_transport *ft = NULL; mutex_lock(&ft_mutex); netdev = fcoe_if_to_netdev(buffer); if (!netdev) { LIBFCOE_TRANSPORT_DBG("invalid device %s.\n", buffer); goto out_nodev; } ft = fcoe_netdev_map_lookup(netdev); if (!ft) { LIBFCOE_TRANSPORT_DBG("no FCoE transport found for %s.\n", netdev->name); goto out_putdev; } /* pass to transport destroy */ rc = ft->destroy ? ft->destroy(netdev) : -ENODEV; fcoe_del_netdev_mapping(netdev); LIBFCOE_TRANSPORT_DBG("transport %s %s to destroy fcoe on %s.\n", ft->name, (rc) ? "failed" : "succeeded", netdev->name); out_putdev: dev_put(netdev); out_nodev: mutex_unlock(&ft_mutex); return rc; } /** * fcoe_transport_disable() - Disables a FCoE interface * @buffer: The name of the Ethernet interface to be disabled * @kp: The associated kernel parameter * * Called from sysfs. * * Returns: 0 for success */ static int fcoe_transport_disable(const char *buffer, struct kernel_param *kp) { int rc = -ENODEV; struct net_device *netdev = NULL; struct fcoe_transport *ft = NULL; mutex_lock(&ft_mutex); netdev = fcoe_if_to_netdev(buffer); if (!netdev) goto out_nodev; ft = fcoe_netdev_map_lookup(netdev); if (!ft) goto out_putdev; rc = ft->disable ? ft->disable(netdev) : -ENODEV; out_putdev: dev_put(netdev); out_nodev: mutex_unlock(&ft_mutex); if (rc == -ERESTARTSYS) return restart_syscall(); else return rc; } /** * fcoe_transport_enable() - Enables a FCoE interface * @buffer: The name of the Ethernet interface to be enabled * @kp: The associated kernel parameter * * Called from sysfs. 
* * Returns: 0 for success */ static int fcoe_transport_enable(const char *buffer, struct kernel_param *kp) { int rc = -ENODEV; struct net_device *netdev = NULL; struct fcoe_transport *ft = NULL; mutex_lock(&ft_mutex); netdev = fcoe_if_to_netdev(buffer); if (!netdev) goto out_nodev; ft = fcoe_netdev_map_lookup(netdev); if (!ft) goto out_putdev; rc = ft->enable ? ft->enable(netdev) : -ENODEV; out_putdev: dev_put(netdev); out_nodev: mutex_unlock(&ft_mutex); return rc; } /** * libfcoe_init() - Initialization routine for libfcoe.ko */ static int __init libfcoe_init(void) { fcoe_transport_init(); return 0; } module_init(libfcoe_init); /** * libfcoe_exit() - Tear down libfcoe.ko */ static void __exit libfcoe_exit(void) { fcoe_transport_exit(); } module_exit(libfcoe_exit);
gpl-2.0
ShinyROM/android_kernel_asus_flo
drivers/media/video/hdpvr/hdpvr-core.c
4034
12594
/* * Hauppauge HD PVR USB driver * * Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com) * Copyright (C) 2008 Janne Grunau (j@jannau.net) * Copyright (C) 2008 John Poet * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2. * */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/uaccess.h> #include <linux/atomic.h> #include <linux/usb.h> #include <linux/mutex.h> #include <linux/i2c.h> #include <linux/videodev2.h> #include <media/v4l2-dev.h> #include <media/v4l2-common.h> #include "hdpvr.h" static int video_nr[HDPVR_MAX] = {[0 ... (HDPVR_MAX - 1)] = UNSET}; module_param_array(video_nr, int, NULL, 0); MODULE_PARM_DESC(video_nr, "video device number (-1=Auto)"); /* holds the number of currently registered devices */ static atomic_t dev_nr = ATOMIC_INIT(-1); int hdpvr_debug; module_param(hdpvr_debug, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(hdpvr_debug, "enable debugging output"); static uint default_video_input = HDPVR_VIDEO_INPUTS; module_param(default_video_input, uint, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(default_video_input, "default video input: 0=Component / " "1=S-Video / 2=Composite"); static uint default_audio_input = HDPVR_AUDIO_INPUTS; module_param(default_audio_input, uint, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(default_audio_input, "default audio input: 0=RCA back / " "1=RCA front / 2=S/PDIF"); static bool boost_audio; module_param(boost_audio, bool, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(boost_audio, "boost the audio signal"); /* table of devices that work with this driver */ static struct usb_device_id hdpvr_table[] = { { USB_DEVICE(HD_PVR_VENDOR_ID, HD_PVR_PRODUCT_ID) }, { USB_DEVICE(HD_PVR_VENDOR_ID, HD_PVR_PRODUCT_ID1) }, { USB_DEVICE(HD_PVR_VENDOR_ID, HD_PVR_PRODUCT_ID2) }, { USB_DEVICE(HD_PVR_VENDOR_ID, HD_PVR_PRODUCT_ID3) }, { 
USB_DEVICE(HD_PVR_VENDOR_ID, HD_PVR_PRODUCT_ID4) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, hdpvr_table); void hdpvr_delete(struct hdpvr_device *dev) { hdpvr_free_buffers(dev); if (dev->video_dev) video_device_release(dev->video_dev); usb_put_dev(dev->udev); } static void challenge(u8 *bytes) { u64 *i64P, tmp64; uint i, idx; for (idx = 0; idx < 32; ++idx) { if (idx & 0x3) bytes[(idx >> 3) + 3] = bytes[(idx >> 2) & 0x3]; switch (idx & 0x3) { case 0x3: bytes[2] += bytes[3] * 4 + bytes[4] + bytes[5]; bytes[4] += bytes[(idx & 0x1) * 2] * 9 + 9; break; case 0x1: bytes[0] *= 8; bytes[0] += 7*idx + 4; bytes[6] += bytes[3] * 3; break; case 0x0: bytes[3 - (idx >> 3)] = bytes[idx >> 2]; bytes[5] += bytes[6] * 3; for (i = 0; i < 3; i++) bytes[3] *= bytes[3] + 1; break; case 0x2: for (i = 0; i < 3; i++) bytes[1] *= bytes[6] + 1; for (i = 0; i < 3; i++) { i64P = (u64 *)bytes; tmp64 = le64_to_cpup(i64P); tmp64 <<= bytes[7] & 0x0f; *i64P += cpu_to_le64(tmp64); } break; } } } /* try to init the device like the windows driver */ static int device_authorization(struct hdpvr_device *dev) { int ret, retval = -ENOMEM; char request_type = 0x38, rcv_request = 0x81; char *response; #ifdef HDPVR_DEBUG size_t buf_size = 46; char *print_buf = kzalloc(5*buf_size+1, GFP_KERNEL); if (!print_buf) { v4l2_err(&dev->v4l2_dev, "Out of memory\n"); return retval; } #endif mutex_lock(&dev->usbc_mutex); ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), rcv_request, 0x80 | request_type, 0x0400, 0x0003, dev->usbc_buf, 46, 10000); if (ret != 46) { v4l2_err(&dev->v4l2_dev, "unexpected answer of status request, len %d\n", ret); goto unlock; } #ifdef HDPVR_DEBUG else { hex_dump_to_buffer(dev->usbc_buf, 46, 16, 1, print_buf, 5*buf_size+1, 0); v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, "Status request returned, len %d: %s\n", ret, print_buf); } #endif dev->fw_ver = dev->usbc_buf[1]; v4l2_info(&dev->v4l2_dev, "firmware version 0x%x dated %s\n", dev->fw_ver, &dev->usbc_buf[2]); 
if (dev->fw_ver > 0x15) { dev->options.brightness = 0x80; dev->options.contrast = 0x40; dev->options.hue = 0xf; dev->options.saturation = 0x40; dev->options.sharpness = 0x80; } switch (dev->fw_ver) { case HDPVR_FIRMWARE_VERSION: dev->flags &= ~HDPVR_FLAG_AC3_CAP; break; case HDPVR_FIRMWARE_VERSION_AC3: case HDPVR_FIRMWARE_VERSION_0X12: case HDPVR_FIRMWARE_VERSION_0X15: dev->flags |= HDPVR_FLAG_AC3_CAP; break; default: v4l2_info(&dev->v4l2_dev, "untested firmware, the driver might" " not work.\n"); if (dev->fw_ver >= HDPVR_FIRMWARE_VERSION_AC3) dev->flags |= HDPVR_FLAG_AC3_CAP; else dev->flags &= ~HDPVR_FLAG_AC3_CAP; } response = dev->usbc_buf+38; #ifdef HDPVR_DEBUG hex_dump_to_buffer(response, 8, 16, 1, print_buf, 5*buf_size+1, 0); v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, "challenge: %s\n", print_buf); #endif challenge(response); #ifdef HDPVR_DEBUG hex_dump_to_buffer(response, 8, 16, 1, print_buf, 5*buf_size+1, 0); v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, " response: %s\n", print_buf); #endif msleep(100); ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), 0xd1, 0x00 | request_type, 0x0000, 0x0000, response, 8, 10000); v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, "magic request returned %d\n", ret); retval = ret != 8; unlock: mutex_unlock(&dev->usbc_mutex); return retval; } static int hdpvr_device_init(struct hdpvr_device *dev) { int ret; u8 *buf; struct hdpvr_video_info *vidinf; if (device_authorization(dev)) return -EACCES; /* default options for init */ hdpvr_set_options(dev); /* set filter options */ mutex_lock(&dev->usbc_mutex); buf = dev->usbc_buf; buf[0] = 0x03; buf[1] = 0x03; buf[2] = 0x00; buf[3] = 0x00; ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), 0x01, 0x38, CTRL_LOW_PASS_FILTER_VALUE, CTRL_DEFAULT_INDEX, buf, 4, 1000); v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, "control request returned %d\n", ret); mutex_unlock(&dev->usbc_mutex); vidinf = get_video_info(dev); if (!vidinf) v4l2_dbg(MSG_INFO, 
hdpvr_debug, &dev->v4l2_dev, "no valid video signal or device init failed\n"); else kfree(vidinf); /* enable fan and bling leds */ mutex_lock(&dev->usbc_mutex); buf[0] = 0x1; ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), 0xd4, 0x38, 0, 0, buf, 1, 1000); v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, "control request returned %d\n", ret); /* boost analog audio */ buf[0] = boost_audio; ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), 0xd5, 0x38, 0, 0, buf, 1, 1000); v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, "control request returned %d\n", ret); mutex_unlock(&dev->usbc_mutex); dev->status = STATUS_IDLE; return 0; } static const struct hdpvr_options hdpvr_default_options = { .video_std = HDPVR_60HZ, .video_input = HDPVR_COMPONENT, .audio_input = HDPVR_RCA_BACK, .bitrate = 65, /* 6 mbps */ .peak_bitrate = 90, /* 9 mbps */ .bitrate_mode = HDPVR_CONSTANT, .gop_mode = HDPVR_SIMPLE_IDR_GOP, .audio_codec = V4L2_MPEG_AUDIO_ENCODING_AAC, /* original picture controls for firmware version <= 0x15 */ /* updated in device_authorization() for newer firmware */ .brightness = 0x86, .contrast = 0x80, .hue = 0x80, .saturation = 0x80, .sharpness = 0x80, }; static int hdpvr_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct hdpvr_device *dev; struct usb_host_interface *iface_desc; struct usb_endpoint_descriptor *endpoint; struct i2c_client *client; size_t buffer_size; int i; int retval = -ENOMEM; /* allocate memory for our device state and initialize it */ dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) { err("Out of memory"); goto error; } dev->workqueue = 0; /* register v4l2_device early so it can be used for printks */ if (v4l2_device_register(&interface->dev, &dev->v4l2_dev)) { err("v4l2_device_register failed"); goto error; } mutex_init(&dev->io_mutex); mutex_init(&dev->i2c_mutex); mutex_init(&dev->usbc_mutex); dev->usbc_buf = kmalloc(64, GFP_KERNEL); if (!dev->usbc_buf) { v4l2_err(&dev->v4l2_dev, "Out of 
memory\n"); goto error; } init_waitqueue_head(&dev->wait_buffer); init_waitqueue_head(&dev->wait_data); dev->workqueue = create_singlethread_workqueue("hdpvr_buffer"); if (!dev->workqueue) goto error; /* init video transfer queues */ INIT_LIST_HEAD(&dev->free_buff_list); INIT_LIST_HEAD(&dev->rec_buff_list); dev->options = hdpvr_default_options; if (default_video_input < HDPVR_VIDEO_INPUTS) dev->options.video_input = default_video_input; if (default_audio_input < HDPVR_AUDIO_INPUTS) { dev->options.audio_input = default_audio_input; if (default_audio_input == HDPVR_SPDIF) dev->options.audio_codec = V4L2_MPEG_AUDIO_ENCODING_AC3; } dev->udev = usb_get_dev(interface_to_usbdev(interface)); /* set up the endpoint information */ /* use only the first bulk-in and bulk-out endpoints */ iface_desc = interface->cur_altsetting; for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { endpoint = &iface_desc->endpoint[i].desc; if (!dev->bulk_in_endpointAddr && usb_endpoint_is_bulk_in(endpoint)) { /* USB interface description is buggy, reported max * packet size is 512 bytes, windows driver uses 8192 */ buffer_size = 8192; dev->bulk_in_size = buffer_size; dev->bulk_in_endpointAddr = endpoint->bEndpointAddress; } } if (!dev->bulk_in_endpointAddr) { v4l2_err(&dev->v4l2_dev, "Could not find bulk-in endpoint\n"); goto error; } /* init the device */ if (hdpvr_device_init(dev)) { v4l2_err(&dev->v4l2_dev, "device init failed\n"); goto error; } mutex_lock(&dev->io_mutex); if (hdpvr_alloc_buffers(dev, NUM_BUFFERS)) { mutex_unlock(&dev->io_mutex); v4l2_err(&dev->v4l2_dev, "allocating transfer buffers failed\n"); goto error; } mutex_unlock(&dev->io_mutex); if (hdpvr_register_videodev(dev, &interface->dev, video_nr[atomic_inc_return(&dev_nr)])) { v4l2_err(&dev->v4l2_dev, "registering videodev failed\n"); goto error; } #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) retval = hdpvr_register_i2c_adapter(dev); if (retval < 0) { v4l2_err(&dev->v4l2_dev, "i2c adapter register failed\n"); goto 
error; } client = hdpvr_register_ir_rx_i2c(dev); if (!client) { v4l2_err(&dev->v4l2_dev, "i2c IR RX device register failed\n"); goto reg_fail; } client = hdpvr_register_ir_tx_i2c(dev); if (!client) { v4l2_err(&dev->v4l2_dev, "i2c IR TX device register failed\n"); goto reg_fail; } #endif /* let the user know what node this device is now attached to */ v4l2_info(&dev->v4l2_dev, "device now attached to %s\n", video_device_node_name(dev->video_dev)); return 0; reg_fail: #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) i2c_del_adapter(&dev->i2c_adapter); #endif error: if (dev) { /* Destroy single thread */ if (dev->workqueue) destroy_workqueue(dev->workqueue); /* this frees allocated memory */ hdpvr_delete(dev); } return retval; } static void hdpvr_disconnect(struct usb_interface *interface) { struct hdpvr_device *dev = to_hdpvr_dev(usb_get_intfdata(interface)); v4l2_info(&dev->v4l2_dev, "device %s disconnected\n", video_device_node_name(dev->video_dev)); /* prevent more I/O from starting and stop any ongoing */ mutex_lock(&dev->io_mutex); dev->status = STATUS_DISCONNECTED; wake_up_interruptible(&dev->wait_data); wake_up_interruptible(&dev->wait_buffer); mutex_unlock(&dev->io_mutex); v4l2_device_disconnect(&dev->v4l2_dev); msleep(100); flush_workqueue(dev->workqueue); mutex_lock(&dev->io_mutex); hdpvr_cancel_queue(dev); mutex_unlock(&dev->io_mutex); #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) i2c_del_adapter(&dev->i2c_adapter); #endif video_unregister_device(dev->video_dev); atomic_dec(&dev_nr); } static struct usb_driver hdpvr_usb_driver = { .name = "hdpvr", .probe = hdpvr_probe, .disconnect = hdpvr_disconnect, .id_table = hdpvr_table, }; module_usb_driver(hdpvr_usb_driver); MODULE_LICENSE("GPL"); MODULE_VERSION("0.2.1"); MODULE_AUTHOR("Janne Grunau"); MODULE_DESCRIPTION("Hauppauge HD PVR driver");
gpl-2.0
PaulMilbank/g300_kernel
drivers/input/touchscreen/synaptics/rmi_spi.c
4290
16433
/** * * Synaptics Register Mapped Interface (RMI4) SPI Physical Layer Driver. * Copyright (C) 2008-2011, Synaptics Incorporated * */ /* * This file is licensed under the GPL2 license. * *############################################################################ * GPL * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * *############################################################################ */ #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/spi/spi.h> #include <linux/platform_device.h> #include <linux/semaphore.h> #include <linux/spi/spi.h> #include <linux/input/rmi_platformdata.h> #include "rmi_spi.h" #include "rmi_drvr.h" #define COMM_DEBUG 1 /* Set to 1 to dump transfers. */ /* 65 microseconds inter-byte delay between bytes for RMI chip*/ #define RMI_DEFAULT_BYTE_DELAY_US 0 /* 65 */ #define SPI_BUFFER_SIZE 32 static u8 *buf; /* This is the data kept on a per instance (client) basis. This data is * always accessible by using the container_of() macro of the various elements * inside. 
*/ struct spi_device_instance_data { int instance_no; int irq; unsigned int byte_delay_us; struct rmi_phys_driver rpd; struct spi_device *spidev; struct rmi_spi_platformdata *platformdata; }; static int spi_xfer(struct spi_device_instance_data *instance_data, const u8 *txbuf, unsigned n_tx, u8 *rxbuf, unsigned n_rx) { struct spi_device *spi = instance_data->spidev; #if COMM_DEBUG int i; #endif int status; struct spi_message message; struct spi_transfer *xfer_list; u8 *local_buf; int nXfers = 0; int xfer_index = 0; if ((n_tx + n_rx) > SPI_BUFFER_SIZE) return -EINVAL; if (n_tx) nXfers += 1; if (n_rx) { if (instance_data->byte_delay_us) nXfers += n_rx; else nXfers += 1; } xfer_list = kcalloc(nXfers, sizeof(struct spi_transfer), GFP_KERNEL); if (!xfer_list) return -ENOMEM; /* ... unless someone else is using the pre-allocated buffer */ local_buf = kzalloc(SPI_BUFFER_SIZE, GFP_KERNEL); if (!local_buf) { kfree(xfer_list); return -ENOMEM; } spi_message_init(&message); if (n_tx) { memset(&xfer_list[0], 0, sizeof(struct spi_transfer)); xfer_list[0].len = n_tx; xfer_list[0].delay_usecs = instance_data->byte_delay_us; spi_message_add_tail(&xfer_list[0], &message); memcpy(local_buf, txbuf, n_tx); xfer_list[0].tx_buf = local_buf; xfer_index++; } if (n_rx) { if (instance_data->byte_delay_us) { int buffer_offset = n_tx; for (; xfer_index < nXfers; xfer_index++) { memset(&xfer_list[xfer_index], 0, sizeof(struct spi_transfer)); xfer_list[xfer_index].len = 1; xfer_list[xfer_index].delay_usecs = instance_data->byte_delay_us; xfer_list[xfer_index].rx_buf = local_buf + buffer_offset; buffer_offset++; spi_message_add_tail(&xfer_list[xfer_index], &message); #ifdef CONFIG_ARCH_OMAP printk(KERN_INFO "%s: Did you compensate for ARCH_OMAP?", __func__); /* x[1].len = n_rx-1; */ /* since OMAP has one dummy byte. */ #else /* x[1].len = n_rx; */ #endif } } else { memset(&xfer_list[xfer_index], 0, sizeof(struct spi_transfer)); #ifdef CONFIG_ARCH_OMAP /* since OMAP has one dummy byte. 
*/ xfer_list[xfer_index].len = n_rx-1; #else xfer_list[xfer_index].len = n_rx; #endif xfer_list[xfer_index].rx_buf = local_buf + n_tx; spi_message_add_tail(&xfer_list[xfer_index], &message); xfer_index++; } } printk(KERN_INFO "%s: Ready to go, xfer_index = %d, nXfers = %d.", __func__, xfer_index, nXfers); #if COMM_DEBUG printk(KERN_INFO "%s: SPI transmits %d bytes...", __func__, n_tx); for (i = 0; i < n_tx; i++) printk(KERN_INFO " 0x%02X", local_buf[i]); #endif /* do the i/o */ status = spi_sync(spi, &message); if (status == 0) { memcpy(rxbuf, local_buf + n_tx, n_rx); status = message.status; #if COMM_DEBUG if (n_rx) { printk(KERN_INFO "%s: SPI received %d bytes...", __func__, n_rx); for (i = 0; i < n_rx; i++) printk(KERN_INFO " 0x%02X", rxbuf[i]); } #endif } else { printk(KERN_ERR "%s: spi_sync failed with error code %d.", __func__, status); } kfree(local_buf); kfree(xfer_list); return status; } /** * Read a single register through spi. * \param[in] pd * \param[in] address The address at which to start the data read. * \param[out] valp Pointer to the buffer where the data will be stored. * \return zero upon success (with the byte read in valp),non-zero upon error. */ static int rmi_spi_read(struct rmi_phys_driver *pd, unsigned short address, char *valp) { struct spi_device_instance_data *id = container_of(pd, struct spi_device_instance_data, rpd); char rxbuf[2]; int retval; unsigned short addr = address; addr = ((addr & 0xff00) >> 8); address = ((address & 0x00ff) << 8); addr |= address; addr |= 0x80; /* High bit set indicates read. */ retval = spi_xfer(id, (u8 *)&addr, 2, rxbuf, 1); *valp = rxbuf[0]; return retval; } /** * Same as rmi_spi_read, except that multiple bytes are allowed to be read. * \param[in] pd * \param[in] address The address at which to start the data read. * \param[out] valp Pointer to the buffer where the data will be stored. This * buffer must be at least size bytes long. * \param[in] size The number of bytes to be read. 
* \return zero upon success(with the byte read in valp), non-zero upon error. */ static int rmi_spi_read_multiple(struct rmi_phys_driver *pd, unsigned short address, char *valp, int size) { struct spi_device_instance_data *id = container_of(pd, struct spi_device_instance_data, rpd); int retval; unsigned short addr = address; addr = ((addr & 0xff00) >> 8); address = ((address & 0x00ff) << 8); addr |= address; addr |= 0x80; /* High bit set indicates read. */ retval = spi_xfer(id, (u8 *)&addr, 2, valp, size); return retval; } /** * Write a single register through spi. * You can write multiple registers at once, but I made the functions for that * seperate for performance reasons. Writing multiple requires allocation and * freeing. * \param[in] pd * \param[in] address The address at which to start the write. * \param[in] data The data to be written. * \return one upon success, something else upon error. */ static int rmi_spi_write(struct rmi_phys_driver *pd, unsigned short address, char data) { struct spi_device_instance_data *id = container_of(pd, struct spi_device_instance_data, rpd); unsigned char txbuf[4]; int retval; txbuf[2] = data; txbuf[1] = address; txbuf[0] = address>>8; retval = spi_xfer(id, txbuf, 3, NULL, 0); return retval ? 0 : 1; } /** * Write multiple registers. * \param[in] pd * \param[in] address The address at which to start the write. * \param[in] valp A pointer to a buffer containing the data to be written. * \param[in] size The number of bytes to write. * \return one upon success, something else upon error. */ static int rmi_spi_write_multiple(struct rmi_phys_driver *pd, unsigned short address, char *valp, int size) { struct spi_device_instance_data *id = container_of(pd, struct spi_device_instance_data, rpd); unsigned char txbuf[32]; int retval; int i; txbuf[1] = address; txbuf[0] = address>>8; for (i = 0; i < size; i++) txbuf[i + 2] = valp[i]; retval = spi_xfer(id, txbuf, size+2, NULL, 0); return retval ? 
0 : 1; } /** * This is the Interrupt Service Routine. * It just notifies the physical device * that attention is required. */ static irqreturn_t spi_attn_isr(int irq, void *info) { struct spi_device_instance_data *instance_data = info; disable_irq_nosync(instance_data->irq); if (instance_data->rpd.attention) instance_data->rpd.attention(&instance_data->rpd, instance_data->instance_no); return IRQ_HANDLED; } /* TODO: Move this to rmi_bus, and call a function to get the next sensorID */ static int sensor_count; static int __devinit rmi_spi_probe(struct spi_device *spi) { struct spi_device_instance_data *instance_data; int retval; struct rmi_spi_platformdata *platformdata; struct rmi_sensordata *sensordata; int irqtype = 0; printk(KERN_INFO "Probing RMI4 SPI device\n"); /* This should have already been set up in the board file, shouldn't it? */ spi->bits_per_word = 8; spi->mode = SPI_MODE_3; retval = spi_setup(spi); if (retval < 0) { printk(KERN_ERR "%s: spi_setup failed with %d.", __func__, retval); return retval; } buf = kzalloc(SPI_BUFFER_SIZE, GFP_KERNEL); if (!buf) { printk(KERN_ERR "%s: Failed to allocate memory for spi buffer.", __func__); return -ENOMEM; } instance_data = kzalloc(sizeof(*instance_data), GFP_KERNEL); if (!instance_data) { printk(KERN_ERR "%s: Failer to allocate memory for instance data.", __func__); return -ENOMEM; } instance_data->byte_delay_us = RMI_DEFAULT_BYTE_DELAY_US; instance_data->spidev = spi; instance_data->rpd.name = RMI4_SPI_DRIVER_NAME; instance_data->rpd.write = rmi_spi_write; instance_data->rpd.read = rmi_spi_read; instance_data->rpd.write_multiple = rmi_spi_write_multiple; instance_data->rpd.read_multiple = rmi_spi_read_multiple; instance_data->rpd.module = THIS_MODULE; /* default to polling if irq not used */ instance_data->rpd.polling_required = true; platformdata = spi->dev.platform_data; if (platformdata == NULL) { printk(KERN_ERR "%s: CONFIGURATION ERROR - platform data is NULL.", __func__); return -EINVAL; } 
instance_data->platformdata = platformdata; sensordata = platformdata->sensordata; /* Call the platform setup routine, to do any setup that is required * before * interacting with the device. */ if (sensordata && sensordata->rmi_sensor_setup) { retval = sensordata->rmi_sensor_setup(); if (retval) { printk(KERN_ERR "%s: sensor setup failed with code %d.", __func__, retval); kfree(instance_data); return retval; } } /* TODO: I think this if is no longer required. */ if (platformdata->chip == RMI_SUPPORT) { instance_data->instance_no = sensor_count; sensor_count++; /* set the device name using the instance_no * appended to DEVICE_NAME to make a unique name */ dev_set_name(&spi->dev, "%s%d", RMI4_SPI_DEVICE_NAME, instance_data->instance_no); /* * Determine if we need to poll (inefficient) or * use interrupts. */ if (platformdata->irq) { switch (platformdata->irq_type) { case IORESOURCE_IRQ_HIGHEDGE: irqtype = IRQF_TRIGGER_RISING; break; case IORESOURCE_IRQ_LOWEDGE: irqtype = IRQF_TRIGGER_FALLING; break; case IORESOURCE_IRQ_HIGHLEVEL: irqtype = IRQF_TRIGGER_HIGH; break; case IORESOURCE_IRQ_LOWLEVEL: irqtype = IRQF_TRIGGER_LOW; break; default: dev_warn(&spi->dev, "%s: Invalid IRQ flags in platform data.", __func__); retval = -ENXIO; goto error_exit; } /* retval = request_irq(instance_data->irq, spi_attn_isr, irqtype, "rmi_spi", instance_data); if (retval) { dev_info(&spi->dev, "%s: Unable to get attn irq %d. Reverting to polling. ", __func__, instance_data->irq); instance_data->rpd.polling_required = true; } else { dev_dbg(&spi->dev, "%s: got irq", __func__); instance_data->rpd.polling_required = false; instance_data->rpd.irq = instance_data->irq; } */ instance_data->rpd.polling_required = false; } else { instance_data->rpd.polling_required = true; dev_info(&spi->dev, "%s: No IRQ info given. Polling required.", __func__); } } /* Store instance data for later access. 
*/ if (instance_data) spi_set_drvdata(spi, instance_data); /* Register the sensor driver - * which will trigger a scan of the PDT. */ retval = rmi_register_sensor(&instance_data->rpd, platformdata->sensordata); if (retval) { printk(KERN_ERR "%s: sensor registration failed with code %d.", __func__, retval); goto error_exit; } if (instance_data->rpd.polling_required == false) { instance_data->irq = platformdata->irq; retval = request_irq(platformdata->irq, spi_attn_isr, irqtype, dev_name(&spi->dev), instance_data); if (retval) { dev_err(&spi->dev, "%s: failed to obtain IRQ %d. Result: %d.", __func__, platformdata->irq, retval); dev_info(&spi->dev, "%s: Reverting to polling.\n", __func__); instance_data->rpd.polling_required = true; instance_data->irq = 0; /* TODO: Need to revert back to polling * - create and start timer. */ } else { dev_dbg(&spi->dev, "%s: got irq.\n", __func__); instance_data->rpd.irq = instance_data->irq; } } printk(KERN_INFO "%s: Successfully Registered %s.", __func__, instance_data->rpd.name); return 0; error_exit: if (sensordata && sensordata->rmi_sensor_teardown) sensordata->rmi_sensor_teardown(); if (instance_data->irq) free_irq(instance_data->irq, instance_data); kfree(instance_data); return retval; } static int rmi_spi_suspend(struct spi_device *spi, pm_message_t message) { printk(KERN_INFO "%s: Suspending...", __func__); return 0; } static int rmi_spi_resume(struct spi_device *spi) { printk(KERN_INFO "%s: Resuming...", __func__); return 0; } static int __devexit rmi_spi_remove(struct spi_device *spi) { struct spi_device_instance_data *id = spi_get_drvdata(spi); printk(KERN_INFO "%s: RMI SPI device removed.", __func__); rmi_spi_suspend(spi, PMSG_SUSPEND); rmi_unregister_sensors(&id->rpd); if (id) { if (id->irq) free_irq(id->irq, id); kfree(id); } return 0; } static struct spi_driver rmi_spi_driver = { .driver = { .name = RMI4_SPI_DRIVER_NAME, .bus = &spi_bus_type, .owner = THIS_MODULE, }, .probe = rmi_spi_probe, .remove = 
__devexit_p(rmi_spi_remove), .suspend = rmi_spi_suspend, .resume = rmi_spi_resume, }; /** * The Platform Driver probe function. We just tell the spi subsystem about * ourselves in this call. */ static int rmi_spi_plat_probe(struct platform_device *dev) { struct rmi_spi_platformdata *platform_data = dev->dev.platform_data; printk(KERN_INFO "%s: Platform driver probe.", __func__); if (!platform_data) { printk(KERN_ERR "A platform device must contain rmi_spi_platformdata\n"); return -ENXIO; } return spi_register_driver(&rmi_spi_driver); } /** * Tell the spi subsystem that we're done. * \param[in] dev * \return Always returns 0. */ static int rmi_spi_plat_remove(struct platform_device *dev) { printk(KERN_INFO "%s: Platform driver removed.", __func__); spi_unregister_driver(&rmi_spi_driver); return 0; } /** * Structure used to tell the Platform Driver subsystem about us. */ static struct platform_driver rmi_spi_platform_driver = { .driver = { .name = RMI4_SPI_DRIVER_NAME, .owner = THIS_MODULE, }, .probe = rmi_spi_plat_probe, .remove = __devexit_p(rmi_spi_plat_remove), }; static int __init rmi_spi_init(void) { int retval; printk(KERN_INFO "%s: RMI SPI physical layer initialization.", __func__); retval = spi_register_driver(&rmi_spi_driver); if (retval < 0) { printk(KERN_ERR "%s: Failed to register spi driver, code = %d.", __func__, retval); return retval; } /* #else retval = platform_driver_register(&rmi_spi_platform_driver); if (retval < 0) { printk(KERN_ERR "%s: Failed to register platform driver, code = %d.", __func__, retval); return retval; } #endif */ printk(KERN_INFO "%s: result = %d", __func__, retval); return retval; } module_init(rmi_spi_init); static void __exit rmi_spi_exit(void) { printk(KERN_INFO "%s: RMI SPI physical layer exits.", __func__); kfree(buf); buf = NULL; platform_driver_unregister(&rmi_spi_platform_driver); } module_exit(rmi_spi_exit); /** Standard driver module information - the author of the module. 
*/ MODULE_AUTHOR("Synaptics, Inc."); /** Standard driver module information - a summary description of this module. */ MODULE_DESCRIPTION("RMI4 Driver SPI Physical Layer"); /** Standard driver module information - the license under which this module * is included in the kernel. */ MODULE_LICENSE("GPL");
gpl-2.0
hyuh/kernel-dlx
tools/perf/util/header.c
4546
54668
#define _FILE_OFFSET_BITS 64 #include "util.h" #include <sys/types.h> #include <byteswap.h> #include <unistd.h> #include <stdio.h> #include <stdlib.h> #include <linux/list.h> #include <linux/kernel.h> #include <linux/bitops.h> #include <sys/utsname.h> #include "evlist.h" #include "evsel.h" #include "header.h" #include "../perf.h" #include "trace-event.h" #include "session.h" #include "symbol.h" #include "debug.h" #include "cpumap.h" static bool no_buildid_cache = false; static int event_count; static struct perf_trace_event_type *events; static u32 header_argc; static const char **header_argv; int perf_header__push_event(u64 id, const char *name) { if (strlen(name) > MAX_EVENT_NAME) pr_warning("Event %s will be truncated\n", name); if (!events) { events = malloc(sizeof(struct perf_trace_event_type)); if (events == NULL) return -ENOMEM; } else { struct perf_trace_event_type *nevents; nevents = realloc(events, (event_count + 1) * sizeof(*events)); if (nevents == NULL) return -ENOMEM; events = nevents; } memset(&events[event_count], 0, sizeof(struct perf_trace_event_type)); events[event_count].event_id = id; strncpy(events[event_count].name, name, MAX_EVENT_NAME - 1); event_count++; return 0; } char *perf_header__find_event(u64 id) { int i; for (i = 0 ; i < event_count; i++) { if (events[i].event_id == id) return events[i].name; } return NULL; } /* * magic2 = "PERFILE2" * must be a numerical value to let the endianness * determine the memory layout. That way we are able * to detect endianness when reading the perf.data file * back. * * we check for legacy (PERFFILE) format. 
*/ static const char *__perf_magic1 = "PERFFILE"; static const u64 __perf_magic2 = 0x32454c4946524550ULL; static const u64 __perf_magic2_sw = 0x50455246494c4532ULL; #define PERF_MAGIC __perf_magic2 struct perf_file_attr { struct perf_event_attr attr; struct perf_file_section ids; }; void perf_header__set_feat(struct perf_header *header, int feat) { set_bit(feat, header->adds_features); } void perf_header__clear_feat(struct perf_header *header, int feat) { clear_bit(feat, header->adds_features); } bool perf_header__has_feat(const struct perf_header *header, int feat) { return test_bit(feat, header->adds_features); } static int do_write(int fd, const void *buf, size_t size) { while (size) { int ret = write(fd, buf, size); if (ret < 0) return -errno; size -= ret; buf += ret; } return 0; } #define NAME_ALIGN 64 static int write_padded(int fd, const void *bf, size_t count, size_t count_aligned) { static const char zero_buf[NAME_ALIGN]; int err = do_write(fd, bf, count); if (!err) err = do_write(fd, zero_buf, count_aligned - count); return err; } static int do_write_string(int fd, const char *str) { u32 len, olen; int ret; olen = strlen(str) + 1; len = ALIGN(olen, NAME_ALIGN); /* write len, incl. 
\0 */ ret = do_write(fd, &len, sizeof(len)); if (ret < 0) return ret; return write_padded(fd, str, olen, len); } static char *do_read_string(int fd, struct perf_header *ph) { ssize_t sz, ret; u32 len; char *buf; sz = read(fd, &len, sizeof(len)); if (sz < (ssize_t)sizeof(len)) return NULL; if (ph->needs_swap) len = bswap_32(len); buf = malloc(len); if (!buf) return NULL; ret = read(fd, buf, len); if (ret == (ssize_t)len) { /* * strings are padded by zeroes * thus the actual strlen of buf * may be less than len */ return buf; } free(buf); return NULL; } int perf_header__set_cmdline(int argc, const char **argv) { int i; header_argc = (u32)argc; /* do not include NULL termination */ header_argv = calloc(argc, sizeof(char *)); if (!header_argv) return -ENOMEM; /* * must copy argv contents because it gets moved * around during option parsing */ for (i = 0; i < argc ; i++) header_argv[i] = argv[i]; return 0; } #define dsos__for_each_with_build_id(pos, head) \ list_for_each_entry(pos, head, node) \ if (!pos->has_build_id) \ continue; \ else static int __dsos__write_buildid_table(struct list_head *head, pid_t pid, u16 misc, int fd) { struct dso *pos; dsos__for_each_with_build_id(pos, head) { int err; struct build_id_event b; size_t len; if (!pos->hit) continue; len = pos->long_name_len + 1; len = ALIGN(len, NAME_ALIGN); memset(&b, 0, sizeof(b)); memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id)); b.pid = pid; b.header.misc = misc; b.header.size = sizeof(b) + len; err = do_write(fd, &b, sizeof(b)); if (err < 0) return err; err = write_padded(fd, pos->long_name, pos->long_name_len + 1, len); if (err < 0) return err; } return 0; } static int machine__write_buildid_table(struct machine *machine, int fd) { int err; u16 kmisc = PERF_RECORD_MISC_KERNEL, umisc = PERF_RECORD_MISC_USER; if (!machine__is_host(machine)) { kmisc = PERF_RECORD_MISC_GUEST_KERNEL; umisc = PERF_RECORD_MISC_GUEST_USER; } err = __dsos__write_buildid_table(&machine->kernel_dsos, machine->pid, kmisc, 
fd); if (err == 0) err = __dsos__write_buildid_table(&machine->user_dsos, machine->pid, umisc, fd); return err; } static int dsos__write_buildid_table(struct perf_header *header, int fd) { struct perf_session *session = container_of(header, struct perf_session, header); struct rb_node *nd; int err = machine__write_buildid_table(&session->host_machine, fd); if (err) return err; for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) { struct machine *pos = rb_entry(nd, struct machine, rb_node); err = machine__write_buildid_table(pos, fd); if (err) break; } return err; } int build_id_cache__add_s(const char *sbuild_id, const char *debugdir, const char *name, bool is_kallsyms) { const size_t size = PATH_MAX; char *realname, *filename = zalloc(size), *linkname = zalloc(size), *targetname; int len, err = -1; if (is_kallsyms) { if (symbol_conf.kptr_restrict) { pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n"); return 0; } realname = (char *)name; } else realname = realpath(name, NULL); if (realname == NULL || filename == NULL || linkname == NULL) goto out_free; len = scnprintf(filename, size, "%s%s%s", debugdir, is_kallsyms ? 
"/" : "", realname); if (mkdir_p(filename, 0755)) goto out_free; snprintf(filename + len, size - len, "/%s", sbuild_id); if (access(filename, F_OK)) { if (is_kallsyms) { if (copyfile("/proc/kallsyms", filename)) goto out_free; } else if (link(realname, filename) && copyfile(name, filename)) goto out_free; } len = scnprintf(linkname, size, "%s/.build-id/%.2s", debugdir, sbuild_id); if (access(linkname, X_OK) && mkdir_p(linkname, 0755)) goto out_free; snprintf(linkname + len, size - len, "/%s", sbuild_id + 2); targetname = filename + strlen(debugdir) - 5; memcpy(targetname, "../..", 5); if (symlink(targetname, linkname) == 0) err = 0; out_free: if (!is_kallsyms) free(realname); free(filename); free(linkname); return err; } static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size, const char *name, const char *debugdir, bool is_kallsyms) { char sbuild_id[BUILD_ID_SIZE * 2 + 1]; build_id__sprintf(build_id, build_id_size, sbuild_id); return build_id_cache__add_s(sbuild_id, debugdir, name, is_kallsyms); } int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir) { const size_t size = PATH_MAX; char *filename = zalloc(size), *linkname = zalloc(size); int err = -1; if (filename == NULL || linkname == NULL) goto out_free; snprintf(linkname, size, "%s/.build-id/%.2s/%s", debugdir, sbuild_id, sbuild_id + 2); if (access(linkname, F_OK)) goto out_free; if (readlink(linkname, filename, size - 1) < 0) goto out_free; if (unlink(linkname)) goto out_free; /* * Since the link is relative, we must make it absolute: */ snprintf(linkname, size, "%s/.build-id/%.2s/%s", debugdir, sbuild_id, filename); if (unlink(linkname)) goto out_free; err = 0; out_free: free(filename); free(linkname); return err; } static int dso__cache_build_id(struct dso *dso, const char *debugdir) { bool is_kallsyms = dso->kernel && dso->long_name[0] != '/'; return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id), dso->long_name, debugdir, is_kallsyms); } static int 
__dsos__cache_build_ids(struct list_head *head, const char *debugdir) { struct dso *pos; int err = 0; dsos__for_each_with_build_id(pos, head) if (dso__cache_build_id(pos, debugdir)) err = -1; return err; } static int machine__cache_build_ids(struct machine *machine, const char *debugdir) { int ret = __dsos__cache_build_ids(&machine->kernel_dsos, debugdir); ret |= __dsos__cache_build_ids(&machine->user_dsos, debugdir); return ret; } static int perf_session__cache_build_ids(struct perf_session *session) { struct rb_node *nd; int ret; char debugdir[PATH_MAX]; snprintf(debugdir, sizeof(debugdir), "%s", buildid_dir); if (mkdir(debugdir, 0755) != 0 && errno != EEXIST) return -1; ret = machine__cache_build_ids(&session->host_machine, debugdir); for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) { struct machine *pos = rb_entry(nd, struct machine, rb_node); ret |= machine__cache_build_ids(pos, debugdir); } return ret ? -1 : 0; } static bool machine__read_build_ids(struct machine *machine, bool with_hits) { bool ret = __dsos__read_build_ids(&machine->kernel_dsos, with_hits); ret |= __dsos__read_build_ids(&machine->user_dsos, with_hits); return ret; } static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits) { struct rb_node *nd; bool ret = machine__read_build_ids(&session->host_machine, with_hits); for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) { struct machine *pos = rb_entry(nd, struct machine, rb_node); ret |= machine__read_build_ids(pos, with_hits); } return ret; } static int write_trace_info(int fd, struct perf_header *h __used, struct perf_evlist *evlist) { return read_tracing_data(fd, &evlist->entries); } static int write_build_id(int fd, struct perf_header *h, struct perf_evlist *evlist __used) { struct perf_session *session; int err; session = container_of(h, struct perf_session, header); if (!perf_session__read_build_ids(session, true)) return -1; err = dsos__write_buildid_table(h, fd); if (err < 0) { 
pr_debug("failed to write buildid table\n"); return err; } if (!no_buildid_cache) perf_session__cache_build_ids(session); return 0; } static int write_hostname(int fd, struct perf_header *h __used, struct perf_evlist *evlist __used) { struct utsname uts; int ret; ret = uname(&uts); if (ret < 0) return -1; return do_write_string(fd, uts.nodename); } static int write_osrelease(int fd, struct perf_header *h __used, struct perf_evlist *evlist __used) { struct utsname uts; int ret; ret = uname(&uts); if (ret < 0) return -1; return do_write_string(fd, uts.release); } static int write_arch(int fd, struct perf_header *h __used, struct perf_evlist *evlist __used) { struct utsname uts; int ret; ret = uname(&uts); if (ret < 0) return -1; return do_write_string(fd, uts.machine); } static int write_version(int fd, struct perf_header *h __used, struct perf_evlist *evlist __used) { return do_write_string(fd, perf_version_string); } static int write_cpudesc(int fd, struct perf_header *h __used, struct perf_evlist *evlist __used) { #ifndef CPUINFO_PROC #define CPUINFO_PROC NULL #endif FILE *file; char *buf = NULL; char *s, *p; const char *search = CPUINFO_PROC; size_t len = 0; int ret = -1; if (!search) return -1; file = fopen("/proc/cpuinfo", "r"); if (!file) return -1; while (getline(&buf, &len, file) > 0) { ret = strncmp(buf, search, strlen(search)); if (!ret) break; } if (ret) goto done; s = buf; p = strchr(buf, ':'); if (p && *(p+1) == ' ' && *(p+2)) s = p + 2; p = strchr(s, '\n'); if (p) *p = '\0'; /* squash extra space characters (branding string) */ p = s; while (*p) { if (isspace(*p)) { char *r = p + 1; char *q = r; *p = ' '; while (*q && isspace(*q)) q++; if (q != (p+1)) while ((*r++ = *q++)); } p++; } ret = do_write_string(fd, s); done: free(buf); fclose(file); return ret; } static int write_nrcpus(int fd, struct perf_header *h __used, struct perf_evlist *evlist __used) { long nr; u32 nrc, nra; int ret; nr = sysconf(_SC_NPROCESSORS_CONF); if (nr < 0) return -1; nrc = 
(u32)(nr & UINT_MAX); nr = sysconf(_SC_NPROCESSORS_ONLN); if (nr < 0) return -1; nra = (u32)(nr & UINT_MAX); ret = do_write(fd, &nrc, sizeof(nrc)); if (ret < 0) return ret; return do_write(fd, &nra, sizeof(nra)); } static int write_event_desc(int fd, struct perf_header *h __used, struct perf_evlist *evlist) { struct perf_evsel *attr; u32 nre = 0, nri, sz; int ret; list_for_each_entry(attr, &evlist->entries, node) nre++; /* * write number of events */ ret = do_write(fd, &nre, sizeof(nre)); if (ret < 0) return ret; /* * size of perf_event_attr struct */ sz = (u32)sizeof(attr->attr); ret = do_write(fd, &sz, sizeof(sz)); if (ret < 0) return ret; list_for_each_entry(attr, &evlist->entries, node) { ret = do_write(fd, &attr->attr, sz); if (ret < 0) return ret; /* * write number of unique id per event * there is one id per instance of an event * * copy into an nri to be independent of the * type of ids, */ nri = attr->ids; ret = do_write(fd, &nri, sizeof(nri)); if (ret < 0) return ret; /* * write event string as passed on cmdline */ ret = do_write_string(fd, event_name(attr)); if (ret < 0) return ret; /* * write unique ids for this event */ ret = do_write(fd, attr->id, attr->ids * sizeof(u64)); if (ret < 0) return ret; } return 0; } static int write_cmdline(int fd, struct perf_header *h __used, struct perf_evlist *evlist __used) { char buf[MAXPATHLEN]; char proc[32]; u32 i, n; int ret; /* * actual atual path to perf binary */ sprintf(proc, "/proc/%d/exe", getpid()); ret = readlink(proc, buf, sizeof(buf)); if (ret <= 0) return -1; /* readlink() does not add null termination */ buf[ret] = '\0'; /* account for binary path */ n = header_argc + 1; ret = do_write(fd, &n, sizeof(n)); if (ret < 0) return ret; ret = do_write_string(fd, buf); if (ret < 0) return ret; for (i = 0 ; i < header_argc; i++) { ret = do_write_string(fd, header_argv[i]); if (ret < 0) return ret; } return 0; } #define CORE_SIB_FMT \ "/sys/devices/system/cpu/cpu%d/topology/core_siblings_list" #define 
THRD_SIB_FMT \ "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list" struct cpu_topo { u32 core_sib; u32 thread_sib; char **core_siblings; char **thread_siblings; }; static int build_cpu_topo(struct cpu_topo *tp, int cpu) { FILE *fp; char filename[MAXPATHLEN]; char *buf = NULL, *p; size_t len = 0; u32 i = 0; int ret = -1; sprintf(filename, CORE_SIB_FMT, cpu); fp = fopen(filename, "r"); if (!fp) return -1; if (getline(&buf, &len, fp) <= 0) goto done; fclose(fp); p = strchr(buf, '\n'); if (p) *p = '\0'; for (i = 0; i < tp->core_sib; i++) { if (!strcmp(buf, tp->core_siblings[i])) break; } if (i == tp->core_sib) { tp->core_siblings[i] = buf; tp->core_sib++; buf = NULL; len = 0; } sprintf(filename, THRD_SIB_FMT, cpu); fp = fopen(filename, "r"); if (!fp) goto done; if (getline(&buf, &len, fp) <= 0) goto done; p = strchr(buf, '\n'); if (p) *p = '\0'; for (i = 0; i < tp->thread_sib; i++) { if (!strcmp(buf, tp->thread_siblings[i])) break; } if (i == tp->thread_sib) { tp->thread_siblings[i] = buf; tp->thread_sib++; buf = NULL; } ret = 0; done: if(fp) fclose(fp); free(buf); return ret; } static void free_cpu_topo(struct cpu_topo *tp) { u32 i; if (!tp) return; for (i = 0 ; i < tp->core_sib; i++) free(tp->core_siblings[i]); for (i = 0 ; i < tp->thread_sib; i++) free(tp->thread_siblings[i]); free(tp); } static struct cpu_topo *build_cpu_topology(void) { struct cpu_topo *tp; void *addr; u32 nr, i; size_t sz; long ncpus; int ret = -1; ncpus = sysconf(_SC_NPROCESSORS_CONF); if (ncpus < 0) return NULL; nr = (u32)(ncpus & UINT_MAX); sz = nr * sizeof(char *); addr = calloc(1, sizeof(*tp) + 2 * sz); if (!addr) return NULL; tp = addr; addr += sizeof(*tp); tp->core_siblings = addr; addr += sz; tp->thread_siblings = addr; for (i = 0; i < nr; i++) { ret = build_cpu_topo(tp, i); if (ret < 0) break; } if (ret) { free_cpu_topo(tp); tp = NULL; } return tp; } static int write_cpu_topology(int fd, struct perf_header *h __used, struct perf_evlist *evlist __used) { struct cpu_topo *tp; u32 
i; int ret; tp = build_cpu_topology(); if (!tp) return -1; ret = do_write(fd, &tp->core_sib, sizeof(tp->core_sib)); if (ret < 0) goto done; for (i = 0; i < tp->core_sib; i++) { ret = do_write_string(fd, tp->core_siblings[i]); if (ret < 0) goto done; } ret = do_write(fd, &tp->thread_sib, sizeof(tp->thread_sib)); if (ret < 0) goto done; for (i = 0; i < tp->thread_sib; i++) { ret = do_write_string(fd, tp->thread_siblings[i]); if (ret < 0) break; } done: free_cpu_topo(tp); return ret; } static int write_total_mem(int fd, struct perf_header *h __used, struct perf_evlist *evlist __used) { char *buf = NULL; FILE *fp; size_t len = 0; int ret = -1, n; uint64_t mem; fp = fopen("/proc/meminfo", "r"); if (!fp) return -1; while (getline(&buf, &len, fp) > 0) { ret = strncmp(buf, "MemTotal:", 9); if (!ret) break; } if (!ret) { n = sscanf(buf, "%*s %"PRIu64, &mem); if (n == 1) ret = do_write(fd, &mem, sizeof(mem)); } free(buf); fclose(fp); return ret; } static int write_topo_node(int fd, int node) { char str[MAXPATHLEN]; char field[32]; char *buf = NULL, *p; size_t len = 0; FILE *fp; u64 mem_total, mem_free, mem; int ret = -1; sprintf(str, "/sys/devices/system/node/node%d/meminfo", node); fp = fopen(str, "r"); if (!fp) return -1; while (getline(&buf, &len, fp) > 0) { /* skip over invalid lines */ if (!strchr(buf, ':')) continue; if (sscanf(buf, "%*s %*d %s %"PRIu64, field, &mem) != 2) goto done; if (!strcmp(field, "MemTotal:")) mem_total = mem; if (!strcmp(field, "MemFree:")) mem_free = mem; } fclose(fp); ret = do_write(fd, &mem_total, sizeof(u64)); if (ret) goto done; ret = do_write(fd, &mem_free, sizeof(u64)); if (ret) goto done; ret = -1; sprintf(str, "/sys/devices/system/node/node%d/cpulist", node); fp = fopen(str, "r"); if (!fp) goto done; if (getline(&buf, &len, fp) <= 0) goto done; p = strchr(buf, '\n'); if (p) *p = '\0'; ret = do_write_string(fd, buf); done: free(buf); fclose(fp); return ret; } static int write_numa_topology(int fd, struct perf_header *h __used, struct 
perf_evlist *evlist __used) { char *buf = NULL; size_t len = 0; FILE *fp; struct cpu_map *node_map = NULL; char *c; u32 nr, i, j; int ret = -1; fp = fopen("/sys/devices/system/node/online", "r"); if (!fp) return -1; if (getline(&buf, &len, fp) <= 0) goto done; c = strchr(buf, '\n'); if (c) *c = '\0'; node_map = cpu_map__new(buf); if (!node_map) goto done; nr = (u32)node_map->nr; ret = do_write(fd, &nr, sizeof(nr)); if (ret < 0) goto done; for (i = 0; i < nr; i++) { j = (u32)node_map->map[i]; ret = do_write(fd, &j, sizeof(j)); if (ret < 0) break; ret = write_topo_node(fd, i); if (ret < 0) break; } done: free(buf); fclose(fp); free(node_map); return ret; } /* * default get_cpuid(): nothing gets recorded * actual implementation must be in arch/$(ARCH)/util/header.c */ int __attribute__((weak)) get_cpuid(char *buffer __used, size_t sz __used) { return -1; } static int write_cpuid(int fd, struct perf_header *h __used, struct perf_evlist *evlist __used) { char buffer[64]; int ret; ret = get_cpuid(buffer, sizeof(buffer)); if (!ret) goto write_it; return -1; write_it: return do_write_string(fd, buffer); } static int write_branch_stack(int fd __used, struct perf_header *h __used, struct perf_evlist *evlist __used) { return 0; } static void print_hostname(struct perf_header *ph, int fd, FILE *fp) { char *str = do_read_string(fd, ph); fprintf(fp, "# hostname : %s\n", str); free(str); } static void print_osrelease(struct perf_header *ph, int fd, FILE *fp) { char *str = do_read_string(fd, ph); fprintf(fp, "# os release : %s\n", str); free(str); } static void print_arch(struct perf_header *ph, int fd, FILE *fp) { char *str = do_read_string(fd, ph); fprintf(fp, "# arch : %s\n", str); free(str); } static void print_cpudesc(struct perf_header *ph, int fd, FILE *fp) { char *str = do_read_string(fd, ph); fprintf(fp, "# cpudesc : %s\n", str); free(str); } static void print_nrcpus(struct perf_header *ph, int fd, FILE *fp) { ssize_t ret; u32 nr; ret = read(fd, &nr, sizeof(nr)); if (ret 
!= (ssize_t)sizeof(nr)) nr = -1; /* interpreted as error */ if (ph->needs_swap) nr = bswap_32(nr); fprintf(fp, "# nrcpus online : %u\n", nr); ret = read(fd, &nr, sizeof(nr)); if (ret != (ssize_t)sizeof(nr)) nr = -1; /* interpreted as error */ if (ph->needs_swap) nr = bswap_32(nr); fprintf(fp, "# nrcpus avail : %u\n", nr); } static void print_version(struct perf_header *ph, int fd, FILE *fp) { char *str = do_read_string(fd, ph); fprintf(fp, "# perf version : %s\n", str); free(str); } static void print_cmdline(struct perf_header *ph, int fd, FILE *fp) { ssize_t ret; char *str; u32 nr, i; ret = read(fd, &nr, sizeof(nr)); if (ret != (ssize_t)sizeof(nr)) return; if (ph->needs_swap) nr = bswap_32(nr); fprintf(fp, "# cmdline : "); for (i = 0; i < nr; i++) { str = do_read_string(fd, ph); fprintf(fp, "%s ", str); free(str); } fputc('\n', fp); } static void print_cpu_topology(struct perf_header *ph, int fd, FILE *fp) { ssize_t ret; u32 nr, i; char *str; ret = read(fd, &nr, sizeof(nr)); if (ret != (ssize_t)sizeof(nr)) return; if (ph->needs_swap) nr = bswap_32(nr); for (i = 0; i < nr; i++) { str = do_read_string(fd, ph); fprintf(fp, "# sibling cores : %s\n", str); free(str); } ret = read(fd, &nr, sizeof(nr)); if (ret != (ssize_t)sizeof(nr)) return; if (ph->needs_swap) nr = bswap_32(nr); for (i = 0; i < nr; i++) { str = do_read_string(fd, ph); fprintf(fp, "# sibling threads : %s\n", str); free(str); } } static void print_event_desc(struct perf_header *ph, int fd, FILE *fp) { struct perf_event_attr attr; uint64_t id; void *buf = NULL; char *str; u32 nre, sz, nr, i, j; ssize_t ret; size_t msz; /* number of events */ ret = read(fd, &nre, sizeof(nre)); if (ret != (ssize_t)sizeof(nre)) goto error; if (ph->needs_swap) nre = bswap_32(nre); ret = read(fd, &sz, sizeof(sz)); if (ret != (ssize_t)sizeof(sz)) goto error; if (ph->needs_swap) sz = bswap_32(sz); memset(&attr, 0, sizeof(attr)); /* buffer to hold on file attr struct */ buf = malloc(sz); if (!buf) goto error; msz = sizeof(attr); 
if (sz < msz) msz = sz; for (i = 0 ; i < nre; i++) { /* * must read entire on-file attr struct to * sync up with layout. */ ret = read(fd, buf, sz); if (ret != (ssize_t)sz) goto error; if (ph->needs_swap) perf_event__attr_swap(buf); memcpy(&attr, buf, msz); ret = read(fd, &nr, sizeof(nr)); if (ret != (ssize_t)sizeof(nr)) goto error; if (ph->needs_swap) nr = bswap_32(nr); str = do_read_string(fd, ph); fprintf(fp, "# event : name = %s, ", str); free(str); fprintf(fp, "type = %d, config = 0x%"PRIx64 ", config1 = 0x%"PRIx64", config2 = 0x%"PRIx64, attr.type, (u64)attr.config, (u64)attr.config1, (u64)attr.config2); fprintf(fp, ", excl_usr = %d, excl_kern = %d", attr.exclude_user, attr.exclude_kernel); if (nr) fprintf(fp, ", id = {"); for (j = 0 ; j < nr; j++) { ret = read(fd, &id, sizeof(id)); if (ret != (ssize_t)sizeof(id)) goto error; if (ph->needs_swap) id = bswap_64(id); if (j) fputc(',', fp); fprintf(fp, " %"PRIu64, id); } if (nr && j == nr) fprintf(fp, " }"); fputc('\n', fp); } free(buf); return; error: fprintf(fp, "# event desc: not available or unable to read\n"); } static void print_total_mem(struct perf_header *h __used, int fd, FILE *fp) { uint64_t mem; ssize_t ret; ret = read(fd, &mem, sizeof(mem)); if (ret != sizeof(mem)) goto error; if (h->needs_swap) mem = bswap_64(mem); fprintf(fp, "# total memory : %"PRIu64" kB\n", mem); return; error: fprintf(fp, "# total memory : unknown\n"); } static void print_numa_topology(struct perf_header *h __used, int fd, FILE *fp) { ssize_t ret; u32 nr, c, i; char *str; uint64_t mem_total, mem_free; /* nr nodes */ ret = read(fd, &nr, sizeof(nr)); if (ret != (ssize_t)sizeof(nr)) goto error; if (h->needs_swap) nr = bswap_32(nr); for (i = 0; i < nr; i++) { /* node number */ ret = read(fd, &c, sizeof(c)); if (ret != (ssize_t)sizeof(c)) goto error; if (h->needs_swap) c = bswap_32(c); ret = read(fd, &mem_total, sizeof(u64)); if (ret != sizeof(u64)) goto error; ret = read(fd, &mem_free, sizeof(u64)); if (ret != sizeof(u64)) goto 
error; if (h->needs_swap) { mem_total = bswap_64(mem_total); mem_free = bswap_64(mem_free); } fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB," " free = %"PRIu64" kB\n", c, mem_total, mem_free); str = do_read_string(fd, h); fprintf(fp, "# node%u cpu list : %s\n", c, str); free(str); } return; error: fprintf(fp, "# numa topology : not available\n"); } static void print_cpuid(struct perf_header *ph, int fd, FILE *fp) { char *str = do_read_string(fd, ph); fprintf(fp, "# cpuid : %s\n", str); free(str); } static void print_branch_stack(struct perf_header *ph __used, int fd __used, FILE *fp) { fprintf(fp, "# contains samples with branch stack\n"); } static int __event_process_build_id(struct build_id_event *bev, char *filename, struct perf_session *session) { int err = -1; struct list_head *head; struct machine *machine; u16 misc; struct dso *dso; enum dso_kernel_type dso_type; machine = perf_session__findnew_machine(session, bev->pid); if (!machine) goto out; misc = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; switch (misc) { case PERF_RECORD_MISC_KERNEL: dso_type = DSO_TYPE_KERNEL; head = &machine->kernel_dsos; break; case PERF_RECORD_MISC_GUEST_KERNEL: dso_type = DSO_TYPE_GUEST_KERNEL; head = &machine->kernel_dsos; break; case PERF_RECORD_MISC_USER: case PERF_RECORD_MISC_GUEST_USER: dso_type = DSO_TYPE_USER; head = &machine->user_dsos; break; default: goto out; } dso = __dsos__findnew(head, filename); if (dso != NULL) { char sbuild_id[BUILD_ID_SIZE * 2 + 1]; dso__set_build_id(dso, &bev->build_id); if (filename[0] == '[') dso->kernel = dso_type; build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id); pr_debug("build id event received for %s: %s\n", dso->long_name, sbuild_id); } err = 0; out: return err; } static int perf_header__read_build_ids_abi_quirk(struct perf_header *header, int input, u64 offset, u64 size) { struct perf_session *session = container_of(header, struct perf_session, header); struct { struct perf_event_header header; u8 
build_id[ALIGN(BUILD_ID_SIZE, sizeof(u64))]; char filename[0]; } old_bev; struct build_id_event bev; char filename[PATH_MAX]; u64 limit = offset + size; while (offset < limit) { ssize_t len; if (read(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev)) return -1; if (header->needs_swap) perf_event_header__bswap(&old_bev.header); len = old_bev.header.size - sizeof(old_bev); if (read(input, filename, len) != len) return -1; bev.header = old_bev.header; /* * As the pid is the missing value, we need to fill * it properly. The header.misc value give us nice hint. */ bev.pid = HOST_KERNEL_ID; if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER || bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL) bev.pid = DEFAULT_GUEST_KERNEL_ID; memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id)); __event_process_build_id(&bev, filename, session); offset += bev.header.size; } return 0; } static int perf_header__read_build_ids(struct perf_header *header, int input, u64 offset, u64 size) { struct perf_session *session = container_of(header, struct perf_session, header); struct build_id_event bev; char filename[PATH_MAX]; u64 limit = offset + size, orig_offset = offset; int err = -1; while (offset < limit) { ssize_t len; if (read(input, &bev, sizeof(bev)) != sizeof(bev)) goto out; if (header->needs_swap) perf_event_header__bswap(&bev.header); len = bev.header.size - sizeof(bev); if (read(input, filename, len) != len) goto out; /* * The a1645ce1 changeset: * * "perf: 'perf kvm' tool for monitoring guest performance from host" * * Added a field to struct build_id_event that broke the file * format. * * Since the kernel build-id is the first entry, process the * table using the old format if the well known * '[kernel.kallsyms]' string for the kernel build-id has the * first 4 characters chopped off (where the pid_t sits). 
*/ if (memcmp(filename, "nel.kallsyms]", 13) == 0) { if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1) return -1; return perf_header__read_build_ids_abi_quirk(header, input, offset, size); } __event_process_build_id(&bev, filename, session); offset += bev.header.size; } err = 0; out: return err; } static int process_trace_info(struct perf_file_section *section __unused, struct perf_header *ph __unused, int feat __unused, int fd) { trace_report(fd, false); return 0; } static int process_build_id(struct perf_file_section *section, struct perf_header *ph, int feat __unused, int fd) { if (perf_header__read_build_ids(ph, fd, section->offset, section->size)) pr_debug("Failed to read buildids, continuing...\n"); return 0; } struct feature_ops { int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist); void (*print)(struct perf_header *h, int fd, FILE *fp); int (*process)(struct perf_file_section *section, struct perf_header *h, int feat, int fd); const char *name; bool full_only; }; #define FEAT_OPA(n, func) \ [n] = { .name = #n, .write = write_##func, .print = print_##func } #define FEAT_OPP(n, func) \ [n] = { .name = #n, .write = write_##func, .print = print_##func, \ .process = process_##func } #define FEAT_OPF(n, func) \ [n] = { .name = #n, .write = write_##func, .print = print_##func, \ .full_only = true } /* feature_ops not implemented: */ #define print_trace_info NULL #define print_build_id NULL static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = { FEAT_OPP(HEADER_TRACE_INFO, trace_info), FEAT_OPP(HEADER_BUILD_ID, build_id), FEAT_OPA(HEADER_HOSTNAME, hostname), FEAT_OPA(HEADER_OSRELEASE, osrelease), FEAT_OPA(HEADER_VERSION, version), FEAT_OPA(HEADER_ARCH, arch), FEAT_OPA(HEADER_NRCPUS, nrcpus), FEAT_OPA(HEADER_CPUDESC, cpudesc), FEAT_OPA(HEADER_CPUID, cpuid), FEAT_OPA(HEADER_TOTAL_MEM, total_mem), FEAT_OPA(HEADER_EVENT_DESC, event_desc), FEAT_OPA(HEADER_CMDLINE, cmdline), FEAT_OPF(HEADER_CPU_TOPOLOGY, cpu_topology), 
FEAT_OPF(HEADER_NUMA_TOPOLOGY, numa_topology), FEAT_OPA(HEADER_BRANCH_STACK, branch_stack), }; struct header_print_data { FILE *fp; bool full; /* extended list of headers */ }; static int perf_file_section__fprintf_info(struct perf_file_section *section, struct perf_header *ph, int feat, int fd, void *data) { struct header_print_data *hd = data; if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) { pr_debug("Failed to lseek to %" PRIu64 " offset for feature " "%d, continuing...\n", section->offset, feat); return 0; } if (feat >= HEADER_LAST_FEATURE) { pr_warning("unknown feature %d\n", feat); return 0; } if (!feat_ops[feat].print) return 0; if (!feat_ops[feat].full_only || hd->full) feat_ops[feat].print(ph, fd, hd->fp); else fprintf(hd->fp, "# %s info available, use -I to display\n", feat_ops[feat].name); return 0; } int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full) { struct header_print_data hd; struct perf_header *header = &session->header; int fd = session->fd; hd.fp = fp; hd.full = full; perf_header__process_sections(header, fd, &hd, perf_file_section__fprintf_info); return 0; } static int do_write_feat(int fd, struct perf_header *h, int type, struct perf_file_section **p, struct perf_evlist *evlist) { int err; int ret = 0; if (perf_header__has_feat(h, type)) { if (!feat_ops[type].write) return -1; (*p)->offset = lseek(fd, 0, SEEK_CUR); err = feat_ops[type].write(fd, h, evlist); if (err < 0) { pr_debug("failed to write feature %d\n", type); /* undo anything written */ lseek(fd, (*p)->offset, SEEK_SET); return -1; } (*p)->size = lseek(fd, 0, SEEK_CUR) - (*p)->offset; (*p)++; } return ret; } static int perf_header__adds_write(struct perf_header *header, struct perf_evlist *evlist, int fd) { int nr_sections; struct perf_file_section *feat_sec, *p; int sec_size; u64 sec_start; int feat; int err; nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS); if (!nr_sections) return 0; feat_sec = p = calloc(sizeof(*feat_sec), 
nr_sections); if (feat_sec == NULL) return -ENOMEM; sec_size = sizeof(*feat_sec) * nr_sections; sec_start = header->data_offset + header->data_size; lseek(fd, sec_start + sec_size, SEEK_SET); for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) { if (do_write_feat(fd, header, feat, &p, evlist)) perf_header__clear_feat(header, feat); } lseek(fd, sec_start, SEEK_SET); /* * may write more than needed due to dropped feature, but * this is okay, reader will skip the mising entries */ err = do_write(fd, feat_sec, sec_size); if (err < 0) pr_debug("failed to write feature section\n"); free(feat_sec); return err; } int perf_header__write_pipe(int fd) { struct perf_pipe_file_header f_header; int err; f_header = (struct perf_pipe_file_header){ .magic = PERF_MAGIC, .size = sizeof(f_header), }; err = do_write(fd, &f_header, sizeof(f_header)); if (err < 0) { pr_debug("failed to write perf pipe header\n"); return err; } return 0; } int perf_session__write_header(struct perf_session *session, struct perf_evlist *evlist, int fd, bool at_exit) { struct perf_file_header f_header; struct perf_file_attr f_attr; struct perf_header *header = &session->header; struct perf_evsel *attr, *pair = NULL; int err; lseek(fd, sizeof(f_header), SEEK_SET); if (session->evlist != evlist) pair = list_entry(session->evlist->entries.next, struct perf_evsel, node); list_for_each_entry(attr, &evlist->entries, node) { attr->id_offset = lseek(fd, 0, SEEK_CUR); err = do_write(fd, attr->id, attr->ids * sizeof(u64)); if (err < 0) { out_err_write: pr_debug("failed to write perf header\n"); return err; } if (session->evlist != evlist) { err = do_write(fd, pair->id, pair->ids * sizeof(u64)); if (err < 0) goto out_err_write; attr->ids += pair->ids; pair = list_entry(pair->node.next, struct perf_evsel, node); } } header->attr_offset = lseek(fd, 0, SEEK_CUR); list_for_each_entry(attr, &evlist->entries, node) { f_attr = (struct perf_file_attr){ .attr = attr->attr, .ids = { .offset = attr->id_offset, .size 
= attr->ids * sizeof(u64), } }; err = do_write(fd, &f_attr, sizeof(f_attr)); if (err < 0) { pr_debug("failed to write perf header attribute\n"); return err; } } header->event_offset = lseek(fd, 0, SEEK_CUR); header->event_size = event_count * sizeof(struct perf_trace_event_type); if (events) { err = do_write(fd, events, header->event_size); if (err < 0) { pr_debug("failed to write perf header events\n"); return err; } } header->data_offset = lseek(fd, 0, SEEK_CUR); if (at_exit) { err = perf_header__adds_write(header, evlist, fd); if (err < 0) return err; } f_header = (struct perf_file_header){ .magic = PERF_MAGIC, .size = sizeof(f_header), .attr_size = sizeof(f_attr), .attrs = { .offset = header->attr_offset, .size = evlist->nr_entries * sizeof(f_attr), }, .data = { .offset = header->data_offset, .size = header->data_size, }, .event_types = { .offset = header->event_offset, .size = header->event_size, }, }; memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features)); lseek(fd, 0, SEEK_SET); err = do_write(fd, &f_header, sizeof(f_header)); if (err < 0) { pr_debug("failed to write perf header\n"); return err; } lseek(fd, header->data_offset + header->data_size, SEEK_SET); header->frozen = 1; return 0; } static int perf_header__getbuffer64(struct perf_header *header, int fd, void *buf, size_t size) { if (readn(fd, buf, size) <= 0) return -1; if (header->needs_swap) mem_bswap_64(buf, size); return 0; } int perf_header__process_sections(struct perf_header *header, int fd, void *data, int (*process)(struct perf_file_section *section, struct perf_header *ph, int feat, int fd, void *data)) { struct perf_file_section *feat_sec, *sec; int nr_sections; int sec_size; int feat; int err; nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS); if (!nr_sections) return 0; feat_sec = sec = calloc(sizeof(*feat_sec), nr_sections); if (!feat_sec) return -1; sec_size = sizeof(*feat_sec) * nr_sections; lseek(fd, header->data_offset + 
header->data_size, SEEK_SET); err = perf_header__getbuffer64(header, fd, feat_sec, sec_size); if (err < 0) goto out_free; for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) { err = process(sec++, header, feat, fd, data); if (err < 0) goto out_free; } err = 0; out_free: free(feat_sec); return err; } static const int attr_file_abi_sizes[] = { [0] = PERF_ATTR_SIZE_VER0, [1] = PERF_ATTR_SIZE_VER1, 0, }; /* * In the legacy file format, the magic number is not used to encode endianness. * hdr_sz was used to encode endianness. But given that hdr_sz can vary based * on ABI revisions, we need to try all combinations for all endianness to * detect the endianness. */ static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph) { uint64_t ref_size, attr_size; int i; for (i = 0 ; attr_file_abi_sizes[i]; i++) { ref_size = attr_file_abi_sizes[i] + sizeof(struct perf_file_section); if (hdr_sz != ref_size) { attr_size = bswap_64(hdr_sz); if (attr_size != ref_size) continue; ph->needs_swap = true; } pr_debug("ABI%d perf.data file detected, need_swap=%d\n", i, ph->needs_swap); return 0; } /* could not determine endianness */ return -1; } #define PERF_PIPE_HDR_VER0 16 static const size_t attr_pipe_abi_sizes[] = { [0] = PERF_PIPE_HDR_VER0, 0, }; /* * In the legacy pipe format, there is an implicit assumption that endiannesss * between host recording the samples, and host parsing the samples is the * same. This is not always the case given that the pipe output may always be * redirected into a file and analyzed on a different machine with possibly a * different endianness and perf_event ABI revsions in the perf tool itself. 
*/ static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph) { u64 attr_size; int i; for (i = 0 ; attr_pipe_abi_sizes[i]; i++) { if (hdr_sz != attr_pipe_abi_sizes[i]) { attr_size = bswap_64(hdr_sz); if (attr_size != hdr_sz) continue; ph->needs_swap = true; } pr_debug("Pipe ABI%d perf.data file detected\n", i); return 0; } return -1; } static int check_magic_endian(u64 magic, uint64_t hdr_sz, bool is_pipe, struct perf_header *ph) { int ret; /* check for legacy format */ ret = memcmp(&magic, __perf_magic1, sizeof(magic)); if (ret == 0) { pr_debug("legacy perf.data format\n"); if (is_pipe) return try_all_pipe_abis(hdr_sz, ph); return try_all_file_abis(hdr_sz, ph); } /* * the new magic number serves two purposes: * - unique number to identify actual perf.data files * - encode endianness of file */ /* check magic number with one endianness */ if (magic == __perf_magic2) return 0; /* check magic number with opposite endianness */ if (magic != __perf_magic2_sw) return -1; ph->needs_swap = true; return 0; } int perf_file_header__read(struct perf_file_header *header, struct perf_header *ph, int fd) { int ret; lseek(fd, 0, SEEK_SET); ret = readn(fd, header, sizeof(*header)); if (ret <= 0) return -1; if (check_magic_endian(header->magic, header->attr_size, false, ph) < 0) { pr_debug("magic/endian check failed\n"); return -1; } if (ph->needs_swap) { mem_bswap_64(header, offsetof(struct perf_file_header, adds_features)); } if (header->size != sizeof(*header)) { /* Support the previous format */ if (header->size == offsetof(typeof(*header), adds_features)) bitmap_zero(header->adds_features, HEADER_FEAT_BITS); else return -1; } else if (ph->needs_swap) { unsigned int i; /* * feature bitmap is declared as an array of unsigned longs -- * not good since its size can differ between the host that * generated the data file and the host analyzing the file. * * We need to handle endianness, but we don't know the size of * the unsigned long where the file was generated. 
Take a best * guess at determining it: try 64-bit swap first (ie., file * created on a 64-bit host), and check if the hostname feature * bit is set (this feature bit is forced on as of fbe96f2). * If the bit is not, undo the 64-bit swap and try a 32-bit * swap. If the hostname bit is still not set (e.g., older data * file), punt and fallback to the original behavior -- * clearing all feature bits and setting buildid. */ for (i = 0; i < BITS_TO_LONGS(HEADER_FEAT_BITS); ++i) header->adds_features[i] = bswap_64(header->adds_features[i]); if (!test_bit(HEADER_HOSTNAME, header->adds_features)) { for (i = 0; i < BITS_TO_LONGS(HEADER_FEAT_BITS); ++i) { header->adds_features[i] = bswap_64(header->adds_features[i]); header->adds_features[i] = bswap_32(header->adds_features[i]); } } if (!test_bit(HEADER_HOSTNAME, header->adds_features)) { bitmap_zero(header->adds_features, HEADER_FEAT_BITS); set_bit(HEADER_BUILD_ID, header->adds_features); } } memcpy(&ph->adds_features, &header->adds_features, sizeof(ph->adds_features)); ph->event_offset = header->event_types.offset; ph->event_size = header->event_types.size; ph->data_offset = header->data.offset; ph->data_size = header->data.size; return 0; } static int perf_file_section__process(struct perf_file_section *section, struct perf_header *ph, int feat, int fd, void *data __used) { if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) { pr_debug("Failed to lseek to %" PRIu64 " offset for feature " "%d, continuing...\n", section->offset, feat); return 0; } if (feat >= HEADER_LAST_FEATURE) { pr_debug("unknown feature %d, continuing...\n", feat); return 0; } if (!feat_ops[feat].process) return 0; return feat_ops[feat].process(section, ph, feat, fd); } static int perf_file_header__read_pipe(struct perf_pipe_file_header *header, struct perf_header *ph, int fd, bool repipe) { int ret; ret = readn(fd, header, sizeof(*header)); if (ret <= 0) return -1; if (check_magic_endian(header->magic, header->size, true, ph) < 0) { 
pr_debug("endian/magic failed\n"); return -1; } if (ph->needs_swap) header->size = bswap_64(header->size); if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0) return -1; return 0; } static int perf_header__read_pipe(struct perf_session *session, int fd) { struct perf_header *header = &session->header; struct perf_pipe_file_header f_header; if (perf_file_header__read_pipe(&f_header, header, fd, session->repipe) < 0) { pr_debug("incompatible file format\n"); return -EINVAL; } session->fd = fd; return 0; } static int read_attr(int fd, struct perf_header *ph, struct perf_file_attr *f_attr) { struct perf_event_attr *attr = &f_attr->attr; size_t sz, left; size_t our_sz = sizeof(f_attr->attr); int ret; memset(f_attr, 0, sizeof(*f_attr)); /* read minimal guaranteed structure */ ret = readn(fd, attr, PERF_ATTR_SIZE_VER0); if (ret <= 0) { pr_debug("cannot read %d bytes of header attr\n", PERF_ATTR_SIZE_VER0); return -1; } /* on file perf_event_attr size */ sz = attr->size; if (ph->needs_swap) sz = bswap_32(sz); if (sz == 0) { /* assume ABI0 */ sz = PERF_ATTR_SIZE_VER0; } else if (sz > our_sz) { pr_debug("file uses a more recent and unsupported ABI" " (%zu bytes extra)\n", sz - our_sz); return -1; } /* what we have not yet read and that we know about */ left = sz - PERF_ATTR_SIZE_VER0; if (left) { void *ptr = attr; ptr += PERF_ATTR_SIZE_VER0; ret = readn(fd, ptr, left); } /* read perf_file_section, ids are read in caller */ ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids)); return ret <= 0 ? 
-1 : 0; } int perf_session__read_header(struct perf_session *session, int fd) { struct perf_header *header = &session->header; struct perf_file_header f_header; struct perf_file_attr f_attr; u64 f_id; int nr_attrs, nr_ids, i, j; session->evlist = perf_evlist__new(NULL, NULL); if (session->evlist == NULL) return -ENOMEM; if (session->fd_pipe) return perf_header__read_pipe(session, fd); if (perf_file_header__read(&f_header, header, fd) < 0) return -EINVAL; nr_attrs = f_header.attrs.size / f_header.attr_size; lseek(fd, f_header.attrs.offset, SEEK_SET); for (i = 0; i < nr_attrs; i++) { struct perf_evsel *evsel; off_t tmp; if (read_attr(fd, header, &f_attr) < 0) goto out_errno; if (header->needs_swap) perf_event__attr_swap(&f_attr.attr); tmp = lseek(fd, 0, SEEK_CUR); evsel = perf_evsel__new(&f_attr.attr, i); if (evsel == NULL) goto out_delete_evlist; /* * Do it before so that if perf_evsel__alloc_id fails, this * entry gets purged too at perf_evlist__delete(). */ perf_evlist__add(session->evlist, evsel); nr_ids = f_attr.ids.size / sizeof(u64); /* * We don't have the cpu and thread maps on the header, so * for allocating the perf_sample_id table we fake 1 cpu and * hattr->ids threads. 
*/ if (perf_evsel__alloc_id(evsel, 1, nr_ids)) goto out_delete_evlist; lseek(fd, f_attr.ids.offset, SEEK_SET); for (j = 0; j < nr_ids; j++) { if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id))) goto out_errno; perf_evlist__id_add(session->evlist, evsel, 0, j, f_id); } lseek(fd, tmp, SEEK_SET); } symbol_conf.nr_events = nr_attrs; if (f_header.event_types.size) { lseek(fd, f_header.event_types.offset, SEEK_SET); events = malloc(f_header.event_types.size); if (events == NULL) return -ENOMEM; if (perf_header__getbuffer64(header, fd, events, f_header.event_types.size)) goto out_errno; event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type); } perf_header__process_sections(header, fd, NULL, perf_file_section__process); lseek(fd, header->data_offset, SEEK_SET); header->frozen = 1; return 0; out_errno: return -errno; out_delete_evlist: perf_evlist__delete(session->evlist); session->evlist = NULL; return -ENOMEM; } int perf_event__synthesize_attr(struct perf_tool *tool, struct perf_event_attr *attr, u16 ids, u64 *id, perf_event__handler_t process) { union perf_event *ev; size_t size; int err; size = sizeof(struct perf_event_attr); size = ALIGN(size, sizeof(u64)); size += sizeof(struct perf_event_header); size += ids * sizeof(u64); ev = malloc(size); if (ev == NULL) return -ENOMEM; ev->attr.attr = *attr; memcpy(ev->attr.id, id, ids * sizeof(u64)); ev->attr.header.type = PERF_RECORD_HEADER_ATTR; ev->attr.header.size = size; err = process(tool, ev, NULL, NULL); free(ev); return err; } int perf_event__synthesize_attrs(struct perf_tool *tool, struct perf_session *session, perf_event__handler_t process) { struct perf_evsel *attr; int err = 0; list_for_each_entry(attr, &session->evlist->entries, node) { err = perf_event__synthesize_attr(tool, &attr->attr, attr->ids, attr->id, process); if (err) { pr_debug("failed to create perf header attribute\n"); return err; } } return err; } int perf_event__process_attr(union perf_event *event, struct 
perf_evlist **pevlist) { unsigned int i, ids, n_ids; struct perf_evsel *evsel; struct perf_evlist *evlist = *pevlist; if (evlist == NULL) { *pevlist = evlist = perf_evlist__new(NULL, NULL); if (evlist == NULL) return -ENOMEM; } evsel = perf_evsel__new(&event->attr.attr, evlist->nr_entries); if (evsel == NULL) return -ENOMEM; perf_evlist__add(evlist, evsel); ids = event->header.size; ids -= (void *)&event->attr.id - (void *)event; n_ids = ids / sizeof(u64); /* * We don't have the cpu and thread maps on the header, so * for allocating the perf_sample_id table we fake 1 cpu and * hattr->ids threads. */ if (perf_evsel__alloc_id(evsel, 1, n_ids)) return -ENOMEM; for (i = 0; i < n_ids; i++) { perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]); } return 0; } int perf_event__synthesize_event_type(struct perf_tool *tool, u64 event_id, char *name, perf_event__handler_t process, struct machine *machine) { union perf_event ev; size_t size = 0; int err = 0; memset(&ev, 0, sizeof(ev)); ev.event_type.event_type.event_id = event_id; memset(ev.event_type.event_type.name, 0, MAX_EVENT_NAME); strncpy(ev.event_type.event_type.name, name, MAX_EVENT_NAME - 1); ev.event_type.header.type = PERF_RECORD_HEADER_EVENT_TYPE; size = strlen(ev.event_type.event_type.name); size = ALIGN(size, sizeof(u64)); ev.event_type.header.size = sizeof(ev.event_type) - (sizeof(ev.event_type.event_type.name) - size); err = process(tool, &ev, NULL, machine); return err; } int perf_event__synthesize_event_types(struct perf_tool *tool, perf_event__handler_t process, struct machine *machine) { struct perf_trace_event_type *type; int i, err = 0; for (i = 0; i < event_count; i++) { type = &events[i]; err = perf_event__synthesize_event_type(tool, type->event_id, type->name, process, machine); if (err) { pr_debug("failed to create perf header event type\n"); return err; } } return err; } int perf_event__process_event_type(struct perf_tool *tool __unused, union perf_event *event) { if 
(perf_header__push_event(event->event_type.event_type.event_id, event->event_type.event_type.name) < 0) return -ENOMEM; return 0; } int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, struct perf_evlist *evlist, perf_event__handler_t process) { union perf_event ev; struct tracing_data *tdata; ssize_t size = 0, aligned_size = 0, padding; int err __used = 0; /* * We are going to store the size of the data followed * by the data contents. Since the fd descriptor is a pipe, * we cannot seek back to store the size of the data once * we know it. Instead we: * * - write the tracing data to the temp file * - get/write the data size to pipe * - write the tracing data from the temp file * to the pipe */ tdata = tracing_data_get(&evlist->entries, fd, true); if (!tdata) return -1; memset(&ev, 0, sizeof(ev)); ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA; size = tdata->size; aligned_size = ALIGN(size, sizeof(u64)); padding = aligned_size - size; ev.tracing_data.header.size = sizeof(ev.tracing_data); ev.tracing_data.size = aligned_size; process(tool, &ev, NULL, NULL); /* * The put function will copy all the tracing data * stored in temp file to the pipe. 
*/ tracing_data_put(tdata); write_padded(fd, NULL, 0, padding); return aligned_size; } int perf_event__process_tracing_data(union perf_event *event, struct perf_session *session) { ssize_t size_read, padding, size = event->tracing_data.size; off_t offset = lseek(session->fd, 0, SEEK_CUR); char buf[BUFSIZ]; /* setup for reading amidst mmap */ lseek(session->fd, offset + sizeof(struct tracing_data_event), SEEK_SET); size_read = trace_report(session->fd, session->repipe); padding = ALIGN(size_read, sizeof(u64)) - size_read; if (read(session->fd, buf, padding) < 0) die("reading input file"); if (session->repipe) { int retw = write(STDOUT_FILENO, buf, padding); if (retw <= 0 || retw != padding) die("repiping tracing data padding"); } if (size_read + padding != size) die("tracing data size mismatch"); return size_read + padding; } int perf_event__synthesize_build_id(struct perf_tool *tool, struct dso *pos, u16 misc, perf_event__handler_t process, struct machine *machine) { union perf_event ev; size_t len; int err = 0; if (!pos->hit) return err; memset(&ev, 0, sizeof(ev)); len = pos->long_name_len + 1; len = ALIGN(len, NAME_ALIGN); memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id)); ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID; ev.build_id.header.misc = misc; ev.build_id.pid = machine->pid; ev.build_id.header.size = sizeof(ev.build_id) + len; memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len); err = process(tool, &ev, NULL, machine); return err; } int perf_event__process_build_id(struct perf_tool *tool __used, union perf_event *event, struct perf_session *session) { __event_process_build_id(&event->build_id, event->build_id.filename, session); return 0; } void disable_buildid_cache(void) { no_buildid_cache = true; }
gpl-2.0
kldoc/android_kernel_lge_mako
drivers/media/video/em28xx/em28xx-video.c
4802
64534
/* em28xx-video.c - driver for Empia EM2800/EM2820/2840 USB video capture devices Copyright (C) 2005 Ludovico Cavedon <cavedon@sssup.it> Markus Rechberger <mrechberger@gmail.com> Mauro Carvalho Chehab <mchehab@infradead.org> Sascha Sommer <saschasommer@freenet.de> Some parts based on SN9C10x PC Camera Controllers GPL driver made by Luca Risolia <luca.risolia@studio.unibo.it> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> #include <linux/list.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/bitmap.h> #include <linux/usb.h> #include <linux/i2c.h> #include <linux/mm.h> #include <linux/mutex.h> #include <linux/slab.h> #include "em28xx.h" #include <media/v4l2-common.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-chip-ident.h> #include <media/msp3400.h> #include <media/tuner.h> #define DRIVER_AUTHOR "Ludovico Cavedon <cavedon@sssup.it>, " \ "Markus Rechberger <mrechberger@gmail.com>, " \ "Mauro Carvalho Chehab <mchehab@infradead.org>, " \ "Sascha Sommer <saschasommer@freenet.de>" #define DRIVER_DESC "Empia em28xx based USB video device driver" #define EM28XX_VERSION "0.1.3" #define em28xx_videodbg(fmt, arg...) 
do {\ if (video_debug) \ printk(KERN_INFO "%s %s :"fmt, \ dev->name, __func__ , ##arg); } while (0) static unsigned int isoc_debug; module_param(isoc_debug, int, 0644); MODULE_PARM_DESC(isoc_debug, "enable debug messages [isoc transfers]"); #define em28xx_isocdbg(fmt, arg...) \ do {\ if (isoc_debug) { \ printk(KERN_INFO "%s %s :"fmt, \ dev->name, __func__ , ##arg); \ } \ } while (0) MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); MODULE_VERSION(EM28XX_VERSION); static unsigned int video_nr[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = UNSET }; static unsigned int vbi_nr[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = UNSET }; static unsigned int radio_nr[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = UNSET }; module_param_array(video_nr, int, NULL, 0444); module_param_array(vbi_nr, int, NULL, 0444); module_param_array(radio_nr, int, NULL, 0444); MODULE_PARM_DESC(video_nr, "video device numbers"); MODULE_PARM_DESC(vbi_nr, "vbi device numbers"); MODULE_PARM_DESC(radio_nr, "radio device numbers"); static unsigned int video_debug; module_param(video_debug, int, 0644); MODULE_PARM_DESC(video_debug, "enable debug messages [video]"); /* supported video standards */ static struct em28xx_fmt format[] = { { .name = "16 bpp YUY2, 4:2:2, packed", .fourcc = V4L2_PIX_FMT_YUYV, .depth = 16, .reg = EM28XX_OUTFMT_YUV422_Y0UY1V, }, { .name = "16 bpp RGB 565, LE", .fourcc = V4L2_PIX_FMT_RGB565, .depth = 16, .reg = EM28XX_OUTFMT_RGB_16_656, }, { .name = "8 bpp Bayer BGBG..GRGR", .fourcc = V4L2_PIX_FMT_SBGGR8, .depth = 8, .reg = EM28XX_OUTFMT_RGB_8_BGBG, }, { .name = "8 bpp Bayer GRGR..BGBG", .fourcc = V4L2_PIX_FMT_SGRBG8, .depth = 8, .reg = EM28XX_OUTFMT_RGB_8_GRGR, }, { .name = "8 bpp Bayer GBGB..RGRG", .fourcc = V4L2_PIX_FMT_SGBRG8, .depth = 8, .reg = EM28XX_OUTFMT_RGB_8_GBGB, }, { .name = "12 bpp YUV411", .fourcc = V4L2_PIX_FMT_YUV411P, .depth = 12, .reg = EM28XX_OUTFMT_YUV411, }, }; /* supported controls */ /* Common to all boards */ static struct v4l2_queryctrl 
ac97_qctrl[] = { { .id = V4L2_CID_AUDIO_VOLUME, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Volume", .minimum = 0x0, .maximum = 0x1f, .step = 0x1, .default_value = 0x1f, .flags = V4L2_CTRL_FLAG_SLIDER, }, { .id = V4L2_CID_AUDIO_MUTE, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Mute", .minimum = 0, .maximum = 1, .step = 1, .default_value = 1, .flags = 0, } }; /* ------------------------------------------------------------------ DMA and thread functions ------------------------------------------------------------------*/ /* * Announces that a buffer were filled and request the next */ static inline void buffer_filled(struct em28xx *dev, struct em28xx_dmaqueue *dma_q, struct em28xx_buffer *buf) { /* Advice that buffer was filled */ em28xx_isocdbg("[%p/%d] wakeup\n", buf, buf->vb.i); buf->vb.state = VIDEOBUF_DONE; buf->vb.field_count++; do_gettimeofday(&buf->vb.ts); dev->isoc_ctl.vid_buf = NULL; list_del(&buf->vb.queue); wake_up(&buf->vb.done); } static inline void vbi_buffer_filled(struct em28xx *dev, struct em28xx_dmaqueue *dma_q, struct em28xx_buffer *buf) { /* Advice that buffer was filled */ em28xx_isocdbg("[%p/%d] wakeup\n", buf, buf->vb.i); buf->vb.state = VIDEOBUF_DONE; buf->vb.field_count++; do_gettimeofday(&buf->vb.ts); dev->isoc_ctl.vbi_buf = NULL; list_del(&buf->vb.queue); wake_up(&buf->vb.done); } /* * Identify the buffer header type and properly handles */ static void em28xx_copy_video(struct em28xx *dev, struct em28xx_dmaqueue *dma_q, struct em28xx_buffer *buf, unsigned char *p, unsigned char *outp, unsigned long len) { void *fieldstart, *startwrite, *startread; int linesdone, currlinedone, offset, lencopy, remain; int bytesperline = dev->width << 1; if (dma_q->pos + len > buf->vb.size) len = buf->vb.size - dma_q->pos; startread = p; remain = len; if (dev->progressive) fieldstart = outp; else { /* Interlaces two half frames */ if (buf->top_field) fieldstart = outp; else fieldstart = outp + bytesperline; } linesdone = dma_q->pos / bytesperline; currlinedone = 
dma_q->pos % bytesperline; if (dev->progressive) offset = linesdone * bytesperline + currlinedone; else offset = linesdone * bytesperline * 2 + currlinedone; startwrite = fieldstart + offset; lencopy = bytesperline - currlinedone; lencopy = lencopy > remain ? remain : lencopy; if ((char *)startwrite + lencopy > (char *)outp + buf->vb.size) { em28xx_isocdbg("Overflow of %zi bytes past buffer end (1)\n", ((char *)startwrite + lencopy) - ((char *)outp + buf->vb.size)); remain = (char *)outp + buf->vb.size - (char *)startwrite; lencopy = remain; } if (lencopy <= 0) return; memcpy(startwrite, startread, lencopy); remain -= lencopy; while (remain > 0) { startwrite += lencopy + bytesperline; startread += lencopy; if (bytesperline > remain) lencopy = remain; else lencopy = bytesperline; if ((char *)startwrite + lencopy > (char *)outp + buf->vb.size) { em28xx_isocdbg("Overflow of %zi bytes past buffer end" "(2)\n", ((char *)startwrite + lencopy) - ((char *)outp + buf->vb.size)); lencopy = remain = (char *)outp + buf->vb.size - (char *)startwrite; } if (lencopy <= 0) break; memcpy(startwrite, startread, lencopy); remain -= lencopy; } dma_q->pos += len; } static void em28xx_copy_vbi(struct em28xx *dev, struct em28xx_dmaqueue *dma_q, struct em28xx_buffer *buf, unsigned char *p, unsigned char *outp, unsigned long len) { void *startwrite, *startread; int offset; int bytesperline; if (dev == NULL) { em28xx_isocdbg("dev is null\n"); return; } bytesperline = dev->vbi_width; if (dma_q == NULL) { em28xx_isocdbg("dma_q is null\n"); return; } if (buf == NULL) { return; } if (p == NULL) { em28xx_isocdbg("p is null\n"); return; } if (outp == NULL) { em28xx_isocdbg("outp is null\n"); return; } if (dma_q->pos + len > buf->vb.size) len = buf->vb.size - dma_q->pos; startread = p; startwrite = outp + dma_q->pos; offset = dma_q->pos; /* Make sure the bottom field populates the second half of the frame */ if (buf->top_field == 0) { startwrite += bytesperline * dev->vbi_height; offset += 
bytesperline * dev->vbi_height; } memcpy(startwrite, startread, len); dma_q->pos += len; } static inline void print_err_status(struct em28xx *dev, int packet, int status) { char *errmsg = "Unknown"; switch (status) { case -ENOENT: errmsg = "unlinked synchronuously"; break; case -ECONNRESET: errmsg = "unlinked asynchronuously"; break; case -ENOSR: errmsg = "Buffer error (overrun)"; break; case -EPIPE: errmsg = "Stalled (device not responding)"; break; case -EOVERFLOW: errmsg = "Babble (bad cable?)"; break; case -EPROTO: errmsg = "Bit-stuff error (bad cable?)"; break; case -EILSEQ: errmsg = "CRC/Timeout (could be anything)"; break; case -ETIME: errmsg = "Device does not respond"; break; } if (packet < 0) { em28xx_isocdbg("URB status %d [%s].\n", status, errmsg); } else { em28xx_isocdbg("URB packet %d, status %d [%s].\n", packet, status, errmsg); } } /* * video-buf generic routine to get the next available buffer */ static inline void get_next_buf(struct em28xx_dmaqueue *dma_q, struct em28xx_buffer **buf) { struct em28xx *dev = container_of(dma_q, struct em28xx, vidq); char *outp; if (list_empty(&dma_q->active)) { em28xx_isocdbg("No active queue to serve\n"); dev->isoc_ctl.vid_buf = NULL; *buf = NULL; return; } /* Get the next buffer */ *buf = list_entry(dma_q->active.next, struct em28xx_buffer, vb.queue); /* Cleans up buffer - Useful for testing for frame/URB loss */ outp = videobuf_to_vmalloc(&(*buf)->vb); memset(outp, 0, (*buf)->vb.size); dev->isoc_ctl.vid_buf = *buf; return; } /* * video-buf generic routine to get the next available VBI buffer */ static inline void vbi_get_next_buf(struct em28xx_dmaqueue *dma_q, struct em28xx_buffer **buf) { struct em28xx *dev = container_of(dma_q, struct em28xx, vbiq); char *outp; if (list_empty(&dma_q->active)) { em28xx_isocdbg("No active queue to serve\n"); dev->isoc_ctl.vbi_buf = NULL; *buf = NULL; return; } /* Get the next buffer */ *buf = list_entry(dma_q->active.next, struct em28xx_buffer, vb.queue); /* Cleans up buffer - 
Useful for testing for frame/URB loss */ outp = videobuf_to_vmalloc(&(*buf)->vb); memset(outp, 0x00, (*buf)->vb.size); dev->isoc_ctl.vbi_buf = *buf; return; } /* * Controls the isoc copy of each urb packet */ static inline int em28xx_isoc_copy(struct em28xx *dev, struct urb *urb) { struct em28xx_buffer *buf; struct em28xx_dmaqueue *dma_q = &dev->vidq; unsigned char *outp = NULL; int i, len = 0, rc = 1; unsigned char *p; if (!dev) return 0; if ((dev->state & DEV_DISCONNECTED) || (dev->state & DEV_MISCONFIGURED)) return 0; if (urb->status < 0) { print_err_status(dev, -1, urb->status); if (urb->status == -ENOENT) return 0; } buf = dev->isoc_ctl.vid_buf; if (buf != NULL) outp = videobuf_to_vmalloc(&buf->vb); for (i = 0; i < urb->number_of_packets; i++) { int status = urb->iso_frame_desc[i].status; if (status < 0) { print_err_status(dev, i, status); if (urb->iso_frame_desc[i].status != -EPROTO) continue; } len = urb->iso_frame_desc[i].actual_length - 4; if (urb->iso_frame_desc[i].actual_length <= 0) { /* em28xx_isocdbg("packet %d is empty",i); - spammy */ continue; } if (urb->iso_frame_desc[i].actual_length > dev->max_pkt_size) { em28xx_isocdbg("packet bigger than packet size"); continue; } p = urb->transfer_buffer + urb->iso_frame_desc[i].offset; /* FIXME: incomplete buffer checks where removed to make logic simpler. Impacts of those changes should be evaluated */ if (p[0] == 0x33 && p[1] == 0x95 && p[2] == 0x00) { em28xx_isocdbg("VBI HEADER!!!\n"); /* FIXME: Should add vbi copy */ continue; } if (p[0] == 0x22 && p[1] == 0x5a) { em28xx_isocdbg("Video frame %d, length=%i, %s\n", p[2], len, (p[2] & 1) ? 
"odd" : "even"); if (dev->progressive || !(p[2] & 1)) { if (buf != NULL) buffer_filled(dev, dma_q, buf); get_next_buf(dma_q, &buf); if (buf == NULL) outp = NULL; else outp = videobuf_to_vmalloc(&buf->vb); } if (buf != NULL) { if (p[2] & 1) buf->top_field = 0; else buf->top_field = 1; } dma_q->pos = 0; } if (buf != NULL) { if (p[0] != 0x88 && p[0] != 0x22) { em28xx_isocdbg("frame is not complete\n"); len += 4; } else { p += 4; } em28xx_copy_video(dev, dma_q, buf, p, outp, len); } } return rc; } /* Version of isoc handler that takes into account a mixture of video and VBI data */ static inline int em28xx_isoc_copy_vbi(struct em28xx *dev, struct urb *urb) { struct em28xx_buffer *buf, *vbi_buf; struct em28xx_dmaqueue *dma_q = &dev->vidq; struct em28xx_dmaqueue *vbi_dma_q = &dev->vbiq; unsigned char *outp = NULL; unsigned char *vbioutp = NULL; int i, len = 0, rc = 1; unsigned char *p; int vbi_size; if (!dev) return 0; if ((dev->state & DEV_DISCONNECTED) || (dev->state & DEV_MISCONFIGURED)) return 0; if (urb->status < 0) { print_err_status(dev, -1, urb->status); if (urb->status == -ENOENT) return 0; } buf = dev->isoc_ctl.vid_buf; if (buf != NULL) outp = videobuf_to_vmalloc(&buf->vb); vbi_buf = dev->isoc_ctl.vbi_buf; if (vbi_buf != NULL) vbioutp = videobuf_to_vmalloc(&vbi_buf->vb); for (i = 0; i < urb->number_of_packets; i++) { int status = urb->iso_frame_desc[i].status; if (status < 0) { print_err_status(dev, i, status); if (urb->iso_frame_desc[i].status != -EPROTO) continue; } len = urb->iso_frame_desc[i].actual_length; if (urb->iso_frame_desc[i].actual_length <= 0) { /* em28xx_isocdbg("packet %d is empty",i); - spammy */ continue; } if (urb->iso_frame_desc[i].actual_length > dev->max_pkt_size) { em28xx_isocdbg("packet bigger than packet size"); continue; } p = urb->transfer_buffer + urb->iso_frame_desc[i].offset; /* capture type 0 = vbi start capture type 1 = video start capture type 2 = video in progress */ if (p[0] == 0x33 && p[1] == 0x95) { dev->capture_type = 0; 
dev->vbi_read = 0; em28xx_isocdbg("VBI START HEADER!!!\n"); dev->cur_field = p[2]; p += 4; len -= 4; } else if (p[0] == 0x88 && p[1] == 0x88 && p[2] == 0x88 && p[3] == 0x88) { /* continuation */ p += 4; len -= 4; } else if (p[0] == 0x22 && p[1] == 0x5a) { /* start video */ p += 4; len -= 4; } vbi_size = dev->vbi_width * dev->vbi_height; if (dev->capture_type == 0) { if (dev->vbi_read >= vbi_size) { /* We've already read all the VBI data, so treat the rest as video */ em28xx_isocdbg("dev->vbi_read > vbi_size\n"); } else if ((dev->vbi_read + len) < vbi_size) { /* This entire frame is VBI data */ if (dev->vbi_read == 0 && (!(dev->cur_field & 1))) { /* Brand new frame */ if (vbi_buf != NULL) vbi_buffer_filled(dev, vbi_dma_q, vbi_buf); vbi_get_next_buf(vbi_dma_q, &vbi_buf); if (vbi_buf == NULL) vbioutp = NULL; else vbioutp = videobuf_to_vmalloc( &vbi_buf->vb); } if (dev->vbi_read == 0) { vbi_dma_q->pos = 0; if (vbi_buf != NULL) { if (dev->cur_field & 1) vbi_buf->top_field = 0; else vbi_buf->top_field = 1; } } dev->vbi_read += len; em28xx_copy_vbi(dev, vbi_dma_q, vbi_buf, p, vbioutp, len); } else { /* Some of this frame is VBI data and some is video data */ int vbi_data_len = vbi_size - dev->vbi_read; dev->vbi_read += vbi_data_len; em28xx_copy_vbi(dev, vbi_dma_q, vbi_buf, p, vbioutp, vbi_data_len); dev->capture_type = 1; p += vbi_data_len; len -= vbi_data_len; } } if (dev->capture_type == 1) { dev->capture_type = 2; if (dev->progressive || !(dev->cur_field & 1)) { if (buf != NULL) buffer_filled(dev, dma_q, buf); get_next_buf(dma_q, &buf); if (buf == NULL) outp = NULL; else outp = videobuf_to_vmalloc(&buf->vb); } if (buf != NULL) { if (dev->cur_field & 1) buf->top_field = 0; else buf->top_field = 1; } dma_q->pos = 0; } if (buf != NULL && dev->capture_type == 2) { if (len >= 4 && p[0] == 0x88 && p[1] == 0x88 && p[2] == 0x88 && p[3] == 0x88) { p += 4; len -= 4; } if (len >= 4 && p[0] == 0x22 && p[1] == 0x5a) { em28xx_isocdbg("Video frame %d, len=%i, %s\n", p[2], len, (p[2] 
& 1) ? "odd" : "even"); p += 4; len -= 4; } if (len > 0) em28xx_copy_video(dev, dma_q, buf, p, outp, len); } } return rc; } /* ------------------------------------------------------------------ Videobuf operations ------------------------------------------------------------------*/ static int buffer_setup(struct videobuf_queue *vq, unsigned int *count, unsigned int *size) { struct em28xx_fh *fh = vq->priv_data; struct em28xx *dev = fh->dev; struct v4l2_frequency f; *size = (fh->dev->width * fh->dev->height * dev->format->depth + 7) >> 3; if (0 == *count) *count = EM28XX_DEF_BUF; if (*count < EM28XX_MIN_BUF) *count = EM28XX_MIN_BUF; /* Ask tuner to go to analog or radio mode */ memset(&f, 0, sizeof(f)); f.frequency = dev->ctl_freq; f.type = fh->radio ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV; v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_frequency, &f); return 0; } /* This is called *without* dev->slock held; please keep it that way */ static void free_buffer(struct videobuf_queue *vq, struct em28xx_buffer *buf) { struct em28xx_fh *fh = vq->priv_data; struct em28xx *dev = fh->dev; unsigned long flags = 0; if (in_interrupt()) BUG(); /* We used to wait for the buffer to finish here, but this didn't work because, as we were keeping the state as VIDEOBUF_QUEUED, videobuf_queue_cancel marked it as finished for us. (Also, it could wedge forever if the hardware was misconfigured.) This should be safe; by the time we get here, the buffer isn't queued anymore. If we ever start marking the buffers as VIDEOBUF_ACTIVE, it won't be, though. 
*/ spin_lock_irqsave(&dev->slock, flags); if (dev->isoc_ctl.vid_buf == buf) dev->isoc_ctl.vid_buf = NULL; spin_unlock_irqrestore(&dev->slock, flags); videobuf_vmalloc_free(&buf->vb); buf->vb.state = VIDEOBUF_NEEDS_INIT; } static int buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb, enum v4l2_field field) { struct em28xx_fh *fh = vq->priv_data; struct em28xx_buffer *buf = container_of(vb, struct em28xx_buffer, vb); struct em28xx *dev = fh->dev; int rc = 0, urb_init = 0; buf->vb.size = (fh->dev->width * fh->dev->height * dev->format->depth + 7) >> 3; if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size) return -EINVAL; buf->vb.width = dev->width; buf->vb.height = dev->height; buf->vb.field = field; if (VIDEOBUF_NEEDS_INIT == buf->vb.state) { rc = videobuf_iolock(vq, &buf->vb, NULL); if (rc < 0) goto fail; } if (!dev->isoc_ctl.analog_bufs.num_bufs) urb_init = 1; if (urb_init) { if (em28xx_vbi_supported(dev) == 1) rc = em28xx_init_isoc(dev, EM28XX_ANALOG_MODE, EM28XX_NUM_PACKETS, EM28XX_NUM_BUFS, dev->max_pkt_size, em28xx_isoc_copy_vbi); else rc = em28xx_init_isoc(dev, EM28XX_ANALOG_MODE, EM28XX_NUM_PACKETS, EM28XX_NUM_BUFS, dev->max_pkt_size, em28xx_isoc_copy); if (rc < 0) goto fail; } buf->vb.state = VIDEOBUF_PREPARED; return 0; fail: free_buffer(vq, buf); return rc; } static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb) { struct em28xx_buffer *buf = container_of(vb, struct em28xx_buffer, vb); struct em28xx_fh *fh = vq->priv_data; struct em28xx *dev = fh->dev; struct em28xx_dmaqueue *vidq = &dev->vidq; buf->vb.state = VIDEOBUF_QUEUED; list_add_tail(&buf->vb.queue, &vidq->active); } static void buffer_release(struct videobuf_queue *vq, struct videobuf_buffer *vb) { struct em28xx_buffer *buf = container_of(vb, struct em28xx_buffer, vb); struct em28xx_fh *fh = vq->priv_data; struct em28xx *dev = (struct em28xx *)fh->dev; em28xx_isocdbg("em28xx: called buffer_release\n"); free_buffer(vq, buf); } static struct 
videobuf_queue_ops em28xx_video_qops = { .buf_setup = buffer_setup, .buf_prepare = buffer_prepare, .buf_queue = buffer_queue, .buf_release = buffer_release, }; /********************* v4l2 interface **************************************/ static void video_mux(struct em28xx *dev, int index) { dev->ctl_input = index; dev->ctl_ainput = INPUT(index)->amux; dev->ctl_aoutput = INPUT(index)->aout; if (!dev->ctl_aoutput) dev->ctl_aoutput = EM28XX_AOUT_MASTER; v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_routing, INPUT(index)->vmux, 0, 0); if (dev->board.has_msp34xx) { if (dev->i2s_speed) { v4l2_device_call_all(&dev->v4l2_dev, 0, audio, s_i2s_clock_freq, dev->i2s_speed); } /* Note: this is msp3400 specific */ v4l2_device_call_all(&dev->v4l2_dev, 0, audio, s_routing, dev->ctl_ainput, MSP_OUTPUT(MSP_SC_IN_DSP_SCART1), 0); } if (dev->board.adecoder != EM28XX_NOADECODER) { v4l2_device_call_all(&dev->v4l2_dev, 0, audio, s_routing, dev->ctl_ainput, dev->ctl_aoutput, 0); } em28xx_audio_analog_set(dev); } /* Usage lock check functions */ static int res_get(struct em28xx_fh *fh, unsigned int bit) { struct em28xx *dev = fh->dev; if (fh->resources & bit) /* have it already allocated */ return 1; /* is it free? 
*/ if (dev->resources & bit) { /* no, someone else uses it */ return 0; } /* it's free, grab it */ fh->resources |= bit; dev->resources |= bit; em28xx_videodbg("res: get %d\n", bit); return 1; } static int res_check(struct em28xx_fh *fh, unsigned int bit) { return fh->resources & bit; } static int res_locked(struct em28xx *dev, unsigned int bit) { return dev->resources & bit; } static void res_free(struct em28xx_fh *fh, unsigned int bits) { struct em28xx *dev = fh->dev; BUG_ON((fh->resources & bits) != bits); fh->resources &= ~bits; dev->resources &= ~bits; em28xx_videodbg("res: put %d\n", bits); } static int get_ressource(struct em28xx_fh *fh) { switch (fh->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: return EM28XX_RESOURCE_VIDEO; case V4L2_BUF_TYPE_VBI_CAPTURE: return EM28XX_RESOURCE_VBI; default: BUG(); return 0; } } /* * ac97_queryctrl() * return the ac97 supported controls */ static int ac97_queryctrl(struct v4l2_queryctrl *qc) { int i; for (i = 0; i < ARRAY_SIZE(ac97_qctrl); i++) { if (qc->id && qc->id == ac97_qctrl[i].id) { memcpy(qc, &(ac97_qctrl[i]), sizeof(*qc)); return 0; } } /* Control is not ac97 related */ return 1; } /* * ac97_get_ctrl() * return the current values for ac97 mute and volume */ static int ac97_get_ctrl(struct em28xx *dev, struct v4l2_control *ctrl) { switch (ctrl->id) { case V4L2_CID_AUDIO_MUTE: ctrl->value = dev->mute; return 0; case V4L2_CID_AUDIO_VOLUME: ctrl->value = dev->volume; return 0; default: /* Control is not ac97 related */ return 1; } } /* * ac97_set_ctrl() * set values for ac97 mute and volume */ static int ac97_set_ctrl(struct em28xx *dev, const struct v4l2_control *ctrl) { int i; for (i = 0; i < ARRAY_SIZE(ac97_qctrl); i++) if (ctrl->id == ac97_qctrl[i].id) goto handle; /* Announce that hasn't handle it */ return 1; handle: if (ctrl->value < ac97_qctrl[i].minimum || ctrl->value > ac97_qctrl[i].maximum) return -ERANGE; switch (ctrl->id) { case V4L2_CID_AUDIO_MUTE: dev->mute = ctrl->value; break; case V4L2_CID_AUDIO_VOLUME: 
dev->volume = ctrl->value; break; } return em28xx_audio_analog_set(dev); } static int check_dev(struct em28xx *dev) { if (dev->state & DEV_DISCONNECTED) { em28xx_errdev("v4l2 ioctl: device not present\n"); return -ENODEV; } if (dev->state & DEV_MISCONFIGURED) { em28xx_errdev("v4l2 ioctl: device is misconfigured; " "close and open it again\n"); return -EIO; } return 0; } static void get_scale(struct em28xx *dev, unsigned int width, unsigned int height, unsigned int *hscale, unsigned int *vscale) { unsigned int maxw = norm_maxw(dev); unsigned int maxh = norm_maxh(dev); *hscale = (((unsigned long)maxw) << 12) / width - 4096L; if (*hscale >= 0x4000) *hscale = 0x3fff; *vscale = (((unsigned long)maxh) << 12) / height - 4096L; if (*vscale >= 0x4000) *vscale = 0x3fff; } /* ------------------------------------------------------------------ IOCTL vidioc handling ------------------------------------------------------------------*/ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; f->fmt.pix.width = dev->width; f->fmt.pix.height = dev->height; f->fmt.pix.pixelformat = dev->format->fourcc; f->fmt.pix.bytesperline = (dev->width * dev->format->depth + 7) >> 3; f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * dev->height; f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; /* FIXME: TOP? NONE? BOTTOM? ALTENATE? */ if (dev->progressive) f->fmt.pix.field = V4L2_FIELD_NONE; else f->fmt.pix.field = dev->interlaced ? 
V4L2_FIELD_INTERLACED : V4L2_FIELD_TOP; return 0; } static struct em28xx_fmt *format_by_fourcc(unsigned int fourcc) { unsigned int i; for (i = 0; i < ARRAY_SIZE(format); i++) if (format[i].fourcc == fourcc) return &format[i]; return NULL; } static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; unsigned int width = f->fmt.pix.width; unsigned int height = f->fmt.pix.height; unsigned int maxw = norm_maxw(dev); unsigned int maxh = norm_maxh(dev); unsigned int hscale, vscale; struct em28xx_fmt *fmt; fmt = format_by_fourcc(f->fmt.pix.pixelformat); if (!fmt) { em28xx_videodbg("Fourcc format (%08x) invalid.\n", f->fmt.pix.pixelformat); return -EINVAL; } if (dev->board.is_em2800) { /* the em2800 can only scale down to 50% */ height = height > (3 * maxh / 4) ? maxh : maxh / 2; width = width > (3 * maxw / 4) ? maxw : maxw / 2; /* MaxPacketSize for em2800 is too small to capture at full resolution * use half of maxw as the scaler can only scale to 50% */ if (width == maxw && height == maxh) width /= 2; } else { /* width must even because of the YUYV format height must be even because of interlacing */ v4l_bound_align_image(&width, 48, maxw, 1, &height, 32, maxh, 1, 0); } get_scale(dev, width, height, &hscale, &vscale); width = (((unsigned long)maxw) << 12) / (hscale + 4096L); height = (((unsigned long)maxh) << 12) / (vscale + 4096L); f->fmt.pix.width = width; f->fmt.pix.height = height; f->fmt.pix.pixelformat = fmt->fourcc; f->fmt.pix.bytesperline = (dev->width * fmt->depth + 7) >> 3; f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * height; f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; if (dev->progressive) f->fmt.pix.field = V4L2_FIELD_NONE; else f->fmt.pix.field = dev->interlaced ? 
V4L2_FIELD_INTERLACED : V4L2_FIELD_TOP; return 0; } static int em28xx_set_video_format(struct em28xx *dev, unsigned int fourcc, unsigned width, unsigned height) { struct em28xx_fmt *fmt; fmt = format_by_fourcc(fourcc); if (!fmt) return -EINVAL; dev->format = fmt; dev->width = width; dev->height = height; /* set new image size */ get_scale(dev, dev->width, dev->height, &dev->hscale, &dev->vscale); em28xx_set_alternate(dev); em28xx_resolution_set(dev); return 0; } static int vidioc_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; int rc; rc = check_dev(dev); if (rc < 0) return rc; vidioc_try_fmt_vid_cap(file, priv, f); if (videobuf_queue_is_busy(&fh->vb_vidq)) { em28xx_errdev("%s queue busy\n", __func__); return -EBUSY; } return em28xx_set_video_format(dev, f->fmt.pix.pixelformat, f->fmt.pix.width, f->fmt.pix.height); } static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *norm) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; int rc; rc = check_dev(dev); if (rc < 0) return rc; *norm = dev->norm; return 0; } static int vidioc_querystd(struct file *file, void *priv, v4l2_std_id *norm) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; int rc; rc = check_dev(dev); if (rc < 0) return rc; v4l2_device_call_all(&dev->v4l2_dev, 0, video, querystd, norm); return 0; } static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *norm) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; struct v4l2_format f; int rc; rc = check_dev(dev); if (rc < 0) return rc; dev->norm = *norm; /* Adjusts width/height, if needed */ f.fmt.pix.width = dev->width; f.fmt.pix.height = dev->height; vidioc_try_fmt_vid_cap(file, priv, &f); /* set new image size */ dev->width = f.fmt.pix.width; dev->height = f.fmt.pix.height; get_scale(dev, dev->width, dev->height, &dev->hscale, &dev->vscale); em28xx_resolution_set(dev); v4l2_device_call_all(&dev->v4l2_dev, 0, core, s_std, 
dev->norm); return 0; } static int vidioc_g_parm(struct file *file, void *priv, struct v4l2_streamparm *p) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; int rc = 0; if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; if (dev->board.is_webcam) rc = v4l2_device_call_until_err(&dev->v4l2_dev, 0, video, g_parm, p); else v4l2_video_std_frame_period(dev->norm, &p->parm.capture.timeperframe); return rc; } static int vidioc_s_parm(struct file *file, void *priv, struct v4l2_streamparm *p) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; if (!dev->board.is_webcam) return -EINVAL; if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; return v4l2_device_call_until_err(&dev->v4l2_dev, 0, video, s_parm, p); } static const char *iname[] = { [EM28XX_VMUX_COMPOSITE1] = "Composite1", [EM28XX_VMUX_COMPOSITE2] = "Composite2", [EM28XX_VMUX_COMPOSITE3] = "Composite3", [EM28XX_VMUX_COMPOSITE4] = "Composite4", [EM28XX_VMUX_SVIDEO] = "S-Video", [EM28XX_VMUX_TELEVISION] = "Television", [EM28XX_VMUX_CABLE] = "Cable TV", [EM28XX_VMUX_DVB] = "DVB", [EM28XX_VMUX_DEBUG] = "for debug only", }; static int vidioc_enum_input(struct file *file, void *priv, struct v4l2_input *i) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; unsigned int n; n = i->index; if (n >= MAX_EM28XX_INPUT) return -EINVAL; if (0 == INPUT(n)->type) return -EINVAL; i->index = n; i->type = V4L2_INPUT_TYPE_CAMERA; strcpy(i->name, iname[INPUT(n)->type]); if ((EM28XX_VMUX_TELEVISION == INPUT(n)->type) || (EM28XX_VMUX_CABLE == INPUT(n)->type)) i->type = V4L2_INPUT_TYPE_TUNER; i->std = dev->vdev->tvnorms; return 0; } static int vidioc_g_input(struct file *file, void *priv, unsigned int *i) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; *i = dev->ctl_input; return 0; } static int vidioc_s_input(struct file *file, void *priv, unsigned int i) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; int rc; rc = check_dev(dev); if (rc < 0) return rc; if (i >= 
MAX_EM28XX_INPUT) return -EINVAL; if (0 == INPUT(i)->type) return -EINVAL; dev->ctl_input = i; video_mux(dev, dev->ctl_input); return 0; } static int vidioc_g_audio(struct file *file, void *priv, struct v4l2_audio *a) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; if (!dev->audio_mode.has_audio) return -EINVAL; switch (a->index) { case EM28XX_AMUX_VIDEO: strcpy(a->name, "Television"); break; case EM28XX_AMUX_LINE_IN: strcpy(a->name, "Line In"); break; case EM28XX_AMUX_VIDEO2: strcpy(a->name, "Television alt"); break; case EM28XX_AMUX_PHONE: strcpy(a->name, "Phone"); break; case EM28XX_AMUX_MIC: strcpy(a->name, "Mic"); break; case EM28XX_AMUX_CD: strcpy(a->name, "CD"); break; case EM28XX_AMUX_AUX: strcpy(a->name, "Aux"); break; case EM28XX_AMUX_PCM_OUT: strcpy(a->name, "PCM"); break; default: return -EINVAL; } a->index = dev->ctl_ainput; a->capability = V4L2_AUDCAP_STEREO; return 0; } static int vidioc_s_audio(struct file *file, void *priv, struct v4l2_audio *a) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; if (!dev->audio_mode.has_audio) return -EINVAL; if (a->index >= MAX_EM28XX_INPUT) return -EINVAL; if (0 == INPUT(a->index)->type) return -EINVAL; dev->ctl_ainput = INPUT(a->index)->amux; dev->ctl_aoutput = INPUT(a->index)->aout; if (!dev->ctl_aoutput) dev->ctl_aoutput = EM28XX_AOUT_MASTER; return 0; } static int vidioc_queryctrl(struct file *file, void *priv, struct v4l2_queryctrl *qc) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; int id = qc->id; int rc; rc = check_dev(dev); if (rc < 0) return rc; memset(qc, 0, sizeof(*qc)); qc->id = id; /* enumerate AC97 controls */ if (dev->audio_mode.ac97 != EM28XX_NO_AC97) { rc = ac97_queryctrl(qc); if (!rc) return 0; } /* enumerate V4L2 device controls */ v4l2_device_call_all(&dev->v4l2_dev, 0, core, queryctrl, qc); if (qc->type) return 0; else return -EINVAL; } /* * FIXME: This is an indirect way to check if a control exists at a * subdev. 
Instead of that hack, maybe the better would be to change all * subdevs to return -ENOIOCTLCMD, if an ioctl is not supported. */ static int check_subdev_ctrl(struct em28xx *dev, int id) { struct v4l2_queryctrl qc; memset(&qc, 0, sizeof(qc)); qc.id = id; /* enumerate V4L2 device controls */ v4l2_device_call_all(&dev->v4l2_dev, 0, core, queryctrl, &qc); if (qc.type) return 0; else return -EINVAL; } static int vidioc_g_ctrl(struct file *file, void *priv, struct v4l2_control *ctrl) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; int rc; rc = check_dev(dev); if (rc < 0) return rc; rc = 0; /* Set an AC97 control */ if (dev->audio_mode.ac97 != EM28XX_NO_AC97) rc = ac97_get_ctrl(dev, ctrl); else rc = 1; /* It were not an AC97 control. Sends it to the v4l2 dev interface */ if (rc == 1) { if (check_subdev_ctrl(dev, ctrl->id)) return -EINVAL; v4l2_device_call_all(&dev->v4l2_dev, 0, core, g_ctrl, ctrl); rc = 0; } return rc; } static int vidioc_s_ctrl(struct file *file, void *priv, struct v4l2_control *ctrl) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; int rc; rc = check_dev(dev); if (rc < 0) return rc; /* Set an AC97 control */ if (dev->audio_mode.ac97 != EM28XX_NO_AC97) rc = ac97_set_ctrl(dev, ctrl); else rc = 1; /* It isn't an AC97 control. Sends it to the v4l2 dev interface */ if (rc == 1) { rc = check_subdev_ctrl(dev, ctrl->id); if (!rc) v4l2_device_call_all(&dev->v4l2_dev, 0, core, s_ctrl, ctrl); /* * In the case of non-AC97 volume controls, we still need * to do some setups at em28xx, in order to mute/unmute * and to adjust audio volume. However, the value ranges * should be checked by the corresponding V4L subdriver. */ switch (ctrl->id) { case V4L2_CID_AUDIO_MUTE: dev->mute = ctrl->value; rc = em28xx_audio_analog_set(dev); break; case V4L2_CID_AUDIO_VOLUME: dev->volume = ctrl->value; rc = em28xx_audio_analog_set(dev); } } return (rc < 0) ? 
rc : 0; } static int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; int rc; rc = check_dev(dev); if (rc < 0) return rc; if (0 != t->index) return -EINVAL; strcpy(t->name, "Tuner"); t->type = V4L2_TUNER_ANALOG_TV; v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, g_tuner, t); return 0; } static int vidioc_s_tuner(struct file *file, void *priv, struct v4l2_tuner *t) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; int rc; rc = check_dev(dev); if (rc < 0) return rc; if (0 != t->index) return -EINVAL; v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_tuner, t); return 0; } static int vidioc_g_frequency(struct file *file, void *priv, struct v4l2_frequency *f) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; f->type = fh->radio ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV; f->frequency = dev->ctl_freq; return 0; } static int vidioc_s_frequency(struct file *file, void *priv, struct v4l2_frequency *f) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; int rc; rc = check_dev(dev); if (rc < 0) return rc; if (0 != f->tuner) return -EINVAL; if (unlikely(0 == fh->radio && f->type != V4L2_TUNER_ANALOG_TV)) return -EINVAL; if (unlikely(1 == fh->radio && f->type != V4L2_TUNER_RADIO)) return -EINVAL; dev->ctl_freq = f->frequency; v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_frequency, f); return 0; } #ifdef CONFIG_VIDEO_ADV_DEBUG static int em28xx_reg_len(int reg) { switch (reg) { case EM28XX_R40_AC97LSB: case EM28XX_R30_HSCALELOW: case EM28XX_R32_VSCALELOW: return 2; default: return 1; } } static int vidioc_g_chip_ident(struct file *file, void *priv, struct v4l2_dbg_chip_ident *chip) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; chip->ident = V4L2_IDENT_NONE; chip->revision = 0; v4l2_device_call_all(&dev->v4l2_dev, 0, core, g_chip_ident, chip); return 0; } static int vidioc_g_register(struct file *file, void *priv, struct v4l2_dbg_register *reg) { struct 
em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; int ret; switch (reg->match.type) { case V4L2_CHIP_MATCH_AC97: ret = em28xx_read_ac97(dev, reg->reg); if (ret < 0) return ret; reg->val = ret; reg->size = 1; return 0; case V4L2_CHIP_MATCH_I2C_DRIVER: v4l2_device_call_all(&dev->v4l2_dev, 0, core, g_register, reg); return 0; case V4L2_CHIP_MATCH_I2C_ADDR: /* TODO: is this correct? */ v4l2_device_call_all(&dev->v4l2_dev, 0, core, g_register, reg); return 0; default: if (!v4l2_chip_match_host(&reg->match)) return -EINVAL; } /* Match host */ reg->size = em28xx_reg_len(reg->reg); if (reg->size == 1) { ret = em28xx_read_reg(dev, reg->reg); if (ret < 0) return ret; reg->val = ret; } else { __le16 val = 0; ret = em28xx_read_reg_req_len(dev, USB_REQ_GET_STATUS, reg->reg, (char *)&val, 2); if (ret < 0) return ret; reg->val = le16_to_cpu(val); } return 0; } static int vidioc_s_register(struct file *file, void *priv, struct v4l2_dbg_register *reg) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; __le16 buf; switch (reg->match.type) { case V4L2_CHIP_MATCH_AC97: return em28xx_write_ac97(dev, reg->reg, reg->val); case V4L2_CHIP_MATCH_I2C_DRIVER: v4l2_device_call_all(&dev->v4l2_dev, 0, core, s_register, reg); return 0; case V4L2_CHIP_MATCH_I2C_ADDR: /* TODO: is this correct? 
*/ v4l2_device_call_all(&dev->v4l2_dev, 0, core, s_register, reg); return 0; default: if (!v4l2_chip_match_host(&reg->match)) return -EINVAL; } /* Match host */ buf = cpu_to_le16(reg->val); return em28xx_write_regs(dev, reg->reg, (char *)&buf, em28xx_reg_len(reg->reg)); } #endif static int vidioc_cropcap(struct file *file, void *priv, struct v4l2_cropcap *cc) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; if (cc->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; cc->bounds.left = 0; cc->bounds.top = 0; cc->bounds.width = dev->width; cc->bounds.height = dev->height; cc->defrect = cc->bounds; cc->pixelaspect.numerator = 54; /* 4:3 FIXME: remove magic numbers */ cc->pixelaspect.denominator = 59; return 0; } static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type type) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; int rc = -EINVAL; rc = check_dev(dev); if (rc < 0) return rc; if (unlikely(type != fh->type)) return -EINVAL; em28xx_videodbg("vidioc_streamon fh=%p t=%d fh->res=%d dev->res=%d\n", fh, type, fh->resources, dev->resources); if (unlikely(!res_get(fh, get_ressource(fh)))) return -EBUSY; if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) rc = videobuf_streamon(&fh->vb_vidq); else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) rc = videobuf_streamon(&fh->vb_vbiq); return rc; } static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type type) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; int rc; rc = check_dev(dev); if (rc < 0) return rc; if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE && fh->type != V4L2_BUF_TYPE_VBI_CAPTURE) return -EINVAL; if (type != fh->type) return -EINVAL; em28xx_videodbg("vidioc_streamoff fh=%p t=%d fh->res=%d dev->res=%d\n", fh, type, fh->resources, dev->resources); if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { if (res_check(fh, EM28XX_RESOURCE_VIDEO)) { videobuf_streamoff(&fh->vb_vidq); res_free(fh, EM28XX_RESOURCE_VIDEO); } } else if (fh->type == 
V4L2_BUF_TYPE_VBI_CAPTURE) { if (res_check(fh, EM28XX_RESOURCE_VBI)) { videobuf_streamoff(&fh->vb_vbiq); res_free(fh, EM28XX_RESOURCE_VBI); } } return 0; } static int vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; strlcpy(cap->driver, "em28xx", sizeof(cap->driver)); strlcpy(cap->card, em28xx_boards[dev->model].name, sizeof(cap->card)); usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info)); cap->capabilities = V4L2_CAP_SLICED_VBI_CAPTURE | V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE | V4L2_CAP_STREAMING; if (dev->vbi_dev) cap->capabilities |= V4L2_CAP_VBI_CAPTURE; if (dev->audio_mode.has_audio) cap->capabilities |= V4L2_CAP_AUDIO; if (dev->tuner_type != TUNER_ABSENT) cap->capabilities |= V4L2_CAP_TUNER; return 0; } static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv, struct v4l2_fmtdesc *f) { if (unlikely(f->index >= ARRAY_SIZE(format))) return -EINVAL; strlcpy(f->description, format[f->index].name, sizeof(f->description)); f->pixelformat = format[f->index].fourcc; return 0; } static int vidioc_enum_framesizes(struct file *file, void *priv, struct v4l2_frmsizeenum *fsize) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; struct em28xx_fmt *fmt; unsigned int maxw = norm_maxw(dev); unsigned int maxh = norm_maxh(dev); fmt = format_by_fourcc(fsize->pixel_format); if (!fmt) { em28xx_videodbg("Fourcc format (%08x) invalid.\n", fsize->pixel_format); return -EINVAL; } if (dev->board.is_em2800) { if (fsize->index > 1) return -EINVAL; fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE; fsize->discrete.width = maxw / (1 + fsize->index); fsize->discrete.height = maxh / (1 + fsize->index); return 0; } if (fsize->index != 0) return -EINVAL; /* Report a continuous range */ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE; fsize->stepwise.min_width = 48; fsize->stepwise.min_height = 32; fsize->stepwise.max_width = maxw; fsize->stepwise.max_height = maxh; 
fsize->stepwise.step_width = 1; fsize->stepwise.step_height = 1; return 0; } /* Sliced VBI ioctls */ static int vidioc_g_fmt_sliced_vbi_cap(struct file *file, void *priv, struct v4l2_format *f) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; int rc; rc = check_dev(dev); if (rc < 0) return rc; f->fmt.sliced.service_set = 0; v4l2_device_call_all(&dev->v4l2_dev, 0, vbi, g_sliced_fmt, &f->fmt.sliced); if (f->fmt.sliced.service_set == 0) rc = -EINVAL; return rc; } static int vidioc_try_set_sliced_vbi_cap(struct file *file, void *priv, struct v4l2_format *f) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; int rc; rc = check_dev(dev); if (rc < 0) return rc; v4l2_device_call_all(&dev->v4l2_dev, 0, vbi, g_sliced_fmt, &f->fmt.sliced); if (f->fmt.sliced.service_set == 0) return -EINVAL; return 0; } /* RAW VBI ioctls */ static int vidioc_g_fmt_vbi_cap(struct file *file, void *priv, struct v4l2_format *format) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; format->fmt.vbi.samples_per_line = dev->vbi_width; format->fmt.vbi.sample_format = V4L2_PIX_FMT_GREY; format->fmt.vbi.offset = 0; format->fmt.vbi.flags = 0; format->fmt.vbi.sampling_rate = 6750000 * 4 / 2; format->fmt.vbi.count[0] = dev->vbi_height; format->fmt.vbi.count[1] = dev->vbi_height; /* Varies by video standard (NTSC, PAL, etc.) 
*/ if (dev->norm & V4L2_STD_525_60) { /* NTSC */ format->fmt.vbi.start[0] = 10; format->fmt.vbi.start[1] = 273; } else if (dev->norm & V4L2_STD_625_50) { /* PAL */ format->fmt.vbi.start[0] = 6; format->fmt.vbi.start[1] = 318; } return 0; } static int vidioc_s_fmt_vbi_cap(struct file *file, void *priv, struct v4l2_format *format) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; format->fmt.vbi.samples_per_line = dev->vbi_width; format->fmt.vbi.sample_format = V4L2_PIX_FMT_GREY; format->fmt.vbi.offset = 0; format->fmt.vbi.flags = 0; format->fmt.vbi.sampling_rate = 6750000 * 4 / 2; format->fmt.vbi.count[0] = dev->vbi_height; format->fmt.vbi.count[1] = dev->vbi_height; /* Varies by video standard (NTSC, PAL, etc.) */ if (dev->norm & V4L2_STD_525_60) { /* NTSC */ format->fmt.vbi.start[0] = 10; format->fmt.vbi.start[1] = 273; } else if (dev->norm & V4L2_STD_625_50) { /* PAL */ format->fmt.vbi.start[0] = 6; format->fmt.vbi.start[1] = 318; } return 0; } static int vidioc_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *rb) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; int rc; rc = check_dev(dev); if (rc < 0) return rc; if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) return videobuf_reqbufs(&fh->vb_vidq, rb); else return videobuf_reqbufs(&fh->vb_vbiq, rb); } static int vidioc_querybuf(struct file *file, void *priv, struct v4l2_buffer *b) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; int rc; rc = check_dev(dev); if (rc < 0) return rc; if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) return videobuf_querybuf(&fh->vb_vidq, b); else { /* FIXME: I'm not sure yet whether this is a bug in zvbi or the videobuf framework, but we probably shouldn't be returning a buffer larger than that which was asked for. 
At a minimum, it causes a crash in zvbi since it does a memcpy based on the source buffer length */ int result = videobuf_querybuf(&fh->vb_vbiq, b); b->length = dev->vbi_width * dev->vbi_height * 2; return result; } } static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *b) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; int rc; rc = check_dev(dev); if (rc < 0) return rc; if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) return videobuf_qbuf(&fh->vb_vidq, b); else return videobuf_qbuf(&fh->vb_vbiq, b); } static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b) { struct em28xx_fh *fh = priv; struct em28xx *dev = fh->dev; int rc; rc = check_dev(dev); if (rc < 0) return rc; if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) return videobuf_dqbuf(&fh->vb_vidq, b, file->f_flags & O_NONBLOCK); else return videobuf_dqbuf(&fh->vb_vbiq, b, file->f_flags & O_NONBLOCK); } /* ----------------------------------------------------------- */ /* RADIO ESPECIFIC IOCTLS */ /* ----------------------------------------------------------- */ static int radio_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { struct em28xx *dev = ((struct em28xx_fh *)priv)->dev; strlcpy(cap->driver, "em28xx", sizeof(cap->driver)); strlcpy(cap->card, em28xx_boards[dev->model].name, sizeof(cap->card)); usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info)); cap->capabilities = V4L2_CAP_TUNER; return 0; } static int radio_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t) { struct em28xx *dev = ((struct em28xx_fh *)priv)->dev; if (unlikely(t->index > 0)) return -EINVAL; strcpy(t->name, "Radio"); t->type = V4L2_TUNER_RADIO; v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, g_tuner, t); return 0; } static int radio_enum_input(struct file *file, void *priv, struct v4l2_input *i) { if (i->index != 0) return -EINVAL; strcpy(i->name, "Radio"); i->type = V4L2_INPUT_TYPE_TUNER; return 0; } static int radio_g_audio(struct file *file, void 
*priv, struct v4l2_audio *a) { if (unlikely(a->index)) return -EINVAL; strcpy(a->name, "Radio"); return 0; } static int radio_s_tuner(struct file *file, void *priv, struct v4l2_tuner *t) { struct em28xx *dev = ((struct em28xx_fh *)priv)->dev; if (0 != t->index) return -EINVAL; v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_tuner, t); return 0; } static int radio_s_audio(struct file *file, void *fh, struct v4l2_audio *a) { return 0; } static int radio_s_input(struct file *file, void *fh, unsigned int i) { return 0; } static int radio_queryctrl(struct file *file, void *priv, struct v4l2_queryctrl *qc) { int i; if (qc->id < V4L2_CID_BASE || qc->id >= V4L2_CID_LASTP1) return -EINVAL; for (i = 0; i < ARRAY_SIZE(ac97_qctrl); i++) { if (qc->id && qc->id == ac97_qctrl[i].id) { memcpy(qc, &(ac97_qctrl[i]), sizeof(*qc)); return 0; } } return -EINVAL; } /* * em28xx_v4l2_open() * inits the device and starts isoc transfer */ static int em28xx_v4l2_open(struct file *filp) { int errCode = 0, radio = 0; struct video_device *vdev = video_devdata(filp); struct em28xx *dev = video_drvdata(filp); enum v4l2_buf_type fh_type = 0; struct em28xx_fh *fh; enum v4l2_field field; switch (vdev->vfl_type) { case VFL_TYPE_GRABBER: fh_type = V4L2_BUF_TYPE_VIDEO_CAPTURE; break; case VFL_TYPE_VBI: fh_type = V4L2_BUF_TYPE_VBI_CAPTURE; break; case VFL_TYPE_RADIO: radio = 1; break; } em28xx_videodbg("open dev=%s type=%s users=%d\n", video_device_node_name(vdev), v4l2_type_names[fh_type], dev->users); fh = kzalloc(sizeof(struct em28xx_fh), GFP_KERNEL); if (!fh) { em28xx_errdev("em28xx-video.c: Out of memory?!\n"); return -ENOMEM; } fh->dev = dev; fh->radio = radio; fh->type = fh_type; filp->private_data = fh; if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && dev->users == 0) { em28xx_set_mode(dev, EM28XX_ANALOG_MODE); em28xx_set_alternate(dev); em28xx_resolution_set(dev); /* Needed, since GPIO might have disabled power of some i2c device */ em28xx_wake_i2c(dev); } if (fh->radio) { 
em28xx_videodbg("video_open: setting radio device\n"); v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_radio); } dev->users++; if (dev->progressive) field = V4L2_FIELD_NONE; else field = V4L2_FIELD_INTERLACED; videobuf_queue_vmalloc_init(&fh->vb_vidq, &em28xx_video_qops, NULL, &dev->slock, V4L2_BUF_TYPE_VIDEO_CAPTURE, field, sizeof(struct em28xx_buffer), fh, &dev->lock); videobuf_queue_vmalloc_init(&fh->vb_vbiq, &em28xx_vbi_qops, NULL, &dev->slock, V4L2_BUF_TYPE_VBI_CAPTURE, V4L2_FIELD_SEQ_TB, sizeof(struct em28xx_buffer), fh, &dev->lock); return errCode; } /* * em28xx_realease_resources() * unregisters the v4l2,i2c and usb devices * called when the device gets disconected or at module unload */ void em28xx_release_analog_resources(struct em28xx *dev) { /*FIXME: I2C IR should be disconnected */ if (dev->radio_dev) { if (video_is_registered(dev->radio_dev)) video_unregister_device(dev->radio_dev); else video_device_release(dev->radio_dev); dev->radio_dev = NULL; } if (dev->vbi_dev) { em28xx_info("V4L2 device %s deregistered\n", video_device_node_name(dev->vbi_dev)); if (video_is_registered(dev->vbi_dev)) video_unregister_device(dev->vbi_dev); else video_device_release(dev->vbi_dev); dev->vbi_dev = NULL; } if (dev->vdev) { em28xx_info("V4L2 device %s deregistered\n", video_device_node_name(dev->vdev)); if (video_is_registered(dev->vdev)) video_unregister_device(dev->vdev); else video_device_release(dev->vdev); dev->vdev = NULL; } } /* * em28xx_v4l2_close() * stops streaming and deallocates all resources allocated by the v4l2 * calls and ioctls */ static int em28xx_v4l2_close(struct file *filp) { struct em28xx_fh *fh = filp->private_data; struct em28xx *dev = fh->dev; int errCode; em28xx_videodbg("users=%d\n", dev->users); if (res_check(fh, EM28XX_RESOURCE_VIDEO)) { videobuf_stop(&fh->vb_vidq); res_free(fh, EM28XX_RESOURCE_VIDEO); } if (res_check(fh, EM28XX_RESOURCE_VBI)) { videobuf_stop(&fh->vb_vbiq); res_free(fh, EM28XX_RESOURCE_VBI); } if (dev->users == 1) { /* 
the device is already disconnect, free the remaining resources */ if (dev->state & DEV_DISCONNECTED) { em28xx_release_resources(dev); kfree(dev->alt_max_pkt_size); kfree(dev); return 0; } /* Save some power by putting tuner to sleep */ v4l2_device_call_all(&dev->v4l2_dev, 0, core, s_power, 0); /* do this before setting alternate! */ em28xx_uninit_isoc(dev, EM28XX_ANALOG_MODE); em28xx_set_mode(dev, EM28XX_SUSPEND); /* set alternate 0 */ dev->alt = 0; em28xx_videodbg("setting alternate 0\n"); errCode = usb_set_interface(dev->udev, 0, 0); if (errCode < 0) { em28xx_errdev("cannot change alternate number to " "0 (error=%i)\n", errCode); } } videobuf_mmap_free(&fh->vb_vidq); videobuf_mmap_free(&fh->vb_vbiq); kfree(fh); dev->users--; wake_up_interruptible_nr(&dev->open, 1); return 0; } /* * em28xx_v4l2_read() * will allocate buffers when called for the first time */ static ssize_t em28xx_v4l2_read(struct file *filp, char __user *buf, size_t count, loff_t *pos) { struct em28xx_fh *fh = filp->private_data; struct em28xx *dev = fh->dev; int rc; rc = check_dev(dev); if (rc < 0) return rc; /* FIXME: read() is not prepared to allow changing the video resolution while streaming. 
Seems a bug at em28xx_set_fmt */ if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { if (res_locked(dev, EM28XX_RESOURCE_VIDEO)) return -EBUSY; return videobuf_read_stream(&fh->vb_vidq, buf, count, pos, 0, filp->f_flags & O_NONBLOCK); } if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) { if (!res_get(fh, EM28XX_RESOURCE_VBI)) return -EBUSY; return videobuf_read_stream(&fh->vb_vbiq, buf, count, pos, 0, filp->f_flags & O_NONBLOCK); } return 0; } /* * em28xx_v4l2_poll() * will allocate buffers when called for the first time */ static unsigned int em28xx_v4l2_poll(struct file *filp, poll_table *wait) { struct em28xx_fh *fh = filp->private_data; struct em28xx *dev = fh->dev; int rc; rc = check_dev(dev); if (rc < 0) return rc; if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { if (!res_get(fh, EM28XX_RESOURCE_VIDEO)) return POLLERR; return videobuf_poll_stream(filp, &fh->vb_vidq, wait); } else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) { if (!res_get(fh, EM28XX_RESOURCE_VBI)) return POLLERR; return videobuf_poll_stream(filp, &fh->vb_vbiq, wait); } else { return POLLERR; } } /* * em28xx_v4l2_mmap() */ static int em28xx_v4l2_mmap(struct file *filp, struct vm_area_struct *vma) { struct em28xx_fh *fh = filp->private_data; struct em28xx *dev = fh->dev; int rc; rc = check_dev(dev); if (rc < 0) return rc; if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) rc = videobuf_mmap_mapper(&fh->vb_vidq, vma); else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) rc = videobuf_mmap_mapper(&fh->vb_vbiq, vma); em28xx_videodbg("vma start=0x%08lx, size=%ld, ret=%d\n", (unsigned long)vma->vm_start, (unsigned long)vma->vm_end-(unsigned long)vma->vm_start, rc); return rc; } static const struct v4l2_file_operations em28xx_v4l_fops = { .owner = THIS_MODULE, .open = em28xx_v4l2_open, .release = em28xx_v4l2_close, .read = em28xx_v4l2_read, .poll = em28xx_v4l2_poll, .mmap = em28xx_v4l2_mmap, .unlocked_ioctl = video_ioctl2, }; static const struct v4l2_ioctl_ops video_ioctl_ops = { .vidioc_querycap = vidioc_querycap, 
.vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap, .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap, .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap, .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap, .vidioc_g_fmt_vbi_cap = vidioc_g_fmt_vbi_cap, .vidioc_s_fmt_vbi_cap = vidioc_s_fmt_vbi_cap, .vidioc_enum_framesizes = vidioc_enum_framesizes, .vidioc_g_audio = vidioc_g_audio, .vidioc_s_audio = vidioc_s_audio, .vidioc_cropcap = vidioc_cropcap, .vidioc_g_fmt_sliced_vbi_cap = vidioc_g_fmt_sliced_vbi_cap, .vidioc_try_fmt_sliced_vbi_cap = vidioc_try_set_sliced_vbi_cap, .vidioc_s_fmt_sliced_vbi_cap = vidioc_try_set_sliced_vbi_cap, .vidioc_reqbufs = vidioc_reqbufs, .vidioc_querybuf = vidioc_querybuf, .vidioc_qbuf = vidioc_qbuf, .vidioc_dqbuf = vidioc_dqbuf, .vidioc_g_std = vidioc_g_std, .vidioc_querystd = vidioc_querystd, .vidioc_s_std = vidioc_s_std, .vidioc_g_parm = vidioc_g_parm, .vidioc_s_parm = vidioc_s_parm, .vidioc_enum_input = vidioc_enum_input, .vidioc_g_input = vidioc_g_input, .vidioc_s_input = vidioc_s_input, .vidioc_queryctrl = vidioc_queryctrl, .vidioc_g_ctrl = vidioc_g_ctrl, .vidioc_s_ctrl = vidioc_s_ctrl, .vidioc_streamon = vidioc_streamon, .vidioc_streamoff = vidioc_streamoff, .vidioc_g_tuner = vidioc_g_tuner, .vidioc_s_tuner = vidioc_s_tuner, .vidioc_g_frequency = vidioc_g_frequency, .vidioc_s_frequency = vidioc_s_frequency, #ifdef CONFIG_VIDEO_ADV_DEBUG .vidioc_g_register = vidioc_g_register, .vidioc_s_register = vidioc_s_register, .vidioc_g_chip_ident = vidioc_g_chip_ident, #endif }; static const struct video_device em28xx_video_template = { .fops = &em28xx_v4l_fops, .release = video_device_release, .ioctl_ops = &video_ioctl_ops, .tvnorms = V4L2_STD_ALL, .current_norm = V4L2_STD_PAL, }; static const struct v4l2_file_operations radio_fops = { .owner = THIS_MODULE, .open = em28xx_v4l2_open, .release = em28xx_v4l2_close, .unlocked_ioctl = video_ioctl2, }; static const struct v4l2_ioctl_ops radio_ioctl_ops = { .vidioc_querycap = radio_querycap, .vidioc_g_tuner = 
radio_g_tuner, .vidioc_enum_input = radio_enum_input, .vidioc_g_audio = radio_g_audio, .vidioc_s_tuner = radio_s_tuner, .vidioc_s_audio = radio_s_audio, .vidioc_s_input = radio_s_input, .vidioc_queryctrl = radio_queryctrl, .vidioc_g_ctrl = vidioc_g_ctrl, .vidioc_s_ctrl = vidioc_s_ctrl, .vidioc_g_frequency = vidioc_g_frequency, .vidioc_s_frequency = vidioc_s_frequency, #ifdef CONFIG_VIDEO_ADV_DEBUG .vidioc_g_register = vidioc_g_register, .vidioc_s_register = vidioc_s_register, #endif }; static struct video_device em28xx_radio_template = { .name = "em28xx-radio", .fops = &radio_fops, .ioctl_ops = &radio_ioctl_ops, }; /******************************** usb interface ******************************/ static struct video_device *em28xx_vdev_init(struct em28xx *dev, const struct video_device *template, const char *type_name) { struct video_device *vfd; vfd = video_device_alloc(); if (NULL == vfd) return NULL; *vfd = *template; vfd->v4l2_dev = &dev->v4l2_dev; vfd->release = video_device_release; vfd->debug = video_debug; vfd->lock = &dev->lock; snprintf(vfd->name, sizeof(vfd->name), "%s %s", dev->name, type_name); video_set_drvdata(vfd, dev); return vfd; } int em28xx_register_analog_devices(struct em28xx *dev) { u8 val; int ret; unsigned int maxw; printk(KERN_INFO "%s: v4l2 driver version %s\n", dev->name, EM28XX_VERSION); /* set default norm */ dev->norm = em28xx_video_template.current_norm; v4l2_device_call_all(&dev->v4l2_dev, 0, core, s_std, dev->norm); dev->interlaced = EM28XX_INTERLACED_DEFAULT; dev->ctl_input = 0; /* Analog specific initialization */ dev->format = &format[0]; maxw = norm_maxw(dev); /* MaxPacketSize for em2800 is too small to capture at full resolution * use half of maxw as the scaler can only scale to 50% */ if (dev->board.is_em2800) maxw /= 2; em28xx_set_video_format(dev, format[0].fourcc, maxw, norm_maxh(dev)); video_mux(dev, dev->ctl_input); /* Audio defaults */ dev->mute = 1; dev->volume = 0x1f; /* em28xx_write_reg(dev, EM28XX_R0E_AUDIOSRC, 0xc0); 
audio register */ val = (u8)em28xx_read_reg(dev, EM28XX_R0F_XCLK); em28xx_write_reg(dev, EM28XX_R0F_XCLK, (EM28XX_XCLK_AUDIO_UNMUTE | val)); em28xx_set_outfmt(dev); em28xx_colorlevels_set_default(dev); em28xx_compression_disable(dev); /* allocate and fill video video_device struct */ dev->vdev = em28xx_vdev_init(dev, &em28xx_video_template, "video"); if (!dev->vdev) { em28xx_errdev("cannot allocate video_device.\n"); return -ENODEV; } /* register v4l2 video video_device */ ret = video_register_device(dev->vdev, VFL_TYPE_GRABBER, video_nr[dev->devno]); if (ret) { em28xx_errdev("unable to register video device (error=%i).\n", ret); return ret; } /* Allocate and fill vbi video_device struct */ if (em28xx_vbi_supported(dev) == 1) { dev->vbi_dev = em28xx_vdev_init(dev, &em28xx_video_template, "vbi"); /* register v4l2 vbi video_device */ ret = video_register_device(dev->vbi_dev, VFL_TYPE_VBI, vbi_nr[dev->devno]); if (ret < 0) { em28xx_errdev("unable to register vbi device\n"); return ret; } } if (em28xx_boards[dev->model].radio.type == EM28XX_RADIO) { dev->radio_dev = em28xx_vdev_init(dev, &em28xx_radio_template, "radio"); if (!dev->radio_dev) { em28xx_errdev("cannot allocate video_device.\n"); return -ENODEV; } ret = video_register_device(dev->radio_dev, VFL_TYPE_RADIO, radio_nr[dev->devno]); if (ret < 0) { em28xx_errdev("can't register radio device\n"); return ret; } em28xx_info("Registered radio device as %s\n", video_device_node_name(dev->radio_dev)); } em28xx_info("V4L2 video device registered as %s\n", video_device_node_name(dev->vdev)); if (dev->vbi_dev) em28xx_info("V4L2 VBI device registered as %s\n", video_device_node_name(dev->vbi_dev)); return 0; }
gpl-2.0
RealVNC/Android-kernel-mako-NCM
drivers/net/wireless/wl12xx/sdio.c
4802
9377
/* * This file is part of wl1271 * * Copyright (C) 2009-2010 Nokia Corporation * * Contact: Luciano Coelho <luciano.coelho@nokia.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA * */ #include <linux/irq.h> #include <linux/module.h> #include <linux/vmalloc.h> #include <linux/platform_device.h> #include <linux/mmc/sdio_func.h> #include <linux/mmc/sdio_ids.h> #include <linux/mmc/card.h> #include <linux/mmc/host.h> #include <linux/gpio.h> #include <linux/wl12xx.h> #include <linux/pm_runtime.h> #include "wl12xx.h" #include "wl12xx_80211.h" #include "io.h" #ifndef SDIO_VENDOR_ID_TI #define SDIO_VENDOR_ID_TI 0x0097 #endif #ifndef SDIO_DEVICE_ID_TI_WL1271 #define SDIO_DEVICE_ID_TI_WL1271 0x4076 #endif struct wl12xx_sdio_glue { struct device *dev; struct platform_device *core; }; static const struct sdio_device_id wl1271_devices[] __devinitconst = { { SDIO_DEVICE(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271) }, {} }; MODULE_DEVICE_TABLE(sdio, wl1271_devices); static void wl1271_sdio_set_block_size(struct device *child, unsigned int blksz) { struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent); struct sdio_func *func = dev_to_sdio_func(glue->dev); sdio_claim_host(func); sdio_set_block_size(func, blksz); sdio_release_host(func); } static void wl12xx_sdio_raw_read(struct device *child, int addr, void *buf, size_t len, bool fixed) { int ret; struct wl12xx_sdio_glue *glue = 
dev_get_drvdata(child->parent); struct sdio_func *func = dev_to_sdio_func(glue->dev); sdio_claim_host(func); if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) { ((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret); dev_dbg(child->parent, "sdio read 52 addr 0x%x, byte 0x%02x\n", addr, ((u8 *)buf)[0]); } else { if (fixed) ret = sdio_readsb(func, buf, addr, len); else ret = sdio_memcpy_fromio(func, buf, addr, len); dev_dbg(child->parent, "sdio read 53 addr 0x%x, %zu bytes\n", addr, len); } sdio_release_host(func); if (ret) dev_err(child->parent, "sdio read failed (%d)\n", ret); } static void wl12xx_sdio_raw_write(struct device *child, int addr, void *buf, size_t len, bool fixed) { int ret; struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent); struct sdio_func *func = dev_to_sdio_func(glue->dev); sdio_claim_host(func); if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) { sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret); dev_dbg(child->parent, "sdio write 52 addr 0x%x, byte 0x%02x\n", addr, ((u8 *)buf)[0]); } else { dev_dbg(child->parent, "sdio write 53 addr 0x%x, %zu bytes\n", addr, len); if (fixed) ret = sdio_writesb(func, addr, buf, len); else ret = sdio_memcpy_toio(func, addr, buf, len); } sdio_release_host(func); if (ret) dev_err(child->parent, "sdio write failed (%d)\n", ret); } static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue) { int ret; struct sdio_func *func = dev_to_sdio_func(glue->dev); /* If enabled, tell runtime PM not to power off the card */ if (pm_runtime_enabled(&func->dev)) { ret = pm_runtime_get_sync(&func->dev); if (ret < 0) goto out; } else { /* Runtime PM is disabled: power up the card manually */ ret = mmc_power_restore_host(func->card->host); if (ret < 0) goto out; } sdio_claim_host(func); sdio_enable_func(func); sdio_release_host(func); out: return ret; } static int wl12xx_sdio_power_off(struct wl12xx_sdio_glue *glue) { int ret; struct sdio_func *func = dev_to_sdio_func(glue->dev); sdio_claim_host(func); 
sdio_disable_func(func); sdio_release_host(func); /* Power off the card manually, even if runtime PM is enabled. */ ret = mmc_power_save_host(func->card->host); if (ret < 0) return ret; /* If enabled, let runtime PM know the card is powered off */ if (pm_runtime_enabled(&func->dev)) ret = pm_runtime_put_sync(&func->dev); return ret; } static int wl12xx_sdio_set_power(struct device *child, bool enable) { struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent); if (enable) return wl12xx_sdio_power_on(glue); else return wl12xx_sdio_power_off(glue); } static struct wl1271_if_operations sdio_ops = { .read = wl12xx_sdio_raw_read, .write = wl12xx_sdio_raw_write, .power = wl12xx_sdio_set_power, .set_block_size = wl1271_sdio_set_block_size, }; static int __devinit wl1271_probe(struct sdio_func *func, const struct sdio_device_id *id) { struct wl12xx_platform_data *wlan_data; struct wl12xx_sdio_glue *glue; struct resource res[1]; mmc_pm_flag_t mmcflags; int ret = -ENOMEM; /* We are only able to handle the wlan function */ if (func->num != 0x02) return -ENODEV; glue = kzalloc(sizeof(*glue), GFP_KERNEL); if (!glue) { dev_err(&func->dev, "can't allocate glue\n"); goto out; } glue->dev = &func->dev; /* Grab access to FN0 for ELP reg. 
*/ func->card->quirks |= MMC_QUIRK_LENIENT_FN0; /* Use block mode for transferring over one block size of data */ func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE; wlan_data = wl12xx_get_platform_data(); if (IS_ERR(wlan_data)) { ret = PTR_ERR(wlan_data); dev_err(glue->dev, "missing wlan platform data: %d\n", ret); goto out_free_glue; } /* if sdio can keep power while host is suspended, enable wow */ mmcflags = sdio_get_host_pm_caps(func); dev_dbg(glue->dev, "sdio PM caps = 0x%x\n", mmcflags); if (mmcflags & MMC_PM_KEEP_POWER) wlan_data->pwr_in_suspend = true; wlan_data->ops = &sdio_ops; sdio_set_drvdata(func, glue); /* Tell PM core that we don't need the card to be powered now */ pm_runtime_put_noidle(&func->dev); glue->core = platform_device_alloc("wl12xx", -1); if (!glue->core) { dev_err(glue->dev, "can't allocate platform_device"); ret = -ENOMEM; goto out_free_glue; } glue->core->dev.parent = &func->dev; memset(res, 0x00, sizeof(res)); res[0].start = wlan_data->irq; res[0].flags = IORESOURCE_IRQ; res[0].name = "irq"; ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res)); if (ret) { dev_err(glue->dev, "can't add resources\n"); goto out_dev_put; } ret = platform_device_add_data(glue->core, wlan_data, sizeof(*wlan_data)); if (ret) { dev_err(glue->dev, "can't add platform data\n"); goto out_dev_put; } ret = platform_device_add(glue->core); if (ret) { dev_err(glue->dev, "can't add platform device\n"); goto out_dev_put; } return 0; out_dev_put: platform_device_put(glue->core); out_free_glue: kfree(glue); out: return ret; } static void __devexit wl1271_remove(struct sdio_func *func) { struct wl12xx_sdio_glue *glue = sdio_get_drvdata(func); /* Undo decrement done above in wl1271_probe */ pm_runtime_get_noresume(&func->dev); platform_device_del(glue->core); platform_device_put(glue->core); kfree(glue); } #ifdef CONFIG_PM static int wl1271_suspend(struct device *dev) { /* Tell MMC/SDIO core it's OK to power down the card * (if it isn't already), but not 
to remove it completely */ struct sdio_func *func = dev_to_sdio_func(dev); struct wl12xx_sdio_glue *glue = sdio_get_drvdata(func); struct wl1271 *wl = platform_get_drvdata(glue->core); mmc_pm_flag_t sdio_flags; int ret = 0; dev_dbg(dev, "wl1271 suspend. wow_enabled: %d\n", wl->wow_enabled); /* check whether sdio should keep power */ if (wl->wow_enabled) { sdio_flags = sdio_get_host_pm_caps(func); if (!(sdio_flags & MMC_PM_KEEP_POWER)) { dev_err(dev, "can't keep power while host " "is suspended\n"); ret = -EINVAL; goto out; } /* keep power while host suspended */ ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); if (ret) { dev_err(dev, "error while trying to keep power\n"); goto out; } } out: return ret; } static int wl1271_resume(struct device *dev) { dev_dbg(dev, "wl1271 resume\n"); return 0; } static const struct dev_pm_ops wl1271_sdio_pm_ops = { .suspend = wl1271_suspend, .resume = wl1271_resume, }; #endif static struct sdio_driver wl1271_sdio_driver = { .name = "wl1271_sdio", .id_table = wl1271_devices, .probe = wl1271_probe, .remove = __devexit_p(wl1271_remove), #ifdef CONFIG_PM .drv = { .pm = &wl1271_sdio_pm_ops, }, #endif }; static int __init wl1271_init(void) { return sdio_register_driver(&wl1271_sdio_driver); } static void __exit wl1271_exit(void) { sdio_unregister_driver(&wl1271_sdio_driver); } module_init(wl1271_init); module_exit(wl1271_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>"); MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>"); MODULE_FIRMWARE(WL127X_FW_NAME_SINGLE); MODULE_FIRMWARE(WL127X_FW_NAME_MULTI); MODULE_FIRMWARE(WL127X_PLT_FW_NAME); MODULE_FIRMWARE(WL128X_FW_NAME_SINGLE); MODULE_FIRMWARE(WL128X_FW_NAME_MULTI); MODULE_FIRMWARE(WL128X_PLT_FW_NAME);
gpl-2.0
rmbq/android_kernel_lge_hammerhead
net/802/garp.c
4802
18089
/* * IEEE 802.1D Generic Attribute Registration Protocol (GARP) * * Copyright (c) 2008 Patrick McHardy <kaber@trash.net> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/timer.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/rtnetlink.h> #include <linux/llc.h> #include <linux/slab.h> #include <linux/module.h> #include <net/llc.h> #include <net/llc_pdu.h> #include <net/garp.h> #include <asm/unaligned.h> static unsigned int garp_join_time __read_mostly = 200; module_param(garp_join_time, uint, 0644); MODULE_PARM_DESC(garp_join_time, "Join time in ms (default 200ms)"); MODULE_LICENSE("GPL"); static const struct garp_state_trans { u8 state; u8 action; } garp_applicant_state_table[GARP_APPLICANT_MAX + 1][GARP_EVENT_MAX + 1] = { [GARP_APPLICANT_VA] = { [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_AA, .action = GARP_ACTION_S_JOIN_IN }, [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_AA }, [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VA }, [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VA }, [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VA }, [GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VP }, [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_INVALID }, [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_LA }, }, [GARP_APPLICANT_AA] = { [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_QA, .action = GARP_ACTION_S_JOIN_IN }, [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_QA }, [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VA }, [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VA }, [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VA }, [GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VP }, [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_INVALID }, [GARP_EVENT_REQ_LEAVE] = { 
.state = GARP_APPLICANT_LA }, }, [GARP_APPLICANT_QA] = { [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_INVALID }, [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_QA }, [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VA }, [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VA }, [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VP }, [GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VP }, [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_INVALID }, [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_LA }, }, [GARP_APPLICANT_LA] = { [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_VO, .action = GARP_ACTION_S_LEAVE_EMPTY }, [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_LA }, [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VO }, [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_LA }, [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_LA }, [GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VO }, [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_VA }, [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_INVALID }, }, [GARP_APPLICANT_VP] = { [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_AA, .action = GARP_ACTION_S_JOIN_IN }, [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_AP }, [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VP }, [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VP }, [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VP }, [GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VP }, [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_INVALID }, [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_VO }, }, [GARP_APPLICANT_AP] = { [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_QA, .action = GARP_ACTION_S_JOIN_IN }, [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_QP }, [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VP }, [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VP }, [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VP }, [GARP_EVENT_R_LEAVE_EMPTY] = { .state = 
GARP_APPLICANT_VP }, [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_INVALID }, [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_AO }, }, [GARP_APPLICANT_QP] = { [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_INVALID }, [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_QP }, [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VP }, [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VP }, [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VP }, [GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VP }, [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_INVALID }, [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_QO }, }, [GARP_APPLICANT_VO] = { [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_INVALID }, [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_AO }, [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VO }, [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VO }, [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VO }, [GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VO }, [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_VP }, [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_INVALID }, }, [GARP_APPLICANT_AO] = { [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_INVALID }, [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_QO }, [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VO }, [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VO }, [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VO }, [GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VO }, [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_AP }, [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_INVALID }, }, [GARP_APPLICANT_QO] = { [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_INVALID }, [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_QO }, [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VO }, [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VO }, [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VO }, [GARP_EVENT_R_LEAVE_EMPTY] = { 
.state = GARP_APPLICANT_VO }, [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_QP }, [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_INVALID }, }, }; static int garp_attr_cmp(const struct garp_attr *attr, const void *data, u8 len, u8 type) { if (attr->type != type) return attr->type - type; if (attr->dlen != len) return attr->dlen - len; return memcmp(attr->data, data, len); } static struct garp_attr *garp_attr_lookup(const struct garp_applicant *app, const void *data, u8 len, u8 type) { struct rb_node *parent = app->gid.rb_node; struct garp_attr *attr; int d; while (parent) { attr = rb_entry(parent, struct garp_attr, node); d = garp_attr_cmp(attr, data, len, type); if (d < 0) parent = parent->rb_left; else if (d > 0) parent = parent->rb_right; else return attr; } return NULL; } static struct garp_attr *garp_attr_create(struct garp_applicant *app, const void *data, u8 len, u8 type) { struct rb_node *parent = NULL, **p = &app->gid.rb_node; struct garp_attr *attr; int d; while (*p) { parent = *p; attr = rb_entry(parent, struct garp_attr, node); d = garp_attr_cmp(attr, data, len, type); if (d < 0) p = &parent->rb_left; else if (d > 0) p = &parent->rb_right; else { /* The attribute already exists; re-use it. 
*/ return attr; } } attr = kmalloc(sizeof(*attr) + len, GFP_ATOMIC); if (!attr) return attr; attr->state = GARP_APPLICANT_VO; attr->type = type; attr->dlen = len; memcpy(attr->data, data, len); rb_link_node(&attr->node, parent, p); rb_insert_color(&attr->node, &app->gid); return attr; } static void garp_attr_destroy(struct garp_applicant *app, struct garp_attr *attr) { rb_erase(&attr->node, &app->gid); kfree(attr); } static int garp_pdu_init(struct garp_applicant *app) { struct sk_buff *skb; struct garp_pdu_hdr *gp; #define LLC_RESERVE sizeof(struct llc_pdu_un) skb = alloc_skb(app->dev->mtu + LL_RESERVED_SPACE(app->dev), GFP_ATOMIC); if (!skb) return -ENOMEM; skb->dev = app->dev; skb->protocol = htons(ETH_P_802_2); skb_reserve(skb, LL_RESERVED_SPACE(app->dev) + LLC_RESERVE); gp = (struct garp_pdu_hdr *)__skb_put(skb, sizeof(*gp)); put_unaligned(htons(GARP_PROTOCOL_ID), &gp->protocol); app->pdu = skb; return 0; } static int garp_pdu_append_end_mark(struct garp_applicant *app) { if (skb_tailroom(app->pdu) < sizeof(u8)) return -1; *(u8 *)__skb_put(app->pdu, sizeof(u8)) = GARP_END_MARK; return 0; } static void garp_pdu_queue(struct garp_applicant *app) { if (!app->pdu) return; garp_pdu_append_end_mark(app); garp_pdu_append_end_mark(app); llc_pdu_header_init(app->pdu, LLC_PDU_TYPE_U, LLC_SAP_BSPAN, LLC_SAP_BSPAN, LLC_PDU_CMD); llc_pdu_init_as_ui_cmd(app->pdu); llc_mac_hdr_init(app->pdu, app->dev->dev_addr, app->app->proto.group_address); skb_queue_tail(&app->queue, app->pdu); app->pdu = NULL; } static void garp_queue_xmit(struct garp_applicant *app) { struct sk_buff *skb; while ((skb = skb_dequeue(&app->queue))) dev_queue_xmit(skb); } static int garp_pdu_append_msg(struct garp_applicant *app, u8 attrtype) { struct garp_msg_hdr *gm; if (skb_tailroom(app->pdu) < sizeof(*gm)) return -1; gm = (struct garp_msg_hdr *)__skb_put(app->pdu, sizeof(*gm)); gm->attrtype = attrtype; garp_cb(app->pdu)->cur_type = attrtype; return 0; } static int garp_pdu_append_attr(struct 
garp_applicant *app, const struct garp_attr *attr, enum garp_attr_event event) { struct garp_attr_hdr *ga; unsigned int len; int err; again: if (!app->pdu) { err = garp_pdu_init(app); if (err < 0) return err; } if (garp_cb(app->pdu)->cur_type != attr->type) { if (garp_cb(app->pdu)->cur_type && garp_pdu_append_end_mark(app) < 0) goto queue; if (garp_pdu_append_msg(app, attr->type) < 0) goto queue; } len = sizeof(*ga) + attr->dlen; if (skb_tailroom(app->pdu) < len) goto queue; ga = (struct garp_attr_hdr *)__skb_put(app->pdu, len); ga->len = len; ga->event = event; memcpy(ga->data, attr->data, attr->dlen); return 0; queue: garp_pdu_queue(app); goto again; } static void garp_attr_event(struct garp_applicant *app, struct garp_attr *attr, enum garp_event event) { enum garp_applicant_state state; state = garp_applicant_state_table[attr->state][event].state; if (state == GARP_APPLICANT_INVALID) return; switch (garp_applicant_state_table[attr->state][event].action) { case GARP_ACTION_NONE: break; case GARP_ACTION_S_JOIN_IN: /* When appending the attribute fails, don't update state in * order to retry on next TRANSMIT_PDU event. */ if (garp_pdu_append_attr(app, attr, GARP_JOIN_IN) < 0) return; break; case GARP_ACTION_S_LEAVE_EMPTY: garp_pdu_append_attr(app, attr, GARP_LEAVE_EMPTY); /* As a pure applicant, sending a leave message implies that * the attribute was unregistered and can be destroyed. 
*/ garp_attr_destroy(app, attr); return; default: WARN_ON(1); } attr->state = state; } int garp_request_join(const struct net_device *dev, const struct garp_application *appl, const void *data, u8 len, u8 type) { struct garp_port *port = rtnl_dereference(dev->garp_port); struct garp_applicant *app = rtnl_dereference(port->applicants[appl->type]); struct garp_attr *attr; spin_lock_bh(&app->lock); attr = garp_attr_create(app, data, len, type); if (!attr) { spin_unlock_bh(&app->lock); return -ENOMEM; } garp_attr_event(app, attr, GARP_EVENT_REQ_JOIN); spin_unlock_bh(&app->lock); return 0; } EXPORT_SYMBOL_GPL(garp_request_join); void garp_request_leave(const struct net_device *dev, const struct garp_application *appl, const void *data, u8 len, u8 type) { struct garp_port *port = rtnl_dereference(dev->garp_port); struct garp_applicant *app = rtnl_dereference(port->applicants[appl->type]); struct garp_attr *attr; spin_lock_bh(&app->lock); attr = garp_attr_lookup(app, data, len, type); if (!attr) { spin_unlock_bh(&app->lock); return; } garp_attr_event(app, attr, GARP_EVENT_REQ_LEAVE); spin_unlock_bh(&app->lock); } EXPORT_SYMBOL_GPL(garp_request_leave); static void garp_gid_event(struct garp_applicant *app, enum garp_event event) { struct rb_node *node, *next; struct garp_attr *attr; for (node = rb_first(&app->gid); next = node ? 
rb_next(node) : NULL, node != NULL; node = next) { attr = rb_entry(node, struct garp_attr, node); garp_attr_event(app, attr, event); } } static void garp_join_timer_arm(struct garp_applicant *app) { unsigned long delay; delay = (u64)msecs_to_jiffies(garp_join_time) * net_random() >> 32; mod_timer(&app->join_timer, jiffies + delay); } static void garp_join_timer(unsigned long data) { struct garp_applicant *app = (struct garp_applicant *)data; spin_lock(&app->lock); garp_gid_event(app, GARP_EVENT_TRANSMIT_PDU); garp_pdu_queue(app); spin_unlock(&app->lock); garp_queue_xmit(app); garp_join_timer_arm(app); } static int garp_pdu_parse_end_mark(struct sk_buff *skb) { if (!pskb_may_pull(skb, sizeof(u8))) return -1; if (*skb->data == GARP_END_MARK) { skb_pull(skb, sizeof(u8)); return -1; } return 0; } static int garp_pdu_parse_attr(struct garp_applicant *app, struct sk_buff *skb, u8 attrtype) { const struct garp_attr_hdr *ga; struct garp_attr *attr; enum garp_event event; unsigned int dlen; if (!pskb_may_pull(skb, sizeof(*ga))) return -1; ga = (struct garp_attr_hdr *)skb->data; if (ga->len < sizeof(*ga)) return -1; if (!pskb_may_pull(skb, ga->len)) return -1; skb_pull(skb, ga->len); dlen = sizeof(*ga) - ga->len; if (attrtype > app->app->maxattr) return 0; switch (ga->event) { case GARP_LEAVE_ALL: if (dlen != 0) return -1; garp_gid_event(app, GARP_EVENT_R_LEAVE_EMPTY); return 0; case GARP_JOIN_EMPTY: event = GARP_EVENT_R_JOIN_EMPTY; break; case GARP_JOIN_IN: event = GARP_EVENT_R_JOIN_IN; break; case GARP_LEAVE_EMPTY: event = GARP_EVENT_R_LEAVE_EMPTY; break; case GARP_EMPTY: event = GARP_EVENT_R_EMPTY; break; default: return 0; } if (dlen == 0) return -1; attr = garp_attr_lookup(app, ga->data, dlen, attrtype); if (attr == NULL) return 0; garp_attr_event(app, attr, event); return 0; } static int garp_pdu_parse_msg(struct garp_applicant *app, struct sk_buff *skb) { const struct garp_msg_hdr *gm; if (!pskb_may_pull(skb, sizeof(*gm))) return -1; gm = (struct garp_msg_hdr 
*)skb->data; if (gm->attrtype == 0) return -1; skb_pull(skb, sizeof(*gm)); while (skb->len > 0) { if (garp_pdu_parse_attr(app, skb, gm->attrtype) < 0) return -1; if (garp_pdu_parse_end_mark(skb) < 0) break; } return 0; } static void garp_pdu_rcv(const struct stp_proto *proto, struct sk_buff *skb, struct net_device *dev) { struct garp_application *appl = proto->data; struct garp_port *port; struct garp_applicant *app; const struct garp_pdu_hdr *gp; port = rcu_dereference(dev->garp_port); if (!port) goto err; app = rcu_dereference(port->applicants[appl->type]); if (!app) goto err; if (!pskb_may_pull(skb, sizeof(*gp))) goto err; gp = (struct garp_pdu_hdr *)skb->data; if (get_unaligned(&gp->protocol) != htons(GARP_PROTOCOL_ID)) goto err; skb_pull(skb, sizeof(*gp)); spin_lock(&app->lock); while (skb->len > 0) { if (garp_pdu_parse_msg(app, skb) < 0) break; if (garp_pdu_parse_end_mark(skb) < 0) break; } spin_unlock(&app->lock); err: kfree_skb(skb); } static int garp_init_port(struct net_device *dev) { struct garp_port *port; port = kzalloc(sizeof(*port), GFP_KERNEL); if (!port) return -ENOMEM; rcu_assign_pointer(dev->garp_port, port); return 0; } static void garp_release_port(struct net_device *dev) { struct garp_port *port = rtnl_dereference(dev->garp_port); unsigned int i; for (i = 0; i <= GARP_APPLICATION_MAX; i++) { if (rtnl_dereference(port->applicants[i])) return; } RCU_INIT_POINTER(dev->garp_port, NULL); kfree_rcu(port, rcu); } int garp_init_applicant(struct net_device *dev, struct garp_application *appl) { struct garp_applicant *app; int err; ASSERT_RTNL(); if (!rtnl_dereference(dev->garp_port)) { err = garp_init_port(dev); if (err < 0) goto err1; } err = -ENOMEM; app = kzalloc(sizeof(*app), GFP_KERNEL); if (!app) goto err2; err = dev_mc_add(dev, appl->proto.group_address); if (err < 0) goto err3; app->dev = dev; app->app = appl; app->gid = RB_ROOT; spin_lock_init(&app->lock); skb_queue_head_init(&app->queue); 
rcu_assign_pointer(dev->garp_port->applicants[appl->type], app); setup_timer(&app->join_timer, garp_join_timer, (unsigned long)app); garp_join_timer_arm(app); return 0; err3: kfree(app); err2: garp_release_port(dev); err1: return err; } EXPORT_SYMBOL_GPL(garp_init_applicant); void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl) { struct garp_port *port = rtnl_dereference(dev->garp_port); struct garp_applicant *app = rtnl_dereference(port->applicants[appl->type]); ASSERT_RTNL(); RCU_INIT_POINTER(port->applicants[appl->type], NULL); /* Delete timer and generate a final TRANSMIT_PDU event to flush out * all pending messages before the applicant is gone. */ del_timer_sync(&app->join_timer); garp_gid_event(app, GARP_EVENT_TRANSMIT_PDU); garp_pdu_queue(app); garp_queue_xmit(app); dev_mc_del(dev, appl->proto.group_address); kfree_rcu(app, rcu); garp_release_port(dev); } EXPORT_SYMBOL_GPL(garp_uninit_applicant); int garp_register_application(struct garp_application *appl) { appl->proto.rcv = garp_pdu_rcv; appl->proto.data = appl; return stp_proto_register(&appl->proto); } EXPORT_SYMBOL_GPL(garp_register_application); void garp_unregister_application(struct garp_application *appl) { stp_proto_unregister(&appl->proto); } EXPORT_SYMBOL_GPL(garp_unregister_application);
gpl-2.0
ShinySide/HispAsian_5.1.1
drivers/media/dvb/ttpci/av7110_v4l.c
5058
28015
/* * av7110_v4l.c: av7110 video4linux interface for DVB and Siemens DVB-C analog module * * Copyright (C) 1999-2002 Ralph Metzler * & Marcus Metzler for convergence integrated media GmbH * * originally based on code by: * Copyright (C) 1998,1999 Christian Theiss <mistert@rz.fh-augsburg.de> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * Or, point your browser to http://www.gnu.org/copyleft/gpl.html * * the project's page is at http://www.linuxtv.org/ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/types.h> #include <linux/delay.h> #include <linux/fs.h> #include <linux/timer.h> #include <linux/poll.h> #include "av7110.h" #include "av7110_hw.h" #include "av7110_av.h" int msp_writereg(struct av7110 *av7110, u8 dev, u16 reg, u16 val) { u8 msg[5] = { dev, reg >> 8, reg & 0xff, val >> 8 , val & 0xff }; struct i2c_msg msgs = { .flags = 0, .len = 5, .buf = msg }; switch (av7110->adac_type) { case DVB_ADAC_MSP34x0: msgs.addr = 0x40; break; case DVB_ADAC_MSP34x5: msgs.addr = 0x42; break; default: return 0; } if (i2c_transfer(&av7110->i2c_adap, &msgs, 1) != 1) { dprintk(1, "dvb-ttpci: failed @ card %d, %u = %u\n", av7110->dvb_adapter.num, reg, val); return -EIO; } return 0; } static int msp_readreg(struct av7110 *av7110, u8 dev, u16 reg, u16 *val) { u8 msg1[3] = { dev, reg >> 8, reg & 0xff }; 
u8 msg2[2]; struct i2c_msg msgs[2] = { { .flags = 0 , .len = 3, .buf = msg1 }, { .flags = I2C_M_RD, .len = 2, .buf = msg2 } }; switch (av7110->adac_type) { case DVB_ADAC_MSP34x0: msgs[0].addr = 0x40; msgs[1].addr = 0x40; break; case DVB_ADAC_MSP34x5: msgs[0].addr = 0x42; msgs[1].addr = 0x42; break; default: return 0; } if (i2c_transfer(&av7110->i2c_adap, &msgs[0], 2) != 2) { dprintk(1, "dvb-ttpci: failed @ card %d, %u\n", av7110->dvb_adapter.num, reg); return -EIO; } *val = (msg2[0] << 8) | msg2[1]; return 0; } static struct v4l2_input inputs[4] = { { .index = 0, .name = "DVB", .type = V4L2_INPUT_TYPE_CAMERA, .audioset = 1, .tuner = 0, /* ignored */ .std = V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, .status = 0, .capabilities = V4L2_IN_CAP_STD, }, { .index = 1, .name = "Television", .type = V4L2_INPUT_TYPE_TUNER, .audioset = 2, .tuner = 0, .std = V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, .status = 0, .capabilities = V4L2_IN_CAP_STD, }, { .index = 2, .name = "Video", .type = V4L2_INPUT_TYPE_CAMERA, .audioset = 0, .tuner = 0, .std = V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, .status = 0, .capabilities = V4L2_IN_CAP_STD, }, { .index = 3, .name = "Y/C", .type = V4L2_INPUT_TYPE_CAMERA, .audioset = 0, .tuner = 0, .std = V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, .status = 0, .capabilities = V4L2_IN_CAP_STD, } }; static int ves1820_writereg(struct saa7146_dev *dev, u8 addr, u8 reg, u8 data) { struct av7110 *av7110 = dev->ext_priv; u8 buf[] = { 0x00, reg, data }; struct i2c_msg msg = { .addr = addr, .flags = 0, .buf = buf, .len = 3 }; dprintk(4, "dev: %p\n", dev); if (1 != i2c_transfer(&av7110->i2c_adap, &msg, 1)) return -1; return 0; } static int tuner_write(struct saa7146_dev *dev, u8 addr, u8 data [4]) { struct av7110 *av7110 = dev->ext_priv; struct i2c_msg msg = { .addr = addr, .flags = 0, .buf = data, .len = 4 }; dprintk(4, "dev: %p\n", dev); if (1 != i2c_transfer(&av7110->i2c_adap, &msg, 1)) return -1; return 0; } static int ves1820_set_tv_freq(struct saa7146_dev *dev, u32 freq) { u32 div; u8 config; u8 
buf[4]; dprintk(4, "freq: 0x%08x\n", freq); /* magic number: 614. tuning with the frequency given by v4l2 is always off by 614*62.5 = 38375 kHz...*/ div = freq + 614; buf[0] = (div >> 8) & 0x7f; buf[1] = div & 0xff; buf[2] = 0x8e; if (freq < (u32) (16 * 168.25)) config = 0xa0; else if (freq < (u32) (16 * 447.25)) config = 0x90; else config = 0x30; config &= ~0x02; buf[3] = config; return tuner_write(dev, 0x61, buf); } static int stv0297_set_tv_freq(struct saa7146_dev *dev, u32 freq) { struct av7110 *av7110 = (struct av7110*)dev->ext_priv; u32 div; u8 data[4]; div = (freq + 38900000 + 31250) / 62500; data[0] = (div >> 8) & 0x7f; data[1] = div & 0xff; data[2] = 0xce; if (freq < 45000000) return -EINVAL; else if (freq < 137000000) data[3] = 0x01; else if (freq < 403000000) data[3] = 0x02; else if (freq < 860000000) data[3] = 0x04; else return -EINVAL; if (av7110->fe->ops.i2c_gate_ctrl) av7110->fe->ops.i2c_gate_ctrl(av7110->fe, 1); return tuner_write(dev, 0x63, data); } static struct saa7146_standard analog_standard[]; static struct saa7146_standard dvb_standard[]; static struct saa7146_standard standard[]; static struct v4l2_audio msp3400_v4l2_audio = { .index = 0, .name = "Television", .capability = V4L2_AUDCAP_STEREO }; static int av7110_dvb_c_switch(struct saa7146_fh *fh) { struct saa7146_dev *dev = fh->dev; struct saa7146_vv *vv = dev->vv_data; struct av7110 *av7110 = (struct av7110*)dev->ext_priv; u16 adswitch; int source, sync, err; dprintk(4, "%p\n", av7110); if ((vv->video_status & STATUS_OVERLAY) != 0) { vv->ov_suspend = vv->video_fh; err = saa7146_stop_preview(vv->video_fh); /* side effect: video_status is now 0, video_fh is NULL */ if (err != 0) { dprintk(2, "suspending video failed\n"); vv->ov_suspend = NULL; } } if (0 != av7110->current_input) { dprintk(1, "switching to analog TV:\n"); adswitch = 1; source = SAA7146_HPS_SOURCE_PORT_B; sync = SAA7146_HPS_SYNC_PORT_B; memcpy(standard, analog_standard, sizeof(struct saa7146_standard) * 2); switch 
(av7110->current_input) { case 1: dprintk(1, "switching SAA7113 to Analog Tuner Input\n"); msp_writereg(av7110, MSP_WR_DSP, 0x0008, 0x0000); // loudspeaker source msp_writereg(av7110, MSP_WR_DSP, 0x0009, 0x0000); // headphone source msp_writereg(av7110, MSP_WR_DSP, 0x000a, 0x0000); // SCART 1 source msp_writereg(av7110, MSP_WR_DSP, 0x000e, 0x3000); // FM matrix, mono msp_writereg(av7110, MSP_WR_DSP, 0x0000, 0x4f00); // loudspeaker + headphone msp_writereg(av7110, MSP_WR_DSP, 0x0007, 0x4f00); // SCART 1 volume if (av7110->analog_tuner_flags & ANALOG_TUNER_VES1820) { if (ves1820_writereg(dev, 0x09, 0x0f, 0x60)) dprintk(1, "setting band in demodulator failed\n"); } else if (av7110->analog_tuner_flags & ANALOG_TUNER_STV0297) { saa7146_setgpio(dev, 1, SAA7146_GPIO_OUTHI); // TDA9819 pin9(STD) saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTHI); // TDA9819 pin30(VIF) } if (i2c_writereg(av7110, 0x48, 0x02, 0xd0) != 1) dprintk(1, "saa7113 write failed @ card %d", av7110->dvb_adapter.num); break; case 2: dprintk(1, "switching SAA7113 to Video AV CVBS Input\n"); if (i2c_writereg(av7110, 0x48, 0x02, 0xd2) != 1) dprintk(1, "saa7113 write failed @ card %d", av7110->dvb_adapter.num); break; case 3: dprintk(1, "switching SAA7113 to Video AV Y/C Input\n"); if (i2c_writereg(av7110, 0x48, 0x02, 0xd9) != 1) dprintk(1, "saa7113 write failed @ card %d", av7110->dvb_adapter.num); break; default: dprintk(1, "switching SAA7113 to Input: AV7110: SAA7113: invalid input\n"); } } else { adswitch = 0; source = SAA7146_HPS_SOURCE_PORT_A; sync = SAA7146_HPS_SYNC_PORT_A; memcpy(standard, dvb_standard, sizeof(struct saa7146_standard) * 2); dprintk(1, "switching DVB mode\n"); msp_writereg(av7110, MSP_WR_DSP, 0x0008, 0x0220); // loudspeaker source msp_writereg(av7110, MSP_WR_DSP, 0x0009, 0x0220); // headphone source msp_writereg(av7110, MSP_WR_DSP, 0x000a, 0x0220); // SCART 1 source msp_writereg(av7110, MSP_WR_DSP, 0x000e, 0x3000); // FM matrix, mono msp_writereg(av7110, MSP_WR_DSP, 0x0000, 0x7f00); // 
loudspeaker + headphone msp_writereg(av7110, MSP_WR_DSP, 0x0007, 0x7f00); // SCART 1 volume if (av7110->analog_tuner_flags & ANALOG_TUNER_VES1820) { if (ves1820_writereg(dev, 0x09, 0x0f, 0x20)) dprintk(1, "setting band in demodulator failed\n"); } else if (av7110->analog_tuner_flags & ANALOG_TUNER_STV0297) { saa7146_setgpio(dev, 1, SAA7146_GPIO_OUTLO); // TDA9819 pin9(STD) saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTLO); // TDA9819 pin30(VIF) } } /* hmm, this does not do anything!? */ if (av7110_fw_cmd(av7110, COMTYPE_AUDIODAC, ADSwitch, 1, adswitch)) dprintk(1, "ADSwitch error\n"); saa7146_set_hps_source_and_sync(dev, source, sync); if (vv->ov_suspend != NULL) { saa7146_start_preview(vv->ov_suspend); vv->ov_suspend = NULL; } return 0; } static int vidioc_g_tuner(struct file *file, void *fh, struct v4l2_tuner *t) { struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev; struct av7110 *av7110 = (struct av7110 *)dev->ext_priv; u16 stereo_det; s8 stereo; dprintk(2, "VIDIOC_G_TUNER: %d\n", t->index); if (!av7110->analog_tuner_flags || t->index != 0) return -EINVAL; memset(t, 0, sizeof(*t)); strcpy((char *)t->name, "Television"); t->type = V4L2_TUNER_ANALOG_TV; t->capability = V4L2_TUNER_CAP_NORM | V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2 | V4L2_TUNER_CAP_SAP; t->rangelow = 772; /* 48.25 MHZ / 62.5 kHz = 772, see fi1216mk2-specs, page 2 */ t->rangehigh = 13684; /* 855.25 MHz / 62.5 kHz = 13684 */ /* FIXME: add the real signal strength here */ t->signal = 0xffff; t->afc = 0; /* FIXME: standard / stereo detection is still broken */ msp_readreg(av7110, MSP_RD_DEM, 0x007e, &stereo_det); dprintk(1, "VIDIOC_G_TUNER: msp3400 TV standard detection: 0x%04x\n", stereo_det); msp_readreg(av7110, MSP_RD_DSP, 0x0018, &stereo_det); dprintk(1, "VIDIOC_G_TUNER: msp3400 stereo detection: 0x%04x\n", stereo_det); stereo = (s8)(stereo_det >> 8); if (stereo > 0x10) { /* stereo */ t->rxsubchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_MONO; t->audmode = 
V4L2_TUNER_MODE_STEREO; } else if (stereo < -0x10) { /* bilingual */ t->rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2; t->audmode = V4L2_TUNER_MODE_LANG1; } else /* mono */ t->rxsubchans = V4L2_TUNER_SUB_MONO; return 0; } static int vidioc_s_tuner(struct file *file, void *fh, struct v4l2_tuner *t) { struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev; struct av7110 *av7110 = (struct av7110 *)dev->ext_priv; u16 fm_matrix, src; dprintk(2, "VIDIOC_S_TUNER: %d\n", t->index); if (!av7110->analog_tuner_flags || av7110->current_input != 1) return -EINVAL; switch (t->audmode) { case V4L2_TUNER_MODE_STEREO: dprintk(2, "VIDIOC_S_TUNER: V4L2_TUNER_MODE_STEREO\n"); fm_matrix = 0x3001; /* stereo */ src = 0x0020; break; case V4L2_TUNER_MODE_LANG1_LANG2: dprintk(2, "VIDIOC_S_TUNER: V4L2_TUNER_MODE_LANG1_LANG2\n"); fm_matrix = 0x3000; /* bilingual */ src = 0x0020; break; case V4L2_TUNER_MODE_LANG1: dprintk(2, "VIDIOC_S_TUNER: V4L2_TUNER_MODE_LANG1\n"); fm_matrix = 0x3000; /* mono */ src = 0x0000; break; case V4L2_TUNER_MODE_LANG2: dprintk(2, "VIDIOC_S_TUNER: V4L2_TUNER_MODE_LANG2\n"); fm_matrix = 0x3000; /* mono */ src = 0x0010; break; default: /* case V4L2_TUNER_MODE_MONO: */ dprintk(2, "VIDIOC_S_TUNER: TDA9840_SET_MONO\n"); fm_matrix = 0x3000; /* mono */ src = 0x0030; break; } msp_writereg(av7110, MSP_WR_DSP, 0x000e, fm_matrix); msp_writereg(av7110, MSP_WR_DSP, 0x0008, src); msp_writereg(av7110, MSP_WR_DSP, 0x0009, src); msp_writereg(av7110, MSP_WR_DSP, 0x000a, src); return 0; } static int vidioc_g_frequency(struct file *file, void *fh, struct v4l2_frequency *f) { struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev; struct av7110 *av7110 = (struct av7110 *)dev->ext_priv; dprintk(2, "VIDIOC_G_FREQ: freq:0x%08x\n", f->frequency); if (!av7110->analog_tuner_flags || av7110->current_input != 1) return -EINVAL; memset(f, 0, sizeof(*f)); f->type = V4L2_TUNER_ANALOG_TV; f->frequency = av7110->current_freq; return 0; } static int vidioc_s_frequency(struct file *file, 
void *fh, struct v4l2_frequency *f) { struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev; struct av7110 *av7110 = (struct av7110 *)dev->ext_priv; dprintk(2, "VIDIOC_S_FREQUENCY: freq:0x%08x\n", f->frequency); if (!av7110->analog_tuner_flags || av7110->current_input != 1) return -EINVAL; if (V4L2_TUNER_ANALOG_TV != f->type) return -EINVAL; msp_writereg(av7110, MSP_WR_DSP, 0x0000, 0xffe0); /* fast mute */ msp_writereg(av7110, MSP_WR_DSP, 0x0007, 0xffe0); /* tune in desired frequency */ if (av7110->analog_tuner_flags & ANALOG_TUNER_VES1820) ves1820_set_tv_freq(dev, f->frequency); else if (av7110->analog_tuner_flags & ANALOG_TUNER_STV0297) stv0297_set_tv_freq(dev, f->frequency); av7110->current_freq = f->frequency; msp_writereg(av7110, MSP_WR_DSP, 0x0015, 0x003f); /* start stereo detection */ msp_writereg(av7110, MSP_WR_DSP, 0x0015, 0x0000); msp_writereg(av7110, MSP_WR_DSP, 0x0000, 0x4f00); /* loudspeaker + headphone */ msp_writereg(av7110, MSP_WR_DSP, 0x0007, 0x4f00); /* SCART 1 volume */ return 0; } static int vidioc_enum_input(struct file *file, void *fh, struct v4l2_input *i) { struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev; struct av7110 *av7110 = (struct av7110 *)dev->ext_priv; dprintk(2, "VIDIOC_ENUMINPUT: %d\n", i->index); if (av7110->analog_tuner_flags) { if (i->index >= 4) return -EINVAL; } else { if (i->index != 0) return -EINVAL; } memcpy(i, &inputs[i->index], sizeof(struct v4l2_input)); return 0; } static int vidioc_g_input(struct file *file, void *fh, unsigned int *input) { struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev; struct av7110 *av7110 = (struct av7110 *)dev->ext_priv; *input = av7110->current_input; dprintk(2, "VIDIOC_G_INPUT: %d\n", *input); return 0; } static int vidioc_s_input(struct file *file, void *fh, unsigned int input) { struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev; struct av7110 *av7110 = (struct av7110 *)dev->ext_priv; dprintk(2, "VIDIOC_S_INPUT: %d\n", input); if (!av7110->analog_tuner_flags) 
return 0; if (input >= 4) return -EINVAL; av7110->current_input = input; return av7110_dvb_c_switch(fh); } static int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *a) { dprintk(2, "VIDIOC_G_AUDIO: %d\n", a->index); if (a->index != 0) return -EINVAL; memcpy(a, &msp3400_v4l2_audio, sizeof(struct v4l2_audio)); return 0; } static int vidioc_s_audio(struct file *file, void *fh, struct v4l2_audio *a) { dprintk(2, "VIDIOC_S_AUDIO: %d\n", a->index); return 0; } static int vidioc_g_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_sliced_vbi_cap *cap) { struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev; struct av7110 *av7110 = (struct av7110 *)dev->ext_priv; dprintk(2, "VIDIOC_G_SLICED_VBI_CAP\n"); if (cap->type != V4L2_BUF_TYPE_SLICED_VBI_OUTPUT) return -EINVAL; if (FW_VERSION(av7110->arm_app) >= 0x2623) { cap->service_set = V4L2_SLICED_WSS_625; cap->service_lines[0][23] = V4L2_SLICED_WSS_625; } return 0; } static int vidioc_g_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format *f) { struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev; struct av7110 *av7110 = (struct av7110 *)dev->ext_priv; dprintk(2, "VIDIOC_G_FMT:\n"); if (FW_VERSION(av7110->arm_app) < 0x2623) return -EINVAL; memset(&f->fmt.sliced, 0, sizeof f->fmt.sliced); if (av7110->wssMode) { f->fmt.sliced.service_set = V4L2_SLICED_WSS_625; f->fmt.sliced.service_lines[0][23] = V4L2_SLICED_WSS_625; f->fmt.sliced.io_size = sizeof(struct v4l2_sliced_vbi_data); } return 0; } static int vidioc_s_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format *f) { struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev; struct av7110 *av7110 = (struct av7110 *)dev->ext_priv; dprintk(2, "VIDIOC_S_FMT\n"); if (FW_VERSION(av7110->arm_app) < 0x2623) return -EINVAL; if (f->fmt.sliced.service_set != V4L2_SLICED_WSS_625 && f->fmt.sliced.service_lines[0][23] != V4L2_SLICED_WSS_625) { memset(&f->fmt.sliced, 0, sizeof(f->fmt.sliced)); /* WSS controlled by firmware */ 
av7110->wssMode = 0; av7110->wssData = 0; return av7110_fw_cmd(av7110, COMTYPE_ENCODER, SetWSSConfig, 1, 0); } else { memset(&f->fmt.sliced, 0, sizeof(f->fmt.sliced)); f->fmt.sliced.service_set = V4L2_SLICED_WSS_625; f->fmt.sliced.service_lines[0][23] = V4L2_SLICED_WSS_625; f->fmt.sliced.io_size = sizeof(struct v4l2_sliced_vbi_data); /* WSS controlled by userspace */ av7110->wssMode = 1; av7110->wssData = 0; } return 0; } static int av7110_vbi_reset(struct file *file) { struct saa7146_fh *fh = file->private_data; struct saa7146_dev *dev = fh->dev; struct av7110 *av7110 = (struct av7110*) dev->ext_priv; dprintk(2, "%s\n", __func__); av7110->wssMode = 0; av7110->wssData = 0; if (FW_VERSION(av7110->arm_app) < 0x2623) return 0; else return av7110_fw_cmd(av7110, COMTYPE_ENCODER, SetWSSConfig, 1, 0); } static ssize_t av7110_vbi_write(struct file *file, const char __user *data, size_t count, loff_t *ppos) { struct saa7146_fh *fh = file->private_data; struct saa7146_dev *dev = fh->dev; struct av7110 *av7110 = (struct av7110*) dev->ext_priv; struct v4l2_sliced_vbi_data d; int rc; dprintk(2, "%s\n", __func__); if (FW_VERSION(av7110->arm_app) < 0x2623 || !av7110->wssMode || count != sizeof d) return -EINVAL; if (copy_from_user(&d, data, count)) return -EFAULT; if ((d.id != 0 && d.id != V4L2_SLICED_WSS_625) || d.field != 0 || d.line != 23) return -EINVAL; if (d.id) av7110->wssData = ((d.data[1] << 8) & 0x3f00) | d.data[0]; else av7110->wssData = 0x8000; rc = av7110_fw_cmd(av7110, COMTYPE_ENCODER, SetWSSConfig, 2, 1, av7110->wssData); return (rc < 0) ? 
rc : count; } /**************************************************************************** * INITIALIZATION ****************************************************************************/ static u8 saa7113_init_regs[] = { 0x02, 0xd0, 0x03, 0x23, 0x04, 0x00, 0x05, 0x00, 0x06, 0xe9, 0x07, 0x0d, 0x08, 0x98, 0x09, 0x02, 0x0a, 0x80, 0x0b, 0x40, 0x0c, 0x40, 0x0d, 0x00, 0x0e, 0x01, 0x0f, 0x7c, 0x10, 0x48, 0x11, 0x0c, 0x12, 0x8b, 0x13, 0x1a, 0x14, 0x00, 0x15, 0x00, 0x16, 0x00, 0x17, 0x00, 0x18, 0x00, 0x19, 0x00, 0x1a, 0x00, 0x1b, 0x00, 0x1c, 0x00, 0x1d, 0x00, 0x1e, 0x00, 0x41, 0x77, 0x42, 0x77, 0x43, 0x77, 0x44, 0x77, 0x45, 0x77, 0x46, 0x77, 0x47, 0x77, 0x48, 0x77, 0x49, 0x77, 0x4a, 0x77, 0x4b, 0x77, 0x4c, 0x77, 0x4d, 0x77, 0x4e, 0x77, 0x4f, 0x77, 0x50, 0x77, 0x51, 0x77, 0x52, 0x77, 0x53, 0x77, 0x54, 0x77, 0x55, 0x77, 0x56, 0x77, 0x57, 0xff, 0xff }; static struct saa7146_ext_vv av7110_vv_data_st; static struct saa7146_ext_vv av7110_vv_data_c; int av7110_init_analog_module(struct av7110 *av7110) { u16 version1, version2; if (i2c_writereg(av7110, 0x80, 0x0, 0x80) == 1 && i2c_writereg(av7110, 0x80, 0x0, 0) == 1) { pr_info("DVB-C analog module @ card %d detected, initializing MSP3400\n", av7110->dvb_adapter.num); av7110->adac_type = DVB_ADAC_MSP34x0; } else if (i2c_writereg(av7110, 0x84, 0x0, 0x80) == 1 && i2c_writereg(av7110, 0x84, 0x0, 0) == 1) { pr_info("DVB-C analog module @ card %d detected, initializing MSP3415\n", av7110->dvb_adapter.num); av7110->adac_type = DVB_ADAC_MSP34x5; } else return -ENODEV; msleep(100); // the probing above resets the msp... 
msp_readreg(av7110, MSP_RD_DSP, 0x001e, &version1); msp_readreg(av7110, MSP_RD_DSP, 0x001f, &version2); dprintk(1, "dvb-ttpci: @ card %d MSP34xx version 0x%04x 0x%04x\n", av7110->dvb_adapter.num, version1, version2); msp_writereg(av7110, MSP_WR_DSP, 0x0013, 0x0c00); msp_writereg(av7110, MSP_WR_DSP, 0x0000, 0x7f00); // loudspeaker + headphone msp_writereg(av7110, MSP_WR_DSP, 0x0008, 0x0220); // loudspeaker source msp_writereg(av7110, MSP_WR_DSP, 0x0009, 0x0220); // headphone source msp_writereg(av7110, MSP_WR_DSP, 0x0004, 0x7f00); // loudspeaker volume msp_writereg(av7110, MSP_WR_DSP, 0x000a, 0x0220); // SCART 1 source msp_writereg(av7110, MSP_WR_DSP, 0x0007, 0x7f00); // SCART 1 volume msp_writereg(av7110, MSP_WR_DSP, 0x000d, 0x1900); // prescale SCART if (i2c_writereg(av7110, 0x48, 0x01, 0x00)!=1) { pr_info("saa7113 not accessible\n"); } else { u8 *i = saa7113_init_regs; if ((av7110->dev->pci->subsystem_vendor == 0x110a) && (av7110->dev->pci->subsystem_device == 0x0000)) { /* Fujitsu/Siemens DVB-Cable */ av7110->analog_tuner_flags |= ANALOG_TUNER_VES1820; } else if ((av7110->dev->pci->subsystem_vendor == 0x13c2) && (av7110->dev->pci->subsystem_device == 0x0002)) { /* Hauppauge/TT DVB-C premium */ av7110->analog_tuner_flags |= ANALOG_TUNER_VES1820; } else if ((av7110->dev->pci->subsystem_vendor == 0x13c2) && (av7110->dev->pci->subsystem_device == 0x000A)) { /* Hauppauge/TT DVB-C premium */ av7110->analog_tuner_flags |= ANALOG_TUNER_STV0297; } /* setup for DVB by default */ if (av7110->analog_tuner_flags & ANALOG_TUNER_VES1820) { if (ves1820_writereg(av7110->dev, 0x09, 0x0f, 0x20)) dprintk(1, "setting band in demodulator failed\n"); } else if (av7110->analog_tuner_flags & ANALOG_TUNER_STV0297) { saa7146_setgpio(av7110->dev, 1, SAA7146_GPIO_OUTLO); // TDA9819 pin9(STD) saa7146_setgpio(av7110->dev, 3, SAA7146_GPIO_OUTLO); // TDA9819 pin30(VIF) } /* init the saa7113 */ while (*i != 0xff) { if (i2c_writereg(av7110, 0x48, i[0], i[1]) != 1) { dprintk(1, "saa7113 
initialization failed @ card %d", av7110->dvb_adapter.num); break; } i += 2; } /* setup msp for analog sound: B/G Dual-FM */ msp_writereg(av7110, MSP_WR_DEM, 0x00bb, 0x02d0); // AD_CV msp_writereg(av7110, MSP_WR_DEM, 0x0001, 3); // FIR1 msp_writereg(av7110, MSP_WR_DEM, 0x0001, 18); // FIR1 msp_writereg(av7110, MSP_WR_DEM, 0x0001, 27); // FIR1 msp_writereg(av7110, MSP_WR_DEM, 0x0001, 48); // FIR1 msp_writereg(av7110, MSP_WR_DEM, 0x0001, 66); // FIR1 msp_writereg(av7110, MSP_WR_DEM, 0x0001, 72); // FIR1 msp_writereg(av7110, MSP_WR_DEM, 0x0005, 4); // FIR2 msp_writereg(av7110, MSP_WR_DEM, 0x0005, 64); // FIR2 msp_writereg(av7110, MSP_WR_DEM, 0x0005, 0); // FIR2 msp_writereg(av7110, MSP_WR_DEM, 0x0005, 3); // FIR2 msp_writereg(av7110, MSP_WR_DEM, 0x0005, 18); // FIR2 msp_writereg(av7110, MSP_WR_DEM, 0x0005, 27); // FIR2 msp_writereg(av7110, MSP_WR_DEM, 0x0005, 48); // FIR2 msp_writereg(av7110, MSP_WR_DEM, 0x0005, 66); // FIR2 msp_writereg(av7110, MSP_WR_DEM, 0x0005, 72); // FIR2 msp_writereg(av7110, MSP_WR_DEM, 0x0083, 0xa000); // MODE_REG msp_writereg(av7110, MSP_WR_DEM, 0x0093, 0x00aa); // DCO1_LO 5.74MHz msp_writereg(av7110, MSP_WR_DEM, 0x009b, 0x04fc); // DCO1_HI msp_writereg(av7110, MSP_WR_DEM, 0x00a3, 0x038e); // DCO2_LO 5.5MHz msp_writereg(av7110, MSP_WR_DEM, 0x00ab, 0x04c6); // DCO2_HI msp_writereg(av7110, MSP_WR_DEM, 0x0056, 0); // LOAD_REG 1/2 } memcpy(standard, dvb_standard, sizeof(struct saa7146_standard) * 2); /* set dd1 stream a & b */ saa7146_write(av7110->dev, DD1_STREAM_B, 0x00000000); saa7146_write(av7110->dev, DD1_INIT, 0x03000700); saa7146_write(av7110->dev, MC2, (MASK_09 | MASK_25 | MASK_10 | MASK_26)); return 0; } int av7110_init_v4l(struct av7110 *av7110) { struct saa7146_dev* dev = av7110->dev; struct saa7146_ext_vv *vv_data; int ret; /* special case DVB-C: these cards have an analog tuner plus need some special handling, so we have separate saa7146_ext_vv data for these... 
*/ if (av7110->analog_tuner_flags) vv_data = &av7110_vv_data_c; else vv_data = &av7110_vv_data_st; ret = saa7146_vv_init(dev, vv_data); if (ret) { ERR("cannot init capture device. skipping\n"); return -ENODEV; } vv_data->ops.vidioc_enum_input = vidioc_enum_input; vv_data->ops.vidioc_g_input = vidioc_g_input; vv_data->ops.vidioc_s_input = vidioc_s_input; vv_data->ops.vidioc_g_tuner = vidioc_g_tuner; vv_data->ops.vidioc_s_tuner = vidioc_s_tuner; vv_data->ops.vidioc_g_frequency = vidioc_g_frequency; vv_data->ops.vidioc_s_frequency = vidioc_s_frequency; vv_data->ops.vidioc_g_audio = vidioc_g_audio; vv_data->ops.vidioc_s_audio = vidioc_s_audio; vv_data->ops.vidioc_g_sliced_vbi_cap = vidioc_g_sliced_vbi_cap; vv_data->ops.vidioc_g_fmt_sliced_vbi_out = vidioc_g_fmt_sliced_vbi_out; vv_data->ops.vidioc_s_fmt_sliced_vbi_out = vidioc_s_fmt_sliced_vbi_out; if (saa7146_register_device(&av7110->v4l_dev, dev, "av7110", VFL_TYPE_GRABBER)) { ERR("cannot register capture device. skipping\n"); saa7146_vv_release(dev); return -ENODEV; } if (saa7146_register_device(&av7110->vbi_dev, dev, "av7110", VFL_TYPE_VBI)) ERR("cannot register vbi v4l2 device. skipping\n"); return 0; } int av7110_exit_v4l(struct av7110 *av7110) { struct saa7146_dev* dev = av7110->dev; saa7146_unregister_device(&av7110->v4l_dev, av7110->dev); saa7146_unregister_device(&av7110->vbi_dev, av7110->dev); saa7146_vv_release(dev); return 0; } /* FIXME: these values are experimental values that look better than the values from the latest "official" driver -- at least for me... 
(MiHu) */ static struct saa7146_standard standard[] = { { .name = "PAL", .id = V4L2_STD_PAL_BG, .v_offset = 0x15, .v_field = 288, .h_offset = 0x48, .h_pixels = 708, .v_max_out = 576, .h_max_out = 768, }, { .name = "NTSC", .id = V4L2_STD_NTSC, .v_offset = 0x10, .v_field = 244, .h_offset = 0x40, .h_pixels = 708, .v_max_out = 480, .h_max_out = 640, } }; static struct saa7146_standard analog_standard[] = { { .name = "PAL", .id = V4L2_STD_PAL_BG, .v_offset = 0x1b, .v_field = 288, .h_offset = 0x08, .h_pixels = 708, .v_max_out = 576, .h_max_out = 768, }, { .name = "NTSC", .id = V4L2_STD_NTSC, .v_offset = 0x10, .v_field = 244, .h_offset = 0x40, .h_pixels = 708, .v_max_out = 480, .h_max_out = 640, } }; static struct saa7146_standard dvb_standard[] = { { .name = "PAL", .id = V4L2_STD_PAL_BG, .v_offset = 0x14, .v_field = 288, .h_offset = 0x48, .h_pixels = 708, .v_max_out = 576, .h_max_out = 768, }, { .name = "NTSC", .id = V4L2_STD_NTSC, .v_offset = 0x10, .v_field = 244, .h_offset = 0x40, .h_pixels = 708, .v_max_out = 480, .h_max_out = 640, } }; static int std_callback(struct saa7146_dev* dev, struct saa7146_standard *std) { struct av7110 *av7110 = (struct av7110*) dev->ext_priv; if (std->id & V4L2_STD_PAL) { av7110->vidmode = AV7110_VIDEO_MODE_PAL; av7110_set_vidmode(av7110, av7110->vidmode); } else if (std->id & V4L2_STD_NTSC) { av7110->vidmode = AV7110_VIDEO_MODE_NTSC; av7110_set_vidmode(av7110, av7110->vidmode); } else return -1; return 0; } static struct saa7146_ext_vv av7110_vv_data_st = { .inputs = 1, .audios = 1, .capabilities = V4L2_CAP_SLICED_VBI_OUTPUT, .flags = 0, .stds = &standard[0], .num_stds = ARRAY_SIZE(standard), .std_callback = &std_callback, .vbi_fops.open = av7110_vbi_reset, .vbi_fops.release = av7110_vbi_reset, .vbi_fops.write = av7110_vbi_write, }; static struct saa7146_ext_vv av7110_vv_data_c = { .inputs = 1, .audios = 1, .capabilities = V4L2_CAP_TUNER | V4L2_CAP_SLICED_VBI_OUTPUT, .flags = SAA7146_USE_PORT_B_FOR_VBI, .stds = &standard[0], .num_stds = 
ARRAY_SIZE(standard), .std_callback = &std_callback, .vbi_fops.open = av7110_vbi_reset, .vbi_fops.release = av7110_vbi_reset, .vbi_fops.write = av7110_vbi_write, };
gpl-2.0
RuanJG/uTouch-kernel
arch/sparc/mm/srmmu.c
7106
70399
/* * srmmu.c: SRMMU specific routines for memory management. * * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) * Copyright (C) 1995,2002 Pete Zaitcev (zaitcev@yahoo.com) * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) * Copyright (C) 1999,2000 Anton Blanchard (anton@samba.org) */ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/vmalloc.h> #include <linux/pagemap.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/bootmem.h> #include <linux/fs.h> #include <linux/seq_file.h> #include <linux/kdebug.h> #include <linux/log2.h> #include <linux/gfp.h> #include <asm/bitext.h> #include <asm/page.h> #include <asm/pgalloc.h> #include <asm/pgtable.h> #include <asm/io.h> #include <asm/vaddrs.h> #include <asm/traps.h> #include <asm/smp.h> #include <asm/mbus.h> #include <asm/cache.h> #include <asm/oplib.h> #include <asm/asi.h> #include <asm/msi.h> #include <asm/mmu_context.h> #include <asm/io-unit.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> /* Now the cpu specific definitions. 
*/ #include <asm/viking.h> #include <asm/mxcc.h> #include <asm/ross.h> #include <asm/tsunami.h> #include <asm/swift.h> #include <asm/turbosparc.h> #include <asm/leon.h> #include <asm/btfixup.h> enum mbus_module srmmu_modtype; static unsigned int hwbug_bitmask; int vac_cache_size; int vac_line_size; extern struct resource sparc_iomap; extern unsigned long last_valid_pfn; extern unsigned long page_kernel; static pgd_t *srmmu_swapper_pg_dir; #ifdef CONFIG_SMP #define FLUSH_BEGIN(mm) #define FLUSH_END #else #define FLUSH_BEGIN(mm) if((mm)->context != NO_CONTEXT) { #define FLUSH_END } #endif BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long) #define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page) int flush_page_for_dma_global = 1; #ifdef CONFIG_SMP BTFIXUPDEF_CALL(void, local_flush_page_for_dma, unsigned long) #define local_flush_page_for_dma(page) BTFIXUP_CALL(local_flush_page_for_dma)(page) #endif char *srmmu_name; ctxd_t *srmmu_ctx_table_phys; static ctxd_t *srmmu_context_table; int viking_mxcc_present; static DEFINE_SPINLOCK(srmmu_context_spinlock); static int is_hypersparc; /* * In general all page table modifications should use the V8 atomic * swap instruction. This insures the mmu and the cpu are in sync * with respect to ref/mod bits in the page tables. */ static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value) { __asm__ __volatile__("swap [%2], %0" : "=&r" (value) : "0" (value), "r" (addr)); return value; } static inline void srmmu_set_pte(pte_t *ptep, pte_t pteval) { srmmu_swap((unsigned long *)ptep, pte_val(pteval)); } /* The very generic SRMMU page table operations. 
*/ static inline int srmmu_device_memory(unsigned long x) { return ((x & 0xF0000000) != 0); } static int srmmu_cache_pagetables; /* these will be initialized in srmmu_nocache_calcsize() */ static unsigned long srmmu_nocache_size; static unsigned long srmmu_nocache_end; /* 1 bit <=> 256 bytes of nocache <=> 64 PTEs */ #define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4) /* The context table is a nocache user with the biggest alignment needs. */ #define SRMMU_NOCACHE_ALIGN_MAX (sizeof(ctxd_t)*SRMMU_MAX_CONTEXTS) void *srmmu_nocache_pool; void *srmmu_nocache_bitmap; static struct bit_map srmmu_nocache_map; static unsigned long srmmu_pte_pfn(pte_t pte) { if (srmmu_device_memory(pte_val(pte))) { /* Just return something that will cause * pfn_valid() to return false. This makes * copy_one_pte() to just directly copy to * PTE over. */ return ~0UL; } return (pte_val(pte) & SRMMU_PTE_PMASK) >> (PAGE_SHIFT-4); } static struct page *srmmu_pmd_page(pmd_t pmd) { if (srmmu_device_memory(pmd_val(pmd))) BUG(); return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4)); } static inline unsigned long srmmu_pgd_page(pgd_t pgd) { return srmmu_device_memory(pgd_val(pgd))?~0:(unsigned long)__nocache_va((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); } static inline int srmmu_pte_none(pte_t pte) { return !(pte_val(pte) & 0xFFFFFFF); } static inline int srmmu_pte_present(pte_t pte) { return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE); } static inline void srmmu_pte_clear(pte_t *ptep) { srmmu_set_pte(ptep, __pte(0)); } static inline int srmmu_pmd_none(pmd_t pmd) { return !(pmd_val(pmd) & 0xFFFFFFF); } static inline int srmmu_pmd_bad(pmd_t pmd) { return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; } static inline int srmmu_pmd_present(pmd_t pmd) { return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); } static inline void srmmu_pmd_clear(pmd_t *pmdp) { int i; for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) srmmu_set_pte((pte_t *)&pmdp->pmdv[i], __pte(0)); } static 
inline int srmmu_pgd_none(pgd_t pgd) { return !(pgd_val(pgd) & 0xFFFFFFF); } static inline int srmmu_pgd_bad(pgd_t pgd) { return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; } static inline int srmmu_pgd_present(pgd_t pgd) { return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); } static inline void srmmu_pgd_clear(pgd_t * pgdp) { srmmu_set_pte((pte_t *)pgdp, __pte(0)); } static inline pte_t srmmu_pte_wrprotect(pte_t pte) { return __pte(pte_val(pte) & ~SRMMU_WRITE);} static inline pte_t srmmu_pte_mkclean(pte_t pte) { return __pte(pte_val(pte) & ~SRMMU_DIRTY);} static inline pte_t srmmu_pte_mkold(pte_t pte) { return __pte(pte_val(pte) & ~SRMMU_REF);} static inline pte_t srmmu_pte_mkwrite(pte_t pte) { return __pte(pte_val(pte) | SRMMU_WRITE);} static inline pte_t srmmu_pte_mkdirty(pte_t pte) { return __pte(pte_val(pte) | SRMMU_DIRTY);} static inline pte_t srmmu_pte_mkyoung(pte_t pte) { return __pte(pte_val(pte) | SRMMU_REF);} /* * Conversion functions: convert a page and protection to a page entry, * and a page entry and page directory to the page they refer to. 
*/ static pte_t srmmu_mk_pte(struct page *page, pgprot_t pgprot) { return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot)); } static pte_t srmmu_mk_pte_phys(unsigned long page, pgprot_t pgprot) { return __pte(((page) >> 4) | pgprot_val(pgprot)); } static pte_t srmmu_mk_pte_io(unsigned long page, pgprot_t pgprot, int space) { return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot)); } /* XXX should we hyper_flush_whole_icache here - Anton */ static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp) { srmmu_set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pgdp) >> 4))); } static inline void srmmu_pgd_set(pgd_t * pgdp, pmd_t * pmdp) { srmmu_set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pmdp) >> 4))); } static void srmmu_pmd_set(pmd_t *pmdp, pte_t *ptep) { unsigned long ptp; /* Physical address, shifted right by 4 */ int i; ptp = __nocache_pa((unsigned long) ptep) >> 4; for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) { srmmu_set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp); ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4); } } static void srmmu_pmd_populate(pmd_t *pmdp, struct page *ptep) { unsigned long ptp; /* Physical address, shifted right by 4 */ int i; ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4); /* watch for overflow */ for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) { srmmu_set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp); ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4); } } static inline pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot) { return __pte((pte_val(pte) & SRMMU_CHG_MASK) | pgprot_val(newprot)); } /* to find an entry in a top-level page table... */ static inline pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address) { return mm->pgd + (address >> SRMMU_PGDIR_SHIFT); } /* Find an entry in the second-level page table.. 
*/ static inline pmd_t *srmmu_pmd_offset(pgd_t * dir, unsigned long address) { return (pmd_t *) srmmu_pgd_page(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1)); } /* Find an entry in the third-level page table.. */ static inline pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address) { void *pte; pte = __nocache_va((dir->pmdv[0] & SRMMU_PTD_PMASK) << 4); return (pte_t *) pte + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)); } static unsigned long srmmu_swp_type(swp_entry_t entry) { return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK; } static unsigned long srmmu_swp_offset(swp_entry_t entry) { return (entry.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK; } static swp_entry_t srmmu_swp_entry(unsigned long type, unsigned long offset) { return (swp_entry_t) { (type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT | (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT }; } /* * size: bytes to allocate in the nocache area. * align: bytes, number to align at. * Returns the virtual address of the allocated area. 
*/ static unsigned long __srmmu_get_nocache(int size, int align) { int offset; if (size < SRMMU_NOCACHE_BITMAP_SHIFT) { printk("Size 0x%x too small for nocache request\n", size); size = SRMMU_NOCACHE_BITMAP_SHIFT; } if (size & (SRMMU_NOCACHE_BITMAP_SHIFT-1)) { printk("Size 0x%x unaligned int nocache request\n", size); size += SRMMU_NOCACHE_BITMAP_SHIFT-1; } BUG_ON(align > SRMMU_NOCACHE_ALIGN_MAX); offset = bit_map_string_get(&srmmu_nocache_map, size >> SRMMU_NOCACHE_BITMAP_SHIFT, align >> SRMMU_NOCACHE_BITMAP_SHIFT); if (offset == -1) { printk("srmmu: out of nocache %d: %d/%d\n", size, (int) srmmu_nocache_size, srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT); return 0; } return (SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT)); } static unsigned long srmmu_get_nocache(int size, int align) { unsigned long tmp; tmp = __srmmu_get_nocache(size, align); if (tmp) memset((void *)tmp, 0, size); return tmp; } static void srmmu_free_nocache(unsigned long vaddr, int size) { int offset; if (vaddr < SRMMU_NOCACHE_VADDR) { printk("Vaddr %lx is smaller than nocache base 0x%lx\n", vaddr, (unsigned long)SRMMU_NOCACHE_VADDR); BUG(); } if (vaddr+size > srmmu_nocache_end) { printk("Vaddr %lx is bigger than nocache end 0x%lx\n", vaddr, srmmu_nocache_end); BUG(); } if (!is_power_of_2(size)) { printk("Size 0x%x is not a power of 2\n", size); BUG(); } if (size < SRMMU_NOCACHE_BITMAP_SHIFT) { printk("Size 0x%x is too small\n", size); BUG(); } if (vaddr & (size-1)) { printk("Vaddr %lx is not aligned to size 0x%x\n", vaddr, size); BUG(); } offset = (vaddr - SRMMU_NOCACHE_VADDR) >> SRMMU_NOCACHE_BITMAP_SHIFT; size = size >> SRMMU_NOCACHE_BITMAP_SHIFT; bit_map_clear(&srmmu_nocache_map, offset, size); } static void srmmu_early_allocate_ptable_skeleton(unsigned long start, unsigned long end); extern unsigned long probe_memory(void); /* in fault.c */ /* * Reserve nocache dynamically proportionally to the amount of * system RAM. 
-- Tomas Szepe <szepe@pinerecords.com>, June 2002 */ static void srmmu_nocache_calcsize(void) { unsigned long sysmemavail = probe_memory() / 1024; int srmmu_nocache_npages; srmmu_nocache_npages = sysmemavail / SRMMU_NOCACHE_ALCRATIO / 1024 * 256; /* P3 XXX The 4x overuse: corroborated by /proc/meminfo. */ // if (srmmu_nocache_npages < 256) srmmu_nocache_npages = 256; if (srmmu_nocache_npages < SRMMU_MIN_NOCACHE_PAGES) srmmu_nocache_npages = SRMMU_MIN_NOCACHE_PAGES; /* anything above 1280 blows up */ if (srmmu_nocache_npages > SRMMU_MAX_NOCACHE_PAGES) srmmu_nocache_npages = SRMMU_MAX_NOCACHE_PAGES; srmmu_nocache_size = srmmu_nocache_npages * PAGE_SIZE; srmmu_nocache_end = SRMMU_NOCACHE_VADDR + srmmu_nocache_size; } static void __init srmmu_nocache_init(void) { unsigned int bitmap_bits; pgd_t *pgd; pmd_t *pmd; pte_t *pte; unsigned long paddr, vaddr; unsigned long pteval; bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT; srmmu_nocache_pool = __alloc_bootmem(srmmu_nocache_size, SRMMU_NOCACHE_ALIGN_MAX, 0UL); memset(srmmu_nocache_pool, 0, srmmu_nocache_size); srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL); bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits); srmmu_swapper_pg_dir = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE); memset(__nocache_fix(srmmu_swapper_pg_dir), 0, SRMMU_PGD_TABLE_SIZE); init_mm.pgd = srmmu_swapper_pg_dir; srmmu_early_allocate_ptable_skeleton(SRMMU_NOCACHE_VADDR, srmmu_nocache_end); paddr = __pa((unsigned long)srmmu_nocache_pool); vaddr = SRMMU_NOCACHE_VADDR; while (vaddr < srmmu_nocache_end) { pgd = pgd_offset_k(vaddr); pmd = srmmu_pmd_offset(__nocache_fix(pgd), vaddr); pte = srmmu_pte_offset(__nocache_fix(pmd), vaddr); pteval = ((paddr >> 4) | SRMMU_ET_PTE | SRMMU_PRIV); if (srmmu_cache_pagetables) pteval |= SRMMU_CACHE; srmmu_set_pte(__nocache_fix(pte), __pte(pteval)); vaddr += PAGE_SIZE; paddr += PAGE_SIZE; } flush_cache_all(); flush_tlb_all(); } static 
inline pgd_t *srmmu_get_pgd_fast(void) { pgd_t *pgd = NULL; pgd = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE); if (pgd) { pgd_t *init = pgd_offset_k(0); memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t)); memcpy(pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD, (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); } return pgd; } static void srmmu_free_pgd_fast(pgd_t *pgd) { srmmu_free_nocache((unsigned long)pgd, SRMMU_PGD_TABLE_SIZE); } static pmd_t *srmmu_pmd_alloc_one(struct mm_struct *mm, unsigned long address) { return (pmd_t *)srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE); } static void srmmu_pmd_free(pmd_t * pmd) { srmmu_free_nocache((unsigned long)pmd, SRMMU_PMD_TABLE_SIZE); } /* * Hardware needs alignment to 256 only, but we align to whole page size * to reduce fragmentation problems due to the buddy principle. * XXX Provide actual fragmentation statistics in /proc. * * Alignments up to the page size are the same for physical and virtual * addresses of the nocache area. 
*/ static pte_t * srmmu_pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) { return (pte_t *)srmmu_get_nocache(PTE_SIZE, PTE_SIZE); } static pgtable_t srmmu_pte_alloc_one(struct mm_struct *mm, unsigned long address) { unsigned long pte; struct page *page; if ((pte = (unsigned long)srmmu_pte_alloc_one_kernel(mm, address)) == 0) return NULL; page = pfn_to_page( __nocache_pa(pte) >> PAGE_SHIFT ); pgtable_page_ctor(page); return page; } static void srmmu_free_pte_fast(pte_t *pte) { srmmu_free_nocache((unsigned long)pte, PTE_SIZE); } static void srmmu_pte_free(pgtable_t pte) { unsigned long p; pgtable_page_dtor(pte); p = (unsigned long)page_address(pte); /* Cached address (for test) */ if (p == 0) BUG(); p = page_to_pfn(pte) << PAGE_SHIFT; /* Physical address */ p = (unsigned long) __nocache_va(p); /* Nocached virtual */ srmmu_free_nocache(p, PTE_SIZE); } /* */ static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm) { struct ctx_list *ctxp; ctxp = ctx_free.next; if(ctxp != &ctx_free) { remove_from_ctx_list(ctxp); add_to_used_ctxlist(ctxp); mm->context = ctxp->ctx_number; ctxp->ctx_mm = mm; return; } ctxp = ctx_used.next; if(ctxp->ctx_mm == old_mm) ctxp = ctxp->next; if(ctxp == &ctx_used) panic("out of mmu contexts"); flush_cache_mm(ctxp->ctx_mm); flush_tlb_mm(ctxp->ctx_mm); remove_from_ctx_list(ctxp); add_to_used_ctxlist(ctxp); ctxp->ctx_mm->context = NO_CONTEXT; ctxp->ctx_mm = mm; mm->context = ctxp->ctx_number; } static inline void free_context(int context) { struct ctx_list *ctx_old; ctx_old = ctx_list_pool + context; remove_from_ctx_list(ctx_old); add_to_free_ctxlist(ctx_old); } static void srmmu_switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk, int cpu) { if(mm->context == NO_CONTEXT) { spin_lock(&srmmu_context_spinlock); alloc_context(old_mm, mm); spin_unlock(&srmmu_context_spinlock); srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd); } if (sparc_cpu_model == sparc_leon) 
leon_switch_mm(); if (is_hypersparc) hyper_flush_whole_icache(); srmmu_set_context(mm->context); } /* Low level IO area allocation on the SRMMU. */ static inline void srmmu_mapioaddr(unsigned long physaddr, unsigned long virt_addr, int bus_type) { pgd_t *pgdp; pmd_t *pmdp; pte_t *ptep; unsigned long tmp; physaddr &= PAGE_MASK; pgdp = pgd_offset_k(virt_addr); pmdp = srmmu_pmd_offset(pgdp, virt_addr); ptep = srmmu_pte_offset(pmdp, virt_addr); tmp = (physaddr >> 4) | SRMMU_ET_PTE; /* * I need to test whether this is consistent over all * sun4m's. The bus_type represents the upper 4 bits of * 36-bit physical address on the I/O space lines... */ tmp |= (bus_type << 28); tmp |= SRMMU_PRIV; __flush_page_to_ram(virt_addr); srmmu_set_pte(ptep, __pte(tmp)); } static void srmmu_mapiorange(unsigned int bus, unsigned long xpa, unsigned long xva, unsigned int len) { while (len != 0) { len -= PAGE_SIZE; srmmu_mapioaddr(xpa, xva, bus); xva += PAGE_SIZE; xpa += PAGE_SIZE; } flush_tlb_all(); } static inline void srmmu_unmapioaddr(unsigned long virt_addr) { pgd_t *pgdp; pmd_t *pmdp; pte_t *ptep; pgdp = pgd_offset_k(virt_addr); pmdp = srmmu_pmd_offset(pgdp, virt_addr); ptep = srmmu_pte_offset(pmdp, virt_addr); /* No need to flush uncacheable page. */ srmmu_pte_clear(ptep); } static void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len) { while (len != 0) { len -= PAGE_SIZE; srmmu_unmapioaddr(virt_addr); virt_addr += PAGE_SIZE; } flush_tlb_all(); } /* * On the SRMMU we do not have the problems with limited tlb entries * for mapping kernel pages, so we just take things from the free page * pool. As a side effect we are putting a little too much pressure * on the gfp() subsystem. This setup also makes the logic of the * iommu mapping code a lot easier as we can transparently handle * mappings on the kernel stack without any special code as we did * need on the sun4c. 
 */

/* Allocate a thread_info (kernel stack) straight from the page allocator. */
static struct thread_info *srmmu_alloc_thread_info_node(int node)
{
	struct thread_info *ret;

	ret = (struct thread_info *)__get_free_pages(GFP_KERNEL,
						     THREAD_INFO_ORDER);
#ifdef CONFIG_DEBUG_STACK_USAGE
	/* Zero the stack so unused depth can be measured later. */
	if (ret)
		memset(ret, 0, PAGE_SIZE << THREAD_INFO_ORDER);
#endif /* DEBUG_STACK_USAGE */

	return ret;
}

/* Release a thread_info allocated by srmmu_alloc_thread_info_node(). */
static void srmmu_free_thread_info(struct thread_info *ti)
{
	free_pages((unsigned long)ti, THREAD_INFO_ORDER);
}

/* tsunami.S */
extern void tsunami_flush_cache_all(void);
extern void tsunami_flush_cache_mm(struct mm_struct *mm);
extern void tsunami_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void tsunami_flush_page_to_ram(unsigned long page);
extern void tsunami_flush_page_for_dma(unsigned long page);
extern void tsunami_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void tsunami_flush_tlb_all(void);
extern void tsunami_flush_tlb_mm(struct mm_struct *mm);
extern void tsunami_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void tsunami_setup_blockops(void);

/*
 * Workaround, until we find what's going on with Swift. When low on memory,
 * it sometimes loops in fault/handle_mm_fault incl. flush_tlb_page to find
 * out it is already in page tables/ fault again on the same instruction.
 * I really don't understand it, have checked it and contexts
 * are right, flush_tlb_all is done as well, and it faults again...
 * Strange. -jj
 *
 * The following code is a deadwood that may be necessary when
 * we start to make precise page flushes again. --zaitcev
 */
/* Disabled diagnostic hook; the #if 0 body probed for stale TLB entries. */
static void swift_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t *ptep)
{
#if 0
	static unsigned long last;
	unsigned int val;
	/* unsigned int n; */

	if (address == last) {
		val = srmmu_hwprobe(address);
		if (val != 0 && pte_val(*ptep) != val) {
			printk("swift_update_mmu_cache: "
			    "addr %lx put %08x probed %08x from %p\n",
			    address, pte_val(*ptep), val,
			    __builtin_return_address(0));
			srmmu_flush_whole_tlb();
		}
	}
	last = address;
#endif
}

/* swift.S */
extern void swift_flush_cache_all(void);
extern void swift_flush_cache_mm(struct mm_struct *mm);
extern void swift_flush_cache_range(struct vm_area_struct *vma,
				    unsigned long start, unsigned long end);
extern void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void swift_flush_page_to_ram(unsigned long page);
extern void swift_flush_page_for_dma(unsigned long page);
extern void swift_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void swift_flush_tlb_all(void);
extern void swift_flush_tlb_mm(struct mm_struct *mm);
extern void swift_flush_tlb_range(struct vm_area_struct *vma,
				  unsigned long start, unsigned long end);
extern void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);

#if 0 /* P3: deadwood to debug precise flushes on Swift. */
void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cctx, ctx1;

	page &= PAGE_MASK;
	if ((ctx1 = vma->vm_mm->context) != -1) {
		cctx = srmmu_get_context();
/* Is context # ever different from current context? P3 */
		if (cctx != ctx1) {
			printk("flush ctx %02x curr %02x\n", ctx1, cctx);
			srmmu_set_context(ctx1);
			swift_flush_page(page);
			__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
					"r" (page), "i" (ASI_M_FLUSH_PROBE));
			srmmu_set_context(cctx);
		} else {
			 /* Rm. prot. bits from virt. c.
			  */
			/* swift_flush_cache_all(); */
			/* swift_flush_cache_page(vma, page); */
			swift_flush_page(page);

			__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
				"r" (page), "i" (ASI_M_FLUSH_PROBE));
			/* same as above: srmmu_flush_tlb_page() */
		}
	}
}
#endif

/*
 * The following are all MBUS based SRMMU modules, and therefore could
 * be found in a multiprocessor configuration.  On the whole, these
 * chips seems to be much more touchy about DVMA and page tables
 * with respect to cache coherency.
 */

/* Cypress flushes. */

/*
 * Walk all data-cache tags; any line that is both modified and valid is
 * forced out by reading its alias address (the dummy read does the kick).
 */
static void cypress_flush_cache_all(void)
{
	volatile unsigned long cypress_sucks;
	unsigned long faddr, tagval;

	flush_user_windows();
	for(faddr = 0; faddr < 0x10000; faddr += 0x20) {
		__asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" :
				     "=r" (tagval) :
				     "r" (faddr), "r" (0x40000),
				     "i" (ASI_M_DATAC_TAG));

		/* If modified and valid, kick it. */
		if((tagval & 0x60) == 0x60)
			cypress_sucks = *(unsigned long *)(0xf0020000 + faddr);
	}
}

/*
 * Flush everything cached under 'mm's context, 8 lines per asm block
 * via ASI_M_FLUSH_CTX.  Temporarily switches to the target context.
 */
static void cypress_flush_cache_mm(struct mm_struct *mm)
{
	register unsigned long a, b, c, d, e, f, g;
	unsigned long flags, faddr;
	int octx;

	FLUSH_BEGIN(mm)
	flush_user_windows();
	local_irq_save(flags);
	octx = srmmu_get_context();
	srmmu_set_context(mm->context);
	a = 0x20; b = 0x40; c = 0x60;
	d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;

	/* goto-into-loop: first iteration skips the decrement. */
	faddr = (0x10000 - 0x100);
	goto inside;
	do {
		faddr -= 0x100;
	inside:
		__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
				     "sta %%g0, [%0 + %2] %1\n\t"
				     "sta %%g0, [%0 + %3] %1\n\t"
				     "sta %%g0, [%0 + %4] %1\n\t"
				     "sta %%g0, [%0 + %5] %1\n\t"
				     "sta %%g0, [%0 + %6] %1\n\t"
				     "sta %%g0, [%0 + %7] %1\n\t"
				     "sta %%g0, [%0 + %8] %1\n\t" : :
				     "r" (faddr), "i" (ASI_M_FLUSH_CTX),
				     "r" (a), "r" (b), "r" (c), "r" (d),
				     "r" (e), "r" (f), "r" (g));
	} while(faddr);
	srmmu_set_context(octx);
	local_irq_restore(flags);
	FLUSH_END
}

/* Flush a virtual range, one SRMMU segment (real pmd) at a time. */
static void cypress_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	register unsigned long a, b, c, d, e, f, g;
	unsigned long flags, faddr;
	int octx;

	FLUSH_BEGIN(mm)
	flush_user_windows();
	local_irq_save(flags);
	octx = srmmu_get_context();
	srmmu_set_context(mm->context);
	a = 0x20; b = 0x40; c = 0x60;
	d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;

	start &= SRMMU_REAL_PMD_MASK;
	while(start < end) {
		faddr = (start + (0x10000 - 0x100));
		goto inside;
		do {
			faddr -= 0x100;
		inside:
			__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
					     "sta %%g0, [%0 + %2] %1\n\t"
					     "sta %%g0, [%0 + %3] %1\n\t"
					     "sta %%g0, [%0 + %4] %1\n\t"
					     "sta %%g0, [%0 + %5] %1\n\t"
					     "sta %%g0, [%0 + %6] %1\n\t"
					     "sta %%g0, [%0 + %7] %1\n\t"
					     "sta %%g0, [%0 + %8] %1\n\t" : :
					     "r" (faddr),
					     "i" (ASI_M_FLUSH_SEG),
					     "r" (a), "r" (b), "r" (c), "r" (d),
					     "r" (e), "r" (f), "r" (g));
		} while (faddr != start);
		start += SRMMU_REAL_PMD_SIZE;
	}
	srmmu_set_context(octx);
	local_irq_restore(flags);
	FLUSH_END
}

/* Flush one page's worth of cache lines via ASI_M_FLUSH_PAGE. */
static void cypress_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	register unsigned long a, b, c, d, e, f, g;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags, line;
	int octx;

	FLUSH_BEGIN(mm)
	flush_user_windows();
	local_irq_save(flags);
	octx = srmmu_get_context();
	srmmu_set_context(mm->context);
	a = 0x20; b = 0x40; c = 0x60;
	d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;

	page &= PAGE_MASK;
	line = (page + PAGE_SIZE) - 0x100;
	goto inside;
	do {
		line -= 0x100;
	inside:
		__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
				     "sta %%g0, [%0 + %2] %1\n\t"
				     "sta %%g0, [%0 + %3] %1\n\t"
				     "sta %%g0, [%0 + %4] %1\n\t"
				     "sta %%g0, [%0 + %5] %1\n\t"
				     "sta %%g0, [%0 + %6] %1\n\t"
				     "sta %%g0, [%0 + %7] %1\n\t"
				     "sta %%g0, [%0 + %8] %1\n\t" : :
				     "r" (line), "i" (ASI_M_FLUSH_PAGE),
				     "r" (a), "r" (b), "r" (c), "r" (d),
				     "r" (e), "r" (f), "r" (g));
	} while(line != page);
	srmmu_set_context(octx);
	local_irq_restore(flags);
	FLUSH_END
}

/* Cypress is copy-back, at least that is how we configure it.
 */
/* Write one page's dirty lines back to RAM (no context switch needed). */
static void cypress_flush_page_to_ram(unsigned long page)
{
	register unsigned long a, b, c, d, e, f, g;
	unsigned long line;

	a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
	page &= PAGE_MASK;
	line = (page + PAGE_SIZE) - 0x100;
	goto inside;
	do {
		line -= 0x100;
	inside:
		__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
				     "sta %%g0, [%0 + %2] %1\n\t"
				     "sta %%g0, [%0 + %3] %1\n\t"
				     "sta %%g0, [%0 + %4] %1\n\t"
				     "sta %%g0, [%0 + %5] %1\n\t"
				     "sta %%g0, [%0 + %6] %1\n\t"
				     "sta %%g0, [%0 + %7] %1\n\t"
				     "sta %%g0, [%0 + %8] %1\n\t" : :
				     "r" (line), "i" (ASI_M_FLUSH_PAGE),
				     "r" (a), "r" (b), "r" (c), "r" (d),
				     "r" (e), "r" (f), "r" (g));
	} while(line != page);
}

/* Cypress is also IO cache coherent. */
static void cypress_flush_page_for_dma(unsigned long page)
{
}

/*
 * Cypress has unified L2 VIPT, from which both instructions and data
 * are stored.  It does not have an onboard icache of any sort, therefore
 * no flush is necessary.
 */
static void cypress_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
}

static void cypress_flush_tlb_all(void)
{
	srmmu_flush_whole_tlb();
}

/*
 * Flush all TLB entries for 'mm': temporarily load its context register,
 * probe-flush, then restore the previous context (saved in %g5).
 */
static void cypress_flush_tlb_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	__asm__ __volatile__(
	"lda	[%0] %3, %%g5\n\t"
	"sta	%2, [%0] %3\n\t"
	"sta	%%g0, [%1] %4\n\t"
	"sta	%%g5, [%0] %3\n"
	: /* no outputs */
	: "r" (SRMMU_CTX_REG), "r" (0x300), "r" (mm->context),
	  "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE)
	: "g5");
	FLUSH_END
}

/* Probe-flush TLB entries covering [start, end), one pgdir at a time. */
static void cypress_flush_tlb_range(struct vm_area_struct *vma,
				    unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long size;

	FLUSH_BEGIN(mm)
	start &= SRMMU_PGDIR_MASK;
	size = SRMMU_PGDIR_ALIGN(end) - start;
	__asm__ __volatile__(
		"lda	[%0] %5, %%g5\n\t"
		"sta	%1, [%0] %5\n"
	"1:\n\t"
		"subcc	%3, %4, %3\n\t"
		"bne	1b\n\t"
		" sta	%%g0, [%2 + %3] %6\n\t"
		"sta	%%g5, [%0] %5\n"
	: /* no outputs */
	: "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (start | 0x200),
	  "r" (size), "r" (SRMMU_PGDIR_SIZE), "i" (ASI_M_MMUREGS),
	  "i" (ASI_M_FLUSH_PROBE)
	: "g5", "cc");
	FLUSH_END
}

/* Probe-flush the TLB entry covering one page in 'vma's mm. */
static void cypress_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;

	FLUSH_BEGIN(mm)
	__asm__ __volatile__(
	"lda	[%0] %3, %%g5\n\t"
	"sta	%1, [%0] %3\n\t"
	"sta	%%g0, [%2] %4\n\t"
	"sta	%%g5, [%0] %3\n"
	: /* no outputs */
	: "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (page & PAGE_MASK),
	  "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE)
	: "g5");
	FLUSH_END
}

/* viking.S */
extern void viking_flush_cache_all(void);
extern void viking_flush_cache_mm(struct mm_struct *mm);
extern void viking_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
				     unsigned long end);
extern void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void viking_flush_page_to_ram(unsigned long page);
extern void viking_flush_page_for_dma(unsigned long page);
extern void viking_flush_sig_insns(struct mm_struct *mm, unsigned long addr);
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);
extern void viking_flush_tlb_all(void);
extern void viking_flush_tlb_mm(struct mm_struct *mm);
extern void viking_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
				   unsigned long end);
extern void viking_flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long page);
extern void sun4dsmp_flush_tlb_all(void);
extern void sun4dsmp_flush_tlb_mm(struct mm_struct *mm);
extern void sun4dsmp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
				     unsigned long end);
extern void sun4dsmp_flush_tlb_page(struct vm_area_struct *vma,
				    unsigned long page);

/* hypersparc.S */
extern void hypersparc_flush_cache_all(void);
extern void hypersparc_flush_cache_mm(struct mm_struct *mm);
extern void hypersparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_flush_page_to_ram(unsigned long page);
extern
void hypersparc_flush_page_for_dma(unsigned long page);
extern void hypersparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void hypersparc_flush_tlb_all(void);
extern void hypersparc_flush_tlb_mm(struct mm_struct *mm);
extern void hypersparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_setup_blockops(void);

/*
 * NOTE: All of this startup code assumes the low 16mb (approx.) of
 *       kernel mappings are done with one single contiguous chunk of
 *       ram.  On small ram machines (classics mainly) we only get
 *       around 8mb mapped for us.
 */

/* Fatal: report a failed early page-table allocation and halt via PROM. */
static void __init early_pgtable_allocfail(char *type)
{
	prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
	prom_halt();
}

/*
 * Populate the pgd/pmd/pte skeleton for [start, end) during early boot,
 * while the nocache area must still be touched through __nocache_fix().
 */
static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
							unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	while(start < end) {
		pgdp = pgd_offset_k(start);
		if(srmmu_pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
			pmdp = (pmd_t *) __srmmu_get_nocache(
			    SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
			srmmu_pgd_set(__nocache_fix(pgdp), pmdp);
		}
		pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start);
		if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
			ptep = (pte_t *)__srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(__nocache_fix(ptep), 0, PTE_SIZE);
			srmmu_pmd_set(__nocache_fix(pmdp), ptep);
		}
		/* Guard against address wrap-around at the 4GB boundary. */
		if (start > (0xffffffffUL - PMD_SIZE))
			break;
		start = (start + PMD_SIZE) & PMD_MASK;
	}
}

/* As above, but usable once the nocache area is directly addressable. */
static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
						  unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	while(start < end) {
		pgdp = pgd_offset_k(start);
		if(srmmu_pgd_none(*pgdp)) {
			pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE);
			srmmu_pgd_set(pgdp, pmdp);
		}
		pmdp = srmmu_pmd_offset(pgdp, start);
		if(srmmu_pmd_none(*pmdp)) {
			ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE,
							     PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(ptep, 0, PTE_SIZE);
			srmmu_pmd_set(pmdp, ptep);
		}
		if (start > (0xffffffffUL - PMD_SIZE))
			break;
		start = (start + PMD_SIZE) & PMD_MASK;
	}
}

/*
 * This is much cleaner than poking around physical address space
 * looking at the prom's page table directly which is what most
 * other OS's do.  Yuck... this is much better.
 */
static void __init srmmu_inherit_prom_mappings(unsigned long start,
					       unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int what = 0; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */
	unsigned long prompte;

	while(start <= end) {
		if (start == 0)
			break; /* probably wrap around */
		if(start == 0xfef00000)
			start = KADB_DEBUGGER_BEGVM;
		/* Only copy mappings the PROM actually has installed. */
		if(!(prompte = srmmu_hwprobe(start))) {
			start += PAGE_SIZE;
			continue;
		}

		/* A red snapper, see what it really is.
		 */
		what = 0;

		/* Same PTE at segment/region granularity => a large mapping. */
		if(!(start & ~(SRMMU_REAL_PMD_MASK))) {
			if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_REAL_PMD_SIZE) == prompte)
				what = 1;
		}

		if(!(start & ~(SRMMU_PGDIR_MASK))) {
			if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_PGDIR_SIZE) ==
			   prompte)
				what = 2;
		}

		pgdp = pgd_offset_k(start);
		if(what == 2) {
			*(pgd_t *)__nocache_fix(pgdp) = __pgd(prompte);
			start += SRMMU_PGDIR_SIZE;
			continue;
		}
		if(srmmu_pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
			pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
			srmmu_pgd_set(__nocache_fix(pgdp), pmdp);
		}
		pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start);
		if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
			ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE,
							     PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(__nocache_fix(ptep), 0, PTE_SIZE);
			srmmu_pmd_set(__nocache_fix(pmdp), ptep);
		}
		if(what == 1) {
			/*
			 * We bend the rule where all 16 PTPs in a pmd_t point
			 * inside the same PTE page, and we leak a perfectly
			 * good hardware PTE piece. Alternatives seem worse.
			 */
			unsigned int x;	/* Index of HW PMD in soft cluster */
			x = (start >> PMD_SHIFT) & 15;
			*(unsigned long *)__nocache_fix(&pmdp->pmdv[x]) = prompte;
			start += SRMMU_REAL_PMD_SIZE;
			continue;
		}
		ptep = srmmu_pte_offset(__nocache_fix(pmdp), start);
		*(pte_t *)__nocache_fix(ptep) = __pte(prompte);
		start += PAGE_SIZE;
	}
}

#define KERNEL_PTE(page_shifted) ((page_shifted)|SRMMU_CACHE|SRMMU_PRIV|SRMMU_VALID)

/* Create a third-level SRMMU 16MB page mapping. */
static void __init do_large_mapping(unsigned long vaddr, unsigned long phys_base)
{
	pgd_t *pgdp = pgd_offset_k(vaddr);
	unsigned long big_pte;

	big_pte = KERNEL_PTE(phys_base >> 4);
	*(pgd_t *)__nocache_fix(pgdp) = __pgd(big_pte);
}

/* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE. */
static unsigned long __init map_spbank(unsigned long vbase, int sp_entry)
{
	unsigned long pstart = (sp_banks[sp_entry].base_addr & SRMMU_PGDIR_MASK);
	unsigned long vstart = (vbase & SRMMU_PGDIR_MASK);
	unsigned long vend = SRMMU_PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes);

	/* Map "low" memory only */
	const unsigned long min_vaddr = PAGE_OFFSET;
	const unsigned long max_vaddr = PAGE_OFFSET + SRMMU_MAXMEM;

	if (vstart < min_vaddr || vstart >= max_vaddr)
		return vstart;

	if (vend > max_vaddr || vend < min_vaddr)
		vend = max_vaddr;

	while(vstart < vend) {
		do_large_mapping(vstart, pstart);
		vstart += SRMMU_PGDIR_SIZE; pstart += SRMMU_PGDIR_SIZE;
	}
	return vstart;
}

/* Fatal: report a memory-probe problem and halt via the PROM. */
static inline void memprobe_error(char *msg)
{
	prom_printf(msg);
	prom_printf("Halting now...\n");
	prom_halt();
}

/* Map the kernel image (16MB large page) plus every physical sp_bank. */
static inline void map_kernel(void)
{
	int i;

	if (phys_base > 0) {
		do_large_mapping(PAGE_OFFSET, phys_base);
	}

	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
		map_spbank((unsigned long)__va(sp_banks[i].base_addr), i);
	}

	BTFIXUPSET_SIMM13(user_ptrs_per_pgd, PAGE_OFFSET / SRMMU_PGDIR_SIZE);
}

/* Paging initialization on the Sparc Reference MMU. */
extern void sparc_context_init(int);

/* Per-chip setup hook installed by the init_* routines below. */
void (*poke_srmmu)(void) __cpuinitdata = NULL;

extern unsigned long bootmem_init(unsigned long *pages_avail);

/*
 * Main SRMMU paging initialization: probe contexts, set up the nocache
 * area, inherit PROM mappings, build the context table and zone sizes.
 */
void __init srmmu_paging_init(void)
{
	int i;
	phandle cpunode;
	char node_str[128];
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long pages_avail;

	sparc_iomap.start = SUN4M_IOBASE_VADDR;	/* 16MB of IOSPACE on all sun4m's. */

	if (sparc_cpu_model == sun4d)
		num_contexts = 65536; /* We know it is Viking */
	else {
		/* Find the number of contexts on the srmmu.
		 */
		cpunode = prom_getchild(prom_root_node);
		num_contexts = 0;
		while(cpunode != 0) {
			prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
			if(!strcmp(node_str, "cpu")) {
				num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8);
				break;
			}
			cpunode = prom_getsibling(cpunode);
		}
	}

	if(!num_contexts) {
		prom_printf("Something wrong, can't find cpu node in paging_init.\n");
		prom_halt();
	}

	pages_avail = 0;
	last_valid_pfn = bootmem_init(&pages_avail);

	srmmu_nocache_calcsize();
	srmmu_nocache_init();
	srmmu_inherit_prom_mappings(0xfe400000,(LINUX_OPPROM_ENDVM-PAGE_SIZE));
	map_kernel();

	/* ctx table has to be physically aligned to its size */
	srmmu_context_table = (ctxd_t *)__srmmu_get_nocache(num_contexts*sizeof(ctxd_t), num_contexts*sizeof(ctxd_t));
	srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa((unsigned long)srmmu_context_table);

	/* Every context starts out pointing at the swapper's page tables. */
	for(i = 0; i < num_contexts; i++)
		srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir);

	flush_cache_all();
	srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys);
#ifdef CONFIG_SMP
	/* Stop from hanging here... */
	local_flush_tlb_all();
#else
	flush_tlb_all();
#endif
	poke_srmmu();

	srmmu_allocate_ptable_skeleton(sparc_iomap.start, IOBASE_END);
	srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);

	srmmu_allocate_ptable_skeleton(
		__fix_to_virt(__end_of_fixed_addresses - 1), FIXADDR_TOP);
	srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_END);

	/* Remember the kmap PTE table for the highmem code. */
	pgd = pgd_offset_k(PKMAP_BASE);
	pmd = srmmu_pmd_offset(pgd, PKMAP_BASE);
	pte = srmmu_pte_offset(pmd, PKMAP_BASE);
	pkmap_page_table = pte;

	flush_cache_all();
	flush_tlb_all();

	sparc_context_init(num_contexts);

	kmap_init();

	{
		unsigned long zones_size[MAX_NR_ZONES];
		unsigned long zholes_size[MAX_NR_ZONES];
		unsigned long npages;
		int znum;

		for (znum = 0; znum < MAX_NR_ZONES; znum++)
			zones_size[znum] = zholes_size[znum] = 0;

		npages = max_low_pfn - pfn_base;

		zones_size[ZONE_DMA] = npages;
		zholes_size[ZONE_DMA] = npages - pages_avail;

		npages = highend_pfn - max_low_pfn;
		zones_size[ZONE_HIGHMEM] = npages;
		zholes_size[ZONE_HIGHMEM] = npages - calc_highpages();

		free_area_init_node(0, zones_size, pfn_base, zholes_size);
	}
}

/* /proc/cpuinfo MMU section for the SRMMU. */
static void srmmu_mmu_info(struct seq_file *m)
{
	seq_printf(m,
		   "MMU type\t: %s\n"
		   "contexts\t: %d\n"
		   "nocache total\t: %ld\n"
		   "nocache used\t: %d\n",
		   srmmu_name,
		   num_contexts,
		   srmmu_nocache_size,
		   srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
}

/* Generic SRMMU needs no per-PTE MMU cache update. */
static void srmmu_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
}

/*
 * Release 'mm's MMU context: point its context entry back at the
 * swapper's pgd and return the context number to the free list.
 */
static void srmmu_destroy_context(struct mm_struct *mm)
{

	if(mm->context != NO_CONTEXT) {
		flush_cache_mm(mm);
		srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir);
		flush_tlb_mm(mm);
		spin_lock(&srmmu_context_spinlock);
		free_context(mm->context);
		spin_unlock(&srmmu_context_spinlock);
		mm->context = NO_CONTEXT;
	}
}

/* Init various srmmu chip types.
 */
/* Fallback when the chip type cannot be identified: halt via PROM. */
static void __init srmmu_is_bad(void)
{
	prom_printf("Could not determine SRMMU chip type.\n");
	prom_halt();
}

/*
 * Determine virtual-address-cache geometry from the PROM cpu nodes.
 * On SMP, keep the largest cache size and smallest line size seen.
 */
static void __init init_vac_layout(void)
{
	phandle nd;
	int cache_lines;
	char node_str[128];
#ifdef CONFIG_SMP
	int cpu = 0;
	unsigned long max_size = 0;
	unsigned long min_line_size = 0x10000000;
#endif

	nd = prom_getchild(prom_root_node);
	while((nd = prom_getsibling(nd)) != 0) {
		prom_getstring(nd, "device_type", node_str, sizeof(node_str));
		if(!strcmp(node_str, "cpu")) {
			vac_line_size = prom_getint(nd, "cache-line-size");
			if (vac_line_size == -1) {
				prom_printf("can't determine cache-line-size, "
					    "halting.\n");
				prom_halt();
			}
			cache_lines = prom_getint(nd, "cache-nlines");
			if (cache_lines == -1) {
				prom_printf("can't determine cache-nlines, halting.\n");
				prom_halt();
			}

			vac_cache_size = cache_lines * vac_line_size;
#ifdef CONFIG_SMP
			if(vac_cache_size > max_size)
				max_size = vac_cache_size;
			if(vac_line_size < min_line_size)
				min_line_size = vac_line_size;
			//FIXME: cpus not contiguous!!
			cpu++;
			if (cpu >= nr_cpu_ids || !cpu_online(cpu))
				break;
#else
			break;
#endif
		}
	}
	if(nd == 0) {
		prom_printf("No CPU nodes found, halting.\n");
		prom_halt();
	}
#ifdef CONFIG_SMP
	vac_cache_size = max_size;
	vac_line_size = min_line_size;
#endif
	printk("SRMMU: Using VAC size of %d bytes, line size %d bytes.\n",
	       (int)vac_cache_size, (int)vac_line_size);
}

/* Per-cpu HyperSparc setup: cache mode bits, ICCR, fault reg clears. */
static void __cpuinit poke_hypersparc(void)
{
	volatile unsigned long clear;
	unsigned long mreg = srmmu_get_mmureg();

	hyper_flush_unconditional_combined();

	mreg &= ~(HYPERSPARC_CWENABLE);
	mreg |= (HYPERSPARC_CENABLE | HYPERSPARC_WBENABLE);
	mreg |= (HYPERSPARC_CMODE);

	srmmu_set_mmureg(mreg);

#if 0 /* XXX I think this is bad news... -DaveM */
	hyper_clear_all_tags();
#endif

	put_ross_icr(HYPERSPARC_ICCR_FTD | HYPERSPARC_ICCR_ICE);
	hyper_flush_whole_icache();
	/* Dummy reads clear any latched fault address/status. */
	clear = srmmu_get_faddr();
	clear = srmmu_get_fstatus();
}

/* Install the HyperSparc flush methods and poke routine. */
static void __init init_hypersparc(void)
{
	srmmu_name = "ROSS HyperSparc";
	srmmu_modtype = HyperSparc;

	init_vac_layout();

	is_hypersparc = 1;

	BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_all, hypersparc_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, hypersparc_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, hypersparc_flush_cache_range, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, hypersparc_flush_cache_page, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, hypersparc_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, hypersparc_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, hypersparc_flush_tlb_range, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, hypersparc_flush_tlb_page, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, hypersparc_flush_page_to_ram, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_sig_insns, hypersparc_flush_sig_insns, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_page_for_dma, hypersparc_flush_page_for_dma, BTFIXUPCALL_NOP);

	poke_srmmu = poke_hypersparc;

	hypersparc_setup_blockops();
}

/*
 * Per-cpu Cypress setup: invalidate or write back the data cache tags
 * depending on whether the cache is currently enabled, then enable it.
 */
static void __cpuinit poke_cypress(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	unsigned long faddr, tagval;
	volatile unsigned long cypress_sucks;
	volatile unsigned long clear;

	clear = srmmu_get_faddr();
	clear = srmmu_get_fstatus();

	if (!(mreg & CYPRESS_CENABLE)) {
		for(faddr = 0x0; faddr < 0x10000; faddr += 20) {
			__asm__ __volatile__("sta %%g0, [%0 + %1] %2\n\t"
					     "sta %%g0, [%0] %2\n\t" : :
					     "r" (faddr), "r" (0x40000),
					     "i" (ASI_M_DATAC_TAG));
		}
	} else {
		for(faddr = 0; faddr < 0x10000; faddr += 0x20) {
			__asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" :
					     "=r" (tagval) :
					     "r" (faddr), "r" (0x40000),
					     "i" (ASI_M_DATAC_TAG));

			/* If modified and valid, kick it. */
			if((tagval & 0x60) == 0x60)
				cypress_sucks = *(unsigned long *)
							(0xf0020000 + faddr);
		}
	}

	/* And one more, for our good neighbor, Mr. Broken Cypress. */
	clear = srmmu_get_faddr();
	clear = srmmu_get_fstatus();

	mreg |= (CYPRESS_CENABLE | CYPRESS_CMODE);
	srmmu_set_mmureg(mreg);
}

/* Shared setup for all Cypress variants. */
static void __init init_cypress_common(void)
{
	init_vac_layout();

	BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_all, cypress_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, cypress_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, cypress_flush_cache_range, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, cypress_flush_cache_page, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, cypress_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, cypress_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, cypress_flush_tlb_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, cypress_flush_tlb_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, cypress_flush_page_to_ram, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_sig_insns, cypress_flush_sig_insns, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_page_for_dma, cypress_flush_page_for_dma, BTFIXUPCALL_NOP);

	poke_srmmu = poke_cypress;
}

/* Uniprocessor Cypress-604. */
static void __init init_cypress_604(void)
{
	srmmu_name = "ROSS Cypress-604(UP)";
	srmmu_modtype = Cypress;
	init_cypress_common();
}

/* Multiprocessor Cypress-605; record known hardware bugs per revision. */
static void __init init_cypress_605(unsigned long mrev)
{
	srmmu_name = "ROSS Cypress-605(MP)";
	if(mrev == 0xe) {
		srmmu_modtype = Cypress_vE;
		hwbug_bitmask |= HWBUG_COPYBACK_BROKEN;
	} else {
		if(mrev == 0xd) {
			srmmu_modtype = Cypress_vD;
			hwbug_bitmask |= HWBUG_ASIFLUSH_BROKEN;
		} else {
			srmmu_modtype = Cypress;
		}
	}
	init_cypress_common();
}

/* Per-cpu Swift setup: enable I/D caches, disable broken branch folding. */
static void __cpuinit poke_swift(void)
{
	unsigned long mreg;

	/* Clear any crap from the cache or else... */
	swift_flush_cache_all();

	/* Enable I & D caches */
	mreg = srmmu_get_mmureg();
	mreg |= (SWIFT_IE | SWIFT_DE);
	/*
	 * The Swift branch folding logic is completely broken.  At
	 * trap time, if things are just right, if can mistakenly
	 * think that a trap is coming from kernel mode when in fact
	 * it is coming from user mode (it mis-executes the branch in
	 * the trap code).  So you see things like crashme completely
	 * hosing your machine which is completely unacceptable.  Turn
	 * this shit off... nice job Fujitsu.
	 */
	mreg &= ~(SWIFT_BF);
	srmmu_set_mmureg(mreg);
}

#define SWIFT_MASKID_ADDR 0x10003018

/* Read the Swift mask revision, record its known bugs, install methods. */
static void __init init_swift(void)
{
	unsigned long swift_rev;

	__asm__ __volatile__("lda [%1] %2, %0\n\t"
			     "srl %0, 0x18, %0\n\t" :
			     "=r" (swift_rev) :
			     "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS));
	srmmu_name = "Fujitsu Swift";
	switch(swift_rev) {
	case 0x11:
	case 0x20:
	case 0x23:
	case 0x30:
		srmmu_modtype = Swift_lots_o_bugs;
		hwbug_bitmask |= (HWBUG_KERN_ACCBROKEN | HWBUG_KERN_CBITBROKEN);
		/*
		 * Gee george, I wonder why Sun is so hush hush about
		 * this hardware bug... really braindamage stuff going
		 * on here.  However I think we can find a way to avoid
		 * all of the workaround overhead under Linux.  Basically,
		 * any page fault can cause kernel pages to become user
		 * accessible (the mmu gets confused and clears some of
		 * the ACC bits in kernel ptes).  Aha, sounds pretty
		 * horrible eh?  But wait, after extensive testing it appears
		 * that if you use pgd_t level large kernel pte's (like the
		 * 4MB pages on the Pentium) the bug does not get tripped
		 * at all.  This avoids almost all of the major overhead.
		 * Welcome to a world where your vendor tells you to,
		 * "apply this kernel patch" instead of "sorry for the
		 * broken hardware, send it back and we'll give you
		 * properly functioning parts"
		 */
		break;
	case 0x25:
	case 0x31:
		srmmu_modtype = Swift_bad_c;
		hwbug_bitmask |= HWBUG_KERN_CBITBROKEN;
		/*
		 * You see Sun allude to this hardware bug but never
		 * admit things directly, they'll say things like,
		 * "the Swift chip cache problems" or similar.
		 */
		break;
	default:
		srmmu_modtype = Swift_ok;
		break;
	}

	BTFIXUPSET_CALL(flush_cache_all, swift_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, swift_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, swift_flush_cache_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, swift_flush_cache_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, swift_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, swift_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, swift_flush_tlb_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, swift_flush_tlb_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, swift_flush_page_to_ram, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_sig_insns, swift_flush_sig_insns, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_page_for_dma, swift_flush_page_for_dma, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(update_mmu_cache, swift_update_mmu_cache, BTFIXUPCALL_NORM);

	flush_page_for_dma_global = 0;

	/*
	 * Are you now convinced that the Swift is one of the
	 * biggest VLSI abortions of all time?  Bravo Fujitsu!
	 * Fujitsu, the !#?!%$'d up processor people.  I bet if
	 * you examined the microcode of the Swift you'd find
	 * XXX's all over the place.
	 */
	poke_srmmu = poke_swift;
}

/* TurboSparc flushes: the combined I/D flash-clear does the whole job. */
static void turbosparc_flush_cache_all(void)
{
	flush_user_windows();
	turbosparc_idflash_clear();
}

static void turbosparc_flush_cache_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	flush_user_windows();
	turbosparc_idflash_clear();
	FLUSH_END
}

static void turbosparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	FLUSH_BEGIN(vma->vm_mm)
	flush_user_windows();
	turbosparc_idflash_clear();
	FLUSH_END
}

/* Only flush the icache when the page may hold executable code. */
static void turbosparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	FLUSH_BEGIN(vma->vm_mm)
	flush_user_windows();
	if (vma->vm_flags & VM_EXEC)
		turbosparc_flush_icache();
	turbosparc_flush_dcache();
	FLUSH_END
}

/* TurboSparc is copy-back, if we turn it on, but this does not work. */
static void turbosparc_flush_page_to_ram(unsigned long page)
{
#ifdef TURBOSPARC_WRITEBACK
	volatile unsigned long clear;

	if (srmmu_hwprobe(page))
		turbosparc_flush_page_cache(page);
	clear = srmmu_get_fstatus();
#endif
}

static void turbosparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
}

static void turbosparc_flush_page_for_dma(unsigned long page)
{
	turbosparc_flush_dcache();
}

static void turbosparc_flush_tlb_all(void)
{
	srmmu_flush_whole_tlb();
}

static void turbosparc_flush_tlb_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}

static void turbosparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	FLUSH_BEGIN(vma->vm_mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}

static void turbosparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	FLUSH_BEGIN(vma->vm_mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}

/* Per-cpu TurboSparc setup: program the cache control register. */
static void __cpuinit poke_turbosparc(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	unsigned long ccreg;

	/* Clear any crap from the cache or else... */
	turbosparc_flush_cache_all();
	mreg &= ~(TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* Temporarily disable I & D caches */
	mreg &= ~(TURBOSPARC_PCENABLE);		/* Don't check parity */
	srmmu_set_mmureg(mreg);

	ccreg = turbosparc_get_ccreg();

#ifdef TURBOSPARC_WRITEBACK
	ccreg |= (TURBOSPARC_SNENABLE);		/* Do DVMA snooping in Dcache */
	ccreg &= ~(TURBOSPARC_uS2 | TURBOSPARC_WTENABLE);
			/* Write-back D-cache, emulate VLSI
			 * abortion number three, not number one */
#else
	/* For now let's play safe, optimize later */
	ccreg |= (TURBOSPARC_SNENABLE | TURBOSPARC_WTENABLE);
			/* Do DVMA snooping in Dcache, Write-thru D-cache */
	ccreg &= ~(TURBOSPARC_uS2);
			/* Emulate VLSI abortion number three, not number one */
#endif

	switch (ccreg & 7) {
	case 0: /* No SE cache */
	case 7: /* Test mode */
		break;
	default:
		ccreg |= (TURBOSPARC_SCENABLE);
	}
	turbosparc_set_ccreg (ccreg);

	mreg |= (TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* I & D caches on */
	mreg |= (TURBOSPARC_ICSNOOP);		/* Icache snooping on */

	srmmu_set_mmureg(mreg);
}

/* Install the TurboSparc flush methods and poke routine. */
static void __init init_turbosparc(void)
{
	srmmu_name = "Fujitsu TurboSparc";
	srmmu_modtype = TurboSparc;

	BTFIXUPSET_CALL(flush_cache_all, turbosparc_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, turbosparc_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, turbosparc_flush_cache_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, turbosparc_flush_cache_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, turbosparc_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, turbosparc_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, turbosparc_flush_tlb_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, turbosparc_flush_tlb_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, turbosparc_flush_page_to_ram, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_sig_insns, turbosparc_flush_sig_insns, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_page_for_dma, turbosparc_flush_page_for_dma,
BTFIXUPCALL_NORM); poke_srmmu = poke_turbosparc; } static void __cpuinit poke_tsunami(void) { unsigned long mreg = srmmu_get_mmureg(); tsunami_flush_icache(); tsunami_flush_dcache(); mreg &= ~TSUNAMI_ITD; mreg |= (TSUNAMI_IENAB | TSUNAMI_DENAB); srmmu_set_mmureg(mreg); } static void __init init_tsunami(void) { /* * Tsunami's pretty sane, Sun and TI actually got it * somewhat right this time. Fujitsu should have * taken some lessons from them. */ srmmu_name = "TI Tsunami"; srmmu_modtype = Tsunami; BTFIXUPSET_CALL(flush_cache_all, tsunami_flush_cache_all, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_cache_mm, tsunami_flush_cache_mm, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_cache_page, tsunami_flush_cache_page, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_cache_range, tsunami_flush_cache_range, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_tlb_all, tsunami_flush_tlb_all, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_tlb_mm, tsunami_flush_tlb_mm, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_tlb_page, tsunami_flush_tlb_page, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_tlb_range, tsunami_flush_tlb_range, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(__flush_page_to_ram, tsunami_flush_page_to_ram, BTFIXUPCALL_NOP); BTFIXUPSET_CALL(flush_sig_insns, tsunami_flush_sig_insns, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_page_for_dma, tsunami_flush_page_for_dma, BTFIXUPCALL_NORM); poke_srmmu = poke_tsunami; tsunami_setup_blockops(); } static void __cpuinit poke_viking(void) { unsigned long mreg = srmmu_get_mmureg(); static int smp_catch; if(viking_mxcc_present) { unsigned long mxcc_control = mxcc_get_creg(); mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE); mxcc_control &= ~(MXCC_CTL_RRC); mxcc_set_creg(mxcc_control); /* * We don't need memory parity checks. * XXX This is a mess, have to dig out later. ecd. viking_mxcc_turn_off_parity(&mreg, &mxcc_control); */ /* We do cache ptables on MXCC. 
*/ mreg |= VIKING_TCENABLE; } else { unsigned long bpreg; mreg &= ~(VIKING_TCENABLE); if(smp_catch++) { /* Must disable mixed-cmd mode here for other cpu's. */ bpreg = viking_get_bpreg(); bpreg &= ~(VIKING_ACTION_MIX); viking_set_bpreg(bpreg); /* Just in case PROM does something funny. */ msi_set_sync(); } } mreg |= VIKING_SPENABLE; mreg |= (VIKING_ICENABLE | VIKING_DCENABLE); mreg |= VIKING_SBENABLE; mreg &= ~(VIKING_ACENABLE); srmmu_set_mmureg(mreg); } static void __init init_viking(void) { unsigned long mreg = srmmu_get_mmureg(); /* Ahhh, the viking. SRMMU VLSI abortion number two... */ if(mreg & VIKING_MMODE) { srmmu_name = "TI Viking"; viking_mxcc_present = 0; msi_set_sync(); BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM); /* * We need this to make sure old viking takes no hits * on it's cache for dma snoops to workaround the * "load from non-cacheable memory" interrupt bug. * This is only necessary because of the new way in * which we use the IOMMU. */ BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page, BTFIXUPCALL_NORM); flush_page_for_dma_global = 0; } else { srmmu_name = "TI Viking/MXCC"; viking_mxcc_present = 1; srmmu_cache_pagetables = 1; /* MXCC vikings lack the DMA snooping bug. 
*/ BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page_for_dma, BTFIXUPCALL_NOP); } BTFIXUPSET_CALL(flush_cache_all, viking_flush_cache_all, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_cache_mm, viking_flush_cache_mm, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_cache_page, viking_flush_cache_page, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_cache_range, viking_flush_cache_range, BTFIXUPCALL_NORM); #ifdef CONFIG_SMP if (sparc_cpu_model == sun4d) { BTFIXUPSET_CALL(flush_tlb_all, sun4dsmp_flush_tlb_all, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_tlb_mm, sun4dsmp_flush_tlb_mm, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_tlb_page, sun4dsmp_flush_tlb_page, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_tlb_range, sun4dsmp_flush_tlb_range, BTFIXUPCALL_NORM); } else #endif { BTFIXUPSET_CALL(flush_tlb_all, viking_flush_tlb_all, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_tlb_mm, viking_flush_tlb_mm, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_tlb_page, viking_flush_tlb_page, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_tlb_range, viking_flush_tlb_range, BTFIXUPCALL_NORM); } BTFIXUPSET_CALL(__flush_page_to_ram, viking_flush_page_to_ram, BTFIXUPCALL_NOP); BTFIXUPSET_CALL(flush_sig_insns, viking_flush_sig_insns, BTFIXUPCALL_NOP); poke_srmmu = poke_viking; } #ifdef CONFIG_SPARC_LEON void __init poke_leonsparc(void) { } void __init init_leon(void) { srmmu_name = "LEON"; BTFIXUPSET_CALL(flush_cache_all, leon_flush_cache_all, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_cache_mm, leon_flush_cache_all, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_cache_page, leon_flush_pcache_all, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_cache_range, leon_flush_cache_all, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_page_for_dma, leon_flush_dcache_all, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_tlb_all, leon_flush_tlb_all, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_tlb_mm, leon_flush_tlb_all, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_tlb_page, leon_flush_tlb_all, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_tlb_range, leon_flush_tlb_all, 
BTFIXUPCALL_NORM); BTFIXUPSET_CALL(__flush_page_to_ram, leon_flush_cache_all, BTFIXUPCALL_NOP); BTFIXUPSET_CALL(flush_sig_insns, leon_flush_cache_all, BTFIXUPCALL_NOP); poke_srmmu = poke_leonsparc; srmmu_cache_pagetables = 0; leon_flush_during_switch = leon_flush_needed(); } #endif /* Probe for the srmmu chip version. */ static void __init get_srmmu_type(void) { unsigned long mreg, psr; unsigned long mod_typ, mod_rev, psr_typ, psr_vers; srmmu_modtype = SRMMU_INVAL_MOD; hwbug_bitmask = 0; mreg = srmmu_get_mmureg(); psr = get_psr(); mod_typ = (mreg & 0xf0000000) >> 28; mod_rev = (mreg & 0x0f000000) >> 24; psr_typ = (psr >> 28) & 0xf; psr_vers = (psr >> 24) & 0xf; /* First, check for sparc-leon. */ if (sparc_cpu_model == sparc_leon) { init_leon(); return; } /* Second, check for HyperSparc or Cypress. */ if(mod_typ == 1) { switch(mod_rev) { case 7: /* UP or MP Hypersparc */ init_hypersparc(); break; case 0: case 2: /* Uniprocessor Cypress */ init_cypress_604(); break; case 10: case 11: case 12: /* _REALLY OLD_ Cypress MP chips... */ case 13: case 14: case 15: /* MP Cypress mmu/cache-controller */ init_cypress_605(mod_rev); break; default: /* Some other Cypress revision, assume a 605. */ init_cypress_605(mod_rev); break; } return; } /* * Now Fujitsu TurboSparc. It might happen that it is * in Swift emulation mode, so we will check later... */ if (psr_typ == 0 && psr_vers == 5) { init_turbosparc(); return; } /* Next check for Fujitsu Swift. */ if(psr_typ == 0 && psr_vers == 4) { phandle cpunode; char node_str[128]; /* Look if it is not a TurboSparc emulating Swift... */ cpunode = prom_getchild(prom_root_node); while((cpunode = prom_getsibling(cpunode)) != 0) { prom_getstring(cpunode, "device_type", node_str, sizeof(node_str)); if(!strcmp(node_str, "cpu")) { if (!prom_getintdefault(cpunode, "psr-implementation", 1) && prom_getintdefault(cpunode, "psr-version", 1) == 5) { init_turbosparc(); return; } break; } } init_swift(); return; } /* Now the Viking family of srmmu. 
*/ if(psr_typ == 4 && ((psr_vers == 0) || ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) { init_viking(); return; } /* Finally the Tsunami. */ if(psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) { init_tsunami(); return; } /* Oh well */ srmmu_is_bad(); } /* don't laugh, static pagetables */ static void srmmu_check_pgt_cache(int low, int high) { } extern unsigned long spwin_mmu_patchme, fwin_mmu_patchme, tsetup_mmu_patchme, rtrap_mmu_patchme; extern unsigned long spwin_srmmu_stackchk, srmmu_fwin_stackchk, tsetup_srmmu_stackchk, srmmu_rett_stackchk; extern unsigned long srmmu_fault; #define PATCH_BRANCH(insn, dest) do { \ iaddr = &(insn); \ daddr = &(dest); \ *iaddr = SPARC_BRANCH((unsigned long) daddr, (unsigned long) iaddr); \ } while(0) static void __init patch_window_trap_handlers(void) { unsigned long *iaddr, *daddr; PATCH_BRANCH(spwin_mmu_patchme, spwin_srmmu_stackchk); PATCH_BRANCH(fwin_mmu_patchme, srmmu_fwin_stackchk); PATCH_BRANCH(tsetup_mmu_patchme, tsetup_srmmu_stackchk); PATCH_BRANCH(rtrap_mmu_patchme, srmmu_rett_stackchk); PATCH_BRANCH(sparc_ttable[SP_TRAP_TFLT].inst_three, srmmu_fault); PATCH_BRANCH(sparc_ttable[SP_TRAP_DFLT].inst_three, srmmu_fault); PATCH_BRANCH(sparc_ttable[SP_TRAP_DACC].inst_three, srmmu_fault); } #ifdef CONFIG_SMP /* Local cross-calls. 
*/ static void smp_flush_page_for_dma(unsigned long page) { xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_for_dma), page); local_flush_page_for_dma(page); } #endif static pte_t srmmu_pgoff_to_pte(unsigned long pgoff) { return __pte((pgoff << SRMMU_PTE_FILE_SHIFT) | SRMMU_FILE); } static unsigned long srmmu_pte_to_pgoff(pte_t pte) { return pte_val(pte) >> SRMMU_PTE_FILE_SHIFT; } static pgprot_t srmmu_pgprot_noncached(pgprot_t prot) { prot &= ~__pgprot(SRMMU_CACHE); return prot; } /* Load up routines and constants for sun4m and sun4d mmu */ void __init ld_mmu_srmmu(void) { extern void ld_mmu_iommu(void); extern void ld_mmu_iounit(void); extern void ___xchg32_sun4md(void); BTFIXUPSET_SIMM13(pgdir_shift, SRMMU_PGDIR_SHIFT); BTFIXUPSET_SETHI(pgdir_size, SRMMU_PGDIR_SIZE); BTFIXUPSET_SETHI(pgdir_mask, SRMMU_PGDIR_MASK); BTFIXUPSET_SIMM13(ptrs_per_pmd, SRMMU_PTRS_PER_PMD); BTFIXUPSET_SIMM13(ptrs_per_pgd, SRMMU_PTRS_PER_PGD); BTFIXUPSET_INT(page_none, pgprot_val(SRMMU_PAGE_NONE)); PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED); BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY)); BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY)); BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL)); page_kernel = pgprot_val(SRMMU_PAGE_KERNEL); /* Functions */ BTFIXUPSET_CALL(pgprot_noncached, srmmu_pgprot_noncached, BTFIXUPCALL_NORM); #ifndef CONFIG_SMP BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4md, BTFIXUPCALL_SWAPG1G2); #endif BTFIXUPSET_CALL(do_check_pgt_cache, srmmu_check_pgt_cache, BTFIXUPCALL_NOP); BTFIXUPSET_CALL(set_pte, srmmu_set_pte, BTFIXUPCALL_SWAPO0O1); BTFIXUPSET_CALL(switch_mm, srmmu_switch_mm, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pte_pfn, srmmu_pte_pfn, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pmd_page, srmmu_pmd_page, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pgd_page_vaddr, srmmu_pgd_page, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pte_present, srmmu_pte_present, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_SWAPO0G0); BTFIXUPSET_CALL(pmd_bad, 
srmmu_pmd_bad, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pmd_present, srmmu_pmd_present, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_SWAPO0G0); BTFIXUPSET_CALL(pgd_none, srmmu_pgd_none, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pgd_bad, srmmu_pgd_bad, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pgd_present, srmmu_pgd_present, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_SWAPO0G0); BTFIXUPSET_CALL(mk_pte, srmmu_mk_pte, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(mk_pte_phys, srmmu_mk_pte_phys, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(mk_pte_io, srmmu_mk_pte_io, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pgd_set, srmmu_pgd_set, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pmd_set, srmmu_pmd_set, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pmd_populate, srmmu_pmd_populate, BTFIXUPCALL_NORM); BTFIXUPSET_INT(pte_modify_mask, SRMMU_CHG_MASK); BTFIXUPSET_CALL(pmd_offset, srmmu_pmd_offset, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pte_offset_kernel, srmmu_pte_offset, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(free_pte_fast, srmmu_free_pte_fast, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pte_free, srmmu_pte_free, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pte_alloc_one_kernel, srmmu_pte_alloc_one_kernel, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pte_alloc_one, srmmu_pte_alloc_one, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(free_pmd_fast, srmmu_pmd_free, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pmd_alloc_one, srmmu_pmd_alloc_one, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(free_pgd_fast, srmmu_free_pgd_fast, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(get_pgd_fast, srmmu_get_pgd_fast, BTFIXUPCALL_NORM); BTFIXUPSET_HALF(pte_writei, SRMMU_WRITE); BTFIXUPSET_HALF(pte_dirtyi, SRMMU_DIRTY); BTFIXUPSET_HALF(pte_youngi, SRMMU_REF); BTFIXUPSET_HALF(pte_filei, SRMMU_FILE); BTFIXUPSET_HALF(pte_wrprotecti, SRMMU_WRITE); BTFIXUPSET_HALF(pte_mkcleani, SRMMU_DIRTY); BTFIXUPSET_HALF(pte_mkoldi, SRMMU_REF); BTFIXUPSET_CALL(pte_mkwrite, srmmu_pte_mkwrite, BTFIXUPCALL_ORINT(SRMMU_WRITE)); BTFIXUPSET_CALL(pte_mkdirty, srmmu_pte_mkdirty, 
BTFIXUPCALL_ORINT(SRMMU_DIRTY)); BTFIXUPSET_CALL(pte_mkyoung, srmmu_pte_mkyoung, BTFIXUPCALL_ORINT(SRMMU_REF)); BTFIXUPSET_CALL(update_mmu_cache, srmmu_update_mmu_cache, BTFIXUPCALL_NOP); BTFIXUPSET_CALL(destroy_context, srmmu_destroy_context, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(sparc_mapiorange, srmmu_mapiorange, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(sparc_unmapiorange, srmmu_unmapiorange, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(__swp_type, srmmu_swp_type, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(__swp_offset, srmmu_swp_offset, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(__swp_entry, srmmu_swp_entry, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(mmu_info, srmmu_mmu_info, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(alloc_thread_info_node, srmmu_alloc_thread_info_node, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(free_thread_info, srmmu_free_thread_info, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pte_to_pgoff, srmmu_pte_to_pgoff, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(pgoff_to_pte, srmmu_pgoff_to_pte, BTFIXUPCALL_NORM); get_srmmu_type(); patch_window_trap_handlers(); #ifdef CONFIG_SMP /* El switcheroo... 
*/ BTFIXUPCOPY_CALL(local_flush_cache_all, flush_cache_all); BTFIXUPCOPY_CALL(local_flush_cache_mm, flush_cache_mm); BTFIXUPCOPY_CALL(local_flush_cache_range, flush_cache_range); BTFIXUPCOPY_CALL(local_flush_cache_page, flush_cache_page); BTFIXUPCOPY_CALL(local_flush_tlb_all, flush_tlb_all); BTFIXUPCOPY_CALL(local_flush_tlb_mm, flush_tlb_mm); BTFIXUPCOPY_CALL(local_flush_tlb_range, flush_tlb_range); BTFIXUPCOPY_CALL(local_flush_tlb_page, flush_tlb_page); BTFIXUPCOPY_CALL(local_flush_page_to_ram, __flush_page_to_ram); BTFIXUPCOPY_CALL(local_flush_sig_insns, flush_sig_insns); BTFIXUPCOPY_CALL(local_flush_page_for_dma, flush_page_for_dma); BTFIXUPSET_CALL(flush_cache_all, smp_flush_cache_all, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_cache_mm, smp_flush_cache_mm, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_cache_range, smp_flush_cache_range, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_cache_page, smp_flush_cache_page, BTFIXUPCALL_NORM); if (sparc_cpu_model != sun4d && sparc_cpu_model != sparc_leon) { BTFIXUPSET_CALL(flush_tlb_all, smp_flush_tlb_all, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_tlb_mm, smp_flush_tlb_mm, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_tlb_range, smp_flush_tlb_range, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_tlb_page, smp_flush_tlb_page, BTFIXUPCALL_NORM); } BTFIXUPSET_CALL(__flush_page_to_ram, smp_flush_page_to_ram, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_sig_insns, smp_flush_sig_insns, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_page_for_dma, smp_flush_page_for_dma, BTFIXUPCALL_NORM); if (poke_srmmu == poke_viking) { /* Avoid unnecessary cross calls. 
*/ BTFIXUPCOPY_CALL(flush_cache_all, local_flush_cache_all); BTFIXUPCOPY_CALL(flush_cache_mm, local_flush_cache_mm); BTFIXUPCOPY_CALL(flush_cache_range, local_flush_cache_range); BTFIXUPCOPY_CALL(flush_cache_page, local_flush_cache_page); BTFIXUPCOPY_CALL(__flush_page_to_ram, local_flush_page_to_ram); BTFIXUPCOPY_CALL(flush_sig_insns, local_flush_sig_insns); BTFIXUPCOPY_CALL(flush_page_for_dma, local_flush_page_for_dma); } #endif if (sparc_cpu_model == sun4d) ld_mmu_iounit(); else ld_mmu_iommu(); #ifdef CONFIG_SMP if (sparc_cpu_model == sun4d) sun4d_init_smp(); else if (sparc_cpu_model == sparc_leon) leon_init_smp(); else sun4m_init_smp(); #endif }
gpl-2.0
sistux/lge-kernel-omap4
drivers/scsi/arm/eesox.c
8130
17113
/* * linux/drivers/acorn/scsi/eesox.c * * Copyright (C) 1997-2005 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This driver is based on experimentation. Hence, it may have made * assumptions about the particular card that I have available, and * may not be reliable! * * Changelog: * 01-10-1997 RMK Created, READONLY version * 15-02-1998 RMK READ/WRITE version * added DMA support and hardware definitions * 14-03-1998 RMK Updated DMA support * Added terminator control * 15-04-1998 RMK Only do PIO if FAS216 will allow it. * 27-06-1998 RMK Changed asm/delay.h to linux/delay.h * 02-04-2000 RMK 0.0.3 Fixed NO_IRQ/NO_DMA problem, updated for new * error handling code. */ #include <linux/module.h> #include <linux/blkdev.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/ioport.h> #include <linux/proc_fs.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/dma-mapping.h> #include <asm/io.h> #include <asm/dma.h> #include <asm/ecard.h> #include <asm/pgtable.h> #include "../scsi.h" #include <scsi/scsi_host.h> #include "fas216.h" #include "scsi.h" #include <scsi/scsicam.h> #define EESOX_FAS216_OFFSET 0x3000 #define EESOX_FAS216_SHIFT 5 #define EESOX_DMASTAT 0x2800 #define EESOX_STAT_INTR 0x01 #define EESOX_STAT_DMA 0x02 #define EESOX_CONTROL 0x2800 #define EESOX_INTR_ENABLE 0x04 #define EESOX_TERM_ENABLE 0x02 #define EESOX_RESET 0x01 #define EESOX_DMADATA 0x3800 #define VERSION "1.10 (17/01/2003 2.5.59)" /* * Use term=0,1,0,0,0 to turn terminators on/off */ static int term[MAX_ECARDS] = { 1, 1, 1, 1, 1, 1, 1, 1 }; #define NR_SG 256 struct eesoxscsi_info { FAS216_Info info; struct expansion_card *ec; void __iomem *base; void __iomem *ctl_port; unsigned int control; struct scatterlist sg[NR_SG]; /* Scatter DMA list */ }; /* Prototype: void eesoxscsi_irqenable(ec, 
irqnr) * Purpose : Enable interrupts on EESOX SCSI card * Params : ec - expansion card structure * : irqnr - interrupt number */ static void eesoxscsi_irqenable(struct expansion_card *ec, int irqnr) { struct eesoxscsi_info *info = (struct eesoxscsi_info *)ec->irq_data; info->control |= EESOX_INTR_ENABLE; writeb(info->control, info->ctl_port); } /* Prototype: void eesoxscsi_irqdisable(ec, irqnr) * Purpose : Disable interrupts on EESOX SCSI card * Params : ec - expansion card structure * : irqnr - interrupt number */ static void eesoxscsi_irqdisable(struct expansion_card *ec, int irqnr) { struct eesoxscsi_info *info = (struct eesoxscsi_info *)ec->irq_data; info->control &= ~EESOX_INTR_ENABLE; writeb(info->control, info->ctl_port); } static const expansioncard_ops_t eesoxscsi_ops = { .irqenable = eesoxscsi_irqenable, .irqdisable = eesoxscsi_irqdisable, }; /* Prototype: void eesoxscsi_terminator_ctl(*host, on_off) * Purpose : Turn the EESOX SCSI terminators on or off * Params : host - card to turn on/off * : on_off - !0 to turn on, 0 to turn off */ static void eesoxscsi_terminator_ctl(struct Scsi_Host *host, int on_off) { struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; unsigned long flags; spin_lock_irqsave(host->host_lock, flags); if (on_off) info->control |= EESOX_TERM_ENABLE; else info->control &= ~EESOX_TERM_ENABLE; writeb(info->control, info->ctl_port); spin_unlock_irqrestore(host->host_lock, flags); } /* Prototype: void eesoxscsi_intr(irq, *dev_id, *regs) * Purpose : handle interrupts from EESOX SCSI card * Params : irq - interrupt number * dev_id - user-defined (Scsi_Host structure) */ static irqreturn_t eesoxscsi_intr(int irq, void *dev_id) { struct eesoxscsi_info *info = dev_id; return fas216_intr(&info->info); } /* Prototype: fasdmatype_t eesoxscsi_dma_setup(host, SCpnt, direction, min_type) * Purpose : initialises DMA/PIO * Params : host - host * SCpnt - command * direction - DMA on to/off of card * min_type - minimum DMA support that 
we must have for this transfer * Returns : type of transfer to be performed */ static fasdmatype_t eesoxscsi_dma_setup(struct Scsi_Host *host, struct scsi_pointer *SCp, fasdmadir_t direction, fasdmatype_t min_type) { struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; struct device *dev = scsi_get_device(host); int dmach = info->info.scsi.dma; if (dmach != NO_DMA && (min_type == fasdma_real_all || SCp->this_residual >= 512)) { int bufs, map_dir, dma_dir; bufs = copy_SCp_to_sg(&info->sg[0], SCp, NR_SG); if (direction == DMA_OUT) map_dir = DMA_TO_DEVICE, dma_dir = DMA_MODE_WRITE; else map_dir = DMA_FROM_DEVICE, dma_dir = DMA_MODE_READ; dma_map_sg(dev, info->sg, bufs, map_dir); disable_dma(dmach); set_dma_sg(dmach, info->sg, bufs); set_dma_mode(dmach, dma_dir); enable_dma(dmach); return fasdma_real_all; } /* * We don't do DMA, we only do slow PIO * * Some day, we will do Pseudo DMA */ return fasdma_pseudo; } static void eesoxscsi_buffer_in(void *buf, int length, void __iomem *base) { const void __iomem *reg_fas = base + EESOX_FAS216_OFFSET; const void __iomem *reg_dmastat = base + EESOX_DMASTAT; const void __iomem *reg_dmadata = base + EESOX_DMADATA; register const unsigned long mask = 0xffff; do { unsigned int status; /* * Interrupt request? */ status = readb(reg_fas + (REG_STAT << EESOX_FAS216_SHIFT)); if (status & STAT_INT) break; /* * DMA request active? */ status = readb(reg_dmastat); if (!(status & EESOX_STAT_DMA)) continue; /* * Get number of bytes in FIFO */ status = readb(reg_fas + (REG_CFIS << EESOX_FAS216_SHIFT)) & CFIS_CF; if (status > 16) status = 16; if (status > length) status = length; /* * Align buffer. 
*/ if (((u32)buf) & 2 && status >= 2) { *(u16 *)buf = readl(reg_dmadata); buf += 2; status -= 2; length -= 2; } if (status >= 8) { unsigned long l1, l2; l1 = readl(reg_dmadata) & mask; l1 |= readl(reg_dmadata) << 16; l2 = readl(reg_dmadata) & mask; l2 |= readl(reg_dmadata) << 16; *(u32 *)buf = l1; buf += 4; *(u32 *)buf = l2; buf += 4; length -= 8; continue; } if (status >= 4) { unsigned long l1; l1 = readl(reg_dmadata) & mask; l1 |= readl(reg_dmadata) << 16; *(u32 *)buf = l1; buf += 4; length -= 4; continue; } if (status >= 2) { *(u16 *)buf = readl(reg_dmadata); buf += 2; length -= 2; } } while (length); } static void eesoxscsi_buffer_out(void *buf, int length, void __iomem *base) { const void __iomem *reg_fas = base + EESOX_FAS216_OFFSET; const void __iomem *reg_dmastat = base + EESOX_DMASTAT; const void __iomem *reg_dmadata = base + EESOX_DMADATA; do { unsigned int status; /* * Interrupt request? */ status = readb(reg_fas + (REG_STAT << EESOX_FAS216_SHIFT)); if (status & STAT_INT) break; /* * DMA request active? */ status = readb(reg_dmastat); if (!(status & EESOX_STAT_DMA)) continue; /* * Get number of bytes in FIFO */ status = readb(reg_fas + (REG_CFIS << EESOX_FAS216_SHIFT)) & CFIS_CF; if (status > 16) status = 16; status = 16 - status; if (status > length) status = length; status &= ~1; /* * Align buffer. 
*/ if (((u32)buf) & 2 && status >= 2) { writel(*(u16 *)buf << 16, reg_dmadata); buf += 2; status -= 2; length -= 2; } if (status >= 8) { unsigned long l1, l2; l1 = *(u32 *)buf; buf += 4; l2 = *(u32 *)buf; buf += 4; writel(l1 << 16, reg_dmadata); writel(l1, reg_dmadata); writel(l2 << 16, reg_dmadata); writel(l2, reg_dmadata); length -= 8; continue; } if (status >= 4) { unsigned long l1; l1 = *(u32 *)buf; buf += 4; writel(l1 << 16, reg_dmadata); writel(l1, reg_dmadata); length -= 4; continue; } if (status >= 2) { writel(*(u16 *)buf << 16, reg_dmadata); buf += 2; length -= 2; } } while (length); } static void eesoxscsi_dma_pseudo(struct Scsi_Host *host, struct scsi_pointer *SCp, fasdmadir_t dir, int transfer_size) { struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; if (dir == DMA_IN) { eesoxscsi_buffer_in(SCp->ptr, SCp->this_residual, info->base); } else { eesoxscsi_buffer_out(SCp->ptr, SCp->this_residual, info->base); } } /* Prototype: int eesoxscsi_dma_stop(host, SCpnt) * Purpose : stops DMA/PIO * Params : host - host * SCpnt - command */ static void eesoxscsi_dma_stop(struct Scsi_Host *host, struct scsi_pointer *SCp) { struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; if (info->info.scsi.dma != NO_DMA) disable_dma(info->info.scsi.dma); } /* Prototype: const char *eesoxscsi_info(struct Scsi_Host * host) * Purpose : returns a descriptive string about this interface, * Params : host - driver host structure to return info for. * Returns : pointer to a static buffer containing null terminated string. */ const char *eesoxscsi_info(struct Scsi_Host *host) { struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; static char string[150]; sprintf(string, "%s (%s) in slot %d v%s terminators o%s", host->hostt->name, info->info.scsi.type, info->ec->slot_no, VERSION, info->control & EESOX_TERM_ENABLE ? 
"n" : "ff"); return string; } /* Prototype: int eesoxscsi_set_proc_info(struct Scsi_Host *host, char *buffer, int length) * Purpose : Set a driver specific function * Params : host - host to setup * : buffer - buffer containing string describing operation * : length - length of string * Returns : -EINVAL, or 0 */ static int eesoxscsi_set_proc_info(struct Scsi_Host *host, char *buffer, int length) { int ret = length; if (length >= 9 && strncmp(buffer, "EESOXSCSI", 9) == 0) { buffer += 9; length -= 9; if (length >= 5 && strncmp(buffer, "term=", 5) == 0) { if (buffer[5] == '1') eesoxscsi_terminator_ctl(host, 1); else if (buffer[5] == '0') eesoxscsi_terminator_ctl(host, 0); else ret = -EINVAL; } else ret = -EINVAL; } else ret = -EINVAL; return ret; } /* Prototype: int eesoxscsi_proc_info(char *buffer, char **start, off_t offset, * int length, int host_no, int inout) * Purpose : Return information about the driver to a user process accessing * the /proc filesystem. * Params : buffer - a buffer to write information to * start - a pointer into this buffer set by this routine to the start * of the required information. * offset - offset into information that we have read up to. * length - length of buffer * host_no - host number to return information for * inout - 0 for reading, 1 for writing. * Returns : length of data written to buffer. */ int eesoxscsi_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, int length, int inout) { struct eesoxscsi_info *info; char *p = buffer; int pos; if (inout == 1) return eesoxscsi_set_proc_info(host, buffer, length); info = (struct eesoxscsi_info *)host->hostdata; p += sprintf(p, "EESOX SCSI driver v%s\n", VERSION); p += fas216_print_host(&info->info, p); p += sprintf(p, "Term : o%s\n", info->control & EESOX_TERM_ENABLE ? 
"n" : "ff"); p += fas216_print_stats(&info->info, p); p += fas216_print_devices(&info->info, p); *start = buffer + offset; pos = p - buffer - offset; if (pos > length) pos = length; return pos; } static ssize_t eesoxscsi_show_term(struct device *dev, struct device_attribute *attr, char *buf) { struct expansion_card *ec = ECARD_DEV(dev); struct Scsi_Host *host = ecard_get_drvdata(ec); struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; return sprintf(buf, "%d\n", info->control & EESOX_TERM_ENABLE ? 1 : 0); } static ssize_t eesoxscsi_store_term(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct expansion_card *ec = ECARD_DEV(dev); struct Scsi_Host *host = ecard_get_drvdata(ec); struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; unsigned long flags; if (len > 1) { spin_lock_irqsave(host->host_lock, flags); if (buf[0] != '0') { info->control |= EESOX_TERM_ENABLE; } else { info->control &= ~EESOX_TERM_ENABLE; } writeb(info->control, info->ctl_port); spin_unlock_irqrestore(host->host_lock, flags); } return len; } static DEVICE_ATTR(bus_term, S_IRUGO | S_IWUSR, eesoxscsi_show_term, eesoxscsi_store_term); static struct scsi_host_template eesox_template = { .module = THIS_MODULE, .proc_info = eesoxscsi_proc_info, .name = "EESOX SCSI", .info = eesoxscsi_info, .queuecommand = fas216_queue_command, .eh_host_reset_handler = fas216_eh_host_reset, .eh_bus_reset_handler = fas216_eh_bus_reset, .eh_device_reset_handler = fas216_eh_device_reset, .eh_abort_handler = fas216_eh_abort, .can_queue = 1, .this_id = 7, .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS, .dma_boundary = IOMD_DMA_BOUNDARY, .cmd_per_lun = 1, .use_clustering = DISABLE_CLUSTERING, .proc_name = "eesox", }; static int __devinit eesoxscsi_probe(struct expansion_card *ec, const struct ecard_id *id) { struct Scsi_Host *host; struct eesoxscsi_info *info; void __iomem *base; int ret; ret = ecard_request_resources(ec); if (ret) goto out; base = 
ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0); if (!base) { ret = -ENOMEM; goto out_region; } host = scsi_host_alloc(&eesox_template, sizeof(struct eesoxscsi_info)); if (!host) { ret = -ENOMEM; goto out_region; } ecard_set_drvdata(ec, host); info = (struct eesoxscsi_info *)host->hostdata; info->ec = ec; info->base = base; info->ctl_port = base + EESOX_CONTROL; info->control = term[ec->slot_no] ? EESOX_TERM_ENABLE : 0; writeb(info->control, info->ctl_port); info->info.scsi.io_base = base + EESOX_FAS216_OFFSET; info->info.scsi.io_shift = EESOX_FAS216_SHIFT; info->info.scsi.irq = ec->irq; info->info.scsi.dma = ec->dma; info->info.ifcfg.clockrate = 40; /* MHz */ info->info.ifcfg.select_timeout = 255; info->info.ifcfg.asyncperiod = 200; /* ns */ info->info.ifcfg.sync_max_depth = 7; info->info.ifcfg.cntl3 = CNTL3_FASTSCSI | CNTL3_FASTCLK; info->info.ifcfg.disconnect_ok = 1; info->info.ifcfg.wide_max_size = 0; info->info.ifcfg.capabilities = FASCAP_PSEUDODMA; info->info.dma.setup = eesoxscsi_dma_setup; info->info.dma.pseudo = eesoxscsi_dma_pseudo; info->info.dma.stop = eesoxscsi_dma_stop; ec->irqaddr = base + EESOX_DMASTAT; ec->irqmask = EESOX_STAT_INTR; ecard_setirq(ec, &eesoxscsi_ops, info); device_create_file(&ec->dev, &dev_attr_bus_term); ret = fas216_init(host); if (ret) goto out_free; ret = request_irq(ec->irq, eesoxscsi_intr, 0, "eesoxscsi", info); if (ret) { printk("scsi%d: IRQ%d not free: %d\n", host->host_no, ec->irq, ret); goto out_remove; } if (info->info.scsi.dma != NO_DMA) { if (request_dma(info->info.scsi.dma, "eesox")) { printk("scsi%d: DMA%d not free, DMA disabled\n", host->host_no, info->info.scsi.dma); info->info.scsi.dma = NO_DMA; } else { set_dma_speed(info->info.scsi.dma, 180); info->info.ifcfg.capabilities |= FASCAP_DMA; info->info.ifcfg.cntl3 |= CNTL3_BS8; } } ret = fas216_add(host, &ec->dev); if (ret == 0) goto out; if (info->info.scsi.dma != NO_DMA) free_dma(info->info.scsi.dma); free_irq(ec->irq, host); out_remove: fas216_remove(host); out_free: 
device_remove_file(&ec->dev, &dev_attr_bus_term); scsi_host_put(host); out_region: ecard_release_resources(ec); out: return ret; } static void __devexit eesoxscsi_remove(struct expansion_card *ec) { struct Scsi_Host *host = ecard_get_drvdata(ec); struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; ecard_set_drvdata(ec, NULL); fas216_remove(host); if (info->info.scsi.dma != NO_DMA) free_dma(info->info.scsi.dma); free_irq(ec->irq, info); device_remove_file(&ec->dev, &dev_attr_bus_term); fas216_release(host); scsi_host_put(host); ecard_release_resources(ec); } static const struct ecard_id eesoxscsi_cids[] = { { MANU_EESOX, PROD_EESOX_SCSI2 }, { 0xffff, 0xffff }, }; static struct ecard_driver eesoxscsi_driver = { .probe = eesoxscsi_probe, .remove = __devexit_p(eesoxscsi_remove), .id_table = eesoxscsi_cids, .drv = { .name = "eesoxscsi", }, }; static int __init eesox_init(void) { return ecard_register_driver(&eesoxscsi_driver); } static void __exit eesox_exit(void) { ecard_remove_driver(&eesoxscsi_driver); } module_init(eesox_init); module_exit(eesox_exit); MODULE_AUTHOR("Russell King"); MODULE_DESCRIPTION("EESOX 'Fast' SCSI driver for Acorn machines"); module_param_array(term, int, NULL, 0); MODULE_PARM_DESC(term, "SCSI bus termination"); MODULE_LICENSE("GPL");
gpl-2.0
semdoc/kernel_htc_msm8960
drivers/video/hpfb.c
8130
11168
/*
 * HP300 Topcat framebuffer support (derived from macfb of all things)
 * Phil Blundell <philb@gnu.org> 1998
 * DIO-II, colour map and Catseye support by
 * Kars de Jong <jongk@linux-m68k.org>, May 2004.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/fb.h>
#include <linux/dio.h>

#include <asm/io.h>
#include <asm/uaccess.h>

/* Single static device: the driver supports exactly one framebuffer. */
static struct fb_info fb_info = {
	.fix = {
		.id		= "HP300 ",
		.type		= FB_TYPE_PACKED_PIXELS,
		.visual		= FB_VISUAL_PSEUDOCOLOR,
		.accel		= FB_ACCEL_NONE,
	}
};

/* Virtual base of the Topcat register window (set in hpfb_init_one()). */
static unsigned long fb_regs;
/* Bitmask of the colour planes present; probed at init, used as plane enable. */
static unsigned char fb_bitmask;

#define TC_NBLANK	0x4080
#define TC_WEN		0x4088
#define TC_REN		0x408c
#define TC_FBEN		0x4090
#define TC_PRR		0x40ea

/* These defines match the X window system */
#define RR_CLEAR	0x0
#define RR_COPY		0x3
#define RR_NOOP		0x5
#define RR_XOR		0x6
#define RR_INVERT	0xa
#define RR_COPYINVERTED	0xc
#define RR_SET		0xf

/* blitter regs */
#define BUSY		0x4044
#define WMRR		0x40ef
#define SOURCE_X	0x40f2
#define SOURCE_Y	0x40f6
#define DEST_X		0x40fa
#define DEST_Y		0x40fe
#define WHEIGHT		0x4106
#define WWIDTH		0x4102
#define WMOVE		0x409c

static struct fb_var_screeninfo hpfb_defined = {
	.red		= {
		.length = 8,
	},
	.green		= {
		.length = 8,
	},
	.blue		= {
		.length = 8,
	},
	.activate	= FB_ACTIVATE_NOW,
	.height		= -1,
	.width		= -1,
	.vmode		= FB_VMODE_NONINTERLACED,
};

/*
 * Set a single colour-map register from 16-bit RGB components.
 * Returns non-zero for an invalid regno.
 */
static int hpfb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue, unsigned transp, struct fb_info *info)
{
	/* use MSBs */
	unsigned char _red  =red>>8;
	unsigned char _green=green>>8;
	unsigned char _blue =blue>>8;
	unsigned char _regno=regno;

	/*
	 * Set a single color register. The values supplied are
	 * already rounded down to the hardware's capabilities
	 * (according to the entries in the `var' structure). Return
	 * != 0 for invalid regno.
	 */

	if (regno >= info->cmap.len)
		return 1;

	/* 0x6002 bit 2 presumably means "colour map busy" — wait it out.
	 * TODO confirm against HP Topcat hardware docs. */
	while (in_be16(fb_regs + 0x6002) & 0x4)
		udelay(1);

	/* Write RGB + complemented index into the map registers, then
	 * trigger the load via 0x60f0 and let it settle. */
	out_be16(fb_regs + 0x60ba, 0xff);
	out_be16(fb_regs + 0x60b2, _red);
	out_be16(fb_regs + 0x60b4, _green);
	out_be16(fb_regs + 0x60b6, _blue);
	out_be16(fb_regs + 0x60b8, ~_regno);
	out_be16(fb_regs + 0x60f0, 0xff);
	udelay(100);

	/* Wait again, then clear the latches. */
	while (in_be16(fb_regs + 0x6002) & 0x4)
		udelay(1);
	out_be16(fb_regs + 0x60b2, 0);
	out_be16(fb_regs + 0x60b4, 0);
	out_be16(fb_regs + 0x60b6, 0);
	out_be16(fb_regs + 0x60b8, 0);

	return 0;
}

/* 0 unblank, 1 blank, 2 no vsync, 3 no hsync, 4 off */
static int hpfb_blank(int blank, struct fb_info *info)
{
	/* Any non-zero blank level just disables video output. */
	out_8(fb_regs + TC_NBLANK, (blank ? 0x00 : fb_bitmask));
	return 0;
}

/*
 * Start a hardware blit from (x0,y0) to (x1,y1), w x h pixels.
 * rr is the replacement rule (RR_*); rr < 0 means "keep the WEN/WMRR
 * setup the caller already programmed" (used by hpfb_fillrect()).
 * The blit is only started here; completion is polled elsewhere (BUSY).
 */
static void topcat_blit(int x0, int y0, int x1, int y1, int w, int h, int rr)
{
	if (rr >= 0) {
		/* Wait for any previous blit to finish before reprogramming. */
		while (in_8(fb_regs + BUSY) & fb_bitmask)
			;
	}
	out_8(fb_regs + TC_FBEN, fb_bitmask);
	if (rr >= 0) {
		out_8(fb_regs + TC_WEN, fb_bitmask);
		out_8(fb_regs + WMRR, rr);
	}
	out_be16(fb_regs + SOURCE_X, x0);
	out_be16(fb_regs + SOURCE_Y, y0);
	out_be16(fb_regs + DEST_X, x1);
	out_be16(fb_regs + DEST_Y, y1);
	out_be16(fb_regs + WWIDTH, w);
	out_be16(fb_regs + WHEIGHT, h);
	/* Writing the plane mask to WMOVE kicks off the blit. */
	out_8(fb_regs + WMOVE, fb_bitmask);
}

/* fb_ops: screen-to-screen copy via the hardware blitter. */
static void hpfb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
{
	topcat_blit(area->sx, area->sy, area->dx, area->dy, area->width, area->height, RR_COPY);
}

/* fb_ops: solid fill via per-plane replacement rules. */
static void hpfb_fillrect(struct fb_info *p, const struct fb_fillrect *region)
{
	u8 clr;

	clr = region->color & 0xff;

	while (in_8(fb_regs + BUSY) & fb_bitmask)
		;

	/* Foreground */
	out_8(fb_regs + TC_WEN, fb_bitmask & clr);
	out_8(fb_regs + WMRR, (region->rop == ROP_COPY ? RR_SET : RR_INVERT));

	/* Background */
	out_8(fb_regs + TC_WEN, fb_bitmask & ~clr);
	out_8(fb_regs + WMRR, (region->rop == ROP_COPY ? RR_CLEAR : RR_NOOP));

	/* rr = -1: use the WEN/WMRR programming done above. */
	topcat_blit(region->dx, region->dy, region->dx, region->dy, region->width, region->height, -1);
}

static int hpfb_sync(struct fb_info *info)
{
	/*
	 * Since we also access the framebuffer directly, we have to wait
	 * until the block mover is finished
	 */
	while (in_8(fb_regs + BUSY) & fb_bitmask)
		;

	/* Restore the plain-copy state the direct-access paths expect. */
	out_8(fb_regs + TC_WEN, fb_bitmask);
	out_8(fb_regs + TC_PRR, RR_COPY);
	out_8(fb_regs + TC_FBEN, fb_bitmask);

	return 0;
}

static struct fb_ops hpfb_ops = {
	.owner		= THIS_MODULE,
	.fb_setcolreg	= hpfb_setcolreg,
	.fb_blank	= hpfb_blank,
	.fb_fillrect	= hpfb_fillrect,
	.fb_copyarea	= hpfb_copyarea,
	.fb_imageblit	= cfb_imageblit,
	.fb_sync	= hpfb_sync,
};

/* Common to all HP framebuffers */
#define HPFB_FBWMSB	0x05	/* Frame buffer width		*/
#define HPFB_FBWLSB	0x07
#define HPFB_FBHMSB	0x09	/* Frame buffer height		*/
#define HPFB_FBHLSB	0x0b
#define HPFB_DWMSB	0x0d	/* Display width		*/
#define HPFB_DWLSB	0x0f
#define HPFB_DHMSB	0x11	/* Display height		*/
#define HPFB_DHLSB	0x13
#define HPFB_NUMPLANES	0x5b	/* Number of colour planes	*/
#define HPFB_FBOMSB	0x5d	/* Frame buffer offset		*/
#define HPFB_FBOLSB	0x5f

/*
 * Probe one Topcat/Catseye at the given physical/virtual base, read its
 * geometry from the ID ROM, map the frame buffer and register it.
 * Returns 0 on success, non-zero on failure (caller maps that to -ENOMEM).
 */
static int __devinit hpfb_init_one(unsigned long phys_base, unsigned long virt_base)
{
	unsigned long fboff, fb_width, fb_height, fb_start;

	fb_regs = virt_base;
	fboff = (in_8(fb_regs + HPFB_FBOMSB) << 8) | in_8(fb_regs + HPFB_FBOLSB);

	fb_info.fix.smem_start = (in_8(fb_regs + fboff) << 16);

	if (phys_base >= DIOII_BASE) {
		fb_info.fix.smem_start += phys_base;
	}

	if (DIO_SECID(fb_regs) != DIO_ID2_TOPCAT) {
		/* This is the magic incantation the HP X server uses to make Catseye boards work. */
		while (in_be16(fb_regs+0x4800) & 1)
			;
		out_be16(fb_regs+0x4800, 0);	/* Catseye status */
		out_be16(fb_regs+0x4510, 0);	/* VB */
		out_be16(fb_regs+0x4512, 0);	/* TCNTRL */
		out_be16(fb_regs+0x4514, 0);	/* ACNTRL */
		out_be16(fb_regs+0x4516, 0);	/* PNCNTRL */
		out_be16(fb_regs+0x4206, 0x90);	/* RUG Command/Status */
		out_be16(fb_regs+0x60a2, 0);	/* Overlay Mask */
		out_be16(fb_regs+0x60bc, 0);	/* Ram Select */
	}

	/*
	 * Fill in the available video resolution
	 */
	fb_width = (in_8(fb_regs + HPFB_FBWMSB) << 8) | in_8(fb_regs + HPFB_FBWLSB);
	fb_info.fix.line_length = fb_width;
	fb_height = (in_8(fb_regs + HPFB_FBHMSB) << 8) | in_8(fb_regs + HPFB_FBHLSB);
	fb_info.fix.smem_len = fb_width * fb_height;
	fb_start = (unsigned long)ioremap_writethrough(fb_info.fix.smem_start,
						       fb_info.fix.smem_len);
	/* NOTE(review): fb_start is not checked for a failed ioremap — verify. */
	hpfb_defined.xres = (in_8(fb_regs + HPFB_DWMSB) << 8) | in_8(fb_regs + HPFB_DWLSB);
	hpfb_defined.yres = (in_8(fb_regs + HPFB_DHMSB) << 8) | in_8(fb_regs + HPFB_DHLSB);
	hpfb_defined.xres_virtual = hpfb_defined.xres;
	hpfb_defined.yres_virtual = hpfb_defined.yres;
	hpfb_defined.bits_per_pixel = in_8(fb_regs + HPFB_NUMPLANES);

	printk(KERN_INFO "hpfb: framebuffer at 0x%lx, mapped to 0x%lx, size %dk\n",
	       fb_info.fix.smem_start, fb_start, fb_info.fix.smem_len/1024);
	printk(KERN_INFO "hpfb: mode is %dx%dx%d, linelength=%d\n",
	       hpfb_defined.xres, hpfb_defined.yres, hpfb_defined.bits_per_pixel, fb_info.fix.line_length);

	/*
	 * Give the hardware a bit of a prod and work out how many bits per
	 * pixel are supported.
	 */
	out_8(fb_regs + TC_WEN, 0xff);
	out_8(fb_regs + TC_PRR, RR_COPY);
	out_8(fb_regs + TC_FBEN, 0xff);
	out_8(fb_start, 0xff);
	fb_bitmask = in_8(fb_start);
	out_8(fb_start, 0);

	/*
	 * Enable reading/writing of all the planes.
	 */
	out_8(fb_regs + TC_WEN, fb_bitmask);
	out_8(fb_regs + TC_PRR, RR_COPY);
	out_8(fb_regs + TC_REN, fb_bitmask);
	out_8(fb_regs + TC_FBEN, fb_bitmask);

	/*
	 * Clear the screen.
	 */
	topcat_blit(0, 0, 0, 0, fb_width, fb_height, RR_CLEAR);

	/*
	 * Let there be consoles..
	 */
	if (DIO_SECID(fb_regs) == DIO_ID2_TOPCAT)
		strcat(fb_info.fix.id, "Topcat");
	else
		strcat(fb_info.fix.id, "Catseye");
	fb_info.fbops = &hpfb_ops;
	fb_info.flags = FBINFO_DEFAULT;
	fb_info.var   = hpfb_defined;
	fb_info.screen_base = (char *)fb_start;

	fb_alloc_cmap(&fb_info.cmap, 1 << hpfb_defined.bits_per_pixel, 0);

	if (register_framebuffer(&fb_info) < 0) {
		fb_dealloc_cmap(&fb_info.cmap);
		iounmap(fb_info.screen_base);
		fb_info.screen_base = NULL;
		return 1;
	}

	printk(KERN_INFO "fb%d: %s frame buffer device\n",
	       fb_info.node, fb_info.fix.id);

	return 0;
}

/*
 * Check that the secondary ID indicates that we have some hope of working with this
 * framebuffer.  The catseye boards are pretty much like topcats and we can muddle through.
 */
#define topcat_sid_ok(x)	(((x) == DIO_ID2_LRCATSEYE) || ((x) == DIO_ID2_HRCCATSEYE)    \
				 || ((x) == DIO_ID2_HRMCATSEYE) || ((x) == DIO_ID2_TOPCAT))

/*
 * Initialise the framebuffer
 */
static int __devinit hpfb_dio_probe(struct dio_dev * d, const struct dio_device_id * ent)
{
	unsigned long paddr, vaddr;

	paddr = d->resource.start;
	if (!request_mem_region(d->resource.start, resource_size(&d->resource), d->name))
		return -EBUSY;

	if (d->scode >= DIOII_SCBASE) {
		/* DIO-II boards must be ioremapped; DIO-I space is direct-mapped. */
		vaddr = (unsigned long)ioremap(paddr, resource_size(&d->resource));
	} else {
		vaddr = paddr + DIO_VIRADDRBASE;
	}
	printk(KERN_INFO "Topcat found at DIO select code %d "
	       "(secondary id %02x)\n", d->scode, (d->id >> 8) & 0xff);
	if (hpfb_init_one(paddr, vaddr)) {
		if (d->scode >= DIOII_SCBASE)
			iounmap((void *)vaddr);
		return -ENOMEM;
	}
	return 0;
}

static void __devexit hpfb_remove_one(struct dio_dev *d)
{
	unregister_framebuffer(&fb_info);
	if (d->scode >= DIOII_SCBASE)
		iounmap((void *)fb_regs);
	release_mem_region(d->resource.start, resource_size(&d->resource));
}

static struct dio_device_id hpfb_dio_tbl[] = {
    { DIO_ENCODE_ID(DIO_ID_FBUFFER, DIO_ID2_LRCATSEYE) },
    { DIO_ENCODE_ID(DIO_ID_FBUFFER, DIO_ID2_HRCCATSEYE) },
    { DIO_ENCODE_ID(DIO_ID_FBUFFER, DIO_ID2_HRMCATSEYE) },
    { DIO_ENCODE_ID(DIO_ID_FBUFFER, DIO_ID2_TOPCAT) },
    { 0 }
};

static struct dio_driver hpfb_driver = {
    .name      = "hpfb",
    .id_table  = hpfb_dio_tbl,
    .probe     = hpfb_dio_probe,
    .remove    = __devexit_p(hpfb_remove_one),
};

int __init hpfb_init(void)
{
	unsigned int sid;
	mm_segment_t fs;
	unsigned char i;
	int err;

	/* Topcats can be on the internal IO bus or real DIO devices.
	 * The internal variant sits at 0x560000; it has primary
	 * and secondary ID registers just like the DIO version.
	 * So we merge the two detection routines.
	 *
	 * Perhaps this #define should be in a global header file:
	 * I believe it's common to all internal fbs, not just topcat.
	 */
#define INTFBVADDR 0xf0560000
#define INTFBPADDR 0x560000

	if (!MACH_IS_HP300)
		return -ENODEV;

	if (fb_get_options("hpfb", NULL))
		return -ENODEV;

	err = dio_register_driver(&hpfb_driver);
	if (err)
		return err;

	/* Probe the internal slot with get_user() so a missing device
	 * faults cleanly instead of crashing on the bad address. */
	fs = get_fs();
	set_fs(KERNEL_DS);
	err = get_user(i, (unsigned char *)INTFBVADDR + DIO_IDOFF);
	set_fs(fs);

	if (!err && (i == DIO_ID_FBUFFER) && topcat_sid_ok(sid = DIO_SECID(INTFBVADDR))) {
		if (!request_mem_region(INTFBPADDR, DIO_DEVSIZE, "Internal Topcat"))
			return -EBUSY;
		printk(KERN_INFO "Internal Topcat found (secondary id %02x)\n", sid);
		/* NOTE(review): on failure the mem region requested above is
		 * never released — looks like a leak; verify upstream. */
		if (hpfb_init_one(INTFBPADDR, INTFBVADDR)) {
			return -ENOMEM;
		}
	}
	return 0;
}

void __exit hpfb_cleanup_module(void)
{
	dio_unregister_driver(&hpfb_driver);
}

module_init(hpfb_init);
module_exit(hpfb_cleanup_module);

MODULE_LICENSE("GPL");
gpl-2.0
zachf714/android_kernel_common
drivers/block/paride/ktti.c
15554
2782
/*
 * ktti.c (c) 1998 Grant R. Guenther <grant@torque.net>
 *        Under the terms of the GNU General Public License.
 *
 * ktti.c is a low-level protocol driver for the KT Technology
 * parallel port adapter. This adapter is used in the "PHd" portable
 * hard-drives. As far as I can tell, this device supports 4-bit
 * mode _only_.
 */

#define KTTI_VERSION "1.0"

#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <asm/io.h>

#include "paride.h"

/* Reassemble one byte from two 4-bit status reads: a supplies the low
 * nibble (from its high bits), b supplies the high nibble. */
#define j44(a,b)	(((a>>4)&0x0f)|(b&0xf0))

/* cont = 0 - access the IDE register file
 * cont = 1 - access the IDE command set
 */
static int cont_map[2] = { 0x10, 0x08 };

/* Write val to IDE register regr; the w0()/w2() sequences below are the
 * adapter's handshake protocol and their exact order matters. */
static void ktti_write_regr( PIA *pi, int cont, int regr, int val)
{
	int r;

	r = regr + cont_map[cont];

	w0(r); w2(0xb); w2(0xa); w2(3); w2(6);
	w0(val); w2(3); w0(0); w2(6);
	w2(0xb);
}

/* Read IDE register regr as two nibbles over the status lines. */
static int ktti_read_regr( PIA *pi, int cont, int regr )
{
	int a, b, r;

	r = regr + cont_map[cont];

	w0(r); w2(0xb); w2(0xa); w2(9); w2(0xc); w2(9);
	a = r1(); w2(0xc); b = r1(); w2(9); w2(0xc); w2(9);
	return j44(a,b);
}

/* Bulk read: count bytes into buf, two bytes (four nibbles) per loop. */
static void ktti_read_block( PIA *pi, char * buf, int count )
{
	int k, a, b;

	for (k=0;k<count/2;k++) {
		w0(0x10); w2(0xb); w2(0xa); w2(9); w2(0xc); w2(9);
		a = r1(); w2(0xc); b = r1(); w2(9);
		buf[2*k] = j44(a,b);
		a = r1(); w2(0xc); b = r1(); w2(9);
		buf[2*k+1] = j44(a,b);
	}
}

/* Bulk write: count bytes from buf, two bytes per handshake cycle. */
static void ktti_write_block( PIA *pi, char * buf, int count )
{
	int k;

	for (k=0;k<count/2;k++) {
		w0(0x10); w2(0xb); w2(0xa); w2(3); w2(6);
		w0(buf[2*k]); w2(3);
		w0(buf[2*k+1]); w2(6);
		w2(0xb);
	}
}

/* Claim the port: save the host's register state, then select the device. */
static void ktti_connect ( PIA *pi )
{
	pi->saved_r0 = r0();
	pi->saved_r2 = r2();
	w2(0xb); w2(0xa); w0(0); w2(3); w2(6);
}

/* Deselect the device and restore the saved host register state. */
static void ktti_disconnect ( PIA *pi )
{
	w2(0xb); w2(0xa); w0(0xa0); w2(3); w2(4);
	w0(pi->saved_r0);
	w2(pi->saved_r2);
}

static void ktti_log_adapter( PIA *pi, char * scratch, int verbose )
{
	printk("%s: ktti %s, KT adapter at 0x%x, delay %d\n",
	       pi->device,KTTI_VERSION,pi->port,pi->delay);
}

static struct pi_protocol ktti = {
	.owner		= THIS_MODULE,
	.name		= "ktti",
	.max_mode	= 1,
	.epp_first	= 2,
	.default_delay	= 1,
	.max_units	= 1,
	.write_regr	= ktti_write_regr,
	.read_regr	= ktti_read_regr,
	.write_block	= ktti_write_block,
	.read_block	= ktti_read_block,
	.connect	= ktti_connect,
	.disconnect	= ktti_disconnect,
	.log_adapter	= ktti_log_adapter,
};

static int __init ktti_init(void)
{
	return paride_register(&ktti);
}

static void __exit ktti_exit(void)
{
	paride_unregister(&ktti);
}

MODULE_LICENSE("GPL");
module_init(ktti_init)
module_exit(ktti_exit)
gpl-2.0
wjgood/Kernel
drivers/media/dvb/frontends/dvb_dummy_fe.c
195
7049
/*
 * Driver for Dummy Frontend
 *
 * Written by Emard <emard@softhome.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>

#include "dvb_frontend.h"
#include "dvb_dummy_fe.h"

/* Per-frontend state: nothing beyond the embedded dvb_frontend itself. */
struct dvb_dummy_fe_state {
	struct dvb_frontend frontend;
};

/* Always report a full lock so applications see a "working" frontend. */
static int dvb_dummy_fe_read_status(struct dvb_frontend* fe, fe_status_t* status)
{
	*status = FE_HAS_SIGNAL
		| FE_HAS_CARRIER
		| FE_HAS_VITERBI
		| FE_HAS_SYNC
		| FE_HAS_LOCK;

	return 0;
}

/* Dummy statistics: no errors, no signal strength, no SNR. */
static int dvb_dummy_fe_read_ber(struct dvb_frontend* fe, u32* ber)
{
	*ber = 0;
	return 0;
}

static int dvb_dummy_fe_read_signal_strength(struct dvb_frontend* fe, u16* strength)
{
	*strength = 0;
	return 0;
}

static int dvb_dummy_fe_read_snr(struct dvb_frontend* fe, u16* snr)
{
	*snr = 0;
	return 0;
}

static int dvb_dummy_fe_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks)
{
	*ucblocks = 0;
	return 0;
}

static int dvb_dummy_fe_get_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *p)
{
	return 0;
}

/* Forward the tune request to the attached tuner (if any), then close
 * the I2C gate again; the demod side has nothing to program. */
static int dvb_dummy_fe_set_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *p)
{
	if (fe->ops.tuner_ops.set_params) {
		fe->ops.tuner_ops.set_params(fe, p);
		if (fe->ops.i2c_gate_ctrl)
			fe->ops.i2c_gate_ctrl(fe, 0);
	}

	return 0;
}

static int dvb_dummy_fe_sleep(struct dvb_frontend* fe)
{
	return 0;
}

static int dvb_dummy_fe_init(struct dvb_frontend* fe)
{
	return 0;
}

static int dvb_dummy_fe_set_tone(struct dvb_frontend* fe, fe_sec_tone_mode_t tone)
{
	return 0;
}

static int dvb_dummy_fe_set_voltage(struct dvb_frontend* fe, fe_sec_voltage_t voltage)
{
	return 0;
}

/* Free the state allocated by the attach functions below. */
static void dvb_dummy_fe_release(struct dvb_frontend* fe)
{
	struct dvb_dummy_fe_state* state = fe->demodulator_priv;
	kfree(state);
}

static struct dvb_frontend_ops dvb_dummy_fe_ofdm_ops;

/* Create a dummy DVB-T frontend; returns NULL on allocation failure.
 * Ownership of the state passes to the frontend (freed in release). */
struct dvb_frontend* dvb_dummy_fe_ofdm_attach(void)
{
	struct dvb_dummy_fe_state* state = NULL;

	/* allocate memory for the internal state */
	state = kmalloc(sizeof(struct dvb_dummy_fe_state), GFP_KERNEL);
	if (state == NULL) goto error;

	/* create dvb_frontend */
	memcpy(&state->frontend.ops, &dvb_dummy_fe_ofdm_ops, sizeof(struct dvb_frontend_ops));
	state->frontend.demodulator_priv = state;
	return &state->frontend;

error:
	kfree(state);
	return NULL;
}

static struct dvb_frontend_ops dvb_dummy_fe_qpsk_ops;

/* Create a dummy DVB-S frontend; returns NULL on allocation failure. */
struct dvb_frontend *dvb_dummy_fe_qpsk_attach(void)
{
	struct dvb_dummy_fe_state* state = NULL;

	/* allocate memory for the internal state */
	state = kmalloc(sizeof(struct dvb_dummy_fe_state), GFP_KERNEL);
	if (state == NULL) goto error;

	/* create dvb_frontend */
	memcpy(&state->frontend.ops, &dvb_dummy_fe_qpsk_ops, sizeof(struct dvb_frontend_ops));
	state->frontend.demodulator_priv = state;
	return &state->frontend;

error:
	kfree(state);
	return NULL;
}

static struct dvb_frontend_ops dvb_dummy_fe_qam_ops;

/* Create a dummy DVB-C frontend; returns NULL on allocation failure. */
struct dvb_frontend *dvb_dummy_fe_qam_attach(void)
{
	struct dvb_dummy_fe_state* state = NULL;

	/* allocate memory for the internal state */
	state = kmalloc(sizeof(struct dvb_dummy_fe_state), GFP_KERNEL);
	if (state == NULL) goto error;

	/* create dvb_frontend */
	memcpy(&state->frontend.ops, &dvb_dummy_fe_qam_ops, sizeof(struct dvb_frontend_ops));
	state->frontend.demodulator_priv = state;
	return &state->frontend;

error:
	kfree(state);
	return NULL;
}

static struct dvb_frontend_ops dvb_dummy_fe_ofdm_ops = {

	.info = {
		.name			= "Dummy DVB-T",
		.type			= FE_OFDM,
		.frequency_min		= 0,
		.frequency_max		= 863250000,
		.frequency_stepsize	= 62500,
		.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
			FE_CAN_FEC_4_5 | FE_CAN_FEC_5_6 | FE_CAN_FEC_6_7 |
			FE_CAN_FEC_7_8 | FE_CAN_FEC_8_9 | FE_CAN_FEC_AUTO |
			FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO |
			FE_CAN_TRANSMISSION_MODE_AUTO |
			FE_CAN_GUARD_INTERVAL_AUTO |
			FE_CAN_HIERARCHY_AUTO,
	},

	.release = dvb_dummy_fe_release,

	.init = dvb_dummy_fe_init,
	.sleep = dvb_dummy_fe_sleep,

	.set_frontend = dvb_dummy_fe_set_frontend,
	.get_frontend = dvb_dummy_fe_get_frontend,

	.read_status = dvb_dummy_fe_read_status,
	.read_ber = dvb_dummy_fe_read_ber,
	.read_signal_strength = dvb_dummy_fe_read_signal_strength,
	.read_snr = dvb_dummy_fe_read_snr,
	.read_ucblocks = dvb_dummy_fe_read_ucblocks,
};

static struct dvb_frontend_ops dvb_dummy_fe_qam_ops = {

	.info = {
		.name			= "Dummy DVB-C",
		.type			= FE_QAM,
		.frequency_stepsize	= 62500,
		.frequency_min		= 51000000,
		.frequency_max		= 858000000,
		.symbol_rate_min	= (57840000/2)/64,	/* SACLK/64 == (XIN/2)/64 */
		.symbol_rate_max	= (57840000/2)/4,	/* SACLK/4 */
		.caps = FE_CAN_QAM_16 | FE_CAN_QAM_32 | FE_CAN_QAM_64 |
			FE_CAN_QAM_128 | FE_CAN_QAM_256 |
			FE_CAN_FEC_AUTO | FE_CAN_INVERSION_AUTO
	},

	.release = dvb_dummy_fe_release,

	.init = dvb_dummy_fe_init,
	.sleep = dvb_dummy_fe_sleep,

	.set_frontend = dvb_dummy_fe_set_frontend,
	.get_frontend = dvb_dummy_fe_get_frontend,

	.read_status = dvb_dummy_fe_read_status,
	.read_ber = dvb_dummy_fe_read_ber,
	.read_signal_strength = dvb_dummy_fe_read_signal_strength,
	.read_snr = dvb_dummy_fe_read_snr,
	.read_ucblocks = dvb_dummy_fe_read_ucblocks,
};

static struct dvb_frontend_ops dvb_dummy_fe_qpsk_ops = {

	.info = {
		.name			= "Dummy DVB-S",
		.type			= FE_QPSK,
		.frequency_min		= 950000,
		.frequency_max		= 2150000,
		.frequency_stepsize	= 250,		/* kHz for QPSK frontends */
		.frequency_tolerance	= 29500,
		.symbol_rate_min	= 1000000,
		.symbol_rate_max	= 45000000,
		.caps = FE_CAN_INVERSION_AUTO |
			FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
			FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 |
			FE_CAN_FEC_AUTO |
			FE_CAN_QPSK
	},

	.release = dvb_dummy_fe_release,

	.init = dvb_dummy_fe_init,
	.sleep = dvb_dummy_fe_sleep,

	.set_frontend = dvb_dummy_fe_set_frontend,
	.get_frontend = dvb_dummy_fe_get_frontend,

	.read_status = dvb_dummy_fe_read_status,
	.read_ber = dvb_dummy_fe_read_ber,
	.read_signal_strength = dvb_dummy_fe_read_signal_strength,
	.read_snr = dvb_dummy_fe_read_snr,
	.read_ucblocks = dvb_dummy_fe_read_ucblocks,

	.set_voltage = dvb_dummy_fe_set_voltage,
	.set_tone = dvb_dummy_fe_set_tone,
};

MODULE_DESCRIPTION("DVB DUMMY Frontend");
MODULE_AUTHOR("Emard");
MODULE_LICENSE("GPL");

EXPORT_SYMBOL(dvb_dummy_fe_ofdm_attach);
EXPORT_SYMBOL(dvb_dummy_fe_qam_attach);
EXPORT_SYMBOL(dvb_dummy_fe_qpsk_attach);
gpl-2.0
finch0219/linux
drivers/media/i2c/adp1653.c
195
14556
/*
 * drivers/media/i2c/adp1653.c
 *
 * Copyright (C) 2008--2011 Nokia Corporation
 *
 * Contact: Sakari Ailus <sakari.ailus@iki.fi>
 *
 * Contributors:
 *	Sakari Ailus <sakari.ailus@iki.fi>
 *	Tuukka Toivonen <tuukkat76@gmail.com>
 *	Pavel Machek <pavel@ucw.cz>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 * TODO:
 * - fault interrupt handling
 * - hardware strobe
 * - power doesn't need to be ON if all lights are off
 *
 */

#include <linux/delay.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/gpio/consumer.h>
#include <media/adp1653.h>
#include <media/v4l2-device.h>

#define TIMEOUT_MAX		820000
#define TIMEOUT_STEP		54600
#define TIMEOUT_MIN		(TIMEOUT_MAX - ADP1653_REG_CONFIG_TMR_SET_MAX \
				 * TIMEOUT_STEP)
#define TIMEOUT_US_TO_CODE(t)	((TIMEOUT_MAX + (TIMEOUT_STEP / 2) - (t)) \
				 / TIMEOUT_STEP)
#define TIMEOUT_CODE_TO_US(c)	(TIMEOUT_MAX - (c) * TIMEOUT_STEP)

/*
 * Push the current control values (LED mode, intensities, timeout) into
 * the ADP1653 OUT_SEL and CONFIG registers.  Returns 0 or a negative
 * errno from the SMBus writes.
 */
static int adp1653_update_hw(struct adp1653_flash *flash)
{
	struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev);
	u8 out_sel;
	u8 config = 0;
	int rval;

	out_sel = ADP1653_INDICATOR_INTENSITY_uA_TO_REG(
		flash->indicator_intensity->val)
		<< ADP1653_REG_OUT_SEL_ILED_SHIFT;

	switch (flash->led_mode->val) {
	case V4L2_FLASH_LED_MODE_NONE:
		break;
	case V4L2_FLASH_LED_MODE_FLASH:
		/* Flash mode, light on with strobe, duration from timer */
		config = ADP1653_REG_CONFIG_TMR_CFG;
		config |= TIMEOUT_US_TO_CODE(flash->flash_timeout->val)
			  << ADP1653_REG_CONFIG_TMR_SET_SHIFT;
		break;
	case V4L2_FLASH_LED_MODE_TORCH:
		/* Torch mode, light immediately on, duration indefinite */
		out_sel |= ADP1653_FLASH_INTENSITY_mA_TO_REG(
			flash->torch_intensity->val)
			<< ADP1653_REG_OUT_SEL_HPLED_SHIFT;
		break;
	}

	rval = i2c_smbus_write_byte_data(client, ADP1653_REG_OUT_SEL, out_sel);
	if (rval < 0)
		return rval;

	rval = i2c_smbus_write_byte_data(client, ADP1653_REG_CONFIG, config);
	if (rval < 0)
		return rval;

	return 0;
}

/*
 * Read and accumulate the FAULT register into flash->fault.  If a fault
 * is pending the outputs are shut down and the accumulated fault mask is
 * returned (positive); 0 means no fault, negative is an I/O error.
 */
static int adp1653_get_fault(struct adp1653_flash *flash)
{
	struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev);
	int fault;
	int rval;

	fault = i2c_smbus_read_byte_data(client, ADP1653_REG_FAULT);
	if (IS_ERR_VALUE(fault))
		return fault;

	flash->fault |= fault;

	if (!flash->fault)
		return 0;

	/* Clear faults. */
	rval = i2c_smbus_write_byte_data(client, ADP1653_REG_OUT_SEL, 0);
	if (IS_ERR_VALUE(rval))
		return rval;

	flash->led_mode->val = V4L2_FLASH_LED_MODE_NONE;

	rval = adp1653_update_hw(flash);
	if (IS_ERR_VALUE(rval))
		return rval;

	return flash->fault;
}

/*
 * Start (enable != 0) or stop a software-triggered flash strobe.
 * Only valid in V4L2_FLASH_LED_MODE_FLASH; -EBUSY otherwise.
 */
static int adp1653_strobe(struct adp1653_flash *flash, int enable)
{
	struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev);
	u8 out_sel = ADP1653_INDICATOR_INTENSITY_uA_TO_REG(
		flash->indicator_intensity->val)
		<< ADP1653_REG_OUT_SEL_ILED_SHIFT;
	int rval;

	if (flash->led_mode->val != V4L2_FLASH_LED_MODE_FLASH)
		return -EBUSY;

	if (!enable)
		return i2c_smbus_write_byte_data(client, ADP1653_REG_OUT_SEL,
						 out_sel);

	out_sel |= ADP1653_FLASH_INTENSITY_mA_TO_REG(
		flash->flash_intensity->val)
		<< ADP1653_REG_OUT_SEL_HPLED_SHIFT;
	rval = i2c_smbus_write_byte_data(client, ADP1653_REG_OUT_SEL, out_sel);
	if (rval)
		return rval;

	/* Software strobe using i2c */
	rval = i2c_smbus_write_byte_data(client, ADP1653_REG_SW_STROBE,
					 ADP1653_REG_SW_STROBE_SW_STROBE);
	if (rval)
		return rval;
	return i2c_smbus_write_byte_data(client, ADP1653_REG_SW_STROBE, 0);
}

/* --------------------------------------------------------------------------
 * V4L2 controls
 */

/* Volatile FAULT control read: translate hardware fault bits to V4L2. */
static int adp1653_get_ctrl(struct v4l2_ctrl *ctrl)
{
	struct adp1653_flash *flash =
		container_of(ctrl->handler, struct adp1653_flash, ctrls);
	int rval;

	rval = adp1653_get_fault(flash);
	if (IS_ERR_VALUE(rval))
		return rval;

	ctrl->cur.val = 0;

	if (flash->fault & ADP1653_REG_FAULT_FLT_SCP)
		ctrl->cur.val |= V4L2_FLASH_FAULT_SHORT_CIRCUIT;
	if (flash->fault & ADP1653_REG_FAULT_FLT_OT)
		ctrl->cur.val |= V4L2_FLASH_FAULT_OVER_TEMPERATURE;
	if (flash->fault & ADP1653_REG_FAULT_FLT_TMR)
		ctrl->cur.val |= V4L2_FLASH_FAULT_TIMEOUT;
	if (flash->fault & ADP1653_REG_FAULT_FLT_OV)
		ctrl->cur.val |= V4L2_FLASH_FAULT_OVER_VOLTAGE;

	flash->fault = 0;

	return 0;
}

/* Control write: refuse light-related controls while a serious fault is
 * pending, dispatch strobe start/stop, otherwise reprogram the hardware. */
static int adp1653_set_ctrl(struct v4l2_ctrl *ctrl)
{
	struct adp1653_flash *flash =
		container_of(ctrl->handler, struct adp1653_flash, ctrls);
	int rval;

	rval = adp1653_get_fault(flash);
	if (IS_ERR_VALUE(rval))
		return rval;
	if ((rval & (ADP1653_REG_FAULT_FLT_SCP |
		     ADP1653_REG_FAULT_FLT_OT |
		     ADP1653_REG_FAULT_FLT_OV)) &&
	    (ctrl->id == V4L2_CID_FLASH_STROBE ||
	     ctrl->id == V4L2_CID_FLASH_TORCH_INTENSITY ||
	     ctrl->id == V4L2_CID_FLASH_LED_MODE))
		return -EBUSY;

	switch (ctrl->id) {
	case V4L2_CID_FLASH_STROBE:
		return adp1653_strobe(flash, 1);
	case V4L2_CID_FLASH_STROBE_STOP:
		return adp1653_strobe(flash, 0);
	}

	return adp1653_update_hw(flash);
}

static const struct v4l2_ctrl_ops adp1653_ctrl_ops = {
	.g_volatile_ctrl = adp1653_get_ctrl,
	.s_ctrl = adp1653_set_ctrl,
};

/*
 * Register the flash control set on the subdev handler.  Limits come
 * from platform data.  Returns a control-framework error, if any.
 */
static int adp1653_init_controls(struct adp1653_flash *flash)
{
	struct v4l2_ctrl *fault;

	v4l2_ctrl_handler_init(&flash->ctrls, 9);

	flash->led_mode =
		v4l2_ctrl_new_std_menu(&flash->ctrls, &adp1653_ctrl_ops,
				       V4L2_CID_FLASH_LED_MODE,
				       V4L2_FLASH_LED_MODE_TORCH, ~0x7, 0);
	v4l2_ctrl_new_std_menu(&flash->ctrls, &adp1653_ctrl_ops,
			       V4L2_CID_FLASH_STROBE_SOURCE,
			       V4L2_FLASH_STROBE_SOURCE_SOFTWARE, ~0x1, 0);
	v4l2_ctrl_new_std(&flash->ctrls, &adp1653_ctrl_ops,
			  V4L2_CID_FLASH_STROBE, 0, 0, 0, 0);
	v4l2_ctrl_new_std(&flash->ctrls, &adp1653_ctrl_ops,
			  V4L2_CID_FLASH_STROBE_STOP, 0, 0, 0, 0);
	flash->flash_timeout =
		v4l2_ctrl_new_std(&flash->ctrls, &adp1653_ctrl_ops,
				  V4L2_CID_FLASH_TIMEOUT, TIMEOUT_MIN,
				  flash->platform_data->max_flash_timeout,
				  TIMEOUT_STEP,
				  flash->platform_data->max_flash_timeout);
	flash->flash_intensity =
		v4l2_ctrl_new_std(&flash->ctrls, &adp1653_ctrl_ops,
				  V4L2_CID_FLASH_INTENSITY,
				  ADP1653_FLASH_INTENSITY_MIN,
				  flash->platform_data->max_flash_intensity,
				  1, flash->platform_data->max_flash_intensity);
	flash->torch_intensity =
		v4l2_ctrl_new_std(&flash->ctrls, &adp1653_ctrl_ops,
				  V4L2_CID_FLASH_TORCH_INTENSITY,
				  ADP1653_TORCH_INTENSITY_MIN,
				  flash->platform_data->max_torch_intensity,
				  ADP1653_FLASH_INTENSITY_STEP,
				  flash->platform_data->max_torch_intensity);
	flash->indicator_intensity =
		v4l2_ctrl_new_std(&flash->ctrls, &adp1653_ctrl_ops,
				  V4L2_CID_FLASH_INDICATOR_INTENSITY,
				  ADP1653_INDICATOR_INTENSITY_MIN,
				  flash->platform_data->max_indicator_intensity,
				  ADP1653_INDICATOR_INTENSITY_STEP,
				  ADP1653_INDICATOR_INTENSITY_MIN);
	fault = v4l2_ctrl_new_std(&flash->ctrls, &adp1653_ctrl_ops,
				  V4L2_CID_FLASH_FAULT, 0,
				  V4L2_FLASH_FAULT_OVER_VOLTAGE |
				  V4L2_FLASH_FAULT_OVER_TEMPERATURE |
				  V4L2_FLASH_FAULT_SHORT_CIRCUIT, 0, 0);

	if (flash->ctrls.error)
		return flash->ctrls.error;

	/* Faults must always be read fresh from the hardware. */
	fault->flags |= V4L2_CTRL_FLAG_VOLATILE;

	flash->subdev.ctrl_handler = &flash->ctrls;
	return 0;
}

/* --------------------------------------------------------------------------
 * V4L2 subdev operations
 */

/* Bring the chip to a known state: clear faults, then apply controls. */
static int adp1653_init_device(struct adp1653_flash *flash)
{
	struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev);
	int rval;

	/* Clear FAULT register by writing zero to OUT_SEL */
	rval = i2c_smbus_write_byte_data(client, ADP1653_REG_OUT_SEL, 0);
	if (rval < 0) {
		dev_err(&client->dev, "failed writing fault register\n");
		return -EIO;
	}

	mutex_lock(flash->ctrls.lock);
	/* Reset faults before reading new ones. */
	flash->fault = 0;
	rval = adp1653_get_fault(flash);
	mutex_unlock(flash->ctrls.lock);
	if (rval > 0) {
		dev_err(&client->dev, "faults detected: 0x%1.1x\n", rval);
		return -EIO;
	}

	mutex_lock(flash->ctrls.lock);
	rval = adp1653_update_hw(flash);
	mutex_unlock(flash->ctrls.lock);
	if (rval) {
		dev_err(&client->dev,
			"adp1653_update_hw failed at %s\n", __func__);
		return -EIO;
	}

	return 0;
}

/*
 * Power the device up or down via the platform power callback or the
 * enable GPIO.  On power-up failure the device is powered back down.
 * Caller holds flash->power_lock.
 */
static int
__adp1653_set_power(struct adp1653_flash *flash, int on)
{
	int ret;

	if (flash->platform_data->power) {
		ret = flash->platform_data->power(&flash->subdev, on);
		if (ret < 0)
			return ret;
	} else {
		gpiod_set_value(flash->platform_data->enable_gpio, on);
		if (on)
			/* Some delay is apparently required. */
			udelay(20);
	}

	if (!on)
		return 0;

	ret = adp1653_init_device(flash);
	if (ret >= 0)
		return ret;

	/* Init failed: undo the power-up before reporting the error. */
	if (flash->platform_data->power)
		flash->platform_data->power(&flash->subdev, 0);
	else
		gpiod_set_value(flash->platform_data->enable_gpio, 0);

	return ret;
}

/* Reference-counted s_power: only the 0<->1 transitions touch hardware. */
static int adp1653_set_power(struct v4l2_subdev *subdev, int on)
{
	struct adp1653_flash *flash = to_adp1653_flash(subdev);
	int ret = 0;

	mutex_lock(&flash->power_lock);

	/* If the power count is modified from 0 to != 0 or from != 0 to 0,
	 * update the power state.
	 */
	if (flash->power_count == !on) {
		ret = __adp1653_set_power(flash, !!on);
		if (ret < 0)
			goto done;
	}

	/* Update the power count. */
	flash->power_count += on ? 1 : -1;
	WARN_ON(flash->power_count < 0);

done:
	mutex_unlock(&flash->power_lock);
	return ret;
}

static int adp1653_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
	return adp1653_set_power(sd, 1);
}

static int adp1653_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
	return adp1653_set_power(sd, 0);
}

static const struct v4l2_subdev_core_ops adp1653_core_ops = {
	.s_power = adp1653_set_power,
};

static const struct v4l2_subdev_ops adp1653_ops = {
	.core = &adp1653_core_ops,
};

static const struct v4l2_subdev_internal_ops adp1653_internal_ops = {
	.open = adp1653_open,
	.close = adp1653_close,
};

/* --------------------------------------------------------------------------
 * I2C driver
 */
#ifdef CONFIG_PM

/* System suspend: power down only if the device is currently in use. */
static int adp1653_suspend(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct v4l2_subdev *subdev = i2c_get_clientdata(client);
	struct adp1653_flash *flash = to_adp1653_flash(subdev);

	if (!flash->power_count)
		return 0;

	return __adp1653_set_power(flash, 0);
}

/* System resume: restore power if the device was in use at suspend. */
static int adp1653_resume(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct v4l2_subdev *subdev = i2c_get_clientdata(client);
	struct adp1653_flash *flash = to_adp1653_flash(subdev);

	if (!flash->power_count)
		return 0;

	return __adp1653_set_power(flash, 1);
}

#else

#define adp1653_suspend	NULL
#define adp1653_resume	NULL

#endif /* CONFIG_PM */

/*
 * Build platform data from the device-tree "flash" and "indicator"
 * child nodes and acquire the enable GPIO.  Returns 0 or negative errno.
 */
static int adp1653_of_init(struct i2c_client *client,
			   struct adp1653_flash *flash,
			   struct device_node *node)
{
	struct adp1653_platform_data *pd;
	struct device_node *child;

	pd = devm_kzalloc(&client->dev, sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return -ENOMEM;
	flash->platform_data = pd;

	child = of_get_child_by_name(node, "flash");
	if (!child)
		return -EINVAL;
	if (of_property_read_u32(child, "flash-timeout-us",
				 &pd->max_flash_timeout))
		goto err;

	if (of_property_read_u32(child, "flash-max-microamp",
				 &pd->max_flash_intensity))
		goto err;

	pd->max_flash_intensity /= 1000;

	if (of_property_read_u32(child, "led-max-microamp",
				 &pd->max_torch_intensity))
		goto err;

	pd->max_torch_intensity /= 1000;
	of_node_put(child);

	child = of_get_child_by_name(node, "indicator");
	if (!child)
		return -EINVAL;
	if (of_property_read_u32(child, "led-max-microamp",
				 &pd->max_indicator_intensity))
		goto err;

	of_node_put(child);

	pd->enable_gpio = devm_gpiod_get(&client->dev, "enable");
	/*
	 * FIX: gpiod getters return ERR_PTR() on failure, never NULL, so
	 * the previous "if (!pd->enable_gpio)" check could never trigger
	 * and a failed GPIO lookup was silently accepted.
	 */
	if (IS_ERR(pd->enable_gpio)) {
		dev_err(&client->dev, "Error getting GPIO\n");
		return PTR_ERR(pd->enable_gpio);
	}

	return 0;
err:
	dev_err(&client->dev, "Required property not found\n");
	of_node_put(child);
	return -EINVAL;
}

/*
 * Probe: gather configuration (DT or platform data), register the subdev
 * and its controls and media entity.  Returns 0 or negative errno.
 */
static int adp1653_probe(struct i2c_client *client,
			 const struct i2c_device_id *devid)
{
	struct adp1653_flash *flash;
	int ret;

	flash = devm_kzalloc(&client->dev, sizeof(*flash), GFP_KERNEL);
	if (flash == NULL)
		return -ENOMEM;

	if (client->dev.of_node) {
		ret = adp1653_of_init(client, flash, client->dev.of_node);
		if (ret)
			return ret;
	} else {
		if (!client->dev.platform_data) {
			dev_err(&client->dev,
				"Neither DT nor platform data provided\n");
			/* FIX: was "return EINVAL;" — probe must return a
			 * negative errno, a positive value is not an error
			 * code by the driver-model convention. */
			return -EINVAL;
		}
		flash->platform_data = client->dev.platform_data;
	}

	mutex_init(&flash->power_lock);

	v4l2_i2c_subdev_init(&flash->subdev, client, &adp1653_ops);
	flash->subdev.internal_ops = &adp1653_internal_ops;
	flash->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;

	ret = adp1653_init_controls(flash);
	if (ret)
		goto free_and_quit;

	ret = media_entity_init(&flash->subdev.entity, 0, NULL, 0);
	if (ret < 0)
		goto free_and_quit;

	flash->subdev.entity.type = MEDIA_ENT_T_V4L2_SUBDEV_FLASH;

	return 0;

free_and_quit:
	dev_err(&client->dev, "adp1653: failed to register device\n");
	v4l2_ctrl_handler_free(&flash->ctrls);
	return ret;
}

static int adp1653_remove(struct i2c_client *client)
{
	struct v4l2_subdev *subdev = i2c_get_clientdata(client);
	struct adp1653_flash *flash = to_adp1653_flash(subdev);

	v4l2_device_unregister_subdev(&flash->subdev);
	v4l2_ctrl_handler_free(&flash->ctrls);
	media_entity_cleanup(&flash->subdev.entity);

	return 0;
}

static const struct i2c_device_id adp1653_id_table[] = {
	{ ADP1653_NAME, 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, adp1653_id_table);

static const struct dev_pm_ops adp1653_pm_ops = {
	.suspend	= adp1653_suspend,
	.resume		= adp1653_resume,
};

static struct i2c_driver adp1653_i2c_driver = {
	.driver		= {
		.name	= ADP1653_NAME,
		.pm	= &adp1653_pm_ops,
	},
	.probe		= adp1653_probe,
	.remove		= adp1653_remove,
	.id_table	= adp1653_id_table,
};

module_i2c_driver(adp1653_i2c_driver);

MODULE_AUTHOR("Sakari Ailus <sakari.ailus@nokia.com>");
MODULE_DESCRIPTION("Analog Devices ADP1653 LED flash driver");
MODULE_LICENSE("GPL");
gpl-2.0
kusl/linux
tools/testing/selftests/seccomp/seccomp_bpf.c
195
63047
/* * Copyright (c) 2012 The Chromium OS Authors. All rights reserved. * Use of this source code is governed by the GPLv2 license. * * Test code for seccomp bpf. */ #include <sys/types.h> #include <asm/siginfo.h> #define __have_siginfo_t 1 #define __have_sigval_t 1 #define __have_sigevent_t 1 #include <errno.h> #include <linux/filter.h> #include <sys/prctl.h> #include <sys/ptrace.h> #include <sys/user.h> #include <linux/prctl.h> #include <linux/ptrace.h> #include <linux/seccomp.h> #include <pthread.h> #include <semaphore.h> #include <signal.h> #include <stddef.h> #include <stdbool.h> #include <string.h> #include <time.h> #include <linux/elf.h> #include <sys/uio.h> #include <sys/utsname.h> #include <sys/fcntl.h> #include <sys/mman.h> #include <sys/times.h> #define _GNU_SOURCE #include <unistd.h> #include <sys/syscall.h> #include "test_harness.h" #ifndef PR_SET_PTRACER # define PR_SET_PTRACER 0x59616d61 #endif #ifndef PR_SET_NO_NEW_PRIVS #define PR_SET_NO_NEW_PRIVS 38 #define PR_GET_NO_NEW_PRIVS 39 #endif #ifndef PR_SECCOMP_EXT #define PR_SECCOMP_EXT 43 #endif #ifndef SECCOMP_EXT_ACT #define SECCOMP_EXT_ACT 1 #endif #ifndef SECCOMP_EXT_ACT_TSYNC #define SECCOMP_EXT_ACT_TSYNC 1 #endif #ifndef SECCOMP_MODE_STRICT #define SECCOMP_MODE_STRICT 1 #endif #ifndef SECCOMP_MODE_FILTER #define SECCOMP_MODE_FILTER 2 #endif #ifndef SECCOMP_RET_KILL #define SECCOMP_RET_KILL 0x00000000U /* kill the task immediately */ #define SECCOMP_RET_TRAP 0x00030000U /* disallow and force a SIGSYS */ #define SECCOMP_RET_ERRNO 0x00050000U /* returns an errno */ #define SECCOMP_RET_TRACE 0x7ff00000U /* pass to a tracer or disallow */ #define SECCOMP_RET_ALLOW 0x7fff0000U /* allow */ /* Masks for the return value sections. 
*/ #define SECCOMP_RET_ACTION 0x7fff0000U #define SECCOMP_RET_DATA 0x0000ffffU struct seccomp_data { int nr; __u32 arch; __u64 instruction_pointer; __u64 args[6]; }; #endif #if __BYTE_ORDER == __LITTLE_ENDIAN #define syscall_arg(_n) (offsetof(struct seccomp_data, args[_n])) #elif __BYTE_ORDER == __BIG_ENDIAN #define syscall_arg(_n) (offsetof(struct seccomp_data, args[_n]) + sizeof(__u32)) #else #error "wut? Unknown __BYTE_ORDER?!" #endif #define SIBLING_EXIT_UNKILLED 0xbadbeef #define SIBLING_EXIT_FAILURE 0xbadface #define SIBLING_EXIT_NEWPRIVS 0xbadfeed TEST(mode_strict_support) { long ret; ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, NULL, NULL, NULL); ASSERT_EQ(0, ret) { TH_LOG("Kernel does not support CONFIG_SECCOMP"); } syscall(__NR_exit, 1); } TEST_SIGNAL(mode_strict_cannot_call_prctl, SIGKILL) { long ret; ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, NULL, NULL, NULL); ASSERT_EQ(0, ret) { TH_LOG("Kernel does not support CONFIG_SECCOMP"); } syscall(__NR_prctl, PR_SET_SECCOMP, SECCOMP_MODE_FILTER, NULL, NULL, NULL); EXPECT_FALSE(true) { TH_LOG("Unreachable!"); } } /* Note! This doesn't test no new privs behavior */ TEST(no_new_privs_support) { long ret; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); EXPECT_EQ(0, ret) { TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); } } /* Tests kernel support by checking for a copy_from_user() fault on * NULL. 
*/ TEST(mode_filter_support) { long ret; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, NULL, 0, 0); ASSERT_EQ(0, ret) { TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); } ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, NULL, NULL, NULL); EXPECT_EQ(-1, ret); EXPECT_EQ(EFAULT, errno) { TH_LOG("Kernel does not support CONFIG_SECCOMP_FILTER!"); } } TEST(mode_filter_without_nnp) { struct sock_filter filter[] = { BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; long ret; ret = prctl(PR_GET_NO_NEW_PRIVS, 0, NULL, 0, 0); ASSERT_LE(0, ret) { TH_LOG("Expected 0 or unsupported for NO_NEW_PRIVS"); } errno = 0; ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); /* Succeeds with CAP_SYS_ADMIN, fails without */ /* TODO(wad) check caps not euid */ if (geteuid()) { EXPECT_EQ(-1, ret); EXPECT_EQ(EACCES, errno); } else { EXPECT_EQ(0, ret); } } #define MAX_INSNS_PER_PATH 32768 TEST(filter_size_limits) { int i; int count = BPF_MAXINSNS + 1; struct sock_filter allow[] = { BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; struct sock_filter *filter; struct sock_fprog prog = { }; long ret; filter = calloc(count, sizeof(*filter)); ASSERT_NE(NULL, filter); for (i = 0; i < count; i++) filter[i] = allow[0]; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); prog.filter = filter; prog.len = count; /* Too many filter instructions in a single filter. */ ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); ASSERT_NE(0, ret) { TH_LOG("Installing %d insn filter was allowed", prog.len); } /* One less is okay, though. 
*/ prog.len -= 1; ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); ASSERT_EQ(0, ret) { TH_LOG("Installing %d insn filter wasn't allowed", prog.len); } } TEST(filter_chain_limits) { int i; int count = BPF_MAXINSNS; struct sock_filter allow[] = { BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; struct sock_filter *filter; struct sock_fprog prog = { }; long ret; filter = calloc(count, sizeof(*filter)); ASSERT_NE(NULL, filter); for (i = 0; i < count; i++) filter[i] = allow[0]; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); prog.filter = filter; prog.len = 1; ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); ASSERT_EQ(0, ret); prog.len = count; /* Too many total filter instructions. */ for (i = 0; i < MAX_INSNS_PER_PATH; i++) { ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); if (ret != 0) break; } ASSERT_NE(0, ret) { TH_LOG("Allowed %d %d-insn filters (total with penalties:%d)", i, count, i * (count + 4)); } } TEST(mode_filter_cannot_move_to_strict) { struct sock_filter filter[] = { BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; long ret; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, NULL, 0, 0); EXPECT_EQ(-1, ret); EXPECT_EQ(EINVAL, errno); } TEST(mode_filter_get_seccomp) { struct sock_filter filter[] = { BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; long ret; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_GET_SECCOMP, 0, 0, 0, 0); EXPECT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_GET_SECCOMP, 0, 0, 0, 0); EXPECT_EQ(2, ret); } TEST(ALLOW_all) { struct sock_filter filter[] = { 
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; long ret; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog); ASSERT_EQ(0, ret); } TEST(empty_prog) { struct sock_filter filter[] = { }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; long ret; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog); EXPECT_EQ(-1, ret); EXPECT_EQ(EINVAL, errno); } TEST_SIGNAL(unknown_ret_is_kill_inside, SIGSYS) { struct sock_filter filter[] = { BPF_STMT(BPF_RET|BPF_K, 0x10000000U), }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; long ret; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog); ASSERT_EQ(0, ret); EXPECT_EQ(0, syscall(__NR_getpid)) { TH_LOG("getpid() shouldn't ever return"); } } /* return code >= 0x80000000 is unused. 
*/
/*
 * KILL and ERRNO action coverage: return codes above SECCOMP_RET_ALLOW act
 * as KILL; filters may kill on a syscall number (KILL_one) or on an argument
 * value (KILL_one_arg_one uses arg0 of times(), KILL_one_arg_six uses arg5 of
 * mmap/mmap2); loading past args[5] is rejected (arg_out_of_range); ERRNO
 * actions deliver the embedded errno (E2BIG), allow errno 0, and cap data at
 * 4095.  Also defines the TRAP fixture whose filter traps getpid with SIGSYS.
 */
TEST_SIGNAL(unknown_ret_is_kill_above_allow, SIGSYS) { struct sock_filter filter[] = { BPF_STMT(BPF_RET|BPF_K, 0x90000000U), }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; long ret; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog); ASSERT_EQ(0, ret); EXPECT_EQ(0, syscall(__NR_getpid)) { TH_LOG("getpid() shouldn't ever return"); } } TEST_SIGNAL(KILL_all, SIGSYS) { struct sock_filter filter[] = { BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL), }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; long ret; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog); ASSERT_EQ(0, ret); } TEST_SIGNAL(KILL_one, SIGSYS) { struct sock_filter filter[] = { BPF_STMT(BPF_LD|BPF_W|BPF_ABS, offsetof(struct seccomp_data, nr)), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; long ret; pid_t parent = getppid(); ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog); ASSERT_EQ(0, ret); EXPECT_EQ(parent, syscall(__NR_getppid)); /* getpid() should never return. */ EXPECT_EQ(0, syscall(__NR_getpid)); } TEST_SIGNAL(KILL_one_arg_one, SIGSYS) { void *fatal_address; struct sock_filter filter[] = { BPF_STMT(BPF_LD|BPF_W|BPF_ABS, offsetof(struct seccomp_data, nr)), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_times, 1, 0), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), /* Only both with lower 32-bit for now. 
*/ BPF_STMT(BPF_LD|BPF_W|BPF_ABS, syscall_arg(0)), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, (unsigned long)&fatal_address, 0, 1), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; long ret; pid_t parent = getppid(); struct tms timebuf; clock_t clock = times(&timebuf); ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog); ASSERT_EQ(0, ret); EXPECT_EQ(parent, syscall(__NR_getppid)); EXPECT_LE(clock, syscall(__NR_times, &timebuf)); /* times() should never return. */ EXPECT_EQ(0, syscall(__NR_times, &fatal_address)); } TEST_SIGNAL(KILL_one_arg_six, SIGSYS) { #ifndef __NR_mmap2 int sysno = __NR_mmap; #else int sysno = __NR_mmap2; #endif struct sock_filter filter[] = { BPF_STMT(BPF_LD|BPF_W|BPF_ABS, offsetof(struct seccomp_data, nr)), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, sysno, 1, 0), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), /* Only both with lower 32-bit for now. */ BPF_STMT(BPF_LD|BPF_W|BPF_ABS, syscall_arg(5)), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, 0x0C0FFEE, 0, 1), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; long ret; pid_t parent = getppid(); int fd; void *map1, *map2; int page_size = sysconf(_SC_PAGESIZE); ASSERT_LT(0, page_size); ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog); ASSERT_EQ(0, ret); fd = open("/dev/zero", O_RDONLY); ASSERT_NE(-1, fd); EXPECT_EQ(parent, syscall(__NR_getppid)); map1 = (void *)syscall(sysno, NULL, page_size, PROT_READ, MAP_PRIVATE, fd, page_size); EXPECT_NE(MAP_FAILED, map1); /* mmap2() should never return. 
*/ map2 = (void *)syscall(sysno, NULL, page_size, PROT_READ, MAP_PRIVATE, fd, 0x0C0FFEE); EXPECT_EQ(MAP_FAILED, map2); /* The test failed, so clean up the resources. */ munmap(map1, page_size); munmap(map2, page_size); close(fd); } /* TODO(wad) add 64-bit versus 32-bit arg tests. */ TEST(arg_out_of_range) { struct sock_filter filter[] = { BPF_STMT(BPF_LD|BPF_W|BPF_ABS, syscall_arg(6)), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; long ret; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog); EXPECT_EQ(-1, ret); EXPECT_EQ(EINVAL, errno); } TEST(ERRNO_valid) { struct sock_filter filter[] = { BPF_STMT(BPF_LD|BPF_W|BPF_ABS, offsetof(struct seccomp_data, nr)), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | E2BIG), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; long ret; pid_t parent = getppid(); ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog); ASSERT_EQ(0, ret); EXPECT_EQ(parent, syscall(__NR_getppid)); EXPECT_EQ(-1, read(0, NULL, 0)); EXPECT_EQ(E2BIG, errno); } TEST(ERRNO_zero) { struct sock_filter filter[] = { BPF_STMT(BPF_LD|BPF_W|BPF_ABS, offsetof(struct seccomp_data, nr)), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | 0), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; long ret; pid_t parent = getppid(); ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog); ASSERT_EQ(0, ret); EXPECT_EQ(parent, syscall(__NR_getppid)); /* "errno" of 0 is ok. 
*/ EXPECT_EQ(0, read(0, NULL, 0)); } TEST(ERRNO_capped) { struct sock_filter filter[] = { BPF_STMT(BPF_LD|BPF_W|BPF_ABS, offsetof(struct seccomp_data, nr)), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | 4096), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; long ret; pid_t parent = getppid(); ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog); ASSERT_EQ(0, ret); EXPECT_EQ(parent, syscall(__NR_getppid)); EXPECT_EQ(-1, read(0, NULL, 0)); EXPECT_EQ(4095, errno); } FIXTURE_DATA(TRAP) { struct sock_fprog prog; }; FIXTURE_SETUP(TRAP) { struct sock_filter filter[] = { BPF_STMT(BPF_LD|BPF_W|BPF_ABS, offsetof(struct seccomp_data, nr)), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRAP), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; memset(&self->prog, 0, sizeof(self->prog)); self->prog.filter = malloc(sizeof(filter)); ASSERT_NE(NULL, self->prog.filter); memcpy(self->prog.filter, filter, sizeof(filter)); self->prog.len = (unsigned short)ARRAY_SIZE(filter); } FIXTURE_TEARDOWN(TRAP) { if (self->prog.filter) free(self->prog.filter); } TEST_F_SIGNAL(TRAP, dfl, SIGSYS) { long ret; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog); ASSERT_EQ(0, ret); syscall(__NR_getpid); } /* Ensure that SIGSYS overrides SIG_IGN */ TEST_F_SIGNAL(TRAP, ign, SIGSYS) { long ret; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); signal(SIGSYS, SIG_IGN); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog); ASSERT_EQ(0, ret); syscall(__NR_getpid); } static struct siginfo TRAP_info; static volatile int TRAP_nr; static void TRAP_action(int nr, siginfo_t *info, void *void_context) { memcpy(&TRAP_info, info, sizeof(TRAP_info)); TRAP_nr = nr; } TEST_F(TRAP,
/*
 * TRAP handler inspects the SIGSYS siginfo (syscall nr, arch, call address)
 * delivered for the trapped getpid.  The "precedence" fixture then installs
 * multiple filters matching getpid with different actions and verifies the
 * ordering kill > trap > errno > trace, regardless of install order, followed
 * by the start_tracer() helper that attaches via ptrace and forwards
 * seccomp/syscall stop events to a per-test callback.
 */
handler) { int ret, test; struct sigaction act; sigset_t mask; memset(&act, 0, sizeof(act)); sigemptyset(&mask); sigaddset(&mask, SIGSYS); act.sa_sigaction = &TRAP_action; act.sa_flags = SA_SIGINFO; ret = sigaction(SIGSYS, &act, NULL); ASSERT_EQ(0, ret) { TH_LOG("sigaction failed"); } ret = sigprocmask(SIG_UNBLOCK, &mask, NULL); ASSERT_EQ(0, ret) { TH_LOG("sigprocmask failed"); } ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog); ASSERT_EQ(0, ret); TRAP_nr = 0; memset(&TRAP_info, 0, sizeof(TRAP_info)); /* Expect the registers to be rolled back. (nr = error) may vary * based on arch. */ ret = syscall(__NR_getpid); /* Silence gcc warning about volatile. */ test = TRAP_nr; EXPECT_EQ(SIGSYS, test); struct local_sigsys { void *_call_addr; /* calling user insn */ int _syscall; /* triggering system call number */ unsigned int _arch; /* AUDIT_ARCH_* of syscall */ } *sigsys = (struct local_sigsys *) #ifdef si_syscall &(TRAP_info.si_call_addr); #else &TRAP_info.si_pid; #endif EXPECT_EQ(__NR_getpid, sigsys->_syscall); /* Make sure arch is non-zero. 
*/ EXPECT_NE(0, sigsys->_arch); EXPECT_NE(0, (unsigned long)sigsys->_call_addr); } FIXTURE_DATA(precedence) { struct sock_fprog allow; struct sock_fprog trace; struct sock_fprog error; struct sock_fprog trap; struct sock_fprog kill; }; FIXTURE_SETUP(precedence) { struct sock_filter allow_insns[] = { BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; struct sock_filter trace_insns[] = { BPF_STMT(BPF_LD|BPF_W|BPF_ABS, offsetof(struct seccomp_data, nr)), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE), }; struct sock_filter error_insns[] = { BPF_STMT(BPF_LD|BPF_W|BPF_ABS, offsetof(struct seccomp_data, nr)), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO), }; struct sock_filter trap_insns[] = { BPF_STMT(BPF_LD|BPF_W|BPF_ABS, offsetof(struct seccomp_data, nr)), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRAP), }; struct sock_filter kill_insns[] = { BPF_STMT(BPF_LD|BPF_W|BPF_ABS, offsetof(struct seccomp_data, nr)), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL), }; memset(self, 0, sizeof(*self)); #define FILTER_ALLOC(_x) \ self->_x.filter = malloc(sizeof(_x##_insns)); \ ASSERT_NE(NULL, self->_x.filter); \ memcpy(self->_x.filter, &_x##_insns, sizeof(_x##_insns)); \ self->_x.len = (unsigned short)ARRAY_SIZE(_x##_insns) FILTER_ALLOC(allow); FILTER_ALLOC(trace); FILTER_ALLOC(error); FILTER_ALLOC(trap); FILTER_ALLOC(kill); } FIXTURE_TEARDOWN(precedence) { #define FILTER_FREE(_x) if (self->_x.filter) free(self->_x.filter) FILTER_FREE(allow); FILTER_FREE(trace); FILTER_FREE(error); FILTER_FREE(trap); FILTER_FREE(kill); } TEST_F(precedence, allow_ok) { pid_t parent, res = 0; long ret; parent = getppid(); ret = 
prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->kill); ASSERT_EQ(0, ret); /* Should work just fine. */ res = syscall(__NR_getppid); EXPECT_EQ(parent, res); } TEST_F_SIGNAL(precedence, kill_is_highest, SIGSYS) { pid_t parent, res = 0; long ret; parent = getppid(); ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->kill); ASSERT_EQ(0, ret); /* Should work just fine. */ res = syscall(__NR_getppid); EXPECT_EQ(parent, res); /* getpid() should never return. */ res = syscall(__NR_getpid); EXPECT_EQ(0, res); } TEST_F_SIGNAL(precedence, kill_is_highest_in_any_order, SIGSYS) { pid_t parent; long ret; parent = getppid(); ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->kill); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap); ASSERT_EQ(0, ret); /* Should work just fine. */ EXPECT_EQ(parent, syscall(__NR_getppid)); /* getpid() should never return. 
*/ EXPECT_EQ(0, syscall(__NR_getpid)); } TEST_F_SIGNAL(precedence, trap_is_second, SIGSYS) { pid_t parent; long ret; parent = getppid(); ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap); ASSERT_EQ(0, ret); /* Should work just fine. */ EXPECT_EQ(parent, syscall(__NR_getppid)); /* getpid() should never return. */ EXPECT_EQ(0, syscall(__NR_getpid)); } TEST_F_SIGNAL(precedence, trap_is_second_in_any_order, SIGSYS) { pid_t parent; long ret; parent = getppid(); ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error); ASSERT_EQ(0, ret); /* Should work just fine. */ EXPECT_EQ(parent, syscall(__NR_getppid)); /* getpid() should never return. */ EXPECT_EQ(0, syscall(__NR_getpid)); } TEST_F(precedence, errno_is_third) { pid_t parent; long ret; parent = getppid(); ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error); ASSERT_EQ(0, ret); /* Should work just fine. 
*/ EXPECT_EQ(parent, syscall(__NR_getppid)); EXPECT_EQ(0, syscall(__NR_getpid)); } TEST_F(precedence, errno_is_third_in_any_order) { pid_t parent; long ret; parent = getppid(); ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow); ASSERT_EQ(0, ret); /* Should work just fine. */ EXPECT_EQ(parent, syscall(__NR_getppid)); EXPECT_EQ(0, syscall(__NR_getpid)); } TEST_F(precedence, trace_is_fourth) { pid_t parent; long ret; parent = getppid(); ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace); ASSERT_EQ(0, ret); /* Should work just fine. */ EXPECT_EQ(parent, syscall(__NR_getppid)); /* No ptracer */ EXPECT_EQ(-1, syscall(__NR_getpid)); } TEST_F(precedence, trace_is_fourth_in_any_order) { pid_t parent; long ret; parent = getppid(); ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow); ASSERT_EQ(0, ret); /* Should work just fine. */ EXPECT_EQ(parent, syscall(__NR_getppid)); /* No ptracer */ EXPECT_EQ(-1, syscall(__NR_getpid)); } #ifndef PTRACE_O_TRACESECCOMP #define PTRACE_O_TRACESECCOMP 0x00000080 #endif /* Catch the Ubuntu 12.04 value error. 
*/ #if PTRACE_EVENT_SECCOMP != 7 #undef PTRACE_EVENT_SECCOMP #endif #ifndef PTRACE_EVENT_SECCOMP #define PTRACE_EVENT_SECCOMP 7 #endif #define IS_SECCOMP_EVENT(status) ((status >> 16) == PTRACE_EVENT_SECCOMP) bool tracer_running; void tracer_stop(int sig) { tracer_running = false; } typedef void tracer_func_t(struct __test_metadata *_metadata, pid_t tracee, int status, void *args); void start_tracer(struct __test_metadata *_metadata, int fd, pid_t tracee, tracer_func_t tracer_func, void *args, bool ptrace_syscall) { int ret = -1; struct sigaction action = { .sa_handler = tracer_stop, }; /* Allow external shutdown. */ tracer_running = true; ASSERT_EQ(0, sigaction(SIGUSR1, &action, NULL)); errno = 0; while (ret == -1 && errno != EINVAL) ret = ptrace(PTRACE_ATTACH, tracee, NULL, 0); ASSERT_EQ(0, ret) { kill(tracee, SIGKILL); } /* Wait for attach stop */ wait(NULL); ret = ptrace(PTRACE_SETOPTIONS, tracee, NULL, ptrace_syscall ? PTRACE_O_TRACESYSGOOD : PTRACE_O_TRACESECCOMP); ASSERT_EQ(0, ret) { TH_LOG("Failed to set PTRACE_O_TRACESECCOMP"); kill(tracee, SIGKILL); } ret = ptrace(ptrace_syscall ? PTRACE_SYSCALL : PTRACE_CONT, tracee, NULL, 0); ASSERT_EQ(0, ret); /* Unblock the tracee */ ASSERT_EQ(1, write(fd, "A", 1)); ASSERT_EQ(0, close(fd)); /* Run until we're shut down. Must assert to stop execution. */ while (tracer_running) { int status; if (wait(&status) != tracee) continue; if (WIFSIGNALED(status) || WIFEXITED(status)) /* Child is dead. Time to go. */ return; /* Check if this is a seccomp event. */ ASSERT_EQ(!ptrace_syscall, IS_SECCOMP_EVENT(status)); tracer_func(_metadata, tracee, status, args); ret = ptrace(ptrace_syscall ? PTRACE_SYSCALL : PTRACE_CONT, tracee, NULL, 0); ASSERT_EQ(0, ret); } /* Directly report the status of our test harness results. */ syscall(__NR_exit, _metadata->passed ? EXIT_SUCCESS : EXIT_FAILURE); } /* Common tracer setup/teardown functions. 
*/
/*
 * Tracer infrastructure and TRACE fixtures: setup_trace_fixture() forks a
 * child tracer (synchronized over a pipe, granted access via PR_SET_PTRACER)
 * and teardown_trace_fixture() reaps it, adopting its exit status.  The
 * TRACE_poke fixture has the tracer write 0x1001 into tracee memory when a
 * read() triggers SECCOMP_RET_TRACE; arch-specific register macros plus
 * get_syscall()/change_syscall() let tracer_syscall()/tracer_ptrace()
 * rewrite getpid to getppid or skip gettid, exercised by the TRACE_syscall
 * fixture tests.  (The final test is truncated in this chunk.)
 */
void cont_handler(int num) { } pid_t setup_trace_fixture(struct __test_metadata *_metadata, tracer_func_t func, void *args, bool ptrace_syscall) { char sync; int pipefd[2]; pid_t tracer_pid; pid_t tracee = getpid(); /* Setup a pipe for clean synchronization. */ ASSERT_EQ(0, pipe(pipefd)); /* Fork a child which we'll promote to tracer */ tracer_pid = fork(); ASSERT_LE(0, tracer_pid); signal(SIGALRM, cont_handler); if (tracer_pid == 0) { close(pipefd[0]); start_tracer(_metadata, pipefd[1], tracee, func, args, ptrace_syscall); syscall(__NR_exit, 0); } close(pipefd[1]); prctl(PR_SET_PTRACER, tracer_pid, 0, 0, 0); read(pipefd[0], &sync, 1); close(pipefd[0]); return tracer_pid; } void teardown_trace_fixture(struct __test_metadata *_metadata, pid_t tracer) { if (tracer) { int status; /* * Extract the exit code from the other process and * adopt it for ourselves in case its asserts failed. */ ASSERT_EQ(0, kill(tracer, SIGUSR1)); ASSERT_EQ(tracer, waitpid(tracer, &status, 0)); if (WEXITSTATUS(status)) _metadata->passed = 0; } } /* "poke" tracer arguments and function. */ struct tracer_args_poke_t { unsigned long poke_addr; }; void tracer_poke(struct __test_metadata *_metadata, pid_t tracee, int status, void *args) { int ret; unsigned long msg; struct tracer_args_poke_t *info = (struct tracer_args_poke_t *)args; ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg); EXPECT_EQ(0, ret); /* If this fails, don't try to recover. */ ASSERT_EQ(0x1001, msg) { kill(tracee, SIGKILL); } /* * Poke in the message. * Registers are not touched to try to keep this relatively arch * agnostic. 
*/ ret = ptrace(PTRACE_POKEDATA, tracee, info->poke_addr, 0x1001); EXPECT_EQ(0, ret); } FIXTURE_DATA(TRACE_poke) { struct sock_fprog prog; pid_t tracer; long poked; struct tracer_args_poke_t tracer_args; }; FIXTURE_SETUP(TRACE_poke) { struct sock_filter filter[] = { BPF_STMT(BPF_LD|BPF_W|BPF_ABS, offsetof(struct seccomp_data, nr)), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1001), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; self->poked = 0; memset(&self->prog, 0, sizeof(self->prog)); self->prog.filter = malloc(sizeof(filter)); ASSERT_NE(NULL, self->prog.filter); memcpy(self->prog.filter, filter, sizeof(filter)); self->prog.len = (unsigned short)ARRAY_SIZE(filter); /* Set up tracer args. */ self->tracer_args.poke_addr = (unsigned long)&self->poked; /* Launch tracer. */ self->tracer = setup_trace_fixture(_metadata, tracer_poke, &self->tracer_args, false); } FIXTURE_TEARDOWN(TRACE_poke) { teardown_trace_fixture(_metadata, self->tracer); if (self->prog.filter) free(self->prog.filter); } TEST_F(TRACE_poke, read_has_side_effects) { ssize_t ret; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0); ASSERT_EQ(0, ret); EXPECT_EQ(0, self->poked); ret = read(-1, NULL, 0); EXPECT_EQ(-1, ret); EXPECT_EQ(0x1001, self->poked); } TEST_F(TRACE_poke, getpid_runs_normally) { long ret; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0); ASSERT_EQ(0, ret); EXPECT_EQ(0, self->poked); EXPECT_NE(0, syscall(__NR_getpid)); EXPECT_EQ(0, self->poked); } #if defined(__x86_64__) # define ARCH_REGS struct user_regs_struct # define SYSCALL_NUM orig_rax # define SYSCALL_RET rax #elif defined(__i386__) # define ARCH_REGS struct user_regs_struct # define SYSCALL_NUM orig_eax # define SYSCALL_RET eax #elif defined(__arm__) # define ARCH_REGS struct pt_regs # define SYSCALL_NUM ARM_r7 # 
define SYSCALL_RET ARM_r0 #elif defined(__aarch64__) # define ARCH_REGS struct user_pt_regs # define SYSCALL_NUM regs[8] # define SYSCALL_RET regs[0] #elif defined(__hppa__) # define ARCH_REGS struct user_regs_struct # define SYSCALL_NUM gr[20] # define SYSCALL_RET gr[28] #elif defined(__powerpc__) # define ARCH_REGS struct pt_regs # define SYSCALL_NUM gpr[0] # define SYSCALL_RET gpr[3] #elif defined(__s390__) # define ARCH_REGS s390_regs # define SYSCALL_NUM gprs[2] # define SYSCALL_RET gprs[2] #elif defined(__mips__) # define ARCH_REGS struct pt_regs # define SYSCALL_NUM regs[2] # define SYSCALL_SYSCALL_NUM regs[4] # define SYSCALL_RET regs[2] # define SYSCALL_NUM_RET_SHARE_REG #else # error "Do not know how to find your architecture's registers and syscalls" #endif /* Use PTRACE_GETREGS and PTRACE_SETREGS when available. This is useful for * architectures without HAVE_ARCH_TRACEHOOK (e.g. User-mode Linux). */ #if defined(__x86_64__) || defined(__i386__) || defined(__mips__) #define HAVE_GETREGS #endif /* Architecture-specific syscall fetching routine. */ int get_syscall(struct __test_metadata *_metadata, pid_t tracee) { ARCH_REGS regs; #ifdef HAVE_GETREGS EXPECT_EQ(0, ptrace(PTRACE_GETREGS, tracee, 0, &regs)) { TH_LOG("PTRACE_GETREGS failed"); return -1; } #else struct iovec iov; iov.iov_base = &regs; iov.iov_len = sizeof(regs); EXPECT_EQ(0, ptrace(PTRACE_GETREGSET, tracee, NT_PRSTATUS, &iov)) { TH_LOG("PTRACE_GETREGSET failed"); return -1; } #endif #if defined(__mips__) if (regs.SYSCALL_NUM == __NR_O32_Linux) return regs.SYSCALL_SYSCALL_NUM; #endif return regs.SYSCALL_NUM; } /* Architecture-specific syscall changing routine. 
*/ void change_syscall(struct __test_metadata *_metadata, pid_t tracee, int syscall) { int ret; ARCH_REGS regs; #ifdef HAVE_GETREGS ret = ptrace(PTRACE_GETREGS, tracee, 0, &regs); #else struct iovec iov; iov.iov_base = &regs; iov.iov_len = sizeof(regs); ret = ptrace(PTRACE_GETREGSET, tracee, NT_PRSTATUS, &iov); #endif EXPECT_EQ(0, ret); #if defined(__x86_64__) || defined(__i386__) || defined(__powerpc__) || \ defined(__s390__) || defined(__hppa__) { regs.SYSCALL_NUM = syscall; } #elif defined(__mips__) { if (regs.SYSCALL_NUM == __NR_O32_Linux) regs.SYSCALL_SYSCALL_NUM = syscall; else regs.SYSCALL_NUM = syscall; } #elif defined(__arm__) # ifndef PTRACE_SET_SYSCALL # define PTRACE_SET_SYSCALL 23 # endif { ret = ptrace(PTRACE_SET_SYSCALL, tracee, NULL, syscall); EXPECT_EQ(0, ret); } #elif defined(__aarch64__) # ifndef NT_ARM_SYSTEM_CALL # define NT_ARM_SYSTEM_CALL 0x404 # endif { iov.iov_base = &syscall; iov.iov_len = sizeof(syscall); ret = ptrace(PTRACE_SETREGSET, tracee, NT_ARM_SYSTEM_CALL, &iov); EXPECT_EQ(0, ret); } #else ASSERT_EQ(1, 0) { TH_LOG("How is the syscall changed on this architecture?"); } #endif /* If syscall is skipped, change return value. */ if (syscall == -1) #ifdef SYSCALL_NUM_RET_SHARE_REG TH_LOG("Can't modify syscall return on this architecture"); #else regs.SYSCALL_RET = 1; #endif #ifdef HAVE_GETREGS ret = ptrace(PTRACE_SETREGS, tracee, 0, &regs); #else iov.iov_base = &regs; iov.iov_len = sizeof(regs); ret = ptrace(PTRACE_SETREGSET, tracee, NT_PRSTATUS, &iov); #endif EXPECT_EQ(0, ret); } void tracer_syscall(struct __test_metadata *_metadata, pid_t tracee, int status, void *args) { int ret; unsigned long msg; /* Make sure we got the right message. */ ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg); EXPECT_EQ(0, ret); /* Validate and take action on expected syscalls. */ switch (msg) { case 0x1002: /* change getpid to getppid. 
*/ EXPECT_EQ(__NR_getpid, get_syscall(_metadata, tracee)); change_syscall(_metadata, tracee, __NR_getppid); break; case 0x1003: /* skip gettid. */ EXPECT_EQ(__NR_gettid, get_syscall(_metadata, tracee)); change_syscall(_metadata, tracee, -1); break; case 0x1004: /* do nothing (allow getppid) */ EXPECT_EQ(__NR_getppid, get_syscall(_metadata, tracee)); break; default: EXPECT_EQ(0, msg) { TH_LOG("Unknown PTRACE_GETEVENTMSG: 0x%lx", msg); kill(tracee, SIGKILL); } } } void tracer_ptrace(struct __test_metadata *_metadata, pid_t tracee, int status, void *args) { int ret, nr; unsigned long msg; static bool entry; /* Make sure we got an empty message. */ ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg); EXPECT_EQ(0, ret); EXPECT_EQ(0, msg); /* The only way to tell PTRACE_SYSCALL entry/exit is by counting. */ entry = !entry; if (!entry) return; nr = get_syscall(_metadata, tracee); if (nr == __NR_getpid) change_syscall(_metadata, tracee, __NR_getppid); } FIXTURE_DATA(TRACE_syscall) { struct sock_fprog prog; pid_t tracer, mytid, mypid, parent; }; FIXTURE_SETUP(TRACE_syscall) { struct sock_filter filter[] = { BPF_STMT(BPF_LD|BPF_W|BPF_ABS, offsetof(struct seccomp_data, nr)), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1002), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_gettid, 0, 1), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1003), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1004), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; memset(&self->prog, 0, sizeof(self->prog)); self->prog.filter = malloc(sizeof(filter)); ASSERT_NE(NULL, self->prog.filter); memcpy(self->prog.filter, filter, sizeof(filter)); self->prog.len = (unsigned short)ARRAY_SIZE(filter); /* Prepare some testable syscall results. */ self->mytid = syscall(__NR_gettid); ASSERT_GT(self->mytid, 0); ASSERT_NE(self->mytid, 1) { TH_LOG("Running this test as init is not supported. 
:)"); } self->mypid = getpid(); ASSERT_GT(self->mypid, 0); ASSERT_EQ(self->mytid, self->mypid); self->parent = getppid(); ASSERT_GT(self->parent, 0); ASSERT_NE(self->parent, self->mypid); /* Launch tracer. */ self->tracer = setup_trace_fixture(_metadata, tracer_syscall, NULL, false); } FIXTURE_TEARDOWN(TRACE_syscall) { teardown_trace_fixture(_metadata, self->tracer); if (self->prog.filter) free(self->prog.filter); } TEST_F(TRACE_syscall, syscall_allowed) { long ret; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0); ASSERT_EQ(0, ret); /* getppid works as expected (no changes). */ EXPECT_EQ(self->parent, syscall(__NR_getppid)); EXPECT_NE(self->mypid, syscall(__NR_getppid)); } TEST_F(TRACE_syscall, syscall_redirected) { long ret; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0); ASSERT_EQ(0, ret); /* getpid has been redirected to getppid as expected. */ EXPECT_EQ(self->parent, syscall(__NR_getpid)); EXPECT_NE(self->mypid, syscall(__NR_getpid)); } TEST_F(TRACE_syscall, syscall_dropped) { long ret; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0); ASSERT_EQ(0, ret); #ifdef SYSCALL_NUM_RET_SHARE_REG /* gettid has been skipped */ EXPECT_EQ(-1, syscall(__NR_gettid)); #else /* gettid has been skipped and an altered return value stored. 
*/ EXPECT_EQ(1, syscall(__NR_gettid)); #endif EXPECT_NE(self->mytid, syscall(__NR_gettid)); } TEST_F(TRACE_syscall, skip_after_RET_TRACE) { struct sock_filter filter[] = { BPF_STMT(BPF_LD|BPF_W|BPF_ABS, offsetof(struct seccomp_data, nr)), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | EPERM), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; long ret; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); /* Install fixture filter. */ ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0); ASSERT_EQ(0, ret); /* Install "errno on getppid" filter. */ ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); ASSERT_EQ(0, ret); /* Tracer will redirect getpid to getppid, and we should see EPERM. */ EXPECT_EQ(-1, syscall(__NR_getpid)); EXPECT_EQ(EPERM, errno); } TEST_F_SIGNAL(TRACE_syscall, kill_after_RET_TRACE, SIGSYS) { struct sock_filter filter[] = { BPF_STMT(BPF_LD|BPF_W|BPF_ABS, offsetof(struct seccomp_data, nr)), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; long ret; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); /* Install fixture filter. */ ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0); ASSERT_EQ(0, ret); /* Install "death on getppid" filter. */ ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); ASSERT_EQ(0, ret); /* Tracer will redirect getpid to getppid, and we should die. 
*/ EXPECT_NE(self->mypid, syscall(__NR_getpid)); } TEST_F(TRACE_syscall, skip_after_ptrace) { struct sock_filter filter[] = { BPF_STMT(BPF_LD|BPF_W|BPF_ABS, offsetof(struct seccomp_data, nr)), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | EPERM), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; long ret; /* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */ teardown_trace_fixture(_metadata, self->tracer); self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL, true); ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); /* Install "errno on getppid" filter. */ ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); ASSERT_EQ(0, ret); /* Tracer will redirect getpid to getppid, and we should see EPERM. */ EXPECT_EQ(-1, syscall(__NR_getpid)); EXPECT_EQ(EPERM, errno); } TEST_F_SIGNAL(TRACE_syscall, kill_after_ptrace, SIGSYS) { struct sock_filter filter[] = { BPF_STMT(BPF_LD|BPF_W|BPF_ABS, offsetof(struct seccomp_data, nr)), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; long ret; /* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */ teardown_trace_fixture(_metadata, self->tracer); self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL, true); ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); /* Install "death on getppid" filter. */ ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); ASSERT_EQ(0, ret); /* Tracer will redirect getpid to getppid, and we should die. 
*/ EXPECT_NE(self->mypid, syscall(__NR_getpid)); } #ifndef __NR_seccomp # if defined(__i386__) # define __NR_seccomp 354 # elif defined(__x86_64__) # define __NR_seccomp 317 # elif defined(__arm__) # define __NR_seccomp 383 # elif defined(__aarch64__) # define __NR_seccomp 277 # elif defined(__hppa__) # define __NR_seccomp 338 # elif defined(__powerpc__) # define __NR_seccomp 358 # elif defined(__s390__) # define __NR_seccomp 348 # else # warning "seccomp syscall number unknown for this architecture" # define __NR_seccomp 0xffff # endif #endif #ifndef SECCOMP_SET_MODE_STRICT #define SECCOMP_SET_MODE_STRICT 0 #endif #ifndef SECCOMP_SET_MODE_FILTER #define SECCOMP_SET_MODE_FILTER 1 #endif #ifndef SECCOMP_FILTER_FLAG_TSYNC #define SECCOMP_FILTER_FLAG_TSYNC 1 #endif #ifndef seccomp int seccomp(unsigned int op, unsigned int flags, void *args) { errno = 0; return syscall(__NR_seccomp, op, flags, args); } #endif TEST(seccomp_syscall) { struct sock_filter filter[] = { BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; long ret; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret) { TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); } /* Reject insane operation. */ ret = seccomp(-1, 0, &prog); ASSERT_NE(ENOSYS, errno) { TH_LOG("Kernel does not support seccomp syscall!"); } EXPECT_EQ(EINVAL, errno) { TH_LOG("Did not reject crazy op value!"); } /* Reject strict with flags or pointer. */ ret = seccomp(SECCOMP_SET_MODE_STRICT, -1, NULL); EXPECT_EQ(EINVAL, errno) { TH_LOG("Did not reject mode strict with flags!"); } ret = seccomp(SECCOMP_SET_MODE_STRICT, 0, &prog); EXPECT_EQ(EINVAL, errno) { TH_LOG("Did not reject mode strict with uargs!"); } /* Reject insane args for filter. 
*/ ret = seccomp(SECCOMP_SET_MODE_FILTER, -1, &prog); EXPECT_EQ(EINVAL, errno) { TH_LOG("Did not reject crazy filter flags!"); } ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, NULL); EXPECT_EQ(EFAULT, errno) { TH_LOG("Did not reject NULL filter!"); } ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog); EXPECT_EQ(0, errno) { TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER: %s", strerror(errno)); } } TEST(seccomp_syscall_mode_lock) { struct sock_filter filter[] = { BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; long ret; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, NULL, 0, 0); ASSERT_EQ(0, ret) { TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); } ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog); ASSERT_NE(ENOSYS, errno) { TH_LOG("Kernel does not support seccomp syscall!"); } EXPECT_EQ(0, ret) { TH_LOG("Could not install filter!"); } /* Make sure neither entry point will switch to strict. */ ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, 0, 0, 0); EXPECT_EQ(EINVAL, errno) { TH_LOG("Switched to mode strict!"); } ret = seccomp(SECCOMP_SET_MODE_STRICT, 0, NULL); EXPECT_EQ(EINVAL, errno) { TH_LOG("Switched to mode strict!"); } } TEST(TSYNC_first) { struct sock_filter filter[] = { BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; long ret; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, NULL, 0, 0); ASSERT_EQ(0, ret) { TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); } ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, &prog); ASSERT_NE(ENOSYS, errno) { TH_LOG("Kernel does not support seccomp syscall!"); } EXPECT_EQ(0, ret) { TH_LOG("Could not install initial filter with TSYNC!"); } } #define TSYNC_SIBLINGS 2 struct tsync_sibling { pthread_t tid; pid_t system_tid; sem_t *started; pthread_cond_t *cond; pthread_mutex_t *mutex; int diverge; int num_waits; struct sock_fprog *prog; struct 
__test_metadata *metadata; }; FIXTURE_DATA(TSYNC) { struct sock_fprog root_prog, apply_prog; struct tsync_sibling sibling[TSYNC_SIBLINGS]; sem_t started; pthread_cond_t cond; pthread_mutex_t mutex; int sibling_count; }; FIXTURE_SETUP(TSYNC) { struct sock_filter root_filter[] = { BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; struct sock_filter apply_filter[] = { BPF_STMT(BPF_LD|BPF_W|BPF_ABS, offsetof(struct seccomp_data, nr)), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; memset(&self->root_prog, 0, sizeof(self->root_prog)); memset(&self->apply_prog, 0, sizeof(self->apply_prog)); memset(&self->sibling, 0, sizeof(self->sibling)); self->root_prog.filter = malloc(sizeof(root_filter)); ASSERT_NE(NULL, self->root_prog.filter); memcpy(self->root_prog.filter, &root_filter, sizeof(root_filter)); self->root_prog.len = (unsigned short)ARRAY_SIZE(root_filter); self->apply_prog.filter = malloc(sizeof(apply_filter)); ASSERT_NE(NULL, self->apply_prog.filter); memcpy(self->apply_prog.filter, &apply_filter, sizeof(apply_filter)); self->apply_prog.len = (unsigned short)ARRAY_SIZE(apply_filter); self->sibling_count = 0; pthread_mutex_init(&self->mutex, NULL); pthread_cond_init(&self->cond, NULL); sem_init(&self->started, 0, 0); self->sibling[0].tid = 0; self->sibling[0].cond = &self->cond; self->sibling[0].started = &self->started; self->sibling[0].mutex = &self->mutex; self->sibling[0].diverge = 0; self->sibling[0].num_waits = 1; self->sibling[0].prog = &self->root_prog; self->sibling[0].metadata = _metadata; self->sibling[1].tid = 0; self->sibling[1].cond = &self->cond; self->sibling[1].started = &self->started; self->sibling[1].mutex = &self->mutex; self->sibling[1].diverge = 0; self->sibling[1].prog = &self->root_prog; self->sibling[1].num_waits = 1; self->sibling[1].metadata = _metadata; } FIXTURE_TEARDOWN(TSYNC) { int sib = 0; if (self->root_prog.filter) free(self->root_prog.filter); 
if (self->apply_prog.filter) free(self->apply_prog.filter); for ( ; sib < self->sibling_count; ++sib) { struct tsync_sibling *s = &self->sibling[sib]; void *status; if (!s->tid) continue; if (pthread_kill(s->tid, 0)) { pthread_cancel(s->tid); pthread_join(s->tid, &status); } } pthread_mutex_destroy(&self->mutex); pthread_cond_destroy(&self->cond); sem_destroy(&self->started); } void *tsync_sibling(void *data) { long ret = 0; struct tsync_sibling *me = data; me->system_tid = syscall(__NR_gettid); pthread_mutex_lock(me->mutex); if (me->diverge) { /* Just re-apply the root prog to fork the tree */ ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, me->prog, 0, 0); } sem_post(me->started); /* Return outside of started so parent notices failures. */ if (ret) { pthread_mutex_unlock(me->mutex); return (void *)SIBLING_EXIT_FAILURE; } do { pthread_cond_wait(me->cond, me->mutex); me->num_waits = me->num_waits - 1; } while (me->num_waits); pthread_mutex_unlock(me->mutex); ret = prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0); if (!ret) return (void *)SIBLING_EXIT_NEWPRIVS; read(0, NULL, 0); return (void *)SIBLING_EXIT_UNKILLED; } void tsync_start_sibling(struct tsync_sibling *sibling) { pthread_create(&sibling->tid, NULL, tsync_sibling, (void *)sibling); } TEST_F(TSYNC, siblings_fail_prctl) { long ret; void *status; struct sock_filter filter[] = { BPF_STMT(BPF_LD|BPF_W|BPF_ABS, offsetof(struct seccomp_data, nr)), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_prctl, 0, 1), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | EINVAL), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) { TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); } /* Check prctl failure detection by requesting sib 0 diverge. 
*/ ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog); ASSERT_NE(ENOSYS, errno) { TH_LOG("Kernel does not support seccomp syscall!"); } ASSERT_EQ(0, ret) { TH_LOG("setting filter failed"); } self->sibling[0].diverge = 1; tsync_start_sibling(&self->sibling[0]); tsync_start_sibling(&self->sibling[1]); while (self->sibling_count < TSYNC_SIBLINGS) { sem_wait(&self->started); self->sibling_count++; } /* Signal the threads to clean up*/ pthread_mutex_lock(&self->mutex); ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) { TH_LOG("cond broadcast non-zero"); } pthread_mutex_unlock(&self->mutex); /* Ensure diverging sibling failed to call prctl. */ pthread_join(self->sibling[0].tid, &status); EXPECT_EQ(SIBLING_EXIT_FAILURE, (long)status); pthread_join(self->sibling[1].tid, &status); EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status); } TEST_F(TSYNC, two_siblings_with_ancestor) { long ret; void *status; ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) { TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); } ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog); ASSERT_NE(ENOSYS, errno) { TH_LOG("Kernel does not support seccomp syscall!"); } ASSERT_EQ(0, ret) { TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!"); } tsync_start_sibling(&self->sibling[0]); tsync_start_sibling(&self->sibling[1]); while (self->sibling_count < TSYNC_SIBLINGS) { sem_wait(&self->started); self->sibling_count++; } ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, &self->apply_prog); ASSERT_EQ(0, ret) { TH_LOG("Could install filter on all threads!"); } /* Tell the siblings to test the policy */ pthread_mutex_lock(&self->mutex); ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) { TH_LOG("cond broadcast non-zero"); } pthread_mutex_unlock(&self->mutex); /* Ensure they are both killed and don't exit cleanly. 
*/ pthread_join(self->sibling[0].tid, &status); EXPECT_EQ(0x0, (long)status); pthread_join(self->sibling[1].tid, &status); EXPECT_EQ(0x0, (long)status); } TEST_F(TSYNC, two_sibling_want_nnp) { void *status; /* start siblings before any prctl() operations */ tsync_start_sibling(&self->sibling[0]); tsync_start_sibling(&self->sibling[1]); while (self->sibling_count < TSYNC_SIBLINGS) { sem_wait(&self->started); self->sibling_count++; } /* Tell the siblings to test no policy */ pthread_mutex_lock(&self->mutex); ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) { TH_LOG("cond broadcast non-zero"); } pthread_mutex_unlock(&self->mutex); /* Ensure they are both upset about lacking nnp. */ pthread_join(self->sibling[0].tid, &status); EXPECT_EQ(SIBLING_EXIT_NEWPRIVS, (long)status); pthread_join(self->sibling[1].tid, &status); EXPECT_EQ(SIBLING_EXIT_NEWPRIVS, (long)status); } TEST_F(TSYNC, two_siblings_with_no_filter) { long ret; void *status; /* start siblings before any prctl() operations */ tsync_start_sibling(&self->sibling[0]); tsync_start_sibling(&self->sibling[1]); while (self->sibling_count < TSYNC_SIBLINGS) { sem_wait(&self->started); self->sibling_count++; } ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) { TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); } ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, &self->apply_prog); ASSERT_NE(ENOSYS, errno) { TH_LOG("Kernel does not support seccomp syscall!"); } ASSERT_EQ(0, ret) { TH_LOG("Could install filter on all threads!"); } /* Tell the siblings to test the policy */ pthread_mutex_lock(&self->mutex); ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) { TH_LOG("cond broadcast non-zero"); } pthread_mutex_unlock(&self->mutex); /* Ensure they are both killed and don't exit cleanly. 
*/ pthread_join(self->sibling[0].tid, &status); EXPECT_EQ(0x0, (long)status); pthread_join(self->sibling[1].tid, &status); EXPECT_EQ(0x0, (long)status); } TEST_F(TSYNC, two_siblings_with_one_divergence) { long ret; void *status; ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) { TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); } ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog); ASSERT_NE(ENOSYS, errno) { TH_LOG("Kernel does not support seccomp syscall!"); } ASSERT_EQ(0, ret) { TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!"); } self->sibling[0].diverge = 1; tsync_start_sibling(&self->sibling[0]); tsync_start_sibling(&self->sibling[1]); while (self->sibling_count < TSYNC_SIBLINGS) { sem_wait(&self->started); self->sibling_count++; } ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, &self->apply_prog); ASSERT_EQ(self->sibling[0].system_tid, ret) { TH_LOG("Did not fail on diverged sibling."); } /* Wake the threads */ pthread_mutex_lock(&self->mutex); ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) { TH_LOG("cond broadcast non-zero"); } pthread_mutex_unlock(&self->mutex); /* Ensure they are both unkilled. */ pthread_join(self->sibling[0].tid, &status); EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status); pthread_join(self->sibling[1].tid, &status); EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status); } TEST_F(TSYNC, two_siblings_not_under_filter) { long ret, sib; void *status; ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) { TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); } /* * Sibling 0 will have its own seccomp policy * and Sibling 1 will not be under seccomp at * all. Sibling 1 will enter seccomp and 0 * will cause failure. 
*/ self->sibling[0].diverge = 1; tsync_start_sibling(&self->sibling[0]); tsync_start_sibling(&self->sibling[1]); while (self->sibling_count < TSYNC_SIBLINGS) { sem_wait(&self->started); self->sibling_count++; } ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog); ASSERT_NE(ENOSYS, errno) { TH_LOG("Kernel does not support seccomp syscall!"); } ASSERT_EQ(0, ret) { TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!"); } ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, &self->apply_prog); ASSERT_EQ(ret, self->sibling[0].system_tid) { TH_LOG("Did not fail on diverged sibling."); } sib = 1; if (ret == self->sibling[0].system_tid) sib = 0; pthread_mutex_lock(&self->mutex); /* Increment the other siblings num_waits so we can clean up * the one we just saw. */ self->sibling[!sib].num_waits += 1; /* Signal the thread to clean up*/ ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) { TH_LOG("cond broadcast non-zero"); } pthread_mutex_unlock(&self->mutex); pthread_join(self->sibling[sib].tid, &status); EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status); /* Poll for actual task death. pthread_join doesn't guarantee it. */ while (!kill(self->sibling[sib].system_tid, 0)) sleep(0.1); /* Switch to the remaining sibling */ sib = !sib; ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, &self->apply_prog); ASSERT_EQ(0, ret) { TH_LOG("Expected the remaining sibling to sync"); }; pthread_mutex_lock(&self->mutex); /* If remaining sibling didn't have a chance to wake up during * the first broadcast, manually reduce the num_waits now. */ if (self->sibling[sib].num_waits > 1) self->sibling[sib].num_waits = 1; ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) { TH_LOG("cond broadcast non-zero"); } pthread_mutex_unlock(&self->mutex); pthread_join(self->sibling[sib].tid, &status); EXPECT_EQ(0, (long)status); /* Poll for actual task death. pthread_join doesn't guarantee it. 
*/ while (!kill(self->sibling[sib].system_tid, 0)) sleep(0.1); ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, &self->apply_prog); ASSERT_EQ(0, ret); /* just us chickens */ } /* Make sure restarted syscalls are seen directly as "restart_syscall". */ TEST(syscall_restart) { long ret; unsigned long msg; pid_t child_pid; int pipefd[2]; int status; siginfo_t info = { }; struct sock_filter filter[] = { BPF_STMT(BPF_LD|BPF_W|BPF_ABS, offsetof(struct seccomp_data, nr)), #ifdef __NR_sigreturn BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_sigreturn, 6, 0), #endif BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 5, 0), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_exit, 4, 0), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_rt_sigreturn, 3, 0), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_nanosleep, 4, 0), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_restart_syscall, 4, 0), /* Allow __NR_write for easy logging. */ BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_write, 0, 1), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL), /* The nanosleep jump target. */ BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE|0x100), /* The restart_syscall jump target. */ BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE|0x200), }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; #if defined(__arm__) struct utsname utsbuf; #endif ASSERT_EQ(0, pipe(pipefd)); child_pid = fork(); ASSERT_LE(0, child_pid); if (child_pid == 0) { /* Child uses EXPECT not ASSERT to deliver status correctly. */ char buf = ' '; struct timespec timeout = { }; /* Attach parent as tracer and stop. 
*/ EXPECT_EQ(0, ptrace(PTRACE_TRACEME)); EXPECT_EQ(0, raise(SIGSTOP)); EXPECT_EQ(0, close(pipefd[1])); EXPECT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) { TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); } ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); EXPECT_EQ(0, ret) { TH_LOG("Failed to install filter!"); } EXPECT_EQ(1, read(pipefd[0], &buf, 1)) { TH_LOG("Failed to read() sync from parent"); } EXPECT_EQ('.', buf) { TH_LOG("Failed to get sync data from read()"); } /* Start nanosleep to be interrupted. */ timeout.tv_sec = 1; errno = 0; EXPECT_EQ(0, nanosleep(&timeout, NULL)) { TH_LOG("Call to nanosleep() failed (errno %d)", errno); } /* Read final sync from parent. */ EXPECT_EQ(1, read(pipefd[0], &buf, 1)) { TH_LOG("Failed final read() from parent"); } EXPECT_EQ('!', buf) { TH_LOG("Failed to get final data from read()"); } /* Directly report the status of our test harness results. */ syscall(__NR_exit, _metadata->passed ? EXIT_SUCCESS : EXIT_FAILURE); } EXPECT_EQ(0, close(pipefd[0])); /* Attach to child, setup options, and release. */ ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0)); ASSERT_EQ(true, WIFSTOPPED(status)); ASSERT_EQ(0, ptrace(PTRACE_SETOPTIONS, child_pid, NULL, PTRACE_O_TRACESECCOMP)); ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0)); ASSERT_EQ(1, write(pipefd[1], ".", 1)); /* Wait for nanosleep() to start. */ ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0)); ASSERT_EQ(true, WIFSTOPPED(status)); ASSERT_EQ(SIGTRAP, WSTOPSIG(status)); ASSERT_EQ(PTRACE_EVENT_SECCOMP, (status >> 16)); ASSERT_EQ(0, ptrace(PTRACE_GETEVENTMSG, child_pid, NULL, &msg)); ASSERT_EQ(0x100, msg); EXPECT_EQ(__NR_nanosleep, get_syscall(_metadata, child_pid)); /* Might as well check siginfo for sanity while we're here. 
*/ ASSERT_EQ(0, ptrace(PTRACE_GETSIGINFO, child_pid, NULL, &info)); ASSERT_EQ(SIGTRAP, info.si_signo); ASSERT_EQ(SIGTRAP | (PTRACE_EVENT_SECCOMP << 8), info.si_code); EXPECT_EQ(0, info.si_errno); EXPECT_EQ(getuid(), info.si_uid); /* Verify signal delivery came from child (seccomp-triggered). */ EXPECT_EQ(child_pid, info.si_pid); /* Interrupt nanosleep with SIGSTOP (which we'll need to handle). */ ASSERT_EQ(0, kill(child_pid, SIGSTOP)); ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0)); ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0)); ASSERT_EQ(true, WIFSTOPPED(status)); ASSERT_EQ(SIGSTOP, WSTOPSIG(status)); /* Verify signal delivery came from parent now. */ ASSERT_EQ(0, ptrace(PTRACE_GETSIGINFO, child_pid, NULL, &info)); EXPECT_EQ(getpid(), info.si_pid); /* Restart nanosleep with SIGCONT, which triggers restart_syscall. */ ASSERT_EQ(0, kill(child_pid, SIGCONT)); ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0)); ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0)); ASSERT_EQ(true, WIFSTOPPED(status)); ASSERT_EQ(SIGCONT, WSTOPSIG(status)); ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0)); /* Wait for restart_syscall() to start. */ ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0)); ASSERT_EQ(true, WIFSTOPPED(status)); ASSERT_EQ(SIGTRAP, WSTOPSIG(status)); ASSERT_EQ(PTRACE_EVENT_SECCOMP, (status >> 16)); ASSERT_EQ(0, ptrace(PTRACE_GETEVENTMSG, child_pid, NULL, &msg)); ASSERT_EQ(0x200, msg); ret = get_syscall(_metadata, child_pid); #if defined(__arm__) /* * FIXME: * - native ARM registers do NOT expose true syscall. * - compat ARM registers on ARM64 DO expose true syscall. */ ASSERT_EQ(0, uname(&utsbuf)); if (strncmp(utsbuf.machine, "arm", 3) == 0) { EXPECT_EQ(__NR_nanosleep, ret); } else #endif { EXPECT_EQ(__NR_restart_syscall, ret); } /* Write again to end test. 
*/ ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0)); ASSERT_EQ(1, write(pipefd[1], "!", 1)); EXPECT_EQ(0, close(pipefd[1])); ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0)); if (WIFSIGNALED(status) || WEXITSTATUS(status)) _metadata->passed = 0; } /* * TODO: * - add microbenchmarks * - expand NNP testing * - better arch-specific TRACE and TRAP handlers. * - endianness checking when appropriate * - 64-bit arg prodding * - arch value testing (x86 modes especially) * - ... */ TEST_HARNESS_MAIN
gpl-2.0
El-Nath/bidji-find5
drivers/gpu/drm/radeon/radeon_legacy_crtc.c
963
32751
/* * Copyright 2007-8 Advanced Micro Devices, Inc. * Copyright 2008 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 */
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include <drm/drm_fixed.h>
#include "radeon.h"
#include "atom.h"

/* Disable overscan borders on this CRTC: clear the overscan color and both
 * width registers for the CRTC selected by crtc_offset. */
static void radeon_overscan_setup(struct drm_crtc *crtc,
				  struct drm_display_mode *mode)
{
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);

	WREG32(RADEON_OVR_CLR + radeon_crtc->crtc_offset, 0);
	WREG32(RADEON_OVR_WID_LEFT_RIGHT + radeon_crtc->crtc_offset, 0);
	WREG32(RADEON_OVR_WID_TOP_BOTTOM + radeon_crtc->crtc_offset, 0);
}

/* Program the RMX (scaler) unit of the flat-panel path for the requested
 * mode.  Depending on radeon_crtc->rmx_type the mode is stretched to the
 * panel's native mode (RMX_FULL/RMX_ASPECT), centered (RMX_CENTER) or
 * passed through (RMX_OFF).  All computed values are written to the FP_*
 * stretch/sync/total registers at the end. */
static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
				       struct drm_display_mode *mode)
{
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	int xres = mode->hdisplay;
	int yres = mode->vdisplay;
	bool hscale = true, vscale = true;
	int hsync_wid;
	int vsync_wid;
	int hsync_start;
	int blank_width;
	u32 scale, inc, crtc_more_cntl;
	u32 fp_horz_stretch, fp_vert_stretch, fp_horz_vert_active;
	u32 fp_h_sync_strt_wid, fp_crtc_h_total_disp;
	u32 fp_v_sync_strt_wid, fp_crtc_v_total_disp;
	struct drm_display_mode *native_mode = &radeon_crtc->native_mode;

	/* Preserve only the reserved/auto-ratio bits; everything else is
	 * recomputed below. */
	fp_vert_stretch = RREG32(RADEON_FP_VERT_STRETCH) &
		(RADEON_VERT_STRETCH_RESERVED |
		 RADEON_VERT_AUTO_RATIO_INC);
	fp_horz_stretch = RREG32(RADEON_FP_HORZ_STRETCH) &
		(RADEON_HORZ_FP_LOOP_STRETCH |
		 RADEON_HORZ_AUTO_RATIO_INC);

	crtc_more_cntl = 0;
	if ((rdev->family == CHIP_RS100) ||
	    (rdev->family == CHIP_RS200)) {
		/* This is to workaround the asic bug for RMX, some versions
		   of BIOS doesn't have this register initialized correctly. */
		crtc_more_cntl |= RADEON_CRTC_H_CUTOFF_ACTIVE_EN;
	}

	/* Horizontal total/display, in 8-pixel character clocks. */
	fp_crtc_h_total_disp = ((((mode->crtc_htotal / 8) - 1) & 0x3ff)
				| ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16));

	hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8;
	if (!hsync_wid)
		hsync_wid = 1;
	hsync_start = mode->crtc_hsync_start - 8;

	fp_h_sync_strt_wid = ((hsync_start & 0x1fff)
			      | ((hsync_wid & 0x3f) << 16)
			      | ((mode->flags & DRM_MODE_FLAG_NHSYNC)
				 ? RADEON_CRTC_H_SYNC_POL : 0));

	fp_crtc_v_total_disp = (((mode->crtc_vtotal - 1) & 0xffff)
				| ((mode->crtc_vdisplay - 1) << 16));

	vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start;
	if (!vsync_wid)
		vsync_wid = 1;

	fp_v_sync_strt_wid = (((mode->crtc_vsync_start - 1) & 0xfff)
			      | ((vsync_wid & 0x1f) << 16)
			      | ((mode->flags & DRM_MODE_FLAG_NVSYNC)
				 ? RADEON_CRTC_V_SYNC_POL : 0));

	fp_horz_vert_active = 0;

	/* Decide whether scaling is actually needed: clamp the requested
	 * resolution to the panel's native mode and disable scaling on
	 * axes that already match. */
	if (native_mode->hdisplay == 0 ||
	    native_mode->vdisplay == 0) {
		hscale = false;
		vscale = false;
	} else {
		if (xres > native_mode->hdisplay)
			xres = native_mode->hdisplay;
		if (yres > native_mode->vdisplay)
			yres = native_mode->vdisplay;

		if (xres == native_mode->hdisplay)
			hscale = false;
		if (yres == native_mode->vdisplay)
			vscale = false;
	}

	switch (radeon_crtc->rmx_type) {
	case RMX_FULL:
	case RMX_ASPECT:
		if (!hscale)
			fp_horz_stretch |= ((xres/8-1) << 16);
		else {
			inc = (fp_horz_stretch & RADEON_HORZ_AUTO_RATIO_INC) ? 1 : 0;
			scale = ((xres + inc) * RADEON_HORZ_STRETCH_RATIO_MAX)
				/ native_mode->hdisplay + 1;
			fp_horz_stretch |= (((scale) & RADEON_HORZ_STRETCH_RATIO_MASK) |
					RADEON_HORZ_STRETCH_BLEND |
					RADEON_HORZ_STRETCH_ENABLE |
					((native_mode->hdisplay/8-1) << 16));
		}

		if (!vscale)
			fp_vert_stretch |= ((yres-1) << 12);
		else {
			inc = (fp_vert_stretch & RADEON_VERT_AUTO_RATIO_INC) ? 1 : 0;
			scale = ((yres + inc) * RADEON_VERT_STRETCH_RATIO_MAX)
				/ native_mode->vdisplay + 1;
			fp_vert_stretch |= (((scale) & RADEON_VERT_STRETCH_RATIO_MASK) |
					RADEON_VERT_STRETCH_ENABLE |
					RADEON_VERT_STRETCH_BLEND |
					((native_mode->vdisplay-1) << 12));
		}
		break;
	case RMX_CENTER:
		fp_horz_stretch |= ((xres/8-1) << 16);
		fp_vert_stretch |= ((yres-1) << 12);

		crtc_more_cntl |= (RADEON_CRTC_AUTO_HORZ_CENTER_EN |
				RADEON_CRTC_AUTO_VERT_CENTER_EN);

		/* blank width is capped at 110 character clocks */
		blank_width = (mode->crtc_hblank_end - mode->crtc_hblank_start) / 8;
		if (blank_width > 110)
			blank_width = 110;

		fp_crtc_h_total_disp = (((blank_width) & 0x3ff)
				| ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16));

		hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8;
		if (!hsync_wid)
			hsync_wid = 1;

		fp_h_sync_strt_wid = ((((mode->crtc_hsync_start - mode->crtc_hblank_start) / 8) & 0x1fff)
				| ((hsync_wid & 0x3f) << 16)
				| ((mode->flags & DRM_MODE_FLAG_NHSYNC)
					? RADEON_CRTC_H_SYNC_POL : 0));

		fp_crtc_v_total_disp = (((mode->crtc_vblank_end - mode->crtc_vblank_start) & 0xffff)
				| ((mode->crtc_vdisplay - 1) << 16));

		vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start;
		if (!vsync_wid)
			vsync_wid = 1;

		fp_v_sync_strt_wid = ((((mode->crtc_vsync_start - mode->crtc_vblank_start) & 0xfff)
					| ((vsync_wid & 0x1f) << 16)
					| ((mode->flags & DRM_MODE_FLAG_NVSYNC)
						? RADEON_CRTC_V_SYNC_POL : 0)));

		fp_horz_vert_active = (((native_mode->vdisplay) & 0xfff) |
				(((native_mode->hdisplay / 8) & 0x1ff) << 16));
		break;
	case RMX_OFF:
	default:
		fp_horz_stretch |= ((xres/8-1) << 16);
		fp_vert_stretch |= ((yres-1) << 12);
		break;
	}

	WREG32(RADEON_FP_HORZ_STRETCH,      fp_horz_stretch);
	WREG32(RADEON_FP_VERT_STRETCH,      fp_vert_stretch);
	WREG32(RADEON_CRTC_MORE_CNTL,       crtc_more_cntl);
	WREG32(RADEON_FP_HORZ_VERT_ACTIVE,  fp_horz_vert_active);
	WREG32(RADEON_FP_H_SYNC_STRT_WID,   fp_h_sync_strt_wid);
	WREG32(RADEON_FP_V_SYNC_STRT_WID,   fp_v_sync_strt_wid);
	WREG32(RADEON_FP_CRTC_H_TOTAL_DISP, fp_crtc_h_total_disp);
	WREG32(RADEON_FP_CRTC_V_TOTAL_DISP, fp_crtc_v_total_disp);
}

void radeon_restore_common_regs(struct drm_device *dev)
{
	/* don't need this yet */
}

/* Spin (bounded) until the PLL atomic-update-read flag clears. */
static void radeon_pll_wait_for_read_update_complete(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;
	int i = 0;

	/* FIXME: Certain revisions of R300 can't recover here.  Not sure of
	   the cause yet, but this workaround will mask the problem for now.
	   Other chips usually will pass at the very first test, so the
	   workaround shouldn't have any effect on them. */
	for (i = 0;
	     (i < 10000 &&
	      RREG32_PLL(RADEON_PPLL_REF_DIV) & RADEON_PPLL_ATOMIC_UPDATE_R);
	     i++);
}

/* Latch pending PPLL divider writes: wait for any in-flight read-update,
 * then trigger an atomic write-update. */
static void radeon_pll_write_update(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;

	while (RREG32_PLL(RADEON_PPLL_REF_DIV) & RADEON_PPLL_ATOMIC_UPDATE_R);

	WREG32_PLL_P(RADEON_PPLL_REF_DIV,
		     RADEON_PPLL_ATOMIC_UPDATE_W,
		     ~(RADEON_PPLL_ATOMIC_UPDATE_W));
}

/* Same as radeon_pll_wait_for_read_update_complete() but for P2PLL (CRTC2). */
static void radeon_pll2_wait_for_read_update_complete(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;
	int i = 0;

	/* FIXME: Certain revisions of R300 can't recover here.  Not sure of
	   the cause yet, but this workaround will mask the problem for now.
	   Other chips usually will pass at the very first test, so the
	   workaround shouldn't have any effect on them. */
	for (i = 0;
	     (i < 10000 &&
	      RREG32_PLL(RADEON_P2PLL_REF_DIV) & RADEON_P2PLL_ATOMIC_UPDATE_R);
	     i++);
}

/* Same as radeon_pll_write_update() but for P2PLL (CRTC2). */
static void radeon_pll2_write_update(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;

	while (RREG32_PLL(RADEON_P2PLL_REF_DIV) & RADEON_P2PLL_ATOMIC_UPDATE_R);

	WREG32_PLL_P(RADEON_P2PLL_REF_DIV,
		     RADEON_P2PLL_ATOMIC_UPDATE_W,
		     ~(RADEON_P2PLL_ATOMIC_UPDATE_W));
}

/* Pick a PLL gain value (1, 4 or 7) from the VCO frequency computed out of
 * the reference frequency and the ref/fb dividers.  Returns 1 if ref_div is
 * zero (avoids a divide-by-zero). */
static uint8_t radeon_compute_pll_gain(uint16_t ref_freq, uint16_t ref_div,
				       uint16_t fb_div)
{
	unsigned int vcoFreq;

	if (!ref_div)
		return 1;

	vcoFreq = ((unsigned)ref_freq * fb_div) / ref_div;

	/*
	 * This is horribly crude: the VCO frequency range is divided into
	 * 3 parts, each part having a fixed PLL gain value.
	 */
	if (vcoFreq >= 30000)
		/*
		 * [300..max] MHz : 7
		 */
		return 7;
	else if (vcoFreq >= 18000)
		/*
		 * [180..300) MHz : 4
		 */
		return 4;
	else
		/*
		 * [0..180) MHz : 1
		 */
		return 1;
}

/* DPMS callback: enable or disable the CRTC (sync/display disable bits),
 * keeping the power-management clock bookkeeping in sync before an enable
 * and after a disable. */
void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t mask;

	if (radeon_crtc->crtc_id)
		mask = (RADEON_CRTC2_DISP_DIS |
			RADEON_CRTC2_VSYNC_DIS |
			RADEON_CRTC2_HSYNC_DIS |
			RADEON_CRTC2_DISP_REQ_EN_B);
	else
		mask = (RADEON_CRTC_DISPLAY_DIS |
			RADEON_CRTC_VSYNC_DIS |
			RADEON_CRTC_HSYNC_DIS);

	switch (mode) {
	case DRM_MODE_DPMS_ON:
		radeon_crtc->enabled = true;
		/* adjust pm to dpms changes BEFORE enabling crtcs */
		radeon_pm_compute_clocks(rdev);
		if (radeon_crtc->crtc_id)
			WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~(RADEON_CRTC2_EN | mask));
		else {
			WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_EN, ~(RADEON_CRTC_EN |
									 RADEON_CRTC_DISP_REQ_EN_B));
			WREG32_P(RADEON_CRTC_EXT_CNTL, 0, ~mask);
		}
		drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
		radeon_crtc_load_lut(crtc);
		break;
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
	case DRM_MODE_DPMS_OFF:
		drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
		if (radeon_crtc->crtc_id)
			WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask));
		else {
			WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_DISP_REQ_EN_B, ~(RADEON_CRTC_EN |
										    RADEON_CRTC_DISP_REQ_EN_B));
			WREG32_P(RADEON_CRTC_EXT_CNTL, mask, ~mask);
		}
		radeon_crtc->enabled = false;
		/* adjust pm to dpms changes AFTER disabling crtcs */
		radeon_pm_compute_clocks(rdev);
		break;
	}
}

/* Non-atomic scanout base update; forwards to radeon_crtc_do_set_base(). */
int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
			 struct drm_framebuffer *old_fb)
{
	return radeon_crtc_do_set_base(crtc, old_fb, x, y, 0);
}

/* Atomic (e.g. kgdb/panic) scanout base update; no pin/unpin dance. */
int radeon_crtc_set_base_atomic(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				int x, int y, enum mode_set_atomic state)
{
	return radeon_crtc_do_set_base(crtc, fb, x, y, 1);
}

/* Pin the framebuffer BO in VRAM, compute the CRTC offset/pitch/tiling
 * configuration for the requested (x, y) scanout position and program the
 * CRTC base/offset/pitch registers.  In the non-atomic path the previous
 * framebuffer's BO is unpinned afterwards. */
int radeon_crtc_do_set_base(struct drm_crtc *crtc,
			    struct drm_framebuffer *fb,
			    int x, int y, int atomic)
{
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct radeon_framebuffer *radeon_fb;
	struct drm_framebuffer *target_fb;
	struct drm_gem_object *obj;
	struct radeon_bo *rbo;
	uint64_t base;
	uint32_t crtc_offset, crtc_offset_cntl, crtc_tile_x0_y0 = 0;
	uint32_t crtc_pitch, pitch_pixels;
	uint32_t tiling_flags;
	int format;
	uint32_t gen_cntl_reg, gen_cntl_val;
	int r;

	DRM_DEBUG_KMS("\n");
	/* no fb bound */
	if (!atomic && !crtc->fb) {
		DRM_DEBUG_KMS("No FB bound\n");
		return 0;
	}

	if (atomic) {
		radeon_fb = to_radeon_framebuffer(fb);
		target_fb = fb;
	} else {
		radeon_fb = to_radeon_framebuffer(crtc->fb);
		target_fb = crtc->fb;
	}

	/* Map bpp to the hardware pixel-format code. */
	switch (target_fb->bits_per_pixel) {
	case 8:
		format = 2;
		break;
	case 15:      /*  555 */
		format = 3;
		break;
	case 16:      /*  565 */
		format = 4;
		break;
	case 24:      /*  RGB */
		format = 5;
		break;
	case 32:      /* xRGB */
		format = 6;
		break;
	default:
		return false;
	}

	/* Pin framebuffer & get tiling information */
	obj = radeon_fb->obj;
	rbo = gem_to_radeon_bo(obj);
retry:
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		return r;
	/* Only 27 bit offset for legacy CRTC */
	r = radeon_bo_pin_restricted(rbo, RADEON_GEM_DOMAIN_VRAM, 1 << 27,
				     &base);
	if (unlikely(r != 0)) {
		radeon_bo_unreserve(rbo);

		/* On old GPU like RN50 with little vram pining can fails because
		 * current fb is taking all space needed. So instead of unpining
		 * the old buffer after pining the new one, first unpin old one
		 * and then retry pining new one.
		 *
		 * As only master can set mode only master can pin and it is
		 * unlikely the master client will race with itself especialy
		 * on those old gpu with single crtc.
		 *
		 * We don't shutdown the display controller because new buffer
		 * will end up in same spot.
		 */
		if (!atomic && fb && fb != crtc->fb) {
			struct radeon_bo *old_rbo;
			unsigned long nsize, osize;

			old_rbo = gem_to_radeon_bo(to_radeon_framebuffer(fb)->obj);
			osize = radeon_bo_size(old_rbo);
			nsize = radeon_bo_size(rbo);
			if (nsize <= osize && !radeon_bo_reserve(old_rbo, false)) {
				radeon_bo_unpin(old_rbo);
				radeon_bo_unreserve(old_rbo);
				fb = NULL;
				goto retry;
			}
		}
		return -EINVAL;
	}
	radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
	radeon_bo_unreserve(rbo);
	if (tiling_flags & RADEON_TILING_MICRO)
		DRM_ERROR("trying to scanout microtiled buffer\n");

	/* if scanout was in GTT this really wouldn't work */
	/* crtc offset is from display base addr not FB location */
	radeon_crtc->legacy_display_base_addr = rdev->mc.vram_start;

	base -= radeon_crtc->legacy_display_base_addr;

	crtc_offset_cntl = 0;

	pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
	crtc_pitch  = (((pitch_pixels * target_fb->bits_per_pixel) +
			((target_fb->bits_per_pixel * 8) - 1)) /
		       (target_fb->bits_per_pixel * 8));
	crtc_pitch |= crtc_pitch << 16;

	crtc_offset_cntl |= RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN;
	if (tiling_flags & RADEON_TILING_MACRO) {
		if (ASIC_IS_R300(rdev))
			crtc_offset_cntl |= (R300_CRTC_X_Y_MODE_EN |
					     R300_CRTC_MICRO_TILE_BUFFER_DIS |
					     R300_CRTC_MACRO_TILE_EN);
		else
			crtc_offset_cntl |= RADEON_CRTC_TILE_EN;
	} else {
		if (ASIC_IS_R300(rdev))
			crtc_offset_cntl &= ~(R300_CRTC_X_Y_MODE_EN |
					      R300_CRTC_MICRO_TILE_BUFFER_DIS |
					      R300_CRTC_MACRO_TILE_EN);
		else
			crtc_offset_cntl &= ~RADEON_CRTC_TILE_EN;
	}

	/* For macro-tiled buffers the scanout origin is expressed either as
	 * an x/y pair (R300+) or folded into the base address (older asics);
	 * for linear buffers it is a plain byte offset. */
	if (tiling_flags & RADEON_TILING_MACRO) {
		if (ASIC_IS_R300(rdev)) {
			crtc_tile_x0_y0 = x | (y << 16);
			base &= ~0x7ff;
		} else {
			int byteshift = target_fb->bits_per_pixel >> 4;
			int tile_addr = (((y >> 3) * pitch_pixels +  x) >> (8 - byteshift)) << 11;
			base += tile_addr + ((x << byteshift) % 256) + ((y % 8) << 8);
			crtc_offset_cntl |= (y % 16);
		}
	} else {
		int offset = y * pitch_pixels + x;
		switch (target_fb->bits_per_pixel) {
		case 8:
			offset *= 1;
			break;
		case 15:
		case 16:
			offset *= 2;
			break;
		case 24:
			offset *= 3;
			break;
		case 32:
			offset *= 4;
			break;
		default:
			return false;
		}
		base += offset;
	}

	base &= ~7;

	if (radeon_crtc->crtc_id == 1)
		gen_cntl_reg = RADEON_CRTC2_GEN_CNTL;
	else
		gen_cntl_reg = RADEON_CRTC_GEN_CNTL;

	gen_cntl_val = RREG32(gen_cntl_reg);
	gen_cntl_val &= ~(0xf << 8);
	gen_cntl_val |= (format << 8);
	gen_cntl_val &= ~RADEON_CRTC_VSTAT_MODE_MASK;
	WREG32(gen_cntl_reg, gen_cntl_val);

	crtc_offset = (u32)base;

	WREG32(RADEON_DISPLAY_BASE_ADDR + radeon_crtc->crtc_offset, radeon_crtc->legacy_display_base_addr);

	if (ASIC_IS_R300(rdev)) {
		if (radeon_crtc->crtc_id)
			WREG32(R300_CRTC2_TILE_X0_Y0, crtc_tile_x0_y0);
		else
			WREG32(R300_CRTC_TILE_X0_Y0, crtc_tile_x0_y0);
	}
	WREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset, crtc_offset_cntl);
	WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, crtc_offset);
	WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch);

	if (!atomic && fb && fb != crtc->fb) {
		radeon_fb = to_radeon_framebuffer(fb);
		rbo = gem_to_radeon_bo(radeon_fb->obj);
		r = radeon_bo_reserve(rbo, false);
		if (unlikely(r != 0))
			return r;
		radeon_bo_unpin(rbo);
		radeon_bo_unreserve(rbo);
	}

	/* Bytes per pixel may have changed */
	radeon_bandwidth_update(rdev);

	return 0;
}

/* Program the CRTC timing registers (h/v total, display, sync start/width)
 * and the per-CRTC general control for the given mode.  TV-connected CRTCs
 * get their timing values adjusted by the TV code before the final writes.
 * Returns false for an unsupported framebuffer depth. */
static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mode *mode)
{
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_encoder *encoder;
	int format;
	int hsync_start;
	int hsync_wid;
	int vsync_wid;
	uint32_t crtc_h_total_disp;
	uint32_t crtc_h_sync_strt_wid;
	uint32_t crtc_v_total_disp;
	uint32_t crtc_v_sync_strt_wid;
	bool is_tv = false;

	DRM_DEBUG_KMS("\n");
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc == crtc) {
			struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
			if (radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT) {
				is_tv = true;
				DRM_INFO("crtc %d is connected to a TV\n", radeon_crtc->crtc_id);
				break;
			}
		}
	}

	switch (crtc->fb->bits_per_pixel) {
	case 8:
		format = 2;
		break;
	case 15:      /*  555 */
		format = 3;
		break;
	case 16:      /*  565 */
		format = 4;
		break;
	case 24:      /*  RGB */
		format = 5;
		break;
	case 32:      /* xRGB */
		format = 6;
		break;
	default:
		return false;
	}

	crtc_h_total_disp = ((((mode->crtc_htotal / 8) - 1) & 0x3ff)
			     | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16));

	hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8;
	if (!hsync_wid)
		hsync_wid = 1;
	hsync_start = mode->crtc_hsync_start - 8;

	crtc_h_sync_strt_wid = ((hsync_start & 0x1fff)
				| ((hsync_wid & 0x3f) << 16)
				| ((mode->flags & DRM_MODE_FLAG_NHSYNC)
				   ? RADEON_CRTC_H_SYNC_POL : 0));

	/* This works for double scan mode. */
	crtc_v_total_disp = (((mode->crtc_vtotal - 1) & 0xffff)
			     | ((mode->crtc_vdisplay - 1) << 16));

	vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start;
	if (!vsync_wid)
		vsync_wid = 1;

	crtc_v_sync_strt_wid = (((mode->crtc_vsync_start - 1) & 0xfff)
				| ((vsync_wid & 0x1f) << 16)
				| ((mode->flags & DRM_MODE_FLAG_NVSYNC)
				   ? RADEON_CRTC_V_SYNC_POL : 0));

	if (radeon_crtc->crtc_id) {
		uint32_t crtc2_gen_cntl;
		uint32_t disp2_merge_cntl;

		/* if TV DAC is enabled for another crtc and keep it enabled */
		crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL) & 0x00718080;
		crtc2_gen_cntl |= ((format << 8)
				   | RADEON_CRTC2_VSYNC_DIS
				   | RADEON_CRTC2_HSYNC_DIS
				   | RADEON_CRTC2_DISP_DIS
				   | RADEON_CRTC2_DISP_REQ_EN_B
				   | ((mode->flags & DRM_MODE_FLAG_DBLSCAN)
				      ? RADEON_CRTC2_DBL_SCAN_EN : 0)
				   | ((mode->flags & DRM_MODE_FLAG_CSYNC)
				      ? RADEON_CRTC2_CSYNC_EN : 0)
				   | ((mode->flags & DRM_MODE_FLAG_INTERLACE)
				      ? RADEON_CRTC2_INTERLACE_EN : 0));

		/* rs4xx chips seem to like to have the crtc enabled when the timing is set */
		if ((rdev->family == CHIP_RS400) || (rdev->family == CHIP_RS480))
			crtc2_gen_cntl |= RADEON_CRTC2_EN;

		disp2_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
		disp2_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;

		WREG32(RADEON_DISP2_MERGE_CNTL, disp2_merge_cntl);
		WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);

		WREG32(RADEON_FP_H2_SYNC_STRT_WID, crtc_h_sync_strt_wid);
		WREG32(RADEON_FP_V2_SYNC_STRT_WID, crtc_v_sync_strt_wid);
	} else {
		uint32_t crtc_gen_cntl;
		uint32_t crtc_ext_cntl;
		uint32_t disp_merge_cntl;

		crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL) & 0x00718000;
		crtc_gen_cntl |= (RADEON_CRTC_EXT_DISP_EN
				 | (format << 8)
				 | RADEON_CRTC_DISP_REQ_EN_B
				 | ((mode->flags & DRM_MODE_FLAG_DBLSCAN)
				    ? RADEON_CRTC_DBL_SCAN_EN : 0)
				 | ((mode->flags & DRM_MODE_FLAG_CSYNC)
				    ? RADEON_CRTC_CSYNC_EN : 0)
				 | ((mode->flags & DRM_MODE_FLAG_INTERLACE)
				    ? RADEON_CRTC_INTERLACE_EN : 0));

		/* rs4xx chips seem to like to have the crtc enabled when the timing is set */
		if ((rdev->family == CHIP_RS400) || (rdev->family == CHIP_RS480))
			crtc_gen_cntl |= RADEON_CRTC_EN;

		crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
		crtc_ext_cntl |= (RADEON_XCRT_CNT_EN |
				  RADEON_CRTC_VSYNC_DIS |
				  RADEON_CRTC_HSYNC_DIS |
				  RADEON_CRTC_DISPLAY_DIS);

		disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL);
		disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN;

		WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl);
		WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl);
		WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
	}

	if (is_tv)
		radeon_legacy_tv_adjust_crtc_reg(encoder, &crtc_h_total_disp,
						 &crtc_h_sync_strt_wid, &crtc_v_total_disp,
						 &crtc_v_sync_strt_wid);

	WREG32(RADEON_CRTC_H_TOTAL_DISP + radeon_crtc->crtc_offset, crtc_h_total_disp);
	WREG32(RADEON_CRTC_H_SYNC_STRT_WID + radeon_crtc->crtc_offset, crtc_h_sync_strt_wid);
	WREG32(RADEON_CRTC_V_TOTAL_DISP + radeon_crtc->crtc_offset, crtc_v_total_disp);
	WREG32(RADEON_CRTC_V_SYNC_STRT_WID + radeon_crtc->crtc_offset, crtc_v_sync_strt_wid);

	return true;
}

/* Compute and program the pixel PLL (PPLL for CRTC1, P2PLL for CRTC2) for
 * the requested mode.  Legacy LVDS panels may force fixed BIOS-provided
 * dividers; otherwise dividers come from radeon_compute_pll_legacy().  The
 * register sequence (reset -> dividers -> atomic update -> release) must be
 * preserved as-is. */
static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
{
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_encoder *encoder;
	uint32_t feedback_div = 0;
	uint32_t frac_fb_div = 0;
	uint32_t reference_div = 0;
	uint32_t post_divider = 0;
	uint32_t freq = 0;
	uint8_t pll_gain;
	bool use_bios_divs = false;
	/* PLL registers */
	uint32_t pll_ref_div = 0;
	uint32_t pll_fb_post_div = 0;
	uint32_t htotal_cntl = 0;
	bool is_tv = false;
	struct radeon_pll *pll;

	struct {
		int divider;
		int bitvalue;
	} *post_div, post_divs[]   = {
		/* From RAGE 128 VR/RAGE 128 GL Register
		 * Reference Manual (Technical Reference
		 * Manual P/N RRG-G04100-C Rev. 0.04), page
		 * 3-17 (PLL_DIV_[3:0]).
		 */
		{  1, 0 },              /* VCLK_SRC                 */
		{  2, 1 },              /* VCLK_SRC/2               */
		{  4, 2 },              /* VCLK_SRC/4               */
		{  8, 3 },              /* VCLK_SRC/8               */
		{  3, 4 },              /* VCLK_SRC/3               */
		{ 16, 5 },              /* VCLK_SRC/16              */
		{  6, 6 },              /* VCLK_SRC/6               */
		{ 12, 7 },              /* VCLK_SRC/12              */
		{  0, 0 }
	};

	if (radeon_crtc->crtc_id)
		pll = &rdev->clock.p2pll;
	else
		pll = &rdev->clock.p1pll;

	pll->flags = RADEON_PLL_LEGACY;

	if (mode->clock > 200000) /* range limits??? */
		pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
	else
		pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;

	/* Inspect the encoder driving this CRTC: TV takes precedence;
	 * legacy (non-atom) LVDS may supply fixed BIOS dividers. */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc == crtc) {
			struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);

			if (radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT) {
				is_tv = true;
				break;
			}

			if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
				pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
			if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) {
				if (!rdev->is_atom_bios) {
					struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
					struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
					if (lvds) {
						if (lvds->use_bios_dividers) {
							pll_ref_div = lvds->panel_ref_divider;
							pll_fb_post_div   = (lvds->panel_fb_divider |
									     (lvds->panel_post_divider << 16));
							htotal_cntl  = 0;
							use_bios_divs = true;
						}
					}
				}
				pll->flags |= RADEON_PLL_USE_REF_DIV;
			}
		}
	}

	DRM_DEBUG_KMS("\n");

	if (!use_bios_divs) {
		radeon_compute_pll_legacy(pll, mode->clock,
					  &freq, &feedback_div, &frac_fb_div,
					  &reference_div, &post_divider);

		/* Translate the numeric post divider into its register
		 * encoding; fall back to /1 if no table entry matches. */
		for (post_div = &post_divs[0]; post_div->divider; ++post_div) {
			if (post_div->divider == post_divider)
				break;
		}

		if (!post_div->divider)
			post_div = &post_divs[0];

		DRM_DEBUG_KMS("dc=%u, fd=%d, rd=%d, pd=%d\n",
			  (unsigned)freq,
			  feedback_div,
			  reference_div,
			  post_divider);

		pll_ref_div   = reference_div;
#if defined(__powerpc__) && (0) /* TODO */
		/* apparently programming this otherwise causes a hang??? */
		if (info->MacModel == RADEON_MAC_IBOOK)
			pll_fb_post_div = 0x000600ad;
		else
#endif
			pll_fb_post_div     = (feedback_div | (post_div->bitvalue << 16));

		htotal_cntl    = mode->htotal & 0x7;

	}

	pll_gain = radeon_compute_pll_gain(pll->reference_freq,
					   pll_ref_div & 0x3ff,
					   pll_fb_post_div & 0x7ff);

	if (radeon_crtc->crtc_id) {
		uint32_t pixclks_cntl = ((RREG32_PLL(RADEON_PIXCLKS_CNTL) &
					  ~(RADEON_PIX2CLK_SRC_SEL_MASK)) |
					 RADEON_PIX2CLK_SRC_SEL_P2PLLCLK);

		if (is_tv) {
			radeon_legacy_tv_adjust_pll2(encoder, &htotal_cntl,
						     &pll_ref_div, &pll_fb_post_div,
						     &pixclks_cntl);
		}

		/* Park the pixel clock on the CPU clock while reprogramming. */
		WREG32_PLL_P(RADEON_PIXCLKS_CNTL,
			     RADEON_PIX2CLK_SRC_SEL_CPUCLK,
			     ~(RADEON_PIX2CLK_SRC_SEL_MASK));

		WREG32_PLL_P(RADEON_P2PLL_CNTL,
			     RADEON_P2PLL_RESET
			     | RADEON_P2PLL_ATOMIC_UPDATE_EN
			     | ((uint32_t)pll_gain << RADEON_P2PLL_PVG_SHIFT),
			     ~(RADEON_P2PLL_RESET
			       | RADEON_P2PLL_ATOMIC_UPDATE_EN
			       | RADEON_P2PLL_PVG_MASK));

		WREG32_PLL_P(RADEON_P2PLL_REF_DIV,
			     pll_ref_div,
			     ~RADEON_P2PLL_REF_DIV_MASK);

		WREG32_PLL_P(RADEON_P2PLL_DIV_0,
			     pll_fb_post_div,
			     ~RADEON_P2PLL_FB0_DIV_MASK);

		WREG32_PLL_P(RADEON_P2PLL_DIV_0,
			     pll_fb_post_div,
			     ~RADEON_P2PLL_POST0_DIV_MASK);

		radeon_pll2_write_update(dev);
		radeon_pll2_wait_for_read_update_complete(dev);

		WREG32_PLL(RADEON_HTOTAL2_CNTL, htotal_cntl);

		WREG32_PLL_P(RADEON_P2PLL_CNTL,
			     0,
			     ~(RADEON_P2PLL_RESET
			       | RADEON_P2PLL_SLEEP
			       | RADEON_P2PLL_ATOMIC_UPDATE_EN));

		DRM_DEBUG_KMS("Wrote2: 0x%08x 0x%08x 0x%08x (0x%08x)\n",
			  (unsigned)pll_ref_div,
			  (unsigned)pll_fb_post_div,
			  (unsigned)htotal_cntl,
			  RREG32_PLL(RADEON_P2PLL_CNTL));
		DRM_DEBUG_KMS("Wrote2: rd=%u, fd=%u, pd=%u\n",
			  (unsigned)pll_ref_div & RADEON_P2PLL_REF_DIV_MASK,
			  (unsigned)pll_fb_post_div & RADEON_P2PLL_FB0_DIV_MASK,
			  (unsigned)((pll_fb_post_div &
				      RADEON_P2PLL_POST0_DIV_MASK) >> 16));

		mdelay(50); /* Let the clock to lock */

		WREG32_PLL_P(RADEON_PIXCLKS_CNTL,
			     RADEON_PIX2CLK_SRC_SEL_P2PLLCLK,
			     ~(RADEON_PIX2CLK_SRC_SEL_MASK));

		WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
	} else {
		uint32_t pixclks_cntl;

		if (is_tv) {
			pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL);
			radeon_legacy_tv_adjust_pll1(encoder, &htotal_cntl, &pll_ref_div,
						     &pll_fb_post_div, &pixclks_cntl);
		}

		if (rdev->flags & RADEON_IS_MOBILITY) {
			/* A temporal workaround for the occasional blanking on certain laptop
			   panels. This appears to related to the PLL divider registers
			   (fail to lock?). It occurs even when all dividers are the same
			   with their old settings. In this case we really don't need to
			   fiddle with PLL registers. By doing this we can avoid the blanking
			   problem with some panels.
			 */
			if ((pll_ref_div == (RREG32_PLL(RADEON_PPLL_REF_DIV) &
					     RADEON_PPLL_REF_DIV_MASK)) &&
			    (pll_fb_post_div == (RREG32_PLL(RADEON_PPLL_DIV_3) &
						 (RADEON_PPLL_POST3_DIV_MASK |
						  RADEON_PPLL_FB3_DIV_MASK)))) {
				WREG32_P(RADEON_CLOCK_CNTL_INDEX,
					 RADEON_PLL_DIV_SEL,
					 ~(RADEON_PLL_DIV_SEL));
				r100_pll_errata_after_index(rdev);
				return;
			}
		}

		/* Park the pixel clock on the CPU clock while reprogramming. */
		WREG32_PLL_P(RADEON_VCLK_ECP_CNTL,
			     RADEON_VCLK_SRC_SEL_CPUCLK,
			     ~(RADEON_VCLK_SRC_SEL_MASK));
		WREG32_PLL_P(RADEON_PPLL_CNTL,
			     RADEON_PPLL_RESET
			     | RADEON_PPLL_ATOMIC_UPDATE_EN
			     | RADEON_PPLL_VGA_ATOMIC_UPDATE_EN
			     | ((uint32_t)pll_gain << RADEON_PPLL_PVG_SHIFT),
			     ~(RADEON_PPLL_RESET
			       | RADEON_PPLL_ATOMIC_UPDATE_EN
			       | RADEON_PPLL_VGA_ATOMIC_UPDATE_EN
			       | RADEON_PPLL_PVG_MASK));

		WREG32_P(RADEON_CLOCK_CNTL_INDEX,
			 RADEON_PLL_DIV_SEL,
			 ~(RADEON_PLL_DIV_SEL));
		r100_pll_errata_after_index(rdev);

		if (ASIC_IS_R300(rdev) ||
		    (rdev->family == CHIP_RS300) ||
		    (rdev->family == CHIP_RS400) ||
		    (rdev->family == CHIP_RS480)) {
			if (pll_ref_div & R300_PPLL_REF_DIV_ACC_MASK) {
				/* When restoring console mode, use saved PPLL_REF_DIV
				 * setting.
				 */
				WREG32_PLL_P(RADEON_PPLL_REF_DIV,
					     pll_ref_div,
					     0);
			} else {
				/* R300 uses ref_div_acc field as real ref divider */
				WREG32_PLL_P(RADEON_PPLL_REF_DIV,
					     (pll_ref_div << R300_PPLL_REF_DIV_ACC_SHIFT),
					     ~R300_PPLL_REF_DIV_ACC_MASK);
			}
		} else
			WREG32_PLL_P(RADEON_PPLL_REF_DIV,
				     pll_ref_div,
				     ~RADEON_PPLL_REF_DIV_MASK);

		WREG32_PLL_P(RADEON_PPLL_DIV_3,
			     pll_fb_post_div,
			     ~RADEON_PPLL_FB3_DIV_MASK);

		WREG32_PLL_P(RADEON_PPLL_DIV_3,
			     pll_fb_post_div,
			     ~RADEON_PPLL_POST3_DIV_MASK);

		radeon_pll_write_update(dev);
		radeon_pll_wait_for_read_update_complete(dev);

		WREG32_PLL(RADEON_HTOTAL_CNTL, htotal_cntl);

		WREG32_PLL_P(RADEON_PPLL_CNTL,
			     0,
			     ~(RADEON_PPLL_RESET
			       | RADEON_PPLL_SLEEP
			       | RADEON_PPLL_ATOMIC_UPDATE_EN
			       | RADEON_PPLL_VGA_ATOMIC_UPDATE_EN));

		DRM_DEBUG_KMS("Wrote: 0x%08x 0x%08x 0x%08x (0x%08x)\n",
			  pll_ref_div,
			  pll_fb_post_div,
			  (unsigned)htotal_cntl,
			  RREG32_PLL(RADEON_PPLL_CNTL));
		DRM_DEBUG_KMS("Wrote: rd=%d, fd=%d, pd=%d\n",
			  pll_ref_div & RADEON_PPLL_REF_DIV_MASK,
			  pll_fb_post_div & RADEON_PPLL_FB3_DIV_MASK,
			  (pll_fb_post_div & RADEON_PPLL_POST3_DIV_MASK) >> 16);

		mdelay(50); /* Let the clock to lock */

		WREG32_PLL_P(RADEON_VCLK_ECP_CNTL,
			     RADEON_VCLK_SRC_SEL_PPLLCLK,
			     ~(RADEON_VCLK_SRC_SEL_MASK));

		if (is_tv)
			WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
	}
}

/* mode_fixup helper: delegate to the shared scaling fixup. */
static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc,
				   struct drm_display_mode *mode,
				   struct drm_display_mode *adjusted_mode)
{
	if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
		return false;
	return true;
}

/* Full legacy mode set: base address, timing, PLL, overscan and (on CRTC1
 * only) the RMX scaler.  Scaling on the second CRTC is unsupported and only
 * reported. */
static int radeon_crtc_mode_set(struct drm_crtc *crtc,
				 struct drm_display_mode *mode,
				 struct drm_display_mode *adjusted_mode,
				 int x, int y, struct drm_framebuffer *old_fb)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);

	/* TODO TV */
	radeon_crtc_set_base(crtc, x, y, old_fb);
	radeon_set_crtc_timing(crtc, adjusted_mode);
	radeon_set_pll(crtc, adjusted_mode);
	radeon_overscan_setup(crtc, adjusted_mode);
	if (radeon_crtc->crtc_id == 0) {
		radeon_legacy_rmx_mode_set(crtc, adjusted_mode);
	} else {
		if (radeon_crtc->rmx_type != RMX_OFF) {
			/* FIXME: only first crtc has rmx what should we
			 * do ?
			 */
			DRM_ERROR("Mode need scaling but only first crtc can do that.\n");
		}
	}
	return 0;
}

static void radeon_crtc_prepare(struct drm_crtc *crtc)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_crtc *crtci;

	radeon_crtc->in_mode_set = true;
	/*
	 * The hardware wedges sometimes if you reconfigure one CRTC
	 * whilst another is running (see fdo bug #24611).
	 */
	list_for_each_entry(crtci, &dev->mode_config.crtc_list, head)
		radeon_crtc_dpms(crtci, DRM_MODE_DPMS_OFF);
}

static void radeon_crtc_commit(struct drm_crtc *crtc)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_crtc *crtci;

	/*
	 * Reenable the CRTCs that should be running.
	 */
	list_for_each_entry(crtci, &dev->mode_config.crtc_list, head) {
		if (crtci->enabled)
			radeon_crtc_dpms(crtci, DRM_MODE_DPMS_ON);
	}
	radeon_crtc->in_mode_set = false;
}

static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
	.dpms = radeon_crtc_dpms,
	.mode_fixup = radeon_crtc_mode_fixup,
	.mode_set = radeon_crtc_mode_set,
	.mode_set_base = radeon_crtc_set_base,
	.mode_set_base_atomic = radeon_crtc_set_base_atomic,
	.prepare = radeon_crtc_prepare,
	.commit = radeon_crtc_commit,
	.load_lut = radeon_crtc_load_lut,
};

/* Attach the legacy CRTC helper vtable and set the register offset used to
 * address CRTC2's register bank. */
void radeon_legacy_init_crtc(struct drm_device *dev,
			       struct radeon_crtc *radeon_crtc)
{
	if (radeon_crtc->crtc_id == 1)
		radeon_crtc->crtc_offset = RADEON_CRTC2_H_TOTAL_DISP - RADEON_CRTC_H_TOTAL_DISP;
	drm_crtc_helper_add(&radeon_crtc->base, &legacy_helper_funcs);
}
gpl-2.0
ImYeol/linux_fbtft
fs/ubifs/log.c
1219
19811
/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 51
 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём)
 *          Adrian Hunter
 */

/*
 * This file is a part of UBIFS journal implementation and contains various
 * functions which manipulate the log. The log is a fixed area on the flash
 * which does not contain any data but refers to buds. The log is a part of the
 * journal.
 */

#include "ubifs.h"

static int dbg_check_bud_bytes(struct ubifs_info *c);

/**
 * ubifs_search_bud - search bud LEB.
 * @c: UBIFS file-system description object
 * @lnum: logical eraseblock number to search
 *
 * This function searches bud LEB @lnum. Returns bud description object in case
 * of success and %NULL if there is no bud with this LEB number.
 */
struct ubifs_bud *ubifs_search_bud(struct ubifs_info *c, int lnum)
{
	struct rb_node *p;
	struct ubifs_bud *bud;

	/* Walk the buds red-black tree (keyed by LEB number) under the lock. */
	spin_lock(&c->buds_lock);
	p = c->buds.rb_node;
	while (p) {
		bud = rb_entry(p, struct ubifs_bud, rb);
		if (lnum < bud->lnum)
			p = p->rb_left;
		else if (lnum > bud->lnum)
			p = p->rb_right;
		else {
			spin_unlock(&c->buds_lock);
			return bud;
		}
	}
	spin_unlock(&c->buds_lock);
	return NULL;
}

/**
 * ubifs_get_wbuf - get the wbuf associated with a LEB, if there is one.
 * @c: UBIFS file-system description object
 * @lnum: logical eraseblock number to search
 *
 * This functions returns the wbuf for @lnum or %NULL if there is not one.
 */
struct ubifs_wbuf *ubifs_get_wbuf(struct ubifs_info *c, int lnum)
{
	struct rb_node *p;
	struct ubifs_bud *bud;
	int jhead;

	/* No journal heads yet (e.g. during early mount) - no wbufs either. */
	if (!c->jheads)
		return NULL;

	spin_lock(&c->buds_lock);
	p = c->buds.rb_node;
	while (p) {
		bud = rb_entry(p, struct ubifs_bud, rb);
		if (lnum < bud->lnum)
			p = p->rb_left;
		else if (lnum > bud->lnum)
			p = p->rb_right;
		else {
			jhead = bud->jhead;
			spin_unlock(&c->buds_lock);
			return &c->jheads[jhead].wbuf;
		}
	}
	spin_unlock(&c->buds_lock);
	return NULL;
}

/**
 * empty_log_bytes - calculate amount of empty space in the log.
 * @c: UBIFS file-system description object
 *
 * The log is a circular area between the tail and the head; the wrap-around
 * case (head >= tail) is handled explicitly.
 */
static inline long long empty_log_bytes(const struct ubifs_info *c)
{
	long long h, t;

	h = (long long)c->lhead_lnum * c->leb_size + c->lhead_offs;
	t = (long long)c->ltail_lnum * c->leb_size;

	if (h >= t)
		return c->log_bytes - h + t;
	else
		return t - h;
}

/**
 * ubifs_add_bud - add bud LEB to the tree of buds and its journal head list.
 * @c: UBIFS file-system description object
 * @bud: the bud to add
 */
void ubifs_add_bud(struct ubifs_info *c, struct ubifs_bud *bud)
{
	struct rb_node **p, *parent = NULL;
	struct ubifs_bud *b;
	struct ubifs_jhead *jhead;

	spin_lock(&c->buds_lock);
	p = &c->buds.rb_node;
	while (*p) {
		parent = *p;
		b = rb_entry(parent, struct ubifs_bud, rb);
		/* LEB numbers are unique keys - a duplicate would be a bug. */
		ubifs_assert(bud->lnum != b->lnum);
		if (bud->lnum < b->lnum)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&bud->rb, parent, p);
	rb_insert_color(&bud->rb, &c->buds);
	if (c->jheads) {
		jhead = &c->jheads[bud->jhead];
		list_add_tail(&bud->list, &jhead->buds_list);
	} else
		/* Journal heads may be absent only while replaying a R/O mount. */
		ubifs_assert(c->replaying && c->ro_mount);

	/*
	 * Note, although this is a new bud, we anyway account this space now,
	 * before any data has been written to it, because this is about to
	 * guarantee fixed mount time, and this bud will anyway be read and
	 * scanned.
	 */
	c->bud_bytes += c->leb_size - bud->start;

	dbg_log("LEB %d:%d, jhead %s, bud_bytes %lld", bud->lnum,
		bud->start, dbg_jhead(bud->jhead), c->bud_bytes);
	spin_unlock(&c->buds_lock);
}

/**
 * ubifs_add_bud_to_log - add a new bud to the log.
 * @c: UBIFS file-system description object
 * @jhead: journal head the bud belongs to
 * @lnum: LEB number of the bud
 * @offs: starting offset of the bud
 *
 * This function writes reference node for the new bud LEB @lnum it to the log,
 * and adds it to the buds tress. It also makes sure that log size does not
 * exceed the 'c->max_bud_bytes' limit. Returns zero in case of success,
 * %-EAGAIN if commit is required, and a negative error codes in case of
 * failure.
 */
int ubifs_add_bud_to_log(struct ubifs_info *c, int jhead, int lnum, int offs)
{
	int err;
	struct ubifs_bud *bud;
	struct ubifs_ref_node *ref;

	/* Allocate before taking the mutex so failure paths stay simple. */
	bud = kmalloc(sizeof(struct ubifs_bud), GFP_NOFS);
	if (!bud)
		return -ENOMEM;
	ref = kzalloc(c->ref_node_alsz, GFP_NOFS);
	if (!ref) {
		kfree(bud);
		return -ENOMEM;
	}

	mutex_lock(&c->log_mutex);
	ubifs_assert(!c->ro_media && !c->ro_mount);
	if (c->ro_error) {
		err = -EROFS;
		goto out_unlock;
	}

	/* Make sure we have enough space in the log */
	if (empty_log_bytes(c) - c->ref_node_alsz < c->min_log_bytes) {
		dbg_log("not enough log space - %lld, required %d",
			empty_log_bytes(c), c->min_log_bytes);
		ubifs_commit_required(c);
		err = -EAGAIN;
		goto out_unlock;
	}

	/*
	 * Make sure the amount of space in buds will not exceed the
	 * 'c->max_bud_bytes' limit, because we want to guarantee mount time
	 * limits.
	 *
	 * It is not necessary to hold @c->buds_lock when reading @c->bud_bytes
	 * because we are holding @c->log_mutex. All changes to @c->bud_bytes
	 * take place when both @c->log_mutex and @c->buds_lock are locked.
	 */
	if (c->bud_bytes + c->leb_size - offs > c->max_bud_bytes) {
		dbg_log("bud bytes %lld (%lld max), require commit",
			c->bud_bytes, c->max_bud_bytes);
		ubifs_commit_required(c);
		err = -EAGAIN;
		goto out_unlock;
	}

	/*
	 * If the journal is full enough - start background commit. Note, it is
	 * OK to read 'c->cmt_state' without spinlock because integer reads
	 * are atomic in the kernel.
	 */
	if (c->bud_bytes >= c->bg_bud_bytes &&
	    c->cmt_state == COMMIT_RESTING) {
		dbg_log("bud bytes %lld (%lld max), initiate BG commit",
			c->bud_bytes, c->max_bud_bytes);
		ubifs_request_bg_commit(c);
	}

	bud->lnum = lnum;
	bud->start = offs;
	bud->jhead = jhead;

	ref->ch.node_type = UBIFS_REF_NODE;
	ref->lnum = cpu_to_le32(bud->lnum);
	ref->offs = cpu_to_le32(bud->start);
	ref->jhead = cpu_to_le32(jhead);

	/* No room for another reference node in the current log LEB. */
	if (c->lhead_offs > c->leb_size - c->ref_node_alsz) {
		c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
		c->lhead_offs = 0;
	}

	if (c->lhead_offs == 0) {
		/* Must ensure next log LEB has been unmapped */
		err = ubifs_leb_unmap(c, c->lhead_lnum);
		if (err)
			goto out_unlock;
	}

	if (bud->start == 0) {
		/*
		 * Before writing the LEB reference which refers an empty LEB
		 * to the log, we have to make sure it is mapped, because
		 * otherwise we'd risk to refer an LEB with garbage in case of
		 * an unclean reboot, because the target LEB might have been
		 * unmapped, but not yet physically erased.
		 */
		err = ubifs_leb_map(c, bud->lnum);
		if (err)
			goto out_unlock;
	}

	dbg_log("write ref LEB %d:%d",
		c->lhead_lnum, c->lhead_offs);
	err = ubifs_write_node(c, ref, UBIFS_REF_NODE_SZ, c->lhead_lnum,
			       c->lhead_offs);
	if (err)
		goto out_unlock;

	c->lhead_offs += c->ref_node_alsz;

	ubifs_add_bud(c, bud);

	mutex_unlock(&c->log_mutex);
	kfree(ref);
	return 0;

out_unlock:
	mutex_unlock(&c->log_mutex);
	kfree(ref);
	kfree(bud);
	return err;
}

/**
 * remove_buds - remove used buds.
 * @c: UBIFS file-system description object
 *
 * This function removes used buds from the buds tree. It does not remove the
 * buds which are pointed to by journal heads.
*/ static void remove_buds(struct ubifs_info *c) { struct rb_node *p; ubifs_assert(list_empty(&c->old_buds)); c->cmt_bud_bytes = 0; spin_lock(&c->buds_lock); p = rb_first(&c->buds); while (p) { struct rb_node *p1 = p; struct ubifs_bud *bud; struct ubifs_wbuf *wbuf; p = rb_next(p); bud = rb_entry(p1, struct ubifs_bud, rb); wbuf = &c->jheads[bud->jhead].wbuf; if (wbuf->lnum == bud->lnum) { /* * Do not remove buds which are pointed to by journal * heads (non-closed buds). */ c->cmt_bud_bytes += wbuf->offs - bud->start; dbg_log("preserve %d:%d, jhead %s, bud bytes %d, cmt_bud_bytes %lld", bud->lnum, bud->start, dbg_jhead(bud->jhead), wbuf->offs - bud->start, c->cmt_bud_bytes); bud->start = wbuf->offs; } else { c->cmt_bud_bytes += c->leb_size - bud->start; dbg_log("remove %d:%d, jhead %s, bud bytes %d, cmt_bud_bytes %lld", bud->lnum, bud->start, dbg_jhead(bud->jhead), c->leb_size - bud->start, c->cmt_bud_bytes); rb_erase(p1, &c->buds); /* * If the commit does not finish, the recovery will need * to replay the journal, in which case the old buds * must be unchanged. Do not release them until post * commit i.e. do not allow them to be garbage * collected. */ list_move(&bud->list, &c->old_buds); } } spin_unlock(&c->buds_lock); } /** * ubifs_log_start_commit - start commit. * @c: UBIFS file-system description object * @ltail_lnum: return new log tail LEB number * * The commit operation starts with writing "commit start" node to the log and * reference nodes for all journal heads which will define new journal after * the commit has been finished. The commit start and reference nodes are * written in one go to the nearest empty log LEB (hence, when commit is * finished UBIFS may safely unmap all the previous log LEBs). This function * returns zero in case of success and a negative error code in case of * failure. 
*/
int ubifs_log_start_commit(struct ubifs_info *c, int *ltail_lnum)
{
	void *buf;
	struct ubifs_cs_node *cs;
	struct ubifs_ref_node *ref;
	int err, i, max_len, len;

	err = dbg_check_bud_bytes(c);
	if (err)
		return err;

	/* Worst case: CS node plus one reference node per journal head */
	max_len = UBIFS_CS_NODE_SZ + c->jhead_cnt * UBIFS_REF_NODE_SZ;
	max_len = ALIGN(max_len, c->min_io_size);
	buf = cs = kmalloc(max_len, GFP_NOFS);
	if (!buf)
		return -ENOMEM;

	cs->ch.node_type = UBIFS_CS_NODE;
	cs->cmt_no = cpu_to_le64(c->cmt_no);
	ubifs_prepare_node(c, cs, UBIFS_CS_NODE_SZ, 0);

	/*
	 * Note, we do not lock 'c->log_mutex' because this is the commit start
	 * phase and we are exclusively using the log. And we do not lock
	 * write-buffer because nobody can write to the file-system at this
	 * phase.
	 */
	len = UBIFS_CS_NODE_SZ;
	for (i = 0; i < c->jhead_cnt; i++) {
		int lnum = c->jheads[i].wbuf.lnum;
		int offs = c->jheads[i].wbuf.offs;

		/* Skip journal heads with no open bud */
		if (lnum == -1 || offs == c->leb_size)
			continue;

		dbg_log("add ref to LEB %d:%d for jhead %s",
			lnum, offs, dbg_jhead(i));
		ref = buf + len;
		ref->ch.node_type = UBIFS_REF_NODE;
		ref->lnum = cpu_to_le32(lnum);
		ref->offs = cpu_to_le32(offs);
		ref->jhead = cpu_to_le32(i);

		ubifs_prepare_node(c, ref, UBIFS_REF_NODE_SZ, 0);
		len += UBIFS_REF_NODE_SZ;
	}

	ubifs_pad(c, buf + len, ALIGN(len, c->min_io_size) - len);

	/* Switch to the next log LEB */
	if (c->lhead_offs) {
		c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
		c->lhead_offs = 0;
	}

	if (c->lhead_offs == 0) {
		/* Must ensure next LEB has been unmapped */
		err = ubifs_leb_unmap(c, c->lhead_lnum);
		if (err)
			goto out;
	}

	len = ALIGN(len, c->min_io_size);
	dbg_log("writing commit start at LEB %d:0, len %d", c->lhead_lnum, len);
	err = ubifs_leb_write(c, c->lhead_lnum, cs, 0, len);
	if (err)
		goto out;

	*ltail_lnum = c->lhead_lnum;

	c->lhead_offs += len;
	if (c->lhead_offs == c->leb_size) {
		c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
		c->lhead_offs = 0;
	}

	remove_buds(c);

	/*
	 * We have started the commit and now users may use the rest of the log
	 * for new writes.
	 */
	c->min_log_bytes = 0;

out:
	kfree(buf);
	return err;
}

/**
 * ubifs_log_end_commit - end commit.
 * @c: UBIFS file-system description object
 * @ltail_lnum: new log tail LEB number
 *
 * This function is called on when the commit operation was finished. It
 * moves log tail to new position and unmaps LEBs which contain obsolete data.
 * Returns zero in case of success and a negative error code in case of
 * failure.
 */
int ubifs_log_end_commit(struct ubifs_info *c, int ltail_lnum)
{
	int err;

	/*
	 * At this phase we have to lock 'c->log_mutex' because UBIFS allows FS
	 * writes during commit. Its only short "commit" start phase when
	 * writers are blocked.
	 */
	mutex_lock(&c->log_mutex);

	dbg_log("old tail was LEB %d:0, new tail is LEB %d:0",
		c->ltail_lnum, ltail_lnum);

	c->ltail_lnum = ltail_lnum;
	/*
	 * The commit is finished and from now on it must be guaranteed that
	 * there is always enough space for the next commit.
	 */
	c->min_log_bytes = c->leb_size;

	spin_lock(&c->buds_lock);
	c->bud_bytes -= c->cmt_bud_bytes;
	spin_unlock(&c->buds_lock);

	err = dbg_check_bud_bytes(c);

	mutex_unlock(&c->log_mutex);
	return err;
}

/**
 * ubifs_log_post_commit - things to do after commit is completed.
 * @c: UBIFS file-system description object
 * @old_ltail_lnum: old log tail LEB number
 *
 * Release buds only after commit is completed, because they must be unchanged
 * if recovery is needed.
 *
 * Unmap log LEBs only after commit is completed, because they may be needed for
 * recovery.
 *
 * This function returns %0 on success and a negative error code on failure.
*/ int ubifs_log_post_commit(struct ubifs_info *c, int old_ltail_lnum) { int lnum, err = 0; while (!list_empty(&c->old_buds)) { struct ubifs_bud *bud; bud = list_entry(c->old_buds.next, struct ubifs_bud, list); err = ubifs_return_leb(c, bud->lnum); if (err) return err; list_del(&bud->list); kfree(bud); } mutex_lock(&c->log_mutex); for (lnum = old_ltail_lnum; lnum != c->ltail_lnum; lnum = ubifs_next_log_lnum(c, lnum)) { dbg_log("unmap log LEB %d", lnum); err = ubifs_leb_unmap(c, lnum); if (err) goto out; } out: mutex_unlock(&c->log_mutex); return err; } /** * struct done_ref - references that have been done. * @rb: rb-tree node * @lnum: LEB number */ struct done_ref { struct rb_node rb; int lnum; }; /** * done_already - determine if a reference has been done already. * @done_tree: rb-tree to store references that have been done * @lnum: LEB number of reference * * This function returns %1 if the reference has been done, %0 if not, otherwise * a negative error code is returned. */ static int done_already(struct rb_root *done_tree, int lnum) { struct rb_node **p = &done_tree->rb_node, *parent = NULL; struct done_ref *dr; while (*p) { parent = *p; dr = rb_entry(parent, struct done_ref, rb); if (lnum < dr->lnum) p = &(*p)->rb_left; else if (lnum > dr->lnum) p = &(*p)->rb_right; else return 1; } dr = kzalloc(sizeof(struct done_ref), GFP_NOFS); if (!dr) return -ENOMEM; dr->lnum = lnum; rb_link_node(&dr->rb, parent, p); rb_insert_color(&dr->rb, done_tree); return 0; } /** * destroy_done_tree - destroy the done tree. 
* @done_tree: done tree to destroy */ static void destroy_done_tree(struct rb_root *done_tree) { struct rb_node *this = done_tree->rb_node; struct done_ref *dr; while (this) { if (this->rb_left) { this = this->rb_left; continue; } else if (this->rb_right) { this = this->rb_right; continue; } dr = rb_entry(this, struct done_ref, rb); this = rb_parent(this); if (this) { if (this->rb_left == &dr->rb) this->rb_left = NULL; else this->rb_right = NULL; } kfree(dr); } } /** * add_node - add a node to the consolidated log. * @c: UBIFS file-system description object * @buf: buffer to which to add * @lnum: LEB number to which to write is passed and returned here * @offs: offset to where to write is passed and returned here * @node: node to add * * This function returns %0 on success and a negative error code on failure. */ static int add_node(struct ubifs_info *c, void *buf, int *lnum, int *offs, void *node) { struct ubifs_ch *ch = node; int len = le32_to_cpu(ch->len), remains = c->leb_size - *offs; if (len > remains) { int sz = ALIGN(*offs, c->min_io_size), err; ubifs_pad(c, buf + *offs, sz - *offs); err = ubifs_leb_change(c, *lnum, buf, sz); if (err) return err; *lnum = ubifs_next_log_lnum(c, *lnum); *offs = 0; } memcpy(buf + *offs, node, len); *offs += ALIGN(len, 8); return 0; } /** * ubifs_consolidate_log - consolidate the log. * @c: UBIFS file-system description object * * Repeated failed commits could cause the log to be full, but at least 1 LEB is * needed for commit. This function rewrites the reference nodes in the log * omitting duplicates, and failed CS nodes, and leaving no gaps. * * This function returns %0 on success and a negative error code on failure. 
*/ int ubifs_consolidate_log(struct ubifs_info *c) { struct ubifs_scan_leb *sleb; struct ubifs_scan_node *snod; struct rb_root done_tree = RB_ROOT; int lnum, err, first = 1, write_lnum, offs = 0; void *buf; dbg_rcvry("log tail LEB %d, log head LEB %d", c->ltail_lnum, c->lhead_lnum); buf = vmalloc(c->leb_size); if (!buf) return -ENOMEM; lnum = c->ltail_lnum; write_lnum = lnum; while (1) { sleb = ubifs_scan(c, lnum, 0, c->sbuf, 0); if (IS_ERR(sleb)) { err = PTR_ERR(sleb); goto out_free; } list_for_each_entry(snod, &sleb->nodes, list) { switch (snod->type) { case UBIFS_REF_NODE: { struct ubifs_ref_node *ref = snod->node; int ref_lnum = le32_to_cpu(ref->lnum); err = done_already(&done_tree, ref_lnum); if (err < 0) goto out_scan; if (err != 1) { err = add_node(c, buf, &write_lnum, &offs, snod->node); if (err) goto out_scan; } break; } case UBIFS_CS_NODE: if (!first) break; err = add_node(c, buf, &write_lnum, &offs, snod->node); if (err) goto out_scan; first = 0; break; } } ubifs_scan_destroy(sleb); if (lnum == c->lhead_lnum) break; lnum = ubifs_next_log_lnum(c, lnum); } if (offs) { int sz = ALIGN(offs, c->min_io_size); ubifs_pad(c, buf + offs, sz - offs); err = ubifs_leb_change(c, write_lnum, buf, sz); if (err) goto out_free; offs = ALIGN(offs, c->min_io_size); } destroy_done_tree(&done_tree); vfree(buf); if (write_lnum == c->lhead_lnum) { ubifs_err("log is too full"); return -EINVAL; } /* Unmap remaining LEBs */ lnum = write_lnum; do { lnum = ubifs_next_log_lnum(c, lnum); err = ubifs_leb_unmap(c, lnum); if (err) return err; } while (lnum != c->lhead_lnum); c->lhead_lnum = write_lnum; c->lhead_offs = offs; dbg_rcvry("new log head at %d:%d", c->lhead_lnum, c->lhead_offs); return 0; out_scan: ubifs_scan_destroy(sleb); out_free: destroy_done_tree(&done_tree); vfree(buf); return err; } /** * dbg_check_bud_bytes - make sure bud bytes calculation are all right. 
* @c: UBIFS file-system description object * * This function makes sure the amount of flash space used by closed buds * ('c->bud_bytes' is correct). Returns zero in case of success and %-EINVAL in * case of failure. */ static int dbg_check_bud_bytes(struct ubifs_info *c) { int i, err = 0; struct ubifs_bud *bud; long long bud_bytes = 0; if (!dbg_is_chk_gen(c)) return 0; spin_lock(&c->buds_lock); for (i = 0; i < c->jhead_cnt; i++) list_for_each_entry(bud, &c->jheads[i].buds_list, list) bud_bytes += c->leb_size - bud->start; if (c->bud_bytes != bud_bytes) { ubifs_err("bad bud_bytes %lld, calculated %lld", c->bud_bytes, bud_bytes); err = -EINVAL; } spin_unlock(&c->buds_lock); return err; }
gpl-2.0
kozmikkick/kozmikvigor
drivers/gpio/gpio-plat-samsung.c
1987
5568
/* arch/arm/plat-samsung/gpiolib.c * * Copyright 2008 Openmoko, Inc. * Copyright 2008 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * http://armlinux.simtec.co.uk/ * * Copyright (c) 2009 Samsung Electronics Co., Ltd. * http://www.samsung.com/ * * SAMSUNG - GPIOlib support * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/irq.h> #include <linux/io.h> #include <linux/gpio.h> #include <plat/gpio-core.h> #include <plat/gpio-cfg.h> #include <plat/gpio-cfg-helpers.h> #ifndef DEBUG_GPIO #define gpio_dbg(x...) do { } while (0) #else #define gpio_dbg(x...) printk(KERN_DEBUG x) #endif /* The samsung_gpiolib_4bit routines are to control the gpio banks where * the gpio configuration register (GPxCON) has 4 bits per GPIO, as the * following example: * * base + 0x00: Control register, 4 bits per gpio * gpio n: 4 bits starting at (4*n) * 0000 = input, 0001 = output, others mean special-function * base + 0x04: Data register, 1 bit per gpio * bit n: data bit n * * Note, since the data register is one bit per gpio and is at base + 0x4 * we can use s3c_gpiolib_get and s3c_gpiolib_set to change the state of * the output. 
*/ static int samsung_gpiolib_4bit_input(struct gpio_chip *chip, unsigned int offset) { struct s3c_gpio_chip *ourchip = to_s3c_gpio(chip); void __iomem *base = ourchip->base; unsigned long con; con = __raw_readl(base + GPIOCON_OFF); con &= ~(0xf << con_4bit_shift(offset)); __raw_writel(con, base + GPIOCON_OFF); gpio_dbg("%s: %p: CON now %08lx\n", __func__, base, con); return 0; } static int samsung_gpiolib_4bit_output(struct gpio_chip *chip, unsigned int offset, int value) { struct s3c_gpio_chip *ourchip = to_s3c_gpio(chip); void __iomem *base = ourchip->base; unsigned long con; unsigned long dat; con = __raw_readl(base + GPIOCON_OFF); con &= ~(0xf << con_4bit_shift(offset)); con |= 0x1 << con_4bit_shift(offset); dat = __raw_readl(base + GPIODAT_OFF); if (value) dat |= 1 << offset; else dat &= ~(1 << offset); __raw_writel(dat, base + GPIODAT_OFF); __raw_writel(con, base + GPIOCON_OFF); __raw_writel(dat, base + GPIODAT_OFF); gpio_dbg("%s: %p: CON %08lx, DAT %08lx\n", __func__, base, con, dat); return 0; } /* The next set of routines are for the case where the GPIO configuration * registers are 4 bits per GPIO but there is more than one register (the * bank has more than 8 GPIOs. * * This case is the similar to the 4 bit case, but the registers are as * follows: * * base + 0x00: Control register, 4 bits per gpio (lower 8 GPIOs) * gpio n: 4 bits starting at (4*n) * 0000 = input, 0001 = output, others mean special-function * base + 0x04: Control register, 4 bits per gpio (up to 8 additions GPIOs) * gpio n: 4 bits starting at (4*n) * 0000 = input, 0001 = output, others mean special-function * base + 0x08: Data register, 1 bit per gpio * bit n: data bit n * * To allow us to use the s3c_gpiolib_get and s3c_gpiolib_set routines we * store the 'base + 0x4' address so that these routines see the data * register at ourchip->base + 0x04. 
*/
static int samsung_gpiolib_4bit2_input(struct gpio_chip *chip,
				       unsigned int offset)
{
	struct s3c_gpio_chip *ourchip = to_s3c_gpio(chip);
	void __iomem *base = ourchip->base;
	void __iomem *regcon = base;
	unsigned long con;

	/*
	 * ourchip->base points at the second CON register (see the comment
	 * above): gpios 8+ use it directly, gpios 0-7 use the register 4
	 * bytes below it.
	 */
	if (offset > 7)
		offset -= 8;
	else
		regcon -= 4;

	con = __raw_readl(regcon);
	con &= ~(0xf << con_4bit_shift(offset));
	__raw_writel(con, regcon);

	gpio_dbg("%s: %p: CON %08lx\n", __func__, base, con);

	return 0;
}

static int samsung_gpiolib_4bit2_output(struct gpio_chip *chip,
					unsigned int offset, int value)
{
	struct s3c_gpio_chip *ourchip = to_s3c_gpio(chip);
	void __iomem *base = ourchip->base;
	void __iomem *regcon = base;
	unsigned long con;
	unsigned long dat;
	unsigned con_offset = offset;

	/* Select the right CON register; keep 'offset' intact for DAT */
	if (con_offset > 7)
		con_offset -= 8;
	else
		regcon -= 4;

	con = __raw_readl(regcon);
	con &= ~(0xf << con_4bit_shift(con_offset));
	con |= 0x1 << con_4bit_shift(con_offset);

	dat = __raw_readl(base + GPIODAT_OFF);

	if (value)
		dat |= 1 << offset;
	else
		dat &= ~(1 << offset);

	/* Write DAT around CON so the pin level is correct either way */
	__raw_writel(dat, base + GPIODAT_OFF);
	__raw_writel(con, regcon);
	__raw_writel(dat, base + GPIODAT_OFF);

	gpio_dbg("%s: %p: CON %08lx, DAT %08lx\n", __func__, base, con, dat);

	return 0;
}

/* Install the 4-bit direction callbacks and power management hooks */
void __init samsung_gpiolib_add_4bit(struct s3c_gpio_chip *chip)
{
	chip->chip.direction_input = samsung_gpiolib_4bit_input;
	chip->chip.direction_output = samsung_gpiolib_4bit_output;
	chip->pm = __gpio_pm(&s3c_gpio_pm_4bit);
}

/* As above, but for banks with two 4-bit CON registers */
void __init samsung_gpiolib_add_4bit2(struct s3c_gpio_chip *chip)
{
	chip->chip.direction_input = samsung_gpiolib_4bit2_input;
	chip->chip.direction_output = samsung_gpiolib_4bit2_output;
	chip->pm = __gpio_pm(&s3c_gpio_pm_4bit);
}

void __init samsung_gpiolib_add_4bit_chips(struct s3c_gpio_chip *chip,
					   int nr_chips)
{
	for (; nr_chips > 0; nr_chips--, chip++) {
		samsung_gpiolib_add_4bit(chip);
		s3c_gpiolib_add(chip);
	}
}

void __init samsung_gpiolib_add_4bit2_chips(struct s3c_gpio_chip *chip,
					    int nr_chips)
{
	for (; nr_chips > 0; nr_chips--, chip++) {
		samsung_gpiolib_add_4bit2(chip);
		s3c_gpiolib_add(chip);
	}
}

/* 2-bit banks need no special callbacks - just register them */
void __init samsung_gpiolib_add_2bit_chips(struct s3c_gpio_chip *chip,
					   int nr_chips)
{
	for (; nr_chips > 0; nr_chips--, chip++)
		s3c_gpiolib_add(chip);
}
gpl-2.0
mydongistiny/kernel_huawei_angler-ak
drivers/hwmon/adt7310.c
2243
2945
/*
 * ADT7310/ADT7320 digital temperature sensor driver
 *
 * Copyright 2012-2013 Analog Devices Inc.
 * Author: Lars-Peter Clausen <lars@metafoo.de>
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/spi/spi.h>
#include <asm/unaligned.h>

#include "adt7x10.h"

/* ADT7310 register addresses */
#define ADT7310_STATUS			0
#define ADT7310_CONFIG			1
#define ADT7310_TEMPERATURE		2
#define ADT7310_ID			3
#define ADT7310_T_CRIT			4
#define ADT7310_T_HYST			5
#define ADT7310_T_ALARM_HIGH		6
#define ADT7310_T_ALARM_LOW		7

/* Map generic adt7x10 register indices onto ADT7310 register addresses */
static const u8 adt7310_reg_table[] = {
	[ADT7X10_TEMPERATURE]	= ADT7310_TEMPERATURE,
	[ADT7X10_STATUS]	= ADT7310_STATUS,
	[ADT7X10_CONFIG]	= ADT7310_CONFIG,
	[ADT7X10_T_ALARM_HIGH]	= ADT7310_T_ALARM_HIGH,
	[ADT7X10_T_ALARM_LOW]	= ADT7310_T_ALARM_LOW,
	[ADT7X10_T_CRIT]	= ADT7310_T_CRIT,
	[ADT7X10_T_HYST]	= ADT7310_T_HYST,
	[ADT7X10_ID]		= ADT7310_ID,
};

/* Command byte: register address in bits 5:3, read flag in bit 6 */
#define ADT7310_CMD_REG_OFFSET	3
#define ADT7310_CMD_READ	0x40

#define AD7310_COMMAND(reg) (adt7310_reg_table[(reg)] << ADT7310_CMD_REG_OFFSET)

static int adt7310_spi_read_word(struct device *dev, u8 reg)
{
	struct spi_device *spi = to_spi_device(dev);
	int ret;

	ret = spi_w8r16(spi, AD7310_COMMAND(reg) | ADT7310_CMD_READ);
	if (ret < 0)
		return ret;

	/* Device returns big-endian data; spi_w8r16 does not convert it */
	return be16_to_cpu((__force __be16)ret);
}

static int adt7310_spi_write_word(struct device *dev, u8 reg, u16 data)
{
	struct spi_device *spi = to_spi_device(dev);
	u8 buf[3];

	buf[0] = AD7310_COMMAND(reg);
	put_unaligned_be16(data, &buf[1]);

	return spi_write(spi, buf, sizeof(buf));
}

static int adt7310_spi_read_byte(struct device *dev, u8 reg)
{
	struct spi_device *spi = to_spi_device(dev);

	return spi_w8r8(spi, AD7310_COMMAND(reg) | ADT7310_CMD_READ);
}

static int adt7310_spi_write_byte(struct device *dev, u8 reg, u8 data)
{
	struct spi_device *spi = to_spi_device(dev);
	u8 buf[2];

	buf[0] = AD7310_COMMAND(reg);
	buf[1] = data;

	return spi_write(spi, buf, sizeof(buf));
}

/* Register access callbacks handed to the common adt7x10 core */
static const struct adt7x10_ops adt7310_spi_ops = {
	.read_word = adt7310_spi_read_word,
	.write_word = adt7310_spi_write_word,
	.read_byte = adt7310_spi_read_byte,
	.write_byte = adt7310_spi_write_byte,
};

static int adt7310_spi_probe(struct spi_device *spi)
{
	return adt7x10_probe(&spi->dev, spi_get_device_id(spi)->name, spi->irq,
			&adt7310_spi_ops);
}

static int adt7310_spi_remove(struct spi_device *spi)
{
	return adt7x10_remove(&spi->dev, spi->irq);
}

static const struct spi_device_id adt7310_id[] = {
	{ "adt7310", 0 },
	{ "adt7320", 0 },
	{}
};
MODULE_DEVICE_TABLE(spi, adt7310_id);

static struct spi_driver adt7310_driver = {
	.driver = {
		.name = "adt7310",
		.owner = THIS_MODULE,
		.pm = ADT7X10_DEV_PM_OPS,
	},
	.probe = adt7310_spi_probe,
	.remove = adt7310_spi_remove,
	.id_table = adt7310_id,
};
module_spi_driver(adt7310_driver);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("ADT7310/ADT7320 driver");
MODULE_LICENSE("GPL");
gpl-2.0
CyanogenMod/android_kernel_hardkernel_odroidc1
drivers/staging/vt6656/datarate.c
2243
11098
/* * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * File: datarate.c * * Purpose: Handles the auto fallback & data rates functions * * Author: Lyndon Chen * * Date: July 17, 2002 * * Functions: * RATEvParseMaxRate - Parsing the highest basic & support rate in rate field of frame * RATEvTxRateFallBack - Rate fallback Algorithm Implementaion * RATEuSetIE- Set rate IE field. 
* * Revision History: * */ #include "tmacro.h" #include "mac.h" #include "80211mgr.h" #include "bssdb.h" #include "datarate.h" #include "card.h" #include "baseband.h" #include "srom.h" #include "rf.h" /* static int msglevel = MSG_LEVEL_DEBUG; */ static int msglevel =MSG_LEVEL_INFO; const u8 acbyIERate[MAX_RATE] = {0x02, 0x04, 0x0B, 0x16, 0x0C, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C}; #define AUTORATE_TXOK_CNT 0x0400 #define AUTORATE_TXFAIL_CNT 0x0064 #define AUTORATE_TIMEOUT 10 void s_vResetCounter(PKnownNodeDB psNodeDBTable); void s_vResetCounter(PKnownNodeDB psNodeDBTable) { u8 ii; /* clear statistics counter for auto_rate */ for (ii = 0; ii <= MAX_RATE; ii++) { psNodeDBTable->uTxOk[ii] = 0; psNodeDBTable->uTxFail[ii] = 0; } } /*+ * * Routine Description: * Rate fallback Algorithm Implementaion * * Parameters: * In: * pDevice - Pointer to the adapter * psNodeDBTable - Pointer to Node Data Base * Out: * none * * Return Value: none * -*/ #define AUTORATE_TXCNT_THRESHOLD 20 #define AUTORATE_INC_THRESHOLD 30 /*+ * * Description: * Get RateIdx from the value in SuppRates IE or ExtSuppRates IE * * Parameters: * In: * u8 - Rate value in SuppRates IE or ExtSuppRates IE * Out: * none * * Return Value: RateIdx * -*/ u16 RATEwGetRateIdx( u8 byRate ) { u16 ii; /* erase BasicRate flag */ byRate = byRate & 0x7F; for (ii = 0; ii < MAX_RATE; ii ++) { if (acbyIERate[ii] == byRate) return ii; } return 0; } /*+ * * Description: * Parsing the highest basic & support rate in rate field of frame. * * Parameters: * In: * pDevice - Pointer to the adapter * pItemRates - Pointer to Rate field defined in 802.11 spec. * pItemExtRates - Pointer to Extended Rate field defined in 802.11 spec. 
* Out: * pwMaxBasicRate - Maximum Basic Rate * pwMaxSuppRate - Maximum Supported Rate * pbyTopCCKRate - Maximum Basic Rate in CCK mode * pbyTopOFDMRate - Maximum Basic Rate in OFDM mode * * Return Value: none * -*/ void RATEvParseMaxRate(struct vnt_private *pDevice, PWLAN_IE_SUPP_RATES pItemRates, PWLAN_IE_SUPP_RATES pItemExtRates, int bUpdateBasicRate, u16 *pwMaxBasicRate, u16 *pwMaxSuppRate, u16 *pwSuppRate, u8 *pbyTopCCKRate, u8 *pbyTopOFDMRate) { int ii; u8 byHighSuppRate = 0, byRate = 0; u16 wOldBasicRate = pDevice->wBasicRate; u32 uRateLen; if (pItemRates == NULL) return; *pwSuppRate = 0; uRateLen = pItemRates->len; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ParseMaxRate Len: %d\n", uRateLen); if (pDevice->byBBType != BB_TYPE_11B) { if (uRateLen > WLAN_RATES_MAXLEN) uRateLen = WLAN_RATES_MAXLEN; } else { if (uRateLen > WLAN_RATES_MAXLEN_11B) uRateLen = WLAN_RATES_MAXLEN_11B; } for (ii = 0; ii < uRateLen; ii++) { byRate = (u8)(pItemRates->abyRates[ii]); if (WLAN_MGMT_IS_BASICRATE(byRate) && (bUpdateBasicRate == true)) { /* * add to basic rate set, update pDevice->byTopCCKBasicRate and * pDevice->byTopOFDMBasicRate */ CARDbAddBasicRate((void *)pDevice, RATEwGetRateIdx(byRate)); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ParseMaxRate AddBasicRate: %d\n", RATEwGetRateIdx(byRate)); } byRate = (u8)(pItemRates->abyRates[ii]&0x7F); if (byHighSuppRate == 0) byHighSuppRate = byRate; if (byRate > byHighSuppRate) byHighSuppRate = byRate; *pwSuppRate |= (1<<RATEwGetRateIdx(byRate)); } if ((pItemExtRates != NULL) && (pItemExtRates->byElementID == WLAN_EID_EXTSUPP_RATES) && (pDevice->byBBType != BB_TYPE_11B)) { unsigned int uExtRateLen = pItemExtRates->len; if (uExtRateLen > WLAN_RATES_MAXLEN) uExtRateLen = WLAN_RATES_MAXLEN; for (ii = 0; ii < uExtRateLen ; ii++) { byRate = (u8)(pItemExtRates->abyRates[ii]); /* select highest basic rate */ if (WLAN_MGMT_IS_BASICRATE(pItemExtRates->abyRates[ii])) { /* * add to basic rate set, update pDevice->byTopCCKBasicRate and * 
pDevice->byTopOFDMBasicRate */ CARDbAddBasicRate((void *)pDevice, RATEwGetRateIdx(byRate)); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ParseMaxRate AddBasicRate: %d\n", RATEwGetRateIdx(byRate)); } byRate = (u8)(pItemExtRates->abyRates[ii]&0x7F); if (byHighSuppRate == 0) byHighSuppRate = byRate; if (byRate > byHighSuppRate) byHighSuppRate = byRate; *pwSuppRate |= (1<<RATEwGetRateIdx(byRate)); /* DBG_PRN_GRP09(("ParseMaxRate : HighSuppRate: %d, %X\n", RATEwGetRateIdx(byRate), byRate)); */ } } if ((pDevice->byPacketType == PK_TYPE_11GB) && CARDbIsOFDMinBasicRate((void *)pDevice)) { pDevice->byPacketType = PK_TYPE_11GA; } *pbyTopCCKRate = pDevice->byTopCCKBasicRate; *pbyTopOFDMRate = pDevice->byTopOFDMBasicRate; *pwMaxSuppRate = RATEwGetRateIdx(byHighSuppRate); if ((pDevice->byPacketType==PK_TYPE_11B) || (pDevice->byPacketType==PK_TYPE_11GB)) *pwMaxBasicRate = pDevice->byTopCCKBasicRate; else *pwMaxBasicRate = pDevice->byTopOFDMBasicRate; if (wOldBasicRate != pDevice->wBasicRate) CARDvSetRSPINF((void *)pDevice, pDevice->byBBType); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Exit ParseMaxRate\n"); } /*+ * * Routine Description: * Rate fallback Algorithm Implementaion * * Parameters: * In: * pDevice - Pointer to the adapter * psNodeDBTable - Pointer to Node Data Base * Out: * none * * Return Value: none * -*/ #define AUTORATE_TXCNT_THRESHOLD 20 #define AUTORATE_INC_THRESHOLD 30 void RATEvTxRateFallBack(struct vnt_private *pDevice, PKnownNodeDB psNodeDBTable) { struct vnt_manager *pMgmt = &pDevice->vnt_mgmt; u16 wIdxDownRate = 0; int ii; int bAutoRate[MAX_RATE] = {true, true, true, true, false, false, true, true, true, true, true, true}; u32 dwThroughputTbl[MAX_RATE] = {10, 20, 55, 110, 60, 90, 120, 180, 240, 360, 480, 540}; u32 dwThroughput = 0; u16 wIdxUpRate = 0; u32 dwTxDiff = 0; if (pMgmt->eScanState != WMAC_NO_SCANNING) return; /* Don't do Fallback when scanning Channel */ psNodeDBTable->uTimeCount++; if (psNodeDBTable->uTxFail[MAX_RATE] > psNodeDBTable->uTxOk[MAX_RATE]) dwTxDiff = 
psNodeDBTable->uTxFail[MAX_RATE] - psNodeDBTable->uTxOk[MAX_RATE]; if ((psNodeDBTable->uTxOk[MAX_RATE] < AUTORATE_TXOK_CNT) && (dwTxDiff < AUTORATE_TXFAIL_CNT) && (psNodeDBTable->uTimeCount < AUTORATE_TIMEOUT)) { return; } if (psNodeDBTable->uTimeCount >= AUTORATE_TIMEOUT) { psNodeDBTable->uTimeCount = 0; } for (ii = 0; ii < MAX_RATE; ii++) { if (psNodeDBTable->wSuppRate & (0x0001<<ii)) { if (bAutoRate[ii] == true) { wIdxUpRate = (u16) ii; } } else { bAutoRate[ii] = false; } } for (ii = 0; ii <= psNodeDBTable->wTxDataRate; ii++) { if ( (psNodeDBTable->uTxOk[ii] != 0) || (psNodeDBTable->uTxFail[ii] != 0) ) { dwThroughputTbl[ii] *= psNodeDBTable->uTxOk[ii]; if (ii < RATE_11M) { psNodeDBTable->uTxFail[ii] *= 4; } dwThroughputTbl[ii] /= (psNodeDBTable->uTxOk[ii] + psNodeDBTable->uTxFail[ii]); } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Rate %d,Ok: %d, Fail:%d, Throughput:%d\n", ii, (int)psNodeDBTable->uTxOk[ii], (int)psNodeDBTable->uTxFail[ii], (int)dwThroughputTbl[ii]); } dwThroughput = dwThroughputTbl[psNodeDBTable->wTxDataRate]; wIdxDownRate = psNodeDBTable->wTxDataRate; for (ii = psNodeDBTable->wTxDataRate; ii > 0;) { ii--; if ( (dwThroughputTbl[ii] > dwThroughput) && (bAutoRate[ii]==true) ) { dwThroughput = dwThroughputTbl[ii]; wIdxDownRate = (u16) ii; } } psNodeDBTable->wTxDataRate = wIdxDownRate; if (psNodeDBTable->uTxOk[MAX_RATE]) { if (psNodeDBTable->uTxOk[MAX_RATE] > (psNodeDBTable->uTxFail[MAX_RATE] * 4) ) { psNodeDBTable->wTxDataRate = wIdxUpRate; } } else { /* adhoc, if uTxOk(total) == 0 & uTxFail(total) == 0 */ if (psNodeDBTable->uTxFail[MAX_RATE] == 0) psNodeDBTable->wTxDataRate = wIdxUpRate; } if (pDevice->byBBType == BB_TYPE_11A) { if (psNodeDBTable->wTxDataRate <= RATE_11M) psNodeDBTable->wTxDataRate = RATE_6M; } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"uTxOk[MAX_RATE] %d, uTxFail[MAX_RATE]:%d\n",(int)psNodeDBTable->uTxOk[MAX_RATE], (int)psNodeDBTable->uTxFail[MAX_RATE]); s_vResetCounter(psNodeDBTable); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Rate: %d, U:%d, D:%d\n", 
(int)psNodeDBTable->wTxDataRate, (int)wIdxUpRate, (int)wIdxDownRate); return; } /*+ * * Description: * This routine is used to assemble available Rate IE. * * Parameters: * In: * pDevice * Out: * * Return Value: None * -*/ u8 RATEuSetIE ( PWLAN_IE_SUPP_RATES pSrcRates, PWLAN_IE_SUPP_RATES pDstRates, unsigned int uRateLen ) { unsigned int ii, uu, uRateCnt = 0; if ((pSrcRates == NULL) || (pDstRates == NULL)) return 0; if (pSrcRates->len == 0) return 0; for (ii = 0; ii < uRateLen; ii++) { for (uu = 0; uu < pSrcRates->len; uu++) { if ((pSrcRates->abyRates[uu] & 0x7F) == acbyIERate[ii]) { pDstRates->abyRates[uRateCnt ++] = pSrcRates->abyRates[uu]; break; } } } return (u8)uRateCnt; }
gpl-2.0