repo_name
string
path
string
copies
string
size
string
content
string
license
string
yakir-Yang/linux
fs/ncpfs/ncplib_kernel.c
1124
33216
/* * ncplib_kernel.c * * Copyright (C) 1995, 1996 by Volker Lendecke * Modified for big endian by J.F. Chadima and David S. Miller * Modified 1997 Peter Waltenberg, Bill Hawes, David Woodhouse for 2.1 dcache * Modified 1999 Wolfram Pienkoss for NLS * Modified 2000 Ben Harris, University of Cambridge for NFS NS meta-info * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include "ncp_fs.h" static inline void assert_server_locked(struct ncp_server *server) { if (server->lock == 0) { ncp_dbg(1, "server not locked!\n"); } } static void ncp_add_byte(struct ncp_server *server, __u8 x) { assert_server_locked(server); *(__u8 *) (&(server->packet[server->current_size])) = x; server->current_size += 1; return; } static void ncp_add_word(struct ncp_server *server, __le16 x) { assert_server_locked(server); put_unaligned(x, (__le16 *) (&(server->packet[server->current_size]))); server->current_size += 2; return; } static void ncp_add_be16(struct ncp_server *server, __u16 x) { assert_server_locked(server); put_unaligned(cpu_to_be16(x), (__be16 *) (&(server->packet[server->current_size]))); server->current_size += 2; } static void ncp_add_dword(struct ncp_server *server, __le32 x) { assert_server_locked(server); put_unaligned(x, (__le32 *) (&(server->packet[server->current_size]))); server->current_size += 4; return; } static void ncp_add_be32(struct ncp_server *server, __u32 x) { assert_server_locked(server); put_unaligned(cpu_to_be32(x), (__be32 *)(&(server->packet[server->current_size]))); server->current_size += 4; } static inline void ncp_add_dword_lh(struct ncp_server *server, __u32 x) { ncp_add_dword(server, cpu_to_le32(x)); } static void ncp_add_mem(struct ncp_server *server, const void *source, int size) { assert_server_locked(server); memcpy(&(server->packet[server->current_size]), source, size); server->current_size += size; return; } static void ncp_add_pstring(struct ncp_server *server, const char *s) { int len = strlen(s); assert_server_locked(server); if (len > 255) 
{ ncp_dbg(1, "string too long: %s\n", s); len = 255; } ncp_add_byte(server, len); ncp_add_mem(server, s, len); return; } static inline void ncp_init_request(struct ncp_server *server) { ncp_lock_server(server); server->current_size = sizeof(struct ncp_request_header); server->has_subfunction = 0; } static inline void ncp_init_request_s(struct ncp_server *server, int subfunction) { ncp_lock_server(server); server->current_size = sizeof(struct ncp_request_header) + 2; ncp_add_byte(server, subfunction); server->has_subfunction = 1; } static inline char * ncp_reply_data(struct ncp_server *server, int offset) { return &(server->packet[sizeof(struct ncp_reply_header) + offset]); } static inline u8 BVAL(const void *data) { return *(const u8 *)data; } static u8 ncp_reply_byte(struct ncp_server *server, int offset) { return *(const u8 *)ncp_reply_data(server, offset); } static inline u16 WVAL_LH(const void *data) { return get_unaligned_le16(data); } static u16 ncp_reply_le16(struct ncp_server *server, int offset) { return get_unaligned_le16(ncp_reply_data(server, offset)); } static u16 ncp_reply_be16(struct ncp_server *server, int offset) { return get_unaligned_be16(ncp_reply_data(server, offset)); } static inline u32 DVAL_LH(const void *data) { return get_unaligned_le32(data); } static __le32 ncp_reply_dword(struct ncp_server *server, int offset) { return get_unaligned((__le32 *)ncp_reply_data(server, offset)); } static inline __u32 ncp_reply_dword_lh(struct ncp_server* server, int offset) { return le32_to_cpu(ncp_reply_dword(server, offset)); } int ncp_negotiate_buffersize(struct ncp_server *server, int size, int *target) { int result; ncp_init_request(server); ncp_add_be16(server, size); if ((result = ncp_request(server, 33)) != 0) { ncp_unlock_server(server); return result; } *target = min_t(unsigned int, ncp_reply_be16(server, 0), size); ncp_unlock_server(server); return 0; } /* options: * bit 0 ipx checksum * bit 1 packet signing */ int 
ncp_negotiate_size_and_options(struct ncp_server *server, int size, int options, int *ret_size, int *ret_options) { int result; /* there is minimum */ if (size < NCP_BLOCK_SIZE) size = NCP_BLOCK_SIZE; ncp_init_request(server); ncp_add_be16(server, size); ncp_add_byte(server, options); if ((result = ncp_request(server, 0x61)) != 0) { ncp_unlock_server(server); return result; } /* NCP over UDP returns 0 (!!!) */ result = ncp_reply_be16(server, 0); if (result >= NCP_BLOCK_SIZE) size = min(result, size); *ret_size = size; *ret_options = ncp_reply_byte(server, 4); ncp_unlock_server(server); return 0; } int ncp_get_volume_info_with_number(struct ncp_server* server, int n, struct ncp_volume_info* target) { int result; int len; ncp_init_request_s(server, 44); ncp_add_byte(server, n); if ((result = ncp_request(server, 22)) != 0) { goto out; } target->total_blocks = ncp_reply_dword_lh(server, 0); target->free_blocks = ncp_reply_dword_lh(server, 4); target->purgeable_blocks = ncp_reply_dword_lh(server, 8); target->not_yet_purgeable_blocks = ncp_reply_dword_lh(server, 12); target->total_dir_entries = ncp_reply_dword_lh(server, 16); target->available_dir_entries = ncp_reply_dword_lh(server, 20); target->sectors_per_block = ncp_reply_byte(server, 28); memset(&(target->volume_name), 0, sizeof(target->volume_name)); result = -EIO; len = ncp_reply_byte(server, 29); if (len > NCP_VOLNAME_LEN) { ncp_dbg(1, "volume name too long: %d\n", len); goto out; } memcpy(&(target->volume_name), ncp_reply_data(server, 30), len); result = 0; out: ncp_unlock_server(server); return result; } int ncp_get_directory_info(struct ncp_server* server, __u8 n, struct ncp_volume_info* target) { int result; int len; ncp_init_request_s(server, 45); ncp_add_byte(server, n); if ((result = ncp_request(server, 22)) != 0) { goto out; } target->total_blocks = ncp_reply_dword_lh(server, 0); target->free_blocks = ncp_reply_dword_lh(server, 4); target->purgeable_blocks = 0; target->not_yet_purgeable_blocks = 0; 
target->total_dir_entries = ncp_reply_dword_lh(server, 8); target->available_dir_entries = ncp_reply_dword_lh(server, 12); target->sectors_per_block = ncp_reply_byte(server, 20); memset(&(target->volume_name), 0, sizeof(target->volume_name)); result = -EIO; len = ncp_reply_byte(server, 21); if (len > NCP_VOLNAME_LEN) { ncp_dbg(1, "volume name too long: %d\n", len); goto out; } memcpy(&(target->volume_name), ncp_reply_data(server, 22), len); result = 0; out: ncp_unlock_server(server); return result; } int ncp_close_file(struct ncp_server *server, const char *file_id) { int result; ncp_init_request(server); ncp_add_byte(server, 0); ncp_add_mem(server, file_id, 6); result = ncp_request(server, 66); ncp_unlock_server(server); return result; } int ncp_make_closed(struct inode *inode) { int err; err = 0; mutex_lock(&NCP_FINFO(inode)->open_mutex); if (atomic_read(&NCP_FINFO(inode)->opened) == 1) { atomic_set(&NCP_FINFO(inode)->opened, 0); err = ncp_close_file(NCP_SERVER(inode), NCP_FINFO(inode)->file_handle); if (!err) ncp_vdbg("volnum=%d, dirent=%u, error=%d\n", NCP_FINFO(inode)->volNumber, NCP_FINFO(inode)->dirEntNum, err); } mutex_unlock(&NCP_FINFO(inode)->open_mutex); return err; } static void ncp_add_handle_path(struct ncp_server *server, __u8 vol_num, __le32 dir_base, int have_dir_base, const char *path) { ncp_add_byte(server, vol_num); ncp_add_dword(server, dir_base); if (have_dir_base != 0) { ncp_add_byte(server, 1); /* dir_base */ } else { ncp_add_byte(server, 0xff); /* no handle */ } if (path != NULL) { ncp_add_byte(server, 1); /* 1 component */ ncp_add_pstring(server, path); } else { ncp_add_byte(server, 0); } } int ncp_dirhandle_alloc(struct ncp_server* server, __u8 volnum, __le32 dirent, __u8* dirhandle) { int result; ncp_init_request(server); ncp_add_byte(server, 12); /* subfunction */ ncp_add_byte(server, NW_NS_DOS); ncp_add_byte(server, 0); ncp_add_word(server, 0); ncp_add_handle_path(server, volnum, dirent, 1, NULL); if ((result = ncp_request(server, 87)) 
== 0) { *dirhandle = ncp_reply_byte(server, 0); } ncp_unlock_server(server); return result; } int ncp_dirhandle_free(struct ncp_server* server, __u8 dirhandle) { int result; ncp_init_request_s(server, 20); ncp_add_byte(server, dirhandle); result = ncp_request(server, 22); ncp_unlock_server(server); return result; } void ncp_extract_file_info(const void *structure, struct nw_info_struct *target) { const __u8 *name_len; const int info_struct_size = offsetof(struct nw_info_struct, nameLen); memcpy(target, structure, info_struct_size); name_len = structure + info_struct_size; target->nameLen = *name_len; memcpy(target->entryName, name_len + 1, *name_len); target->entryName[*name_len] = '\0'; target->volNumber = le32_to_cpu(target->volNumber); return; } #ifdef CONFIG_NCPFS_NFS_NS static inline void ncp_extract_nfs_info(const unsigned char *structure, struct nw_nfs_info *target) { target->mode = DVAL_LH(structure); target->rdev = DVAL_LH(structure + 8); } #endif int ncp_obtain_nfs_info(struct ncp_server *server, struct nw_info_struct *target) { int result = 0; #ifdef CONFIG_NCPFS_NFS_NS __u32 volnum = target->volNumber; if (ncp_is_nfs_extras(server, volnum)) { ncp_init_request(server); ncp_add_byte(server, 19); /* subfunction */ ncp_add_byte(server, server->name_space[volnum]); ncp_add_byte(server, NW_NS_NFS); ncp_add_byte(server, 0); ncp_add_byte(server, volnum); ncp_add_dword(server, target->dirEntNum); /* We must retrieve both nlinks and rdev, otherwise some server versions report zeroes instead of valid data */ ncp_add_dword_lh(server, NSIBM_NFS_MODE | NSIBM_NFS_NLINKS | NSIBM_NFS_RDEV); if ((result = ncp_request(server, 87)) == 0) { ncp_extract_nfs_info(ncp_reply_data(server, 0), &target->nfs); ncp_dbg(1, "(%s) mode=0%o, rdev=0x%x\n", target->entryName, target->nfs.mode, target->nfs.rdev); } else { target->nfs.mode = 0; target->nfs.rdev = 0; } ncp_unlock_server(server); } else #endif { target->nfs.mode = 0; target->nfs.rdev = 0; } return result; } /* * Returns 
information for a (one-component) name relative to * the specified directory. */ int ncp_obtain_info(struct ncp_server *server, struct inode *dir, const char *path, struct nw_info_struct *target) { __u8 volnum = NCP_FINFO(dir)->volNumber; __le32 dirent = NCP_FINFO(dir)->dirEntNum; int result; if (target == NULL) { pr_err("%s: invalid call\n", __func__); return -EINVAL; } ncp_init_request(server); ncp_add_byte(server, 6); /* subfunction */ ncp_add_byte(server, server->name_space[volnum]); ncp_add_byte(server, server->name_space[volnum]); /* N.B. twice ?? */ ncp_add_word(server, cpu_to_le16(0x8006)); /* get all */ ncp_add_dword(server, RIM_ALL); ncp_add_handle_path(server, volnum, dirent, 1, path); if ((result = ncp_request(server, 87)) != 0) goto out; ncp_extract_file_info(ncp_reply_data(server, 0), target); ncp_unlock_server(server); result = ncp_obtain_nfs_info(server, target); return result; out: ncp_unlock_server(server); return result; } #ifdef CONFIG_NCPFS_NFS_NS static int ncp_obtain_DOS_dir_base(struct ncp_server *server, __u8 ns, __u8 volnum, __le32 dirent, const char *path, /* At most 1 component */ __le32 *DOS_dir_base) { int result; ncp_init_request(server); ncp_add_byte(server, 6); /* subfunction */ ncp_add_byte(server, ns); ncp_add_byte(server, ns); ncp_add_word(server, cpu_to_le16(0x8006)); /* get all */ ncp_add_dword(server, RIM_DIRECTORY); ncp_add_handle_path(server, volnum, dirent, 1, path); if ((result = ncp_request(server, 87)) == 0) { if (DOS_dir_base) *DOS_dir_base=ncp_reply_dword(server, 0x34); } ncp_unlock_server(server); return result; } #endif /* CONFIG_NCPFS_NFS_NS */ static inline int ncp_get_known_namespace(struct ncp_server *server, __u8 volume) { #if defined(CONFIG_NCPFS_OS2_NS) || defined(CONFIG_NCPFS_NFS_NS) int result; __u8 *namespace; __u16 no_namespaces; ncp_init_request(server); ncp_add_byte(server, 24); /* Subfunction: Get Name Spaces Loaded */ ncp_add_word(server, 0); ncp_add_byte(server, volume); if ((result = 
ncp_request(server, 87)) != 0) { ncp_unlock_server(server); return NW_NS_DOS; /* not result ?? */ } result = NW_NS_DOS; no_namespaces = ncp_reply_le16(server, 0); namespace = ncp_reply_data(server, 2); while (no_namespaces > 0) { ncp_dbg(1, "found %d on %d\n", *namespace, volume); #ifdef CONFIG_NCPFS_NFS_NS if ((*namespace == NW_NS_NFS) && !(server->m.flags&NCP_MOUNT_NO_NFS)) { result = NW_NS_NFS; break; } #endif /* CONFIG_NCPFS_NFS_NS */ #ifdef CONFIG_NCPFS_OS2_NS if ((*namespace == NW_NS_OS2) && !(server->m.flags&NCP_MOUNT_NO_OS2)) { result = NW_NS_OS2; } #endif /* CONFIG_NCPFS_OS2_NS */ namespace += 1; no_namespaces -= 1; } ncp_unlock_server(server); return result; #else /* neither OS2 nor NFS - only DOS */ return NW_NS_DOS; #endif /* defined(CONFIG_NCPFS_OS2_NS) || defined(CONFIG_NCPFS_NFS_NS) */ } int ncp_update_known_namespace(struct ncp_server *server, __u8 volume, int *ret_ns) { int ns = ncp_get_known_namespace(server, volume); if (ret_ns) *ret_ns = ns; ncp_dbg(1, "namespace[%d] = %d\n", volume, server->name_space[volume]); if (server->name_space[volume] == ns) return 0; server->name_space[volume] = ns; return 1; } static int ncp_ObtainSpecificDirBase(struct ncp_server *server, __u8 nsSrc, __u8 nsDst, __u8 vol_num, __le32 dir_base, const char *path, /* At most 1 component */ __le32 *dirEntNum, __le32 *DosDirNum) { int result; ncp_init_request(server); ncp_add_byte(server, 6); /* subfunction */ ncp_add_byte(server, nsSrc); ncp_add_byte(server, nsDst); ncp_add_word(server, cpu_to_le16(0x8006)); /* get all */ ncp_add_dword(server, RIM_ALL); ncp_add_handle_path(server, vol_num, dir_base, 1, path); if ((result = ncp_request(server, 87)) != 0) { ncp_unlock_server(server); return result; } if (dirEntNum) *dirEntNum = ncp_reply_dword(server, 0x30); if (DosDirNum) *DosDirNum = ncp_reply_dword(server, 0x34); ncp_unlock_server(server); return 0; } int ncp_mount_subdir(struct ncp_server *server, __u8 volNumber, __u8 srcNS, __le32 dirEntNum, __u32* volume, __le32* 
newDirEnt, __le32* newDosEnt) { int dstNS; int result; ncp_update_known_namespace(server, volNumber, &dstNS); if ((result = ncp_ObtainSpecificDirBase(server, srcNS, dstNS, volNumber, dirEntNum, NULL, newDirEnt, newDosEnt)) != 0) { return result; } *volume = volNumber; server->m.mounted_vol[1] = 0; server->m.mounted_vol[0] = 'X'; return 0; } int ncp_get_volume_root(struct ncp_server *server, const char *volname, __u32* volume, __le32* dirent, __le32* dosdirent) { int result; ncp_dbg(1, "looking up vol %s\n", volname); ncp_init_request(server); ncp_add_byte(server, 22); /* Subfunction: Generate dir handle */ ncp_add_byte(server, 0); /* DOS namespace */ ncp_add_byte(server, 0); /* reserved */ ncp_add_byte(server, 0); /* reserved */ ncp_add_byte(server, 0); /* reserved */ ncp_add_byte(server, 0); /* faked volume number */ ncp_add_dword(server, 0); /* faked dir_base */ ncp_add_byte(server, 0xff); /* Don't have a dir_base */ ncp_add_byte(server, 1); /* 1 path component */ ncp_add_pstring(server, volname); if ((result = ncp_request(server, 87)) != 0) { ncp_unlock_server(server); return result; } *dirent = *dosdirent = ncp_reply_dword(server, 4); *volume = ncp_reply_byte(server, 8); ncp_unlock_server(server); return 0; } int ncp_lookup_volume(struct ncp_server *server, const char *volname, struct nw_info_struct *target) { int result; memset(target, 0, sizeof(*target)); result = ncp_get_volume_root(server, volname, &target->volNumber, &target->dirEntNum, &target->DosDirNum); if (result) { return result; } ncp_update_known_namespace(server, target->volNumber, NULL); target->nameLen = strlen(volname); memcpy(target->entryName, volname, target->nameLen+1); target->attributes = aDIR; /* set dates to Jan 1, 1986 00:00 */ target->creationTime = target->modifyTime = cpu_to_le16(0x0000); target->creationDate = target->modifyDate = target->lastAccessDate = cpu_to_le16(0x0C21); target->nfs.mode = 0; return 0; } int ncp_modify_file_or_subdir_dos_info_path(struct ncp_server *server, 
struct inode *dir, const char *path, __le32 info_mask, const struct nw_modify_dos_info *info) { __u8 volnum = NCP_FINFO(dir)->volNumber; __le32 dirent = NCP_FINFO(dir)->dirEntNum; int result; ncp_init_request(server); ncp_add_byte(server, 7); /* subfunction */ ncp_add_byte(server, server->name_space[volnum]); ncp_add_byte(server, 0); /* reserved */ ncp_add_word(server, cpu_to_le16(0x8006)); /* search attribs: all */ ncp_add_dword(server, info_mask); ncp_add_mem(server, info, sizeof(*info)); ncp_add_handle_path(server, volnum, dirent, 1, path); result = ncp_request(server, 87); ncp_unlock_server(server); return result; } int ncp_modify_file_or_subdir_dos_info(struct ncp_server *server, struct inode *dir, __le32 info_mask, const struct nw_modify_dos_info *info) { return ncp_modify_file_or_subdir_dos_info_path(server, dir, NULL, info_mask, info); } #ifdef CONFIG_NCPFS_NFS_NS int ncp_modify_nfs_info(struct ncp_server *server, __u8 volnum, __le32 dirent, __u32 mode, __u32 rdev) { int result = 0; ncp_init_request(server); if (server->name_space[volnum] == NW_NS_NFS) { ncp_add_byte(server, 25); /* subfunction */ ncp_add_byte(server, server->name_space[volnum]); ncp_add_byte(server, NW_NS_NFS); ncp_add_byte(server, volnum); ncp_add_dword(server, dirent); /* we must always operate on both nlinks and rdev, otherwise rdev is not set */ ncp_add_dword_lh(server, NSIBM_NFS_MODE | NSIBM_NFS_NLINKS | NSIBM_NFS_RDEV); ncp_add_dword_lh(server, mode); ncp_add_dword_lh(server, 1); /* nlinks */ ncp_add_dword_lh(server, rdev); result = ncp_request(server, 87); } ncp_unlock_server(server); return result; } #endif static int ncp_DeleteNSEntry(struct ncp_server *server, __u8 have_dir_base, __u8 volnum, __le32 dirent, const char* name, __u8 ns, __le16 attr) { int result; ncp_init_request(server); ncp_add_byte(server, 8); /* subfunction */ ncp_add_byte(server, ns); ncp_add_byte(server, 0); /* reserved */ ncp_add_word(server, attr); /* search attribs: all */ ncp_add_handle_path(server, 
volnum, dirent, have_dir_base, name); result = ncp_request(server, 87); ncp_unlock_server(server); return result; } int ncp_del_file_or_subdir2(struct ncp_server *server, struct dentry *dentry) { struct inode *inode = d_inode(dentry); __u8 volnum; __le32 dirent; if (!inode) { return 0xFF; /* Any error */ } volnum = NCP_FINFO(inode)->volNumber; dirent = NCP_FINFO(inode)->DosDirNum; return ncp_DeleteNSEntry(server, 1, volnum, dirent, NULL, NW_NS_DOS, cpu_to_le16(0x8006)); } int ncp_del_file_or_subdir(struct ncp_server *server, struct inode *dir, const char *name) { __u8 volnum = NCP_FINFO(dir)->volNumber; __le32 dirent = NCP_FINFO(dir)->dirEntNum; int name_space; name_space = server->name_space[volnum]; #ifdef CONFIG_NCPFS_NFS_NS if (name_space == NW_NS_NFS) { int result; result=ncp_obtain_DOS_dir_base(server, name_space, volnum, dirent, name, &dirent); if (result) return result; name = NULL; name_space = NW_NS_DOS; } #endif /* CONFIG_NCPFS_NFS_NS */ return ncp_DeleteNSEntry(server, 1, volnum, dirent, name, name_space, cpu_to_le16(0x8006)); } static inline void ConvertToNWfromDWORD(__u16 v0, __u16 v1, __u8 ret[6]) { __le16 *dest = (__le16 *) ret; dest[1] = cpu_to_le16(v0); dest[2] = cpu_to_le16(v1); dest[0] = cpu_to_le16(v0 + 1); return; } /* If both dir and name are NULL, then in target there's already a looked-up entry that wants to be opened. 
*/ int ncp_open_create_file_or_subdir(struct ncp_server *server, struct inode *dir, const char *name, int open_create_mode, __le32 create_attributes, __le16 desired_acc_rights, struct ncp_entry_info *target) { __le16 search_attribs = cpu_to_le16(0x0006); __u8 volnum; __le32 dirent; int result; volnum = NCP_FINFO(dir)->volNumber; dirent = NCP_FINFO(dir)->dirEntNum; if ((create_attributes & aDIR) != 0) { search_attribs |= cpu_to_le16(0x8000); } ncp_init_request(server); ncp_add_byte(server, 1); /* subfunction */ ncp_add_byte(server, server->name_space[volnum]); ncp_add_byte(server, open_create_mode); ncp_add_word(server, search_attribs); ncp_add_dword(server, RIM_ALL); ncp_add_dword(server, create_attributes); /* The desired acc rights seem to be the inherited rights mask for directories */ ncp_add_word(server, desired_acc_rights); ncp_add_handle_path(server, volnum, dirent, 1, name); if ((result = ncp_request(server, 87)) != 0) goto out; if (!(create_attributes & aDIR)) target->opened = 1; /* in target there's a new finfo to fill */ ncp_extract_file_info(ncp_reply_data(server, 6), &(target->i)); target->volume = target->i.volNumber; ConvertToNWfromDWORD(ncp_reply_le16(server, 0), ncp_reply_le16(server, 2), target->file_handle); ncp_unlock_server(server); (void)ncp_obtain_nfs_info(server, &(target->i)); return 0; out: ncp_unlock_server(server); return result; } int ncp_initialize_search(struct ncp_server *server, struct inode *dir, struct nw_search_sequence *target) { __u8 volnum = NCP_FINFO(dir)->volNumber; __le32 dirent = NCP_FINFO(dir)->dirEntNum; int result; ncp_init_request(server); ncp_add_byte(server, 2); /* subfunction */ ncp_add_byte(server, server->name_space[volnum]); ncp_add_byte(server, 0); /* reserved */ ncp_add_handle_path(server, volnum, dirent, 1, NULL); result = ncp_request(server, 87); if (result) goto out; memcpy(target, ncp_reply_data(server, 0), sizeof(*target)); out: ncp_unlock_server(server); return result; } int ncp_search_for_fileset(struct 
ncp_server *server, struct nw_search_sequence *seq, int* more, int* cnt, char* buffer, size_t bufsize, char** rbuf, size_t* rsize) { int result; ncp_init_request(server); ncp_add_byte(server, 20); ncp_add_byte(server, server->name_space[seq->volNumber]); ncp_add_byte(server, 0); /* datastream */ ncp_add_word(server, cpu_to_le16(0x8006)); ncp_add_dword(server, RIM_ALL); ncp_add_word(server, cpu_to_le16(32767)); /* max returned items */ ncp_add_mem(server, seq, 9); #ifdef CONFIG_NCPFS_NFS_NS if (server->name_space[seq->volNumber] == NW_NS_NFS) { ncp_add_byte(server, 0); /* 0 byte pattern */ } else #endif { ncp_add_byte(server, 2); /* 2 byte pattern */ ncp_add_byte(server, 0xff); /* following is a wildcard */ ncp_add_byte(server, '*'); } result = ncp_request2(server, 87, buffer, bufsize); if (result) { ncp_unlock_server(server); return result; } if (server->ncp_reply_size < 12) { ncp_unlock_server(server); return 0xFF; } *rsize = server->ncp_reply_size - 12; ncp_unlock_server(server); buffer = buffer + sizeof(struct ncp_reply_header); *rbuf = buffer + 12; *cnt = WVAL_LH(buffer + 10); *more = BVAL(buffer + 9); memcpy(seq, buffer, 9); return 0; } static int ncp_RenameNSEntry(struct ncp_server *server, struct inode *old_dir, const char *old_name, __le16 old_type, struct inode *new_dir, const char *new_name) { int result = -EINVAL; if ((old_dir == NULL) || (old_name == NULL) || (new_dir == NULL) || (new_name == NULL)) goto out; ncp_init_request(server); ncp_add_byte(server, 4); /* subfunction */ ncp_add_byte(server, server->name_space[NCP_FINFO(old_dir)->volNumber]); ncp_add_byte(server, 1); /* rename flag */ ncp_add_word(server, old_type); /* search attributes */ /* source Handle Path */ ncp_add_byte(server, NCP_FINFO(old_dir)->volNumber); ncp_add_dword(server, NCP_FINFO(old_dir)->dirEntNum); ncp_add_byte(server, 1); ncp_add_byte(server, 1); /* 1 source component */ /* dest Handle Path */ ncp_add_byte(server, NCP_FINFO(new_dir)->volNumber); ncp_add_dword(server, 
NCP_FINFO(new_dir)->dirEntNum); ncp_add_byte(server, 1); ncp_add_byte(server, 1); /* 1 destination component */ /* source path string */ ncp_add_pstring(server, old_name); /* dest path string */ ncp_add_pstring(server, new_name); result = ncp_request(server, 87); ncp_unlock_server(server); out: return result; } int ncp_ren_or_mov_file_or_subdir(struct ncp_server *server, struct inode *old_dir, const char *old_name, struct inode *new_dir, const char *new_name) { int result; __le16 old_type = cpu_to_le16(0x06); /* If somebody can do it atomic, call me... vandrove@vc.cvut.cz */ result = ncp_RenameNSEntry(server, old_dir, old_name, old_type, new_dir, new_name); if (result == 0xFF) /* File Not Found, try directory */ { old_type = cpu_to_le16(0x16); result = ncp_RenameNSEntry(server, old_dir, old_name, old_type, new_dir, new_name); } if (result != 0x92) return result; /* All except NO_FILES_RENAMED */ result = ncp_del_file_or_subdir(server, new_dir, new_name); if (result != 0) return -EACCES; result = ncp_RenameNSEntry(server, old_dir, old_name, old_type, new_dir, new_name); return result; } /* We have to transfer to/from user space */ int ncp_read_kernel(struct ncp_server *server, const char *file_id, __u32 offset, __u16 to_read, char *target, int *bytes_read) { const char *source; int result; ncp_init_request(server); ncp_add_byte(server, 0); ncp_add_mem(server, file_id, 6); ncp_add_be32(server, offset); ncp_add_be16(server, to_read); if ((result = ncp_request(server, 72)) != 0) { goto out; } *bytes_read = ncp_reply_be16(server, 0); source = ncp_reply_data(server, 2 + (offset & 1)); memcpy(target, source, *bytes_read); out: ncp_unlock_server(server); return result; } /* There is a problem... egrep and some other silly tools do: x = mmap(NULL, MAP_PRIVATE, PROT_READ|PROT_WRITE, <ncpfs fd>, 32768); read(<ncpfs fd>, x, 32768); Now copying read result by copy_to_user causes pagefault. This pagefault could not be handled because of server was locked due to read. 
So we have to use temporary buffer. So ncp_unlock_server must be done before copy_to_user (and for write, copy_from_user must be done before ncp_init_request... same applies for send raw packet ioctl). Because of file is normally read in bigger chunks, caller provides kmalloced (vmalloced) chunk of memory with size >= to_read... */ int ncp_read_bounce(struct ncp_server *server, const char *file_id, __u32 offset, __u16 to_read, struct iov_iter *to, int *bytes_read, void *bounce, __u32 bufsize) { int result; ncp_init_request(server); ncp_add_byte(server, 0); ncp_add_mem(server, file_id, 6); ncp_add_be32(server, offset); ncp_add_be16(server, to_read); result = ncp_request2(server, 72, bounce, bufsize); ncp_unlock_server(server); if (!result) { int len = get_unaligned_be16((char *)bounce + sizeof(struct ncp_reply_header)); result = -EIO; if (len <= to_read) { char* source; source = (char*)bounce + sizeof(struct ncp_reply_header) + 2 + (offset & 1); *bytes_read = len; result = 0; if (copy_to_iter(source, len, to) != len) result = -EFAULT; } } return result; } int ncp_write_kernel(struct ncp_server *server, const char *file_id, __u32 offset, __u16 to_write, const char *source, int *bytes_written) { int result; ncp_init_request(server); ncp_add_byte(server, 0); ncp_add_mem(server, file_id, 6); ncp_add_be32(server, offset); ncp_add_be16(server, to_write); ncp_add_mem(server, source, to_write); if ((result = ncp_request(server, 73)) == 0) *bytes_written = to_write; ncp_unlock_server(server); return result; } #ifdef CONFIG_NCPFS_IOCTL_LOCKING int ncp_LogPhysicalRecord(struct ncp_server *server, const char *file_id, __u8 locktype, __u32 offset, __u32 length, __u16 timeout) { int result; ncp_init_request(server); ncp_add_byte(server, locktype); ncp_add_mem(server, file_id, 6); ncp_add_be32(server, offset); ncp_add_be32(server, length); ncp_add_be16(server, timeout); if ((result = ncp_request(server, 0x1A)) != 0) { ncp_unlock_server(server); return result; } 
ncp_unlock_server(server); return 0; } int ncp_ClearPhysicalRecord(struct ncp_server *server, const char *file_id, __u32 offset, __u32 length) { int result; ncp_init_request(server); ncp_add_byte(server, 0); /* who knows... lanalyzer says that */ ncp_add_mem(server, file_id, 6); ncp_add_be32(server, offset); ncp_add_be32(server, length); if ((result = ncp_request(server, 0x1E)) != 0) { ncp_unlock_server(server); return result; } ncp_unlock_server(server); return 0; } #endif /* CONFIG_NCPFS_IOCTL_LOCKING */ #ifdef CONFIG_NCPFS_NLS /* This are the NLS conversion routines with inspirations and code parts * from the vfat file system and hints from Petr Vandrovec. */ int ncp__io2vol(struct ncp_server *server, unsigned char *vname, unsigned int *vlen, const unsigned char *iname, unsigned int ilen, int cc) { struct nls_table *in = server->nls_io; struct nls_table *out = server->nls_vol; unsigned char *vname_start; unsigned char *vname_end; const unsigned char *iname_end; iname_end = iname + ilen; vname_start = vname; vname_end = vname + *vlen - 1; while (iname < iname_end) { int chl; wchar_t ec; if (NCP_IS_FLAG(server, NCP_FLAG_UTF8)) { int k; unicode_t u; k = utf8_to_utf32(iname, iname_end - iname, &u); if (k < 0 || u > MAX_WCHAR_T) return -EINVAL; iname += k; ec = u; } else { if (*iname == NCP_ESC) { int k; if (iname_end - iname < 5) goto nospec; ec = 0; for (k = 1; k < 5; k++) { unsigned char nc; nc = iname[k] - '0'; if (nc >= 10) { nc -= 'A' - '0' - 10; if ((nc < 10) || (nc > 15)) { goto nospec; } } ec = (ec << 4) | nc; } iname += 5; } else { nospec:; if ( (chl = in->char2uni(iname, iname_end - iname, &ec)) < 0) return chl; iname += chl; } } /* unitoupper should be here! */ chl = out->uni2char(ec, vname, vname_end - vname); if (chl < 0) return chl; /* this is wrong... 
*/ if (cc) { int chi; for (chi = 0; chi < chl; chi++){ vname[chi] = ncp_toupper(out, vname[chi]); } } vname += chl; } *vname = 0; *vlen = vname - vname_start; return 0; } int ncp__vol2io(struct ncp_server *server, unsigned char *iname, unsigned int *ilen, const unsigned char *vname, unsigned int vlen, int cc) { struct nls_table *in = server->nls_vol; struct nls_table *out = server->nls_io; const unsigned char *vname_end; unsigned char *iname_start; unsigned char *iname_end; unsigned char *vname_cc; int err; vname_cc = NULL; if (cc) { int i; /* this is wrong! */ vname_cc = kmalloc(vlen, GFP_KERNEL); if (!vname_cc) return -ENOMEM; for (i = 0; i < vlen; i++) vname_cc[i] = ncp_tolower(in, vname[i]); vname = vname_cc; } iname_start = iname; iname_end = iname + *ilen - 1; vname_end = vname + vlen; while (vname < vname_end) { wchar_t ec; int chl; if ( (chl = in->char2uni(vname, vname_end - vname, &ec)) < 0) { err = chl; goto quit; } vname += chl; /* unitolower should be here! */ if (NCP_IS_FLAG(server, NCP_FLAG_UTF8)) { int k; k = utf32_to_utf8(ec, iname, iname_end - iname); if (k < 0) { err = -ENAMETOOLONG; goto quit; } iname += k; } else { if ( (chl = out->uni2char(ec, iname, iname_end - iname)) >= 0) { iname += chl; } else { int k; if (iname_end - iname < 5) { err = -ENAMETOOLONG; goto quit; } *iname = NCP_ESC; for (k = 4; k > 0; k--) { unsigned char v; v = (ec & 0xF) + '0'; if (v > '9') { v += 'A' - '9' - 1; } iname[k] = v; ec >>= 4; } iname += 5; } } } *iname = 0; *ilen = iname - iname_start; err = 0; quit:; if (cc) kfree(vname_cc); return err; } #else int ncp__io2vol(unsigned char *vname, unsigned int *vlen, const unsigned char *iname, unsigned int ilen, int cc) { int i; if (*vlen <= ilen) return -ENAMETOOLONG; if (cc) for (i = 0; i < ilen; i++) { *vname = toupper(*iname); vname++; iname++; } else { memmove(vname, iname, ilen); vname += ilen; } *vlen = ilen; *vname = 0; return 0; } int ncp__vol2io(unsigned char *iname, unsigned int *ilen, const unsigned char *vname, 
unsigned int vlen, int cc) { int i; if (*ilen <= vlen) return -ENAMETOOLONG; if (cc) for (i = 0; i < vlen; i++) { *iname = tolower(*vname); iname++; vname++; } else { memmove(iname, vname, vlen); iname += vlen; } *ilen = vlen; *iname = 0; return 0; } #endif
gpl-2.0
sid1607/linux-3.14.65-src
drivers/tty/serial/rp2.c
2148
24666
/* * Driver for Comtrol RocketPort EXPRESS/INFINITY cards * * Copyright (C) 2012 Kevin Cernekee <cernekee@gmail.com> * * Inspired by, and loosely based on: * * ar933x_uart.c * Copyright (C) 2011 Gabor Juhos <juhosg@openwrt.org> * * rocketport_infinity_express-linux-1.20.tar.gz * Copyright (C) 2004-2011 Comtrol, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. */ #include <linux/bitops.h> #include <linux/compiler.h> #include <linux/completion.h> #include <linux/console.h> #include <linux/delay.h> #include <linux/firmware.h> #include <linux/init.h> #include <linux/io.h> #include <linux/ioport.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/log2.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/serial.h> #include <linux/serial_core.h> #include <linux/slab.h> #include <linux/sysrq.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/types.h> #define DRV_NAME "rp2" #define RP2_FW_NAME "rp2.fw" #define RP2_UCODE_BYTES 0x3f #define PORTS_PER_ASIC 16 #define ALL_PORTS_MASK (BIT(PORTS_PER_ASIC) - 1) #define UART_CLOCK 44236800 #define DEFAULT_BAUD_DIV (UART_CLOCK / (9600 * 16)) #define FIFO_SIZE 512 /* BAR0 registers */ #define RP2_FPGA_CTL0 0x110 #define RP2_FPGA_CTL1 0x11c #define RP2_IRQ_MASK 0x1ec #define RP2_IRQ_MASK_EN_m BIT(0) #define RP2_IRQ_STATUS 0x1f0 /* BAR1 registers */ #define RP2_ASIC_SPACING 0x1000 #define RP2_ASIC_OFFSET(i) ((i) << ilog2(RP2_ASIC_SPACING)) #define RP2_PORT_BASE 0x000 #define RP2_PORT_SPACING 0x040 #define RP2_UCODE_BASE 0x400 #define RP2_UCODE_SPACING 0x80 #define RP2_CLK_PRESCALER 0xc00 #define RP2_CH_IRQ_STAT 0xc04 #define RP2_CH_IRQ_MASK 0xc08 #define RP2_ASIC_IRQ 0xd00 #define RP2_ASIC_IRQ_EN_m BIT(20) #define RP2_GLOBAL_CMD 0xd0c #define RP2_ASIC_CFG 0xd04 /* port registers */ #define RP2_DATA_DWORD 0x000 #define RP2_DATA_BYTE 0x008 #define 
RP2_DATA_BYTE_ERR_PARITY_m BIT(8)
#define RP2_DATA_BYTE_ERR_OVERRUN_m BIT(9)
#define RP2_DATA_BYTE_ERR_FRAMING_m BIT(10)
#define RP2_DATA_BYTE_BREAK_m BIT(11)
/* This lets uart_insert_char() drop bytes received on a !CREAD port */
#define RP2_DUMMY_READ BIT(16)
#define RP2_DATA_BYTE_EXCEPTION_MASK (RP2_DATA_BYTE_ERR_PARITY_m | \
	RP2_DATA_BYTE_ERR_OVERRUN_m | \
	RP2_DATA_BYTE_ERR_FRAMING_m | \
	RP2_DATA_BYTE_BREAK_m)
#define RP2_RX_FIFO_COUNT 0x00c
#define RP2_TX_FIFO_COUNT 0x00e
/* channel status register and its bits */
#define RP2_CHAN_STAT 0x010
#define RP2_CHAN_STAT_RXDATA_m BIT(0)
#define RP2_CHAN_STAT_DCD_m BIT(3)
#define RP2_CHAN_STAT_DSR_m BIT(4)
#define RP2_CHAN_STAT_CTS_m BIT(5)
#define RP2_CHAN_STAT_RI_m BIT(6)
#define RP2_CHAN_STAT_OVERRUN_m BIT(13)
#define RP2_CHAN_STAT_DSR_CHANGED_m BIT(16)
#define RP2_CHAN_STAT_CTS_CHANGED_m BIT(17)
#define RP2_CHAN_STAT_CD_CHANGED_m BIT(18)
#define RP2_CHAN_STAT_RI_CHANGED_m BIT(22)
#define RP2_CHAN_STAT_TXEMPTY_m BIT(25)
#define RP2_CHAN_STAT_MS_CHANGED_MASK (RP2_CHAN_STAT_DSR_CHANGED_m | \
	RP2_CHAN_STAT_CTS_CHANGED_m | \
	RP2_CHAN_STAT_CD_CHANGED_m | \
	RP2_CHAN_STAT_RI_CHANGED_m)
/* TX/RX control register and its bits */
#define RP2_TXRX_CTL 0x014
#define RP2_TXRX_CTL_MSRIRQ_m BIT(0)
#define RP2_TXRX_CTL_RXIRQ_m BIT(2)
#define RP2_TXRX_CTL_RX_TRIG_s 3
#define RP2_TXRX_CTL_RX_TRIG_m (0x3 << RP2_TXRX_CTL_RX_TRIG_s)
#define RP2_TXRX_CTL_RX_TRIG_1 (0x1 << RP2_TXRX_CTL_RX_TRIG_s)
#define RP2_TXRX_CTL_RX_TRIG_256 (0x2 << RP2_TXRX_CTL_RX_TRIG_s)
#define RP2_TXRX_CTL_RX_TRIG_448 (0x3 << RP2_TXRX_CTL_RX_TRIG_s)
#define RP2_TXRX_CTL_RX_EN_m BIT(5)
#define RP2_TXRX_CTL_RTSFLOW_m BIT(6)
#define RP2_TXRX_CTL_DTRFLOW_m BIT(7)
#define RP2_TXRX_CTL_TX_TRIG_s 16
/* NOTE(review): TX_TRIG_m shifts by RX_TRIG_s (3), not TX_TRIG_s (16) —
 * looks like a copy/paste typo, but the macro is unused in this chunk;
 * confirm against the hardware datasheet before changing. */
#define RP2_TXRX_CTL_TX_TRIG_m (0x3 << RP2_TXRX_CTL_RX_TRIG_s)
#define RP2_TXRX_CTL_DSRFLOW_m BIT(18)
#define RP2_TXRX_CTL_TXIRQ_m BIT(19)
#define RP2_TXRX_CTL_CTSFLOW_m BIT(23)
#define RP2_TXRX_CTL_TX_EN_m BIT(24)
#define RP2_TXRX_CTL_RTS_m BIT(25)
#define RP2_TXRX_CTL_DTR_m BIT(26)
#define RP2_TXRX_CTL_LOOP_m BIT(27)
#define RP2_TXRX_CTL_BREAK_m BIT(28)
#define RP2_TXRX_CTL_CMSPAR_m BIT(29) #define RP2_TXRX_CTL_nPARODD_m BIT(30) #define RP2_TXRX_CTL_PARENB_m BIT(31) #define RP2_UART_CTL 0x018 #define RP2_UART_CTL_MODE_s 0 #define RP2_UART_CTL_MODE_m (0x7 << RP2_UART_CTL_MODE_s) #define RP2_UART_CTL_MODE_rs232 (0x1 << RP2_UART_CTL_MODE_s) #define RP2_UART_CTL_FLUSH_RX_m BIT(3) #define RP2_UART_CTL_FLUSH_TX_m BIT(4) #define RP2_UART_CTL_RESET_CH_m BIT(5) #define RP2_UART_CTL_XMIT_EN_m BIT(6) #define RP2_UART_CTL_DATABITS_s 8 #define RP2_UART_CTL_DATABITS_m (0x3 << RP2_UART_CTL_DATABITS_s) #define RP2_UART_CTL_DATABITS_8 (0x3 << RP2_UART_CTL_DATABITS_s) #define RP2_UART_CTL_DATABITS_7 (0x2 << RP2_UART_CTL_DATABITS_s) #define RP2_UART_CTL_DATABITS_6 (0x1 << RP2_UART_CTL_DATABITS_s) #define RP2_UART_CTL_DATABITS_5 (0x0 << RP2_UART_CTL_DATABITS_s) #define RP2_UART_CTL_STOPBITS_m BIT(10) #define RP2_BAUD 0x01c /* ucode registers */ #define RP2_TX_SWFLOW 0x02 #define RP2_TX_SWFLOW_ena 0x81 #define RP2_TX_SWFLOW_dis 0x9d #define RP2_RX_SWFLOW 0x0c #define RP2_RX_SWFLOW_ena 0x81 #define RP2_RX_SWFLOW_dis 0x8d #define RP2_RX_FIFO 0x37 #define RP2_RX_FIFO_ena 0x08 #define RP2_RX_FIFO_dis 0x81 static struct uart_driver rp2_uart_driver = { .owner = THIS_MODULE, .driver_name = DRV_NAME, .dev_name = "ttyRP", .nr = CONFIG_SERIAL_RP2_NR_UARTS, }; struct rp2_card; struct rp2_uart_port { struct uart_port port; int idx; int ignore_rx; struct rp2_card *card; void __iomem *asic_base; void __iomem *base; void __iomem *ucode; }; struct rp2_card { struct pci_dev *pdev; struct rp2_uart_port *ports; int n_ports; int initialized_ports; int minor_start; int smpte; void __iomem *bar0; void __iomem *bar1; spinlock_t card_lock; struct completion fw_loaded; }; #define RP_ID(prod) PCI_VDEVICE(RP, (prod)) #define RP_CAP(ports, smpte) (((ports) << 8) | ((smpte) << 0)) static inline void rp2_decode_cap(const struct pci_device_id *id, int *ports, int *smpte) { *ports = id->driver_data >> 8; *smpte = id->driver_data & 0xff; } static 
DEFINE_SPINLOCK(rp2_minor_lock); static int rp2_minor_next; static int rp2_alloc_ports(int n_ports) { int ret = -ENOSPC; spin_lock(&rp2_minor_lock); if (rp2_minor_next + n_ports <= CONFIG_SERIAL_RP2_NR_UARTS) { /* sorry, no support for hot unplugging individual cards */ ret = rp2_minor_next; rp2_minor_next += n_ports; } spin_unlock(&rp2_minor_lock); return ret; } static inline struct rp2_uart_port *port_to_up(struct uart_port *port) { return container_of(port, struct rp2_uart_port, port); } static void rp2_rmw(struct rp2_uart_port *up, int reg, u32 clr_bits, u32 set_bits) { u32 tmp = readl(up->base + reg); tmp &= ~clr_bits; tmp |= set_bits; writel(tmp, up->base + reg); } static void rp2_rmw_clr(struct rp2_uart_port *up, int reg, u32 val) { rp2_rmw(up, reg, val, 0); } static void rp2_rmw_set(struct rp2_uart_port *up, int reg, u32 val) { rp2_rmw(up, reg, 0, val); } static void rp2_mask_ch_irq(struct rp2_uart_port *up, int ch_num, int is_enabled) { unsigned long flags, irq_mask; spin_lock_irqsave(&up->card->card_lock, flags); irq_mask = readl(up->asic_base + RP2_CH_IRQ_MASK); if (is_enabled) irq_mask &= ~BIT(ch_num); else irq_mask |= BIT(ch_num); writel(irq_mask, up->asic_base + RP2_CH_IRQ_MASK); spin_unlock_irqrestore(&up->card->card_lock, flags); } static unsigned int rp2_uart_tx_empty(struct uart_port *port) { struct rp2_uart_port *up = port_to_up(port); unsigned long tx_fifo_bytes, flags; /* * This should probably check the transmitter, not the FIFO. * But the TXEMPTY bit doesn't seem to work unless the TX IRQ is * enabled. */ spin_lock_irqsave(&up->port.lock, flags); tx_fifo_bytes = readw(up->base + RP2_TX_FIFO_COUNT); spin_unlock_irqrestore(&up->port.lock, flags); return tx_fifo_bytes ? 0 : TIOCSER_TEMT; } static unsigned int rp2_uart_get_mctrl(struct uart_port *port) { struct rp2_uart_port *up = port_to_up(port); u32 status; status = readl(up->base + RP2_CHAN_STAT); return ((status & RP2_CHAN_STAT_DCD_m) ? TIOCM_CAR : 0) | ((status & RP2_CHAN_STAT_DSR_m) ? 
TIOCM_DSR : 0) | ((status & RP2_CHAN_STAT_CTS_m) ? TIOCM_CTS : 0) | ((status & RP2_CHAN_STAT_RI_m) ? TIOCM_RI : 0); } static void rp2_uart_set_mctrl(struct uart_port *port, unsigned int mctrl) { rp2_rmw(port_to_up(port), RP2_TXRX_CTL, RP2_TXRX_CTL_DTR_m | RP2_TXRX_CTL_RTS_m | RP2_TXRX_CTL_LOOP_m, ((mctrl & TIOCM_DTR) ? RP2_TXRX_CTL_DTR_m : 0) | ((mctrl & TIOCM_RTS) ? RP2_TXRX_CTL_RTS_m : 0) | ((mctrl & TIOCM_LOOP) ? RP2_TXRX_CTL_LOOP_m : 0)); } static void rp2_uart_start_tx(struct uart_port *port) { rp2_rmw_set(port_to_up(port), RP2_TXRX_CTL, RP2_TXRX_CTL_TXIRQ_m); } static void rp2_uart_stop_tx(struct uart_port *port) { rp2_rmw_clr(port_to_up(port), RP2_TXRX_CTL, RP2_TXRX_CTL_TXIRQ_m); } static void rp2_uart_stop_rx(struct uart_port *port) { rp2_rmw_clr(port_to_up(port), RP2_TXRX_CTL, RP2_TXRX_CTL_RXIRQ_m); } static void rp2_uart_break_ctl(struct uart_port *port, int break_state) { unsigned long flags; spin_lock_irqsave(&port->lock, flags); rp2_rmw(port_to_up(port), RP2_TXRX_CTL, RP2_TXRX_CTL_BREAK_m, break_state ? RP2_TXRX_CTL_BREAK_m : 0); spin_unlock_irqrestore(&port->lock, flags); } static void rp2_uart_enable_ms(struct uart_port *port) { rp2_rmw_set(port_to_up(port), RP2_TXRX_CTL, RP2_TXRX_CTL_MSRIRQ_m); } static void __rp2_uart_set_termios(struct rp2_uart_port *up, unsigned long cfl, unsigned long ifl, unsigned int baud_div) { /* baud rate divisor (calculated elsewhere). 0 = divide-by-1 */ writew(baud_div - 1, up->base + RP2_BAUD); /* data bits and stop bits */ rp2_rmw(up, RP2_UART_CTL, RP2_UART_CTL_STOPBITS_m | RP2_UART_CTL_DATABITS_m, ((cfl & CSTOPB) ? RP2_UART_CTL_STOPBITS_m : 0) | (((cfl & CSIZE) == CS8) ? RP2_UART_CTL_DATABITS_8 : 0) | (((cfl & CSIZE) == CS7) ? RP2_UART_CTL_DATABITS_7 : 0) | (((cfl & CSIZE) == CS6) ? RP2_UART_CTL_DATABITS_6 : 0) | (((cfl & CSIZE) == CS5) ? 
RP2_UART_CTL_DATABITS_5 : 0)); /* parity and hardware flow control */ rp2_rmw(up, RP2_TXRX_CTL, RP2_TXRX_CTL_PARENB_m | RP2_TXRX_CTL_nPARODD_m | RP2_TXRX_CTL_CMSPAR_m | RP2_TXRX_CTL_DTRFLOW_m | RP2_TXRX_CTL_DSRFLOW_m | RP2_TXRX_CTL_RTSFLOW_m | RP2_TXRX_CTL_CTSFLOW_m, ((cfl & PARENB) ? RP2_TXRX_CTL_PARENB_m : 0) | ((cfl & PARODD) ? 0 : RP2_TXRX_CTL_nPARODD_m) | ((cfl & CMSPAR) ? RP2_TXRX_CTL_CMSPAR_m : 0) | ((cfl & CRTSCTS) ? (RP2_TXRX_CTL_RTSFLOW_m | RP2_TXRX_CTL_CTSFLOW_m) : 0)); /* XON/XOFF software flow control */ writeb((ifl & IXON) ? RP2_TX_SWFLOW_ena : RP2_TX_SWFLOW_dis, up->ucode + RP2_TX_SWFLOW); writeb((ifl & IXOFF) ? RP2_RX_SWFLOW_ena : RP2_RX_SWFLOW_dis, up->ucode + RP2_RX_SWFLOW); } static void rp2_uart_set_termios(struct uart_port *port, struct ktermios *new, struct ktermios *old) { struct rp2_uart_port *up = port_to_up(port); unsigned long flags; unsigned int baud, baud_div; baud = uart_get_baud_rate(port, new, old, 0, port->uartclk / 16); baud_div = uart_get_divisor(port, baud); if (tty_termios_baud_rate(new)) tty_termios_encode_baud_rate(new, baud, baud); spin_lock_irqsave(&port->lock, flags); /* ignore all characters if CREAD is not set */ port->ignore_status_mask = (new->c_cflag & CREAD) ? 
0 : RP2_DUMMY_READ; __rp2_uart_set_termios(up, new->c_cflag, new->c_iflag, baud_div); uart_update_timeout(port, new->c_cflag, baud); spin_unlock_irqrestore(&port->lock, flags); } static void rp2_rx_chars(struct rp2_uart_port *up) { u16 bytes = readw(up->base + RP2_RX_FIFO_COUNT); struct tty_port *port = &up->port.state->port; for (; bytes != 0; bytes--) { u32 byte = readw(up->base + RP2_DATA_BYTE) | RP2_DUMMY_READ; char ch = byte & 0xff; if (likely(!(byte & RP2_DATA_BYTE_EXCEPTION_MASK))) { if (!uart_handle_sysrq_char(&up->port, ch)) uart_insert_char(&up->port, byte, 0, ch, TTY_NORMAL); } else { char flag = TTY_NORMAL; if (byte & RP2_DATA_BYTE_BREAK_m) flag = TTY_BREAK; else if (byte & RP2_DATA_BYTE_ERR_FRAMING_m) flag = TTY_FRAME; else if (byte & RP2_DATA_BYTE_ERR_PARITY_m) flag = TTY_PARITY; uart_insert_char(&up->port, byte, RP2_DATA_BYTE_ERR_OVERRUN_m, ch, flag); } up->port.icount.rx++; } spin_unlock(&up->port.lock); tty_flip_buffer_push(port); spin_lock(&up->port.lock); } static void rp2_tx_chars(struct rp2_uart_port *up) { u16 max_tx = FIFO_SIZE - readw(up->base + RP2_TX_FIFO_COUNT); struct circ_buf *xmit = &up->port.state->xmit; if (uart_tx_stopped(&up->port)) { rp2_uart_stop_tx(&up->port); return; } for (; max_tx != 0; max_tx--) { if (up->port.x_char) { writeb(up->port.x_char, up->base + RP2_DATA_BYTE); up->port.x_char = 0; up->port.icount.tx++; continue; } if (uart_circ_empty(xmit)) { rp2_uart_stop_tx(&up->port); break; } writeb(xmit->buf[xmit->tail], up->base + RP2_DATA_BYTE); xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); up->port.icount.tx++; } if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(&up->port); } static void rp2_ch_interrupt(struct rp2_uart_port *up) { u32 status; spin_lock(&up->port.lock); /* * The IRQ status bits are clear-on-write. Other status bits in * this register aren't, so it's harmless to write to them. 
*/ status = readl(up->base + RP2_CHAN_STAT); writel(status, up->base + RP2_CHAN_STAT); if (status & RP2_CHAN_STAT_RXDATA_m) rp2_rx_chars(up); if (status & RP2_CHAN_STAT_TXEMPTY_m) rp2_tx_chars(up); if (status & RP2_CHAN_STAT_MS_CHANGED_MASK) wake_up_interruptible(&up->port.state->port.delta_msr_wait); spin_unlock(&up->port.lock); } static int rp2_asic_interrupt(struct rp2_card *card, unsigned int asic_id) { void __iomem *base = card->bar1 + RP2_ASIC_OFFSET(asic_id); int ch, handled = 0; unsigned long status = readl(base + RP2_CH_IRQ_STAT) & ~readl(base + RP2_CH_IRQ_MASK); for_each_set_bit(ch, &status, PORTS_PER_ASIC) { rp2_ch_interrupt(&card->ports[ch]); handled++; } return handled; } static irqreturn_t rp2_uart_interrupt(int irq, void *dev_id) { struct rp2_card *card = dev_id; int handled; handled = rp2_asic_interrupt(card, 0); if (card->n_ports >= PORTS_PER_ASIC) handled += rp2_asic_interrupt(card, 1); return handled ? IRQ_HANDLED : IRQ_NONE; } static inline void rp2_flush_fifos(struct rp2_uart_port *up) { rp2_rmw_set(up, RP2_UART_CTL, RP2_UART_CTL_FLUSH_RX_m | RP2_UART_CTL_FLUSH_TX_m); readl(up->base + RP2_UART_CTL); udelay(10); rp2_rmw_clr(up, RP2_UART_CTL, RP2_UART_CTL_FLUSH_RX_m | RP2_UART_CTL_FLUSH_TX_m); } static int rp2_uart_startup(struct uart_port *port) { struct rp2_uart_port *up = port_to_up(port); rp2_flush_fifos(up); rp2_rmw(up, RP2_TXRX_CTL, RP2_TXRX_CTL_MSRIRQ_m, RP2_TXRX_CTL_RXIRQ_m); rp2_rmw(up, RP2_TXRX_CTL, RP2_TXRX_CTL_RX_TRIG_m, RP2_TXRX_CTL_RX_TRIG_1); rp2_rmw(up, RP2_CHAN_STAT, 0, 0); rp2_mask_ch_irq(up, up->idx, 1); return 0; } static void rp2_uart_shutdown(struct uart_port *port) { struct rp2_uart_port *up = port_to_up(port); unsigned long flags; rp2_uart_break_ctl(port, 0); spin_lock_irqsave(&port->lock, flags); rp2_mask_ch_irq(up, up->idx, 0); rp2_rmw(up, RP2_CHAN_STAT, 0, 0); spin_unlock_irqrestore(&port->lock, flags); } static const char *rp2_uart_type(struct uart_port *port) { return (port->type == PORT_RP2) ? 
"RocketPort 2 UART" : NULL; } static void rp2_uart_release_port(struct uart_port *port) { /* Nothing to release ... */ } static int rp2_uart_request_port(struct uart_port *port) { /* UARTs always present */ return 0; } static void rp2_uart_config_port(struct uart_port *port, int flags) { if (flags & UART_CONFIG_TYPE) port->type = PORT_RP2; } static int rp2_uart_verify_port(struct uart_port *port, struct serial_struct *ser) { if (ser->type != PORT_UNKNOWN && ser->type != PORT_RP2) return -EINVAL; return 0; } static const struct uart_ops rp2_uart_ops = { .tx_empty = rp2_uart_tx_empty, .set_mctrl = rp2_uart_set_mctrl, .get_mctrl = rp2_uart_get_mctrl, .stop_tx = rp2_uart_stop_tx, .start_tx = rp2_uart_start_tx, .stop_rx = rp2_uart_stop_rx, .enable_ms = rp2_uart_enable_ms, .break_ctl = rp2_uart_break_ctl, .startup = rp2_uart_startup, .shutdown = rp2_uart_shutdown, .set_termios = rp2_uart_set_termios, .type = rp2_uart_type, .release_port = rp2_uart_release_port, .request_port = rp2_uart_request_port, .config_port = rp2_uart_config_port, .verify_port = rp2_uart_verify_port, }; static void rp2_reset_asic(struct rp2_card *card, unsigned int asic_id) { void __iomem *base = card->bar1 + RP2_ASIC_OFFSET(asic_id); u32 clk_cfg; writew(1, base + RP2_GLOBAL_CMD); readw(base + RP2_GLOBAL_CMD); msleep(100); writel(0, base + RP2_CLK_PRESCALER); /* TDM clock configuration */ clk_cfg = readw(base + RP2_ASIC_CFG); clk_cfg = (clk_cfg & ~BIT(8)) | BIT(9); writew(clk_cfg, base + RP2_ASIC_CFG); /* IRQ routing */ writel(ALL_PORTS_MASK, base + RP2_CH_IRQ_MASK); writel(RP2_ASIC_IRQ_EN_m, base + RP2_ASIC_IRQ); } static void rp2_init_card(struct rp2_card *card) { writel(4, card->bar0 + RP2_FPGA_CTL0); writel(0, card->bar0 + RP2_FPGA_CTL1); rp2_reset_asic(card, 0); if (card->n_ports >= PORTS_PER_ASIC) rp2_reset_asic(card, 1); writel(RP2_IRQ_MASK_EN_m, card->bar0 + RP2_IRQ_MASK); } static void rp2_init_port(struct rp2_uart_port *up, const struct firmware *fw) { int i; 
writel(RP2_UART_CTL_RESET_CH_m, up->base + RP2_UART_CTL); readl(up->base + RP2_UART_CTL); udelay(1); writel(0, up->base + RP2_TXRX_CTL); writel(0, up->base + RP2_UART_CTL); readl(up->base + RP2_UART_CTL); udelay(1); rp2_flush_fifos(up); for (i = 0; i < min_t(int, fw->size, RP2_UCODE_BYTES); i++) writeb(fw->data[i], up->ucode + i); __rp2_uart_set_termios(up, CS8 | CREAD | CLOCAL, 0, DEFAULT_BAUD_DIV); rp2_uart_set_mctrl(&up->port, 0); writeb(RP2_RX_FIFO_ena, up->ucode + RP2_RX_FIFO); rp2_rmw(up, RP2_UART_CTL, RP2_UART_CTL_MODE_m, RP2_UART_CTL_XMIT_EN_m | RP2_UART_CTL_MODE_rs232); rp2_rmw_set(up, RP2_TXRX_CTL, RP2_TXRX_CTL_TX_EN_m | RP2_TXRX_CTL_RX_EN_m); } static void rp2_remove_ports(struct rp2_card *card) { int i; for (i = 0; i < card->initialized_ports; i++) uart_remove_one_port(&rp2_uart_driver, &card->ports[i].port); card->initialized_ports = 0; } static void rp2_fw_cb(const struct firmware *fw, void *context) { struct rp2_card *card = context; resource_size_t phys_base; int i, rc = -ENOENT; if (!fw) { dev_err(&card->pdev->dev, "cannot find '%s' firmware image\n", RP2_FW_NAME); goto no_fw; } phys_base = pci_resource_start(card->pdev, 1); for (i = 0; i < card->n_ports; i++) { struct rp2_uart_port *rp = &card->ports[i]; struct uart_port *p; int j = (unsigned)i % PORTS_PER_ASIC; rp->asic_base = card->bar1; rp->base = card->bar1 + RP2_PORT_BASE + j*RP2_PORT_SPACING; rp->ucode = card->bar1 + RP2_UCODE_BASE + j*RP2_UCODE_SPACING; rp->card = card; rp->idx = j; p = &rp->port; p->line = card->minor_start + i; p->dev = &card->pdev->dev; p->type = PORT_RP2; p->iotype = UPIO_MEM32; p->uartclk = UART_CLOCK; p->regshift = 2; p->fifosize = FIFO_SIZE; p->ops = &rp2_uart_ops; p->irq = card->pdev->irq; p->membase = rp->base; p->mapbase = phys_base + RP2_PORT_BASE + j*RP2_PORT_SPACING; if (i >= PORTS_PER_ASIC) { rp->asic_base += RP2_ASIC_SPACING; rp->base += RP2_ASIC_SPACING; rp->ucode += RP2_ASIC_SPACING; p->mapbase += RP2_ASIC_SPACING; } rp2_init_port(rp, fw); rc = 
uart_add_one_port(&rp2_uart_driver, p); if (rc) { dev_err(&card->pdev->dev, "error registering port %d: %d\n", i, rc); rp2_remove_ports(card); break; } card->initialized_ports++; } release_firmware(fw); no_fw: /* * rp2_fw_cb() is called from a workqueue long after rp2_probe() * has already returned success. So if something failed here, * we'll just leave the now-dormant device in place until somebody * unbinds it. */ if (rc) dev_warn(&card->pdev->dev, "driver initialization failed\n"); complete(&card->fw_loaded); } static int rp2_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct rp2_card *card; struct rp2_uart_port *ports; void __iomem * const *bars; int rc; card = devm_kzalloc(&pdev->dev, sizeof(*card), GFP_KERNEL); if (!card) return -ENOMEM; pci_set_drvdata(pdev, card); spin_lock_init(&card->card_lock); init_completion(&card->fw_loaded); rc = pcim_enable_device(pdev); if (rc) return rc; rc = pcim_iomap_regions_request_all(pdev, 0x03, DRV_NAME); if (rc) return rc; bars = pcim_iomap_table(pdev); card->bar0 = bars[0]; card->bar1 = bars[1]; card->pdev = pdev; rp2_decode_cap(id, &card->n_ports, &card->smpte); dev_info(&pdev->dev, "found new card with %d ports\n", card->n_ports); card->minor_start = rp2_alloc_ports(card->n_ports); if (card->minor_start < 0) { dev_err(&pdev->dev, "too many ports (try increasing CONFIG_SERIAL_RP2_NR_UARTS)\n"); return -EINVAL; } rp2_init_card(card); ports = devm_kzalloc(&pdev->dev, sizeof(*ports) * card->n_ports, GFP_KERNEL); if (!ports) return -ENOMEM; card->ports = ports; rc = devm_request_irq(&pdev->dev, pdev->irq, rp2_uart_interrupt, IRQF_SHARED, DRV_NAME, card); if (rc) return rc; /* * Only catastrophic errors (e.g. ENOMEM) are reported here. * If the FW image is missing, we'll find out in rp2_fw_cb() * and print an error message. 
*/ rc = request_firmware_nowait(THIS_MODULE, 1, RP2_FW_NAME, &pdev->dev, GFP_KERNEL, card, rp2_fw_cb); if (rc) return rc; dev_dbg(&pdev->dev, "waiting for firmware blob...\n"); return 0; } static void rp2_remove(struct pci_dev *pdev) { struct rp2_card *card = pci_get_drvdata(pdev); wait_for_completion(&card->fw_loaded); rp2_remove_ports(card); } static const struct pci_device_id rp2_pci_tbl[] = { /* RocketPort INFINITY cards */ { RP_ID(0x0040), RP_CAP(8, 0) }, /* INF Octa, RJ45, selectable */ { RP_ID(0x0041), RP_CAP(32, 0) }, /* INF 32, ext interface */ { RP_ID(0x0042), RP_CAP(8, 0) }, /* INF Octa, ext interface */ { RP_ID(0x0043), RP_CAP(16, 0) }, /* INF 16, ext interface */ { RP_ID(0x0044), RP_CAP(4, 0) }, /* INF Quad, DB, selectable */ { RP_ID(0x0045), RP_CAP(8, 0) }, /* INF Octa, DB, selectable */ { RP_ID(0x0046), RP_CAP(4, 0) }, /* INF Quad, ext interface */ { RP_ID(0x0047), RP_CAP(4, 0) }, /* INF Quad, RJ45 */ { RP_ID(0x004a), RP_CAP(4, 0) }, /* INF Plus, Quad */ { RP_ID(0x004b), RP_CAP(8, 0) }, /* INF Plus, Octa */ { RP_ID(0x004c), RP_CAP(8, 0) }, /* INF III, Octa */ { RP_ID(0x004d), RP_CAP(4, 0) }, /* INF III, Quad */ { RP_ID(0x004e), RP_CAP(2, 0) }, /* INF Plus, 2, RS232 */ { RP_ID(0x004f), RP_CAP(2, 1) }, /* INF Plus, 2, SMPTE */ { RP_ID(0x0050), RP_CAP(4, 0) }, /* INF Plus, Quad, RJ45 */ { RP_ID(0x0051), RP_CAP(8, 0) }, /* INF Plus, Octa, RJ45 */ { RP_ID(0x0052), RP_CAP(8, 1) }, /* INF Octa, SMPTE */ /* RocketPort EXPRESS cards */ { RP_ID(0x0060), RP_CAP(8, 0) }, /* EXP Octa, RJ45, selectable */ { RP_ID(0x0061), RP_CAP(32, 0) }, /* EXP 32, ext interface */ { RP_ID(0x0062), RP_CAP(8, 0) }, /* EXP Octa, ext interface */ { RP_ID(0x0063), RP_CAP(16, 0) }, /* EXP 16, ext interface */ { RP_ID(0x0064), RP_CAP(4, 0) }, /* EXP Quad, DB, selectable */ { RP_ID(0x0065), RP_CAP(8, 0) }, /* EXP Octa, DB, selectable */ { RP_ID(0x0066), RP_CAP(4, 0) }, /* EXP Quad, ext interface */ { RP_ID(0x0067), RP_CAP(4, 0) }, /* EXP Quad, RJ45 */ { RP_ID(0x0068), RP_CAP(8, 0) }, /* 
EXP Octa, RJ11 */ { RP_ID(0x0072), RP_CAP(8, 1) }, /* EXP Octa, SMPTE */ { } }; MODULE_DEVICE_TABLE(pci, rp2_pci_tbl); static struct pci_driver rp2_pci_driver = { .name = DRV_NAME, .id_table = rp2_pci_tbl, .probe = rp2_probe, .remove = rp2_remove, }; static int __init rp2_uart_init(void) { int rc; rc = uart_register_driver(&rp2_uart_driver); if (rc) return rc; rc = pci_register_driver(&rp2_pci_driver); if (rc) { uart_unregister_driver(&rp2_uart_driver); return rc; } return 0; } static void __exit rp2_uart_exit(void) { pci_unregister_driver(&rp2_pci_driver); uart_unregister_driver(&rp2_uart_driver); } module_init(rp2_uart_init); module_exit(rp2_uart_exit); MODULE_DESCRIPTION("Comtrol RocketPort EXPRESS/INFINITY driver"); MODULE_AUTHOR("Kevin Cernekee <cernekee@gmail.com>"); MODULE_LICENSE("GPL v2"); MODULE_FIRMWARE(RP2_FW_NAME);
gpl-2.0
7420dev/android_kernel_samsung_zero
net/batman-adv/ring_buffer.c
2148
1280
/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "main.h"
#include "ring_buffer.h"

/* Record one link-quality sample at the current write position and
 * advance the index, wrapping at BATADV_TQ_GLOBAL_WINDOW_SIZE.
 */
void batadv_ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index,
			    uint8_t value)
{
	lq_recv[*lq_index] = value;
	*lq_index = (*lq_index + 1) % BATADV_TQ_GLOBAL_WINDOW_SIZE;
}

/* Return the integer mean of all non-zero samples in the ring buffer,
 * or 0 when every slot is zero (empty/unused entries don't dilute the
 * average).
 */
uint8_t batadv_ring_buffer_avg(const uint8_t lq_recv[])
{
	uint16_t nonzero = 0, total = 0;
	unsigned int idx;

	for (idx = 0; idx < BATADV_TQ_GLOBAL_WINDOW_SIZE; idx++) {
		if (lq_recv[idx] == 0)
			continue;

		nonzero++;
		total += lq_recv[idx];
	}

	return nonzero ? (uint8_t)(total / nonzero) : 0;
}
gpl-2.0
loverlucia/linux-3.10.101
drivers/mfd/max8907.c
2404
9334
/*
 * max8907.c - mfd driver for MAX8907
 *
 * Copyright (C) 2010 Gyungoh Yoo <jack.yoo@maxim-ic.com>
 * Copyright (C) 2010-2012, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/mfd/core.h>
#include <linux/mfd/max8907.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>

/* Child devices registered by mfd_add_devices() in probe */
static struct mfd_cell max8907_cells[] = {
	{ .name = "max8907-regulator", },
	{ .name = "max8907-rtc", },
};

/* IRQ/status registers change underneath us, so bypass the regcache. */
static bool max8907_gen_is_volatile_reg(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case MAX8907_REG_ON_OFF_IRQ1:
	case MAX8907_REG_ON_OFF_STAT:
	case MAX8907_REG_ON_OFF_IRQ2:
	case MAX8907_REG_CHG_IRQ1:
	case MAX8907_REG_CHG_IRQ2:
	case MAX8907_REG_CHG_STAT:
		return true;
	default:
		return false;
	}
}

/* IRQ registers are clear-on-read; reading them has side effects. */
static bool max8907_gen_is_precious_reg(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case MAX8907_REG_ON_OFF_IRQ1:
	case MAX8907_REG_ON_OFF_IRQ2:
	case MAX8907_REG_CHG_IRQ1:
	case MAX8907_REG_CHG_IRQ2:
		return true;
	default:
		return false;
	}
}

static bool max8907_gen_is_writeable_reg(struct device *dev, unsigned int reg)
{
	return !max8907_gen_is_volatile_reg(dev, reg);
}

static const struct regmap_config max8907_regmap_gen_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.volatile_reg = max8907_gen_is_volatile_reg,
	.precious_reg = max8907_gen_is_precious_reg,
	.writeable_reg = max8907_gen_is_writeable_reg,
	.max_register = MAX8907_REG_LDO20VOUT,
	.cache_type = REGCACHE_RBTREE,
};

/* Time-of-day registers tick continuously; never cache them. */
static bool max8907_rtc_is_volatile_reg(struct device *dev, unsigned int reg)
{
	if (reg <= MAX8907_REG_RTC_YEAR2)
		return true;

	switch (reg) {
	case MAX8907_REG_RTC_STATUS:
	case MAX8907_REG_RTC_IRQ:
		return true;
	default:
		return false;
	}
}

static bool max8907_rtc_is_precious_reg(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case MAX8907_REG_RTC_IRQ:
		return true;
	default:
		return false;
	}
}

static bool max8907_rtc_is_writeable_reg(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case MAX8907_REG_RTC_STATUS:
	case MAX8907_REG_RTC_IRQ:
		return false;
	default:
		return true;
	}
}

static const struct regmap_config max8907_regmap_rtc_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.volatile_reg = max8907_rtc_is_volatile_reg,
	.precious_reg = max8907_rtc_is_precious_reg,
	.writeable_reg = max8907_rtc_is_writeable_reg,
	.max_register = MAX8907_REG_MPL_CNTL,
	.cache_type = REGCACHE_RBTREE,
};

static const struct regmap_irq max8907_chg_irqs[] = {
	{ .reg_offset = 0, .mask = 1 << 0, },
	{ .reg_offset = 0, .mask = 1 << 1, },
	{ .reg_offset = 0, .mask = 1 << 2, },
	{ .reg_offset = 1, .mask = 1 << 0, },
	{ .reg_offset = 1, .mask = 1 << 1, },
	{ .reg_offset = 1, .mask = 1 << 2, },
	{ .reg_offset = 1, .mask = 1 << 3, },
	{ .reg_offset = 1, .mask = 1 << 4, },
	{ .reg_offset = 1, .mask = 1 << 5, },
	{ .reg_offset = 1, .mask = 1 << 6, },
	{ .reg_offset = 1, .mask = 1 << 7, },
};

static const struct regmap_irq_chip max8907_chg_irq_chip = {
	.name = "max8907 chg",
	.status_base = MAX8907_REG_CHG_IRQ1,
	.mask_base = MAX8907_REG_CHG_IRQ1_MASK,
	.wake_base = MAX8907_REG_CHG_IRQ1_MASK,
	.irq_reg_stride = MAX8907_REG_CHG_IRQ2 - MAX8907_REG_CHG_IRQ1,
	.num_regs = 2,
	.irqs = max8907_chg_irqs,
	.num_irqs = ARRAY_SIZE(max8907_chg_irqs),
};

static const struct regmap_irq max8907_on_off_irqs[] = {
	{ .reg_offset = 0, .mask = 1 << 0, },
	{ .reg_offset = 0, .mask = 1 << 1, },
	{ .reg_offset = 0, .mask = 1 << 2, },
	{ .reg_offset = 0, .mask = 1 << 3, },
	{ .reg_offset = 0, .mask = 1 << 4, },
	{ .reg_offset = 0, .mask = 1 << 5, },
	{ .reg_offset = 0, .mask = 1 << 6, },
	{ .reg_offset = 0, .mask = 1 << 7, },
	{ .reg_offset = 1, .mask = 1 << 0, },
	{ .reg_offset = 1, .mask = 1 << 1, },
};

static const struct regmap_irq_chip max8907_on_off_irq_chip = {
	.name = "max8907 on_off",
	.status_base = MAX8907_REG_ON_OFF_IRQ1,
	.mask_base = MAX8907_REG_ON_OFF_IRQ1_MASK,
	.irq_reg_stride = MAX8907_REG_ON_OFF_IRQ2 - MAX8907_REG_ON_OFF_IRQ1,
	.num_regs = 2,
	.irqs = max8907_on_off_irqs,
	.num_irqs = ARRAY_SIZE(max8907_on_off_irqs),
};

static const struct regmap_irq max8907_rtc_irqs[] = {
	{ .reg_offset = 0, .mask = 1 << 2, },
	{ .reg_offset = 0, .mask = 1 << 3, },
};

static const struct regmap_irq_chip max8907_rtc_irq_chip = {
	.name = "max8907 rtc",
	.status_base = MAX8907_REG_RTC_IRQ,
	.mask_base = MAX8907_REG_RTC_IRQ_MASK,
	.num_regs = 1,
	.irqs = max8907_rtc_irqs,
	.num_irqs = ARRAY_SIZE(max8907_rtc_irqs),
};

static struct max8907 *max8907_pm_off;

/* pm_power_off hook: ask the PMIC to cut system power. */
static void max8907_power_off(void)
{
	regmap_update_bits(max8907_pm_off->regmap_gen, MAX8907_REG_RESET_CNFG,
			MAX8907_MASK_POWER_OFF, MAX8907_MASK_POWER_OFF);
}

static int max8907_i2c_probe(struct i2c_client *i2c,
				       const struct i2c_device_id *id)
{
	struct max8907 *max8907;
	int ret;
	struct max8907_platform_data *pdata = dev_get_platdata(&i2c->dev);
	bool pm_off = false;

	if (pdata)
		pm_off = pdata->pm_off;
	else if (i2c->dev.of_node)
		pm_off = of_property_read_bool(i2c->dev.of_node,
					"maxim,system-power-controller");

	max8907 = devm_kzalloc(&i2c->dev, sizeof(struct max8907), GFP_KERNEL);
	if (!max8907) {
		ret = -ENOMEM;
		goto err_alloc_drvdata;
	}

	max8907->dev = &i2c->dev;
	dev_set_drvdata(max8907->dev, max8907);

	max8907->i2c_gen = i2c;
	i2c_set_clientdata(i2c, max8907);
	max8907->regmap_gen = devm_regmap_init_i2c(i2c,
						&max8907_regmap_gen_config);
	if (IS_ERR(max8907->regmap_gen)) {
		ret = PTR_ERR(max8907->regmap_gen);
		dev_err(&i2c->dev, "gen regmap init failed: %d\n", ret);
		goto err_regmap_gen;
	}

	/* The RTC block answers on a second, fixed I2C address. */
	max8907->i2c_rtc = i2c_new_dummy(i2c->adapter, MAX8907_RTC_I2C_ADDR);
	if (!max8907->i2c_rtc) {
		ret = -ENOMEM;
		goto err_dummy_rtc;
	}
	i2c_set_clientdata(max8907->i2c_rtc, max8907);
	max8907->regmap_rtc = devm_regmap_init_i2c(max8907->i2c_rtc,
						&max8907_regmap_rtc_config);
	if (IS_ERR(max8907->regmap_rtc)) {
		ret = PTR_ERR(max8907->regmap_rtc);
		dev_err(&i2c->dev, "rtc regmap init failed: %d\n", ret);
		goto err_regmap_rtc;
	}

	/* Keep the shared IRQ line off until all three chips are added. */
	irq_set_status_flags(max8907->i2c_gen->irq, IRQ_NOAUTOEN);

	ret = regmap_add_irq_chip(max8907->regmap_gen, max8907->i2c_gen->irq,
				  IRQF_ONESHOT | IRQF_SHARED, -1,
				  &max8907_chg_irq_chip,
				  &max8907->irqc_chg);
	if (ret != 0) {
		dev_err(&i2c->dev, "failed to add chg irq chip: %d\n", ret);
		goto err_irqc_chg;
	}
	ret = regmap_add_irq_chip(max8907->regmap_gen, max8907->i2c_gen->irq,
				  IRQF_ONESHOT | IRQF_SHARED, -1,
				  &max8907_on_off_irq_chip,
				  &max8907->irqc_on_off);
	if (ret != 0) {
		dev_err(&i2c->dev, "failed to add on off irq chip: %d\n", ret);
		goto err_irqc_on_off;
	}
	ret = regmap_add_irq_chip(max8907->regmap_rtc, max8907->i2c_gen->irq,
				  IRQF_ONESHOT | IRQF_SHARED, -1,
				  &max8907_rtc_irq_chip,
				  &max8907->irqc_rtc);
	if (ret != 0) {
		dev_err(&i2c->dev, "failed to add rtc irq chip: %d\n", ret);
		goto err_irqc_rtc;
	}

	enable_irq(max8907->i2c_gen->irq);

	ret = mfd_add_devices(max8907->dev, -1, max8907_cells,
			      ARRAY_SIZE(max8907_cells), NULL, 0, NULL);
	if (ret != 0) {
		dev_err(&i2c->dev, "failed to add MFD devices %d\n", ret);
		goto err_add_devices;
	}

	if (pm_off && !pm_power_off) {
		max8907_pm_off = max8907;
		pm_power_off = max8907_power_off;
	}

	return 0;

err_add_devices:
	regmap_del_irq_chip(max8907->i2c_gen->irq, max8907->irqc_rtc);
err_irqc_rtc:
	regmap_del_irq_chip(max8907->i2c_gen->irq, max8907->irqc_on_off);
err_irqc_on_off:
	regmap_del_irq_chip(max8907->i2c_gen->irq, max8907->irqc_chg);
err_irqc_chg:
err_regmap_rtc:
	i2c_unregister_device(max8907->i2c_rtc);
err_dummy_rtc:
err_regmap_gen:
err_alloc_drvdata:
	return ret;
}

static int max8907_i2c_remove(struct i2c_client *i2c)
{
	struct max8907 *max8907 = i2c_get_clientdata(i2c);

	mfd_remove_devices(max8907->dev);

	regmap_del_irq_chip(max8907->i2c_gen->irq, max8907->irqc_rtc);
	regmap_del_irq_chip(max8907->i2c_gen->irq, max8907->irqc_on_off);
	regmap_del_irq_chip(max8907->i2c_gen->irq, max8907->irqc_chg);

	i2c_unregister_device(max8907->i2c_rtc);

	return 0;
}

#ifdef CONFIG_OF
/* FIX: of_device_id match tables should be const (of_match_table takes
 * a const pointer); this also lets the table live in rodata.
 */
static const struct of_device_id max8907_of_match[] = {
	{ .compatible = "maxim,max8907" },
	{ },
};
MODULE_DEVICE_TABLE(of, max8907_of_match);
#endif

static const struct i2c_device_id max8907_i2c_id[] = {
	{"max8907", 0},
	{}
};
MODULE_DEVICE_TABLE(i2c, max8907_i2c_id);

static struct i2c_driver max8907_i2c_driver = {
	.driver = {
		.name = "max8907",
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(max8907_of_match),
	},
	.probe = max8907_i2c_probe,
	.remove = max8907_i2c_remove,
	.id_table = max8907_i2c_id,
};

static int __init max8907_i2c_init(void)
{
	int ret = -ENODEV;

	ret = i2c_add_driver(&max8907_i2c_driver);
	if (ret != 0)
		pr_err("Failed to register I2C driver: %d\n", ret);

	return ret;
}
subsys_initcall(max8907_i2c_init);

static void __exit max8907_i2c_exit(void)
{
	i2c_del_driver(&max8907_i2c_driver);
}
module_exit(max8907_i2c_exit);

MODULE_DESCRIPTION("MAX8907 multi-function core driver");
MODULE_AUTHOR("Gyungoh Yoo <jack.yoo@maxim-ic.com>");
MODULE_LICENSE("GPL v2");
gpl-2.0
wm8120/Linux-eMMC-journaling
drivers/pcmcia/ds.c
2404
35177
/* * ds.c -- 16-bit PCMCIA core support * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * The initial developer of the original code is David A. Hinds * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds * are Copyright (C) 1999 David A. Hinds. All Rights Reserved. * * (C) 1999 David A. Hinds * (C) 2003 - 2010 Dominik Brodowski */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/list.h> #include <linux/delay.h> #include <linux/workqueue.h> #include <linux/crc32.h> #include <linux/firmware.h> #include <linux/kref.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> #include <pcmcia/ss.h> #include "cs_internal.h" /*====================================================================*/ /* Module parameters */ MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>"); MODULE_DESCRIPTION("PCMCIA Driver Services"); MODULE_LICENSE("GPL"); /*====================================================================*/ static void pcmcia_check_driver(struct pcmcia_driver *p_drv) { const struct pcmcia_device_id *did = p_drv->id_table; unsigned int i; u32 hash; if (!p_drv->probe || !p_drv->remove) printk(KERN_DEBUG "pcmcia: %s lacks a requisite callback " "function\n", p_drv->name); while (did && did->match_flags) { for (i = 0; i < 4; i++) { if (!did->prod_id[i]) continue; hash = crc32(0, did->prod_id[i], strlen(did->prod_id[i])); if (hash == did->prod_id_hash[i]) continue; printk(KERN_DEBUG "pcmcia: %s: invalid hash for " "product string \"%s\": is 0x%x, should " "be 0x%x\n", p_drv->name, did->prod_id[i], did->prod_id_hash[i], hash); printk(KERN_DEBUG "pcmcia: see " "Documentation/pcmcia/devicetable.txt for " "details\n"); } did++; } return; } 
/*======================================================================*/ struct pcmcia_dynid { struct list_head node; struct pcmcia_device_id id; }; /** * pcmcia_store_new_id - add a new PCMCIA device ID to this driver and re-probe devices * @driver: target device driver * @buf: buffer for scanning device ID data * @count: input size * * Adds a new dynamic PCMCIA device ID to this driver, * and causes the driver to probe for all devices again. */ static ssize_t pcmcia_store_new_id(struct device_driver *driver, const char *buf, size_t count) { struct pcmcia_dynid *dynid; struct pcmcia_driver *pdrv = to_pcmcia_drv(driver); __u16 match_flags, manf_id, card_id; __u8 func_id, function, device_no; __u32 prod_id_hash[4] = {0, 0, 0, 0}; int fields = 0; int retval = 0; fields = sscanf(buf, "%hx %hx %hx %hhx %hhx %hhx %x %x %x %x", &match_flags, &manf_id, &card_id, &func_id, &function, &device_no, &prod_id_hash[0], &prod_id_hash[1], &prod_id_hash[2], &prod_id_hash[3]); if (fields < 6) return -EINVAL; dynid = kzalloc(sizeof(struct pcmcia_dynid), GFP_KERNEL); if (!dynid) return -ENOMEM; dynid->id.match_flags = match_flags; dynid->id.manf_id = manf_id; dynid->id.card_id = card_id; dynid->id.func_id = func_id; dynid->id.function = function; dynid->id.device_no = device_no; memcpy(dynid->id.prod_id_hash, prod_id_hash, sizeof(__u32) * 4); mutex_lock(&pdrv->dynids.lock); list_add_tail(&dynid->node, &pdrv->dynids.list); mutex_unlock(&pdrv->dynids.lock); retval = driver_attach(&pdrv->drv); if (retval) return retval; return count; } static DRIVER_ATTR(new_id, S_IWUSR, NULL, pcmcia_store_new_id); static void pcmcia_free_dynids(struct pcmcia_driver *drv) { struct pcmcia_dynid *dynid, *n; mutex_lock(&drv->dynids.lock); list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) { list_del(&dynid->node); kfree(dynid); } mutex_unlock(&drv->dynids.lock); } static int pcmcia_create_newid_file(struct pcmcia_driver *drv) { int error = 0; if (drv->probe != NULL) error = 
driver_create_file(&drv->drv, &driver_attr_new_id); return error; } static void pcmcia_remove_newid_file(struct pcmcia_driver *drv) { driver_remove_file(&drv->drv, &driver_attr_new_id); } /** * pcmcia_register_driver - register a PCMCIA driver with the bus core * @driver: the &driver being registered * * Registers a PCMCIA driver with the PCMCIA bus core. */ int pcmcia_register_driver(struct pcmcia_driver *driver) { int error; if (!driver) return -EINVAL; pcmcia_check_driver(driver); /* initialize common fields */ driver->drv.bus = &pcmcia_bus_type; driver->drv.owner = driver->owner; driver->drv.name = driver->name; mutex_init(&driver->dynids.lock); INIT_LIST_HEAD(&driver->dynids.list); pr_debug("registering driver %s\n", driver->name); error = driver_register(&driver->drv); if (error < 0) return error; error = pcmcia_create_newid_file(driver); if (error) driver_unregister(&driver->drv); return error; } EXPORT_SYMBOL(pcmcia_register_driver); /** * pcmcia_unregister_driver - unregister a PCMCIA driver with the bus core * @driver: the &driver being unregistered */ void pcmcia_unregister_driver(struct pcmcia_driver *driver) { pr_debug("unregistering driver %s\n", driver->name); pcmcia_remove_newid_file(driver); driver_unregister(&driver->drv); pcmcia_free_dynids(driver); } EXPORT_SYMBOL(pcmcia_unregister_driver); /* pcmcia_device handling */ static struct pcmcia_device *pcmcia_get_dev(struct pcmcia_device *p_dev) { struct device *tmp_dev; tmp_dev = get_device(&p_dev->dev); if (!tmp_dev) return NULL; return to_pcmcia_dev(tmp_dev); } static void pcmcia_put_dev(struct pcmcia_device *p_dev) { if (p_dev) put_device(&p_dev->dev); } static void pcmcia_release_function(struct kref *ref) { struct config_t *c = container_of(ref, struct config_t, ref); pr_debug("releasing config_t\n"); kfree(c); } static void pcmcia_release_dev(struct device *dev) { struct pcmcia_device *p_dev = to_pcmcia_dev(dev); int i; dev_dbg(dev, "releasing device\n"); pcmcia_put_socket(p_dev->socket); for 
(i = 0; i < 4; i++) kfree(p_dev->prod_id[i]); kfree(p_dev->devname); kref_put(&p_dev->function_config->ref, pcmcia_release_function); kfree(p_dev); } static int pcmcia_device_probe(struct device *dev) { struct pcmcia_device *p_dev; struct pcmcia_driver *p_drv; struct pcmcia_socket *s; cistpl_config_t cis_config; int ret = 0; dev = get_device(dev); if (!dev) return -ENODEV; p_dev = to_pcmcia_dev(dev); p_drv = to_pcmcia_drv(dev->driver); s = p_dev->socket; dev_dbg(dev, "trying to bind to %s\n", p_drv->name); if ((!p_drv->probe) || (!p_dev->function_config) || (!try_module_get(p_drv->owner))) { ret = -EINVAL; goto put_dev; } /* set up some more device information */ ret = pccard_read_tuple(p_dev->socket, p_dev->func, CISTPL_CONFIG, &cis_config); if (!ret) { p_dev->config_base = cis_config.base; p_dev->config_regs = cis_config.rmask[0]; dev_dbg(dev, "base %x, regs %x", p_dev->config_base, p_dev->config_regs); } else { dev_printk(KERN_INFO, dev, "pcmcia: could not parse base and rmask0 of CIS\n"); p_dev->config_base = 0; p_dev->config_regs = 0; } ret = p_drv->probe(p_dev); if (ret) { dev_dbg(dev, "binding to %s failed with %d\n", p_drv->name, ret); goto put_module; } dev_dbg(dev, "%s bound: Vpp %d.%d, idx %x, IRQ %d", p_drv->name, p_dev->vpp/10, p_dev->vpp%10, p_dev->config_index, p_dev->irq); dev_dbg(dev, "resources: ioport %pR %pR iomem %pR %pR %pR", p_dev->resource[0], p_dev->resource[1], p_dev->resource[2], p_dev->resource[3], p_dev->resource[4]); mutex_lock(&s->ops_mutex); if ((s->pcmcia_pfc) && (p_dev->socket->device_count == 1) && (p_dev->device_no == 0)) pcmcia_parse_uevents(s, PCMCIA_UEVENT_REQUERY); mutex_unlock(&s->ops_mutex); put_module: if (ret) module_put(p_drv->owner); put_dev: if (ret) put_device(dev); return ret; } /* * Removes a PCMCIA card from the device tree and socket list. */ static void pcmcia_card_remove(struct pcmcia_socket *s, struct pcmcia_device *leftover) { struct pcmcia_device *p_dev; struct pcmcia_device *tmp; dev_dbg(leftover ? 
&leftover->dev : &s->dev, "pcmcia_card_remove(%d) %s\n", s->sock, leftover ? leftover->devname : ""); mutex_lock(&s->ops_mutex); if (!leftover) s->device_count = 0; else s->device_count = 1; mutex_unlock(&s->ops_mutex); /* unregister all pcmcia_devices registered with this socket, except leftover */ list_for_each_entry_safe(p_dev, tmp, &s->devices_list, socket_device_list) { if (p_dev == leftover) continue; mutex_lock(&s->ops_mutex); list_del(&p_dev->socket_device_list); mutex_unlock(&s->ops_mutex); dev_dbg(&p_dev->dev, "unregistering device\n"); device_unregister(&p_dev->dev); } return; } static int pcmcia_device_remove(struct device *dev) { struct pcmcia_device *p_dev; struct pcmcia_driver *p_drv; int i; p_dev = to_pcmcia_dev(dev); p_drv = to_pcmcia_drv(dev->driver); dev_dbg(dev, "removing device\n"); /* If we're removing the primary module driving a * pseudo multi-function card, we need to unbind * all devices */ if ((p_dev->socket->pcmcia_pfc) && (p_dev->socket->device_count > 0) && (p_dev->device_no == 0)) pcmcia_card_remove(p_dev->socket, p_dev); /* detach the "instance" */ if (!p_drv) return 0; if (p_drv->remove) p_drv->remove(p_dev); /* check for proper unloading */ if (p_dev->_irq || p_dev->_io || p_dev->_locked) dev_printk(KERN_INFO, dev, "pcmcia: driver %s did not release config properly\n", p_drv->name); for (i = 0; i < MAX_WIN; i++) if (p_dev->_win & CLIENT_WIN_REQ(i)) dev_printk(KERN_INFO, dev, "pcmcia: driver %s did not release window properly\n", p_drv->name); /* references from pcmcia_probe_device */ pcmcia_put_dev(p_dev); module_put(p_drv->owner); return 0; } /* * pcmcia_device_query -- determine information about a pcmcia device */ static int pcmcia_device_query(struct pcmcia_device *p_dev) { cistpl_manfid_t manf_id; cistpl_funcid_t func_id; cistpl_vers_1_t *vers1; unsigned int i; vers1 = kmalloc(sizeof(*vers1), GFP_KERNEL); if (!vers1) return -ENOMEM; if (!pccard_read_tuple(p_dev->socket, BIND_FN_ALL, CISTPL_MANFID, &manf_id)) { 
mutex_lock(&p_dev->socket->ops_mutex); p_dev->manf_id = manf_id.manf; p_dev->card_id = manf_id.card; p_dev->has_manf_id = 1; p_dev->has_card_id = 1; mutex_unlock(&p_dev->socket->ops_mutex); } if (!pccard_read_tuple(p_dev->socket, p_dev->func, CISTPL_FUNCID, &func_id)) { mutex_lock(&p_dev->socket->ops_mutex); p_dev->func_id = func_id.func; p_dev->has_func_id = 1; mutex_unlock(&p_dev->socket->ops_mutex); } else { /* rule of thumb: cards with no FUNCID, but with * common memory device geometry information, are * probably memory cards (from pcmcia-cs) */ cistpl_device_geo_t *devgeo; devgeo = kmalloc(sizeof(*devgeo), GFP_KERNEL); if (!devgeo) { kfree(vers1); return -ENOMEM; } if (!pccard_read_tuple(p_dev->socket, p_dev->func, CISTPL_DEVICE_GEO, devgeo)) { dev_dbg(&p_dev->dev, "mem device geometry probably means " "FUNCID_MEMORY\n"); mutex_lock(&p_dev->socket->ops_mutex); p_dev->func_id = CISTPL_FUNCID_MEMORY; p_dev->has_func_id = 1; mutex_unlock(&p_dev->socket->ops_mutex); } kfree(devgeo); } if (!pccard_read_tuple(p_dev->socket, BIND_FN_ALL, CISTPL_VERS_1, vers1)) { mutex_lock(&p_dev->socket->ops_mutex); for (i = 0; i < min_t(unsigned int, 4, vers1->ns); i++) { char *tmp; unsigned int length; char *new; tmp = vers1->str + vers1->ofs[i]; length = strlen(tmp) + 1; if ((length < 2) || (length > 255)) continue; new = kmalloc(sizeof(char) * length, GFP_KERNEL); if (!new) continue; new = strncpy(new, tmp, length); tmp = p_dev->prod_id[i]; p_dev->prod_id[i] = new; kfree(tmp); } mutex_unlock(&p_dev->socket->ops_mutex); } kfree(vers1); return 0; } static struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s, unsigned int function) { struct pcmcia_device *p_dev, *tmp_dev; int i; s = pcmcia_get_socket(s); if (!s) return NULL; pr_debug("adding device to %d, function %d\n", s->sock, function); p_dev = kzalloc(sizeof(struct pcmcia_device), GFP_KERNEL); if (!p_dev) goto err_put; mutex_lock(&s->ops_mutex); p_dev->device_no = (s->device_count++); mutex_unlock(&s->ops_mutex); 
/* max of 2 PFC devices */ if ((p_dev->device_no >= 2) && (function == 0)) goto err_free; /* max of 4 devices overall */ if (p_dev->device_no >= 4) goto err_free; p_dev->socket = s; p_dev->func = function; p_dev->dev.bus = &pcmcia_bus_type; p_dev->dev.parent = s->dev.parent; p_dev->dev.release = pcmcia_release_dev; /* by default don't allow DMA */ p_dev->dma_mask = DMA_MASK_NONE; p_dev->dev.dma_mask = &p_dev->dma_mask; dev_set_name(&p_dev->dev, "%d.%d", p_dev->socket->sock, p_dev->device_no); if (!dev_name(&p_dev->dev)) goto err_free; p_dev->devname = kasprintf(GFP_KERNEL, "pcmcia%s", dev_name(&p_dev->dev)); if (!p_dev->devname) goto err_free; dev_dbg(&p_dev->dev, "devname is %s\n", p_dev->devname); mutex_lock(&s->ops_mutex); /* * p_dev->function_config must be the same for all card functions. * Note that this is serialized by ops_mutex, so that only one * such struct will be created. */ list_for_each_entry(tmp_dev, &s->devices_list, socket_device_list) if (p_dev->func == tmp_dev->func) { p_dev->function_config = tmp_dev->function_config; p_dev->irq = tmp_dev->irq; kref_get(&p_dev->function_config->ref); } /* Add to the list in pcmcia_bus_socket */ list_add(&p_dev->socket_device_list, &s->devices_list); if (pcmcia_setup_irq(p_dev)) dev_warn(&p_dev->dev, "IRQ setup failed -- device might not work\n"); if (!p_dev->function_config) { config_t *c; dev_dbg(&p_dev->dev, "creating config_t\n"); c = kzalloc(sizeof(struct config_t), GFP_KERNEL); if (!c) { mutex_unlock(&s->ops_mutex); goto err_unreg; } p_dev->function_config = c; kref_init(&c->ref); for (i = 0; i < MAX_IO_WIN; i++) { c->io[i].name = p_dev->devname; c->io[i].flags = IORESOURCE_IO; } for (i = 0; i< MAX_WIN; i++) { c->mem[i].name = p_dev->devname; c->mem[i].flags = IORESOURCE_MEM; } } for (i = 0; i < MAX_IO_WIN; i++) p_dev->resource[i] = &p_dev->function_config->io[i]; for (; i < (MAX_IO_WIN + MAX_WIN); i++) p_dev->resource[i] = &p_dev->function_config->mem[i-MAX_IO_WIN]; mutex_unlock(&s->ops_mutex); 
dev_printk(KERN_NOTICE, &p_dev->dev, "pcmcia: registering new device %s (IRQ: %d)\n", p_dev->devname, p_dev->irq); pcmcia_device_query(p_dev); if (device_register(&p_dev->dev)) goto err_unreg; return p_dev; err_unreg: mutex_lock(&s->ops_mutex); list_del(&p_dev->socket_device_list); mutex_unlock(&s->ops_mutex); err_free: mutex_lock(&s->ops_mutex); s->device_count--; mutex_unlock(&s->ops_mutex); for (i = 0; i < 4; i++) kfree(p_dev->prod_id[i]); kfree(p_dev->devname); kfree(p_dev); err_put: pcmcia_put_socket(s); return NULL; } static int pcmcia_card_add(struct pcmcia_socket *s) { cistpl_longlink_mfc_t mfc; unsigned int no_funcs, i, no_chains; int ret = -EAGAIN; mutex_lock(&s->ops_mutex); if (!(s->resource_setup_done)) { dev_dbg(&s->dev, "no resources available, delaying card_add\n"); mutex_unlock(&s->ops_mutex); return -EAGAIN; /* try again, but later... */ } if (pcmcia_validate_mem(s)) { dev_dbg(&s->dev, "validating mem resources failed, " "delaying card_add\n"); mutex_unlock(&s->ops_mutex); return -EAGAIN; /* try again, but later... */ } mutex_unlock(&s->ops_mutex); ret = pccard_validate_cis(s, &no_chains); if (ret || !no_chains) { dev_dbg(&s->dev, "invalid CIS or invalid resources\n"); return -ENODEV; } if (!pccard_read_tuple(s, BIND_FN_ALL, CISTPL_LONGLINK_MFC, &mfc)) no_funcs = mfc.nfn; else no_funcs = 1; s->functions = no_funcs; for (i = 0; i < no_funcs; i++) pcmcia_device_add(s, i); return ret; } static int pcmcia_requery_callback(struct device *dev, void * _data) { struct pcmcia_device *p_dev = to_pcmcia_dev(dev); if (!p_dev->dev.driver) { dev_dbg(dev, "update device information\n"); pcmcia_device_query(p_dev); } return 0; } static void pcmcia_requery(struct pcmcia_socket *s) { int has_pfc; if (s->functions == 0) { pcmcia_card_add(s); return; } /* some device information might have changed because of a CIS * update or because we can finally read it correctly... so * determine it again, overwriting old values if necessary. 
*/ bus_for_each_dev(&pcmcia_bus_type, NULL, NULL, pcmcia_requery_callback); /* if the CIS changed, we need to check whether the number of * functions changed. */ if (s->fake_cis) { int old_funcs, new_funcs; cistpl_longlink_mfc_t mfc; /* does this cis override add or remove functions? */ old_funcs = s->functions; if (!pccard_read_tuple(s, BIND_FN_ALL, CISTPL_LONGLINK_MFC, &mfc)) new_funcs = mfc.nfn; else new_funcs = 1; if (old_funcs != new_funcs) { /* we need to re-start */ pcmcia_card_remove(s, NULL); s->functions = 0; pcmcia_card_add(s); } } /* If the PCMCIA device consists of two pseudo devices, * call pcmcia_device_add() -- which will fail if both * devices are already registered. */ mutex_lock(&s->ops_mutex); has_pfc = s->pcmcia_pfc; mutex_unlock(&s->ops_mutex); if (has_pfc) pcmcia_device_add(s, 0); /* we re-scan all devices, not just the ones connected to this * socket. This does not matter, though. */ if (bus_rescan_devices(&pcmcia_bus_type)) dev_warn(&s->dev, "rescanning the bus failed\n"); } #ifdef CONFIG_PCMCIA_LOAD_CIS /** * pcmcia_load_firmware - load CIS from userspace if device-provided is broken * @dev: the pcmcia device which needs a CIS override * @filename: requested filename in /lib/firmware/ * * This uses the in-kernel firmware loading mechanism to use a "fake CIS" if * the one provided by the card is broken. The firmware files reside in * /lib/firmware/ in userspace. 
*/ static int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename) { struct pcmcia_socket *s = dev->socket; const struct firmware *fw; int ret = -ENOMEM; cistpl_longlink_mfc_t mfc; int old_funcs, new_funcs = 1; if (!filename) return -EINVAL; dev_dbg(&dev->dev, "trying to load CIS file %s\n", filename); if (request_firmware(&fw, filename, &dev->dev) == 0) { if (fw->size >= CISTPL_MAX_CIS_SIZE) { ret = -EINVAL; dev_printk(KERN_ERR, &dev->dev, "pcmcia: CIS override is too big\n"); goto release; } if (!pcmcia_replace_cis(s, fw->data, fw->size)) ret = 0; else { dev_printk(KERN_ERR, &dev->dev, "pcmcia: CIS override failed\n"); goto release; } /* we need to re-start if the number of functions changed */ old_funcs = s->functions; if (!pccard_read_tuple(s, BIND_FN_ALL, CISTPL_LONGLINK_MFC, &mfc)) new_funcs = mfc.nfn; if (old_funcs != new_funcs) ret = -EBUSY; /* update information */ pcmcia_device_query(dev); /* requery (as number of functions might have changed) */ pcmcia_parse_uevents(s, PCMCIA_UEVENT_REQUERY); } release: release_firmware(fw); return ret; } #else /* !CONFIG_PCMCIA_LOAD_CIS */ static inline int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename) { return -ENODEV; } #endif static inline int pcmcia_devmatch(struct pcmcia_device *dev, const struct pcmcia_device_id *did) { if (did->match_flags & PCMCIA_DEV_ID_MATCH_MANF_ID) { if ((!dev->has_manf_id) || (dev->manf_id != did->manf_id)) return 0; } if (did->match_flags & PCMCIA_DEV_ID_MATCH_CARD_ID) { if ((!dev->has_card_id) || (dev->card_id != did->card_id)) return 0; } if (did->match_flags & PCMCIA_DEV_ID_MATCH_FUNCTION) { if (dev->func != did->function) return 0; } if (did->match_flags & PCMCIA_DEV_ID_MATCH_PROD_ID1) { if (!dev->prod_id[0]) return 0; if (strcmp(did->prod_id[0], dev->prod_id[0])) return 0; } if (did->match_flags & PCMCIA_DEV_ID_MATCH_PROD_ID2) { if (!dev->prod_id[1]) return 0; if (strcmp(did->prod_id[1], dev->prod_id[1])) return 0; } if (did->match_flags & 
PCMCIA_DEV_ID_MATCH_PROD_ID3) { if (!dev->prod_id[2]) return 0; if (strcmp(did->prod_id[2], dev->prod_id[2])) return 0; } if (did->match_flags & PCMCIA_DEV_ID_MATCH_PROD_ID4) { if (!dev->prod_id[3]) return 0; if (strcmp(did->prod_id[3], dev->prod_id[3])) return 0; } if (did->match_flags & PCMCIA_DEV_ID_MATCH_DEVICE_NO) { dev_dbg(&dev->dev, "this is a pseudo-multi-function device\n"); mutex_lock(&dev->socket->ops_mutex); dev->socket->pcmcia_pfc = 1; mutex_unlock(&dev->socket->ops_mutex); if (dev->device_no != did->device_no) return 0; } if (did->match_flags & PCMCIA_DEV_ID_MATCH_FUNC_ID) { int ret; if ((!dev->has_func_id) || (dev->func_id != did->func_id)) return 0; /* if this is a pseudo-multi-function device, * we need explicit matches */ if (dev->socket->pcmcia_pfc) return 0; if (dev->device_no) return 0; /* also, FUNC_ID matching needs to be activated by userspace * after it has re-checked that there is no possible module * with a prod_id/manf_id/card_id match. */ mutex_lock(&dev->socket->ops_mutex); ret = dev->allow_func_id_match; mutex_unlock(&dev->socket->ops_mutex); if (!ret) { dev_dbg(&dev->dev, "skipping FUNC_ID match until userspace ACK\n"); return 0; } } if (did->match_flags & PCMCIA_DEV_ID_MATCH_FAKE_CIS) { dev_dbg(&dev->dev, "device needs a fake CIS\n"); if (!dev->socket->fake_cis) if (pcmcia_load_firmware(dev, did->cisfile)) return 0; } if (did->match_flags & PCMCIA_DEV_ID_MATCH_ANONYMOUS) { int i; for (i = 0; i < 4; i++) if (dev->prod_id[i]) return 0; if (dev->has_manf_id || dev->has_card_id || dev->has_func_id) return 0; } return 1; } static int pcmcia_bus_match(struct device *dev, struct device_driver *drv) { struct pcmcia_device *p_dev = to_pcmcia_dev(dev); struct pcmcia_driver *p_drv = to_pcmcia_drv(drv); const struct pcmcia_device_id *did = p_drv->id_table; struct pcmcia_dynid *dynid; /* match dynamic devices first */ mutex_lock(&p_drv->dynids.lock); list_for_each_entry(dynid, &p_drv->dynids.list, node) { dev_dbg(dev, "trying to match to %s\n", 
drv->name); if (pcmcia_devmatch(p_dev, &dynid->id)) { dev_dbg(dev, "matched to %s\n", drv->name); mutex_unlock(&p_drv->dynids.lock); return 1; } } mutex_unlock(&p_drv->dynids.lock); while (did && did->match_flags) { dev_dbg(dev, "trying to match to %s\n", drv->name); if (pcmcia_devmatch(p_dev, did)) { dev_dbg(dev, "matched to %s\n", drv->name); return 1; } did++; } return 0; } static int pcmcia_bus_uevent(struct device *dev, struct kobj_uevent_env *env) { struct pcmcia_device *p_dev; int i; u32 hash[4] = { 0, 0, 0, 0}; if (!dev) return -ENODEV; p_dev = to_pcmcia_dev(dev); /* calculate hashes */ for (i = 0; i < 4; i++) { if (!p_dev->prod_id[i]) continue; hash[i] = crc32(0, p_dev->prod_id[i], strlen(p_dev->prod_id[i])); } if (add_uevent_var(env, "SOCKET_NO=%u", p_dev->socket->sock)) return -ENOMEM; if (add_uevent_var(env, "DEVICE_NO=%02X", p_dev->device_no)) return -ENOMEM; if (add_uevent_var(env, "MODALIAS=pcmcia:m%04Xc%04Xf%02Xfn%02Xpfn%02X" "pa%08Xpb%08Xpc%08Xpd%08X", p_dev->has_manf_id ? p_dev->manf_id : 0, p_dev->has_card_id ? p_dev->card_id : 0, p_dev->has_func_id ? p_dev->func_id : 0, p_dev->func, p_dev->device_no, hash[0], hash[1], hash[2], hash[3])) return -ENOMEM; return 0; } /************************ runtime PM support ***************************/ static int pcmcia_dev_suspend(struct device *dev, pm_message_t state); static int pcmcia_dev_resume(struct device *dev); static int runtime_suspend(struct device *dev) { int rc; device_lock(dev); rc = pcmcia_dev_suspend(dev, PMSG_SUSPEND); device_unlock(dev); return rc; } static int runtime_resume(struct device *dev) { int rc; device_lock(dev); rc = pcmcia_dev_resume(dev); device_unlock(dev); return rc; } /************************ per-device sysfs output ***************************/ #define pcmcia_device_attr(field, test, format) \ static ssize_t field##_show (struct device *dev, struct device_attribute *attr, char *buf) \ { \ struct pcmcia_device *p_dev = to_pcmcia_dev(dev); \ return p_dev->test ? 
sprintf(buf, format, p_dev->field) : -ENODEV; \ } #define pcmcia_device_stringattr(name, field) \ static ssize_t name##_show (struct device *dev, struct device_attribute *attr, char *buf) \ { \ struct pcmcia_device *p_dev = to_pcmcia_dev(dev); \ return p_dev->field ? sprintf(buf, "%s\n", p_dev->field) : -ENODEV; \ } pcmcia_device_attr(func, socket, "0x%02x\n"); pcmcia_device_attr(func_id, has_func_id, "0x%02x\n"); pcmcia_device_attr(manf_id, has_manf_id, "0x%04x\n"); pcmcia_device_attr(card_id, has_card_id, "0x%04x\n"); pcmcia_device_stringattr(prod_id1, prod_id[0]); pcmcia_device_stringattr(prod_id2, prod_id[1]); pcmcia_device_stringattr(prod_id3, prod_id[2]); pcmcia_device_stringattr(prod_id4, prod_id[3]); static ssize_t pcmcia_show_resources(struct device *dev, struct device_attribute *attr, char *buf) { struct pcmcia_device *p_dev = to_pcmcia_dev(dev); char *str = buf; int i; for (i = 0; i < PCMCIA_NUM_RESOURCES; i++) str += sprintf(str, "%pr\n", p_dev->resource[i]); return str - buf; } static ssize_t pcmcia_show_pm_state(struct device *dev, struct device_attribute *attr, char *buf) { struct pcmcia_device *p_dev = to_pcmcia_dev(dev); if (p_dev->suspended) return sprintf(buf, "off\n"); else return sprintf(buf, "on\n"); } static ssize_t pcmcia_store_pm_state(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct pcmcia_device *p_dev = to_pcmcia_dev(dev); int ret = 0; if (!count) return -EINVAL; if ((!p_dev->suspended) && !strncmp(buf, "off", 3)) ret = runtime_suspend(dev); else if (p_dev->suspended && !strncmp(buf, "on", 2)) ret = runtime_resume(dev); return ret ? 
ret : count; } static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pcmcia_device *p_dev = to_pcmcia_dev(dev); int i; u32 hash[4] = { 0, 0, 0, 0}; /* calculate hashes */ for (i = 0; i < 4; i++) { if (!p_dev->prod_id[i]) continue; hash[i] = crc32(0, p_dev->prod_id[i], strlen(p_dev->prod_id[i])); } return sprintf(buf, "pcmcia:m%04Xc%04Xf%02Xfn%02Xpfn%02X" "pa%08Xpb%08Xpc%08Xpd%08X\n", p_dev->has_manf_id ? p_dev->manf_id : 0, p_dev->has_card_id ? p_dev->card_id : 0, p_dev->has_func_id ? p_dev->func_id : 0, p_dev->func, p_dev->device_no, hash[0], hash[1], hash[2], hash[3]); } static ssize_t pcmcia_store_allow_func_id_match(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct pcmcia_device *p_dev = to_pcmcia_dev(dev); if (!count) return -EINVAL; mutex_lock(&p_dev->socket->ops_mutex); p_dev->allow_func_id_match = 1; mutex_unlock(&p_dev->socket->ops_mutex); pcmcia_parse_uevents(p_dev->socket, PCMCIA_UEVENT_REQUERY); return count; } static struct device_attribute pcmcia_dev_attrs[] = { __ATTR(function, 0444, func_show, NULL), __ATTR(pm_state, 0644, pcmcia_show_pm_state, pcmcia_store_pm_state), __ATTR(resources, 0444, pcmcia_show_resources, NULL), __ATTR_RO(func_id), __ATTR_RO(manf_id), __ATTR_RO(card_id), __ATTR_RO(prod_id1), __ATTR_RO(prod_id2), __ATTR_RO(prod_id3), __ATTR_RO(prod_id4), __ATTR_RO(modalias), __ATTR(allow_func_id_match, 0200, NULL, pcmcia_store_allow_func_id_match), __ATTR_NULL, }; /* PM support, also needed for reset */ static int pcmcia_dev_suspend(struct device *dev, pm_message_t state) { struct pcmcia_device *p_dev = to_pcmcia_dev(dev); struct pcmcia_driver *p_drv = NULL; int ret = 0; mutex_lock(&p_dev->socket->ops_mutex); if (p_dev->suspended) { mutex_unlock(&p_dev->socket->ops_mutex); return 0; } p_dev->suspended = 1; mutex_unlock(&p_dev->socket->ops_mutex); dev_dbg(dev, "suspending\n"); if (dev->driver) p_drv = to_pcmcia_drv(dev->driver); if (!p_drv) goto out; if 
(p_drv->suspend) { ret = p_drv->suspend(p_dev); if (ret) { dev_printk(KERN_ERR, dev, "pcmcia: device %s (driver %s) did " "not want to go to sleep (%d)\n", p_dev->devname, p_drv->name, ret); mutex_lock(&p_dev->socket->ops_mutex); p_dev->suspended = 0; mutex_unlock(&p_dev->socket->ops_mutex); goto out; } } if (p_dev->device_no == p_dev->func) { dev_dbg(dev, "releasing configuration\n"); pcmcia_release_configuration(p_dev); } out: return ret; } static int pcmcia_dev_resume(struct device *dev) { struct pcmcia_device *p_dev = to_pcmcia_dev(dev); struct pcmcia_driver *p_drv = NULL; int ret = 0; mutex_lock(&p_dev->socket->ops_mutex); if (!p_dev->suspended) { mutex_unlock(&p_dev->socket->ops_mutex); return 0; } p_dev->suspended = 0; mutex_unlock(&p_dev->socket->ops_mutex); dev_dbg(dev, "resuming\n"); if (dev->driver) p_drv = to_pcmcia_drv(dev->driver); if (!p_drv) goto out; if (p_dev->device_no == p_dev->func) { dev_dbg(dev, "requesting configuration\n"); ret = pcmcia_enable_device(p_dev); if (ret) goto out; } if (p_drv->resume) ret = p_drv->resume(p_dev); out: return ret; } static int pcmcia_bus_suspend_callback(struct device *dev, void * _data) { struct pcmcia_socket *skt = _data; struct pcmcia_device *p_dev = to_pcmcia_dev(dev); if (p_dev->socket != skt || p_dev->suspended) return 0; return runtime_suspend(dev); } static int pcmcia_bus_resume_callback(struct device *dev, void * _data) { struct pcmcia_socket *skt = _data; struct pcmcia_device *p_dev = to_pcmcia_dev(dev); if (p_dev->socket != skt || !p_dev->suspended) return 0; runtime_resume(dev); return 0; } static int pcmcia_bus_resume(struct pcmcia_socket *skt) { dev_dbg(&skt->dev, "resuming socket %d\n", skt->sock); bus_for_each_dev(&pcmcia_bus_type, NULL, skt, pcmcia_bus_resume_callback); return 0; } static int pcmcia_bus_suspend(struct pcmcia_socket *skt) { dev_dbg(&skt->dev, "suspending socket %d\n", skt->sock); if (bus_for_each_dev(&pcmcia_bus_type, NULL, skt, pcmcia_bus_suspend_callback)) { 
pcmcia_bus_resume(skt); return -EIO; } return 0; } static int pcmcia_bus_remove(struct pcmcia_socket *skt) { atomic_set(&skt->present, 0); pcmcia_card_remove(skt, NULL); mutex_lock(&skt->ops_mutex); destroy_cis_cache(skt); pcmcia_cleanup_irq(skt); mutex_unlock(&skt->ops_mutex); return 0; } static int pcmcia_bus_add(struct pcmcia_socket *skt) { atomic_set(&skt->present, 1); mutex_lock(&skt->ops_mutex); skt->pcmcia_pfc = 0; destroy_cis_cache(skt); /* to be on the safe side... */ mutex_unlock(&skt->ops_mutex); pcmcia_card_add(skt); return 0; } static int pcmcia_bus_early_resume(struct pcmcia_socket *skt) { if (!verify_cis_cache(skt)) return 0; dev_dbg(&skt->dev, "cis mismatch - different card\n"); /* first, remove the card */ pcmcia_bus_remove(skt); mutex_lock(&skt->ops_mutex); destroy_cis_cache(skt); kfree(skt->fake_cis); skt->fake_cis = NULL; skt->functions = 0; mutex_unlock(&skt->ops_mutex); /* now, add the new card */ pcmcia_bus_add(skt); return 0; } /* * NOTE: This is racy. There's no guarantee the card will still be * physically present, even if the call to this function returns * non-NULL. Furthermore, the device driver most likely is unbound * almost immediately, so the timeframe where pcmcia_dev_present * returns NULL is probably really really small. 
*/ struct pcmcia_device *pcmcia_dev_present(struct pcmcia_device *_p_dev) { struct pcmcia_device *p_dev; struct pcmcia_device *ret = NULL; p_dev = pcmcia_get_dev(_p_dev); if (!p_dev) return NULL; if (atomic_read(&p_dev->socket->present) != 0) ret = p_dev; pcmcia_put_dev(p_dev); return ret; } EXPORT_SYMBOL(pcmcia_dev_present); static struct pcmcia_callback pcmcia_bus_callback = { .owner = THIS_MODULE, .add = pcmcia_bus_add, .remove = pcmcia_bus_remove, .requery = pcmcia_requery, .validate = pccard_validate_cis, .suspend = pcmcia_bus_suspend, .early_resume = pcmcia_bus_early_resume, .resume = pcmcia_bus_resume, }; static int pcmcia_bus_add_socket(struct device *dev, struct class_interface *class_intf) { struct pcmcia_socket *socket = dev_get_drvdata(dev); int ret; socket = pcmcia_get_socket(socket); if (!socket) { dev_printk(KERN_ERR, dev, "PCMCIA obtaining reference to socket failed\n"); return -ENODEV; } ret = sysfs_create_bin_file(&dev->kobj, &pccard_cis_attr); if (ret) { dev_printk(KERN_ERR, dev, "PCMCIA registration failed\n"); pcmcia_put_socket(socket); return ret; } INIT_LIST_HEAD(&socket->devices_list); socket->pcmcia_pfc = 0; socket->device_count = 0; atomic_set(&socket->present, 0); ret = pccard_register_pcmcia(socket, &pcmcia_bus_callback); if (ret) { dev_printk(KERN_ERR, dev, "PCMCIA registration failed\n"); pcmcia_put_socket(socket); return ret; } return 0; } static void pcmcia_bus_remove_socket(struct device *dev, struct class_interface *class_intf) { struct pcmcia_socket *socket = dev_get_drvdata(dev); if (!socket) return; pccard_register_pcmcia(socket, NULL); /* unregister any unbound devices */ mutex_lock(&socket->skt_mutex); pcmcia_card_remove(socket, NULL); release_cis_mem(socket); mutex_unlock(&socket->skt_mutex); sysfs_remove_bin_file(&dev->kobj, &pccard_cis_attr); pcmcia_put_socket(socket); return; } /* the pcmcia_bus_interface is used to handle pcmcia socket devices */ static struct class_interface pcmcia_bus_interface __refdata = { .class = 
&pcmcia_socket_class, .add_dev = &pcmcia_bus_add_socket, .remove_dev = &pcmcia_bus_remove_socket, }; struct bus_type pcmcia_bus_type = { .name = "pcmcia", .uevent = pcmcia_bus_uevent, .match = pcmcia_bus_match, .dev_attrs = pcmcia_dev_attrs, .probe = pcmcia_device_probe, .remove = pcmcia_device_remove, .suspend = pcmcia_dev_suspend, .resume = pcmcia_dev_resume, }; static int __init init_pcmcia_bus(void) { int ret; ret = bus_register(&pcmcia_bus_type); if (ret < 0) { printk(KERN_WARNING "pcmcia: bus_register error: %d\n", ret); return ret; } ret = class_interface_register(&pcmcia_bus_interface); if (ret < 0) { printk(KERN_WARNING "pcmcia: class_interface_register error: %d\n", ret); bus_unregister(&pcmcia_bus_type); return ret; } return 0; } fs_initcall(init_pcmcia_bus); /* one level after subsys_initcall so that * pcmcia_socket_class is already registered */ static void __exit exit_pcmcia_bus(void) { class_interface_unregister(&pcmcia_bus_interface); bus_unregister(&pcmcia_bus_type); } module_exit(exit_pcmcia_bus); MODULE_ALIAS("ds");
gpl-2.0
omnirom/android_kernel_samsung_aries
drivers/power/bq27x00_battery.c
2660
19952
/* * BQ27x00 battery driver * * Copyright (C) 2008 Rodolfo Giometti <giometti@linux.it> * Copyright (C) 2008 Eurotech S.p.A. <info@eurotech.it> * Copyright (C) 2010-2011 Lars-Peter Clausen <lars@metafoo.de> * Copyright (C) 2011 Pali Rohár <pali.rohar@gmail.com> * * Based on a previous work by Copyright (C) 2008 Texas Instruments, Inc. * * This package is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. * */ /* * Datasheets: * http://focus.ti.com/docs/prod/folders/print/bq27000.html * http://focus.ti.com/docs/prod/folders/print/bq27500.html */ #include <linux/module.h> #include <linux/param.h> #include <linux/jiffies.h> #include <linux/workqueue.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/power_supply.h> #include <linux/idr.h> #include <linux/i2c.h> #include <linux/slab.h> #include <asm/unaligned.h> #include <linux/power/bq27x00_battery.h> #define DRIVER_VERSION "1.2.0" #define BQ27x00_REG_TEMP 0x06 #define BQ27x00_REG_VOLT 0x08 #define BQ27x00_REG_AI 0x14 #define BQ27x00_REG_FLAGS 0x0A #define BQ27x00_REG_TTE 0x16 #define BQ27x00_REG_TTF 0x18 #define BQ27x00_REG_TTECP 0x26 #define BQ27x00_REG_NAC 0x0C /* Nominal available capaciy */ #define BQ27x00_REG_LMD 0x12 /* Last measured discharge */ #define BQ27x00_REG_CYCT 0x2A /* Cycle count total */ #define BQ27x00_REG_AE 0x22 /* Available enery */ #define BQ27000_REG_RSOC 0x0B /* Relative State-of-Charge */ #define BQ27000_REG_ILMD 0x76 /* Initial last measured discharge */ #define BQ27000_FLAG_CHGS BIT(7) #define BQ27000_FLAG_FC BIT(5) #define BQ27500_REG_SOC 0x2C #define BQ27500_REG_DCAP 0x3C /* Design capacity */ #define BQ27500_FLAG_DSC BIT(0) #define BQ27500_FLAG_FC 
BIT(9) #define BQ27000_RS 20 /* Resistor sense */ struct bq27x00_device_info; struct bq27x00_access_methods { int (*read)(struct bq27x00_device_info *di, u8 reg, bool single); }; enum bq27x00_chip { BQ27000, BQ27500 }; struct bq27x00_reg_cache { int temperature; int time_to_empty; int time_to_empty_avg; int time_to_full; int charge_full; int cycle_count; int capacity; int flags; int current_now; }; struct bq27x00_device_info { struct device *dev; int id; enum bq27x00_chip chip; struct bq27x00_reg_cache cache; int charge_design_full; unsigned long last_update; struct delayed_work work; struct power_supply bat; struct bq27x00_access_methods bus; struct mutex lock; }; static enum power_supply_property bq27x00_battery_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_VOLTAGE_NOW, POWER_SUPPLY_PROP_CURRENT_NOW, POWER_SUPPLY_PROP_CAPACITY, POWER_SUPPLY_PROP_TEMP, POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW, POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, POWER_SUPPLY_PROP_TIME_TO_FULL_NOW, POWER_SUPPLY_PROP_TECHNOLOGY, POWER_SUPPLY_PROP_CHARGE_FULL, POWER_SUPPLY_PROP_CHARGE_NOW, POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, POWER_SUPPLY_PROP_CYCLE_COUNT, POWER_SUPPLY_PROP_ENERGY_NOW, }; static unsigned int poll_interval = 360; module_param(poll_interval, uint, 0644); MODULE_PARM_DESC(poll_interval, "battery poll interval in seconds - " \ "0 disables polling"); /* * Common code for BQ27x00 devices */ static inline int bq27x00_read(struct bq27x00_device_info *di, u8 reg, bool single) { return di->bus.read(di, reg, single); } /* * Return the battery Relative State-of-Charge * Or < 0 if something fails. 
*/ static int bq27x00_battery_read_rsoc(struct bq27x00_device_info *di) { int rsoc; if (di->chip == BQ27500) rsoc = bq27x00_read(di, BQ27500_REG_SOC, false); else rsoc = bq27x00_read(di, BQ27000_REG_RSOC, true); if (rsoc < 0) dev_err(di->dev, "error reading relative State-of-Charge\n"); return rsoc; } /* * Return a battery charge value in µAh * Or < 0 if something fails. */ static int bq27x00_battery_read_charge(struct bq27x00_device_info *di, u8 reg) { int charge; charge = bq27x00_read(di, reg, false); if (charge < 0) { dev_err(di->dev, "error reading nominal available capacity\n"); return charge; } if (di->chip == BQ27500) charge *= 1000; else charge = charge * 3570 / BQ27000_RS; return charge; } /* * Return the battery Nominal available capaciy in µAh * Or < 0 if something fails. */ static inline int bq27x00_battery_read_nac(struct bq27x00_device_info *di) { return bq27x00_battery_read_charge(di, BQ27x00_REG_NAC); } /* * Return the battery Last measured discharge in µAh * Or < 0 if something fails. */ static inline int bq27x00_battery_read_lmd(struct bq27x00_device_info *di) { return bq27x00_battery_read_charge(di, BQ27x00_REG_LMD); } /* * Return the battery Initial last measured discharge in µAh * Or < 0 if something fails. */ static int bq27x00_battery_read_ilmd(struct bq27x00_device_info *di) { int ilmd; if (di->chip == BQ27500) ilmd = bq27x00_read(di, BQ27500_REG_DCAP, false); else ilmd = bq27x00_read(di, BQ27000_REG_ILMD, true); if (ilmd < 0) { dev_err(di->dev, "error reading initial last measured discharge\n"); return ilmd; } if (di->chip == BQ27500) ilmd *= 1000; else ilmd = ilmd * 256 * 3570 / BQ27000_RS; return ilmd; } /* * Return the battery Cycle count total * Or < 0 if something fails. */ static int bq27x00_battery_read_cyct(struct bq27x00_device_info *di) { int cyct; cyct = bq27x00_read(di, BQ27x00_REG_CYCT, false); if (cyct < 0) dev_err(di->dev, "error reading cycle count total\n"); return cyct; } /* * Read a time register. 
 * Return < 0 if something fails.
 */
static int bq27x00_battery_read_time(struct bq27x00_device_info *di, u8 reg)
{
	int tval;

	tval = bq27x00_read(di, reg, false);
	if (tval < 0) {
		dev_err(di->dev, "error reading register %02x: %d\n", reg, tval);
		return tval;
	}

	/* 0xffff means "not available" for this reading */
	if (tval == 65535)
		return -ENODATA;

	/* Register value is in minutes; return seconds */
	return tval * 60;
}

/*
 * Refresh the register cache from the hardware and notify the power-supply
 * core if any cached value (other than the instantaneous current) changed.
 */
static void bq27x00_update(struct bq27x00_device_info *di)
{
	struct bq27x00_reg_cache cache = {0, };
	bool is_bq27500 = di->chip == BQ27500;

	cache.flags = bq27x00_read(di, BQ27x00_REG_FLAGS, is_bq27500);
	/* A negative flags read marks the whole cache invalid (see get_property) */
	if (cache.flags >= 0) {
		cache.capacity = bq27x00_battery_read_rsoc(di);
		cache.temperature = bq27x00_read(di, BQ27x00_REG_TEMP, false);
		cache.time_to_empty = bq27x00_battery_read_time(di, BQ27x00_REG_TTE);
		cache.time_to_empty_avg = bq27x00_battery_read_time(di, BQ27x00_REG_TTECP);
		cache.time_to_full = bq27x00_battery_read_time(di, BQ27x00_REG_TTF);
		cache.charge_full = bq27x00_battery_read_lmd(di);
		cache.cycle_count = bq27x00_battery_read_cyct(di);

		/* On bq27500 current is read fresh in bq27x00_battery_current() */
		if (!is_bq27500)
			cache.current_now = bq27x00_read(di, BQ27x00_REG_AI, false);

		/* We only have to read charge design full once */
		if (di->charge_design_full <= 0)
			di->charge_design_full = bq27x00_battery_read_ilmd(di);
	}

	/*
	 * Ignore current_now which is a snapshot of the current battery state
	 * and is likely to be different even between two consecutive reads.
	 * NOTE: the "sizeof(cache) - sizeof(int)" trick relies on current_now
	 * being the LAST member of struct bq27x00_reg_cache.
	 */
	if (memcmp(&di->cache, &cache, sizeof(cache) - sizeof(int)) != 0) {
		di->cache = cache;
		power_supply_changed(&di->bat);
	}

	di->last_update = jiffies;
}

/*
 * Delayed-work handler: refresh the cache and, if polling is enabled via the
 * poll_interval module parameter, re-arm ourselves.
 */
static void bq27x00_battery_poll(struct work_struct *work)
{
	struct bq27x00_device_info *di =
		container_of(work, struct bq27x00_device_info, work.work);

	bq27x00_update(di);

	if (poll_interval > 0) {
		/* The timer does not have to be accurate. */
		set_timer_slack(&di->work.timer, poll_interval * HZ / 4);
		schedule_delayed_work(&di->work, poll_interval * HZ);
	}
}

/*
 * Return the battery temperature in tenths of degree Celsius
 * Or < 0 if something fails.
 */
static int bq27x00_battery_temperature(struct bq27x00_device_info *di,
	union power_supply_propval *val)
{
	/* Negative cached value means the last TEMP read failed */
	if (di->cache.temperature < 0)
		return di->cache.temperature;

	/*
	 * Raw units differ per chip (presumably 0.1 K on bq27500 and
	 * 0.25 K on bq27000 — TODO confirm against the datasheets);
	 * both branches convert to tenths of a degree Celsius.
	 */
	if (di->chip == BQ27500)
		val->intval = di->cache.temperature - 2731;
	else
		val->intval = ((di->cache.temperature * 5) - 5463) / 2;

	return 0;
}

/*
 * Return the battery average current in µA
 * Note that current can be negative signed as well
 * Or 0 if something fails.
 */
static int bq27x00_battery_current(struct bq27x00_device_info *di,
	union power_supply_propval *val)
{
	int curr;

	/* bq27500 is read live; bq27000 uses the value cached by update() */
	if (di->chip == BQ27500)
		curr = bq27x00_read(di, BQ27x00_REG_AI, false);
	else
		curr = di->cache.current_now;

	if (curr < 0)
		return curr;

	if (di->chip == BQ27500) {
		/* bq27500 returns signed value */
		val->intval = (int)((s16)curr) * 1000;
	} else {
		/* bq27000 reports magnitude; CHGS flag gives the sign */
		if (di->cache.flags & BQ27000_FLAG_CHGS) {
			dev_dbg(di->dev, "negative current!\n");
			curr = -curr;
		}

		val->intval = curr * 3570 / BQ27000_RS;
	}

	return 0;
}

/*
 * Map the cached FLAGS register onto a POWER_SUPPLY_STATUS_* value.
 * The two chips use different flag bits and polarities.
 */
static int bq27x00_battery_status(struct bq27x00_device_info *di,
	union power_supply_propval *val)
{
	int status;

	if (di->chip == BQ27500) {
		if (di->cache.flags & BQ27500_FLAG_FC)
			status = POWER_SUPPLY_STATUS_FULL;
		else if (di->cache.flags & BQ27500_FLAG_DSC)
			status = POWER_SUPPLY_STATUS_DISCHARGING;
		else
			status = POWER_SUPPLY_STATUS_CHARGING;
	} else {
		if (di->cache.flags & BQ27000_FLAG_FC)
			status = POWER_SUPPLY_STATUS_FULL;
		else if (di->cache.flags & BQ27000_FLAG_CHGS)
			status = POWER_SUPPLY_STATUS_CHARGING;
		else if (power_supply_am_i_supplied(&di->bat))
			/* externally powered but not charging */
			status = POWER_SUPPLY_STATUS_NOT_CHARGING;
		else
			status = POWER_SUPPLY_STATUS_DISCHARGING;
	}

	val->intval = status;

	return 0;
}

/*
 * Return the battery Voltage in millivolts
 * Or < 0 if something fails.
*/ static int bq27x00_battery_voltage(struct bq27x00_device_info *di, union power_supply_propval *val) { int volt; volt = bq27x00_read(di, BQ27x00_REG_VOLT, false); if (volt < 0) return volt; val->intval = volt * 1000; return 0; } /* * Return the battery Available energy in µWh * Or < 0 if something fails. */ static int bq27x00_battery_energy(struct bq27x00_device_info *di, union power_supply_propval *val) { int ae; ae = bq27x00_read(di, BQ27x00_REG_AE, false); if (ae < 0) { dev_err(di->dev, "error reading available energy\n"); return ae; } if (di->chip == BQ27500) ae *= 1000; else ae = ae * 29200 / BQ27000_RS; val->intval = ae; return 0; } static int bq27x00_simple_value(int value, union power_supply_propval *val) { if (value < 0) return value; val->intval = value; return 0; } #define to_bq27x00_device_info(x) container_of((x), \ struct bq27x00_device_info, bat); static int bq27x00_battery_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { int ret = 0; struct bq27x00_device_info *di = to_bq27x00_device_info(psy); mutex_lock(&di->lock); if (time_is_before_jiffies(di->last_update + 5 * HZ)) { cancel_delayed_work_sync(&di->work); bq27x00_battery_poll(&di->work.work); } mutex_unlock(&di->lock); if (psp != POWER_SUPPLY_PROP_PRESENT && di->cache.flags < 0) return -ENODEV; switch (psp) { case POWER_SUPPLY_PROP_STATUS: ret = bq27x00_battery_status(di, val); break; case POWER_SUPPLY_PROP_VOLTAGE_NOW: ret = bq27x00_battery_voltage(di, val); break; case POWER_SUPPLY_PROP_PRESENT: val->intval = di->cache.flags < 0 ? 
0 : 1; break; case POWER_SUPPLY_PROP_CURRENT_NOW: ret = bq27x00_battery_current(di, val); break; case POWER_SUPPLY_PROP_CAPACITY: ret = bq27x00_simple_value(di->cache.capacity, val); break; case POWER_SUPPLY_PROP_TEMP: ret = bq27x00_battery_temperature(di, val); break; case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW: ret = bq27x00_simple_value(di->cache.time_to_empty, val); break; case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG: ret = bq27x00_simple_value(di->cache.time_to_empty_avg, val); break; case POWER_SUPPLY_PROP_TIME_TO_FULL_NOW: ret = bq27x00_simple_value(di->cache.time_to_full, val); break; case POWER_SUPPLY_PROP_TECHNOLOGY: val->intval = POWER_SUPPLY_TECHNOLOGY_LION; break; case POWER_SUPPLY_PROP_CHARGE_NOW: ret = bq27x00_simple_value(bq27x00_battery_read_nac(di), val); break; case POWER_SUPPLY_PROP_CHARGE_FULL: ret = bq27x00_simple_value(di->cache.charge_full, val); break; case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: ret = bq27x00_simple_value(di->charge_design_full, val); break; case POWER_SUPPLY_PROP_CYCLE_COUNT: ret = bq27x00_simple_value(di->cache.cycle_count, val); break; case POWER_SUPPLY_PROP_ENERGY_NOW: ret = bq27x00_battery_energy(di, val); break; default: return -EINVAL; } return ret; } static void bq27x00_external_power_changed(struct power_supply *psy) { struct bq27x00_device_info *di = to_bq27x00_device_info(psy); cancel_delayed_work_sync(&di->work); schedule_delayed_work(&di->work, 0); } static int bq27x00_powersupply_init(struct bq27x00_device_info *di) { int ret; di->bat.type = POWER_SUPPLY_TYPE_BATTERY; di->bat.properties = bq27x00_battery_props; di->bat.num_properties = ARRAY_SIZE(bq27x00_battery_props); di->bat.get_property = bq27x00_battery_get_property; di->bat.external_power_changed = bq27x00_external_power_changed; INIT_DELAYED_WORK(&di->work, bq27x00_battery_poll); mutex_init(&di->lock); ret = power_supply_register(di->dev, &di->bat); if (ret) { dev_err(di->dev, "failed to register battery: %d\n", ret); return ret; } dev_info(di->dev, "support 
ver. %s enabled\n", DRIVER_VERSION); bq27x00_update(di); return 0; } static void bq27x00_powersupply_unregister(struct bq27x00_device_info *di) { cancel_delayed_work_sync(&di->work); power_supply_unregister(&di->bat); mutex_destroy(&di->lock); } /* i2c specific code */ #ifdef CONFIG_BATTERY_BQ27X00_I2C /* If the system has several batteries we need a different name for each * of them... */ static DEFINE_IDR(battery_id); static DEFINE_MUTEX(battery_mutex); static int bq27x00_read_i2c(struct bq27x00_device_info *di, u8 reg, bool single) { struct i2c_client *client = to_i2c_client(di->dev); struct i2c_msg msg[2]; unsigned char data[2]; int ret; if (!client->adapter) return -ENODEV; msg[0].addr = client->addr; msg[0].flags = 0; msg[0].buf = &reg; msg[0].len = sizeof(reg); msg[1].addr = client->addr; msg[1].flags = I2C_M_RD; msg[1].buf = data; if (single) msg[1].len = 1; else msg[1].len = 2; ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg)); if (ret < 0) return ret; if (!single) ret = get_unaligned_le16(data); else ret = data[0]; return ret; } static int bq27x00_battery_probe(struct i2c_client *client, const struct i2c_device_id *id) { char *name; struct bq27x00_device_info *di; int num; int retval = 0; /* Get new ID for the new battery device */ retval = idr_pre_get(&battery_id, GFP_KERNEL); if (retval == 0) return -ENOMEM; mutex_lock(&battery_mutex); retval = idr_get_new(&battery_id, client, &num); mutex_unlock(&battery_mutex); if (retval < 0) return retval; name = kasprintf(GFP_KERNEL, "%s-%d", id->name, num); if (!name) { dev_err(&client->dev, "failed to allocate device name\n"); retval = -ENOMEM; goto batt_failed_1; } di = kzalloc(sizeof(*di), GFP_KERNEL); if (!di) { dev_err(&client->dev, "failed to allocate device info data\n"); retval = -ENOMEM; goto batt_failed_2; } di->id = num; di->dev = &client->dev; di->chip = id->driver_data; di->bat.name = name; di->bus.read = &bq27x00_read_i2c; if (bq27x00_powersupply_init(di)) goto batt_failed_3; 
i2c_set_clientdata(client, di); return 0; batt_failed_3: kfree(di); batt_failed_2: kfree(name); batt_failed_1: mutex_lock(&battery_mutex); idr_remove(&battery_id, num); mutex_unlock(&battery_mutex); return retval; } static int bq27x00_battery_remove(struct i2c_client *client) { struct bq27x00_device_info *di = i2c_get_clientdata(client); bq27x00_powersupply_unregister(di); kfree(di->bat.name); mutex_lock(&battery_mutex); idr_remove(&battery_id, di->id); mutex_unlock(&battery_mutex); kfree(di); return 0; } static const struct i2c_device_id bq27x00_id[] = { { "bq27200", BQ27000 }, /* bq27200 is same as bq27000, but with i2c */ { "bq27500", BQ27500 }, {}, }; MODULE_DEVICE_TABLE(i2c, bq27x00_id); static struct i2c_driver bq27x00_battery_driver = { .driver = { .name = "bq27x00-battery", }, .probe = bq27x00_battery_probe, .remove = bq27x00_battery_remove, .id_table = bq27x00_id, }; static inline int bq27x00_battery_i2c_init(void) { int ret = i2c_add_driver(&bq27x00_battery_driver); if (ret) printk(KERN_ERR "Unable to register BQ27x00 i2c driver\n"); return ret; } static inline void bq27x00_battery_i2c_exit(void) { i2c_del_driver(&bq27x00_battery_driver); } #else static inline int bq27x00_battery_i2c_init(void) { return 0; } static inline void bq27x00_battery_i2c_exit(void) {}; #endif /* platform specific code */ #ifdef CONFIG_BATTERY_BQ27X00_PLATFORM static int bq27000_read_platform(struct bq27x00_device_info *di, u8 reg, bool single) { struct device *dev = di->dev; struct bq27000_platform_data *pdata = dev->platform_data; unsigned int timeout = 3; int upper, lower; int temp; if (!single) { /* Make sure the value has not changed in between reading the * lower and the upper part */ upper = pdata->read(dev, reg + 1); do { temp = upper; if (upper < 0) return upper; lower = pdata->read(dev, reg); if (lower < 0) return lower; upper = pdata->read(dev, reg + 1); } while (temp != upper && --timeout); if (timeout == 0) return -EIO; return (upper << 8) | lower; } return 
pdata->read(dev, reg); } static int __devinit bq27000_battery_probe(struct platform_device *pdev) { struct bq27x00_device_info *di; struct bq27000_platform_data *pdata = pdev->dev.platform_data; int ret; if (!pdata) { dev_err(&pdev->dev, "no platform_data supplied\n"); return -EINVAL; } if (!pdata->read) { dev_err(&pdev->dev, "no hdq read callback supplied\n"); return -EINVAL; } di = kzalloc(sizeof(*di), GFP_KERNEL); if (!di) { dev_err(&pdev->dev, "failed to allocate device info data\n"); return -ENOMEM; } platform_set_drvdata(pdev, di); di->dev = &pdev->dev; di->chip = BQ27000; di->bat.name = pdata->name ?: dev_name(&pdev->dev); di->bus.read = &bq27000_read_platform; ret = bq27x00_powersupply_init(di); if (ret) goto err_free; return 0; err_free: platform_set_drvdata(pdev, NULL); kfree(di); return ret; } static int __devexit bq27000_battery_remove(struct platform_device *pdev) { struct bq27x00_device_info *di = platform_get_drvdata(pdev); bq27x00_powersupply_unregister(di); platform_set_drvdata(pdev, NULL); kfree(di); return 0; } static struct platform_driver bq27000_battery_driver = { .probe = bq27000_battery_probe, .remove = __devexit_p(bq27000_battery_remove), .driver = { .name = "bq27000-battery", .owner = THIS_MODULE, }, }; static inline int bq27x00_battery_platform_init(void) { int ret = platform_driver_register(&bq27000_battery_driver); if (ret) printk(KERN_ERR "Unable to register BQ27000 platform driver\n"); return ret; } static inline void bq27x00_battery_platform_exit(void) { platform_driver_unregister(&bq27000_battery_driver); } #else static inline int bq27x00_battery_platform_init(void) { return 0; } static inline void bq27x00_battery_platform_exit(void) {}; #endif /* * Module stuff */ static int __init bq27x00_battery_init(void) { int ret; ret = bq27x00_battery_i2c_init(); if (ret) return ret; ret = bq27x00_battery_platform_init(); if (ret) bq27x00_battery_i2c_exit(); return ret; } module_init(bq27x00_battery_init); static void __exit 
bq27x00_battery_exit(void) { bq27x00_battery_platform_exit(); bq27x00_battery_i2c_exit(); } module_exit(bq27x00_battery_exit); MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>"); MODULE_DESCRIPTION("BQ27x00 battery monitor driver"); MODULE_LICENSE("GPL");
gpl-2.0
mukulsoni/android_kernel_samsung_ms013g-cm11
drivers/gpu/drm/radeon/radeon_cursor.c
3172
9284
/* * Copyright 2007-8 Advanced Micro Devices, Inc. * Copyright 2008 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Dave Airlie * Alex Deucher */ #include "drmP.h" #include "radeon_drm.h" #include "radeon.h" #define CURSOR_WIDTH 64 #define CURSOR_HEIGHT 64 static void radeon_lock_cursor(struct drm_crtc *crtc, bool lock) { struct radeon_device *rdev = crtc->dev->dev_private; struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); uint32_t cur_lock; if (ASIC_IS_DCE4(rdev)) { cur_lock = RREG32(EVERGREEN_CUR_UPDATE + radeon_crtc->crtc_offset); if (lock) cur_lock |= EVERGREEN_CURSOR_UPDATE_LOCK; else cur_lock &= ~EVERGREEN_CURSOR_UPDATE_LOCK; WREG32(EVERGREEN_CUR_UPDATE + radeon_crtc->crtc_offset, cur_lock); } else if (ASIC_IS_AVIVO(rdev)) { cur_lock = RREG32(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset); if (lock) cur_lock |= AVIVO_D1CURSOR_UPDATE_LOCK; else cur_lock &= ~AVIVO_D1CURSOR_UPDATE_LOCK; WREG32(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset, cur_lock); } else { cur_lock = RREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset); if (lock) cur_lock |= RADEON_CUR_LOCK; else cur_lock &= ~RADEON_CUR_LOCK; WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, cur_lock); } } static void radeon_hide_cursor(struct drm_crtc *crtc) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct radeon_device *rdev = crtc->dev->dev_private; if (ASIC_IS_DCE4(rdev)) { WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset); WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT)); } else if (ASIC_IS_AVIVO(rdev)) { WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset); WREG32(RADEON_MM_DATA, (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT)); } else { switch (radeon_crtc->crtc_id) { case 0: WREG32(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL); break; case 1: WREG32(RADEON_MM_INDEX, RADEON_CRTC2_GEN_CNTL); break; default: return; } WREG32_P(RADEON_MM_DATA, 0, ~RADEON_CRTC_CUR_EN); } } static void radeon_show_cursor(struct drm_crtc *crtc) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct radeon_device *rdev 
= crtc->dev->dev_private; if (ASIC_IS_DCE4(rdev)) { WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset); WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_EN | EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT)); } else if (ASIC_IS_AVIVO(rdev)) { WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset); WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN | (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT)); } else { switch (radeon_crtc->crtc_id) { case 0: WREG32(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL); break; case 1: WREG32(RADEON_MM_INDEX, RADEON_CRTC2_GEN_CNTL); break; default: return; } WREG32_P(RADEON_MM_DATA, (RADEON_CRTC_CUR_EN | (RADEON_CRTC_CUR_MODE_24BPP << RADEON_CRTC_CUR_MODE_SHIFT)), ~(RADEON_CRTC_CUR_EN | RADEON_CRTC_CUR_MODE_MASK)); } } static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj, uint64_t gpu_addr) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct radeon_device *rdev = crtc->dev->dev_private; if (ASIC_IS_DCE4(rdev)) { WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, upper_32_bits(gpu_addr)); WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr & 0xffffffff); } else if (ASIC_IS_AVIVO(rdev)) { if (rdev->family >= CHIP_RV770) { if (radeon_crtc->crtc_id) WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr)); else WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr)); } WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr & 0xffffffff); } else { radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr; /* offset is from DISP(2)_BASE_ADDRESS */ WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset); } } int radeon_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, uint32_t handle, uint32_t width, uint32_t height) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct radeon_device *rdev = crtc->dev->dev_private; 
struct drm_gem_object *obj; struct radeon_bo *robj; uint64_t gpu_addr; int ret; if (!handle) { /* turn off cursor */ radeon_hide_cursor(crtc); obj = NULL; goto unpin; } if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) { DRM_ERROR("bad cursor width or height %d x %d\n", width, height); return -EINVAL; } obj = drm_gem_object_lookup(crtc->dev, file_priv, handle); if (!obj) { DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, radeon_crtc->crtc_id); return -ENOENT; } robj = gem_to_radeon_bo(obj); ret = radeon_bo_reserve(robj, false); if (unlikely(ret != 0)) goto fail; /* Only 27 bit offset for legacy cursor */ ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM, ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, &gpu_addr); radeon_bo_unreserve(robj); if (ret) goto fail; radeon_crtc->cursor_width = width; radeon_crtc->cursor_height = height; radeon_lock_cursor(crtc, true); radeon_set_cursor(crtc, obj, gpu_addr); radeon_show_cursor(crtc); radeon_lock_cursor(crtc, false); unpin: if (radeon_crtc->cursor_bo) { robj = gem_to_radeon_bo(radeon_crtc->cursor_bo); ret = radeon_bo_reserve(robj, false); if (likely(ret == 0)) { radeon_bo_unpin(robj); radeon_bo_unreserve(robj); } drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo); } radeon_crtc->cursor_bo = obj; return 0; fail: drm_gem_object_unreference_unlocked(obj); return ret; } int radeon_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct radeon_device *rdev = crtc->dev->dev_private; int xorigin = 0, yorigin = 0; int w = radeon_crtc->cursor_width; if (ASIC_IS_AVIVO(rdev)) { /* avivo cursor are offset into the total surface */ x += crtc->x; y += crtc->y; } DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); if (x < 0) { xorigin = min(-x, CURSOR_WIDTH - 1); x = 0; } if (y < 0) { yorigin = min(-y, CURSOR_HEIGHT - 1); y = 0; } if (ASIC_IS_AVIVO(rdev)) { int i = 0; struct drm_crtc *crtc_p; /* avivo cursor image can't end on 128 pixel 
boundary or * go past the end of the frame if both crtcs are enabled */ list_for_each_entry(crtc_p, &crtc->dev->mode_config.crtc_list, head) { if (crtc_p->enabled) i++; } if (i > 1) { int cursor_end, frame_end; cursor_end = x - xorigin + w; frame_end = crtc->x + crtc->mode.crtc_hdisplay; if (cursor_end >= frame_end) { w = w - (cursor_end - frame_end); if (!(frame_end & 0x7f)) w--; } else { if (!(cursor_end & 0x7f)) w--; } if (w <= 0) w = 1; } } radeon_lock_cursor(crtc, true); if (ASIC_IS_DCE4(rdev)) { WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y); WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin); WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset, ((w - 1) << 16) | (radeon_crtc->cursor_height - 1)); } else if (ASIC_IS_AVIVO(rdev)) { WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y); WREG32(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin); WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset, ((w - 1) << 16) | (radeon_crtc->cursor_height - 1)); } else { if (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN) y *= 2; WREG32(RADEON_CUR_HORZ_VERT_OFF + radeon_crtc->crtc_offset, (RADEON_CUR_LOCK | (xorigin << 16) | yorigin)); WREG32(RADEON_CUR_HORZ_VERT_POSN + radeon_crtc->crtc_offset, (RADEON_CUR_LOCK | (x << 16) | y)); /* offset is from DISP(2)_BASE_ADDRESS */ WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset + (yorigin * 256))); } radeon_lock_cursor(crtc, false); return 0; }
gpl-2.0
poondog/m7_stock_443
tools/lguest/lguest.c
3428
58966
/*P:100 * This is the Launcher code, a simple program which lays out the "physical" * memory for the new Guest by mapping the kernel image and the virtual * devices, then opens /dev/lguest to tell the kernel about the Guest and * control it. :*/ #define _LARGEFILE64_SOURCE #define _GNU_SOURCE #include <stdio.h> #include <string.h> #include <unistd.h> #include <err.h> #include <stdint.h> #include <stdlib.h> #include <elf.h> #include <sys/mman.h> #include <sys/param.h> #include <sys/types.h> #include <sys/stat.h> #include <sys/wait.h> #include <sys/eventfd.h> #include <fcntl.h> #include <stdbool.h> #include <errno.h> #include <ctype.h> #include <sys/socket.h> #include <sys/ioctl.h> #include <sys/time.h> #include <time.h> #include <netinet/in.h> #include <net/if.h> #include <linux/sockios.h> #include <linux/if_tun.h> #include <sys/uio.h> #include <termios.h> #include <getopt.h> #include <assert.h> #include <sched.h> #include <limits.h> #include <stddef.h> #include <signal.h> #include <pwd.h> #include <grp.h> #include <linux/virtio_config.h> #include <linux/virtio_net.h> #include <linux/virtio_blk.h> #include <linux/virtio_console.h> #include <linux/virtio_rng.h> #include <linux/virtio_ring.h> #include <asm/bootparam.h> #include "../../include/linux/lguest_launcher.h" /*L:110 * We can ignore the 43 include files we need for this program, but I do want * to draw attention to the use of kernel-style types. * * As Linus said, "C is a Spartan language, and so should your naming be." I * like these abbreviations, so we define them here. Note that u64 is always * unsigned long long, which works on all Linux systems: this means that we can * use %llu in printf for any u64. */ typedef unsigned long long u64; typedef uint32_t u32; typedef uint16_t u16; typedef uint8_t u8; /*:*/ #define BRIDGE_PFX "bridge:" #ifndef SIOCBRADDIF #define SIOCBRADDIF 0x89a2 /* add interface to bridge */ #endif /* We can have up to 256 pages for devices. 
*/ #define DEVICE_PAGES 256 /* This will occupy 3 pages: it must be a power of 2. */ #define VIRTQUEUE_NUM 256 /*L:120 * verbose is both a global flag and a macro. The C preprocessor allows * this, and although I wouldn't recommend it, it works quite nicely here. */ static bool verbose; #define verbose(args...) \ do { if (verbose) printf(args); } while(0) /*:*/ /* The pointer to the start of guest memory. */ static void *guest_base; /* The maximum guest physical address allowed, and maximum possible. */ static unsigned long guest_limit, guest_max; /* The /dev/lguest file descriptor. */ static int lguest_fd; /* a per-cpu variable indicating whose vcpu is currently running */ static unsigned int __thread cpu_id; /* This is our list of devices. */ struct device_list { /* Counter to assign interrupt numbers. */ unsigned int next_irq; /* Counter to print out convenient device numbers. */ unsigned int device_num; /* The descriptor page for the devices. */ u8 *descpage; /* A single linked list of devices. */ struct device *dev; /* And a pointer to the last device for easy append. */ struct device *lastdev; }; /* The list of Guest devices, based on command line arguments. */ static struct device_list devices; /* The device structure describes a single device. */ struct device { /* The linked-list pointer. */ struct device *next; /* The device's descriptor, as mapped into the Guest. */ struct lguest_device_desc *desc; /* We can't trust desc values once Guest has booted: we use these. */ unsigned int feature_len; unsigned int num_vq; /* The name of this device, for --verbose. */ const char *name; /* Any queues attached to this device */ struct virtqueue *vq; /* Is it operational */ bool running; /* Device-specific data. */ void *priv; }; /* The virtqueue structure describes a queue attached to a device. */ struct virtqueue { struct virtqueue *next; /* Which device owns me. */ struct device *dev; /* The configuration for this queue. 
*/ struct lguest_vqconfig config; /* The actual ring of buffers. */ struct vring vring; /* Last available index we saw. */ u16 last_avail_idx; /* How many are used since we sent last irq? */ unsigned int pending_used; /* Eventfd where Guest notifications arrive. */ int eventfd; /* Function for the thread which is servicing this virtqueue. */ void (*service)(struct virtqueue *vq); pid_t thread; }; /* Remember the arguments to the program so we can "reboot" */ static char **main_args; /* The original tty settings to restore on exit. */ static struct termios orig_term; /* * We have to be careful with barriers: our devices are all run in separate * threads and so we need to make sure that changes visible to the Guest happen * in precise order. */ #define wmb() __asm__ __volatile__("" : : : "memory") #define mb() __asm__ __volatile__("" : : : "memory") /* * Convert an iovec element to the given type. * * This is a fairly ugly trick: we need to know the size of the type and * alignment requirement to check the pointer is kosher. It's also nice to * have the name of the type in case we report failure. * * Typing those three things all the time is cumbersome and error prone, so we * have a macro which sets them all up and passes to the real function. */ #define convert(iov, type) \ ((type *)_convert((iov), sizeof(type), __alignof__(type), #type)) static void *_convert(struct iovec *iov, size_t size, size_t align, const char *name) { if (iov->iov_len != size) errx(1, "Bad iovec size %zu for %s", iov->iov_len, name); if ((unsigned long)iov->iov_base % align != 0) errx(1, "Bad alignment %p for %s", iov->iov_base, name); return iov->iov_base; } /* Wrapper for the last available index. Makes it easier to change. */ #define lg_last_avail(vq) ((vq)->last_avail_idx) /* * The virtio configuration space is defined to be little-endian. x86 is * little-endian too, but it's nice to be explicit so we have these helpers. 
 */
#define cpu_to_le16(v16) (v16)
#define cpu_to_le32(v32) (v32)
#define cpu_to_le64(v64) (v64)
#define le16_to_cpu(v16) (v16)
#define le32_to_cpu(v32) (v32)
#define le64_to_cpu(v64) (v64)

/* Is this iovec empty? */
static bool iov_empty(const struct iovec iov[], unsigned int num_iov)
{
	unsigned int i;

	for (i = 0; i < num_iov; i++)
		if (iov[i].iov_len)
			return false;
	return true;
}

/* Take len bytes from the front of this iovec. */
static void iov_consume(struct iovec iov[], unsigned num_iov, unsigned len)
{
	unsigned int i;

	for (i = 0; i < num_iov; i++) {
		unsigned int used;

		used = iov[i].iov_len < len ? iov[i].iov_len : len;
		iov[i].iov_base += used;
		iov[i].iov_len -= used;
		len -= used;
	}
	/* Callers must never ask us to consume more than the iovec holds. */
	assert(len == 0);
}

/* The device virtqueue descriptors are followed by feature bitmasks. */
static u8 *get_feature_bits(struct device *dev)
{
	return (u8 *)(dev->desc + 1)
		+ dev->num_vq * sizeof(struct lguest_vqconfig);
}

/*L:100
 * The Launcher code itself takes us out into userspace, that scary place where
 * pointers run wild and free!  Unfortunately, like most userspace programs,
 * it's quite boring (which is why everyone likes to hack on the kernel!).
 * Perhaps if you make up an Lguest Drinking Game at this point, it will get
 * you through this section.  Or, maybe not.
 *
 * The Launcher sets up a big chunk of memory to be the Guest's "physical"
 * memory and stores it in "guest_base".  In other words, Guest physical ==
 * Launcher virtual with an offset.
 *
 * This can be tough to get your head around, but usually it just means that we
 * use these trivial conversion functions when the Guest gives us its
 * "physical" addresses:
 */
static void *from_guest_phys(unsigned long addr)
{
	return guest_base + addr;
}

static unsigned long to_guest_phys(const void *addr)
{
	return (addr - guest_base);
}

/*L:130
 * Loading the Kernel.
 *
 * We start with couple of simple helper routines.  open_or_die() avoids
 * error-checking code cluttering the callers:
 */
static int open_or_die(const char *name, int flags)
{
	int fd = open(name, flags);
	if (fd < 0)
		err(1, "Failed to open %s", name);
	return fd;
}

/* map_zeroed_pages() takes a number of pages. */
static void *map_zeroed_pages(unsigned int num)
{
	int fd = open_or_die("/dev/zero", O_RDONLY);
	void *addr;

	/*
	 * We use a private mapping (ie. if we write to the page, it will be
	 * copied).  We allocate an extra two pages PROT_NONE to act as guard
	 * pages against read/write attempts that exceed allocated space.
	 */
	addr = mmap(NULL, getpagesize() * (num+2),
		    PROT_NONE, MAP_PRIVATE, fd, 0);

	if (addr == MAP_FAILED)
		err(1, "Mmapping %u pages of /dev/zero", num);

	/* Open up the usable pages between the two guard pages. */
	if (mprotect(addr + getpagesize(), getpagesize() * num,
		     PROT_READ|PROT_WRITE) == -1)
		err(1, "mprotect rw %u pages failed", num);

	/*
	 * One neat mmap feature is that you can close the fd, and it
	 * stays mapped.
	 */
	close(fd);

	/* Return address after PROT_NONE page */
	return addr + getpagesize();
}

/* Get some more pages for a device. */
static void *get_pages(unsigned int num)
{
	void *addr = from_guest_phys(guest_limit);

	guest_limit += num * getpagesize();
	if (guest_limit > guest_max)
		errx(1, "Not enough memory for devices");
	return addr;
}

/*
 * This routine is used to load the kernel or initrd.  It tries mmap, but if
 * that fails (Plan 9's kernel file isn't nicely aligned on page boundaries),
 * it falls back to reading the memory in.
 */
static void map_at(int fd, void *addr, unsigned long offset, unsigned long len)
{
	ssize_t r;

	/*
	 * We map writable even though for some segments are marked read-only.
	 * The kernel really wants to be writable: it patches its own
	 * instructions.
	 *
	 * MAP_PRIVATE means that the page won't be copied until a write is
	 * done to it.  This allows us to share untouched memory between
	 * Guests.
	 */
	if (mmap(addr, len, PROT_READ|PROT_WRITE,
		 MAP_FIXED|MAP_PRIVATE, fd, offset) != MAP_FAILED)
		return;

	/* pread does a seek and a read in one shot: saves a few lines. */
	r = pread(fd, addr, len, offset);
	if (r != len)
		err(1, "Reading offset %lu len %lu gave %zi", offset, len, r);
}

/*
 * This routine takes an open vmlinux image, which is in ELF, and maps it into
 * the Guest memory.  ELF = Embedded Linking Format, which is the format used
 * by all modern binaries on Linux including the kernel.
 *
 * The ELF headers give *two* addresses: a physical address, and a virtual
 * address.  We use the physical address; the Guest will map itself to the
 * virtual address.
 *
 * We return the starting address.
 */
static unsigned long map_elf(int elf_fd, const Elf32_Ehdr *ehdr)
{
	Elf32_Phdr phdr[ehdr->e_phnum];
	unsigned int i;

	/*
	 * Sanity checks on the main ELF header: an x86 executable with a
	 * reasonable number of correctly-sized program headers.
	 */
	if (ehdr->e_type != ET_EXEC
	    || ehdr->e_machine != EM_386
	    || ehdr->e_phentsize != sizeof(Elf32_Phdr)
	    || ehdr->e_phnum < 1 || ehdr->e_phnum > 65536U/sizeof(Elf32_Phdr))
		errx(1, "Malformed elf header");

	/*
	 * An ELF executable contains an ELF header and a number of "program"
	 * headers which indicate which parts ("segments") of the program to
	 * load where.
	 */

	/* We read in all the program headers at once: */
	if (lseek(elf_fd, ehdr->e_phoff, SEEK_SET) < 0)
		err(1, "Seeking to program headers");
	if (read(elf_fd, phdr, sizeof(phdr)) != sizeof(phdr))
		err(1, "Reading program headers");

	/*
	 * Try all the headers: there are usually only three.  A read-only one,
	 * a read-write one, and a "note" section which we don't load.
	 */
	for (i = 0; i < ehdr->e_phnum; i++) {
		/* If this isn't a loadable segment, we ignore it */
		if (phdr[i].p_type != PT_LOAD)
			continue;

		verbose("Section %i: size %i addr %p\n",
			i, phdr[i].p_memsz, (void *)phdr[i].p_paddr);

		/* We map this section of the file at its physical address. */
		map_at(elf_fd, from_guest_phys(phdr[i].p_paddr),
		       phdr[i].p_offset, phdr[i].p_filesz);
	}

	/* The entry point is given in the ELF header. */
	return ehdr->e_entry;
}

/*L:150
 * A bzImage, unlike an ELF file, is not meant to be loaded.  You're supposed
 * to jump into it and it will unpack itself.  We used to have to perform some
 * hairy magic because the unpacking code scared me.
 *
 * Fortunately, Jeremy Fitzhardinge convinced me it wasn't that hard and wrote
 * a small patch to jump over the tricky bits in the Guest, so now we just read
 * the funky header so we know where in the file to load, and away we go!
 */
static unsigned long load_bzimage(int fd)
{
	struct boot_params boot;
	int r;
	/* Modern bzImages get loaded at 1M. */
	void *p = from_guest_phys(0x100000);

	/*
	 * Go back to the start of the file and read the header.  It should be
	 * a Linux boot header (see Documentation/x86/boot.txt)
	 */
	lseek(fd, 0, SEEK_SET);
	/*
	 * NOTE(review): read() result is unchecked here; a short read would
	 * only be caught indirectly by the "HdrS" magic check below.
	 */
	read(fd, &boot, sizeof(boot));

	/* Inside the setup_hdr, we expect the magic "HdrS" */
	if (memcmp(&boot.hdr.header, "HdrS", 4) != 0)
		errx(1, "This doesn't look like a bzImage to me");

	/* Skip over the extra sectors of the header. */
	lseek(fd, (boot.hdr.setup_sects+1) * 512, SEEK_SET);

	/* Now read everything into memory. in nice big chunks. */
	while ((r = read(fd, p, 65536)) > 0)
		p += r;

	/* Finally, code32_start tells us where to enter the kernel. */
	return boot.hdr.code32_start;
}

/*L:140
 * Loading the kernel is easy when it's a "vmlinux", but most kernels
 * come wrapped up in the self-decompressing "bzImage" format.  With a little
 * work, we can load those, too.
 */
static unsigned long load_kernel(int fd)
{
	Elf32_Ehdr hdr;

	/* Read in the first few bytes. */
	if (read(fd, &hdr, sizeof(hdr)) != sizeof(hdr))
		err(1, "Reading kernel");

	/* If it's an ELF file, it starts with "\177ELF" */
	if (memcmp(hdr.e_ident, ELFMAG, SELFMAG) == 0)
		return map_elf(fd, &hdr);

	/* Otherwise we assume it's a bzImage, and try to load it. */
	return load_bzimage(fd);
}

/*
 * This is a trivial little helper to align pages.  Andi Kleen hated it because
 * it calls getpagesize() twice: "it's dumb code."
 *
 * Kernel guys get really het up about optimization, even when it's not
 * necessary.  I leave this code as a reaction against that.
 */
static inline unsigned long page_align(unsigned long addr)
{
	/* Add upwards and truncate downwards. */
	return ((addr + getpagesize()-1) & ~(getpagesize()-1));
}

/*L:180
 * An "initial ram disk" is a disk image loaded into memory along with the
 * kernel which the kernel can use to boot from without needing any drivers.
 * Most distributions now use this as standard: the initrd contains the code to
 * load the appropriate driver modules for the current machine.
 *
 * Importantly, James Morris works for RedHat, and Fedora uses initrds for its
 * kernels.  He sent me this (and tells me when I break it).
 */
static unsigned long load_initrd(const char *name, unsigned long mem)
{
	int ifd;
	struct stat st;
	unsigned long len;

	ifd = open_or_die(name, O_RDONLY);
	/* fstat() is needed to get the file size. */
	if (fstat(ifd, &st) < 0)
		err(1, "fstat() on initrd '%s'", name);

	/*
	 * We map the initrd at the top of memory, but mmap wants it to be
	 * page-aligned, so we round the size up for that.
	 */
	len = page_align(st.st_size);
	map_at(ifd, from_guest_phys(mem - len), 0, st.st_size);
	/*
	 * Once a file is mapped, you can close the file descriptor.  It's a
	 * little odd, but quite useful.
	 */
	close(ifd);
	verbose("mapped initrd %s size=%lu @ %p\n", name, len, (void*)mem-len);

	/* We return the initrd size. */
	return len;
}
/*:*/

/*
 * Simple routine to roll all the commandline arguments together with spaces
 * between them.
 *
 * NOTE(review): dst is written unbounded (strcpy/strcat); the caller must
 * size dst for the joined arguments plus the trailing NUL.
 */
static void concat(char *dst, char *args[])
{
	unsigned int i, len = 0;

	for (i = 0; args[i]; i++) {
		if (i) {
			strcat(dst+len, " ");
			len++;
		}
		strcpy(dst+len, args[i]);
		len += strlen(args[i]);
	}
	/* In case it's empty. */
	dst[len] = '\0';
}

/*L:185
 * This is where we actually tell the kernel to initialize the Guest.  We
 * saw the arguments it expects when we looked at initialize() in lguest_user.c:
 * the base of Guest "physical" memory, the top physical page to allow and the
 * entry point for the Guest.
 */
static void tell_kernel(unsigned long start)
{
	unsigned long args[] = { LHREQ_INITIALIZE,
				 (unsigned long)guest_base,
				 guest_limit / getpagesize(), start };
	verbose("Guest: %p - %p (%#lx)\n",
		guest_base, guest_base + guest_limit, guest_limit);
	lguest_fd = open_or_die("/dev/lguest", O_RDWR);
	if (write(lguest_fd, args, sizeof(args)) < 0)
		err(1, "Writing to /dev/lguest");
}
/*:*/

/*L:200
 * Device Handling.
 *
 * When the Guest gives us a buffer, it sends an array of addresses and sizes.
 * We need to make sure it's not trying to reach into the Launcher itself, so
 * we have a convenient routine which checks it and exits with an error message
 * if something funny is going on:
 */
static void *_check_pointer(unsigned long addr,
			    unsigned int size,
			    unsigned int line)
{
	/*
	 * Check if the requested address and size exceeds the allocated memory,
	 * or addr + size wraps around.
	 */
	if ((addr + size) > guest_limit || (addr + size) < addr)
		errx(1, "%s:%i: Invalid address %#lx", __FILE__, line, addr);
	/*
	 * We return a pointer for the caller's convenience, now we know it's
	 * safe to use.
	 */
	return from_guest_phys(addr);
}
/* A macro which transparently hands the line number to the real function. */
#define check_pointer(addr,size) _check_pointer(addr, size, __LINE__)

/*
 * Each buffer in the virtqueues is actually a chain of descriptors.  This
 * function returns the next descriptor in the chain, or vq->vring.num if we're
 * at the end.
 */
static unsigned next_desc(struct vring_desc *desc,
			  unsigned int i, unsigned int max)
{
	unsigned int next;

	/* If this descriptor says it doesn't chain, we're done. */
	if (!(desc[i].flags & VRING_DESC_F_NEXT))
		return max;

	/* Check they're not leading us off end of descriptors. */
	next = desc[i].next;
	/* Make sure compiler knows to grab that: we don't want it changing! */
	wmb();

	if (next >= max)
		errx(1, "Desc next is %u", next);

	return next;
}

/*
 * This actually sends the interrupt for this virtqueue, if we've used a
 * buffer.
 */
static void trigger_irq(struct virtqueue *vq)
{
	unsigned long buf[] = { LHREQ_IRQ, vq->config.irq };

	/* Don't inform them if nothing used. */
	if (!vq->pending_used)
		return;
	vq->pending_used = 0;

	/* If they don't want an interrupt, don't send one... */
	if (vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT) {
		return;
	}

	/* Send the Guest an interrupt tell them we used something up. */
	if (write(lguest_fd, buf, sizeof(buf)) != 0)
		err(1, "Triggering irq %i", vq->config.irq);
}

/*
 * This looks in the virtqueue for the first available buffer, and converts
 * it to an iovec for convenient access.  Since descriptors consist of some
 * number of output then some number of input descriptors, it's actually two
 * iovecs, but we pack them into one and note how many of each there were.
 *
 * This function waits if necessary, and returns the descriptor number found.
 */
static unsigned wait_for_vq_desc(struct virtqueue *vq,
				 struct iovec iov[],
				 unsigned int *out_num, unsigned int *in_num)
{
	unsigned int i, head, max;
	struct vring_desc *desc;
	u16 last_avail = lg_last_avail(vq);

	/* There's nothing available? */
	while (last_avail == vq->vring.avail->idx) {
		u64 event;

		/*
		 * Since we're about to sleep, now is a good time to tell the
		 * Guest about what we've used up to now.
		 */
		trigger_irq(vq);

		/* OK, now we need to know about added descriptors. */
		vq->vring.used->flags &= ~VRING_USED_F_NO_NOTIFY;

		/*
		 * They could have slipped one in as we were doing that: make
		 * sure it's written, then check again.
		 */
		mb();
		if (last_avail != vq->vring.avail->idx) {
			vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY;
			break;
		}

		/* Nothing new?  Wait for eventfd to tell us they refilled. */
		if (read(vq->eventfd, &event, sizeof(event)) != sizeof(event))
			errx(1, "Event read failed?");

		/* We don't need to be notified again. */
		vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY;
	}

	/* Check it isn't doing very strange things with descriptor numbers. */
	if ((u16)(vq->vring.avail->idx - last_avail) > vq->vring.num)
		errx(1, "Guest moved used index from %u to %u",
		     last_avail, vq->vring.avail->idx);

	/*
	 * Grab the next descriptor number they're advertising, and increment
	 * the index we've seen.
	 */
	head = vq->vring.avail->ring[last_avail % vq->vring.num];
	lg_last_avail(vq)++;

	/* If their number is silly, that's a fatal mistake. */
	if (head >= vq->vring.num)
		errx(1, "Guest says index %u is available", head);

	/* When we start there are none of either input nor output. */
	*out_num = *in_num = 0;

	max = vq->vring.num;
	desc = vq->vring.desc;
	i = head;

	/*
	 * If this is an indirect entry, then this buffer contains a descriptor
	 * table which we handle as if it's any normal descriptor chain.
	 */
	if (desc[i].flags & VRING_DESC_F_INDIRECT) {
		if (desc[i].len % sizeof(struct vring_desc))
			errx(1, "Invalid size for indirect buffer table");

		max = desc[i].len / sizeof(struct vring_desc);
		desc = check_pointer(desc[i].addr, desc[i].len);
		i = 0;
	}

	do {
		/* Grab the first descriptor, and check it's OK. */
		iov[*out_num + *in_num].iov_len = desc[i].len;
		iov[*out_num + *in_num].iov_base
			= check_pointer(desc[i].addr, desc[i].len);
		/* If this is an input descriptor, increment that count. */
		if (desc[i].flags & VRING_DESC_F_WRITE)
			(*in_num)++;
		else {
			/*
			 * If it's an output descriptor, they're all supposed
			 * to come before any input descriptors.
			 */
			if (*in_num)
				errx(1, "Descriptor has out after in");
			(*out_num)++;
		}

		/* If we've got too many, that implies a descriptor loop. */
		if (*out_num + *in_num > max)
			errx(1, "Looped descriptor");
	} while ((i = next_desc(desc, i, max)) != max);

	return head;
}

/*
 * After we've used one of their buffers, we tell the Guest about it.  Sometime
 * later we'll want to send them an interrupt using trigger_irq(); note that
 * wait_for_vq_desc() does that for us if it has to wait.
 */
static void add_used(struct virtqueue *vq, unsigned int head, int len)
{
	struct vring_used_elem *used;

	/*
	 * The virtqueue contains a ring of used buffers.  Get a pointer to the
	 * next entry in that used ring.
	 */
	used = &vq->vring.used->ring[vq->vring.used->idx % vq->vring.num];
	used->id = head;
	used->len = len;
	/* Make sure buffer is written before we update index. */
	wmb();
	vq->vring.used->idx++;
	vq->pending_used++;
}

/* And here's the combo meal deal.  Supersize me! */
static void add_used_and_trigger(struct virtqueue *vq, unsigned head, int len)
{
	add_used(vq, head, len);
	trigger_irq(vq);
}

/*
 * The Console
 *
 * We associate some data with the console for our exit hack.
 */
struct console_abort {
	/* How many times have they hit ^C? */
	int count;
	/* When did they start? */
	struct timeval start;
};

/* This is the routine which handles console input (ie. stdin). */
static void console_input(struct virtqueue *vq)
{
	int len;
	unsigned int head, in_num, out_num;
	struct console_abort *abort = vq->dev->priv;
	struct iovec iov[vq->vring.num];

	/* Make sure there's a descriptor available. */
	head = wait_for_vq_desc(vq, iov, &out_num, &in_num);
	if (out_num)
		errx(1, "Output buffers in console in queue?");

	/* Read into it.  This is where we usually wait. */
	len = readv(STDIN_FILENO, iov, in_num);
	if (len <= 0) {
		/* Ran out of input? */
		warnx("Failed to get console input, ignoring console.");
		/*
		 * For simplicity, dying threads kill the whole Launcher.  So
		 * just nap here.
		 */
		for (;;)
			pause();
	}

	/* Tell the Guest we used a buffer. */
	add_used_and_trigger(vq, head, len);

	/*
	 * Three ^C within one second?  Exit.
	 *
	 * This is such a hack, but works surprisingly well.  Each ^C has to
	 * be in a buffer by itself, so they can't be too fast.  But we check
	 * that we get three within about a second, so they can't be too
	 * slow.
	 */
	if (len != 1 || ((char *)iov[0].iov_base)[0] != 3) {
		abort->count = 0;
		return;
	}

	abort->count++;
	if (abort->count == 1)
		gettimeofday(&abort->start, NULL);
	else if (abort->count == 3) {
		struct timeval now;
		gettimeofday(&now, NULL);
		/* Kill all Launcher processes with SIGINT, like normal ^C */
		if (now.tv_sec <= abort->start.tv_sec+1)
			kill(0, SIGINT);
		abort->count = 0;
	}
}

/* This is the routine which handles console output (ie. stdout). */
static void console_output(struct virtqueue *vq)
{
	unsigned int head, out, in;
	struct iovec iov[vq->vring.num];

	/* We usually wait in here, for the Guest to give us something. */
	head = wait_for_vq_desc(vq, iov, &out, &in);
	if (in)
		errx(1, "Input buffers in console output queue?");

	/* writev can return a partial write, so we loop here. */
	while (!iov_empty(iov, out)) {
		int len = writev(STDOUT_FILENO, iov, out);
		if (len <= 0) {
			warn("Write to stdout gave %i (%d)", len, errno);
			break;
		}
		iov_consume(iov, out, len);
	}

	/*
	 * We're finished with that buffer: if we're going to sleep,
	 * wait_for_vq_desc() will prod the Guest with an interrupt.
	 */
	add_used(vq, head, 0);
}

/*
 * The Network
 *
 * Handling output for network is also simple: we get all the output buffers
 * and write them to /dev/net/tun.
 */
struct net_info {
	/* File descriptor for the tap device this Guest's NIC talks to. */
	int tunfd;
};

static void net_output(struct virtqueue *vq)
{
	struct net_info *net_info = vq->dev->priv;
	unsigned int head, out, in;
	struct iovec iov[vq->vring.num];

	/* We usually wait in here for the Guest to give us a packet. */
	head = wait_for_vq_desc(vq, iov, &out, &in);
	if (in)
		errx(1, "Input buffers in net output queue?");
	/*
	 * Send the whole thing through to /dev/net/tun.  It expects the exact
	 * same format: what a coincidence!
	 */
	if (writev(net_info->tunfd, iov, out) < 0)
		warnx("Write to tun failed (%d)?", errno);

	/*
	 * Done with that one; wait_for_vq_desc() will send the interrupt if
	 * all packets are processed.
	 */
	add_used(vq, head, 0);
}

/*
 * Handling network input is a bit trickier, because I've tried to optimize it.
 *
 * First we have a helper routine which tells us if reading from this file
 * descriptor (ie. the /dev/net/tun device) will block:
 */
static bool will_block(int fd)
{
	fd_set fdset;
	struct timeval zero = { 0, 0 };
	FD_ZERO(&fdset);
	FD_SET(fd, &fdset);
	return select(fd+1, &fdset, NULL, NULL, &zero) != 1;
}

/*
 * This handles packets coming in from the tun device to our Guest.  Like all
 * service routines, it gets called again as soon as it returns, so you don't
 * see a while(1) loop here.
 */
static void net_input(struct virtqueue *vq)
{
	int len;
	unsigned int head, out, in;
	struct iovec iov[vq->vring.num];
	struct net_info *net_info = vq->dev->priv;

	/*
	 * Get a descriptor to write an incoming packet into.  This will also
	 * send an interrupt if they're out of descriptors.
	 */
	head = wait_for_vq_desc(vq, iov, &out, &in);
	if (out)
		errx(1, "Output buffers in net input queue?");

	/*
	 * If it looks like we'll block reading from the tun device, send them
	 * an interrupt.
	 */
	if (vq->pending_used && will_block(net_info->tunfd))
		trigger_irq(vq);

	/*
	 * Read in the packet.  This is where we normally wait (when there's no
	 * incoming network traffic).
	 */
	len = readv(net_info->tunfd, iov, in);
	if (len <= 0)
		warn("Failed to read from tun (%d).", errno);

	/*
	 * Mark that packet buffer as used, but don't interrupt here.  We want
	 * to wait until we've done as much work as we can.
	 */
	add_used(vq, head, len);
}
/*:*/

/* This is the helper to create threads: run the service routine in a loop. */
static int do_thread(void *_vq)
{
	struct virtqueue *vq = _vq;

	for (;;)
		vq->service(vq);
	return 0;
}

/*
 * When a child dies, we kill our entire process group with SIGTERM.  This
 * also has the side effect that the shell restores the console for us!
 */
static void kill_launcher(int signal)
{
	kill(0, SIGTERM);
}

static void reset_device(struct device *dev)
{
	struct virtqueue *vq;

	verbose("Resetting device %s\n", dev->name);

	/* Clear any features they've acked. */
	memset(get_feature_bits(dev) + dev->feature_len, 0, dev->feature_len);

	/* We're going to be explicitly killing threads, so ignore them. */
	signal(SIGCHLD, SIG_IGN);

	/* Zero out the virtqueues, get rid of their threads */
	for (vq = dev->vq; vq; vq = vq->next) {
		if (vq->thread != (pid_t)-1) {
			kill(vq->thread, SIGTERM);
			waitpid(vq->thread, NULL, 0);
			vq->thread = (pid_t)-1;
		}
		memset(vq->vring.desc, 0,
		       vring_size(vq->config.num, LGUEST_VRING_ALIGN));
		lg_last_avail(vq) = 0;
	}
	dev->running = false;

	/* Now we care if threads die. */
	signal(SIGCHLD, (void *)kill_launcher);
}

/*L:216
 * This actually creates the thread which services the virtqueue for a device.
 */
static void create_thread(struct virtqueue *vq)
{
	/*
	 * Create stack for thread.  The stack grows downwards on x86, so
	 * clone() wants the *topmost* address of the region: stack + 32768.
	 */
	char *stack = malloc(32768);
	unsigned long args[] = { LHREQ_EVENTFD,
				 vq->config.pfn*getpagesize(), 0 };

	/* Create a zero-initialized eventfd. */
	vq->eventfd = eventfd(0, 0);
	if (vq->eventfd < 0)
		err(1, "Creating eventfd");
	args[2] = vq->eventfd;

	/*
	 * Attach an eventfd to this virtqueue: it will go off when the Guest
	 * does an LHCALL_NOTIFY for this vq.
	 */
	if (write(lguest_fd, &args, sizeof(args)) != 0)
		err(1, "Attaching eventfd");

	/*
	 * CLONE_VM: because it has to access the Guest memory, and SIGCHLD so
	 * we get a signal if it dies.
	 */
	vq->thread = clone(do_thread, stack + 32768, CLONE_VM | SIGCHLD, vq);
	if (vq->thread == (pid_t)-1)
		err(1, "Creating clone");

	/* We close our local copy now the child has it. */
	close(vq->eventfd);
}

static void start_device(struct device *dev)
{
	unsigned int i;
	struct virtqueue *vq;

	verbose("Device %s OK: offered", dev->name);
	for (i = 0; i < dev->feature_len; i++)
		verbose(" %02x", get_feature_bits(dev)[i]);
	verbose(", accepted");
	for (i = 0; i < dev->feature_len; i++)
		verbose(" %02x", get_feature_bits(dev)
			[dev->feature_len+i]);

	for (vq = dev->vq; vq; vq = vq->next) {
		if (vq->service)
			create_thread(vq);
	}
	dev->running = true;
}

static void cleanup_devices(void)
{
	struct device *dev;

	for (dev = devices.dev; dev; dev = dev->next)
		reset_device(dev);

	/* If we saved off the original terminal settings, restore them now. */
	if (orig_term.c_lflag & (ISIG|ICANON|ECHO))
		tcsetattr(STDIN_FILENO, TCSANOW, &orig_term);
}

/* When the Guest tells us they updated the status field, we handle it. */
static void update_device_status(struct device *dev)
{
	/* A zero status is a reset, otherwise it's a set of flags. */
	if (dev->desc->status == 0)
		reset_device(dev);
	else if (dev->desc->status & VIRTIO_CONFIG_S_FAILED) {
		warnx("Device %s configuration FAILED", dev->name);
		if (dev->running)
			reset_device(dev);
	} else {
		if (dev->running)
			err(1, "Device %s features finalized twice", dev->name);
		start_device(dev);
	}
}

/*L:215
 * This is the generic routine we call when the Guest uses LHCALL_NOTIFY.  In
 * particular, it's used to notify us of device status changes during boot.
 */
static void handle_output(unsigned long addr)
{
	struct device *i;

	/* Check each device. */
	for (i = devices.dev; i; i = i->next) {
		struct virtqueue *vq;

		/*
		 * Notifications to device descriptors mean they updated the
		 * device status.
		 */
		if (from_guest_phys(addr) == i->desc) {
			update_device_status(i);
			return;
		}

		/* Devices should not be used before features are finalized. */
		for (vq = i->vq; vq; vq = vq->next) {
			if (addr != vq->config.pfn*getpagesize())
				continue;
			errx(1, "Notification on %s before setup!", i->name);
		}
	}

	/*
	 * Early console write is done using notify on a nul-terminated string
	 * in Guest memory.  It's also great for hacking debugging messages
	 * into a Guest.
	 */
	if (addr >= guest_limit)
		errx(1, "Bad NOTIFY %#lx", addr);

	/* NOTE(review): best-effort debug output; write() result is ignored. */
	write(STDOUT_FILENO, from_guest_phys(addr),
	      strnlen(from_guest_phys(addr), guest_limit - addr));
}

/*L:190
 * Device Setup
 *
 * All devices need a descriptor so the Guest knows it exists, and a "struct
 * device" so the Launcher can keep track of it.  We have common helper
 * routines to allocate and manage them.
 */

/*
 * The layout of the device page is a "struct lguest_device_desc" followed by a
 * number of virtqueue descriptors, then two sets of feature bits, then an
 * array of configuration bytes.  This routine returns the configuration
 * pointer.
 */
static u8 *device_config(const struct device *dev)
{
	return (void *)(dev->desc + 1)
		+ dev->num_vq * sizeof(struct lguest_vqconfig)
		+ dev->feature_len * 2;
}

/*
 * This routine allocates a new "struct lguest_device_desc" from descriptor
 * table page just above the Guest's normal memory.  It returns a pointer to
 * that descriptor.
 */
static struct lguest_device_desc *new_dev_desc(u16 type)
{
	struct lguest_device_desc d = { .type = type };
	void *p;

	/* Figure out where the next device config is, based on the last one. */
	if (devices.lastdev)
		p = device_config(devices.lastdev)
			+ devices.lastdev->desc->config_len;
	else
		p = devices.descpage;

	/* We only have one page for all the descriptors. */
	if (p + sizeof(d) > (void *)devices.descpage + getpagesize())
		errx(1, "Too many devices");

	/* p might not be aligned, so we memcpy in. */
	return memcpy(p, &d, sizeof(d));
}

/*
 * Each device descriptor is followed by the description of its virtqueues.  We
 * specify how many descriptors the virtqueue is to have.
 */
static void add_virtqueue(struct device *dev, unsigned int num_descs,
			  void (*service)(struct virtqueue *))
{
	unsigned int pages;
	struct virtqueue **i, *vq = malloc(sizeof(*vq));
	void *p;

	/* First we need some memory for this virtqueue. */
	pages = (vring_size(num_descs, LGUEST_VRING_ALIGN) + getpagesize() - 1)
		/ getpagesize();
	p = get_pages(pages);

	/* Initialize the virtqueue */
	vq->next = NULL;
	vq->last_avail_idx = 0;
	vq->dev = dev;

	/*
	 * This is the routine the service thread will run, and its Process ID
	 * once it's running.
	 */
	vq->service = service;
	vq->thread = (pid_t)-1;

	/* Initialize the configuration. */
	vq->config.num = num_descs;
	vq->config.irq = devices.next_irq++;
	vq->config.pfn = to_guest_phys(p) / getpagesize();

	/* Initialize the vring. */
	vring_init(&vq->vring, num_descs, p, LGUEST_VRING_ALIGN);

	/*
	 * Append virtqueue to this device's descriptor.  We use
	 * device_config() to get the end of the device's current virtqueues;
	 * we check that we haven't added any config or feature information
	 * yet, otherwise we'd be overwriting them.
	 */
	assert(dev->desc->config_len == 0 && dev->desc->feature_len == 0);
	memcpy(device_config(dev), &vq->config, sizeof(vq->config));
	dev->num_vq++;
	dev->desc->num_vq++;

	verbose("Virtqueue page %#lx\n", to_guest_phys(p));

	/*
	 * Add to tail of list, so dev->vq is first vq, dev->vq->next is
	 * second.
	 */
	for (i = &dev->vq; *i; i = &(*i)->next);
	*i = vq;
}

/*
 * The first half of the feature bitmask is for us to advertise features.  The
 * second half is for the Guest to accept features.
 */
static void add_feature(struct device *dev, unsigned bit)
{
	u8 *features = get_feature_bits(dev);

	/* We can't extend the feature bits once we've added config bytes */
	if (dev->desc->feature_len <= bit / CHAR_BIT) {
		assert(dev->desc->config_len == 0);
		dev->feature_len = dev->desc->feature_len = (bit/CHAR_BIT) + 1;
	}

	features[bit / CHAR_BIT] |= (1 << (bit % CHAR_BIT));
}

/*
 * This routine sets the configuration fields for an existing device's
 * descriptor.  It only works for the last device, but that's OK because that's
 * how we use it.
 */
static void set_config(struct device *dev, unsigned len, const void *conf)
{
	/* Check we haven't overflowed our single page. */
	if (device_config(dev) + len > devices.descpage + getpagesize())
		errx(1, "Too many devices");

	/* Copy in the config information, and store the length. */
	memcpy(device_config(dev), conf, len);
	dev->desc->config_len = len;

	/* Size must fit in config_len field (8 bits)! */
	assert(dev->desc->config_len == len);
}

/*
 * This routine does all the creation and setup of a new device, including
 * calling new_dev_desc() to allocate the descriptor and device memory.  We
 * don't actually start the service threads until later.
 *
 * See what I mean about userspace being boring?
 */
static struct device *new_device(const char *name, u16 type)
{
	struct device *dev = malloc(sizeof(*dev));

	/* Now we populate the fields one at a time. */
	dev->desc = new_dev_desc(type);
	dev->name = name;
	dev->vq = NULL;
	dev->feature_len = 0;
	dev->num_vq = 0;
	dev->running = false;

	/*
	 * Append to device list.  Prepending to a single-linked list is
	 * easier, but the user expects the devices to be arranged on the bus
	 * in command-line order.  The first network device on the command line
	 * is eth0, the first block device /dev/vda, etc.
	 */
	if (devices.lastdev)
		devices.lastdev->next = dev;
	else
		devices.dev = dev;
	devices.lastdev = dev;

	return dev;
}

/*
 * Our first setup routine is the console.  It's a fairly simple device, but
 * UNIX tty handling makes it uglier than it could be.
 */
static void setup_console(void)
{
	struct device *dev;

	/* If we can save the initial standard input settings... */
	if (tcgetattr(STDIN_FILENO, &orig_term) == 0) {
		struct termios term = orig_term;
		/*
		 * Then we turn off echo, line buffering and ^C etc: We want a
		 * raw input stream to the Guest.
		 */
		term.c_lflag &= ~(ISIG|ICANON|ECHO);
		tcsetattr(STDIN_FILENO, TCSANOW, &term);
	}

	dev = new_device("console", VIRTIO_ID_CONSOLE);

	/* We store the console state in dev->priv, and initialize it. */
	dev->priv = malloc(sizeof(struct console_abort));
	((struct console_abort *)dev->priv)->count = 0;

	/*
	 * The console needs two virtqueues: the input then the output.  When
	 * they put something the input queue, we make sure we're listening to
	 * stdin.  When they put something in the output queue, we write it to
	 * stdout.
	 */
	add_virtqueue(dev, VIRTQUEUE_NUM, console_input);
	add_virtqueue(dev, VIRTQUEUE_NUM, console_output);

	verbose("device %u: console\n", ++devices.device_num);
}
/*:*/

/*M:010
 * Inter-guest networking is an interesting area.  Simplest is to have a
 * --sharenet=<name> option which opens or creates a named pipe.  This can be
 * used to send packets to another guest in a 1:1 manner.
 *
 * More sophisticated is to use one of the tools developed for project like UML
 * to do networking.
 *
 * Faster is to do virtio bonding in kernel.  Doing this 1:1 would be
 * completely generic ("here's my vring, attach to your vring") and would work
 * for any traffic.  Of course, namespace and permissions issues need to be
 * dealt with.  A more sophisticated "multi-channel" virtio_net.c could hide
 * multiple inter-guest channels behind one interface, although it would
 * require some manner of hotplugging new virtio channels.
 *
 * Finally, we could use a virtio network switch in the kernel, ie. vhost.
:*/

static u32 str2ip(const char *ipaddr)
{
	unsigned int b[4];

	if (sscanf(ipaddr, "%u.%u.%u.%u", &b[0], &b[1], &b[2], &b[3]) != 4)
		errx(1, "Failed to parse IP address '%s'", ipaddr);
	return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
}

static void str2mac(const char *macaddr, unsigned char mac[6])
{
	unsigned int m[6];
	if (sscanf(macaddr, "%02x:%02x:%02x:%02x:%02x:%02x",
		   &m[0], &m[1], &m[2], &m[3], &m[4], &m[5]) != 6)
		errx(1, "Failed to parse mac address '%s'", macaddr);
	mac[0] = m[0];
	mac[1] = m[1];
	mac[2] = m[2];
	mac[3] = m[3];
	mac[4] = m[4];
	mac[5] = m[5];
}

/*
 * This code is "adapted" from libbridge: it attaches the Host end of the
 * network device to the bridge device specified by the command line.
 *
 * This is yet another James Morris contribution (I'm an IP-level guy, so I
 * dislike bridging), and I just try not to break it.
 */
static void add_to_bridge(int fd, const char *if_name, const char *br_name)
{
	int ifidx;
	struct ifreq ifr;

	if (!*br_name)
		errx(1, "must specify bridge name");

	ifidx = if_nametoindex(if_name);
	if (!ifidx)
		errx(1, "interface %s does not exist!", if_name);

	strncpy(ifr.ifr_name, br_name, IFNAMSIZ);
	ifr.ifr_name[IFNAMSIZ-1] = '\0';
	ifr.ifr_ifindex = ifidx;
	if (ioctl(fd, SIOCBRADDIF, &ifr) < 0)
		err(1, "can't add %s to bridge %s", if_name, br_name);
}

/*
 * This sets up the Host end of the network device with an IP address, brings
 * it up so packets will flow, the copies the MAC address into the hwaddr
 * pointer.
 */
static void configure_device(int fd, const char *tapif, u32 ipaddr)
{
	struct ifreq ifr;
	struct sockaddr_in sin;

	memset(&ifr, 0, sizeof(ifr));
	strcpy(ifr.ifr_name, tapif);

	/* Don't read these incantations.  Just cut & paste them like I did! */
	/*
	 * NOTE(review): only sin_family and sin_addr are set; sin_port and
	 * padding stay uninitialized.  Presumably harmless for SIOCSIFADDR —
	 * TODO confirm.
	 */
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = htonl(ipaddr);
	memcpy(&ifr.ifr_addr, &sin, sizeof(sin));
	if (ioctl(fd, SIOCSIFADDR, &ifr) != 0)
		err(1, "Setting %s interface address", tapif);
	ifr.ifr_flags = IFF_UP;
	if (ioctl(fd, SIOCSIFFLAGS, &ifr) != 0)
		err(1, "Bringing interface %s up", tapif);
}

static int get_tun_device(char tapif[IFNAMSIZ])
{
	struct ifreq ifr;
	int netfd;

	/* Start with this zeroed.  Messy but sure. */
	memset(&ifr, 0, sizeof(ifr));

	/*
	 * We open the /dev/net/tun device and tell it we want a tap device.  A
	 * tap device is like a tun device, only somehow different.  To tell
	 * the truth, I completely blundered my way through this code, but it
	 * works now!
	 */
	netfd = open_or_die("/dev/net/tun", O_RDWR);
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR;
	strcpy(ifr.ifr_name, "tap%d");
	if (ioctl(netfd, TUNSETIFF, &ifr) != 0)
		err(1, "configuring /dev/net/tun");

	if (ioctl(netfd, TUNSETOFFLOAD,
		  TUN_F_CSUM|TUN_F_TSO4|TUN_F_TSO6|TUN_F_TSO_ECN) != 0)
		err(1, "Could not set features for tun device");

	/*
	 * We don't need checksums calculated for packets coming in this
	 * device: trust us!
	 */
	ioctl(netfd, TUNSETNOCSUM, 1);

	memcpy(tapif, ifr.ifr_name, IFNAMSIZ);
	return netfd;
}

/*L:195
 * Our network is a Host<->Guest network.  This can either use bridging or
 * routing, but the principle is the same: it uses the "tun" device to inject
 * packets into the Host as if they came in from a normal network card.  We
 * just shunt packets between the Guest and the tun device.
 */
static void setup_tun_net(char *arg)
{
	struct device *dev;
	struct net_info *net_info = malloc(sizeof(*net_info));
	int ipfd;
	u32 ip = INADDR_ANY;
	bool bridging = false;
	char tapif[IFNAMSIZ], *p;
	struct virtio_net_config conf;

	net_info->tunfd = get_tun_device(tapif);

	/* First we create a new network device. */
	dev = new_device("net", VIRTIO_ID_NET);
	dev->priv = net_info;

	/* Network devices need a recv and a send queue, just like console. */
	add_virtqueue(dev, VIRTQUEUE_NUM, net_input);
	add_virtqueue(dev, VIRTQUEUE_NUM, net_output);

	/*
	 * We need a socket to perform the magic network ioctls to bring up the
	 * tap interface, connect to the bridge etc.  Any socket will do!
	 */
	ipfd = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
	if (ipfd < 0)
		err(1, "opening IP socket");

	/* If the command line was --tunnet=bridge:<name> do bridging. */
	if (!strncmp(BRIDGE_PFX, arg, strlen(BRIDGE_PFX))) {
		arg += strlen(BRIDGE_PFX);
		bridging = true;
	}

	/* A mac address may follow the bridge name or IP address */
	p = strchr(arg, ':');
	if (p) {
		str2mac(p+1, conf.mac);
		add_feature(dev, VIRTIO_NET_F_MAC);
		*p = '\0';
	}

	/* arg is now either an IP address or a bridge name */
	if (bridging)
		add_to_bridge(ipfd, tapif, arg);
	else
		ip = str2ip(arg);

	/* Set up the tun device. */
	configure_device(ipfd, tapif, ip);

	/* Expect Guest to handle everything except UFO */
	add_feature(dev, VIRTIO_NET_F_CSUM);
	add_feature(dev, VIRTIO_NET_F_GUEST_CSUM);
	add_feature(dev, VIRTIO_NET_F_GUEST_TSO4);
	add_feature(dev, VIRTIO_NET_F_GUEST_TSO6);
	add_feature(dev, VIRTIO_NET_F_GUEST_ECN);
	add_feature(dev, VIRTIO_NET_F_HOST_TSO4);
	add_feature(dev, VIRTIO_NET_F_HOST_TSO6);
	add_feature(dev, VIRTIO_NET_F_HOST_ECN);
	/* We handle indirect ring entries */
	add_feature(dev, VIRTIO_RING_F_INDIRECT_DESC);
	set_config(dev, sizeof(conf), &conf);

	/* We don't need the socket any more; setup is done. */
	close(ipfd);

	devices.device_num++;

	if (bridging)
		verbose("device %u: tun %s attached to bridge: %s\n",
			devices.device_num, tapif, arg);
	else
		verbose("device %u: tun %s: %s\n",
			devices.device_num, tapif, arg);
}
/*:*/

/* This hangs off device->priv. */
struct vblk_info {
	/* The size of the file. */
	off64_t len;

	/* The file descriptor for the file. */
	int fd;
};

/*L:210
 * The Disk
 *
 * The disk only has one virtqueue, so it only has one thread.  It is really
 * simple: the Guest asks for a block number and we read or write that position
 * in the file.
 *
 * Before we serviced each virtqueue in a separate thread, that was unacceptably
 * slow: the Guest waits until the read is finished before running anything
 * else, even if it could have been doing useful work.
 *
 * We could have used async I/O, except it's reputed to suck so hard that
 * characters actually go missing from your code when you try to use it.
 */
static void blk_request(struct virtqueue *vq)
{
	struct vblk_info *vblk = vq->dev->priv;
	unsigned int head, out_num, in_num, wlen;
	int ret;
	u8 *in;
	struct virtio_blk_outhdr *out;
	struct iovec iov[vq->vring.num];
	off64_t off;

	/*
	 * Get the next request, where we normally wait.  It triggers the
	 * interrupt to acknowledge previously serviced requests (if any).
	 */
	head = wait_for_vq_desc(vq, iov, &out_num, &in_num);

	/*
	 * Every block request should contain at least one output buffer
	 * (detailing the location on disk and the type of request) and one
	 * input buffer (to hold the result).
	 */
	if (out_num == 0 || in_num == 0)
		errx(1, "Bad virtblk cmd %u out=%u in=%u",
		     head, out_num, in_num);

	out = convert(&iov[0], struct virtio_blk_outhdr);
	in = convert(&iov[out_num+in_num-1], u8);

	/*
	 * For historical reasons, block operations are expressed in 512 byte
	 * "sectors".
	 */
	off = out->sector * 512;

	/*
	 * In general the virtio block driver is allowed to try SCSI commands.
	 * It'd be nice if we supported eject, for example, but we don't.
	 */
	if (out->type & VIRTIO_BLK_T_SCSI_CMD) {
		fprintf(stderr, "Scsi commands unsupported\n");
		*in = VIRTIO_BLK_S_UNSUPP;
		wlen = sizeof(*in);
	} else if (out->type & VIRTIO_BLK_T_OUT) {
		/*
		 * Write
		 *
		 * Move to the right location in the block file.  This can fail
		 * if they try to write past end.
		 */
		if (lseek64(vblk->fd, off, SEEK_SET) != off)
			err(1, "Bad seek to sector %llu", out->sector);

		ret = writev(vblk->fd, iov+1, out_num-1);
		verbose("WRITE to sector %llu: %i\n", out->sector, ret);

		/*
		 * Grr... Now we know how long the descriptor they sent was, we
		 * make sure they didn't try to write over the end of the block
		 * file (possibly extending it).
		 */
		if (ret > 0 && off + ret > vblk->len) {
			/* Trim it back to the correct length */
			ftruncate64(vblk->fd, vblk->len);
			/* Die, bad Guest, die. */
			errx(1, "Write past end %llu+%u", off, ret);
		}

		wlen = sizeof(*in);
		*in = (ret >= 0 ? VIRTIO_BLK_S_OK : VIRTIO_BLK_S_IOERR);
	} else if (out->type & VIRTIO_BLK_T_FLUSH) {
		/* Flush */
		ret = fdatasync(vblk->fd);
		verbose("FLUSH fdatasync: %i\n", ret);
		wlen = sizeof(*in);
		*in = (ret >= 0 ? VIRTIO_BLK_S_OK : VIRTIO_BLK_S_IOERR);
	} else {
		/*
		 * Read
		 *
		 * Move to the right location in the block file.  This can fail
		 * if they try to read past end.
		 */
		if (lseek64(vblk->fd, off, SEEK_SET) != off)
			err(1, "Bad seek to sector %llu", out->sector);

		ret = readv(vblk->fd, iov+1, in_num-1);
		verbose("READ from sector %llu: %i\n", out->sector, ret);
		if (ret >= 0) {
			wlen = sizeof(*in) + ret;
			*in = VIRTIO_BLK_S_OK;
		} else {
			wlen = sizeof(*in);
			*in = VIRTIO_BLK_S_IOERR;
		}
	}

	/* Finished that request. */
	add_used(vq, head, wlen);
}

/*L:198 This actually sets up a virtual block device. */
static void setup_block_file(const char *filename)
{
	struct device *dev;
	struct vblk_info *vblk;
	struct virtio_blk_config conf;

	/* Creat the device. */
	dev = new_device("block", VIRTIO_ID_BLOCK);

	/* The device has one virtqueue, where the Guest places requests. */
	add_virtqueue(dev, VIRTQUEUE_NUM, blk_request);

	/* Allocate the room for our own bookkeeping */
	vblk = dev->priv = malloc(sizeof(*vblk));

	/* First we open the file and store the length. */
	vblk->fd = open_or_die(filename, O_RDWR|O_LARGEFILE);
	vblk->len = lseek64(vblk->fd, 0, SEEK_END);

	/* We support FLUSH. */
	add_feature(dev, VIRTIO_BLK_F_FLUSH);

	/* Tell Guest how many sectors this device has. */
	conf.capacity = cpu_to_le64(vblk->len / 512);

	/*
	 * Tell Guest not to put in too many descriptors at once: two are used
	 * for the in and out elements.
*/
	add_feature(dev, VIRTIO_BLK_F_SEG_MAX);
	conf.seg_max = cpu_to_le32(VIRTQUEUE_NUM - 2);

	/* Don't try to put whole struct: we have 8 bit limit. */
	set_config(dev, offsetof(struct virtio_blk_config, geometry), &conf);

	verbose("device %u: virtblock %llu sectors\n",
		++devices.device_num, le64_to_cpu(conf.capacity));
}

/*L:211
 * Our random number generator device reads from /dev/random into the Guest's
 * input buffers.  The usual case is that the Guest doesn't want random numbers
 * and so has no buffers although /dev/random is still readable, whereas
 * console is the reverse.
 *
 * The same logic applies, however.
 */
struct rng_info {
	/* File descriptor for the host's /dev/random. */
	int rfd;
};

/*
 * Service the rng virtqueue: fill each Guest input buffer completely with
 * bytes read from /dev/random, then report the total length consumed.
 */
static void rng_input(struct virtqueue *vq)
{
	int len;
	unsigned int head, in_num, out_num, totlen = 0;
	struct rng_info *rng_info = vq->dev->priv;
	struct iovec iov[vq->vring.num];

	/* First we need a buffer from the Guest's virtqueue. */
	head = wait_for_vq_desc(vq, iov, &out_num, &in_num);
	if (out_num)
		errx(1, "Output buffers in rng?");

	/*
	 * Just like the console write, we loop to cover the whole iovec.
	 * In this case, short reads actually happen quite a bit.
	 */
	while (!iov_empty(iov, in_num)) {
		len = readv(rng_info->rfd, iov, in_num);
		if (len <= 0)
			err(1, "Read from /dev/random gave %i", len);
		iov_consume(iov, in_num, len);
		totlen += len;
	}

	/* Tell the Guest about the new input. */
	add_used(vq, head, totlen);
}

/*L:199
 * This creates a "hardware" random number device for the Guest.
 */
static void setup_rng(void)
{
	struct device *dev;
	/* NOTE(review): malloc result is not checked here — matches the
	 * launcher's die-on-error style elsewhere, but an OOM would crash
	 * on the dereference below rather than with a clear message. */
	struct rng_info *rng_info = malloc(sizeof(*rng_info));

	/* Our device's private info simply contains the /dev/random fd. */
	rng_info->rfd = open_or_die("/dev/random", O_RDONLY);

	/* Create the new device. */
	dev = new_device("rng", VIRTIO_ID_RNG);
	dev->priv = rng_info;

	/* The device has one virtqueue, where the Guest places inbufs. */
	add_virtqueue(dev, VIRTQUEUE_NUM, rng_input);

	verbose("device %u: rng\n", devices.device_num++);
}
/* That's the end of device setup.
*/

/*L:230 Reboot is pretty easy: clean up and exec() the Launcher afresh. */
static void __attribute__((noreturn)) restart_guest(void)
{
	unsigned int i;

	/*
	 * Since we don't track all open fds, we simply close everything beyond
	 * stderr.
	 */
	for (i = 3; i < FD_SETSIZE; i++)
		close(i);

	/* Reset all the devices (kills all threads). */
	cleanup_devices();
	execv(main_args[0], main_args);
	/* Only reached if the exec itself failed. */
	err(1, "Could not exec %s", main_args[0]);
}

/*L:220
 * Finally we reach the core of the Launcher which runs the Guest, serves
 * its input and output, and finally, lays it to rest.
 */
static void __attribute__((noreturn)) run_guest(void)
{
	for (;;) {
		unsigned long notify_addr;
		int readval;

		/* We read from the /dev/lguest device to run the Guest.
		 * The "offset" argument carries the cpu id, not a file
		 * position — that is the lguest device's calling convention. */
		readval = pread(lguest_fd, &notify_addr,
				sizeof(notify_addr), cpu_id);

		/* One unsigned long means the Guest did HCALL_NOTIFY */
		if (readval == sizeof(notify_addr)) {
			verbose("Notify on address %#lx\n", notify_addr);
			handle_output(notify_addr);
		/* ENOENT means the Guest died.  Reading tells us why. */
		} else if (errno == ENOENT) {
			char reason[1024] = { 0 };
			pread(lguest_fd, reason, sizeof(reason)-1, cpu_id);
			errx(1, "%s", reason);
		/* ERESTART means that we need to reboot the guest */
		} else if (errno == ERESTART) {
			restart_guest();
		/* Anything else means a bug or incompatible change. */
		} else
			err(1, "Running guest failed");
	}
}

/*L:240
 * This is the end of the Launcher.  The good news: we are over halfway
 * through!  The bad news: the most fiendish part of the code still lies ahead
 * of us.
 *
 * Are you ready?  Take a deep breath and join me in the core of the Host, in
 * "make Host".
:*/ static struct option opts[] = { { "verbose", 0, NULL, 'v' }, { "tunnet", 1, NULL, 't' }, { "block", 1, NULL, 'b' }, { "rng", 0, NULL, 'r' }, { "initrd", 1, NULL, 'i' }, { "username", 1, NULL, 'u' }, { "chroot", 1, NULL, 'c' }, { NULL }, }; static void usage(void) { errx(1, "Usage: lguest [--verbose] " "[--tunnet=(<ipaddr>:<macaddr>|bridge:<bridgename>:<macaddr>)\n" "|--block=<filename>|--initrd=<filename>]...\n" "<mem-in-mb> vmlinux [args...]"); } /*L:105 The main routine is where the real work begins: */ int main(int argc, char *argv[]) { /* Memory, code startpoint and size of the (optional) initrd. */ unsigned long mem = 0, start, initrd_size = 0; /* Two temporaries. */ int i, c; /* The boot information for the Guest. */ struct boot_params *boot; /* If they specify an initrd file to load. */ const char *initrd_name = NULL; /* Password structure for initgroups/setres[gu]id */ struct passwd *user_details = NULL; /* Directory to chroot to */ char *chroot_path = NULL; /* Save the args: we "reboot" by execing ourselves again. */ main_args = argv; /* * First we initialize the device list. We keep a pointer to the last * device, and the next interrupt number to use for devices (1: * remember that 0 is used by the timer). */ devices.lastdev = NULL; devices.next_irq = 1; /* We're CPU 0. In fact, that's the only CPU possible right now. */ cpu_id = 0; /* * We need to know how much memory so we can set up the device * descriptor and memory pages for the devices as we parse the command * line. So we quickly look through the arguments to find the amount * of memory now. */ for (i = 1; i < argc; i++) { if (argv[i][0] != '-') { mem = atoi(argv[i]) * 1024 * 1024; /* * We start by mapping anonymous pages over all of * guest-physical memory range. This fills it with 0, * and ensures that the Guest won't be killed when it * tries to access it. 
*/ guest_base = map_zeroed_pages(mem / getpagesize() + DEVICE_PAGES); guest_limit = mem; guest_max = mem + DEVICE_PAGES*getpagesize(); devices.descpage = get_pages(1); break; } } /* The options are fairly straight-forward */ while ((c = getopt_long(argc, argv, "v", opts, NULL)) != EOF) { switch (c) { case 'v': verbose = true; break; case 't': setup_tun_net(optarg); break; case 'b': setup_block_file(optarg); break; case 'r': setup_rng(); break; case 'i': initrd_name = optarg; break; case 'u': user_details = getpwnam(optarg); if (!user_details) err(1, "getpwnam failed, incorrect username?"); break; case 'c': chroot_path = optarg; break; default: warnx("Unknown argument %s", argv[optind]); usage(); } } /* * After the other arguments we expect memory and kernel image name, * followed by command line arguments for the kernel. */ if (optind + 2 > argc) usage(); verbose("Guest base is at %p\n", guest_base); /* We always have a console device */ setup_console(); /* Now we load the kernel */ start = load_kernel(open_or_die(argv[optind+1], O_RDONLY)); /* Boot information is stashed at physical address 0 */ boot = from_guest_phys(0); /* Map the initrd image if requested (at top of physical memory) */ if (initrd_name) { initrd_size = load_initrd(initrd_name, mem); /* * These are the location in the Linux boot header where the * start and size of the initrd are expected to be found. */ boot->hdr.ramdisk_image = mem - initrd_size; boot->hdr.ramdisk_size = initrd_size; /* The bootloader type 0xFF means "unknown"; that's OK. */ boot->hdr.type_of_loader = 0xFF; } /* * The Linux boot header contains an "E820" memory map: ours is a * simple, single region. */ boot->e820_entries = 1; boot->e820_map[0] = ((struct e820entry) { 0, mem, E820_RAM }); /* * The boot header contains a command line pointer: we put the command * line after the boot header. */ boot->hdr.cmd_line_ptr = to_guest_phys(boot + 1); /* We use a simple helper to copy the arguments separated by spaces. 
*/ concat((char *)(boot + 1), argv+optind+2); /* Set kernel alignment to 16M (CONFIG_PHYSICAL_ALIGN) */ boot->hdr.kernel_alignment = 0x1000000; /* Boot protocol version: 2.07 supports the fields for lguest. */ boot->hdr.version = 0x207; /* The hardware_subarch value of "1" tells the Guest it's an lguest. */ boot->hdr.hardware_subarch = 1; /* Tell the entry path not to try to reload segment registers. */ boot->hdr.loadflags |= KEEP_SEGMENTS; /* We tell the kernel to initialize the Guest. */ tell_kernel(start); /* Ensure that we terminate if a device-servicing child dies. */ signal(SIGCHLD, kill_launcher); /* If we exit via err(), this kills all the threads, restores tty. */ atexit(cleanup_devices); /* If requested, chroot to a directory */ if (chroot_path) { if (chroot(chroot_path) != 0) err(1, "chroot(\"%s\") failed", chroot_path); if (chdir("/") != 0) err(1, "chdir(\"/\") failed"); verbose("chroot done\n"); } /* If requested, drop privileges */ if (user_details) { uid_t u; gid_t g; u = user_details->pw_uid; g = user_details->pw_gid; if (initgroups(user_details->pw_name, g) != 0) err(1, "initgroups failed"); if (setresgid(g, g, g) != 0) err(1, "setresgid failed"); if (setresuid(u, u, u) != 0) err(1, "setresuid failed"); verbose("Dropping privileges completed\n"); } /* Finally, run the Guest. This doesn't return. */ run_guest(); } /*:*/ /*M:999 * Mastery is done: you now know everything I do. * * But surely you have seen code, features and bugs in your wanderings which * you now yearn to attack? That is the real game, and I look forward to you * patching and forking lguest into the Your-Name-Here-visor. * * Farewell, and good coding! * Rusty Russell. */
gpl-2.0
drikinukoda/android_kernel_lge_e8lte
drivers/staging/iio/meter/ade7758_core.c
4964
20111
/* * ADE7758 Poly Phase Multifunction Energy Metering IC driver * * Copyright 2010-2011 Analog Devices Inc. * * Licensed under the GPL-2. */ #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/delay.h> #include <linux/mutex.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/spi/spi.h> #include <linux/slab.h> #include <linux/sysfs.h> #include <linux/list.h> #include <linux/module.h> #include "../iio.h" #include "../sysfs.h" #include "../buffer.h" #include "meter.h" #include "ade7758.h" int ade7758_spi_write_reg_8(struct device *dev, u8 reg_address, u8 val) { int ret; struct iio_dev *indio_dev = dev_get_drvdata(dev); struct ade7758_state *st = iio_priv(indio_dev); mutex_lock(&st->buf_lock); st->tx[0] = ADE7758_WRITE_REG(reg_address); st->tx[1] = val; ret = spi_write(st->us, st->tx, 2); mutex_unlock(&st->buf_lock); return ret; } static int ade7758_spi_write_reg_16(struct device *dev, u8 reg_address, u16 value) { int ret; struct spi_message msg; struct iio_dev *indio_dev = dev_get_drvdata(dev); struct ade7758_state *st = iio_priv(indio_dev); struct spi_transfer xfers[] = { { .tx_buf = st->tx, .bits_per_word = 8, .len = 3, } }; mutex_lock(&st->buf_lock); st->tx[0] = ADE7758_WRITE_REG(reg_address); st->tx[1] = (value >> 8) & 0xFF; st->tx[2] = value & 0xFF; spi_message_init(&msg); spi_message_add_tail(xfers, &msg); ret = spi_sync(st->us, &msg); mutex_unlock(&st->buf_lock); return ret; } static int ade7758_spi_write_reg_24(struct device *dev, u8 reg_address, u32 value) { int ret; struct spi_message msg; struct iio_dev *indio_dev = dev_get_drvdata(dev); struct ade7758_state *st = iio_priv(indio_dev); struct spi_transfer xfers[] = { { .tx_buf = st->tx, .bits_per_word = 8, .len = 4, } }; mutex_lock(&st->buf_lock); st->tx[0] = ADE7758_WRITE_REG(reg_address); st->tx[1] = (value >> 16) & 0xFF; st->tx[2] = (value >> 8) & 0xFF; st->tx[3] = value & 0xFF; spi_message_init(&msg); spi_message_add_tail(xfers, &msg); ret = spi_sync(st->us, &msg); 
mutex_unlock(&st->buf_lock); return ret; } int ade7758_spi_read_reg_8(struct device *dev, u8 reg_address, u8 *val) { struct spi_message msg; struct iio_dev *indio_dev = dev_get_drvdata(dev); struct ade7758_state *st = iio_priv(indio_dev); int ret; struct spi_transfer xfers[] = { { .tx_buf = st->tx, .bits_per_word = 8, .len = 1, .delay_usecs = 4, }, { .tx_buf = &st->tx[1], .rx_buf = st->rx, .bits_per_word = 8, .len = 1, }, }; mutex_lock(&st->buf_lock); st->tx[0] = ADE7758_READ_REG(reg_address); st->tx[1] = 0; spi_message_init(&msg); spi_message_add_tail(&xfers[0], &msg); spi_message_add_tail(&xfers[1], &msg); ret = spi_sync(st->us, &msg); if (ret) { dev_err(&st->us->dev, "problem when reading 8 bit register 0x%02X", reg_address); goto error_ret; } *val = st->rx[0]; error_ret: mutex_unlock(&st->buf_lock); return ret; } static int ade7758_spi_read_reg_16(struct device *dev, u8 reg_address, u16 *val) { struct spi_message msg; struct iio_dev *indio_dev = dev_get_drvdata(dev); struct ade7758_state *st = iio_priv(indio_dev); int ret; struct spi_transfer xfers[] = { { .tx_buf = st->tx, .bits_per_word = 8, .len = 1, .delay_usecs = 4, }, { .tx_buf = &st->tx[1], .rx_buf = st->rx, .bits_per_word = 8, .len = 2, }, }; mutex_lock(&st->buf_lock); st->tx[0] = ADE7758_READ_REG(reg_address); st->tx[1] = 0; st->tx[2] = 0; spi_message_init(&msg); spi_message_add_tail(&xfers[0], &msg); spi_message_add_tail(&xfers[1], &msg); ret = spi_sync(st->us, &msg); if (ret) { dev_err(&st->us->dev, "problem when reading 16 bit register 0x%02X", reg_address); goto error_ret; } *val = (st->rx[0] << 8) | st->rx[1]; error_ret: mutex_unlock(&st->buf_lock); return ret; } static int ade7758_spi_read_reg_24(struct device *dev, u8 reg_address, u32 *val) { struct spi_message msg; struct iio_dev *indio_dev = dev_get_drvdata(dev); struct ade7758_state *st = iio_priv(indio_dev); int ret; struct spi_transfer xfers[] = { { .tx_buf = st->tx, .bits_per_word = 8, .len = 1, .delay_usecs = 4, }, { .tx_buf = &st->tx[1], 
.rx_buf = st->rx, .bits_per_word = 8, .len = 3, }, }; mutex_lock(&st->buf_lock); st->tx[0] = ADE7758_READ_REG(reg_address); st->tx[1] = 0; st->tx[2] = 0; st->tx[3] = 0; spi_message_init(&msg); spi_message_add_tail(&xfers[0], &msg); spi_message_add_tail(&xfers[1], &msg); ret = spi_sync(st->us, &msg); if (ret) { dev_err(&st->us->dev, "problem when reading 24 bit register 0x%02X", reg_address); goto error_ret; } *val = (st->rx[0] << 16) | (st->rx[1] << 8) | st->rx[2]; error_ret: mutex_unlock(&st->buf_lock); return ret; } static ssize_t ade7758_read_8bit(struct device *dev, struct device_attribute *attr, char *buf) { int ret; u8 val = 0; struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); ret = ade7758_spi_read_reg_8(dev, this_attr->address, &val); if (ret) return ret; return sprintf(buf, "%u\n", val); } static ssize_t ade7758_read_16bit(struct device *dev, struct device_attribute *attr, char *buf) { int ret; u16 val = 0; struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); ret = ade7758_spi_read_reg_16(dev, this_attr->address, &val); if (ret) return ret; return sprintf(buf, "%u\n", val); } static ssize_t ade7758_read_24bit(struct device *dev, struct device_attribute *attr, char *buf) { int ret; u32 val = 0; struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); ret = ade7758_spi_read_reg_24(dev, this_attr->address, &val); if (ret) return ret; return sprintf(buf, "%u\n", val & 0xFFFFFF); } static ssize_t ade7758_write_8bit(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); int ret; long val; ret = strict_strtol(buf, 10, &val); if (ret) goto error_ret; ret = ade7758_spi_write_reg_8(dev, this_attr->address, val); error_ret: return ret ? 
ret : len; } static ssize_t ade7758_write_16bit(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); int ret; long val; ret = strict_strtol(buf, 10, &val); if (ret) goto error_ret; ret = ade7758_spi_write_reg_16(dev, this_attr->address, val); error_ret: return ret ? ret : len; } static int ade7758_reset(struct device *dev) { int ret; u8 val; ade7758_spi_read_reg_8(dev, ADE7758_OPMODE, &val); val |= 1 << 6; /* Software Chip Reset */ ret = ade7758_spi_write_reg_8(dev, ADE7758_OPMODE, val); return ret; } static ssize_t ade7758_write_reset(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { if (len < 1) return -1; switch (buf[0]) { case '1': case 'y': case 'Y': return ade7758_reset(dev); } return len; } static IIO_DEV_ATTR_VPEAK(S_IWUSR | S_IRUGO, ade7758_read_8bit, ade7758_write_8bit, ADE7758_VPEAK); static IIO_DEV_ATTR_IPEAK(S_IWUSR | S_IRUGO, ade7758_read_8bit, ade7758_write_8bit, ADE7758_VPEAK); static IIO_DEV_ATTR_APHCAL(S_IWUSR | S_IRUGO, ade7758_read_8bit, ade7758_write_8bit, ADE7758_APHCAL); static IIO_DEV_ATTR_BPHCAL(S_IWUSR | S_IRUGO, ade7758_read_8bit, ade7758_write_8bit, ADE7758_BPHCAL); static IIO_DEV_ATTR_CPHCAL(S_IWUSR | S_IRUGO, ade7758_read_8bit, ade7758_write_8bit, ADE7758_CPHCAL); static IIO_DEV_ATTR_WDIV(S_IWUSR | S_IRUGO, ade7758_read_8bit, ade7758_write_8bit, ADE7758_WDIV); static IIO_DEV_ATTR_VADIV(S_IWUSR | S_IRUGO, ade7758_read_8bit, ade7758_write_8bit, ADE7758_VADIV); static IIO_DEV_ATTR_AIRMS(S_IRUGO, ade7758_read_24bit, NULL, ADE7758_AIRMS); static IIO_DEV_ATTR_BIRMS(S_IRUGO, ade7758_read_24bit, NULL, ADE7758_BIRMS); static IIO_DEV_ATTR_CIRMS(S_IRUGO, ade7758_read_24bit, NULL, ADE7758_CIRMS); static IIO_DEV_ATTR_AVRMS(S_IRUGO, ade7758_read_24bit, NULL, ADE7758_AVRMS); static IIO_DEV_ATTR_BVRMS(S_IRUGO, ade7758_read_24bit, NULL, ADE7758_BVRMS); static IIO_DEV_ATTR_CVRMS(S_IRUGO, ade7758_read_24bit, NULL, ADE7758_CVRMS); static 
IIO_DEV_ATTR_AIRMSOS(S_IWUSR | S_IRUGO, ade7758_read_16bit, ade7758_write_16bit, ADE7758_AIRMSOS); static IIO_DEV_ATTR_BIRMSOS(S_IWUSR | S_IRUGO, ade7758_read_16bit, ade7758_write_16bit, ADE7758_BIRMSOS); static IIO_DEV_ATTR_CIRMSOS(S_IWUSR | S_IRUGO, ade7758_read_16bit, ade7758_write_16bit, ADE7758_CIRMSOS); static IIO_DEV_ATTR_AVRMSOS(S_IWUSR | S_IRUGO, ade7758_read_16bit, ade7758_write_16bit, ADE7758_AVRMSOS); static IIO_DEV_ATTR_BVRMSOS(S_IWUSR | S_IRUGO, ade7758_read_16bit, ade7758_write_16bit, ADE7758_BVRMSOS); static IIO_DEV_ATTR_CVRMSOS(S_IWUSR | S_IRUGO, ade7758_read_16bit, ade7758_write_16bit, ADE7758_CVRMSOS); static IIO_DEV_ATTR_AIGAIN(S_IWUSR | S_IRUGO, ade7758_read_16bit, ade7758_write_16bit, ADE7758_AIGAIN); static IIO_DEV_ATTR_BIGAIN(S_IWUSR | S_IRUGO, ade7758_read_16bit, ade7758_write_16bit, ADE7758_BIGAIN); static IIO_DEV_ATTR_CIGAIN(S_IWUSR | S_IRUGO, ade7758_read_16bit, ade7758_write_16bit, ADE7758_CIGAIN); static IIO_DEV_ATTR_AVRMSGAIN(S_IWUSR | S_IRUGO, ade7758_read_16bit, ade7758_write_16bit, ADE7758_AVRMSGAIN); static IIO_DEV_ATTR_BVRMSGAIN(S_IWUSR | S_IRUGO, ade7758_read_16bit, ade7758_write_16bit, ADE7758_BVRMSGAIN); static IIO_DEV_ATTR_CVRMSGAIN(S_IWUSR | S_IRUGO, ade7758_read_16bit, ade7758_write_16bit, ADE7758_CVRMSGAIN); int ade7758_set_irq(struct device *dev, bool enable) { int ret; u32 irqen; ret = ade7758_spi_read_reg_24(dev, ADE7758_MASK, &irqen); if (ret) goto error_ret; if (enable) irqen |= 1 << 16; /* Enables an interrupt when a data is present in the waveform register */ else irqen &= ~(1 << 16); ret = ade7758_spi_write_reg_24(dev, ADE7758_MASK, irqen); if (ret) goto error_ret; error_ret: return ret; } /* Power down the device */ static int ade7758_stop_device(struct device *dev) { int ret; u8 val; ade7758_spi_read_reg_8(dev, ADE7758_OPMODE, &val); val |= 7 << 3; /* ADE7758 powered down */ ret = ade7758_spi_write_reg_8(dev, ADE7758_OPMODE, val); return ret; } static int ade7758_initial_setup(struct iio_dev *indio_dev) { struct 
ade7758_state *st = iio_priv(indio_dev); struct device *dev = &indio_dev->dev; int ret; /* use low spi speed for init */ st->us->mode = SPI_MODE_1; spi_setup(st->us); /* Disable IRQ */ ret = ade7758_set_irq(dev, false); if (ret) { dev_err(dev, "disable irq failed"); goto err_ret; } ade7758_reset(dev); msleep(ADE7758_STARTUP_DELAY); err_ret: return ret; } static ssize_t ade7758_read_frequency(struct device *dev, struct device_attribute *attr, char *buf) { int ret, len = 0; u8 t; int sps; ret = ade7758_spi_read_reg_8(dev, ADE7758_WAVMODE, &t); if (ret) return ret; t = (t >> 5) & 0x3; sps = 26040 / (1 << t); len = sprintf(buf, "%d SPS\n", sps); return len; } static ssize_t ade7758_write_frequency(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *indio_dev = dev_get_drvdata(dev); unsigned long val; int ret; u8 reg, t; ret = strict_strtol(buf, 10, &val); if (ret) return ret; mutex_lock(&indio_dev->mlock); switch (val) { case 26040: t = 0; break; case 13020: t = 1; break; case 6510: t = 2; break; case 3255: t = 3; break; default: ret = -EINVAL; goto out; } ret = ade7758_spi_read_reg_8(dev, ADE7758_WAVMODE, &reg); if (ret) goto out; reg &= ~(5 << 3); reg |= t << 5; ret = ade7758_spi_write_reg_8(dev, ADE7758_WAVMODE, reg); out: mutex_unlock(&indio_dev->mlock); return ret ? 
ret : len; } static IIO_DEV_ATTR_TEMP_RAW(ade7758_read_8bit); static IIO_CONST_ATTR(in_temp_offset, "129 C"); static IIO_CONST_ATTR(in_temp_scale, "4 C"); static IIO_DEV_ATTR_AWATTHR(ade7758_read_16bit, ADE7758_AWATTHR); static IIO_DEV_ATTR_BWATTHR(ade7758_read_16bit, ADE7758_BWATTHR); static IIO_DEV_ATTR_CWATTHR(ade7758_read_16bit, ADE7758_CWATTHR); static IIO_DEV_ATTR_AVARHR(ade7758_read_16bit, ADE7758_AVARHR); static IIO_DEV_ATTR_BVARHR(ade7758_read_16bit, ADE7758_BVARHR); static IIO_DEV_ATTR_CVARHR(ade7758_read_16bit, ADE7758_CVARHR); static IIO_DEV_ATTR_AVAHR(ade7758_read_16bit, ADE7758_AVAHR); static IIO_DEV_ATTR_BVAHR(ade7758_read_16bit, ADE7758_BVAHR); static IIO_DEV_ATTR_CVAHR(ade7758_read_16bit, ADE7758_CVAHR); static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO, ade7758_read_frequency, ade7758_write_frequency); static IIO_DEV_ATTR_RESET(ade7758_write_reset); static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("26040 13020 6510 3255"); static struct attribute *ade7758_attributes[] = { &iio_dev_attr_in_temp_raw.dev_attr.attr, &iio_const_attr_in_temp_offset.dev_attr.attr, &iio_const_attr_in_temp_scale.dev_attr.attr, &iio_dev_attr_sampling_frequency.dev_attr.attr, &iio_const_attr_sampling_frequency_available.dev_attr.attr, &iio_dev_attr_reset.dev_attr.attr, &iio_dev_attr_awatthr.dev_attr.attr, &iio_dev_attr_bwatthr.dev_attr.attr, &iio_dev_attr_cwatthr.dev_attr.attr, &iio_dev_attr_avarhr.dev_attr.attr, &iio_dev_attr_bvarhr.dev_attr.attr, &iio_dev_attr_cvarhr.dev_attr.attr, &iio_dev_attr_avahr.dev_attr.attr, &iio_dev_attr_bvahr.dev_attr.attr, &iio_dev_attr_cvahr.dev_attr.attr, &iio_dev_attr_vpeak.dev_attr.attr, &iio_dev_attr_ipeak.dev_attr.attr, &iio_dev_attr_aphcal.dev_attr.attr, &iio_dev_attr_bphcal.dev_attr.attr, &iio_dev_attr_cphcal.dev_attr.attr, &iio_dev_attr_wdiv.dev_attr.attr, &iio_dev_attr_vadiv.dev_attr.attr, &iio_dev_attr_airms.dev_attr.attr, &iio_dev_attr_birms.dev_attr.attr, &iio_dev_attr_cirms.dev_attr.attr, &iio_dev_attr_avrms.dev_attr.attr, 
&iio_dev_attr_bvrms.dev_attr.attr, &iio_dev_attr_cvrms.dev_attr.attr, &iio_dev_attr_aigain.dev_attr.attr, &iio_dev_attr_bigain.dev_attr.attr, &iio_dev_attr_cigain.dev_attr.attr, &iio_dev_attr_avrmsgain.dev_attr.attr, &iio_dev_attr_bvrmsgain.dev_attr.attr, &iio_dev_attr_cvrmsgain.dev_attr.attr, &iio_dev_attr_airmsos.dev_attr.attr, &iio_dev_attr_birmsos.dev_attr.attr, &iio_dev_attr_cirmsos.dev_attr.attr, &iio_dev_attr_avrmsos.dev_attr.attr, &iio_dev_attr_bvrmsos.dev_attr.attr, &iio_dev_attr_cvrmsos.dev_attr.attr, NULL, }; static const struct attribute_group ade7758_attribute_group = { .attrs = ade7758_attributes, }; static struct iio_chan_spec ade7758_channels[] = { IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "raw", 0, 0, IIO_CHAN_INFO_SCALE_SHARED_BIT, AD7758_WT(AD7758_PHASE_A, AD7758_VOLTAGE), 0, IIO_ST('s', 24, 32, 0), 0), IIO_CHAN(IIO_CURRENT, 0, 1, 0, "raw", 0, 0, IIO_CHAN_INFO_SCALE_SHARED_BIT, AD7758_WT(AD7758_PHASE_A, AD7758_CURRENT), 1, IIO_ST('s', 24, 32, 0), 0), IIO_CHAN(IIO_POWER, 0, 1, 0, "apparent_raw", 0, 0, IIO_CHAN_INFO_SCALE_SHARED_BIT, AD7758_WT(AD7758_PHASE_A, AD7758_APP_PWR), 2, IIO_ST('s', 24, 32, 0), 0), IIO_CHAN(IIO_POWER, 0, 1, 0, "active_raw", 0, 0, IIO_CHAN_INFO_SCALE_SHARED_BIT, AD7758_WT(AD7758_PHASE_A, AD7758_ACT_PWR), 3, IIO_ST('s', 24, 32, 0), 0), IIO_CHAN(IIO_POWER, 0, 1, 0, "reactive_raw", 0, 0, IIO_CHAN_INFO_SCALE_SHARED_BIT, AD7758_WT(AD7758_PHASE_A, AD7758_REACT_PWR), 4, IIO_ST('s', 24, 32, 0), 0), IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "raw", 1, 0, IIO_CHAN_INFO_SCALE_SHARED_BIT, AD7758_WT(AD7758_PHASE_B, AD7758_VOLTAGE), 5, IIO_ST('s', 24, 32, 0), 0), IIO_CHAN(IIO_CURRENT, 0, 1, 0, "raw", 1, 0, IIO_CHAN_INFO_SCALE_SHARED_BIT, AD7758_WT(AD7758_PHASE_B, AD7758_CURRENT), 6, IIO_ST('s', 24, 32, 0), 0), IIO_CHAN(IIO_POWER, 0, 1, 0, "apparent_raw", 1, 0, IIO_CHAN_INFO_SCALE_SHARED_BIT, AD7758_WT(AD7758_PHASE_B, AD7758_APP_PWR), 7, IIO_ST('s', 24, 32, 0), 0), IIO_CHAN(IIO_POWER, 0, 1, 0, "active_raw", 1, 0, IIO_CHAN_INFO_SCALE_SHARED_BIT, 
AD7758_WT(AD7758_PHASE_B, AD7758_ACT_PWR), 8, IIO_ST('s', 24, 32, 0), 0), IIO_CHAN(IIO_POWER, 0, 1, 0, "reactive_raw", 1, 0, IIO_CHAN_INFO_SCALE_SHARED_BIT, AD7758_WT(AD7758_PHASE_B, AD7758_REACT_PWR), 9, IIO_ST('s', 24, 32, 0), 0), IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "raw", 2, 0, IIO_CHAN_INFO_SCALE_SHARED_BIT, AD7758_WT(AD7758_PHASE_C, AD7758_VOLTAGE), 10, IIO_ST('s', 24, 32, 0), 0), IIO_CHAN(IIO_CURRENT, 0, 1, 0, "raw", 2, 0, IIO_CHAN_INFO_SCALE_SHARED_BIT, AD7758_WT(AD7758_PHASE_C, AD7758_CURRENT), 11, IIO_ST('s', 24, 32, 0), 0), IIO_CHAN(IIO_POWER, 0, 1, 0, "apparent_raw", 2, 0, IIO_CHAN_INFO_SCALE_SHARED_BIT, AD7758_WT(AD7758_PHASE_C, AD7758_APP_PWR), 12, IIO_ST('s', 24, 32, 0), 0), IIO_CHAN(IIO_POWER, 0, 1, 0, "active_raw", 2, 0, IIO_CHAN_INFO_SCALE_SHARED_BIT, AD7758_WT(AD7758_PHASE_C, AD7758_ACT_PWR), 13, IIO_ST('s', 24, 32, 0), 0), IIO_CHAN(IIO_POWER, 0, 1, 0, "reactive_raw", 2, 0, IIO_CHAN_INFO_SCALE_SHARED_BIT, AD7758_WT(AD7758_PHASE_C, AD7758_REACT_PWR), 14, IIO_ST('s', 24, 32, 0), 0), IIO_CHAN_SOFT_TIMESTAMP(15), }; static const struct iio_info ade7758_info = { .attrs = &ade7758_attribute_group, .driver_module = THIS_MODULE, }; static int __devinit ade7758_probe(struct spi_device *spi) { int i, ret; struct ade7758_state *st; struct iio_dev *indio_dev = iio_allocate_device(sizeof(*st)); if (indio_dev == NULL) { ret = -ENOMEM; goto error_ret; } st = iio_priv(indio_dev); /* this is only used for removal purposes */ spi_set_drvdata(spi, indio_dev); /* Allocate the comms buffers */ st->rx = kcalloc(ADE7758_MAX_RX, sizeof(*st->rx), GFP_KERNEL); if (st->rx == NULL) { ret = -ENOMEM; goto error_free_dev; } st->tx = kcalloc(ADE7758_MAX_TX, sizeof(*st->tx), GFP_KERNEL); if (st->tx == NULL) { ret = -ENOMEM; goto error_free_rx; } st->us = spi; st->ade7758_ring_channels = &ade7758_channels[0]; mutex_init(&st->buf_lock); indio_dev->name = spi->dev.driver->name; indio_dev->dev.parent = &spi->dev; indio_dev->info = &ade7758_info; indio_dev->modes = INDIO_DIRECT_MODE; for 
(i = 0; i < AD7758_NUM_WAVESRC; i++) set_bit(i, &st->available_scan_masks[i]); indio_dev->available_scan_masks = st->available_scan_masks; ret = ade7758_configure_ring(indio_dev); if (ret) goto error_free_tx; ret = iio_buffer_register(indio_dev, &ade7758_channels[0], ARRAY_SIZE(ade7758_channels)); if (ret) { dev_err(&spi->dev, "failed to initialize the ring\n"); goto error_unreg_ring_funcs; } /* Get the device into a sane initial state */ ret = ade7758_initial_setup(indio_dev); if (ret) goto error_uninitialize_ring; if (spi->irq) { ret = ade7758_probe_trigger(indio_dev); if (ret) goto error_uninitialize_ring; } ret = iio_device_register(indio_dev); if (ret) goto error_remove_trigger; return 0; error_remove_trigger: if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) ade7758_remove_trigger(indio_dev); error_uninitialize_ring: ade7758_uninitialize_ring(indio_dev); error_unreg_ring_funcs: ade7758_unconfigure_ring(indio_dev); error_free_tx: kfree(st->tx); error_free_rx: kfree(st->rx); error_free_dev: iio_free_device(indio_dev); error_ret: return ret; } static int ade7758_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct ade7758_state *st = iio_priv(indio_dev); int ret; iio_device_unregister(indio_dev); ret = ade7758_stop_device(&indio_dev->dev); if (ret) goto err_ret; ade7758_remove_trigger(indio_dev); ade7758_uninitialize_ring(indio_dev); ade7758_unconfigure_ring(indio_dev); kfree(st->tx); kfree(st->rx); iio_free_device(indio_dev); err_ret: return ret; } static const struct spi_device_id ade7758_id[] = { {"ade7758", 0}, {} }; MODULE_DEVICE_TABLE(spi, ade7758_id); static struct spi_driver ade7758_driver = { .driver = { .name = "ade7758", .owner = THIS_MODULE, }, .probe = ade7758_probe, .remove = __devexit_p(ade7758_remove), .id_table = ade7758_id, }; module_spi_driver(ade7758_driver); MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>"); MODULE_DESCRIPTION("Analog Devices ADE7758 Polyphase Multifunction Energy Metering IC Driver"); 
MODULE_LICENSE("GPL v2");
gpl-2.0
GustavoRD78/78Kernel-ZL-233
drivers/staging/iio/resolver/ad2s1210.c
4964
19703
/* * ad2s1210.c support for the ADI Resolver to Digital Converters: AD2S1210 * * Copyright (c) 2010-2010 Analog Devices Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/types.h> #include <linux/mutex.h> #include <linux/device.h> #include <linux/spi/spi.h> #include <linux/slab.h> #include <linux/sysfs.h> #include <linux/delay.h> #include <linux/gpio.h> #include <linux/module.h> #include "../iio.h" #include "../sysfs.h" #include "ad2s1210.h" #define DRV_NAME "ad2s1210" #define AD2S1210_DEF_CONTROL 0x7E #define AD2S1210_MSB_IS_HIGH 0x80 #define AD2S1210_MSB_IS_LOW 0x7F #define AD2S1210_PHASE_LOCK_RANGE_44 0x20 #define AD2S1210_ENABLE_HYSTERESIS 0x10 #define AD2S1210_SET_ENRES1 0x08 #define AD2S1210_SET_ENRES0 0x04 #define AD2S1210_SET_RES1 0x02 #define AD2S1210_SET_RES0 0x01 #define AD2S1210_SET_ENRESOLUTION (AD2S1210_SET_ENRES1 | \ AD2S1210_SET_ENRES0) #define AD2S1210_SET_RESOLUTION (AD2S1210_SET_RES1 | AD2S1210_SET_RES0) #define AD2S1210_REG_POSITION 0x80 #define AD2S1210_REG_VELOCITY 0x82 #define AD2S1210_REG_LOS_THRD 0x88 #define AD2S1210_REG_DOS_OVR_THRD 0x89 #define AD2S1210_REG_DOS_MIS_THRD 0x8A #define AD2S1210_REG_DOS_RST_MAX_THRD 0x8B #define AD2S1210_REG_DOS_RST_MIN_THRD 0x8C #define AD2S1210_REG_LOT_HIGH_THRD 0x8D #define AD2S1210_REG_LOT_LOW_THRD 0x8E #define AD2S1210_REG_EXCIT_FREQ 0x91 #define AD2S1210_REG_CONTROL 0x92 #define AD2S1210_REG_SOFT_RESET 0xF0 #define AD2S1210_REG_FAULT 0xFF /* pin SAMPLE, A0, A1, RES0, RES1, is controlled by driver */ #define AD2S1210_SAA 3 #define AD2S1210_PN (AD2S1210_SAA + AD2S1210_RES) #define AD2S1210_MIN_CLKIN 6144000 #define AD2S1210_MAX_CLKIN 10240000 #define AD2S1210_MIN_EXCIT 2000 #define AD2S1210_MAX_EXCIT 20000 #define AD2S1210_MIN_FCW 0x4 #define AD2S1210_MAX_FCW 0x50 /* default input clock on serial interface */ #define AD2S1210_DEF_CLKIN 8192000 /* 
clock period in nano second */ #define AD2S1210_DEF_TCK (1000000000/AD2S1210_DEF_CLKIN) #define AD2S1210_DEF_EXCIT 10000 enum ad2s1210_mode { MOD_POS = 0, MOD_VEL, MOD_CONFIG, MOD_RESERVED, }; static const unsigned int ad2s1210_resolution_value[] = { 10, 12, 14, 16 }; struct ad2s1210_state { const struct ad2s1210_platform_data *pdata; struct mutex lock; struct spi_device *sdev; unsigned int fclkin; unsigned int fexcit; bool hysteresis; bool old_data; u8 resolution; enum ad2s1210_mode mode; u8 rx[2] ____cacheline_aligned; u8 tx[2] ____cacheline_aligned; }; static const int ad2s1210_mode_vals[4][2] = { [MOD_POS] = { 0, 0 }, [MOD_VEL] = { 0, 1 }, [MOD_CONFIG] = { 1, 0 }, }; static inline void ad2s1210_set_mode(enum ad2s1210_mode mode, struct ad2s1210_state *st) { gpio_set_value(st->pdata->a[0], ad2s1210_mode_vals[mode][0]); gpio_set_value(st->pdata->a[1], ad2s1210_mode_vals[mode][1]); st->mode = mode; } /* write 1 bytes (address or data) to the chip */ static int ad2s1210_config_write(struct ad2s1210_state *st, u8 data) { int ret; ad2s1210_set_mode(MOD_CONFIG, st); st->tx[0] = data; ret = spi_write(st->sdev, st->tx, 1); if (ret < 0) return ret; st->old_data = true; return 0; } /* read value from one of the registers */ static int ad2s1210_config_read(struct ad2s1210_state *st, unsigned char address) { struct spi_transfer xfer = { .len = 2, .rx_buf = st->rx, .tx_buf = st->tx, }; struct spi_message msg; int ret = 0; ad2s1210_set_mode(MOD_CONFIG, st); spi_message_init(&msg); spi_message_add_tail(&xfer, &msg); st->tx[0] = address | AD2S1210_MSB_IS_HIGH; st->tx[1] = AD2S1210_REG_FAULT; ret = spi_sync(st->sdev, &msg); if (ret < 0) return ret; st->old_data = true; return st->rx[1]; } static inline int ad2s1210_update_frequency_control_word(struct ad2s1210_state *st) { int ret; unsigned char fcw; fcw = (unsigned char)(st->fexcit * (1 << 15) / st->fclkin); if (fcw < AD2S1210_MIN_FCW || fcw > AD2S1210_MAX_FCW) { pr_err("ad2s1210: FCW out of range\n"); return -ERANGE; } ret = 
ad2s1210_config_write(st, AD2S1210_REG_EXCIT_FREQ); if (ret < 0) return ret; return ad2s1210_config_write(st, fcw); } static unsigned char ad2s1210_read_resolution_pin(struct ad2s1210_state *st) { return ad2s1210_resolution_value[ (gpio_get_value(st->pdata->res[0]) << 1) | gpio_get_value(st->pdata->res[1])]; } static const int ad2s1210_res_pins[4][2] = { { 0, 0 }, {0, 1}, {1, 0}, {1, 1} }; static inline void ad2s1210_set_resolution_pin(struct ad2s1210_state *st) { gpio_set_value(st->pdata->res[0], ad2s1210_res_pins[(st->resolution - 10)/2][0]); gpio_set_value(st->pdata->res[1], ad2s1210_res_pins[(st->resolution - 10)/2][1]); } static inline int ad2s1210_soft_reset(struct ad2s1210_state *st) { int ret; ret = ad2s1210_config_write(st, AD2S1210_REG_SOFT_RESET); if (ret < 0) return ret; return ad2s1210_config_write(st, 0x0); } static ssize_t ad2s1210_store_softreset(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct ad2s1210_state *st = iio_priv(dev_get_drvdata(dev)); int ret; mutex_lock(&st->lock); ret = ad2s1210_soft_reset(st); mutex_unlock(&st->lock); return ret < 0 ? ret : len; } static ssize_t ad2s1210_show_fclkin(struct device *dev, struct device_attribute *attr, char *buf) { struct ad2s1210_state *st = iio_priv(dev_get_drvdata(dev)); return sprintf(buf, "%d\n", st->fclkin); } static ssize_t ad2s1210_store_fclkin(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct ad2s1210_state *st = iio_priv(dev_get_drvdata(dev)); unsigned long fclkin; int ret; ret = strict_strtoul(buf, 10, &fclkin); if (ret) return ret; if (fclkin < AD2S1210_MIN_CLKIN || fclkin > AD2S1210_MAX_CLKIN) { pr_err("ad2s1210: fclkin out of range\n"); return -EINVAL; } mutex_lock(&st->lock); st->fclkin = fclkin; ret = ad2s1210_update_frequency_control_word(st); if (ret < 0) goto error_ret; ret = ad2s1210_soft_reset(st); error_ret: mutex_unlock(&st->lock); return ret < 0 ? 
ret : len; } static ssize_t ad2s1210_show_fexcit(struct device *dev, struct device_attribute *attr, char *buf) { struct ad2s1210_state *st = iio_priv(dev_get_drvdata(dev)); return sprintf(buf, "%d\n", st->fexcit); } static ssize_t ad2s1210_store_fexcit(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct ad2s1210_state *st = iio_priv(dev_get_drvdata(dev)); unsigned long fexcit; int ret; ret = strict_strtoul(buf, 10, &fexcit); if (ret < 0) return ret; if (fexcit < AD2S1210_MIN_EXCIT || fexcit > AD2S1210_MAX_EXCIT) { pr_err("ad2s1210: excitation frequency out of range\n"); return -EINVAL; } mutex_lock(&st->lock); st->fexcit = fexcit; ret = ad2s1210_update_frequency_control_word(st); if (ret < 0) goto error_ret; ret = ad2s1210_soft_reset(st); error_ret: mutex_unlock(&st->lock); return ret < 0 ? ret : len; } static ssize_t ad2s1210_show_control(struct device *dev, struct device_attribute *attr, char *buf) { struct ad2s1210_state *st = iio_priv(dev_get_drvdata(dev)); int ret; mutex_lock(&st->lock); ret = ad2s1210_config_read(st, AD2S1210_REG_CONTROL); mutex_unlock(&st->lock); return ret < 0 ? 
ret : sprintf(buf, "0x%x\n", ret); } static ssize_t ad2s1210_store_control(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct ad2s1210_state *st = iio_priv(dev_get_drvdata(dev)); unsigned long udata; unsigned char data; int ret; ret = strict_strtoul(buf, 16, &udata); if (ret) return -EINVAL; mutex_lock(&st->lock); ret = ad2s1210_config_write(st, AD2S1210_REG_CONTROL); if (ret < 0) goto error_ret; data = udata & AD2S1210_MSB_IS_LOW; ret = ad2s1210_config_write(st, data); if (ret < 0) goto error_ret; ret = ad2s1210_config_read(st, AD2S1210_REG_CONTROL); if (ret < 0) goto error_ret; if (ret & AD2S1210_MSB_IS_HIGH) { ret = -EIO; pr_err("ad2s1210: write control register fail\n"); goto error_ret; } st->resolution = ad2s1210_resolution_value[data & AD2S1210_SET_RESOLUTION]; if (st->pdata->gpioin) { data = ad2s1210_read_resolution_pin(st); if (data != st->resolution) pr_warning("ad2s1210: resolution settings not match\n"); } else ad2s1210_set_resolution_pin(st); ret = len; st->hysteresis = !!(data & AD2S1210_ENABLE_HYSTERESIS); error_ret: mutex_unlock(&st->lock); return ret; } static ssize_t ad2s1210_show_resolution(struct device *dev, struct device_attribute *attr, char *buf) { struct ad2s1210_state *st = iio_priv(dev_get_drvdata(dev)); return sprintf(buf, "%d\n", st->resolution); } static ssize_t ad2s1210_store_resolution(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct ad2s1210_state *st = iio_priv(dev_get_drvdata(dev)); unsigned char data; unsigned long udata; int ret; ret = strict_strtoul(buf, 10, &udata); if (ret || udata < 10 || udata > 16) { pr_err("ad2s1210: resolution out of range\n"); return -EINVAL; } mutex_lock(&st->lock); ret = ad2s1210_config_read(st, AD2S1210_REG_CONTROL); if (ret < 0) goto error_ret; data = ret; data &= ~AD2S1210_SET_RESOLUTION; data |= (udata - 10) >> 1; ret = ad2s1210_config_write(st, AD2S1210_REG_CONTROL); if (ret < 0) goto error_ret; ret = 
ad2s1210_config_write(st, data & AD2S1210_MSB_IS_LOW); if (ret < 0) goto error_ret; ret = ad2s1210_config_read(st, AD2S1210_REG_CONTROL); if (ret < 0) goto error_ret; data = ret; if (data & AD2S1210_MSB_IS_HIGH) { ret = -EIO; pr_err("ad2s1210: setting resolution fail\n"); goto error_ret; } st->resolution = ad2s1210_resolution_value[data & AD2S1210_SET_RESOLUTION]; if (st->pdata->gpioin) { data = ad2s1210_read_resolution_pin(st); if (data != st->resolution) pr_warning("ad2s1210: resolution settings not match\n"); } else ad2s1210_set_resolution_pin(st); ret = len; error_ret: mutex_unlock(&st->lock); return ret; } /* read the fault register since last sample */ static ssize_t ad2s1210_show_fault(struct device *dev, struct device_attribute *attr, char *buf) { struct ad2s1210_state *st = iio_priv(dev_get_drvdata(dev)); int ret; mutex_lock(&st->lock); ret = ad2s1210_config_read(st, AD2S1210_REG_FAULT); mutex_unlock(&st->lock); return ret ? ret : sprintf(buf, "0x%x\n", ret); } static ssize_t ad2s1210_clear_fault(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct ad2s1210_state *st = iio_priv(dev_get_drvdata(dev)); int ret; mutex_lock(&st->lock); gpio_set_value(st->pdata->sample, 0); /* delay (2 * tck + 20) nano seconds */ udelay(1); gpio_set_value(st->pdata->sample, 1); ret = ad2s1210_config_read(st, AD2S1210_REG_FAULT); if (ret < 0) goto error_ret; gpio_set_value(st->pdata->sample, 0); gpio_set_value(st->pdata->sample, 1); error_ret: mutex_unlock(&st->lock); return ret < 0 ? ret : len; } static ssize_t ad2s1210_show_reg(struct device *dev, struct device_attribute *attr, char *buf) { struct ad2s1210_state *st = iio_priv(dev_get_drvdata(dev)); struct iio_dev_attr *iattr = to_iio_dev_attr(attr); int ret; mutex_lock(&st->lock); ret = ad2s1210_config_read(st, iattr->address); mutex_unlock(&st->lock); return ret < 0 ? 
ret : sprintf(buf, "%d\n", ret); } static ssize_t ad2s1210_store_reg(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct ad2s1210_state *st = iio_priv(dev_get_drvdata(dev)); unsigned long data; int ret; struct iio_dev_attr *iattr = to_iio_dev_attr(attr); ret = strict_strtoul(buf, 10, &data); if (ret) return -EINVAL; mutex_lock(&st->lock); ret = ad2s1210_config_write(st, iattr->address); if (ret < 0) goto error_ret; ret = ad2s1210_config_write(st, data & AD2S1210_MSB_IS_LOW); error_ret: mutex_unlock(&st->lock); return ret < 0 ? ret : len; } static int ad2s1210_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long m) { struct ad2s1210_state *st = iio_priv(indio_dev); bool negative; int ret = 0; u16 pos; s16 vel; mutex_lock(&st->lock); gpio_set_value(st->pdata->sample, 0); /* delay (6 * tck + 20) nano seconds */ udelay(1); switch (chan->type) { case IIO_ANGL: ad2s1210_set_mode(MOD_POS, st); break; case IIO_ANGL_VEL: ad2s1210_set_mode(MOD_VEL, st); break; default: ret = -EINVAL; break; } if (ret < 0) goto error_ret; ret = spi_read(st->sdev, st->rx, 2); if (ret < 0) goto error_ret; switch (chan->type) { case IIO_ANGL: pos = be16_to_cpup((u16 *)st->rx); if (st->hysteresis) pos >>= 16 - st->resolution; *val = pos; ret = IIO_VAL_INT; break; case IIO_ANGL_VEL: negative = st->rx[0] & 0x80; vel = be16_to_cpup((s16 *)st->rx); vel >>= 16 - st->resolution; if (vel & 0x8000) { negative = (0xffff >> st->resolution) << st->resolution; vel |= negative; } *val = vel; ret = IIO_VAL_INT; break; default: mutex_unlock(&st->lock); return -EINVAL; } error_ret: gpio_set_value(st->pdata->sample, 1); /* delay (2 * tck + 20) nano seconds */ udelay(1); mutex_unlock(&st->lock); return ret; } static IIO_DEVICE_ATTR(reset, S_IWUSR, NULL, ad2s1210_store_softreset, 0); static IIO_DEVICE_ATTR(fclkin, S_IRUGO | S_IWUSR, ad2s1210_show_fclkin, ad2s1210_store_fclkin, 0); static IIO_DEVICE_ATTR(fexcit, S_IRUGO | S_IWUSR, 
ad2s1210_show_fexcit, ad2s1210_store_fexcit, 0); static IIO_DEVICE_ATTR(control, S_IRUGO | S_IWUSR, ad2s1210_show_control, ad2s1210_store_control, 0); static IIO_DEVICE_ATTR(bits, S_IRUGO | S_IWUSR, ad2s1210_show_resolution, ad2s1210_store_resolution, 0); static IIO_DEVICE_ATTR(fault, S_IRUGO | S_IWUSR, ad2s1210_show_fault, ad2s1210_clear_fault, 0); static IIO_DEVICE_ATTR(los_thrd, S_IRUGO | S_IWUSR, ad2s1210_show_reg, ad2s1210_store_reg, AD2S1210_REG_LOS_THRD); static IIO_DEVICE_ATTR(dos_ovr_thrd, S_IRUGO | S_IWUSR, ad2s1210_show_reg, ad2s1210_store_reg, AD2S1210_REG_DOS_OVR_THRD); static IIO_DEVICE_ATTR(dos_mis_thrd, S_IRUGO | S_IWUSR, ad2s1210_show_reg, ad2s1210_store_reg, AD2S1210_REG_DOS_MIS_THRD); static IIO_DEVICE_ATTR(dos_rst_max_thrd, S_IRUGO | S_IWUSR, ad2s1210_show_reg, ad2s1210_store_reg, AD2S1210_REG_DOS_RST_MAX_THRD); static IIO_DEVICE_ATTR(dos_rst_min_thrd, S_IRUGO | S_IWUSR, ad2s1210_show_reg, ad2s1210_store_reg, AD2S1210_REG_DOS_RST_MIN_THRD); static IIO_DEVICE_ATTR(lot_high_thrd, S_IRUGO | S_IWUSR, ad2s1210_show_reg, ad2s1210_store_reg, AD2S1210_REG_LOT_HIGH_THRD); static IIO_DEVICE_ATTR(lot_low_thrd, S_IRUGO | S_IWUSR, ad2s1210_show_reg, ad2s1210_store_reg, AD2S1210_REG_LOT_LOW_THRD); static struct iio_chan_spec ad2s1210_channels[] = { { .type = IIO_ANGL, .indexed = 1, .channel = 0, }, { .type = IIO_ANGL_VEL, .indexed = 1, .channel = 0, } }; static struct attribute *ad2s1210_attributes[] = { &iio_dev_attr_reset.dev_attr.attr, &iio_dev_attr_fclkin.dev_attr.attr, &iio_dev_attr_fexcit.dev_attr.attr, &iio_dev_attr_control.dev_attr.attr, &iio_dev_attr_bits.dev_attr.attr, &iio_dev_attr_fault.dev_attr.attr, &iio_dev_attr_los_thrd.dev_attr.attr, &iio_dev_attr_dos_ovr_thrd.dev_attr.attr, &iio_dev_attr_dos_mis_thrd.dev_attr.attr, &iio_dev_attr_dos_rst_max_thrd.dev_attr.attr, &iio_dev_attr_dos_rst_min_thrd.dev_attr.attr, &iio_dev_attr_lot_high_thrd.dev_attr.attr, &iio_dev_attr_lot_low_thrd.dev_attr.attr, NULL, }; static const struct attribute_group 
ad2s1210_attribute_group = { .attrs = ad2s1210_attributes, }; static int __devinit ad2s1210_initial(struct ad2s1210_state *st) { unsigned char data; int ret; mutex_lock(&st->lock); if (st->pdata->gpioin) st->resolution = ad2s1210_read_resolution_pin(st); else ad2s1210_set_resolution_pin(st); ret = ad2s1210_config_write(st, AD2S1210_REG_CONTROL); if (ret < 0) goto error_ret; data = AD2S1210_DEF_CONTROL & ~(AD2S1210_SET_RESOLUTION); data |= (st->resolution - 10) >> 1; ret = ad2s1210_config_write(st, data); if (ret < 0) goto error_ret; ret = ad2s1210_config_read(st, AD2S1210_REG_CONTROL); if (ret < 0) goto error_ret; if (ret & AD2S1210_MSB_IS_HIGH) { ret = -EIO; goto error_ret; } ret = ad2s1210_update_frequency_control_word(st); if (ret < 0) goto error_ret; ret = ad2s1210_soft_reset(st); error_ret: mutex_unlock(&st->lock); return ret; } static const struct iio_info ad2s1210_info = { .read_raw = &ad2s1210_read_raw, .attrs = &ad2s1210_attribute_group, .driver_module = THIS_MODULE, }; static int ad2s1210_setup_gpios(struct ad2s1210_state *st) { unsigned long flags = st->pdata->gpioin ? GPIOF_DIR_IN : GPIOF_DIR_OUT; struct gpio ad2s1210_gpios[] = { { st->pdata->sample, GPIOF_DIR_IN, "sample" }, { st->pdata->a[0], flags, "a0" }, { st->pdata->a[1], flags, "a1" }, { st->pdata->res[0], flags, "res0" }, { st->pdata->res[0], flags, "res1" }, }; return gpio_request_array(ad2s1210_gpios, ARRAY_SIZE(ad2s1210_gpios)); } static void ad2s1210_free_gpios(struct ad2s1210_state *st) { unsigned long flags = st->pdata->gpioin ? 
GPIOF_DIR_IN : GPIOF_DIR_OUT; struct gpio ad2s1210_gpios[] = { { st->pdata->sample, GPIOF_DIR_IN, "sample" }, { st->pdata->a[0], flags, "a0" }, { st->pdata->a[1], flags, "a1" }, { st->pdata->res[0], flags, "res0" }, { st->pdata->res[0], flags, "res1" }, }; gpio_free_array(ad2s1210_gpios, ARRAY_SIZE(ad2s1210_gpios)); } static int __devinit ad2s1210_probe(struct spi_device *spi) { struct iio_dev *indio_dev; struct ad2s1210_state *st; int ret; if (spi->dev.platform_data == NULL) return -EINVAL; indio_dev = iio_allocate_device(sizeof(*st)); if (indio_dev == NULL) { ret = -ENOMEM; goto error_ret; } st = iio_priv(indio_dev); st->pdata = spi->dev.platform_data; ret = ad2s1210_setup_gpios(st); if (ret < 0) goto error_free_dev; spi_set_drvdata(spi, indio_dev); mutex_init(&st->lock); st->sdev = spi; st->hysteresis = true; st->mode = MOD_CONFIG; st->resolution = 12; st->fexcit = AD2S1210_DEF_EXCIT; indio_dev->dev.parent = &spi->dev; indio_dev->info = &ad2s1210_info; indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->channels = ad2s1210_channels; indio_dev->num_channels = ARRAY_SIZE(ad2s1210_channels); indio_dev->name = spi_get_device_id(spi)->name; ret = iio_device_register(indio_dev); if (ret) goto error_free_gpios; st->fclkin = spi->max_speed_hz; spi->mode = SPI_MODE_3; spi_setup(spi); ad2s1210_initial(st); return 0; error_free_gpios: ad2s1210_free_gpios(st); error_free_dev: iio_free_device(indio_dev); error_ret: return ret; } static int __devexit ad2s1210_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); iio_device_unregister(indio_dev); ad2s1210_free_gpios(iio_priv(indio_dev)); iio_free_device(indio_dev); return 0; } static const struct spi_device_id ad2s1210_id[] = { { "ad2s1210" }, {} }; MODULE_DEVICE_TABLE(spi, ad2s1210_id); static struct spi_driver ad2s1210_driver = { .driver = { .name = DRV_NAME, .owner = THIS_MODULE, }, .probe = ad2s1210_probe, .remove = __devexit_p(ad2s1210_remove), .id_table = ad2s1210_id, }; 
module_spi_driver(ad2s1210_driver); MODULE_AUTHOR("Graff Yang <graff.yang@gmail.com>"); MODULE_DESCRIPTION("Analog Devices AD2S1210 Resolver to Digital SPI driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
obek/linux-sunxi
arch/arm/mach-w90x900/cpu.c
6244
5679
/* * linux/arch/arm/mach-w90x900/cpu.c * * Copyright (c) 2009 Nuvoton corporation. * * Wan ZongShun <mcuos.com@gmail.com> * * NUC900 series cpu common support * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation;version 2 of the License. * */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/timer.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/serial_8250.h> #include <linux/delay.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <asm/irq.h> #include <asm/system_misc.h> #include <mach/hardware.h> #include <mach/regs-serial.h> #include <mach/regs-clock.h> #include <mach/regs-ebi.h> #include <mach/regs-timer.h> #include "cpu.h" #include "clock.h" #include "nuc9xx.h" /* Initial IO mappings */ static struct map_desc nuc900_iodesc[] __initdata = { IODESC_ENT(IRQ), IODESC_ENT(GCR), IODESC_ENT(UART), IODESC_ENT(TIMER), IODESC_ENT(EBI), IODESC_ENT(GPIO), }; /* Initial clock declarations. 
*/ static DEFINE_CLK(lcd, 0); static DEFINE_CLK(audio, 1); static DEFINE_CLK(fmi, 4); static DEFINE_SUBCLK(ms, 0); static DEFINE_SUBCLK(sd, 1); static DEFINE_CLK(dmac, 5); static DEFINE_CLK(atapi, 6); static DEFINE_CLK(emc, 7); static DEFINE_SUBCLK(rmii, 2); static DEFINE_CLK(usbd, 8); static DEFINE_CLK(usbh, 9); static DEFINE_CLK(g2d, 10); static DEFINE_CLK(pwm, 18); static DEFINE_CLK(ps2, 24); static DEFINE_CLK(kpi, 25); static DEFINE_CLK(wdt, 26); static DEFINE_CLK(gdma, 27); static DEFINE_CLK(adc, 28); static DEFINE_CLK(usi, 29); static DEFINE_CLK(ext, 0); static DEFINE_CLK(timer0, 19); static DEFINE_CLK(timer1, 20); static DEFINE_CLK(timer2, 21); static DEFINE_CLK(timer3, 22); static DEFINE_CLK(timer4, 23); static struct clk_lookup nuc900_clkregs[] = { DEF_CLKLOOK(&clk_lcd, "nuc900-lcd", NULL), DEF_CLKLOOK(&clk_audio, "nuc900-ac97", NULL), DEF_CLKLOOK(&clk_fmi, "nuc900-fmi", NULL), DEF_CLKLOOK(&clk_ms, "nuc900-fmi", "MS"), DEF_CLKLOOK(&clk_sd, "nuc900-fmi", "SD"), DEF_CLKLOOK(&clk_dmac, "nuc900-dmac", NULL), DEF_CLKLOOK(&clk_atapi, "nuc900-atapi", NULL), DEF_CLKLOOK(&clk_emc, "nuc900-emc", NULL), DEF_CLKLOOK(&clk_rmii, "nuc900-emc", "RMII"), DEF_CLKLOOK(&clk_usbd, "nuc900-usbd", NULL), DEF_CLKLOOK(&clk_usbh, "nuc900-usbh", NULL), DEF_CLKLOOK(&clk_g2d, "nuc900-g2d", NULL), DEF_CLKLOOK(&clk_pwm, "nuc900-pwm", NULL), DEF_CLKLOOK(&clk_ps2, "nuc900-ps2", NULL), DEF_CLKLOOK(&clk_kpi, "nuc900-kpi", NULL), DEF_CLKLOOK(&clk_wdt, "nuc900-wdt", NULL), DEF_CLKLOOK(&clk_gdma, "nuc900-gdma", NULL), DEF_CLKLOOK(&clk_adc, "nuc900-ts", NULL), DEF_CLKLOOK(&clk_usi, "nuc900-spi", NULL), DEF_CLKLOOK(&clk_ext, NULL, "ext"), DEF_CLKLOOK(&clk_timer0, NULL, "timer0"), DEF_CLKLOOK(&clk_timer1, NULL, "timer1"), DEF_CLKLOOK(&clk_timer2, NULL, "timer2"), DEF_CLKLOOK(&clk_timer3, NULL, "timer3"), DEF_CLKLOOK(&clk_timer4, NULL, "timer4"), }; /* Initial serial platform data */ struct plat_serial8250_port nuc900_uart_data[] = { NUC900_8250PORT(UART0), {}, }; struct platform_device 
nuc900_serial_device = { .name = "serial8250", .id = PLAT8250_DEV_PLATFORM, .dev = { .platform_data = nuc900_uart_data, }, }; /*Set NUC900 series cpu frequence*/ static int __init nuc900_set_clkval(unsigned int cpufreq) { unsigned int pllclk, ahbclk, apbclk, val; pllclk = 0; ahbclk = 0; apbclk = 0; switch (cpufreq) { case 66: pllclk = PLL_66MHZ; ahbclk = AHB_CPUCLK_1_1; apbclk = APB_AHB_1_2; break; case 100: pllclk = PLL_100MHZ; ahbclk = AHB_CPUCLK_1_1; apbclk = APB_AHB_1_2; break; case 120: pllclk = PLL_120MHZ; ahbclk = AHB_CPUCLK_1_2; apbclk = APB_AHB_1_2; break; case 166: pllclk = PLL_166MHZ; ahbclk = AHB_CPUCLK_1_2; apbclk = APB_AHB_1_2; break; case 200: pllclk = PLL_200MHZ; ahbclk = AHB_CPUCLK_1_2; apbclk = APB_AHB_1_2; break; } __raw_writel(pllclk, REG_PLLCON0); val = __raw_readl(REG_CLKDIV); val &= ~(0x03 << 24 | 0x03 << 26); val |= (ahbclk << 24 | apbclk << 26); __raw_writel(val, REG_CLKDIV); return 0; } static int __init nuc900_set_cpufreq(char *str) { unsigned long cpufreq, val; if (!*str) return 0; strict_strtoul(str, 0, &cpufreq); nuc900_clock_source(NULL, "ext"); nuc900_set_clkval(cpufreq); mdelay(1); val = __raw_readl(REG_CKSKEW); val &= ~0xff; val |= DEFAULTSKEW; __raw_writel(val, REG_CKSKEW); nuc900_clock_source(NULL, "pll0"); return 1; } __setup("cpufreq=", nuc900_set_cpufreq); /*Init NUC900 evb io*/ void __init nuc900_map_io(struct map_desc *mach_desc, int mach_size) { unsigned long idcode = 0x0; iotable_init(mach_desc, mach_size); iotable_init(nuc900_iodesc, ARRAY_SIZE(nuc900_iodesc)); idcode = __raw_readl(NUC900PDID); if (idcode == NUC910_CPUID) printk(KERN_INFO "CPU type 0x%08lx is NUC910\n", idcode); else if (idcode == NUC920_CPUID) printk(KERN_INFO "CPU type 0x%08lx is NUC920\n", idcode); else if (idcode == NUC950_CPUID) printk(KERN_INFO "CPU type 0x%08lx is NUC950\n", idcode); else if (idcode == NUC960_CPUID) printk(KERN_INFO "CPU type 0x%08lx is NUC960\n", idcode); } /*Init NUC900 clock*/ void __init nuc900_init_clocks(void) { 
clkdev_add_table(nuc900_clkregs, ARRAY_SIZE(nuc900_clkregs)); } #define WTCR (TMR_BA + 0x1C) #define WTCLK (1 << 10) #define WTE (1 << 7) #define WTRE (1 << 1) void nuc9xx_restart(char mode, const char *cmd) { if (mode == 's') { /* Jump into ROM at address 0 */ soft_restart(0); } else { __raw_writel(WTE | WTRE | WTCLK, WTCR); } }
gpl-2.0
dzo/android_kernel_huawei_u8800-1
arch/ia64/kvm/vtlb.c
7012
14519
/* * vtlb.c: guest virtual tlb handling module. * Copyright (c) 2004, Intel Corporation. * Yaozu Dong (Eddie Dong) <Eddie.dong@intel.com> * Xuefei Xu (Anthony Xu) <anthony.xu@intel.com> * * Copyright (c) 2007, Intel Corporation. * Xuefei Xu (Anthony Xu) <anthony.xu@intel.com> * Xiantao Zhang <xiantao.zhang@intel.com> * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * */ #include "vcpu.h" #include <linux/rwsem.h> #include <asm/tlb.h> /* * Check to see if the address rid:va is translated by the TLB */ static int __is_tr_translated(struct thash_data *trp, u64 rid, u64 va) { return ((trp->p) && (trp->rid == rid) && ((va-trp->vadr) < PSIZE(trp->ps))); } /* * Only for GUEST TR format. 
*/ static int __is_tr_overlap(struct thash_data *trp, u64 rid, u64 sva, u64 eva) { u64 sa1, ea1; if (!trp->p || trp->rid != rid) return 0; sa1 = trp->vadr; ea1 = sa1 + PSIZE(trp->ps) - 1; eva -= 1; if ((sva > ea1) || (sa1 > eva)) return 0; else return 1; } void machine_tlb_purge(u64 va, u64 ps) { ia64_ptcl(va, ps << 2); } void local_flush_tlb_all(void) { int i, j; unsigned long flags, count0, count1; unsigned long stride0, stride1, addr; addr = current_vcpu->arch.ptce_base; count0 = current_vcpu->arch.ptce_count[0]; count1 = current_vcpu->arch.ptce_count[1]; stride0 = current_vcpu->arch.ptce_stride[0]; stride1 = current_vcpu->arch.ptce_stride[1]; local_irq_save(flags); for (i = 0; i < count0; ++i) { for (j = 0; j < count1; ++j) { ia64_ptce(addr); addr += stride1; } addr += stride0; } local_irq_restore(flags); ia64_srlz_i(); /* srlz.i implies srlz.d */ } int vhpt_enabled(struct kvm_vcpu *vcpu, u64 vadr, enum vhpt_ref ref) { union ia64_rr vrr; union ia64_pta vpta; struct ia64_psr vpsr; vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr); vrr.val = vcpu_get_rr(vcpu, vadr); vpta.val = vcpu_get_pta(vcpu); if (vrr.ve & vpta.ve) { switch (ref) { case DATA_REF: case NA_REF: return vpsr.dt; case INST_REF: return vpsr.dt && vpsr.it && vpsr.ic; case RSE_REF: return vpsr.dt && vpsr.rt; } } return 0; } struct thash_data *vsa_thash(union ia64_pta vpta, u64 va, u64 vrr, u64 *tag) { u64 index, pfn, rid, pfn_bits; pfn_bits = vpta.size - 5 - 8; pfn = REGION_OFFSET(va) >> _REGION_PAGE_SIZE(vrr); rid = _REGION_ID(vrr); index = ((rid & 0xff) << pfn_bits)|(pfn & ((1UL << pfn_bits) - 1)); *tag = ((rid >> 8) & 0xffff) | ((pfn >> pfn_bits) << 16); return (struct thash_data *)((vpta.base << PTA_BASE_SHIFT) + (index << 5)); } struct thash_data *__vtr_lookup(struct kvm_vcpu *vcpu, u64 va, int type) { struct thash_data *trp; int i; u64 rid; rid = vcpu_get_rr(vcpu, va); rid = rid & RR_RID_MASK; if (type == D_TLB) { if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) { for (trp = (struct 
thash_data *)&vcpu->arch.dtrs, i = 0; i < NDTRS; i++, trp++) { if (__is_tr_translated(trp, rid, va)) return trp; } } } else { if (vcpu_quick_region_check(vcpu->arch.itr_regions, va)) { for (trp = (struct thash_data *)&vcpu->arch.itrs, i = 0; i < NITRS; i++, trp++) { if (__is_tr_translated(trp, rid, va)) return trp; } } } return NULL; } static void vhpt_insert(u64 pte, u64 itir, u64 ifa, u64 gpte) { union ia64_rr rr; struct thash_data *head; unsigned long ps, gpaddr; ps = itir_ps(itir); rr.val = ia64_get_rr(ifa); gpaddr = ((gpte & _PAGE_PPN_MASK) >> ps << ps) | (ifa & ((1UL << ps) - 1)); head = (struct thash_data *)ia64_thash(ifa); head->etag = INVALID_TI_TAG; ia64_mf(); head->page_flags = pte & ~PAGE_FLAGS_RV_MASK; head->itir = rr.ps << 2; head->etag = ia64_ttag(ifa); head->gpaddr = gpaddr; } void mark_pages_dirty(struct kvm_vcpu *v, u64 pte, u64 ps) { u64 i, dirty_pages = 1; u64 base_gfn = (pte&_PAGE_PPN_MASK) >> PAGE_SHIFT; vmm_spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa); void *dirty_bitmap = (void *)KVM_MEM_DIRTY_LOG_BASE; dirty_pages <<= ps <= PAGE_SHIFT ? 
0 : ps - PAGE_SHIFT; vmm_spin_lock(lock); for (i = 0; i < dirty_pages; i++) { /* avoid RMW */ if (!test_bit(base_gfn + i, dirty_bitmap)) set_bit(base_gfn + i , dirty_bitmap); } vmm_spin_unlock(lock); } void thash_vhpt_insert(struct kvm_vcpu *v, u64 pte, u64 itir, u64 va, int type) { u64 phy_pte, psr; union ia64_rr mrr; mrr.val = ia64_get_rr(va); phy_pte = translate_phy_pte(&pte, itir, va); if (itir_ps(itir) >= mrr.ps) { vhpt_insert(phy_pte, itir, va, pte); } else { phy_pte &= ~PAGE_FLAGS_RV_MASK; psr = ia64_clear_ic(); ia64_itc(type, va, phy_pte, itir_ps(itir)); paravirt_dv_serialize_data(); ia64_set_psr(psr); } if (!(pte&VTLB_PTE_IO)) mark_pages_dirty(v, pte, itir_ps(itir)); } /* * vhpt lookup */ struct thash_data *vhpt_lookup(u64 va) { struct thash_data *head; u64 tag; head = (struct thash_data *)ia64_thash(va); tag = ia64_ttag(va); if (head->etag == tag) return head; return NULL; } u64 guest_vhpt_lookup(u64 iha, u64 *pte) { u64 ret; struct thash_data *data; data = __vtr_lookup(current_vcpu, iha, D_TLB); if (data != NULL) thash_vhpt_insert(current_vcpu, data->page_flags, data->itir, iha, D_TLB); asm volatile ("rsm psr.ic|psr.i;;" "srlz.d;;" "ld8.s r9=[%1];;" "tnat.nz p6,p7=r9;;" "(p6) mov %0=1;" "(p6) mov r9=r0;" "(p7) extr.u r9=r9,0,53;;" "(p7) mov %0=r0;" "(p7) st8 [%2]=r9;;" "ssm psr.ic;;" "srlz.d;;" "ssm psr.i;;" "srlz.d;;" : "=r"(ret) : "r"(iha), "r"(pte):"memory"); return ret; } /* * purge software guest tlb */ static void vtlb_purge(struct kvm_vcpu *v, u64 va, u64 ps) { struct thash_data *cur; u64 start, curadr, size, psbits, tag, rr_ps, num; union ia64_rr vrr; struct thash_cb *hcb = &v->arch.vtlb; vrr.val = vcpu_get_rr(v, va); psbits = VMX(v, psbits[(va >> 61)]); start = va & ~((1UL << ps) - 1); while (psbits) { curadr = start; rr_ps = __ffs(psbits); psbits &= ~(1UL << rr_ps); num = 1UL << ((ps < rr_ps) ? 
0 : (ps - rr_ps)); size = PSIZE(rr_ps); vrr.ps = rr_ps; while (num) { cur = vsa_thash(hcb->pta, curadr, vrr.val, &tag); if (cur->etag == tag && cur->ps == rr_ps) cur->etag = INVALID_TI_TAG; curadr += size; num--; } } } /* * purge VHPT and machine TLB */ static void vhpt_purge(struct kvm_vcpu *v, u64 va, u64 ps) { struct thash_data *cur; u64 start, size, tag, num; union ia64_rr rr; start = va & ~((1UL << ps) - 1); rr.val = ia64_get_rr(va); size = PSIZE(rr.ps); num = 1UL << ((ps < rr.ps) ? 0 : (ps - rr.ps)); while (num) { cur = (struct thash_data *)ia64_thash(start); tag = ia64_ttag(start); if (cur->etag == tag) cur->etag = INVALID_TI_TAG; start += size; num--; } machine_tlb_purge(va, ps); } /* * Insert an entry into hash TLB or VHPT. * NOTES: * 1: When inserting VHPT to thash, "va" is a must covered * address by the inserted machine VHPT entry. * 2: The format of entry is always in TLB. * 3: The caller need to make sure the new entry will not overlap * with any existed entry. */ void vtlb_insert(struct kvm_vcpu *v, u64 pte, u64 itir, u64 va) { struct thash_data *head; union ia64_rr vrr; u64 tag; struct thash_cb *hcb = &v->arch.vtlb; vrr.val = vcpu_get_rr(v, va); vrr.ps = itir_ps(itir); VMX(v, psbits[va >> 61]) |= (1UL << vrr.ps); head = vsa_thash(hcb->pta, va, vrr.val, &tag); head->page_flags = pte; head->itir = itir; head->etag = tag; } int vtr_find_overlap(struct kvm_vcpu *vcpu, u64 va, u64 ps, int type) { struct thash_data *trp; int i; u64 end, rid; rid = vcpu_get_rr(vcpu, va); rid = rid & RR_RID_MASK; end = va + PSIZE(ps); if (type == D_TLB) { if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) { for (trp = (struct thash_data *)&vcpu->arch.dtrs, i = 0; i < NDTRS; i++, trp++) { if (__is_tr_overlap(trp, rid, va, end)) return i; } } } else { if (vcpu_quick_region_check(vcpu->arch.itr_regions, va)) { for (trp = (struct thash_data *)&vcpu->arch.itrs, i = 0; i < NITRS; i++, trp++) { if (__is_tr_overlap(trp, rid, va, end)) return i; } } } return -1; } /* * Purge 
 * entries in VTLB and VHPT
 */
void thash_purge_entries(struct kvm_vcpu *v, u64 va, u64 ps)
{
	if (vcpu_quick_region_check(v->arch.tc_regions, va))
		vtlb_purge(v, va, ps);
	vhpt_purge(v, va, ps);
}

/* As thash_purge_entries(), but @va is first stripped of its region bits. */
void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps)
{
	u64 old_va = va;
	va = REGION_OFFSET(va);
	if (vcpu_quick_region_check(v->arch.tc_regions, old_va))
		vtlb_purge(v, va, ps);
	vhpt_purge(v, va, ps);
}

/*
 * Translate a guest-physical pte to a machine pte via the P2M table.
 * Sets VTLB_PTE_IO in *pte and returns -1 for non-MMIO I/O pages;
 * otherwise returns the machine pte value.
 */
u64 translate_phy_pte(u64 *pte, u64 itir, u64 va)
{
	u64 ps, ps_mask, paddr, maddr, io_mask;
	union pte_flags phy_pte;

	ps = itir_ps(itir);
	ps_mask = ~((1UL << ps) - 1);
	phy_pte.val = *pte;
	paddr = *pte;
	paddr = ((paddr & _PAGE_PPN_MASK) & ps_mask) | (va & ~ps_mask);
	maddr = kvm_get_mpt_entry(paddr >> PAGE_SHIFT);
	io_mask = maddr & GPFN_IO_MASK;
	if (io_mask && (io_mask != GPFN_PHYS_MMIO)) {
		*pte |= VTLB_PTE_IO;
		return -1;
	}
	maddr = ((maddr & _PAGE_PPN_MASK) & PAGE_MASK) |
					(paddr & ~PAGE_MASK);
	phy_pte.ppn = maddr >> ARCH_PAGE_SHIFT;
	return phy_pte.val;
}

/*
 * Purge overlap TCs and then insert the new entry to emulate itc ops.
 * Notes: Only TC entry can purge and insert.
 */
void thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
						u64 ifa, int type)
{
	u64 ps;
	u64 phy_pte, io_mask, index;
	union ia64_rr vrr, mrr;

	ps = itir_ps(itir);
	vrr.val = vcpu_get_rr(v, ifa);
	mrr.val = ia64_get_rr(ifa);

	index = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
	io_mask = kvm_get_mpt_entry(index) & GPFN_IO_MASK;
	phy_pte = translate_phy_pte(&pte, itir, ifa);

	/* Ensure WB attribute if pte is related to a normal mem page,
	 * which is required by vga acceleration since qemu maps shared
	 * vram buffer with WB.
	 */
	if (!(pte & VTLB_PTE_IO) && ((pte & _PAGE_MA_MASK) != _PAGE_MA_NAT) &&
			io_mask != GPFN_PHYS_MMIO) {
		pte &= ~_PAGE_MA_MASK;
		phy_pte &= ~_PAGE_MA_MASK;
	}

	vtlb_purge(v, ifa, ps);
	vhpt_purge(v, ifa, ps);

	/* Guest TLB only tracks entries the machine VHPT cannot express
	 * directly (size mismatch or I/O). */
	if ((ps != mrr.ps) || (pte & VTLB_PTE_IO)) {
		vtlb_insert(v, pte, itir, ifa);
		vcpu_quick_region_set(VMX(v, tc_regions), ifa);
	}
	if (pte & VTLB_PTE_IO)
		return;

	if (ps >= mrr.ps)
		vhpt_insert(phy_pte, itir, ifa, pte);
	else {
		u64 psr;
		phy_pte &= ~PAGE_FLAGS_RV_MASK;
		psr = ia64_clear_ic();
		ia64_itc(type, ifa, phy_pte, ps);
		paravirt_dv_serialize_data();
		ia64_set_psr(psr);
	}
	if (!(pte&VTLB_PTE_IO))
		mark_pages_dirty(v, pte, ps);
}

/*
 * Purge all TCs or VHPT entries including those in Hash table.
 *
 */
void thash_purge_all(struct kvm_vcpu *v)
{
	int i;
	struct thash_data *head;
	struct thash_cb  *vtlb, *vhpt;
	vtlb = &v->arch.vtlb;
	vhpt = &v->arch.vhpt;

	for (i = 0; i < 8; i++)
		VMX(v, psbits[i]) = 0;

	head = vtlb->hash;
	for (i = 0; i < vtlb->num; i++) {
		head->page_flags = 0;
		head->etag = INVALID_TI_TAG;
		head->itir = 0;
		head->next = 0;
		head++;
	};

	head = vhpt->hash;
	for (i = 0; i < vhpt->num; i++) {
		head->page_flags = 0;
		head->etag = INVALID_TI_TAG;
		head->itir = 0;
		head->next = 0;
		head++;
	};

	local_flush_tlb_all();
}

/*
 * Lookup the hash table and its collision chain to find an entry
 * covering this address rid:va or the entry.
 *
 * INPUT:
 *  in: TLB format for both VHPT & TLB.
 */
struct thash_data *vtlb_lookup(struct kvm_vcpu *v, u64 va, int is_data)
{
	struct thash_data *cch;
	u64 psbits, ps, tag;
	union ia64_rr vrr;

	struct thash_cb *hcb = &v->arch.vtlb;

	/* Translation registers take priority over TC entries. */
	cch = __vtr_lookup(v, va, is_data);
	if (cch)
		return cch;

	if (vcpu_quick_region_check(v->arch.tc_regions, va) == 0)
		return NULL;

	psbits = VMX(v, psbits[(va >> 61)]);
	vrr.val = vcpu_get_rr(v, va);
	while (psbits) {
		ps = __ffs(psbits);
		psbits &= ~(1UL << ps);
		vrr.ps = ps;
		cch = vsa_thash(hcb->pta, va, vrr.val, &tag);
		if (cch->etag == tag && cch->ps == ps)
			return cch;
	}

	return NULL;
}

/*
 * Initialize internal control data before service.
 */
/* Reset every entry of a thash control block and program its PTA. */
void thash_init(struct thash_cb *hcb, u64 sz)
{
	int i;
	struct thash_data *head;

	hcb->pta.val = (unsigned long)hcb->hash;
	hcb->pta.vf = 1;
	hcb->pta.ve = 1;
	hcb->pta.size = sz;
	head = hcb->hash;
	for (i = 0; i < hcb->num; i++) {
		head->page_flags = 0;
		head->itir = 0;
		head->etag = INVALID_TI_TAG;
		head->next = 0;
		head++;
	}
}

/* Read the P2M (guest-pfn to machine) table entry for @gpfn. */
u64 kvm_get_mpt_entry(u64 gpfn)
{
	u64 *base = (u64 *) KVM_P2M_BASE;

	if (gpfn >= (KVM_P2M_SIZE >> 3))
		panic_vm(current_vcpu, "Invalid gpfn =%lx\n", gpfn);

	return *(base + gpfn);
}

/* Machine physical page number bits for a guest pfn. */
u64 kvm_lookup_mpa(u64 gpfn)
{
	u64 maddr;
	maddr = kvm_get_mpt_entry(gpfn);
	return maddr&_PAGE_PPN_MASK;
}

/* Translate a full guest-physical address to a machine address. */
u64 kvm_gpa_to_mpa(u64 gpa)
{
	u64 pte = kvm_lookup_mpa(gpa >> PAGE_SHIFT);
	return (pte >> PAGE_SHIFT << PAGE_SHIFT) | (gpa & ~PAGE_MASK);
}

/*
 * Fetch guest bundle code.
 * INPUT:
 *  gip: guest ip
 *  pbundle: used to return fetched bundle.
 */
int fetch_code(struct kvm_vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle)
{
	u64     gpip = 0;   /* guest physical IP*/
	u64     *vpa;
	struct thash_data    *tlb;
	u64     maddr;

	if (!(VCPU(vcpu, vpsr) & IA64_PSR_IT)) {
		/* I-side physical mode */
		gpip = gip;
	} else {
		tlb = vtlb_lookup(vcpu, gip, I_TLB);
		if (tlb)
			gpip = (tlb->ppn >> (tlb->ps - 12) << tlb->ps) |
				(gip & (PSIZE(tlb->ps) - 1));
	}
	if (gpip) {
		maddr = kvm_gpa_to_mpa(gpip);
	} else {
		/* No guest mapping; fall back to the machine VHPT and
		 * purge the stale ITLB entry on a miss. */
		tlb = vhpt_lookup(gip);
		if (tlb == NULL) {
			ia64_ptcl(gip, ARCH_PAGE_SHIFT << 2);
			return IA64_FAULT;
		}
		maddr = (tlb->ppn >> (tlb->ps - 12) << tlb->ps)
					| (gip & (PSIZE(tlb->ps) - 1));
	}
	vpa = (u64 *)__kvm_va(maddr);

	pbundle->i64[0] = *vpa++;
	pbundle->i64[1] = *vpa;

	return IA64_NO_FAULT;
}

/* Size the per-vcpu VHPT and make it the hardware walker's table. */
void kvm_init_vhpt(struct kvm_vcpu *v)
{
	v->arch.vhpt.num = VHPT_NUM_ENTRIES;
	thash_init(&v->arch.vhpt, VHPT_SHIFT);
	ia64_set_pta(v->arch.vhpt.pta.val);
	/*Enable VHPT here?*/
}

/* Size and reset the per-vcpu software guest TLB. */
void kvm_init_vtlb(struct kvm_vcpu *v)
{
	v->arch.vtlb.num = VTLB_NUM_ENTRIES;
	thash_init(&v->arch.vtlb, VTLB_SHIFT);
}
gpl-2.0
Spartonos/android_kernel_motorola_falcon_umts
drivers/platform/x86/classmate-laptop.c
8036
17418
/*
 * Copyright (C) 2009 Thadeu Lima de Souza Cascardo <cascardo@holoscopio.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <acpi/acpi_drivers.h>
#include <linux/backlight.h>
#include <linux/input.h>
#include <linux/rfkill.h>

MODULE_LICENSE("GPL");

/* Per-device accelerometer state: the sensitivity exposed via sysfs. */
struct cmpc_accel {
	int sensitivity;
};

#define CMPC_ACCEL_SENSITIVITY_DEFAULT		5

#define CMPC_ACCEL_HID		"ACCE0000"
#define CMPC_TABLET_HID		"TBLT0000"
#define CMPC_IPML_HID	"IPML200"
#define CMPC_KEYS_HID		"FnBT0000"

/*
 * Generic input device code.
 */

/* Hook used to declare an input device's capabilities before registration. */
typedef void (*input_device_init)(struct input_dev *dev);

/*
 * Allocate, set up and register an input device for an ACPI device, and
 * stash it in the ACPI device's drvdata so notify handlers can find it.
 * Returns 0 on success or a negative errno.
 */
static int cmpc_add_acpi_notify_device(struct acpi_device *acpi, char *name,
				       input_device_init idev_init)
{
	struct input_dev *inputdev;
	int error;

	inputdev = input_allocate_device();
	if (!inputdev)
		return -ENOMEM;

	inputdev->name = name;
	inputdev->dev.parent = &acpi->dev;
	idev_init(inputdev);

	error = input_register_device(inputdev);
	if (error) {
		input_free_device(inputdev);
		return error;
	}

	dev_set_drvdata(&acpi->dev, inputdev);
	return 0;
}

/* Counterpart of cmpc_add_acpi_notify_device(): unregister the input dev. */
static int cmpc_remove_acpi_notify_device(struct acpi_device *acpi)
{
	struct input_dev *inputdev = dev_get_drvdata(&acpi->dev);

	input_unregister_device(inputdev);
	return 0;
}

/*
 * Accelerometer code.
*/ static acpi_status cmpc_start_accel(acpi_handle handle) { union acpi_object param[2]; struct acpi_object_list input; acpi_status status; param[0].type = ACPI_TYPE_INTEGER; param[0].integer.value = 0x3; param[1].type = ACPI_TYPE_INTEGER; input.count = 2; input.pointer = param; status = acpi_evaluate_object(handle, "ACMD", &input, NULL); return status; } static acpi_status cmpc_stop_accel(acpi_handle handle) { union acpi_object param[2]; struct acpi_object_list input; acpi_status status; param[0].type = ACPI_TYPE_INTEGER; param[0].integer.value = 0x4; param[1].type = ACPI_TYPE_INTEGER; input.count = 2; input.pointer = param; status = acpi_evaluate_object(handle, "ACMD", &input, NULL); return status; } static acpi_status cmpc_accel_set_sensitivity(acpi_handle handle, int val) { union acpi_object param[2]; struct acpi_object_list input; param[0].type = ACPI_TYPE_INTEGER; param[0].integer.value = 0x02; param[1].type = ACPI_TYPE_INTEGER; param[1].integer.value = val; input.count = 2; input.pointer = param; return acpi_evaluate_object(handle, "ACMD", &input, NULL); } static acpi_status cmpc_get_accel(acpi_handle handle, unsigned char *x, unsigned char *y, unsigned char *z) { union acpi_object param[2]; struct acpi_object_list input; struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, 0 }; unsigned char *locs; acpi_status status; param[0].type = ACPI_TYPE_INTEGER; param[0].integer.value = 0x01; param[1].type = ACPI_TYPE_INTEGER; input.count = 2; input.pointer = param; status = acpi_evaluate_object(handle, "ACMD", &input, &output); if (ACPI_SUCCESS(status)) { union acpi_object *obj; obj = output.pointer; locs = obj->buffer.pointer; *x = locs[0]; *y = locs[1]; *z = locs[2]; kfree(output.pointer); } return status; } static void cmpc_accel_handler(struct acpi_device *dev, u32 event) { if (event == 0x81) { unsigned char x, y, z; acpi_status status; status = cmpc_get_accel(dev->handle, &x, &y, &z); if (ACPI_SUCCESS(status)) { struct input_dev *inputdev = 
dev_get_drvdata(&dev->dev); input_report_abs(inputdev, ABS_X, x); input_report_abs(inputdev, ABS_Y, y); input_report_abs(inputdev, ABS_Z, z); input_sync(inputdev); } } } static ssize_t cmpc_accel_sensitivity_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *acpi; struct input_dev *inputdev; struct cmpc_accel *accel; acpi = to_acpi_device(dev); inputdev = dev_get_drvdata(&acpi->dev); accel = dev_get_drvdata(&inputdev->dev); return sprintf(buf, "%d\n", accel->sensitivity); } static ssize_t cmpc_accel_sensitivity_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct acpi_device *acpi; struct input_dev *inputdev; struct cmpc_accel *accel; unsigned long sensitivity; int r; acpi = to_acpi_device(dev); inputdev = dev_get_drvdata(&acpi->dev); accel = dev_get_drvdata(&inputdev->dev); r = strict_strtoul(buf, 0, &sensitivity); if (r) return r; accel->sensitivity = sensitivity; cmpc_accel_set_sensitivity(acpi->handle, sensitivity); return strnlen(buf, count); } static struct device_attribute cmpc_accel_sensitivity_attr = { .attr = { .name = "sensitivity", .mode = 0660 }, .show = cmpc_accel_sensitivity_show, .store = cmpc_accel_sensitivity_store }; static int cmpc_accel_open(struct input_dev *input) { struct acpi_device *acpi; acpi = to_acpi_device(input->dev.parent); if (ACPI_SUCCESS(cmpc_start_accel(acpi->handle))) return 0; return -EIO; } static void cmpc_accel_close(struct input_dev *input) { struct acpi_device *acpi; acpi = to_acpi_device(input->dev.parent); cmpc_stop_accel(acpi->handle); } static void cmpc_accel_idev_init(struct input_dev *inputdev) { set_bit(EV_ABS, inputdev->evbit); input_set_abs_params(inputdev, ABS_X, 0, 255, 8, 0); input_set_abs_params(inputdev, ABS_Y, 0, 255, 8, 0); input_set_abs_params(inputdev, ABS_Z, 0, 255, 8, 0); inputdev->open = cmpc_accel_open; inputdev->close = cmpc_accel_close; } static int cmpc_accel_add(struct acpi_device *acpi) { int error; struct input_dev 
*inputdev; struct cmpc_accel *accel; accel = kmalloc(sizeof(*accel), GFP_KERNEL); if (!accel) return -ENOMEM; accel->sensitivity = CMPC_ACCEL_SENSITIVITY_DEFAULT; cmpc_accel_set_sensitivity(acpi->handle, accel->sensitivity); error = device_create_file(&acpi->dev, &cmpc_accel_sensitivity_attr); if (error) goto failed_file; error = cmpc_add_acpi_notify_device(acpi, "cmpc_accel", cmpc_accel_idev_init); if (error) goto failed_input; inputdev = dev_get_drvdata(&acpi->dev); dev_set_drvdata(&inputdev->dev, accel); return 0; failed_input: device_remove_file(&acpi->dev, &cmpc_accel_sensitivity_attr); failed_file: kfree(accel); return error; } static int cmpc_accel_remove(struct acpi_device *acpi, int type) { struct input_dev *inputdev; struct cmpc_accel *accel; inputdev = dev_get_drvdata(&acpi->dev); accel = dev_get_drvdata(&inputdev->dev); device_remove_file(&acpi->dev, &cmpc_accel_sensitivity_attr); return cmpc_remove_acpi_notify_device(acpi); } static const struct acpi_device_id cmpc_accel_device_ids[] = { {CMPC_ACCEL_HID, 0}, {"", 0} }; static struct acpi_driver cmpc_accel_acpi_driver = { .owner = THIS_MODULE, .name = "cmpc_accel", .class = "cmpc_accel", .ids = cmpc_accel_device_ids, .ops = { .add = cmpc_accel_add, .remove = cmpc_accel_remove, .notify = cmpc_accel_handler, } }; /* * Tablet mode code. 
*/ static acpi_status cmpc_get_tablet(acpi_handle handle, unsigned long long *value) { union acpi_object param; struct acpi_object_list input; unsigned long long output; acpi_status status; param.type = ACPI_TYPE_INTEGER; param.integer.value = 0x01; input.count = 1; input.pointer = &param; status = acpi_evaluate_integer(handle, "TCMD", &input, &output); if (ACPI_SUCCESS(status)) *value = output; return status; } static void cmpc_tablet_handler(struct acpi_device *dev, u32 event) { unsigned long long val = 0; struct input_dev *inputdev = dev_get_drvdata(&dev->dev); if (event == 0x81) { if (ACPI_SUCCESS(cmpc_get_tablet(dev->handle, &val))) input_report_switch(inputdev, SW_TABLET_MODE, !val); } } static void cmpc_tablet_idev_init(struct input_dev *inputdev) { unsigned long long val = 0; struct acpi_device *acpi; set_bit(EV_SW, inputdev->evbit); set_bit(SW_TABLET_MODE, inputdev->swbit); acpi = to_acpi_device(inputdev->dev.parent); if (ACPI_SUCCESS(cmpc_get_tablet(acpi->handle, &val))) input_report_switch(inputdev, SW_TABLET_MODE, !val); } static int cmpc_tablet_add(struct acpi_device *acpi) { return cmpc_add_acpi_notify_device(acpi, "cmpc_tablet", cmpc_tablet_idev_init); } static int cmpc_tablet_remove(struct acpi_device *acpi, int type) { return cmpc_remove_acpi_notify_device(acpi); } static int cmpc_tablet_resume(struct acpi_device *acpi) { struct input_dev *inputdev = dev_get_drvdata(&acpi->dev); unsigned long long val = 0; if (ACPI_SUCCESS(cmpc_get_tablet(acpi->handle, &val))) input_report_switch(inputdev, SW_TABLET_MODE, !val); return 0; } static const struct acpi_device_id cmpc_tablet_device_ids[] = { {CMPC_TABLET_HID, 0}, {"", 0} }; static struct acpi_driver cmpc_tablet_acpi_driver = { .owner = THIS_MODULE, .name = "cmpc_tablet", .class = "cmpc_tablet", .ids = cmpc_tablet_device_ids, .ops = { .add = cmpc_tablet_add, .remove = cmpc_tablet_remove, .resume = cmpc_tablet_resume, .notify = cmpc_tablet_handler, } }; /* * Backlight code. 
*/ static acpi_status cmpc_get_brightness(acpi_handle handle, unsigned long long *value) { union acpi_object param; struct acpi_object_list input; unsigned long long output; acpi_status status; param.type = ACPI_TYPE_INTEGER; param.integer.value = 0xC0; input.count = 1; input.pointer = &param; status = acpi_evaluate_integer(handle, "GRDI", &input, &output); if (ACPI_SUCCESS(status)) *value = output; return status; } static acpi_status cmpc_set_brightness(acpi_handle handle, unsigned long long value) { union acpi_object param[2]; struct acpi_object_list input; acpi_status status; unsigned long long output; param[0].type = ACPI_TYPE_INTEGER; param[0].integer.value = 0xC0; param[1].type = ACPI_TYPE_INTEGER; param[1].integer.value = value; input.count = 2; input.pointer = param; status = acpi_evaluate_integer(handle, "GWRI", &input, &output); return status; } static int cmpc_bl_get_brightness(struct backlight_device *bd) { acpi_status status; acpi_handle handle; unsigned long long brightness; handle = bl_get_data(bd); status = cmpc_get_brightness(handle, &brightness); if (ACPI_SUCCESS(status)) return brightness; else return -1; } static int cmpc_bl_update_status(struct backlight_device *bd) { acpi_status status; acpi_handle handle; handle = bl_get_data(bd); status = cmpc_set_brightness(handle, bd->props.brightness); if (ACPI_SUCCESS(status)) return 0; else return -1; } static const struct backlight_ops cmpc_bl_ops = { .get_brightness = cmpc_bl_get_brightness, .update_status = cmpc_bl_update_status }; /* * RFKILL code. 
*/ static acpi_status cmpc_get_rfkill_wlan(acpi_handle handle, unsigned long long *value) { union acpi_object param; struct acpi_object_list input; unsigned long long output; acpi_status status; param.type = ACPI_TYPE_INTEGER; param.integer.value = 0xC1; input.count = 1; input.pointer = &param; status = acpi_evaluate_integer(handle, "GRDI", &input, &output); if (ACPI_SUCCESS(status)) *value = output; return status; } static acpi_status cmpc_set_rfkill_wlan(acpi_handle handle, unsigned long long value) { union acpi_object param[2]; struct acpi_object_list input; acpi_status status; unsigned long long output; param[0].type = ACPI_TYPE_INTEGER; param[0].integer.value = 0xC1; param[1].type = ACPI_TYPE_INTEGER; param[1].integer.value = value; input.count = 2; input.pointer = param; status = acpi_evaluate_integer(handle, "GWRI", &input, &output); return status; } static void cmpc_rfkill_query(struct rfkill *rfkill, void *data) { acpi_status status; acpi_handle handle; unsigned long long state; bool blocked; handle = data; status = cmpc_get_rfkill_wlan(handle, &state); if (ACPI_SUCCESS(status)) { blocked = state & 1 ? false : true; rfkill_set_sw_state(rfkill, blocked); } } static int cmpc_rfkill_block(void *data, bool blocked) { acpi_status status; acpi_handle handle; unsigned long long state; bool is_blocked; handle = data; status = cmpc_get_rfkill_wlan(handle, &state); if (ACPI_FAILURE(status)) return -ENODEV; /* Check if we really need to call cmpc_set_rfkill_wlan */ is_blocked = state & 1 ? false : true; if (is_blocked != blocked) { state = blocked ? 0 : 1; status = cmpc_set_rfkill_wlan(handle, state); if (ACPI_FAILURE(status)) return -ENODEV; } return 0; } static const struct rfkill_ops cmpc_rfkill_ops = { .query = cmpc_rfkill_query, .set_block = cmpc_rfkill_block, }; /* * Common backlight and rfkill code. 
*/ struct ipml200_dev { struct backlight_device *bd; struct rfkill *rf; }; static int cmpc_ipml_add(struct acpi_device *acpi) { int retval; struct ipml200_dev *ipml; struct backlight_properties props; ipml = kmalloc(sizeof(*ipml), GFP_KERNEL); if (ipml == NULL) return -ENOMEM; memset(&props, 0, sizeof(struct backlight_properties)); props.type = BACKLIGHT_PLATFORM; props.max_brightness = 7; ipml->bd = backlight_device_register("cmpc_bl", &acpi->dev, acpi->handle, &cmpc_bl_ops, &props); if (IS_ERR(ipml->bd)) { retval = PTR_ERR(ipml->bd); goto out_bd; } ipml->rf = rfkill_alloc("cmpc_rfkill", &acpi->dev, RFKILL_TYPE_WLAN, &cmpc_rfkill_ops, acpi->handle); /* * If RFKILL is disabled, rfkill_alloc will return ERR_PTR(-ENODEV). * This is OK, however, since all other uses of the device will not * derefence it. */ if (ipml->rf) { retval = rfkill_register(ipml->rf); if (retval) { rfkill_destroy(ipml->rf); ipml->rf = NULL; } } dev_set_drvdata(&acpi->dev, ipml); return 0; out_bd: kfree(ipml); return retval; } static int cmpc_ipml_remove(struct acpi_device *acpi, int type) { struct ipml200_dev *ipml; ipml = dev_get_drvdata(&acpi->dev); backlight_device_unregister(ipml->bd); if (ipml->rf) { rfkill_unregister(ipml->rf); rfkill_destroy(ipml->rf); } kfree(ipml); return 0; } static const struct acpi_device_id cmpc_ipml_device_ids[] = { {CMPC_IPML_HID, 0}, {"", 0} }; static struct acpi_driver cmpc_ipml_acpi_driver = { .owner = THIS_MODULE, .name = "cmpc", .class = "cmpc", .ids = cmpc_ipml_device_ids, .ops = { .add = cmpc_ipml_add, .remove = cmpc_ipml_remove } }; /* * Extra keys code. 
*/ static int cmpc_keys_codes[] = { KEY_UNKNOWN, KEY_WLAN, KEY_SWITCHVIDEOMODE, KEY_BRIGHTNESSDOWN, KEY_BRIGHTNESSUP, KEY_VENDOR, KEY_UNKNOWN, KEY_CAMERA, KEY_BACK, KEY_FORWARD, KEY_MAX }; static void cmpc_keys_handler(struct acpi_device *dev, u32 event) { struct input_dev *inputdev; int code = KEY_MAX; if ((event & 0x0F) < ARRAY_SIZE(cmpc_keys_codes)) code = cmpc_keys_codes[event & 0x0F]; inputdev = dev_get_drvdata(&dev->dev); input_report_key(inputdev, code, !(event & 0x10)); input_sync(inputdev); } static void cmpc_keys_idev_init(struct input_dev *inputdev) { int i; set_bit(EV_KEY, inputdev->evbit); for (i = 0; cmpc_keys_codes[i] != KEY_MAX; i++) set_bit(cmpc_keys_codes[i], inputdev->keybit); } static int cmpc_keys_add(struct acpi_device *acpi) { return cmpc_add_acpi_notify_device(acpi, "cmpc_keys", cmpc_keys_idev_init); } static int cmpc_keys_remove(struct acpi_device *acpi, int type) { return cmpc_remove_acpi_notify_device(acpi); } static const struct acpi_device_id cmpc_keys_device_ids[] = { {CMPC_KEYS_HID, 0}, {"", 0} }; static struct acpi_driver cmpc_keys_acpi_driver = { .owner = THIS_MODULE, .name = "cmpc_keys", .class = "cmpc_keys", .ids = cmpc_keys_device_ids, .ops = { .add = cmpc_keys_add, .remove = cmpc_keys_remove, .notify = cmpc_keys_handler, } }; /* * General init/exit code. 
*/ static int cmpc_init(void) { int r; r = acpi_bus_register_driver(&cmpc_keys_acpi_driver); if (r) goto failed_keys; r = acpi_bus_register_driver(&cmpc_ipml_acpi_driver); if (r) goto failed_bl; r = acpi_bus_register_driver(&cmpc_tablet_acpi_driver); if (r) goto failed_tablet; r = acpi_bus_register_driver(&cmpc_accel_acpi_driver); if (r) goto failed_accel; return r; failed_accel: acpi_bus_unregister_driver(&cmpc_tablet_acpi_driver); failed_tablet: acpi_bus_unregister_driver(&cmpc_ipml_acpi_driver); failed_bl: acpi_bus_unregister_driver(&cmpc_keys_acpi_driver); failed_keys: return r; } static void cmpc_exit(void) { acpi_bus_unregister_driver(&cmpc_accel_acpi_driver); acpi_bus_unregister_driver(&cmpc_tablet_acpi_driver); acpi_bus_unregister_driver(&cmpc_ipml_acpi_driver); acpi_bus_unregister_driver(&cmpc_keys_acpi_driver); } module_init(cmpc_init); module_exit(cmpc_exit); static const struct acpi_device_id cmpc_device_ids[] = { {CMPC_ACCEL_HID, 0}, {CMPC_TABLET_HID, 0}, {CMPC_IPML_HID, 0}, {CMPC_KEYS_HID, 0}, {"", 0} }; MODULE_DEVICE_TABLE(acpi, cmpc_device_ids);
gpl-2.0
ABIP/android_kernel_samsung_msm7x30-common
drivers/input/serio/parkbd.c
13156
5191
/* * Parallel port to Keyboard port adapter driver for Linux * * Copyright (c) 1999-2004 Vojtech Pavlik */ /* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. */ /* * To connect an AT or XT keyboard to the parallel port, a fairly simple adapter * can be made: * * Parallel port Keyboard port * * +5V --------------------- +5V (4) * * ______ * +5V -------|______|--. * | * ACK (10) ------------| * |--- KBD CLOCK (5) * STROBE (1) ---|<|----' * * ______ * +5V -------|______|--. * | * BUSY (11) -----------| * |--- KBD DATA (1) * AUTOFD (14) --|<|----' * * GND (18-25) ------------- GND (3) * * The diodes can be fairly any type, and the resistors should be somewhere * around 5 kOhm, but the adapter will likely work without the resistors, * too. * * The +5V source can be taken either from USB, from mouse or keyboard ports, * or from a joystick port. Unfortunately, the parallel port of a PC doesn't * have a +5V pin, and feeding the keyboard from signal pins is out of question * with 300 mA power reqirement of a typical AT keyboard. 
*/ #include <linux/module.h> #include <linux/parport.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/serio.h> MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); MODULE_DESCRIPTION("Parallel port to Keyboard port adapter driver"); MODULE_LICENSE("GPL"); static unsigned int parkbd_pp_no; module_param_named(port, parkbd_pp_no, int, 0); MODULE_PARM_DESC(port, "Parallel port the adapter is connected to (default is 0)"); static unsigned int parkbd_mode = SERIO_8042; module_param_named(mode, parkbd_mode, uint, 0); MODULE_PARM_DESC(mode, "Mode of operation: XT = 0/AT = 1 (default)"); #define PARKBD_CLOCK 0x01 /* Strobe & Ack */ #define PARKBD_DATA 0x02 /* AutoFd & Busy */ static int parkbd_buffer; static int parkbd_counter; static unsigned long parkbd_last; static int parkbd_writing; static unsigned long parkbd_start; static struct pardevice *parkbd_dev; static struct serio *parkbd_port; static int parkbd_readlines(void) { return (parport_read_status(parkbd_dev->port) >> 6) ^ 2; } static void parkbd_writelines(int data) { parport_write_control(parkbd_dev->port, (~data & 3) | 0x10); } static int parkbd_write(struct serio *port, unsigned char c) { unsigned char p; if (!parkbd_mode) return -1; p = c ^ (c >> 4); p = p ^ (p >> 2); p = p ^ (p >> 1); parkbd_counter = 0; parkbd_writing = 1; parkbd_buffer = c | (((int) (~p & 1)) << 8) | 0x600; parkbd_writelines(2); return 0; } static void parkbd_interrupt(void *dev_id) { if (parkbd_writing) { if (parkbd_counter && ((parkbd_counter == 11) || time_after(jiffies, parkbd_last + HZ/100))) { parkbd_counter = 0; parkbd_buffer = 0; parkbd_writing = 0; parkbd_writelines(3); return; } parkbd_writelines(((parkbd_buffer >> parkbd_counter++) & 1) | 2); if (parkbd_counter == 11) { parkbd_counter = 0; parkbd_buffer = 0; parkbd_writing = 0; parkbd_writelines(3); } } else { if ((parkbd_counter == parkbd_mode + 10) || time_after(jiffies, parkbd_last + HZ/100)) { parkbd_counter = 0; parkbd_buffer = 0; } parkbd_buffer |= 
(parkbd_readlines() >> 1) << parkbd_counter++; if (parkbd_counter == parkbd_mode + 10) serio_interrupt(parkbd_port, (parkbd_buffer >> (2 - parkbd_mode)) & 0xff, 0); } parkbd_last = jiffies; } static int parkbd_getport(void) { struct parport *pp; pp = parport_find_number(parkbd_pp_no); if (pp == NULL) { printk(KERN_ERR "parkbd: no such parport\n"); return -ENODEV; } parkbd_dev = parport_register_device(pp, "parkbd", NULL, NULL, parkbd_interrupt, PARPORT_DEV_EXCL, NULL); parport_put_port(pp); if (!parkbd_dev) return -ENODEV; if (parport_claim(parkbd_dev)) { parport_unregister_device(parkbd_dev); return -EBUSY; } parkbd_start = jiffies; return 0; } static struct serio * __init parkbd_allocate_serio(void) { struct serio *serio; serio = kzalloc(sizeof(struct serio), GFP_KERNEL); if (serio) { serio->id.type = parkbd_mode; serio->write = parkbd_write, strlcpy(serio->name, "PARKBD AT/XT keyboard adapter", sizeof(serio->name)); snprintf(serio->phys, sizeof(serio->phys), "%s/serio0", parkbd_dev->port->name); } return serio; } static int __init parkbd_init(void) { int err; err = parkbd_getport(); if (err) return err; parkbd_port = parkbd_allocate_serio(); if (!parkbd_port) { parport_release(parkbd_dev); return -ENOMEM; } parkbd_writelines(3); serio_register_port(parkbd_port); printk(KERN_INFO "serio: PARKBD %s adapter on %s\n", parkbd_mode ? "AT" : "XT", parkbd_dev->port->name); return 0; } static void __exit parkbd_exit(void) { parport_release(parkbd_dev); serio_unregister_port(parkbd_port); parport_unregister_device(parkbd_dev); } module_init(parkbd_init); module_exit(parkbd_exit);
gpl-2.0
flar2/evita-bulletproof
drivers/s390/cio/fcx.c
13924
9488
/* * Functions for assembling fcx enabled I/O control blocks. * * Copyright IBM Corp. 2008 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/module.h> #include <asm/fcx.h> #include "cio.h" /** * tcw_get_intrg - return pointer to associated interrogate tcw * @tcw: pointer to the original tcw * * Return a pointer to the interrogate tcw associated with the specified tcw * or %NULL if there is no associated interrogate tcw. */ struct tcw *tcw_get_intrg(struct tcw *tcw) { return (struct tcw *) ((addr_t) tcw->intrg); } EXPORT_SYMBOL(tcw_get_intrg); /** * tcw_get_data - return pointer to input/output data associated with tcw * @tcw: pointer to the tcw * * Return the input or output data address specified in the tcw depending * on whether the r-bit or the w-bit is set. If neither bit is set, return * %NULL. */ void *tcw_get_data(struct tcw *tcw) { if (tcw->r) return (void *) ((addr_t) tcw->input); if (tcw->w) return (void *) ((addr_t) tcw->output); return NULL; } EXPORT_SYMBOL(tcw_get_data); /** * tcw_get_tccb - return pointer to tccb associated with tcw * @tcw: pointer to the tcw * * Return pointer to the tccb associated with this tcw. */ struct tccb *tcw_get_tccb(struct tcw *tcw) { return (struct tccb *) ((addr_t) tcw->tccb); } EXPORT_SYMBOL(tcw_get_tccb); /** * tcw_get_tsb - return pointer to tsb associated with tcw * @tcw: pointer to the tcw * * Return pointer to the tsb associated with this tcw. */ struct tsb *tcw_get_tsb(struct tcw *tcw) { return (struct tsb *) ((addr_t) tcw->tsb); } EXPORT_SYMBOL(tcw_get_tsb); /** * tcw_init - initialize tcw data structure * @tcw: pointer to the tcw to be initialized * @r: initial value of the r-bit * @w: initial value of the w-bit * * Initialize all fields of the specified tcw data structure with zero and * fill in the format, flags, r and w fields. 
 */
void tcw_init(struct tcw *tcw, int r, int w)
{
	memset(tcw, 0, sizeof(struct tcw));
	tcw->format = TCW_FORMAT_DEFAULT;
	tcw->flags = TCW_FLAGS_TIDAW_FORMAT(TCW_TIDAW_FORMAT_DEFAULT);
	if (r)
		tcw->r = 1;
	if (w)
		tcw->w = 1;
}
EXPORT_SYMBOL(tcw_init);

/* Bytes currently used in the tca, i.e. tcal minus the 12-byte header. */
static inline size_t tca_size(struct tccb *tccb)
{
	return tccb->tcah.tcal - 12;
}

/* Sum the data counts of all command-chained dcws in the tccb's tca. */
static u32 calc_dcw_count(struct tccb *tccb)
{
	int offset;
	struct dcw *dcw;
	u32 count = 0;
	size_t size;

	size = tca_size(tccb);
	for (offset = 0; offset < size;) {
		dcw = (struct dcw *) &tccb->tca[offset];
		count += dcw->count;
		if (!(dcw->flags & DCW_FLAGS_CC))
			break;
		offset += sizeof(struct dcw) + ALIGN((int) dcw->cd_count, 4);
	}
	return count;
}

/* Number of check-byte (CBC) bytes inserted for this tidaw list. */
static u32 calc_cbc_size(struct tidaw *tidaw, int num)
{
	int i;
	u32 cbc_data;
	u32 cbc_count = 0;
	u64 data_count = 0;

	for (i = 0; i < num; i++) {
		if (tidaw[i].flags & TIDAW_FLAGS_LAST)
			break;
		/* TODO: find out if padding applies to total of data
		 * transferred or data transferred by this tidaw. Assumption:
		 * applies to total. */
		data_count += tidaw[i].count;
		if (tidaw[i].flags & TIDAW_FLAGS_INSERT_CBC) {
			cbc_data = 4 + ALIGN(data_count, 4) - data_count;
			cbc_count += cbc_data;
			data_count += cbc_data;
		}
	}
	return cbc_count;
}

/**
 * tcw_finalize - finalize tcw length fields and tidaw list
 * @tcw: pointer to the tcw
 * @num_tidaws: the number of tidaws used to address input/output data or zero
 * if no tida is used
 *
 * Calculate the input-/output-count and tccbl field in the tcw, add a
 * tcat the tccb and terminate the data tidaw list if used.
 *
 * Note: in case input- or output-tida is used, the tidaw-list must be stored
 * in contiguous storage (no ttic). The tcal field in the tccb must be
 * up-to-date.
 */
void tcw_finalize(struct tcw *tcw, int num_tidaws)
{
	struct tidaw *tidaw;
	struct tccb *tccb;
	struct tccb_tcat *tcat;
	u32 count;

	/* Terminate tidaw list. */
	tidaw = tcw_get_data(tcw);
	if (num_tidaws > 0)
		tidaw[num_tidaws - 1].flags |= TIDAW_FLAGS_LAST;
	/* Add tcat to tccb. */
	tccb = tcw_get_tccb(tcw);
	tcat = (struct tccb_tcat *) &tccb->tca[tca_size(tccb)];
	memset(tcat, 0, sizeof(*tcat));
	/* Calculate tcw input/output count and tcat transport count. */
	count = calc_dcw_count(tccb);
	if (tcw->w && (tcw->flags & TCW_FLAGS_OUTPUT_TIDA))
		count += calc_cbc_size(tidaw, num_tidaws);
	if (tcw->r)
		tcw->input_count = count;
	else if (tcw->w)
		tcw->output_count = count;
	tcat->count = ALIGN(count, 4) + 4;
	/* Calculate tccbl. */
	tcw->tccbl = (sizeof(struct tccb) + tca_size(tccb) +
		      sizeof(struct tccb_tcat) - 20) >> 2;
}
EXPORT_SYMBOL(tcw_finalize);

/**
 * tcw_set_intrg - set the interrogate tcw address of a tcw
 * @tcw: the tcw address
 * @intrg_tcw: the address of the interrogate tcw
 *
 * Set the address of the interrogate tcw in the specified tcw.
 */
void tcw_set_intrg(struct tcw *tcw, struct tcw *intrg_tcw)
{
	tcw->intrg = (u32) ((addr_t) intrg_tcw);
}
EXPORT_SYMBOL(tcw_set_intrg);

/**
 * tcw_set_data - set data address and tida flag of a tcw
 * @tcw: the tcw address
 * @data: the data address
 * @use_tidal: zero of the data address specifies a contiguous block of data,
 * non-zero if it specifies a list if tidaws.
 *
 * Set the input/output data address of a tcw (depending on the value of the
 * r-flag and w-flag). If @use_tidal is non-zero, the corresponding tida flag
 * is set as well.
 */
void tcw_set_data(struct tcw *tcw, void *data, int use_tidal)
{
	if (tcw->r) {
		tcw->input = (u64) ((addr_t) data);
		if (use_tidal)
			tcw->flags |= TCW_FLAGS_INPUT_TIDA;
	} else if (tcw->w) {
		tcw->output = (u64) ((addr_t) data);
		if (use_tidal)
			tcw->flags |= TCW_FLAGS_OUTPUT_TIDA;
	}
}
EXPORT_SYMBOL(tcw_set_data);

/**
 * tcw_set_tccb - set tccb address of a tcw
 * @tcw: the tcw address
 * @tccb: the tccb address
 *
 * Set the address of the tccb in the specified tcw.
*/ void tcw_set_tccb(struct tcw *tcw, struct tccb *tccb) { tcw->tccb = (u64) ((addr_t) tccb); } EXPORT_SYMBOL(tcw_set_tccb); /** * tcw_set_tsb - set tsb address of a tcw * @tcw: the tcw address * @tsb: the tsb address * * Set the address of the tsb in the specified tcw. */ void tcw_set_tsb(struct tcw *tcw, struct tsb *tsb) { tcw->tsb = (u64) ((addr_t) tsb); } EXPORT_SYMBOL(tcw_set_tsb); /** * tccb_init - initialize tccb * @tccb: the tccb address * @size: the maximum size of the tccb * @sac: the service-action-code to be user * * Initialize the header of the specified tccb by resetting all values to zero * and filling in defaults for format, sac and initial tcal fields. */ void tccb_init(struct tccb *tccb, size_t size, u32 sac) { memset(tccb, 0, size); tccb->tcah.format = TCCB_FORMAT_DEFAULT; tccb->tcah.sac = sac; tccb->tcah.tcal = 12; } EXPORT_SYMBOL(tccb_init); /** * tsb_init - initialize tsb * @tsb: the tsb address * * Initialize the specified tsb by resetting all values to zero. */ void tsb_init(struct tsb *tsb) { memset(tsb, 0, sizeof(*tsb)); } EXPORT_SYMBOL(tsb_init); /** * tccb_add_dcw - add a dcw to the tccb * @tccb: the tccb address * @tccb_size: the maximum tccb size * @cmd: the dcw command * @flags: flags for the dcw * @cd: pointer to control data for this dcw or NULL if none is required * @cd_count: number of control data bytes for this dcw * @count: number of data bytes for this dcw * * Add a new dcw to the specified tccb by writing the dcw information specified * by @cmd, @flags, @cd, @cd_count and @count to the tca of the tccb. Return * a pointer to the newly added dcw on success or -%ENOSPC if the new dcw * would exceed the available space as defined by @tccb_size. * * Note: the tcal field of the tccb header will be updates to reflect added * content. */ struct dcw *tccb_add_dcw(struct tccb *tccb, size_t tccb_size, u8 cmd, u8 flags, void *cd, u8 cd_count, u32 count) { struct dcw *dcw; int size; int tca_offset; /* Check for space. 
*/ tca_offset = tca_size(tccb); size = ALIGN(sizeof(struct dcw) + cd_count, 4); if (sizeof(struct tccb_tcah) + tca_offset + size + sizeof(struct tccb_tcat) > tccb_size) return ERR_PTR(-ENOSPC); /* Add dcw to tca. */ dcw = (struct dcw *) &tccb->tca[tca_offset]; memset(dcw, 0, size); dcw->cmd = cmd; dcw->flags = flags; dcw->count = count; dcw->cd_count = cd_count; if (cd) memcpy(&dcw->cd[0], cd, cd_count); tccb->tcah.tcal += size; return dcw; } EXPORT_SYMBOL(tccb_add_dcw); /** * tcw_add_tidaw - add a tidaw to a tcw * @tcw: the tcw address * @num_tidaws: the current number of tidaws * @flags: flags for the new tidaw * @addr: address value for the new tidaw * @count: count value for the new tidaw * * Add a new tidaw to the input/output data tidaw-list of the specified tcw * (depending on the value of the r-flag and w-flag) and return a pointer to * the new tidaw. * * Note: the tidaw-list is assumed to be contiguous with no ttics. The caller * must ensure that there is enough space for the new tidaw. The last-tidaw * flag for the last tidaw in the list will be set by tcw_finalize. */ struct tidaw *tcw_add_tidaw(struct tcw *tcw, int num_tidaws, u8 flags, void *addr, u32 count) { struct tidaw *tidaw; /* Add tidaw to tidaw-list. */ tidaw = ((struct tidaw *) tcw_get_data(tcw)) + num_tidaws; memset(tidaw, 0, sizeof(struct tidaw)); tidaw->flags = flags; tidaw->count = count; tidaw->addr = (u64) ((addr_t) addr); return tidaw; } EXPORT_SYMBOL(tcw_add_tidaw);
gpl-2.0
widz4rd/WIDzard-A850K
sound/pci/hda/patch_sigmatel.c
101
186843
/* * Universal Interface for Intel High Definition Audio Codec * * HD audio interface patch for SigmaTel STAC92xx * * Copyright (c) 2005 Embedded Alley Solutions, Inc. * Matt Porter <mporter@embeddedalley.com> * * Based on patch_cmedia.c and patch_realtek.c * Copyright (c) 2004 Takashi Iwai <tiwai@suse.de> * * This driver is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This driver is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/dmi.h> #include <linux/module.h> #include <sound/core.h> #include <sound/asoundef.h> #include <sound/jack.h> #include <sound/tlv.h> #include "hda_codec.h" #include "hda_local.h" #include "hda_beep.h" #include "hda_jack.h" enum { STAC_VREF_EVENT = 1, STAC_INSERT_EVENT, STAC_PWR_EVENT, STAC_HP_EVENT, STAC_LO_EVENT, STAC_MIC_EVENT, }; enum { STAC_AUTO, STAC_REF, STAC_9200_OQO, STAC_9200_DELL_D21, STAC_9200_DELL_D22, STAC_9200_DELL_D23, STAC_9200_DELL_M21, STAC_9200_DELL_M22, STAC_9200_DELL_M23, STAC_9200_DELL_M24, STAC_9200_DELL_M25, STAC_9200_DELL_M26, STAC_9200_DELL_M27, STAC_9200_M4, STAC_9200_M4_2, STAC_9200_PANASONIC, STAC_9200_MODELS }; enum { STAC_9205_AUTO, STAC_9205_REF, STAC_9205_DELL_M42, STAC_9205_DELL_M43, STAC_9205_DELL_M44, STAC_9205_EAPD, STAC_9205_MODELS }; enum { STAC_92HD73XX_AUTO, STAC_92HD73XX_NO_JD, /* no jack-detection */ STAC_92HD73XX_REF, 
STAC_92HD73XX_INTEL, STAC_DELL_M6_AMIC, STAC_DELL_M6_DMIC, STAC_DELL_M6_BOTH, STAC_DELL_EQ, STAC_ALIENWARE_M17X, STAC_92HD73XX_MODELS }; enum { STAC_92HD83XXX_AUTO, STAC_92HD83XXX_REF, STAC_92HD83XXX_PWR_REF, STAC_DELL_S14, STAC_DELL_VOSTRO_3500, STAC_92HD83XXX_HP_cNB11_INTQUAD, STAC_HP_DV7_4000, STAC_HP_ZEPHYR, STAC_92HD83XXX_MODELS }; enum { STAC_92HD71BXX_AUTO, STAC_92HD71BXX_REF, STAC_DELL_M4_1, STAC_DELL_M4_2, STAC_DELL_M4_3, STAC_HP_M4, STAC_HP_DV4, STAC_HP_DV5, STAC_HP_HDX, STAC_HP_DV4_1222NR, STAC_92HD71BXX_MODELS }; enum { STAC_925x_AUTO, STAC_925x_REF, STAC_M1, STAC_M1_2, STAC_M2, STAC_M2_2, STAC_M3, STAC_M5, STAC_M6, STAC_925x_MODELS }; enum { STAC_922X_AUTO, STAC_D945_REF, STAC_D945GTP3, STAC_D945GTP5, STAC_INTEL_MAC_V1, STAC_INTEL_MAC_V2, STAC_INTEL_MAC_V3, STAC_INTEL_MAC_V4, STAC_INTEL_MAC_V5, STAC_INTEL_MAC_AUTO, /* This model is selected if no module parameter * is given, one of the above models will be * chosen according to the subsystem id. */ /* for backward compatibility */ STAC_MACMINI, STAC_MACBOOK, STAC_MACBOOK_PRO_V1, STAC_MACBOOK_PRO_V2, STAC_IMAC_INTEL, STAC_IMAC_INTEL_20, STAC_ECS_202, STAC_922X_DELL_D81, STAC_922X_DELL_D82, STAC_922X_DELL_M81, STAC_922X_DELL_M82, STAC_922X_MODELS }; enum { STAC_927X_AUTO, STAC_D965_REF_NO_JD, /* no jack-detection */ STAC_D965_REF, STAC_D965_3ST, STAC_D965_5ST, STAC_D965_5ST_NO_FP, STAC_DELL_3ST, STAC_DELL_BIOS, STAC_927X_VOLKNOB, STAC_927X_MODELS }; enum { STAC_9872_AUTO, STAC_9872_VAIO, STAC_9872_MODELS }; struct sigmatel_mic_route { hda_nid_t pin; signed char mux_idx; signed char dmux_idx; }; #define MAX_PINS_NUM 16 #define MAX_ADCS_NUM 4 #define MAX_DMICS_NUM 4 struct sigmatel_spec { struct snd_kcontrol_new *mixers[4]; unsigned int num_mixers; int board_config; unsigned int eapd_switch: 1; unsigned int surr_switch: 1; unsigned int alt_switch: 1; unsigned int hp_detect: 1; unsigned int spdif_mute: 1; unsigned int check_volume_offset:1; unsigned int auto_mic:1; unsigned int linear_tone_beep:1; /* gpio 
lines */ unsigned int eapd_mask; unsigned int gpio_mask; unsigned int gpio_dir; unsigned int gpio_data; unsigned int gpio_mute; unsigned int gpio_led; unsigned int gpio_led_polarity; unsigned int vref_mute_led_nid; /* pin NID for mute-LED vref control */ unsigned int vref_led; /* stream */ unsigned int stream_delay; /* analog loopback */ const struct snd_kcontrol_new *aloopback_ctl; unsigned char aloopback_mask; unsigned char aloopback_shift; /* power management */ unsigned int num_pwrs; const hda_nid_t *pwr_nids; const hda_nid_t *dac_list; /* playback */ struct hda_input_mux *mono_mux; unsigned int cur_mmux; struct hda_multi_out multiout; hda_nid_t dac_nids[5]; hda_nid_t hp_dacs[5]; hda_nid_t speaker_dacs[5]; int volume_offset; /* capture */ const hda_nid_t *adc_nids; unsigned int num_adcs; const hda_nid_t *mux_nids; unsigned int num_muxes; const hda_nid_t *dmic_nids; unsigned int num_dmics; const hda_nid_t *dmux_nids; unsigned int num_dmuxes; const hda_nid_t *smux_nids; unsigned int num_smuxes; unsigned int num_analog_muxes; const unsigned long *capvols; /* amp-volume attr: HDA_COMPOSE_AMP_VAL() */ const unsigned long *capsws; /* amp-mute attr: HDA_COMPOSE_AMP_VAL() */ unsigned int num_caps; /* number of capture volume/switch elements */ struct sigmatel_mic_route ext_mic; struct sigmatel_mic_route int_mic; struct sigmatel_mic_route dock_mic; const char * const *spdif_labels; hda_nid_t dig_in_nid; hda_nid_t mono_nid; hda_nid_t anabeep_nid; hda_nid_t digbeep_nid; /* pin widgets */ const hda_nid_t *pin_nids; unsigned int num_pins; /* codec specific stuff */ const struct hda_verb *init; const struct snd_kcontrol_new *mixer; /* capture source */ struct hda_input_mux *dinput_mux; unsigned int cur_dmux[2]; struct hda_input_mux *input_mux; unsigned int cur_mux[3]; struct hda_input_mux *sinput_mux; unsigned int cur_smux[2]; unsigned int cur_amux; hda_nid_t *amp_nids; unsigned int powerdown_adcs; /* i/o switches */ unsigned int io_switch[2]; unsigned int clfe_swap; 
hda_nid_t line_switch; /* shared line-in for input and output */ hda_nid_t mic_switch; /* shared mic-in for input and output */ hda_nid_t hp_switch; /* NID of HP as line-out */ unsigned int aloopback; struct hda_pcm pcm_rec[2]; /* PCM information */ /* dynamic controls and input_mux */ struct auto_pin_cfg autocfg; struct snd_array kctls; struct hda_input_mux private_dimux; struct hda_input_mux private_imux; struct hda_input_mux private_smux; struct hda_input_mux private_mono_mux; /* auto spec */ unsigned auto_pin_cnt; hda_nid_t auto_pin_nids[MAX_PINS_NUM]; unsigned auto_adc_cnt; hda_nid_t auto_adc_nids[MAX_ADCS_NUM]; hda_nid_t auto_mux_nids[MAX_ADCS_NUM]; hda_nid_t auto_dmux_nids[MAX_ADCS_NUM]; unsigned long auto_capvols[MAX_ADCS_NUM]; unsigned auto_dmic_cnt; hda_nid_t auto_dmic_nids[MAX_DMICS_NUM]; struct hda_vmaster_mute_hook vmaster_mute; }; static const hda_nid_t stac9200_adc_nids[1] = { 0x03, }; static const hda_nid_t stac9200_mux_nids[1] = { 0x0c, }; static const hda_nid_t stac9200_dac_nids[1] = { 0x02, }; static const hda_nid_t stac92hd73xx_pwr_nids[8] = { 0x0a, 0x0b, 0x0c, 0xd, 0x0e, 0x0f, 0x10, 0x11 }; static const hda_nid_t stac92hd73xx_slave_dig_outs[2] = { 0x26, 0, }; static const hda_nid_t stac92hd73xx_adc_nids[2] = { 0x1a, 0x1b }; #define STAC92HD73XX_NUM_DMICS 2 static const hda_nid_t stac92hd73xx_dmic_nids[STAC92HD73XX_NUM_DMICS + 1] = { 0x13, 0x14, 0 }; #define STAC92HD73_DAC_COUNT 5 static const hda_nid_t stac92hd73xx_mux_nids[2] = { 0x20, 0x21, }; static const hda_nid_t stac92hd73xx_dmux_nids[2] = { 0x20, 0x21, }; static const hda_nid_t stac92hd73xx_smux_nids[2] = { 0x22, 0x23, }; #define STAC92HD73XX_NUM_CAPS 2 static const unsigned long stac92hd73xx_capvols[] = { HDA_COMPOSE_AMP_VAL(0x20, 3, 0, HDA_OUTPUT), HDA_COMPOSE_AMP_VAL(0x21, 3, 0, HDA_OUTPUT), }; #define stac92hd73xx_capsws stac92hd73xx_capvols #define STAC92HD83_DAC_COUNT 3 static const hda_nid_t stac92hd83xxx_pwr_nids[7] = { 0x0a, 0x0b, 0x0c, 0xd, 0x0e, 0x0f, 0x10 }; static const 
hda_nid_t stac92hd83xxx_slave_dig_outs[2] = { 0x1e, 0, }; static const hda_nid_t stac92hd83xxx_dmic_nids[] = { 0x11, 0x20, }; static const hda_nid_t stac92hd71bxx_pwr_nids[3] = { 0x0a, 0x0d, 0x0f }; static const hda_nid_t stac92hd71bxx_adc_nids[2] = { 0x12, 0x13, }; static const hda_nid_t stac92hd71bxx_mux_nids[2] = { 0x1a, 0x1b }; static const hda_nid_t stac92hd71bxx_dmux_nids[2] = { 0x1c, 0x1d, }; static const hda_nid_t stac92hd71bxx_smux_nids[2] = { 0x24, 0x25, }; #define STAC92HD71BXX_NUM_DMICS 2 static const hda_nid_t stac92hd71bxx_dmic_nids[STAC92HD71BXX_NUM_DMICS + 1] = { 0x18, 0x19, 0 }; static const hda_nid_t stac92hd71bxx_dmic_5port_nids[STAC92HD71BXX_NUM_DMICS] = { 0x18, 0 }; static const hda_nid_t stac92hd71bxx_slave_dig_outs[2] = { 0x22, 0 }; #define STAC92HD71BXX_NUM_CAPS 2 static const unsigned long stac92hd71bxx_capvols[] = { HDA_COMPOSE_AMP_VAL(0x1c, 3, 0, HDA_OUTPUT), HDA_COMPOSE_AMP_VAL(0x1d, 3, 0, HDA_OUTPUT), }; #define stac92hd71bxx_capsws stac92hd71bxx_capvols static const hda_nid_t stac925x_adc_nids[1] = { 0x03, }; static const hda_nid_t stac925x_mux_nids[1] = { 0x0f, }; static const hda_nid_t stac925x_dac_nids[1] = { 0x02, }; #define STAC925X_NUM_DMICS 1 static const hda_nid_t stac925x_dmic_nids[STAC925X_NUM_DMICS + 1] = { 0x15, 0 }; static const hda_nid_t stac925x_dmux_nids[1] = { 0x14, }; static const unsigned long stac925x_capvols[] = { HDA_COMPOSE_AMP_VAL(0x09, 3, 0, HDA_OUTPUT), }; static const unsigned long stac925x_capsws[] = { HDA_COMPOSE_AMP_VAL(0x14, 3, 0, HDA_OUTPUT), }; static const hda_nid_t stac922x_adc_nids[2] = { 0x06, 0x07, }; static const hda_nid_t stac922x_mux_nids[2] = { 0x12, 0x13, }; #define STAC922X_NUM_CAPS 2 static const unsigned long stac922x_capvols[] = { HDA_COMPOSE_AMP_VAL(0x17, 3, 0, HDA_INPUT), HDA_COMPOSE_AMP_VAL(0x18, 3, 0, HDA_INPUT), }; #define stac922x_capsws stac922x_capvols static const hda_nid_t stac927x_slave_dig_outs[2] = { 0x1f, 0, }; static const hda_nid_t stac927x_adc_nids[3] = { 0x07, 0x08, 0x09 
}; static const hda_nid_t stac927x_mux_nids[3] = { 0x15, 0x16, 0x17 }; static const hda_nid_t stac927x_smux_nids[1] = { 0x21, }; static const hda_nid_t stac927x_dac_nids[6] = { 0x02, 0x03, 0x04, 0x05, 0x06, 0 }; static const hda_nid_t stac927x_dmux_nids[1] = { 0x1b, }; #define STAC927X_NUM_DMICS 2 static const hda_nid_t stac927x_dmic_nids[STAC927X_NUM_DMICS + 1] = { 0x13, 0x14, 0 }; #define STAC927X_NUM_CAPS 3 static const unsigned long stac927x_capvols[] = { HDA_COMPOSE_AMP_VAL(0x18, 3, 0, HDA_INPUT), HDA_COMPOSE_AMP_VAL(0x19, 3, 0, HDA_INPUT), HDA_COMPOSE_AMP_VAL(0x1a, 3, 0, HDA_INPUT), }; static const unsigned long stac927x_capsws[] = { HDA_COMPOSE_AMP_VAL(0x1b, 3, 0, HDA_OUTPUT), HDA_COMPOSE_AMP_VAL(0x1c, 3, 0, HDA_OUTPUT), HDA_COMPOSE_AMP_VAL(0x1d, 3, 0, HDA_OUTPUT), }; static const char * const stac927x_spdif_labels[5] = { "Digital Playback", "ADAT", "Analog Mux 1", "Analog Mux 2", "Analog Mux 3" }; static const hda_nid_t stac9205_adc_nids[2] = { 0x12, 0x13 }; static const hda_nid_t stac9205_mux_nids[2] = { 0x19, 0x1a }; static const hda_nid_t stac9205_dmux_nids[1] = { 0x1d, }; static const hda_nid_t stac9205_smux_nids[1] = { 0x21, }; #define STAC9205_NUM_DMICS 2 static const hda_nid_t stac9205_dmic_nids[STAC9205_NUM_DMICS + 1] = { 0x17, 0x18, 0 }; #define STAC9205_NUM_CAPS 2 static const unsigned long stac9205_capvols[] = { HDA_COMPOSE_AMP_VAL(0x1b, 3, 0, HDA_INPUT), HDA_COMPOSE_AMP_VAL(0x1c, 3, 0, HDA_INPUT), }; static const unsigned long stac9205_capsws[] = { HDA_COMPOSE_AMP_VAL(0x1d, 3, 0, HDA_OUTPUT), HDA_COMPOSE_AMP_VAL(0x1e, 3, 0, HDA_OUTPUT), }; static const hda_nid_t stac9200_pin_nids[8] = { 0x08, 0x09, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, }; static const hda_nid_t stac925x_pin_nids[8] = { 0x07, 0x08, 0x0a, 0x0b, 0x0c, 0x0d, 0x10, 0x11, }; static const hda_nid_t stac922x_pin_nids[10] = { 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x15, 0x1b, }; static const hda_nid_t stac92hd73xx_pin_nids[13] = { 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 
0x12, 0x13, 0x14, 0x22, 0x23 }; #define STAC92HD71BXX_NUM_PINS 13 static const hda_nid_t stac92hd71bxx_pin_nids_4port[STAC92HD71BXX_NUM_PINS] = { 0x0a, 0x0b, 0x0c, 0x0d, 0x00, 0x00, 0x14, 0x18, 0x19, 0x1e, 0x1f, 0x20, 0x27 }; static const hda_nid_t stac92hd71bxx_pin_nids_6port[STAC92HD71BXX_NUM_PINS] = { 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x14, 0x18, 0x19, 0x1e, 0x1f, 0x20, 0x27 }; static const hda_nid_t stac927x_pin_nids[14] = { 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x21, 0x22, 0x23, }; static const hda_nid_t stac9205_pin_nids[12] = { 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x14, 0x16, 0x17, 0x18, 0x21, 0x22, }; static int stac92xx_dmux_enum_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct sigmatel_spec *spec = codec->spec; return snd_hda_input_mux_info(spec->dinput_mux, uinfo); } static int stac92xx_dmux_enum_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct sigmatel_spec *spec = codec->spec; unsigned int dmux_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); ucontrol->value.enumerated.item[0] = spec->cur_dmux[dmux_idx]; return 0; } static int stac92xx_dmux_enum_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct sigmatel_spec *spec = codec->spec; unsigned int dmux_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); return snd_hda_input_mux_put(codec, spec->dinput_mux, ucontrol, spec->dmux_nids[dmux_idx], &spec->cur_dmux[dmux_idx]); } static int stac92xx_smux_enum_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct sigmatel_spec *spec = codec->spec; return snd_hda_input_mux_info(spec->sinput_mux, uinfo); } static int stac92xx_smux_enum_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { 
struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct sigmatel_spec *spec = codec->spec; unsigned int smux_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); ucontrol->value.enumerated.item[0] = spec->cur_smux[smux_idx]; return 0; } static int stac92xx_smux_enum_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct sigmatel_spec *spec = codec->spec; struct hda_input_mux *smux = &spec->private_smux; unsigned int smux_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); int err, val; hda_nid_t nid; err = snd_hda_input_mux_put(codec, spec->sinput_mux, ucontrol, spec->smux_nids[smux_idx], &spec->cur_smux[smux_idx]); if (err < 0) return err; if (spec->spdif_mute) { if (smux_idx == 0) nid = spec->multiout.dig_out_nid; else nid = codec->slave_dig_outs[smux_idx - 1]; if (spec->cur_smux[smux_idx] == smux->num_items - 1) val = HDA_AMP_MUTE; else val = 0; /* un/mute SPDIF out */ snd_hda_codec_amp_stereo(codec, nid, HDA_OUTPUT, 0, HDA_AMP_MUTE, val); } return 0; } static int stac_vrefout_set(struct hda_codec *codec, hda_nid_t nid, unsigned int new_vref) { int error, pinctl; snd_printdd("%s, nid %x ctl %x\n", __func__, nid, new_vref); pinctl = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_PIN_WIDGET_CONTROL, 0); if (pinctl < 0) return pinctl; pinctl &= 0xff; pinctl &= ~AC_PINCTL_VREFEN; pinctl |= (new_vref & AC_PINCTL_VREFEN); error = snd_hda_codec_write_cache(codec, nid, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, pinctl); if (error < 0) return error; return 1; } static unsigned int stac92xx_vref_set(struct hda_codec *codec, hda_nid_t nid, unsigned int new_vref) { int error; unsigned int pincfg; pincfg = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_PIN_WIDGET_CONTROL, 0); pincfg &= 0xff; pincfg &= ~(AC_PINCTL_VREFEN | AC_PINCTL_IN_EN | AC_PINCTL_OUT_EN); pincfg |= new_vref; if (new_vref == AC_PINCTL_VREF_HIZ) pincfg |= AC_PINCTL_OUT_EN; else pincfg |= AC_PINCTL_IN_EN; error = 
snd_hda_codec_write_cache(codec, nid, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, pincfg); if (error < 0) return error; else return 1; } static unsigned int stac92xx_vref_get(struct hda_codec *codec, hda_nid_t nid) { unsigned int vref; vref = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_PIN_WIDGET_CONTROL, 0); vref &= AC_PINCTL_VREFEN; return vref; } static int stac92xx_mux_enum_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct sigmatel_spec *spec = codec->spec; return snd_hda_input_mux_info(spec->input_mux, uinfo); } static int stac92xx_mux_enum_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct sigmatel_spec *spec = codec->spec; unsigned int adc_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); ucontrol->value.enumerated.item[0] = spec->cur_mux[adc_idx]; return 0; } static int stac92xx_mux_enum_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct sigmatel_spec *spec = codec->spec; unsigned int adc_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); const struct hda_input_mux *imux = spec->input_mux; unsigned int idx, prev_idx, didx; idx = ucontrol->value.enumerated.item[0]; if (idx >= imux->num_items) idx = imux->num_items - 1; prev_idx = spec->cur_mux[adc_idx]; if (prev_idx == idx) return 0; if (idx < spec->num_analog_muxes) { snd_hda_codec_write_cache(codec, spec->mux_nids[adc_idx], 0, AC_VERB_SET_CONNECT_SEL, imux->items[idx].index); if (prev_idx >= spec->num_analog_muxes && spec->mux_nids[adc_idx] != spec->dmux_nids[adc_idx]) { imux = spec->dinput_mux; /* 0 = analog */ snd_hda_codec_write_cache(codec, spec->dmux_nids[adc_idx], 0, AC_VERB_SET_CONNECT_SEL, imux->items[0].index); } } else { imux = spec->dinput_mux; /* first dimux item is hardcoded to select analog imux, * so lets skip it */ didx = idx - 
spec->num_analog_muxes + 1; snd_hda_codec_write_cache(codec, spec->dmux_nids[adc_idx], 0, AC_VERB_SET_CONNECT_SEL, imux->items[didx].index); } spec->cur_mux[adc_idx] = idx; return 1; } static int stac92xx_mono_mux_enum_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct sigmatel_spec *spec = codec->spec; return snd_hda_input_mux_info(spec->mono_mux, uinfo); } static int stac92xx_mono_mux_enum_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct sigmatel_spec *spec = codec->spec; ucontrol->value.enumerated.item[0] = spec->cur_mmux; return 0; } static int stac92xx_mono_mux_enum_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct sigmatel_spec *spec = codec->spec; return snd_hda_input_mux_put(codec, spec->mono_mux, ucontrol, spec->mono_nid, &spec->cur_mmux); } #define stac92xx_aloopback_info snd_ctl_boolean_mono_info static int stac92xx_aloopback_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); unsigned int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); struct sigmatel_spec *spec = codec->spec; ucontrol->value.integer.value[0] = !!(spec->aloopback & (spec->aloopback_mask << idx)); return 0; } static int stac92xx_aloopback_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct sigmatel_spec *spec = codec->spec; unsigned int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); unsigned int dac_mode; unsigned int val, idx_val; idx_val = spec->aloopback_mask << idx; if (ucontrol->value.integer.value[0]) val = spec->aloopback | idx_val; else val = spec->aloopback & ~idx_val; if (spec->aloopback == val) return 0; spec->aloopback = val; /* Only return the bits defined 
by the shift value of the * first two bytes of the mask */ dac_mode = snd_hda_codec_read(codec, codec->afg, 0, kcontrol->private_value & 0xFFFF, 0x0); dac_mode >>= spec->aloopback_shift; if (spec->aloopback & idx_val) { snd_hda_power_up(codec); dac_mode |= idx_val; } else { snd_hda_power_down(codec); dac_mode &= ~idx_val; } snd_hda_codec_write_cache(codec, codec->afg, 0, kcontrol->private_value >> 16, dac_mode); return 1; } static const struct hda_verb stac9200_core_init[] = { /* set dac0mux for dac converter */ { 0x07, AC_VERB_SET_CONNECT_SEL, 0x00}, {} }; static const struct hda_verb stac9200_eapd_init[] = { /* set dac0mux for dac converter */ {0x07, AC_VERB_SET_CONNECT_SEL, 0x00}, {0x08, AC_VERB_SET_EAPD_BTLENABLE, 0x02}, {} }; static const struct hda_verb dell_eq_core_init[] = { /* set master volume to max value without distortion * and direct control */ { 0x1f, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0xec}, {} }; static const struct hda_verb stac92hd73xx_core_init[] = { /* set master volume and direct control */ { 0x1f, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0xff}, {} }; static const struct hda_verb stac92hd83xxx_core_init[] = { /* power state controls amps */ { 0x01, AC_VERB_SET_EAPD, 1 << 2}, {} }; static const struct hda_verb stac92hd83xxx_hp_zephyr_init[] = { { 0x22, 0x785, 0x43 }, { 0x22, 0x782, 0xe0 }, { 0x22, 0x795, 0x00 }, {} }; static const struct hda_verb stac92hd71bxx_core_init[] = { /* set master volume and direct control */ { 0x28, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0xff}, {} }; static const struct hda_verb stac92hd71bxx_unmute_core_init[] = { /* unmute right and left channels for nodes 0x0f, 0xa, 0x0d */ { 0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)}, { 0x0a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)}, { 0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)}, {} }; static const struct hda_verb stac925x_core_init[] = { /* set dac0mux for dac converter */ { 0x06, AC_VERB_SET_CONNECT_SEL, 0x00}, /* mute the master volume */ { 0x0e, AC_VERB_SET_AMP_GAIN_MUTE, 
AMP_OUT_MUTE }, {} }; static const struct hda_verb stac922x_core_init[] = { /* set master volume and direct control */ { 0x16, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0xff}, {} }; static const struct hda_verb d965_core_init[] = { /* set master volume and direct control */ { 0x24, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0xff}, /* unmute node 0x1b */ { 0x1b, AC_VERB_SET_AMP_GAIN_MUTE, 0xb000}, /* select node 0x03 as DAC */ { 0x0b, AC_VERB_SET_CONNECT_SEL, 0x01}, {} }; static const struct hda_verb dell_3st_core_init[] = { /* don't set delta bit */ {0x24, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0x7f}, /* unmute node 0x1b */ {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, 0xb000}, /* select node 0x03 as DAC */ {0x0b, AC_VERB_SET_CONNECT_SEL, 0x01}, {} }; static const struct hda_verb stac927x_core_init[] = { /* set master volume and direct control */ { 0x24, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0xff}, /* enable analog pc beep path */ { 0x01, AC_VERB_SET_DIGI_CONVERT_2, 1 << 5}, {} }; static const struct hda_verb stac927x_volknob_core_init[] = { /* don't set delta bit */ {0x24, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0x7f}, /* enable analog pc beep path */ {0x01, AC_VERB_SET_DIGI_CONVERT_2, 1 << 5}, {} }; static const struct hda_verb stac9205_core_init[] = { /* set master volume and direct control */ { 0x24, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0xff}, /* enable analog pc beep path */ { 0x01, AC_VERB_SET_DIGI_CONVERT_2, 1 << 5}, {} }; #define STAC_MONO_MUX \ { \ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \ .name = "Mono Mux", \ .count = 1, \ .info = stac92xx_mono_mux_enum_info, \ .get = stac92xx_mono_mux_enum_get, \ .put = stac92xx_mono_mux_enum_put, \ } #define STAC_ANALOG_LOOPBACK(verb_read, verb_write, cnt) \ { \ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \ .name = "Analog Loopback", \ .count = cnt, \ .info = stac92xx_aloopback_info, \ .get = stac92xx_aloopback_get, \ .put = stac92xx_aloopback_put, \ .private_value = verb_read | (verb_write << 16), \ } #define DC_BIAS(xname, idx, nid) \ { \ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \ 
.name = xname, \ .index = idx, \ .info = stac92xx_dc_bias_info, \ .get = stac92xx_dc_bias_get, \ .put = stac92xx_dc_bias_put, \ .private_value = nid, \ } static const struct snd_kcontrol_new stac9200_mixer[] = { HDA_CODEC_VOLUME_MIN_MUTE("PCM Playback Volume", 0xb, 0, HDA_OUTPUT), HDA_CODEC_MUTE("PCM Playback Switch", 0xb, 0, HDA_OUTPUT), HDA_CODEC_VOLUME("Capture Volume", 0x0a, 0, HDA_OUTPUT), HDA_CODEC_MUTE("Capture Switch", 0x0a, 0, HDA_OUTPUT), { } /* end */ }; static const struct snd_kcontrol_new stac92hd73xx_6ch_loopback[] = { STAC_ANALOG_LOOPBACK(0xFA0, 0x7A1, 3), {} }; static const struct snd_kcontrol_new stac92hd73xx_8ch_loopback[] = { STAC_ANALOG_LOOPBACK(0xFA0, 0x7A1, 4), {} }; static const struct snd_kcontrol_new stac92hd73xx_10ch_loopback[] = { STAC_ANALOG_LOOPBACK(0xFA0, 0x7A1, 5), {} }; static const struct snd_kcontrol_new stac92hd71bxx_loopback[] = { STAC_ANALOG_LOOPBACK(0xFA0, 0x7A0, 2) }; static const struct snd_kcontrol_new stac925x_mixer[] = { HDA_CODEC_VOLUME_MIN_MUTE("PCM Playback Volume", 0xe, 0, HDA_OUTPUT), HDA_CODEC_MUTE("PCM Playback Switch", 0x0e, 0, HDA_OUTPUT), { } /* end */ }; static const struct snd_kcontrol_new stac9205_loopback[] = { STAC_ANALOG_LOOPBACK(0xFE0, 0x7E0, 1), {} }; static const struct snd_kcontrol_new stac927x_loopback[] = { STAC_ANALOG_LOOPBACK(0xFEB, 0x7EB, 1), {} }; static struct snd_kcontrol_new stac_dmux_mixer = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Digital Input Source", /* count set later */ .info = stac92xx_dmux_enum_info, .get = stac92xx_dmux_enum_get, .put = stac92xx_dmux_enum_put, }; static struct snd_kcontrol_new stac_smux_mixer = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "IEC958 Playback Source", /* count set later */ .info = stac92xx_smux_enum_info, .get = stac92xx_smux_enum_get, .put = stac92xx_smux_enum_put, }; static const char * const slave_pfxs[] = { "Front", "Surround", "Center", "LFE", "Side", "Headphone", "Speaker", "IEC958", NULL }; static void stac92xx_update_led_status(struct 
hda_codec *codec, int enabled); static void stac92xx_vmaster_hook(void *private_data, int val) { stac92xx_update_led_status(private_data, val); } static void stac92xx_free_kctls(struct hda_codec *codec); static int stac92xx_build_controls(struct hda_codec *codec) { struct sigmatel_spec *spec = codec->spec; unsigned int vmaster_tlv[4]; int err; int i; if (spec->mixer) { err = snd_hda_add_new_ctls(codec, spec->mixer); if (err < 0) return err; } for (i = 0; i < spec->num_mixers; i++) { err = snd_hda_add_new_ctls(codec, spec->mixers[i]); if (err < 0) return err; } if (!spec->auto_mic && spec->num_dmuxes > 0 && snd_hda_get_bool_hint(codec, "separate_dmux") == 1) { stac_dmux_mixer.count = spec->num_dmuxes; err = snd_hda_ctl_add(codec, 0, snd_ctl_new1(&stac_dmux_mixer, codec)); if (err < 0) return err; } if (spec->num_smuxes > 0) { int wcaps = get_wcaps(codec, spec->multiout.dig_out_nid); struct hda_input_mux *smux = &spec->private_smux; /* check for mute support on SPDIF out */ if (wcaps & AC_WCAP_OUT_AMP) { snd_hda_add_imux_item(smux, "Off", 0, NULL); spec->spdif_mute = 1; } stac_smux_mixer.count = spec->num_smuxes; err = snd_hda_ctl_add(codec, 0, snd_ctl_new1(&stac_smux_mixer, codec)); if (err < 0) return err; } if (spec->multiout.dig_out_nid) { err = snd_hda_create_spdif_out_ctls(codec, spec->multiout.dig_out_nid, spec->multiout.dig_out_nid); if (err < 0) return err; err = snd_hda_create_spdif_share_sw(codec, &spec->multiout); if (err < 0) return err; spec->multiout.share_spdif = 1; } if (spec->dig_in_nid && !(spec->gpio_dir & 0x01)) { err = snd_hda_create_spdif_in_ctls(codec, spec->dig_in_nid); if (err < 0) return err; } /* if we have no master control, let's create it */ snd_hda_set_vmaster_tlv(codec, spec->multiout.dac_nids[0], HDA_OUTPUT, vmaster_tlv); /* correct volume offset */ vmaster_tlv[2] += vmaster_tlv[3] * spec->volume_offset; /* minimum value is actually mute */ vmaster_tlv[3] |= TLV_DB_SCALE_MUTE; err = snd_hda_add_vmaster(codec, "Master Playback 
Volume", vmaster_tlv, slave_pfxs, "Playback Volume"); if (err < 0) return err; err = __snd_hda_add_vmaster(codec, "Master Playback Switch", NULL, slave_pfxs, "Playback Switch", true, &spec->vmaster_mute.sw_kctl); if (err < 0) return err; if (spec->gpio_led) { spec->vmaster_mute.hook = stac92xx_vmaster_hook; err = snd_hda_add_vmaster_hook(codec, &spec->vmaster_mute, true); if (err < 0) return err; } if (spec->aloopback_ctl && snd_hda_get_bool_hint(codec, "loopback") == 1) { err = snd_hda_add_new_ctls(codec, spec->aloopback_ctl); if (err < 0) return err; } stac92xx_free_kctls(codec); /* no longer needed */ err = snd_hda_jack_add_kctls(codec, &spec->autocfg); if (err < 0) return err; return 0; } static const unsigned int ref9200_pin_configs[8] = { 0x01c47010, 0x01447010, 0x0221401f, 0x01114010, 0x02a19020, 0x01a19021, 0x90100140, 0x01813122, }; static const unsigned int gateway9200_m4_pin_configs[8] = { 0x400000fe, 0x404500f4, 0x400100f0, 0x90110010, 0x400100f1, 0x02a1902e, 0x500000f2, 0x500000f3, }; static const unsigned int gateway9200_m4_2_pin_configs[8] = { 0x400000fe, 0x404500f4, 0x400100f0, 0x90110010, 0x400100f1, 0x02a1902e, 0x500000f2, 0x500000f3, }; /* STAC 9200 pin configs for 102801A8 102801DE 102801E8 */ static const unsigned int dell9200_d21_pin_configs[8] = { 0x400001f0, 0x400001f1, 0x02214030, 0x01014010, 0x02a19020, 0x01a19021, 0x90100140, 0x01813122, }; /* STAC 9200 pin configs for 102801C0 102801C1 */ static const unsigned int dell9200_d22_pin_configs[8] = { 0x400001f0, 0x400001f1, 0x0221401f, 0x01014010, 0x01813020, 0x02a19021, 0x90100140, 0x400001f2, }; /* STAC 9200 pin configs for 102801C4 (Dell Dimension E310) 102801C5 102801C7 102801D9 102801DA 102801E3 */ static const unsigned int dell9200_d23_pin_configs[8] = { 0x400001f0, 0x400001f1, 0x0221401f, 0x01014010, 0x01813020, 0x01a19021, 0x90100140, 0x400001f2, }; /* STAC 9200-32 pin configs for 102801B5 (Dell Inspiron 630m) 102801D8 (Dell Inspiron 640m) */ static const unsigned int 
dell9200_m21_pin_configs[8] = { 0x40c003fa, 0x03441340, 0x0321121f, 0x90170310, 0x408003fb, 0x03a11020, 0x401003fc, 0x403003fd, }; /* STAC 9200-32 pin configs for 102801C2 (Dell Latitude D620) 102801C8 102801CC (Dell Latitude D820) 102801D4 102801D6 */ static const unsigned int dell9200_m22_pin_configs[8] = { 0x40c003fa, 0x0144131f, 0x0321121f, 0x90170310, 0x90a70321, 0x03a11020, 0x401003fb, 0x40f000fc, }; /* STAC 9200-32 pin configs for 102801CE (Dell XPS M1710) 102801CF (Dell Precision M90) */ static const unsigned int dell9200_m23_pin_configs[8] = { 0x40c003fa, 0x01441340, 0x0421421f, 0x90170310, 0x408003fb, 0x04a1102e, 0x90170311, 0x403003fc, }; /* STAC 9200-32 pin configs for 102801C9 102801CA 102801CB (Dell Latitude 120L) 102801D3 */ static const unsigned int dell9200_m24_pin_configs[8] = { 0x40c003fa, 0x404003fb, 0x0321121f, 0x90170310, 0x408003fc, 0x03a11020, 0x401003fd, 0x403003fe, }; /* STAC 9200-32 pin configs for 102801BD (Dell Inspiron E1505n) 102801EE 102801EF */ static const unsigned int dell9200_m25_pin_configs[8] = { 0x40c003fa, 0x01441340, 0x0421121f, 0x90170310, 0x408003fb, 0x04a11020, 0x401003fc, 0x403003fd, }; /* STAC 9200-32 pin configs for 102801F5 (Dell Inspiron 1501) 102801F6 */ static const unsigned int dell9200_m26_pin_configs[8] = { 0x40c003fa, 0x404003fb, 0x0421121f, 0x90170310, 0x408003fc, 0x04a11020, 0x401003fd, 0x403003fe, }; /* STAC 9200-32 102801CD (Dell Inspiron E1705/9400) */ static const unsigned int dell9200_m27_pin_configs[8] = { 0x40c003fa, 0x01441340, 0x0421121f, 0x90170310, 0x90170310, 0x04a11020, 0x90170310, 0x40f003fc, }; static const unsigned int oqo9200_pin_configs[8] = { 0x40c000f0, 0x404000f1, 0x0221121f, 0x02211210, 0x90170111, 0x90a70120, 0x400000f2, 0x400000f3, }; static const unsigned int *stac9200_brd_tbl[STAC_9200_MODELS] = { [STAC_REF] = ref9200_pin_configs, [STAC_9200_OQO] = oqo9200_pin_configs, [STAC_9200_DELL_D21] = dell9200_d21_pin_configs, [STAC_9200_DELL_D22] = dell9200_d22_pin_configs, 
[STAC_9200_DELL_D23] = dell9200_d23_pin_configs, [STAC_9200_DELL_M21] = dell9200_m21_pin_configs, [STAC_9200_DELL_M22] = dell9200_m22_pin_configs, [STAC_9200_DELL_M23] = dell9200_m23_pin_configs, [STAC_9200_DELL_M24] = dell9200_m24_pin_configs, [STAC_9200_DELL_M25] = dell9200_m25_pin_configs, [STAC_9200_DELL_M26] = dell9200_m26_pin_configs, [STAC_9200_DELL_M27] = dell9200_m27_pin_configs, [STAC_9200_M4] = gateway9200_m4_pin_configs, [STAC_9200_M4_2] = gateway9200_m4_2_pin_configs, [STAC_9200_PANASONIC] = ref9200_pin_configs, }; static const char * const stac9200_models[STAC_9200_MODELS] = { [STAC_AUTO] = "auto", [STAC_REF] = "ref", [STAC_9200_OQO] = "oqo", [STAC_9200_DELL_D21] = "dell-d21", [STAC_9200_DELL_D22] = "dell-d22", [STAC_9200_DELL_D23] = "dell-d23", [STAC_9200_DELL_M21] = "dell-m21", [STAC_9200_DELL_M22] = "dell-m22", [STAC_9200_DELL_M23] = "dell-m23", [STAC_9200_DELL_M24] = "dell-m24", [STAC_9200_DELL_M25] = "dell-m25", [STAC_9200_DELL_M26] = "dell-m26", [STAC_9200_DELL_M27] = "dell-m27", [STAC_9200_M4] = "gateway-m4", [STAC_9200_M4_2] = "gateway-m4-2", [STAC_9200_PANASONIC] = "panasonic", }; static const struct snd_pci_quirk stac9200_cfg_tbl[] = { /* SigmaTel reference board */ SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x2668, "DFI LanParty", STAC_REF), SND_PCI_QUIRK(PCI_VENDOR_ID_DFI, 0x3101, "DFI LanParty", STAC_REF), /* Dell laptops have BIOS problem */ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01a8, "unknown Dell", STAC_9200_DELL_D21), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01b5, "Dell Inspiron 630m", STAC_9200_DELL_M21), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01bd, "Dell Inspiron E1505n", STAC_9200_DELL_M25), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01c0, "unknown Dell", STAC_9200_DELL_D22), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01c1, "unknown Dell", STAC_9200_DELL_D22), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01c2, "Dell Latitude D620", STAC_9200_DELL_M22), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01c5, "unknown Dell", STAC_9200_DELL_D23), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01c7, 
"unknown Dell", STAC_9200_DELL_D23), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01c8, "unknown Dell", STAC_9200_DELL_M22), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01c9, "unknown Dell", STAC_9200_DELL_M24), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01ca, "unknown Dell", STAC_9200_DELL_M24), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01cb, "Dell Latitude 120L", STAC_9200_DELL_M24), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01cc, "Dell Latitude D820", STAC_9200_DELL_M22), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01cd, "Dell Inspiron E1705/9400", STAC_9200_DELL_M27), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01ce, "Dell XPS M1710", STAC_9200_DELL_M23), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01cf, "Dell Precision M90", STAC_9200_DELL_M23), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01d3, "unknown Dell", STAC_9200_DELL_M22), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01d4, "unknown Dell", STAC_9200_DELL_M22), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01d6, "unknown Dell", STAC_9200_DELL_M22), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01d8, "Dell Inspiron 640m", STAC_9200_DELL_M21), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01d9, "unknown Dell", STAC_9200_DELL_D23), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01da, "unknown Dell", STAC_9200_DELL_D23), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01de, "unknown Dell", STAC_9200_DELL_D21), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01e3, "unknown Dell", STAC_9200_DELL_D23), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01e8, "unknown Dell", STAC_9200_DELL_D21), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01ee, "unknown Dell", STAC_9200_DELL_M25), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01ef, "unknown Dell", STAC_9200_DELL_M25), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f5, "Dell Inspiron 1501", STAC_9200_DELL_M26), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f6, "unknown Dell", STAC_9200_DELL_M26), /* Panasonic */ SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-74", STAC_9200_PANASONIC), /* Gateway machines needs EAPD to be set on resume */ SND_PCI_QUIRK(0x107b, 0x0205, "Gateway S-7110M", STAC_9200_M4), SND_PCI_QUIRK(0x107b, 0x0317, "Gateway MT3423, 
MX341*", STAC_9200_M4_2), SND_PCI_QUIRK(0x107b, 0x0318, "Gateway ML3019, MT3707", STAC_9200_M4_2), /* OQO Mobile */ SND_PCI_QUIRK(0x1106, 0x3288, "OQO Model 2", STAC_9200_OQO), {} /* terminator */ }; static const unsigned int ref925x_pin_configs[8] = { 0x40c003f0, 0x424503f2, 0x01813022, 0x02a19021, 0x90a70320, 0x02214210, 0x01019020, 0x9033032e, }; static const unsigned int stac925xM1_pin_configs[8] = { 0x40c003f4, 0x424503f2, 0x400000f3, 0x02a19020, 0x40a000f0, 0x90100210, 0x400003f1, 0x9033032e, }; static const unsigned int stac925xM1_2_pin_configs[8] = { 0x40c003f4, 0x424503f2, 0x400000f3, 0x02a19020, 0x40a000f0, 0x90100210, 0x400003f1, 0x9033032e, }; static const unsigned int stac925xM2_pin_configs[8] = { 0x40c003f4, 0x424503f2, 0x400000f3, 0x02a19020, 0x40a000f0, 0x90100210, 0x400003f1, 0x9033032e, }; static const unsigned int stac925xM2_2_pin_configs[8] = { 0x40c003f4, 0x424503f2, 0x400000f3, 0x02a19020, 0x40a000f0, 0x90100210, 0x400003f1, 0x9033032e, }; static const unsigned int stac925xM3_pin_configs[8] = { 0x40c003f4, 0x424503f2, 0x400000f3, 0x02a19020, 0x40a000f0, 0x90100210, 0x400003f1, 0x503303f3, }; static const unsigned int stac925xM5_pin_configs[8] = { 0x40c003f4, 0x424503f2, 0x400000f3, 0x02a19020, 0x40a000f0, 0x90100210, 0x400003f1, 0x9033032e, }; static const unsigned int stac925xM6_pin_configs[8] = { 0x40c003f4, 0x424503f2, 0x400000f3, 0x02a19020, 0x40a000f0, 0x90100210, 0x400003f1, 0x90330320, }; static const unsigned int *stac925x_brd_tbl[STAC_925x_MODELS] = { [STAC_REF] = ref925x_pin_configs, [STAC_M1] = stac925xM1_pin_configs, [STAC_M1_2] = stac925xM1_2_pin_configs, [STAC_M2] = stac925xM2_pin_configs, [STAC_M2_2] = stac925xM2_2_pin_configs, [STAC_M3] = stac925xM3_pin_configs, [STAC_M5] = stac925xM5_pin_configs, [STAC_M6] = stac925xM6_pin_configs, }; static const char * const stac925x_models[STAC_925x_MODELS] = { [STAC_925x_AUTO] = "auto", [STAC_REF] = "ref", [STAC_M1] = "m1", [STAC_M1_2] = "m1-2", [STAC_M2] = "m2", [STAC_M2_2] = "m2-2", 
[STAC_M3] = "m3",
	[STAC_M5] = "m5",
	[STAC_M6] = "m6",
};

/*
 * STAC925x board lookup by codec subsystem ID (0x107b = Gateway).
 * NOTE(review): 0x107b/0x0281 appears twice below (STAC_M1 and
 * STAC_M6); if the quirk lookup returns the first match, the STAC_M6
 * entry is unreachable -- confirm which model those machines need.
 */
static const struct snd_pci_quirk stac925x_codec_id_cfg_tbl[] = {
	SND_PCI_QUIRK(0x107b, 0x0316, "Gateway M255", STAC_M2),
	SND_PCI_QUIRK(0x107b, 0x0366, "Gateway MP6954", STAC_M5),
	SND_PCI_QUIRK(0x107b, 0x0461, "Gateway NX560XL", STAC_M1),
	SND_PCI_QUIRK(0x107b, 0x0681, "Gateway NX860", STAC_M2),
	SND_PCI_QUIRK(0x107b, 0x0367, "Gateway MX6453", STAC_M1_2),
	/* Not sure about the brand name for those */
	SND_PCI_QUIRK(0x107b, 0x0281, "Gateway mobile", STAC_M1),
	SND_PCI_QUIRK(0x107b, 0x0507, "Gateway mobile", STAC_M3),
	SND_PCI_QUIRK(0x107b, 0x0281, "Gateway mobile", STAC_M6),
	SND_PCI_QUIRK(0x107b, 0x0685, "Gateway mobile", STAC_M2_2),
	{} /* terminator */
};

/* STAC925x board lookup by PCI subsystem ID */
static const struct snd_pci_quirk stac925x_cfg_tbl[] = {
	/* SigmaTel reference board */
	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x2668,
		      "DFI LanParty", STAC_REF),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DFI, 0x3101,
		      "DFI LanParty", STAC_REF),
	SND_PCI_QUIRK(0x8384, 0x7632, "Stac9202 Reference Board", STAC_REF),

	/* Default table for unknown ID */
	SND_PCI_QUIRK(0x1002, 0x437b, "Gateway mobile", STAC_M2_2),

	{} /* terminator */
};

/* STAC92HD73xx pin default configurations, 13 pins per board */
static const unsigned int ref92hd73xx_pin_configs[13] = {
	0x02214030, 0x02a19040, 0x01a19020, 0x02214030,
	0x0181302e, 0x01014010, 0x01014020, 0x01014030,
	0x02319040, 0x90a000f0, 0x90a000f0, 0x01452050,
	0x01452050,
};

static const unsigned int dell_m6_pin_configs[13] = {
	0x0321101f, 0x4f00000f, 0x4f0000f0, 0x90170110,
	0x03a11020, 0x0321101f, 0x4f0000f0, 0x4f0000f0,
	0x4f0000f0, 0x90a60160, 0x4f0000f0, 0x4f0000f0,
	0x4f0000f0,
};

static const unsigned int alienware_m17x_pin_configs[13] = {
	0x0321101f, 0x0321101f, 0x03a11020, 0x03014020,
	0x90170110, 0x4f0000f0, 0x4f0000f0, 0x4f0000f0,
	0x4f0000f0, 0x90a60160, 0x4f0000f0, 0x4f0000f0,
	0x904601b0,
};

/* only 7 of 13 entries given; the rest are implicitly zero and are
 * skipped by stac92xx_set_config_regs() */
static const unsigned int intel_dg45id_pin_configs[13] = {
	0x02214230, 0x02A19240, 0x01013214, 0x01014210,
	0x01A19250, 0x01011212, 0x01016211
};

static const unsigned int *stac92hd73xx_brd_tbl[STAC_92HD73XX_MODELS] = {
[STAC_92HD73XX_REF] = ref92hd73xx_pin_configs, [STAC_DELL_M6_AMIC] = dell_m6_pin_configs, [STAC_DELL_M6_DMIC] = dell_m6_pin_configs, [STAC_DELL_M6_BOTH] = dell_m6_pin_configs, [STAC_DELL_EQ] = dell_m6_pin_configs, [STAC_ALIENWARE_M17X] = alienware_m17x_pin_configs, [STAC_92HD73XX_INTEL] = intel_dg45id_pin_configs, }; static const char * const stac92hd73xx_models[STAC_92HD73XX_MODELS] = { [STAC_92HD73XX_AUTO] = "auto", [STAC_92HD73XX_NO_JD] = "no-jd", [STAC_92HD73XX_REF] = "ref", [STAC_92HD73XX_INTEL] = "intel", [STAC_DELL_M6_AMIC] = "dell-m6-amic", [STAC_DELL_M6_DMIC] = "dell-m6-dmic", [STAC_DELL_M6_BOTH] = "dell-m6", [STAC_DELL_EQ] = "dell-eq", [STAC_ALIENWARE_M17X] = "alienware", }; static const struct snd_pci_quirk stac92hd73xx_cfg_tbl[] = { /* SigmaTel reference board */ SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x2668, "DFI LanParty", STAC_92HD73XX_REF), SND_PCI_QUIRK(PCI_VENDOR_ID_DFI, 0x3101, "DFI LanParty", STAC_92HD73XX_REF), SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x5002, "Intel DG45ID", STAC_92HD73XX_INTEL), SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x5003, "Intel DG45FC", STAC_92HD73XX_INTEL), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0254, "Dell Studio 1535", STAC_DELL_M6_DMIC), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0255, "unknown Dell", STAC_DELL_M6_DMIC), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0256, "unknown Dell", STAC_DELL_M6_BOTH), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0257, "unknown Dell", STAC_DELL_M6_BOTH), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x025e, "unknown Dell", STAC_DELL_M6_AMIC), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x025f, "unknown Dell", STAC_DELL_M6_AMIC), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0271, "unknown Dell", STAC_DELL_M6_DMIC), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0272, "unknown Dell", STAC_DELL_M6_DMIC), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x029f, "Dell Studio 1537", STAC_DELL_M6_DMIC), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02a0, "Dell Studio 17", STAC_DELL_M6_DMIC), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02be, "Dell Studio 1555", STAC_DELL_M6_DMIC), 
SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02bd, "Dell Studio 1557", STAC_DELL_M6_DMIC), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02fe, "Dell Studio XPS 1645", STAC_DELL_M6_DMIC), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0413, "Dell Studio 1558", STAC_DELL_M6_DMIC), {} /* terminator */ }; static const struct snd_pci_quirk stac92hd73xx_codec_id_cfg_tbl[] = { SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02a1, "Alienware M17x", STAC_ALIENWARE_M17X), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x043a, "Alienware M17x", STAC_ALIENWARE_M17X), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0490, "Alienware M17x R3", STAC_DELL_EQ), {} /* terminator */ }; static const unsigned int ref92hd83xxx_pin_configs[10] = { 0x02214030, 0x02211010, 0x02a19020, 0x02170130, 0x01014050, 0x01819040, 0x01014020, 0x90a3014e, 0x01451160, 0x98560170, }; static const unsigned int dell_s14_pin_configs[10] = { 0x0221403f, 0x0221101f, 0x02a19020, 0x90170110, 0x40f000f0, 0x40f000f0, 0x40f000f0, 0x90a60160, 0x40f000f0, 0x40f000f0, }; static const unsigned int dell_vostro_3500_pin_configs[10] = { 0x02a11020, 0x0221101f, 0x400000f0, 0x90170110, 0x400000f1, 0x400000f2, 0x400000f3, 0x90a60160, 0x400000f4, 0x400000f5, }; static const unsigned int hp_dv7_4000_pin_configs[10] = { 0x03a12050, 0x0321201f, 0x40f000f0, 0x90170110, 0x40f000f0, 0x40f000f0, 0x90170110, 0xd5a30140, 0x40f000f0, 0x40f000f0, }; static const unsigned int hp_zephyr_pin_configs[10] = { 0x01813050, 0x0421201f, 0x04a1205e, 0x96130310, 0x96130310, 0x0101401f, 0x1111611f, 0xd5a30130, 0, 0, }; static const unsigned int hp_cNB11_intquad_pin_configs[10] = { 0x40f000f0, 0x0221101f, 0x02a11020, 0x92170110, 0x40f000f0, 0x92170110, 0x40f000f0, 0xd5a30130, 0x40f000f0, 0x40f000f0, }; static const unsigned int *stac92hd83xxx_brd_tbl[STAC_92HD83XXX_MODELS] = { [STAC_92HD83XXX_REF] = ref92hd83xxx_pin_configs, [STAC_92HD83XXX_PWR_REF] = ref92hd83xxx_pin_configs, [STAC_DELL_S14] = dell_s14_pin_configs, [STAC_DELL_VOSTRO_3500] = dell_vostro_3500_pin_configs, [STAC_92HD83XXX_HP_cNB11_INTQUAD] = 
hp_cNB11_intquad_pin_configs, [STAC_HP_DV7_4000] = hp_dv7_4000_pin_configs, [STAC_HP_ZEPHYR] = hp_zephyr_pin_configs, }; static const char * const stac92hd83xxx_models[STAC_92HD83XXX_MODELS] = { [STAC_92HD83XXX_AUTO] = "auto", [STAC_92HD83XXX_REF] = "ref", [STAC_92HD83XXX_PWR_REF] = "mic-ref", [STAC_DELL_S14] = "dell-s14", [STAC_DELL_VOSTRO_3500] = "dell-vostro-3500", [STAC_92HD83XXX_HP_cNB11_INTQUAD] = "hp_cNB11_intquad", [STAC_HP_DV7_4000] = "hp-dv7-4000", [STAC_HP_ZEPHYR] = "hp-zephyr", }; static const struct snd_pci_quirk stac92hd83xxx_cfg_tbl[] = { /* SigmaTel reference board */ SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x2668, "DFI LanParty", STAC_92HD83XXX_REF), SND_PCI_QUIRK(PCI_VENDOR_ID_DFI, 0x3101, "DFI LanParty", STAC_92HD83XXX_REF), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02ba, "unknown Dell", STAC_DELL_S14), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x1028, "Dell Vostro 3500", STAC_DELL_VOSTRO_3500), SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1656, "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD), SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1657, "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD), SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1658, "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD), SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1659, "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD), SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x165A, "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD), SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x165B, "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD), SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3388, "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD), SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3389, "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD), SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x355B, "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD), SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x355C, "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD), SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x355D, "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD), SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x355E, "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD), SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x355F, "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD), SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3560, "HP", 
STAC_92HD83XXX_HP_cNB11_INTQUAD), SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x358B, "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD), SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x358C, "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD), SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x358D, "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD), SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3591, "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD), SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3592, "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD), SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3593, "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD), SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3561, "HP", STAC_HP_ZEPHYR), {} /* terminator */ }; static const struct snd_pci_quirk stac92hd83xxx_codec_id_cfg_tbl[] = { SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3561, "HP", STAC_HP_ZEPHYR), {} /* terminator */ }; static const unsigned int ref92hd71bxx_pin_configs[STAC92HD71BXX_NUM_PINS] = { 0x02214030, 0x02a19040, 0x01a19020, 0x01014010, 0x0181302e, 0x01014010, 0x01019020, 0x90a000f0, 0x90a000f0, 0x01452050, 0x01452050, 0x00000000, 0x00000000 }; static const unsigned int dell_m4_1_pin_configs[STAC92HD71BXX_NUM_PINS] = { 0x0421101f, 0x04a11221, 0x40f000f0, 0x90170110, 0x23a1902e, 0x23014250, 0x40f000f0, 0x90a000f0, 0x40f000f0, 0x4f0000f0, 0x4f0000f0, 0x00000000, 0x00000000 }; static const unsigned int dell_m4_2_pin_configs[STAC92HD71BXX_NUM_PINS] = { 0x0421101f, 0x04a11221, 0x90a70330, 0x90170110, 0x23a1902e, 0x23014250, 0x40f000f0, 0x40f000f0, 0x40f000f0, 0x044413b0, 0x044413b0, 0x00000000, 0x00000000 }; static const unsigned int dell_m4_3_pin_configs[STAC92HD71BXX_NUM_PINS] = { 0x0421101f, 0x04a11221, 0x90a70330, 0x90170110, 0x40f000f0, 0x40f000f0, 0x40f000f0, 0x90a000f0, 0x40f000f0, 0x044413b0, 0x044413b0, 0x00000000, 0x00000000 }; static const unsigned int *stac92hd71bxx_brd_tbl[STAC_92HD71BXX_MODELS] = { [STAC_92HD71BXX_REF] = ref92hd71bxx_pin_configs, [STAC_DELL_M4_1] = dell_m4_1_pin_configs, [STAC_DELL_M4_2] = dell_m4_2_pin_configs, [STAC_DELL_M4_3] = dell_m4_3_pin_configs, [STAC_HP_M4] = NULL, [STAC_HP_DV4] = NULL, 
[STAC_HP_DV5] = NULL, [STAC_HP_HDX] = NULL, [STAC_HP_DV4_1222NR] = NULL, }; static const char * const stac92hd71bxx_models[STAC_92HD71BXX_MODELS] = { [STAC_92HD71BXX_AUTO] = "auto", [STAC_92HD71BXX_REF] = "ref", [STAC_DELL_M4_1] = "dell-m4-1", [STAC_DELL_M4_2] = "dell-m4-2", [STAC_DELL_M4_3] = "dell-m4-3", [STAC_HP_M4] = "hp-m4", [STAC_HP_DV4] = "hp-dv4", [STAC_HP_DV5] = "hp-dv5", [STAC_HP_HDX] = "hp-hdx", [STAC_HP_DV4_1222NR] = "hp-dv4-1222nr", }; static const struct snd_pci_quirk stac92hd71bxx_cfg_tbl[] = { /* SigmaTel reference board */ SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x2668, "DFI LanParty", STAC_92HD71BXX_REF), SND_PCI_QUIRK(PCI_VENDOR_ID_DFI, 0x3101, "DFI LanParty", STAC_92HD71BXX_REF), SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x30fb, "HP dv4-1222nr", STAC_HP_DV4_1222NR), SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xfff0, 0x1720, "HP", STAC_HP_DV5), SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xfff0, 0x3080, "HP", STAC_HP_DV5), SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xfff0, 0x30f0, "HP dv4-7", STAC_HP_DV4), SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xfff0, 0x3600, "HP dv4-7", STAC_HP_DV5), SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3610, "HP HDX", STAC_HP_HDX), /* HDX18 */ SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x361a, "HP mini 1000", STAC_HP_M4), SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x361b, "HP HDX", STAC_HP_HDX), /* HDX16 */ SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xfff0, 0x3620, "HP dv6", STAC_HP_DV5), SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3061, "HP dv6", STAC_HP_DV5), /* HP dv6-1110ax */ SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x363e, "HP DV6", STAC_HP_DV5), SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xfff0, 0x7010, "HP", STAC_HP_DV5), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0233, "unknown Dell", STAC_DELL_M4_1), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0234, "unknown Dell", STAC_DELL_M4_1), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0250, "unknown Dell", STAC_DELL_M4_1), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x024f, "unknown Dell", STAC_DELL_M4_1), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x024d, "unknown Dell", STAC_DELL_M4_1), 
SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0251, "unknown Dell", STAC_DELL_M4_1), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0277, "unknown Dell", STAC_DELL_M4_1), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0263, "unknown Dell", STAC_DELL_M4_2), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0265, "unknown Dell", STAC_DELL_M4_2), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0262, "unknown Dell", STAC_DELL_M4_2), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0264, "unknown Dell", STAC_DELL_M4_2), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02aa, "unknown Dell", STAC_DELL_M4_3), {} /* terminator */ }; static const unsigned int ref922x_pin_configs[10] = { 0x01014010, 0x01016011, 0x01012012, 0x0221401f, 0x01813122, 0x01011014, 0x01441030, 0x01c41030, 0x40000100, 0x40000100, }; /* STAC 922X pin configs for 102801A7 102801AB 102801A9 102801D1 102801D2 */ static const unsigned int dell_922x_d81_pin_configs[10] = { 0x02214030, 0x01a19021, 0x01111012, 0x01114010, 0x02a19020, 0x01117011, 0x400001f0, 0x400001f1, 0x01813122, 0x400001f2, }; /* STAC 922X pin configs for 102801AC 102801D0 */ static const unsigned int dell_922x_d82_pin_configs[10] = { 0x02214030, 0x01a19021, 0x01111012, 0x01114010, 0x02a19020, 0x01117011, 0x01451140, 0x400001f0, 0x01813122, 0x400001f1, }; /* STAC 922X pin configs for 102801BF */ static const unsigned int dell_922x_m81_pin_configs[10] = { 0x0321101f, 0x01112024, 0x01111222, 0x91174220, 0x03a11050, 0x01116221, 0x90a70330, 0x01452340, 0x40C003f1, 0x405003f0, }; /* STAC 9221 A1 pin configs for 102801D7 (Dell XPS M1210) */ static const unsigned int dell_922x_m82_pin_configs[10] = { 0x02211211, 0x408103ff, 0x02a1123e, 0x90100310, 0x408003f1, 0x0221121f, 0x03451340, 0x40c003f2, 0x508003f3, 0x405003f4, }; static const unsigned int d945gtp3_pin_configs[10] = { 0x0221401f, 0x01a19022, 0x01813021, 0x01014010, 0x40000100, 0x40000100, 0x40000100, 0x40000100, 0x02a19120, 0x40000100, }; static const unsigned int d945gtp5_pin_configs[10] = { 0x0221401f, 0x01011012, 0x01813024, 0x01014010, 0x01a19021, 0x01016011, 
0x01452130, 0x40000100, 0x02a19320, 0x40000100, }; static const unsigned int intel_mac_v1_pin_configs[10] = { 0x0121e21f, 0x400000ff, 0x9017e110, 0x400000fd, 0x400000fe, 0x0181e020, 0x1145e030, 0x11c5e240, 0x400000fc, 0x400000fb, }; static const unsigned int intel_mac_v2_pin_configs[10] = { 0x0121e21f, 0x90a7012e, 0x9017e110, 0x400000fd, 0x400000fe, 0x0181e020, 0x1145e230, 0x500000fa, 0x400000fc, 0x400000fb, }; static const unsigned int intel_mac_v3_pin_configs[10] = { 0x0121e21f, 0x90a7012e, 0x9017e110, 0x400000fd, 0x400000fe, 0x0181e020, 0x1145e230, 0x11c5e240, 0x400000fc, 0x400000fb, }; static const unsigned int intel_mac_v4_pin_configs[10] = { 0x0321e21f, 0x03a1e02e, 0x9017e110, 0x9017e11f, 0x400000fe, 0x0381e020, 0x1345e230, 0x13c5e240, 0x400000fc, 0x400000fb, }; static const unsigned int intel_mac_v5_pin_configs[10] = { 0x0321e21f, 0x03a1e02e, 0x9017e110, 0x9017e11f, 0x400000fe, 0x0381e020, 0x1345e230, 0x13c5e240, 0x400000fc, 0x400000fb, }; static const unsigned int ecs202_pin_configs[10] = { 0x0221401f, 0x02a19020, 0x01a19020, 0x01114010, 0x408000f0, 0x01813022, 0x074510a0, 0x40c400f1, 0x9037012e, 0x40e000f2, }; static const unsigned int *stac922x_brd_tbl[STAC_922X_MODELS] = { [STAC_D945_REF] = ref922x_pin_configs, [STAC_D945GTP3] = d945gtp3_pin_configs, [STAC_D945GTP5] = d945gtp5_pin_configs, [STAC_INTEL_MAC_V1] = intel_mac_v1_pin_configs, [STAC_INTEL_MAC_V2] = intel_mac_v2_pin_configs, [STAC_INTEL_MAC_V3] = intel_mac_v3_pin_configs, [STAC_INTEL_MAC_V4] = intel_mac_v4_pin_configs, [STAC_INTEL_MAC_V5] = intel_mac_v5_pin_configs, [STAC_INTEL_MAC_AUTO] = intel_mac_v3_pin_configs, /* for backward compatibility */ [STAC_MACMINI] = intel_mac_v3_pin_configs, [STAC_MACBOOK] = intel_mac_v5_pin_configs, [STAC_MACBOOK_PRO_V1] = intel_mac_v3_pin_configs, [STAC_MACBOOK_PRO_V2] = intel_mac_v3_pin_configs, [STAC_IMAC_INTEL] = intel_mac_v2_pin_configs, [STAC_IMAC_INTEL_20] = intel_mac_v3_pin_configs, [STAC_ECS_202] = ecs202_pin_configs, [STAC_922X_DELL_D81] = 
dell_922x_d81_pin_configs, [STAC_922X_DELL_D82] = dell_922x_d82_pin_configs, [STAC_922X_DELL_M81] = dell_922x_m81_pin_configs, [STAC_922X_DELL_M82] = dell_922x_m82_pin_configs, }; static const char * const stac922x_models[STAC_922X_MODELS] = { [STAC_922X_AUTO] = "auto", [STAC_D945_REF] = "ref", [STAC_D945GTP5] = "5stack", [STAC_D945GTP3] = "3stack", [STAC_INTEL_MAC_V1] = "intel-mac-v1", [STAC_INTEL_MAC_V2] = "intel-mac-v2", [STAC_INTEL_MAC_V3] = "intel-mac-v3", [STAC_INTEL_MAC_V4] = "intel-mac-v4", [STAC_INTEL_MAC_V5] = "intel-mac-v5", [STAC_INTEL_MAC_AUTO] = "intel-mac-auto", /* for backward compatibility */ [STAC_MACMINI] = "macmini", [STAC_MACBOOK] = "macbook", [STAC_MACBOOK_PRO_V1] = "macbook-pro-v1", [STAC_MACBOOK_PRO_V2] = "macbook-pro", [STAC_IMAC_INTEL] = "imac-intel", [STAC_IMAC_INTEL_20] = "imac-intel-20", [STAC_ECS_202] = "ecs202", [STAC_922X_DELL_D81] = "dell-d81", [STAC_922X_DELL_D82] = "dell-d82", [STAC_922X_DELL_M81] = "dell-m81", [STAC_922X_DELL_M82] = "dell-m82", }; static const struct snd_pci_quirk stac922x_cfg_tbl[] = { /* SigmaTel reference board */ SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x2668, "DFI LanParty", STAC_D945_REF), SND_PCI_QUIRK(PCI_VENDOR_ID_DFI, 0x3101, "DFI LanParty", STAC_D945_REF), /* Intel 945G based systems */ SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x0101, "Intel D945G", STAC_D945GTP3), SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x0202, "Intel D945G", STAC_D945GTP3), SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x0606, "Intel D945G", STAC_D945GTP3), SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x0601, "Intel D945G", STAC_D945GTP3), SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x0111, "Intel D945G", STAC_D945GTP3), SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x1115, "Intel D945G", STAC_D945GTP3), SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x1116, "Intel D945G", STAC_D945GTP3), SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x1117, "Intel D945G", STAC_D945GTP3), SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x1118, "Intel D945G", STAC_D945GTP3), SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x1119, "Intel D945G", 
STAC_D945GTP3), SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x8826, "Intel D945G", STAC_D945GTP3), SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x5049, "Intel D945G", STAC_D945GTP3), SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x5055, "Intel D945G", STAC_D945GTP3), SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x5048, "Intel D945G", STAC_D945GTP3), SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x0110, "Intel D945G", STAC_D945GTP3), /* Intel D945G 5-stack systems */ SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x0404, "Intel D945G", STAC_D945GTP5), SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x0303, "Intel D945G", STAC_D945GTP5), SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x0013, "Intel D945G", STAC_D945GTP5), SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x0417, "Intel D945G", STAC_D945GTP5), /* Intel 945P based systems */ SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x0b0b, "Intel D945P", STAC_D945GTP3), SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x0112, "Intel D945P", STAC_D945GTP3), SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x0d0d, "Intel D945P", STAC_D945GTP3), SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x0909, "Intel D945P", STAC_D945GTP3), SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x0505, "Intel D945P", STAC_D945GTP3), SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x0707, "Intel D945P", STAC_D945GTP5), /* other intel */ SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x0204, "Intel D945", STAC_D945_REF), /* other systems */ /* Apple Intel Mac (Mac Mini, MacBook, MacBook Pro...) 
*/ SND_PCI_QUIRK(0x8384, 0x7680, "Mac", STAC_INTEL_MAC_AUTO), /* Dell systems */ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01a7, "unknown Dell", STAC_922X_DELL_D81), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01a9, "unknown Dell", STAC_922X_DELL_D81), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01ab, "unknown Dell", STAC_922X_DELL_D81), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01ac, "unknown Dell", STAC_922X_DELL_D82), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01bf, "unknown Dell", STAC_922X_DELL_M81), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01d0, "unknown Dell", STAC_922X_DELL_D82), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01d1, "unknown Dell", STAC_922X_DELL_D81), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01d2, "unknown Dell", STAC_922X_DELL_D81), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01d7, "Dell XPS M1210", STAC_922X_DELL_M82), /* ECS/PC Chips boards */ SND_PCI_QUIRK_MASK(0x1019, 0xf000, 0x2000, "ECS/PC chips", STAC_ECS_202), {} /* terminator */ }; static const unsigned int ref927x_pin_configs[14] = { 0x02214020, 0x02a19080, 0x0181304e, 0x01014010, 0x01a19040, 0x01011012, 0x01016011, 0x0101201f, 0x183301f0, 0x18a001f0, 0x18a001f0, 0x01442070, 0x01c42190, 0x40000100, }; static const unsigned int d965_3st_pin_configs[14] = { 0x0221401f, 0x02a19120, 0x40000100, 0x01014011, 0x01a19021, 0x01813024, 0x40000100, 0x40000100, 0x40000100, 0x40000100, 0x40000100, 0x40000100, 0x40000100, 0x40000100 }; static const unsigned int d965_5st_pin_configs[14] = { 0x02214020, 0x02a19080, 0x0181304e, 0x01014010, 0x01a19040, 0x01011012, 0x01016011, 0x40000100, 0x40000100, 0x40000100, 0x40000100, 0x01442070, 0x40000100, 0x40000100 }; static const unsigned int d965_5st_no_fp_pin_configs[14] = { 0x40000100, 0x40000100, 0x0181304e, 0x01014010, 0x01a19040, 0x01011012, 0x01016011, 0x40000100, 0x40000100, 0x40000100, 0x40000100, 0x01442070, 0x40000100, 0x40000100 }; static const unsigned int dell_3st_pin_configs[14] = { 0x02211230, 0x02a11220, 0x01a19040, 0x01114210, 0x01111212, 0x01116211, 0x01813050, 0x01112214, 0x403003fa, 0x90a60040, 
0x90a60040, 0x404003fb, 0x40c003fc, 0x40000100 }; static const unsigned int *stac927x_brd_tbl[STAC_927X_MODELS] = { [STAC_D965_REF_NO_JD] = ref927x_pin_configs, [STAC_D965_REF] = ref927x_pin_configs, [STAC_D965_3ST] = d965_3st_pin_configs, [STAC_D965_5ST] = d965_5st_pin_configs, [STAC_D965_5ST_NO_FP] = d965_5st_no_fp_pin_configs, [STAC_DELL_3ST] = dell_3st_pin_configs, [STAC_DELL_BIOS] = NULL, [STAC_927X_VOLKNOB] = NULL, }; static const char * const stac927x_models[STAC_927X_MODELS] = { [STAC_927X_AUTO] = "auto", [STAC_D965_REF_NO_JD] = "ref-no-jd", [STAC_D965_REF] = "ref", [STAC_D965_3ST] = "3stack", [STAC_D965_5ST] = "5stack", [STAC_D965_5ST_NO_FP] = "5stack-no-fp", [STAC_DELL_3ST] = "dell-3stack", [STAC_DELL_BIOS] = "dell-bios", [STAC_927X_VOLKNOB] = "volknob", }; static const struct snd_pci_quirk stac927x_cfg_tbl[] = { /* SigmaTel reference board */ SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x2668, "DFI LanParty", STAC_D965_REF), SND_PCI_QUIRK(PCI_VENDOR_ID_DFI, 0x3101, "DFI LanParty", STAC_D965_REF), /* Intel 946 based systems */ SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x3d01, "Intel D946", STAC_D965_3ST), SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0xa301, "Intel D946", STAC_D965_3ST), /* 965 based 3 stack systems */ SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_INTEL, 0xff00, 0x2100, "Intel D965", STAC_D965_3ST), SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_INTEL, 0xff00, 0x2000, "Intel D965", STAC_D965_3ST), /* Dell 3 stack systems */ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01dd, "Dell Dimension E520", STAC_DELL_3ST), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01ed, "Dell ", STAC_DELL_3ST), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f4, "Dell ", STAC_DELL_3ST), /* Dell 3 stack systems with verb table in BIOS */ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f3, "Dell Inspiron 1420", STAC_DELL_BIOS), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f7, "Dell XPS M1730", STAC_DELL_BIOS), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0227, "Dell Vostro 1400 ", STAC_DELL_BIOS), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x022e, "Dell ", STAC_DELL_BIOS), 
SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x022f, "Dell Inspiron 1525", STAC_DELL_BIOS), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0242, "Dell ", STAC_DELL_BIOS), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0243, "Dell ", STAC_DELL_BIOS), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02ff, "Dell ", STAC_DELL_BIOS), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0209, "Dell XPS 1330", STAC_DELL_BIOS), /* 965 based 5 stack systems */ SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_INTEL, 0xff00, 0x2300, "Intel D965", STAC_D965_5ST), SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_INTEL, 0xff00, 0x2500, "Intel D965", STAC_D965_5ST), /* volume-knob fixes */ SND_PCI_QUIRK_VENDOR(0x10cf, "FSC", STAC_927X_VOLKNOB), {} /* terminator */ }; static const unsigned int ref9205_pin_configs[12] = { 0x40000100, 0x40000100, 0x01016011, 0x01014010, 0x01813122, 0x01a19021, 0x01019020, 0x40000100, 0x90a000f0, 0x90a000f0, 0x01441030, 0x01c41030 }; /* STAC 9205 pin configs for 102801F1 102801F2 102801FC 102801FD 10280204 1028021F 10280228 (Dell Vostro 1500) 10280229 (Dell Vostro 1700) */ static const unsigned int dell_9205_m42_pin_configs[12] = { 0x0321101F, 0x03A11020, 0x400003FA, 0x90170310, 0x400003FB, 0x400003FC, 0x400003FD, 0x40F000F9, 0x90A60330, 0x400003FF, 0x0144131F, 0x40C003FE, }; /* STAC 9205 pin configs for 102801F9 102801FA 102801FE 102801FF (Dell Precision M4300) 10280206 10280200 10280201 */ static const unsigned int dell_9205_m43_pin_configs[12] = { 0x0321101f, 0x03a11020, 0x90a70330, 0x90170310, 0x400000fe, 0x400000ff, 0x400000fd, 0x40f000f9, 0x400000fa, 0x400000fc, 0x0144131f, 0x40c003f8, }; static const unsigned int dell_9205_m44_pin_configs[12] = { 0x0421101f, 0x04a11020, 0x400003fa, 0x90170310, 0x400003fb, 0x400003fc, 0x400003fd, 0x400003f9, 0x90a60330, 0x400003ff, 0x01441340, 0x40c003fe, }; static const unsigned int *stac9205_brd_tbl[STAC_9205_MODELS] = { [STAC_9205_REF] = ref9205_pin_configs, [STAC_9205_DELL_M42] = dell_9205_m42_pin_configs, [STAC_9205_DELL_M43] = dell_9205_m43_pin_configs, [STAC_9205_DELL_M44] = 
		dell_9205_m44_pin_configs,
	[STAC_9205_EAPD] = NULL,
};

/* model names accepted from the "model" module option for STAC9205 */
static const char * const stac9205_models[STAC_9205_MODELS] = {
	[STAC_9205_AUTO]     = "auto",
	[STAC_9205_REF]      = "ref",
	[STAC_9205_DELL_M42] = "dell-m42",
	[STAC_9205_DELL_M43] = "dell-m43",
	[STAC_9205_DELL_M44] = "dell-m44",
	[STAC_9205_EAPD]     = "eapd",
};

/* PCI subsystem-ID -> board model mapping for STAC9205 */
static const struct snd_pci_quirk stac9205_cfg_tbl[] = {
	/* SigmaTel reference board */
	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x2668,
		      "DFI LanParty", STAC_9205_REF),
	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0xfb30,
		      "SigmaTel", STAC_9205_REF),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DFI, 0x3101,
		      "DFI LanParty", STAC_9205_REF),
	/* Dell */
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f1,
		      "unknown Dell", STAC_9205_DELL_M42),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f2,
		      "unknown Dell", STAC_9205_DELL_M42),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f8,
		      "Dell Precision", STAC_9205_DELL_M43),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f9,
		      "Dell Precision", STAC_9205_DELL_M43),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01fa,
		      "Dell Precision", STAC_9205_DELL_M43),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01fc,
		      "unknown Dell", STAC_9205_DELL_M42),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01fd,
		      "unknown Dell", STAC_9205_DELL_M42),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01fe,
		      "Dell Precision", STAC_9205_DELL_M43),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01ff,
		      "Dell Precision M4300", STAC_9205_DELL_M43),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0204,
		      "unknown Dell", STAC_9205_DELL_M42),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0206,
		      "Dell Precision", STAC_9205_DELL_M43),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x021b,
		      "Dell Precision", STAC_9205_DELL_M43),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x021c,
		      "Dell Precision", STAC_9205_DELL_M43),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x021f,
		      "Dell Inspiron", STAC_9205_DELL_M44),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0228,
		      "Dell Vostro 1500", STAC_9205_DELL_M42),
	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0229,
		      "Dell Vostro 1700", STAC_9205_DELL_M42),
	/* Gateway */
	SND_PCI_QUIRK(0x107b, 0x0560, "Gateway T6834c", STAC_9205_EAPD),
	SND_PCI_QUIRK(0x107b, 0x0565, "Gateway T1616", STAC_9205_EAPD),
	{} /* terminator */
};

/*
 * Write the given default pin configs to all pins of the codec.
 * Entries with a zero NID or a zero config are skipped.
 */
static void stac92xx_set_config_regs(struct hda_codec *codec,
				     const unsigned int *pincfgs)
{
	int i;
	struct sigmatel_spec *spec = codec->spec;

	if (!pincfgs)
		return;

	for (i = 0; i < spec->num_pins; i++)
		if (spec->pin_nids[i] && pincfgs[i])
			snd_hda_codec_set_pincfg(codec, spec->pin_nids[i],
						 pincfgs[i]);
}

/*
 * Analog playback callbacks
 */
static int stac92xx_playback_pcm_open(struct hda_pcm_stream *hinfo,
				      struct hda_codec *codec,
				      struct snd_pcm_substream *substream)
{
	struct sigmatel_spec *spec = codec->spec;
	/* optional delay before opening; some boards need settling time */
	if (spec->stream_delay)
		msleep(spec->stream_delay);
	return snd_hda_multi_out_analog_open(codec, &spec->multiout, substream,
					     hinfo);
}

static int stac92xx_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
					 struct hda_codec *codec,
					 unsigned int stream_tag,
					 unsigned int format,
					 struct snd_pcm_substream *substream)
{
	struct sigmatel_spec *spec = codec->spec;
	return snd_hda_multi_out_analog_prepare(codec, &spec->multiout,
						stream_tag, format, substream);
}

static int stac92xx_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
					 struct hda_codec *codec,
					 struct snd_pcm_substream *substream)
{
	struct sigmatel_spec *spec = codec->spec;
	return snd_hda_multi_out_analog_cleanup(codec, &spec->multiout);
}

/*
 * Digital playback callbacks
 */
static int stac92xx_dig_playback_pcm_open(struct hda_pcm_stream *hinfo,
					  struct hda_codec *codec,
					  struct snd_pcm_substream *substream)
{
	struct sigmatel_spec *spec = codec->spec;
	return snd_hda_multi_out_dig_open(codec, &spec->multiout);
}

static int stac92xx_dig_playback_pcm_close(struct hda_pcm_stream *hinfo,
					   struct hda_codec *codec,
					   struct snd_pcm_substream *substream)
{
	struct sigmatel_spec *spec = codec->spec;
	return snd_hda_multi_out_dig_close(codec, &spec->multiout);
}

static int stac92xx_dig_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
					     struct hda_codec *codec,
					     unsigned int stream_tag,
					     unsigned int format,
					     struct snd_pcm_substream *substream)
{
	struct sigmatel_spec *spec = codec->spec;
	return snd_hda_multi_out_dig_prepare(codec, &spec->multiout,
					     stream_tag, format, substream);
}

static int stac92xx_dig_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
					     struct hda_codec *codec,
					     struct snd_pcm_substream *substream)
{
	struct sigmatel_spec *spec = codec->spec;
	return snd_hda_multi_out_dig_cleanup(codec, &spec->multiout);
}

/*
 * Analog capture callbacks
 */
static int stac92xx_capture_pcm_prepare(struct hda_pcm_stream *hinfo,
					struct hda_codec *codec,
					unsigned int stream_tag,
					unsigned int format,
					struct snd_pcm_substream *substream)
{
	struct sigmatel_spec *spec = codec->spec;
	hda_nid_t nid = spec->adc_nids[substream->number];

	/* power the ADC up first when ADCs are powered down at idle */
	if (spec->powerdown_adcs) {
		msleep(40);
		snd_hda_codec_write(codec, nid, 0,
			AC_VERB_SET_POWER_STATE, AC_PWRST_D0);
	}
	snd_hda_codec_setup_stream(codec, nid, stream_tag, 0, format);
	return 0;
}

static int stac92xx_capture_pcm_cleanup(struct hda_pcm_stream *hinfo,
					struct hda_codec *codec,
					struct snd_pcm_substream *substream)
{
	struct sigmatel_spec *spec = codec->spec;
	hda_nid_t nid = spec->adc_nids[substream->number];

	snd_hda_codec_cleanup_stream(codec, nid);
	/* power the ADC back down if the board powers down idle ADCs */
	if (spec->powerdown_adcs)
		snd_hda_codec_write(codec, nid, 0,
			AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
	return 0;
}

static const struct hda_pcm_stream stac92xx_pcm_digital_playback = {
	.substreams = 1,
	.channels_min = 2,
	.channels_max = 2,
	/* NID is set in stac92xx_build_pcms */
	.ops = {
		.open = stac92xx_dig_playback_pcm_open,
		.close = stac92xx_dig_playback_pcm_close,
		.prepare = stac92xx_dig_playback_pcm_prepare,
		.cleanup = stac92xx_dig_playback_pcm_cleanup
	},
};

static const struct hda_pcm_stream stac92xx_pcm_digital_capture = {
	.substreams = 1,
	.channels_min = 2,
	.channels_max = 2,
	/* NID is set in stac92xx_build_pcms */
};

static const struct hda_pcm_stream stac92xx_pcm_analog_playback = {
	.substreams = 1,
	.channels_min = 2,
	.channels_max = 8,
	.nid = 0x02, /* NID to query formats and rates */
	.ops = {
		.open = stac92xx_playback_pcm_open,
		.prepare = stac92xx_playback_pcm_prepare,
		.cleanup = stac92xx_playback_pcm_cleanup
	},
};

static const struct hda_pcm_stream stac92xx_pcm_analog_alt_playback = {
	.substreams = 1,
	.channels_min = 2,
	.channels_max = 2,
	.nid = 0x06, /* NID to query formats and rates */
	.ops = {
		.open = stac92xx_playback_pcm_open,
		.prepare = stac92xx_playback_pcm_prepare,
		.cleanup = stac92xx_playback_pcm_cleanup
	},
};

static const struct hda_pcm_stream stac92xx_pcm_analog_capture = {
	.channels_min = 2,
	.channels_max = 2,
	/* NID + .substreams is set in stac92xx_build_pcms */
	.ops = {
		.prepare = stac92xx_capture_pcm_prepare,
		.cleanup = stac92xx_capture_pcm_cleanup
	},
};

/*
 * Build the codec's PCM devices: always one analog PCM, plus an
 * alternative analog PCM when alt_switch is set, plus a digital PCM
 * when a digital output or input NID exists.  Always returns 0.
 */
static int stac92xx_build_pcms(struct hda_codec *codec)
{
	struct sigmatel_spec *spec = codec->spec;
	struct hda_pcm *info = spec->pcm_rec;

	codec->num_pcms = 1;
	codec->pcm_info = info;

	info->name = "STAC92xx Analog";
	info->stream[SNDRV_PCM_STREAM_PLAYBACK] = stac92xx_pcm_analog_playback;
	info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid =
		spec->multiout.dac_nids[0];
	info->stream[SNDRV_PCM_STREAM_CAPTURE] = stac92xx_pcm_analog_capture;
	info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->adc_nids[0];
	info->stream[SNDRV_PCM_STREAM_CAPTURE].substreams = spec->num_adcs;

	if (spec->alt_switch) {
		codec->num_pcms++;
		info++;
		info->name = "STAC92xx Analog Alt";
		info->stream[SNDRV_PCM_STREAM_PLAYBACK] =
			stac92xx_pcm_analog_alt_playback;
	}

	if (spec->multiout.dig_out_nid || spec->dig_in_nid) {
		codec->num_pcms++;
		info++;
		info->name = "STAC92xx Digital";
		info->pcm_type = spec->autocfg.dig_out_type[0];
		if (spec->multiout.dig_out_nid) {
			info->stream[SNDRV_PCM_STREAM_PLAYBACK] =
				stac92xx_pcm_digital_playback;
			info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid =
				spec->multiout.dig_out_nid;
		}
		if (spec->dig_in_nid) {
			info->stream[SNDRV_PCM_STREAM_CAPTURE] =
				stac92xx_pcm_digital_capture;
			info->stream[SNDRV_PCM_STREAM_CAPTURE].nid =
				spec->dig_in_nid;
		}
	}

	return 0;
}

/*
 * Pick the strongest VREF level supported by the pin's capabilities,
 * checking from 100% down to ground; returns 0 when none is supported.
 * (Signature continues on the next source line.)
 */
static unsigned int stac92xx_get_default_vref(struct hda_codec *codec,
					      hda_nid_t nid)
{
	unsigned
		int pincap = snd_hda_query_pin_caps(codec, nid);

	pincap = (pincap & AC_PINCAP_VREF) >> AC_PINCAP_VREF_SHIFT;
	if (pincap & AC_PINCAP_VREF_100)
		return AC_PINCTL_VREF_100;
	if (pincap & AC_PINCAP_VREF_80)
		return AC_PINCTL_VREF_80;
	if (pincap & AC_PINCAP_VREF_50)
		return AC_PINCTL_VREF_50;
	if (pincap & AC_PINCAP_VREF_GRD)
		return AC_PINCTL_VREF_GRD;
	return 0;
}

/* cached write of the pin widget control (direction/VREF) verb */
static void stac92xx_auto_set_pinctl(struct hda_codec *codec, hda_nid_t nid,
				     int pin_type)
{
	snd_hda_codec_write_cache(codec, nid, 0,
				  AC_VERB_SET_PIN_WIDGET_CONTROL, pin_type);
}

#define stac92xx_hp_switch_info		snd_ctl_boolean_mono_info

/* report whether the headphone-as-line-out switch is currently enabled */
static int stac92xx_hp_switch_get(struct snd_kcontrol *kcontrol,
				  struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct sigmatel_spec *spec = codec->spec;

	ucontrol->value.integer.value[0] = !!spec->hp_switch;
	return 0;
}

static void stac_issue_unsol_event(struct hda_codec *codec, hda_nid_t nid);

/* store the HP switch state as the pin NID (or 0) — continues next line */
static int stac92xx_hp_switch_put(struct snd_kcontrol *kcontrol,
				  struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct sigmatel_spec *spec = codec->spec;
	int nid = kcontrol->private_value;

	spec->hp_switch = ucontrol->value.integer.value[0] ?
			nid : 0;

	/* check to be sure that the ports are up to date with
	 * switch changes
	 */
	stac_issue_unsol_event(codec, nid);

	return 1;
}

/*
 * DC bias enum control: "Mic In"/"Line In"/"Line Out" when the pin is one
 * of the dedicated mic/line switch pins, otherwise only the first two.
 */
static int stac92xx_dc_bias_info(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_info *uinfo)
{
	int i;
	static const char * const texts[] = {
		"Mic In", "Line In", "Line Out"
	};

	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct sigmatel_spec *spec = codec->spec;
	hda_nid_t nid = kcontrol->private_value;

	if (nid == spec->mic_switch || nid == spec->line_switch)
		i = 3;
	else
		i = 2;

	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
	uinfo->value.enumerated.items = i;
	uinfo->count = 1;
	if (uinfo->value.enumerated.item >= i)
		uinfo->value.enumerated.item = i-1;
	strcpy(uinfo->value.enumerated.name,
		texts[uinfo->value.enumerated.item]);

	return 0;
}

/* map the current VREF back to the enum index: default->0, GRD->1, HIZ->2 */
static int stac92xx_dc_bias_get(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	hda_nid_t nid = kcontrol->private_value;
	unsigned int vref = stac92xx_vref_get(codec, nid);

	/* NOTE(review): item[0] is left untouched when vref matches none of
	 * the three cases — presumably the control value is zero-initialized
	 * by the caller; verify.
	 */
	if (vref == stac92xx_get_default_vref(codec, nid))
		ucontrol->value.enumerated.item[0] = 0;
	else if (vref == AC_PINCTL_VREF_GRD)
		ucontrol->value.enumerated.item[0] = 1;
	else if (vref == AC_PINCTL_VREF_HIZ)
		ucontrol->value.enumerated.item[0] = 2;

	return 0;
}

/* apply the selected DC-bias (VREF) setting; out-of-range items are ignored */
static int stac92xx_dc_bias_put(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	unsigned int new_vref = 0;
	int error;
	hda_nid_t nid = kcontrol->private_value;

	if (ucontrol->value.enumerated.item[0] == 0)
		new_vref = stac92xx_get_default_vref(codec, nid);
	else if (ucontrol->value.enumerated.item[0] == 1)
		new_vref = AC_PINCTL_VREF_GRD;
	else if (ucontrol->value.enumerated.item[0] == 2)
		new_vref = AC_PINCTL_VREF_HIZ;
	else
		return 0;

	if (new_vref != stac92xx_vref_get(codec, nid)) {
		error = stac92xx_vref_set(codec, nid, new_vref);
		return error;
	}

	return 0;
}

/* input/output direction enum for the mic/line switch pins */
static int stac92xx_io_switch_info(struct snd_kcontrol *kcontrol, struct
	snd_ctl_elem_info *uinfo)
{
	/* NOTE(review): these point at string literals and are never written
	 * through; const char * would be more correct */
	char *texts[2];
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct sigmatel_spec *spec = codec->spec;

	if (kcontrol->private_value == spec->line_switch)
		texts[0] = "Line In";
	else
		texts[0] = "Mic In";
	texts[1] = "Line Out";
	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
	uinfo->value.enumerated.items = 2;
	uinfo->count = 1;
	if (uinfo->value.enumerated.item >= 2)
		uinfo->value.enumerated.item = 1;
	strcpy(uinfo->value.enumerated.name,
		texts[uinfo->value.enumerated.item]);

	return 0;
}

/* report the saved direction for the mic (idx 1) or line (idx 0) switch pin */
static int stac92xx_io_switch_get(struct snd_kcontrol *kcontrol,
				  struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct sigmatel_spec *spec = codec->spec;
	hda_nid_t nid = kcontrol->private_value;
	int io_idx = (nid == spec->mic_switch) ? 1 : 0;

	ucontrol->value.enumerated.item[0] = spec->io_switch[io_idx];
	return 0;
}

/* switch the pin between input and output mode — continues next line */
static int stac92xx_io_switch_put(struct snd_kcontrol *kcontrol,
				  struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct sigmatel_spec *spec = codec->spec;
	hda_nid_t nid = kcontrol->private_value;
	int io_idx = (nid == spec->mic_switch) ?
			1 : 0;
	unsigned short val = !!ucontrol->value.enumerated.item[0];

	spec->io_switch[io_idx] = val;

	if (val)
		stac92xx_auto_set_pinctl(codec, nid, AC_PINCTL_OUT_EN);
	else {
		unsigned int pinctl = AC_PINCTL_IN_EN;
		if (io_idx) /* set VREF for mic */
			pinctl |= stac92xx_get_default_vref(codec, nid);
		stac92xx_auto_set_pinctl(codec, nid, pinctl);
	}

	/* check the auto-mute again: we need to mute/unmute the speaker
	 * appropriately according to the pin direction
	 */
	if (spec->hp_detect)
		stac_issue_unsol_event(codec, nid);

	return 1;
}

#define stac92xx_clfe_switch_info snd_ctl_boolean_mono_info

/* report the current center/LFE channel-swap state */
static int stac92xx_clfe_switch_get(struct snd_kcontrol *kcontrol,
				    struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct sigmatel_spec *spec = codec->spec;

	ucontrol->value.integer.value[0] = spec->clfe_swap;
	return 0;
}

/*
 * Toggle center/LFE swapping via the EAPD/BTL-enable verb (bit 2);
 * the verb payload continues on the next source line.
 */
static int stac92xx_clfe_switch_put(struct snd_kcontrol *kcontrol,
				    struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct sigmatel_spec *spec = codec->spec;
	hda_nid_t nid = kcontrol->private_value & 0xff;
	unsigned int val = !!ucontrol->value.integer.value[0];

	if (spec->clfe_swap == val)
		return 0;

	spec->clfe_swap = val;

	snd_hda_codec_write_cache(codec, nid, 0, AC_VERB_SET_EAPD_BTLENABLE,
		spec->clfe_swap ?
		0x4 : 0x0);

	return 1;
}

#define STAC_CODEC_HP_SWITCH(xname) \
	{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
	  .name = xname, \
	  .index = 0, \
	  .info = stac92xx_hp_switch_info, \
	  .get = stac92xx_hp_switch_get, \
	  .put = stac92xx_hp_switch_put, \
	}

#define STAC_CODEC_IO_SWITCH(xname, xpval) \
	{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
	  .name = xname, \
	  .index = 0, \
	  .info = stac92xx_io_switch_info, \
	  .get = stac92xx_io_switch_get, \
	  .put = stac92xx_io_switch_put, \
	  .private_value = xpval, \
	}

#define STAC_CODEC_CLFE_SWITCH(xname, xpval) \
	{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
	  .name = xname, \
	  .index = 0, \
	  .info = stac92xx_clfe_switch_info, \
	  .get = stac92xx_clfe_switch_get, \
	  .put = stac92xx_clfe_switch_put, \
	  .private_value = xpval, \
	}

/* indices into stac92xx_control_templates[] below — keep in sync */
enum {
	STAC_CTL_WIDGET_VOL,
	STAC_CTL_WIDGET_MUTE,
	STAC_CTL_WIDGET_MUTE_BEEP,
	STAC_CTL_WIDGET_MONO_MUX,
	STAC_CTL_WIDGET_HP_SWITCH,
	STAC_CTL_WIDGET_IO_SWITCH,
	STAC_CTL_WIDGET_CLFE_SWITCH,
	STAC_CTL_WIDGET_DC_BIAS
};

static const struct snd_kcontrol_new stac92xx_control_templates[] = {
	HDA_CODEC_VOLUME(NULL, 0, 0, 0),
	HDA_CODEC_MUTE(NULL, 0, 0, 0),
	HDA_CODEC_MUTE_BEEP(NULL, 0, 0, 0),
	STAC_MONO_MUX,
	STAC_CODEC_HP_SWITCH(NULL),
	STAC_CODEC_IO_SWITCH(NULL, 0),
	STAC_CODEC_CLFE_SWITCH(NULL, 0),
	DC_BIAS(NULL, 0, 0),
};

/* add dynamic controls */
/*
 * Allocate a new kcontrol slot from spec->kctls, copy the template into
 * it and duplicate the name.  Returns NULL on allocation failure (the
 * slot is rolled back so the array stays consistent).
 */
static struct snd_kcontrol_new *
stac_control_new(struct sigmatel_spec *spec,
		 const struct snd_kcontrol_new *ktemp,
		 const char *name,
		 unsigned int subdev)
{
	struct snd_kcontrol_new *knew;

	snd_array_init(&spec->kctls, sizeof(*knew), 32);
	knew = snd_array_new(&spec->kctls);
	if (!knew)
		return NULL;
	*knew = *ktemp;
	knew->name = kstrdup(name, GFP_KERNEL);
	if (!knew->name) {
		/* rollback */
		memset(knew, 0, sizeof(*knew));
		spec->kctls.alloced--;
		return NULL;
	}
	knew->subdevice = subdev;
	return knew;
}

/* create a control from a template with an explicit index and value;
 * the last argument continues on the next source line
 */
static int stac92xx_add_control_temp(struct sigmatel_spec *spec,
				     const struct snd_kcontrol_new *ktemp,
				     int idx, const char *name,
				     unsigned long val)
{
	struct snd_kcontrol_new *knew = stac_control_new(spec, ktemp, name,
							 HDA_SUBDEV_AMP_FLAG);

	if (!knew)
		return -ENOMEM;
	knew->index = idx;
	knew->private_value = val;
	return 0;
}

/* convenience wrapper: pick the template by STAC_CTL_WIDGET_* type */
static inline int stac92xx_add_control_idx(struct sigmatel_spec *spec,
					   int type, int idx, const char *name,
					   unsigned long val)
{
	return stac92xx_add_control_temp(spec,
					 &stac92xx_control_templates[type],
					 idx, name, val);
}

/* add dynamic controls */
static inline int stac92xx_add_control(struct sigmatel_spec *spec, int type,
				       const char *name, unsigned long val)
{
	return stac92xx_add_control_idx(spec, type, 0, name, val);
}

/* template for the capture "Input Source" enum control */
static const struct snd_kcontrol_new stac_input_src_temp = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "Input Source",
	.info = stac92xx_mux_enum_info,
	.get = stac92xx_mux_enum_get,
	.put = stac92xx_mux_enum_put,
};

/*
 * Add a "... Jack Mode" control for an external jack pin: an I/O
 * direction switch or a DC-bias selector, depending on the pin's VREF
 * capabilities and whether it is the line or mic switch pin.
 */
static inline int stac92xx_add_jack_mode_control(struct hda_codec *codec,
						 hda_nid_t nid, int idx)
{
	int def_conf = snd_hda_codec_get_pincfg(codec, nid);
	int control = 0;
	struct sigmatel_spec *spec = codec->spec;
	char name[22];

	if (snd_hda_get_input_pin_attr(def_conf) != INPUT_PIN_ATTR_INT) {
		if (stac92xx_get_default_vref(codec, nid) == AC_PINCTL_VREF_GRD
			&& nid == spec->line_switch)
			control = STAC_CTL_WIDGET_IO_SWITCH;
		else if (snd_hda_query_pin_caps(codec, nid)
			& (AC_PINCAP_VREF_GRD << AC_PINCAP_VREF_SHIFT))
			control = STAC_CTL_WIDGET_DC_BIAS;
		else if (nid == spec->mic_switch)
			control = STAC_CTL_WIDGET_IO_SWITCH;
	}

	if (control) {
		snd_hda_get_pin_label(codec, nid, &spec->autocfg,
				      name, sizeof(name), NULL);
		return stac92xx_add_control(codec->spec, control,
					strcat(name, " Jack Mode"), nid);
	}

	return 0;
}

/*
 * Add the "Input Source" control unless auto-mic is active or there is
 * nothing to select from; count continues on the next source line.
 */
static int stac92xx_add_input_source(struct sigmatel_spec *spec)
{
	struct snd_kcontrol_new *knew;
	struct hda_input_mux *imux = &spec->private_imux;

	if (spec->auto_mic)
		return 0; /* no need for input source */
	if (!spec->num_adcs || imux->num_items <= 1)
		return 0; /* no need for input source control */
	knew = stac_control_new(spec, &stac_input_src_temp,
				stac_input_src_temp.name, 0);
	if (!knew)
		return -ENOMEM;
	knew->count =
		spec->num_adcs;
	return 0;
}

/* check whether the line-input can be used as line-out */
static hda_nid_t check_line_out_switch(struct hda_codec *codec)
{
	struct sigmatel_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->autocfg;
	hda_nid_t nid;
	unsigned int pincap;
	int i;

	if (cfg->line_out_type != AUTO_PIN_LINE_OUT)
		return 0;
	for (i = 0; i < cfg->num_inputs; i++) {
		if (cfg->inputs[i].type == AUTO_PIN_LINE_IN) {
			nid = cfg->inputs[i].pin;
			pincap = snd_hda_query_pin_caps(codec, nid);
			if (pincap & AC_PINCAP_OUT)
				return nid;
		}
	}
	return 0;
}

static hda_nid_t get_unassigned_dac(struct hda_codec *codec, hda_nid_t nid);

/* check whether the mic-input can be used as line-out;
 * on success *dac receives the free DAC for the pin
 */
static hda_nid_t check_mic_out_switch(struct hda_codec *codec, hda_nid_t *dac)
{
	struct sigmatel_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->autocfg;
	unsigned int def_conf, pincap;
	int i;

	*dac = 0;
	if (cfg->line_out_type != AUTO_PIN_LINE_OUT)
		return 0;
	for (i = 0; i < cfg->num_inputs; i++) {
		hda_nid_t nid = cfg->inputs[i].pin;
		if (cfg->inputs[i].type != AUTO_PIN_MIC)
			continue;
		def_conf = snd_hda_codec_get_pincfg(codec, nid);
		/* some laptops have an internal analog microphone
		 * which can't be used as an output */
		if (snd_hda_get_input_pin_attr(def_conf) != INPUT_PIN_ATTR_INT) {
			pincap = snd_hda_query_pin_caps(codec, nid);
			if (pincap & AC_PINCAP_OUT) {
				*dac = get_unassigned_dac(codec, nid);
				if (*dac)
					return nid;
			}
		}
	}
	return 0;
}

/* true if nid is already listed in multiout.dac_nids[] */
static int is_in_dac_nids(struct sigmatel_spec *spec, hda_nid_t nid)
{
	int i;

	for (i = 0; i < spec->multiout.num_dacs; i++) {
		if (spec->multiout.dac_nids[i] == nid)
			return 1;
	}

	return 0;
}

/* true if nid is assigned anywhere: line-out, headphone or speaker DACs */
static int check_all_dac_nids(struct sigmatel_spec *spec, hda_nid_t nid)
{
	int i;
	if (is_in_dac_nids(spec, nid))
		return 1;
	for (i = 0; i < spec->autocfg.hp_outs; i++)
		if (spec->hp_dacs[i] == nid)
			return 1;
	for (i = 0; i < spec->autocfg.speaker_outs; i++)
		if (spec->speaker_dacs[i] == nid)
			return 1;
	return 0;
}

/* find a not-yet-assigned DAC reachable from the pin — continues next line */
static hda_nid_t get_unassigned_dac(struct hda_codec *codec,
				    hda_nid_t nid)
{
	struct sigmatel_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->autocfg;
	int j, conn_len;
	hda_nid_t conn[HDA_MAX_CONNECTIONS], fallback_dac;
	unsigned int wcaps, wtype;

	conn_len = snd_hda_get_connections(codec, nid, conn,
					   HDA_MAX_CONNECTIONS);
	/* 92HD88: trace back up the link of nids to find the DAC */
	while (conn_len == 1 && (get_wcaps_type(get_wcaps(codec, conn[0]))
				 != AC_WID_AUD_OUT)) {
		nid = conn[0];
		conn_len = snd_hda_get_connections(codec, nid, conn,
						   HDA_MAX_CONNECTIONS);
	}
	for (j = 0; j < conn_len; j++) {
		wcaps = get_wcaps(codec, conn[j]);
		wtype = get_wcaps_type(wcaps);
		/* we check only analog outputs */
		if (wtype != AC_WID_AUD_OUT || (wcaps & AC_WCAP_DIGITAL))
			continue;
		/* if this route has a free DAC, assign it */
		if (!check_all_dac_nids(spec, conn[j])) {
			if (conn_len > 1) {
				/* select this DAC in the pin's input mux */
				snd_hda_codec_write_cache(codec, nid, 0,
						  AC_VERB_SET_CONNECT_SEL, j);
			}
			return conn[j];
		}
	}

	/* if all DACs are already assigned, connect to the primary DAC,
	   unless we're assigning a secondary headphone */
	fallback_dac = spec->multiout.dac_nids[0];
	if (spec->multiout.hp_nid) {
		for (j = 0; j < cfg->hp_outs; j++)
			if (cfg->hp_pins[j] == nid) {
				fallback_dac = spec->multiout.hp_nid;
				break;
			}
	}

	if (conn_len > 1) {
		for (j = 0; j < conn_len; j++) {
			if (conn[j] == fallback_dac) {
				snd_hda_codec_write_cache(codec, nid, 0,
						  AC_VERB_SET_CONNECT_SEL, j);
				break;
			}
		}
	}
	/* no free DAC was found; the pin shares the fallback route */
	return 0;
}

static int add_spec_dacs(struct sigmatel_spec *spec, hda_nid_t nid);
static int add_spec_extra_dacs(struct sigmatel_spec *spec, hda_nid_t nid);

/*
 * Fill in the dac_nids table from the parsed pin configuration
 * This function only works when every pin in line_out_pins[]
 * contains at least one DAC in its connection list. Some 92xx
 * codecs are not connected directly to a DAC, such as the 9200
 * and 9202/925x. For those, dac_nids[] must be hard-coded.
 */
static int stac92xx_auto_fill_dac_nids(struct hda_codec *codec)
{
	struct sigmatel_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->autocfg;
	int i;
	hda_nid_t nid, dac;

	/* assign one DAC per line-out pin; give up on broken tails */
	for (i = 0; i < cfg->line_outs; i++) {
		nid = cfg->line_out_pins[i];
		dac = get_unassigned_dac(codec, nid);
		if (!dac) {
			if (spec->multiout.num_dacs > 0) {
				/* we have already working output pins,
				 * so let's drop the broken ones again
				 */
				cfg->line_outs = spec->multiout.num_dacs;
				break;
			}
			/* error out, no available DAC found */
			snd_printk(KERN_ERR
				   "%s: No available DAC for pin 0x%x\n",
				   __func__, nid);
			return -ENODEV;
		}
		add_spec_dacs(spec, dac);
	}

	/* first headphone DAC becomes hp_nid, the rest become extra DACs */
	for (i = 0; i < cfg->hp_outs; i++) {
		nid = cfg->hp_pins[i];
		dac = get_unassigned_dac(codec, nid);
		if (dac) {
			if (!spec->multiout.hp_nid)
				spec->multiout.hp_nid = dac;
			else
				add_spec_extra_dacs(spec, dac);
		}
		spec->hp_dacs[i] = dac;
	}

	for (i = 0; i < cfg->speaker_outs; i++) {
		nid = cfg->speaker_pins[i];
		dac = get_unassigned_dac(codec, nid);
		if (dac)
			add_spec_extra_dacs(spec, dac);
		spec->speaker_dacs[i] = dac;
	}

	/* add line-in as output */
	nid = check_line_out_switch(codec);
	if (nid) {
		dac = get_unassigned_dac(codec, nid);
		if (dac) {
			snd_printdd("STAC: Add line-in 0x%x as output %d\n",
				    nid, cfg->line_outs);
			cfg->line_out_pins[cfg->line_outs] = nid;
			cfg->line_outs++;
			spec->line_switch = nid;
			add_spec_dacs(spec, dac);
		}
	}
	/* add mic as output */
	nid = check_mic_out_switch(codec, &dac);
	if (nid && dac) {
		snd_printdd("STAC: Add mic-in 0x%x as output %d\n",
			    nid, cfg->line_outs);
		cfg->line_out_pins[cfg->line_outs] = nid;
		cfg->line_outs++;
		spec->mic_switch = nid;
		add_spec_dacs(spec, dac);
	}

	snd_printd("stac92xx: dac_nids=%d (0x%x/0x%x/0x%x/0x%x/0x%x)\n",
		   spec->multiout.num_dacs,
		   spec->multiout.dac_nids[0],
		   spec->multiout.dac_nids[1],
		   spec->multiout.dac_nids[2],
		   spec->multiout.dac_nids[3],
		   spec->multiout.dac_nids[4]);

	return 0;
}

/* create volume control/switch for the given prefix type;
 * signature continues on the next source line
 */
static int create_controls_idx(struct hda_codec *codec, const char *pfx,
int idx, hda_nid_t nid, int chs) { struct sigmatel_spec *spec = codec->spec; char name[32]; int err; if (!spec->check_volume_offset) { unsigned int caps, step, nums, db_scale; caps = query_amp_caps(codec, nid, HDA_OUTPUT); step = (caps & AC_AMPCAP_STEP_SIZE) >> AC_AMPCAP_STEP_SIZE_SHIFT; step = (step + 1) * 25; /* in .01dB unit */ nums = (caps & AC_AMPCAP_NUM_STEPS) >> AC_AMPCAP_NUM_STEPS_SHIFT; db_scale = nums * step; /* if dB scale is over -64dB, and finer enough, * let's reduce it to half */ if (db_scale > 6400 && nums >= 0x1f) spec->volume_offset = nums / 2; spec->check_volume_offset = 1; } sprintf(name, "%s Playback Volume", pfx); err = stac92xx_add_control_idx(spec, STAC_CTL_WIDGET_VOL, idx, name, HDA_COMPOSE_AMP_VAL_OFS(nid, chs, 0, HDA_OUTPUT, spec->volume_offset)); if (err < 0) return err; sprintf(name, "%s Playback Switch", pfx); err = stac92xx_add_control_idx(spec, STAC_CTL_WIDGET_MUTE, idx, name, HDA_COMPOSE_AMP_VAL(nid, chs, 0, HDA_OUTPUT)); if (err < 0) return err; return 0; } #define create_controls(codec, pfx, nid, chs) \ create_controls_idx(codec, pfx, 0, nid, chs) static int add_spec_dacs(struct sigmatel_spec *spec, hda_nid_t nid) { if (spec->multiout.num_dacs > 4) { printk(KERN_WARNING "stac92xx: No space for DAC 0x%x\n", nid); return 1; } else { snd_BUG_ON(spec->multiout.dac_nids != spec->dac_nids); spec->dac_nids[spec->multiout.num_dacs] = nid; spec->multiout.num_dacs++; } return 0; } static int add_spec_extra_dacs(struct sigmatel_spec *spec, hda_nid_t nid) { int i; for (i = 0; i < ARRAY_SIZE(spec->multiout.extra_out_nid); i++) { if (!spec->multiout.extra_out_nid[i]) { spec->multiout.extra_out_nid[i] = nid; return 0; } } printk(KERN_WARNING "stac92xx: No space for extra DAC 0x%x\n", nid); return 1; } /* Create output controls * The mixer elements are named depending on the given type (AUTO_PIN_XXX_OUT) */ static int create_multi_out_ctls(struct hda_codec *codec, int num_outs, const hda_nid_t *pins, const hda_nid_t *dac_nids, int type) { struct 
sigmatel_spec *spec = codec->spec; static const char * const chname[4] = { "Front", "Surround", NULL /*CLFE*/, "Side" }; hda_nid_t nid; int i, err; unsigned int wid_caps; for (i = 0; i < num_outs && i < ARRAY_SIZE(chname); i++) { if (type == AUTO_PIN_HP_OUT && !spec->hp_detect) { if (is_jack_detectable(codec, pins[i])) spec->hp_detect = 1; } nid = dac_nids[i]; if (!nid) continue; if (type != AUTO_PIN_HP_OUT && i == 2) { /* Center/LFE */ err = create_controls(codec, "Center", nid, 1); if (err < 0) return err; err = create_controls(codec, "LFE", nid, 2); if (err < 0) return err; wid_caps = get_wcaps(codec, nid); if (wid_caps & AC_WCAP_LR_SWAP) { err = stac92xx_add_control(spec, STAC_CTL_WIDGET_CLFE_SWITCH, "Swap Center/LFE Playback Switch", nid); if (err < 0) return err; } } else { const char *name; int idx; switch (type) { case AUTO_PIN_HP_OUT: name = "Headphone"; idx = i; break; case AUTO_PIN_SPEAKER_OUT: name = "Speaker"; idx = i; break; default: name = chname[i]; idx = 0; break; } err = create_controls_idx(codec, name, idx, nid, 3); if (err < 0) return err; } } return 0; } static int stac92xx_add_capvol_ctls(struct hda_codec *codec, unsigned long vol, unsigned long sw, int idx) { int err; err = stac92xx_add_control_idx(codec->spec, STAC_CTL_WIDGET_VOL, idx, "Capture Volume", vol); if (err < 0) return err; err = stac92xx_add_control_idx(codec->spec, STAC_CTL_WIDGET_MUTE, idx, "Capture Switch", sw); if (err < 0) return err; return 0; } /* add playback controls from the parsed DAC table */ static int stac92xx_auto_create_multi_out_ctls(struct hda_codec *codec, const struct auto_pin_cfg *cfg) { struct sigmatel_spec *spec = codec->spec; hda_nid_t nid; int err; int idx; err = create_multi_out_ctls(codec, cfg->line_outs, cfg->line_out_pins, spec->multiout.dac_nids, cfg->line_out_type); if (err < 0) return err; if (cfg->hp_outs > 1 && cfg->line_out_type == AUTO_PIN_LINE_OUT) { err = stac92xx_add_control(spec, STAC_CTL_WIDGET_HP_SWITCH, "Headphone as Line Out Switch", 
cfg->hp_pins[cfg->hp_outs - 1]); if (err < 0) return err; } for (idx = 0; idx < cfg->num_inputs; idx++) { if (cfg->inputs[idx].type > AUTO_PIN_LINE_IN) break; nid = cfg->inputs[idx].pin; err = stac92xx_add_jack_mode_control(codec, nid, idx); if (err < 0) return err; } return 0; } /* add playback controls for Speaker and HP outputs */ static int stac92xx_auto_create_hp_ctls(struct hda_codec *codec, struct auto_pin_cfg *cfg) { struct sigmatel_spec *spec = codec->spec; int err; err = create_multi_out_ctls(codec, cfg->hp_outs, cfg->hp_pins, spec->hp_dacs, AUTO_PIN_HP_OUT); if (err < 0) return err; err = create_multi_out_ctls(codec, cfg->speaker_outs, cfg->speaker_pins, spec->speaker_dacs, AUTO_PIN_SPEAKER_OUT); if (err < 0) return err; return 0; } /* labels for mono mux outputs */ static const char * const stac92xx_mono_labels[4] = { "DAC0", "DAC1", "Mixer", "DAC2" }; /* create mono mux for mono out on capable codecs */ static int stac92xx_auto_create_mono_output_ctls(struct hda_codec *codec) { struct sigmatel_spec *spec = codec->spec; struct hda_input_mux *mono_mux = &spec->private_mono_mux; int i, num_cons; hda_nid_t con_lst[ARRAY_SIZE(stac92xx_mono_labels)]; num_cons = snd_hda_get_connections(codec, spec->mono_nid, con_lst, HDA_MAX_NUM_INPUTS); if (num_cons <= 0 || num_cons > ARRAY_SIZE(stac92xx_mono_labels)) return -EINVAL; for (i = 0; i < num_cons; i++) snd_hda_add_imux_item(mono_mux, stac92xx_mono_labels[i], i, NULL); return stac92xx_add_control(spec, STAC_CTL_WIDGET_MONO_MUX, "Mono Mux", spec->mono_nid); } /* create PC beep volume controls */ static int stac92xx_auto_create_beep_ctls(struct hda_codec *codec, hda_nid_t nid) { struct sigmatel_spec *spec = codec->spec; u32 caps = query_amp_caps(codec, nid, HDA_OUTPUT); int err, type = STAC_CTL_WIDGET_MUTE_BEEP; if (spec->anabeep_nid == nid) type = STAC_CTL_WIDGET_MUTE; /* check for mute support for the the amp */ if ((caps & AC_AMPCAP_MUTE) >> AC_AMPCAP_MUTE_SHIFT) { err = stac92xx_add_control(spec, type, "Beep 
Playback Switch", HDA_COMPOSE_AMP_VAL(nid, 1, 0, HDA_OUTPUT)); if (err < 0) return err; } /* check to see if there is volume support for the amp */ if ((caps & AC_AMPCAP_NUM_STEPS) >> AC_AMPCAP_NUM_STEPS_SHIFT) { err = stac92xx_add_control(spec, STAC_CTL_WIDGET_VOL, "Beep Playback Volume", HDA_COMPOSE_AMP_VAL(nid, 1, 0, HDA_OUTPUT)); if (err < 0) return err; } return 0; } #ifdef CONFIG_SND_HDA_INPUT_BEEP #define stac92xx_dig_beep_switch_info snd_ctl_boolean_mono_info static int stac92xx_dig_beep_switch_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); ucontrol->value.integer.value[0] = codec->beep->enabled; return 0; } static int stac92xx_dig_beep_switch_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); return snd_hda_enable_beep_device(codec, ucontrol->value.integer.value[0]); } static const struct snd_kcontrol_new stac92xx_dig_beep_ctrl = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .info = stac92xx_dig_beep_switch_info, .get = stac92xx_dig_beep_switch_get, .put = stac92xx_dig_beep_switch_put, }; static int stac92xx_beep_switch_ctl(struct hda_codec *codec) { return stac92xx_add_control_temp(codec->spec, &stac92xx_dig_beep_ctrl, 0, "Beep Playback Switch", 0); } #endif static int stac92xx_auto_create_mux_input_ctls(struct hda_codec *codec) { struct sigmatel_spec *spec = codec->spec; int i, j, err = 0; for (i = 0; i < spec->num_muxes; i++) { hda_nid_t nid; unsigned int wcaps; unsigned long val; nid = spec->mux_nids[i]; wcaps = get_wcaps(codec, nid); if (!(wcaps & AC_WCAP_OUT_AMP)) continue; /* check whether already the same control was created as * normal Capture Volume. 
*/ val = HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_OUTPUT); for (j = 0; j < spec->num_caps; j++) { if (spec->capvols[j] == val) break; } if (j < spec->num_caps) continue; err = stac92xx_add_control_idx(spec, STAC_CTL_WIDGET_VOL, i, "Mux Capture Volume", val); if (err < 0) return err; } return 0; }; static const char * const stac92xx_spdif_labels[3] = { "Digital Playback", "Analog Mux 1", "Analog Mux 2", }; static int stac92xx_auto_create_spdif_mux_ctls(struct hda_codec *codec) { struct sigmatel_spec *spec = codec->spec; struct hda_input_mux *spdif_mux = &spec->private_smux; const char * const *labels = spec->spdif_labels; int i, num_cons; hda_nid_t con_lst[HDA_MAX_NUM_INPUTS]; num_cons = snd_hda_get_connections(codec, spec->smux_nids[0], con_lst, HDA_MAX_NUM_INPUTS); if (num_cons <= 0) return -EINVAL; if (!labels) labels = stac92xx_spdif_labels; for (i = 0; i < num_cons; i++) snd_hda_add_imux_item(spdif_mux, labels[i], i, NULL); return 0; } /* labels for dmic mux inputs */ static const char * const stac92xx_dmic_labels[5] = { "Analog Inputs", "Digital Mic 1", "Digital Mic 2", "Digital Mic 3", "Digital Mic 4" }; static hda_nid_t get_connected_node(struct hda_codec *codec, hda_nid_t mux, int idx) { hda_nid_t conn[HDA_MAX_NUM_INPUTS]; int nums; nums = snd_hda_get_connections(codec, mux, conn, ARRAY_SIZE(conn)); if (idx >= 0 && idx < nums) return conn[idx]; return 0; } /* look for NID recursively */ #define get_connection_index(codec, mux, nid) \ snd_hda_get_conn_index(codec, mux, nid, 1) /* create a volume assigned to the given pin (only if supported) */ /* return 1 if the volume control is created */ static int create_elem_capture_vol(struct hda_codec *codec, hda_nid_t nid, const char *label, int idx, int direction) { unsigned int caps, nums; char name[32]; int err; if (direction == HDA_OUTPUT) caps = AC_WCAP_OUT_AMP; else caps = AC_WCAP_IN_AMP; if (!(get_wcaps(codec, nid) & caps)) return 0; caps = query_amp_caps(codec, nid, direction); nums = (caps & AC_AMPCAP_NUM_STEPS) 
>> AC_AMPCAP_NUM_STEPS_SHIFT; if (!nums) return 0; snprintf(name, sizeof(name), "%s Capture Volume", label); err = stac92xx_add_control_idx(codec->spec, STAC_CTL_WIDGET_VOL, idx, name, HDA_COMPOSE_AMP_VAL(nid, 3, 0, direction)); if (err < 0) return err; return 1; } /* create playback/capture controls for input pins on dmic capable codecs */ static int stac92xx_auto_create_dmic_input_ctls(struct hda_codec *codec, const struct auto_pin_cfg *cfg) { struct sigmatel_spec *spec = codec->spec; struct hda_input_mux *imux = &spec->private_imux; struct hda_input_mux *dimux = &spec->private_dimux; int err, i; unsigned int def_conf; snd_hda_add_imux_item(dimux, stac92xx_dmic_labels[0], 0, NULL); for (i = 0; i < spec->num_dmics; i++) { hda_nid_t nid; int index, type_idx; char label[32]; nid = spec->dmic_nids[i]; if (get_wcaps_type(get_wcaps(codec, nid)) != AC_WID_PIN) continue; def_conf = snd_hda_codec_get_pincfg(codec, nid); if (get_defcfg_connect(def_conf) == AC_JACK_PORT_NONE) continue; index = get_connection_index(codec, spec->dmux_nids[0], nid); if (index < 0) continue; snd_hda_get_pin_label(codec, nid, &spec->autocfg, label, sizeof(label), NULL); snd_hda_add_imux_item(dimux, label, index, &type_idx); if (snd_hda_get_bool_hint(codec, "separate_dmux") != 1) snd_hda_add_imux_item(imux, label, index, &type_idx); err = create_elem_capture_vol(codec, nid, label, type_idx, HDA_INPUT); if (err < 0) return err; if (!err) { err = create_elem_capture_vol(codec, nid, label, type_idx, HDA_OUTPUT); if (err < 0) return err; if (!err) { nid = get_connected_node(codec, spec->dmux_nids[0], index); if (nid) err = create_elem_capture_vol(codec, nid, label, type_idx, HDA_INPUT); if (err < 0) return err; } } } return 0; } static int check_mic_pin(struct hda_codec *codec, hda_nid_t nid, hda_nid_t *fixed, hda_nid_t *ext, hda_nid_t *dock) { unsigned int cfg; unsigned int type; if (!nid) return 0; cfg = snd_hda_codec_get_pincfg(codec, nid); type = get_defcfg_device(cfg); switch 
(snd_hda_get_input_pin_attr(cfg)) { case INPUT_PIN_ATTR_INT: if (*fixed) return 1; /* already occupied */ if (type != AC_JACK_MIC_IN) return 1; /* invalid type */ *fixed = nid; break; case INPUT_PIN_ATTR_UNUSED: break; case INPUT_PIN_ATTR_DOCK: if (*dock) return 1; /* already occupied */ if (type != AC_JACK_MIC_IN && type != AC_JACK_LINE_IN) return 1; /* invalid type */ *dock = nid; break; default: if (*ext) return 1; /* already occupied */ if (type != AC_JACK_MIC_IN) return 1; /* invalid type */ *ext = nid; break; } return 0; } static int set_mic_route(struct hda_codec *codec, struct sigmatel_mic_route *mic, hda_nid_t pin) { struct sigmatel_spec *spec = codec->spec; struct auto_pin_cfg *cfg = &spec->autocfg; int i; mic->pin = pin; if (pin == 0) return 0; for (i = 0; i < cfg->num_inputs; i++) { if (pin == cfg->inputs[i].pin) break; } if (i < cfg->num_inputs && cfg->inputs[i].type == AUTO_PIN_MIC) { /* analog pin */ i = get_connection_index(codec, spec->mux_nids[0], pin); if (i < 0) return -1; mic->mux_idx = i; mic->dmux_idx = -1; if (spec->dmux_nids) mic->dmux_idx = get_connection_index(codec, spec->dmux_nids[0], spec->mux_nids[0]); } else if (spec->dmux_nids) { /* digital pin */ i = get_connection_index(codec, spec->dmux_nids[0], pin); if (i < 0) return -1; mic->dmux_idx = i; mic->mux_idx = -1; if (spec->mux_nids) mic->mux_idx = get_connection_index(codec, spec->mux_nids[0], spec->dmux_nids[0]); } return 0; } /* return non-zero if the device is for automatic mic switch */ static int stac_check_auto_mic(struct hda_codec *codec) { struct sigmatel_spec *spec = codec->spec; struct auto_pin_cfg *cfg = &spec->autocfg; hda_nid_t fixed, ext, dock; int i; fixed = ext = dock = 0; for (i = 0; i < cfg->num_inputs; i++) if (check_mic_pin(codec, cfg->inputs[i].pin, &fixed, &ext, &dock)) return 0; for (i = 0; i < spec->num_dmics; i++) if (check_mic_pin(codec, spec->dmic_nids[i], &fixed, &ext, &dock)) return 0; if (!fixed || (!ext && !dock)) return 0; /* no input to switch */ if 
(!is_jack_detectable(codec, ext)) return 0; /* no unsol support */ if (set_mic_route(codec, &spec->ext_mic, ext) || set_mic_route(codec, &spec->int_mic, fixed) || set_mic_route(codec, &spec->dock_mic, dock)) return 0; /* something is wrong */ return 1; } /* create playback/capture controls for input pins */ static int stac92xx_auto_create_analog_input_ctls(struct hda_codec *codec, const struct auto_pin_cfg *cfg) { struct sigmatel_spec *spec = codec->spec; struct hda_input_mux *imux = &spec->private_imux; int i, j; const char *label; for (i = 0; i < cfg->num_inputs; i++) { hda_nid_t nid = cfg->inputs[i].pin; int index, err, type_idx; index = -1; for (j = 0; j < spec->num_muxes; j++) { index = get_connection_index(codec, spec->mux_nids[j], nid); if (index >= 0) break; } if (index < 0) continue; label = hda_get_autocfg_input_label(codec, cfg, i); snd_hda_add_imux_item(imux, label, index, &type_idx); err = create_elem_capture_vol(codec, nid, label, type_idx, HDA_INPUT); if (err < 0) return err; } spec->num_analog_muxes = imux->num_items; if (imux->num_items) { /* * Set the current input for the muxes. * The STAC9221 has two input muxes with identical source * NID lists. Hopefully this won't get confused. 
*/ for (i = 0; i < spec->num_muxes; i++) { snd_hda_codec_write_cache(codec, spec->mux_nids[i], 0, AC_VERB_SET_CONNECT_SEL, imux->items[0].index); } } return 0; } static void stac92xx_auto_init_multi_out(struct hda_codec *codec) { struct sigmatel_spec *spec = codec->spec; int i; for (i = 0; i < spec->autocfg.line_outs; i++) { hda_nid_t nid = spec->autocfg.line_out_pins[i]; stac92xx_auto_set_pinctl(codec, nid, AC_PINCTL_OUT_EN); } } static void stac92xx_auto_init_hp_out(struct hda_codec *codec) { struct sigmatel_spec *spec = codec->spec; int i; for (i = 0; i < spec->autocfg.hp_outs; i++) { hda_nid_t pin; pin = spec->autocfg.hp_pins[i]; if (pin) /* connect to front */ stac92xx_auto_set_pinctl(codec, pin, AC_PINCTL_OUT_EN | AC_PINCTL_HP_EN); } for (i = 0; i < spec->autocfg.speaker_outs; i++) { hda_nid_t pin; pin = spec->autocfg.speaker_pins[i]; if (pin) /* connect to front */ stac92xx_auto_set_pinctl(codec, pin, AC_PINCTL_OUT_EN); } } static int is_dual_headphones(struct hda_codec *codec) { struct sigmatel_spec *spec = codec->spec; int i, valid_hps; if (spec->autocfg.line_out_type != AUTO_PIN_SPEAKER_OUT || spec->autocfg.hp_outs <= 1) return 0; valid_hps = 0; for (i = 0; i < spec->autocfg.hp_outs; i++) { hda_nid_t nid = spec->autocfg.hp_pins[i]; unsigned int cfg = snd_hda_codec_get_pincfg(codec, nid); if (get_defcfg_location(cfg) & AC_JACK_LOC_SEPARATE) continue; valid_hps++; } return (valid_hps > 1); } static int stac92xx_parse_auto_config(struct hda_codec *codec) { struct sigmatel_spec *spec = codec->spec; hda_nid_t dig_out = 0, dig_in = 0; int hp_swap = 0; int i, err; if ((err = snd_hda_parse_pin_def_config(codec, &spec->autocfg, spec->dmic_nids)) < 0) return err; if (! spec->autocfg.line_outs) return 0; /* can't find valid pin config */ /* If we have no real line-out pin and multiple hp-outs, HPs should * be set up as multi-channel outputs. 
*/ if (is_dual_headphones(codec)) { /* Copy hp_outs to line_outs, backup line_outs in * speaker_outs so that the following routines can handle * HP pins as primary outputs. */ snd_printdd("stac92xx: Enabling multi-HPs workaround\n"); memcpy(spec->autocfg.speaker_pins, spec->autocfg.line_out_pins, sizeof(spec->autocfg.line_out_pins)); spec->autocfg.speaker_outs = spec->autocfg.line_outs; memcpy(spec->autocfg.line_out_pins, spec->autocfg.hp_pins, sizeof(spec->autocfg.hp_pins)); spec->autocfg.line_outs = spec->autocfg.hp_outs; spec->autocfg.line_out_type = AUTO_PIN_HP_OUT; spec->autocfg.hp_outs = 0; hp_swap = 1; } if (spec->autocfg.mono_out_pin) { int dir = get_wcaps(codec, spec->autocfg.mono_out_pin) & (AC_WCAP_OUT_AMP | AC_WCAP_IN_AMP); u32 caps = query_amp_caps(codec, spec->autocfg.mono_out_pin, dir); hda_nid_t conn_list[1]; /* get the mixer node and then the mono mux if it exists */ if (snd_hda_get_connections(codec, spec->autocfg.mono_out_pin, conn_list, 1) && snd_hda_get_connections(codec, conn_list[0], conn_list, 1) > 0) { int wcaps = get_wcaps(codec, conn_list[0]); int wid_type = get_wcaps_type(wcaps); /* LR swap check, some stac925x have a mux that * changes the DACs output path instead of the * mono-mux path. */ if (wid_type == AC_WID_AUD_SEL && !(wcaps & AC_WCAP_LR_SWAP)) spec->mono_nid = conn_list[0]; } if (dir) { hda_nid_t nid = spec->autocfg.mono_out_pin; /* most mono outs have a least a mute/unmute switch */ dir = (dir & AC_WCAP_OUT_AMP) ? 
HDA_OUTPUT : HDA_INPUT; err = stac92xx_add_control(spec, STAC_CTL_WIDGET_MUTE, "Mono Playback Switch", HDA_COMPOSE_AMP_VAL(nid, 1, 0, dir)); if (err < 0) return err; /* check for volume support for the amp */ if ((caps & AC_AMPCAP_NUM_STEPS) >> AC_AMPCAP_NUM_STEPS_SHIFT) { err = stac92xx_add_control(spec, STAC_CTL_WIDGET_VOL, "Mono Playback Volume", HDA_COMPOSE_AMP_VAL(nid, 1, 0, dir)); if (err < 0) return err; } } stac92xx_auto_set_pinctl(codec, spec->autocfg.mono_out_pin, AC_PINCTL_OUT_EN); } if (!spec->multiout.num_dacs) { err = stac92xx_auto_fill_dac_nids(codec); if (err < 0) return err; err = stac92xx_auto_create_multi_out_ctls(codec, &spec->autocfg); if (err < 0) return err; } /* setup analog beep controls */ if (spec->anabeep_nid > 0) { err = stac92xx_auto_create_beep_ctls(codec, spec->anabeep_nid); if (err < 0) return err; } /* setup digital beep controls and input device */ #ifdef CONFIG_SND_HDA_INPUT_BEEP if (spec->digbeep_nid > 0) { hda_nid_t nid = spec->digbeep_nid; unsigned int caps; err = stac92xx_auto_create_beep_ctls(codec, nid); if (err < 0) return err; err = snd_hda_attach_beep_device(codec, nid); if (err < 0) return err; if (codec->beep) { /* IDT/STAC codecs have linear beep tone parameter */ codec->beep->linear_tone = spec->linear_tone_beep; /* if no beep switch is available, make its own one */ caps = query_amp_caps(codec, nid, HDA_OUTPUT); if (!(caps & AC_AMPCAP_MUTE)) { err = stac92xx_beep_switch_ctl(codec); if (err < 0) return err; } } } #endif err = stac92xx_auto_create_hp_ctls(codec, &spec->autocfg); if (err < 0) return err; /* All output parsing done, now restore the swapped hp pins */ if (hp_swap) { memcpy(spec->autocfg.hp_pins, spec->autocfg.line_out_pins, sizeof(spec->autocfg.hp_pins)); spec->autocfg.hp_outs = spec->autocfg.line_outs; spec->autocfg.line_out_type = AUTO_PIN_HP_OUT; spec->autocfg.line_outs = 0; } if (stac_check_auto_mic(codec)) { spec->auto_mic = 1; /* only one capture for auto-mic */ spec->num_adcs = 1; spec->num_caps = 
1; spec->num_muxes = 1; } for (i = 0; i < spec->num_caps; i++) { err = stac92xx_add_capvol_ctls(codec, spec->capvols[i], spec->capsws[i], i); if (err < 0) return err; } err = stac92xx_auto_create_analog_input_ctls(codec, &spec->autocfg); if (err < 0) return err; if (spec->mono_nid > 0) { err = stac92xx_auto_create_mono_output_ctls(codec); if (err < 0) return err; } if (spec->num_dmics > 0 && !spec->dinput_mux) if ((err = stac92xx_auto_create_dmic_input_ctls(codec, &spec->autocfg)) < 0) return err; if (spec->num_muxes > 0) { err = stac92xx_auto_create_mux_input_ctls(codec); if (err < 0) return err; } if (spec->num_smuxes > 0) { err = stac92xx_auto_create_spdif_mux_ctls(codec); if (err < 0) return err; } err = stac92xx_add_input_source(spec); if (err < 0) return err; spec->multiout.max_channels = spec->multiout.num_dacs * 2; if (spec->multiout.max_channels > 2) spec->surr_switch = 1; /* find digital out and in converters */ for (i = codec->start_nid; i < codec->start_nid + codec->num_nodes; i++) { unsigned int wid_caps = get_wcaps(codec, i); if (wid_caps & AC_WCAP_DIGITAL) { switch (get_wcaps_type(wid_caps)) { case AC_WID_AUD_OUT: if (!dig_out) dig_out = i; break; case AC_WID_AUD_IN: if (!dig_in) dig_in = i; break; } } } if (spec->autocfg.dig_outs) spec->multiout.dig_out_nid = dig_out; if (dig_in && spec->autocfg.dig_in_pin) spec->dig_in_nid = dig_in; if (spec->kctls.list) spec->mixers[spec->num_mixers++] = spec->kctls.list; spec->input_mux = &spec->private_imux; if (!spec->dinput_mux) spec->dinput_mux = &spec->private_dimux; spec->sinput_mux = &spec->private_smux; spec->mono_mux = &spec->private_mono_mux; return 1; } /* add playback controls for HP output */ static int stac9200_auto_create_hp_ctls(struct hda_codec *codec, struct auto_pin_cfg *cfg) { struct sigmatel_spec *spec = codec->spec; hda_nid_t pin = cfg->hp_pins[0]; if (! 
pin) return 0; if (is_jack_detectable(codec, pin)) spec->hp_detect = 1; return 0; } /* add playback controls for LFE output */ static int stac9200_auto_create_lfe_ctls(struct hda_codec *codec, struct auto_pin_cfg *cfg) { struct sigmatel_spec *spec = codec->spec; int err; hda_nid_t lfe_pin = 0x0; int i; /* * search speaker outs and line outs for a mono speaker pin * with an amp. If one is found, add LFE controls * for it. */ for (i = 0; i < spec->autocfg.speaker_outs && lfe_pin == 0x0; i++) { hda_nid_t pin = spec->autocfg.speaker_pins[i]; unsigned int wcaps = get_wcaps(codec, pin); wcaps &= (AC_WCAP_STEREO | AC_WCAP_OUT_AMP); if (wcaps == AC_WCAP_OUT_AMP) /* found a mono speaker with an amp, must be lfe */ lfe_pin = pin; } /* if speaker_outs is 0, then speakers may be in line_outs */ if (lfe_pin == 0 && spec->autocfg.speaker_outs == 0) { for (i = 0; i < spec->autocfg.line_outs && lfe_pin == 0x0; i++) { hda_nid_t pin = spec->autocfg.line_out_pins[i]; unsigned int defcfg; defcfg = snd_hda_codec_get_pincfg(codec, pin); if (get_defcfg_device(defcfg) == AC_JACK_SPEAKER) { unsigned int wcaps = get_wcaps(codec, pin); wcaps &= (AC_WCAP_STEREO | AC_WCAP_OUT_AMP); if (wcaps == AC_WCAP_OUT_AMP) /* found a mono speaker with an amp, must be lfe */ lfe_pin = pin; } } } if (lfe_pin) { err = create_controls(codec, "LFE", lfe_pin, 1); if (err < 0) return err; } return 0; } static int stac9200_parse_auto_config(struct hda_codec *codec) { struct sigmatel_spec *spec = codec->spec; int err; if ((err = snd_hda_parse_pin_def_config(codec, &spec->autocfg, NULL)) < 0) return err; if ((err = stac92xx_auto_create_analog_input_ctls(codec, &spec->autocfg)) < 0) return err; if ((err = stac9200_auto_create_hp_ctls(codec, &spec->autocfg)) < 0) return err; if ((err = stac9200_auto_create_lfe_ctls(codec, &spec->autocfg)) < 0) return err; if (spec->num_muxes > 0) { err = stac92xx_auto_create_mux_input_ctls(codec); if (err < 0) return err; } err = stac92xx_add_input_source(spec); if (err < 0) return 
err; if (spec->autocfg.dig_outs) spec->multiout.dig_out_nid = 0x05; if (spec->autocfg.dig_in_pin) spec->dig_in_nid = 0x04; if (spec->kctls.list) spec->mixers[spec->num_mixers++] = spec->kctls.list; spec->input_mux = &spec->private_imux; spec->dinput_mux = &spec->private_dimux; return 1; } /* * Early 2006 Intel Macintoshes with STAC9220X5 codecs seem to have a * funky external mute control using GPIO pins. */ static void stac_gpio_set(struct hda_codec *codec, unsigned int mask, unsigned int dir_mask, unsigned int data) { unsigned int gpiostate, gpiomask, gpiodir; snd_printdd("%s msk %x dir %x gpio %x\n", __func__, mask, dir_mask, data); gpiostate = snd_hda_codec_read(codec, codec->afg, 0, AC_VERB_GET_GPIO_DATA, 0); gpiostate = (gpiostate & ~dir_mask) | (data & dir_mask); gpiomask = snd_hda_codec_read(codec, codec->afg, 0, AC_VERB_GET_GPIO_MASK, 0); gpiomask |= mask; gpiodir = snd_hda_codec_read(codec, codec->afg, 0, AC_VERB_GET_GPIO_DIRECTION, 0); gpiodir |= dir_mask; /* Configure GPIOx as CMOS */ snd_hda_codec_write(codec, codec->afg, 0, 0x7e7, 0); snd_hda_codec_write(codec, codec->afg, 0, AC_VERB_SET_GPIO_MASK, gpiomask); snd_hda_codec_read(codec, codec->afg, 0, AC_VERB_SET_GPIO_DIRECTION, gpiodir); /* sync */ msleep(1); snd_hda_codec_read(codec, codec->afg, 0, AC_VERB_SET_GPIO_DATA, gpiostate); /* sync */ } static int stac_add_event(struct hda_codec *codec, hda_nid_t nid, unsigned char type, int data) { struct hda_jack_tbl *event; event = snd_hda_jack_tbl_new(codec, nid); if (!event) return -ENOMEM; event->action = type; event->private_data = data; return 0; } /* check if given nid is a valid pin and no other events are assigned * to it. If OK, assign the event, set the unsol flag, and returns 1. * Otherwise, returns zero. 
*/ static int enable_pin_detect(struct hda_codec *codec, hda_nid_t nid, unsigned int type) { struct hda_jack_tbl *event; if (!is_jack_detectable(codec, nid)) return 0; event = snd_hda_jack_tbl_new(codec, nid); if (!event) return -ENOMEM; if (event->action && event->action != type) return 0; event->action = type; snd_hda_jack_detect_enable(codec, nid, 0); return 1; } static int is_nid_out_jack_pin(struct auto_pin_cfg *cfg, hda_nid_t nid) { int i; for (i = 0; i < cfg->hp_outs; i++) if (cfg->hp_pins[i] == nid) return 1; /* nid is a HP-Out */ for (i = 0; i < cfg->line_outs; i++) if (cfg->line_out_pins[i] == nid) return 1; /* nid is a line-Out */ return 0; /* nid is not a HP-Out */ }; static void stac92xx_power_down(struct hda_codec *codec) { struct sigmatel_spec *spec = codec->spec; /* power down inactive DACs */ const hda_nid_t *dac; for (dac = spec->dac_list; *dac; dac++) if (!check_all_dac_nids(spec, *dac)) snd_hda_codec_write(codec, *dac, 0, AC_VERB_SET_POWER_STATE, AC_PWRST_D3); } static void stac_toggle_power_map(struct hda_codec *codec, hda_nid_t nid, int enable); static inline int get_int_hint(struct hda_codec *codec, const char *key, int *valp) { const char *p; p = snd_hda_get_hint(codec, key); if (p) { unsigned long val; if (!strict_strtoul(p, 0, &val)) { *valp = val; return 1; } } return 0; } /* override some hints from the hwdep entry */ static void stac_store_hints(struct hda_codec *codec) { struct sigmatel_spec *spec = codec->spec; int val; val = snd_hda_get_bool_hint(codec, "hp_detect"); if (val >= 0) spec->hp_detect = val; if (get_int_hint(codec, "gpio_mask", &spec->gpio_mask)) { spec->eapd_mask = spec->gpio_dir = spec->gpio_data = spec->gpio_mask; } if (get_int_hint(codec, "gpio_dir", &spec->gpio_dir)) spec->gpio_mask &= spec->gpio_mask; if (get_int_hint(codec, "gpio_data", &spec->gpio_data)) spec->gpio_dir &= spec->gpio_mask; if (get_int_hint(codec, "eapd_mask", &spec->eapd_mask)) spec->eapd_mask &= spec->gpio_mask; if (get_int_hint(codec, 
"gpio_mute", &spec->gpio_mute)) spec->gpio_mute &= spec->gpio_mask; val = snd_hda_get_bool_hint(codec, "eapd_switch"); if (val >= 0) spec->eapd_switch = val; get_int_hint(codec, "gpio_led_polarity", &spec->gpio_led_polarity); if (get_int_hint(codec, "gpio_led", &spec->gpio_led)) { spec->gpio_mask |= spec->gpio_led; spec->gpio_dir |= spec->gpio_led; if (spec->gpio_led_polarity) spec->gpio_data |= spec->gpio_led; } } static void stac_issue_unsol_events(struct hda_codec *codec, int num_pins, const hda_nid_t *pins) { while (num_pins--) stac_issue_unsol_event(codec, *pins++); } /* fake event to set up pins */ static void stac_fake_hp_events(struct hda_codec *codec) { struct sigmatel_spec *spec = codec->spec; if (spec->autocfg.hp_outs) stac_issue_unsol_events(codec, spec->autocfg.hp_outs, spec->autocfg.hp_pins); if (spec->autocfg.line_outs && spec->autocfg.line_out_pins[0] != spec->autocfg.hp_pins[0]) stac_issue_unsol_events(codec, spec->autocfg.line_outs, spec->autocfg.line_out_pins); } static int stac92xx_init(struct hda_codec *codec) { struct sigmatel_spec *spec = codec->spec; struct auto_pin_cfg *cfg = &spec->autocfg; unsigned int gpio; int i; snd_hda_sequence_write(codec, spec->init); /* power down adcs initially */ if (spec->powerdown_adcs) for (i = 0; i < spec->num_adcs; i++) snd_hda_codec_write(codec, spec->adc_nids[i], 0, AC_VERB_SET_POWER_STATE, AC_PWRST_D3); /* override some hints */ stac_store_hints(codec); /* set up GPIO */ gpio = spec->gpio_data; /* turn on EAPD statically when spec->eapd_switch isn't set. 
	 * otherwise, unsol event will turn it on/off dynamically */
	if (!spec->eapd_switch)
		gpio |= spec->eapd_mask;
	stac_gpio_set(codec, spec->gpio_mask, spec->gpio_dir, gpio);

	/* set up pins */
	if (spec->hp_detect) {
		/* Enable unsolicited responses on the HP widget */
		for (i = 0; i < cfg->hp_outs; i++) {
			hda_nid_t nid = cfg->hp_pins[i];
			enable_pin_detect(codec, nid, STAC_HP_EVENT);
		}
		if (cfg->line_out_type == AUTO_PIN_LINE_OUT &&
		    cfg->speaker_outs > 0) {
			/* enable pin-detect for line-outs as well */
			for (i = 0; i < cfg->line_outs; i++) {
				hda_nid_t nid = cfg->line_out_pins[i];
				enable_pin_detect(codec, nid, STAC_LO_EVENT);
			}
		}

		/* force to enable the first line-out; the others are set up
		 * in unsol_event
		 */
		stac92xx_auto_set_pinctl(codec, spec->autocfg.line_out_pins[0],
					 AC_PINCTL_OUT_EN);
		/* fake event to set up pins */
		stac_fake_hp_events(codec);
	} else {
		stac92xx_auto_init_multi_out(codec);
		stac92xx_auto_init_hp_out(codec);
		for (i = 0; i < cfg->hp_outs; i++)
			stac_toggle_power_map(codec, cfg->hp_pins[i], 1);
	}
	if (spec->auto_mic) {
		/* initialize connection to analog input */
		if (spec->dmux_nids)
			snd_hda_codec_write_cache(codec, spec->dmux_nids[0], 0,
						  AC_VERB_SET_CONNECT_SEL, 0);
		if (enable_pin_detect(codec, spec->ext_mic.pin, STAC_MIC_EVENT))
			stac_issue_unsol_event(codec, spec->ext_mic.pin);
		if (enable_pin_detect(codec, spec->dock_mic.pin,
				      STAC_MIC_EVENT))
			stac_issue_unsol_event(codec, spec->dock_mic.pin);
	}
	for (i = 0; i < cfg->num_inputs; i++) {
		hda_nid_t nid = cfg->inputs[i].pin;
		int type = cfg->inputs[i].type;
		unsigned int pinctl, conf;
		if (type == AUTO_PIN_MIC) {
			/* for mic pins, force to initialize */
			pinctl = stac92xx_get_default_vref(codec, nid);
			pinctl |= AC_PINCTL_IN_EN;
			stac92xx_auto_set_pinctl(codec, nid, pinctl);
		} else {
			pinctl = snd_hda_codec_read(codec, nid, 0,
					AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
			/* if PINCTL already set then skip */
			/* Also, if both INPUT and OUTPUT are set,
			 * it must be a BIOS bug; need to override, too
			 */
			if (!(pinctl & AC_PINCTL_IN_EN) ||
			    (pinctl & AC_PINCTL_OUT_EN)) {
				pinctl &= ~AC_PINCTL_OUT_EN;
				pinctl |= AC_PINCTL_IN_EN;
				stac92xx_auto_set_pinctl(codec, nid, pinctl);
			}
		}
		conf = snd_hda_codec_get_pincfg(codec, nid);
		/* fixed (internal) ports don't need jack detection */
		if (get_defcfg_connect(conf) != AC_JACK_PORT_FIXED) {
			if (enable_pin_detect(codec, nid, STAC_INSERT_EVENT))
				stac_issue_unsol_event(codec, nid);
		}
	}
	for (i = 0; i < spec->num_dmics; i++)
		stac92xx_auto_set_pinctl(codec, spec->dmic_nids[i],
					 AC_PINCTL_IN_EN);
	if (cfg->dig_out_pins[0])
		stac92xx_auto_set_pinctl(codec, cfg->dig_out_pins[0],
					 AC_PINCTL_OUT_EN);
	if (cfg->dig_in_pin)
		stac92xx_auto_set_pinctl(codec, cfg->dig_in_pin,
					 AC_PINCTL_IN_EN);
	for (i = 0; i < spec->num_pwrs; i++)  {
		hda_nid_t nid = spec->pwr_nids[i];
		unsigned int pinctl, def_conf;

		/* power on when no jack detection is available */
		/* or when the VREF is used for controlling LED */
		if (!spec->hp_detect ||
		    spec->vref_mute_led_nid == nid) {
			stac_toggle_power_map(codec, nid, 1);
			continue;
		}

		if (is_nid_out_jack_pin(cfg, nid))
			continue; /* already has an unsol event */

		pinctl = snd_hda_codec_read(codec, nid, 0,
					    AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
		/* outputs are only ports capable of power management
		 * any attempts on powering down a input port cause the
		 * referenced VREF to act quirky.
		 */
		if (pinctl & AC_PINCTL_IN_EN) {
			stac_toggle_power_map(codec, nid, 1);
			continue;
		}
		def_conf = snd_hda_codec_get_pincfg(codec, nid);
		def_conf = get_defcfg_connect(def_conf);
		/* skip any ports that don't have jacks since presence
		 * detection is useless */
		if (def_conf != AC_JACK_PORT_COMPLEX ||
		    !is_jack_detectable(codec, nid)) {
			stac_toggle_power_map(codec, nid, 1);
			continue;
		}
		if (enable_pin_detect(codec, nid, STAC_PWR_EVENT)) {
			stac_issue_unsol_event(codec, nid);
			continue;
		}
		/* none of the above, turn the port OFF */
		stac_toggle_power_map(codec, nid, 0);
	}

	snd_hda_jack_report_sync(codec);

	/* sync mute LED */
	snd_hda_sync_vmaster_hook(&spec->vmaster_mute);
	if (spec->dac_list)
		stac92xx_power_down(codec);
	return 0;
}

/* free the kcontrol name strings allocated during auto-parsing */
static void stac92xx_free_kctls(struct hda_codec *codec)
{
	struct sigmatel_spec *spec = codec->spec;

	if (spec->kctls.list) {
		struct snd_kcontrol_new *kctl = spec->kctls.list;
		int i;
		for (i = 0; i < spec->kctls.used; i++)
			kfree(kctl[i].name);
	}
	snd_array_free(&spec->kctls);
}

/* clear the pin-widget control of every connected pin (skipped at reboot) */
static void stac92xx_shutup_pins(struct hda_codec *codec)
{
	unsigned int i, def_conf;

	if (codec->bus->shutdown)
		return;
	for (i = 0; i < codec->init_pins.used; i++) {
		struct hda_pincfg *pin = snd_array_elem(&codec->init_pins, i);
		def_conf = snd_hda_codec_get_pincfg(codec, pin->nid);
		if (get_defcfg_connect(def_conf) != AC_JACK_PORT_NONE)
			snd_hda_codec_write(codec, pin->nid, 0,
					AC_VERB_SET_PIN_WIDGET_CONTROL, 0);
	}
}

/* power down pins and drop the EAPD GPIO bit(s) */
static void stac92xx_shutup(struct hda_codec *codec)
{
	struct sigmatel_spec *spec = codec->spec;

	stac92xx_shutup_pins(codec);

	if (spec->eapd_mask)
		stac_gpio_set(codec, spec->gpio_mask,
				spec->gpio_dir, spec->gpio_data &
				~spec->eapd_mask);
}

/* codec .free callback: shut down and release the spec */
static void stac92xx_free(struct hda_codec *codec)
{
	struct sigmatel_spec *spec = codec->spec;

	if (!
spec) return; stac92xx_shutup(codec); kfree(spec); snd_hda_detach_beep_device(codec); } static void stac92xx_set_pinctl(struct hda_codec *codec, hda_nid_t nid, unsigned int flag) { unsigned int old_ctl, pin_ctl; pin_ctl = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_PIN_WIDGET_CONTROL, 0x00); if (pin_ctl & AC_PINCTL_IN_EN) { /* * we need to check the current set-up direction of * shared input pins since they can be switched via * "xxx as Output" mixer switch */ struct sigmatel_spec *spec = codec->spec; if (nid == spec->line_switch || nid == spec->mic_switch) return; } old_ctl = pin_ctl; /* if setting pin direction bits, clear the current direction bits first */ if (flag & (AC_PINCTL_IN_EN | AC_PINCTL_OUT_EN)) pin_ctl &= ~(AC_PINCTL_IN_EN | AC_PINCTL_OUT_EN); pin_ctl |= flag; if (old_ctl != pin_ctl) snd_hda_codec_write_cache(codec, nid, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, pin_ctl); } static void stac92xx_reset_pinctl(struct hda_codec *codec, hda_nid_t nid, unsigned int flag) { unsigned int pin_ctl = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_PIN_WIDGET_CONTROL, 0x00); if (pin_ctl & flag) snd_hda_codec_write_cache(codec, nid, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, pin_ctl & ~flag); } static inline int get_pin_presence(struct hda_codec *codec, hda_nid_t nid) { if (!nid) return 0; return snd_hda_jack_detect(codec, nid); } static void stac92xx_line_out_detect(struct hda_codec *codec, int presence) { struct sigmatel_spec *spec = codec->spec; struct auto_pin_cfg *cfg = &spec->autocfg; int i; for (i = 0; i < cfg->line_outs; i++) { if (presence) break; presence = get_pin_presence(codec, cfg->line_out_pins[i]); if (presence) { unsigned int pinctl; pinctl = snd_hda_codec_read(codec, cfg->line_out_pins[i], 0, AC_VERB_GET_PIN_WIDGET_CONTROL, 0); if (pinctl & AC_PINCTL_IN_EN) presence = 0; /* mic- or line-input */ } } if (presence) { /* disable speakers */ for (i = 0; i < cfg->speaker_outs; i++) stac92xx_reset_pinctl(codec, cfg->speaker_pins[i], AC_PINCTL_OUT_EN); if 
(spec->eapd_mask && spec->eapd_switch) stac_gpio_set(codec, spec->gpio_mask, spec->gpio_dir, spec->gpio_data & ~spec->eapd_mask); } else { /* enable speakers */ for (i = 0; i < cfg->speaker_outs; i++) stac92xx_set_pinctl(codec, cfg->speaker_pins[i], AC_PINCTL_OUT_EN); if (spec->eapd_mask && spec->eapd_switch) stac_gpio_set(codec, spec->gpio_mask, spec->gpio_dir, spec->gpio_data | spec->eapd_mask); } } /* return non-zero if the hp-pin of the given array index isn't * a jack-detection target */ static int no_hp_sensing(struct sigmatel_spec *spec, int i) { struct auto_pin_cfg *cfg = &spec->autocfg; /* ignore sensing of shared line and mic jacks */ if (cfg->hp_pins[i] == spec->line_switch) return 1; if (cfg->hp_pins[i] == spec->mic_switch) return 1; /* ignore if the pin is set as line-out */ if (cfg->hp_pins[i] == spec->hp_switch) return 1; return 0; } static void stac92xx_hp_detect(struct hda_codec *codec) { struct sigmatel_spec *spec = codec->spec; struct auto_pin_cfg *cfg = &spec->autocfg; int i, presence; presence = 0; if (spec->gpio_mute) presence = !(snd_hda_codec_read(codec, codec->afg, 0, AC_VERB_GET_GPIO_DATA, 0) & spec->gpio_mute); for (i = 0; i < cfg->hp_outs; i++) { if (presence) break; if (no_hp_sensing(spec, i)) continue; presence = get_pin_presence(codec, cfg->hp_pins[i]); if (presence) { unsigned int pinctl; pinctl = snd_hda_codec_read(codec, cfg->hp_pins[i], 0, AC_VERB_GET_PIN_WIDGET_CONTROL, 0); if (pinctl & AC_PINCTL_IN_EN) presence = 0; /* mic- or line-input */ } } if (presence) { /* disable lineouts */ if (spec->hp_switch) stac92xx_reset_pinctl(codec, spec->hp_switch, AC_PINCTL_OUT_EN); for (i = 0; i < cfg->line_outs; i++) stac92xx_reset_pinctl(codec, cfg->line_out_pins[i], AC_PINCTL_OUT_EN); } else { /* enable lineouts */ if (spec->hp_switch) stac92xx_set_pinctl(codec, spec->hp_switch, AC_PINCTL_OUT_EN); for (i = 0; i < cfg->line_outs; i++) stac92xx_set_pinctl(codec, cfg->line_out_pins[i], AC_PINCTL_OUT_EN); } stac92xx_line_out_detect(codec, 
presence); /* toggle hp outs */ for (i = 0; i < cfg->hp_outs; i++) { unsigned int val = AC_PINCTL_OUT_EN | AC_PINCTL_HP_EN; if (no_hp_sensing(spec, i)) continue; if (1 /*presence*/) stac92xx_set_pinctl(codec, cfg->hp_pins[i], val); #if 0 /* FIXME */ /* Resetting the pinctl like below may lead to (a sort of) regressions * on some devices since they use the HP pin actually for line/speaker * outs although the default pin config shows a different pin (that is * wrong and useless). * * So, it's basically a problem of default pin configs, likely a BIOS issue. * But, disabling the code below just works around it, and I'm too tired of * bug reports with such devices... */ else stac92xx_reset_pinctl(codec, cfg->hp_pins[i], val); #endif /* FIXME */ } } static void stac_toggle_power_map(struct hda_codec *codec, hda_nid_t nid, int enable) { struct sigmatel_spec *spec = codec->spec; unsigned int idx, val; for (idx = 0; idx < spec->num_pwrs; idx++) { if (spec->pwr_nids[idx] == nid) break; } if (idx >= spec->num_pwrs) return; idx = 1 << idx; val = snd_hda_codec_read(codec, codec->afg, 0, 0x0fec, 0x0) & 0xff; if (enable) val &= ~idx; else val |= idx; /* power down unused output ports */ snd_hda_codec_write(codec, codec->afg, 0, 0x7ec, val); } static void stac92xx_pin_sense(struct hda_codec *codec, hda_nid_t nid) { stac_toggle_power_map(codec, nid, get_pin_presence(codec, nid)); } /* get the pin connection (fixed, none, etc) */ static unsigned int stac_get_defcfg_connect(struct hda_codec *codec, int idx) { struct sigmatel_spec *spec = codec->spec; unsigned int cfg; cfg = snd_hda_codec_get_pincfg(codec, spec->pin_nids[idx]); return get_defcfg_connect(cfg); } static int stac92xx_connected_ports(struct hda_codec *codec, const hda_nid_t *nids, int num_nids) { struct sigmatel_spec *spec = codec->spec; int idx, num; unsigned int def_conf; for (num = 0; num < num_nids; num++) { for (idx = 0; idx < spec->num_pins; idx++) if (spec->pin_nids[idx] == nids[num]) break; if (idx >= 
spec->num_pins) break; def_conf = stac_get_defcfg_connect(codec, idx); if (def_conf == AC_JACK_PORT_NONE) break; } return num; } static void stac92xx_mic_detect(struct hda_codec *codec) { struct sigmatel_spec *spec = codec->spec; struct sigmatel_mic_route *mic; if (get_pin_presence(codec, spec->ext_mic.pin)) mic = &spec->ext_mic; else if (get_pin_presence(codec, spec->dock_mic.pin)) mic = &spec->dock_mic; else mic = &spec->int_mic; if (mic->dmux_idx >= 0) snd_hda_codec_write_cache(codec, spec->dmux_nids[0], 0, AC_VERB_SET_CONNECT_SEL, mic->dmux_idx); if (mic->mux_idx >= 0) snd_hda_codec_write_cache(codec, spec->mux_nids[0], 0, AC_VERB_SET_CONNECT_SEL, mic->mux_idx); } static void handle_unsol_event(struct hda_codec *codec, struct hda_jack_tbl *event) { struct sigmatel_spec *spec = codec->spec; int data; switch (event->action) { case STAC_HP_EVENT: case STAC_LO_EVENT: stac92xx_hp_detect(codec); break; case STAC_MIC_EVENT: stac92xx_mic_detect(codec); break; } switch (event->action) { case STAC_HP_EVENT: case STAC_LO_EVENT: case STAC_MIC_EVENT: case STAC_INSERT_EVENT: case STAC_PWR_EVENT: if (spec->num_pwrs > 0) stac92xx_pin_sense(codec, event->nid); switch (codec->subsystem_id) { case 0x103c308f: if (event->nid == 0xb) { int pin = AC_PINCTL_IN_EN; if (get_pin_presence(codec, 0xa) && get_pin_presence(codec, 0xb)) pin |= AC_PINCTL_VREF_80; if (!get_pin_presence(codec, 0xb)) pin |= AC_PINCTL_VREF_80; /* toggle VREF state based on mic + hp pin * status */ stac92xx_auto_set_pinctl(codec, 0x0a, pin); } } break; case STAC_VREF_EVENT: data = snd_hda_codec_read(codec, codec->afg, 0, AC_VERB_GET_GPIO_DATA, 0); /* toggle VREF state based on GPIOx status */ snd_hda_codec_write(codec, codec->afg, 0, 0x7e0, !!(data & (1 << event->private_data))); break; } } static void stac_issue_unsol_event(struct hda_codec *codec, hda_nid_t nid) { struct hda_jack_tbl *event = snd_hda_jack_tbl_get(codec, nid); if (!event) return; handle_unsol_event(codec, event); } static void 
stac92xx_unsol_event(struct hda_codec *codec, unsigned int res) { struct hda_jack_tbl *event; int tag; tag = (res >> 26) & 0x7f; event = snd_hda_jack_tbl_get_from_tag(codec, tag); if (!event) return; event->jack_dirty = 1; handle_unsol_event(codec, event); snd_hda_jack_report_sync(codec); } static int hp_blike_system(u32 subsystem_id); static void set_hp_led_gpio(struct hda_codec *codec) { struct sigmatel_spec *spec = codec->spec; unsigned int gpio; if (spec->gpio_led) return; gpio = snd_hda_param_read(codec, codec->afg, AC_PAR_GPIO_CAP); gpio &= AC_GPIO_IO_COUNT; if (gpio > 3) spec->gpio_led = 0x08; /* GPIO 3 */ else spec->gpio_led = 0x01; /* GPIO 0 */ } /* * This method searches for the mute LED GPIO configuration * provided as OEM string in SMBIOS. The format of that string * is HP_Mute_LED_P_G or HP_Mute_LED_P * where P can be 0 or 1 and defines mute LED GPIO control state (low/high) * that corresponds to the NOT muted state of the master volume * and G is the index of the GPIO to use as the mute LED control (0..9) * If _G portion is missing it is assigned based on the codec ID * * So, HP B-series like systems may have HP_Mute_LED_0 (current models) * or HP_Mute_LED_0_3 (future models) OEM SMBIOS strings * * * The dv-series laptops don't seem to have the HP_Mute_LED* strings in * SMBIOS - at least the ones I have seen do not have them - which include * my own system (HP Pavilion dv6-1110ax) and my cousin's * HP Pavilion dv9500t CTO. * Need more information on whether it is true across the entire series. 
* -- kunal */ static int find_mute_led_cfg(struct hda_codec *codec, int default_polarity) { struct sigmatel_spec *spec = codec->spec; const struct dmi_device *dev = NULL; if ((codec->subsystem_id >> 16) == PCI_VENDOR_ID_HP) { while ((dev = dmi_find_device(DMI_DEV_TYPE_OEM_STRING, NULL, dev))) { if (sscanf(dev->name, "HP_Mute_LED_%d_%x", &spec->gpio_led_polarity, &spec->gpio_led) == 2) { unsigned int max_gpio; max_gpio = snd_hda_param_read(codec, codec->afg, AC_PAR_GPIO_CAP); max_gpio &= AC_GPIO_IO_COUNT; if (spec->gpio_led < max_gpio) spec->gpio_led = 1 << spec->gpio_led; else spec->vref_mute_led_nid = spec->gpio_led; return 1; } if (sscanf(dev->name, "HP_Mute_LED_%d", &spec->gpio_led_polarity) == 1) { set_hp_led_gpio(codec); return 1; } /* BIOS bug: unfilled OEM string */ if (strstr(dev->name, "HP_Mute_LED_P_G")) { set_hp_led_gpio(codec); switch (codec->subsystem_id) { case 0x103c148a: spec->gpio_led_polarity = 0; break; default: spec->gpio_led_polarity = 1; break; } return 1; } } /* * Fallback case - if we don't find the DMI strings, * we statically set the GPIO - if not a B-series system * and default polarity is provided */ if (!hp_blike_system(codec->subsystem_id) && (default_polarity == 0 || default_polarity == 1)) { set_hp_led_gpio(codec); spec->gpio_led_polarity = default_polarity; return 1; } } return 0; } static int hp_blike_system(u32 subsystem_id) { switch (subsystem_id) { case 0x103c1520: case 0x103c1521: case 0x103c1523: case 0x103c1524: case 0x103c1525: case 0x103c1722: case 0x103c1723: case 0x103c1724: case 0x103c1725: case 0x103c1726: case 0x103c1727: case 0x103c1728: case 0x103c1729: case 0x103c172a: case 0x103c172b: case 0x103c307e: case 0x103c307f: case 0x103c3080: case 0x103c3081: case 0x103c7007: case 0x103c7008: return 1; } return 0; } #ifdef CONFIG_PROC_FS static void stac92hd_proc_hook(struct snd_info_buffer *buffer, struct hda_codec *codec, hda_nid_t nid) { if (nid == codec->afg) snd_iprintf(buffer, "Power-Map: 0x%02x\n", 
snd_hda_codec_read(codec, nid, 0, 0x0fec, 0x0)); } static void analog_loop_proc_hook(struct snd_info_buffer *buffer, struct hda_codec *codec, unsigned int verb) { snd_iprintf(buffer, "Analog Loopback: 0x%02x\n", snd_hda_codec_read(codec, codec->afg, 0, verb, 0)); } /* stac92hd71bxx, stac92hd73xx */ static void stac92hd7x_proc_hook(struct snd_info_buffer *buffer, struct hda_codec *codec, hda_nid_t nid) { stac92hd_proc_hook(buffer, codec, nid); if (nid == codec->afg) analog_loop_proc_hook(buffer, codec, 0xfa0); } static void stac9205_proc_hook(struct snd_info_buffer *buffer, struct hda_codec *codec, hda_nid_t nid) { if (nid == codec->afg) analog_loop_proc_hook(buffer, codec, 0xfe0); } static void stac927x_proc_hook(struct snd_info_buffer *buffer, struct hda_codec *codec, hda_nid_t nid) { if (nid == codec->afg) analog_loop_proc_hook(buffer, codec, 0xfeb); } #else #define stac92hd_proc_hook NULL #define stac92hd7x_proc_hook NULL #define stac9205_proc_hook NULL #define stac927x_proc_hook NULL #endif #ifdef CONFIG_PM static int stac92xx_resume(struct hda_codec *codec) { stac92xx_init(codec); snd_hda_codec_resume_amp(codec); snd_hda_codec_resume_cache(codec); /* fake event to set up pins again to override cached values */ stac_fake_hp_events(codec); return 0; } static int stac92xx_suspend(struct hda_codec *codec, pm_message_t state) { stac92xx_shutup(codec); return 0; } static int stac92xx_pre_resume(struct hda_codec *codec) { struct sigmatel_spec *spec = codec->spec; /* sync mute LED */ if (spec->vref_mute_led_nid) stac_vrefout_set(codec, spec->vref_mute_led_nid, spec->vref_led); else if (spec->gpio_led) stac_gpio_set(codec, spec->gpio_mask, spec->gpio_dir, spec->gpio_data); return 0; } static void stac92xx_set_power_state(struct hda_codec *codec, hda_nid_t fg, unsigned int power_state) { unsigned int afg_power_state = power_state; struct sigmatel_spec *spec = codec->spec; if (power_state == AC_PWRST_D3) { if (spec->vref_mute_led_nid) { /* with vref-out pin used for mute 
led control * codec AFG is prevented from D3 state */ afg_power_state = AC_PWRST_D1; } /* this delay seems necessary to avoid click noise at power-down */ msleep(100); } snd_hda_codec_read(codec, fg, 0, AC_VERB_SET_POWER_STATE, afg_power_state); snd_hda_codec_set_power_to_all(codec, fg, power_state, true); } #else #define stac92xx_suspend NULL #define stac92xx_resume NULL #define stac92xx_pre_resume NULL #define stac92xx_set_power_state NULL #endif /* CONFIG_PM */ /* update mute-LED accoring to the master switch */ static void stac92xx_update_led_status(struct hda_codec *codec, int enabled) { struct sigmatel_spec *spec = codec->spec; int muted = !enabled; if (!spec->gpio_led) return; /* LED state is inverted on these systems */ if (spec->gpio_led_polarity) muted = !muted; if (!spec->vref_mute_led_nid) { if (muted) spec->gpio_data |= spec->gpio_led; else spec->gpio_data &= ~spec->gpio_led; stac_gpio_set(codec, spec->gpio_mask, spec->gpio_dir, spec->gpio_data); } else { spec->vref_led = muted ? 
AC_PINCTL_VREF_50 : AC_PINCTL_VREF_GRD; stac_vrefout_set(codec, spec->vref_mute_led_nid, spec->vref_led); } } static const struct hda_codec_ops stac92xx_patch_ops = { .build_controls = stac92xx_build_controls, .build_pcms = stac92xx_build_pcms, .init = stac92xx_init, .free = stac92xx_free, .unsol_event = stac92xx_unsol_event, #ifdef CONFIG_PM .suspend = stac92xx_suspend, .resume = stac92xx_resume, #endif .reboot_notify = stac92xx_shutup, }; static int patch_stac9200(struct hda_codec *codec) { struct sigmatel_spec *spec; int err; spec = kzalloc(sizeof(*spec), GFP_KERNEL); if (spec == NULL) return -ENOMEM; codec->no_trigger_sense = 1; codec->spec = spec; spec->linear_tone_beep = 1; spec->num_pins = ARRAY_SIZE(stac9200_pin_nids); spec->pin_nids = stac9200_pin_nids; spec->board_config = snd_hda_check_board_config(codec, STAC_9200_MODELS, stac9200_models, stac9200_cfg_tbl); if (spec->board_config < 0) snd_printdd(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n", codec->chip_name); else stac92xx_set_config_regs(codec, stac9200_brd_tbl[spec->board_config]); spec->multiout.max_channels = 2; spec->multiout.num_dacs = 1; spec->multiout.dac_nids = stac9200_dac_nids; spec->adc_nids = stac9200_adc_nids; spec->mux_nids = stac9200_mux_nids; spec->num_muxes = 1; spec->num_dmics = 0; spec->num_adcs = 1; spec->num_pwrs = 0; if (spec->board_config == STAC_9200_M4 || spec->board_config == STAC_9200_M4_2 || spec->board_config == STAC_9200_OQO) spec->init = stac9200_eapd_init; else spec->init = stac9200_core_init; spec->mixer = stac9200_mixer; if (spec->board_config == STAC_9200_PANASONIC) { spec->gpio_mask = spec->gpio_dir = 0x09; spec->gpio_data = 0x00; } err = stac9200_parse_auto_config(codec); if (err < 0) { stac92xx_free(codec); return err; } /* CF-74 has no headphone detection, and the driver should *NOT* * do detection and HP/speaker toggle because the hardware does it. 
*/ if (spec->board_config == STAC_9200_PANASONIC) spec->hp_detect = 0; codec->patch_ops = stac92xx_patch_ops; return 0; } static int patch_stac925x(struct hda_codec *codec) { struct sigmatel_spec *spec; int err; spec = kzalloc(sizeof(*spec), GFP_KERNEL); if (spec == NULL) return -ENOMEM; codec->no_trigger_sense = 1; codec->spec = spec; spec->linear_tone_beep = 1; spec->num_pins = ARRAY_SIZE(stac925x_pin_nids); spec->pin_nids = stac925x_pin_nids; /* Check first for codec ID */ spec->board_config = snd_hda_check_board_codec_sid_config(codec, STAC_925x_MODELS, stac925x_models, stac925x_codec_id_cfg_tbl); /* Now checks for PCI ID, if codec ID is not found */ if (spec->board_config < 0) spec->board_config = snd_hda_check_board_config(codec, STAC_925x_MODELS, stac925x_models, stac925x_cfg_tbl); again: if (spec->board_config < 0) snd_printdd(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n", codec->chip_name); else stac92xx_set_config_regs(codec, stac925x_brd_tbl[spec->board_config]); spec->multiout.max_channels = 2; spec->multiout.num_dacs = 1; spec->multiout.dac_nids = stac925x_dac_nids; spec->adc_nids = stac925x_adc_nids; spec->mux_nids = stac925x_mux_nids; spec->num_muxes = 1; spec->num_adcs = 1; spec->num_pwrs = 0; switch (codec->vendor_id) { case 0x83847632: /* STAC9202 */ case 0x83847633: /* STAC9202D */ case 0x83847636: /* STAC9251 */ case 0x83847637: /* STAC9251D */ spec->num_dmics = STAC925X_NUM_DMICS; spec->dmic_nids = stac925x_dmic_nids; spec->num_dmuxes = ARRAY_SIZE(stac925x_dmux_nids); spec->dmux_nids = stac925x_dmux_nids; break; default: spec->num_dmics = 0; break; } spec->init = stac925x_core_init; spec->mixer = stac925x_mixer; spec->num_caps = 1; spec->capvols = stac925x_capvols; spec->capsws = stac925x_capsws; err = stac92xx_parse_auto_config(codec); if (!err) { if (spec->board_config < 0) { printk(KERN_WARNING "hda_codec: No auto-config is " "available, default to model=ref\n"); spec->board_config = STAC_925x_REF; goto again; } err = -EINVAL; } if (err < 
0) { stac92xx_free(codec); return err; } codec->patch_ops = stac92xx_patch_ops; return 0; } static int patch_stac92hd73xx(struct hda_codec *codec) { struct sigmatel_spec *spec; hda_nid_t conn[STAC92HD73_DAC_COUNT + 2]; int err = 0; int num_dacs; spec = kzalloc(sizeof(*spec), GFP_KERNEL); if (spec == NULL) return -ENOMEM; codec->no_trigger_sense = 1; codec->spec = spec; spec->linear_tone_beep = 0; codec->slave_dig_outs = stac92hd73xx_slave_dig_outs; spec->num_pins = ARRAY_SIZE(stac92hd73xx_pin_nids); spec->pin_nids = stac92hd73xx_pin_nids; spec->board_config = snd_hda_check_board_config(codec, STAC_92HD73XX_MODELS, stac92hd73xx_models, stac92hd73xx_cfg_tbl); /* check codec subsystem id if not found */ if (spec->board_config < 0) spec->board_config = snd_hda_check_board_codec_sid_config(codec, STAC_92HD73XX_MODELS, stac92hd73xx_models, stac92hd73xx_codec_id_cfg_tbl); again: if (spec->board_config < 0) snd_printdd(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n", codec->chip_name); else stac92xx_set_config_regs(codec, stac92hd73xx_brd_tbl[spec->board_config]); num_dacs = snd_hda_get_connections(codec, 0x0a, conn, STAC92HD73_DAC_COUNT + 2) - 1; if (num_dacs < 3 || num_dacs > 5) { printk(KERN_WARNING "hda_codec: Could not determine " "number of channels defaulting to DAC count\n"); num_dacs = STAC92HD73_DAC_COUNT; } spec->init = stac92hd73xx_core_init; switch (num_dacs) { case 0x3: /* 6 Channel */ spec->aloopback_ctl = stac92hd73xx_6ch_loopback; break; case 0x4: /* 8 Channel */ spec->aloopback_ctl = stac92hd73xx_8ch_loopback; break; case 0x5: /* 10 Channel */ spec->aloopback_ctl = stac92hd73xx_10ch_loopback; break; } spec->multiout.dac_nids = spec->dac_nids; spec->aloopback_mask = 0x01; spec->aloopback_shift = 8; spec->digbeep_nid = 0x1c; spec->mux_nids = stac92hd73xx_mux_nids; spec->adc_nids = stac92hd73xx_adc_nids; spec->dmic_nids = stac92hd73xx_dmic_nids; spec->dmux_nids = stac92hd73xx_dmux_nids; spec->smux_nids = stac92hd73xx_smux_nids; spec->num_muxes = 
ARRAY_SIZE(stac92hd73xx_mux_nids); spec->num_adcs = ARRAY_SIZE(stac92hd73xx_adc_nids); spec->num_dmuxes = ARRAY_SIZE(stac92hd73xx_dmux_nids); spec->num_caps = STAC92HD73XX_NUM_CAPS; spec->capvols = stac92hd73xx_capvols; spec->capsws = stac92hd73xx_capsws; switch (spec->board_config) { case STAC_DELL_EQ: spec->init = dell_eq_core_init; /* fallthru */ case STAC_DELL_M6_AMIC: case STAC_DELL_M6_DMIC: case STAC_DELL_M6_BOTH: spec->num_smuxes = 0; spec->eapd_switch = 0; switch (spec->board_config) { case STAC_DELL_M6_AMIC: /* Analog Mics */ snd_hda_codec_set_pincfg(codec, 0x0b, 0x90A70170); spec->num_dmics = 0; break; case STAC_DELL_M6_DMIC: /* Digital Mics */ snd_hda_codec_set_pincfg(codec, 0x13, 0x90A60160); spec->num_dmics = 1; break; case STAC_DELL_M6_BOTH: /* Both */ snd_hda_codec_set_pincfg(codec, 0x0b, 0x90A70170); snd_hda_codec_set_pincfg(codec, 0x13, 0x90A60160); spec->num_dmics = 1; break; } break; case STAC_ALIENWARE_M17X: spec->num_dmics = STAC92HD73XX_NUM_DMICS; spec->num_smuxes = ARRAY_SIZE(stac92hd73xx_smux_nids); spec->eapd_switch = 0; break; default: spec->num_dmics = STAC92HD73XX_NUM_DMICS; spec->num_smuxes = ARRAY_SIZE(stac92hd73xx_smux_nids); spec->eapd_switch = 1; break; } if (spec->board_config != STAC_92HD73XX_REF) { /* GPIO0 High = Enable EAPD */ spec->eapd_mask = spec->gpio_mask = spec->gpio_dir = 0x1; spec->gpio_data = 0x01; } spec->num_pwrs = ARRAY_SIZE(stac92hd73xx_pwr_nids); spec->pwr_nids = stac92hd73xx_pwr_nids; err = stac92xx_parse_auto_config(codec); if (!err) { if (spec->board_config < 0) { printk(KERN_WARNING "hda_codec: No auto-config is " "available, default to model=ref\n"); spec->board_config = STAC_92HD73XX_REF; goto again; } err = -EINVAL; } if (err < 0) { stac92xx_free(codec); return err; } if (spec->board_config == STAC_92HD73XX_NO_JD) spec->hp_detect = 0; codec->patch_ops = stac92xx_patch_ops; codec->proc_widget_hook = stac92hd7x_proc_hook; return 0; } static int hp_bnb2011_with_dock(struct hda_codec *codec) { if 
(codec->vendor_id != 0x111d7605 && codec->vendor_id != 0x111d76d1) return 0; switch (codec->subsystem_id) { case 0x103c1618: case 0x103c1619: case 0x103c161a: case 0x103c161b: case 0x103c161c: case 0x103c161d: case 0x103c161e: case 0x103c161f: case 0x103c162a: case 0x103c162b: case 0x103c1630: case 0x103c1631: case 0x103c1633: case 0x103c1634: case 0x103c1635: case 0x103c3587: case 0x103c3588: case 0x103c3589: case 0x103c358a: case 0x103c3667: case 0x103c3668: case 0x103c3669: return 1; } return 0; } static void stac92hd8x_add_pin(struct hda_codec *codec, hda_nid_t nid) { struct sigmatel_spec *spec = codec->spec; unsigned int def_conf = snd_hda_codec_get_pincfg(codec, nid); int i; spec->auto_pin_nids[spec->auto_pin_cnt] = nid; spec->auto_pin_cnt++; if (get_defcfg_device(def_conf) == AC_JACK_MIC_IN && get_defcfg_connect(def_conf) != AC_JACK_PORT_NONE) { for (i = 0; i < ARRAY_SIZE(stac92hd83xxx_dmic_nids); i++) { if (nid == stac92hd83xxx_dmic_nids[i]) { spec->auto_dmic_nids[spec->auto_dmic_cnt] = nid; spec->auto_dmic_cnt++; } } } } static void stac92hd8x_add_adc(struct hda_codec *codec, hda_nid_t nid) { struct sigmatel_spec *spec = codec->spec; spec->auto_adc_nids[spec->auto_adc_cnt] = nid; spec->auto_adc_cnt++; } static void stac92hd8x_add_mux(struct hda_codec *codec, hda_nid_t nid) { int i, j; struct sigmatel_spec *spec = codec->spec; for (i = 0; i < spec->auto_adc_cnt; i++) { if (get_connection_index(codec, spec->auto_adc_nids[i], nid) >= 0) { /* mux and volume for adc_nids[i] */ if (!spec->auto_mux_nids[i]) { spec->auto_mux_nids[i] = nid; /* 92hd codecs capture volume is in mux */ spec->auto_capvols[i] = HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_OUTPUT); } for (j = 0; j < spec->auto_dmic_cnt; j++) { if (get_connection_index(codec, nid, spec->auto_dmic_nids[j]) >= 0) { /* dmux for adc_nids[i] */ if (!spec->auto_dmux_nids[i]) spec->auto_dmux_nids[i] = nid; break; } } break; } } } static void stac92hd8x_fill_auto_spec(struct hda_codec *codec) { hda_nid_t nid, end_nid; 
unsigned int wid_caps, wid_type; struct sigmatel_spec *spec = codec->spec; end_nid = codec->start_nid + codec->num_nodes; for (nid = codec->start_nid; nid < end_nid; nid++) { wid_caps = get_wcaps(codec, nid); wid_type = get_wcaps_type(wid_caps); if (wid_type == AC_WID_PIN) stac92hd8x_add_pin(codec, nid); if (wid_type == AC_WID_AUD_IN && !(wid_caps & AC_WCAP_DIGITAL)) stac92hd8x_add_adc(codec, nid); } for (nid = codec->start_nid; nid < end_nid; nid++) { wid_caps = get_wcaps(codec, nid); wid_type = get_wcaps_type(wid_caps); if (wid_type == AC_WID_AUD_SEL) stac92hd8x_add_mux(codec, nid); } spec->pin_nids = spec->auto_pin_nids; spec->num_pins = spec->auto_pin_cnt; spec->adc_nids = spec->auto_adc_nids; spec->num_adcs = spec->auto_adc_cnt; spec->capvols = spec->auto_capvols; spec->capsws = spec->auto_capvols; spec->num_caps = spec->auto_adc_cnt; spec->mux_nids = spec->auto_mux_nids; spec->num_muxes = spec->auto_adc_cnt; spec->dmux_nids = spec->auto_dmux_nids; spec->num_dmuxes = spec->auto_adc_cnt; spec->dmic_nids = spec->auto_dmic_nids; spec->num_dmics = spec->auto_dmic_cnt; } static int patch_stac92hd83xxx(struct hda_codec *codec) { struct sigmatel_spec *spec; int err; spec = kzalloc(sizeof(*spec), GFP_KERNEL); if (spec == NULL) return -ENOMEM; if (hp_bnb2011_with_dock(codec)) { snd_hda_codec_set_pincfg(codec, 0xa, 0x2101201f); snd_hda_codec_set_pincfg(codec, 0xf, 0x2181205e); } codec->no_trigger_sense = 1; codec->spec = spec; stac92hd8x_fill_auto_spec(codec); spec->linear_tone_beep = 0; codec->slave_dig_outs = stac92hd83xxx_slave_dig_outs; spec->digbeep_nid = 0x21; spec->pwr_nids = stac92hd83xxx_pwr_nids; spec->num_pwrs = ARRAY_SIZE(stac92hd83xxx_pwr_nids); spec->multiout.dac_nids = spec->dac_nids; spec->init = stac92hd83xxx_core_init; spec->board_config = snd_hda_check_board_config(codec, STAC_92HD83XXX_MODELS, stac92hd83xxx_models, stac92hd83xxx_cfg_tbl); /* check codec subsystem id if not found */ if (spec->board_config < 0) spec->board_config = 
snd_hda_check_board_codec_sid_config(codec, STAC_92HD83XXX_MODELS, stac92hd83xxx_models, stac92hd83xxx_codec_id_cfg_tbl); again: if (spec->board_config < 0) snd_printdd(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n", codec->chip_name); else stac92xx_set_config_regs(codec, stac92hd83xxx_brd_tbl[spec->board_config]); codec->patch_ops = stac92xx_patch_ops; switch (spec->board_config) { case STAC_HP_ZEPHYR: spec->init = stac92hd83xxx_hp_zephyr_init; break; } if (find_mute_led_cfg(codec, -1/*no default cfg*/)) snd_printd("mute LED gpio %d polarity %d\n", spec->gpio_led, spec->gpio_led_polarity); if (spec->gpio_led) { if (!spec->vref_mute_led_nid) { spec->gpio_mask |= spec->gpio_led; spec->gpio_dir |= spec->gpio_led; spec->gpio_data |= spec->gpio_led; } else { codec->patch_ops.set_power_state = stac92xx_set_power_state; } #ifdef CONFIG_PM codec->patch_ops.pre_resume = stac92xx_pre_resume; #endif } err = stac92xx_parse_auto_config(codec); if (!err) { if (spec->board_config < 0) { printk(KERN_WARNING "hda_codec: No auto-config is " "available, default to model=ref\n"); spec->board_config = STAC_92HD83XXX_REF; goto again; } err = -EINVAL; } if (err < 0) { stac92xx_free(codec); return err; } codec->proc_widget_hook = stac92hd_proc_hook; return 0; } static int stac92hd71bxx_connected_smuxes(struct hda_codec *codec, hda_nid_t dig0pin) { struct sigmatel_spec *spec = codec->spec; int idx; for (idx = 0; idx < spec->num_pins; idx++) if (spec->pin_nids[idx] == dig0pin) break; if ((idx + 2) >= spec->num_pins) return 0; /* dig1pin case */ if (stac_get_defcfg_connect(codec, idx + 1) != AC_JACK_PORT_NONE) return 2; /* dig0pin + dig2pin case */ if (stac_get_defcfg_connect(codec, idx + 2) != AC_JACK_PORT_NONE) return 2; if (stac_get_defcfg_connect(codec, idx) != AC_JACK_PORT_NONE) return 1; else return 0; } /* HP dv7 bass switch - GPIO5 */ #define stac_hp_bass_gpio_info snd_ctl_boolean_mono_info static int stac_hp_bass_gpio_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value 
*ucontrol) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct sigmatel_spec *spec = codec->spec; ucontrol->value.integer.value[0] = !!(spec->gpio_data & 0x20); return 0; } static int stac_hp_bass_gpio_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct sigmatel_spec *spec = codec->spec; unsigned int gpio_data; gpio_data = (spec->gpio_data & ~0x20) | (ucontrol->value.integer.value[0] ? 0x20 : 0); if (gpio_data == spec->gpio_data) return 0; spec->gpio_data = gpio_data; stac_gpio_set(codec, spec->gpio_mask, spec->gpio_dir, spec->gpio_data); return 1; } static const struct snd_kcontrol_new stac_hp_bass_sw_ctrl = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .info = stac_hp_bass_gpio_info, .get = stac_hp_bass_gpio_get, .put = stac_hp_bass_gpio_put, }; static int stac_add_hp_bass_switch(struct hda_codec *codec) { struct sigmatel_spec *spec = codec->spec; if (!stac_control_new(spec, &stac_hp_bass_sw_ctrl, "Bass Speaker Playback Switch", 0)) return -ENOMEM; spec->gpio_mask |= 0x20; spec->gpio_dir |= 0x20; spec->gpio_data |= 0x20; return 0; } static int patch_stac92hd71bxx(struct hda_codec *codec) { struct sigmatel_spec *spec; const struct hda_verb *unmute_init = stac92hd71bxx_unmute_core_init; unsigned int pin_cfg; int err = 0; spec = kzalloc(sizeof(*spec), GFP_KERNEL); if (spec == NULL) return -ENOMEM; codec->no_trigger_sense = 1; codec->spec = spec; spec->linear_tone_beep = 0; codec->patch_ops = stac92xx_patch_ops; spec->num_pins = STAC92HD71BXX_NUM_PINS; switch (codec->vendor_id) { case 0x111d76b6: case 0x111d76b7: spec->pin_nids = stac92hd71bxx_pin_nids_4port; break; case 0x111d7603: case 0x111d7608: /* On 92HD75Bx 0x27 isn't a pin nid */ spec->num_pins--; /* fallthrough */ default: spec->pin_nids = stac92hd71bxx_pin_nids_6port; } spec->num_pwrs = ARRAY_SIZE(stac92hd71bxx_pwr_nids); spec->board_config = snd_hda_check_board_config(codec, STAC_92HD71BXX_MODELS, stac92hd71bxx_models, 
stac92hd71bxx_cfg_tbl); again: if (spec->board_config < 0) snd_printdd(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n", codec->chip_name); else stac92xx_set_config_regs(codec, stac92hd71bxx_brd_tbl[spec->board_config]); if (spec->board_config != STAC_92HD71BXX_REF) { /* GPIO0 = EAPD */ spec->gpio_mask = 0x01; spec->gpio_dir = 0x01; spec->gpio_data = 0x01; } spec->dmic_nids = stac92hd71bxx_dmic_nids; spec->dmux_nids = stac92hd71bxx_dmux_nids; spec->num_caps = STAC92HD71BXX_NUM_CAPS; spec->capvols = stac92hd71bxx_capvols; spec->capsws = stac92hd71bxx_capsws; switch (codec->vendor_id) { case 0x111d76b6: /* 4 Port without Analog Mixer */ case 0x111d76b7: unmute_init++; /* fallthru */ case 0x111d76b4: /* 6 Port without Analog Mixer */ case 0x111d76b5: spec->init = stac92hd71bxx_core_init; codec->slave_dig_outs = stac92hd71bxx_slave_dig_outs; spec->num_dmics = stac92xx_connected_ports(codec, stac92hd71bxx_dmic_nids, STAC92HD71BXX_NUM_DMICS); break; case 0x111d7608: /* 5 Port with Analog Mixer */ switch (spec->board_config) { case STAC_HP_M4: /* Enable VREF power saving on GPIO1 detect */ err = stac_add_event(codec, codec->afg, STAC_VREF_EVENT, 0x02); if (err < 0) return err; snd_hda_codec_write_cache(codec, codec->afg, 0, AC_VERB_SET_GPIO_UNSOLICITED_RSP_MASK, 0x02); snd_hda_jack_detect_enable(codec, codec->afg, 0); spec->gpio_mask |= 0x02; break; } if ((codec->revision_id & 0xf) == 0 || (codec->revision_id & 0xf) == 1) spec->stream_delay = 40; /* 40 milliseconds */ /* disable VSW */ spec->init = stac92hd71bxx_core_init; unmute_init++; snd_hda_codec_set_pincfg(codec, 0x0f, 0x40f000f0); snd_hda_codec_set_pincfg(codec, 0x19, 0x40f000f3); spec->dmic_nids = stac92hd71bxx_dmic_5port_nids; spec->num_dmics = stac92xx_connected_ports(codec, stac92hd71bxx_dmic_5port_nids, STAC92HD71BXX_NUM_DMICS - 1); break; case 0x111d7603: /* 6 Port with Analog Mixer */ if ((codec->revision_id & 0xf) == 1) spec->stream_delay = 40; /* 40 milliseconds */ /* fallthru */ default: spec->init = 
stac92hd71bxx_core_init; codec->slave_dig_outs = stac92hd71bxx_slave_dig_outs; spec->num_dmics = stac92xx_connected_ports(codec, stac92hd71bxx_dmic_nids, STAC92HD71BXX_NUM_DMICS); break; } if (get_wcaps(codec, 0xa) & AC_WCAP_IN_AMP) snd_hda_sequence_write_cache(codec, unmute_init); spec->aloopback_ctl = stac92hd71bxx_loopback; spec->aloopback_mask = 0x50; spec->aloopback_shift = 0; spec->powerdown_adcs = 1; spec->digbeep_nid = 0x26; spec->mux_nids = stac92hd71bxx_mux_nids; spec->adc_nids = stac92hd71bxx_adc_nids; spec->smux_nids = stac92hd71bxx_smux_nids; spec->pwr_nids = stac92hd71bxx_pwr_nids; spec->num_muxes = ARRAY_SIZE(stac92hd71bxx_mux_nids); spec->num_adcs = ARRAY_SIZE(stac92hd71bxx_adc_nids); spec->num_dmuxes = ARRAY_SIZE(stac92hd71bxx_dmux_nids); spec->num_smuxes = stac92hd71bxx_connected_smuxes(codec, 0x1e); snd_printdd("Found board config: %d\n", spec->board_config); switch (spec->board_config) { case STAC_HP_M4: /* enable internal microphone */ snd_hda_codec_set_pincfg(codec, 0x0e, 0x01813040); stac92xx_auto_set_pinctl(codec, 0x0e, AC_PINCTL_IN_EN | AC_PINCTL_VREF_80); /* fallthru */ case STAC_DELL_M4_2: spec->num_dmics = 0; spec->num_smuxes = 0; spec->num_dmuxes = 0; break; case STAC_DELL_M4_1: case STAC_DELL_M4_3: spec->num_dmics = 1; spec->num_smuxes = 0; spec->num_dmuxes = 1; break; case STAC_HP_DV4_1222NR: spec->num_dmics = 1; /* I don't know if it needs 1 or 2 smuxes - will wait for * bug reports to fix if needed */ spec->num_smuxes = 1; spec->num_dmuxes = 1; /* fallthrough */ case STAC_HP_DV4: spec->gpio_led = 0x01; /* fallthrough */ case STAC_HP_DV5: snd_hda_codec_set_pincfg(codec, 0x0d, 0x90170010); stac92xx_auto_set_pinctl(codec, 0x0d, AC_PINCTL_OUT_EN); /* HP dv6 gives the headphone pin as a line-out. Thus we * need to set hp_detect flag here to force to enable HP * detection. 
*/ spec->hp_detect = 1; break; case STAC_HP_HDX: spec->num_dmics = 1; spec->num_dmuxes = 1; spec->num_smuxes = 1; spec->gpio_led = 0x08; break; } if (hp_blike_system(codec->subsystem_id)) { pin_cfg = snd_hda_codec_get_pincfg(codec, 0x0f); if (get_defcfg_device(pin_cfg) == AC_JACK_LINE_OUT || get_defcfg_device(pin_cfg) == AC_JACK_SPEAKER || get_defcfg_device(pin_cfg) == AC_JACK_HP_OUT) { /* It was changed in the BIOS to just satisfy MS DTM. * Lets turn it back into slaved HP */ pin_cfg = (pin_cfg & (~AC_DEFCFG_DEVICE)) | (AC_JACK_HP_OUT << AC_DEFCFG_DEVICE_SHIFT); pin_cfg = (pin_cfg & (~(AC_DEFCFG_DEF_ASSOC | AC_DEFCFG_SEQUENCE))) | 0x1f; snd_hda_codec_set_pincfg(codec, 0x0f, pin_cfg); } } if (find_mute_led_cfg(codec, 1)) snd_printd("mute LED gpio %d polarity %d\n", spec->gpio_led, spec->gpio_led_polarity); if (spec->gpio_led) { if (!spec->vref_mute_led_nid) { spec->gpio_mask |= spec->gpio_led; spec->gpio_dir |= spec->gpio_led; spec->gpio_data |= spec->gpio_led; } else { codec->patch_ops.set_power_state = stac92xx_set_power_state; } #ifdef CONFIG_PM codec->patch_ops.pre_resume = stac92xx_pre_resume; #endif } spec->multiout.dac_nids = spec->dac_nids; err = stac92xx_parse_auto_config(codec); if (!err) { if (spec->board_config < 0) { printk(KERN_WARNING "hda_codec: No auto-config is " "available, default to model=ref\n"); spec->board_config = STAC_92HD71BXX_REF; goto again; } err = -EINVAL; } if (err < 0) { stac92xx_free(codec); return err; } /* enable bass on HP dv7 */ if (spec->board_config == STAC_HP_DV4 || spec->board_config == STAC_HP_DV5) { unsigned int cap; cap = snd_hda_param_read(codec, 0x1, AC_PAR_GPIO_CAP); cap &= AC_GPIO_IO_COUNT; if (cap >= 6) stac_add_hp_bass_switch(codec); } codec->proc_widget_hook = stac92hd7x_proc_hook; return 0; } static int patch_stac922x(struct hda_codec *codec) { struct sigmatel_spec *spec; int err; spec = kzalloc(sizeof(*spec), GFP_KERNEL); if (spec == NULL) return -ENOMEM; codec->no_trigger_sense = 1; codec->spec = spec; 
spec->linear_tone_beep = 1; spec->num_pins = ARRAY_SIZE(stac922x_pin_nids); spec->pin_nids = stac922x_pin_nids; spec->board_config = snd_hda_check_board_config(codec, STAC_922X_MODELS, stac922x_models, stac922x_cfg_tbl); if (spec->board_config == STAC_INTEL_MAC_AUTO) { spec->gpio_mask = spec->gpio_dir = 0x03; spec->gpio_data = 0x03; /* Intel Macs have all same PCI SSID, so we need to check * codec SSID to distinguish the exact models */ printk(KERN_INFO "hda_codec: STAC922x, Apple subsys_id=%x\n", codec->subsystem_id); switch (codec->subsystem_id) { case 0x106b0800: spec->board_config = STAC_INTEL_MAC_V1; break; case 0x106b0600: case 0x106b0700: spec->board_config = STAC_INTEL_MAC_V2; break; case 0x106b0e00: case 0x106b0f00: case 0x106b1600: case 0x106b1700: case 0x106b0200: case 0x106b1e00: spec->board_config = STAC_INTEL_MAC_V3; break; case 0x106b1a00: case 0x00000100: spec->board_config = STAC_INTEL_MAC_V4; break; case 0x106b0a00: case 0x106b2200: spec->board_config = STAC_INTEL_MAC_V5; break; default: spec->board_config = STAC_INTEL_MAC_V3; break; } } again: if (spec->board_config < 0) snd_printdd(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n", codec->chip_name); else stac92xx_set_config_regs(codec, stac922x_brd_tbl[spec->board_config]); spec->adc_nids = stac922x_adc_nids; spec->mux_nids = stac922x_mux_nids; spec->num_muxes = ARRAY_SIZE(stac922x_mux_nids); spec->num_adcs = ARRAY_SIZE(stac922x_adc_nids); spec->num_dmics = 0; spec->num_pwrs = 0; spec->init = stac922x_core_init; spec->num_caps = STAC922X_NUM_CAPS; spec->capvols = stac922x_capvols; spec->capsws = stac922x_capsws; spec->multiout.dac_nids = spec->dac_nids; err = stac92xx_parse_auto_config(codec); if (!err) { if (spec->board_config < 0) { printk(KERN_WARNING "hda_codec: No auto-config is " "available, default to model=ref\n"); spec->board_config = STAC_D945_REF; goto again; } err = -EINVAL; } if (err < 0) { stac92xx_free(codec); return err; } codec->patch_ops = stac92xx_patch_ops; /* Fix Mux capture 
level; max to 2 */ snd_hda_override_amp_caps(codec, 0x12, HDA_OUTPUT, (0 << AC_AMPCAP_OFFSET_SHIFT) | (2 << AC_AMPCAP_NUM_STEPS_SHIFT) | (0x27 << AC_AMPCAP_STEP_SIZE_SHIFT) | (0 << AC_AMPCAP_MUTE_SHIFT)); return 0; } static int patch_stac927x(struct hda_codec *codec) { struct sigmatel_spec *spec; int err; spec = kzalloc(sizeof(*spec), GFP_KERNEL); if (spec == NULL) return -ENOMEM; codec->no_trigger_sense = 1; codec->spec = spec; spec->linear_tone_beep = 1; codec->slave_dig_outs = stac927x_slave_dig_outs; spec->num_pins = ARRAY_SIZE(stac927x_pin_nids); spec->pin_nids = stac927x_pin_nids; spec->board_config = snd_hda_check_board_config(codec, STAC_927X_MODELS, stac927x_models, stac927x_cfg_tbl); again: if (spec->board_config < 0) snd_printdd(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n", codec->chip_name); else stac92xx_set_config_regs(codec, stac927x_brd_tbl[spec->board_config]); spec->digbeep_nid = 0x23; spec->adc_nids = stac927x_adc_nids; spec->num_adcs = ARRAY_SIZE(stac927x_adc_nids); spec->mux_nids = stac927x_mux_nids; spec->num_muxes = ARRAY_SIZE(stac927x_mux_nids); spec->smux_nids = stac927x_smux_nids; spec->num_smuxes = ARRAY_SIZE(stac927x_smux_nids); spec->spdif_labels = stac927x_spdif_labels; spec->dac_list = stac927x_dac_nids; spec->multiout.dac_nids = spec->dac_nids; if (spec->board_config != STAC_D965_REF) { /* GPIO0 High = Enable EAPD */ spec->eapd_mask = spec->gpio_mask = 0x01; spec->gpio_dir = spec->gpio_data = 0x01; } switch (spec->board_config) { case STAC_D965_3ST: case STAC_D965_5ST: /* GPIO0 High = Enable EAPD */ spec->num_dmics = 0; spec->init = d965_core_init; break; case STAC_DELL_BIOS: switch (codec->subsystem_id) { case 0x10280209: case 0x1028022e: /* correct the device field to SPDIF out */ snd_hda_codec_set_pincfg(codec, 0x21, 0x01442070); break; } /* configure the analog microphone on some laptops */ snd_hda_codec_set_pincfg(codec, 0x0c, 0x90a79130); /* correct the front output jack as a hp out */ snd_hda_codec_set_pincfg(codec, 0x0f, 
0x0227011f); /* correct the front input jack as a mic */ snd_hda_codec_set_pincfg(codec, 0x0e, 0x02a79130); /* fallthru */ case STAC_DELL_3ST: if (codec->subsystem_id != 0x1028022f) { /* GPIO2 High = Enable EAPD */ spec->eapd_mask = spec->gpio_mask = 0x04; spec->gpio_dir = spec->gpio_data = 0x04; } spec->dmic_nids = stac927x_dmic_nids; spec->num_dmics = STAC927X_NUM_DMICS; spec->init = dell_3st_core_init; spec->dmux_nids = stac927x_dmux_nids; spec->num_dmuxes = ARRAY_SIZE(stac927x_dmux_nids); break; case STAC_927X_VOLKNOB: spec->num_dmics = 0; spec->init = stac927x_volknob_core_init; break; default: spec->num_dmics = 0; spec->init = stac927x_core_init; break; } spec->num_caps = STAC927X_NUM_CAPS; spec->capvols = stac927x_capvols; spec->capsws = stac927x_capsws; spec->num_pwrs = 0; spec->aloopback_ctl = stac927x_loopback; spec->aloopback_mask = 0x40; spec->aloopback_shift = 0; spec->eapd_switch = 1; err = stac92xx_parse_auto_config(codec); if (!err) { if (spec->board_config < 0) { printk(KERN_WARNING "hda_codec: No auto-config is " "available, default to model=ref\n"); spec->board_config = STAC_D965_REF; goto again; } err = -EINVAL; } if (err < 0) { stac92xx_free(codec); return err; } codec->patch_ops = stac92xx_patch_ops; codec->proc_widget_hook = stac927x_proc_hook; /* * !!FIXME!! * The STAC927x seem to require fairly long delays for certain * command sequences. With too short delays (even if the answer * is set to RIRB properly), it results in the silence output * on some hardwares like Dell. * * The below flag enables the longer delay (see get_response * in hda_intel.c). 
*/ codec->bus->needs_damn_long_delay = 1; /* no jack detecion for ref-no-jd model */ if (spec->board_config == STAC_D965_REF_NO_JD) spec->hp_detect = 0; return 0; } static int patch_stac9205(struct hda_codec *codec) { struct sigmatel_spec *spec; int err; spec = kzalloc(sizeof(*spec), GFP_KERNEL); if (spec == NULL) return -ENOMEM; codec->no_trigger_sense = 1; codec->spec = spec; spec->linear_tone_beep = 1; spec->num_pins = ARRAY_SIZE(stac9205_pin_nids); spec->pin_nids = stac9205_pin_nids; spec->board_config = snd_hda_check_board_config(codec, STAC_9205_MODELS, stac9205_models, stac9205_cfg_tbl); again: if (spec->board_config < 0) snd_printdd(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n", codec->chip_name); else stac92xx_set_config_regs(codec, stac9205_brd_tbl[spec->board_config]); spec->digbeep_nid = 0x23; spec->adc_nids = stac9205_adc_nids; spec->num_adcs = ARRAY_SIZE(stac9205_adc_nids); spec->mux_nids = stac9205_mux_nids; spec->num_muxes = ARRAY_SIZE(stac9205_mux_nids); spec->smux_nids = stac9205_smux_nids; spec->num_smuxes = ARRAY_SIZE(stac9205_smux_nids); spec->dmic_nids = stac9205_dmic_nids; spec->num_dmics = STAC9205_NUM_DMICS; spec->dmux_nids = stac9205_dmux_nids; spec->num_dmuxes = ARRAY_SIZE(stac9205_dmux_nids); spec->num_pwrs = 0; spec->init = stac9205_core_init; spec->aloopback_ctl = stac9205_loopback; spec->num_caps = STAC9205_NUM_CAPS; spec->capvols = stac9205_capvols; spec->capsws = stac9205_capsws; spec->aloopback_mask = 0x40; spec->aloopback_shift = 0; /* Turn on/off EAPD per HP plugging */ if (spec->board_config != STAC_9205_EAPD) spec->eapd_switch = 1; spec->multiout.dac_nids = spec->dac_nids; switch (spec->board_config){ case STAC_9205_DELL_M43: /* Enable SPDIF in/out */ snd_hda_codec_set_pincfg(codec, 0x1f, 0x01441030); snd_hda_codec_set_pincfg(codec, 0x20, 0x1c410030); /* Enable unsol response for GPIO4/Dock HP connection */ err = stac_add_event(codec, codec->afg, STAC_VREF_EVENT, 0x01); if (err < 0) return err; 
snd_hda_codec_write_cache(codec, codec->afg, 0, AC_VERB_SET_GPIO_UNSOLICITED_RSP_MASK, 0x10); snd_hda_jack_detect_enable(codec, codec->afg, 0); spec->gpio_dir = 0x0b; spec->eapd_mask = 0x01; spec->gpio_mask = 0x1b; spec->gpio_mute = 0x10; /* GPIO0 High = EAPD, GPIO1 Low = Headphone Mute, * GPIO3 Low = DRM */ spec->gpio_data = 0x01; break; case STAC_9205_REF: /* SPDIF-In enabled */ break; default: /* GPIO0 High = EAPD */ spec->eapd_mask = spec->gpio_mask = spec->gpio_dir = 0x1; spec->gpio_data = 0x01; break; } err = stac92xx_parse_auto_config(codec); if (!err) { if (spec->board_config < 0) { printk(KERN_WARNING "hda_codec: No auto-config is " "available, default to model=ref\n"); spec->board_config = STAC_9205_REF; goto again; } err = -EINVAL; } if (err < 0) { stac92xx_free(codec); return err; } codec->patch_ops = stac92xx_patch_ops; codec->proc_widget_hook = stac9205_proc_hook; return 0; } /* * STAC9872 hack */ static const struct hda_verb stac9872_core_init[] = { {0x15, AC_VERB_SET_CONNECT_SEL, 0x1}, /* mic-sel: 0a,0d,14,02 */ {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE}, /* Mic-in -> 0x9 */ {} }; static const hda_nid_t stac9872_pin_nids[] = { 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x11, 0x13, 0x14, }; static const hda_nid_t stac9872_adc_nids[] = { 0x8 /*,0x6*/ }; static const hda_nid_t stac9872_mux_nids[] = { 0x15 }; static const unsigned long stac9872_capvols[] = { HDA_COMPOSE_AMP_VAL(0x09, 3, 0, HDA_INPUT), }; #define stac9872_capsws stac9872_capvols static const unsigned int stac9872_vaio_pin_configs[9] = { 0x03211020, 0x411111f0, 0x411111f0, 0x03a15030, 0x411111f0, 0x90170110, 0x411111f0, 0x411111f0, 0x90a7013e }; static const char * const stac9872_models[STAC_9872_MODELS] = { [STAC_9872_AUTO] = "auto", [STAC_9872_VAIO] = "vaio", }; static const unsigned int *stac9872_brd_tbl[STAC_9872_MODELS] = { [STAC_9872_VAIO] = stac9872_vaio_pin_configs, }; static const struct snd_pci_quirk stac9872_cfg_tbl[] = { SND_PCI_QUIRK_MASK(0x104d, 0xfff0, 0x81e0, "Sony VAIO 
F/S", STAC_9872_VAIO), {} /* terminator */ }; static int patch_stac9872(struct hda_codec *codec) { struct sigmatel_spec *spec; int err; spec = kzalloc(sizeof(*spec), GFP_KERNEL); if (spec == NULL) return -ENOMEM; codec->no_trigger_sense = 1; codec->spec = spec; spec->linear_tone_beep = 1; spec->num_pins = ARRAY_SIZE(stac9872_pin_nids); spec->pin_nids = stac9872_pin_nids; spec->board_config = snd_hda_check_board_config(codec, STAC_9872_MODELS, stac9872_models, stac9872_cfg_tbl); if (spec->board_config < 0) snd_printdd(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n", codec->chip_name); else stac92xx_set_config_regs(codec, stac9872_brd_tbl[spec->board_config]); spec->multiout.dac_nids = spec->dac_nids; spec->num_adcs = ARRAY_SIZE(stac9872_adc_nids); spec->adc_nids = stac9872_adc_nids; spec->num_muxes = ARRAY_SIZE(stac9872_mux_nids); spec->mux_nids = stac9872_mux_nids; spec->init = stac9872_core_init; spec->num_caps = 1; spec->capvols = stac9872_capvols; spec->capsws = stac9872_capsws; err = stac92xx_parse_auto_config(codec); if (err < 0) { stac92xx_free(codec); return -EINVAL; } spec->input_mux = &spec->private_imux; codec->patch_ops = stac92xx_patch_ops; return 0; } /* * patch entries */ static const struct hda_codec_preset snd_hda_preset_sigmatel[] = { { .id = 0x83847690, .name = "STAC9200", .patch = patch_stac9200 }, { .id = 0x83847882, .name = "STAC9220 A1", .patch = patch_stac922x }, { .id = 0x83847680, .name = "STAC9221 A1", .patch = patch_stac922x }, { .id = 0x83847880, .name = "STAC9220 A2", .patch = patch_stac922x }, { .id = 0x83847681, .name = "STAC9220D/9223D A2", .patch = patch_stac922x }, { .id = 0x83847682, .name = "STAC9221 A2", .patch = patch_stac922x }, { .id = 0x83847683, .name = "STAC9221D A2", .patch = patch_stac922x }, { .id = 0x83847618, .name = "STAC9227", .patch = patch_stac927x }, { .id = 0x83847619, .name = "STAC9227", .patch = patch_stac927x }, { .id = 0x83847616, .name = "STAC9228", .patch = patch_stac927x }, { .id = 0x83847617, .name = 
"STAC9228", .patch = patch_stac927x }, { .id = 0x83847614, .name = "STAC9229", .patch = patch_stac927x }, { .id = 0x83847615, .name = "STAC9229", .patch = patch_stac927x }, { .id = 0x83847620, .name = "STAC9274", .patch = patch_stac927x }, { .id = 0x83847621, .name = "STAC9274D", .patch = patch_stac927x }, { .id = 0x83847622, .name = "STAC9273X", .patch = patch_stac927x }, { .id = 0x83847623, .name = "STAC9273D", .patch = patch_stac927x }, { .id = 0x83847624, .name = "STAC9272X", .patch = patch_stac927x }, { .id = 0x83847625, .name = "STAC9272D", .patch = patch_stac927x }, { .id = 0x83847626, .name = "STAC9271X", .patch = patch_stac927x }, { .id = 0x83847627, .name = "STAC9271D", .patch = patch_stac927x }, { .id = 0x83847628, .name = "STAC9274X5NH", .patch = patch_stac927x }, { .id = 0x83847629, .name = "STAC9274D5NH", .patch = patch_stac927x }, { .id = 0x83847632, .name = "STAC9202", .patch = patch_stac925x }, { .id = 0x83847633, .name = "STAC9202D", .patch = patch_stac925x }, { .id = 0x83847634, .name = "STAC9250", .patch = patch_stac925x }, { .id = 0x83847635, .name = "STAC9250D", .patch = patch_stac925x }, { .id = 0x83847636, .name = "STAC9251", .patch = patch_stac925x }, { .id = 0x83847637, .name = "STAC9250D", .patch = patch_stac925x }, { .id = 0x83847645, .name = "92HD206X", .patch = patch_stac927x }, { .id = 0x83847646, .name = "92HD206D", .patch = patch_stac927x }, /* The following does not take into account .id=0x83847661 when subsys = * 104D0C00 which is STAC9225s. Because of this, some SZ Notebooks are * currently not fully supported. 
*/ { .id = 0x83847661, .name = "CXD9872RD/K", .patch = patch_stac9872 }, { .id = 0x83847662, .name = "STAC9872AK", .patch = patch_stac9872 }, { .id = 0x83847664, .name = "CXD9872AKD", .patch = patch_stac9872 }, { .id = 0x83847698, .name = "STAC9205", .patch = patch_stac9205 }, { .id = 0x838476a0, .name = "STAC9205", .patch = patch_stac9205 }, { .id = 0x838476a1, .name = "STAC9205D", .patch = patch_stac9205 }, { .id = 0x838476a2, .name = "STAC9204", .patch = patch_stac9205 }, { .id = 0x838476a3, .name = "STAC9204D", .patch = patch_stac9205 }, { .id = 0x838476a4, .name = "STAC9255", .patch = patch_stac9205 }, { .id = 0x838476a5, .name = "STAC9255D", .patch = patch_stac9205 }, { .id = 0x838476a6, .name = "STAC9254", .patch = patch_stac9205 }, { .id = 0x838476a7, .name = "STAC9254D", .patch = patch_stac9205 }, { .id = 0x111d7603, .name = "92HD75B3X5", .patch = patch_stac92hd71bxx}, { .id = 0x111d7604, .name = "92HD83C1X5", .patch = patch_stac92hd83xxx}, { .id = 0x111d76d4, .name = "92HD83C1C5", .patch = patch_stac92hd83xxx}, { .id = 0x111d7605, .name = "92HD81B1X5", .patch = patch_stac92hd83xxx}, { .id = 0x111d76d5, .name = "92HD81B1C5", .patch = patch_stac92hd83xxx}, { .id = 0x111d76d1, .name = "92HD87B1/3", .patch = patch_stac92hd83xxx}, { .id = 0x111d76d9, .name = "92HD87B2/4", .patch = patch_stac92hd83xxx}, { .id = 0x111d7666, .name = "92HD88B3", .patch = patch_stac92hd83xxx}, { .id = 0x111d7667, .name = "92HD88B1", .patch = patch_stac92hd83xxx}, { .id = 0x111d7668, .name = "92HD88B2", .patch = patch_stac92hd83xxx}, { .id = 0x111d7669, .name = "92HD88B4", .patch = patch_stac92hd83xxx}, { .id = 0x111d7608, .name = "92HD75B2X5", .patch = patch_stac92hd71bxx}, { .id = 0x111d7674, .name = "92HD73D1X5", .patch = patch_stac92hd73xx }, { .id = 0x111d7675, .name = "92HD73C1X5", .patch = patch_stac92hd73xx }, { .id = 0x111d7676, .name = "92HD73E1X5", .patch = patch_stac92hd73xx }, { .id = 0x111d76b0, .name = "92HD71B8X", .patch = patch_stac92hd71bxx }, { .id = 0x111d76b1, 
.name = "92HD71B8X", .patch = patch_stac92hd71bxx }, { .id = 0x111d76b2, .name = "92HD71B7X", .patch = patch_stac92hd71bxx }, { .id = 0x111d76b3, .name = "92HD71B7X", .patch = patch_stac92hd71bxx }, { .id = 0x111d76b4, .name = "92HD71B6X", .patch = patch_stac92hd71bxx }, { .id = 0x111d76b5, .name = "92HD71B6X", .patch = patch_stac92hd71bxx }, { .id = 0x111d76b6, .name = "92HD71B5X", .patch = patch_stac92hd71bxx }, { .id = 0x111d76b7, .name = "92HD71B5X", .patch = patch_stac92hd71bxx }, { .id = 0x111d76c0, .name = "92HD89C3", .patch = patch_stac92hd73xx }, { .id = 0x111d76c1, .name = "92HD89C2", .patch = patch_stac92hd73xx }, { .id = 0x111d76c2, .name = "92HD89C1", .patch = patch_stac92hd73xx }, { .id = 0x111d76c3, .name = "92HD89B3", .patch = patch_stac92hd73xx }, { .id = 0x111d76c4, .name = "92HD89B2", .patch = patch_stac92hd73xx }, { .id = 0x111d76c5, .name = "92HD89B1", .patch = patch_stac92hd73xx }, { .id = 0x111d76c6, .name = "92HD89E3", .patch = patch_stac92hd73xx }, { .id = 0x111d76c7, .name = "92HD89E2", .patch = patch_stac92hd73xx }, { .id = 0x111d76c8, .name = "92HD89E1", .patch = patch_stac92hd73xx }, { .id = 0x111d76c9, .name = "92HD89D3", .patch = patch_stac92hd73xx }, { .id = 0x111d76ca, .name = "92HD89D2", .patch = patch_stac92hd73xx }, { .id = 0x111d76cb, .name = "92HD89D1", .patch = patch_stac92hd73xx }, { .id = 0x111d76cc, .name = "92HD89F3", .patch = patch_stac92hd73xx }, { .id = 0x111d76cd, .name = "92HD89F2", .patch = patch_stac92hd73xx }, { .id = 0x111d76ce, .name = "92HD89F1", .patch = patch_stac92hd73xx }, { .id = 0x111d76df, .name = "92HD93BXX", .patch = patch_stac92hd83xxx}, { .id = 0x111d76e0, .name = "92HD91BXX", .patch = patch_stac92hd83xxx}, { .id = 0x111d76e3, .name = "92HD98BXX", .patch = patch_stac92hd83xxx}, { .id = 0x111d76e5, .name = "92HD99BXX", .patch = patch_stac92hd83xxx}, { .id = 0x111d76e7, .name = "92HD90BXX", .patch = patch_stac92hd83xxx}, { .id = 0x111d76e8, .name = "92HD66B1X5", .patch = patch_stac92hd83xxx}, { .id = 
0x111d76e9, .name = "92HD66B2X5", .patch = patch_stac92hd83xxx}, { .id = 0x111d76ea, .name = "92HD66B3X5", .patch = patch_stac92hd83xxx}, { .id = 0x111d76eb, .name = "92HD66C1X5", .patch = patch_stac92hd83xxx}, { .id = 0x111d76ec, .name = "92HD66C2X5", .patch = patch_stac92hd83xxx}, { .id = 0x111d76ed, .name = "92HD66C3X5", .patch = patch_stac92hd83xxx}, { .id = 0x111d76ee, .name = "92HD66B1X3", .patch = patch_stac92hd83xxx}, { .id = 0x111d76ef, .name = "92HD66B2X3", .patch = patch_stac92hd83xxx}, { .id = 0x111d76f0, .name = "92HD66B3X3", .patch = patch_stac92hd83xxx}, { .id = 0x111d76f1, .name = "92HD66C1X3", .patch = patch_stac92hd83xxx}, { .id = 0x111d76f2, .name = "92HD66C2X3", .patch = patch_stac92hd83xxx}, { .id = 0x111d76f3, .name = "92HD66C3/65", .patch = patch_stac92hd83xxx}, {} /* terminator */ }; MODULE_ALIAS("snd-hda-codec-id:8384*"); MODULE_ALIAS("snd-hda-codec-id:111d*"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("IDT/Sigmatel HD-audio codec"); static struct hda_codec_preset_list sigmatel_list = { .preset = snd_hda_preset_sigmatel, .owner = THIS_MODULE, }; static int __init patch_sigmatel_init(void) { return snd_hda_add_codec_preset(&sigmatel_list); } static void __exit patch_sigmatel_exit(void) { snd_hda_delete_codec_preset(&sigmatel_list); } module_init(patch_sigmatel_init) module_exit(patch_sigmatel_exit)
gpl-2.0
jeremytrimble/adi-linux
sound/core/seq/oss/seq_oss_synth.c
613
14921
/* * OSS compatible sequencer driver * * synth device handlers * * Copyright (C) 1998,99 Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "seq_oss_synth.h" #include "seq_oss_midi.h" #include "../seq_lock.h" #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> /* * constants */ #define SNDRV_SEQ_OSS_MAX_SYNTH_NAME 30 #define MAX_SYSEX_BUFLEN 128 /* * definition of synth info records */ /* sysex buffer */ struct seq_oss_synth_sysex { int len; int skip; unsigned char buf[MAX_SYSEX_BUFLEN]; }; /* synth info */ struct seq_oss_synth { int seq_device; /* for synth_info */ int synth_type; int synth_subtype; int nr_voices; char name[SNDRV_SEQ_OSS_MAX_SYNTH_NAME]; struct snd_seq_oss_callback oper; int opened; void *private_data; snd_use_lock_t use_lock; }; /* * device table */ static int max_synth_devs; static struct seq_oss_synth *synth_devs[SNDRV_SEQ_OSS_MAX_SYNTH_DEVS]; static struct seq_oss_synth midi_synth_dev = { -1, /* seq_device */ SYNTH_TYPE_MIDI, /* synth_type */ 0, /* synth_subtype */ 16, /* nr_voices */ "MIDI", /* name */ }; static DEFINE_SPINLOCK(register_lock); /* * prototypes */ static struct seq_oss_synth *get_synthdev(struct seq_oss_devinfo *dp, int dev); static void reset_channels(struct seq_oss_synthinfo *info); /* * global initialization */ void __init 
snd_seq_oss_synth_init(void) { snd_use_lock_init(&midi_synth_dev.use_lock); } /* * registration of the synth device */ int snd_seq_oss_synth_register(struct snd_seq_device *dev) { int i; struct seq_oss_synth *rec; struct snd_seq_oss_reg *reg = SNDRV_SEQ_DEVICE_ARGPTR(dev); unsigned long flags; if ((rec = kzalloc(sizeof(*rec), GFP_KERNEL)) == NULL) { pr_err("ALSA: seq_oss: can't malloc synth info\n"); return -ENOMEM; } rec->seq_device = -1; rec->synth_type = reg->type; rec->synth_subtype = reg->subtype; rec->nr_voices = reg->nvoices; rec->oper = reg->oper; rec->private_data = reg->private_data; rec->opened = 0; snd_use_lock_init(&rec->use_lock); /* copy and truncate the name of synth device */ strlcpy(rec->name, dev->name, sizeof(rec->name)); /* registration */ spin_lock_irqsave(&register_lock, flags); for (i = 0; i < max_synth_devs; i++) { if (synth_devs[i] == NULL) break; } if (i >= max_synth_devs) { if (max_synth_devs >= SNDRV_SEQ_OSS_MAX_SYNTH_DEVS) { spin_unlock_irqrestore(&register_lock, flags); pr_err("ALSA: seq_oss: no more synth slot\n"); kfree(rec); return -ENOMEM; } max_synth_devs++; } rec->seq_device = i; synth_devs[i] = rec; spin_unlock_irqrestore(&register_lock, flags); dev->driver_data = rec; #ifdef SNDRV_OSS_INFO_DEV_SYNTH if (i < SNDRV_CARDS) snd_oss_info_register(SNDRV_OSS_INFO_DEV_SYNTH, i, rec->name); #endif return 0; } int snd_seq_oss_synth_unregister(struct snd_seq_device *dev) { int index; struct seq_oss_synth *rec = dev->driver_data; unsigned long flags; spin_lock_irqsave(&register_lock, flags); for (index = 0; index < max_synth_devs; index++) { if (synth_devs[index] == rec) break; } if (index >= max_synth_devs) { spin_unlock_irqrestore(&register_lock, flags); pr_err("ALSA: seq_oss: can't unregister synth\n"); return -EINVAL; } synth_devs[index] = NULL; if (index == max_synth_devs - 1) { for (index--; index >= 0; index--) { if (synth_devs[index]) break; } max_synth_devs = index + 1; } spin_unlock_irqrestore(&register_lock, flags); #ifdef 
SNDRV_OSS_INFO_DEV_SYNTH if (rec->seq_device < SNDRV_CARDS) snd_oss_info_unregister(SNDRV_OSS_INFO_DEV_SYNTH, rec->seq_device); #endif snd_use_lock_sync(&rec->use_lock); kfree(rec); return 0; } /* */ static struct seq_oss_synth * get_sdev(int dev) { struct seq_oss_synth *rec; unsigned long flags; spin_lock_irqsave(&register_lock, flags); rec = synth_devs[dev]; if (rec) snd_use_lock_use(&rec->use_lock); spin_unlock_irqrestore(&register_lock, flags); return rec; } /* * set up synth tables */ void snd_seq_oss_synth_setup(struct seq_oss_devinfo *dp) { int i; struct seq_oss_synth *rec; struct seq_oss_synthinfo *info; dp->max_synthdev = max_synth_devs; dp->synth_opened = 0; memset(dp->synths, 0, sizeof(dp->synths)); for (i = 0; i < dp->max_synthdev; i++) { rec = get_sdev(i); if (rec == NULL) continue; if (rec->oper.open == NULL || rec->oper.close == NULL) { snd_use_lock_free(&rec->use_lock); continue; } info = &dp->synths[i]; info->arg.app_index = dp->port; info->arg.file_mode = dp->file_mode; info->arg.seq_mode = dp->seq_mode; if (dp->seq_mode == SNDRV_SEQ_OSS_MODE_SYNTH) info->arg.event_passing = SNDRV_SEQ_OSS_PROCESS_EVENTS; else info->arg.event_passing = SNDRV_SEQ_OSS_PASS_EVENTS; info->opened = 0; if (!try_module_get(rec->oper.owner)) { snd_use_lock_free(&rec->use_lock); continue; } if (rec->oper.open(&info->arg, rec->private_data) < 0) { module_put(rec->oper.owner); snd_use_lock_free(&rec->use_lock); continue; } info->nr_voices = rec->nr_voices; if (info->nr_voices > 0) { info->ch = kcalloc(info->nr_voices, sizeof(struct seq_oss_chinfo), GFP_KERNEL); if (!info->ch) { pr_err("ALSA: seq_oss: Cannot malloc voices\n"); rec->oper.close(&info->arg); module_put(rec->oper.owner); snd_use_lock_free(&rec->use_lock); continue; } reset_channels(info); } info->opened++; rec->opened++; dp->synth_opened++; snd_use_lock_free(&rec->use_lock); } } /* * set up synth tables for MIDI emulation - /dev/music mode only */ void snd_seq_oss_synth_setup_midi(struct seq_oss_devinfo *dp) { int 
i; if (dp->max_synthdev >= SNDRV_SEQ_OSS_MAX_SYNTH_DEVS) return; for (i = 0; i < dp->max_mididev; i++) { struct seq_oss_synthinfo *info; info = &dp->synths[dp->max_synthdev]; if (snd_seq_oss_midi_open(dp, i, dp->file_mode) < 0) continue; info->arg.app_index = dp->port; info->arg.file_mode = dp->file_mode; info->arg.seq_mode = dp->seq_mode; info->arg.private_data = info; info->is_midi = 1; info->midi_mapped = i; info->arg.event_passing = SNDRV_SEQ_OSS_PASS_EVENTS; snd_seq_oss_midi_get_addr(dp, i, &info->arg.addr); info->opened = 1; midi_synth_dev.opened++; dp->max_synthdev++; if (dp->max_synthdev >= SNDRV_SEQ_OSS_MAX_SYNTH_DEVS) break; } } /* * clean up synth tables */ void snd_seq_oss_synth_cleanup(struct seq_oss_devinfo *dp) { int i; struct seq_oss_synth *rec; struct seq_oss_synthinfo *info; if (snd_BUG_ON(dp->max_synthdev >= SNDRV_SEQ_OSS_MAX_SYNTH_DEVS)) return; for (i = 0; i < dp->max_synthdev; i++) { info = &dp->synths[i]; if (! info->opened) continue; if (info->is_midi) { if (midi_synth_dev.opened > 0) { snd_seq_oss_midi_close(dp, info->midi_mapped); midi_synth_dev.opened--; } } else { rec = get_sdev(i); if (rec == NULL) continue; if (rec->opened > 0) { rec->oper.close(&info->arg); module_put(rec->oper.owner); rec->opened = 0; } snd_use_lock_free(&rec->use_lock); } kfree(info->sysex); info->sysex = NULL; kfree(info->ch); info->ch = NULL; } dp->synth_opened = 0; dp->max_synthdev = 0; } /* * check if the specified device is MIDI mapped device */ static int is_midi_dev(struct seq_oss_devinfo *dp, int dev) { if (dev < 0 || dev >= dp->max_synthdev) return 0; if (dp->synths[dev].is_midi) return 1; return 0; } /* * return synth device information pointer */ static struct seq_oss_synth * get_synthdev(struct seq_oss_devinfo *dp, int dev) { struct seq_oss_synth *rec; if (dev < 0 || dev >= dp->max_synthdev) return NULL; if (! dp->synths[dev].opened) return NULL; if (dp->synths[dev].is_midi) return &midi_synth_dev; if ((rec = get_sdev(dev)) == NULL) return NULL; if (! 
rec->opened) { snd_use_lock_free(&rec->use_lock); return NULL; } return rec; } /* * reset note and velocity on each channel. */ static void reset_channels(struct seq_oss_synthinfo *info) { int i; if (info->ch == NULL || ! info->nr_voices) return; for (i = 0; i < info->nr_voices; i++) { info->ch[i].note = -1; info->ch[i].vel = 0; } } /* * reset synth device: * call reset callback. if no callback is defined, send a heartbeat * event to the corresponding port. */ void snd_seq_oss_synth_reset(struct seq_oss_devinfo *dp, int dev) { struct seq_oss_synth *rec; struct seq_oss_synthinfo *info; if (snd_BUG_ON(dev < 0 || dev >= dp->max_synthdev)) return; info = &dp->synths[dev]; if (! info->opened) return; if (info->sysex) info->sysex->len = 0; /* reset sysex */ reset_channels(info); if (info->is_midi) { if (midi_synth_dev.opened <= 0) return; snd_seq_oss_midi_reset(dp, info->midi_mapped); /* reopen the device */ snd_seq_oss_midi_close(dp, dev); if (snd_seq_oss_midi_open(dp, info->midi_mapped, dp->file_mode) < 0) { midi_synth_dev.opened--; info->opened = 0; kfree(info->sysex); info->sysex = NULL; kfree(info->ch); info->ch = NULL; } return; } rec = get_sdev(dev); if (rec == NULL) return; if (rec->oper.reset) { rec->oper.reset(&info->arg); } else { struct snd_seq_event ev; memset(&ev, 0, sizeof(ev)); snd_seq_oss_fill_addr(dp, &ev, info->arg.addr.client, info->arg.addr.port); ev.type = SNDRV_SEQ_EVENT_RESET; snd_seq_oss_dispatch(dp, &ev, 0, 0); } snd_use_lock_free(&rec->use_lock); } /* * load a patch record: * call load_patch callback function */ int snd_seq_oss_synth_load_patch(struct seq_oss_devinfo *dp, int dev, int fmt, const char __user *buf, int p, int c) { struct seq_oss_synth *rec; int rc; if (dev < 0 || dev >= dp->max_synthdev) return -ENXIO; if (is_midi_dev(dp, dev)) return 0; if ((rec = get_synthdev(dp, dev)) == NULL) return -ENXIO; if (rec->oper.load_patch == NULL) rc = -ENXIO; else rc = rec->oper.load_patch(&dp->synths[dev].arg, fmt, buf, p, c); 
	snd_use_lock_free(&rec->use_lock);	/* release the ref taken by get_synthdev() */
	return rc;
}


/*
 * check if the device is valid synth device
 */
int
snd_seq_oss_synth_is_valid(struct seq_oss_devinfo *dp, int dev)
{
	struct seq_oss_synth *rec;

	/* get_synthdev() grabs a use-lock reference on success; drop it
	 * immediately since we only need the yes/no answer here.
	 */
	rec = get_synthdev(dp, dev);
	if (rec) {
		snd_use_lock_free(&rec->use_lock);
		return 1;
	}
	return 0;
}


/*
 * receive OSS 6 byte sysex packet:
 * the full sysex message will be sent if it reaches to the end of data
 * (0xff).
 */
int
snd_seq_oss_synth_sysex(struct seq_oss_devinfo *dp, int dev, unsigned char *buf,
			struct snd_seq_event *ev)
{
	int i, send;
	unsigned char *dest;
	struct seq_oss_synth_sysex *sysex;

	if (! snd_seq_oss_synth_is_valid(dp, dev))
		return -ENXIO;

	sysex = dp->synths[dev].sysex;
	if (sysex == NULL) {
		/* lazily allocate the per-device sysex accumulation buffer */
		sysex = kzalloc(sizeof(*sysex), GFP_KERNEL);
		if (sysex == NULL)
			return -ENOMEM;
		dp->synths[dev].sysex = sysex;
	}

	send = 0;
	dest = sysex->buf + sysex->len;
	/* copy 6 byte packet to the buffer */
	for (i = 0; i < 6; i++) {
		if (buf[i] == 0xff) {
			/* 0xff marks the end of the OSS 6-byte packet stream */
			send = 1;
			break;
		}
		dest[i] = buf[i];
		sysex->len++;
		if (sysex->len >= MAX_SYSEX_BUFLEN) {
			/* buffer full: discard and mark the rest of this
			 * message to be skipped until its terminator
			 */
			sysex->len = 0;
			sysex->skip = 1;
			break;
		}
	}

	if (sysex->len && send) {
		if (sysex->skip) {
			/* message overflowed earlier; drop it now that the
			 * terminator arrived
			 */
			sysex->skip = 0;
			sysex->len = 0;
			return -EINVAL; /* skip */
		}
		/* copy the data to event record and send it */
		ev->flags = SNDRV_SEQ_EVENT_LENGTH_VARIABLE;
		if (snd_seq_oss_synth_addr(dp, dev, ev))
			return -EINVAL;
		ev->data.ext.len = sysex->len;
		ev->data.ext.ptr = sysex->buf;
		sysex->len = 0;
		return 0;
	}

	return -EINVAL; /* skip */
}

/*
 * fill the event source/destination addresses
 */
int
snd_seq_oss_synth_addr(struct seq_oss_devinfo *dp, int dev, struct snd_seq_event *ev)
{
	if (!
snd_seq_oss_synth_is_valid(dp, dev)) return -EINVAL; snd_seq_oss_fill_addr(dp, ev, dp->synths[dev].arg.addr.client, dp->synths[dev].arg.addr.port); return 0; } /* * OSS compatible ioctl */ int snd_seq_oss_synth_ioctl(struct seq_oss_devinfo *dp, int dev, unsigned int cmd, unsigned long addr) { struct seq_oss_synth *rec; int rc; if (is_midi_dev(dp, dev)) return -ENXIO; if ((rec = get_synthdev(dp, dev)) == NULL) return -ENXIO; if (rec->oper.ioctl == NULL) rc = -ENXIO; else rc = rec->oper.ioctl(&dp->synths[dev].arg, cmd, addr); snd_use_lock_free(&rec->use_lock); return rc; } /* * send OSS raw events - SEQ_PRIVATE and SEQ_VOLUME */ int snd_seq_oss_synth_raw_event(struct seq_oss_devinfo *dp, int dev, unsigned char *data, struct snd_seq_event *ev) { if (! snd_seq_oss_synth_is_valid(dp, dev) || is_midi_dev(dp, dev)) return -ENXIO; ev->type = SNDRV_SEQ_EVENT_OSS; memcpy(ev->data.raw8.d, data, 8); return snd_seq_oss_synth_addr(dp, dev, ev); } /* * create OSS compatible synth_info record */ int snd_seq_oss_synth_make_info(struct seq_oss_devinfo *dp, int dev, struct synth_info *inf) { struct seq_oss_synth *rec; if (dev < 0 || dev >= dp->max_synthdev) return -ENXIO; if (dp->synths[dev].is_midi) { struct midi_info minf; snd_seq_oss_midi_make_info(dp, dp->synths[dev].midi_mapped, &minf); inf->synth_type = SYNTH_TYPE_MIDI; inf->synth_subtype = 0; inf->nr_voices = 16; inf->device = dev; strlcpy(inf->name, minf.name, sizeof(inf->name)); } else { if ((rec = get_synthdev(dp, dev)) == NULL) return -ENXIO; inf->synth_type = rec->synth_type; inf->synth_subtype = rec->synth_subtype; inf->nr_voices = rec->nr_voices; inf->device = dev; strlcpy(inf->name, rec->name, sizeof(inf->name)); snd_use_lock_free(&rec->use_lock); } return 0; } #ifdef CONFIG_PROC_FS /* * proc interface */ void snd_seq_oss_synth_info_read(struct snd_info_buffer *buf) { int i; struct seq_oss_synth *rec; snd_iprintf(buf, "\nNumber of synth devices: %d\n", max_synth_devs); for (i = 0; i < max_synth_devs; i++) { 
		snd_iprintf(buf, "\nsynth %d: ", i);
		rec = get_sdev(i);	/* takes a use-lock reference on the record */
		if (rec == NULL) {
			snd_iprintf(buf, "*empty*\n");
			continue;
		}
		snd_iprintf(buf, "[%s]\n", rec->name);
		snd_iprintf(buf, " type 0x%x : subtype 0x%x : voices %d\n",
			    rec->synth_type, rec->synth_subtype,
			    rec->nr_voices);
		snd_iprintf(buf, " capabilities : ioctl %s / load_patch %s\n",
			    enabled_str((long)rec->oper.ioctl),
			    enabled_str((long)rec->oper.load_patch));
		snd_use_lock_free(&rec->use_lock);	/* drop the reference */
	}
}
#endif /* CONFIG_PROC_FS */
gpl-2.0
jianpingye/linux
drivers/ata/pata_artop.c
1893
12401
/* * pata_artop.c - ARTOP ATA controller driver * * (C) 2006 Red Hat * (C) 2007,2011 Bartlomiej Zolnierkiewicz * * Based in part on drivers/ide/pci/aec62xx.c * Copyright (C) 1999-2002 Andre Hedrick <andre@linux-ide.org> * 865/865R fixes for Macintosh card version from a patch to the old * driver by Thibaut VARENE <varenet@parisc-linux.org> * When setting the PCI latency we must set 0x80 or higher for burst * performance Alessandro Zummo <alessandro.zummo@towertech.it> * * TODO * Investigate no_dsc on 850R * Clock detect */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/device.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #include <linux/ata.h> #define DRV_NAME "pata_artop" #define DRV_VERSION "0.4.6" /* * The ARTOP has 33 Mhz and "over clocked" timing tables. Until we * get PCI bus speed functionality we leave this as 0. Its a variable * for when we get the functionality and also for folks wanting to * test stuff. */ static int clock = 0; /** * artop62x0_pre_reset - probe begin * @link: link * @deadline: deadline jiffies for the operation * * Nothing complicated needed here. */ static int artop62x0_pre_reset(struct ata_link *link, unsigned long deadline) { static const struct pci_bits artop_enable_bits[] = { { 0x4AU, 1U, 0x02UL, 0x02UL }, /* port 0 */ { 0x4AU, 1U, 0x04UL, 0x04UL }, /* port 1 */ }; struct ata_port *ap = link->ap; struct pci_dev *pdev = to_pci_dev(ap->host->dev); /* Odd numbered device ids are the units with enable bits. 
*/ if ((pdev->device & 1) && !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no])) return -ENOENT; return ata_sff_prereset(link, deadline); } /** * artop6260_cable_detect - identify cable type * @ap: Port * * Identify the cable type for the ARTOP interface in question */ static int artop6260_cable_detect(struct ata_port *ap) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); u8 tmp; pci_read_config_byte(pdev, 0x49, &tmp); if (tmp & (1 << ap->port_no)) return ATA_CBL_PATA40; return ATA_CBL_PATA80; } /** * artop6210_load_piomode - Load a set of PATA PIO timings * @ap: Port whose timings we are configuring * @adev: Device * @pio: PIO mode * * Set PIO mode for device, in host controller PCI config space. This * is used both to set PIO timings in PIO mode and also to set the * matching PIO clocking for UDMA, as well as the MWDMA timings. * * LOCKING: * None (inherited from caller). */ static void artop6210_load_piomode(struct ata_port *ap, struct ata_device *adev, unsigned int pio) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); int dn = adev->devno + 2 * ap->port_no; const u16 timing[2][5] = { { 0x0000, 0x000A, 0x0008, 0x0303, 0x0301 }, { 0x0700, 0x070A, 0x0708, 0x0403, 0x0401 } }; /* Load the PIO timing active/recovery bits */ pci_write_config_word(pdev, 0x40 + 2 * dn, timing[clock][pio]); } /** * artop6210_set_piomode - Initialize host controller PATA PIO timings * @ap: Port whose timings we are configuring * @adev: Device we are configuring * * Set PIO mode for device, in host controller PCI config space. For * ARTOP we must also clear the UDMA bits if we are not doing UDMA. In * the event UDMA is used the later call to set_dmamode will set the * bits as required. * * LOCKING: * None (inherited from caller). 
*/ static void artop6210_set_piomode(struct ata_port *ap, struct ata_device *adev) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); int dn = adev->devno + 2 * ap->port_no; u8 ultra; artop6210_load_piomode(ap, adev, adev->pio_mode - XFER_PIO_0); /* Clear the UDMA mode bits (set_dmamode will redo this if needed) */ pci_read_config_byte(pdev, 0x54, &ultra); ultra &= ~(3 << (2 * dn)); pci_write_config_byte(pdev, 0x54, ultra); } /** * artop6260_load_piomode - Initialize host controller PATA PIO timings * @ap: Port whose timings we are configuring * @adev: Device we are configuring * @pio: PIO mode * * Set PIO mode for device, in host controller PCI config space. The * ARTOP6260 and relatives store the timing data differently. * * LOCKING: * None (inherited from caller). */ static void artop6260_load_piomode (struct ata_port *ap, struct ata_device *adev, unsigned int pio) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); int dn = adev->devno + 2 * ap->port_no; const u8 timing[2][5] = { { 0x00, 0x0A, 0x08, 0x33, 0x31 }, { 0x70, 0x7A, 0x78, 0x43, 0x41 } }; /* Load the PIO timing active/recovery bits */ pci_write_config_byte(pdev, 0x40 + dn, timing[clock][pio]); } /** * artop6260_set_piomode - Initialize host controller PATA PIO timings * @ap: Port whose timings we are configuring * @adev: Device we are configuring * * Set PIO mode for device, in host controller PCI config space. For * ARTOP we must also clear the UDMA bits if we are not doing UDMA. In * the event UDMA is used the later call to set_dmamode will set the * bits as required. * * LOCKING: * None (inherited from caller). 
*/
static void artop6260_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u8 ultra;

	artop6260_load_piomode(ap, adev, adev->pio_mode - XFER_PIO_0);

	/* Clear the UDMA mode bits (set_dmamode will redo this if needed) */
	pci_read_config_byte(pdev, 0x44 + ap->port_no, &ultra);
	ultra &= ~(7 << (4 * adev->devno));	/* One nibble per drive */
	pci_write_config_byte(pdev, 0x44 + ap->port_no, ultra);
}

/**
 * artop6210_set_dmamode - Initialize host controller PATA PIO timings
 * @ap: Port whose timings we are configuring
 * @adev: Device whose timings we are configuring
 *
 * Set DMA mode for device, in host controller PCI config space.
 *
 * LOCKING:
 * None (inherited from caller).
 */
static void artop6210_set_dmamode (struct ata_port *ap, struct ata_device *adev)
{
	unsigned int pio;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	int dn = adev->devno + 2 * ap->port_no;
	u8 ultra;

	/* MWDMA0 pairs with the PIO1 timing values, all other modes with PIO4 */
	if (adev->dma_mode == XFER_MW_DMA_0)
		pio = 1;
	else
		pio = 4;

	/* Load the PIO timing active/recovery bits */
	artop6210_load_piomode(ap, adev, pio);

	/* Two UDMA bits per drive at config offset 0x54 */
	pci_read_config_byte(pdev, 0x54, &ultra);
	ultra &= ~(3 << (2 * dn));

	/* Add ultra DMA bits if in UDMA mode */
	if (adev->dma_mode >= XFER_UDMA_0) {
		u8 mode = (adev->dma_mode - XFER_UDMA_0) + 1 - clock;
		if (mode == 0)
			mode = 1;
		ultra |= (mode << (2 * dn));
	}
	pci_write_config_byte(pdev, 0x54, ultra);
}

/**
 * artop6260_set_dmamode - Initialize host controller PATA PIO timings
 * @ap: Port whose timings we are configuring
 * @adev: Device we are configuring
 *
 * Set DMA mode for device, in host controller PCI config space. The
 * ARTOP6260 and relatives store the timing data differently.
 *
 * LOCKING:
 * None (inherited from caller).
*/ static void artop6260_set_dmamode (struct ata_port *ap, struct ata_device *adev) { unsigned int pio = adev->pio_mode - XFER_PIO_0; struct pci_dev *pdev = to_pci_dev(ap->host->dev); u8 ultra; if (adev->dma_mode == XFER_MW_DMA_0) pio = 1; else pio = 4; /* Load the PIO timing active/recovery bits */ artop6260_load_piomode(ap, adev, pio); /* Add ultra DMA bits if in UDMA mode */ pci_read_config_byte(pdev, 0x44 + ap->port_no, &ultra); ultra &= ~(7 << (4 * adev->devno)); /* One nibble per drive */ if (adev->dma_mode >= XFER_UDMA_0) { u8 mode = adev->dma_mode - XFER_UDMA_0 + 1 - clock; if (mode == 0) mode = 1; ultra |= (mode << (4 * adev->devno)); } pci_write_config_byte(pdev, 0x44 + ap->port_no, ultra); } /** * artop_6210_qc_defer - implement serialization * @qc: command * * Issue commands per host on this chip. */ static int artop6210_qc_defer(struct ata_queued_cmd *qc) { struct ata_host *host = qc->ap->host; struct ata_port *alt = host->ports[1 ^ qc->ap->port_no]; int rc; /* First apply the usual rules */ rc = ata_std_qc_defer(qc); if (rc != 0) return rc; /* Now apply serialization rules. 
Only allow a command if the other channel state machine is idle */ if (alt && alt->qc_active) return ATA_DEFER_PORT; return 0; } static struct scsi_host_template artop_sht = { ATA_BMDMA_SHT(DRV_NAME), }; static struct ata_port_operations artop6210_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = ata_cable_40wire, .set_piomode = artop6210_set_piomode, .set_dmamode = artop6210_set_dmamode, .prereset = artop62x0_pre_reset, .qc_defer = artop6210_qc_defer, }; static struct ata_port_operations artop6260_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = artop6260_cable_detect, .set_piomode = artop6260_set_piomode, .set_dmamode = artop6260_set_dmamode, .prereset = artop62x0_pre_reset, }; static void atp8xx_fixup(struct pci_dev *pdev) { if (pdev->device == 0x0005) /* BIOS may have left us in UDMA, clear it before libata probe */ pci_write_config_byte(pdev, 0x54, 0); else if (pdev->device == 0x0008 || pdev->device == 0x0009) { u8 reg; /* Mac systems come up with some registers not set as we will need them */ /* Clear reset & test bits */ pci_read_config_byte(pdev, 0x49, &reg); pci_write_config_byte(pdev, 0x49, reg & ~0x30); /* PCI latency must be > 0x80 for burst mode, tweak it * if required. */ pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &reg); if (reg <= 0x80) pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x90); /* Enable IRQ output and burst mode */ pci_read_config_byte(pdev, 0x4a, &reg); pci_write_config_byte(pdev, 0x4a, (reg & ~0x01) | 0x80); } } /** * artop_init_one - Register ARTOP ATA PCI device with kernel services * @pdev: PCI device to register * @ent: Entry in artop_pci_tbl matching with @pdev * * Called from kernel PCI layer. * * LOCKING: * Inherited from PCI layer (may sleep). * * RETURNS: * Zero on success, or -ERRNO value. 
*/ static int artop_init_one (struct pci_dev *pdev, const struct pci_device_id *id) { static const struct ata_port_info info_6210 = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA2, .port_ops = &artop6210_ops, }; static const struct ata_port_info info_626x = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA4, .port_ops = &artop6260_ops, }; static const struct ata_port_info info_628x = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA5, .port_ops = &artop6260_ops, }; static const struct ata_port_info info_628x_fast = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA6, .port_ops = &artop6260_ops, }; const struct ata_port_info *ppi[] = { NULL, NULL }; int rc; ata_print_version_once(&pdev->dev, DRV_VERSION); rc = pcim_enable_device(pdev); if (rc) return rc; if (id->driver_data == 0) /* 6210 variant */ ppi[0] = &info_6210; else if (id->driver_data == 1) /* 6260 */ ppi[0] = &info_626x; else if (id->driver_data == 2) { /* 6280 or 6280 + fast */ unsigned long io = pci_resource_start(pdev, 4); ppi[0] = &info_628x; if (inb(io) & 0x10) ppi[0] = &info_628x_fast; } BUG_ON(ppi[0] == NULL); atp8xx_fixup(pdev); return ata_pci_bmdma_init_one(pdev, ppi, &artop_sht, NULL, 0); } static const struct pci_device_id artop_pci_tbl[] = { { PCI_VDEVICE(ARTOP, 0x0005), 0 }, { PCI_VDEVICE(ARTOP, 0x0006), 1 }, { PCI_VDEVICE(ARTOP, 0x0007), 1 }, { PCI_VDEVICE(ARTOP, 0x0008), 2 }, { PCI_VDEVICE(ARTOP, 0x0009), 2 }, { } /* terminate list */ }; #ifdef CONFIG_PM_SLEEP static int atp8xx_reinit_one(struct pci_dev *pdev) { struct ata_host *host = pci_get_drvdata(pdev); int rc; rc = ata_pci_device_do_resume(pdev); if (rc) return rc; atp8xx_fixup(pdev); ata_host_resume(host); return 0; } #endif static struct pci_driver artop_pci_driver = { .name = DRV_NAME, .id_table = artop_pci_tbl, .probe = 
artop_init_one, .remove = ata_pci_remove_one, #ifdef CONFIG_PM_SLEEP .suspend = ata_pci_device_suspend, .resume = atp8xx_reinit_one, #endif }; module_pci_driver(artop_pci_driver); MODULE_AUTHOR("Alan Cox, Bartlomiej Zolnierkiewicz"); MODULE_DESCRIPTION("SCSI low-level driver for ARTOP PATA"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, artop_pci_tbl); MODULE_VERSION(DRV_VERSION);
gpl-2.0
mukulsoni/android_kernel_samsung_ms013g-new
drivers/mtd/nand/nand_ids.c
2149
7553
/* * drivers/mtd/nandids.c * * Copyright (C) 2002 Thomas Gleixner (tglx@linutronix.de) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/module.h> #include <linux/mtd/nand.h> /* * Chip ID list * * Name. ID code, pagesize, chipsize in MegaByte, eraseblock size, * options * * Pagesize; 0, 256, 512 * 0 get this information from the extended chip ID + 256 256 Byte page size * 512 512 Byte page size */ struct nand_flash_dev nand_flash_ids[] = { #ifdef CONFIG_MTD_NAND_MUSEUM_IDS {"NAND 1MiB 5V 8-bit", 0x6e, 256, 1, 0x1000, 0}, {"NAND 2MiB 5V 8-bit", 0x64, 256, 2, 0x1000, 0}, {"NAND 4MiB 5V 8-bit", 0x6b, 512, 4, 0x2000, 0}, {"NAND 1MiB 3,3V 8-bit", 0xe8, 256, 1, 0x1000, 0}, {"NAND 1MiB 3,3V 8-bit", 0xec, 256, 1, 0x1000, 0}, {"NAND 2MiB 3,3V 8-bit", 0xea, 256, 2, 0x1000, 0}, {"NAND 4MiB 3,3V 8-bit", 0xd5, 512, 4, 0x2000, 0}, {"NAND 4MiB 3,3V 8-bit", 0xe3, 512, 4, 0x2000, 0}, {"NAND 4MiB 3,3V 8-bit", 0xe5, 512, 4, 0x2000, 0}, {"NAND 8MiB 3,3V 8-bit", 0xd6, 512, 8, 0x2000, 0}, {"NAND 8MiB 1,8V 8-bit", 0x39, 512, 8, 0x2000, 0}, {"NAND 8MiB 3,3V 8-bit", 0xe6, 512, 8, 0x2000, 0}, {"NAND 8MiB 1,8V 16-bit", 0x49, 512, 8, 0x2000, NAND_BUSWIDTH_16}, {"NAND 8MiB 3,3V 16-bit", 0x59, 512, 8, 0x2000, NAND_BUSWIDTH_16}, #endif {"NAND 16MiB 1,8V 8-bit", 0x33, 512, 16, 0x4000, 0}, {"NAND 16MiB 3,3V 8-bit", 0x73, 512, 16, 0x4000, 0}, {"NAND 16MiB 1,8V 16-bit", 0x43, 512, 16, 0x4000, NAND_BUSWIDTH_16}, {"NAND 16MiB 3,3V 16-bit", 0x53, 512, 16, 0x4000, NAND_BUSWIDTH_16}, {"NAND 32MiB 1,8V 8-bit", 0x35, 512, 32, 0x4000, 0}, {"NAND 32MiB 3,3V 8-bit", 0x75, 512, 32, 0x4000, 0}, {"NAND 32MiB 1,8V 16-bit", 0x45, 512, 32, 0x4000, NAND_BUSWIDTH_16}, {"NAND 32MiB 3,3V 16-bit", 0x55, 512, 32, 0x4000, NAND_BUSWIDTH_16}, {"NAND 64MiB 1,8V 8-bit", 0x36, 512, 64, 0x4000, 0}, {"NAND 64MiB 3,3V 8-bit", 0x76, 512, 64, 0x4000, 0}, {"NAND 64MiB 1,8V 
16-bit", 0x46, 512, 64, 0x4000, NAND_BUSWIDTH_16}, {"NAND 64MiB 3,3V 16-bit", 0x56, 512, 64, 0x4000, NAND_BUSWIDTH_16}, {"NAND 128MiB 1,8V 8-bit", 0x78, 512, 128, 0x4000, 0}, {"NAND 128MiB 1,8V 8-bit", 0x39, 512, 128, 0x4000, 0}, {"NAND 128MiB 3,3V 8-bit", 0x79, 512, 128, 0x4000, 0}, {"NAND 128MiB 1,8V 16-bit", 0x72, 512, 128, 0x4000, NAND_BUSWIDTH_16}, {"NAND 128MiB 1,8V 16-bit", 0x49, 512, 128, 0x4000, NAND_BUSWIDTH_16}, {"NAND 128MiB 3,3V 16-bit", 0x74, 512, 128, 0x4000, NAND_BUSWIDTH_16}, {"NAND 128MiB 3,3V 16-bit", 0x59, 512, 128, 0x4000, NAND_BUSWIDTH_16}, {"NAND 256MiB 3,3V 8-bit", 0x71, 512, 256, 0x4000, 0}, /* * These are the new chips with large page size. The pagesize and the * erasesize is determined from the extended id bytes */ #define LP_OPTIONS (NAND_SAMSUNG_LP_OPTIONS | NAND_NO_READRDY | NAND_NO_AUTOINCR) #define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16) /* 512 Megabit */ {"NAND 64MiB 1,8V 8-bit", 0xA2, 0, 64, 0, LP_OPTIONS}, {"NAND 64MiB 1,8V 8-bit", 0xA0, 0, 64, 0, LP_OPTIONS}, {"NAND 64MiB 3,3V 8-bit", 0xF2, 0, 64, 0, LP_OPTIONS}, {"NAND 64MiB 3,3V 8-bit", 0xD0, 0, 64, 0, LP_OPTIONS}, {"NAND 64MiB 3,3V 8-bit", 0xF0, 0, 64, 0, LP_OPTIONS}, {"NAND 64MiB 1,8V 16-bit", 0xB2, 0, 64, 0, LP_OPTIONS16}, {"NAND 64MiB 1,8V 16-bit", 0xB0, 0, 64, 0, LP_OPTIONS16}, {"NAND 64MiB 3,3V 16-bit", 0xC2, 0, 64, 0, LP_OPTIONS16}, {"NAND 64MiB 3,3V 16-bit", 0xC0, 0, 64, 0, LP_OPTIONS16}, /* 1 Gigabit */ {"NAND 128MiB 1,8V 8-bit", 0xA1, 0, 128, 0, LP_OPTIONS}, {"NAND 128MiB 3,3V 8-bit", 0xF1, 0, 128, 0, LP_OPTIONS}, {"NAND 128MiB 3,3V 8-bit", 0xD1, 0, 128, 0, LP_OPTIONS}, {"NAND 128MiB 1,8V 16-bit", 0xB1, 0, 128, 0, LP_OPTIONS16}, {"NAND 128MiB 3,3V 16-bit", 0xC1, 0, 128, 0, LP_OPTIONS16}, {"NAND 128MiB 1,8V 16-bit", 0xAD, 0, 128, 0, LP_OPTIONS16}, /* 2 Gigabit */ {"NAND 256MiB 1,8V 8-bit", 0xAA, 0, 256, 0, LP_OPTIONS}, {"NAND 256MiB 3,3V 8-bit", 0xDA, 0, 256, 0, LP_OPTIONS}, {"NAND 256MiB 1,8V 16-bit", 0xBA, 0, 256, 0, LP_OPTIONS16}, {"NAND 256MiB 3,3V 16-bit", 
0xCA, 0, 256, 0, LP_OPTIONS16}, /* 4 Gigabit */ {"NAND 512MiB 1,8V 8-bit", 0xAC, 0, 512, 0, LP_OPTIONS}, {"NAND 512MiB 3,3V 8-bit", 0xDC, 0, 512, 0, LP_OPTIONS}, {"NAND 512MiB 1,8V 16-bit", 0xBC, 0, 512, 0, LP_OPTIONS16}, {"NAND 512MiB 3,3V 16-bit", 0xCC, 0, 512, 0, LP_OPTIONS16}, /* 8 Gigabit */ {"NAND 1GiB 1,8V 8-bit", 0xA3, 0, 1024, 0, LP_OPTIONS}, {"NAND 1GiB 3,3V 8-bit", 0xD3, 0, 1024, 0, LP_OPTIONS}, {"NAND 1GiB 1,8V 16-bit", 0xB3, 0, 1024, 0, LP_OPTIONS16}, {"NAND 1GiB 3,3V 16-bit", 0xC3, 0, 1024, 0, LP_OPTIONS16}, /* 16 Gigabit */ {"NAND 2GiB 1,8V 8-bit", 0xA5, 0, 2048, 0, LP_OPTIONS}, {"NAND 2GiB 3,3V 8-bit", 0xD5, 0, 2048, 0, LP_OPTIONS}, {"NAND 2GiB 1,8V 16-bit", 0xB5, 0, 2048, 0, LP_OPTIONS16}, {"NAND 2GiB 3,3V 16-bit", 0xC5, 0, 2048, 0, LP_OPTIONS16}, /* 32 Gigabit */ {"NAND 4GiB 1,8V 8-bit", 0xA7, 0, 4096, 0, LP_OPTIONS}, {"NAND 4GiB 3,3V 8-bit", 0xD7, 0, 4096, 0, LP_OPTIONS}, {"NAND 4GiB 1,8V 16-bit", 0xB7, 0, 4096, 0, LP_OPTIONS16}, {"NAND 4GiB 3,3V 16-bit", 0xC7, 0, 4096, 0, LP_OPTIONS16}, /* 64 Gigabit */ {"NAND 8GiB 1,8V 8-bit", 0xAE, 0, 8192, 0, LP_OPTIONS}, {"NAND 8GiB 3,3V 8-bit", 0xDE, 0, 8192, 0, LP_OPTIONS}, {"NAND 8GiB 1,8V 16-bit", 0xBE, 0, 8192, 0, LP_OPTIONS16}, {"NAND 8GiB 3,3V 16-bit", 0xCE, 0, 8192, 0, LP_OPTIONS16}, /* 128 Gigabit */ {"NAND 16GiB 1,8V 8-bit", 0x1A, 0, 16384, 0, LP_OPTIONS}, {"NAND 16GiB 3,3V 8-bit", 0x3A, 0, 16384, 0, LP_OPTIONS}, {"NAND 16GiB 1,8V 16-bit", 0x2A, 0, 16384, 0, LP_OPTIONS16}, {"NAND 16GiB 3,3V 16-bit", 0x4A, 0, 16384, 0, LP_OPTIONS16}, /* 256 Gigabit */ {"NAND 32GiB 1,8V 8-bit", 0x1C, 0, 32768, 0, LP_OPTIONS}, {"NAND 32GiB 3,3V 8-bit", 0x3C, 0, 32768, 0, LP_OPTIONS}, {"NAND 32GiB 1,8V 16-bit", 0x2C, 0, 32768, 0, LP_OPTIONS16}, {"NAND 32GiB 3,3V 16-bit", 0x4C, 0, 32768, 0, LP_OPTIONS16}, /* 512 Gigabit */ {"NAND 64GiB 1,8V 8-bit", 0x1E, 0, 65536, 0, LP_OPTIONS}, {"NAND 64GiB 3,3V 8-bit", 0x3E, 0, 65536, 0, LP_OPTIONS}, {"NAND 64GiB 1,8V 16-bit", 0x2E, 0, 65536, 0, LP_OPTIONS16}, {"NAND 64GiB 3,3V 
16-bit", 0x4E, 0, 65536, 0, LP_OPTIONS16},

	/*
	 * Renesas AND 1 Gigabit. Those chips do not support extended id and
	 * have a strange page/block layout ! The chosen minimum erasesize is
	 * 4 * 2 * 2048 = 16384 Byte, as those chips have an array of 4 page
	 * planes 1 block = 2 pages, but due to plane arrangement the blocks
	 * 0-3 consists of page 0 + 4,1 + 5, 2 + 6, 3 + 7 Anyway JFFS2 would
	 * increase the eraseblock size so we chose a combined one which can be
	 * erased in one go There are more speed improvements for reads and
	 * writes possible, but not implemented now
	 */
	{"AND 128MiB 3,3V 8-bit", 0x01, 2048, 128, 0x4000,
	 NAND_IS_AND | NAND_NO_AUTOINCR |NAND_NO_READRDY | NAND_4PAGE_ARRAY |
	 BBT_AUTO_REFRESH
	},

	/*
	 * NOTE(review): device ID 0xAC also appears earlier in this table as
	 * "NAND 512MiB 1,8V 8-bit" with extended-ID geometry.  If the ID
	 * lookup scans front-to-back and stops at the first match, this
	 * fixed-geometry 4GiB entry is unreachable -- confirm the intended
	 * lookup order against nand_get_flash_type() before relying on it.
	 */
	{"NAND 4GiB 1,8V 8-bit", 0xAC, 2048, 4096, 0x20000, 0},
	{NULL,}
};

/*
 * Manufacturer ID list
 */
struct nand_manufacturers nand_manuf_ids[] = {
	{NAND_MFR_TOSHIBA, "Toshiba"},
	{NAND_MFR_SAMSUNG, "Samsung"},
	{NAND_MFR_FUJITSU, "Fujitsu"},
	{NAND_MFR_NATIONAL, "National"},
	{NAND_MFR_RENESAS, "Renesas"},
	{NAND_MFR_STMICRO, "ST Micro"},
	{NAND_MFR_HYNIX, "Hynix"},
	{NAND_MFR_MICRON, "Micron"},
	{NAND_MFR_AMD, "AMD"},
	{NAND_MFR_MACRONIX, "Macronix"},
	{0x0, "Unknown"}
};

EXPORT_SYMBOL(nand_manuf_ids);
EXPORT_SYMBOL(nand_flash_ids);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
MODULE_DESCRIPTION("Nand device & manufacturer IDs");
gpl-2.0
SM-G920P/S6-MM
drivers/spi/spi-omap-uwire.c
2149
13549
/* * MicroWire interface driver for OMAP * * Copyright 2003 MontaVista Software Inc. <source@mvista.com> * * Ported to 2.6 OMAP uwire interface. * Copyright (C) 2004 Texas Instruments. * * Generalization patches by Juha Yrjola <juha.yrjola@nokia.com> * * Copyright (C) 2005 David Brownell (ported to 2.6 SPI interface) * Copyright (C) 2006 Nokia * * Many updates by Imre Deak <imre.deak@nokia.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/workqueue.h> #include <linux/interrupt.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/slab.h> #include <linux/spi/spi.h> #include <linux/spi/spi_bitbang.h> #include <linux/module.h> #include <asm/irq.h> #include <mach/hardware.h> #include <asm/io.h> #include <asm/mach-types.h> #include <mach/mux.h> #include <mach/omap7xx.h> /* OMAP7XX_IO_CONF registers */ /* FIXME address is now a platform device resource, * and irqs should show there too... */ #define UWIRE_BASE_PHYS 0xFFFB3000 /* uWire Registers: */ #define UWIRE_IO_SIZE 0x20 #define UWIRE_TDR 0x00 #define UWIRE_RDR 0x00 #define UWIRE_CSR 0x01 #define UWIRE_SR1 0x02 #define UWIRE_SR2 0x03 #define UWIRE_SR3 0x04 #define UWIRE_SR4 0x05 #define UWIRE_SR5 0x06 /* CSR bits */ #define RDRB (1 << 15) #define CSRB (1 << 14) #define START (1 << 13) #define CS_CMD (1 << 12) /* SR1 or SR2 bits */ #define UWIRE_READ_FALLING_EDGE 0x0001 #define UWIRE_READ_RISING_EDGE 0x0000 #define UWIRE_WRITE_FALLING_EDGE 0x0000 #define UWIRE_WRITE_RISING_EDGE 0x0002 #define UWIRE_CS_ACTIVE_LOW 0x0000 #define UWIRE_CS_ACTIVE_HIGH 0x0004 #define UWIRE_FREQ_DIV_2 0x0000 #define UWIRE_FREQ_DIV_4 0x0008 #define UWIRE_FREQ_DIV_8 0x0010 #define UWIRE_CHK_READY 0x0020 #define UWIRE_CLK_INVERTED 0x0040 struct uwire_spi { struct spi_bitbang bitbang; struct clk *ck; }; struct uwire_state { unsigned bits_per_word; unsigned div1_idx; }; /* REVISIT compile time constant for idx_shift? */ /* * Or, put it in a structure which is used throughout the driver; * that avoids having to issue two loads for each bit of static data. 
*/ static unsigned int uwire_idx_shift; static void __iomem *uwire_base; static inline void uwire_write_reg(int idx, u16 val) { __raw_writew(val, uwire_base + (idx << uwire_idx_shift)); } static inline u16 uwire_read_reg(int idx) { return __raw_readw(uwire_base + (idx << uwire_idx_shift)); } static inline void omap_uwire_configure_mode(u8 cs, unsigned long flags) { u16 w, val = 0; int shift, reg; if (flags & UWIRE_CLK_INVERTED) val ^= 0x03; val = flags & 0x3f; if (cs & 1) shift = 6; else shift = 0; if (cs <= 1) reg = UWIRE_SR1; else reg = UWIRE_SR2; w = uwire_read_reg(reg); w &= ~(0x3f << shift); w |= val << shift; uwire_write_reg(reg, w); } static int wait_uwire_csr_flag(u16 mask, u16 val, int might_not_catch) { u16 w; int c = 0; unsigned long max_jiffies = jiffies + HZ; for (;;) { w = uwire_read_reg(UWIRE_CSR); if ((w & mask) == val) break; if (time_after(jiffies, max_jiffies)) { printk(KERN_ERR "%s: timeout. reg=%#06x " "mask=%#06x val=%#06x\n", __func__, w, mask, val); return -1; } c++; if (might_not_catch && c > 64) break; } return 0; } static void uwire_set_clk1_div(int div1_idx) { u16 w; w = uwire_read_reg(UWIRE_SR3); w &= ~(0x03 << 1); w |= div1_idx << 1; uwire_write_reg(UWIRE_SR3, w); } static void uwire_chipselect(struct spi_device *spi, int value) { struct uwire_state *ust = spi->controller_state; u16 w; int old_cs; BUG_ON(wait_uwire_csr_flag(CSRB, 0, 0)); w = uwire_read_reg(UWIRE_CSR); old_cs = (w >> 10) & 0x03; if (value == BITBANG_CS_INACTIVE || old_cs != spi->chip_select) { /* Deselect this CS, or the previous CS */ w &= ~CS_CMD; uwire_write_reg(UWIRE_CSR, w); } /* activate specfied chipselect */ if (value == BITBANG_CS_ACTIVE) { uwire_set_clk1_div(ust->div1_idx); /* invert clock? 
*/ if (spi->mode & SPI_CPOL) uwire_write_reg(UWIRE_SR4, 1); else uwire_write_reg(UWIRE_SR4, 0); w = spi->chip_select << 10; w |= CS_CMD; uwire_write_reg(UWIRE_CSR, w); } } static int uwire_txrx(struct spi_device *spi, struct spi_transfer *t) { struct uwire_state *ust = spi->controller_state; unsigned len = t->len; unsigned bits = ust->bits_per_word; unsigned bytes; u16 val, w; int status = 0; if (!t->tx_buf && !t->rx_buf) return 0; /* Microwire doesn't read and write concurrently */ if (t->tx_buf && t->rx_buf) return -EPERM; w = spi->chip_select << 10; w |= CS_CMD; if (t->tx_buf) { const u8 *buf = t->tx_buf; /* NOTE: DMA could be used for TX transfers */ /* write one or two bytes at a time */ while (len >= 1) { /* tx bit 15 is first sent; we byteswap multibyte words * (msb-first) on the way out from memory. */ val = *buf++; if (bits > 8) { bytes = 2; val |= *buf++ << 8; } else bytes = 1; val <<= 16 - bits; #ifdef VERBOSE pr_debug("%s: write-%d =%04x\n", dev_name(&spi->dev), bits, val); #endif if (wait_uwire_csr_flag(CSRB, 0, 0)) goto eio; uwire_write_reg(UWIRE_TDR, val); /* start write */ val = START | w | (bits << 5); uwire_write_reg(UWIRE_CSR, val); len -= bytes; /* Wait till write actually starts. * This is needed with MPU clock 60+ MHz. * REVISIT: we may not have time to catch it... */ if (wait_uwire_csr_flag(CSRB, CSRB, 1)) goto eio; status += bytes; } /* REVISIT: save this for later to get more i/o overlap */ if (wait_uwire_csr_flag(CSRB, 0, 0)) goto eio; } else if (t->rx_buf) { u8 *buf = t->rx_buf; /* read one or two bytes at a time */ while (len) { if (bits > 8) { bytes = 2; } else bytes = 1; /* start read */ val = START | w | (bits << 0); uwire_write_reg(UWIRE_CSR, val); len -= bytes; /* Wait till read actually starts */ (void) wait_uwire_csr_flag(CSRB, CSRB, 1); if (wait_uwire_csr_flag(RDRB | CSRB, RDRB, 0)) goto eio; /* rx bit 0 is last received; multibyte words will * be properly byteswapped on the way to memory. 
*/ val = uwire_read_reg(UWIRE_RDR); val &= (1 << bits) - 1; *buf++ = (u8) val; if (bytes == 2) *buf++ = val >> 8; status += bytes; #ifdef VERBOSE pr_debug("%s: read-%d =%04x\n", dev_name(&spi->dev), bits, val); #endif } } return status; eio: return -EIO; } static int uwire_setup_transfer(struct spi_device *spi, struct spi_transfer *t) { struct uwire_state *ust = spi->controller_state; struct uwire_spi *uwire; unsigned flags = 0; unsigned bits; unsigned hz; unsigned long rate; int div1_idx; int div1; int div2; int status; uwire = spi_master_get_devdata(spi->master); if (spi->chip_select > 3) { pr_debug("%s: cs%d?\n", dev_name(&spi->dev), spi->chip_select); status = -ENODEV; goto done; } bits = spi->bits_per_word; if (t != NULL && t->bits_per_word) bits = t->bits_per_word; if (bits > 16) { pr_debug("%s: wordsize %d?\n", dev_name(&spi->dev), bits); status = -ENODEV; goto done; } ust->bits_per_word = bits; /* mode 0..3, clock inverted separately; * standard nCS signaling; * don't treat DI=high as "not ready" */ if (spi->mode & SPI_CS_HIGH) flags |= UWIRE_CS_ACTIVE_HIGH; if (spi->mode & SPI_CPOL) flags |= UWIRE_CLK_INVERTED; switch (spi->mode & (SPI_CPOL | SPI_CPHA)) { case SPI_MODE_0: case SPI_MODE_3: flags |= UWIRE_WRITE_FALLING_EDGE | UWIRE_READ_RISING_EDGE; break; case SPI_MODE_1: case SPI_MODE_2: flags |= UWIRE_WRITE_RISING_EDGE | UWIRE_READ_FALLING_EDGE; break; } /* assume it's already enabled */ rate = clk_get_rate(uwire->ck); hz = spi->max_speed_hz; if (t != NULL && t->speed_hz) hz = t->speed_hz; if (!hz) { pr_debug("%s: zero speed?\n", dev_name(&spi->dev)); status = -EINVAL; goto done; } /* F_INT = mpu_xor_clk / DIV1 */ for (div1_idx = 0; div1_idx < 4; div1_idx++) { switch (div1_idx) { case 0: div1 = 2; break; case 1: div1 = 4; break; case 2: div1 = 7; break; default: case 3: div1 = 10; break; } div2 = (rate / div1 + hz - 1) / hz; if (div2 <= 8) break; } if (div1_idx == 4) { pr_debug("%s: lowest clock %ld, need %d\n", dev_name(&spi->dev), rate / 10 / 8, hz); 
status = -EDOM; goto done; } /* we have to cache this and reset in uwire_chipselect as this is a * global parameter and another uwire device can change it under * us */ ust->div1_idx = div1_idx; uwire_set_clk1_div(div1_idx); rate /= div1; switch (div2) { case 0: case 1: case 2: flags |= UWIRE_FREQ_DIV_2; rate /= 2; break; case 3: case 4: flags |= UWIRE_FREQ_DIV_4; rate /= 4; break; case 5: case 6: case 7: case 8: flags |= UWIRE_FREQ_DIV_8; rate /= 8; break; } omap_uwire_configure_mode(spi->chip_select, flags); pr_debug("%s: uwire flags %02x, armxor %lu KHz, SCK %lu KHz\n", __func__, flags, clk_get_rate(uwire->ck) / 1000, rate / 1000); status = 0; done: return status; } static int uwire_setup(struct spi_device *spi) { struct uwire_state *ust = spi->controller_state; if (ust == NULL) { ust = kzalloc(sizeof(*ust), GFP_KERNEL); if (ust == NULL) return -ENOMEM; spi->controller_state = ust; } return uwire_setup_transfer(spi, NULL); } static void uwire_cleanup(struct spi_device *spi) { kfree(spi->controller_state); } static void uwire_off(struct uwire_spi *uwire) { uwire_write_reg(UWIRE_SR3, 0); clk_disable(uwire->ck); clk_put(uwire->ck); spi_master_put(uwire->bitbang.master); } static int uwire_probe(struct platform_device *pdev) { struct spi_master *master; struct uwire_spi *uwire; int status; master = spi_alloc_master(&pdev->dev, sizeof *uwire); if (!master) return -ENODEV; uwire = spi_master_get_devdata(master); uwire_base = ioremap(UWIRE_BASE_PHYS, UWIRE_IO_SIZE); if (!uwire_base) { dev_dbg(&pdev->dev, "can't ioremap UWIRE\n"); spi_master_put(master); return -ENOMEM; } dev_set_drvdata(&pdev->dev, uwire); uwire->ck = clk_get(&pdev->dev, "fck"); if (IS_ERR(uwire->ck)) { status = PTR_ERR(uwire->ck); dev_dbg(&pdev->dev, "no functional clock?\n"); spi_master_put(master); return status; } clk_enable(uwire->ck); if (cpu_is_omap7xx()) uwire_idx_shift = 1; else uwire_idx_shift = 2; uwire_write_reg(UWIRE_SR3, 1); /* the spi->mode bits understood by this driver: */ 
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; master->flags = SPI_MASTER_HALF_DUPLEX; master->bus_num = 2; /* "official" */ master->num_chipselect = 4; master->setup = uwire_setup; master->cleanup = uwire_cleanup; uwire->bitbang.master = master; uwire->bitbang.chipselect = uwire_chipselect; uwire->bitbang.setup_transfer = uwire_setup_transfer; uwire->bitbang.txrx_bufs = uwire_txrx; status = spi_bitbang_start(&uwire->bitbang); if (status < 0) { uwire_off(uwire); iounmap(uwire_base); } return status; } static int uwire_remove(struct platform_device *pdev) { struct uwire_spi *uwire = dev_get_drvdata(&pdev->dev); int status; // FIXME remove all child devices, somewhere ... status = spi_bitbang_stop(&uwire->bitbang); uwire_off(uwire); iounmap(uwire_base); return status; } /* work with hotplug and coldplug */ MODULE_ALIAS("platform:omap_uwire"); static struct platform_driver uwire_driver = { .driver = { .name = "omap_uwire", .owner = THIS_MODULE, }, .remove = uwire_remove, // suspend ... unuse ck // resume ... use ck }; static int __init omap_uwire_init(void) { /* FIXME move these into the relevant board init code. also, include * H3 support; it uses tsc2101 like H2 (on a different chipselect). */ if (machine_is_omap_h2()) { /* defaults: W21 SDO, U18 SDI, V19 SCL */ omap_cfg_reg(N14_1610_UWIRE_CS0); omap_cfg_reg(N15_1610_UWIRE_CS1); } if (machine_is_omap_perseus2()) { /* configure pins: MPU_UW_nSCS1, MPU_UW_SDO, MPU_UW_SCLK */ int val = omap_readl(OMAP7XX_IO_CONF_9) & ~0x00EEE000; omap_writel(val | 0x00AAA000, OMAP7XX_IO_CONF_9); } return platform_driver_probe(&uwire_driver, uwire_probe); } static void __exit omap_uwire_exit(void) { platform_driver_unregister(&uwire_driver); } subsys_initcall(omap_uwire_init); module_exit(omap_uwire_exit); MODULE_LICENSE("GPL");
gpl-2.0
djvoleur/S6_UniKernel
drivers/staging/sbe-2t3e3/cpld.c
2405
8417
/* * SBE 2T3E3 synchronous serial card driver for Linux * * Copyright (C) 2009-2010 Krzysztof Halasa <khc@pm.waw.pl> * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License * as published by the Free Software Foundation. * * This code is based on a driver written by SBE Inc. */ #include <linux/delay.h> #include "2t3e3.h" #include "ctrl.h" #define bootrom_set_bit(sc, reg, bit) \ bootrom_write((sc), (reg), \ bootrom_read((sc), (reg)) | (bit)) #define bootrom_clear_bit(sc, reg, bit) \ bootrom_write((sc), (reg), \ bootrom_read((sc), (reg)) & ~(bit)) static inline void cpld_set_bit(struct channel *channel, unsigned reg, u32 bit) { unsigned long flags; spin_lock_irqsave(&channel->card->bootrom_lock, flags); bootrom_set_bit(channel, CPLD_MAP_REG(reg, channel), bit); spin_unlock_irqrestore(&channel->card->bootrom_lock, flags); } static inline void cpld_clear_bit(struct channel *channel, unsigned reg, u32 bit) { unsigned long flags; spin_lock_irqsave(&channel->card->bootrom_lock, flags); bootrom_clear_bit(channel, CPLD_MAP_REG(reg, channel), bit); spin_unlock_irqrestore(&channel->card->bootrom_lock, flags); } void cpld_init(struct channel *sc) { u32 val; /* PCRA */ val = SBE_2T3E3_CPLD_VAL_CRC32 | cpld_val_map[SBE_2T3E3_CPLD_VAL_LOOP_TIMING_SOURCE][sc->h.slot]; cpld_write(sc, SBE_2T3E3_CPLD_REG_PCRA, val); /* PCRB */ val = 0; cpld_write(sc, SBE_2T3E3_CPLD_REG_PCRB, val); /* PCRC */ val = 0; cpld_write(sc, SBE_2T3E3_CPLD_REG_PCRC, val); /* PBWF */ val = 0; cpld_write(sc, SBE_2T3E3_CPLD_REG_PBWF, val); /* PBWL */ val = 0; cpld_write(sc, SBE_2T3E3_CPLD_REG_PBWL, val); /* PLTR */ val = SBE_2T3E3_CPLD_VAL_LCV_COUNTER; cpld_write(sc, SBE_2T3E3_CPLD_REG_PLTR, val); udelay(1000); /* PLCR */ val = 0; cpld_write(sc, SBE_2T3E3_CPLD_REG_PLCR, val); udelay(1000); /* PPFR */ val = 0x55; cpld_write(sc, SBE_2T3E3_CPLD_REG_PPFR, val); /* TODO: this doesn't work!!! 
*/ /* SERIAL_CHIP_SELECT */ val = 0; cpld_write(sc, SBE_2T3E3_CPLD_REG_SERIAL_CHIP_SELECT, val); /* PICSR */ val = SBE_2T3E3_CPLD_VAL_DMO_SIGNAL_DETECTED | SBE_2T3E3_CPLD_VAL_RECEIVE_LOSS_OF_LOCK_DETECTED | SBE_2T3E3_CPLD_VAL_RECEIVE_LOSS_OF_SIGNAL_DETECTED; cpld_write(sc, SBE_2T3E3_CPLD_REG_PICSR, val); cpld_start_intr(sc); udelay(1000); } void cpld_start_intr(struct channel *sc) { u32 val; /* PIER */ val = SBE_2T3E3_CPLD_VAL_INTERRUPT_FROM_ETHERNET_ENABLE | SBE_2T3E3_CPLD_VAL_INTERRUPT_FROM_FRAMER_ENABLE; cpld_write(sc, SBE_2T3E3_CPLD_REG_PIER, val); } void cpld_stop_intr(struct channel *sc) { u32 val; /* PIER */ val = 0; cpld_write(sc, SBE_2T3E3_CPLD_REG_PIER, val); } void cpld_set_frame_mode(struct channel *sc, u32 mode) { if (sc->p.frame_mode == mode) return; switch (mode) { case SBE_2T3E3_FRAME_MODE_HDLC: cpld_clear_bit(sc, SBE_2T3E3_CPLD_REG_PCRA, SBE_2T3E3_CPLD_VAL_TRANSPARENT_MODE | SBE_2T3E3_CPLD_VAL_RAW_MODE); exar7250_unipolar_onoff(sc, SBE_2T3E3_OFF); exar7300_unipolar_onoff(sc, SBE_2T3E3_OFF); break; case SBE_2T3E3_FRAME_MODE_TRANSPARENT: cpld_clear_bit(sc, SBE_2T3E3_CPLD_REG_PCRA, SBE_2T3E3_CPLD_VAL_RAW_MODE); cpld_set_bit(sc, SBE_2T3E3_CPLD_REG_PCRA, SBE_2T3E3_CPLD_VAL_TRANSPARENT_MODE); exar7250_unipolar_onoff(sc, SBE_2T3E3_OFF); exar7300_unipolar_onoff(sc, SBE_2T3E3_OFF); break; case SBE_2T3E3_FRAME_MODE_RAW: cpld_set_bit(sc, SBE_2T3E3_CPLD_REG_PCRA, SBE_2T3E3_CPLD_VAL_RAW_MODE); exar7250_unipolar_onoff(sc, SBE_2T3E3_ON); exar7300_unipolar_onoff(sc, SBE_2T3E3_ON); break; default: return; } sc->p.frame_mode = mode; } /* set rate of the local clock */ void cpld_set_frame_type(struct channel *sc, u32 type) { switch (type) { case SBE_2T3E3_FRAME_TYPE_E3_G751: case SBE_2T3E3_FRAME_TYPE_E3_G832: cpld_set_bit(sc, SBE_2T3E3_CPLD_REG_PCRA, SBE_2T3E3_CPLD_VAL_LOCAL_CLOCK_E3); break; case SBE_2T3E3_FRAME_TYPE_T3_CBIT: case SBE_2T3E3_FRAME_TYPE_T3_M13: cpld_clear_bit(sc, SBE_2T3E3_CPLD_REG_PCRA, SBE_2T3E3_CPLD_VAL_LOCAL_CLOCK_E3); break; default: return; } } 
void cpld_set_scrambler(struct channel *sc, u32 mode) { if (sc->p.scrambler == mode) return; switch (mode) { case SBE_2T3E3_SCRAMBLER_OFF: cpld_clear_bit(sc, SBE_2T3E3_CPLD_REG_PCRB, SBE_2T3E3_CPLD_VAL_SCRAMBLER_ENABLE); break; case SBE_2T3E3_SCRAMBLER_LARSCOM: cpld_clear_bit(sc, SBE_2T3E3_CPLD_REG_PCRB, SBE_2T3E3_CPLD_VAL_SCRAMBLER_TYPE); cpld_set_bit(sc, SBE_2T3E3_CPLD_REG_PCRB, SBE_2T3E3_CPLD_VAL_SCRAMBLER_ENABLE); break; case SBE_2T3E3_SCRAMBLER_ADC_KENTROX_DIGITAL: cpld_set_bit(sc, SBE_2T3E3_CPLD_REG_PCRB, SBE_2T3E3_CPLD_VAL_SCRAMBLER_TYPE); cpld_set_bit(sc, SBE_2T3E3_CPLD_REG_PCRB, SBE_2T3E3_CPLD_VAL_SCRAMBLER_ENABLE); break; default: return; } sc->p.scrambler = mode; } void cpld_set_crc(struct channel *sc, u32 crc) { if (sc->p.crc == crc) return; switch (crc) { case SBE_2T3E3_CRC_16: cpld_clear_bit(sc, SBE_2T3E3_CPLD_REG_PCRA, SBE_2T3E3_CPLD_VAL_CRC32); break; case SBE_2T3E3_CRC_32: cpld_set_bit(sc, SBE_2T3E3_CPLD_REG_PCRA, SBE_2T3E3_CPLD_VAL_CRC32); break; default: return; } sc->p.crc = crc; } void cpld_select_panel(struct channel *sc, u32 panel) { if (sc->p.panel == panel) return; switch (panel) { case SBE_2T3E3_PANEL_FRONT: cpld_clear_bit(sc, SBE_2T3E3_CPLD_REG_PCRA, SBE_2T3E3_CPLD_VAL_REAR_PANEL); break; case SBE_2T3E3_PANEL_REAR: cpld_set_bit(sc, SBE_2T3E3_CPLD_REG_PCRA, SBE_2T3E3_CPLD_VAL_REAR_PANEL); break; default: return; } udelay(100); sc->p.panel = panel; } extern void cpld_set_clock(struct channel *sc, u32 mode) { if (sc->p.clock_source == mode) return; switch (mode) { case SBE_2T3E3_TIMING_LOCAL: cpld_set_bit(sc, SBE_2T3E3_CPLD_REG_PCRA, SBE_2T3E3_CPLD_VAL_ALT); break; case SBE_2T3E3_TIMING_LOOP: cpld_clear_bit(sc, SBE_2T3E3_CPLD_REG_PCRA, SBE_2T3E3_CPLD_VAL_ALT); break; default: return; } sc->p.clock_source = mode; } void cpld_set_pad_count(struct channel *sc, u32 count) { u32 val; if (sc->p.pad_count == count) return; switch (count) { case SBE_2T3E3_PAD_COUNT_1: val = SBE_2T3E3_CPLD_VAL_PAD_COUNT_1; break; case SBE_2T3E3_PAD_COUNT_2: val = 
SBE_2T3E3_CPLD_VAL_PAD_COUNT_2; break; case SBE_2T3E3_PAD_COUNT_3: val = SBE_2T3E3_CPLD_VAL_PAD_COUNT_3; break; case SBE_2T3E3_PAD_COUNT_4: val = SBE_2T3E3_CPLD_VAL_PAD_COUNT_4; break; default: return; } cpld_clear_bit(sc, SBE_2T3E3_CPLD_REG_PCRB, SBE_2T3E3_CPLD_VAL_PAD_COUNT); cpld_set_bit(sc, SBE_2T3E3_CPLD_REG_PCRB, val); sc->p.pad_count = count; } void cpld_LOS_update(struct channel *sc) { u_int8_t los; cpld_write(sc, SBE_2T3E3_CPLD_REG_PICSR, SBE_2T3E3_CPLD_VAL_DMO_SIGNAL_DETECTED | SBE_2T3E3_CPLD_VAL_RECEIVE_LOSS_OF_LOCK_DETECTED | SBE_2T3E3_CPLD_VAL_RECEIVE_LOSS_OF_SIGNAL_DETECTED); los = cpld_read(sc, SBE_2T3E3_CPLD_REG_PICSR) & SBE_2T3E3_CPLD_VAL_RECEIVE_LOSS_OF_SIGNAL_DETECTED; if (los != sc->s.LOS) dev_info(&sc->pdev->dev, "SBE 2T3E3: LOS status: %s\n", los ? "Loss of signal" : "Signal OK"); sc->s.LOS = los; } void cpld_set_fractional_mode(struct channel *sc, u32 mode, u32 start, u32 stop) { if (mode == SBE_2T3E3_FRACTIONAL_MODE_NONE) { start = 0; stop = 0; } if (sc->p.fractional_mode == mode && sc->p.bandwidth_start == start && sc->p.bandwidth_stop == stop) return; switch (mode) { case SBE_2T3E3_FRACTIONAL_MODE_NONE: cpld_write(sc, SBE_2T3E3_CPLD_REG_PCRC, SBE_2T3E3_CPLD_VAL_FRACTIONAL_MODE_NONE); break; case SBE_2T3E3_FRACTIONAL_MODE_0: cpld_write(sc, SBE_2T3E3_CPLD_REG_PCRC, SBE_2T3E3_CPLD_VAL_FRACTIONAL_MODE_0); break; case SBE_2T3E3_FRACTIONAL_MODE_1: cpld_write(sc, SBE_2T3E3_CPLD_REG_PCRC, SBE_2T3E3_CPLD_VAL_FRACTIONAL_MODE_1); break; case SBE_2T3E3_FRACTIONAL_MODE_2: cpld_write(sc, SBE_2T3E3_CPLD_REG_PCRC, SBE_2T3E3_CPLD_VAL_FRACTIONAL_MODE_2); break; default: netdev_err(sc->dev, "wrong mode in set_fractional_mode\n"); return; } cpld_write(sc, SBE_2T3E3_CPLD_REG_PBWF, start); cpld_write(sc, SBE_2T3E3_CPLD_REG_PBWL, stop); sc->p.fractional_mode = mode; sc->p.bandwidth_start = start; sc->p.bandwidth_stop = stop; }
gpl-2.0
Elite-Kernels/HTC-10
sound/pci/ctxfi/cthardware.c
2405
1707
/** * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved. * * This source file is released under GPL v2 license (no other versions). * See the COPYING file included in the main directory of this source * distribution for the license terms and conditions. * * @File cthardware.c * * @Brief * This file contains the implementation of hardware access methord. * * @Author Liu Chun * @Date Jun 26 2008 * */ #include "cthardware.h" #include "cthw20k1.h" #include "cthw20k2.h" #include <linux/bug.h> int create_hw_obj(struct pci_dev *pci, enum CHIPTYP chip_type, enum CTCARDS model, struct hw **rhw) { int err; switch (chip_type) { case ATC20K1: err = create_20k1_hw_obj(rhw); break; case ATC20K2: err = create_20k2_hw_obj(rhw); break; default: err = -ENODEV; break; } if (err) return err; (*rhw)->pci = pci; (*rhw)->chip_type = chip_type; (*rhw)->model = model; return 0; } int destroy_hw_obj(struct hw *hw) { int err; switch (hw->pci->device) { case 0x0005: /* 20k1 device */ err = destroy_20k1_hw_obj(hw); break; case 0x000B: /* 20k2 device */ err = destroy_20k2_hw_obj(hw); break; default: err = -ENODEV; break; } return err; } unsigned int get_field(unsigned int data, unsigned int field) { int i; if (WARN_ON(!field)) return 0; /* @field should always be greater than 0 */ for (i = 0; !(field & (1 << i)); ) i++; return (data & field) >> i; } void set_field(unsigned int *data, unsigned int field, unsigned int value) { int i; if (WARN_ON(!field)) return; /* @field should always be greater than 0 */ for (i = 0; !(field & (1 << i)); ) i++; *data = (*data & (~field)) | ((value << i) & field); }
gpl-2.0
SimonSickle/android_kernel_htc_primou
drivers/net/sun3_82586.c
3173
33385
/* * Sun3 i82586 Ethernet driver * * Cloned from ni52.c for the Sun3 by Sam Creasey (sammy@sammy.net) * * Original copyright follows: * -------------------------- * * net-3-driver for the NI5210 card (i82586 Ethernet chip) * * This is an extension to the Linux operating system, and is covered by the * same Gnu Public License that covers that work. * * Alphacode 0.82 (96/09/29) for Linux 2.0.0 (or later) * Copyrights (c) 1994,1995,1996 by M.Hipp (hippm@informatik.uni-tuebingen.de) * -------------------------- * * Consult ni52.c for further notes from the original driver. * * This incarnation currently supports the OBIO version of the i82586 chip * used in certain sun3 models. It should be fairly doable to expand this * to support VME if I should every acquire such a board. * */ static int debuglevel = 0; /* debug-printk 0: off 1: a few 2: more */ static int automatic_resume = 0; /* experimental .. better should be zero */ static int rfdadd = 0; /* rfdadd=1 may be better for 8K MEM cards */ static int fifo=0x8; /* don't change */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/bitops.h> #include <asm/io.h> #include <asm/idprom.h> #include <asm/machines.h> #include <asm/sun3mmu.h> #include <asm/dvma.h> #include <asm/byteorder.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include "sun3_82586.h" #define DRV_NAME "sun3_82586" #define DEBUG /* debug on */ #define SYSBUSVAL 0 /* 16 Bit */ #define SUN3_82586_TOTAL_SIZE PAGE_SIZE #define sun3_attn586() {*(volatile unsigned char *)(dev->base_addr) |= IEOB_ATTEN; *(volatile unsigned char *)(dev->base_addr) &= ~IEOB_ATTEN;} #define sun3_reset586() {*(volatile unsigned char *)(dev->base_addr) = 0; udelay(100); *(volatile unsigned char *)(dev->base_addr) = IEOB_NORSET;} #define sun3_disint() {*(volatile unsigned 
char *)(dev->base_addr) &= ~IEOB_IENAB;} #define sun3_enaint() {*(volatile unsigned char *)(dev->base_addr) |= IEOB_IENAB;} #define sun3_active() {*(volatile unsigned char *)(dev->base_addr) |= (IEOB_IENAB|IEOB_ONAIR|IEOB_NORSET);} #define make32(ptr16) (p->memtop + (swab16((unsigned short) (ptr16))) ) #define make24(ptr32) (char *)swab32(( ((unsigned long) (ptr32)) - p->base)) #define make16(ptr32) (swab16((unsigned short) ((unsigned long)(ptr32) - (unsigned long) p->memtop ))) /******************* how to calculate the buffers ***************************** * IMPORTANT NOTE: if you configure only one NUM_XMIT_BUFFS, the driver works * --------------- in a different (more stable?) mode. Only in this mode it's * possible to configure the driver with 'NO_NOPCOMMANDS' sizeof(scp)=12; sizeof(scb)=16; sizeof(iscp)=8; sizeof(scp)+sizeof(iscp)+sizeof(scb) = 36 = INIT sizeof(rfd) = 24; sizeof(rbd) = 12; sizeof(tbd) = 8; sizeof(transmit_cmd) = 16; sizeof(nop_cmd) = 8; * if you don't know the driver, better do not change these values: */ #define RECV_BUFF_SIZE 1536 /* slightly oversized */ #define XMIT_BUFF_SIZE 1536 /* slightly oversized */ #define NUM_XMIT_BUFFS 1 /* config for 32K shmem */ #define NUM_RECV_BUFFS_8 4 /* config for 32K shared mem */ #define NUM_RECV_BUFFS_16 9 /* config for 32K shared mem */ #define NUM_RECV_BUFFS_32 16 /* config for 32K shared mem */ #define NO_NOPCOMMANDS /* only possible with NUM_XMIT_BUFFS=1 */ /**************************************************************************/ /* different DELAYs */ #define DELAY(x) mdelay(32 * x); #define DELAY_16(); { udelay(16); } #define DELAY_18(); { udelay(4); } /* wait for command with timeout: */ #define WAIT_4_SCB_CMD() \ { int i; \ for(i=0;i<16384;i++) { \ if(!p->scb->cmd_cuc) break; \ DELAY_18(); \ if(i == 16383) { \ printk("%s: scb_cmd timed out: %04x,%04x .. 
disabling i82586!!\n",dev->name,p->scb->cmd_cuc,p->scb->cus); \ if(!p->reseted) { p->reseted = 1; sun3_reset586(); } } } } #define WAIT_4_SCB_CMD_RUC() { int i; \ for(i=0;i<16384;i++) { \ if(!p->scb->cmd_ruc) break; \ DELAY_18(); \ if(i == 16383) { \ printk("%s: scb_cmd (ruc) timed out: %04x,%04x .. disabling i82586!!\n",dev->name,p->scb->cmd_ruc,p->scb->rus); \ if(!p->reseted) { p->reseted = 1; sun3_reset586(); } } } } #define WAIT_4_STAT_COMPL(addr) { int i; \ for(i=0;i<32767;i++) { \ if(swab16((addr)->cmd_status) & STAT_COMPL) break; \ DELAY_16(); DELAY_16(); } } static int sun3_82586_probe1(struct net_device *dev,int ioaddr); static irqreturn_t sun3_82586_interrupt(int irq,void *dev_id); static int sun3_82586_open(struct net_device *dev); static int sun3_82586_close(struct net_device *dev); static int sun3_82586_send_packet(struct sk_buff *,struct net_device *); static struct net_device_stats *sun3_82586_get_stats(struct net_device *dev); static void set_multicast_list(struct net_device *dev); static void sun3_82586_timeout(struct net_device *dev); #if 0 static void sun3_82586_dump(struct net_device *,void *); #endif /* helper-functions */ static int init586(struct net_device *dev); static int check586(struct net_device *dev,char *where,unsigned size); static void alloc586(struct net_device *dev); static void startrecv586(struct net_device *dev); static void *alloc_rfa(struct net_device *dev,void *ptr); static void sun3_82586_rcv_int(struct net_device *dev); static void sun3_82586_xmt_int(struct net_device *dev); static void sun3_82586_rnr_int(struct net_device *dev); struct priv { unsigned long base; char *memtop; long int lock; int reseted; volatile struct rfd_struct *rfd_last,*rfd_top,*rfd_first; volatile struct scp_struct *scp; /* volatile is important */ volatile struct iscp_struct *iscp; /* volatile is important */ volatile struct scb_struct *scb; /* volatile is important */ volatile struct tbd_struct *xmit_buffs[NUM_XMIT_BUFFS]; volatile struct 
transmit_cmd_struct *xmit_cmds[NUM_XMIT_BUFFS]; #if (NUM_XMIT_BUFFS == 1) volatile struct nop_cmd_struct *nop_cmds[2]; #else volatile struct nop_cmd_struct *nop_cmds[NUM_XMIT_BUFFS]; #endif volatile int nop_point,num_recv_buffs; volatile char *xmit_cbuffs[NUM_XMIT_BUFFS]; volatile int xmit_count,xmit_last; }; /********************************************** * close device */ static int sun3_82586_close(struct net_device *dev) { free_irq(dev->irq, dev); sun3_reset586(); /* the hard way to stop the receiver */ netif_stop_queue(dev); return 0; } /********************************************** * open device */ static int sun3_82586_open(struct net_device *dev) { int ret; sun3_disint(); alloc586(dev); init586(dev); startrecv586(dev); sun3_enaint(); ret = request_irq(dev->irq, sun3_82586_interrupt,0,dev->name,dev); if (ret) { sun3_reset586(); return ret; } netif_start_queue(dev); return 0; /* most done by init */ } /********************************************** * Check to see if there's an 82586 out there. */ static int check586(struct net_device *dev,char *where,unsigned size) { struct priv pb; struct priv *p = &pb; char *iscp_addr; int i; p->base = (unsigned long) dvma_btov(0); p->memtop = (char *)dvma_btov((unsigned long)where); p->scp = (struct scp_struct *)(p->base + SCP_DEFAULT_ADDRESS); memset((char *)p->scp,0, sizeof(struct scp_struct)); for(i=0;i<sizeof(struct scp_struct);i++) /* memory was writeable? */ if(((char *)p->scp)[i]) return 0; p->scp->sysbus = SYSBUSVAL; /* 1 = 8Bit-Bus, 0 = 16 Bit */ if(p->scp->sysbus != SYSBUSVAL) return 0; iscp_addr = (char *)dvma_btov((unsigned long)where); p->iscp = (struct iscp_struct *) iscp_addr; memset((char *)p->iscp,0, sizeof(struct iscp_struct)); p->scp->iscp = make24(p->iscp); p->iscp->busy = 1; sun3_reset586(); sun3_attn586(); DELAY(1); /* wait a while... 
*/ if(p->iscp->busy) /* i82586 clears 'busy' after successful init */ return 0; return 1; } /****************************************************************** * set iscp at the right place, called by sun3_82586_probe1 and open586. */ static void alloc586(struct net_device *dev) { struct priv *p = netdev_priv(dev); sun3_reset586(); DELAY(1); p->scp = (struct scp_struct *) (p->base + SCP_DEFAULT_ADDRESS); p->iscp = (struct iscp_struct *) dvma_btov(dev->mem_start); p->scb = (struct scb_struct *) ((char *)p->iscp + sizeof(struct iscp_struct)); memset((char *) p->iscp,0,sizeof(struct iscp_struct)); memset((char *) p->scp ,0,sizeof(struct scp_struct)); p->scp->iscp = make24(p->iscp); p->scp->sysbus = SYSBUSVAL; p->iscp->scb_offset = make16(p->scb); p->iscp->scb_base = make24(dvma_btov(dev->mem_start)); p->iscp->busy = 1; sun3_reset586(); sun3_attn586(); DELAY(1); if(p->iscp->busy) printk("%s: Init-Problems (alloc).\n",dev->name); p->reseted = 0; memset((char *)p->scb,0,sizeof(struct scb_struct)); } struct net_device * __init sun3_82586_probe(int unit) { struct net_device *dev; unsigned long ioaddr; static int found = 0; int err = -ENOMEM; /* check that this machine has an onboard 82586 */ switch(idprom->id_machtype) { case SM_SUN3|SM_3_160: case SM_SUN3|SM_3_260: /* these machines have 82586 */ break; default: return ERR_PTR(-ENODEV); } if (found) return ERR_PTR(-ENODEV); ioaddr = (unsigned long)ioremap(IE_OBIO, SUN3_82586_TOTAL_SIZE); if (!ioaddr) return ERR_PTR(-ENOMEM); found = 1; dev = alloc_etherdev(sizeof(struct priv)); if (!dev) goto out; if (unit >= 0) { sprintf(dev->name, "eth%d", unit); netdev_boot_setup_check(dev); } dev->irq = IE_IRQ; dev->base_addr = ioaddr; err = sun3_82586_probe1(dev, ioaddr); if (err) goto out1; err = register_netdev(dev); if (err) goto out2; return dev; out2: release_region(ioaddr, SUN3_82586_TOTAL_SIZE); out1: free_netdev(dev); out: iounmap((void __iomem *)ioaddr); return ERR_PTR(err); } static const struct net_device_ops 
sun3_82586_netdev_ops = { .ndo_open = sun3_82586_open, .ndo_stop = sun3_82586_close, .ndo_start_xmit = sun3_82586_send_packet, .ndo_set_multicast_list = set_multicast_list, .ndo_tx_timeout = sun3_82586_timeout, .ndo_get_stats = sun3_82586_get_stats, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, .ndo_change_mtu = eth_change_mtu, }; static int __init sun3_82586_probe1(struct net_device *dev,int ioaddr) { int i, size, retval; if (!request_region(ioaddr, SUN3_82586_TOTAL_SIZE, DRV_NAME)) return -EBUSY; /* copy in the ethernet address from the prom */ for(i = 0; i < 6 ; i++) dev->dev_addr[i] = idprom->id_ethaddr[i]; printk("%s: SUN3 Intel 82586 found at %lx, ",dev->name,dev->base_addr); /* * check (or search) IO-Memory, 32K */ size = 0x8000; dev->mem_start = (unsigned long)dvma_malloc_align(0x8000, 0x1000); dev->mem_end = dev->mem_start + size; if(size != 0x2000 && size != 0x4000 && size != 0x8000) { printk("\n%s: Illegal memory size %d. Allowed is 0x2000 or 0x4000 or 0x8000 bytes.\n",dev->name,size); retval = -ENODEV; goto out; } if(!check586(dev,(char *) dev->mem_start,size)) { printk("?memcheck, Can't find memory at 0x%lx with size %d!\n",dev->mem_start,size); retval = -ENODEV; goto out; } ((struct priv *)netdev_priv(dev))->memtop = (char *)dvma_btov(dev->mem_start); ((struct priv *)netdev_priv(dev))->base = (unsigned long) dvma_btov(0); alloc586(dev); /* set number of receive-buffs according to memsize */ if(size == 0x2000) ((struct priv *)netdev_priv(dev))->num_recv_buffs = NUM_RECV_BUFFS_8; else if(size == 0x4000) ((struct priv *)netdev_priv(dev))->num_recv_buffs = NUM_RECV_BUFFS_16; else ((struct priv *)netdev_priv(dev))->num_recv_buffs = NUM_RECV_BUFFS_32; printk("Memaddr: 0x%lx, Memsize: %d, IRQ %d\n",dev->mem_start,size, dev->irq); dev->netdev_ops = &sun3_82586_netdev_ops; dev->watchdog_timeo = HZ/20; dev->if_port = 0; return 0; out: release_region(ioaddr, SUN3_82586_TOTAL_SIZE); return retval; } static int init586(struct 
net_device *dev) { void *ptr; int i,result=0; struct priv *p = netdev_priv(dev); volatile struct configure_cmd_struct *cfg_cmd; volatile struct iasetup_cmd_struct *ias_cmd; volatile struct tdr_cmd_struct *tdr_cmd; volatile struct mcsetup_cmd_struct *mc_cmd; struct netdev_hw_addr *ha; int num_addrs=netdev_mc_count(dev); ptr = (void *) ((char *)p->scb + sizeof(struct scb_struct)); cfg_cmd = (struct configure_cmd_struct *)ptr; /* configure-command */ cfg_cmd->cmd_status = 0; cfg_cmd->cmd_cmd = swab16(CMD_CONFIGURE | CMD_LAST); cfg_cmd->cmd_link = 0xffff; cfg_cmd->byte_cnt = 0x0a; /* number of cfg bytes */ cfg_cmd->fifo = fifo; /* fifo-limit (8=tx:32/rx:64) */ cfg_cmd->sav_bf = 0x40; /* hold or discard bad recv frames (bit 7) */ cfg_cmd->adr_len = 0x2e; /* addr_len |!src_insert |pre-len |loopback */ cfg_cmd->priority = 0x00; cfg_cmd->ifs = 0x60; cfg_cmd->time_low = 0x00; cfg_cmd->time_high = 0xf2; cfg_cmd->promisc = 0; if(dev->flags & IFF_ALLMULTI) { int len = ((char *) p->iscp - (char *) ptr - 8) / 6; if(num_addrs > len) { printk("%s: switching to promisc. 
mode\n",dev->name); cfg_cmd->promisc = 1; } } if(dev->flags&IFF_PROMISC) cfg_cmd->promisc = 1; cfg_cmd->carr_coll = 0x00; p->scb->cbl_offset = make16(cfg_cmd); p->scb->cmd_ruc = 0; p->scb->cmd_cuc = CUC_START; /* cmd.-unit start */ sun3_attn586(); WAIT_4_STAT_COMPL(cfg_cmd); if((swab16(cfg_cmd->cmd_status) & (STAT_OK|STAT_COMPL)) != (STAT_COMPL|STAT_OK)) { printk("%s: configure command failed: %x\n",dev->name,swab16(cfg_cmd->cmd_status)); return 1; } /* * individual address setup */ ias_cmd = (struct iasetup_cmd_struct *)ptr; ias_cmd->cmd_status = 0; ias_cmd->cmd_cmd = swab16(CMD_IASETUP | CMD_LAST); ias_cmd->cmd_link = 0xffff; memcpy((char *)&ias_cmd->iaddr,(char *) dev->dev_addr,ETH_ALEN); p->scb->cbl_offset = make16(ias_cmd); p->scb->cmd_cuc = CUC_START; /* cmd.-unit start */ sun3_attn586(); WAIT_4_STAT_COMPL(ias_cmd); if((swab16(ias_cmd->cmd_status) & (STAT_OK|STAT_COMPL)) != (STAT_OK|STAT_COMPL)) { printk("%s (82586): individual address setup command failed: %04x\n",dev->name,swab16(ias_cmd->cmd_status)); return 1; } /* * TDR, wire check .. e.g. no resistor e.t.c */ tdr_cmd = (struct tdr_cmd_struct *)ptr; tdr_cmd->cmd_status = 0; tdr_cmd->cmd_cmd = swab16(CMD_TDR | CMD_LAST); tdr_cmd->cmd_link = 0xffff; tdr_cmd->status = 0; p->scb->cbl_offset = make16(tdr_cmd); p->scb->cmd_cuc = CUC_START; /* cmd.-unit start */ sun3_attn586(); WAIT_4_STAT_COMPL(tdr_cmd); if(!(swab16(tdr_cmd->cmd_status) & STAT_COMPL)) { printk("%s: Problems while running the TDR.\n",dev->name); } else { DELAY_16(); /* wait for result */ result = swab16(tdr_cmd->status); p->scb->cmd_cuc = p->scb->cus & STAT_MASK; sun3_attn586(); /* ack the interrupts */ if(result & TDR_LNK_OK) ; else if(result & TDR_XCVR_PRB) printk("%s: TDR: Transceiver problem. 
Check the cable(s)!\n",dev->name); else if(result & TDR_ET_OPN) printk("%s: TDR: No correct termination %d clocks away.\n",dev->name,result & TDR_TIMEMASK); else if(result & TDR_ET_SRT) { if (result & TDR_TIMEMASK) /* time == 0 -> strange :-) */ printk("%s: TDR: Detected a short circuit %d clocks away.\n",dev->name,result & TDR_TIMEMASK); } else printk("%s: TDR: Unknown status %04x\n",dev->name,result); } /* * Multicast setup */ if(num_addrs && !(dev->flags & IFF_PROMISC) ) { mc_cmd = (struct mcsetup_cmd_struct *) ptr; mc_cmd->cmd_status = 0; mc_cmd->cmd_cmd = swab16(CMD_MCSETUP | CMD_LAST); mc_cmd->cmd_link = 0xffff; mc_cmd->mc_cnt = swab16(num_addrs * 6); i = 0; netdev_for_each_mc_addr(ha, dev) memcpy((char *) mc_cmd->mc_list[i++], ha->addr, ETH_ALEN); p->scb->cbl_offset = make16(mc_cmd); p->scb->cmd_cuc = CUC_START; sun3_attn586(); WAIT_4_STAT_COMPL(mc_cmd); if( (swab16(mc_cmd->cmd_status) & (STAT_COMPL|STAT_OK)) != (STAT_COMPL|STAT_OK) ) printk("%s: Can't apply multicast-address-list.\n",dev->name); } /* * alloc nop/xmit-cmds */ #if (NUM_XMIT_BUFFS == 1) for(i=0;i<2;i++) { p->nop_cmds[i] = (struct nop_cmd_struct *)ptr; p->nop_cmds[i]->cmd_cmd = swab16(CMD_NOP); p->nop_cmds[i]->cmd_status = 0; p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i])); ptr = (char *) ptr + sizeof(struct nop_cmd_struct); } #else for(i=0;i<NUM_XMIT_BUFFS;i++) { p->nop_cmds[i] = (struct nop_cmd_struct *)ptr; p->nop_cmds[i]->cmd_cmd = swab16(CMD_NOP); p->nop_cmds[i]->cmd_status = 0; p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i])); ptr = (char *) ptr + sizeof(struct nop_cmd_struct); } #endif ptr = alloc_rfa(dev,(void *)ptr); /* init receive-frame-area */ /* * alloc xmit-buffs / init xmit_cmds */ for(i=0;i<NUM_XMIT_BUFFS;i++) { p->xmit_cmds[i] = (struct transmit_cmd_struct *)ptr; /*transmit cmd/buff 0*/ ptr = (char *) ptr + sizeof(struct transmit_cmd_struct); p->xmit_cbuffs[i] = (char *)ptr; /* char-buffs */ ptr = (char *) ptr + XMIT_BUFF_SIZE; p->xmit_buffs[i] = (struct tbd_struct 
*)ptr; /* TBD */ ptr = (char *) ptr + sizeof(struct tbd_struct); if((void *)ptr > (void *)dev->mem_end) { printk("%s: not enough shared-mem for your configuration!\n",dev->name); return 1; } memset((char *)(p->xmit_cmds[i]) ,0, sizeof(struct transmit_cmd_struct)); memset((char *)(p->xmit_buffs[i]),0, sizeof(struct tbd_struct)); p->xmit_cmds[i]->cmd_link = make16(p->nop_cmds[(i+1)%NUM_XMIT_BUFFS]); p->xmit_cmds[i]->cmd_status = swab16(STAT_COMPL); p->xmit_cmds[i]->cmd_cmd = swab16(CMD_XMIT | CMD_INT); p->xmit_cmds[i]->tbd_offset = make16((p->xmit_buffs[i])); p->xmit_buffs[i]->next = 0xffff; p->xmit_buffs[i]->buffer = make24((p->xmit_cbuffs[i])); } p->xmit_count = 0; p->xmit_last = 0; #ifndef NO_NOPCOMMANDS p->nop_point = 0; #endif /* * 'start transmitter' */ #ifndef NO_NOPCOMMANDS p->scb->cbl_offset = make16(p->nop_cmds[0]); p->scb->cmd_cuc = CUC_START; sun3_attn586(); WAIT_4_SCB_CMD(); #else p->xmit_cmds[0]->cmd_link = make16(p->xmit_cmds[0]); p->xmit_cmds[0]->cmd_cmd = swab16(CMD_XMIT | CMD_SUSPEND | CMD_INT); #endif /* * ack. interrupts */ p->scb->cmd_cuc = p->scb->cus & STAT_MASK; sun3_attn586(); DELAY_16(); sun3_enaint(); sun3_active(); return 0; } /****************************************************** * This is a helper routine for sun3_82586_rnr_int() and init586(). * It sets up the Receive Frame Area (RFA). 
*/ static void *alloc_rfa(struct net_device *dev,void *ptr) { volatile struct rfd_struct *rfd = (struct rfd_struct *)ptr; volatile struct rbd_struct *rbd; int i; struct priv *p = netdev_priv(dev); memset((char *) rfd,0,sizeof(struct rfd_struct)*(p->num_recv_buffs+rfdadd)); p->rfd_first = rfd; for(i = 0; i < (p->num_recv_buffs+rfdadd); i++) { rfd[i].next = make16(rfd + (i+1) % (p->num_recv_buffs+rfdadd) ); rfd[i].rbd_offset = 0xffff; } rfd[p->num_recv_buffs-1+rfdadd].last = RFD_SUSP; /* RU suspend */ ptr = (void *) (rfd + (p->num_recv_buffs + rfdadd) ); rbd = (struct rbd_struct *) ptr; ptr = (void *) (rbd + p->num_recv_buffs); /* clr descriptors */ memset((char *) rbd,0,sizeof(struct rbd_struct)*(p->num_recv_buffs)); for(i=0;i<p->num_recv_buffs;i++) { rbd[i].next = make16((rbd + (i+1) % p->num_recv_buffs)); rbd[i].size = swab16(RECV_BUFF_SIZE); rbd[i].buffer = make24(ptr); ptr = (char *) ptr + RECV_BUFF_SIZE; } p->rfd_top = p->rfd_first; p->rfd_last = p->rfd_first + (p->num_recv_buffs - 1 + rfdadd); p->scb->rfa_offset = make16(p->rfd_first); p->rfd_first->rbd_offset = make16(rbd); return ptr; } /************************************************** * Interrupt Handler ... 
*/ static irqreturn_t sun3_82586_interrupt(int irq,void *dev_id) { struct net_device *dev = dev_id; unsigned short stat; int cnt=0; struct priv *p; if (!dev) { printk ("sun3_82586-interrupt: irq %d for unknown device.\n",irq); return IRQ_NONE; } p = netdev_priv(dev); if(debuglevel > 1) printk("I"); WAIT_4_SCB_CMD(); /* wait for last command */ while((stat=p->scb->cus & STAT_MASK)) { p->scb->cmd_cuc = stat; sun3_attn586(); if(stat & STAT_FR) /* received a frame */ sun3_82586_rcv_int(dev); if(stat & STAT_RNR) /* RU went 'not ready' */ { printk("(R)"); if(p->scb->rus & RU_SUSPEND) /* special case: RU_SUSPEND */ { WAIT_4_SCB_CMD(); p->scb->cmd_ruc = RUC_RESUME; sun3_attn586(); WAIT_4_SCB_CMD_RUC(); } else { printk("%s: Receiver-Unit went 'NOT READY': %04x/%02x.\n",dev->name,(int) stat,(int) p->scb->rus); sun3_82586_rnr_int(dev); } } if(stat & STAT_CX) /* command with I-bit set complete */ sun3_82586_xmt_int(dev); #ifndef NO_NOPCOMMANDS if(stat & STAT_CNA) /* CU went 'not ready' */ { if(netif_running(dev)) printk("%s: oops! CU has left active state. stat: %04x/%02x.\n",dev->name,(int) stat,(int) p->scb->cus); } #endif if(debuglevel > 1) printk("%d",cnt++); WAIT_4_SCB_CMD(); /* wait for ack. (sun3_82586_xmt_int can be faster than ack!!) */ if(p->scb->cmd_cuc) /* timed out? */ { printk("%s: Acknowledge timed out.\n",dev->name); sun3_disint(); break; } } if(debuglevel > 1) printk("i"); return IRQ_HANDLED; } /******************************************************* * receive-interrupt */ static void sun3_82586_rcv_int(struct net_device *dev) { int status,cnt=0; unsigned short totlen; struct sk_buff *skb; struct rbd_struct *rbd; struct priv *p = netdev_priv(dev); if(debuglevel > 0) printk("R"); for(;(status = p->rfd_top->stat_high) & RFD_COMPL;) { rbd = (struct rbd_struct *) make32(p->rfd_top->rbd_offset); if(status & RFD_OK) /* frame received without error? */ { if( (totlen = swab16(rbd->status)) & RBD_LAST) /* the first and the last buffer? 
*/ { totlen &= RBD_MASK; /* length of this frame */ rbd->status = 0; skb = (struct sk_buff *) dev_alloc_skb(totlen+2); if(skb != NULL) { skb_reserve(skb,2); skb_put(skb,totlen); skb_copy_to_linear_data(skb,(char *) p->base+swab32((unsigned long) rbd->buffer),totlen); skb->protocol=eth_type_trans(skb,dev); netif_rx(skb); dev->stats.rx_packets++; } else dev->stats.rx_dropped++; } else { int rstat; /* free all RBD's until RBD_LAST is set */ totlen = 0; while(!((rstat=swab16(rbd->status)) & RBD_LAST)) { totlen += rstat & RBD_MASK; if(!rstat) { printk("%s: Whoops .. no end mark in RBD list\n",dev->name); break; } rbd->status = 0; rbd = (struct rbd_struct *) make32(rbd->next); } totlen += rstat & RBD_MASK; rbd->status = 0; printk("%s: received oversized frame! length: %d\n",dev->name,totlen); dev->stats.rx_dropped++; } } else /* frame !(ok), only with 'save-bad-frames' */ { printk("%s: oops! rfd-error-status: %04x\n",dev->name,status); dev->stats.rx_errors++; } p->rfd_top->stat_high = 0; p->rfd_top->last = RFD_SUSP; /* maybe exchange by RFD_LAST */ p->rfd_top->rbd_offset = 0xffff; p->rfd_last->last = 0; /* delete RFD_SUSP */ p->rfd_last = p->rfd_top; p->rfd_top = (struct rfd_struct *) make32(p->rfd_top->next); /* step to next RFD */ p->scb->rfa_offset = make16(p->rfd_top); if(debuglevel > 0) printk("%d",cnt++); } if(automatic_resume) { WAIT_4_SCB_CMD(); p->scb->cmd_ruc = RUC_RESUME; sun3_attn586(); WAIT_4_SCB_CMD_RUC(); } #ifdef WAIT_4_BUSY { int i; for(i=0;i<1024;i++) { if(p->rfd_top->status) break; DELAY_16(); if(i == 1023) printk("%s: RU hasn't fetched next RFD (not busy/complete)\n",dev->name); } } #endif #if 0 if(!at_least_one) { int i; volatile struct rfd_struct *rfds=p->rfd_top; volatile struct rbd_struct *rbds; printk("%s: received a FC intr. 
without having a frame: %04x %d\n",dev->name,status,old_at_least); for(i=0;i< (p->num_recv_buffs+4);i++) { rbds = (struct rbd_struct *) make32(rfds->rbd_offset); printk("%04x:%04x ",rfds->status,rbds->status); rfds = (struct rfd_struct *) make32(rfds->next); } printk("\nerrs: %04x %04x stat: %04x\n",(int)p->scb->rsc_errs,(int)p->scb->ovrn_errs,(int)p->scb->status); printk("\nerrs: %04x %04x rus: %02x, cus: %02x\n",(int)p->scb->rsc_errs,(int)p->scb->ovrn_errs,(int)p->scb->rus,(int)p->scb->cus); } old_at_least = at_least_one; #endif if(debuglevel > 0) printk("r"); } /********************************************************** * handle 'Receiver went not ready'. */ static void sun3_82586_rnr_int(struct net_device *dev) { struct priv *p = netdev_priv(dev); dev->stats.rx_errors++; WAIT_4_SCB_CMD(); /* wait for the last cmd, WAIT_4_FULLSTAT?? */ p->scb->cmd_ruc = RUC_ABORT; /* usually the RU is in the 'no resource'-state .. abort it now. */ sun3_attn586(); WAIT_4_SCB_CMD_RUC(); /* wait for accept cmd. */ alloc_rfa(dev,(char *)p->rfd_first); /* maybe add a check here, before restarting the RU */ startrecv586(dev); /* restart RU */ printk("%s: Receive-Unit restarted. Status: %04x\n",dev->name,p->scb->rus); } /********************************************************** * handle xmit - interrupt */ static void sun3_82586_xmt_int(struct net_device *dev) { int status; struct priv *p = netdev_priv(dev); if(debuglevel > 0) printk("X"); status = swab16(p->xmit_cmds[p->xmit_last]->cmd_status); if(!(status & STAT_COMPL)) printk("%s: strange .. 
xmit-int without a 'COMPLETE'\n",dev->name); if(status & STAT_OK) { dev->stats.tx_packets++; dev->stats.collisions += (status & TCMD_MAXCOLLMASK); } else { dev->stats.tx_errors++; if(status & TCMD_LATECOLL) { printk("%s: late collision detected.\n",dev->name); dev->stats.collisions++; } else if(status & TCMD_NOCARRIER) { dev->stats.tx_carrier_errors++; printk("%s: no carrier detected.\n",dev->name); } else if(status & TCMD_LOSTCTS) printk("%s: loss of CTS detected.\n",dev->name); else if(status & TCMD_UNDERRUN) { dev->stats.tx_fifo_errors++; printk("%s: DMA underrun detected.\n",dev->name); } else if(status & TCMD_MAXCOLL) { printk("%s: Max. collisions exceeded.\n",dev->name); dev->stats.collisions += 16; } } #if (NUM_XMIT_BUFFS > 1) if( (++p->xmit_last) == NUM_XMIT_BUFFS) p->xmit_last = 0; #endif netif_wake_queue(dev); } /*********************************************************** * (re)start the receiver */ static void startrecv586(struct net_device *dev) { struct priv *p = netdev_priv(dev); WAIT_4_SCB_CMD(); WAIT_4_SCB_CMD_RUC(); p->scb->rfa_offset = make16(p->rfd_first); p->scb->cmd_ruc = RUC_START; sun3_attn586(); /* start cmd. */ WAIT_4_SCB_CMD_RUC(); /* wait for accept cmd. (no timeout!!) */ } static void sun3_82586_timeout(struct net_device *dev) { struct priv *p = netdev_priv(dev); #ifndef NO_NOPCOMMANDS if(p->scb->cus & CU_ACTIVE) /* COMMAND-UNIT active? */ { netif_wake_queue(dev); #ifdef DEBUG printk("%s: strange ... 
timeout with CU active?!?\n",dev->name); printk("%s: X0: %04x N0: %04x N1: %04x %d\n",dev->name,(int)swab16(p->xmit_cmds[0]->cmd_status),(int)swab16(p->nop_cmds[0]->cmd_status),(int)swab16(p->nop_cmds[1]->cmd_status),(int)p->nop_point); #endif p->scb->cmd_cuc = CUC_ABORT; sun3_attn586(); WAIT_4_SCB_CMD(); p->scb->cbl_offset = make16(p->nop_cmds[p->nop_point]); p->scb->cmd_cuc = CUC_START; sun3_attn586(); WAIT_4_SCB_CMD(); dev->trans_start = jiffies; /* prevent tx timeout */ return 0; } #endif { #ifdef DEBUG printk("%s: xmitter timed out, try to restart! stat: %02x\n",dev->name,p->scb->cus); printk("%s: command-stats: %04x %04x\n",dev->name,swab16(p->xmit_cmds[0]->cmd_status),swab16(p->xmit_cmds[1]->cmd_status)); printk("%s: check, whether you set the right interrupt number!\n",dev->name); #endif sun3_82586_close(dev); sun3_82586_open(dev); } dev->trans_start = jiffies; /* prevent tx timeout */ } /****************************************************** * send frame */ static int sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev) { int len,i; #ifndef NO_NOPCOMMANDS int next_nop; #endif struct priv *p = netdev_priv(dev); if(skb->len > XMIT_BUFF_SIZE) { printk("%s: Sorry, max. framelength is %d bytes. The length of your frame is %d bytes.\n",dev->name,XMIT_BUFF_SIZE,skb->len); return NETDEV_TX_OK; } netif_stop_queue(dev); #if(NUM_XMIT_BUFFS > 1) if(test_and_set_bit(0,(void *) &p->lock)) { printk("%s: Queue was locked\n",dev->name); return NETDEV_TX_BUSY; } else #endif { len = skb->len; if (len < ETH_ZLEN) { memset((void *)p->xmit_cbuffs[p->xmit_count], 0, ETH_ZLEN); len = ETH_ZLEN; } skb_copy_from_linear_data(skb, (void *)p->xmit_cbuffs[p->xmit_count], skb->len); #if (NUM_XMIT_BUFFS == 1) # ifdef NO_NOPCOMMANDS #ifdef DEBUG if(p->scb->cus & CU_ACTIVE) { printk("%s: Hmmm .. 
CU is still running and we wanna send a new packet.\n",dev->name); printk("%s: stat: %04x %04x\n",dev->name,p->scb->cus,swab16(p->xmit_cmds[0]->cmd_status)); } #endif p->xmit_buffs[0]->size = swab16(TBD_LAST | len); for(i=0;i<16;i++) { p->xmit_cmds[0]->cmd_status = 0; WAIT_4_SCB_CMD(); if( (p->scb->cus & CU_STATUS) == CU_SUSPEND) p->scb->cmd_cuc = CUC_RESUME; else { p->scb->cbl_offset = make16(p->xmit_cmds[0]); p->scb->cmd_cuc = CUC_START; } sun3_attn586(); if(!i) dev_kfree_skb(skb); WAIT_4_SCB_CMD(); if( (p->scb->cus & CU_ACTIVE)) /* test it, because CU sometimes doesn't start immediately */ break; if(p->xmit_cmds[0]->cmd_status) break; if(i==15) printk("%s: Can't start transmit-command.\n",dev->name); } # else next_nop = (p->nop_point + 1) & 0x1; p->xmit_buffs[0]->size = swab16(TBD_LAST | len); p->xmit_cmds[0]->cmd_link = p->nop_cmds[next_nop]->cmd_link = make16((p->nop_cmds[next_nop])); p->xmit_cmds[0]->cmd_status = p->nop_cmds[next_nop]->cmd_status = 0; p->nop_cmds[p->nop_point]->cmd_link = make16((p->xmit_cmds[0])); p->nop_point = next_nop; dev_kfree_skb(skb); # endif #else p->xmit_buffs[p->xmit_count]->size = swab16(TBD_LAST | len); if( (next_nop = p->xmit_count + 1) == NUM_XMIT_BUFFS ) next_nop = 0; p->xmit_cmds[p->xmit_count]->cmd_status = 0; /* linkpointer of xmit-command already points to next nop cmd */ p->nop_cmds[next_nop]->cmd_link = make16((p->nop_cmds[next_nop])); p->nop_cmds[next_nop]->cmd_status = 0; p->nop_cmds[p->xmit_count]->cmd_link = make16((p->xmit_cmds[p->xmit_count])); p->xmit_count = next_nop; { unsigned long flags; local_irq_save(flags); if(p->xmit_count != p->xmit_last) netif_wake_queue(dev); p->lock = 0; local_irq_restore(flags); } dev_kfree_skb(skb); #endif } return NETDEV_TX_OK; } /******************************************* * Someone wanna have the statistics */ static struct net_device_stats *sun3_82586_get_stats(struct net_device *dev) { struct priv *p = netdev_priv(dev); unsigned short crc,aln,rsc,ovrn; crc = 
swab16(p->scb->crc_errs); /* get error-statistic from the ni82586 */ p->scb->crc_errs = 0; aln = swab16(p->scb->aln_errs); p->scb->aln_errs = 0; rsc = swab16(p->scb->rsc_errs); p->scb->rsc_errs = 0; ovrn = swab16(p->scb->ovrn_errs); p->scb->ovrn_errs = 0; dev->stats.rx_crc_errors += crc; dev->stats.rx_fifo_errors += ovrn; dev->stats.rx_frame_errors += aln; dev->stats.rx_dropped += rsc; return &dev->stats; } /******************************************************** * Set MC list .. */ static void set_multicast_list(struct net_device *dev) { netif_stop_queue(dev); sun3_disint(); alloc586(dev); init586(dev); startrecv586(dev); sun3_enaint(); netif_wake_queue(dev); } #ifdef MODULE #error This code is not currently supported as a module static struct net_device *dev_sun3_82586; int init_module(void) { dev_sun3_82586 = sun3_82586_probe(-1); if (IS_ERR(dev_sun3_82586)) return PTR_ERR(dev_sun3_82586); return 0; } void cleanup_module(void) { unsigned long ioaddr = dev_sun3_82586->base_addr; unregister_netdev(dev_sun3_82586); release_region(ioaddr, SUN3_82586_TOTAL_SIZE); iounmap((void *)ioaddr); free_netdev(dev_sun3_82586); } #endif /* MODULE */ #if 0 /* * DUMP .. 
we expect a not running CMD unit and enough space */ void sun3_82586_dump(struct net_device *dev,void *ptr) { struct priv *p = netdev_priv(dev); struct dump_cmd_struct *dump_cmd = (struct dump_cmd_struct *) ptr; int i; p->scb->cmd_cuc = CUC_ABORT; sun3_attn586(); WAIT_4_SCB_CMD(); WAIT_4_SCB_CMD_RUC(); dump_cmd->cmd_status = 0; dump_cmd->cmd_cmd = CMD_DUMP | CMD_LAST; dump_cmd->dump_offset = make16((dump_cmd + 1)); dump_cmd->cmd_link = 0xffff; p->scb->cbl_offset = make16(dump_cmd); p->scb->cmd_cuc = CUC_START; sun3_attn586(); WAIT_4_STAT_COMPL(dump_cmd); if( (dump_cmd->cmd_status & (STAT_COMPL|STAT_OK)) != (STAT_COMPL|STAT_OK) ) printk("%s: Can't get dump information.\n",dev->name); for(i=0;i<170;i++) { printk("%02x ",(int) ((unsigned char *) (dump_cmd + 1))[i]); if(i % 24 == 23) printk("\n"); } printk("\n"); } #endif MODULE_LICENSE("GPL");
gpl-2.0
arnavgosain/tomato
drivers/mtd/maps/ichxrom.c
3685
9893
/* * ichxrom.c * * Normal mappings of chips in physical memory */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/slab.h> #include <asm/io.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> #include <linux/mtd/cfi.h> #include <linux/mtd/flashchip.h> #include <linux/pci.h> #include <linux/pci_ids.h> #include <linux/list.h> #define xstr(s) str(s) #define str(s) #s #define MOD_NAME xstr(KBUILD_BASENAME) #define ADDRESS_NAME_LEN 18 #define ROM_PROBE_STEP_SIZE (64*1024) /* 64KiB */ #define BIOS_CNTL 0x4e #define FWH_DEC_EN1 0xE3 #define FWH_DEC_EN2 0xF0 #define FWH_SEL1 0xE8 #define FWH_SEL2 0xEE struct ichxrom_window { void __iomem* virt; unsigned long phys; unsigned long size; struct list_head maps; struct resource rsrc; struct pci_dev *pdev; }; struct ichxrom_map_info { struct list_head list; struct map_info map; struct mtd_info *mtd; struct resource rsrc; char map_name[sizeof(MOD_NAME) + 2 + ADDRESS_NAME_LEN]; }; static struct ichxrom_window ichxrom_window = { .maps = LIST_HEAD_INIT(ichxrom_window.maps), }; static void ichxrom_cleanup(struct ichxrom_window *window) { struct ichxrom_map_info *map, *scratch; u16 word; /* Disable writes through the rom window */ pci_read_config_word(window->pdev, BIOS_CNTL, &word); pci_write_config_word(window->pdev, BIOS_CNTL, word & ~1); pci_dev_put(window->pdev); /* Free all of the mtd devices */ list_for_each_entry_safe(map, scratch, &window->maps, list) { if (map->rsrc.parent) release_resource(&map->rsrc); mtd_device_unregister(map->mtd); map_destroy(map->mtd); list_del(&map->list); kfree(map); } if (window->rsrc.parent) release_resource(&window->rsrc); if (window->virt) { iounmap(window->virt); window->virt = NULL; window->phys = 0; window->size = 0; window->pdev = NULL; } } static int ichxrom_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL }; struct ichxrom_window 
*window = &ichxrom_window; struct ichxrom_map_info *map = NULL; unsigned long map_top; u8 byte; u16 word; /* For now I just handle the ichx and I assume there * are not a lot of resources up at the top of the address * space. It is possible to handle other devices in the * top 16MB but it is very painful. Also since * you can only really attach a FWH to an ICHX there * a number of simplifications you can make. * * Also you can page firmware hubs if an 8MB window isn't enough * but don't currently handle that case either. */ window->pdev = pdev; /* Find a region continuous to the end of the ROM window */ window->phys = 0; pci_read_config_byte(pdev, FWH_DEC_EN1, &byte); if (byte == 0xff) { window->phys = 0xffc00000; pci_read_config_byte(pdev, FWH_DEC_EN2, &byte); if ((byte & 0x0f) == 0x0f) { window->phys = 0xff400000; } else if ((byte & 0x0e) == 0x0e) { window->phys = 0xff500000; } else if ((byte & 0x0c) == 0x0c) { window->phys = 0xff600000; } else if ((byte & 0x08) == 0x08) { window->phys = 0xff700000; } } else if ((byte & 0xfe) == 0xfe) { window->phys = 0xffc80000; } else if ((byte & 0xfc) == 0xfc) { window->phys = 0xffd00000; } else if ((byte & 0xf8) == 0xf8) { window->phys = 0xffd80000; } else if ((byte & 0xf0) == 0xf0) { window->phys = 0xffe00000; } else if ((byte & 0xe0) == 0xe0) { window->phys = 0xffe80000; } else if ((byte & 0xc0) == 0xc0) { window->phys = 0xfff00000; } else if ((byte & 0x80) == 0x80) { window->phys = 0xfff80000; } if (window->phys == 0) { printk(KERN_ERR MOD_NAME ": Rom window is closed\n"); goto out; } window->phys -= 0x400000UL; window->size = (0xffffffffUL - window->phys) + 1UL; /* Enable writes through the rom window */ pci_read_config_word(pdev, BIOS_CNTL, &word); if (!(word & 1) && (word & (1<<1))) { /* The BIOS will generate an error if I enable * this device, so don't even try. 
*/ printk(KERN_ERR MOD_NAME ": firmware access control, I can't enable writes\n"); goto out; } pci_write_config_word(pdev, BIOS_CNTL, word | 1); /* * Try to reserve the window mem region. If this fails then * it is likely due to the window being "reseved" by the BIOS. */ window->rsrc.name = MOD_NAME; window->rsrc.start = window->phys; window->rsrc.end = window->phys + window->size - 1; window->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY; if (request_resource(&iomem_resource, &window->rsrc)) { window->rsrc.parent = NULL; printk(KERN_DEBUG MOD_NAME ": " "%s(): Unable to register resource %pR - kernel bug?\n", __func__, &window->rsrc); } /* Map the firmware hub into my address space. */ window->virt = ioremap_nocache(window->phys, window->size); if (!window->virt) { printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n", window->phys, window->size); goto out; } /* Get the first address to look for an rom chip at */ map_top = window->phys; if ((window->phys & 0x3fffff) != 0) { map_top = window->phys + 0x400000; } #if 1 /* The probe sequence run over the firmware hub lock * registers sets them to 0x7 (no access). * Probe at most the last 4M of the address space. */ if (map_top < 0xffc00000) { map_top = 0xffc00000; } #endif /* Loop through and look for rom chips */ while((map_top - 1) < 0xffffffffUL) { struct cfi_private *cfi; unsigned long offset; int i; if (!map) { map = kmalloc(sizeof(*map), GFP_KERNEL); } if (!map) { printk(KERN_ERR MOD_NAME ": kmalloc failed"); goto out; } memset(map, 0, sizeof(*map)); INIT_LIST_HEAD(&map->list); map->map.name = map->map_name; map->map.phys = map_top; offset = map_top - window->phys; map->map.virt = (void __iomem *) (((unsigned long)(window->virt)) + offset); map->map.size = 0xffffffffUL - map_top + 1UL; /* Set the name of the map to the address I am trying */ sprintf(map->map_name, "%s @%08Lx", MOD_NAME, (unsigned long long)map->map.phys); /* Firmware hubs only use vpp when being programmed * in a factory setting. 
So in-place programming * needs to use a different method. */ for(map->map.bankwidth = 32; map->map.bankwidth; map->map.bankwidth >>= 1) { char **probe_type; /* Skip bankwidths that are not supported */ if (!map_bankwidth_supported(map->map.bankwidth)) continue; /* Setup the map methods */ simple_map_init(&map->map); /* Try all of the probe methods */ probe_type = rom_probe_types; for(; *probe_type; probe_type++) { map->mtd = do_map_probe(*probe_type, &map->map); if (map->mtd) goto found; } } map_top += ROM_PROBE_STEP_SIZE; continue; found: /* Trim the size if we are larger than the map */ if (map->mtd->size > map->map.size) { printk(KERN_WARNING MOD_NAME " rom(%llu) larger than window(%lu). fixing...\n", (unsigned long long)map->mtd->size, map->map.size); map->mtd->size = map->map.size; } if (window->rsrc.parent) { /* * Registering the MTD device in iomem may not be possible * if there is a BIOS "reserved" and BUSY range. If this * fails then continue anyway. */ map->rsrc.name = map->map_name; map->rsrc.start = map->map.phys; map->rsrc.end = map->map.phys + map->mtd->size - 1; map->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY; if (request_resource(&window->rsrc, &map->rsrc)) { printk(KERN_ERR MOD_NAME ": cannot reserve MTD resource\n"); map->rsrc.parent = NULL; } } /* Make the whole region visible in the map */ map->map.virt = window->virt; map->map.phys = window->phys; cfi = map->map.fldrv_priv; for(i = 0; i < cfi->numchips; i++) { cfi->chips[i].start += offset; } /* Now that the mtd devices is complete claim and export it */ map->mtd->owner = THIS_MODULE; if (mtd_device_register(map->mtd, NULL, 0)) { map_destroy(map->mtd); map->mtd = NULL; goto out; } /* Calculate the new value of map_top */ map_top += map->mtd->size; /* File away the map structure */ list_add(&map->list, &window->maps); map = NULL; } out: /* Free any left over map structures */ kfree(map); /* See if I have any map structures */ if (list_empty(&window->maps)) { ichxrom_cleanup(window); return 
-ENODEV; } return 0; } static void ichxrom_remove_one(struct pci_dev *pdev) { struct ichxrom_window *window = &ichxrom_window; ichxrom_cleanup(window); } static struct pci_device_id ichxrom_pci_tbl[] = { { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, PCI_ANY_ID, PCI_ANY_ID, }, { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, PCI_ANY_ID, PCI_ANY_ID, }, { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, PCI_ANY_ID, PCI_ANY_ID, }, { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, PCI_ANY_ID, PCI_ANY_ID, }, { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, PCI_ANY_ID, PCI_ANY_ID, }, { 0, }, }; #if 0 MODULE_DEVICE_TABLE(pci, ichxrom_pci_tbl); static struct pci_driver ichxrom_driver = { .name = MOD_NAME, .id_table = ichxrom_pci_tbl, .probe = ichxrom_init_one, .remove = ichxrom_remove_one, }; #endif static int __init init_ichxrom(void) { struct pci_dev *pdev; struct pci_device_id *id; pdev = NULL; for (id = ichxrom_pci_tbl; id->vendor; id++) { pdev = pci_get_device(id->vendor, id->device, NULL); if (pdev) { break; } } if (pdev) { return ichxrom_init_one(pdev, &ichxrom_pci_tbl[0]); } return -ENXIO; #if 0 return pci_register_driver(&ichxrom_driver); #endif } static void __exit cleanup_ichxrom(void) { ichxrom_remove_one(ichxrom_window.pdev); } module_init(init_ichxrom); module_exit(cleanup_ichxrom); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Eric Biederman <ebiederman@lnxi.com>"); MODULE_DESCRIPTION("MTD map driver for BIOS chips on the ICHX southbridge");
gpl-2.0
allanm84/linux-imx
arch/ia64/kernel/sys_ia64.c
4197
4501
/* * This file contains various system calls that have different calling * conventions on different platforms. * * Copyright (C) 1999-2000, 2002-2003, 2005 Hewlett-Packard Co * David Mosberger-Tang <davidm@hpl.hp.com> */ #include <linux/errno.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/mman.h> #include <linux/sched.h> #include <linux/shm.h> #include <linux/file.h> /* doh, must come after sched.h... */ #include <linux/smp.h> #include <linux/syscalls.h> #include <linux/highuid.h> #include <linux/hugetlb.h> #include <asm/shmparam.h> #include <asm/uaccess.h> unsigned long arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { long map_shared = (flags & MAP_SHARED); unsigned long align_mask = 0; struct mm_struct *mm = current->mm; struct vm_unmapped_area_info info; if (len > RGN_MAP_LIMIT) return -ENOMEM; /* handle fixed mapping: prevent overlap with huge pages */ if (flags & MAP_FIXED) { if (is_hugepage_only_range(mm, addr, len)) return -EINVAL; return addr; } #ifdef CONFIG_HUGETLB_PAGE if (REGION_NUMBER(addr) == RGN_HPAGE) addr = 0; #endif if (!addr) addr = TASK_UNMAPPED_BASE; if (map_shared && (TASK_SIZE > 0xfffffffful)) /* * For 64-bit tasks, align shared segments to 1MB to avoid potential * performance penalty due to virtual aliasing (see ASDM). For 32-bit * tasks, we prefer to avoid exhausting the address space too quickly by * limiting alignment to a single page. */ align_mask = PAGE_MASK & (SHMLBA - 1); info.flags = 0; info.length = len; info.low_limit = addr; info.high_limit = TASK_SIZE; info.align_mask = align_mask; info.align_offset = 0; return vm_unmapped_area(&info); } asmlinkage long ia64_getpriority (int which, int who) { long prio; prio = sys_getpriority(which, who); if (prio >= 0) { force_successful_syscall_return(); prio = 20 - prio; } return prio; } /* XXX obsolete, but leave it here until the old libc is gone... 
*/ asmlinkage unsigned long sys_getpagesize (void) { return PAGE_SIZE; } asmlinkage unsigned long ia64_brk (unsigned long brk) { unsigned long retval = sys_brk(brk); force_successful_syscall_return(); return retval; } /* * On IA-64, we return the two file descriptors in ret0 and ret1 (r8 * and r9) as this is faster than doing a copy_to_user(). */ asmlinkage long sys_ia64_pipe (void) { struct pt_regs *regs = task_pt_regs(current); int fd[2]; int retval; retval = do_pipe_flags(fd, 0); if (retval) goto out; retval = fd[0]; regs->r9 = fd[1]; out: return retval; } int ia64_mmap_check(unsigned long addr, unsigned long len, unsigned long flags) { unsigned long roff; /* * Don't permit mappings into unmapped space, the virtual page table * of a region, or across a region boundary. Note: RGN_MAP_LIMIT is * equal to 2^n-PAGE_SIZE (for some integer n <= 61) and len > 0. */ roff = REGION_OFFSET(addr); if ((len > RGN_MAP_LIMIT) || (roff > (RGN_MAP_LIMIT - len))) return -EINVAL; return 0; } /* * mmap2() is like mmap() except that the offset is expressed in units * of PAGE_SIZE (instead of bytes). This allows to mmap2() (pieces * of) files that are larger than the address space of the CPU. 
*/ asmlinkage unsigned long sys_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, long pgoff) { addr = sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff); if (!IS_ERR((void *) addr)) force_successful_syscall_return(); return addr; } asmlinkage unsigned long sys_mmap (unsigned long addr, unsigned long len, int prot, int flags, int fd, long off) { if (offset_in_page(off) != 0) return -EINVAL; addr = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT); if (!IS_ERR((void *) addr)) force_successful_syscall_return(); return addr; } asmlinkage unsigned long ia64_mremap (unsigned long addr, unsigned long old_len, unsigned long new_len, unsigned long flags, unsigned long new_addr) { addr = sys_mremap(addr, old_len, new_len, flags, new_addr); if (!IS_ERR((void *) addr)) force_successful_syscall_return(); return addr; } #ifndef CONFIG_PCI asmlinkage long sys_pciconfig_read (unsigned long bus, unsigned long dfn, unsigned long off, unsigned long len, void *buf) { return -ENOSYS; } asmlinkage long sys_pciconfig_write (unsigned long bus, unsigned long dfn, unsigned long off, unsigned long len, void *buf) { return -ENOSYS; } #endif /* CONFIG_PCI */
gpl-2.0
CryToCry96/android_kernel_mt6572
drivers/net/wireless/brcm80211/brcmsmac/channel.c
4965
43552
/* * Copyright (c) 2010 Broadcom Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/types.h> #include <net/mac80211.h> #include <defs.h> #include "pub.h" #include "phy/phy_hal.h" #include "main.h" #include "stf.h" #include "channel.h" /* QDB() macro takes a dB value and converts to a quarter dB value */ #define QDB(n) ((n) * BRCMS_TXPWR_DB_FACTOR) #define LOCALE_CHAN_01_11 (1<<0) #define LOCALE_CHAN_12_13 (1<<1) #define LOCALE_CHAN_14 (1<<2) #define LOCALE_SET_5G_LOW_JP1 (1<<3) /* 34-48, step 2 */ #define LOCALE_SET_5G_LOW_JP2 (1<<4) /* 34-46, step 4 */ #define LOCALE_SET_5G_LOW1 (1<<5) /* 36-48, step 4 */ #define LOCALE_SET_5G_LOW2 (1<<6) /* 52 */ #define LOCALE_SET_5G_LOW3 (1<<7) /* 56-64, step 4 */ #define LOCALE_SET_5G_MID1 (1<<8) /* 100-116, step 4 */ #define LOCALE_SET_5G_MID2 (1<<9) /* 120-124, step 4 */ #define LOCALE_SET_5G_MID3 (1<<10) /* 128 */ #define LOCALE_SET_5G_HIGH1 (1<<11) /* 132-140, step 4 */ #define LOCALE_SET_5G_HIGH2 (1<<12) /* 149-161, step 4 */ #define LOCALE_SET_5G_HIGH3 (1<<13) /* 165 */ #define LOCALE_CHAN_52_140_ALL (1<<14) #define LOCALE_SET_5G_HIGH4 (1<<15) /* 184-216 */ #define LOCALE_CHAN_36_64 (LOCALE_SET_5G_LOW1 | \ LOCALE_SET_5G_LOW2 | \ LOCALE_SET_5G_LOW3) #define LOCALE_CHAN_52_64 (LOCALE_SET_5G_LOW2 | LOCALE_SET_5G_LOW3) #define 
LOCALE_CHAN_100_124 (LOCALE_SET_5G_MID1 | LOCALE_SET_5G_MID2) #define LOCALE_CHAN_100_140 (LOCALE_SET_5G_MID1 | LOCALE_SET_5G_MID2 | \ LOCALE_SET_5G_MID3 | LOCALE_SET_5G_HIGH1) #define LOCALE_CHAN_149_165 (LOCALE_SET_5G_HIGH2 | LOCALE_SET_5G_HIGH3) #define LOCALE_CHAN_184_216 LOCALE_SET_5G_HIGH4 #define LOCALE_CHAN_01_14 (LOCALE_CHAN_01_11 | \ LOCALE_CHAN_12_13 | \ LOCALE_CHAN_14) #define LOCALE_RADAR_SET_NONE 0 #define LOCALE_RADAR_SET_1 1 #define LOCALE_RESTRICTED_NONE 0 #define LOCALE_RESTRICTED_SET_2G_SHORT 1 #define LOCALE_RESTRICTED_CHAN_165 2 #define LOCALE_CHAN_ALL_5G 3 #define LOCALE_RESTRICTED_JAPAN_LEGACY 4 #define LOCALE_RESTRICTED_11D_2G 5 #define LOCALE_RESTRICTED_11D_5G 6 #define LOCALE_RESTRICTED_LOW_HI 7 #define LOCALE_RESTRICTED_12_13_14 8 #define LOCALE_2G_IDX_i 0 #define LOCALE_5G_IDX_11 0 #define LOCALE_MIMO_IDX_bn 0 #define LOCALE_MIMO_IDX_11n 0 /* max of BAND_5G_PWR_LVLS and 6 for 2.4 GHz */ #define BRCMS_MAXPWR_TBL_SIZE 6 /* max of BAND_5G_PWR_LVLS and 14 for 2.4 GHz */ #define BRCMS_MAXPWR_MIMO_TBL_SIZE 14 /* power level in group of 2.4GHz band channels: * maxpwr[0] - CCK channels [1] * maxpwr[1] - CCK channels [2-10] * maxpwr[2] - CCK channels [11-14] * maxpwr[3] - OFDM channels [1] * maxpwr[4] - OFDM channels [2-10] * maxpwr[5] - OFDM channels [11-14] */ /* maxpwr mapping to 5GHz band channels: * maxpwr[0] - channels [34-48] * maxpwr[1] - channels [52-60] * maxpwr[2] - channels [62-64] * maxpwr[3] - channels [100-140] * maxpwr[4] - channels [149-165] */ #define BAND_5G_PWR_LVLS 5 /* 5 power levels for 5G */ #define LC(id) LOCALE_MIMO_IDX_ ## id #define LC_2G(id) LOCALE_2G_IDX_ ## id #define LC_5G(id) LOCALE_5G_IDX_ ## id #define LOCALES(band2, band5, mimo2, mimo5) \ {LC_2G(band2), LC_5G(band5), LC(mimo2), LC(mimo5)} /* macro to get 2.4 GHz channel group index for tx power */ #define CHANNEL_POWER_IDX_2G_CCK(c) (((c) < 2) ? 0 : (((c) < 11) ? 1 : 2)) #define CHANNEL_POWER_IDX_2G_OFDM(c) (((c) < 2) ? 3 : (((c) < 11) ? 
4 : 5)) /* macro to get 5 GHz channel group index for tx power */ #define CHANNEL_POWER_IDX_5G(c) (((c) < 52) ? 0 : \ (((c) < 62) ? 1 : \ (((c) < 100) ? 2 : \ (((c) < 149) ? 3 : 4)))) #define ISDFS_EU(fl) (((fl) & BRCMS_DFS_EU) == BRCMS_DFS_EU) struct brcms_cm_band { /* struct locale_info flags */ u8 locale_flags; /* List of valid channels in the country */ struct brcms_chanvec valid_channels; /* List of restricted use channels */ const struct brcms_chanvec *restricted_channels; /* List of radar sensitive channels */ const struct brcms_chanvec *radar_channels; u8 PAD[8]; }; /* locale per-channel tx power limits for MIMO frames * maxpwr arrays are index by channel for 2.4 GHz limits, and * by sub-band for 5 GHz limits using CHANNEL_POWER_IDX_5G(channel) */ struct locale_mimo_info { /* tx 20 MHz power limits, qdBm units */ s8 maxpwr20[BRCMS_MAXPWR_MIMO_TBL_SIZE]; /* tx 40 MHz power limits, qdBm units */ s8 maxpwr40[BRCMS_MAXPWR_MIMO_TBL_SIZE]; u8 flags; }; /* Country names and abbreviations with locale defined from ISO 3166 */ struct country_info { const u8 locale_2G; /* 2.4G band locale */ const u8 locale_5G; /* 5G band locale */ const u8 locale_mimo_2G; /* 2.4G mimo info */ const u8 locale_mimo_5G; /* 5G mimo info */ }; struct brcms_cm_info { struct brcms_pub *pub; struct brcms_c_info *wlc; char srom_ccode[BRCM_CNTRY_BUF_SZ]; /* Country Code in SROM */ uint srom_regrev; /* Regulatory Rev for the SROM ccode */ const struct country_info *country; /* current country def */ char ccode[BRCM_CNTRY_BUF_SZ]; /* current internal Country Code */ uint regrev; /* current Regulatory Revision */ char country_abbrev[BRCM_CNTRY_BUF_SZ]; /* current advertised ccode */ /* per-band state (one per phy/radio) */ struct brcms_cm_band bandstate[MAXBANDS]; /* quiet channels currently for radar sensitivity or 11h support */ /* channels on which we cannot transmit */ struct brcms_chanvec quiet_channels; }; /* locale channel and power info. 
*/ struct locale_info { u32 valid_channels; /* List of radar sensitive channels */ u8 radar_channels; /* List of channels used only if APs are detected */ u8 restricted_channels; /* Max tx pwr in qdBm for each sub-band */ s8 maxpwr[BRCMS_MAXPWR_TBL_SIZE]; /* Country IE advertised max tx pwr in dBm per sub-band */ s8 pub_maxpwr[BAND_5G_PWR_LVLS]; u8 flags; }; /* Regulatory Matrix Spreadsheet (CLM) MIMO v3.7.9 */ /* * Some common channel sets */ /* No channels */ static const struct brcms_chanvec chanvec_none = { {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }; /* All 2.4 GHz HW channels */ static const struct brcms_chanvec chanvec_all_2G = { {0xfe, 0x7f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }; /* All 5 GHz HW channels */ static const struct brcms_chanvec chanvec_all_5G = { {0x00, 0x00, 0x00, 0x00, 0x54, 0x55, 0x11, 0x11, 0x01, 0x00, 0x00, 0x00, 0x10, 0x11, 0x11, 0x11, 0x11, 0x11, 0x20, 0x22, 0x22, 0x00, 0x00, 0x11, 0x11, 0x11, 0x11, 0x01} }; /* * Radar channel sets */ /* Channels 52 - 64, 100 - 140 */ static const struct brcms_chanvec radar_set1 = { {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x11, /* 52 - 60 */ 0x01, 0x00, 0x00, 0x00, 0x10, 0x11, 0x11, 0x11, /* 64, 100 - 124 */ 0x11, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 128 - 140 */ 0x00, 0x00, 0x00, 0x00} }; /* * Restricted channel sets */ /* Channels 34, 38, 42, 46 */ static const struct brcms_chanvec restricted_set_japan_legacy = { {0x00, 0x00, 0x00, 0x00, 0x44, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }; /* Channels 12, 13 */ static const struct brcms_chanvec restricted_set_2g_short = { {0x00, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }; /* Channel 165 */ static const struct brcms_chanvec restricted_chan_165 = { {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }; /* Channels 36 - 48 & 149 - 165 */ static const struct brcms_chanvec restricted_low_hi = { {0x00, 0x00, 0x00, 0x00, 0x10, 0x11, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x22, 0x22, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }; /* Channels 12 - 14 */ static const struct brcms_chanvec restricted_set_12_13_14 = { {0x00, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }; /* global memory to provide working buffer for expanded locale */ static const struct brcms_chanvec *g_table_radar_set[] = { &chanvec_none, &radar_set1 }; static const struct brcms_chanvec *g_table_restricted_chan[] = { &chanvec_none, /* restricted_set_none */ &restricted_set_2g_short, &restricted_chan_165, &chanvec_all_5G, &restricted_set_japan_legacy, &chanvec_all_2G, /* restricted_set_11d_2G */ &chanvec_all_5G, /* restricted_set_11d_5G */ &restricted_low_hi, &restricted_set_12_13_14 }; static const struct brcms_chanvec locale_2g_01_11 = { {0xfe, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }; static const struct brcms_chanvec locale_2g_12_13 = { {0x00, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }; static const struct brcms_chanvec locale_2g_14 = { {0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00} }; static const struct brcms_chanvec locale_5g_LOW_JP1 = { {0x00, 0x00, 0x00, 0x00, 0x54, 0x55, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }; static const struct brcms_chanvec locale_5g_LOW_JP2 = { {0x00, 0x00, 0x00, 0x00, 0x44, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }; static const struct brcms_chanvec locale_5g_LOW1 = { {0x00, 0x00, 0x00, 0x00, 0x10, 0x11, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }; static const struct brcms_chanvec locale_5g_LOW2 = { {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }; static const struct brcms_chanvec locale_5g_LOW3 = { {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }; static const struct brcms_chanvec locale_5g_MID1 = { {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x11, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }; static const struct brcms_chanvec locale_5g_MID2 = { {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }; static const struct brcms_chanvec locale_5g_MID3 = { {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }; static const struct brcms_chanvec locale_5g_HIGH1 = { {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x11, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }; static const struct brcms_chanvec locale_5g_HIGH2 = { {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x22, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }; static const struct brcms_chanvec locale_5g_HIGH3 = { {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }; static const struct brcms_chanvec locale_5g_52_140_ALL = { {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }; static const struct brcms_chanvec locale_5g_HIGH4 = { {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x11, 0x11, 0x11, 0x11} }; static const struct brcms_chanvec *g_table_locale_base[] = { &locale_2g_01_11, &locale_2g_12_13, &locale_2g_14, &locale_5g_LOW_JP1, &locale_5g_LOW_JP2, &locale_5g_LOW1, &locale_5g_LOW2, &locale_5g_LOW3, &locale_5g_MID1, &locale_5g_MID2, &locale_5g_MID3, &locale_5g_HIGH1, &locale_5g_HIGH2, &locale_5g_HIGH3, &locale_5g_52_140_ALL, &locale_5g_HIGH4 }; static void brcms_c_locale_add_channels(struct brcms_chanvec *target, const struct brcms_chanvec *channels) { u8 i; for (i = 0; i < sizeof(struct brcms_chanvec); i++) target->vec[i] |= channels->vec[i]; } static void brcms_c_locale_get_channels(const struct locale_info *locale, struct brcms_chanvec *channels) { u8 i; memset(channels, 0, sizeof(struct brcms_chanvec)); for (i = 0; i < ARRAY_SIZE(g_table_locale_base); i++) { if (locale->valid_channels & (1 << i)) brcms_c_locale_add_channels(channels, g_table_locale_base[i]); } } /* * Locale Definitions - 2.4 GHz */ static const struct locale_info locale_i = { /* locale i. 
channel 1 - 13 */ LOCALE_CHAN_01_11 | LOCALE_CHAN_12_13, LOCALE_RADAR_SET_NONE, LOCALE_RESTRICTED_SET_2G_SHORT, {QDB(19), QDB(19), QDB(19), QDB(19), QDB(19), QDB(19)}, {20, 20, 20, 0}, BRCMS_EIRP }; /* * Locale Definitions - 5 GHz */ static const struct locale_info locale_11 = { /* locale 11. channel 36 - 48, 52 - 64, 100 - 140, 149 - 165 */ LOCALE_CHAN_36_64 | LOCALE_CHAN_100_140 | LOCALE_CHAN_149_165, LOCALE_RADAR_SET_1, LOCALE_RESTRICTED_NONE, {QDB(21), QDB(21), QDB(21), QDB(21), QDB(21)}, {23, 23, 23, 30, 30}, BRCMS_EIRP | BRCMS_DFS_EU }; static const struct locale_info *g_locale_2g_table[] = { &locale_i }; static const struct locale_info *g_locale_5g_table[] = { &locale_11 }; /* * MIMO Locale Definitions - 2.4 GHz */ static const struct locale_mimo_info locale_bn = { {QDB(13), QDB(13), QDB(13), QDB(13), QDB(13), QDB(13), QDB(13), QDB(13), QDB(13), QDB(13), QDB(13), QDB(13), QDB(13)}, {0, 0, QDB(13), QDB(13), QDB(13), QDB(13), QDB(13), QDB(13), QDB(13), QDB(13), QDB(13), 0, 0}, 0 }; static const struct locale_mimo_info *g_mimo_2g_table[] = { &locale_bn }; /* * MIMO Locale Definitions - 5 GHz */ static const struct locale_mimo_info locale_11n = { { /* 12.5 dBm */ 50, 50, 50, QDB(15), QDB(15)}, {QDB(14), QDB(15), QDB(15), QDB(15), QDB(15)}, 0 }; static const struct locale_mimo_info *g_mimo_5g_table[] = { &locale_11n }; static const struct { char abbrev[BRCM_CNTRY_BUF_SZ]; /* country abbreviation */ struct country_info country; } cntry_locales[] = { { "X2", LOCALES(i, 11, bn, 11n)}, /* Worldwide RoW 2 */ }; #ifdef SUPPORT_40MHZ /* 20MHz channel info for 40MHz pairing support */ struct chan20_info { u8 sb; u8 adj_sbs; }; /* indicates adjacent channels that are allowed for a 40 Mhz channel and * those that permitted by the HT */ struct chan20_info chan20_info[] = { /* 11b/11g */ /* 0 */ {1, (CH_UPPER_SB | CH_EWA_VALID)}, /* 1 */ {2, (CH_UPPER_SB | CH_EWA_VALID)}, /* 2 */ {3, (CH_UPPER_SB | CH_EWA_VALID)}, /* 3 */ {4, (CH_UPPER_SB | CH_EWA_VALID)}, /* 4 */ {5, 
(CH_UPPER_SB | CH_LOWER_SB | CH_EWA_VALID)}, /* 5 */ {6, (CH_UPPER_SB | CH_LOWER_SB | CH_EWA_VALID)}, /* 6 */ {7, (CH_UPPER_SB | CH_LOWER_SB | CH_EWA_VALID)}, /* 7 */ {8, (CH_UPPER_SB | CH_LOWER_SB | CH_EWA_VALID)}, /* 8 */ {9, (CH_UPPER_SB | CH_LOWER_SB | CH_EWA_VALID)}, /* 9 */ {10, (CH_LOWER_SB | CH_EWA_VALID)}, /* 10 */ {11, (CH_LOWER_SB | CH_EWA_VALID)}, /* 11 */ {12, (CH_LOWER_SB)}, /* 12 */ {13, (CH_LOWER_SB)}, /* 13 */ {14, (CH_LOWER_SB)}, /* 11a japan high */ /* 14 */ {34, (CH_UPPER_SB)}, /* 15 */ {38, (CH_LOWER_SB)}, /* 16 */ {42, (CH_LOWER_SB)}, /* 17 */ {46, (CH_LOWER_SB)}, /* 11a usa low */ /* 18 */ {36, (CH_UPPER_SB | CH_EWA_VALID)}, /* 19 */ {40, (CH_LOWER_SB | CH_EWA_VALID)}, /* 20 */ {44, (CH_UPPER_SB | CH_EWA_VALID)}, /* 21 */ {48, (CH_LOWER_SB | CH_EWA_VALID)}, /* 22 */ {52, (CH_UPPER_SB | CH_EWA_VALID)}, /* 23 */ {56, (CH_LOWER_SB | CH_EWA_VALID)}, /* 24 */ {60, (CH_UPPER_SB | CH_EWA_VALID)}, /* 25 */ {64, (CH_LOWER_SB | CH_EWA_VALID)}, /* 11a Europe */ /* 26 */ {100, (CH_UPPER_SB | CH_EWA_VALID)}, /* 27 */ {104, (CH_LOWER_SB | CH_EWA_VALID)}, /* 28 */ {108, (CH_UPPER_SB | CH_EWA_VALID)}, /* 29 */ {112, (CH_LOWER_SB | CH_EWA_VALID)}, /* 30 */ {116, (CH_UPPER_SB | CH_EWA_VALID)}, /* 31 */ {120, (CH_LOWER_SB | CH_EWA_VALID)}, /* 32 */ {124, (CH_UPPER_SB | CH_EWA_VALID)}, /* 33 */ {128, (CH_LOWER_SB | CH_EWA_VALID)}, /* 34 */ {132, (CH_UPPER_SB | CH_EWA_VALID)}, /* 35 */ {136, (CH_LOWER_SB | CH_EWA_VALID)}, /* 36 */ {140, (CH_LOWER_SB)}, /* 11a usa high, ref5 only */ /* The 0x80 bit in pdiv means these are REF5, other entries are REF20 */ /* 37 */ {149, (CH_UPPER_SB | CH_EWA_VALID)}, /* 38 */ {153, (CH_LOWER_SB | CH_EWA_VALID)}, /* 39 */ {157, (CH_UPPER_SB | CH_EWA_VALID)}, /* 40 */ {161, (CH_LOWER_SB | CH_EWA_VALID)}, /* 41 */ {165, (CH_LOWER_SB)}, /* 11a japan */ /* 42 */ {184, (CH_UPPER_SB)}, /* 43 */ {188, (CH_LOWER_SB)}, /* 44 */ {192, (CH_UPPER_SB)}, /* 45 */ {196, (CH_LOWER_SB)}, /* 46 */ {200, (CH_UPPER_SB)}, /* 47 */ {204, (CH_LOWER_SB)}, 
/* 48 */ {208, (CH_UPPER_SB)}, /* 49 */ {212, (CH_LOWER_SB)}, /* 50 */ {216, (CH_LOWER_SB)} }; #endif /* SUPPORT_40MHZ */ static const struct locale_info *brcms_c_get_locale_2g(u8 locale_idx) { if (locale_idx >= ARRAY_SIZE(g_locale_2g_table)) return NULL; /* error condition */ return g_locale_2g_table[locale_idx]; } static const struct locale_info *brcms_c_get_locale_5g(u8 locale_idx) { if (locale_idx >= ARRAY_SIZE(g_locale_5g_table)) return NULL; /* error condition */ return g_locale_5g_table[locale_idx]; } static const struct locale_mimo_info *brcms_c_get_mimo_2g(u8 locale_idx) { if (locale_idx >= ARRAY_SIZE(g_mimo_2g_table)) return NULL; return g_mimo_2g_table[locale_idx]; } static const struct locale_mimo_info *brcms_c_get_mimo_5g(u8 locale_idx) { if (locale_idx >= ARRAY_SIZE(g_mimo_5g_table)) return NULL; return g_mimo_5g_table[locale_idx]; } static int brcms_c_country_aggregate_map(struct brcms_cm_info *wlc_cm, const char *ccode, char *mapped_ccode, uint *mapped_regrev) { return false; } /* Lookup a country info structure from a null terminated country * abbreviation and regrev directly with no translation. */ static const struct country_info * brcms_c_country_lookup_direct(const char *ccode, uint regrev) { uint size, i; /* Should just return 0 for single locale driver. */ /* Keep it this way in case we add more locales. 
(for now anyway) */ /* * all other country def arrays are for regrev == 0, so if * regrev is non-zero, fail */ if (regrev > 0) return NULL; /* find matched table entry from country code */ size = ARRAY_SIZE(cntry_locales); for (i = 0; i < size; i++) { if (strcmp(ccode, cntry_locales[i].abbrev) == 0) return &cntry_locales[i].country; } return NULL; } static const struct country_info * brcms_c_countrycode_map(struct brcms_cm_info *wlc_cm, const char *ccode, char *mapped_ccode, uint *mapped_regrev) { struct brcms_c_info *wlc = wlc_cm->wlc; const struct country_info *country; uint srom_regrev = wlc_cm->srom_regrev; const char *srom_ccode = wlc_cm->srom_ccode; int mapped; /* check for currently supported ccode size */ if (strlen(ccode) > (BRCM_CNTRY_BUF_SZ - 1)) { wiphy_err(wlc->wiphy, "wl%d: %s: ccode \"%s\" too long for " "match\n", wlc->pub->unit, __func__, ccode); return NULL; } /* default mapping is the given ccode and regrev 0 */ strncpy(mapped_ccode, ccode, BRCM_CNTRY_BUF_SZ); *mapped_regrev = 0; /* If the desired country code matches the srom country code, * then the mapped country is the srom regulatory rev. * Otherwise look for an aggregate mapping. */ if (!strcmp(srom_ccode, ccode)) { *mapped_regrev = srom_regrev; mapped = 0; wiphy_err(wlc->wiphy, "srom_code == ccode %s\n", __func__); } else { mapped = brcms_c_country_aggregate_map(wlc_cm, ccode, mapped_ccode, mapped_regrev); } /* find the matching built-in country definition */ country = brcms_c_country_lookup_direct(mapped_ccode, *mapped_regrev); /* if there is not an exact rev match, default to rev zero */ if (country == NULL && *mapped_regrev != 0) { *mapped_regrev = 0; country = brcms_c_country_lookup_direct(mapped_ccode, *mapped_regrev); } return country; } /* Lookup a country info structure from a null terminated country code * The lookup is case sensitive. 
*/ static const struct country_info * brcms_c_country_lookup(struct brcms_c_info *wlc, const char *ccode) { const struct country_info *country; char mapped_ccode[BRCM_CNTRY_BUF_SZ]; uint mapped_regrev; /* * map the country code to a built-in country code, regrev, and * country_info struct */ country = brcms_c_countrycode_map(wlc->cmi, ccode, mapped_ccode, &mapped_regrev); return country; } /* * reset the quiet channels vector to the union * of the restricted and radar channel sets */ static void brcms_c_quiet_channels_reset(struct brcms_cm_info *wlc_cm) { struct brcms_c_info *wlc = wlc_cm->wlc; uint i, j; struct brcms_band *band; const struct brcms_chanvec *chanvec; memset(&wlc_cm->quiet_channels, 0, sizeof(struct brcms_chanvec)); band = wlc->band; for (i = 0; i < wlc->pub->_nbands; i++, band = wlc->bandstate[OTHERBANDUNIT(wlc)]) { /* initialize quiet channels for restricted channels */ chanvec = wlc_cm->bandstate[band->bandunit].restricted_channels; for (j = 0; j < sizeof(struct brcms_chanvec); j++) wlc_cm->quiet_channels.vec[j] |= chanvec->vec[j]; } } /* Is the channel valid for the current locale and current band? */ static bool brcms_c_valid_channel20(struct brcms_cm_info *wlc_cm, uint val) { struct brcms_c_info *wlc = wlc_cm->wlc; return ((val < MAXCHANNEL) && isset(wlc_cm->bandstate[wlc->band->bandunit].valid_channels.vec, val)); } /* Is the channel valid for the current locale and specified band? */ static bool brcms_c_valid_channel20_in_band(struct brcms_cm_info *wlc_cm, uint bandunit, uint val) { return ((val < MAXCHANNEL) && isset(wlc_cm->bandstate[bandunit].valid_channels.vec, val)); } /* Is the channel valid for the current locale? 
(but don't consider channels not
 * available due to bandlocking) */
static bool brcms_c_valid_channel20_db(struct brcms_cm_info *wlc_cm, uint val)
{
	struct brcms_c_info *wlc = wlc_cm->wlc;

	/*
	 * valid in the current band, or — unless the driver is locked to
	 * one band — valid in the other band
	 */
	return brcms_c_valid_channel20(wlc->cmi, val) ||
	       (!wlc->bandlocked &&
		brcms_c_valid_channel20_in_band(wlc->cmi,
						OTHERBANDUNIT(wlc), val));
}

/* JP, J1 - J9 are Japan ccodes ('J' followed by 'P' or '1'..'9') */
static bool brcms_c_japan_ccode(const char *ccode)
{
	return (ccode[0] == 'J' &&
		(ccode[1] == 'P' || (ccode[1] >= '1' && ccode[1] <= '9')));
}

/* Returns true if currently set country is Japan or variant */
static bool brcms_c_japan(struct brcms_c_info *wlc)
{
	return brcms_c_japan_ccode(wlc->cmi->country_abbrev);
}

/*
 * Clamp every per-rate-group tx power entry in @txpwr to
 * @local_constraint_qdbm (units are quarter dBm throughout).
 */
static void
brcms_c_channel_min_txpower_limits_with_local_constraint(
		struct brcms_cm_info *wlc_cm, struct txpwr_limits *txpwr,
		u8 local_constraint_qdbm)
{
	int j;

	/* CCK Rates */
	for (j = 0; j < WL_TX_POWER_CCK_NUM; j++)
		txpwr->cck[j] = min(txpwr->cck[j], local_constraint_qdbm);

	/* 20 MHz Legacy OFDM SISO */
	for (j = 0; j < WL_TX_POWER_OFDM_NUM; j++)
		txpwr->ofdm[j] = min(txpwr->ofdm[j], local_constraint_qdbm);

	/* 20 MHz Legacy OFDM CDD */
	for (j = 0; j < BRCMS_NUM_RATES_OFDM; j++)
		txpwr->ofdm_cdd[j] = min(txpwr->ofdm_cdd[j],
					 local_constraint_qdbm);

	/* 40 MHz Legacy OFDM SISO */
	for (j = 0; j < BRCMS_NUM_RATES_OFDM; j++)
		txpwr->ofdm_40_siso[j] = min(txpwr->ofdm_40_siso[j],
					     local_constraint_qdbm);

	/* 40 MHz Legacy OFDM CDD */
	for (j = 0; j < BRCMS_NUM_RATES_OFDM; j++)
		txpwr->ofdm_40_cdd[j] = min(txpwr->ofdm_40_cdd[j],
					    local_constraint_qdbm);

	/* 20MHz MCS 0-7 SISO */
	for (j = 0; j < BRCMS_NUM_RATES_MCS_1_STREAM; j++)
		txpwr->mcs_20_siso[j] = min(txpwr->mcs_20_siso[j],
					    local_constraint_qdbm);

	/* 20MHz MCS 0-7 CDD */
	for (j = 0; j < BRCMS_NUM_RATES_MCS_1_STREAM; j++)
		txpwr->mcs_20_cdd[j] = min(txpwr->mcs_20_cdd[j],
					   local_constraint_qdbm);

	/* 20MHz MCS 0-7 STBC */
	for (j = 0; j < BRCMS_NUM_RATES_MCS_1_STREAM; j++)
		txpwr->mcs_20_stbc[j] = min(txpwr->mcs_20_stbc[j],
					    local_constraint_qdbm);

	/* 20MHz MCS
8-15 MIMO */ for (j = 0; j < BRCMS_NUM_RATES_MCS_2_STREAM; j++) txpwr->mcs_20_mimo[j] = min(txpwr->mcs_20_mimo[j], local_constraint_qdbm); /* 40MHz MCS 0-7 SISO */ for (j = 0; j < BRCMS_NUM_RATES_MCS_1_STREAM; j++) txpwr->mcs_40_siso[j] = min(txpwr->mcs_40_siso[j], local_constraint_qdbm); /* 40MHz MCS 0-7 CDD */ for (j = 0; j < BRCMS_NUM_RATES_MCS_1_STREAM; j++) txpwr->mcs_40_cdd[j] = min(txpwr->mcs_40_cdd[j], local_constraint_qdbm); /* 40MHz MCS 0-7 STBC */ for (j = 0; j < BRCMS_NUM_RATES_MCS_1_STREAM; j++) txpwr->mcs_40_stbc[j] = min(txpwr->mcs_40_stbc[j], local_constraint_qdbm); /* 40MHz MCS 8-15 MIMO */ for (j = 0; j < BRCMS_NUM_RATES_MCS_2_STREAM; j++) txpwr->mcs_40_mimo[j] = min(txpwr->mcs_40_mimo[j], local_constraint_qdbm); /* 40MHz MCS 32 */ txpwr->mcs32 = min(txpwr->mcs32, local_constraint_qdbm); } /* Update the radio state (enable/disable) and tx power targets * based on a new set of channel/regulatory information */ static void brcms_c_channels_commit(struct brcms_cm_info *wlc_cm) { struct brcms_c_info *wlc = wlc_cm->wlc; uint chan; struct txpwr_limits txpwr; /* search for the existence of any valid channel */ for (chan = 0; chan < MAXCHANNEL; chan++) { if (brcms_c_valid_channel20_db(wlc->cmi, chan)) break; } if (chan == MAXCHANNEL) chan = INVCHANNEL; /* * based on the channel search above, set or * clear WL_RADIO_COUNTRY_DISABLE. 
*/ if (chan == INVCHANNEL) { /* * country/locale with no valid channels, set * the radio disable bit */ mboolset(wlc->pub->radio_disabled, WL_RADIO_COUNTRY_DISABLE); wiphy_err(wlc->wiphy, "wl%d: %s: no valid channel for \"%s\" " "nbands %d bandlocked %d\n", wlc->pub->unit, __func__, wlc_cm->country_abbrev, wlc->pub->_nbands, wlc->bandlocked); } else if (mboolisset(wlc->pub->radio_disabled, WL_RADIO_COUNTRY_DISABLE)) { /* * country/locale with valid channel, clear * the radio disable bit */ mboolclr(wlc->pub->radio_disabled, WL_RADIO_COUNTRY_DISABLE); } /* * Now that the country abbreviation is set, if the radio supports 2G, * then set channel 14 restrictions based on the new locale. */ if (wlc->pub->_nbands > 1 || wlc->band->bandtype == BRCM_BAND_2G) wlc_phy_chanspec_ch14_widefilter_set(wlc->band->pi, brcms_c_japan(wlc) ? true : false); if (wlc->pub->up && chan != INVCHANNEL) { brcms_c_channel_reg_limits(wlc_cm, wlc->chanspec, &txpwr); brcms_c_channel_min_txpower_limits_with_local_constraint(wlc_cm, &txpwr, BRCMS_TXPWR_MAX); wlc_phy_txpower_limit_set(wlc->band->pi, &txpwr, wlc->chanspec); } } static int brcms_c_channels_init(struct brcms_cm_info *wlc_cm, const struct country_info *country) { struct brcms_c_info *wlc = wlc_cm->wlc; uint i, j; struct brcms_band *band; const struct locale_info *li; struct brcms_chanvec sup_chan; const struct locale_mimo_info *li_mimo; band = wlc->band; for (i = 0; i < wlc->pub->_nbands; i++, band = wlc->bandstate[OTHERBANDUNIT(wlc)]) { li = (band->bandtype == BRCM_BAND_5G) ? brcms_c_get_locale_5g(country->locale_5G) : brcms_c_get_locale_2g(country->locale_2G); wlc_cm->bandstate[band->bandunit].locale_flags = li->flags; li_mimo = (band->bandtype == BRCM_BAND_5G) ? 
brcms_c_get_mimo_5g(country->locale_mimo_5G) : brcms_c_get_mimo_2g(country->locale_mimo_2G); /* merge the mimo non-mimo locale flags */ wlc_cm->bandstate[band->bandunit].locale_flags |= li_mimo->flags; wlc_cm->bandstate[band->bandunit].restricted_channels = g_table_restricted_chan[li->restricted_channels]; wlc_cm->bandstate[band->bandunit].radar_channels = g_table_radar_set[li->radar_channels]; /* * set the channel availability, masking out the channels * that may not be supported on this phy. */ wlc_phy_chanspec_band_validch(band->pi, band->bandtype, &sup_chan); brcms_c_locale_get_channels(li, &wlc_cm->bandstate[band->bandunit]. valid_channels); for (j = 0; j < sizeof(struct brcms_chanvec); j++) wlc_cm->bandstate[band->bandunit].valid_channels. vec[j] &= sup_chan.vec[j]; } brcms_c_quiet_channels_reset(wlc_cm); brcms_c_channels_commit(wlc_cm); return 0; } /* * set the driver's current country and regulatory information * using a country code as the source. Look up built in country * information found with the country code. 
*/ static void brcms_c_set_country_common(struct brcms_cm_info *wlc_cm, const char *country_abbrev, const char *ccode, uint regrev, const struct country_info *country) { const struct locale_info *locale; struct brcms_c_info *wlc = wlc_cm->wlc; char prev_country_abbrev[BRCM_CNTRY_BUF_SZ]; /* save current country state */ wlc_cm->country = country; memset(&prev_country_abbrev, 0, BRCM_CNTRY_BUF_SZ); strncpy(prev_country_abbrev, wlc_cm->country_abbrev, BRCM_CNTRY_BUF_SZ - 1); strncpy(wlc_cm->country_abbrev, country_abbrev, BRCM_CNTRY_BUF_SZ - 1); strncpy(wlc_cm->ccode, ccode, BRCM_CNTRY_BUF_SZ - 1); wlc_cm->regrev = regrev; if ((wlc->pub->_n_enab & SUPPORT_11N) != wlc->protection->nmode_user) brcms_c_set_nmode(wlc); brcms_c_stf_ss_update(wlc, wlc->bandstate[BAND_2G_INDEX]); brcms_c_stf_ss_update(wlc, wlc->bandstate[BAND_5G_INDEX]); /* set or restore gmode as required by regulatory */ locale = brcms_c_get_locale_2g(country->locale_2G); if (locale && (locale->flags & BRCMS_NO_OFDM)) brcms_c_set_gmode(wlc, GMODE_LEGACY_B, false); else brcms_c_set_gmode(wlc, wlc->protection->gmode_user, false); brcms_c_channels_init(wlc_cm, country); return; } static int brcms_c_set_countrycode_rev(struct brcms_cm_info *wlc_cm, const char *country_abbrev, const char *ccode, int regrev) { const struct country_info *country; char mapped_ccode[BRCM_CNTRY_BUF_SZ]; uint mapped_regrev; /* if regrev is -1, lookup the mapped country code, * otherwise use the ccode and regrev directly */ if (regrev == -1) { /* * map the country code to a built-in country * code, regrev, and country_info */ country = brcms_c_countrycode_map(wlc_cm, ccode, mapped_ccode, &mapped_regrev); } else { /* find the matching built-in country definition */ country = brcms_c_country_lookup_direct(ccode, regrev); strncpy(mapped_ccode, ccode, BRCM_CNTRY_BUF_SZ); mapped_regrev = regrev; } if (country == NULL) return -EINVAL; /* set the driver state for the country */ brcms_c_set_country_common(wlc_cm, country_abbrev, 
mapped_ccode, mapped_regrev, country); return 0; } /* * set the driver's current country and regulatory information using * a country code as the source. Lookup built in country information * found with the country code. */ static int brcms_c_set_countrycode(struct brcms_cm_info *wlc_cm, const char *ccode) { char country_abbrev[BRCM_CNTRY_BUF_SZ]; strncpy(country_abbrev, ccode, BRCM_CNTRY_BUF_SZ); return brcms_c_set_countrycode_rev(wlc_cm, country_abbrev, ccode, -1); } struct brcms_cm_info *brcms_c_channel_mgr_attach(struct brcms_c_info *wlc) { struct brcms_cm_info *wlc_cm; char country_abbrev[BRCM_CNTRY_BUF_SZ]; const struct country_info *country; struct brcms_pub *pub = wlc->pub; char *ccode; BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit); wlc_cm = kzalloc(sizeof(struct brcms_cm_info), GFP_ATOMIC); if (wlc_cm == NULL) return NULL; wlc_cm->pub = pub; wlc_cm->wlc = wlc; wlc->cmi = wlc_cm; /* store the country code for passing up as a regulatory hint */ ccode = getvar(wlc->hw->sih, BRCMS_SROM_CCODE); if (ccode) strncpy(wlc->pub->srom_ccode, ccode, BRCM_CNTRY_BUF_SZ - 1); /* * internal country information which must match * regulatory constraints in firmware */ memset(country_abbrev, 0, BRCM_CNTRY_BUF_SZ); strncpy(country_abbrev, "X2", sizeof(country_abbrev) - 1); country = brcms_c_country_lookup(wlc, country_abbrev); /* save default country for exiting 11d regulatory mode */ strncpy(wlc->country_default, country_abbrev, BRCM_CNTRY_BUF_SZ - 1); /* initialize autocountry_default to driver default */ strncpy(wlc->autocountry_default, "X2", BRCM_CNTRY_BUF_SZ - 1); brcms_c_set_countrycode(wlc_cm, country_abbrev); return wlc_cm; } void brcms_c_channel_mgr_detach(struct brcms_cm_info *wlc_cm) { kfree(wlc_cm); } u8 brcms_c_channel_locale_flags_in_band(struct brcms_cm_info *wlc_cm, uint bandunit) { return wlc_cm->bandstate[bandunit].locale_flags; } static bool brcms_c_quiet_chanspec(struct brcms_cm_info *wlc_cm, u16 chspec) { return (wlc_cm->wlc->pub->_n_enab & SUPPORT_11N) && 
CHSPEC_IS40(chspec) ? (isset(wlc_cm->quiet_channels.vec, lower_20_sb(CHSPEC_CHANNEL(chspec))) || isset(wlc_cm->quiet_channels.vec, upper_20_sb(CHSPEC_CHANNEL(chspec)))) : isset(wlc_cm->quiet_channels.vec, CHSPEC_CHANNEL(chspec)); } void brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec, u8 local_constraint_qdbm) { struct brcms_c_info *wlc = wlc_cm->wlc; struct txpwr_limits txpwr; brcms_c_channel_reg_limits(wlc_cm, chanspec, &txpwr); brcms_c_channel_min_txpower_limits_with_local_constraint( wlc_cm, &txpwr, local_constraint_qdbm ); brcms_b_set_chanspec(wlc->hw, chanspec, (brcms_c_quiet_chanspec(wlc_cm, chanspec) != 0), &txpwr); } void brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec, struct txpwr_limits *txpwr) { struct brcms_c_info *wlc = wlc_cm->wlc; uint i; uint chan; int maxpwr; int delta; const struct country_info *country; struct brcms_band *band; const struct locale_info *li; int conducted_max = BRCMS_TXPWR_MAX; int conducted_ofdm_max = BRCMS_TXPWR_MAX; const struct locale_mimo_info *li_mimo; int maxpwr20, maxpwr40; int maxpwr_idx; uint j; memset(txpwr, 0, sizeof(struct txpwr_limits)); if (!brcms_c_valid_chanspec_db(wlc_cm, chanspec)) { country = brcms_c_country_lookup(wlc, wlc->autocountry_default); if (country == NULL) return; } else { country = wlc_cm->country; } chan = CHSPEC_CHANNEL(chanspec); band = wlc->bandstate[chspec_bandunit(chanspec)]; li = (band->bandtype == BRCM_BAND_5G) ? brcms_c_get_locale_5g(country->locale_5G) : brcms_c_get_locale_2g(country->locale_2G); li_mimo = (band->bandtype == BRCM_BAND_5G) ? 
brcms_c_get_mimo_5g(country->locale_mimo_5G) : brcms_c_get_mimo_2g(country->locale_mimo_2G); if (li->flags & BRCMS_EIRP) { delta = band->antgain; } else { delta = 0; if (band->antgain > QDB(6)) delta = band->antgain - QDB(6); /* Excess over 6 dB */ } if (li == &locale_i) { conducted_max = QDB(22); conducted_ofdm_max = QDB(22); } /* CCK txpwr limits for 2.4G band */ if (band->bandtype == BRCM_BAND_2G) { maxpwr = li->maxpwr[CHANNEL_POWER_IDX_2G_CCK(chan)]; maxpwr = maxpwr - delta; maxpwr = max(maxpwr, 0); maxpwr = min(maxpwr, conducted_max); for (i = 0; i < BRCMS_NUM_RATES_CCK; i++) txpwr->cck[i] = (u8) maxpwr; } /* OFDM txpwr limits for 2.4G or 5G bands */ if (band->bandtype == BRCM_BAND_2G) maxpwr = li->maxpwr[CHANNEL_POWER_IDX_2G_OFDM(chan)]; else maxpwr = li->maxpwr[CHANNEL_POWER_IDX_5G(chan)]; maxpwr = maxpwr - delta; maxpwr = max(maxpwr, 0); maxpwr = min(maxpwr, conducted_ofdm_max); /* Keep OFDM lmit below CCK limit */ if (band->bandtype == BRCM_BAND_2G) maxpwr = min_t(int, maxpwr, txpwr->cck[0]); for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++) txpwr->ofdm[i] = (u8) maxpwr; for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++) { /* * OFDM 40 MHz SISO has the same power as the corresponding * MCS0-7 rate unless overriden by the locale specific code. * We set this value to 0 as a flag (presumably 0 dBm isn't * a possibility) and then copy the MCS0-7 value to the 40 MHz * value if it wasn't explicitly set. 
*/ txpwr->ofdm_40_siso[i] = 0; txpwr->ofdm_cdd[i] = (u8) maxpwr; txpwr->ofdm_40_cdd[i] = 0; } /* MIMO/HT specific limits */ if (li_mimo->flags & BRCMS_EIRP) { delta = band->antgain; } else { delta = 0; if (band->antgain > QDB(6)) delta = band->antgain - QDB(6); /* Excess over 6 dB */ } if (band->bandtype == BRCM_BAND_2G) maxpwr_idx = (chan - 1); else maxpwr_idx = CHANNEL_POWER_IDX_5G(chan); maxpwr20 = li_mimo->maxpwr20[maxpwr_idx]; maxpwr40 = li_mimo->maxpwr40[maxpwr_idx]; maxpwr20 = maxpwr20 - delta; maxpwr20 = max(maxpwr20, 0); maxpwr40 = maxpwr40 - delta; maxpwr40 = max(maxpwr40, 0); /* Fill in the MCS 0-7 (SISO) rates */ for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) { /* * 20 MHz has the same power as the corresponding OFDM rate * unless overriden by the locale specific code. */ txpwr->mcs_20_siso[i] = txpwr->ofdm[i]; txpwr->mcs_40_siso[i] = 0; } /* Fill in the MCS 0-7 CDD rates */ for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) { txpwr->mcs_20_cdd[i] = (u8) maxpwr20; txpwr->mcs_40_cdd[i] = (u8) maxpwr40; } /* * These locales have SISO expressed in the * table and override CDD later */ if (li_mimo == &locale_bn) { if (li_mimo == &locale_bn) { maxpwr20 = QDB(16); maxpwr40 = 0; if (chan >= 3 && chan <= 11) maxpwr40 = QDB(16); } for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) { txpwr->mcs_20_siso[i] = (u8) maxpwr20; txpwr->mcs_40_siso[i] = (u8) maxpwr40; } } /* Fill in the MCS 0-7 STBC rates */ for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) { txpwr->mcs_20_stbc[i] = 0; txpwr->mcs_40_stbc[i] = 0; } /* Fill in the MCS 8-15 SDM rates */ for (i = 0; i < BRCMS_NUM_RATES_MCS_2_STREAM; i++) { txpwr->mcs_20_mimo[i] = (u8) maxpwr20; txpwr->mcs_40_mimo[i] = (u8) maxpwr40; } /* Fill in MCS32 */ txpwr->mcs32 = (u8) maxpwr40; for (i = 0, j = 0; i < BRCMS_NUM_RATES_OFDM; i++, j++) { if (txpwr->ofdm_40_cdd[i] == 0) txpwr->ofdm_40_cdd[i] = txpwr->mcs_40_cdd[j]; if (i == 0) { i = i + 1; if (txpwr->ofdm_40_cdd[i] == 0) txpwr->ofdm_40_cdd[i] = txpwr->mcs_40_cdd[j]; } 
} /* * Copy the 40 MHZ MCS 0-7 CDD value to the 40 MHZ MCS 0-7 SISO * value if it wasn't provided explicitly. */ for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) { if (txpwr->mcs_40_siso[i] == 0) txpwr->mcs_40_siso[i] = txpwr->mcs_40_cdd[i]; } for (i = 0, j = 0; i < BRCMS_NUM_RATES_OFDM; i++, j++) { if (txpwr->ofdm_40_siso[i] == 0) txpwr->ofdm_40_siso[i] = txpwr->mcs_40_siso[j]; if (i == 0) { i = i + 1; if (txpwr->ofdm_40_siso[i] == 0) txpwr->ofdm_40_siso[i] = txpwr->mcs_40_siso[j]; } } /* * Copy the 20 and 40 MHz MCS0-7 CDD values to the corresponding * STBC values if they weren't provided explicitly. */ for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) { if (txpwr->mcs_20_stbc[i] == 0) txpwr->mcs_20_stbc[i] = txpwr->mcs_20_cdd[i]; if (txpwr->mcs_40_stbc[i] == 0) txpwr->mcs_40_stbc[i] = txpwr->mcs_40_cdd[i]; } return; } /* * Verify the chanspec is using a legal set of parameters, i.e. that the * chanspec specified a band, bw, ctl_sb and channel and that the * combination could be legal given any set of circumstances. * RETURNS: true is the chanspec is malformed, false if it looks good. 
 */
static bool brcms_c_chspec_malformed(u16 chanspec)
{
	/* must be 2G or 5G band */
	if (!CHSPEC_IS5G(chanspec) && !CHSPEC_IS2G(chanspec))
		return true;
	/* must be 20 or 40 bandwidth */
	if (!CHSPEC_IS40(chanspec) && !CHSPEC_IS20(chanspec))
		return true;

	/* 20MHZ b/w must have no ctl sb, 40 must have a ctl sb */
	if (CHSPEC_IS20(chanspec)) {
		if (!CHSPEC_SB_NONE(chanspec))
			return true;
	} else if (!CHSPEC_SB_UPPER(chanspec) && !CHSPEC_SB_LOWER(chanspec)) {
		return true;
	}

	/* all structural checks passed */
	return false;
}

/*
 * Validate the chanspec for this locale, for 40MHZ we need to also
 * check that the sidebands are valid 20MZH channels in this locale
 * and they are also a legal HT combination
 */
static bool
brcms_c_valid_chanspec_ext(struct brcms_cm_info *wlc_cm, u16 chspec,
			   bool dualband)
{
	struct brcms_c_info *wlc = wlc_cm->wlc;
	u8 channel = CHSPEC_CHANNEL(chspec);

	/* check the chanspec */
	if (brcms_c_chspec_malformed(chspec)) {
		wiphy_err(wlc->wiphy, "wl%d: malformed chanspec 0x%x\n",
			  wlc->pub->unit, chspec);
		return false;
	}

	/* the chanspec's band must match the channel's band */
	if (CHANNEL_BANDUNIT(wlc_cm->wlc, channel) != chspec_bandunit(chspec))
		return false;

	/* Check a 20Mhz channel */
	if (CHSPEC_IS20(chspec)) {
		if (dualband)
			return brcms_c_valid_channel20_db(wlc_cm->wlc->cmi,
							  channel);
		else
			return brcms_c_valid_channel20(wlc_cm->wlc->cmi,
						       channel);
	}
#ifdef SUPPORT_40MHZ
	/*
	 * We know we are now checking a 40MHZ channel, so we should
	 * only be here for NPHYS
	 */
	if (BRCMS_ISNPHY(wlc->band) || BRCMS_ISSSLPNPHY(wlc->band)) {
		u8 upper_sideband = 0, idx;
		u8 num_ch20_entries =
		    sizeof(chan20_info) / sizeof(struct chan20_info);

		if (!VALID_40CHANSPEC_IN_BAND(wlc, chspec_bandunit(chspec)))
			return false;

		/* both 20 MHz sidebands must be valid channels themselves */
		if (dualband) {
			if (!brcms_c_valid_channel20_db(wlc->cmi,
							lower_20_sb(channel)) ||
			    !brcms_c_valid_channel20_db(wlc->cmi,
							upper_20_sb(channel)))
				return false;
		} else {
			if (!brcms_c_valid_channel20(wlc->cmi,
						     lower_20_sb(channel)) ||
			    !brcms_c_valid_channel20(wlc->cmi,
						     upper_20_sb(channel)))
				return false;
		}

		/* find the lower sideband info in the sideband array */
		for (idx = 0; idx < num_ch20_entries; idx++) {
			if (chan20_info[idx].sb == lower_20_sb(channel))
				upper_sideband = chan20_info[idx].adj_sbs;
		}
		/* check that the lower sideband allows an upper sideband */
		if ((upper_sideband & (CH_UPPER_SB | CH_EWA_VALID)) ==
		    (CH_UPPER_SB | CH_EWA_VALID))
			return true;
		return false;
	}
#endif				/* 40 MHZ */

	/* 40 MHz requested but not supported on this phy/build */
	return false;
}

/*
 * Dual-band chanspec validity check: passes dualband = true, so a
 * channel valid in either band (subject to bandlocking, see
 * brcms_c_valid_channel20_db) is accepted.
 */
bool brcms_c_valid_chanspec_db(struct brcms_cm_info *wlc_cm, u16 chspec)
{
	return brcms_c_valid_chanspec_ext(wlc_cm, chspec, true);
}
gpl-2.0
jiangbeilengyu/famkernel
Documentation/prctl/disable-tsc-on-off-stress-test.c
12901
1717
/*
 * Tests for prctl(PR_GET_TSC, ...) / prctl(PR_SET_TSC, ...)
 *
 * Tests if the control register is updated correctly
 * when set with prctl()
 *
 * Warning: this test will cause a very high load for a few seconds
 *
 */

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <signal.h>
#include <inttypes.h>
#include <wait.h>

#include <sys/prctl.h>
#include <linux/prctl.h>

/* Get/set the process' ability to use the timestamp counter instruction */
#ifndef PR_GET_TSC
#define PR_GET_TSC 25
#define PR_SET_TSC 26
# define PR_TSC_ENABLE 1 /* allow the use of the timestamp counter */
# define PR_TSC_SIGSEGV 2 /* throw a SIGSEGV instead of reading the TSC */
#endif

/* Read the x86 timestamp counter.  snippet from wikipedia :-) */
uint64_t rdtsc(void)
{
	uint32_t lo, hi;
	/* We cannot use "=A", since this would use %rax on x86_64 */
	__asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
	return (uint64_t)hi << 32 | lo;
}

/*
 * Flag shared between the SIGSEGV handler and the worker loop.
 * volatile sig_atomic_t: the only type guaranteed safe for an object
 * written from a signal handler and read from normal code (plain int
 * reads could be cached/reordered by the compiler).
 */
volatile sig_atomic_t should_segv = 0;

/*
 * SIGSEGV handler.  A fault is only expected while the TSC is disabled
 * (should_segv set); in that case re-enable the TSC and retry rdtsc()
 * to verify it works again.  Any other fault is a test failure.
 */
void sigsegv_cb(int sig)
{
	(void)sig;

	if (!should_segv) {
		fprintf(stderr, "FATAL ERROR, rdtsc() failed while enabled\n");
		/* was exit(0): a fatal error must not report success */
		exit(EXIT_FAILURE);
	}
	if (prctl(PR_SET_TSC, PR_TSC_ENABLE) < 0) {
		perror("prctl");
		exit(EXIT_FAILURE);
	}
	should_segv = 0;

	rdtsc();
}

/*
 * Worker body: hammer rdtsc() while repeatedly disabling the TSC via
 * prctl().  Never returns; the alarm(10) terminates the process via the
 * default SIGALRM action after ten seconds.
 */
void task(void)
{
	signal(SIGSEGV, sigsegv_cb);
	alarm(10);
	for (;;) {
		rdtsc();
		if (should_segv) {
			fprintf(stderr, "FATAL ERROR, rdtsc() succeeded while disabled\n");
			exit(EXIT_FAILURE);
		}
		if (prctl(PR_SET_TSC, PR_TSC_SIGSEGV) < 0) {
			perror("prctl");
			exit(EXIT_FAILURE);
		}
		should_segv = 1;
	}
}

int main(int argc, char **argv)
{
	int n_tasks = 100, i;

	(void)argc;
	(void)argv;

	fprintf(stderr, "[No further output means we're allright]\n");

	for (i = 0; i < n_tasks; i++) {
		pid_t pid = fork();

		if (pid < 0) {
			/* fork failure was silently ignored before */
			perror("fork");
			exit(EXIT_FAILURE);
		}
		if (pid == 0)
			task();	/* never returns; child dies on SIGALRM */
	}

	for (i = 0; i < n_tasks; i++)
		wait(NULL);

	exit(EXIT_SUCCESS);
}
gpl-2.0
xcaliburinhand/I9000_I897_hybrid_kernel
drivers/net/wireless/rndis_wlan.c
102
76513
/* * Driver for RNDIS based wireless USB devices. * * Copyright (C) 2007 by Bjorge Dijkstra <bjd@jooz.net> * Copyright (C) 2008 by Jussi Kivilinna <jussi.kivilinna@mbnet.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Portions of this file are based on NDISwrapper project, * Copyright (C) 2003-2005 Pontus Fuchs, Giridhar Pemmasani * http://ndiswrapper.sourceforge.net/ */ // #define DEBUG // error path messages, extra info // #define VERBOSE // more; success messages #include <linux/module.h> #include <linux/init.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/workqueue.h> #include <linux/mutex.h> #include <linux/mii.h> #include <linux/usb.h> #include <linux/usb/cdc.h> #include <linux/wireless.h> #include <linux/ieee80211.h> #include <linux/if_arp.h> #include <linux/ctype.h> #include <linux/spinlock.h> #include <net/iw_handler.h> #include <linux/usb/usbnet.h> #include <linux/usb/rndis_host.h> /* NOTE: All these are settings for Broadcom chipset */ static char modparam_country[4] = "EU"; module_param_string(country, modparam_country, 4, 0444); MODULE_PARM_DESC(country, "Country code (ISO 3166-1 alpha-2), default: EU"); static int modparam_frameburst = 1; module_param_named(frameburst, modparam_frameburst, int, 0444); MODULE_PARM_DESC(frameburst, "enable frame 
bursting (default: on)"); static int modparam_afterburner = 0; module_param_named(afterburner, modparam_afterburner, int, 0444); MODULE_PARM_DESC(afterburner, "enable afterburner aka '125 High Speed Mode' (default: off)"); static int modparam_power_save = 0; module_param_named(power_save, modparam_power_save, int, 0444); MODULE_PARM_DESC(power_save, "set power save mode: 0=off, 1=on, 2=fast (default: off)"); static int modparam_power_output = 3; module_param_named(power_output, modparam_power_output, int, 0444); MODULE_PARM_DESC(power_output, "set power output: 0=25%, 1=50%, 2=75%, 3=100% (default: 100%)"); static int modparam_roamtrigger = -70; module_param_named(roamtrigger, modparam_roamtrigger, int, 0444); MODULE_PARM_DESC(roamtrigger, "set roaming dBm trigger: -80=optimize for distance, " "-60=bandwidth (default: -70)"); static int modparam_roamdelta = 1; module_param_named(roamdelta, modparam_roamdelta, int, 0444); MODULE_PARM_DESC(roamdelta, "set roaming tendency: 0=aggressive, 1=moderate, " "2=conservative (default: moderate)"); static int modparam_workaround_interval = 500; module_param_named(workaround_interval, modparam_workaround_interval, int, 0444); MODULE_PARM_DESC(workaround_interval, "set stall workaround interval in msecs (default: 500)"); /* various RNDIS OID defs */ #define OID_GEN_LINK_SPEED ccpu2(0x00010107) #define OID_GEN_RNDIS_CONFIG_PARAMETER ccpu2(0x0001021b) #define OID_GEN_XMIT_OK ccpu2(0x00020101) #define OID_GEN_RCV_OK ccpu2(0x00020102) #define OID_GEN_XMIT_ERROR ccpu2(0x00020103) #define OID_GEN_RCV_ERROR ccpu2(0x00020104) #define OID_GEN_RCV_NO_BUFFER ccpu2(0x00020105) #define OID_802_3_PERMANENT_ADDRESS ccpu2(0x01010101) #define OID_802_3_CURRENT_ADDRESS ccpu2(0x01010102) #define OID_802_3_MULTICAST_LIST ccpu2(0x01010103) #define OID_802_3_MAXIMUM_LIST_SIZE ccpu2(0x01010104) #define OID_802_11_BSSID ccpu2(0x0d010101) #define OID_802_11_SSID ccpu2(0x0d010102) #define OID_802_11_INFRASTRUCTURE_MODE ccpu2(0x0d010108) #define 
OID_802_11_ADD_WEP ccpu2(0x0d010113) #define OID_802_11_REMOVE_WEP ccpu2(0x0d010114) #define OID_802_11_DISASSOCIATE ccpu2(0x0d010115) #define OID_802_11_AUTHENTICATION_MODE ccpu2(0x0d010118) #define OID_802_11_PRIVACY_FILTER ccpu2(0x0d010119) #define OID_802_11_BSSID_LIST_SCAN ccpu2(0x0d01011a) #define OID_802_11_ENCRYPTION_STATUS ccpu2(0x0d01011b) #define OID_802_11_ADD_KEY ccpu2(0x0d01011d) #define OID_802_11_REMOVE_KEY ccpu2(0x0d01011e) #define OID_802_11_ASSOCIATION_INFORMATION ccpu2(0x0d01011f) #define OID_802_11_PMKID ccpu2(0x0d010123) #define OID_802_11_NETWORK_TYPES_SUPPORTED ccpu2(0x0d010203) #define OID_802_11_NETWORK_TYPE_IN_USE ccpu2(0x0d010204) #define OID_802_11_TX_POWER_LEVEL ccpu2(0x0d010205) #define OID_802_11_RSSI ccpu2(0x0d010206) #define OID_802_11_RSSI_TRIGGER ccpu2(0x0d010207) #define OID_802_11_FRAGMENTATION_THRESHOLD ccpu2(0x0d010209) #define OID_802_11_RTS_THRESHOLD ccpu2(0x0d01020a) #define OID_802_11_SUPPORTED_RATES ccpu2(0x0d01020e) #define OID_802_11_CONFIGURATION ccpu2(0x0d010211) #define OID_802_11_BSSID_LIST ccpu2(0x0d010217) /* Typical noise/maximum signal level values taken from ndiswrapper iw_ndis.h */ #define WL_NOISE -96 /* typical noise level in dBm */ #define WL_SIGMAX -32 /* typical maximum signal level in dBm */ /* Assume that Broadcom 4320 (only chipset at time of writing known to be * based on wireless rndis) has default txpower of 13dBm. * This value is from Linksys WUSB54GSC User Guide, Appendix F: Specifications. * 13dBm == 19.9mW */ #define BCM4320_DEFAULT_TXPOWER 20 /* codes for "status" field of completion messages */ #define RNDIS_STATUS_ADAPTER_NOT_READY ccpu2(0xc0010011) #define RNDIS_STATUS_ADAPTER_NOT_OPEN ccpu2(0xc0010012) /* NDIS data structures. 
Taken from wpa_supplicant driver_ndis.c * slightly modified for datatype endianess, etc */ #define NDIS_802_11_LENGTH_SSID 32 #define NDIS_802_11_LENGTH_RATES 8 #define NDIS_802_11_LENGTH_RATES_EX 16 enum ndis_80211_net_type { ndis_80211_type_freq_hop, ndis_80211_type_direct_seq, ndis_80211_type_ofdm_a, ndis_80211_type_ofdm_g }; enum ndis_80211_net_infra { ndis_80211_infra_adhoc, ndis_80211_infra_infra, ndis_80211_infra_auto_unknown }; enum ndis_80211_auth_mode { ndis_80211_auth_open, ndis_80211_auth_shared, ndis_80211_auth_auto_switch, ndis_80211_auth_wpa, ndis_80211_auth_wpa_psk, ndis_80211_auth_wpa_none, ndis_80211_auth_wpa2, ndis_80211_auth_wpa2_psk }; enum ndis_80211_encr_status { ndis_80211_encr_wep_enabled, ndis_80211_encr_disabled, ndis_80211_encr_wep_key_absent, ndis_80211_encr_not_supported, ndis_80211_encr_tkip_enabled, ndis_80211_encr_tkip_key_absent, ndis_80211_encr_ccmp_enabled, ndis_80211_encr_ccmp_key_absent }; enum ndis_80211_priv_filter { ndis_80211_priv_accept_all, ndis_80211_priv_8021x_wep }; struct ndis_80211_ssid { __le32 length; u8 essid[NDIS_802_11_LENGTH_SSID]; } __attribute__((packed)); struct ndis_80211_conf_freq_hop { __le32 length; __le32 hop_pattern; __le32 hop_set; __le32 dwell_time; } __attribute__((packed)); struct ndis_80211_conf { __le32 length; __le32 beacon_period; __le32 atim_window; __le32 ds_config; struct ndis_80211_conf_freq_hop fh_config; } __attribute__((packed)); struct ndis_80211_bssid_ex { __le32 length; u8 mac[6]; u8 padding[2]; struct ndis_80211_ssid ssid; __le32 privacy; __le32 rssi; __le32 net_type; struct ndis_80211_conf config; __le32 net_infra; u8 rates[NDIS_802_11_LENGTH_RATES_EX]; __le32 ie_length; u8 ies[0]; } __attribute__((packed)); struct ndis_80211_bssid_list_ex { __le32 num_items; struct ndis_80211_bssid_ex bssid[0]; } __attribute__((packed)); struct ndis_80211_fixed_ies { u8 timestamp[8]; __le16 beacon_interval; __le16 capabilities; } __attribute__((packed)); struct ndis_80211_wep_key { __le32 size; 
__le32 index; __le32 length; u8 material[32]; } __attribute__((packed)); struct ndis_80211_key { __le32 size; __le32 index; __le32 length; u8 bssid[6]; u8 padding[6]; u8 rsc[8]; u8 material[32]; } __attribute__((packed)); struct ndis_80211_remove_key { __le32 size; __le32 index; u8 bssid[6]; } __attribute__((packed)); struct ndis_config_param { __le32 name_offs; __le32 name_length; __le32 type; __le32 value_offs; __le32 value_length; } __attribute__((packed)); struct ndis_80211_assoc_info { __le32 length; __le16 req_ies; struct req_ie { __le16 capa; __le16 listen_interval; u8 cur_ap_address[6]; } req_ie; __le32 req_ie_length; __le32 offset_req_ies; __le16 resp_ies; struct resp_ie { __le16 capa; __le16 status_code; __le16 assoc_id; } resp_ie; __le32 resp_ie_length; __le32 offset_resp_ies; } __attribute__((packed)); /* these have to match what is in wpa_supplicant */ enum wpa_alg { WPA_ALG_NONE, WPA_ALG_WEP, WPA_ALG_TKIP, WPA_ALG_CCMP }; enum wpa_cipher { CIPHER_NONE, CIPHER_WEP40, CIPHER_TKIP, CIPHER_CCMP, CIPHER_WEP104 }; enum wpa_key_mgmt { KEY_MGMT_802_1X, KEY_MGMT_PSK, KEY_MGMT_NONE, KEY_MGMT_802_1X_NO_WPA, KEY_MGMT_WPA_NONE }; /* * private data */ #define NET_TYPE_11FB 0 #define CAP_MODE_80211A 1 #define CAP_MODE_80211B 2 #define CAP_MODE_80211G 4 #define CAP_MODE_MASK 7 #define CAP_SUPPORT_TXPOWER 8 #define WORK_LINK_UP (1<<0) #define WORK_LINK_DOWN (1<<1) #define WORK_SET_MULTICAST_LIST (1<<2) #define COMMAND_BUFFER_SIZE (CONTROL_BUFFER_SIZE + sizeof(struct rndis_set)) /* RNDIS device private data */ struct rndis_wext_private { char name[32]; struct usbnet *usbdev; struct workqueue_struct *workqueue; struct delayed_work stats_work; struct work_struct work; struct mutex command_lock; spinlock_t stats_lock; unsigned long work_pending; struct iw_statistics iwstats; struct iw_statistics privstats; int nick_len; char nick[32]; int caps; int multicast_size; /* module parameters */ char param_country[4]; int param_frameburst; int param_afterburner; int 
param_power_save; int param_power_output; int param_roamtrigger; int param_roamdelta; u32 param_workaround_interval; /* hardware state */ int radio_on; int infra_mode; struct ndis_80211_ssid essid; /* encryption stuff */ int encr_tx_key_index; char encr_keys[4][32]; int encr_key_len[4]; int wpa_version; int wpa_keymgmt; int wpa_authalg; int wpa_ie_len; u8 *wpa_ie; int wpa_cipher_pair; int wpa_cipher_group; u8 command_buffer[COMMAND_BUFFER_SIZE]; }; static const int freq_chan[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442, 2447, 2452, 2457, 2462, 2467, 2472, 2484 }; static const int rates_80211g[8] = { 6, 9, 12, 18, 24, 36, 48, 54 }; static const int bcm4320_power_output[4] = { 25, 50, 75, 100 }; static const unsigned char zero_bssid[ETH_ALEN] = {0,}; static const unsigned char ffff_bssid[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; static struct rndis_wext_private *get_rndis_wext_priv(struct usbnet *dev) { return (struct rndis_wext_private *)dev->driver_priv; } static u32 get_bcm4320_power(struct rndis_wext_private *priv) { return BCM4320_DEFAULT_TXPOWER * bcm4320_power_output[priv->param_power_output] / 100; } /* translate error code */ static int rndis_error_status(__le32 rndis_status) { int ret = -EINVAL; switch (rndis_status) { case RNDIS_STATUS_SUCCESS: ret = 0; break; case RNDIS_STATUS_FAILURE: case RNDIS_STATUS_INVALID_DATA: ret = -EINVAL; break; case RNDIS_STATUS_NOT_SUPPORTED: ret = -EOPNOTSUPP; break; case RNDIS_STATUS_ADAPTER_NOT_READY: case RNDIS_STATUS_ADAPTER_NOT_OPEN: ret = -EBUSY; break; } return ret; } static int rndis_query_oid(struct usbnet *dev, __le32 oid, void *data, int *len) { struct rndis_wext_private *priv = get_rndis_wext_priv(dev); union { void *buf; struct rndis_msg_hdr *header; struct rndis_query *get; struct rndis_query_c *get_c; } u; int ret, buflen; buflen = *len + sizeof(*u.get); if (buflen < CONTROL_BUFFER_SIZE) buflen = CONTROL_BUFFER_SIZE; if (buflen > COMMAND_BUFFER_SIZE) { u.buf = kmalloc(buflen, GFP_KERNEL); if 
(!u.buf) return -ENOMEM; } else { u.buf = priv->command_buffer; } mutex_lock(&priv->command_lock); memset(u.get, 0, sizeof *u.get); u.get->msg_type = RNDIS_MSG_QUERY; u.get->msg_len = ccpu2(sizeof *u.get); u.get->oid = oid; ret = rndis_command(dev, u.header, buflen); if (ret == 0) { ret = le32_to_cpu(u.get_c->len); *len = (*len > ret) ? ret : *len; memcpy(data, u.buf + le32_to_cpu(u.get_c->offset) + 8, *len); ret = rndis_error_status(u.get_c->status); } mutex_unlock(&priv->command_lock); if (u.buf != priv->command_buffer) kfree(u.buf); return ret; } static int rndis_set_oid(struct usbnet *dev, __le32 oid, void *data, int len) { struct rndis_wext_private *priv = get_rndis_wext_priv(dev); union { void *buf; struct rndis_msg_hdr *header; struct rndis_set *set; struct rndis_set_c *set_c; } u; int ret, buflen; buflen = len + sizeof(*u.set); if (buflen < CONTROL_BUFFER_SIZE) buflen = CONTROL_BUFFER_SIZE; if (buflen > COMMAND_BUFFER_SIZE) { u.buf = kmalloc(buflen, GFP_KERNEL); if (!u.buf) return -ENOMEM; } else { u.buf = priv->command_buffer; } mutex_lock(&priv->command_lock); memset(u.set, 0, sizeof *u.set); u.set->msg_type = RNDIS_MSG_SET; u.set->msg_len = cpu_to_le32(sizeof(*u.set) + len); u.set->oid = oid; u.set->len = cpu_to_le32(len); u.set->offset = ccpu2(sizeof(*u.set) - 8); u.set->handle = ccpu2(0); memcpy(u.buf + sizeof(*u.set), data, len); ret = rndis_command(dev, u.header, buflen); if (ret == 0) ret = rndis_error_status(u.set_c->status); mutex_unlock(&priv->command_lock); if (u.buf != priv->command_buffer) kfree(u.buf); return ret; } /* * Specs say that we can only set config parameters only soon after device * initialization. 
* value_type: 0 = u32, 2 = unicode string */ static int rndis_set_config_parameter(struct usbnet *dev, char *param, int value_type, void *value) { struct ndis_config_param *infobuf; int value_len, info_len, param_len, ret, i; __le16 *unibuf; __le32 *dst_value; if (value_type == 0) value_len = sizeof(__le32); else if (value_type == 2) value_len = strlen(value) * sizeof(__le16); else return -EINVAL; param_len = strlen(param) * sizeof(__le16); info_len = sizeof(*infobuf) + param_len + value_len; #ifdef DEBUG info_len += 12; #endif infobuf = kmalloc(info_len, GFP_KERNEL); if (!infobuf) return -ENOMEM; #ifdef DEBUG info_len -= 12; /* extra 12 bytes are for padding (debug output) */ memset(infobuf, 0xCC, info_len + 12); #endif if (value_type == 2) devdbg(dev, "setting config parameter: %s, value: %s", param, (u8 *)value); else devdbg(dev, "setting config parameter: %s, value: %d", param, *(u32 *)value); infobuf->name_offs = cpu_to_le32(sizeof(*infobuf)); infobuf->name_length = cpu_to_le32(param_len); infobuf->type = cpu_to_le32(value_type); infobuf->value_offs = cpu_to_le32(sizeof(*infobuf) + param_len); infobuf->value_length = cpu_to_le32(value_len); /* simple string to unicode string conversion */ unibuf = (void *)infobuf + sizeof(*infobuf); for (i = 0; i < param_len / sizeof(__le16); i++) unibuf[i] = cpu_to_le16(param[i]); if (value_type == 2) { unibuf = (void *)infobuf + sizeof(*infobuf) + param_len; for (i = 0; i < value_len / sizeof(__le16); i++) unibuf[i] = cpu_to_le16(((u8 *)value)[i]); } else { dst_value = (void *)infobuf + sizeof(*infobuf) + param_len; *dst_value = cpu_to_le32(*(u32 *)value); } #ifdef DEBUG devdbg(dev, "info buffer (len: %d):", info_len); for (i = 0; i < info_len; i += 12) { u32 *tmp = (u32 *)((u8 *)infobuf + i); devdbg(dev, "%08X:%08X:%08X", cpu_to_be32(tmp[0]), cpu_to_be32(tmp[1]), cpu_to_be32(tmp[2])); } #endif ret = rndis_set_oid(dev, OID_GEN_RNDIS_CONFIG_PARAMETER, infobuf, info_len); if (ret != 0) devdbg(dev, "setting rndis config 
paramater failed, %d.", ret); kfree(infobuf); return ret; } static int rndis_set_config_parameter_str(struct usbnet *dev, char *param, char *value) { return(rndis_set_config_parameter(dev, param, 2, value)); } /*static int rndis_set_config_parameter_u32(struct usbnet *dev, char *param, u32 value) { return(rndis_set_config_parameter(dev, param, 0, &value)); }*/ /* * data conversion functions */ static int level_to_qual(int level) { int qual = 100 * (level - WL_NOISE) / (WL_SIGMAX - WL_NOISE); return qual >= 0 ? (qual <= 100 ? qual : 100) : 0; } static void dsconfig_to_freq(unsigned int dsconfig, struct iw_freq *freq) { freq->e = 0; freq->i = 0; freq->flags = 0; /* see comment in wireless.h above the "struct iw_freq" * definition for an explanation of this if * NOTE: 1000000 is due to the kHz */ if (dsconfig > 1000000) { freq->m = dsconfig / 10; freq->e = 1; } else freq->m = dsconfig; /* convert from kHz to Hz */ freq->e += 3; } static int freq_to_dsconfig(struct iw_freq *freq, unsigned int *dsconfig) { if (freq->m < 1000 && freq->e == 0) { if (freq->m >= 1 && freq->m <= ARRAY_SIZE(freq_chan)) *dsconfig = freq_chan[freq->m - 1] * 1000; else return -1; } else { int i; *dsconfig = freq->m; for (i = freq->e; i > 0; i--) *dsconfig *= 10; *dsconfig /= 1000; } return 0; } /* * common functions */ static int add_wep_key(struct usbnet *usbdev, char *key, int key_len, int index); static int get_essid(struct usbnet *usbdev, struct ndis_80211_ssid *ssid) { int ret, len; len = sizeof(*ssid); ret = rndis_query_oid(usbdev, OID_802_11_SSID, ssid, &len); if (ret != 0) ssid->length = 0; #ifdef DEBUG { unsigned char tmp[NDIS_802_11_LENGTH_SSID + 1]; memcpy(tmp, ssid->essid, le32_to_cpu(ssid->length)); tmp[le32_to_cpu(ssid->length)] = 0; devdbg(usbdev, "get_essid: '%s', ret: %d", tmp, ret); } #endif return ret; } static int set_essid(struct usbnet *usbdev, struct ndis_80211_ssid *ssid) { struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); int ret; ret = rndis_set_oid(usbdev, 
OID_802_11_SSID, ssid, sizeof(*ssid)); if (ret == 0) { memcpy(&priv->essid, ssid, sizeof(priv->essid)); priv->radio_on = 1; devdbg(usbdev, "set_essid: radio_on = 1"); } return ret; } static int get_bssid(struct usbnet *usbdev, u8 bssid[ETH_ALEN]) { int ret, len; len = ETH_ALEN; ret = rndis_query_oid(usbdev, OID_802_11_BSSID, bssid, &len); if (ret != 0) memset(bssid, 0, ETH_ALEN); return ret; } static int get_association_info(struct usbnet *usbdev, struct ndis_80211_assoc_info *info, int len) { return rndis_query_oid(usbdev, OID_802_11_ASSOCIATION_INFORMATION, info, &len); } static int is_associated(struct usbnet *usbdev) { u8 bssid[ETH_ALEN]; int ret; ret = get_bssid(usbdev, bssid); return(ret == 0 && memcmp(bssid, zero_bssid, ETH_ALEN) != 0); } static int disassociate(struct usbnet *usbdev, int reset_ssid) { struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); struct ndis_80211_ssid ssid; int i, ret = 0; if (priv->radio_on) { ret = rndis_set_oid(usbdev, OID_802_11_DISASSOCIATE, NULL, 0); if (ret == 0) { priv->radio_on = 0; devdbg(usbdev, "disassociate: radio_on = 0"); if (reset_ssid) msleep(100); } } /* disassociate causes radio to be turned off; if reset_ssid * is given, set random ssid to enable radio */ if (reset_ssid) { ssid.length = cpu_to_le32(sizeof(ssid.essid)); get_random_bytes(&ssid.essid[2], sizeof(ssid.essid)-2); ssid.essid[0] = 0x1; ssid.essid[1] = 0xff; for (i = 2; i < sizeof(ssid.essid); i++) ssid.essid[i] = 0x1 + (ssid.essid[i] * 0xfe / 0xff); ret = set_essid(usbdev, &ssid); } return ret; } static int set_auth_mode(struct usbnet *usbdev, int wpa_version, int authalg) { struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); __le32 tmp; int auth_mode, ret; devdbg(usbdev, "set_auth_mode: wpa_version=0x%x authalg=0x%x " "keymgmt=0x%x", wpa_version, authalg, priv->wpa_keymgmt); if (wpa_version & IW_AUTH_WPA_VERSION_WPA2) { if (priv->wpa_keymgmt & IW_AUTH_KEY_MGMT_802_1X) auth_mode = ndis_80211_auth_wpa2; else auth_mode = 
ndis_80211_auth_wpa2_psk; } else if (wpa_version & IW_AUTH_WPA_VERSION_WPA) { if (priv->wpa_keymgmt & IW_AUTH_KEY_MGMT_802_1X) auth_mode = ndis_80211_auth_wpa; else if (priv->wpa_keymgmt & IW_AUTH_KEY_MGMT_PSK) auth_mode = ndis_80211_auth_wpa_psk; else auth_mode = ndis_80211_auth_wpa_none; } else if (authalg & IW_AUTH_ALG_SHARED_KEY) { if (authalg & IW_AUTH_ALG_OPEN_SYSTEM) auth_mode = ndis_80211_auth_auto_switch; else auth_mode = ndis_80211_auth_shared; } else auth_mode = ndis_80211_auth_open; tmp = cpu_to_le32(auth_mode); ret = rndis_set_oid(usbdev, OID_802_11_AUTHENTICATION_MODE, &tmp, sizeof(tmp)); if (ret != 0) { devwarn(usbdev, "setting auth mode failed (%08X)", ret); return ret; } priv->wpa_version = wpa_version; priv->wpa_authalg = authalg; return 0; } static int set_priv_filter(struct usbnet *usbdev) { struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); __le32 tmp; devdbg(usbdev, "set_priv_filter: wpa_version=0x%x", priv->wpa_version); if (priv->wpa_version & IW_AUTH_WPA_VERSION_WPA2 || priv->wpa_version & IW_AUTH_WPA_VERSION_WPA) tmp = cpu_to_le32(ndis_80211_priv_8021x_wep); else tmp = cpu_to_le32(ndis_80211_priv_accept_all); return rndis_set_oid(usbdev, OID_802_11_PRIVACY_FILTER, &tmp, sizeof(tmp)); } static int set_encr_mode(struct usbnet *usbdev, int pairwise, int groupwise) { struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); __le32 tmp; int encr_mode, ret; devdbg(usbdev, "set_encr_mode: cipher_pair=0x%x cipher_group=0x%x", pairwise, groupwise); if (pairwise & IW_AUTH_CIPHER_CCMP) encr_mode = ndis_80211_encr_ccmp_enabled; else if (pairwise & IW_AUTH_CIPHER_TKIP) encr_mode = ndis_80211_encr_tkip_enabled; else if (pairwise & (IW_AUTH_CIPHER_WEP40 | IW_AUTH_CIPHER_WEP104)) encr_mode = ndis_80211_encr_wep_enabled; else if (groupwise & IW_AUTH_CIPHER_CCMP) encr_mode = ndis_80211_encr_ccmp_enabled; else if (groupwise & IW_AUTH_CIPHER_TKIP) encr_mode = ndis_80211_encr_tkip_enabled; else encr_mode = ndis_80211_encr_disabled; tmp = 
cpu_to_le32(encr_mode); ret = rndis_set_oid(usbdev, OID_802_11_ENCRYPTION_STATUS, &tmp, sizeof(tmp)); if (ret != 0) { devwarn(usbdev, "setting encr mode failed (%08X)", ret); return ret; } priv->wpa_cipher_pair = pairwise; priv->wpa_cipher_group = groupwise; return 0; } static int set_assoc_params(struct usbnet *usbdev) { struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); set_auth_mode(usbdev, priv->wpa_version, priv->wpa_authalg); set_priv_filter(usbdev); set_encr_mode(usbdev, priv->wpa_cipher_pair, priv->wpa_cipher_group); return 0; } static int set_infra_mode(struct usbnet *usbdev, int mode) { struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); __le32 tmp; int ret, i; devdbg(usbdev, "set_infra_mode: infra_mode=0x%x", priv->infra_mode); tmp = cpu_to_le32(mode); ret = rndis_set_oid(usbdev, OID_802_11_INFRASTRUCTURE_MODE, &tmp, sizeof(tmp)); if (ret != 0) { devwarn(usbdev, "setting infra mode failed (%08X)", ret); return ret; } /* NDIS drivers clear keys when infrastructure mode is * changed. But Linux tools assume otherwise. 
So set the
	 * keys */
	if (priv->wpa_keymgmt == 0 ||
	    priv->wpa_keymgmt == IW_AUTH_KEY_MGMT_802_1X) {
		/* re-install any cached WEP keys after the mode change */
		for (i = 0; i < 4; i++) {
			if (priv->encr_key_len[i] > 0)
				add_wep_key(usbdev, priv->encr_keys[i],
						priv->encr_key_len[i], i);
		}
	}

	priv->infra_mode = mode;
	return 0;
}

/* Reset driver state and device to default WEXT parameters:
 * infrastructure mode, open authentication, no privacy filter,
 * no encryption. */
static void set_default_iw_params(struct usbnet *usbdev)
{
	struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);

	priv->wpa_keymgmt = 0;
	priv->wpa_version = 0;

	set_infra_mode(usbdev, ndis_80211_infra_infra);
	set_auth_mode(usbdev, IW_AUTH_WPA_VERSION_DISABLED,
				IW_AUTH_ALG_OPEN_SYSTEM);
	set_priv_filter(usbdev);
	set_encr_mode(usbdev, IW_AUTH_CIPHER_NONE, IW_AUTH_CIPHER_NONE);
}

/* Disassociate from the current network (resetting the SSID so the
 * radio comes back up) and fall back to default WEXT parameters.
 * Returns the disassociate result. */
static int deauthenticate(struct usbnet *usbdev)
{
	int ret;

	ret = disassociate(usbdev, 1);
	set_default_iw_params(usbdev);
	return ret;
}

/* index must be 0 - N, as per NDIS */
static int add_wep_key(struct usbnet *usbdev, char *key, int key_len,
							int index)
{
	struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
	struct ndis_80211_wep_key ndis_key;
	int ret;

	/* reject empty/oversized keys and out-of-range key slots */
	if (key_len <= 0 || key_len > 32 || index < 0 || index >= 4)
		return -EINVAL;

	memset(&ndis_key, 0, sizeof(ndis_key));

	ndis_key.size = cpu_to_le32(sizeof(ndis_key));
	ndis_key.length = cpu_to_le32(key_len);
	ndis_key.index = cpu_to_le32(index);
	memcpy(&ndis_key.material, key, key_len);

	if (index == priv->encr_tx_key_index) {
		/* top bit of the index marks this as the transmit key;
		 * enabling WEP here also turns encryption on */
		ndis_key.index |= cpu_to_le32(1 << 31);
		ret = set_encr_mode(usbdev, IW_AUTH_CIPHER_WEP104,
						IW_AUTH_CIPHER_NONE);
		if (ret)
			devwarn(usbdev, "encryption couldn't be enabled (%08X)",
									ret);
	}

	ret = rndis_set_oid(usbdev, OID_802_11_ADD_WEP, &ndis_key,
							sizeof(ndis_key));
	if (ret != 0) {
		devwarn(usbdev, "adding encryption key %d failed (%08X)",
							index+1, ret);
		return ret;
	}

	/* cache the key so it can be re-installed after mode changes
	 * (see set_infra_mode) */
	priv->encr_key_len[index] = key_len;
	memcpy(&priv->encr_keys[index], key, key_len);

	return 0;
}

/* remove_key is for both wep and wpa */
static int remove_key(struct usbnet *usbdev, int index, u8 bssid[ETH_ALEN])
{
	struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
	struct
ndis_80211_remove_key remove_key; __le32 keyindex; int ret; if (priv->encr_key_len[index] == 0) return 0; priv->encr_key_len[index] = 0; memset(&priv->encr_keys[index], 0, sizeof(priv->encr_keys[index])); if (priv->wpa_cipher_pair == IW_AUTH_CIPHER_TKIP || priv->wpa_cipher_pair == IW_AUTH_CIPHER_CCMP || priv->wpa_cipher_group == IW_AUTH_CIPHER_TKIP || priv->wpa_cipher_group == IW_AUTH_CIPHER_CCMP) { remove_key.size = cpu_to_le32(sizeof(remove_key)); remove_key.index = cpu_to_le32(index); if (bssid) { /* pairwise key */ if (memcmp(bssid, ffff_bssid, ETH_ALEN) != 0) remove_key.index |= cpu_to_le32(1 << 30); memcpy(remove_key.bssid, bssid, sizeof(remove_key.bssid)); } else memset(remove_key.bssid, 0xff, sizeof(remove_key.bssid)); ret = rndis_set_oid(usbdev, OID_802_11_REMOVE_KEY, &remove_key, sizeof(remove_key)); if (ret != 0) return ret; } else { keyindex = cpu_to_le32(index); ret = rndis_set_oid(usbdev, OID_802_11_REMOVE_WEP, &keyindex, sizeof(keyindex)); if (ret != 0) { devwarn(usbdev, "removing encryption key %d failed (%08X)", index, ret); return ret; } } /* if it is transmit key, disable encryption */ if (index == priv->encr_tx_key_index) set_encr_mode(usbdev, IW_AUTH_CIPHER_NONE, IW_AUTH_CIPHER_NONE); return 0; } static void set_multicast_list(struct usbnet *usbdev) { struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); struct dev_mc_list *mclist; __le32 filter; int ret, i, size; char *buf; filter = RNDIS_PACKET_TYPE_DIRECTED | RNDIS_PACKET_TYPE_BROADCAST; if (usbdev->net->flags & IFF_PROMISC) { filter |= RNDIS_PACKET_TYPE_PROMISCUOUS | RNDIS_PACKET_TYPE_ALL_LOCAL; } else if (usbdev->net->flags & IFF_ALLMULTI || usbdev->net->mc_count > priv->multicast_size) { filter |= RNDIS_PACKET_TYPE_ALL_MULTICAST; } else if (usbdev->net->mc_count > 0) { size = min(priv->multicast_size, usbdev->net->mc_count); buf = kmalloc(size * ETH_ALEN, GFP_KERNEL); if (!buf) { devwarn(usbdev, "couldn't alloc %d bytes of memory", size * ETH_ALEN); return; } mclist = 
usbdev->net->mc_list; for (i = 0; i < size && mclist; mclist = mclist->next) { if (mclist->dmi_addrlen != ETH_ALEN) continue; memcpy(buf + i * ETH_ALEN, mclist->dmi_addr, ETH_ALEN); i++; } ret = rndis_set_oid(usbdev, OID_802_3_MULTICAST_LIST, buf, i * ETH_ALEN); if (ret == 0 && i > 0) filter |= RNDIS_PACKET_TYPE_MULTICAST; else filter |= RNDIS_PACKET_TYPE_ALL_MULTICAST; devdbg(usbdev, "OID_802_3_MULTICAST_LIST(%d, max: %d) -> %d", i, priv->multicast_size, ret); kfree(buf); } ret = rndis_set_oid(usbdev, OID_GEN_CURRENT_PACKET_FILTER, &filter, sizeof(filter)); if (ret < 0) { devwarn(usbdev, "couldn't set packet filter: %08x", le32_to_cpu(filter)); } devdbg(usbdev, "OID_GEN_CURRENT_PACKET_FILTER(%08x) -> %d", le32_to_cpu(filter), ret); } /* * wireless extension handlers */ static int rndis_iw_commit(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { /* dummy op */ return 0; } static int rndis_iw_get_range(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_range *range = (struct iw_range *)extra; struct usbnet *usbdev = netdev_priv(dev); struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); int len, ret, i, j, num, has_80211g_rates; u8 rates[8]; __le32 tx_power; devdbg(usbdev, "SIOCGIWRANGE"); /* clear iw_range struct */ memset(range, 0, sizeof(*range)); wrqu->data.length = sizeof(*range); range->txpower_capa = IW_TXPOW_MWATT; range->num_txpower = 1; if (priv->caps & CAP_SUPPORT_TXPOWER) { len = sizeof(tx_power); ret = rndis_query_oid(usbdev, OID_802_11_TX_POWER_LEVEL, &tx_power, &len); if (ret == 0 && le32_to_cpu(tx_power) != 0xFF) range->txpower[0] = le32_to_cpu(tx_power); else range->txpower[0] = get_bcm4320_power(priv); } else range->txpower[0] = get_bcm4320_power(priv); len = sizeof(rates); ret = rndis_query_oid(usbdev, OID_802_11_SUPPORTED_RATES, &rates, &len); has_80211g_rates = 0; if (ret == 0) { j = 0; for (i = 0; i < len; i++) { if (rates[i] == 0) break; 
range->bitrate[j] = (rates[i] & 0x7f) * 500000; /* check for non 802.11b rates */ if (range->bitrate[j] == 6000000 || range->bitrate[j] == 9000000 || (range->bitrate[j] >= 12000000 && range->bitrate[j] != 22000000)) has_80211g_rates = 1; j++; } range->num_bitrates = j; } else range->num_bitrates = 0; /* fill in 802.11g rates */ if (has_80211g_rates) { num = range->num_bitrates; for (i = 0; i < ARRAY_SIZE(rates_80211g); i++) { for (j = 0; j < num; j++) { if (range->bitrate[j] == rates_80211g[i] * 1000000) break; } if (j == num) range->bitrate[range->num_bitrates++] = rates_80211g[i] * 1000000; if (range->num_bitrates == IW_MAX_BITRATES) break; } /* estimated max real througput in bps */ range->throughput = 54 * 1000 * 1000 / 2; /* ~35% more with afterburner */ if (priv->param_afterburner) range->throughput = range->throughput / 100 * 135; } else { /* estimated max real througput in bps */ range->throughput = 11 * 1000 * 1000 / 2; } range->num_channels = ARRAY_SIZE(freq_chan); for (i = 0; i < ARRAY_SIZE(freq_chan) && i < IW_MAX_FREQUENCIES; i++) { range->freq[i].i = i + 1; range->freq[i].m = freq_chan[i] * 100000; range->freq[i].e = 1; } range->num_frequency = i; range->min_rts = 0; range->max_rts = 2347; range->min_frag = 256; range->max_frag = 2346; range->max_qual.qual = 100; range->max_qual.level = 154; range->max_qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED | IW_QUAL_NOISE_INVALID; range->we_version_compiled = WIRELESS_EXT; range->we_version_source = WIRELESS_EXT; range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 | IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP; return 0; } static int rndis_iw_get_name(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct usbnet *usbdev = netdev_priv(dev); struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); strcpy(wrqu->name, priv->name); return 0; } static int rndis_iw_set_essid(struct net_device *dev, struct iw_request_info *info, union iwreq_data 
*wrqu, char *essid) { struct ndis_80211_ssid ssid; int length = wrqu->essid.length; struct usbnet *usbdev = netdev_priv(dev); devdbg(usbdev, "SIOCSIWESSID: [flags:%d,len:%d] '%.32s'", wrqu->essid.flags, wrqu->essid.length, essid); if (length > NDIS_802_11_LENGTH_SSID) length = NDIS_802_11_LENGTH_SSID; ssid.length = cpu_to_le32(length); if (length > 0) memcpy(ssid.essid, essid, length); else memset(ssid.essid, 0, NDIS_802_11_LENGTH_SSID); set_assoc_params(usbdev); if (!wrqu->essid.flags || length == 0) return disassociate(usbdev, 1); else return set_essid(usbdev, &ssid); } static int rndis_iw_get_essid(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *essid) { struct ndis_80211_ssid ssid; struct usbnet *usbdev = netdev_priv(dev); int ret; ret = get_essid(usbdev, &ssid); if (ret == 0 && le32_to_cpu(ssid.length) > 0) { wrqu->essid.flags = 1; wrqu->essid.length = le32_to_cpu(ssid.length); memcpy(essid, ssid.essid, wrqu->essid.length); essid[wrqu->essid.length] = 0; } else { memset(essid, 0, sizeof(NDIS_802_11_LENGTH_SSID)); wrqu->essid.flags = 0; wrqu->essid.length = 0; } devdbg(usbdev, "SIOCGIWESSID: %s", essid); return ret; } static int rndis_iw_get_bssid(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct usbnet *usbdev = netdev_priv(dev); unsigned char bssid[ETH_ALEN]; int ret; ret = get_bssid(usbdev, bssid); if (ret == 0) devdbg(usbdev, "SIOCGIWAP: %pM", bssid); else devdbg(usbdev, "SIOCGIWAP: <not associated>"); wrqu->ap_addr.sa_family = ARPHRD_ETHER; memcpy(wrqu->ap_addr.sa_data, bssid, ETH_ALEN); return ret; } static int rndis_iw_set_bssid(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct usbnet *usbdev = netdev_priv(dev); u8 *bssid = (u8 *)wrqu->ap_addr.sa_data; int ret; devdbg(usbdev, "SIOCSIWAP: %pM", bssid); ret = rndis_set_oid(usbdev, OID_802_11_BSSID, bssid, ETH_ALEN); /* user apps may set ap's mac address, which is not 
required; * they may fail to work if this function fails, so return * success */ if (ret) devwarn(usbdev, "setting AP mac address failed (%08X)", ret); return 0; } static int rndis_iw_set_auth(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_param *p = &wrqu->param; struct usbnet *usbdev = netdev_priv(dev); struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); int ret = -ENOTSUPP; switch (p->flags & IW_AUTH_INDEX) { case IW_AUTH_WPA_VERSION: devdbg(usbdev, "SIOCSIWAUTH: WPA_VERSION, %08x", p->value); priv->wpa_version = p->value; ret = 0; break; case IW_AUTH_CIPHER_PAIRWISE: devdbg(usbdev, "SIOCSIWAUTH: CIPHER_PAIRWISE, %08x", p->value); priv->wpa_cipher_pair = p->value; ret = 0; break; case IW_AUTH_CIPHER_GROUP: devdbg(usbdev, "SIOCSIWAUTH: CIPHER_GROUP, %08x", p->value); priv->wpa_cipher_group = p->value; ret = 0; break; case IW_AUTH_KEY_MGMT: devdbg(usbdev, "SIOCSIWAUTH: KEY_MGMT, %08x", p->value); priv->wpa_keymgmt = p->value; ret = 0; break; case IW_AUTH_TKIP_COUNTERMEASURES: devdbg(usbdev, "SIOCSIWAUTH: TKIP_COUNTERMEASURES, %08x", p->value); ret = 0; break; case IW_AUTH_DROP_UNENCRYPTED: devdbg(usbdev, "SIOCSIWAUTH: DROP_UNENCRYPTED, %08x", p->value); ret = 0; break; case IW_AUTH_80211_AUTH_ALG: devdbg(usbdev, "SIOCSIWAUTH: 80211_AUTH_ALG, %08x", p->value); priv->wpa_authalg = p->value; ret = 0; break; case IW_AUTH_WPA_ENABLED: devdbg(usbdev, "SIOCSIWAUTH: WPA_ENABLED, %08x", p->value); if (wrqu->param.value) deauthenticate(usbdev); ret = 0; break; case IW_AUTH_RX_UNENCRYPTED_EAPOL: devdbg(usbdev, "SIOCSIWAUTH: RX_UNENCRYPTED_EAPOL, %08x", p->value); ret = 0; break; case IW_AUTH_ROAMING_CONTROL: devdbg(usbdev, "SIOCSIWAUTH: ROAMING_CONTROL, %08x", p->value); ret = 0; break; case IW_AUTH_PRIVACY_INVOKED: devdbg(usbdev, "SIOCSIWAUTH: invalid cmd %d", wrqu->param.flags & IW_AUTH_INDEX); return -EOPNOTSUPP; default: devdbg(usbdev, "SIOCSIWAUTH: UNKNOWN %08x, %08x", p->flags & IW_AUTH_INDEX, 
p->value); } return ret; } static int rndis_iw_get_auth(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_param *p = &wrqu->param; struct usbnet *usbdev = netdev_priv(dev); struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); switch (p->flags & IW_AUTH_INDEX) { case IW_AUTH_WPA_VERSION: p->value = priv->wpa_version; break; case IW_AUTH_CIPHER_PAIRWISE: p->value = priv->wpa_cipher_pair; break; case IW_AUTH_CIPHER_GROUP: p->value = priv->wpa_cipher_group; break; case IW_AUTH_KEY_MGMT: p->value = priv->wpa_keymgmt; break; case IW_AUTH_80211_AUTH_ALG: p->value = priv->wpa_authalg; break; default: devdbg(usbdev, "SIOCGIWAUTH: invalid cmd %d", wrqu->param.flags & IW_AUTH_INDEX); return -EOPNOTSUPP; } return 0; } static int rndis_iw_get_mode(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct usbnet *usbdev = netdev_priv(dev); struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); switch (priv->infra_mode) { case ndis_80211_infra_adhoc: wrqu->mode = IW_MODE_ADHOC; break; case ndis_80211_infra_infra: wrqu->mode = IW_MODE_INFRA; break; /*case ndis_80211_infra_auto_unknown:*/ default: wrqu->mode = IW_MODE_AUTO; break; } devdbg(usbdev, "SIOCGIWMODE: %08x", wrqu->mode); return 0; } static int rndis_iw_set_mode(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct usbnet *usbdev = netdev_priv(dev); int mode; devdbg(usbdev, "SIOCSIWMODE: %08x", wrqu->mode); switch (wrqu->mode) { case IW_MODE_ADHOC: mode = ndis_80211_infra_adhoc; break; case IW_MODE_INFRA: mode = ndis_80211_infra_infra; break; /*case IW_MODE_AUTO:*/ default: mode = ndis_80211_infra_auto_unknown; break; } return set_infra_mode(usbdev, mode); } static int rndis_iw_set_encode(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct usbnet *usbdev = netdev_priv(dev); struct rndis_wext_private *priv = 
get_rndis_wext_priv(usbdev); int ret, index, key_len; u8 *key; index = (wrqu->encoding.flags & IW_ENCODE_INDEX); /* iwconfig gives index as 1 - N */ if (index > 0) index--; else index = priv->encr_tx_key_index; if (index < 0 || index >= 4) { devwarn(usbdev, "encryption index out of range (%u)", index); return -EINVAL; } /* remove key if disabled */ if (wrqu->data.flags & IW_ENCODE_DISABLED) { if (remove_key(usbdev, index, NULL)) return -EINVAL; else return 0; } /* global encryption state (for all keys) */ if (wrqu->data.flags & IW_ENCODE_OPEN) ret = set_auth_mode(usbdev, IW_AUTH_WPA_VERSION_DISABLED, IW_AUTH_ALG_OPEN_SYSTEM); else /*if (wrqu->data.flags & IW_ENCODE_RESTRICTED)*/ ret = set_auth_mode(usbdev, IW_AUTH_WPA_VERSION_DISABLED, IW_AUTH_ALG_SHARED_KEY); if (ret != 0) return ret; if (wrqu->data.length > 0) { key_len = wrqu->data.length; key = extra; } else { /* must be set as tx key */ if (priv->encr_key_len[index] == 0) return -EINVAL; key_len = priv->encr_key_len[index]; key = priv->encr_keys[index]; priv->encr_tx_key_index = index; } if (add_wep_key(usbdev, key, key_len, index) != 0) return -EINVAL; if (index == priv->encr_tx_key_index) /* ndis drivers want essid to be set after setting encr */ set_essid(usbdev, &priv->essid); return 0; } static int rndis_iw_set_encode_ext(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; struct usbnet *usbdev = netdev_priv(dev); struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); struct ndis_80211_key ndis_key; int keyidx, ret; u8 *addr; keyidx = wrqu->encoding.flags & IW_ENCODE_INDEX; /* iwconfig gives index as 1 - N */ if (keyidx) keyidx--; else keyidx = priv->encr_tx_key_index; if (keyidx < 0 || keyidx >= 4) return -EINVAL; if (ext->alg == WPA_ALG_WEP) { if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) priv->encr_tx_key_index = keyidx; return add_wep_key(usbdev, ext->key, ext->key_len, keyidx); } if 
((wrqu->encoding.flags & IW_ENCODE_DISABLED) || ext->alg == IW_ENCODE_ALG_NONE || ext->key_len == 0) return remove_key(usbdev, keyidx, NULL); if (ext->key_len > sizeof(ndis_key.material)) return -1; memset(&ndis_key, 0, sizeof(ndis_key)); ndis_key.size = cpu_to_le32(sizeof(ndis_key) - sizeof(ndis_key.material) + ext->key_len); ndis_key.length = cpu_to_le32(ext->key_len); ndis_key.index = cpu_to_le32(keyidx); if (ext->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) { memcpy(ndis_key.rsc, ext->rx_seq, 6); ndis_key.index |= cpu_to_le32(1 << 29); } addr = ext->addr.sa_data; if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) { /* group key */ if (priv->infra_mode == ndis_80211_infra_adhoc) memset(ndis_key.bssid, 0xff, ETH_ALEN); else get_bssid(usbdev, ndis_key.bssid); } else { /* pairwise key */ ndis_key.index |= cpu_to_le32(1 << 30); memcpy(ndis_key.bssid, addr, ETH_ALEN); } if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) ndis_key.index |= cpu_to_le32(1 << 31); if (ext->alg == IW_ENCODE_ALG_TKIP && ext->key_len == 32) { /* wpa_supplicant gives us the Michael MIC RX/TX keys in * different order than NDIS spec, so swap the order here. 
*/ memcpy(ndis_key.material, ext->key, 16); memcpy(ndis_key.material + 16, ext->key + 24, 8); memcpy(ndis_key.material + 24, ext->key + 16, 8); } else memcpy(ndis_key.material, ext->key, ext->key_len); ret = rndis_set_oid(usbdev, OID_802_11_ADD_KEY, &ndis_key, le32_to_cpu(ndis_key.size)); devdbg(usbdev, "SIOCSIWENCODEEXT: OID_802_11_ADD_KEY -> %08X", ret); if (ret != 0) return ret; priv->encr_key_len[keyidx] = ext->key_len; memcpy(&priv->encr_keys[keyidx], ndis_key.material, ext->key_len); if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) priv->encr_tx_key_index = keyidx; return 0; } static int rndis_iw_set_scan(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct usbnet *usbdev = netdev_priv(dev); union iwreq_data evt; int ret = -EINVAL; __le32 tmp; devdbg(usbdev, "SIOCSIWSCAN"); if (wrqu->data.flags == 0) { tmp = ccpu2(1); ret = rndis_set_oid(usbdev, OID_802_11_BSSID_LIST_SCAN, &tmp, sizeof(tmp)); evt.data.flags = 0; evt.data.length = 0; wireless_send_event(dev, SIOCGIWSCAN, &evt, NULL); } return ret; } static char *rndis_translate_scan(struct net_device *dev, struct iw_request_info *info, char *cev, char *end_buf, struct ndis_80211_bssid_ex *bssid) { struct usbnet *usbdev = netdev_priv(dev); u8 *ie; char *current_val; int bssid_len, ie_len, i; u32 beacon, atim; struct iw_event iwe; unsigned char sbuf[32]; bssid_len = le32_to_cpu(bssid->length); devdbg(usbdev, "BSSID %pM", bssid->mac); iwe.cmd = SIOCGIWAP; iwe.u.ap_addr.sa_family = ARPHRD_ETHER; memcpy(iwe.u.ap_addr.sa_data, bssid->mac, ETH_ALEN); cev = iwe_stream_add_event(info, cev, end_buf, &iwe, IW_EV_ADDR_LEN); devdbg(usbdev, "SSID(%d) %s", le32_to_cpu(bssid->ssid.length), bssid->ssid.essid); iwe.cmd = SIOCGIWESSID; iwe.u.essid.length = le32_to_cpu(bssid->ssid.length); iwe.u.essid.flags = 1; cev = iwe_stream_add_point(info, cev, end_buf, &iwe, bssid->ssid.essid); devdbg(usbdev, "MODE %d", le32_to_cpu(bssid->net_infra)); iwe.cmd = SIOCGIWMODE; switch 
(le32_to_cpu(bssid->net_infra)) { case ndis_80211_infra_adhoc: iwe.u.mode = IW_MODE_ADHOC; break; case ndis_80211_infra_infra: iwe.u.mode = IW_MODE_INFRA; break; /*case ndis_80211_infra_auto_unknown:*/ default: iwe.u.mode = IW_MODE_AUTO; break; } cev = iwe_stream_add_event(info, cev, end_buf, &iwe, IW_EV_UINT_LEN); devdbg(usbdev, "FREQ %d kHz", le32_to_cpu(bssid->config.ds_config)); iwe.cmd = SIOCGIWFREQ; dsconfig_to_freq(le32_to_cpu(bssid->config.ds_config), &iwe.u.freq); cev = iwe_stream_add_event(info, cev, end_buf, &iwe, IW_EV_FREQ_LEN); devdbg(usbdev, "QUAL %d", le32_to_cpu(bssid->rssi)); iwe.cmd = IWEVQUAL; iwe.u.qual.qual = level_to_qual(le32_to_cpu(bssid->rssi)); iwe.u.qual.level = le32_to_cpu(bssid->rssi); iwe.u.qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED | IW_QUAL_NOISE_INVALID; cev = iwe_stream_add_event(info, cev, end_buf, &iwe, IW_EV_QUAL_LEN); devdbg(usbdev, "ENCODE %d", le32_to_cpu(bssid->privacy)); iwe.cmd = SIOCGIWENCODE; iwe.u.data.length = 0; if (le32_to_cpu(bssid->privacy) == ndis_80211_priv_accept_all) iwe.u.data.flags = IW_ENCODE_DISABLED; else iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; cev = iwe_stream_add_point(info, cev, end_buf, &iwe, NULL); devdbg(usbdev, "RATES:"); current_val = cev + iwe_stream_lcp_len(info); iwe.cmd = SIOCGIWRATE; for (i = 0; i < sizeof(bssid->rates); i++) { if (bssid->rates[i] & 0x7f) { iwe.u.bitrate.value = ((bssid->rates[i] & 0x7f) * 500000); devdbg(usbdev, " %d", iwe.u.bitrate.value); current_val = iwe_stream_add_value(info, cev, current_val, end_buf, &iwe, IW_EV_PARAM_LEN); } } if ((current_val - cev) > iwe_stream_lcp_len(info)) cev = current_val; beacon = le32_to_cpu(bssid->config.beacon_period); devdbg(usbdev, "BCN_INT %d", beacon); iwe.cmd = IWEVCUSTOM; snprintf(sbuf, sizeof(sbuf), "bcn_int=%d", beacon); iwe.u.data.length = strlen(sbuf); cev = iwe_stream_add_point(info, cev, end_buf, &iwe, sbuf); atim = le32_to_cpu(bssid->config.atim_window); devdbg(usbdev, "ATIM %d", atim); iwe.cmd 
= IWEVCUSTOM; snprintf(sbuf, sizeof(sbuf), "atim=%u", atim); iwe.u.data.length = strlen(sbuf); cev = iwe_stream_add_point(info, cev, end_buf, &iwe, sbuf); ie = (void *)(bssid->ies + sizeof(struct ndis_80211_fixed_ies)); ie_len = min(bssid_len - (int)sizeof(*bssid), (int)le32_to_cpu(bssid->ie_length)); ie_len -= sizeof(struct ndis_80211_fixed_ies); while (ie_len >= 2 && 2 + ie[1] <= ie_len) { if ((ie[0] == WLAN_EID_GENERIC && ie[1] >= 4 && memcmp(ie + 2, "\x00\x50\xf2\x01", 4) == 0) || ie[0] == WLAN_EID_RSN) { devdbg(usbdev, "IE: WPA%d", (ie[0] == WLAN_EID_RSN) ? 2 : 1); iwe.cmd = IWEVGENIE; /* arbitrary cut-off at 64 */ iwe.u.data.length = min(ie[1] + 2, 64); cev = iwe_stream_add_point(info, cev, end_buf, &iwe, ie); } ie_len -= 2 + ie[1]; ie += 2 + ie[1]; } return cev; } static int rndis_iw_get_scan(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct usbnet *usbdev = netdev_priv(dev); void *buf = NULL; char *cev = extra; struct ndis_80211_bssid_list_ex *bssid_list; struct ndis_80211_bssid_ex *bssid; int ret = -EINVAL, len, count, bssid_len; devdbg(usbdev, "SIOCGIWSCAN"); len = CONTROL_BUFFER_SIZE; buf = kmalloc(len, GFP_KERNEL); if (!buf) { ret = -ENOMEM; goto out; } ret = rndis_query_oid(usbdev, OID_802_11_BSSID_LIST, buf, &len); if (ret != 0) goto out; bssid_list = buf; bssid = bssid_list->bssid; bssid_len = le32_to_cpu(bssid->length); count = le32_to_cpu(bssid_list->num_items); devdbg(usbdev, "SIOCGIWSCAN: %d BSSIDs found", count); while (count && ((void *)bssid + bssid_len) <= (buf + len)) { cev = rndis_translate_scan(dev, info, cev, extra + IW_SCAN_MAX_DATA, bssid); bssid = (void *)bssid + bssid_len; bssid_len = le32_to_cpu(bssid->length); count--; } out: wrqu->data.length = cev - extra; wrqu->data.flags = 0; kfree(buf); return ret; } static int rndis_iw_set_genie(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct usbnet *usbdev = netdev_priv(dev); struct 
rndis_wext_private *priv = get_rndis_wext_priv(usbdev); int ret = 0; #ifdef DEBUG int j; u8 *gie = extra; for (j = 0; j < wrqu->data.length; j += 8) devdbg(usbdev, "SIOCSIWGENIE %04x - " "%02x %02x %02x %02x %02x %02x %02x %02x", j, gie[j + 0], gie[j + 1], gie[j + 2], gie[j + 3], gie[j + 4], gie[j + 5], gie[j + 6], gie[j + 7]); #endif /* clear existing IEs */ if (priv->wpa_ie_len) { kfree(priv->wpa_ie); priv->wpa_ie_len = 0; } /* set new IEs */ priv->wpa_ie = kmalloc(wrqu->data.length, GFP_KERNEL); if (priv->wpa_ie) { priv->wpa_ie_len = wrqu->data.length; memcpy(priv->wpa_ie, extra, priv->wpa_ie_len); } else ret = -ENOMEM; return ret; } static int rndis_iw_get_genie(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct usbnet *usbdev = netdev_priv(dev); struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); devdbg(usbdev, "SIOCGIWGENIE"); if (priv->wpa_ie_len == 0 || priv->wpa_ie == NULL) { wrqu->data.length = 0; return 0; } if (wrqu->data.length < priv->wpa_ie_len) return -E2BIG; wrqu->data.length = priv->wpa_ie_len; memcpy(extra, priv->wpa_ie, priv->wpa_ie_len); return 0; } static int rndis_iw_set_rts(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct usbnet *usbdev = netdev_priv(dev); __le32 tmp; devdbg(usbdev, "SIOCSIWRTS"); tmp = cpu_to_le32(wrqu->rts.value); return rndis_set_oid(usbdev, OID_802_11_RTS_THRESHOLD, &tmp, sizeof(tmp)); } static int rndis_iw_get_rts(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct usbnet *usbdev = netdev_priv(dev); __le32 tmp; int len, ret; len = sizeof(tmp); ret = rndis_query_oid(usbdev, OID_802_11_RTS_THRESHOLD, &tmp, &len); if (ret == 0) { wrqu->rts.value = le32_to_cpu(tmp); wrqu->rts.flags = 1; wrqu->rts.disabled = 0; } devdbg(usbdev, "SIOCGIWRTS: %d", wrqu->rts.value); return ret; } static int rndis_iw_set_frag(struct net_device *dev, struct iw_request_info *info, union 
iwreq_data *wrqu, char *extra) { struct usbnet *usbdev = netdev_priv(dev); __le32 tmp; devdbg(usbdev, "SIOCSIWFRAG"); tmp = cpu_to_le32(wrqu->frag.value); return rndis_set_oid(usbdev, OID_802_11_FRAGMENTATION_THRESHOLD, &tmp, sizeof(tmp)); } static int rndis_iw_get_frag(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct usbnet *usbdev = netdev_priv(dev); __le32 tmp; int len, ret; len = sizeof(tmp); ret = rndis_query_oid(usbdev, OID_802_11_FRAGMENTATION_THRESHOLD, &tmp, &len); if (ret == 0) { wrqu->frag.value = le32_to_cpu(tmp); wrqu->frag.flags = 1; wrqu->frag.disabled = 0; } devdbg(usbdev, "SIOCGIWFRAG: %d", wrqu->frag.value); return ret; } static int rndis_iw_set_nick(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct usbnet *usbdev = netdev_priv(dev); struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); devdbg(usbdev, "SIOCSIWNICK"); priv->nick_len = wrqu->data.length; if (priv->nick_len > 32) priv->nick_len = 32; memcpy(priv->nick, extra, priv->nick_len); return 0; } static int rndis_iw_get_nick(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct usbnet *usbdev = netdev_priv(dev); struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); wrqu->data.flags = 1; wrqu->data.length = priv->nick_len; memcpy(extra, priv->nick, priv->nick_len); devdbg(usbdev, "SIOCGIWNICK: '%s'", priv->nick); return 0; } static int rndis_iw_set_freq(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct usbnet *usbdev = netdev_priv(dev); struct ndis_80211_conf config; unsigned int dsconfig; int len, ret; /* this OID is valid only when not associated */ if (is_associated(usbdev)) return 0; dsconfig = 0; if (freq_to_dsconfig(&wrqu->freq, &dsconfig)) return -EINVAL; len = sizeof(config); ret = rndis_query_oid(usbdev, OID_802_11_CONFIGURATION, &config, &len); if (ret != 0) { 
devdbg(usbdev, "SIOCSIWFREQ: querying configuration failed"); return 0; } config.ds_config = cpu_to_le32(dsconfig); devdbg(usbdev, "SIOCSIWFREQ: %d * 10^%d", wrqu->freq.m, wrqu->freq.e); return rndis_set_oid(usbdev, OID_802_11_CONFIGURATION, &config, sizeof(config)); } static int rndis_iw_get_freq(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct usbnet *usbdev = netdev_priv(dev); struct ndis_80211_conf config; int len, ret; len = sizeof(config); ret = rndis_query_oid(usbdev, OID_802_11_CONFIGURATION, &config, &len); if (ret == 0) dsconfig_to_freq(le32_to_cpu(config.ds_config), &wrqu->freq); devdbg(usbdev, "SIOCGIWFREQ: %d", wrqu->freq.m); return ret; } static int rndis_iw_get_txpower(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct usbnet *usbdev = netdev_priv(dev); struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); __le32 tx_power; int ret = 0, len; if (priv->radio_on) { if (priv->caps & CAP_SUPPORT_TXPOWER) { len = sizeof(tx_power); ret = rndis_query_oid(usbdev, OID_802_11_TX_POWER_LEVEL, &tx_power, &len); if (ret != 0) return ret; } else /* fake incase not supported */ tx_power = cpu_to_le32(get_bcm4320_power(priv)); wrqu->txpower.flags = IW_TXPOW_MWATT; wrqu->txpower.value = le32_to_cpu(tx_power); wrqu->txpower.disabled = 0; } else { wrqu->txpower.flags = IW_TXPOW_MWATT; wrqu->txpower.value = 0; wrqu->txpower.disabled = 1; } devdbg(usbdev, "SIOCGIWTXPOW: %d", wrqu->txpower.value); return ret; } static int rndis_iw_set_txpower(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct usbnet *usbdev = netdev_priv(dev); struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); __le32 tx_power = 0; int ret = 0; if (!wrqu->txpower.disabled) { if (wrqu->txpower.flags == IW_TXPOW_MWATT) tx_power = cpu_to_le32(wrqu->txpower.value); else { /* wrqu->txpower.flags == IW_TXPOW_DBM */ if (wrqu->txpower.value > 20) 
tx_power = cpu_to_le32(128); else if (wrqu->txpower.value < -43) tx_power = cpu_to_le32(127); else { signed char tmp; tmp = wrqu->txpower.value; tmp = -12 - tmp; tmp <<= 2; tx_power = cpu_to_le32((unsigned char)tmp); } } } devdbg(usbdev, "SIOCSIWTXPOW: %d", le32_to_cpu(tx_power)); if (le32_to_cpu(tx_power) != 0) { if (priv->caps & CAP_SUPPORT_TXPOWER) { /* turn radio on first */ if (!priv->radio_on) disassociate(usbdev, 1); ret = rndis_set_oid(usbdev, OID_802_11_TX_POWER_LEVEL, &tx_power, sizeof(tx_power)); if (ret != 0) ret = -EOPNOTSUPP; return ret; } else { /* txpower unsupported, just turn radio on */ if (!priv->radio_on) return disassociate(usbdev, 1); return 0; /* all ready on */ } } /* tx_power == 0, turn off radio */ return disassociate(usbdev, 0); } static int rndis_iw_get_rate(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct usbnet *usbdev = netdev_priv(dev); __le32 tmp; int ret, len; len = sizeof(tmp); ret = rndis_query_oid(usbdev, OID_GEN_LINK_SPEED, &tmp, &len); if (ret == 0) { wrqu->bitrate.value = le32_to_cpu(tmp) * 100; wrqu->bitrate.disabled = 0; wrqu->bitrate.flags = 1; } return ret; } static int rndis_iw_set_mlme(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct usbnet *usbdev = netdev_priv(dev); struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); struct iw_mlme *mlme = (struct iw_mlme *)extra; unsigned char bssid[ETH_ALEN]; get_bssid(usbdev, bssid); if (memcmp(bssid, mlme->addr.sa_data, ETH_ALEN)) return -EINVAL; switch (mlme->cmd) { case IW_MLME_DEAUTH: return deauthenticate(usbdev); case IW_MLME_DISASSOC: return disassociate(usbdev, priv->radio_on); default: return -EOPNOTSUPP; } return 0; } static struct iw_statistics *rndis_get_wireless_stats(struct net_device *dev) { struct usbnet *usbdev = netdev_priv(dev); struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); unsigned long flags; spin_lock_irqsave(&priv->stats_lock, 
flags); memcpy(&priv->iwstats, &priv->privstats, sizeof(priv->iwstats)); spin_unlock_irqrestore(&priv->stats_lock, flags); return &priv->iwstats; } #define IW_IOCTL(x) [(x) - SIOCSIWCOMMIT] static const iw_handler rndis_iw_handler[] = { IW_IOCTL(SIOCSIWCOMMIT) = rndis_iw_commit, IW_IOCTL(SIOCGIWNAME) = rndis_iw_get_name, IW_IOCTL(SIOCSIWFREQ) = rndis_iw_set_freq, IW_IOCTL(SIOCGIWFREQ) = rndis_iw_get_freq, IW_IOCTL(SIOCSIWMODE) = rndis_iw_set_mode, IW_IOCTL(SIOCGIWMODE) = rndis_iw_get_mode, IW_IOCTL(SIOCGIWRANGE) = rndis_iw_get_range, IW_IOCTL(SIOCSIWAP) = rndis_iw_set_bssid, IW_IOCTL(SIOCGIWAP) = rndis_iw_get_bssid, IW_IOCTL(SIOCSIWSCAN) = rndis_iw_set_scan, IW_IOCTL(SIOCGIWSCAN) = rndis_iw_get_scan, IW_IOCTL(SIOCSIWESSID) = rndis_iw_set_essid, IW_IOCTL(SIOCGIWESSID) = rndis_iw_get_essid, IW_IOCTL(SIOCSIWNICKN) = rndis_iw_set_nick, IW_IOCTL(SIOCGIWNICKN) = rndis_iw_get_nick, IW_IOCTL(SIOCGIWRATE) = rndis_iw_get_rate, IW_IOCTL(SIOCSIWRTS) = rndis_iw_set_rts, IW_IOCTL(SIOCGIWRTS) = rndis_iw_get_rts, IW_IOCTL(SIOCSIWFRAG) = rndis_iw_set_frag, IW_IOCTL(SIOCGIWFRAG) = rndis_iw_get_frag, IW_IOCTL(SIOCSIWTXPOW) = rndis_iw_set_txpower, IW_IOCTL(SIOCGIWTXPOW) = rndis_iw_get_txpower, IW_IOCTL(SIOCSIWENCODE) = rndis_iw_set_encode, IW_IOCTL(SIOCSIWENCODEEXT) = rndis_iw_set_encode_ext, IW_IOCTL(SIOCSIWAUTH) = rndis_iw_set_auth, IW_IOCTL(SIOCGIWAUTH) = rndis_iw_get_auth, IW_IOCTL(SIOCSIWGENIE) = rndis_iw_set_genie, IW_IOCTL(SIOCGIWGENIE) = rndis_iw_get_genie, IW_IOCTL(SIOCSIWMLME) = rndis_iw_set_mlme, }; static const iw_handler rndis_wext_private_handler[] = { }; static const struct iw_priv_args rndis_wext_private_args[] = { }; static const struct iw_handler_def rndis_iw_handlers = { .num_standard = ARRAY_SIZE(rndis_iw_handler), .num_private = ARRAY_SIZE(rndis_wext_private_handler), .num_private_args = ARRAY_SIZE(rndis_wext_private_args), .standard = (iw_handler *)rndis_iw_handler, .private = (iw_handler *)rndis_wext_private_handler, .private_args = (struct iw_priv_args 
*)rndis_wext_private_args, .get_wireless_stats = rndis_get_wireless_stats, }; static void rndis_wext_worker(struct work_struct *work) { struct rndis_wext_private *priv = container_of(work, struct rndis_wext_private, work); struct usbnet *usbdev = priv->usbdev; union iwreq_data evt; unsigned char bssid[ETH_ALEN]; struct ndis_80211_assoc_info *info; int assoc_size = sizeof(*info) + IW_CUSTOM_MAX + 32; int ret, offset; if (test_and_clear_bit(WORK_LINK_UP, &priv->work_pending)) { netif_carrier_on(usbdev->net); info = kzalloc(assoc_size, GFP_KERNEL); if (!info) goto get_bssid; /* Get association info IEs from device and send them back to * userspace. */ ret = get_association_info(usbdev, info, assoc_size); if (!ret) { evt.data.length = le32_to_cpu(info->req_ie_length); if (evt.data.length > 0) { offset = le32_to_cpu(info->offset_req_ies); wireless_send_event(usbdev->net, IWEVASSOCREQIE, &evt, (char *)info + offset); } evt.data.length = le32_to_cpu(info->resp_ie_length); if (evt.data.length > 0) { offset = le32_to_cpu(info->offset_resp_ies); wireless_send_event(usbdev->net, IWEVASSOCRESPIE, &evt, (char *)info + offset); } } kfree(info); get_bssid: ret = get_bssid(usbdev, bssid); if (!ret) { evt.data.flags = 0; evt.data.length = 0; memcpy(evt.ap_addr.sa_data, bssid, ETH_ALEN); wireless_send_event(usbdev->net, SIOCGIWAP, &evt, NULL); } } if (test_and_clear_bit(WORK_LINK_DOWN, &priv->work_pending)) { netif_carrier_off(usbdev->net); evt.data.flags = 0; evt.data.length = 0; memset(evt.ap_addr.sa_data, 0, ETH_ALEN); wireless_send_event(usbdev->net, SIOCGIWAP, &evt, NULL); } if (test_and_clear_bit(WORK_SET_MULTICAST_LIST, &priv->work_pending)) set_multicast_list(usbdev); } static void rndis_wext_set_multicast_list(struct net_device *dev) { struct usbnet *usbdev = netdev_priv(dev); struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); if (test_bit(WORK_SET_MULTICAST_LIST, &priv->work_pending)) return; set_bit(WORK_SET_MULTICAST_LIST, &priv->work_pending); 
queue_work(priv->workqueue, &priv->work); } static void rndis_wext_link_change(struct usbnet *usbdev, int state) { struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); /* queue work to avoid recursive calls into rndis_command */ set_bit(state ? WORK_LINK_UP : WORK_LINK_DOWN, &priv->work_pending); queue_work(priv->workqueue, &priv->work); } static int rndis_wext_get_caps(struct usbnet *usbdev) { struct { __le32 num_items; __le32 items[8]; } networks_supported; int len, retval, i, n; __le32 tx_power; struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); /* determine if supports setting txpower */ len = sizeof(tx_power); retval = rndis_query_oid(usbdev, OID_802_11_TX_POWER_LEVEL, &tx_power, &len); if (retval == 0 && le32_to_cpu(tx_power) != 0xFF) priv->caps |= CAP_SUPPORT_TXPOWER; /* determine supported modes */ len = sizeof(networks_supported); retval = rndis_query_oid(usbdev, OID_802_11_NETWORK_TYPES_SUPPORTED, &networks_supported, &len); if (retval >= 0) { n = le32_to_cpu(networks_supported.num_items); if (n > 8) n = 8; for (i = 0; i < n; i++) { switch (le32_to_cpu(networks_supported.items[i])) { case ndis_80211_type_freq_hop: case ndis_80211_type_direct_seq: priv->caps |= CAP_MODE_80211B; break; case ndis_80211_type_ofdm_a: priv->caps |= CAP_MODE_80211A; break; case ndis_80211_type_ofdm_g: priv->caps |= CAP_MODE_80211G; break; } } if (priv->caps & CAP_MODE_80211A) strcat(priv->name, "a"); if (priv->caps & CAP_MODE_80211B) strcat(priv->name, "b"); if (priv->caps & CAP_MODE_80211G) strcat(priv->name, "g"); } return retval; } #define STATS_UPDATE_JIFFIES (HZ) static void rndis_update_wireless_stats(struct work_struct *work) { struct rndis_wext_private *priv = container_of(work, struct rndis_wext_private, stats_work.work); struct usbnet *usbdev = priv->usbdev; struct iw_statistics iwstats; __le32 rssi, tmp; int len, ret, j; unsigned long flags; int update_jiffies = STATS_UPDATE_JIFFIES; void *buf; spin_lock_irqsave(&priv->stats_lock, flags); 
memcpy(&iwstats, &priv->privstats, sizeof(iwstats)); spin_unlock_irqrestore(&priv->stats_lock, flags); /* only update stats when connected */ if (!is_associated(usbdev)) { iwstats.qual.qual = 0; iwstats.qual.level = 0; iwstats.qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED | IW_QUAL_NOISE_INVALID | IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID; goto end; } len = sizeof(rssi); ret = rndis_query_oid(usbdev, OID_802_11_RSSI, &rssi, &len); devdbg(usbdev, "stats: OID_802_11_RSSI -> %d, rssi:%d", ret, le32_to_cpu(rssi)); if (ret == 0) { memset(&iwstats.qual, 0, sizeof(iwstats.qual)); iwstats.qual.qual = level_to_qual(le32_to_cpu(rssi)); iwstats.qual.level = le32_to_cpu(rssi); iwstats.qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED | IW_QUAL_NOISE_INVALID; } memset(&iwstats.discard, 0, sizeof(iwstats.discard)); len = sizeof(tmp); ret = rndis_query_oid(usbdev, OID_GEN_XMIT_ERROR, &tmp, &len); if (ret == 0) iwstats.discard.misc += le32_to_cpu(tmp); len = sizeof(tmp); ret = rndis_query_oid(usbdev, OID_GEN_RCV_ERROR, &tmp, &len); if (ret == 0) iwstats.discard.misc += le32_to_cpu(tmp); len = sizeof(tmp); ret = rndis_query_oid(usbdev, OID_GEN_RCV_NO_BUFFER, &tmp, &len); if (ret == 0) iwstats.discard.misc += le32_to_cpu(tmp); /* Workaround transfer stalls on poor quality links. * TODO: find right way to fix these stalls (as stalls do not happen * with ndiswrapper/windows driver). */ if (iwstats.qual.qual <= 25) { /* Decrease stats worker interval to catch stalls. * faster. Faster than 400-500ms causes packet loss, * Slower doesn't catch stalls fast enough. */ j = msecs_to_jiffies(priv->param_workaround_interval); if (j > STATS_UPDATE_JIFFIES) j = STATS_UPDATE_JIFFIES; else if (j <= 0) j = 1; update_jiffies = j; /* Send scan OID. Use of both OIDs is required to get device * working. 
*/ tmp = ccpu2(1); rndis_set_oid(usbdev, OID_802_11_BSSID_LIST_SCAN, &tmp, sizeof(tmp)); len = CONTROL_BUFFER_SIZE; buf = kmalloc(len, GFP_KERNEL); if (!buf) goto end; rndis_query_oid(usbdev, OID_802_11_BSSID_LIST, buf, &len); kfree(buf); } end: spin_lock_irqsave(&priv->stats_lock, flags); memcpy(&priv->privstats, &iwstats, sizeof(iwstats)); spin_unlock_irqrestore(&priv->stats_lock, flags); if (update_jiffies >= HZ) update_jiffies = round_jiffies_relative(update_jiffies); else { j = round_jiffies_relative(update_jiffies); if (abs(j - update_jiffies) <= 10) update_jiffies = j; } queue_delayed_work(priv->workqueue, &priv->stats_work, update_jiffies); } static int bcm4320_early_init(struct usbnet *usbdev) { struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); char buf[8]; /* Early initialization settings, setting these won't have effect * if called after generic_rndis_bind(). */ priv->param_country[0] = modparam_country[0]; priv->param_country[1] = modparam_country[1]; priv->param_country[2] = 0; priv->param_frameburst = modparam_frameburst; priv->param_afterburner = modparam_afterburner; priv->param_power_save = modparam_power_save; priv->param_power_output = modparam_power_output; priv->param_roamtrigger = modparam_roamtrigger; priv->param_roamdelta = modparam_roamdelta; priv->param_country[0] = toupper(priv->param_country[0]); priv->param_country[1] = toupper(priv->param_country[1]); /* doesn't support EU as country code, use FI instead */ if (!strcmp(priv->param_country, "EU")) strcpy(priv->param_country, "FI"); if (priv->param_power_save < 0) priv->param_power_save = 0; else if (priv->param_power_save > 2) priv->param_power_save = 2; if (priv->param_power_output < 0) priv->param_power_output = 0; else if (priv->param_power_output > 3) priv->param_power_output = 3; if (priv->param_roamtrigger < -80) priv->param_roamtrigger = -80; else if (priv->param_roamtrigger > -60) priv->param_roamtrigger = -60; if (priv->param_roamdelta < 0) priv->param_roamdelta = 
0; else if (priv->param_roamdelta > 2) priv->param_roamdelta = 2; if (modparam_workaround_interval < 0) priv->param_workaround_interval = 500; else priv->param_workaround_interval = modparam_workaround_interval; rndis_set_config_parameter_str(usbdev, "Country", priv->param_country); rndis_set_config_parameter_str(usbdev, "FrameBursting", priv->param_frameburst ? "1" : "0"); rndis_set_config_parameter_str(usbdev, "Afterburner", priv->param_afterburner ? "1" : "0"); sprintf(buf, "%d", priv->param_power_save); rndis_set_config_parameter_str(usbdev, "PowerSaveMode", buf); sprintf(buf, "%d", priv->param_power_output); rndis_set_config_parameter_str(usbdev, "PwrOut", buf); sprintf(buf, "%d", priv->param_roamtrigger); rndis_set_config_parameter_str(usbdev, "RoamTrigger", buf); sprintf(buf, "%d", priv->param_roamdelta); rndis_set_config_parameter_str(usbdev, "RoamDelta", buf); return 0; } static int rndis_wext_bind(struct usbnet *usbdev, struct usb_interface *intf) { struct rndis_wext_private *priv; int retval, len; __le32 tmp; /* allocate rndis private data */ priv = kzalloc(sizeof(struct rndis_wext_private), GFP_KERNEL); if (!priv) return -ENOMEM; /* These have to be initialized before calling generic_rndis_bind(). * Otherwise we'll be in big trouble in rndis_wext_early_init(). */ usbdev->driver_priv = priv; strcpy(priv->name, "IEEE802.11"); usbdev->net->wireless_handlers = &rndis_iw_handlers; priv->usbdev = usbdev; mutex_init(&priv->command_lock); spin_lock_init(&priv->stats_lock); /* try bind rndis_host */ retval = generic_rndis_bind(usbdev, intf, FLAG_RNDIS_PHYM_WIRELESS); if (retval < 0) goto fail; /* generic_rndis_bind set packet filter to multicast_all+ * promisc mode which doesn't work well for our devices (device * picks up rssi to closest station instead of to access point). * * rndis_host wants to avoid all OID as much as possible * so do promisc/multicast handling in rndis_wext. 
*/ usbdev->net->set_multicast_list = rndis_wext_set_multicast_list; tmp = RNDIS_PACKET_TYPE_DIRECTED | RNDIS_PACKET_TYPE_BROADCAST; retval = rndis_set_oid(usbdev, OID_GEN_CURRENT_PACKET_FILTER, &tmp, sizeof(tmp)); len = sizeof(tmp); retval = rndis_query_oid(usbdev, OID_802_3_MAXIMUM_LIST_SIZE, &tmp, &len); priv->multicast_size = le32_to_cpu(tmp); if (retval < 0 || priv->multicast_size < 0) priv->multicast_size = 0; if (priv->multicast_size > 0) usbdev->net->flags |= IFF_MULTICAST; else usbdev->net->flags &= ~IFF_MULTICAST; priv->iwstats.qual.qual = 0; priv->iwstats.qual.level = 0; priv->iwstats.qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED | IW_QUAL_NOISE_INVALID | IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID; rndis_wext_get_caps(usbdev); set_default_iw_params(usbdev); /* turn radio on */ priv->radio_on = 1; disassociate(usbdev, 1); netif_carrier_off(usbdev->net); /* because rndis_command() sleeps we need to use workqueue */ priv->workqueue = create_singlethread_workqueue("rndis_wlan"); INIT_DELAYED_WORK(&priv->stats_work, rndis_update_wireless_stats); queue_delayed_work(priv->workqueue, &priv->stats_work, round_jiffies_relative(STATS_UPDATE_JIFFIES)); INIT_WORK(&priv->work, rndis_wext_worker); return 0; fail: kfree(priv); return retval; } static void rndis_wext_unbind(struct usbnet *usbdev, struct usb_interface *intf) { struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); /* turn radio off */ disassociate(usbdev, 0); cancel_delayed_work_sync(&priv->stats_work); cancel_work_sync(&priv->work); flush_workqueue(priv->workqueue); destroy_workqueue(priv->workqueue); if (priv && priv->wpa_ie_len) kfree(priv->wpa_ie); kfree(priv); rndis_unbind(usbdev, intf); } static int rndis_wext_reset(struct usbnet *usbdev) { return deauthenticate(usbdev); } static const struct driver_info bcm4320b_info = { .description = "Wireless RNDIS device, BCM4320b based", .flags = FLAG_WLAN | FLAG_FRAMING_RN | FLAG_NO_SETINT, .bind = rndis_wext_bind, .unbind = 
rndis_wext_unbind, .status = rndis_status, .rx_fixup = rndis_rx_fixup, .tx_fixup = rndis_tx_fixup, .reset = rndis_wext_reset, .early_init = bcm4320_early_init, .link_change = rndis_wext_link_change, }; static const struct driver_info bcm4320a_info = { .description = "Wireless RNDIS device, BCM4320a based", .flags = FLAG_WLAN | FLAG_FRAMING_RN | FLAG_NO_SETINT, .bind = rndis_wext_bind, .unbind = rndis_wext_unbind, .status = rndis_status, .rx_fixup = rndis_rx_fixup, .tx_fixup = rndis_tx_fixup, .reset = rndis_wext_reset, .early_init = bcm4320_early_init, .link_change = rndis_wext_link_change, }; static const struct driver_info rndis_wext_info = { .description = "Wireless RNDIS device", .flags = FLAG_WLAN | FLAG_FRAMING_RN | FLAG_NO_SETINT, .bind = rndis_wext_bind, .unbind = rndis_wext_unbind, .status = rndis_status, .rx_fixup = rndis_rx_fixup, .tx_fixup = rndis_tx_fixup, .reset = rndis_wext_reset, .early_init = bcm4320_early_init, .link_change = rndis_wext_link_change, }; /*-------------------------------------------------------------------------*/ static const struct usb_device_id products [] = { #define RNDIS_MASTER_INTERFACE \ .bInterfaceClass = USB_CLASS_COMM, \ .bInterfaceSubClass = 2 /* ACM */, \ .bInterfaceProtocol = 0x0ff /* INF driver for these devices have DriverVer >= 4.xx.xx.xx and many custom * parameters available. Chipset marked as 'BCM4320SKFBG' in NDISwrapper-wiki. */ { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x0411, .idProduct = 0x00bc, /* Buffalo WLI-U2-KG125S */ RNDIS_MASTER_INTERFACE, .driver_info = (unsigned long) &bcm4320b_info, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x0baf, .idProduct = 0x011b, /* U.S. 
Robotics USR5421 */ RNDIS_MASTER_INTERFACE, .driver_info = (unsigned long) &bcm4320b_info, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x050d, .idProduct = 0x011b, /* Belkin F5D7051 */ RNDIS_MASTER_INTERFACE, .driver_info = (unsigned long) &bcm4320b_info, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x1799, /* Belkin has two vendor ids */ .idProduct = 0x011b, /* Belkin F5D7051 */ RNDIS_MASTER_INTERFACE, .driver_info = (unsigned long) &bcm4320b_info, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x13b1, .idProduct = 0x0014, /* Linksys WUSB54GSv2 */ RNDIS_MASTER_INTERFACE, .driver_info = (unsigned long) &bcm4320b_info, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x13b1, .idProduct = 0x0026, /* Linksys WUSB54GSC */ RNDIS_MASTER_INTERFACE, .driver_info = (unsigned long) &bcm4320b_info, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x0b05, .idProduct = 0x1717, /* Asus WL169gE */ RNDIS_MASTER_INTERFACE, .driver_info = (unsigned long) &bcm4320b_info, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x0a5c, .idProduct = 0xd11b, /* Eminent EM4045 */ RNDIS_MASTER_INTERFACE, .driver_info = (unsigned long) &bcm4320b_info, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x1690, .idProduct = 0x0715, /* BT Voyager 1055 */ RNDIS_MASTER_INTERFACE, .driver_info = (unsigned long) &bcm4320b_info, }, /* These devices have DriverVer < 4.xx.xx.xx and do not have any custom * parameters available, hardware probably contain older firmware version with * no way of updating. Chipset marked as 'BCM4320????' in NDISwrapper-wiki. 
*/ { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x13b1, .idProduct = 0x000e, /* Linksys WUSB54GSv1 */ RNDIS_MASTER_INTERFACE, .driver_info = (unsigned long) &bcm4320a_info, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x0baf, .idProduct = 0x0111, /* U.S. Robotics USR5420 */ RNDIS_MASTER_INTERFACE, .driver_info = (unsigned long) &bcm4320a_info, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x0411, .idProduct = 0x004b, /* BUFFALO WLI-USB-G54 */ RNDIS_MASTER_INTERFACE, .driver_info = (unsigned long) &bcm4320a_info, }, /* Generic Wireless RNDIS devices that we don't have exact * idVendor/idProduct/chip yet. */ { /* RNDIS is MSFT's un-official variant of CDC ACM */ USB_INTERFACE_INFO(USB_CLASS_COMM, 2 /* ACM */, 0x0ff), .driver_info = (unsigned long) &rndis_wext_info, }, { /* "ActiveSync" is an undocumented variant of RNDIS, used in WM5 */ USB_INTERFACE_INFO(USB_CLASS_MISC, 1, 1), .driver_info = (unsigned long) &rndis_wext_info, }, { }, // END }; MODULE_DEVICE_TABLE(usb, products); static struct usb_driver rndis_wlan_driver = { .name = "rndis_wlan", .id_table = products, .probe = usbnet_probe, .disconnect = usbnet_disconnect, .suspend = usbnet_suspend, .resume = usbnet_resume, }; static int __init rndis_wlan_init(void) { return usb_register(&rndis_wlan_driver); } module_init(rndis_wlan_init); static void __exit rndis_wlan_exit(void) { usb_deregister(&rndis_wlan_driver); } module_exit(rndis_wlan_exit); MODULE_AUTHOR("Bjorge Dijkstra"); MODULE_AUTHOR("Jussi Kivilinna"); MODULE_DESCRIPTION("Driver for RNDIS based USB Wireless adapters"); MODULE_LICENSE("GPL");
gpl-2.0
htc-msm8960/android_kernel_htc_msm8930
net/sunrpc/clnt.c
102
44933
/* * linux/net/sunrpc/clnt.c * * This file contains the high-level RPC interface. * It is modeled as a finite state machine to support both synchronous * and asynchronous requests. * * - RPC header generation and argument serialization. * - Credential refresh. * - TCP connect handling. * - Retry of operation when it is suspected the operation failed because * of uid squashing on the server, or when the credentials were stale * and need to be refreshed, or when a packet was damaged in transit. * This may be have to be moved to the VFS layer. * * Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com> * Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de> */ #include <linux/module.h> #include <linux/types.h> #include <linux/kallsyms.h> #include <linux/mm.h> #include <linux/namei.h> #include <linux/mount.h> #include <linux/slab.h> #include <linux/utsname.h> #include <linux/workqueue.h> #include <linux/in.h> #include <linux/in6.h> #include <linux/un.h> #include <linux/rcupdate.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/rpc_pipe_fs.h> #include <linux/sunrpc/metrics.h> #include <linux/sunrpc/bc_xprt.h> #include <trace/events/sunrpc.h> #include "sunrpc.h" #include "netns.h" #ifdef RPC_DEBUG # define RPCDBG_FACILITY RPCDBG_CALL #endif #define dprint_status(t) \ dprintk("RPC: %5u %s (status %d)\n", t->tk_pid, \ __func__, t->tk_status) static DECLARE_WAIT_QUEUE_HEAD(destroy_wait); static void call_start(struct rpc_task *task); static void call_reserve(struct rpc_task *task); static void call_reserveresult(struct rpc_task *task); static void call_allocate(struct rpc_task *task); static void call_decode(struct rpc_task *task); static void call_bind(struct rpc_task *task); static void call_bind_status(struct rpc_task *task); static void call_transmit(struct rpc_task *task); #if defined(CONFIG_SUNRPC_BACKCHANNEL) static void call_bc_transmit(struct rpc_task *task); #endif static void call_status(struct rpc_task *task); static void call_transmit_status(struct 
rpc_task *task); static void call_refresh(struct rpc_task *task); static void call_refreshresult(struct rpc_task *task); static void call_timeout(struct rpc_task *task); static void call_connect(struct rpc_task *task); static void call_connect_status(struct rpc_task *task); static __be32 *rpc_encode_header(struct rpc_task *task); static __be32 *rpc_verify_header(struct rpc_task *task); static int rpc_ping(struct rpc_clnt *clnt); static void rpc_register_client(struct rpc_clnt *clnt) { struct net *net = rpc_net_ns(clnt); struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); spin_lock(&sn->rpc_client_lock); list_add(&clnt->cl_clients, &sn->all_clients); spin_unlock(&sn->rpc_client_lock); } static void rpc_unregister_client(struct rpc_clnt *clnt) { struct net *net = rpc_net_ns(clnt); struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); spin_lock(&sn->rpc_client_lock); list_del(&clnt->cl_clients); spin_unlock(&sn->rpc_client_lock); } static void __rpc_clnt_remove_pipedir(struct rpc_clnt *clnt) { if (clnt->cl_dentry) { if (clnt->cl_auth && clnt->cl_auth->au_ops->pipes_destroy) clnt->cl_auth->au_ops->pipes_destroy(clnt->cl_auth); rpc_remove_client_dir(clnt->cl_dentry); } clnt->cl_dentry = NULL; } static void rpc_clnt_remove_pipedir(struct rpc_clnt *clnt) { struct net *net = rpc_net_ns(clnt); struct super_block *pipefs_sb; pipefs_sb = rpc_get_sb_net(net); if (pipefs_sb) { __rpc_clnt_remove_pipedir(clnt); rpc_put_sb_net(net); } } static struct dentry *rpc_setup_pipedir_sb(struct super_block *sb, struct rpc_clnt *clnt, const char *dir_name) { static uint32_t clntid; char name[15]; struct qstr q = { .name = name, }; struct dentry *dir, *dentry; int error; dir = rpc_d_lookup_sb(sb, dir_name); if (dir == NULL) return dir; for (;;) { q.len = snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++); name[sizeof(name) - 1] = '\0'; q.hash = full_name_hash(q.name, q.len); dentry = rpc_create_client_dir(dir, &q, clnt); if (!IS_ERR(dentry)) break; error = 
PTR_ERR(dentry); if (error != -EEXIST) { printk(KERN_INFO "RPC: Couldn't create pipefs entry" " %s/%s, error %d\n", dir_name, name, error); break; } } dput(dir); return dentry; } static int rpc_setup_pipedir(struct rpc_clnt *clnt, const char *dir_name) { struct net *net = rpc_net_ns(clnt); struct super_block *pipefs_sb; struct dentry *dentry; clnt->cl_dentry = NULL; if (dir_name == NULL) return 0; pipefs_sb = rpc_get_sb_net(net); if (!pipefs_sb) return 0; dentry = rpc_setup_pipedir_sb(pipefs_sb, clnt, dir_name); rpc_put_sb_net(net); if (IS_ERR(dentry)) return PTR_ERR(dentry); clnt->cl_dentry = dentry; return 0; } static inline int rpc_clnt_skip_event(struct rpc_clnt *clnt, unsigned long event) { if (((event == RPC_PIPEFS_MOUNT) && clnt->cl_dentry) || ((event == RPC_PIPEFS_UMOUNT) && !clnt->cl_dentry)) return 1; return 0; } static int __rpc_clnt_handle_event(struct rpc_clnt *clnt, unsigned long event, struct super_block *sb) { struct dentry *dentry; int err = 0; switch (event) { case RPC_PIPEFS_MOUNT: dentry = rpc_setup_pipedir_sb(sb, clnt, clnt->cl_program->pipe_dir_name); BUG_ON(dentry == NULL); if (IS_ERR(dentry)) return PTR_ERR(dentry); clnt->cl_dentry = dentry; if (clnt->cl_auth->au_ops->pipes_create) { err = clnt->cl_auth->au_ops->pipes_create(clnt->cl_auth); if (err) __rpc_clnt_remove_pipedir(clnt); } break; case RPC_PIPEFS_UMOUNT: __rpc_clnt_remove_pipedir(clnt); break; default: printk(KERN_ERR "%s: unknown event: %ld\n", __func__, event); return -ENOTSUPP; } return err; } static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event, struct super_block *sb) { int error = 0; for (;; clnt = clnt->cl_parent) { if (!rpc_clnt_skip_event(clnt, event)) error = __rpc_clnt_handle_event(clnt, event, sb); if (error || clnt == clnt->cl_parent) break; } return error; } static struct rpc_clnt *rpc_get_client_for_event(struct net *net, int event) { struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); struct rpc_clnt *clnt; spin_lock(&sn->rpc_client_lock); 
list_for_each_entry(clnt, &sn->all_clients, cl_clients) { if (clnt->cl_program->pipe_dir_name == NULL) break; if (rpc_clnt_skip_event(clnt, event)) continue; if (atomic_inc_not_zero(&clnt->cl_count) == 0) continue; spin_unlock(&sn->rpc_client_lock); return clnt; } spin_unlock(&sn->rpc_client_lock); return NULL; } static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event, void *ptr) { struct super_block *sb = ptr; struct rpc_clnt *clnt; int error = 0; while ((clnt = rpc_get_client_for_event(sb->s_fs_info, event))) { error = __rpc_pipefs_event(clnt, event, sb); rpc_release_client(clnt); if (error) break; } return error; } static struct notifier_block rpc_clients_block = { .notifier_call = rpc_pipefs_event, .priority = SUNRPC_PIPEFS_RPC_PRIO, }; int rpc_clients_notifier_register(void) { return rpc_pipefs_notifier_register(&rpc_clients_block); } void rpc_clients_notifier_unregister(void) { return rpc_pipefs_notifier_unregister(&rpc_clients_block); } static void rpc_clnt_set_nodename(struct rpc_clnt *clnt, const char *nodename) { clnt->cl_nodelen = strlen(nodename); if (clnt->cl_nodelen > UNX_MAXNODENAME) clnt->cl_nodelen = UNX_MAXNODENAME; memcpy(clnt->cl_nodename, nodename, clnt->cl_nodelen); } static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, struct rpc_xprt *xprt) { const struct rpc_program *program = args->program; const struct rpc_version *version; struct rpc_clnt *clnt = NULL; struct rpc_auth *auth; int err; dprintk("RPC: creating %s client for %s (xprt %p)\n", program->name, args->servername, xprt); err = rpciod_up(); if (err) goto out_no_rpciod; err = -EINVAL; if (!xprt) goto out_no_xprt; if (args->version >= program->nrvers) goto out_err; version = program->version[args->version]; if (version == NULL) goto out_err; err = -ENOMEM; clnt = kzalloc(sizeof(*clnt), GFP_KERNEL); if (!clnt) goto out_err; clnt->cl_parent = clnt; rcu_assign_pointer(clnt->cl_xprt, xprt); clnt->cl_procinfo = version->procs; clnt->cl_maxproc = 
version->nrprocs; clnt->cl_protname = program->name; clnt->cl_prog = args->prognumber ? : program->number; clnt->cl_vers = version->number; clnt->cl_stats = program->stats; clnt->cl_metrics = rpc_alloc_iostats(clnt); err = -ENOMEM; if (clnt->cl_metrics == NULL) goto out_no_stats; clnt->cl_program = program; INIT_LIST_HEAD(&clnt->cl_tasks); spin_lock_init(&clnt->cl_lock); if (!xprt_bound(xprt)) clnt->cl_autobind = 1; clnt->cl_timeout = xprt->timeout; if (args->timeout != NULL) { memcpy(&clnt->cl_timeout_default, args->timeout, sizeof(clnt->cl_timeout_default)); clnt->cl_timeout = &clnt->cl_timeout_default; } clnt->cl_rtt = &clnt->cl_rtt_default; rpc_init_rtt(&clnt->cl_rtt_default, clnt->cl_timeout->to_initval); clnt->cl_principal = NULL; if (args->client_name) { clnt->cl_principal = kstrdup(args->client_name, GFP_KERNEL); if (!clnt->cl_principal) goto out_no_principal; } atomic_set(&clnt->cl_count, 1); err = rpc_setup_pipedir(clnt, program->pipe_dir_name); if (err < 0) goto out_no_path; auth = rpcauth_create(args->authflavor, clnt); if (IS_ERR(auth)) { printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %u)\n", args->authflavor); err = PTR_ERR(auth); goto out_no_auth; } rpc_clnt_set_nodename(clnt, utsname()->nodename); rpc_register_client(clnt); return clnt; out_no_auth: rpc_clnt_remove_pipedir(clnt); out_no_path: kfree(clnt->cl_principal); out_no_principal: rpc_free_iostats(clnt->cl_metrics); out_no_stats: kfree(clnt); out_err: xprt_put(xprt); out_no_xprt: rpciod_down(); out_no_rpciod: return ERR_PTR(err); } struct rpc_clnt *rpc_create(struct rpc_create_args *args) { struct rpc_xprt *xprt; struct rpc_clnt *clnt; struct xprt_create xprtargs = { .net = args->net, .ident = args->protocol, .srcaddr = args->saddress, .dstaddr = args->address, .addrlen = args->addrsize, .servername = args->servername, .bc_xprt = args->bc_xprt, }; char servername[48]; if (xprtargs.servername == NULL) { struct sockaddr_un *sun = (struct sockaddr_un *)args->address; struct 
sockaddr_in *sin = (struct sockaddr_in *)args->address; struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)args->address; servername[0] = '\0'; switch (args->address->sa_family) { case AF_LOCAL: snprintf(servername, sizeof(servername), "%s", sun->sun_path); break; case AF_INET: snprintf(servername, sizeof(servername), "%pI4", &sin->sin_addr.s_addr); break; case AF_INET6: snprintf(servername, sizeof(servername), "%pI6", &sin6->sin6_addr); break; default: return ERR_PTR(-EINVAL); } xprtargs.servername = servername; } xprt = xprt_create_transport(&xprtargs); if (IS_ERR(xprt)) return (struct rpc_clnt *)xprt; xprt->resvport = 1; if (args->flags & RPC_CLNT_CREATE_NONPRIVPORT) xprt->resvport = 0; clnt = rpc_new_client(args, xprt); if (IS_ERR(clnt)) return clnt; if (!(args->flags & RPC_CLNT_CREATE_NOPING)) { int err = rpc_ping(clnt); if (err != 0) { rpc_shutdown_client(clnt); return ERR_PTR(err); } } clnt->cl_softrtry = 1; if (args->flags & RPC_CLNT_CREATE_HARDRTRY) clnt->cl_softrtry = 0; if (args->flags & RPC_CLNT_CREATE_AUTOBIND) clnt->cl_autobind = 1; if (args->flags & RPC_CLNT_CREATE_DISCRTRY) clnt->cl_discrtry = 1; if (!(args->flags & RPC_CLNT_CREATE_QUIET)) clnt->cl_chatty = 1; return clnt; } EXPORT_SYMBOL_GPL(rpc_create); struct rpc_clnt * rpc_clone_client(struct rpc_clnt *clnt) { struct rpc_clnt *new; struct rpc_xprt *xprt; int err = -ENOMEM; new = kmemdup(clnt, sizeof(*new), GFP_KERNEL); if (!new) goto out_no_clnt; new->cl_parent = clnt; new->cl_autobind = 0; INIT_LIST_HEAD(&new->cl_tasks); spin_lock_init(&new->cl_lock); rpc_init_rtt(&new->cl_rtt_default, clnt->cl_timeout->to_initval); new->cl_metrics = rpc_alloc_iostats(clnt); if (new->cl_metrics == NULL) goto out_no_stats; if (clnt->cl_principal) { new->cl_principal = kstrdup(clnt->cl_principal, GFP_KERNEL); if (new->cl_principal == NULL) goto out_no_principal; } rcu_read_lock(); xprt = xprt_get(rcu_dereference(clnt->cl_xprt)); rcu_read_unlock(); if (xprt == NULL) goto out_no_transport; 
rcu_assign_pointer(new->cl_xprt, xprt); atomic_set(&new->cl_count, 1); err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name); if (err != 0) goto out_no_path; rpc_clnt_set_nodename(new, utsname()->nodename); if (new->cl_auth) atomic_inc(&new->cl_auth->au_count); atomic_inc(&clnt->cl_count); rpc_register_client(new); rpciod_up(); return new; out_no_path: xprt_put(xprt); out_no_transport: kfree(new->cl_principal); out_no_principal: rpc_free_iostats(new->cl_metrics); out_no_stats: kfree(new); out_no_clnt: dprintk("RPC: %s: returned error %d\n", __func__, err); return ERR_PTR(err); } EXPORT_SYMBOL_GPL(rpc_clone_client); void rpc_killall_tasks(struct rpc_clnt *clnt) { struct rpc_task *rovr; if (list_empty(&clnt->cl_tasks)) return; dprintk("RPC: killing all tasks for client %p\n", clnt); spin_lock(&clnt->cl_lock); list_for_each_entry(rovr, &clnt->cl_tasks, tk_task) { if (!RPC_IS_ACTIVATED(rovr)) continue; if (!(rovr->tk_flags & RPC_TASK_KILLED)) { rovr->tk_flags |= RPC_TASK_KILLED; rpc_exit(rovr, -EIO); if (RPC_IS_QUEUED(rovr)) rpc_wake_up_queued_task(rovr->tk_waitqueue, rovr); } } spin_unlock(&clnt->cl_lock); } EXPORT_SYMBOL_GPL(rpc_killall_tasks); void rpc_shutdown_client(struct rpc_clnt *clnt) { dprintk_rcu("RPC: shutting down %s client for %s\n", clnt->cl_protname, rcu_dereference(clnt->cl_xprt)->servername); while (!list_empty(&clnt->cl_tasks)) { rpc_killall_tasks(clnt); wait_event_timeout(destroy_wait, list_empty(&clnt->cl_tasks), 1*HZ); } rpc_release_client(clnt); } EXPORT_SYMBOL_GPL(rpc_shutdown_client); static void rpc_free_client(struct rpc_clnt *clnt) { dprintk_rcu("RPC: destroying %s client for %s\n", clnt->cl_protname, rcu_dereference(clnt->cl_xprt)->servername); if (clnt->cl_parent != clnt) rpc_release_client(clnt->cl_parent); rpc_unregister_client(clnt); rpc_clnt_remove_pipedir(clnt); rpc_free_iostats(clnt->cl_metrics); kfree(clnt->cl_principal); clnt->cl_metrics = NULL; xprt_put(rcu_dereference_raw(clnt->cl_xprt)); rpciod_down(); kfree(clnt); } 
static void rpc_free_auth(struct rpc_clnt *clnt) { if (clnt->cl_auth == NULL) { rpc_free_client(clnt); return; } atomic_inc(&clnt->cl_count); rpcauth_release(clnt->cl_auth); clnt->cl_auth = NULL; if (atomic_dec_and_test(&clnt->cl_count)) rpc_free_client(clnt); } void rpc_release_client(struct rpc_clnt *clnt) { dprintk("RPC: rpc_release_client(%p)\n", clnt); if (list_empty(&clnt->cl_tasks)) wake_up(&destroy_wait); if (atomic_dec_and_test(&clnt->cl_count)) rpc_free_auth(clnt); } struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old, const struct rpc_program *program, u32 vers) { struct rpc_clnt *clnt; const struct rpc_version *version; int err; BUG_ON(vers >= program->nrvers || !program->version[vers]); version = program->version[vers]; clnt = rpc_clone_client(old); if (IS_ERR(clnt)) goto out; clnt->cl_procinfo = version->procs; clnt->cl_maxproc = version->nrprocs; clnt->cl_protname = program->name; clnt->cl_prog = program->number; clnt->cl_vers = version->number; clnt->cl_stats = program->stats; err = rpc_ping(clnt); if (err != 0) { rpc_shutdown_client(clnt); clnt = ERR_PTR(err); } out: return clnt; } EXPORT_SYMBOL_GPL(rpc_bind_new_program); void rpc_task_release_client(struct rpc_task *task) { struct rpc_clnt *clnt = task->tk_client; if (clnt != NULL) { spin_lock(&clnt->cl_lock); list_del(&task->tk_task); spin_unlock(&clnt->cl_lock); task->tk_client = NULL; rpc_release_client(clnt); } } static void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt) { if (clnt != NULL) { rpc_task_release_client(task); task->tk_client = clnt; atomic_inc(&clnt->cl_count); if (clnt->cl_softrtry) task->tk_flags |= RPC_TASK_SOFT; spin_lock(&clnt->cl_lock); list_add_tail(&task->tk_task, &clnt->cl_tasks); spin_unlock(&clnt->cl_lock); } } void rpc_task_reset_client(struct rpc_task *task, struct rpc_clnt *clnt) { rpc_task_release_client(task); rpc_task_set_client(task, clnt); } EXPORT_SYMBOL_GPL(rpc_task_reset_client); static void rpc_task_set_rpc_message(struct rpc_task 
*task, const struct rpc_message *msg) { if (msg != NULL) { task->tk_msg.rpc_proc = msg->rpc_proc; task->tk_msg.rpc_argp = msg->rpc_argp; task->tk_msg.rpc_resp = msg->rpc_resp; if (msg->rpc_cred != NULL) task->tk_msg.rpc_cred = get_rpccred(msg->rpc_cred); } } static void rpc_default_callback(struct rpc_task *task, void *data) { } static const struct rpc_call_ops rpc_default_ops = { .rpc_call_done = rpc_default_callback, }; struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data) { struct rpc_task *task; task = rpc_new_task(task_setup_data); if (IS_ERR(task)) goto out; rpc_task_set_client(task, task_setup_data->rpc_client); rpc_task_set_rpc_message(task, task_setup_data->rpc_message); if (task->tk_action == NULL) rpc_call_start(task); atomic_inc(&task->tk_count); rpc_execute(task); out: return task; } EXPORT_SYMBOL_GPL(rpc_run_task); int rpc_call_sync(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags) { struct rpc_task *task; struct rpc_task_setup task_setup_data = { .rpc_client = clnt, .rpc_message = msg, .callback_ops = &rpc_default_ops, .flags = flags, }; int status; BUG_ON(flags & RPC_TASK_ASYNC); task = rpc_run_task(&task_setup_data); if (IS_ERR(task)) return PTR_ERR(task); status = task->tk_status; rpc_put_task(task); return status; } EXPORT_SYMBOL_GPL(rpc_call_sync); int rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags, const struct rpc_call_ops *tk_ops, void *data) { struct rpc_task *task; struct rpc_task_setup task_setup_data = { .rpc_client = clnt, .rpc_message = msg, .callback_ops = tk_ops, .callback_data = data, .flags = flags|RPC_TASK_ASYNC, }; task = rpc_run_task(&task_setup_data); if (IS_ERR(task)) return PTR_ERR(task); rpc_put_task(task); return 0; } EXPORT_SYMBOL_GPL(rpc_call_async); #if defined(CONFIG_SUNRPC_BACKCHANNEL) struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req, const struct rpc_call_ops *tk_ops) { struct rpc_task *task; struct xdr_buf *xbufp = &req->rq_snd_buf; struct 
rpc_task_setup task_setup_data = { .callback_ops = tk_ops, }; dprintk("RPC: rpc_run_bc_task req= %p\n", req); task = rpc_new_task(&task_setup_data); if (IS_ERR(task)) { xprt_free_bc_request(req); goto out; } task->tk_rqstp = req; xbufp->len = xbufp->head[0].iov_len + xbufp->page_len + xbufp->tail[0].iov_len; task->tk_action = call_bc_transmit; atomic_inc(&task->tk_count); BUG_ON(atomic_read(&task->tk_count) != 2); rpc_execute(task); out: dprintk("RPC: rpc_run_bc_task: task= %p\n", task); return task; } #endif void rpc_call_start(struct rpc_task *task) { task->tk_action = call_start; } EXPORT_SYMBOL_GPL(rpc_call_start); size_t rpc_peeraddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t bufsize) { size_t bytes; struct rpc_xprt *xprt; rcu_read_lock(); xprt = rcu_dereference(clnt->cl_xprt); bytes = xprt->addrlen; if (bytes > bufsize) bytes = bufsize; memcpy(buf, &xprt->addr, bytes); rcu_read_unlock(); return bytes; } EXPORT_SYMBOL_GPL(rpc_peeraddr); const char *rpc_peeraddr2str(struct rpc_clnt *clnt, enum rpc_display_format_t format) { struct rpc_xprt *xprt; xprt = rcu_dereference(clnt->cl_xprt); if (xprt->address_strings[format] != NULL) return xprt->address_strings[format]; else return "unprintable"; } EXPORT_SYMBOL_GPL(rpc_peeraddr2str); static const struct sockaddr_in rpc_inaddr_loopback = { .sin_family = AF_INET, .sin_addr.s_addr = htonl(INADDR_ANY), }; static const struct sockaddr_in6 rpc_in6addr_loopback = { .sin6_family = AF_INET6, .sin6_addr = IN6ADDR_ANY_INIT, }; static int rpc_sockname(struct net *net, struct sockaddr *sap, size_t salen, struct sockaddr *buf, int buflen) { struct socket *sock; int err; err = __sock_create(net, sap->sa_family, SOCK_DGRAM, IPPROTO_UDP, &sock, 1); if (err < 0) { dprintk("RPC: can't create UDP socket (%d)\n", err); goto out; } switch (sap->sa_family) { case AF_INET: err = kernel_bind(sock, (struct sockaddr *)&rpc_inaddr_loopback, sizeof(rpc_inaddr_loopback)); break; case AF_INET6: err = kernel_bind(sock, (struct sockaddr 
*)&rpc_in6addr_loopback, sizeof(rpc_in6addr_loopback)); break; default: err = -EAFNOSUPPORT; goto out; } if (err < 0) { dprintk("RPC: can't bind UDP socket (%d)\n", err); goto out_release; } err = kernel_connect(sock, sap, salen, 0); if (err < 0) { dprintk("RPC: can't connect UDP socket (%d)\n", err); goto out_release; } err = kernel_getsockname(sock, buf, &buflen); if (err < 0) { dprintk("RPC: getsockname failed (%d)\n", err); goto out_release; } err = 0; if (buf->sa_family == AF_INET6) { struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)buf; sin6->sin6_scope_id = 0; } dprintk("RPC: %s succeeded\n", __func__); out_release: sock_release(sock); out: return err; } static int rpc_anyaddr(int family, struct sockaddr *buf, size_t buflen) { switch (family) { case AF_INET: if (buflen < sizeof(rpc_inaddr_loopback)) return -EINVAL; memcpy(buf, &rpc_inaddr_loopback, sizeof(rpc_inaddr_loopback)); break; case AF_INET6: if (buflen < sizeof(rpc_in6addr_loopback)) return -EINVAL; memcpy(buf, &rpc_in6addr_loopback, sizeof(rpc_in6addr_loopback)); default: dprintk("RPC: %s: address family not supported\n", __func__); return -EAFNOSUPPORT; } dprintk("RPC: %s: succeeded\n", __func__); return 0; } int rpc_localaddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t buflen) { struct sockaddr_storage address; struct sockaddr *sap = (struct sockaddr *)&address; struct rpc_xprt *xprt; struct net *net; size_t salen; int err; rcu_read_lock(); xprt = rcu_dereference(clnt->cl_xprt); salen = xprt->addrlen; memcpy(sap, &xprt->addr, salen); net = get_net(xprt->xprt_net); rcu_read_unlock(); rpc_set_port(sap, 0); err = rpc_sockname(net, sap, salen, buf, buflen); put_net(net); if (err != 0) return rpc_anyaddr(sap->sa_family, buf, buflen); return 0; } EXPORT_SYMBOL_GPL(rpc_localaddr); void rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize) { struct rpc_xprt *xprt; rcu_read_lock(); xprt = rcu_dereference(clnt->cl_xprt); if (xprt->ops->set_buffer_size) 
xprt->ops->set_buffer_size(xprt, sndsize, rcvsize); rcu_read_unlock(); } EXPORT_SYMBOL_GPL(rpc_setbufsize); int rpc_protocol(struct rpc_clnt *clnt) { int protocol; rcu_read_lock(); protocol = rcu_dereference(clnt->cl_xprt)->prot; rcu_read_unlock(); return protocol; } EXPORT_SYMBOL_GPL(rpc_protocol); struct net *rpc_net_ns(struct rpc_clnt *clnt) { struct net *ret; rcu_read_lock(); ret = rcu_dereference(clnt->cl_xprt)->xprt_net; rcu_read_unlock(); return ret; } EXPORT_SYMBOL_GPL(rpc_net_ns); size_t rpc_max_payload(struct rpc_clnt *clnt) { size_t ret; rcu_read_lock(); ret = rcu_dereference(clnt->cl_xprt)->max_payload; rcu_read_unlock(); return ret; } EXPORT_SYMBOL_GPL(rpc_max_payload); void rpc_force_rebind(struct rpc_clnt *clnt) { if (clnt->cl_autobind) { rcu_read_lock(); xprt_clear_bound(rcu_dereference(clnt->cl_xprt)); rcu_read_unlock(); } } EXPORT_SYMBOL_GPL(rpc_force_rebind); int rpc_restart_call_prepare(struct rpc_task *task) { if (RPC_ASSASSINATED(task)) return 0; task->tk_action = call_start; if (task->tk_ops->rpc_call_prepare != NULL) task->tk_action = rpc_prepare_task; return 1; } EXPORT_SYMBOL_GPL(rpc_restart_call_prepare); int rpc_restart_call(struct rpc_task *task) { if (RPC_ASSASSINATED(task)) return 0; task->tk_action = call_start; return 1; } EXPORT_SYMBOL_GPL(rpc_restart_call); #ifdef RPC_DEBUG static const char *rpc_proc_name(const struct rpc_task *task) { const struct rpc_procinfo *proc = task->tk_msg.rpc_proc; if (proc) { if (proc->p_name) return proc->p_name; else return "NULL"; } else return "no proc"; } #endif static void call_start(struct rpc_task *task) { struct rpc_clnt *clnt = task->tk_client; dprintk("RPC: %5u call_start %s%d proc %s (%s)\n", task->tk_pid, clnt->cl_protname, clnt->cl_vers, rpc_proc_name(task), (RPC_IS_ASYNC(task) ? 
"async" : "sync")); task->tk_msg.rpc_proc->p_count++; clnt->cl_stats->rpccnt++; task->tk_action = call_reserve; } static void call_reserve(struct rpc_task *task) { dprint_status(task); task->tk_status = 0; task->tk_action = call_reserveresult; xprt_reserve(task); } static void call_reserveresult(struct rpc_task *task) { int status = task->tk_status; dprint_status(task); task->tk_status = 0; if (status >= 0) { if (task->tk_rqstp) { task->tk_action = call_refresh; return; } printk(KERN_ERR "%s: status=%d, but no request slot, exiting\n", __func__, status); rpc_exit(task, -EIO); return; } if (task->tk_rqstp) { printk(KERN_ERR "%s: status=%d, request allocated anyway\n", __func__, status); xprt_release(task); } switch (status) { case -ENOMEM: rpc_delay(task, HZ >> 2); case -EAGAIN: task->tk_action = call_reserve; return; case -EIO: break; default: printk(KERN_ERR "%s: unrecognized error %d, exiting\n", __func__, status); break; } rpc_exit(task, status); } static void call_refresh(struct rpc_task *task) { dprint_status(task); task->tk_action = call_refreshresult; task->tk_status = 0; task->tk_client->cl_stats->rpcauthrefresh++; rpcauth_refreshcred(task); } static void call_refreshresult(struct rpc_task *task) { int status = task->tk_status; dprint_status(task); task->tk_status = 0; task->tk_action = call_refresh; switch (status) { case 0: if (rpcauth_uptodatecred(task)) task->tk_action = call_allocate; return; case -ETIMEDOUT: rpc_delay(task, 3*HZ); case -EAGAIN: status = -EACCES; if (!task->tk_cred_retry) break; task->tk_cred_retry--; dprintk("RPC: %5u %s: retry refresh creds\n", task->tk_pid, __func__); return; } dprintk("RPC: %5u %s: refresh creds failed with error %d\n", task->tk_pid, __func__, status); rpc_exit(task, status); } static void call_allocate(struct rpc_task *task) { unsigned int slack = task->tk_rqstp->rq_cred->cr_auth->au_cslack; struct rpc_rqst *req = task->tk_rqstp; struct rpc_xprt *xprt = task->tk_xprt; struct rpc_procinfo *proc = 
task->tk_msg.rpc_proc; dprint_status(task); task->tk_status = 0; task->tk_action = call_bind; if (req->rq_buffer) return; if (proc->p_proc != 0) { BUG_ON(proc->p_arglen == 0); if (proc->p_decode != NULL) BUG_ON(proc->p_replen == 0); } req->rq_callsize = RPC_CALLHDRSIZE + (slack << 1) + proc->p_arglen; req->rq_callsize <<= 2; req->rq_rcvsize = RPC_REPHDRSIZE + slack + proc->p_replen; req->rq_rcvsize <<= 2; req->rq_buffer = xprt->ops->buf_alloc(task, req->rq_callsize + req->rq_rcvsize); if (req->rq_buffer != NULL) return; dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid); if (RPC_IS_ASYNC(task) || !fatal_signal_pending(current)) { task->tk_action = call_allocate; rpc_delay(task, HZ>>4); return; } rpc_exit(task, -ERESTARTSYS); } static inline int rpc_task_need_encode(struct rpc_task *task) { return task->tk_rqstp->rq_snd_buf.len == 0; } static inline void rpc_task_force_reencode(struct rpc_task *task) { task->tk_rqstp->rq_snd_buf.len = 0; task->tk_rqstp->rq_bytes_sent = 0; } static inline void rpc_xdr_buf_init(struct xdr_buf *buf, void *start, size_t len) { buf->head[0].iov_base = start; buf->head[0].iov_len = len; buf->tail[0].iov_len = 0; buf->page_len = 0; buf->flags = 0; buf->len = 0; buf->buflen = len; } static void rpc_xdr_encode(struct rpc_task *task) { struct rpc_rqst *req = task->tk_rqstp; kxdreproc_t encode; __be32 *p; dprint_status(task); rpc_xdr_buf_init(&req->rq_snd_buf, req->rq_buffer, req->rq_callsize); rpc_xdr_buf_init(&req->rq_rcv_buf, (char *)req->rq_buffer + req->rq_callsize, req->rq_rcvsize); p = rpc_encode_header(task); if (p == NULL) { printk(KERN_INFO "RPC: couldn't encode RPC header, exit EIO\n"); rpc_exit(task, -EIO); return; } encode = task->tk_msg.rpc_proc->p_encode; if (encode == NULL) return; task->tk_status = rpcauth_wrap_req(task, encode, req, p, task->tk_msg.rpc_argp); } static void call_bind(struct rpc_task *task) { struct rpc_xprt *xprt = task->tk_xprt; dprint_status(task); task->tk_action = call_connect; if 
(!xprt_bound(xprt)) { task->tk_action = call_bind_status; task->tk_timeout = xprt->bind_timeout; xprt->ops->rpcbind(task); } } static void call_bind_status(struct rpc_task *task) { int status = -EIO; if (task->tk_status >= 0) { dprint_status(task); task->tk_status = 0; task->tk_action = call_connect; return; } trace_rpc_bind_status(task); switch (task->tk_status) { case -ENOMEM: dprintk("RPC: %5u rpcbind out of memory\n", task->tk_pid); rpc_delay(task, HZ >> 2); goto retry_timeout; case -EACCES: dprintk("RPC: %5u remote rpcbind: RPC program/version " "unavailable\n", task->tk_pid); if (task->tk_msg.rpc_proc->p_proc == 0) { status = -EOPNOTSUPP; break; } if (task->tk_rebind_retry == 0) break; task->tk_rebind_retry--; rpc_delay(task, 3*HZ); goto retry_timeout; case -ETIMEDOUT: dprintk("RPC: %5u rpcbind request timed out\n", task->tk_pid); goto retry_timeout; case -EPFNOSUPPORT: dprintk("RPC: %5u unrecognized remote rpcbind service\n", task->tk_pid); break; case -EPROTONOSUPPORT: dprintk("RPC: %5u remote rpcbind version unavailable, retrying\n", task->tk_pid); task->tk_status = 0; task->tk_action = call_bind; return; case -ECONNREFUSED: case -ECONNRESET: case -ENOTCONN: case -EHOSTDOWN: case -EHOSTUNREACH: case -ENETUNREACH: case -EPIPE: dprintk("RPC: %5u remote rpcbind unreachable: %d\n", task->tk_pid, task->tk_status); if (!RPC_IS_SOFTCONN(task)) { rpc_delay(task, 5*HZ); goto retry_timeout; } status = task->tk_status; break; default: dprintk("RPC: %5u unrecognized rpcbind error (%d)\n", task->tk_pid, -task->tk_status); } rpc_exit(task, status); return; retry_timeout: task->tk_action = call_timeout; } static void call_connect(struct rpc_task *task) { struct rpc_xprt *xprt = task->tk_xprt; dprintk("RPC: %5u call_connect xprt %p %s connected\n", task->tk_pid, xprt, (xprt_connected(xprt) ? 
"is" : "is not")); task->tk_action = call_transmit; if (!xprt_connected(xprt)) { task->tk_action = call_connect_status; if (task->tk_status < 0) return; xprt_connect(task); } } static void call_connect_status(struct rpc_task *task) { struct rpc_clnt *clnt = task->tk_client; int status = task->tk_status; dprint_status(task); task->tk_status = 0; if (status >= 0 || status == -EAGAIN) { clnt->cl_stats->netreconn++; task->tk_action = call_transmit; return; } trace_rpc_connect_status(task, status); switch (status) { case -ETIMEDOUT: task->tk_action = call_timeout; break; default: rpc_exit(task, -EIO); } } static void call_transmit(struct rpc_task *task) { dprint_status(task); task->tk_action = call_status; if (task->tk_status < 0) return; task->tk_status = xprt_prepare_transmit(task); if (task->tk_status != 0) return; task->tk_action = call_transmit_status; if (rpc_task_need_encode(task)) { BUG_ON(task->tk_rqstp->rq_bytes_sent != 0); rpc_xdr_encode(task); if (task->tk_status != 0) { if (task->tk_status == -EAGAIN) rpc_delay(task, HZ >> 4); else rpc_exit(task, task->tk_status); return; } } xprt_transmit(task); if (task->tk_status < 0) return; call_transmit_status(task); if (rpc_reply_expected(task)) return; task->tk_action = rpc_exit_task; rpc_wake_up_queued_task(&task->tk_xprt->pending, task); } static void call_transmit_status(struct rpc_task *task) { task->tk_action = call_status; if (task->tk_status == 0) { xprt_end_transmit(task); rpc_task_force_reencode(task); return; } switch (task->tk_status) { case -EAGAIN: break; default: dprint_status(task); xprt_end_transmit(task); rpc_task_force_reencode(task); break; case -ECONNREFUSED: case -EHOSTDOWN: case -EHOSTUNREACH: case -ENETUNREACH: if (RPC_IS_SOFTCONN(task)) { xprt_end_transmit(task); rpc_exit(task, task->tk_status); break; } case -ECONNRESET: case -ENOTCONN: case -EPIPE: rpc_task_force_reencode(task); } } #if defined(CONFIG_SUNRPC_BACKCHANNEL) static void call_bc_transmit(struct rpc_task *task) { struct rpc_rqst 
*req = task->tk_rqstp; BUG_ON(task->tk_status != 0); task->tk_status = xprt_prepare_transmit(task); if (task->tk_status == -EAGAIN) { task->tk_status = 0; task->tk_action = call_bc_transmit; return; } task->tk_action = rpc_exit_task; if (task->tk_status < 0) { printk(KERN_NOTICE "RPC: Could not send backchannel reply " "error: %d\n", task->tk_status); return; } xprt_transmit(task); xprt_end_transmit(task); dprint_status(task); switch (task->tk_status) { case 0: break; case -EHOSTDOWN: case -EHOSTUNREACH: case -ENETUNREACH: case -ETIMEDOUT: printk(KERN_NOTICE "RPC: Could not send backchannel reply " "error: %d\n", task->tk_status); xprt_conditional_disconnect(task->tk_xprt, req->rq_connect_cookie); break; default: BUG_ON(task->tk_status == -EAGAIN); printk(KERN_NOTICE "RPC: Could not send backchannel reply " "error: %d\n", task->tk_status); break; } rpc_wake_up_queued_task(&req->rq_xprt->pending, task); } #endif static void call_status(struct rpc_task *task) { struct rpc_clnt *clnt = task->tk_client; struct rpc_rqst *req = task->tk_rqstp; int status; if (req->rq_reply_bytes_recvd > 0 && !req->rq_bytes_sent) task->tk_status = req->rq_reply_bytes_recvd; dprint_status(task); status = task->tk_status; if (status >= 0) { task->tk_action = call_decode; return; } trace_rpc_call_status(task); task->tk_status = 0; switch(status) { case -EHOSTDOWN: case -EHOSTUNREACH: case -ENETUNREACH: rpc_delay(task, 3*HZ); case -ETIMEDOUT: task->tk_action = call_timeout; if (task->tk_client->cl_discrtry) xprt_conditional_disconnect(task->tk_xprt, req->rq_connect_cookie); break; case -ECONNRESET: case -ECONNREFUSED: rpc_force_rebind(clnt); rpc_delay(task, 3*HZ); case -EPIPE: case -ENOTCONN: task->tk_action = call_bind; break; case -EAGAIN: task->tk_action = call_transmit; break; case -EIO: rpc_exit(task, status); break; default: if (clnt->cl_chatty) printk("%s: RPC call returned error %d\n", clnt->cl_protname, -status); rpc_exit(task, status); } } static void call_timeout(struct rpc_task 
*task) { struct rpc_clnt *clnt = task->tk_client; if (xprt_adjust_timeout(task->tk_rqstp) == 0) { dprintk("RPC: %5u call_timeout (minor)\n", task->tk_pid); goto retry; } dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid); task->tk_timeouts++; if (RPC_IS_SOFTCONN(task)) { rpc_exit(task, -ETIMEDOUT); return; } if (RPC_IS_SOFT(task)) { if (clnt->cl_chatty) { rcu_read_lock(); printk(KERN_NOTICE "%s: server %s not responding, timed out\n", clnt->cl_protname, rcu_dereference(clnt->cl_xprt)->servername); rcu_read_unlock(); } if (task->tk_flags & RPC_TASK_TIMEOUT) rpc_exit(task, -ETIMEDOUT); else rpc_exit(task, -EIO); return; } if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) { task->tk_flags |= RPC_CALL_MAJORSEEN; if (clnt->cl_chatty) { rcu_read_lock(); printk(KERN_NOTICE "%s: server %s not responding, still trying\n", clnt->cl_protname, rcu_dereference(clnt->cl_xprt)->servername); rcu_read_unlock(); } } rpc_force_rebind(clnt); rpcauth_invalcred(task); retry: clnt->cl_stats->rpcretrans++; task->tk_action = call_bind; task->tk_status = 0; } static void call_decode(struct rpc_task *task) { struct rpc_clnt *clnt = task->tk_client; struct rpc_rqst *req = task->tk_rqstp; kxdrdproc_t decode = task->tk_msg.rpc_proc->p_decode; __be32 *p; dprint_status(task); if (task->tk_flags & RPC_CALL_MAJORSEEN) { if (clnt->cl_chatty) { rcu_read_lock(); printk(KERN_NOTICE "%s: server %s OK\n", clnt->cl_protname, rcu_dereference(clnt->cl_xprt)->servername); rcu_read_unlock(); } task->tk_flags &= ~RPC_CALL_MAJORSEEN; } smp_rmb(); req->rq_rcv_buf.len = req->rq_private_buf.len; WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf, sizeof(req->rq_rcv_buf)) != 0); if (req->rq_rcv_buf.len < 12) { if (!RPC_IS_SOFT(task)) { task->tk_action = call_bind; clnt->cl_stats->rpcretrans++; goto out_retry; } dprintk("RPC: %s: too small RPC reply size (%d bytes)\n", clnt->cl_protname, task->tk_status); task->tk_action = call_timeout; goto out_retry; } p = rpc_verify_header(task); if (IS_ERR(p)) { if (p == 
ERR_PTR(-EAGAIN)) goto out_retry; return; } task->tk_action = rpc_exit_task; if (decode) { task->tk_status = rpcauth_unwrap_resp(task, decode, req, p, task->tk_msg.rpc_resp); } dprintk("RPC: %5u call_decode result %d\n", task->tk_pid, task->tk_status); return; out_retry: task->tk_status = 0; if (task->tk_rqstp == req) { req->rq_reply_bytes_recvd = req->rq_rcv_buf.len = 0; if (task->tk_client->cl_discrtry) xprt_conditional_disconnect(task->tk_xprt, req->rq_connect_cookie); } } static __be32 * rpc_encode_header(struct rpc_task *task) { struct rpc_clnt *clnt = task->tk_client; struct rpc_rqst *req = task->tk_rqstp; __be32 *p = req->rq_svec[0].iov_base; p = xprt_skip_transport_header(task->tk_xprt, p); *p++ = req->rq_xid; *p++ = htonl(RPC_CALL); *p++ = htonl(RPC_VERSION); *p++ = htonl(clnt->cl_prog); *p++ = htonl(clnt->cl_vers); *p++ = htonl(task->tk_msg.rpc_proc->p_proc); p = rpcauth_marshcred(task, p); req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p); return p; } static __be32 * rpc_verify_header(struct rpc_task *task) { struct rpc_clnt *clnt = task->tk_client; struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0]; int len = task->tk_rqstp->rq_rcv_buf.len >> 2; __be32 *p = iov->iov_base; u32 n; int error = -EACCES; if ((task->tk_rqstp->rq_rcv_buf.len & 3) != 0) { dprintk("RPC: %5u %s: XDR representation not a multiple of" " 4 bytes: 0x%x\n", task->tk_pid, __func__, task->tk_rqstp->rq_rcv_buf.len); goto out_eio; } if ((len -= 3) < 0) goto out_overflow; p += 1; if ((n = ntohl(*p++)) != RPC_REPLY) { dprintk("RPC: %5u %s: not an RPC reply: %x\n", task->tk_pid, __func__, n); goto out_garbage; } if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) { if (--len < 0) goto out_overflow; switch ((n = ntohl(*p++))) { case RPC_AUTH_ERROR: break; case RPC_MISMATCH: dprintk("RPC: %5u %s: RPC call version mismatch!\n", task->tk_pid, __func__); error = -EPROTONOSUPPORT; goto out_err; default: dprintk("RPC: %5u %s: RPC call rejected, " "unknown error: %x\n", task->tk_pid, __func__, n); 
goto out_eio; } if (--len < 0) goto out_overflow; switch ((n = ntohl(*p++))) { case RPC_AUTH_REJECTEDCRED: case RPC_AUTH_REJECTEDVERF: case RPCSEC_GSS_CREDPROBLEM: case RPCSEC_GSS_CTXPROBLEM: if (!task->tk_cred_retry) break; task->tk_cred_retry--; dprintk("RPC: %5u %s: retry stale creds\n", task->tk_pid, __func__); rpcauth_invalcred(task); xprt_release(task); task->tk_action = call_reserve; goto out_retry; case RPC_AUTH_BADCRED: case RPC_AUTH_BADVERF: if (!task->tk_garb_retry) break; task->tk_garb_retry--; dprintk("RPC: %5u %s: retry garbled creds\n", task->tk_pid, __func__); task->tk_action = call_bind; goto out_retry; case RPC_AUTH_TOOWEAK: rcu_read_lock(); printk(KERN_NOTICE "RPC: server %s requires stronger " "authentication.\n", rcu_dereference(clnt->cl_xprt)->servername); rcu_read_unlock(); break; default: dprintk("RPC: %5u %s: unknown auth error: %x\n", task->tk_pid, __func__, n); error = -EIO; } dprintk("RPC: %5u %s: call rejected %d\n", task->tk_pid, __func__, n); goto out_err; } if (!(p = rpcauth_checkverf(task, p))) { dprintk("RPC: %5u %s: auth check failed\n", task->tk_pid, __func__); goto out_garbage; } len = p - (__be32 *)iov->iov_base - 1; if (len < 0) goto out_overflow; switch ((n = ntohl(*p++))) { case RPC_SUCCESS: return p; case RPC_PROG_UNAVAIL: dprintk_rcu("RPC: %5u %s: program %u is unsupported " "by server %s\n", task->tk_pid, __func__, (unsigned int)clnt->cl_prog, rcu_dereference(clnt->cl_xprt)->servername); error = -EPFNOSUPPORT; goto out_err; case RPC_PROG_MISMATCH: dprintk_rcu("RPC: %5u %s: program %u, version %u unsupported " "by server %s\n", task->tk_pid, __func__, (unsigned int)clnt->cl_prog, (unsigned int)clnt->cl_vers, rcu_dereference(clnt->cl_xprt)->servername); error = -EPROTONOSUPPORT; goto out_err; case RPC_PROC_UNAVAIL: dprintk_rcu("RPC: %5u %s: proc %s unsupported by program %u, " "version %u on server %s\n", task->tk_pid, __func__, rpc_proc_name(task), clnt->cl_prog, clnt->cl_vers, rcu_dereference(clnt->cl_xprt)->servername); 
error = -EOPNOTSUPP; goto out_err; case RPC_GARBAGE_ARGS: dprintk("RPC: %5u %s: server saw garbage\n", task->tk_pid, __func__); break; default: dprintk("RPC: %5u %s: server accept status: %x\n", task->tk_pid, __func__, n); } out_garbage: clnt->cl_stats->rpcgarbage++; if (task->tk_garb_retry) { task->tk_garb_retry--; dprintk("RPC: %5u %s: retrying\n", task->tk_pid, __func__); task->tk_action = call_bind; out_retry: return ERR_PTR(-EAGAIN); } out_eio: error = -EIO; out_err: rpc_exit(task, error); dprintk("RPC: %5u %s: call failed with error %d\n", task->tk_pid, __func__, error); return ERR_PTR(error); out_overflow: dprintk("RPC: %5u %s: server reply was truncated.\n", task->tk_pid, __func__); goto out_garbage; } static void rpcproc_encode_null(void *rqstp, struct xdr_stream *xdr, void *obj) { } static int rpcproc_decode_null(void *rqstp, struct xdr_stream *xdr, void *obj) { return 0; } static struct rpc_procinfo rpcproc_null = { .p_encode = rpcproc_encode_null, .p_decode = rpcproc_decode_null, }; static int rpc_ping(struct rpc_clnt *clnt) { struct rpc_message msg = { .rpc_proc = &rpcproc_null, }; int err; msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0); err = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN); put_rpccred(msg.rpc_cred); return err; } struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int flags) { struct rpc_message msg = { .rpc_proc = &rpcproc_null, .rpc_cred = cred, }; struct rpc_task_setup task_setup_data = { .rpc_client = clnt, .rpc_message = &msg, .callback_ops = &rpc_default_ops, .flags = flags, }; return rpc_run_task(&task_setup_data); } EXPORT_SYMBOL_GPL(rpc_call_null); #ifdef RPC_DEBUG static void rpc_show_header(void) { printk(KERN_INFO "-pid- flgs status -client- --rqstp- " "-timeout ---ops--\n"); } static void rpc_show_task(const struct rpc_clnt *clnt, const struct rpc_task *task) { const char *rpc_waitq = "none"; if (RPC_IS_QUEUED(task)) rpc_waitq = rpc_qname(task->tk_waitqueue); printk(KERN_INFO 
"%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%ps q:%s\n", task->tk_pid, task->tk_flags, task->tk_status, clnt, task->tk_rqstp, task->tk_timeout, task->tk_ops, clnt->cl_protname, clnt->cl_vers, rpc_proc_name(task), task->tk_action, rpc_waitq); } void rpc_show_tasks(struct net *net) { struct rpc_clnt *clnt; struct rpc_task *task; int header = 0; struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); spin_lock(&sn->rpc_client_lock); list_for_each_entry(clnt, &sn->all_clients, cl_clients) { spin_lock(&clnt->cl_lock); list_for_each_entry(task, &clnt->cl_tasks, tk_task) { if (!header) { rpc_show_header(); header++; } rpc_show_task(clnt, task); } spin_unlock(&clnt->cl_lock); } spin_unlock(&sn->rpc_client_lock); } #endif
gpl-2.0
playfulgod/msm-3.0
arch/arm/mach-msm/qdsp6v2/rtac.c
102
27052
/* Copyright (c) 2011, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/fs.h> #include <linux/module.h> #include <linux/miscdevice.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/mutex.h> #include <linux/sched.h> #include <linux/msm_audio_acdb.h> #include <asm/atomic.h> #include <mach/qdsp6v2/audio_acdb.h> #include <mach/qdsp6v2/rtac.h> #include <sound/q6asm.h> #include <sound/q6adm.h> #ifndef CONFIG_RTAC void rtac_add_adm_device(u32 port_id, u32 copp_id, u32 path_id, u32 popp_id) {} void rtac_remove_adm_device(u32 port_id) {} void rtac_remove_popp_from_adm_devices(u32 popp_id) {} void rtac_set_adm_handle(void *handle) {} bool rtac_make_adm_callback(uint32_t *payload, u32 payload_size) {return false; } void rtac_set_asm_handle(u32 session_id, void *handle) {} bool rtac_make_asm_callback(u32 session_id, uint32_t *payload, u32 payload_size) {return false; } void rtac_add_voice(u32 cvs_handle, u32 cvp_handle, u32 rx_afe_port, u32 tx_afe_port, u32 session_id) {} void rtac_remove_voice(u32 cvs_handle) {} void rtac_set_voice_handle(u32 mode, void *handle) {} bool rtac_make_voice_callback(u32 mode, uint32_t *payload, u32 payload_size) {return false; } #else #define VOICE_CMD_SET_PARAM 0x00011006 #define VOICE_CMD_GET_PARAM 0x00011007 #define VOICE_EVT_GET_PARAM_ACK 0x00011008 /* Max size of payload (buf size - apr header) */ #define MAX_PAYLOAD_SIZE 4076 #define RTAC_MAX_ACTIVE_DEVICES 4 #define RTAC_MAX_ACTIVE_VOICE_COMBOS 2 #define RTAC_MAX_ACTIVE_POPP 8 #define RTAC_BUF_SIZE 4096 #define 
TIMEOUT_MS 1000 /* APR data */ struct rtac_apr_data { void *apr_handle; atomic_t cmd_state; wait_queue_head_t cmd_wait; }; static struct rtac_apr_data rtac_adm_apr_data; static struct rtac_apr_data rtac_asm_apr_data[SESSION_MAX+1]; static struct rtac_apr_data rtac_voice_apr_data[RTAC_VOICE_MODES]; /* ADM info & APR */ struct rtac_adm_data { uint32_t topology_id; uint32_t afe_port; uint32_t copp; uint32_t num_of_popp; uint32_t popp[RTAC_MAX_ACTIVE_POPP]; }; struct rtac_adm { uint32_t num_of_dev; struct rtac_adm_data device[RTAC_MAX_ACTIVE_DEVICES]; }; static struct rtac_adm rtac_adm_data; static u32 rtac_adm_payload_size; static u32 rtac_adm_user_buf_size; static u8 *rtac_adm_buffer; /* ASM APR */ static u32 rtac_asm_payload_size; static u32 rtac_asm_user_buf_size; static u8 *rtac_asm_buffer; /* Voice info & APR */ struct rtac_voice_data { uint32_t tx_topology_id; uint32_t rx_topology_id; uint32_t tx_afe_port; uint32_t rx_afe_port; uint16_t cvs_handle; uint16_t cvp_handle; }; struct rtac_voice { uint32_t num_of_voice_combos; struct rtac_voice_data voice[RTAC_MAX_ACTIVE_VOICE_COMBOS]; }; static struct rtac_voice rtac_voice_data; static u32 rtac_voice_payload_size; static u32 rtac_voice_user_buf_size; static u8 *rtac_voice_buffer; static u32 voice_session_id[RTAC_MAX_ACTIVE_VOICE_COMBOS]; struct mutex rtac_adm_mutex; struct mutex rtac_adm_apr_mutex; struct mutex rtac_asm_apr_mutex; struct mutex rtac_voice_mutex; struct mutex rtac_voice_apr_mutex; static int rtac_open(struct inode *inode, struct file *f) { pr_debug("%s\n", __func__); return 0; } static int rtac_release(struct inode *inode, struct file *f) { pr_debug("%s\n", __func__); return 0; } /* ADM Info */ void add_popp(u32 dev_idx, u32 port_id, u32 popp_id) { u32 i = 0; for (; i < rtac_adm_data.device[dev_idx].num_of_popp; i++) if (rtac_adm_data.device[dev_idx].popp[i] == popp_id) goto done; if (rtac_adm_data.device[dev_idx].num_of_popp == RTAC_MAX_ACTIVE_POPP) { pr_err("%s, Max POPP!\n", __func__); goto done; } 
rtac_adm_data.device[dev_idx].popp[ rtac_adm_data.device[dev_idx].num_of_popp++] = popp_id; done: return; } void rtac_add_adm_device(u32 port_id, u32 copp_id, u32 path_id, u32 popp_id) { u32 i = 0; pr_debug("%s: port_id = %d, popp_id = %d\n", __func__, port_id, popp_id); mutex_lock(&rtac_adm_mutex); if (rtac_adm_data.num_of_dev == RTAC_MAX_ACTIVE_DEVICES) { pr_err("%s, Can't add anymore RTAC devices!\n", __func__); goto done; } /* Check if device already added */ if (rtac_adm_data.num_of_dev != 0) { for (; i < rtac_adm_data.num_of_dev; i++) { if (rtac_adm_data.device[i].afe_port == port_id) { add_popp(i, port_id, popp_id); goto done; } if (rtac_adm_data.device[i].num_of_popp == RTAC_MAX_ACTIVE_POPP) { pr_err("%s, Max POPP!\n", __func__); goto done; } } } /* Add device */ rtac_adm_data.num_of_dev++; if (path_id == ADM_PATH_PLAYBACK) rtac_adm_data.device[i].topology_id = get_adm_rx_topology(); else rtac_adm_data.device[i].topology_id = get_adm_tx_topology(); rtac_adm_data.device[i].afe_port = port_id; rtac_adm_data.device[i].copp = copp_id; rtac_adm_data.device[i].popp[ rtac_adm_data.device[i].num_of_popp++] = popp_id; done: mutex_unlock(&rtac_adm_mutex); return; } static void shift_adm_devices(u32 dev_idx) { for (; dev_idx < rtac_adm_data.num_of_dev; dev_idx++) { memcpy(&rtac_adm_data.device[dev_idx], &rtac_adm_data.device[dev_idx + 1], sizeof(rtac_adm_data.device[dev_idx])); memset(&rtac_adm_data.device[dev_idx + 1], 0, sizeof(rtac_adm_data.device[dev_idx])); } } static void shift_popp(u32 copp_idx, u32 popp_idx) { for (; popp_idx < rtac_adm_data.device[copp_idx].num_of_popp; popp_idx++) { memcpy(&rtac_adm_data.device[copp_idx].popp[popp_idx], &rtac_adm_data.device[copp_idx].popp[popp_idx + 1], sizeof(uint32_t)); memset(&rtac_adm_data.device[copp_idx].popp[popp_idx + 1], 0, sizeof(uint32_t)); } } void rtac_remove_adm_device(u32 port_id) { s32 i; pr_debug("%s: port_id = %d\n", __func__, port_id); mutex_lock(&rtac_adm_mutex); /* look for device */ for (i = 0; i < 
rtac_adm_data.num_of_dev; i++) { if (rtac_adm_data.device[i].afe_port == port_id) { memset(&rtac_adm_data.device[i], 0, sizeof(rtac_adm_data.device[i])); rtac_adm_data.num_of_dev--; if (rtac_adm_data.num_of_dev >= 1) { shift_adm_devices(i); break; } } } mutex_unlock(&rtac_adm_mutex); return; } void rtac_remove_popp_from_adm_devices(u32 popp_id) { s32 i, j; pr_debug("%s: popp_id = %d\n", __func__, popp_id); mutex_lock(&rtac_adm_mutex); for (i = 0; i < rtac_adm_data.num_of_dev; i++) { for (j = 0; j < rtac_adm_data.device[i].num_of_popp; j++) { if (rtac_adm_data.device[i].popp[j] == popp_id) { rtac_adm_data.device[i].popp[j] = 0; rtac_adm_data.device[i].num_of_popp--; shift_popp(i, j); } } } mutex_unlock(&rtac_adm_mutex); } /* Voice Info */ static void set_rtac_voice_data(int idx, u32 cvs_handle, u32 cvp_handle, u32 rx_afe_port, u32 tx_afe_port, u32 session_id) { rtac_voice_data.voice[idx].tx_topology_id = get_voice_tx_topology(); rtac_voice_data.voice[idx].rx_topology_id = get_voice_rx_topology(); rtac_voice_data.voice[idx].tx_afe_port = tx_afe_port; rtac_voice_data.voice[idx].rx_afe_port = rx_afe_port; rtac_voice_data.voice[idx].cvs_handle = cvs_handle; rtac_voice_data.voice[idx].cvp_handle = cvp_handle; /* Store session ID for voice RTAC */ voice_session_id[idx] = session_id; } void rtac_add_voice(u32 cvs_handle, u32 cvp_handle, u32 rx_afe_port, u32 tx_afe_port, u32 session_id) { u32 i = 0; pr_debug("%s\n", __func__); mutex_lock(&rtac_voice_mutex); if (rtac_voice_data.num_of_voice_combos == RTAC_MAX_ACTIVE_VOICE_COMBOS) { pr_err("%s, Can't add anymore RTAC devices!\n", __func__); goto done; } /* Check if device already added */ if (rtac_voice_data.num_of_voice_combos != 0) { for (; i < rtac_voice_data.num_of_voice_combos; i++) { if (rtac_voice_data.voice[i].cvs_handle == cvs_handle) { set_rtac_voice_data(i, cvs_handle, cvp_handle, rx_afe_port, tx_afe_port, session_id); goto done; } } } /* Add device */ rtac_voice_data.num_of_voice_combos++; set_rtac_voice_data(i, 
cvs_handle, cvp_handle, rx_afe_port, tx_afe_port, session_id); done: mutex_unlock(&rtac_voice_mutex); return; } static void shift_voice_devices(u32 idx) { for (; idx < rtac_voice_data.num_of_voice_combos - 1; idx++) { memcpy(&rtac_voice_data.voice[idx], &rtac_voice_data.voice[idx + 1], sizeof(rtac_voice_data.voice[idx])); voice_session_id[idx] = voice_session_id[idx + 1]; } } void rtac_remove_voice(u32 cvs_handle) { u32 i = 0; pr_debug("%s\n", __func__); mutex_lock(&rtac_voice_mutex); /* look for device */ for (i = 0; i < rtac_voice_data.num_of_voice_combos; i++) { if (rtac_voice_data.voice[i].cvs_handle == cvs_handle) { shift_voice_devices(i); rtac_voice_data.num_of_voice_combos--; memset(&rtac_voice_data.voice[ rtac_voice_data.num_of_voice_combos], 0, sizeof(rtac_voice_data.voice [rtac_voice_data.num_of_voice_combos])); voice_session_id[rtac_voice_data.num_of_voice_combos] = 0; break; } } mutex_unlock(&rtac_voice_mutex); return; } static int get_voice_index(u32 cvs_handle) { u32 i; for (i = 0; i < rtac_voice_data.num_of_voice_combos; i++) { if (rtac_voice_data.voice[i].cvs_handle == cvs_handle) return i; } pr_err("%s: No voice index for CVS handle %d found returning 0\n", __func__, cvs_handle); return 0; } /* ADM APR */ void rtac_set_adm_handle(void *handle) { pr_debug("%s: handle = %d\n", __func__, (unsigned int)handle); mutex_lock(&rtac_adm_apr_mutex); rtac_adm_apr_data.apr_handle = handle; mutex_unlock(&rtac_adm_apr_mutex); } bool rtac_make_adm_callback(uint32_t *payload, u32 payload_size) { pr_debug("%s:cmd_state = %d\n", __func__, atomic_read(&rtac_adm_apr_data.cmd_state)); if (atomic_read(&rtac_adm_apr_data.cmd_state) != 1) return false; /* Offset data for in-band payload */ rtac_copy_adm_payload_to_user(payload, payload_size); atomic_set(&rtac_adm_apr_data.cmd_state, 0); wake_up(&rtac_adm_apr_data.cmd_wait); return true; } void rtac_copy_adm_payload_to_user(void *payload, u32 payload_size) { pr_debug("%s\n", __func__); rtac_adm_payload_size = payload_size; 
memcpy(rtac_adm_buffer, &payload_size, sizeof(u32)); if (payload_size != 0) { if (payload_size > rtac_adm_user_buf_size) { pr_err("%s: Buffer set not big enough for " "returned data, buf size = %d, " "ret data = %d\n", __func__, rtac_adm_user_buf_size, payload_size); goto done; } memcpy(rtac_adm_buffer + sizeof(u32), payload, payload_size); } done: return; } u32 send_adm_apr(void *buf, u32 opcode) { s32 result; u32 count = 0; u32 bytes_returned = 0; u32 port_index = 0; u32 copp_id; u32 payload_size; struct apr_hdr adm_params; pr_debug("%s\n", __func__); if (copy_from_user(&count, (void *)buf, sizeof(count))) { pr_err("%s: Copy to user failed! buf = 0x%x\n", __func__, (unsigned int)buf); result = -EFAULT; goto done; } if (count <= 0) { pr_err("%s: Invalid buffer size = %d\n", __func__, count); goto done; } if (copy_from_user(&payload_size, buf + sizeof(u32), sizeof(u32))) { pr_err("%s: Could not copy payload size from user buffer\n", __func__); goto done; } if ((payload_size < 0) || (payload_size > MAX_PAYLOAD_SIZE)) { pr_err("%s: Invalid payload size = %d\n", __func__, payload_size); goto done; } if (copy_from_user(&copp_id, buf + 2 * sizeof(u32), sizeof(u32))) { pr_err("%s: Could not copy port id from user buffer\n", __func__); goto done; } for (port_index = 0; port_index < AFE_MAX_PORTS; port_index++) { if (adm_get_copp_id(port_index) == copp_id) break; } if (port_index >= AFE_MAX_PORTS) { pr_err("%s: Could not find port index for copp = %d\n", __func__, copp_id); goto done; } mutex_lock(&rtac_adm_apr_mutex); if (rtac_adm_apr_data.apr_handle == NULL) { pr_err("%s: APR not initialized\n", __func__); goto err; } /* Set globals for copy of returned payload */ rtac_adm_user_buf_size = count; /* Copy buffer to in-band payload */ if (copy_from_user(rtac_adm_buffer + sizeof(adm_params), buf + 3 * sizeof(u32), payload_size)) { pr_err("%s: Could not copy payload from user buffer\n", __func__); goto err; } /* Pack header */ adm_params.hdr_field = 
APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(20), APR_PKT_VER); adm_params.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, payload_size); adm_params.src_svc = APR_SVC_ADM; adm_params.src_domain = APR_DOMAIN_APPS; adm_params.src_port = port_index; adm_params.dest_svc = APR_SVC_ADM; adm_params.dest_domain = APR_DOMAIN_ADSP; adm_params.dest_port = copp_id; adm_params.token = port_index; adm_params.opcode = opcode; memcpy(rtac_adm_buffer, &adm_params, sizeof(adm_params)); atomic_set(&rtac_adm_apr_data.cmd_state, 1); pr_debug("%s: Sending RTAC command size = %d\n", __func__, adm_params.pkt_size); result = apr_send_pkt(rtac_adm_apr_data.apr_handle, (uint32_t *)rtac_adm_buffer); if (result < 0) { pr_err("%s: Set params failed port = %d, copp = %d\n", __func__, port_index, copp_id); goto err; } /* Wait for the callback */ result = wait_event_timeout(rtac_adm_apr_data.cmd_wait, (atomic_read(&rtac_adm_apr_data.cmd_state) == 0), msecs_to_jiffies(TIMEOUT_MS)); mutex_unlock(&rtac_adm_apr_mutex); if (!result) { pr_err("%s: Set params timed out port = %d, copp = %d\n", __func__, port_index, copp_id); goto done; } if (rtac_adm_payload_size != 0) { if (copy_to_user(buf, rtac_adm_buffer, rtac_adm_payload_size + sizeof(u32))) { pr_err("%s: Could not copy buffer to user," "size = %d\n", __func__, payload_size); goto done; } } /* Return data written for SET & data read for GET */ if (opcode == ADM_CMD_GET_PARAMS) bytes_returned = rtac_adm_payload_size; else bytes_returned = payload_size; done: return bytes_returned; err: mutex_unlock(&rtac_adm_apr_mutex); return bytes_returned; } /* ASM APR */ void rtac_set_asm_handle(u32 session_id, void *handle) { pr_debug("%s\n", __func__); mutex_lock(&rtac_asm_apr_mutex); rtac_asm_apr_data[session_id].apr_handle = handle; mutex_unlock(&rtac_asm_apr_mutex); } bool rtac_make_asm_callback(u32 session_id, uint32_t *payload, u32 payload_size) { if (atomic_read(&rtac_asm_apr_data[session_id].cmd_state) != 1) return false; pr_debug("%s\n", __func__); /* Offset 
data for in-band payload */ rtac_copy_asm_payload_to_user(payload, payload_size); atomic_set(&rtac_asm_apr_data[session_id].cmd_state, 0); wake_up(&rtac_asm_apr_data[session_id].cmd_wait); return true; } void rtac_copy_asm_payload_to_user(void *payload, u32 payload_size) { pr_debug("%s\n", __func__); rtac_asm_payload_size = payload_size; memcpy(rtac_asm_buffer, &payload_size, sizeof(u32)); if (payload_size) { if (payload_size > rtac_asm_user_buf_size) { pr_err("%s: Buffer set not big enough for " "returned data, buf size = %d, " "ret data = %d\n", __func__, rtac_asm_user_buf_size, payload_size); goto done; } memcpy(rtac_asm_buffer + sizeof(u32), payload, payload_size); } done: return; } u32 send_rtac_asm_apr(void *buf, u32 opcode) { s32 result; u32 count = 0; u32 bytes_returned = 0; u32 session_id = 0; u32 payload_size; struct apr_hdr asm_params; pr_debug("%s\n", __func__); if (copy_from_user(&count, (void *)buf, sizeof(count))) { pr_err("%s: Copy to user failed! buf = 0x%x\n", __func__, (unsigned int)buf); result = -EFAULT; goto done; } if (count <= 0) { pr_err("%s: Invalid buffer size = %d\n", __func__, count); goto done; } if (copy_from_user(&payload_size, buf + sizeof(u32), sizeof(u32))) { pr_err("%s: Could not copy payload size from user buffer\n", __func__); goto done; } if ((payload_size < 0) || (payload_size > MAX_PAYLOAD_SIZE)) { pr_err("%s: Invalid payload size = %d\n", __func__, payload_size); goto done; } if (copy_from_user(&session_id, buf + 2 * sizeof(u32), sizeof(u32))) { pr_err("%s: Could not copy session id from user buffer\n", __func__); goto done; } if (session_id >= AFE_MAX_PORTS) { pr_err("%s: Invalid Session = %d\n", __func__, session_id); goto done; } mutex_lock(&rtac_asm_apr_mutex); if (rtac_asm_apr_data[session_id].apr_handle == NULL) { pr_err("%s: APR not initialized\n", __func__); goto err; } /* Set globals for copy of returned payload */ rtac_asm_user_buf_size = count; /* Copy buffer to in-band payload */ if 
(copy_from_user(rtac_asm_buffer + sizeof(asm_params), buf + 3 * sizeof(u32), payload_size)) { pr_err("%s: Could not copy payload from user buffer\n", __func__); goto err; } /* Pack header */ asm_params.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(20), APR_PKT_VER); asm_params.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, payload_size); asm_params.src_svc = q6asm_get_apr_service_id(session_id); asm_params.src_domain = APR_DOMAIN_APPS; asm_params.src_port = (session_id << 8) | 0x0001; asm_params.dest_svc = APR_SVC_ASM; asm_params.dest_domain = APR_DOMAIN_ADSP; asm_params.dest_port = (session_id << 8) | 0x0001; asm_params.token = session_id; asm_params.opcode = opcode; memcpy(rtac_asm_buffer, &asm_params, sizeof(asm_params)); atomic_set(&rtac_asm_apr_data[session_id].cmd_state, 1); pr_debug("%s: Sending RTAC command size = %d, session_id=%d\n", __func__, asm_params.pkt_size, session_id); result = apr_send_pkt(rtac_asm_apr_data[session_id].apr_handle, (uint32_t *)rtac_asm_buffer); if (result < 0) { pr_err("%s: Set params failed session = %d\n", __func__, session_id); goto err; } /* Wait for the callback */ result = wait_event_timeout(rtac_asm_apr_data[session_id].cmd_wait, (atomic_read(&rtac_asm_apr_data[session_id].cmd_state) == 0), 5 * HZ); mutex_unlock(&rtac_asm_apr_mutex); if (!result) { pr_err("%s: Set params timed out session = %d\n", __func__, session_id); goto done; } if (rtac_asm_payload_size != 0) { if (copy_to_user(buf, rtac_asm_buffer, rtac_asm_payload_size + sizeof(u32))) { pr_err("%s: Could not copy buffer to user," "size = %d\n", __func__, payload_size); goto done; } } /* Return data written for SET & data read for GET */ if (opcode == ASM_STREAM_CMD_GET_PP_PARAMS) bytes_returned = rtac_asm_payload_size; else bytes_returned = payload_size; done: return bytes_returned; err: mutex_unlock(&rtac_asm_apr_mutex); return bytes_returned; } /* Voice APR */ void rtac_set_voice_handle(u32 mode, void *handle) { pr_debug("%s\n", __func__); 
mutex_lock(&rtac_voice_apr_mutex); rtac_voice_apr_data[mode].apr_handle = handle; mutex_unlock(&rtac_voice_apr_mutex); } bool rtac_make_voice_callback(u32 mode, uint32_t *payload, u32 payload_size) { if ((atomic_read(&rtac_voice_apr_data[mode].cmd_state) != 1) || (mode < 0) || (mode >= RTAC_VOICE_MODES)) return false; pr_debug("%s\n", __func__); /* Offset data for in-band payload */ rtac_copy_voice_payload_to_user(payload, payload_size); atomic_set(&rtac_voice_apr_data[mode].cmd_state, 0); wake_up(&rtac_voice_apr_data[mode].cmd_wait); return true; } void rtac_copy_voice_payload_to_user(void *payload, u32 payload_size) { pr_debug("%s\n", __func__); rtac_voice_payload_size = payload_size; memcpy(rtac_voice_buffer, &payload_size, sizeof(u32)); if (payload_size) { if (payload_size > rtac_voice_user_buf_size) { pr_err("%s: Buffer set not big enough for " "returned data, buf size = %d, " "ret data = %d\n", __func__, rtac_voice_user_buf_size, payload_size); goto done; } memcpy(rtac_voice_buffer + sizeof(u32), payload, payload_size); } done: return; } u32 send_voice_apr(u32 mode, void *buf, u32 opcode) { s32 result; u32 count = 0; u32 bytes_returned = 0; u32 payload_size; u16 dest_port; struct apr_hdr voice_params; pr_debug("%s\n", __func__); if (copy_from_user(&count, (void *)buf, sizeof(count))) { pr_err("%s: Copy to user failed! 
buf = 0x%x\n", __func__, (unsigned int)buf); result = -EFAULT; goto done; } if (count <= 0) { pr_err("%s: Invalid buffer size = %d\n", __func__, count); goto done; } if (copy_from_user(&payload_size, buf + sizeof(u32), sizeof(u32))) { pr_err("%s: Could not copy payload size from user buffer\n", __func__); goto done; } if ((payload_size < 0) || (payload_size > MAX_PAYLOAD_SIZE)) { pr_err("%s: Invalid payload size = %d\n", __func__, payload_size); goto done; } if (copy_from_user(&dest_port, buf + 2 * sizeof(u32), sizeof(u32))) { pr_err("%s: Could not copy port id from user buffer\n", __func__); goto done; } if ((mode != RTAC_CVP) && (mode != RTAC_CVS)) { pr_err("%s: Invalid Mode for APR, mode = %d\n", __func__, mode); goto done; } mutex_lock(&rtac_voice_apr_mutex); if (rtac_voice_apr_data[mode].apr_handle == NULL) { pr_err("%s: APR not initialized\n", __func__); goto err; } /* Set globals for copy of returned payload */ rtac_voice_user_buf_size = count; /* Copy buffer to in-band payload */ if (copy_from_user(rtac_voice_buffer + sizeof(voice_params), buf + 3 * sizeof(u32), payload_size)) { pr_err("%s: Could not copy payload from user buffer\n", __func__); goto err; } /* Pack header */ voice_params.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(20), APR_PKT_VER); voice_params.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, payload_size); voice_params.src_svc = 0; voice_params.src_domain = APR_DOMAIN_APPS; voice_params.src_port = voice_session_id[ get_voice_index(dest_port)]; voice_params.dest_svc = 0; voice_params.dest_domain = APR_DOMAIN_MODEM; voice_params.dest_port = dest_port; voice_params.token = 0; voice_params.opcode = opcode; memcpy(rtac_voice_buffer, &voice_params, sizeof(voice_params)); atomic_set(&rtac_voice_apr_data[mode].cmd_state, 1); pr_debug("%s: Sending RTAC command size = %d, opcode = %x\n", __func__, voice_params.pkt_size, opcode); result = apr_send_pkt(rtac_voice_apr_data[mode].apr_handle, (uint32_t *)rtac_voice_buffer); if (result < 0) { 
pr_err("%s: apr_send_pkt failed opcode = %x\n", __func__, opcode); goto err; } /* Wait for the callback */ result = wait_event_timeout(rtac_voice_apr_data[mode].cmd_wait, (atomic_read(&rtac_voice_apr_data[mode].cmd_state) == 0), msecs_to_jiffies(TIMEOUT_MS)); mutex_unlock(&rtac_voice_apr_mutex); if (!result) { pr_err("%s: apr_send_pkt timed out opcode = %x\n", __func__, opcode); goto done; } if (rtac_voice_payload_size != 0) { if (copy_to_user(buf, rtac_voice_buffer, rtac_voice_payload_size + sizeof(u32))) { pr_err("%s: Could not copy buffer to user," "size = %d\n", __func__, payload_size); goto done; } } /* Return data written for SET & data read for GET */ if (opcode == VOICE_CMD_GET_PARAM) bytes_returned = rtac_voice_payload_size; else bytes_returned = payload_size; done: return bytes_returned; err: mutex_unlock(&rtac_voice_apr_mutex); return bytes_returned; } static long rtac_ioctl(struct file *f, unsigned int cmd, unsigned long arg) { s32 result = 0; pr_debug("%s\n", __func__); if (arg == 0) { pr_err("%s: No data sent to driver!\n", __func__); result = -EFAULT; goto done; } switch (cmd) { case AUDIO_GET_RTAC_ADM_INFO: if (copy_to_user((void *)arg, &rtac_adm_data, sizeof(rtac_adm_data))) pr_err("%s: Could not copy to userspace!\n", __func__); else result = sizeof(rtac_adm_data); break; case AUDIO_GET_RTAC_VOICE_INFO: if (copy_to_user((void *)arg, &rtac_voice_data, sizeof(rtac_voice_data))) pr_err("%s: Could not copy to userspace!\n", __func__); else result = sizeof(rtac_voice_data); break; case AUDIO_GET_RTAC_ADM_CAL: result = send_adm_apr((void *)arg, ADM_CMD_GET_PARAMS); break; case AUDIO_SET_RTAC_ADM_CAL: result = send_adm_apr((void *)arg, ADM_CMD_SET_PARAMS); break; case AUDIO_GET_RTAC_ASM_CAL: result = send_rtac_asm_apr((void *)arg, ASM_STREAM_CMD_GET_PP_PARAMS); break; case AUDIO_SET_RTAC_ASM_CAL: result = send_rtac_asm_apr((void *)arg, ASM_STREAM_CMD_SET_PP_PARAMS); break; case AUDIO_GET_RTAC_CVS_CAL: result = send_voice_apr(RTAC_CVS, (void *)arg, 
VOICE_CMD_GET_PARAM); break; case AUDIO_SET_RTAC_CVS_CAL: result = send_voice_apr(RTAC_CVS, (void *)arg, VOICE_CMD_SET_PARAM); break; case AUDIO_GET_RTAC_CVP_CAL: result = send_voice_apr(RTAC_CVP, (void *)arg, VOICE_CMD_GET_PARAM); break; case AUDIO_SET_RTAC_CVP_CAL: result = send_voice_apr(RTAC_CVP, (void *)arg, VOICE_CMD_SET_PARAM); break; default: pr_err("%s: Invalid IOCTL, command = %d!\n", __func__, cmd); } done: return result; } static const struct file_operations rtac_fops = { .owner = THIS_MODULE, .open = rtac_open, .release = rtac_release, .unlocked_ioctl = rtac_ioctl, }; struct miscdevice rtac_misc = { .minor = MISC_DYNAMIC_MINOR, .name = "msm_rtac", .fops = &rtac_fops, }; static int __init rtac_init(void) { int i = 0; pr_debug("%s\n", __func__); /* ADM */ memset(&rtac_adm_data, 0, sizeof(rtac_adm_data)); rtac_adm_apr_data.apr_handle = NULL; atomic_set(&rtac_adm_apr_data.cmd_state, 0); init_waitqueue_head(&rtac_adm_apr_data.cmd_wait); mutex_init(&rtac_adm_mutex); mutex_init(&rtac_adm_apr_mutex); rtac_adm_buffer = kmalloc(RTAC_BUF_SIZE, GFP_KERNEL); if (rtac_adm_buffer == NULL) { pr_err("%s: Could not allocate payload of size = %d\n", __func__, RTAC_BUF_SIZE); goto nomem; } /* ASM */ for (i = 0; i < SESSION_MAX+1; i++) { rtac_asm_apr_data[i].apr_handle = NULL; atomic_set(&rtac_asm_apr_data[i].cmd_state, 0); init_waitqueue_head(&rtac_asm_apr_data[i].cmd_wait); } mutex_init(&rtac_asm_apr_mutex); rtac_asm_buffer = kmalloc(RTAC_BUF_SIZE, GFP_KERNEL); if (rtac_asm_buffer == NULL) { pr_err("%s: Could not allocate payload of size = %d\n", __func__, RTAC_BUF_SIZE); goto nomem; } /* Voice */ memset(&rtac_voice_data, 0, sizeof(rtac_voice_data)); for (i = 0; i < RTAC_VOICE_MODES; i++) { rtac_voice_apr_data[i].apr_handle = NULL; atomic_set(&rtac_voice_apr_data[i].cmd_state, 0); init_waitqueue_head(&rtac_voice_apr_data[i].cmd_wait); } mutex_init(&rtac_voice_mutex); mutex_init(&rtac_voice_apr_mutex); rtac_voice_buffer = kmalloc(RTAC_BUF_SIZE, GFP_KERNEL); if 
(rtac_voice_buffer == NULL) { pr_err("%s: Could not allocate payload of size = %d\n", __func__, RTAC_BUF_SIZE); goto nomem; } return misc_register(&rtac_misc); nomem: return -ENOMEM; } module_init(rtac_init); MODULE_DESCRIPTION("MSM 8x60 Real-Time Audio Calibration driver"); MODULE_LICENSE("GPL v2"); #endif
gpl-2.0
mehrvarz/android_kernel_asus_grouper
fs/logfs/dir.c
358
21375
/* * fs/logfs/dir.c - directory-related code * * As should be obvious for Linux kernel code, license is GPLv2 * * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org> */ #include "logfs.h" #include <linux/slab.h> /* * Atomic dir operations * * Directory operations are by default not atomic. Dentries and Inodes are * created/removed/altered in separate operations. Therefore we need to do * a small amount of journaling. * * Create, link, mkdir, mknod and symlink all share the same function to do * the work: __logfs_create. This function works in two atomic steps: * 1. allocate inode (remember in journal) * 2. allocate dentry (clear journal) * * As we can only get interrupted between the two, when the inode we just * created is simply stored in the anchor. On next mount, if we were * interrupted, we delete the inode. From a users point of view the * operation never happened. * * Unlink and rmdir also share the same function: unlink. Again, this * function works in two atomic steps * 1. remove dentry (remember inode in journal) * 2. unlink inode (clear journal) * * And again, on the next mount, if we were interrupted, we delete the inode. * From a users point of view the operation succeeded. * * Rename is the real pain to deal with, harder than all the other methods * combined. Depending on the circumstances we can run into three cases. * A "target rename" where the target dentry already existed, a "local * rename" where both parent directories are identical or a "cross-directory * rename" in the remaining case. * * Local rename is atomic, as the old dentry is simply rewritten with a new * name. * * Cross-directory rename works in two steps, similar to __logfs_create and * logfs_unlink: * 1. Write new dentry (remember old dentry in journal) * 2. Remove old dentry (clear journal) * * Here we remember a dentry instead of an inode. On next mount, if we were * interrupted, we delete the dentry. From a users point of view, the * operation succeeded. 
* * Target rename works in three atomic steps: * 1. Attach old inode to new dentry (remember old dentry and new inode) * 2. Remove old dentry (still remember the new inode) * 3. Remove victim inode * * Here we remember both an inode an a dentry. If we get interrupted * between steps 1 and 2, we delete both the dentry and the inode. If * we get interrupted between steps 2 and 3, we delete just the inode. * In either case, the remaining objects are deleted on next mount. From * a users point of view, the operation succeeded. */ static int write_dir(struct inode *dir, struct logfs_disk_dentry *dd, loff_t pos) { return logfs_inode_write(dir, dd, sizeof(*dd), pos, WF_LOCK, NULL); } static int write_inode(struct inode *inode) { return __logfs_write_inode(inode, WF_LOCK); } static s64 dir_seek_data(struct inode *inode, s64 pos) { s64 new_pos = logfs_seek_data(inode, pos); return max(pos, new_pos - 1); } static int beyond_eof(struct inode *inode, loff_t bix) { loff_t pos = bix << inode->i_sb->s_blocksize_bits; return pos >= i_size_read(inode); } /* * Prime value was chosen to be roughly 256 + 26. r5 hash uses 11, * so short names (len <= 9) don't even occupy the complete 32bit name * space. A prime >256 ensures short names quickly spread the 32bit * name space. Add about 26 for the estimated amount of information * of each character and pick a prime nearby, preferably a bit-sparse * one. */ static u32 hash_32(const char *s, int len, u32 seed) { u32 hash = seed; int i; for (i = 0; i < len; i++) hash = hash * 293 + s[i]; return hash; } /* * We have to satisfy several conflicting requirements here. Small * directories should stay fairly compact and not require too many * indirect blocks. The number of possible locations for a given hash * should be small to make lookup() fast. And we should try hard not * to overflow the 32bit name space or nfs and 32bit host systems will * be unhappy. * * So we use the following scheme. 
First we reduce the hash to 0..15 * and try a direct block. If that is occupied we reduce the hash to * 16..255 and try an indirect block. Same for 2x and 3x indirect * blocks. Lastly we reduce the hash to 0x800_0000 .. 0xffff_ffff, * but use buckets containing eight entries instead of a single one. * * Using 16 entries should allow for a reasonable amount of hash * collisions, so the 32bit name space can be packed fairly tight * before overflowing. Oh and currently we don't overflow but return * and error. * * How likely are collisions? Doing the appropriate math is beyond me * and the Bronstein textbook. But running a test program to brute * force collisions for a couple of days showed that on average the * first collision occurs after 598M entries, with 290M being the * smallest result. Obviously 21 entries could already cause a * collision if all entries are carefully chosen. */ static pgoff_t hash_index(u32 hash, int round) { u32 i0_blocks = I0_BLOCKS; u32 i1_blocks = I1_BLOCKS; u32 i2_blocks = I2_BLOCKS; u32 i3_blocks = I3_BLOCKS; switch (round) { case 0: return hash % i0_blocks; case 1: return i0_blocks + hash % (i1_blocks - i0_blocks); case 2: return i1_blocks + hash % (i2_blocks - i1_blocks); case 3: return i2_blocks + hash % (i3_blocks - i2_blocks); case 4 ... 
19: return i3_blocks + 16 * (hash % (((1<<31) - i3_blocks) / 16)) + round - 4; } BUG(); } static struct page *logfs_get_dd_page(struct inode *dir, struct dentry *dentry) { struct qstr *name = &dentry->d_name; struct page *page; struct logfs_disk_dentry *dd; u32 hash = hash_32(name->name, name->len, 0); pgoff_t index; int round; if (name->len > LOGFS_MAX_NAMELEN) return ERR_PTR(-ENAMETOOLONG); for (round = 0; round < 20; round++) { index = hash_index(hash, round); if (beyond_eof(dir, index)) return NULL; if (!logfs_exist_block(dir, index)) continue; page = read_cache_page(dir->i_mapping, index, (filler_t *)logfs_readpage, NULL); if (IS_ERR(page)) return page; dd = kmap_atomic(page, KM_USER0); BUG_ON(dd->namelen == 0); if (name->len != be16_to_cpu(dd->namelen) || memcmp(name->name, dd->name, name->len)) { kunmap_atomic(dd, KM_USER0); page_cache_release(page); continue; } kunmap_atomic(dd, KM_USER0); return page; } return NULL; } static int logfs_remove_inode(struct inode *inode) { int ret; inode->i_nlink--; ret = write_inode(inode); LOGFS_BUG_ON(ret, inode->i_sb); return ret; } static void abort_transaction(struct inode *inode, struct logfs_transaction *ta) { if (logfs_inode(inode)->li_block) logfs_inode(inode)->li_block->ta = NULL; kfree(ta); } static int logfs_unlink(struct inode *dir, struct dentry *dentry) { struct logfs_super *super = logfs_super(dir->i_sb); struct inode *inode = dentry->d_inode; struct logfs_transaction *ta; struct page *page; pgoff_t index; int ret; ta = kzalloc(sizeof(*ta), GFP_KERNEL); if (!ta) return -ENOMEM; ta->state = UNLINK_1; ta->ino = inode->i_ino; inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; page = logfs_get_dd_page(dir, dentry); if (!page) { kfree(ta); return -ENOENT; } if (IS_ERR(page)) { kfree(ta); return PTR_ERR(page); } index = page->index; page_cache_release(page); mutex_lock(&super->s_dirop_mutex); logfs_add_transaction(dir, ta); ret = logfs_delete(dir, index, NULL); if (!ret) ret = write_inode(dir); if (ret) { 
abort_transaction(dir, ta); printk(KERN_ERR"LOGFS: unable to delete inode\n"); goto out; } ta->state = UNLINK_2; logfs_add_transaction(inode, ta); ret = logfs_remove_inode(inode); out: mutex_unlock(&super->s_dirop_mutex); return ret; } static inline int logfs_empty_dir(struct inode *dir) { u64 data; data = logfs_seek_data(dir, 0) << dir->i_sb->s_blocksize_bits; return data >= i_size_read(dir); } static int logfs_rmdir(struct inode *dir, struct dentry *dentry) { struct inode *inode = dentry->d_inode; if (!logfs_empty_dir(inode)) return -ENOTEMPTY; return logfs_unlink(dir, dentry); } /* FIXME: readdir currently has it's own dir_walk code. I don't see a good * way to combine the two copies */ #define IMPLICIT_NODES 2 static int __logfs_readdir(struct file *file, void *buf, filldir_t filldir) { struct inode *dir = file->f_dentry->d_inode; loff_t pos = file->f_pos - IMPLICIT_NODES; struct page *page; struct logfs_disk_dentry *dd; int full; BUG_ON(pos < 0); for (;; pos++) { if (beyond_eof(dir, pos)) break; if (!logfs_exist_block(dir, pos)) { /* deleted dentry */ pos = dir_seek_data(dir, pos); continue; } page = read_cache_page(dir->i_mapping, pos, (filler_t *)logfs_readpage, NULL); if (IS_ERR(page)) return PTR_ERR(page); dd = kmap(page); BUG_ON(dd->namelen == 0); full = filldir(buf, (char *)dd->name, be16_to_cpu(dd->namelen), pos, be64_to_cpu(dd->ino), dd->type); kunmap(page); page_cache_release(page); if (full) break; } file->f_pos = pos + IMPLICIT_NODES; return 0; } static int logfs_readdir(struct file *file, void *buf, filldir_t filldir) { struct inode *inode = file->f_dentry->d_inode; ino_t pino = parent_ino(file->f_dentry); int err; if (file->f_pos < 0) return -EINVAL; if (file->f_pos == 0) { if (filldir(buf, ".", 1, 1, inode->i_ino, DT_DIR) < 0) return 0; file->f_pos++; } if (file->f_pos == 1) { if (filldir(buf, "..", 2, 2, pino, DT_DIR) < 0) return 0; file->f_pos++; } err = __logfs_readdir(file, buf, filldir); return err; } static void logfs_set_name(struct 
logfs_disk_dentry *dd, struct qstr *name) { dd->namelen = cpu_to_be16(name->len); memcpy(dd->name, name->name, name->len); } static struct dentry *logfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { struct page *page; struct logfs_disk_dentry *dd; pgoff_t index; u64 ino = 0; struct inode *inode; page = logfs_get_dd_page(dir, dentry); if (IS_ERR(page)) return ERR_CAST(page); if (!page) { d_add(dentry, NULL); return NULL; } index = page->index; dd = kmap_atomic(page, KM_USER0); ino = be64_to_cpu(dd->ino); kunmap_atomic(dd, KM_USER0); page_cache_release(page); inode = logfs_iget(dir->i_sb, ino); if (IS_ERR(inode)) printk(KERN_ERR"LogFS: Cannot read inode #%llx for dentry (%lx, %lx)n", ino, dir->i_ino, index); return d_splice_alias(inode, dentry); } static void grow_dir(struct inode *dir, loff_t index) { index = (index + 1) << dir->i_sb->s_blocksize_bits; if (i_size_read(dir) < index) i_size_write(dir, index); } static int logfs_write_dir(struct inode *dir, struct dentry *dentry, struct inode *inode) { struct page *page; struct logfs_disk_dentry *dd; u32 hash = hash_32(dentry->d_name.name, dentry->d_name.len, 0); pgoff_t index; int round, err; for (round = 0; round < 20; round++) { index = hash_index(hash, round); if (logfs_exist_block(dir, index)) continue; page = find_or_create_page(dir->i_mapping, index, GFP_KERNEL); if (!page) return -ENOMEM; dd = kmap_atomic(page, KM_USER0); memset(dd, 0, sizeof(*dd)); dd->ino = cpu_to_be64(inode->i_ino); dd->type = logfs_type(inode); logfs_set_name(dd, &dentry->d_name); kunmap_atomic(dd, KM_USER0); err = logfs_write_buf(dir, page, WF_LOCK); unlock_page(page); page_cache_release(page); if (!err) grow_dir(dir, index); return err; } /* FIXME: Is there a better return value? In most cases neither * the filesystem nor the directory are full. But we have had * too many collisions for this particular hash and no fallback. 
*/ return -ENOSPC; } static int __logfs_create(struct inode *dir, struct dentry *dentry, struct inode *inode, const char *dest, long destlen) { struct logfs_super *super = logfs_super(dir->i_sb); struct logfs_inode *li = logfs_inode(inode); struct logfs_transaction *ta; int ret; ta = kzalloc(sizeof(*ta), GFP_KERNEL); if (!ta) { inode->i_nlink--; iput(inode); return -ENOMEM; } ta->state = CREATE_1; ta->ino = inode->i_ino; mutex_lock(&super->s_dirop_mutex); logfs_add_transaction(inode, ta); if (dest) { /* symlink */ ret = logfs_inode_write(inode, dest, destlen, 0, WF_LOCK, NULL); if (!ret) ret = write_inode(inode); } else { /* creat/mkdir/mknod */ ret = write_inode(inode); } if (ret) { abort_transaction(inode, ta); li->li_flags |= LOGFS_IF_STILLBORN; /* FIXME: truncate symlink */ inode->i_nlink--; iput(inode); goto out; } ta->state = CREATE_2; logfs_add_transaction(dir, ta); ret = logfs_write_dir(dir, dentry, inode); /* sync directory */ if (!ret) ret = write_inode(dir); if (ret) { logfs_del_transaction(dir, ta); ta->state = CREATE_2; logfs_add_transaction(inode, ta); logfs_remove_inode(inode); iput(inode); goto out; } d_instantiate(dentry, inode); out: mutex_unlock(&super->s_dirop_mutex); return ret; } static int logfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) { struct inode *inode; /* * FIXME: why do we have to fill in S_IFDIR, while the mode is * correct for mknod, creat, etc.? Smells like the vfs *should* * do it for us but for some reason fails to do so. 
*/ inode = logfs_new_inode(dir, S_IFDIR | mode); if (IS_ERR(inode)) return PTR_ERR(inode); inode->i_op = &logfs_dir_iops; inode->i_fop = &logfs_dir_fops; return __logfs_create(dir, dentry, inode, NULL, 0); } static int logfs_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *nd) { struct inode *inode; inode = logfs_new_inode(dir, mode); if (IS_ERR(inode)) return PTR_ERR(inode); inode->i_op = &logfs_reg_iops; inode->i_fop = &logfs_reg_fops; inode->i_mapping->a_ops = &logfs_reg_aops; return __logfs_create(dir, dentry, inode, NULL, 0); } static int logfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev) { struct inode *inode; if (dentry->d_name.len > LOGFS_MAX_NAMELEN) return -ENAMETOOLONG; inode = logfs_new_inode(dir, mode); if (IS_ERR(inode)) return PTR_ERR(inode); init_special_inode(inode, mode, rdev); return __logfs_create(dir, dentry, inode, NULL, 0); } static int logfs_symlink(struct inode *dir, struct dentry *dentry, const char *target) { struct inode *inode; size_t destlen = strlen(target) + 1; if (destlen > dir->i_sb->s_blocksize) return -ENAMETOOLONG; inode = logfs_new_inode(dir, S_IFLNK | 0777); if (IS_ERR(inode)) return PTR_ERR(inode); inode->i_op = &logfs_symlink_iops; inode->i_mapping->a_ops = &logfs_reg_aops; return __logfs_create(dir, dentry, inode, target, destlen); } static int logfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { struct inode *inode = old_dentry->d_inode; if (inode->i_nlink >= LOGFS_LINK_MAX) return -EMLINK; inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; ihold(inode); inode->i_nlink++; mark_inode_dirty_sync(inode); return __logfs_create(dir, dentry, inode, NULL, 0); } static int logfs_get_dd(struct inode *dir, struct dentry *dentry, struct logfs_disk_dentry *dd, loff_t *pos) { struct page *page; void *map; page = logfs_get_dd_page(dir, dentry); if (IS_ERR(page)) return PTR_ERR(page); *pos = page->index; map = kmap_atomic(page, KM_USER0); 
memcpy(dd, map, sizeof(*dd)); kunmap_atomic(map, KM_USER0); page_cache_release(page); return 0; } static int logfs_delete_dd(struct inode *dir, loff_t pos) { /* * Getting called with pos somewhere beyond eof is either a goofup * within this file or means someone maliciously edited the * (crc-protected) journal. */ BUG_ON(beyond_eof(dir, pos)); dir->i_ctime = dir->i_mtime = CURRENT_TIME; log_dir(" Delete dentry (%lx, %llx)\n", dir->i_ino, pos); return logfs_delete(dir, pos, NULL); } /* * Cross-directory rename, target does not exist. Just a little nasty. * Create a new dentry in the target dir, then remove the old dentry, * all the while taking care to remember our operation in the journal. */ static int logfs_rename_cross(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { struct logfs_super *super = logfs_super(old_dir->i_sb); struct logfs_disk_dentry dd; struct logfs_transaction *ta; loff_t pos; int err; /* 1. locate source dd */ err = logfs_get_dd(old_dir, old_dentry, &dd, &pos); if (err) return err; ta = kzalloc(sizeof(*ta), GFP_KERNEL); if (!ta) return -ENOMEM; ta->state = CROSS_RENAME_1; ta->dir = old_dir->i_ino; ta->pos = pos; /* 2. write target dd */ mutex_lock(&super->s_dirop_mutex); logfs_add_transaction(new_dir, ta); err = logfs_write_dir(new_dir, new_dentry, old_dentry->d_inode); if (!err) err = write_inode(new_dir); if (err) { super->s_rename_dir = 0; super->s_rename_pos = 0; abort_transaction(new_dir, ta); goto out; } /* 3. 
remove source dd */ ta->state = CROSS_RENAME_2; logfs_add_transaction(old_dir, ta); err = logfs_delete_dd(old_dir, pos); if (!err) err = write_inode(old_dir); LOGFS_BUG_ON(err, old_dir->i_sb); out: mutex_unlock(&super->s_dirop_mutex); return err; } static int logfs_replace_inode(struct inode *dir, struct dentry *dentry, struct logfs_disk_dentry *dd, struct inode *inode) { loff_t pos; int err; err = logfs_get_dd(dir, dentry, dd, &pos); if (err) return err; dd->ino = cpu_to_be64(inode->i_ino); dd->type = logfs_type(inode); err = write_dir(dir, dd, pos); if (err) return err; log_dir("Replace dentry (%lx, %llx) %s -> %llx\n", dir->i_ino, pos, dd->name, be64_to_cpu(dd->ino)); return write_inode(dir); } /* Target dentry exists - the worst case. We need to attach the source * inode to the target dentry, then remove the orphaned target inode and * source dentry. */ static int logfs_rename_target(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { struct logfs_super *super = logfs_super(old_dir->i_sb); struct inode *old_inode = old_dentry->d_inode; struct inode *new_inode = new_dentry->d_inode; int isdir = S_ISDIR(old_inode->i_mode); struct logfs_disk_dentry dd; struct logfs_transaction *ta; loff_t pos; int err; BUG_ON(isdir != S_ISDIR(new_inode->i_mode)); if (isdir) { if (!logfs_empty_dir(new_inode)) return -ENOTEMPTY; } /* 1. locate source dd */ err = logfs_get_dd(old_dir, old_dentry, &dd, &pos); if (err) return err; ta = kzalloc(sizeof(*ta), GFP_KERNEL); if (!ta) return -ENOMEM; ta->state = TARGET_RENAME_1; ta->dir = old_dir->i_ino; ta->pos = pos; ta->ino = new_inode->i_ino; /* 2. attach source inode to target dd */ mutex_lock(&super->s_dirop_mutex); logfs_add_transaction(new_dir, ta); err = logfs_replace_inode(new_dir, new_dentry, &dd, old_inode); if (err) { super->s_rename_dir = 0; super->s_rename_pos = 0; super->s_victim_ino = 0; abort_transaction(new_dir, ta); goto out; } /* 3. 
remove source dd */ ta->state = TARGET_RENAME_2; logfs_add_transaction(old_dir, ta); err = logfs_delete_dd(old_dir, pos); if (!err) err = write_inode(old_dir); LOGFS_BUG_ON(err, old_dir->i_sb); /* 4. remove target inode */ ta->state = TARGET_RENAME_3; logfs_add_transaction(new_inode, ta); err = logfs_remove_inode(new_inode); out: mutex_unlock(&super->s_dirop_mutex); return err; } static int logfs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { if (new_dentry->d_inode) return logfs_rename_target(old_dir, old_dentry, new_dir, new_dentry); return logfs_rename_cross(old_dir, old_dentry, new_dir, new_dentry); } /* No locking done here, as this is called before .get_sb() returns. */ int logfs_replay_journal(struct super_block *sb) { struct logfs_super *super = logfs_super(sb); struct inode *inode; u64 ino, pos; int err; if (super->s_victim_ino) { /* delete victim inode */ ino = super->s_victim_ino; printk(KERN_INFO"LogFS: delete unmapped inode #%llx\n", ino); inode = logfs_iget(sb, ino); if (IS_ERR(inode)) goto fail; LOGFS_BUG_ON(i_size_read(inode) > 0, sb); super->s_victim_ino = 0; err = logfs_remove_inode(inode); iput(inode); if (err) { super->s_victim_ino = ino; goto fail; } } if (super->s_rename_dir) { /* delete old dd from rename */ ino = super->s_rename_dir; pos = super->s_rename_pos; printk(KERN_INFO"LogFS: delete unbacked dentry (%llx, %llx)\n", ino, pos); inode = logfs_iget(sb, ino); if (IS_ERR(inode)) goto fail; super->s_rename_dir = 0; super->s_rename_pos = 0; err = logfs_delete_dd(inode, pos); iput(inode); if (err) { super->s_rename_dir = ino; super->s_rename_pos = pos; goto fail; } } return 0; fail: LOGFS_BUG(sb); return -EIO; } const struct inode_operations logfs_symlink_iops = { .readlink = generic_readlink, .follow_link = page_follow_link_light, }; const struct inode_operations logfs_dir_iops = { .create = logfs_create, .link = logfs_link, .lookup = logfs_lookup, .mkdir = logfs_mkdir, .mknod = 
logfs_mknod, .rename = logfs_rename, .rmdir = logfs_rmdir, .symlink = logfs_symlink, .unlink = logfs_unlink, }; const struct file_operations logfs_dir_fops = { .fsync = logfs_fsync, .unlocked_ioctl = logfs_ioctl, .readdir = logfs_readdir, .read = generic_read_dir, .llseek = default_llseek, };
gpl-2.0
Cheshkin/sprout_cm11_mt6589_kernel
net/core/ethtool.c
614
36845
/* * net/core/ethtool.c - Ethtool ioctl handler * Copyright (c) 2003 Matthew Wilcox <matthew@wil.cx> * * This file is where we call all the ethtool_ops commands to get * the information ethtool needs. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/module.h> #include <linux/types.h> #include <linux/capability.h> #include <linux/errno.h> #include <linux/ethtool.h> #include <linux/netdevice.h> #include <linux/bitops.h> #include <linux/uaccess.h> #include <linux/vmalloc.h> #include <linux/slab.h> #include <linux/rtnetlink.h> #include <linux/sched.h> /* * Some useful ethtool_ops methods that're device independent. * If we find that all drivers want to do the same thing here, * we can turn these into dev_() function calls. */ u32 ethtool_op_get_link(struct net_device *dev) { return netif_carrier_ok(dev) ? 
1 : 0; } EXPORT_SYMBOL(ethtool_op_get_link); /* Handlers for each ethtool command */ #define ETHTOOL_DEV_FEATURE_WORDS ((NETDEV_FEATURE_COUNT + 31) / 32) static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] = { [NETIF_F_SG_BIT] = "tx-scatter-gather", [NETIF_F_IP_CSUM_BIT] = "tx-checksum-ipv4", [NETIF_F_HW_CSUM_BIT] = "tx-checksum-ip-generic", [NETIF_F_IPV6_CSUM_BIT] = "tx-checksum-ipv6", [NETIF_F_HIGHDMA_BIT] = "highdma", [NETIF_F_FRAGLIST_BIT] = "tx-scatter-gather-fraglist", [NETIF_F_HW_VLAN_TX_BIT] = "tx-vlan-hw-insert", [NETIF_F_HW_VLAN_RX_BIT] = "rx-vlan-hw-parse", [NETIF_F_HW_VLAN_FILTER_BIT] = "rx-vlan-filter", [NETIF_F_VLAN_CHALLENGED_BIT] = "vlan-challenged", [NETIF_F_GSO_BIT] = "tx-generic-segmentation", [NETIF_F_LLTX_BIT] = "tx-lockless", [NETIF_F_NETNS_LOCAL_BIT] = "netns-local", [NETIF_F_GRO_BIT] = "rx-gro", [NETIF_F_LRO_BIT] = "rx-lro", [NETIF_F_TSO_BIT] = "tx-tcp-segmentation", [NETIF_F_UFO_BIT] = "tx-udp-fragmentation", [NETIF_F_GSO_ROBUST_BIT] = "tx-gso-robust", [NETIF_F_TSO_ECN_BIT] = "tx-tcp-ecn-segmentation", [NETIF_F_TSO6_BIT] = "tx-tcp6-segmentation", [NETIF_F_FSO_BIT] = "tx-fcoe-segmentation", [NETIF_F_FCOE_CRC_BIT] = "tx-checksum-fcoe-crc", [NETIF_F_SCTP_CSUM_BIT] = "tx-checksum-sctp", [NETIF_F_FCOE_MTU_BIT] = "fcoe-mtu", [NETIF_F_NTUPLE_BIT] = "rx-ntuple-filter", [NETIF_F_RXHASH_BIT] = "rx-hashing", [NETIF_F_RXCSUM_BIT] = "rx-checksum", [NETIF_F_NOCACHE_COPY_BIT] = "tx-nocache-copy", [NETIF_F_LOOPBACK_BIT] = "loopback", [NETIF_F_RXFCS_BIT] = "rx-fcs", [NETIF_F_RXALL_BIT] = "rx-all", }; static int ethtool_get_features(struct net_device *dev, void __user *useraddr) { struct ethtool_gfeatures cmd = { .cmd = ETHTOOL_GFEATURES, .size = ETHTOOL_DEV_FEATURE_WORDS, }; struct ethtool_get_features_block features[ETHTOOL_DEV_FEATURE_WORDS]; u32 __user *sizeaddr; u32 copy_size; int i; /* in case feature bits run out again */ BUILD_BUG_ON(ETHTOOL_DEV_FEATURE_WORDS * sizeof(u32) > sizeof(netdev_features_t)); for (i = 0; i < 
ETHTOOL_DEV_FEATURE_WORDS; ++i) { features[i].available = (u32)(dev->hw_features >> (32 * i)); features[i].requested = (u32)(dev->wanted_features >> (32 * i)); features[i].active = (u32)(dev->features >> (32 * i)); features[i].never_changed = (u32)(NETIF_F_NEVER_CHANGE >> (32 * i)); } sizeaddr = useraddr + offsetof(struct ethtool_gfeatures, size); if (get_user(copy_size, sizeaddr)) return -EFAULT; if (copy_size > ETHTOOL_DEV_FEATURE_WORDS) copy_size = ETHTOOL_DEV_FEATURE_WORDS; if (copy_to_user(useraddr, &cmd, sizeof(cmd))) return -EFAULT; useraddr += sizeof(cmd); if (copy_to_user(useraddr, features, copy_size * sizeof(*features))) return -EFAULT; return 0; } static int ethtool_set_features(struct net_device *dev, void __user *useraddr) { struct ethtool_sfeatures cmd; struct ethtool_set_features_block features[ETHTOOL_DEV_FEATURE_WORDS]; netdev_features_t wanted = 0, valid = 0; int i, ret = 0; if (copy_from_user(&cmd, useraddr, sizeof(cmd))) return -EFAULT; useraddr += sizeof(cmd); if (cmd.size != ETHTOOL_DEV_FEATURE_WORDS) return -EINVAL; if (copy_from_user(features, useraddr, sizeof(features))) return -EFAULT; for (i = 0; i < ETHTOOL_DEV_FEATURE_WORDS; ++i) { valid |= (netdev_features_t)features[i].valid << (32 * i); wanted |= (netdev_features_t)features[i].requested << (32 * i); } if (valid & ~NETIF_F_ETHTOOL_BITS) return -EINVAL; if (valid & ~dev->hw_features) { valid &= dev->hw_features; ret |= ETHTOOL_F_UNSUPPORTED; } dev->wanted_features &= ~valid; dev->wanted_features |= wanted & valid; __netdev_update_features(dev); if ((dev->wanted_features ^ dev->features) & valid) ret |= ETHTOOL_F_WISH; return ret; } static int __ethtool_get_sset_count(struct net_device *dev, int sset) { const struct ethtool_ops *ops = dev->ethtool_ops; if (sset == ETH_SS_FEATURES) return ARRAY_SIZE(netdev_features_strings); if (ops && ops->get_sset_count && ops->get_strings) return ops->get_sset_count(dev, sset); else return -EOPNOTSUPP; } static void __ethtool_get_strings(struct 
net_device *dev, u32 stringset, u8 *data) { const struct ethtool_ops *ops = dev->ethtool_ops; if (stringset == ETH_SS_FEATURES) memcpy(data, netdev_features_strings, sizeof(netdev_features_strings)); else /* ops->get_strings is valid because checked earlier */ ops->get_strings(dev, stringset, data); } static netdev_features_t ethtool_get_feature_mask(u32 eth_cmd) { /* feature masks of legacy discrete ethtool ops */ switch (eth_cmd) { case ETHTOOL_GTXCSUM: case ETHTOOL_STXCSUM: return NETIF_F_ALL_CSUM | NETIF_F_SCTP_CSUM; case ETHTOOL_GRXCSUM: case ETHTOOL_SRXCSUM: return NETIF_F_RXCSUM; case ETHTOOL_GSG: case ETHTOOL_SSG: return NETIF_F_SG; case ETHTOOL_GTSO: case ETHTOOL_STSO: return NETIF_F_ALL_TSO; case ETHTOOL_GUFO: case ETHTOOL_SUFO: return NETIF_F_UFO; case ETHTOOL_GGSO: case ETHTOOL_SGSO: return NETIF_F_GSO; case ETHTOOL_GGRO: case ETHTOOL_SGRO: return NETIF_F_GRO; default: BUG(); } } static int ethtool_get_one_feature(struct net_device *dev, char __user *useraddr, u32 ethcmd) { netdev_features_t mask = ethtool_get_feature_mask(ethcmd); struct ethtool_value edata = { .cmd = ethcmd, .data = !!(dev->features & mask), }; if (copy_to_user(useraddr, &edata, sizeof(edata))) return -EFAULT; return 0; } static int ethtool_set_one_feature(struct net_device *dev, void __user *useraddr, u32 ethcmd) { struct ethtool_value edata; netdev_features_t mask; if (copy_from_user(&edata, useraddr, sizeof(edata))) return -EFAULT; mask = ethtool_get_feature_mask(ethcmd); mask &= dev->hw_features; if (!mask) return -EOPNOTSUPP; if (edata.data) dev->wanted_features |= mask; else dev->wanted_features &= ~mask; __netdev_update_features(dev); return 0; } #define ETH_ALL_FLAGS (ETH_FLAG_LRO | ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN | \ ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH) #define ETH_ALL_FEATURES (NETIF_F_LRO | NETIF_F_HW_VLAN_RX | \ NETIF_F_HW_VLAN_TX | NETIF_F_NTUPLE | NETIF_F_RXHASH) static u32 __ethtool_get_flags(struct net_device *dev) { u32 flags = 0; if (dev->features & NETIF_F_LRO) 
flags |= ETH_FLAG_LRO; if (dev->features & NETIF_F_HW_VLAN_RX) flags |= ETH_FLAG_RXVLAN; if (dev->features & NETIF_F_HW_VLAN_TX) flags |= ETH_FLAG_TXVLAN; if (dev->features & NETIF_F_NTUPLE) flags |= ETH_FLAG_NTUPLE; if (dev->features & NETIF_F_RXHASH) flags |= ETH_FLAG_RXHASH; return flags; } static int __ethtool_set_flags(struct net_device *dev, u32 data) { netdev_features_t features = 0, changed; if (data & ~ETH_ALL_FLAGS) return -EINVAL; if (data & ETH_FLAG_LRO) features |= NETIF_F_LRO; if (data & ETH_FLAG_RXVLAN) features |= NETIF_F_HW_VLAN_RX; if (data & ETH_FLAG_TXVLAN) features |= NETIF_F_HW_VLAN_TX; if (data & ETH_FLAG_NTUPLE) features |= NETIF_F_NTUPLE; if (data & ETH_FLAG_RXHASH) features |= NETIF_F_RXHASH; /* allow changing only bits set in hw_features */ changed = (features ^ dev->features) & ETH_ALL_FEATURES; if (changed & ~dev->hw_features) return (changed & dev->hw_features) ? -EINVAL : -EOPNOTSUPP; dev->wanted_features = (dev->wanted_features & ~changed) | (features & changed); __netdev_update_features(dev); return 0; } int __ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { ASSERT_RTNL(); if (!dev->ethtool_ops || !dev->ethtool_ops->get_settings) return -EOPNOTSUPP; memset(cmd, 0, sizeof(struct ethtool_cmd)); cmd->cmd = ETHTOOL_GSET; return dev->ethtool_ops->get_settings(dev, cmd); } EXPORT_SYMBOL(__ethtool_get_settings); static int ethtool_get_settings(struct net_device *dev, void __user *useraddr) { int err; struct ethtool_cmd cmd; err = __ethtool_get_settings(dev, &cmd); if (err < 0) return err; if (copy_to_user(useraddr, &cmd, sizeof(cmd))) return -EFAULT; return 0; } static int ethtool_set_settings(struct net_device *dev, void __user *useraddr) { struct ethtool_cmd cmd; if (!dev->ethtool_ops->set_settings) return -EOPNOTSUPP; if (copy_from_user(&cmd, useraddr, sizeof(cmd))) return -EFAULT; return dev->ethtool_ops->set_settings(dev, &cmd); } static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev, void 
__user *useraddr) { struct ethtool_drvinfo info; const struct ethtool_ops *ops = dev->ethtool_ops; memset(&info, 0, sizeof(info)); info.cmd = ETHTOOL_GDRVINFO; if (ops && ops->get_drvinfo) { ops->get_drvinfo(dev, &info); } else if (dev->dev.parent && dev->dev.parent->driver) { strlcpy(info.bus_info, dev_name(dev->dev.parent), sizeof(info.bus_info)); strlcpy(info.driver, dev->dev.parent->driver->name, sizeof(info.driver)); } else { return -EOPNOTSUPP; } /* * this method of obtaining string set info is deprecated; * Use ETHTOOL_GSSET_INFO instead. */ if (ops && ops->get_sset_count) { int rc; rc = ops->get_sset_count(dev, ETH_SS_TEST); if (rc >= 0) info.testinfo_len = rc; rc = ops->get_sset_count(dev, ETH_SS_STATS); if (rc >= 0) info.n_stats = rc; rc = ops->get_sset_count(dev, ETH_SS_PRIV_FLAGS); if (rc >= 0) info.n_priv_flags = rc; } if (ops && ops->get_regs_len) info.regdump_len = ops->get_regs_len(dev); if (ops && ops->get_eeprom_len) info.eedump_len = ops->get_eeprom_len(dev); if (copy_to_user(useraddr, &info, sizeof(info))) return -EFAULT; return 0; } static noinline_for_stack int ethtool_get_sset_info(struct net_device *dev, void __user *useraddr) { struct ethtool_sset_info info; u64 sset_mask; int i, idx = 0, n_bits = 0, ret, rc; u32 *info_buf = NULL; if (copy_from_user(&info, useraddr, sizeof(info))) return -EFAULT; /* store copy of mask, because we zero struct later on */ sset_mask = info.sset_mask; if (!sset_mask) return 0; /* calculate size of return buffer */ n_bits = hweight64(sset_mask); memset(&info, 0, sizeof(info)); info.cmd = ETHTOOL_GSSET_INFO; info_buf = kzalloc(n_bits * sizeof(u32), GFP_USER); if (!info_buf) return -ENOMEM; /* * fill return buffer based on input bitmask and successful * get_sset_count return */ for (i = 0; i < 64; i++) { if (!(sset_mask & (1ULL << i))) continue; rc = __ethtool_get_sset_count(dev, i); if (rc >= 0) { info.sset_mask |= (1ULL << i); info_buf[idx++] = rc; } } ret = -EFAULT; if (copy_to_user(useraddr, &info, 
sizeof(info))) goto out; useraddr += offsetof(struct ethtool_sset_info, data); if (copy_to_user(useraddr, info_buf, idx * sizeof(u32))) goto out; ret = 0; out: kfree(info_buf); return ret; } static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev, u32 cmd, void __user *useraddr) { struct ethtool_rxnfc info; size_t info_size = sizeof(info); int rc; if (!dev->ethtool_ops->set_rxnfc) return -EOPNOTSUPP; /* struct ethtool_rxnfc was originally defined for * ETHTOOL_{G,S}RXFH with only the cmd, flow_type and data * members. User-space might still be using that * definition. */ if (cmd == ETHTOOL_SRXFH) info_size = (offsetof(struct ethtool_rxnfc, data) + sizeof(info.data)); if (copy_from_user(&info, useraddr, info_size)) return -EFAULT; rc = dev->ethtool_ops->set_rxnfc(dev, &info); if (rc) return rc; if (cmd == ETHTOOL_SRXCLSRLINS && copy_to_user(useraddr, &info, info_size)) return -EFAULT; return 0; } static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev, u32 cmd, void __user *useraddr) { struct ethtool_rxnfc info; size_t info_size = sizeof(info); const struct ethtool_ops *ops = dev->ethtool_ops; int ret; void *rule_buf = NULL; if (!ops->get_rxnfc) return -EOPNOTSUPP; /* struct ethtool_rxnfc was originally defined for * ETHTOOL_{G,S}RXFH with only the cmd, flow_type and data * members. User-space might still be using that * definition. 
*/ if (cmd == ETHTOOL_GRXFH) info_size = (offsetof(struct ethtool_rxnfc, data) + sizeof(info.data)); if (copy_from_user(&info, useraddr, info_size)) return -EFAULT; if (info.cmd == ETHTOOL_GRXCLSRLALL) { if (info.rule_cnt > 0) { if (info.rule_cnt <= KMALLOC_MAX_SIZE / sizeof(u32)) rule_buf = kzalloc(info.rule_cnt * sizeof(u32), GFP_USER); if (!rule_buf) return -ENOMEM; } } ret = ops->get_rxnfc(dev, &info, rule_buf); if (ret < 0) goto err_out; ret = -EFAULT; if (copy_to_user(useraddr, &info, info_size)) goto err_out; if (rule_buf) { useraddr += offsetof(struct ethtool_rxnfc, rule_locs); if (copy_to_user(useraddr, rule_buf, info.rule_cnt * sizeof(u32))) goto err_out; } ret = 0; err_out: kfree(rule_buf); return ret; } static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev, void __user *useraddr) { u32 user_size, dev_size; u32 *indir; int ret; if (!dev->ethtool_ops->get_rxfh_indir_size || !dev->ethtool_ops->get_rxfh_indir) return -EOPNOTSUPP; dev_size = dev->ethtool_ops->get_rxfh_indir_size(dev); if (dev_size == 0) return -EOPNOTSUPP; if (copy_from_user(&user_size, useraddr + offsetof(struct ethtool_rxfh_indir, size), sizeof(user_size))) return -EFAULT; if (copy_to_user(useraddr + offsetof(struct ethtool_rxfh_indir, size), &dev_size, sizeof(dev_size))) return -EFAULT; /* If the user buffer size is 0, this is just a query for the * device table size. Otherwise, if it's smaller than the * device table size it's an error. */ if (user_size < dev_size) return user_size == 0 ? 
0 : -EINVAL; indir = kcalloc(dev_size, sizeof(indir[0]), GFP_USER); if (!indir) return -ENOMEM; ret = dev->ethtool_ops->get_rxfh_indir(dev, indir); if (ret) goto out; if (copy_to_user(useraddr + offsetof(struct ethtool_rxfh_indir, ring_index[0]), indir, dev_size * sizeof(indir[0]))) ret = -EFAULT; out: kfree(indir); return ret; } static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev, void __user *useraddr) { struct ethtool_rxnfc rx_rings; u32 user_size, dev_size, i; u32 *indir; int ret; if (!dev->ethtool_ops->get_rxfh_indir_size || !dev->ethtool_ops->set_rxfh_indir || !dev->ethtool_ops->get_rxnfc) return -EOPNOTSUPP; dev_size = dev->ethtool_ops->get_rxfh_indir_size(dev); if (dev_size == 0) return -EOPNOTSUPP; if (copy_from_user(&user_size, useraddr + offsetof(struct ethtool_rxfh_indir, size), sizeof(user_size))) return -EFAULT; if (user_size != 0 && user_size != dev_size) return -EINVAL; indir = kcalloc(dev_size, sizeof(indir[0]), GFP_USER); if (!indir) return -ENOMEM; rx_rings.cmd = ETHTOOL_GRXRINGS; ret = dev->ethtool_ops->get_rxnfc(dev, &rx_rings, NULL); if (ret) goto out; if (user_size == 0) { for (i = 0; i < dev_size; i++) indir[i] = ethtool_rxfh_indir_default(i, rx_rings.data); } else { if (copy_from_user(indir, useraddr + offsetof(struct ethtool_rxfh_indir, ring_index[0]), dev_size * sizeof(indir[0]))) { ret = -EFAULT; goto out; } /* Validate ring indices */ for (i = 0; i < dev_size; i++) { if (indir[i] >= rx_rings.data) { ret = -EINVAL; goto out; } } } ret = dev->ethtool_ops->set_rxfh_indir(dev, indir); out: kfree(indir); return ret; } static int ethtool_get_regs(struct net_device *dev, char __user *useraddr) { struct ethtool_regs regs; const struct ethtool_ops *ops = dev->ethtool_ops; void *regbuf; int reglen, ret; if (!ops->get_regs || !ops->get_regs_len) return -EOPNOTSUPP; if (copy_from_user(&regs, useraddr, sizeof(regs))) return -EFAULT; reglen = ops->get_regs_len(dev); if (regs.len > reglen) regs.len = reglen; regbuf = 
vzalloc(reglen); if (reglen && !regbuf) return -ENOMEM; ops->get_regs(dev, &regs, regbuf); ret = -EFAULT; if (copy_to_user(useraddr, &regs, sizeof(regs))) goto out; useraddr += offsetof(struct ethtool_regs, data); if (regbuf && copy_to_user(useraddr, regbuf, regs.len)) goto out; ret = 0; out: vfree(regbuf); return ret; } static int ethtool_reset(struct net_device *dev, char __user *useraddr) { struct ethtool_value reset; int ret; if (!dev->ethtool_ops->reset) return -EOPNOTSUPP; if (copy_from_user(&reset, useraddr, sizeof(reset))) return -EFAULT; ret = dev->ethtool_ops->reset(dev, &reset.data); if (ret) return ret; if (copy_to_user(useraddr, &reset, sizeof(reset))) return -EFAULT; return 0; } static int ethtool_get_wol(struct net_device *dev, char __user *useraddr) { struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; if (!dev->ethtool_ops->get_wol) return -EOPNOTSUPP; dev->ethtool_ops->get_wol(dev, &wol); if (copy_to_user(useraddr, &wol, sizeof(wol))) return -EFAULT; return 0; } static int ethtool_set_wol(struct net_device *dev, char __user *useraddr) { struct ethtool_wolinfo wol; if (!dev->ethtool_ops->set_wol) return -EOPNOTSUPP; if (copy_from_user(&wol, useraddr, sizeof(wol))) return -EFAULT; return dev->ethtool_ops->set_wol(dev, &wol); } static int ethtool_nway_reset(struct net_device *dev) { if (!dev->ethtool_ops->nway_reset) return -EOPNOTSUPP; return dev->ethtool_ops->nway_reset(dev); } static int ethtool_get_link(struct net_device *dev, char __user *useraddr) { struct ethtool_value edata = { .cmd = ETHTOOL_GLINK }; if (!dev->ethtool_ops->get_link) return -EOPNOTSUPP; edata.data = netif_running(dev) && dev->ethtool_ops->get_link(dev); if (copy_to_user(useraddr, &edata, sizeof(edata))) return -EFAULT; return 0; } static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr) { struct ethtool_eeprom eeprom; const struct ethtool_ops *ops = dev->ethtool_ops; void __user *userbuf = useraddr + sizeof(eeprom); u32 bytes_remaining; u8 *data; int ret 
= 0; if (!ops->get_eeprom || !ops->get_eeprom_len) return -EOPNOTSUPP; if (copy_from_user(&eeprom, useraddr, sizeof(eeprom))) return -EFAULT; /* Check for wrap and zero */ if (eeprom.offset + eeprom.len <= eeprom.offset) return -EINVAL; /* Check for exceeding total eeprom len */ if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev)) return -EINVAL; data = kmalloc(PAGE_SIZE, GFP_USER); if (!data) return -ENOMEM; bytes_remaining = eeprom.len; while (bytes_remaining > 0) { eeprom.len = min(bytes_remaining, (u32)PAGE_SIZE); ret = ops->get_eeprom(dev, &eeprom, data); if (ret) break; if (copy_to_user(userbuf, data, eeprom.len)) { ret = -EFAULT; break; } userbuf += eeprom.len; eeprom.offset += eeprom.len; bytes_remaining -= eeprom.len; } eeprom.len = userbuf - (useraddr + sizeof(eeprom)); eeprom.offset -= eeprom.len; if (copy_to_user(useraddr, &eeprom, sizeof(eeprom))) ret = -EFAULT; kfree(data); return ret; } static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr) { struct ethtool_eeprom eeprom; const struct ethtool_ops *ops = dev->ethtool_ops; void __user *userbuf = useraddr + sizeof(eeprom); u32 bytes_remaining; u8 *data; int ret = 0; if (!ops->set_eeprom || !ops->get_eeprom_len) return -EOPNOTSUPP; if (copy_from_user(&eeprom, useraddr, sizeof(eeprom))) return -EFAULT; /* Check for wrap and zero */ if (eeprom.offset + eeprom.len <= eeprom.offset) return -EINVAL; /* Check for exceeding total eeprom len */ if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev)) return -EINVAL; data = kmalloc(PAGE_SIZE, GFP_USER); if (!data) return -ENOMEM; bytes_remaining = eeprom.len; while (bytes_remaining > 0) { eeprom.len = min(bytes_remaining, (u32)PAGE_SIZE); if (copy_from_user(data, userbuf, eeprom.len)) { ret = -EFAULT; break; } ret = ops->set_eeprom(dev, &eeprom, data); if (ret) break; userbuf += eeprom.len; eeprom.offset += eeprom.len; bytes_remaining -= eeprom.len; } kfree(data); return ret; } static noinline_for_stack int 
ethtool_get_coalesce(struct net_device *dev, void __user *useraddr) { struct ethtool_coalesce coalesce = { .cmd = ETHTOOL_GCOALESCE }; if (!dev->ethtool_ops->get_coalesce) return -EOPNOTSUPP; dev->ethtool_ops->get_coalesce(dev, &coalesce); if (copy_to_user(useraddr, &coalesce, sizeof(coalesce))) return -EFAULT; return 0; } static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr) { struct ethtool_coalesce coalesce; if (!dev->ethtool_ops->set_coalesce) return -EOPNOTSUPP; if (copy_from_user(&coalesce, useraddr, sizeof(coalesce))) return -EFAULT; return dev->ethtool_ops->set_coalesce(dev, &coalesce); } static int ethtool_get_ringparam(struct net_device *dev, void __user *useraddr) { struct ethtool_ringparam ringparam = { .cmd = ETHTOOL_GRINGPARAM }; if (!dev->ethtool_ops->get_ringparam) return -EOPNOTSUPP; dev->ethtool_ops->get_ringparam(dev, &ringparam); if (copy_to_user(useraddr, &ringparam, sizeof(ringparam))) return -EFAULT; return 0; } static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr) { struct ethtool_ringparam ringparam; if (!dev->ethtool_ops->set_ringparam) return -EOPNOTSUPP; if (copy_from_user(&ringparam, useraddr, sizeof(ringparam))) return -EFAULT; return dev->ethtool_ops->set_ringparam(dev, &ringparam); } static noinline_for_stack int ethtool_get_channels(struct net_device *dev, void __user *useraddr) { struct ethtool_channels channels = { .cmd = ETHTOOL_GCHANNELS }; if (!dev->ethtool_ops->get_channels) return -EOPNOTSUPP; dev->ethtool_ops->get_channels(dev, &channels); if (copy_to_user(useraddr, &channels, sizeof(channels))) return -EFAULT; return 0; } static noinline_for_stack int ethtool_set_channels(struct net_device *dev, void __user *useraddr) { struct ethtool_channels channels; if (!dev->ethtool_ops->set_channels) return -EOPNOTSUPP; if (copy_from_user(&channels, useraddr, sizeof(channels))) return -EFAULT; return dev->ethtool_ops->set_channels(dev, &channels); } static int 
ethtool_get_pauseparam(struct net_device *dev, void __user *useraddr) { struct ethtool_pauseparam pauseparam = { ETHTOOL_GPAUSEPARAM }; if (!dev->ethtool_ops->get_pauseparam) return -EOPNOTSUPP; dev->ethtool_ops->get_pauseparam(dev, &pauseparam); if (copy_to_user(useraddr, &pauseparam, sizeof(pauseparam))) return -EFAULT; return 0; } static int ethtool_set_pauseparam(struct net_device *dev, void __user *useraddr) { struct ethtool_pauseparam pauseparam; if (!dev->ethtool_ops->set_pauseparam) return -EOPNOTSUPP; if (copy_from_user(&pauseparam, useraddr, sizeof(pauseparam))) return -EFAULT; return dev->ethtool_ops->set_pauseparam(dev, &pauseparam); } static int ethtool_self_test(struct net_device *dev, char __user *useraddr) { struct ethtool_test test; const struct ethtool_ops *ops = dev->ethtool_ops; u64 *data; int ret, test_len; if (!ops->self_test || !ops->get_sset_count) return -EOPNOTSUPP; test_len = ops->get_sset_count(dev, ETH_SS_TEST); if (test_len < 0) return test_len; WARN_ON(test_len == 0); if (copy_from_user(&test, useraddr, sizeof(test))) return -EFAULT; test.len = test_len; data = kmalloc(test_len * sizeof(u64), GFP_USER); if (!data) return -ENOMEM; ops->self_test(dev, &test, data); ret = -EFAULT; if (copy_to_user(useraddr, &test, sizeof(test))) goto out; useraddr += sizeof(test); if (copy_to_user(useraddr, data, test.len * sizeof(u64))) goto out; ret = 0; out: kfree(data); return ret; } static int ethtool_get_strings(struct net_device *dev, void __user *useraddr) { struct ethtool_gstrings gstrings; u8 *data; int ret; if (copy_from_user(&gstrings, useraddr, sizeof(gstrings))) return -EFAULT; ret = __ethtool_get_sset_count(dev, gstrings.string_set); if (ret < 0) return ret; gstrings.len = ret; data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER); if (!data) return -ENOMEM; __ethtool_get_strings(dev, gstrings.string_set, data); ret = -EFAULT; if (copy_to_user(useraddr, &gstrings, sizeof(gstrings))) goto out; useraddr += sizeof(gstrings); if 
(copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN)) goto out; ret = 0; out: kfree(data); return ret; } static int ethtool_phys_id(struct net_device *dev, void __user *useraddr) { struct ethtool_value id; static bool busy; int rc; if (!dev->ethtool_ops->set_phys_id) return -EOPNOTSUPP; if (busy) return -EBUSY; if (copy_from_user(&id, useraddr, sizeof(id))) return -EFAULT; rc = dev->ethtool_ops->set_phys_id(dev, ETHTOOL_ID_ACTIVE); if (rc < 0) return rc; /* Drop the RTNL lock while waiting, but prevent reentry or * removal of the device. */ busy = true; dev_hold(dev); rtnl_unlock(); if (rc == 0) { /* Driver will handle this itself */ schedule_timeout_interruptible( id.data ? (id.data * HZ) : MAX_SCHEDULE_TIMEOUT); } else { /* Driver expects to be called at twice the frequency in rc */ int n = rc * 2, i, interval = HZ / n; /* Count down seconds */ do { /* Count down iterations per second */ i = n; do { rtnl_lock(); rc = dev->ethtool_ops->set_phys_id(dev, (i & 1) ? ETHTOOL_ID_OFF : ETHTOOL_ID_ON); rtnl_unlock(); if (rc) break; schedule_timeout_interruptible(interval); } while (!signal_pending(current) && --i != 0); } while (!signal_pending(current) && (id.data == 0 || --id.data != 0)); } rtnl_lock(); dev_put(dev); busy = false; (void)dev->ethtool_ops->set_phys_id(dev, ETHTOOL_ID_INACTIVE); return rc; } static int ethtool_get_stats(struct net_device *dev, void __user *useraddr) { struct ethtool_stats stats; const struct ethtool_ops *ops = dev->ethtool_ops; u64 *data; int ret, n_stats; if (!ops->get_ethtool_stats || !ops->get_sset_count) return -EOPNOTSUPP; n_stats = ops->get_sset_count(dev, ETH_SS_STATS); if (n_stats < 0) return n_stats; WARN_ON(n_stats == 0); if (copy_from_user(&stats, useraddr, sizeof(stats))) return -EFAULT; stats.n_stats = n_stats; data = kmalloc(n_stats * sizeof(u64), GFP_USER); if (!data) return -ENOMEM; ops->get_ethtool_stats(dev, &stats, data); ret = -EFAULT; if (copy_to_user(useraddr, &stats, sizeof(stats))) goto out; useraddr += 
sizeof(stats); if (copy_to_user(useraddr, data, stats.n_stats * sizeof(u64))) goto out; ret = 0; out: kfree(data); return ret; } static int ethtool_get_perm_addr(struct net_device *dev, void __user *useraddr) { struct ethtool_perm_addr epaddr; if (copy_from_user(&epaddr, useraddr, sizeof(epaddr))) return -EFAULT; if (epaddr.size < dev->addr_len) return -ETOOSMALL; epaddr.size = dev->addr_len; if (copy_to_user(useraddr, &epaddr, sizeof(epaddr))) return -EFAULT; useraddr += sizeof(epaddr); if (copy_to_user(useraddr, dev->perm_addr, epaddr.size)) return -EFAULT; return 0; } static int ethtool_get_value(struct net_device *dev, char __user *useraddr, u32 cmd, u32 (*actor)(struct net_device *)) { struct ethtool_value edata = { .cmd = cmd }; if (!actor) return -EOPNOTSUPP; edata.data = actor(dev); if (copy_to_user(useraddr, &edata, sizeof(edata))) return -EFAULT; return 0; } static int ethtool_set_value_void(struct net_device *dev, char __user *useraddr, void (*actor)(struct net_device *, u32)) { struct ethtool_value edata; if (!actor) return -EOPNOTSUPP; if (copy_from_user(&edata, useraddr, sizeof(edata))) return -EFAULT; actor(dev, edata.data); return 0; } static int ethtool_set_value(struct net_device *dev, char __user *useraddr, int (*actor)(struct net_device *, u32)) { struct ethtool_value edata; if (!actor) return -EOPNOTSUPP; if (copy_from_user(&edata, useraddr, sizeof(edata))) return -EFAULT; return actor(dev, edata.data); } static noinline_for_stack int ethtool_flash_device(struct net_device *dev, char __user *useraddr) { struct ethtool_flash efl; if (copy_from_user(&efl, useraddr, sizeof(efl))) return -EFAULT; if (!dev->ethtool_ops->flash_device) return -EOPNOTSUPP; efl.data[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0; return dev->ethtool_ops->flash_device(dev, &efl); } static int ethtool_set_dump(struct net_device *dev, void __user *useraddr) { struct ethtool_dump dump; if (!dev->ethtool_ops->set_dump) return -EOPNOTSUPP; if (copy_from_user(&dump, useraddr, 
sizeof(dump))) return -EFAULT; return dev->ethtool_ops->set_dump(dev, &dump); } static int ethtool_get_dump_flag(struct net_device *dev, void __user *useraddr) { int ret; struct ethtool_dump dump; const struct ethtool_ops *ops = dev->ethtool_ops; if (!dev->ethtool_ops->get_dump_flag) return -EOPNOTSUPP; if (copy_from_user(&dump, useraddr, sizeof(dump))) return -EFAULT; ret = ops->get_dump_flag(dev, &dump); if (ret) return ret; if (copy_to_user(useraddr, &dump, sizeof(dump))) return -EFAULT; return 0; } static int ethtool_get_dump_data(struct net_device *dev, void __user *useraddr) { int ret; __u32 len; struct ethtool_dump dump, tmp; const struct ethtool_ops *ops = dev->ethtool_ops; void *data = NULL; if (!dev->ethtool_ops->get_dump_data || !dev->ethtool_ops->get_dump_flag) return -EOPNOTSUPP; if (copy_from_user(&dump, useraddr, sizeof(dump))) return -EFAULT; memset(&tmp, 0, sizeof(tmp)); tmp.cmd = ETHTOOL_GET_DUMP_FLAG; ret = ops->get_dump_flag(dev, &tmp); if (ret) return ret; len = (tmp.len > dump.len) ? dump.len : tmp.len; if (!len) return -EFAULT; data = vzalloc(tmp.len); if (!data) return -ENOMEM; ret = ops->get_dump_data(dev, &dump, data); if (ret) goto out; if (copy_to_user(useraddr, &dump, sizeof(dump))) { ret = -EFAULT; goto out; } useraddr += offsetof(struct ethtool_dump, data); if (copy_to_user(useraddr, data, len)) ret = -EFAULT; out: vfree(data); return ret; } /* The main entry point in this file. Called from net/core/dev.c */ int dev_ethtool(struct net *net, struct ifreq *ifr) { struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name); void __user *useraddr = ifr->ifr_data; u32 ethcmd; int rc; netdev_features_t old_features; if (!dev || !netif_device_present(dev)) return -ENODEV; if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd))) return -EFAULT; if (!dev->ethtool_ops) { /* ETHTOOL_GDRVINFO does not require any driver support. * It is also unprivileged and does not change anything, * so we can take a shortcut to it. 
*/ if (ethcmd == ETHTOOL_GDRVINFO) return ethtool_get_drvinfo(dev, useraddr); else return -EOPNOTSUPP; } /* Allow some commands to be done by anyone */ switch (ethcmd) { case ETHTOOL_GSET: case ETHTOOL_GDRVINFO: case ETHTOOL_GMSGLVL: case ETHTOOL_GCOALESCE: case ETHTOOL_GRINGPARAM: case ETHTOOL_GPAUSEPARAM: case ETHTOOL_GRXCSUM: case ETHTOOL_GTXCSUM: case ETHTOOL_GSG: case ETHTOOL_GSSET_INFO: case ETHTOOL_GSTRINGS: case ETHTOOL_GTSO: case ETHTOOL_GPERMADDR: case ETHTOOL_GUFO: case ETHTOOL_GGSO: case ETHTOOL_GGRO: case ETHTOOL_GFLAGS: case ETHTOOL_GPFLAGS: case ETHTOOL_GRXFH: case ETHTOOL_GRXRINGS: case ETHTOOL_GRXCLSRLCNT: case ETHTOOL_GRXCLSRULE: case ETHTOOL_GRXCLSRLALL: case ETHTOOL_GFEATURES: break; default: if (!capable(CAP_NET_ADMIN)) return -EPERM; } if (dev->ethtool_ops->begin) { rc = dev->ethtool_ops->begin(dev); if (rc < 0) return rc; } old_features = dev->features; switch (ethcmd) { case ETHTOOL_GSET: rc = ethtool_get_settings(dev, useraddr); break; case ETHTOOL_SSET: rc = ethtool_set_settings(dev, useraddr); break; case ETHTOOL_GDRVINFO: rc = ethtool_get_drvinfo(dev, useraddr); break; case ETHTOOL_GREGS: rc = ethtool_get_regs(dev, useraddr); break; case ETHTOOL_GWOL: rc = ethtool_get_wol(dev, useraddr); break; case ETHTOOL_SWOL: rc = ethtool_set_wol(dev, useraddr); break; case ETHTOOL_GMSGLVL: rc = ethtool_get_value(dev, useraddr, ethcmd, dev->ethtool_ops->get_msglevel); break; case ETHTOOL_SMSGLVL: rc = ethtool_set_value_void(dev, useraddr, dev->ethtool_ops->set_msglevel); break; case ETHTOOL_NWAY_RST: rc = ethtool_nway_reset(dev); break; case ETHTOOL_GLINK: rc = ethtool_get_link(dev, useraddr); break; case ETHTOOL_GEEPROM: rc = ethtool_get_eeprom(dev, useraddr); break; case ETHTOOL_SEEPROM: rc = ethtool_set_eeprom(dev, useraddr); break; case ETHTOOL_GCOALESCE: rc = ethtool_get_coalesce(dev, useraddr); break; case ETHTOOL_SCOALESCE: rc = ethtool_set_coalesce(dev, useraddr); break; case ETHTOOL_GRINGPARAM: rc = ethtool_get_ringparam(dev, useraddr); 
break; case ETHTOOL_SRINGPARAM: rc = ethtool_set_ringparam(dev, useraddr); break; case ETHTOOL_GPAUSEPARAM: rc = ethtool_get_pauseparam(dev, useraddr); break; case ETHTOOL_SPAUSEPARAM: rc = ethtool_set_pauseparam(dev, useraddr); break; case ETHTOOL_TEST: rc = ethtool_self_test(dev, useraddr); break; case ETHTOOL_GSTRINGS: rc = ethtool_get_strings(dev, useraddr); break; case ETHTOOL_PHYS_ID: rc = ethtool_phys_id(dev, useraddr); break; case ETHTOOL_GSTATS: rc = ethtool_get_stats(dev, useraddr); break; case ETHTOOL_GPERMADDR: rc = ethtool_get_perm_addr(dev, useraddr); break; case ETHTOOL_GFLAGS: rc = ethtool_get_value(dev, useraddr, ethcmd, __ethtool_get_flags); break; case ETHTOOL_SFLAGS: rc = ethtool_set_value(dev, useraddr, __ethtool_set_flags); break; case ETHTOOL_GPFLAGS: rc = ethtool_get_value(dev, useraddr, ethcmd, dev->ethtool_ops->get_priv_flags); break; case ETHTOOL_SPFLAGS: rc = ethtool_set_value(dev, useraddr, dev->ethtool_ops->set_priv_flags); break; case ETHTOOL_GRXFH: case ETHTOOL_GRXRINGS: case ETHTOOL_GRXCLSRLCNT: case ETHTOOL_GRXCLSRULE: case ETHTOOL_GRXCLSRLALL: rc = ethtool_get_rxnfc(dev, ethcmd, useraddr); break; case ETHTOOL_SRXFH: case ETHTOOL_SRXCLSRLDEL: case ETHTOOL_SRXCLSRLINS: rc = ethtool_set_rxnfc(dev, ethcmd, useraddr); break; case ETHTOOL_FLASHDEV: rc = ethtool_flash_device(dev, useraddr); break; case ETHTOOL_RESET: rc = ethtool_reset(dev, useraddr); break; case ETHTOOL_GSSET_INFO: rc = ethtool_get_sset_info(dev, useraddr); break; case ETHTOOL_GRXFHINDIR: rc = ethtool_get_rxfh_indir(dev, useraddr); break; case ETHTOOL_SRXFHINDIR: rc = ethtool_set_rxfh_indir(dev, useraddr); break; case ETHTOOL_GFEATURES: rc = ethtool_get_features(dev, useraddr); break; case ETHTOOL_SFEATURES: rc = ethtool_set_features(dev, useraddr); break; case ETHTOOL_GTXCSUM: case ETHTOOL_GRXCSUM: case ETHTOOL_GSG: case ETHTOOL_GTSO: case ETHTOOL_GUFO: case ETHTOOL_GGSO: case ETHTOOL_GGRO: rc = ethtool_get_one_feature(dev, useraddr, ethcmd); break; case 
ETHTOOL_STXCSUM: case ETHTOOL_SRXCSUM: case ETHTOOL_SSG: case ETHTOOL_STSO: case ETHTOOL_SUFO: case ETHTOOL_SGSO: case ETHTOOL_SGRO: rc = ethtool_set_one_feature(dev, useraddr, ethcmd); break; case ETHTOOL_GCHANNELS: rc = ethtool_get_channels(dev, useraddr); break; case ETHTOOL_SCHANNELS: rc = ethtool_set_channels(dev, useraddr); break; case ETHTOOL_SET_DUMP: rc = ethtool_set_dump(dev, useraddr); break; case ETHTOOL_GET_DUMP_FLAG: rc = ethtool_get_dump_flag(dev, useraddr); break; case ETHTOOL_GET_DUMP_DATA: rc = ethtool_get_dump_data(dev, useraddr); break; default: rc = -EOPNOTSUPP; } if (dev->ethtool_ops->complete) dev->ethtool_ops->complete(dev); if (old_features != dev->features) netdev_features_change(dev); return rc; }
gpl-2.0
Vachounet/AcerLiquidGlowKernel
drivers/infiniband/hw/mlx4/qp.c
1638
59884
/* * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved. * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/log2.h> #include <linux/slab.h> #include <linux/netdevice.h> #include <rdma/ib_cache.h> #include <rdma/ib_pack.h> #include <rdma/ib_addr.h> #include <linux/mlx4/qp.h> #include "mlx4_ib.h" #include "user.h" enum { MLX4_IB_ACK_REQ_FREQ = 8, }; enum { MLX4_IB_DEFAULT_SCHED_QUEUE = 0x83, MLX4_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f, MLX4_IB_LINK_TYPE_IB = 0, MLX4_IB_LINK_TYPE_ETH = 1 }; enum { /* * Largest possible UD header: send with GRH and immediate * data plus 18 bytes for an Ethernet header with VLAN/802.1Q * tag. 
(LRH would only use 8 bytes, so Ethernet is the * biggest case) */ MLX4_IB_UD_HEADER_SIZE = 82, MLX4_IB_LSO_HEADER_SPARE = 128, }; enum { MLX4_IB_IBOE_ETHERTYPE = 0x8915 }; struct mlx4_ib_sqp { struct mlx4_ib_qp qp; int pkey_index; u32 qkey; u32 send_psn; struct ib_ud_header ud_header; u8 header_buf[MLX4_IB_UD_HEADER_SIZE]; }; enum { MLX4_IB_MIN_SQ_STRIDE = 6, MLX4_IB_CACHE_LINE_SIZE = 64, }; static const __be32 mlx4_ib_opcode[] = { [IB_WR_SEND] = cpu_to_be32(MLX4_OPCODE_SEND), [IB_WR_LSO] = cpu_to_be32(MLX4_OPCODE_LSO), [IB_WR_SEND_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_SEND_IMM), [IB_WR_RDMA_WRITE] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE), [IB_WR_RDMA_WRITE_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM), [IB_WR_RDMA_READ] = cpu_to_be32(MLX4_OPCODE_RDMA_READ), [IB_WR_ATOMIC_CMP_AND_SWP] = cpu_to_be32(MLX4_OPCODE_ATOMIC_CS), [IB_WR_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_ATOMIC_FA), [IB_WR_SEND_WITH_INV] = cpu_to_be32(MLX4_OPCODE_SEND_INVAL), [IB_WR_LOCAL_INV] = cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL), [IB_WR_FAST_REG_MR] = cpu_to_be32(MLX4_OPCODE_FMR), [IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_CS), [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_FA), }; static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp) { return container_of(mqp, struct mlx4_ib_sqp, qp); } static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) { return qp->mqp.qpn >= dev->dev->caps.sqp_start && qp->mqp.qpn <= dev->dev->caps.sqp_start + 3; } static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) { return qp->mqp.qpn >= dev->dev->caps.sqp_start && qp->mqp.qpn <= dev->dev->caps.sqp_start + 1; } static void *get_wqe(struct mlx4_ib_qp *qp, int offset) { return mlx4_buf_offset(&qp->buf, offset); } static void *get_recv_wqe(struct mlx4_ib_qp *qp, int n) { return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift)); } static void *get_send_wqe(struct mlx4_ib_qp *qp, int n) { return get_wqe(qp, qp->sq.offset + (n 
<< qp->sq.wqe_shift)); } /* * Stamp a SQ WQE so that it is invalid if prefetched by marking the * first four bytes of every 64 byte chunk with * 0x7FFFFFF | (invalid_ownership_value << 31). * * When the max work request size is less than or equal to the WQE * basic block size, as an optimization, we can stamp all WQEs with * 0xffffffff, and skip the very first chunk of each WQE. */ static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n, int size) { __be32 *wqe; int i; int s; int ind; void *buf; __be32 stamp; struct mlx4_wqe_ctrl_seg *ctrl; if (qp->sq_max_wqes_per_wr > 1) { s = roundup(size, 1U << qp->sq.wqe_shift); for (i = 0; i < s; i += 64) { ind = (i >> qp->sq.wqe_shift) + n; stamp = ind & qp->sq.wqe_cnt ? cpu_to_be32(0x7fffffff) : cpu_to_be32(0xffffffff); buf = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1)); wqe = buf + (i & ((1 << qp->sq.wqe_shift) - 1)); *wqe = stamp; } } else { ctrl = buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1)); s = (ctrl->fence_size & 0x3f) << 4; for (i = 64; i < s; i += 64) { wqe = buf + i; *wqe = cpu_to_be32(0xffffffff); } } } static void post_nop_wqe(struct mlx4_ib_qp *qp, int n, int size) { struct mlx4_wqe_ctrl_seg *ctrl; struct mlx4_wqe_inline_seg *inl; void *wqe; int s; ctrl = wqe = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1)); s = sizeof(struct mlx4_wqe_ctrl_seg); if (qp->ibqp.qp_type == IB_QPT_UD) { struct mlx4_wqe_datagram_seg *dgram = wqe + sizeof *ctrl; struct mlx4_av *av = (struct mlx4_av *)dgram->av; memset(dgram, 0, sizeof *dgram); av->port_pd = cpu_to_be32((qp->port << 24) | to_mpd(qp->ibqp.pd)->pdn); s += sizeof(struct mlx4_wqe_datagram_seg); } /* Pad the remainder of the WQE with an inline data segment. */ if (size > s) { inl = wqe + s; inl->byte_count = cpu_to_be32(1 << 31 | (size - s - sizeof *inl)); } ctrl->srcrb_flags = 0; ctrl->fence_size = size / 16; /* * Make sure descriptor is fully written before setting ownership bit * (because HW can start executing as soon as we do). 
*/ wmb(); ctrl->owner_opcode = cpu_to_be32(MLX4_OPCODE_NOP | MLX4_WQE_CTRL_NEC) | (n & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0); stamp_send_wqe(qp, n + qp->sq_spare_wqes, size); } /* Post NOP WQE to prevent wrap-around in the middle of WR */ static inline unsigned pad_wraparound(struct mlx4_ib_qp *qp, int ind) { unsigned s = qp->sq.wqe_cnt - (ind & (qp->sq.wqe_cnt - 1)); if (unlikely(s < qp->sq_max_wqes_per_wr)) { post_nop_wqe(qp, ind, s << qp->sq.wqe_shift); ind += s; } return ind; } static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type) { struct ib_event event; struct ib_qp *ibqp = &to_mibqp(qp)->ibqp; if (type == MLX4_EVENT_TYPE_PATH_MIG) to_mibqp(qp)->port = to_mibqp(qp)->alt_port; if (ibqp->event_handler) { event.device = ibqp->device; event.element.qp = ibqp; switch (type) { case MLX4_EVENT_TYPE_PATH_MIG: event.event = IB_EVENT_PATH_MIG; break; case MLX4_EVENT_TYPE_COMM_EST: event.event = IB_EVENT_COMM_EST; break; case MLX4_EVENT_TYPE_SQ_DRAINED: event.event = IB_EVENT_SQ_DRAINED; break; case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE: event.event = IB_EVENT_QP_LAST_WQE_REACHED; break; case MLX4_EVENT_TYPE_WQ_CATAS_ERROR: event.event = IB_EVENT_QP_FATAL; break; case MLX4_EVENT_TYPE_PATH_MIG_FAILED: event.event = IB_EVENT_PATH_MIG_ERR; break; case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR: event.event = IB_EVENT_QP_REQ_ERR; break; case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR: event.event = IB_EVENT_QP_ACCESS_ERR; break; default: printk(KERN_WARNING "mlx4_ib: Unexpected event type %d " "on QP %06x\n", type, qp->qpn); return; } ibqp->event_handler(&event, ibqp->qp_context); } } static int send_wqe_overhead(enum ib_qp_type type, u32 flags) { /* * UD WQEs must have a datagram segment. * RC and UC WQEs might have a remote address segment. * MLX WQEs need two extra inline data segments (for the UD * header and space for the ICRC). */ switch (type) { case IB_QPT_UD: return sizeof (struct mlx4_wqe_ctrl_seg) + sizeof (struct mlx4_wqe_datagram_seg) + ((flags & MLX4_IB_QP_LSO) ? 
MLX4_IB_LSO_HEADER_SPARE : 0); case IB_QPT_UC: return sizeof (struct mlx4_wqe_ctrl_seg) + sizeof (struct mlx4_wqe_raddr_seg); case IB_QPT_RC: return sizeof (struct mlx4_wqe_ctrl_seg) + sizeof (struct mlx4_wqe_atomic_seg) + sizeof (struct mlx4_wqe_raddr_seg); case IB_QPT_SMI: case IB_QPT_GSI: return sizeof (struct mlx4_wqe_ctrl_seg) + ALIGN(MLX4_IB_UD_HEADER_SIZE + DIV_ROUND_UP(MLX4_IB_UD_HEADER_SIZE, MLX4_INLINE_ALIGN) * sizeof (struct mlx4_wqe_inline_seg), sizeof (struct mlx4_wqe_data_seg)) + ALIGN(4 + sizeof (struct mlx4_wqe_inline_seg), sizeof (struct mlx4_wqe_data_seg)); default: return sizeof (struct mlx4_wqe_ctrl_seg); } } static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, int is_user, int has_srq, struct mlx4_ib_qp *qp) { /* Sanity check RQ size before proceeding */ if (cap->max_recv_wr > dev->dev->caps.max_wqes || cap->max_recv_sge > dev->dev->caps.max_rq_sg) return -EINVAL; if (has_srq) { /* QPs attached to an SRQ should have no RQ */ if (cap->max_recv_wr) return -EINVAL; qp->rq.wqe_cnt = qp->rq.max_gs = 0; } else { /* HW requires >= 1 RQ entry with >= 1 gather entry */ if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge)) return -EINVAL; qp->rq.wqe_cnt = roundup_pow_of_two(max(1U, cap->max_recv_wr)); qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge)); qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg)); } cap->max_recv_wr = qp->rq.max_post = qp->rq.wqe_cnt; cap->max_recv_sge = qp->rq.max_gs; return 0; } static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, enum ib_qp_type type, struct mlx4_ib_qp *qp) { int s; /* Sanity check SQ size before proceeding */ if (cap->max_send_wr > dev->dev->caps.max_wqes || cap->max_send_sge > dev->dev->caps.max_sq_sg || cap->max_inline_data + send_wqe_overhead(type, qp->flags) + sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz) return -EINVAL; /* * For MLX transport we need 2 extra S/G entries: * one for the header and one 
for the checksum at the end */ if ((type == IB_QPT_SMI || type == IB_QPT_GSI) && cap->max_send_sge + 2 > dev->dev->caps.max_sq_sg) return -EINVAL; s = max(cap->max_send_sge * sizeof (struct mlx4_wqe_data_seg), cap->max_inline_data + sizeof (struct mlx4_wqe_inline_seg)) + send_wqe_overhead(type, qp->flags); if (s > dev->dev->caps.max_sq_desc_sz) return -EINVAL; /* * Hermon supports shrinking WQEs, such that a single work * request can include multiple units of 1 << wqe_shift. This * way, work requests can differ in size, and do not have to * be a power of 2 in size, saving memory and speeding up send * WR posting. Unfortunately, if we do this then the * wqe_index field in CQEs can't be used to look up the WR ID * anymore, so we do this only if selective signaling is off. * * Further, on 32-bit platforms, we can't use vmap() to make * the QP buffer virtually contiguous. Thus we have to use * constant-sized WRs to make sure a WR is always fully within * a single page-sized chunk. * * Finally, we use NOP work requests to pad the end of the * work queue, to avoid wrap-around in the middle of WR. We * set NEC bit to avoid getting completions with error for * these NOP WRs, but since NEC is only supported starting * with firmware 2.2.232, we use constant-sized WRs for older * firmware. * * And, since MLX QPs only support SEND, we use constant-sized * WRs in this case. * * We look for the smallest value of wqe_shift such that the * resulting number of wqes does not exceed device * capabilities. * * We set WQE size to at least 64 bytes, this way stamping * invalidates each WQE. */ if (dev->dev->caps.fw_ver >= MLX4_FW_VER_WQE_CTRL_NEC && qp->sq_signal_bits && BITS_PER_LONG == 64 && type != IB_QPT_SMI && type != IB_QPT_GSI) qp->sq.wqe_shift = ilog2(64); else qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s)); for (;;) { qp->sq_max_wqes_per_wr = DIV_ROUND_UP(s, 1U << qp->sq.wqe_shift); /* * We need to leave 2 KB + 1 WR of headroom in the SQ to * allow HW to prefetch. 
*/ qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + qp->sq_max_wqes_per_wr; qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr * qp->sq_max_wqes_per_wr + qp->sq_spare_wqes); if (qp->sq.wqe_cnt <= dev->dev->caps.max_wqes) break; if (qp->sq_max_wqes_per_wr <= 1) return -EINVAL; ++qp->sq.wqe_shift; } qp->sq.max_gs = (min(dev->dev->caps.max_sq_desc_sz, (qp->sq_max_wqes_per_wr << qp->sq.wqe_shift)) - send_wqe_overhead(type, qp->flags)) / sizeof (struct mlx4_wqe_data_seg); qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + (qp->sq.wqe_cnt << qp->sq.wqe_shift); if (qp->rq.wqe_shift > qp->sq.wqe_shift) { qp->rq.offset = 0; qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; } else { qp->rq.offset = qp->sq.wqe_cnt << qp->sq.wqe_shift; qp->sq.offset = 0; } cap->max_send_wr = qp->sq.max_post = (qp->sq.wqe_cnt - qp->sq_spare_wqes) / qp->sq_max_wqes_per_wr; cap->max_send_sge = min(qp->sq.max_gs, min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg)); /* We don't support inline sends for kernel QPs (yet) */ cap->max_inline_data = 0; return 0; } static int set_user_sq_size(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, struct mlx4_ib_create_qp *ucmd) { /* Sanity check SQ size before proceeding */ if ((1 << ucmd->log_sq_bb_count) > dev->dev->caps.max_wqes || ucmd->log_sq_stride > ilog2(roundup_pow_of_two(dev->dev->caps.max_sq_desc_sz)) || ucmd->log_sq_stride < MLX4_IB_MIN_SQ_STRIDE) return -EINVAL; qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count; qp->sq.wqe_shift = ucmd->log_sq_stride; qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + (qp->sq.wqe_cnt << qp->sq.wqe_shift); return 0; } static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, struct ib_qp_init_attr *init_attr, struct ib_udata *udata, int sqpn, struct mlx4_ib_qp *qp) { int qpn; int err; mutex_init(&qp->mutex); spin_lock_init(&qp->sq.lock); spin_lock_init(&qp->rq.lock); INIT_LIST_HEAD(&qp->gid_list); qp->state = IB_QPS_RESET; if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) qp->sq_signal_bits = 
cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);

	err = set_rq_size(dev, &init_attr->cap, !!pd->uobject, !!init_attr->srq, qp);
	if (err)
		goto err;

	if (pd->uobject) {
		/* Userspace QP: buffer and (optionally) doorbell come from
		 * the user's memory via the udata command. */
		struct mlx4_ib_create_qp ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err;
		}

		qp->sq_no_prefetch = ucmd.sq_no_prefetch;

		err = set_user_sq_size(dev, qp, &ucmd);
		if (err)
			goto err;

		qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
				       qp->buf_size, 0, 0);
		if (IS_ERR(qp->umem)) {
			err = PTR_ERR(qp->umem);
			goto err;
		}

		err = mlx4_mtt_init(dev->dev, ib_umem_page_count(qp->umem),
				    ilog2(qp->umem->page_size), &qp->mtt);
		if (err)
			goto err_buf;

		err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem);
		if (err)
			goto err_mtt;

		if (!init_attr->srq) {
			err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
						  ucmd.db_addr, &qp->db);
			if (err)
				goto err_mtt;
		}
	} else {
		/* Kernel QP: driver allocates buffer, MTTs, doorbell and
		 * the wrid bookkeeping arrays itself. */
		qp->sq_no_prefetch = 0;

		if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
			qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;

		if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
			qp->flags |= MLX4_IB_QP_LSO;

		err = set_kernel_sq_size(dev, &init_attr->cap, init_attr->qp_type, qp);
		if (err)
			goto err;

		if (!init_attr->srq) {
			err = mlx4_db_alloc(dev->dev, &qp->db, 0);
			if (err)
				goto err;

			*qp->db.db = 0;
		}

		if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, &qp->buf)) {
			err = -ENOMEM;
			goto err_db;
		}

		err = mlx4_mtt_init(dev->dev, qp->buf.npages, qp->buf.page_shift,
				    &qp->mtt);
		if (err)
			goto err_buf;

		err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf);
		if (err)
			goto err_mtt;

		qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof (u64), GFP_KERNEL);
		qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof (u64), GFP_KERNEL);

		if (!qp->sq.wrid || !qp->rq.wrid) {
			err = -ENOMEM;
			goto err_wrid;
		}
	}

	if (sqpn) {
		qpn = sqpn;
	} else {
		err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn);
		if (err)
			goto err_wrid;
	}

	err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
	if (err)
		goto err_qpn;

	/*
	 * Hardware wants QPN written in big-endian order (after
	 * shifting) for send doorbell.  Precompute this value to save
	 * a little bit when posting sends.
	 */
	qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);

	qp->mqp.event = mlx4_ib_qp_event;

	return 0;

	/* Error unwind: labels fall through so each stage releases its own
	 * resource plus everything acquired before it. */
err_qpn:
	if (!sqpn)
		mlx4_qp_release_range(dev->dev, qpn, 1);

err_wrid:
	if (pd->uobject) {
		if (!init_attr->srq)
			mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context),
					      &qp->db);
	} else {
		kfree(qp->sq.wrid);
		kfree(qp->rq.wrid);
	}

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &qp->mtt);

err_buf:
	if (pd->uobject)
		ib_umem_release(qp->umem);
	else
		mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);

err_db:
	if (!pd->uobject && !init_attr->srq)
		mlx4_db_free(dev->dev, &qp->db);

err:
	return err;
}

/* Map an IB verbs QP state onto the mlx4 firmware QP state encoding. */
static enum mlx4_qp_state to_mlx4_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:	return MLX4_QP_STATE_RST;
	case IB_QPS_INIT:	return MLX4_QP_STATE_INIT;
	case IB_QPS_RTR:	return MLX4_QP_STATE_RTR;
	case IB_QPS_RTS:	return MLX4_QP_STATE_RTS;
	case IB_QPS_SQD:	return MLX4_QP_STATE_SQD;
	case IB_QPS_SQE:	return MLX4_QP_STATE_SQER;
	case IB_QPS_ERR:	return MLX4_QP_STATE_ERR;
	default:		return -1;
	}
}

/*
 * Lock both CQs of a QP.  Locks are always taken in ascending CQN
 * order so that two QPs sharing CQs cannot deadlock against each other.
 */
static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		spin_lock_irq(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}

/* Unlock both CQs in the reverse of the order taken by mlx4_ib_lock_cqs(). */
static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
	__releases(&send_cq->lock) __releases(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		__release(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}

/* Free all multicast GID entries attached to the QP. */
static void del_gid_entries(struct
mlx4_ib_qp *qp)
{
	struct mlx4_ib_gid_entry *ge, *tmp;

	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
		list_del(&ge->list);
		kfree(ge);
	}
}

/*
 * Tear down a QP: move it to RESET in firmware, detach it from its CQs
 * (cleaning kernel-QP CQEs under the CQ locks), then release the
 * firmware object, QPN, MTTs, buffers and doorbell.
 */
static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
			      int is_user)
{
	struct mlx4_ib_cq *send_cq, *recv_cq;

	if (qp->state != IB_QPS_RESET)
		if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state),
				   MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
			printk(KERN_WARNING "mlx4_ib: modify QP %06x to RESET failed.\n",
			       qp->mqp.qpn);

	send_cq = to_mcq(qp->ibqp.send_cq);
	recv_cq = to_mcq(qp->ibqp.recv_cq);

	mlx4_ib_lock_cqs(send_cq, recv_cq);

	/* Userspace owns its CQEs; only clean them for kernel QPs. */
	if (!is_user) {
		__mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
				   qp->ibqp.srq ? to_msrq(qp->ibqp.srq): NULL);
		if (send_cq != recv_cq)
			__mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
	}

	mlx4_qp_remove(dev->dev, &qp->mqp);

	mlx4_ib_unlock_cqs(send_cq, recv_cq);

	mlx4_qp_free(dev->dev, &qp->mqp);

	/* Special QPs use fixed QPNs that were never reserved. */
	if (!is_sqp(dev, qp))
		mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);

	mlx4_mtt_cleanup(dev->dev, &qp->mtt);

	if (is_user) {
		if (!qp->ibqp.srq)
			mlx4_ib_db_unmap_user(to_mucontext(qp->ibqp.uobject->context),
					      &qp->db);
		ib_umem_release(qp->umem);
	} else {
		kfree(qp->sq.wrid);
		kfree(qp->rq.wrid);
		mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
		if (!qp->ibqp.srq)
			mlx4_db_free(dev->dev, &qp->db);
	}

	del_gid_entries(qp);
}

/*
 * Verbs entry point: allocate and initialize a QP of the requested
 * type.  Special QPs (SMI/GSI) are wrapped in mlx4_ib_sqp and get a
 * fixed QPN derived from type and port.
 */
struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_sqp *sqp;
	struct mlx4_ib_qp *qp;
	int err;

	/*
	 * We only support LSO and multicast loopback blocking, and
	 * only for kernel UD QPs.
	 */
	if (init_attr->create_flags & ~(IB_QP_CREATE_IPOIB_UD_LSO |
					IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK))
		return ERR_PTR(-EINVAL);

	if (init_attr->create_flags &&
	    (pd->uobject || init_attr->qp_type != IB_QPT_UD))
		return ERR_PTR(-EINVAL);

	switch (init_attr->qp_type) {
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
	{
		qp = kzalloc(sizeof *qp, GFP_KERNEL);
		if (!qp)
			return ERR_PTR(-ENOMEM);

		err = create_qp_common(dev, pd, init_attr, udata, 0, qp);
		if (err) {
			kfree(qp);
			return ERR_PTR(err);
		}

		qp->ibqp.qp_num = qp->mqp.qpn;

		break;
	}
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	{
		/* Userspace is not allowed to create special QPs: */
		if (pd->uobject)
			return ERR_PTR(-EINVAL);

		sqp = kzalloc(sizeof *sqp, GFP_KERNEL);
		if (!sqp)
			return ERR_PTR(-ENOMEM);

		qp = &sqp->qp;

		/* Fixed QPN: sqp_start + {0:SMI, 2:GSI} + (port - 1). */
		err = create_qp_common(dev, pd, init_attr, udata,
				       dev->dev->caps.sqp_start +
				       (init_attr->qp_type == IB_QPT_SMI ? 0 : 2) +
				       init_attr->port_num - 1,
				       qp);
		if (err) {
			kfree(sqp);
			return ERR_PTR(err);
		}

		qp->port	= init_attr->port_num;
		qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;

		break;
	}
	default:
		/* Don't support raw QPs */
		return ERR_PTR(-EINVAL);
	}

	return &qp->ibqp;
}

/*
 * Verbs entry point: destroy a QP, closing the IB port first if this
 * was QP0 for that port.
 */
int mlx4_ib_destroy_qp(struct ib_qp *qp)
{
	struct mlx4_ib_dev *dev = to_mdev(qp->device);
	struct mlx4_ib_qp *mqp = to_mqp(qp);

	if (is_qp0(dev, mqp))
		mlx4_CLOSE_PORT(dev->dev, mqp->port);

	destroy_qp_common(dev, mqp, !!qp->pd->uobject);

	if (is_sqp(dev, mqp))
		kfree(to_msqp(mqp));
	else
		kfree(mqp);

	return 0;
}

/* Map an IB QP type onto the mlx4 firmware service type encoding. */
static int to_mlx4_st(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_RC:		return MLX4_QP_ST_RC;
	case IB_QPT_UC:		return MLX4_QP_ST_UC;
	case IB_QPT_UD:		return MLX4_QP_ST_UD;
	case IB_QPT_SMI:
	case IB_QPT_GSI:	return MLX4_QP_ST_MLX;
	default:		return -1;
	}
}

/*
 * Compute the RWE/RRE/RAE access bits for the QP context, taking new
 * values from attr where the mask says so and current QP state
 * otherwise.  With no responder resources, only remote writes remain.
 */
static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp,
				   const struct ib_qp_attr *attr, int attr_mask)
{
	u8 dest_rd_atomic;
	u32 access_flags;
	u32 hw_access_flags = 0;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		dest_rd_atomic = attr->max_dest_rd_atomic;
	else
		dest_rd_atomic = qp->resp_depth;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		access_flags = attr->qp_access_flags;
	else
		access_flags = qp->atomic_rd_en;

	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw_access_flags |= MLX4_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		hw_access_flags |= MLX4_QP_BIT_RAE;
	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MLX4_QP_BIT_RWE;

	return cpu_to_be32(hw_access_flags);
}

/* Cache modified special-QP attributes in the sqp software state. */
static void store_sqp_attrs(struct mlx4_ib_sqp *sqp, const struct ib_qp_attr *attr,
			    int attr_mask)
{
	if (attr_mask & IB_QP_PKEY_INDEX)
		sqp->pkey_index = attr->pkey_index;
	if (attr_mask & IB_QP_QKEY)
		sqp->qkey = attr->qkey;
	if (attr_mask & IB_QP_SQ_PSN)
		sqp->send_psn = attr->sq_psn;
}

/* Encode the port number into bits 7:6 of the path's sched_queue. */
static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port)
{
	path->sched_queue = (path->sched_queue & 0xbf) | ((port - 1) << 6);
}

/*
 * Fill a firmware address path from an IB address handle, for either
 * an IB or an Ethernet (IBoE) link layer.
 */
static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
			 struct mlx4_qp_path *path, u8 port)
{
	int err;
	int is_eth =
rdma_port_get_link_layer(&dev->ib_dev, port) ==
		IB_LINK_LAYER_ETHERNET;
	u8 mac[6];
	int is_mcast;
	u16 vlan_tag;
	int vidx;

	path->grh_mylmc = ah->src_path_bits & 0x7f;
	path->rlid	= cpu_to_be16(ah->dlid);
	if (ah->static_rate) {
		path->static_rate = ah->static_rate + MLX4_STAT_RATE_OFFSET;
		/* Walk down to the fastest rate the device supports. */
		while (path->static_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
		       !(1 << path->static_rate & dev->dev->caps.stat_rate_support))
			--path->static_rate;
	} else
		path->static_rate = 0;
	path->counter_index = 0xff;

	if (ah->ah_flags & IB_AH_GRH) {
		if (ah->grh.sgid_index >= dev->dev->caps.gid_table_len[port]) {
			printk(KERN_ERR "sgid_index (%u) too large. max is %d\n",
			       ah->grh.sgid_index,
			       dev->dev->caps.gid_table_len[port] - 1);
			return -1;
		}

		path->grh_mylmc |= 1 << 7;
		path->mgid_index = ah->grh.sgid_index;
		path->hop_limit  = ah->grh.hop_limit;
		path->tclass_flowlabel =
			cpu_to_be32((ah->grh.traffic_class << 20) |
				    (ah->grh.flow_label));
		memcpy(path->rgid, ah->grh.dgid.raw, 16);
	}

	if (is_eth) {
		path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
			((port - 1) << 6) | ((ah->sl & 7) << 3) | ((ah->sl & 8) >> 1);

		/* IBoE addressing lives in the GRH; it is mandatory here. */
		if (!(ah->ah_flags & IB_AH_GRH))
			return -1;

		err = mlx4_ib_resolve_grh(dev, ah, mac, &is_mcast, port);
		if (err)
			return err;

		memcpy(path->dmac, mac, 6);
		path->ackto = MLX4_IB_LINK_TYPE_ETH;
		/* use index 0 into MAC table for IBoE */
		path->grh_mylmc &= 0x80;

		vlan_tag = rdma_get_vlan_id(&dev->iboe.gid_table[port - 1][ah->grh.sgid_index]);
		if (vlan_tag < 0x1000) {
			if (mlx4_find_cached_vlan(dev->dev, port, vlan_tag, &vidx))
				return -ENOENT;

			path->vlan_index = vidx;
			path->fl = 1 << 6;
		}
	} else
		path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
			((port - 1) << 6) | ((ah->sl & 0xf) << 2);

	return 0;
}

/*
 * After a port change, (re)attach the QP to any multicast groups whose
 * MAC was not yet programmed.
 */
static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	struct mlx4_ib_gid_entry *ge, *tmp;

	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
		if (!ge->added && mlx4_ib_add_mc(dev, qp, &ge->gid)) {
			ge->added = 1;
			ge->port = qp->port;
		}
	}
}

/*
 * Build the firmware QP context for a verified state transition and
 * execute it, then update the software QP state to match.
 */
static int __mlx4_ib_modify_qp(struct ib_qp
*ibqp,
			       const struct ib_qp_attr *attr, int attr_mask,
			       enum ib_qp_state cur_state, enum ib_qp_state new_state)
{
	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	struct mlx4_qp_context *context;
	enum mlx4_qp_optpar optpar = 0;
	int sqd_event;
	int err = -EINVAL;

	context = kzalloc(sizeof *context, GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	context->flags = cpu_to_be32((to_mlx4_state(new_state) << 28) |
				     (to_mlx4_st(ibqp->qp_type) << 16));

	if (!(attr_mask & IB_QP_PATH_MIG_STATE))
		context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
	else {
		optpar |= MLX4_QP_OPTPAR_PM_STATE;
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			context->flags |= cpu_to_be32(MLX4_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			context->flags |= cpu_to_be32(MLX4_QP_PM_ARMED << 11);
			break;
		}
	}

	/* MTU + log2(max message size) packed into mtu_msgmax. */
	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
		context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
	else if (ibqp->qp_type == IB_QPT_UD) {
		if (qp->flags & MLX4_IB_QP_LSO)
			context->mtu_msgmax = (IB_MTU_4096 << 5) |
					      ilog2(dev->dev->caps.max_gso_sz);
		else
			context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
	} else if (attr_mask & IB_QP_PATH_MTU) {
		if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
			printk(KERN_ERR "path MTU (%u) is invalid\n",
			       attr->path_mtu);
			goto out;
		}
		context->mtu_msgmax = (attr->path_mtu << 5) |
			ilog2(dev->dev->caps.max_msg_sz);
	}

	if (qp->rq.wqe_cnt)
		context->rq_size_stride = ilog2(qp->rq.wqe_cnt) << 3;
	context->rq_size_stride |= qp->rq.wqe_shift - 4;

	if (qp->sq.wqe_cnt)
		context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3;
	context->sq_size_stride |= qp->sq.wqe_shift - 4;

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->sq_size_stride |= !!qp->sq_no_prefetch << 7;

	if (qp->ibqp.uobject)
		context->usr_page = cpu_to_be32(to_mucontext(ibqp->uobject->context)->uar.index);
	else
		context->usr_page = cpu_to_be32(dev->priv_uar.index);

	if (attr_mask & IB_QP_DEST_QPN)
		context->remote_qpn = cpu_to_be32(attr->dest_qp_num);

	if (attr_mask & IB_QP_PORT) {
		/* Port alone is only applied on SQD->SQD without a new AV;
		 * otherwise the AV below carries the port. */
		if (cur_state == IB_QPS_SQD && new_state == IB_QPS_SQD &&
		    !(attr_mask & IB_QP_AV)) {
			mlx4_set_sched(&context->pri_path, attr->port_num);
			optpar |= MLX4_QP_OPTPAR_SCHED_QUEUE;
		}
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		context->pri_path.pkey_index = attr->pkey_index;
		optpar |= MLX4_QP_OPTPAR_PKEY_INDEX;
	}

	if (attr_mask & IB_QP_AV) {
		if (mlx4_set_path(dev, &attr->ah_attr, &context->pri_path,
				  attr_mask & IB_QP_PORT ? attr->port_num : qp->port))
			goto out;

		optpar |= (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH |
			   MLX4_QP_OPTPAR_SCHED_QUEUE);
	}

	if (attr_mask & IB_QP_TIMEOUT) {
		context->pri_path.ackto |= attr->timeout << 3;
		optpar |= MLX4_QP_OPTPAR_ACK_TIMEOUT;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_port_num == 0 ||
		    attr->alt_port_num > dev->dev->caps.num_ports)
			goto out;

		if (attr->alt_pkey_index >=
		    dev->dev->caps.pkey_table_len[attr->alt_port_num])
			goto out;

		if (mlx4_set_path(dev, &attr->alt_ah_attr, &context->alt_path,
				  attr->alt_port_num))
			goto out;

		context->alt_path.pkey_index = attr->alt_pkey_index;
		context->alt_path.ackto = attr->alt_timeout << 3;
		optpar |= MLX4_QP_OPTPAR_ALT_ADDR_PATH;
	}

	context->pd	 = cpu_to_be32(to_mpd(ibqp->pd)->pdn);
	context->params1 = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28);

	/* Set "fast registration enabled" for all kernel QPs */
	if (!qp->ibqp.uobject)
		context->params1 |= cpu_to_be32(1 << 11);

	if (attr_mask & IB_QP_RNR_RETRY) {
		context->params1 |= cpu_to_be32(attr->rnr_retry << 13);
		optpar |= MLX4_QP_OPTPAR_RNR_RETRY;
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
		optpar |= MLX4_QP_OPTPAR_RETRY_COUNT;
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic)
			context->params1 |=
				cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
		optpar |= MLX4_QP_OPTPAR_SRA_MAX;
	}

	if (attr_mask & IB_QP_SQ_PSN)
		context->next_send_psn = cpu_to_be32(attr->sq_psn);

	context->cqn_send = cpu_to_be32(to_mcq(ibqp->send_cq)->mcq.cqn);

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic)
			context->params2 |=
				cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
		optpar |= MLX4_QP_OPTPAR_RRA_MAX;
	}

	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
		context->params2 |= to_mlx4_access_flags(qp, attr, attr_mask);
		optpar |= MLX4_QP_OPTPAR_RWE | MLX4_QP_OPTPAR_RRE | MLX4_QP_OPTPAR_RAE;
	}

	if (ibqp->srq)
		context->params2 |= cpu_to_be32(MLX4_QP_BIT_RIC);

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
		optpar |= MLX4_QP_OPTPAR_RNR_TIMEOUT;
	}
	if (attr_mask & IB_QP_RQ_PSN)
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

	context->cqn_recv = cpu_to_be32(to_mcq(ibqp->recv_cq)->mcq.cqn);

	if (attr_mask & IB_QP_QKEY) {
		context->qkey = cpu_to_be32(attr->qkey);
		optpar |= MLX4_QP_OPTPAR_Q_KEY;
	}

	if (ibqp->srq)
		context->srqn = cpu_to_be32(1 << 24 | to_msrq(ibqp->srq)->msrq.srqn);

	if (!ibqp->srq && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->db_rec_addr = cpu_to_be64(qp->db.dma);

	if (cur_state == IB_QPS_INIT &&
	    new_state == IB_QPS_RTR  &&
	    (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI ||
	     ibqp->qp_type == IB_QPT_UD)) {
		context->pri_path.sched_queue = (qp->port - 1) << 6;
		if (is_qp0(dev, qp))
			context->pri_path.sched_queue |= MLX4_IB_DEFAULT_QP0_SCHED_QUEUE;
		else
			context->pri_path.sched_queue |= MLX4_IB_DEFAULT_SCHED_QUEUE;
	}

	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
		sqd_event = 1;
	else
		sqd_event = 0;

	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->rlkey |= (1 << 4);

	/*
	 * Before passing a kernel QP to the HW, make sure that the
	 * ownership bits of the send queue are set and the SQ
	 * headroom is stamped so that the hardware doesn't start
	 * processing stale work requests.
	 */
	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		struct mlx4_wqe_ctrl_seg *ctrl;
		int i;

		for (i = 0; i < qp->sq.wqe_cnt; ++i) {
			ctrl = get_send_wqe(qp, i);
			ctrl->owner_opcode = cpu_to_be32(1 << 31);
			if (qp->sq_max_wqes_per_wr == 1)
				ctrl->fence_size = 1 << (qp->sq.wqe_shift - 4);

			stamp_send_wqe(qp, i, 1 << qp->sq.wqe_shift);
		}
	}

	err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state),
			     to_mlx4_state(new_state), context, optpar,
			     sqd_event, &qp->mqp);
	if (err)
		goto out;

	qp->state = new_state;

	/* Firmware accepted the transition: mirror it in software state. */
	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->atomic_rd_en = attr->qp_access_flags;
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->resp_depth = attr->max_dest_rd_atomic;
	if (attr_mask & IB_QP_PORT) {
		qp->port = attr->port_num;
		update_mcg_macs(dev, qp);
	}
	if (attr_mask & IB_QP_ALT_PATH)
		qp->alt_port = attr->alt_port_num;

	if (is_sqp(dev, qp))
		store_sqp_attrs(to_msqp(qp), attr, attr_mask);

	/*
	 * If we moved QP0 to RTR, bring the IB link up; if we moved
	 * QP0 to RESET or ERROR, bring the link back down.
	 */
	if (is_qp0(dev, qp)) {
		if (cur_state != IB_QPS_RTR && new_state == IB_QPS_RTR)
			if (mlx4_INIT_PORT(dev->dev, qp->port))
				printk(KERN_WARNING "INIT_PORT failed for port %d\n",
				       qp->port);

		if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR &&
		    (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR))
			mlx4_CLOSE_PORT(dev->dev, qp->port);
	}

	/*
	 * If we moved a kernel QP to RESET, clean up all old CQ
	 * entries and reinitialize the QP.
	 */
	if (new_state == IB_QPS_RESET && !ibqp->uobject) {
		mlx4_ib_cq_clean(to_mcq(ibqp->recv_cq), qp->mqp.qpn,
				 ibqp->srq ? to_msrq(ibqp->srq): NULL);
		if (ibqp->send_cq != ibqp->recv_cq)
			mlx4_ib_cq_clean(to_mcq(ibqp->send_cq), qp->mqp.qpn, NULL);

		qp->rq.head = 0;
		qp->rq.tail = 0;
		qp->sq.head = 0;
		qp->sq.tail = 0;
		qp->sq_next_wqe = 0;
		if (!ibqp->srq)
			*qp->db.db = 0;
	}

out:
	kfree(context);
	return err;
}

/*
 * Verbs entry point: validate a modify-QP request against device
 * capabilities and the IB state machine, then apply it under the QP
 * mutex via __mlx4_ib_modify_qp().
 */
int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	int err = -EINVAL;

	mutex_lock(&qp->mutex);

	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask))
		goto out;

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > dev->dev->caps.num_ports)) {
		goto out;
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
		if (attr->pkey_index >= dev->dev->caps.pkey_table_len[p])
			goto out;
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > dev->dev->caps.max_qp_init_rdma) {
		goto out;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > dev->dev->caps.max_qp_dest_rdma) {
		goto out;
	}

	/* RESET->RESET is a no-op. */
	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		err = 0;
		goto out;
	}

	err = __mlx4_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);

out:
	mutex_unlock(&qp->mutex);
	return err;
}

/*
 * Build the MLX (raw) transport header for an SMI/GSI send: LRH/GRH,
 * BTH and DETH (or Ethernet headers for IBoE), packed as inline data
 * after the MLX segment.  On success *mlx_seg_len is the total inline
 * length, 16-byte aligned.
 */
static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
			    void *wqe, unsigned *mlx_seg_len)
{
	struct ib_device *ib_dev = sqp->qp.ibqp.device;
	struct mlx4_wqe_mlx_seg *mlx = wqe;
	struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
	struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
	union ib_gid sgid;
	u16 pkey;
	int send_size;
	int header_size;
	int spc;
	int i;
	int is_eth;
	int is_vlan = 0;
	int is_grh;
	u16 vlan = 0;

	send_size = 0;
	for (i = 0; i < wr->num_sge; ++i)
		send_size += wr->sg_list[i].length;
is_eth = rdma_port_get_link_layer(sqp->qp.ibqp.device, sqp->qp.port) ==
	IB_LINK_LAYER_ETHERNET;
	is_grh = mlx4_ib_ah_grh_present(ah);
	if (is_eth) {
		ib_get_cached_gid(ib_dev, be32_to_cpu(ah->av.ib.port_pd) >> 24,
				  ah->av.ib.gid_index, &sgid);
		vlan = rdma_get_vlan_id(&sgid);
		is_vlan = vlan < 0x1000;
	}
	ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh, 0,
			  &sqp->ud_header);

	if (!is_eth) {
		sqp->ud_header.lrh.service_level =
			be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
		sqp->ud_header.lrh.destination_lid = ah->av.ib.dlid;
		sqp->ud_header.lrh.source_lid = cpu_to_be16(ah->av.ib.g_slid & 0x7f);
	}

	if (is_grh) {
		sqp->ud_header.grh.traffic_class =
			(be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20) & 0xff;
		sqp->ud_header.grh.flow_label =
			ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff);
		sqp->ud_header.grh.hop_limit = ah->av.ib.hop_limit;
		ib_get_cached_gid(ib_dev, be32_to_cpu(ah->av.ib.port_pd) >> 24,
				  ah->av.ib.gid_index,
				  &sqp->ud_header.grh.source_gid);
		memcpy(sqp->ud_header.grh.destination_gid.raw,
		       ah->av.ib.dgid, 16);
	}

	mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);

	if (!is_eth) {
		mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) |
					  (sqp->ud_header.lrh.destination_lid ==
					   IB_LID_PERMISSIVE ? MLX4_WQE_MLX_SLR : 0) |
					  (sqp->ud_header.lrh.service_level << 8));
		mlx->rlid = sqp->ud_header.lrh.destination_lid;
	}

	switch (wr->opcode) {
	case IB_WR_SEND:
		sqp->ud_header.bth.opcode	 = IB_OPCODE_UD_SEND_ONLY;
		sqp->ud_header.immediate_present = 0;
		break;
	case IB_WR_SEND_WITH_IMM:
		sqp->ud_header.bth.opcode	 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		sqp->ud_header.immediate_present = 1;
		sqp->ud_header.immediate_data	 = wr->ex.imm_data;
		break;
	default:
		return -EINVAL;
	}

	if (is_eth) {
		u8 *smac;

		memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6);
		/* FIXME: cache smac value? */
		smac = to_mdev(sqp->qp.ibqp.device)->iboe.netdevs[sqp->qp.port - 1]->dev_addr;
		memcpy(sqp->ud_header.eth.smac_h, smac, 6);
		/* Self-addressed frame: force HW loopback. */
		if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6))
			mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
		if (!is_vlan) {
			sqp->ud_header.eth.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
		} else {
			u16 pcp;

			sqp->ud_header.vlan.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
			pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 27 & 3) << 13;
			sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp);
		}
	} else {
		sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0;
		if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
			sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
	}
	sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
	if (!sqp->qp.ibqp.qp_num)
		ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey);
	else
		ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->wr.ud.pkey_index, &pkey);
	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
	sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
					       sqp->qkey : wr->wr.ud.remote_qkey);
	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);

	header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf);

	/* Debug dump of the packed header; compiled out by if (0). */
	if (0) {
		printk(KERN_ERR "built UD header of size %d:\n", header_size);
		for (i = 0; i < header_size / 4; ++i) {
			if (i % 8 == 0)
				printk(" [%02x] ", i * 4);
			printk(" %08x",
			       be32_to_cpu(((__be32 *) sqp->header_buf)[i]));
			if ((i + 1) % 8 == 0)
				printk("\n");
		}
		printk("\n");
	}

	/*
	 * Inline data segments may not cross a 64 byte boundary.  If
	 * our UD header is bigger than the space available up to the
	 * next 64 byte boundary in the WQE, use two inline data
	 * segments to hold the UD header.
	 */
	spc = MLX4_INLINE_ALIGN -
		((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
	if (header_size <= spc) {
		inl->byte_count = cpu_to_be32(1 << 31 | header_size);
		memcpy(inl + 1, sqp->header_buf, header_size);
		i = 1;
	} else {
		inl->byte_count = cpu_to_be32(1 << 31 | spc);
		memcpy(inl + 1, sqp->header_buf, spc);

		inl = (void *) (inl + 1) + spc;
		memcpy(inl + 1, sqp->header_buf + spc, header_size - spc);
		/*
		 * Need a barrier here to make sure all the data is
		 * visible before the byte_count field is set.
		 * Otherwise the HCA prefetcher could grab the 64-byte
		 * chunk with this inline segment and get a valid (!=
		 * 0xffffffff) byte count but stale data, and end up
		 * generating a packet with bad headers.
		 *
		 * The first inline segment's byte_count field doesn't
		 * need a barrier, because it comes after a
		 * control/MLX segment and therefore is at an offset
		 * of 16 mod 64.
		 */
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | (header_size - spc));
		i = 2;
	}

	*mlx_seg_len =
		ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
	return 0;
}

/*
 * Check whether posting nreq more WRs would overflow the work queue.
 * On apparent overflow, re-read head/tail under the CQ lock to pick up
 * completions that may have freed space.
 */
static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
{
	unsigned cur;
	struct mlx4_ib_cq *cq;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max_post))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max_post;
}

/* Translate IB access flags into FMR permission bits (local read always on). */
static __be32 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? cpu_to_be32(MLX4_WQE_FMR_PERM_ATOMIC)	    : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? cpu_to_be32(MLX4_WQE_FMR_PERM_REMOTE_WRITE) : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? cpu_to_be32(MLX4_WQE_FMR_PERM_REMOTE_READ)  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ?
cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_WRITE) : 0) |
	       cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_READ);
}

/* Fill a fast-register WQE segment from an IB_WR_FAST_REG_MR request. */
static void set_fmr_seg(struct mlx4_wqe_fmr_seg *fseg, struct ib_send_wr *wr)
{
	struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list);
	int i;

	for (i = 0; i < wr->wr.fast_reg.page_list_len; ++i)
		mfrpl->mapped_page_list[i] =
			cpu_to_be64(wr->wr.fast_reg.page_list->page_list[i] |
				    MLX4_MTT_FLAG_PRESENT);

	fseg->flags		= convert_access(wr->wr.fast_reg.access_flags);
	fseg->mem_key		= cpu_to_be32(wr->wr.fast_reg.rkey);
	fseg->buf_list		= cpu_to_be64(mfrpl->map);
	fseg->start_addr	= cpu_to_be64(wr->wr.fast_reg.iova_start);
	fseg->reg_len		= cpu_to_be64(wr->wr.fast_reg.length);
	fseg->offset		= 0; /* XXX -- is this just for ZBVA? */
	fseg->page_size		= cpu_to_be32(wr->wr.fast_reg.page_shift);
	fseg->reserved[0]	= 0;
	fseg->reserved[1]	= 0;
}

/* Fill a local-invalidate WQE segment for the given rkey. */
static void set_local_inv_seg(struct mlx4_wqe_local_inval_seg *iseg, u32 rkey)
{
	iseg->flags	= 0;
	iseg->mem_key	= cpu_to_be32(rkey);
	iseg->guest_id	= 0;
	iseg->pa	= 0;
}

/* Fill an RDMA remote-address WQE segment. */
static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg,
					  u64 remote_addr, u32 rkey)
{
	rseg->raddr    = cpu_to_be64(remote_addr);
	rseg->rkey     = cpu_to_be32(rkey);
	rseg->reserved = 0;
}

/* Fill an atomic WQE segment for CMP&SWP / FETCH&ADD / masked FETCH&ADD. */
static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, struct ib_send_wr *wr)
{
	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
		aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
		aseg->compare  = cpu_to_be64(wr->wr.atomic.compare_add);
	} else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
		aseg->compare  = cpu_to_be64(wr->wr.atomic.compare_add_mask);
	} else {
		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
		aseg->compare  = 0;
	}
}

/* Fill a masked-atomic WQE segment (masked CMP&SWP). */
static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
				  struct ib_send_wr *wr)
{
	aseg->swap_add		= cpu_to_be64(wr->wr.atomic.swap);
	aseg->swap_add_mask	= cpu_to_be64(wr->wr.atomic.swap_mask);
	aseg->compare		= cpu_to_be64(wr->wr.atomic.compare_add);
	aseg->compare_mask	= cpu_to_be64(wr->wr.atomic.compare_add_mask);
}

/*
 * Fill a UD datagram WQE segment from the WR's address handle and
 * report the AH's VLAN tag back through *vlan.
 */
static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
			     struct ib_send_wr *wr, __be16 *vlan)
{
	memcpy(dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av));
	dseg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
	dseg->vlan = to_mah(wr->wr.ud.ah)->av.eth.vlan;
	memcpy(dseg->mac, to_mah(wr->wr.ud.ah)->av.eth.mac, 6);
	*vlan = dseg->vlan;
}

/*
 * Write the inline segment that makes the HW append an ICRC on MLX
 * (SMI/GSI) sends.
 */
static void set_mlx_icrc_seg(void *dseg)
{
	u32 *t = dseg;
	struct mlx4_wqe_inline_seg *iseg = dseg;

	t[1] = 0;

	/*
	 * Need a barrier here before writing the byte_count field to
	 * make sure that all the data is visible before the
	 * byte_count field is set.  Otherwise, if the segment begins
	 * a new cacheline, the HCA prefetcher could grab the 64-byte
	 * chunk and get a valid (!= 0xffffffff) byte count but
	 * stale data, and end up sending the wrong data.
	 */
	wmb();

	iseg->byte_count = cpu_to_be32((1 << 31) | 4);
}

/* Fill a send data segment; byte_count is written last, after a barrier. */
static void set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->lkey       = cpu_to_be32(sg->lkey);
	dseg->addr       = cpu_to_be64(sg->addr);

	/*
	 * Need a barrier here before writing the byte_count field to
	 * make sure that all the data is visible before the
	 * byte_count field is set.  Otherwise, if the segment begins
	 * a new cacheline, the HCA prefetcher could grab the 64-byte
	 * chunk and get a valid (!= 0xffffffff) byte count but
	 * stale data, and end up sending the wrong data.
*/ wmb(); dseg->byte_count = cpu_to_be32(sg->length); } static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg) { dseg->byte_count = cpu_to_be32(sg->length); dseg->lkey = cpu_to_be32(sg->lkey); dseg->addr = cpu_to_be64(sg->addr); } static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr, struct mlx4_ib_qp *qp, unsigned *lso_seg_len, __be32 *lso_hdr_sz, __be32 *blh) { unsigned halign = ALIGN(sizeof *wqe + wr->wr.ud.hlen, 16); if (unlikely(halign > MLX4_IB_CACHE_LINE_SIZE)) *blh = cpu_to_be32(1 << 6); if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) && wr->num_sge > qp->sq.max_gs - (halign >> 4))) return -EINVAL; memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen); *lso_hdr_sz = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 | wr->wr.ud.hlen); *lso_seg_len = halign; return 0; } static __be32 send_ieth(struct ib_send_wr *wr) { switch (wr->opcode) { case IB_WR_SEND_WITH_IMM: case IB_WR_RDMA_WRITE_WITH_IMM: return wr->ex.imm_data; case IB_WR_SEND_WITH_INV: return cpu_to_be32(wr->ex.invalidate_rkey); default: return 0; } } int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, struct ib_send_wr **bad_wr) { struct mlx4_ib_qp *qp = to_mqp(ibqp); void *wqe; struct mlx4_wqe_ctrl_seg *ctrl; struct mlx4_wqe_data_seg *dseg; unsigned long flags; int nreq; int err = 0; unsigned ind; int uninitialized_var(stamp); int uninitialized_var(size); unsigned uninitialized_var(seglen); __be32 dummy; __be32 *lso_wqe; __be32 uninitialized_var(lso_hdr_sz); __be32 blh; int i; __be16 vlan = cpu_to_be16(0xffff); spin_lock_irqsave(&qp->sq.lock, flags); ind = qp->sq_next_wqe; for (nreq = 0; wr; ++nreq, wr = wr->next) { lso_wqe = &dummy; blh = 0; if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { err = -ENOMEM; *bad_wr = wr; goto out; } if (unlikely(wr->num_sge > qp->sq.max_gs)) { err = -EINVAL; *bad_wr = wr; goto out; } ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1)); qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = 
wr->wr_id; ctrl->srcrb_flags = (wr->send_flags & IB_SEND_SIGNALED ? cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) : 0) | (wr->send_flags & IB_SEND_SOLICITED ? cpu_to_be32(MLX4_WQE_CTRL_SOLICITED) : 0) | ((wr->send_flags & IB_SEND_IP_CSUM) ? cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM | MLX4_WQE_CTRL_TCP_UDP_CSUM) : 0) | qp->sq_signal_bits; ctrl->imm = send_ieth(wr); wqe += sizeof *ctrl; size = sizeof *ctrl / 16; switch (ibqp->qp_type) { case IB_QPT_RC: case IB_QPT_UC: switch (wr->opcode) { case IB_WR_ATOMIC_CMP_AND_SWP: case IB_WR_ATOMIC_FETCH_AND_ADD: case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD: set_raddr_seg(wqe, wr->wr.atomic.remote_addr, wr->wr.atomic.rkey); wqe += sizeof (struct mlx4_wqe_raddr_seg); set_atomic_seg(wqe, wr); wqe += sizeof (struct mlx4_wqe_atomic_seg); size += (sizeof (struct mlx4_wqe_raddr_seg) + sizeof (struct mlx4_wqe_atomic_seg)) / 16; break; case IB_WR_MASKED_ATOMIC_CMP_AND_SWP: set_raddr_seg(wqe, wr->wr.atomic.remote_addr, wr->wr.atomic.rkey); wqe += sizeof (struct mlx4_wqe_raddr_seg); set_masked_atomic_seg(wqe, wr); wqe += sizeof (struct mlx4_wqe_masked_atomic_seg); size += (sizeof (struct mlx4_wqe_raddr_seg) + sizeof (struct mlx4_wqe_masked_atomic_seg)) / 16; break; case IB_WR_RDMA_READ: case IB_WR_RDMA_WRITE: case IB_WR_RDMA_WRITE_WITH_IMM: set_raddr_seg(wqe, wr->wr.rdma.remote_addr, wr->wr.rdma.rkey); wqe += sizeof (struct mlx4_wqe_raddr_seg); size += sizeof (struct mlx4_wqe_raddr_seg) / 16; break; case IB_WR_LOCAL_INV: ctrl->srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER); set_local_inv_seg(wqe, wr->ex.invalidate_rkey); wqe += sizeof (struct mlx4_wqe_local_inval_seg); size += sizeof (struct mlx4_wqe_local_inval_seg) / 16; break; case IB_WR_FAST_REG_MR: ctrl->srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER); set_fmr_seg(wqe, wr); wqe += sizeof (struct mlx4_wqe_fmr_seg); size += sizeof (struct mlx4_wqe_fmr_seg) / 16; break; default: /* No extra segments required for sends */ break; } break; case IB_QPT_UD: set_datagram_seg(wqe, wr, &vlan); wqe += 
sizeof (struct mlx4_wqe_datagram_seg); size += sizeof (struct mlx4_wqe_datagram_seg) / 16; if (wr->opcode == IB_WR_LSO) { err = build_lso_seg(wqe, wr, qp, &seglen, &lso_hdr_sz, &blh); if (unlikely(err)) { *bad_wr = wr; goto out; } lso_wqe = (__be32 *) wqe; wqe += seglen; size += seglen / 16; } break; case IB_QPT_SMI: case IB_QPT_GSI: err = build_mlx_header(to_msqp(qp), wr, ctrl, &seglen); if (unlikely(err)) { *bad_wr = wr; goto out; } wqe += seglen; size += seglen / 16; break; default: break; } /* * Write data segments in reverse order, so as to * overwrite cacheline stamp last within each * cacheline. This avoids issues with WQE * prefetching. */ dseg = wqe; dseg += wr->num_sge - 1; size += wr->num_sge * (sizeof (struct mlx4_wqe_data_seg) / 16); /* Add one more inline data segment for ICRC for MLX sends */ if (unlikely(qp->ibqp.qp_type == IB_QPT_SMI || qp->ibqp.qp_type == IB_QPT_GSI)) { set_mlx_icrc_seg(dseg + 1); size += sizeof (struct mlx4_wqe_data_seg) / 16; } for (i = wr->num_sge - 1; i >= 0; --i, --dseg) set_data_seg(dseg, wr->sg_list + i); /* * Possibly overwrite stamping in cacheline with LSO * segment only after making sure all data segments * are written. */ wmb(); *lso_wqe = lso_hdr_sz; ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ? MLX4_WQE_CTRL_FENCE : 0) | size; if (be16_to_cpu(vlan) < 0x1000) { ctrl->ins_vlan = 1 << 6; ctrl->vlan_tag = vlan; } /* * Make sure descriptor is fully written before * setting ownership bit (because HW can start * executing as soon as we do). */ wmb(); if (wr->opcode < 0 || wr->opcode >= ARRAY_SIZE(mlx4_ib_opcode)) { err = -EINVAL; goto out; } ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] | (ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0) | blh; stamp = ind + qp->sq_spare_wqes; ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift); /* * We can improve latency by not stamping the last * send queue WQE until after ringing the doorbell, so * only stamp here if there are still more WQEs to post. 
* * Same optimization applies to padding with NOP wqe * in case of WQE shrinking (used to prevent wrap-around * in the middle of WR). */ if (wr->next) { stamp_send_wqe(qp, stamp, size * 16); ind = pad_wraparound(qp, ind); } } out: if (likely(nreq)) { qp->sq.head += nreq; /* * Make sure that descriptors are written before * doorbell record. */ wmb(); writel(qp->doorbell_qpn, to_mdev(ibqp->device)->uar_map + MLX4_SEND_DOORBELL); /* * Make sure doorbells don't leak out of SQ spinlock * and reach the HCA out of order. */ mmiowb(); stamp_send_wqe(qp, stamp, size * 16); ind = pad_wraparound(qp, ind); qp->sq_next_wqe = ind; } spin_unlock_irqrestore(&qp->sq.lock, flags); return err; } int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, struct ib_recv_wr **bad_wr) { struct mlx4_ib_qp *qp = to_mqp(ibqp); struct mlx4_wqe_data_seg *scat; unsigned long flags; int err = 0; int nreq; int ind; int i; spin_lock_irqsave(&qp->rq.lock, flags); ind = qp->rq.head & (qp->rq.wqe_cnt - 1); for (nreq = 0; wr; ++nreq, wr = wr->next) { if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { err = -ENOMEM; *bad_wr = wr; goto out; } if (unlikely(wr->num_sge > qp->rq.max_gs)) { err = -EINVAL; *bad_wr = wr; goto out; } scat = get_recv_wqe(qp, ind); for (i = 0; i < wr->num_sge; ++i) __set_data_seg(scat + i, wr->sg_list + i); if (i < qp->rq.max_gs) { scat[i].byte_count = 0; scat[i].lkey = cpu_to_be32(MLX4_INVALID_LKEY); scat[i].addr = 0; } qp->rq.wrid[ind] = wr->wr_id; ind = (ind + 1) & (qp->rq.wqe_cnt - 1); } out: if (likely(nreq)) { qp->rq.head += nreq; /* * Make sure that descriptors are written before * doorbell record. 
*/ wmb(); *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff); } spin_unlock_irqrestore(&qp->rq.lock, flags); return err; } static inline enum ib_qp_state to_ib_qp_state(enum mlx4_qp_state mlx4_state) { switch (mlx4_state) { case MLX4_QP_STATE_RST: return IB_QPS_RESET; case MLX4_QP_STATE_INIT: return IB_QPS_INIT; case MLX4_QP_STATE_RTR: return IB_QPS_RTR; case MLX4_QP_STATE_RTS: return IB_QPS_RTS; case MLX4_QP_STATE_SQ_DRAINING: case MLX4_QP_STATE_SQD: return IB_QPS_SQD; case MLX4_QP_STATE_SQER: return IB_QPS_SQE; case MLX4_QP_STATE_ERR: return IB_QPS_ERR; default: return -1; } } static inline enum ib_mig_state to_ib_mig_state(int mlx4_mig_state) { switch (mlx4_mig_state) { case MLX4_QP_PM_ARMED: return IB_MIG_ARMED; case MLX4_QP_PM_REARM: return IB_MIG_REARM; case MLX4_QP_PM_MIGRATED: return IB_MIG_MIGRATED; default: return -1; } } static int to_ib_qp_access_flags(int mlx4_flags) { int ib_flags = 0; if (mlx4_flags & MLX4_QP_BIT_RRE) ib_flags |= IB_ACCESS_REMOTE_READ; if (mlx4_flags & MLX4_QP_BIT_RWE) ib_flags |= IB_ACCESS_REMOTE_WRITE; if (mlx4_flags & MLX4_QP_BIT_RAE) ib_flags |= IB_ACCESS_REMOTE_ATOMIC; return ib_flags; } static void to_ib_ah_attr(struct mlx4_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr, struct mlx4_qp_path *path) { struct mlx4_dev *dev = ibdev->dev; int is_eth; memset(ib_ah_attr, 0, sizeof *ib_ah_attr); ib_ah_attr->port_num = path->sched_queue & 0x40 ? 2 : 1; if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports) return; is_eth = rdma_port_get_link_layer(&ibdev->ib_dev, ib_ah_attr->port_num) == IB_LINK_LAYER_ETHERNET; if (is_eth) ib_ah_attr->sl = ((path->sched_queue >> 3) & 0x7) | ((path->sched_queue & 4) << 1); else ib_ah_attr->sl = (path->sched_queue >> 2) & 0xf; ib_ah_attr->dlid = be16_to_cpu(path->rlid); ib_ah_attr->src_path_bits = path->grh_mylmc & 0x7f; ib_ah_attr->static_rate = path->static_rate ? path->static_rate - 5 : 0; ib_ah_attr->ah_flags = (path->grh_mylmc & (1 << 7)) ? 
IB_AH_GRH : 0; if (ib_ah_attr->ah_flags) { ib_ah_attr->grh.sgid_index = path->mgid_index; ib_ah_attr->grh.hop_limit = path->hop_limit; ib_ah_attr->grh.traffic_class = (be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff; ib_ah_attr->grh.flow_label = be32_to_cpu(path->tclass_flowlabel) & 0xfffff; memcpy(ib_ah_attr->grh.dgid.raw, path->rgid, sizeof ib_ah_attr->grh.dgid.raw); } } int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr) { struct mlx4_ib_dev *dev = to_mdev(ibqp->device); struct mlx4_ib_qp *qp = to_mqp(ibqp); struct mlx4_qp_context context; int mlx4_state; int err = 0; mutex_lock(&qp->mutex); if (qp->state == IB_QPS_RESET) { qp_attr->qp_state = IB_QPS_RESET; goto done; } err = mlx4_qp_query(dev->dev, &qp->mqp, &context); if (err) { err = -EINVAL; goto out; } mlx4_state = be32_to_cpu(context.flags) >> 28; qp->state = to_ib_qp_state(mlx4_state); qp_attr->qp_state = qp->state; qp_attr->path_mtu = context.mtu_msgmax >> 5; qp_attr->path_mig_state = to_ib_mig_state((be32_to_cpu(context.flags) >> 11) & 0x3); qp_attr->qkey = be32_to_cpu(context.qkey); qp_attr->rq_psn = be32_to_cpu(context.rnr_nextrecvpsn) & 0xffffff; qp_attr->sq_psn = be32_to_cpu(context.next_send_psn) & 0xffffff; qp_attr->dest_qp_num = be32_to_cpu(context.remote_qpn) & 0xffffff; qp_attr->qp_access_flags = to_ib_qp_access_flags(be32_to_cpu(context.params2)); if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) { to_ib_ah_attr(dev, &qp_attr->ah_attr, &context.pri_path); to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context.alt_path); qp_attr->alt_pkey_index = context.alt_path.pkey_index & 0x7f; qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num; } qp_attr->pkey_index = context.pri_path.pkey_index & 0x7f; if (qp_attr->qp_state == IB_QPS_INIT) qp_attr->port_num = qp->port; else qp_attr->port_num = context.pri_path.sched_queue & 0x40 ? 
2 : 1; /* qp_attr->en_sqd_async_notify is only applicable in modify qp */ qp_attr->sq_draining = mlx4_state == MLX4_QP_STATE_SQ_DRAINING; qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context.params1) >> 21) & 0x7); qp_attr->max_dest_rd_atomic = 1 << ((be32_to_cpu(context.params2) >> 21) & 0x7); qp_attr->min_rnr_timer = (be32_to_cpu(context.rnr_nextrecvpsn) >> 24) & 0x1f; qp_attr->timeout = context.pri_path.ackto >> 3; qp_attr->retry_cnt = (be32_to_cpu(context.params1) >> 16) & 0x7; qp_attr->rnr_retry = (be32_to_cpu(context.params1) >> 13) & 0x7; qp_attr->alt_timeout = context.alt_path.ackto >> 3; done: qp_attr->cur_qp_state = qp_attr->qp_state; qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt; qp_attr->cap.max_recv_sge = qp->rq.max_gs; if (!ibqp->uobject) { qp_attr->cap.max_send_wr = qp->sq.wqe_cnt; qp_attr->cap.max_send_sge = qp->sq.max_gs; } else { qp_attr->cap.max_send_wr = 0; qp_attr->cap.max_send_sge = 0; } /* * We don't support inline sends for kernel QPs (yet), and we * don't know what userspace's value should be. */ qp_attr->cap.max_inline_data = 0; qp_init_attr->cap = qp_attr->cap; qp_init_attr->create_flags = 0; if (qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK; if (qp->flags & MLX4_IB_QP_LSO) qp_init_attr->create_flags |= IB_QP_CREATE_IPOIB_UD_LSO; out: mutex_unlock(&qp->mutex); return err; }
gpl-2.0
VRToxin-AOSP/android_kernel_lge_bullhead
drivers/platform/x86/intel_mid_powerbtn.c
2150
3971
/* * Power button driver for Medfield. * * Copyright (C) 2010 Intel Corp * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/platform_device.h> #include <linux/input.h> #include <linux/mfd/intel_msic.h> #define DRIVER_NAME "msic_power_btn" #define MSIC_PB_LEVEL (1 << 3) /* 1 - release, 0 - press */ /* * MSIC document ti_datasheet defines the 1st bit reg 0x21 is used to mask * power button interrupt */ #define MSIC_PWRBTNM (1 << 0) static irqreturn_t mfld_pb_isr(int irq, void *dev_id) { struct input_dev *input = dev_id; int ret; u8 pbstat; ret = intel_msic_reg_read(INTEL_MSIC_PBSTATUS, &pbstat); dev_dbg(input->dev.parent, "PB_INT status= %d\n", pbstat); if (ret < 0) { dev_err(input->dev.parent, "Read error %d while reading" " MSIC_PB_STATUS\n", ret); } else { input_event(input, EV_KEY, KEY_POWER, !(pbstat & MSIC_PB_LEVEL)); input_sync(input); } return IRQ_HANDLED; } static int mfld_pb_probe(struct platform_device *pdev) { struct input_dev *input; int irq = platform_get_irq(pdev, 0); int error; if (irq < 0) return -EINVAL; input = input_allocate_device(); if (!input) { dev_err(&pdev->dev, "Input device allocation error\n"); return -ENOMEM; } input->name = pdev->name; input->phys = "power-button/input0"; input->id.bustype = BUS_HOST; input->dev.parent = 
&pdev->dev; input_set_capability(input, EV_KEY, KEY_POWER); error = request_threaded_irq(irq, NULL, mfld_pb_isr, IRQF_NO_SUSPEND, DRIVER_NAME, input); if (error) { dev_err(&pdev->dev, "Unable to request irq %d for mfld power" "button\n", irq); goto err_free_input; } error = input_register_device(input); if (error) { dev_err(&pdev->dev, "Unable to register input dev, error " "%d\n", error); goto err_free_irq; } platform_set_drvdata(pdev, input); /* * SCU firmware might send power button interrupts to IA core before * kernel boots and doesn't get EOI from IA core. The first bit of * MSIC reg 0x21 is kept masked, and SCU firmware doesn't send new * power interrupt to Android kernel. Unmask the bit when probing * power button in kernel. * There is a very narrow race between irq handler and power button * initialization. The race happens rarely. So we needn't worry * about it. */ error = intel_msic_reg_update(INTEL_MSIC_IRQLVL1MSK, 0, MSIC_PWRBTNM); if (error) { dev_err(&pdev->dev, "Unable to clear power button interrupt, " "error: %d\n", error); goto err_free_irq; } return 0; err_free_irq: free_irq(irq, input); err_free_input: input_free_device(input); return error; } static int mfld_pb_remove(struct platform_device *pdev) { struct input_dev *input = platform_get_drvdata(pdev); int irq = platform_get_irq(pdev, 0); free_irq(irq, input); input_unregister_device(input); platform_set_drvdata(pdev, NULL); return 0; } static struct platform_driver mfld_pb_driver = { .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, }, .probe = mfld_pb_probe, .remove = mfld_pb_remove, }; module_platform_driver(mfld_pb_driver); MODULE_AUTHOR("Hong Liu <hong.liu@intel.com>"); MODULE_DESCRIPTION("Intel Medfield Power Button Driver"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:" DRIVER_NAME);
gpl-2.0
Constellation/linux-3.10.17
drivers/mtd/nand/pxa3xx_nand.c
2150
35103
/* * drivers/mtd/nand/pxa3xx_nand.c * * Copyright © 2005 Intel Corporation * Copyright © 2006 Marvell International Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/clk.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/slab.h> #include <linux/of.h> #include <linux/of_device.h> #include <mach/dma.h> #include <linux/platform_data/mtd-nand-pxa3xx.h> #define CHIP_DELAY_TIMEOUT (2 * HZ/10) #define NAND_STOP_DELAY (2 * HZ/50) #define PAGE_CHUNK_SIZE (2048) /* registers and bit definitions */ #define NDCR (0x00) /* Control register */ #define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */ #define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */ #define NDSR (0x14) /* Status Register */ #define NDPCR (0x18) /* Page Count Register */ #define NDBDR0 (0x1C) /* Bad Block Register 0 */ #define NDBDR1 (0x20) /* Bad Block Register 1 */ #define NDDB (0x40) /* Data Buffer */ #define NDCB0 (0x48) /* Command Buffer0 */ #define NDCB1 (0x4C) /* Command Buffer1 */ #define NDCB2 (0x50) /* Command Buffer2 */ #define NDCR_SPARE_EN (0x1 << 31) #define NDCR_ECC_EN (0x1 << 30) #define NDCR_DMA_EN (0x1 << 29) #define NDCR_ND_RUN (0x1 << 28) #define NDCR_DWIDTH_C (0x1 << 27) #define NDCR_DWIDTH_M (0x1 << 26) #define NDCR_PAGE_SZ (0x1 << 24) #define NDCR_NCSX (0x1 << 23) #define NDCR_ND_MODE (0x3 << 21) #define NDCR_NAND_MODE (0x0) #define NDCR_CLR_PG_CNT (0x1 << 20) #define NDCR_STOP_ON_UNCOR (0x1 << 19) #define NDCR_RD_ID_CNT_MASK (0x7 << 16) #define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK) #define NDCR_RA_START (0x1 << 15) #define NDCR_PG_PER_BLK 
(0x1 << 14) #define NDCR_ND_ARB_EN (0x1 << 12) #define NDCR_INT_MASK (0xFFF) #define NDSR_MASK (0xfff) #define NDSR_RDY (0x1 << 12) #define NDSR_FLASH_RDY (0x1 << 11) #define NDSR_CS0_PAGED (0x1 << 10) #define NDSR_CS1_PAGED (0x1 << 9) #define NDSR_CS0_CMDD (0x1 << 8) #define NDSR_CS1_CMDD (0x1 << 7) #define NDSR_CS0_BBD (0x1 << 6) #define NDSR_CS1_BBD (0x1 << 5) #define NDSR_DBERR (0x1 << 4) #define NDSR_SBERR (0x1 << 3) #define NDSR_WRDREQ (0x1 << 2) #define NDSR_RDDREQ (0x1 << 1) #define NDSR_WRCMDREQ (0x1) #define NDCB0_ST_ROW_EN (0x1 << 26) #define NDCB0_AUTO_RS (0x1 << 25) #define NDCB0_CSEL (0x1 << 24) #define NDCB0_CMD_TYPE_MASK (0x7 << 21) #define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK) #define NDCB0_NC (0x1 << 20) #define NDCB0_DBC (0x1 << 19) #define NDCB0_ADDR_CYC_MASK (0x7 << 16) #define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK) #define NDCB0_CMD2_MASK (0xff << 8) #define NDCB0_CMD1_MASK (0xff) #define NDCB0_ADDR_CYC_SHIFT (16) /* macros for registers read/write */ #define nand_writel(info, off, val) \ __raw_writel((val), (info)->mmio_base + (off)) #define nand_readl(info, off) \ __raw_readl((info)->mmio_base + (off)) /* error code and state */ enum { ERR_NONE = 0, ERR_DMABUSERR = -1, ERR_SENDCMD = -2, ERR_DBERR = -3, ERR_BBERR = -4, ERR_SBERR = -5, }; enum { STATE_IDLE = 0, STATE_PREPARED, STATE_CMD_HANDLE, STATE_DMA_READING, STATE_DMA_WRITING, STATE_DMA_DONE, STATE_PIO_READING, STATE_PIO_WRITING, STATE_CMD_DONE, STATE_READY, }; struct pxa3xx_nand_host { struct nand_chip chip; struct pxa3xx_nand_cmdset *cmdset; struct mtd_info *mtd; void *info_data; /* page size of attached chip */ unsigned int page_size; int use_ecc; int cs; /* calculated from pxa3xx_nand_flash data */ unsigned int col_addr_cycles; unsigned int row_addr_cycles; size_t read_id_bytes; /* cached register value */ uint32_t reg_ndcr; uint32_t ndtr0cs0; uint32_t ndtr1cs0; }; struct pxa3xx_nand_info { struct nand_hw_control controller; struct platform_device *pdev; 
struct clk *clk; void __iomem *mmio_base; unsigned long mmio_phys; struct completion cmd_complete; unsigned int buf_start; unsigned int buf_count; /* DMA information */ int drcmr_dat; int drcmr_cmd; unsigned char *data_buff; unsigned char *oob_buff; dma_addr_t data_buff_phys; int data_dma_ch; struct pxa_dma_desc *data_desc; dma_addr_t data_desc_addr; struct pxa3xx_nand_host *host[NUM_CHIP_SELECT]; unsigned int state; int cs; int use_ecc; /* use HW ECC ? */ int use_dma; /* use DMA ? */ int is_ready; unsigned int page_size; /* page size of attached chip */ unsigned int data_size; /* data size in FIFO */ unsigned int oob_size; int retcode; /* generated NDCBx register values */ uint32_t ndcb0; uint32_t ndcb1; uint32_t ndcb2; }; static bool use_dma = 1; module_param(use_dma, bool, 0444); MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW"); /* * Default NAND flash controller configuration setup by the * bootloader. This configuration is used only when pdata->keep_config is set */ static struct pxa3xx_nand_cmdset default_cmdset = { .read1 = 0x3000, .read2 = 0x0050, .program = 0x1080, .read_status = 0x0070, .read_id = 0x0090, .erase = 0xD060, .reset = 0x00FF, .lock = 0x002A, .unlock = 0x2423, .lock_status = 0x007A, }; static struct pxa3xx_nand_timing timing[] = { { 40, 80, 60, 100, 80, 100, 90000, 400, 40, }, { 10, 0, 20, 40, 30, 40, 11123, 110, 10, }, { 10, 25, 15, 25, 15, 30, 25000, 60, 10, }, { 10, 35, 15, 25, 15, 25, 25000, 60, 10, }, }; static struct pxa3xx_nand_flash builtin_flash_types[] = { { "DEFAULT FLASH", 0, 0, 2048, 8, 8, 0, &timing[0] }, { "64MiB 16-bit", 0x46ec, 32, 512, 16, 16, 4096, &timing[1] }, { "256MiB 8-bit", 0xdaec, 64, 2048, 8, 8, 2048, &timing[1] }, { "4GiB 8-bit", 0xd7ec, 128, 4096, 8, 8, 8192, &timing[1] }, { "128MiB 8-bit", 0xa12c, 64, 2048, 8, 8, 1024, &timing[2] }, { "128MiB 16-bit", 0xb12c, 64, 2048, 16, 16, 1024, &timing[2] }, { "512MiB 8-bit", 0xdc2c, 64, 2048, 8, 8, 4096, &timing[2] }, { "512MiB 16-bit", 0xcc2c, 
64, 2048, 16, 16, 4096, &timing[2] }, { "256MiB 16-bit", 0xba20, 64, 2048, 16, 16, 2048, &timing[3] }, }; /* Define a default flash type setting serve as flash detecting only */ #define DEFAULT_FLASH_TYPE (&builtin_flash_types[0]) const char *mtd_names[] = {"pxa3xx_nand-0", "pxa3xx_nand-1", NULL}; #define NDTR0_tCH(c) (min((c), 7) << 19) #define NDTR0_tCS(c) (min((c), 7) << 16) #define NDTR0_tWH(c) (min((c), 7) << 11) #define NDTR0_tWP(c) (min((c), 7) << 8) #define NDTR0_tRH(c) (min((c), 7) << 3) #define NDTR0_tRP(c) (min((c), 7) << 0) #define NDTR1_tR(c) (min((c), 65535) << 16) #define NDTR1_tWHR(c) (min((c), 15) << 4) #define NDTR1_tAR(c) (min((c), 15) << 0) /* convert nano-seconds to nand flash controller clock cycles */ #define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000) static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host, const struct pxa3xx_nand_timing *t) { struct pxa3xx_nand_info *info = host->info_data; unsigned long nand_clk = clk_get_rate(info->clk); uint32_t ndtr0, ndtr1; ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) | NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) | NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) | NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) | NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) | NDTR0_tRP(ns2cycle(t->tRP, nand_clk)); ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) | NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) | NDTR1_tAR(ns2cycle(t->tAR, nand_clk)); host->ndtr0cs0 = ndtr0; host->ndtr1cs0 = ndtr1; nand_writel(info, NDTR0CS0, ndtr0); nand_writel(info, NDTR1CS0, ndtr1); } static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info) { struct pxa3xx_nand_host *host = info->host[info->cs]; int oob_enable = host->reg_ndcr & NDCR_SPARE_EN; info->data_size = host->page_size; if (!oob_enable) { info->oob_size = 0; return; } switch (host->page_size) { case 2048: info->oob_size = (info->use_ecc) ? 40 : 64; break; case 512: info->oob_size = (info->use_ecc) ? 
8 : 16; break; } } /** * NOTE: it is a must to set ND_RUN firstly, then write * command buffer, otherwise, it does not work. * We enable all the interrupt at the same time, and * let pxa3xx_nand_irq to handle all logic. */ static void pxa3xx_nand_start(struct pxa3xx_nand_info *info) { struct pxa3xx_nand_host *host = info->host[info->cs]; uint32_t ndcr; ndcr = host->reg_ndcr; ndcr |= info->use_ecc ? NDCR_ECC_EN : 0; ndcr |= info->use_dma ? NDCR_DMA_EN : 0; ndcr |= NDCR_ND_RUN; /* clear status bits and run */ nand_writel(info, NDCR, 0); nand_writel(info, NDSR, NDSR_MASK); nand_writel(info, NDCR, ndcr); } static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info) { uint32_t ndcr; int timeout = NAND_STOP_DELAY; /* wait RUN bit in NDCR become 0 */ ndcr = nand_readl(info, NDCR); while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) { ndcr = nand_readl(info, NDCR); udelay(1); } if (timeout <= 0) { ndcr &= ~NDCR_ND_RUN; nand_writel(info, NDCR, ndcr); } /* clear status bits */ nand_writel(info, NDSR, NDSR_MASK); } static void enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask) { uint32_t ndcr; ndcr = nand_readl(info, NDCR); nand_writel(info, NDCR, ndcr & ~int_mask); } static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask) { uint32_t ndcr; ndcr = nand_readl(info, NDCR); nand_writel(info, NDCR, ndcr | int_mask); } static void handle_data_pio(struct pxa3xx_nand_info *info) { switch (info->state) { case STATE_PIO_WRITING: __raw_writesl(info->mmio_base + NDDB, info->data_buff, DIV_ROUND_UP(info->data_size, 4)); if (info->oob_size > 0) __raw_writesl(info->mmio_base + NDDB, info->oob_buff, DIV_ROUND_UP(info->oob_size, 4)); break; case STATE_PIO_READING: __raw_readsl(info->mmio_base + NDDB, info->data_buff, DIV_ROUND_UP(info->data_size, 4)); if (info->oob_size > 0) __raw_readsl(info->mmio_base + NDDB, info->oob_buff, DIV_ROUND_UP(info->oob_size, 4)); break; default: dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__, info->state); BUG(); } } static 
void start_data_dma(struct pxa3xx_nand_info *info) { struct pxa_dma_desc *desc = info->data_desc; int dma_len = ALIGN(info->data_size + info->oob_size, 32); desc->ddadr = DDADR_STOP; desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len; switch (info->state) { case STATE_DMA_WRITING: desc->dsadr = info->data_buff_phys; desc->dtadr = info->mmio_phys + NDDB; desc->dcmd |= DCMD_INCSRCADDR | DCMD_FLOWTRG; break; case STATE_DMA_READING: desc->dtadr = info->data_buff_phys; desc->dsadr = info->mmio_phys + NDDB; desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC; break; default: dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__, info->state); BUG(); } DRCMR(info->drcmr_dat) = DRCMR_MAPVLD | info->data_dma_ch; DDADR(info->data_dma_ch) = info->data_desc_addr; DCSR(info->data_dma_ch) |= DCSR_RUN; } static void pxa3xx_nand_data_dma_irq(int channel, void *data) { struct pxa3xx_nand_info *info = data; uint32_t dcsr; dcsr = DCSR(channel); DCSR(channel) = dcsr; if (dcsr & DCSR_BUSERR) { info->retcode = ERR_DMABUSERR; } info->state = STATE_DMA_DONE; enable_int(info, NDCR_INT_MASK); nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ); } static irqreturn_t pxa3xx_nand_irq(int irq, void *devid) { struct pxa3xx_nand_info *info = devid; unsigned int status, is_completed = 0; unsigned int ready, cmd_done; if (info->cs == 0) { ready = NDSR_FLASH_RDY; cmd_done = NDSR_CS0_CMDD; } else { ready = NDSR_RDY; cmd_done = NDSR_CS1_CMDD; } status = nand_readl(info, NDSR); if (status & NDSR_DBERR) info->retcode = ERR_DBERR; if (status & NDSR_SBERR) info->retcode = ERR_SBERR; if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) { /* whether use dma to transfer data */ if (info->use_dma) { disable_int(info, NDCR_INT_MASK); info->state = (status & NDSR_RDDREQ) ? STATE_DMA_READING : STATE_DMA_WRITING; start_data_dma(info); goto NORMAL_IRQ_EXIT; } else { info->state = (status & NDSR_RDDREQ) ? 
STATE_PIO_READING : STATE_PIO_WRITING; handle_data_pio(info); } } if (status & cmd_done) { info->state = STATE_CMD_DONE; is_completed = 1; } if (status & ready) { info->is_ready = 1; info->state = STATE_READY; } if (status & NDSR_WRCMDREQ) { nand_writel(info, NDSR, NDSR_WRCMDREQ); status &= ~NDSR_WRCMDREQ; info->state = STATE_CMD_HANDLE; nand_writel(info, NDCB0, info->ndcb0); nand_writel(info, NDCB0, info->ndcb1); nand_writel(info, NDCB0, info->ndcb2); } /* clear NDSR to let the controller exit the IRQ */ nand_writel(info, NDSR, status); if (is_completed) complete(&info->cmd_complete); NORMAL_IRQ_EXIT: return IRQ_HANDLED; } static inline int is_buf_blank(uint8_t *buf, size_t len) { for (; len > 0; len--) if (*buf++ != 0xff) return 0; return 1; } static int prepare_command_pool(struct pxa3xx_nand_info *info, int command, uint16_t column, int page_addr) { uint16_t cmd; int addr_cycle, exec_cmd; struct pxa3xx_nand_host *host; struct mtd_info *mtd; host = info->host[info->cs]; mtd = host->mtd; addr_cycle = 0; exec_cmd = 1; /* reset data and oob column point to handle data */ info->buf_start = 0; info->buf_count = 0; info->oob_size = 0; info->use_ecc = 0; info->is_ready = 0; info->retcode = ERR_NONE; if (info->cs != 0) info->ndcb0 = NDCB0_CSEL; else info->ndcb0 = 0; switch (command) { case NAND_CMD_READ0: case NAND_CMD_PAGEPROG: info->use_ecc = 1; case NAND_CMD_READOOB: pxa3xx_set_datasize(info); break; case NAND_CMD_SEQIN: exec_cmd = 0; break; default: info->ndcb1 = 0; info->ndcb2 = 0; break; } addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles + host->col_addr_cycles); switch (command) { case NAND_CMD_READOOB: case NAND_CMD_READ0: cmd = host->cmdset->read1; if (command == NAND_CMD_READOOB) info->buf_start = mtd->writesize + column; else info->buf_start = column; if (unlikely(host->page_size < PAGE_CHUNK_SIZE)) info->ndcb0 |= NDCB0_CMD_TYPE(0) | addr_cycle | (cmd & NDCB0_CMD1_MASK); else info->ndcb0 |= NDCB0_CMD_TYPE(0) | NDCB0_DBC | addr_cycle | cmd; case 
NAND_CMD_SEQIN: /* small page addr setting */ if (unlikely(host->page_size < PAGE_CHUNK_SIZE)) { info->ndcb1 = ((page_addr & 0xFFFFFF) << 8) | (column & 0xFF); info->ndcb2 = 0; } else { info->ndcb1 = ((page_addr & 0xFFFF) << 16) | (column & 0xFFFF); if (page_addr & 0xFF0000) info->ndcb2 = (page_addr & 0xFF0000) >> 16; else info->ndcb2 = 0; } info->buf_count = mtd->writesize + mtd->oobsize; memset(info->data_buff, 0xFF, info->buf_count); break; case NAND_CMD_PAGEPROG: if (is_buf_blank(info->data_buff, (mtd->writesize + mtd->oobsize))) { exec_cmd = 0; break; } cmd = host->cmdset->program; info->ndcb0 |= NDCB0_CMD_TYPE(0x1) | NDCB0_AUTO_RS | NDCB0_ST_ROW_EN | NDCB0_DBC | cmd | addr_cycle; break; case NAND_CMD_READID: cmd = host->cmdset->read_id; info->buf_count = host->read_id_bytes; info->ndcb0 |= NDCB0_CMD_TYPE(3) | NDCB0_ADDR_CYC(1) | cmd; info->data_size = 8; break; case NAND_CMD_STATUS: cmd = host->cmdset->read_status; info->buf_count = 1; info->ndcb0 |= NDCB0_CMD_TYPE(4) | NDCB0_ADDR_CYC(1) | cmd; info->data_size = 8; break; case NAND_CMD_ERASE1: cmd = host->cmdset->erase; info->ndcb0 |= NDCB0_CMD_TYPE(2) | NDCB0_AUTO_RS | NDCB0_ADDR_CYC(3) | NDCB0_DBC | cmd; info->ndcb1 = page_addr; info->ndcb2 = 0; break; case NAND_CMD_RESET: cmd = host->cmdset->reset; info->ndcb0 |= NDCB0_CMD_TYPE(5) | cmd; break; case NAND_CMD_ERASE2: exec_cmd = 0; break; default: exec_cmd = 0; dev_err(&info->pdev->dev, "non-supported command %x\n", command); break; } return exec_cmd; } static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command, int column, int page_addr) { struct pxa3xx_nand_host *host = mtd->priv; struct pxa3xx_nand_info *info = host->info_data; int ret, exec_cmd; /* * if this is a x16 device ,then convert the input * "byte" address into a "word" address appropriate * for indexing a word-oriented device */ if (host->reg_ndcr & NDCR_DWIDTH_M) column /= 2; /* * There may be different NAND chip hooked to * different chip select, so check whether * chip select has 
been changed, if yes, reset the timing */ if (info->cs != host->cs) { info->cs = host->cs; nand_writel(info, NDTR0CS0, host->ndtr0cs0); nand_writel(info, NDTR1CS0, host->ndtr1cs0); } info->state = STATE_PREPARED; exec_cmd = prepare_command_pool(info, command, column, page_addr); if (exec_cmd) { init_completion(&info->cmd_complete); pxa3xx_nand_start(info); ret = wait_for_completion_timeout(&info->cmd_complete, CHIP_DELAY_TIMEOUT); if (!ret) { dev_err(&info->pdev->dev, "Wait time out!!!\n"); /* Stop State Machine for next command cycle */ pxa3xx_nand_stop(info); } } info->state = STATE_IDLE; } static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, const uint8_t *buf, int oob_required) { chip->write_buf(mtd, buf, mtd->writesize); chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); return 0; } static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, uint8_t *buf, int oob_required, int page) { struct pxa3xx_nand_host *host = mtd->priv; struct pxa3xx_nand_info *info = host->info_data; chip->read_buf(mtd, buf, mtd->writesize); chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); if (info->retcode == ERR_SBERR) { switch (info->use_ecc) { case 1: mtd->ecc_stats.corrected++; break; case 0: default: break; } } else if (info->retcode == ERR_DBERR) { /* * for blank page (all 0xff), HW will calculate its ECC as * 0, which is different from the ECC information within * OOB, ignore such double bit errors */ if (is_buf_blank(buf, mtd->writesize)) info->retcode = ERR_NONE; else mtd->ecc_stats.failed++; } return 0; } static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd) { struct pxa3xx_nand_host *host = mtd->priv; struct pxa3xx_nand_info *info = host->info_data; char retval = 0xFF; if (info->buf_start < info->buf_count) /* Has just send a new command? 
*/ retval = info->data_buff[info->buf_start++]; return retval; } static u16 pxa3xx_nand_read_word(struct mtd_info *mtd) { struct pxa3xx_nand_host *host = mtd->priv; struct pxa3xx_nand_info *info = host->info_data; u16 retval = 0xFFFF; if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) { retval = *((u16 *)(info->data_buff+info->buf_start)); info->buf_start += 2; } return retval; } static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) { struct pxa3xx_nand_host *host = mtd->priv; struct pxa3xx_nand_info *info = host->info_data; int real_len = min_t(size_t, len, info->buf_count - info->buf_start); memcpy(buf, info->data_buff + info->buf_start, real_len); info->buf_start += real_len; } static void pxa3xx_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) { struct pxa3xx_nand_host *host = mtd->priv; struct pxa3xx_nand_info *info = host->info_data; int real_len = min_t(size_t, len, info->buf_count - info->buf_start); memcpy(info->data_buff + info->buf_start, buf, real_len); info->buf_start += real_len; } static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip) { return; } static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this) { struct pxa3xx_nand_host *host = mtd->priv; struct pxa3xx_nand_info *info = host->info_data; /* pxa3xx_nand_send_command has waited for command complete */ if (this->state == FL_WRITING || this->state == FL_ERASING) { if (info->retcode == ERR_NONE) return 0; else { /* * any error make it return 0x01 which will tell * the caller the erase and write fail */ return 0x01; } } return 0; } static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info, const struct pxa3xx_nand_flash *f) { struct platform_device *pdev = info->pdev; struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data; struct pxa3xx_nand_host *host = info->host[info->cs]; uint32_t ndcr = 0x0; /* enable all interrupts */ if (f->page_size != 2048 && f->page_size != 512) { 
dev_err(&pdev->dev, "Current only support 2048 and 512 size\n"); return -EINVAL; } if (f->flash_width != 16 && f->flash_width != 8) { dev_err(&pdev->dev, "Only support 8bit and 16 bit!\n"); return -EINVAL; } /* calculate flash information */ host->cmdset = &default_cmdset; host->page_size = f->page_size; host->read_id_bytes = (f->page_size == 2048) ? 4 : 2; /* calculate addressing information */ host->col_addr_cycles = (f->page_size == 2048) ? 2 : 1; if (f->num_blocks * f->page_per_block > 65536) host->row_addr_cycles = 3; else host->row_addr_cycles = 2; ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0; ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0; ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0; ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0; ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0; ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0; ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes); ndcr |= NDCR_SPARE_EN; /* enable spare by default */ host->reg_ndcr = ndcr; pxa3xx_nand_set_timing(host, f->timing); return 0; } static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info) { /* * We set 0 by hard coding here, for we don't support keep_config * when there is more than one chip attached to the controller */ struct pxa3xx_nand_host *host = info->host[0]; uint32_t ndcr = nand_readl(info, NDCR); if (ndcr & NDCR_PAGE_SZ) { host->page_size = 2048; host->read_id_bytes = 4; } else { host->page_size = 512; host->read_id_bytes = 2; } host->reg_ndcr = ndcr & ~NDCR_INT_MASK; host->cmdset = &default_cmdset; host->ndtr0cs0 = nand_readl(info, NDTR0CS0); host->ndtr1cs0 = nand_readl(info, NDTR1CS0); return 0; } /* the maximum possible buffer size for large page with OOB data * is: 2048 + 64 = 2112 bytes, allocate a page here for both the * data buffer and the DMA descriptor */ #define MAX_BUFF_SIZE PAGE_SIZE static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info) { struct platform_device *pdev = info->pdev; int data_desc_offset = 
MAX_BUFF_SIZE - sizeof(struct pxa_dma_desc); if (use_dma == 0) { info->data_buff = kmalloc(MAX_BUFF_SIZE, GFP_KERNEL); if (info->data_buff == NULL) return -ENOMEM; return 0; } info->data_buff = dma_alloc_coherent(&pdev->dev, MAX_BUFF_SIZE, &info->data_buff_phys, GFP_KERNEL); if (info->data_buff == NULL) { dev_err(&pdev->dev, "failed to allocate dma buffer\n"); return -ENOMEM; } info->data_desc = (void *)info->data_buff + data_desc_offset; info->data_desc_addr = info->data_buff_phys + data_desc_offset; info->data_dma_ch = pxa_request_dma("nand-data", DMA_PRIO_LOW, pxa3xx_nand_data_dma_irq, info); if (info->data_dma_ch < 0) { dev_err(&pdev->dev, "failed to request data dma\n"); dma_free_coherent(&pdev->dev, MAX_BUFF_SIZE, info->data_buff, info->data_buff_phys); return info->data_dma_ch; } return 0; } static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info) { struct mtd_info *mtd; int ret; mtd = info->host[info->cs]->mtd; /* use the common timing to make a try */ ret = pxa3xx_nand_config_flash(info, &builtin_flash_types[0]); if (ret) return ret; pxa3xx_nand_cmdfunc(mtd, NAND_CMD_RESET, 0, 0); if (info->is_ready) return 0; return -ENODEV; } static int pxa3xx_nand_scan(struct mtd_info *mtd) { struct pxa3xx_nand_host *host = mtd->priv; struct pxa3xx_nand_info *info = host->info_data; struct platform_device *pdev = info->pdev; struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data; struct nand_flash_dev pxa3xx_flash_ids[2], *def = NULL; const struct pxa3xx_nand_flash *f = NULL; struct nand_chip *chip = mtd->priv; uint32_t id = -1; uint64_t chipsize; int i, ret, num; if (pdata->keep_config && !pxa3xx_nand_detect_config(info)) goto KEEP_CONFIG; ret = pxa3xx_nand_sensing(info); if (ret) { dev_info(&info->pdev->dev, "There is no chip on cs %d!\n", info->cs); return ret; } chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0); id = *((uint16_t *)(info->data_buff)); if (id != 0) dev_info(&info->pdev->dev, "Detect a flash id %x\n", id); else { dev_warn(&info->pdev->dev, 
"Read out ID 0, potential timing set wrong!!\n"); return -EINVAL; } num = ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1; for (i = 0; i < num; i++) { if (i < pdata->num_flash) f = pdata->flash + i; else f = &builtin_flash_types[i - pdata->num_flash + 1]; /* find the chip in default list */ if (f->chip_id == id) break; } if (i >= (ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1)) { dev_err(&info->pdev->dev, "ERROR!! flash not defined!!!\n"); return -EINVAL; } ret = pxa3xx_nand_config_flash(info, f); if (ret) { dev_err(&info->pdev->dev, "ERROR! Configure failed\n"); return ret; } pxa3xx_flash_ids[0].name = f->name; pxa3xx_flash_ids[0].dev_id = (f->chip_id >> 8) & 0xffff; pxa3xx_flash_ids[0].pagesize = f->page_size; chipsize = (uint64_t)f->num_blocks * f->page_per_block * f->page_size; pxa3xx_flash_ids[0].chipsize = chipsize >> 20; pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block; if (f->flash_width == 16) pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16; pxa3xx_flash_ids[1].name = NULL; def = pxa3xx_flash_ids; KEEP_CONFIG: chip->ecc.mode = NAND_ECC_HW; chip->ecc.size = host->page_size; chip->ecc.strength = 1; if (host->reg_ndcr & NDCR_DWIDTH_M) chip->options |= NAND_BUSWIDTH_16; if (nand_scan_ident(mtd, 1, def)) return -ENODEV; /* calculate addressing information */ if (mtd->writesize >= 2048) host->col_addr_cycles = 2; else host->col_addr_cycles = 1; info->oob_buff = info->data_buff + mtd->writesize; if ((mtd->size >> chip->page_shift) > 65536) host->row_addr_cycles = 3; else host->row_addr_cycles = 2; mtd->name = mtd_names[0]; return nand_scan_tail(mtd); } static int alloc_nand_resource(struct platform_device *pdev) { struct pxa3xx_nand_platform_data *pdata; struct pxa3xx_nand_info *info; struct pxa3xx_nand_host *host; struct nand_chip *chip = NULL; struct mtd_info *mtd; struct resource *r; int ret, irq, cs; pdata = pdev->dev.platform_data; info = kzalloc(sizeof(*info) + (sizeof(*mtd) + sizeof(*host)) * pdata->num_cs, GFP_KERNEL); if 
(!info) { dev_err(&pdev->dev, "failed to allocate memory\n"); return -ENOMEM; } info->pdev = pdev; for (cs = 0; cs < pdata->num_cs; cs++) { mtd = (struct mtd_info *)((unsigned int)&info[1] + (sizeof(*mtd) + sizeof(*host)) * cs); chip = (struct nand_chip *)(&mtd[1]); host = (struct pxa3xx_nand_host *)chip; info->host[cs] = host; host->mtd = mtd; host->cs = cs; host->info_data = info; mtd->priv = host; mtd->owner = THIS_MODULE; chip->ecc.read_page = pxa3xx_nand_read_page_hwecc; chip->ecc.write_page = pxa3xx_nand_write_page_hwecc; chip->controller = &info->controller; chip->waitfunc = pxa3xx_nand_waitfunc; chip->select_chip = pxa3xx_nand_select_chip; chip->cmdfunc = pxa3xx_nand_cmdfunc; chip->read_word = pxa3xx_nand_read_word; chip->read_byte = pxa3xx_nand_read_byte; chip->read_buf = pxa3xx_nand_read_buf; chip->write_buf = pxa3xx_nand_write_buf; } spin_lock_init(&chip->controller->lock); init_waitqueue_head(&chip->controller->wq); info->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(info->clk)) { dev_err(&pdev->dev, "failed to get nand clock\n"); ret = PTR_ERR(info->clk); goto fail_free_mtd; } clk_enable(info->clk); /* * This is a dirty hack to make this driver work from devicetree * bindings. It can be removed once we have a prober DMA controller * framework for DT. 
*/ if (pdev->dev.of_node && cpu_is_pxa3xx()) { info->drcmr_dat = 97; info->drcmr_cmd = 99; } else { r = platform_get_resource(pdev, IORESOURCE_DMA, 0); if (r == NULL) { dev_err(&pdev->dev, "no resource defined for data DMA\n"); ret = -ENXIO; goto fail_put_clk; } info->drcmr_dat = r->start; r = platform_get_resource(pdev, IORESOURCE_DMA, 1); if (r == NULL) { dev_err(&pdev->dev, "no resource defined for command DMA\n"); ret = -ENXIO; goto fail_put_clk; } info->drcmr_cmd = r->start; } irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(&pdev->dev, "no IRQ resource defined\n"); ret = -ENXIO; goto fail_put_clk; } r = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (r == NULL) { dev_err(&pdev->dev, "no IO memory resource defined\n"); ret = -ENODEV; goto fail_put_clk; } r = request_mem_region(r->start, resource_size(r), pdev->name); if (r == NULL) { dev_err(&pdev->dev, "failed to request memory resource\n"); ret = -EBUSY; goto fail_put_clk; } info->mmio_base = ioremap(r->start, resource_size(r)); if (info->mmio_base == NULL) { dev_err(&pdev->dev, "ioremap() failed\n"); ret = -ENODEV; goto fail_free_res; } info->mmio_phys = r->start; ret = pxa3xx_nand_init_buff(info); if (ret) goto fail_free_io; /* initialize all interrupts to be disabled */ disable_int(info, NDSR_MASK); ret = request_irq(irq, pxa3xx_nand_irq, IRQF_DISABLED, pdev->name, info); if (ret < 0) { dev_err(&pdev->dev, "failed to request IRQ\n"); goto fail_free_buf; } platform_set_drvdata(pdev, info); return 0; fail_free_buf: free_irq(irq, info); if (use_dma) { pxa_free_dma(info->data_dma_ch); dma_free_coherent(&pdev->dev, MAX_BUFF_SIZE, info->data_buff, info->data_buff_phys); } else kfree(info->data_buff); fail_free_io: iounmap(info->mmio_base); fail_free_res: release_mem_region(r->start, resource_size(r)); fail_put_clk: clk_disable(info->clk); clk_put(info->clk); fail_free_mtd: kfree(info); return ret; } static int pxa3xx_nand_remove(struct platform_device *pdev) { struct pxa3xx_nand_info *info = 
platform_get_drvdata(pdev); struct pxa3xx_nand_platform_data *pdata; struct resource *r; int irq, cs; if (!info) return 0; pdata = pdev->dev.platform_data; platform_set_drvdata(pdev, NULL); irq = platform_get_irq(pdev, 0); if (irq >= 0) free_irq(irq, info); if (use_dma) { pxa_free_dma(info->data_dma_ch); dma_free_writecombine(&pdev->dev, MAX_BUFF_SIZE, info->data_buff, info->data_buff_phys); } else kfree(info->data_buff); iounmap(info->mmio_base); r = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(r->start, resource_size(r)); clk_disable(info->clk); clk_put(info->clk); for (cs = 0; cs < pdata->num_cs; cs++) nand_release(info->host[cs]->mtd); kfree(info); return 0; } #ifdef CONFIG_OF static struct of_device_id pxa3xx_nand_dt_ids[] = { { .compatible = "marvell,pxa3xx-nand" }, {} }; MODULE_DEVICE_TABLE(of, i2c_pxa_dt_ids); static int pxa3xx_nand_probe_dt(struct platform_device *pdev) { struct pxa3xx_nand_platform_data *pdata; struct device_node *np = pdev->dev.of_node; const struct of_device_id *of_id = of_match_device(pxa3xx_nand_dt_ids, &pdev->dev); if (!of_id) return 0; pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) return -ENOMEM; if (of_get_property(np, "marvell,nand-enable-arbiter", NULL)) pdata->enable_arbiter = 1; if (of_get_property(np, "marvell,nand-keep-config", NULL)) pdata->keep_config = 1; of_property_read_u32(np, "num-cs", &pdata->num_cs); pdev->dev.platform_data = pdata; return 0; } #else static inline int pxa3xx_nand_probe_dt(struct platform_device *pdev) { return 0; } #endif static int pxa3xx_nand_probe(struct platform_device *pdev) { struct pxa3xx_nand_platform_data *pdata; struct mtd_part_parser_data ppdata = {}; struct pxa3xx_nand_info *info; int ret, cs, probe_success; ret = pxa3xx_nand_probe_dt(pdev); if (ret) return ret; pdata = pdev->dev.platform_data; if (!pdata) { dev_err(&pdev->dev, "no platform data defined\n"); return -ENODEV; } ret = alloc_nand_resource(pdev); if (ret) { dev_err(&pdev->dev, 
"alloc nand resource failed\n"); return ret; } info = platform_get_drvdata(pdev); probe_success = 0; for (cs = 0; cs < pdata->num_cs; cs++) { info->cs = cs; ret = pxa3xx_nand_scan(info->host[cs]->mtd); if (ret) { dev_warn(&pdev->dev, "failed to scan nand at cs %d\n", cs); continue; } ppdata.of_node = pdev->dev.of_node; ret = mtd_device_parse_register(info->host[cs]->mtd, NULL, &ppdata, pdata->parts[cs], pdata->nr_parts[cs]); if (!ret) probe_success = 1; } if (!probe_success) { pxa3xx_nand_remove(pdev); return -ENODEV; } return 0; } #ifdef CONFIG_PM static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state) { struct pxa3xx_nand_info *info = platform_get_drvdata(pdev); struct pxa3xx_nand_platform_data *pdata; struct mtd_info *mtd; int cs; pdata = pdev->dev.platform_data; if (info->state) { dev_err(&pdev->dev, "driver busy, state = %d\n", info->state); return -EAGAIN; } for (cs = 0; cs < pdata->num_cs; cs++) { mtd = info->host[cs]->mtd; mtd_suspend(mtd); } return 0; } static int pxa3xx_nand_resume(struct platform_device *pdev) { struct pxa3xx_nand_info *info = platform_get_drvdata(pdev); struct pxa3xx_nand_platform_data *pdata; struct mtd_info *mtd; int cs; pdata = pdev->dev.platform_data; /* We don't want to handle interrupt without calling mtd routine */ disable_int(info, NDCR_INT_MASK); /* * Directly set the chip select to a invalid value, * then the driver would reset the timing according * to current chip select at the beginning of cmdfunc */ info->cs = 0xff; /* * As the spec says, the NDSR would be updated to 0x1800 when * doing the nand_clk disable/enable. 
* To prevent it damaging state machine of the driver, clear * all status before resume */ nand_writel(info, NDSR, NDSR_MASK); for (cs = 0; cs < pdata->num_cs; cs++) { mtd = info->host[cs]->mtd; mtd_resume(mtd); } return 0; } #else #define pxa3xx_nand_suspend NULL #define pxa3xx_nand_resume NULL #endif static struct platform_driver pxa3xx_nand_driver = { .driver = { .name = "pxa3xx-nand", .of_match_table = of_match_ptr(pxa3xx_nand_dt_ids), }, .probe = pxa3xx_nand_probe, .remove = pxa3xx_nand_remove, .suspend = pxa3xx_nand_suspend, .resume = pxa3xx_nand_resume, }; module_platform_driver(pxa3xx_nand_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("PXA3xx NAND controller driver");
gpl-2.0
uDude/linux
drivers/hid/hid-prodikeys.c
2662
21506
/* * HID driver for the Prodikeys PC-MIDI Keyboard * providing midi & extra multimedia keys functionality * * Copyright (c) 2009 Don Prince <dhprince.devel@yahoo.co.uk> * * Controls for Octave Shift Up/Down, Channel, and * Sustain Duration available via sysfs. * */ /* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/device.h> #include <linux/module.h> #include <linux/usb.h> #include <linux/mutex.h> #include <linux/hid.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/rawmidi.h> #include "usbhid/usbhid.h" #include "hid-ids.h" #define pk_debug(format, arg...) \ pr_debug("hid-prodikeys: " format "\n" , ## arg) #define pk_error(format, arg...) \ pr_err("hid-prodikeys: " format "\n" , ## arg) struct pcmidi_snd; struct pk_device { unsigned long quirks; struct hid_device *hdev; struct pcmidi_snd *pm; /* pcmidi device context */ }; struct pcmidi_snd; struct pcmidi_sustain { unsigned long in_use; struct pcmidi_snd *pm; struct timer_list timer; unsigned char status; unsigned char note; unsigned char velocity; }; #define PCMIDI_SUSTAINED_MAX 32 struct pcmidi_snd { struct pk_device *pk; unsigned short ifnum; struct hid_report *pcmidi_report6; struct input_dev *input_ep82; unsigned short midi_mode; unsigned short midi_sustain_mode; unsigned short midi_sustain; unsigned short midi_channel; short midi_octave; struct pcmidi_sustain sustained_notes[PCMIDI_SUSTAINED_MAX]; unsigned short fn_state; unsigned short last_key[24]; spinlock_t rawmidi_in_lock; struct snd_card *card; struct snd_rawmidi *rwmidi; struct snd_rawmidi_substream *in_substream; struct snd_rawmidi_substream *out_substream; unsigned long in_triggered; unsigned long out_active; }; #define PK_QUIRK_NOGET 0x00010000 #define PCMIDI_MIDDLE_C 60 
#define PCMIDI_CHANNEL_MIN 0 #define PCMIDI_CHANNEL_MAX 15 #define PCMIDI_OCTAVE_MIN (-2) #define PCMIDI_OCTAVE_MAX 2 #define PCMIDI_SUSTAIN_MIN 0 #define PCMIDI_SUSTAIN_MAX 5000 static const char shortname[] = "PC-MIDI"; static const char longname[] = "Prodikeys PC-MIDI Keyboard"; static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; module_param_array(index, int, NULL, 0444); module_param_array(id, charp, NULL, 0444); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(index, "Index value for the PC-MIDI virtual audio driver"); MODULE_PARM_DESC(id, "ID string for the PC-MIDI virtual audio driver"); MODULE_PARM_DESC(enable, "Enable for the PC-MIDI virtual audio driver"); /* Output routine for the sysfs channel file */ static ssize_t show_channel(struct device *dev, struct device_attribute *attr, char *buf) { struct hid_device *hdev = container_of(dev, struct hid_device, dev); struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev); dbg_hid("pcmidi sysfs read channel=%u\n", pk->pm->midi_channel); return sprintf(buf, "%u (min:%u, max:%u)\n", pk->pm->midi_channel, PCMIDI_CHANNEL_MIN, PCMIDI_CHANNEL_MAX); } /* Input routine for the sysfs channel file */ static ssize_t store_channel(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct hid_device *hdev = container_of(dev, struct hid_device, dev); struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev); unsigned channel = 0; if (sscanf(buf, "%u", &channel) > 0 && channel <= PCMIDI_CHANNEL_MAX) { dbg_hid("pcmidi sysfs write channel=%u\n", channel); pk->pm->midi_channel = channel; return strlen(buf); } return -EINVAL; } static DEVICE_ATTR(channel, S_IRUGO | S_IWUSR | S_IWGRP , show_channel, store_channel); static struct device_attribute *sysfs_device_attr_channel = { &dev_attr_channel, }; /* Output routine for the sysfs sustain file */ static ssize_t 
show_sustain(struct device *dev, struct device_attribute *attr, char *buf) { struct hid_device *hdev = container_of(dev, struct hid_device, dev); struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev); dbg_hid("pcmidi sysfs read sustain=%u\n", pk->pm->midi_sustain); return sprintf(buf, "%u (off:%u, max:%u (ms))\n", pk->pm->midi_sustain, PCMIDI_SUSTAIN_MIN, PCMIDI_SUSTAIN_MAX); } /* Input routine for the sysfs sustain file */ static ssize_t store_sustain(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct hid_device *hdev = container_of(dev, struct hid_device, dev); struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev); unsigned sustain = 0; if (sscanf(buf, "%u", &sustain) > 0 && sustain <= PCMIDI_SUSTAIN_MAX) { dbg_hid("pcmidi sysfs write sustain=%u\n", sustain); pk->pm->midi_sustain = sustain; pk->pm->midi_sustain_mode = (0 == sustain || !pk->pm->midi_mode) ? 0 : 1; return strlen(buf); } return -EINVAL; } static DEVICE_ATTR(sustain, S_IRUGO | S_IWUSR | S_IWGRP, show_sustain, store_sustain); static struct device_attribute *sysfs_device_attr_sustain = { &dev_attr_sustain, }; /* Output routine for the sysfs octave file */ static ssize_t show_octave(struct device *dev, struct device_attribute *attr, char *buf) { struct hid_device *hdev = container_of(dev, struct hid_device, dev); struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev); dbg_hid("pcmidi sysfs read octave=%d\n", pk->pm->midi_octave); return sprintf(buf, "%d (min:%d, max:%d)\n", pk->pm->midi_octave, PCMIDI_OCTAVE_MIN, PCMIDI_OCTAVE_MAX); } /* Input routine for the sysfs octave file */ static ssize_t store_octave(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct hid_device *hdev = container_of(dev, struct hid_device, dev); struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev); int octave = 0; if (sscanf(buf, "%d", &octave) > 0 && octave >= PCMIDI_OCTAVE_MIN && octave <= 
PCMIDI_OCTAVE_MAX) { dbg_hid("pcmidi sysfs write octave=%d\n", octave); pk->pm->midi_octave = octave; return strlen(buf); } return -EINVAL; } static DEVICE_ATTR(octave, S_IRUGO | S_IWUSR | S_IWGRP, show_octave, store_octave); static struct device_attribute *sysfs_device_attr_octave = { &dev_attr_octave, }; static void pcmidi_send_note(struct pcmidi_snd *pm, unsigned char status, unsigned char note, unsigned char velocity) { unsigned long flags; unsigned char buffer[3]; buffer[0] = status; buffer[1] = note; buffer[2] = velocity; spin_lock_irqsave(&pm->rawmidi_in_lock, flags); if (!pm->in_substream) goto drop_note; if (!test_bit(pm->in_substream->number, &pm->in_triggered)) goto drop_note; snd_rawmidi_receive(pm->in_substream, buffer, 3); drop_note: spin_unlock_irqrestore(&pm->rawmidi_in_lock, flags); return; } void pcmidi_sustained_note_release(unsigned long data) { struct pcmidi_sustain *pms = (struct pcmidi_sustain *)data; pcmidi_send_note(pms->pm, pms->status, pms->note, pms->velocity); pms->in_use = 0; } void init_sustain_timers(struct pcmidi_snd *pm) { struct pcmidi_sustain *pms; unsigned i; for (i = 0; i < PCMIDI_SUSTAINED_MAX; i++) { pms = &pm->sustained_notes[i]; pms->in_use = 0; pms->pm = pm; setup_timer(&pms->timer, pcmidi_sustained_note_release, (unsigned long)pms); } } void stop_sustain_timers(struct pcmidi_snd *pm) { struct pcmidi_sustain *pms; unsigned i; for (i = 0; i < PCMIDI_SUSTAINED_MAX; i++) { pms = &pm->sustained_notes[i]; pms->in_use = 1; del_timer_sync(&pms->timer); } } static int pcmidi_get_output_report(struct pcmidi_snd *pm) { struct hid_device *hdev = pm->pk->hdev; struct hid_report *report; list_for_each_entry(report, &hdev->report_enum[HID_OUTPUT_REPORT].report_list, list) { if (!(6 == report->id)) continue; if (report->maxfield < 1) { hid_err(hdev, "output report is empty\n"); break; } if (report->field[0]->report_count != 2) { hid_err(hdev, "field count too low\n"); break; } pm->pcmidi_report6 = report; return 0; } /* should never get 
here */ return -ENODEV; } static void pcmidi_submit_output_report(struct pcmidi_snd *pm, int state) { struct hid_device *hdev = pm->pk->hdev; struct hid_report *report = pm->pcmidi_report6; report->field[0]->value[0] = 0x01; report->field[0]->value[1] = state; usbhid_submit_report(hdev, report, USB_DIR_OUT); } static int pcmidi_handle_report1(struct pcmidi_snd *pm, u8 *data) { u32 bit_mask; bit_mask = data[1]; bit_mask = (bit_mask << 8) | data[2]; bit_mask = (bit_mask << 8) | data[3]; dbg_hid("pcmidi mode: %d\n", pm->midi_mode); /*KEY_MAIL or octave down*/ if (pm->midi_mode && bit_mask == 0x004000) { /* octave down */ pm->midi_octave--; if (pm->midi_octave < -2) pm->midi_octave = -2; dbg_hid("pcmidi mode: %d octave: %d\n", pm->midi_mode, pm->midi_octave); return 1; } /*KEY_WWW or sustain*/ else if (pm->midi_mode && bit_mask == 0x000004) { /* sustain on/off*/ pm->midi_sustain_mode ^= 0x1; return 1; } return 0; /* continue key processing */ } static int pcmidi_handle_report3(struct pcmidi_snd *pm, u8 *data, int size) { struct pcmidi_sustain *pms; unsigned i, j; unsigned char status, note, velocity; unsigned num_notes = (size-1)/2; for (j = 0; j < num_notes; j++) { note = data[j*2+1]; velocity = data[j*2+2]; if (note < 0x81) { /* note on */ status = 128 + 16 + pm->midi_channel; /* 1001nnnn */ note = note - 0x54 + PCMIDI_MIDDLE_C + (pm->midi_octave * 12); if (0 == velocity) velocity = 1; /* force note on */ } else { /* note off */ status = 128 + pm->midi_channel; /* 1000nnnn */ note = note - 0x94 + PCMIDI_MIDDLE_C + (pm->midi_octave*12); if (pm->midi_sustain_mode) { for (i = 0; i < PCMIDI_SUSTAINED_MAX; i++) { pms = &pm->sustained_notes[i]; if (!pms->in_use) { pms->status = status; pms->note = note; pms->velocity = velocity; pms->in_use = 1; mod_timer(&pms->timer, jiffies + msecs_to_jiffies(pm->midi_sustain)); return 1; } } } } pcmidi_send_note(pm, status, note, velocity); } return 1; } static int pcmidi_handle_report4(struct pcmidi_snd *pm, u8 *data) { unsigned key; 
u32 bit_mask; u32 bit_index; bit_mask = data[1]; bit_mask = (bit_mask << 8) | data[2]; bit_mask = (bit_mask << 8) | data[3]; /* break keys */ for (bit_index = 0; bit_index < 24; bit_index++) { key = pm->last_key[bit_index]; if (!((0x01 << bit_index) & bit_mask)) { input_event(pm->input_ep82, EV_KEY, pm->last_key[bit_index], 0); pm->last_key[bit_index] = 0; } } /* make keys */ for (bit_index = 0; bit_index < 24; bit_index++) { key = 0; switch ((0x01 << bit_index) & bit_mask) { case 0x000010: /* Fn lock*/ pm->fn_state ^= 0x000010; if (pm->fn_state) pcmidi_submit_output_report(pm, 0xc5); else pcmidi_submit_output_report(pm, 0xc6); continue; case 0x020000: /* midi launcher..send a key (qwerty) or not? */ pcmidi_submit_output_report(pm, 0xc1); pm->midi_mode ^= 0x01; dbg_hid("pcmidi mode: %d\n", pm->midi_mode); continue; case 0x100000: /* KEY_MESSENGER or octave up */ dbg_hid("pcmidi mode: %d\n", pm->midi_mode); if (pm->midi_mode) { pm->midi_octave++; if (pm->midi_octave > 2) pm->midi_octave = 2; dbg_hid("pcmidi mode: %d octave: %d\n", pm->midi_mode, pm->midi_octave); continue; } else key = KEY_MESSENGER; break; case 0x400000: key = KEY_CALENDAR; break; case 0x080000: key = KEY_ADDRESSBOOK; break; case 0x040000: key = KEY_DOCUMENTS; break; case 0x800000: key = KEY_WORDPROCESSOR; break; case 0x200000: key = KEY_SPREADSHEET; break; case 0x010000: key = KEY_COFFEE; break; case 0x000100: key = KEY_HELP; break; case 0x000200: key = KEY_SEND; break; case 0x000400: key = KEY_REPLY; break; case 0x000800: key = KEY_FORWARDMAIL; break; case 0x001000: key = KEY_NEW; break; case 0x002000: key = KEY_OPEN; break; case 0x004000: key = KEY_CLOSE; break; case 0x008000: key = KEY_SAVE; break; case 0x000001: key = KEY_UNDO; break; case 0x000002: key = KEY_REDO; break; case 0x000004: key = KEY_SPELLCHECK; break; case 0x000008: key = KEY_PRINT; break; } if (key) { input_event(pm->input_ep82, EV_KEY, key, 1); pm->last_key[bit_index] = key; } } return 1; } int pcmidi_handle_report( struct 
pcmidi_snd *pm, unsigned report_id, u8 *data, int size) { int ret = 0; switch (report_id) { case 0x01: /* midi keys (qwerty)*/ ret = pcmidi_handle_report1(pm, data); break; case 0x03: /* midi keyboard (musical)*/ ret = pcmidi_handle_report3(pm, data, size); break; case 0x04: /* multimedia/midi keys (qwerty)*/ ret = pcmidi_handle_report4(pm, data); break; } return ret; } void pcmidi_setup_extra_keys(struct pcmidi_snd *pm, struct input_dev *input) { /* reassigned functionality for N/A keys MY PICTURES => KEY_WORDPROCESSOR MY MUSIC=> KEY_SPREADSHEET */ unsigned int keys[] = { KEY_FN, KEY_MESSENGER, KEY_CALENDAR, KEY_ADDRESSBOOK, KEY_DOCUMENTS, KEY_WORDPROCESSOR, KEY_SPREADSHEET, KEY_COFFEE, KEY_HELP, KEY_SEND, KEY_REPLY, KEY_FORWARDMAIL, KEY_NEW, KEY_OPEN, KEY_CLOSE, KEY_SAVE, KEY_UNDO, KEY_REDO, KEY_SPELLCHECK, KEY_PRINT, 0 }; unsigned int *pkeys = &keys[0]; unsigned short i; if (pm->ifnum != 1) /* only set up ONCE for interace 1 */ return; pm->input_ep82 = input; for (i = 0; i < 24; i++) pm->last_key[i] = 0; while (*pkeys != 0) { set_bit(*pkeys, pm->input_ep82->keybit); ++pkeys; } } static int pcmidi_set_operational(struct pcmidi_snd *pm) { if (pm->ifnum != 1) return 0; /* only set up ONCE for interace 1 */ pcmidi_get_output_report(pm); pcmidi_submit_output_report(pm, 0xc1); return 0; } static int pcmidi_snd_free(struct snd_device *dev) { return 0; } static int pcmidi_in_open(struct snd_rawmidi_substream *substream) { struct pcmidi_snd *pm = substream->rmidi->private_data; dbg_hid("pcmidi in open\n"); pm->in_substream = substream; return 0; } static int pcmidi_in_close(struct snd_rawmidi_substream *substream) { dbg_hid("pcmidi in close\n"); return 0; } static void pcmidi_in_trigger(struct snd_rawmidi_substream *substream, int up) { struct pcmidi_snd *pm = substream->rmidi->private_data; dbg_hid("pcmidi in trigger %d\n", up); pm->in_triggered = up; } static struct snd_rawmidi_ops pcmidi_in_ops = { .open = pcmidi_in_open, .close = pcmidi_in_close, .trigger = 
pcmidi_in_trigger }; int pcmidi_snd_initialise(struct pcmidi_snd *pm) { static int dev; struct snd_card *card; struct snd_rawmidi *rwmidi; int err; static struct snd_device_ops ops = { .dev_free = pcmidi_snd_free, }; if (pm->ifnum != 1) return 0; /* only set up midi device ONCE for interace 1 */ if (dev >= SNDRV_CARDS) return -ENODEV; if (!enable[dev]) { dev++; return -ENOENT; } /* Setup sound card */ err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card); if (err < 0) { pk_error("failed to create pc-midi sound card\n"); err = -ENOMEM; goto fail; } pm->card = card; /* Setup sound device */ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, pm, &ops); if (err < 0) { pk_error("failed to create pc-midi sound device: error %d\n", err); goto fail; } strncpy(card->driver, shortname, sizeof(card->driver)); strncpy(card->shortname, shortname, sizeof(card->shortname)); strncpy(card->longname, longname, sizeof(card->longname)); /* Set up rawmidi */ err = snd_rawmidi_new(card, card->shortname, 0, 0, 1, &rwmidi); if (err < 0) { pk_error("failed to create pc-midi rawmidi device: error %d\n", err); goto fail; } pm->rwmidi = rwmidi; strncpy(rwmidi->name, card->shortname, sizeof(rwmidi->name)); rwmidi->info_flags = SNDRV_RAWMIDI_INFO_INPUT; rwmidi->private_data = pm; snd_rawmidi_set_ops(rwmidi, SNDRV_RAWMIDI_STREAM_INPUT, &pcmidi_in_ops); snd_card_set_dev(card, &pm->pk->hdev->dev); /* create sysfs variables */ err = device_create_file(&pm->pk->hdev->dev, sysfs_device_attr_channel); if (err < 0) { pk_error("failed to create sysfs attribute channel: error %d\n", err); goto fail; } err = device_create_file(&pm->pk->hdev->dev, sysfs_device_attr_sustain); if (err < 0) { pk_error("failed to create sysfs attribute sustain: error %d\n", err); goto fail_attr_sustain; } err = device_create_file(&pm->pk->hdev->dev, sysfs_device_attr_octave); if (err < 0) { pk_error("failed to create sysfs attribute octave: error %d\n", err); goto fail_attr_octave; } 
spin_lock_init(&pm->rawmidi_in_lock); init_sustain_timers(pm); pcmidi_set_operational(pm); /* register it */ err = snd_card_register(card); if (err < 0) { pk_error("failed to register pc-midi sound card: error %d\n", err); goto fail_register; } dbg_hid("pcmidi_snd_initialise finished ok\n"); return 0; fail_register: stop_sustain_timers(pm); device_remove_file(&pm->pk->hdev->dev, sysfs_device_attr_octave); fail_attr_octave: device_remove_file(&pm->pk->hdev->dev, sysfs_device_attr_sustain); fail_attr_sustain: device_remove_file(&pm->pk->hdev->dev, sysfs_device_attr_channel); fail: if (pm->card) { snd_card_free(pm->card); pm->card = NULL; } return err; } int pcmidi_snd_terminate(struct pcmidi_snd *pm) { if (pm->card) { stop_sustain_timers(pm); device_remove_file(&pm->pk->hdev->dev, sysfs_device_attr_channel); device_remove_file(&pm->pk->hdev->dev, sysfs_device_attr_sustain); device_remove_file(&pm->pk->hdev->dev, sysfs_device_attr_octave); snd_card_disconnect(pm->card); snd_card_free_when_closed(pm->card); } return 0; } /* * PC-MIDI report descriptor for report id is wrong. 
*/ static __u8 *pk_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { if (*rsize == 178 && rdesc[111] == 0x06 && rdesc[112] == 0x00 && rdesc[113] == 0xff) { hid_info(hdev, "fixing up pc-midi keyboard report descriptor\n"); rdesc[144] = 0x18; /* report 4: was 0x10 report count */ } return rdesc; } static int pk_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev); struct pcmidi_snd *pm; pm = pk->pm; if (HID_UP_MSVENDOR == (usage->hid & HID_USAGE_PAGE) && 1 == pm->ifnum) { pcmidi_setup_extra_keys(pm, hi->input); return 0; } return 0; } static int pk_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size) { struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev); int ret = 0; if (1 == pk->pm->ifnum) { if (report->id == data[0]) switch (report->id) { case 0x01: /* midi keys (qwerty)*/ case 0x03: /* midi keyboard (musical)*/ case 0x04: /* extra/midi keys (qwerty)*/ ret = pcmidi_handle_report(pk->pm, report->id, data, size); break; } } return ret; } static int pk_probe(struct hid_device *hdev, const struct hid_device_id *id) { int ret; struct usb_interface *intf = to_usb_interface(hdev->dev.parent); unsigned short ifnum = intf->cur_altsetting->desc.bInterfaceNumber; unsigned long quirks = id->driver_data; struct pk_device *pk; struct pcmidi_snd *pm = NULL; pk = kzalloc(sizeof(*pk), GFP_KERNEL); if (pk == NULL) { hid_err(hdev, "can't alloc descriptor\n"); return -ENOMEM; } pk->hdev = hdev; pm = kzalloc(sizeof(*pm), GFP_KERNEL); if (pm == NULL) { hid_err(hdev, "can't alloc descriptor\n"); ret = -ENOMEM; goto err_free; } pm->pk = pk; pk->pm = pm; pm->ifnum = ifnum; hid_set_drvdata(hdev, pk); ret = hid_parse(hdev); if (ret) { hid_err(hdev, "hid parse failed\n"); goto err_free; } if (quirks & PK_QUIRK_NOGET) { /* hid_parse cleared all the quirks */ hdev->quirks |= 
HID_QUIRK_NOGET; } ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); if (ret) { hid_err(hdev, "hw start failed\n"); goto err_free; } ret = pcmidi_snd_initialise(pm); if (ret < 0) goto err_stop; return 0; err_stop: hid_hw_stop(hdev); err_free: if (pm != NULL) kfree(pm); kfree(pk); return ret; } static void pk_remove(struct hid_device *hdev) { struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev); struct pcmidi_snd *pm; pm = pk->pm; if (pm) { pcmidi_snd_terminate(pm); kfree(pm); } hid_hw_stop(hdev); kfree(pk); } static const struct hid_device_id pk_devices[] = { {HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI), .driver_data = PK_QUIRK_NOGET}, { } }; MODULE_DEVICE_TABLE(hid, pk_devices); static struct hid_driver pk_driver = { .name = "prodikeys", .id_table = pk_devices, .report_fixup = pk_report_fixup, .input_mapping = pk_input_mapping, .raw_event = pk_raw_event, .probe = pk_probe, .remove = pk_remove, }; static int pk_init(void) { int ret; ret = hid_register_driver(&pk_driver); if (ret) pr_err("can't register prodikeys driver\n"); return ret; } static void pk_exit(void) { hid_unregister_driver(&pk_driver); } module_init(pk_init); module_exit(pk_exit); MODULE_LICENSE("GPL");
gpl-2.0
CyanogenMod/hardkernel-kernel-4412
net/9p/trans_common.c
2918
2739
/* * Copyright IBM Corporation, 2010 * Author Venkateswararao Jujjuri <jvrao@linux.vnet.ibm.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2.1 of the GNU Lesser General Public License * as published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * */ #include <linux/slab.h> #include <linux/module.h> #include <net/9p/9p.h> #include <net/9p/client.h> #include <linux/scatterlist.h> #include "trans_common.h" /** * p9_release_req_pages - Release pages after the transaction. * @*private: PDU's private page of struct trans_rpage_info */ void p9_release_req_pages(struct trans_rpage_info *rpinfo) { int i = 0; while (rpinfo->rp_data[i] && rpinfo->rp_nr_pages--) { put_page(rpinfo->rp_data[i]); i++; } } EXPORT_SYMBOL(p9_release_req_pages); /** * p9_nr_pages - Return number of pages needed to accommodate the payload. */ int p9_nr_pages(struct p9_req_t *req) { unsigned long start_page, end_page; start_page = (unsigned long)req->tc->pubuf >> PAGE_SHIFT; end_page = ((unsigned long)req->tc->pubuf + req->tc->pbuf_size + PAGE_SIZE - 1) >> PAGE_SHIFT; return end_page - start_page; } EXPORT_SYMBOL(p9_nr_pages); /** * payload_gup - Translates user buffer into kernel pages and * pins them either for read/write through get_user_pages_fast(). * @req: Request to be sent to server. * @pdata_off: data offset into the first page after translation (gup). * @pdata_len: Total length of the IO. gup may not return requested # of pages. * @nr_pages: number of pages to accommodate the payload * @rw: Indicates if the pages are for read or write. 
*/ int p9_payload_gup(struct p9_req_t *req, size_t *pdata_off, int *pdata_len, int nr_pages, u8 rw) { uint32_t first_page_bytes = 0; int32_t pdata_mapped_pages; struct trans_rpage_info *rpinfo; *pdata_off = (__force size_t)req->tc->pubuf & (PAGE_SIZE-1); if (*pdata_off) first_page_bytes = min(((size_t)PAGE_SIZE - *pdata_off), req->tc->pbuf_size); rpinfo = req->tc->private; pdata_mapped_pages = get_user_pages_fast((unsigned long)req->tc->pubuf, nr_pages, rw, &rpinfo->rp_data[0]); if (pdata_mapped_pages <= 0) return pdata_mapped_pages; rpinfo->rp_nr_pages = pdata_mapped_pages; if (*pdata_off) { *pdata_len = first_page_bytes; *pdata_len += min((req->tc->pbuf_size - *pdata_len), ((size_t)pdata_mapped_pages - 1) << PAGE_SHIFT); } else { *pdata_len = min(req->tc->pbuf_size, (size_t)pdata_mapped_pages << PAGE_SHIFT); } return 0; } EXPORT_SYMBOL(p9_payload_gup);
gpl-2.0
Coldwindofnowhere/android_kernel_samsung_aries
sound/soc/pxa/tosa.c
2918
7314
/* * tosa.c -- SoC audio for Tosa * * Copyright 2005 Wolfson Microelectronics PLC. * Copyright 2005 Openedhand Ltd. * * Authors: Liam Girdwood <lrg@slimlogic.co.uk> * Richard Purdie <richard@openedhand.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * GPIO's * 1 - Jack Insertion * 5 - Hookswitch (headset answer/hang up switch) * */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/device.h> #include <linux/gpio.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/soc.h> #include <asm/mach-types.h> #include <mach/tosa.h> #include <mach/audio.h> #include "../codecs/wm9712.h" #include "pxa2xx-ac97.h" static struct snd_soc_card tosa; #define TOSA_HP 0 #define TOSA_MIC_INT 1 #define TOSA_HEADSET 2 #define TOSA_HP_OFF 3 #define TOSA_SPK_ON 0 #define TOSA_SPK_OFF 1 static int tosa_jack_func; static int tosa_spk_func; static void tosa_ext_control(struct snd_soc_codec *codec) { struct snd_soc_dapm_context *dapm = &codec->dapm; /* set up jack connection */ switch (tosa_jack_func) { case TOSA_HP: snd_soc_dapm_disable_pin(dapm, "Mic (Internal)"); snd_soc_dapm_enable_pin(dapm, "Headphone Jack"); snd_soc_dapm_disable_pin(dapm, "Headset Jack"); break; case TOSA_MIC_INT: snd_soc_dapm_enable_pin(dapm, "Mic (Internal)"); snd_soc_dapm_disable_pin(dapm, "Headphone Jack"); snd_soc_dapm_disable_pin(dapm, "Headset Jack"); break; case TOSA_HEADSET: snd_soc_dapm_disable_pin(dapm, "Mic (Internal)"); snd_soc_dapm_disable_pin(dapm, "Headphone Jack"); snd_soc_dapm_enable_pin(dapm, "Headset Jack"); break; } if (tosa_spk_func == TOSA_SPK_ON) snd_soc_dapm_enable_pin(dapm, "Speaker"); else snd_soc_dapm_disable_pin(dapm, "Speaker"); snd_soc_dapm_sync(dapm); } static int tosa_startup(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = 
substream->private_data; struct snd_soc_codec *codec = rtd->codec; mutex_lock(&codec->mutex); /* check the jack status at stream startup */ tosa_ext_control(codec); mutex_unlock(&codec->mutex); return 0; } static struct snd_soc_ops tosa_ops = { .startup = tosa_startup, }; static int tosa_get_jack(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { ucontrol->value.integer.value[0] = tosa_jack_func; return 0; } static int tosa_set_jack(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); if (tosa_jack_func == ucontrol->value.integer.value[0]) return 0; tosa_jack_func = ucontrol->value.integer.value[0]; tosa_ext_control(codec); return 1; } static int tosa_get_spk(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { ucontrol->value.integer.value[0] = tosa_spk_func; return 0; } static int tosa_set_spk(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); if (tosa_spk_func == ucontrol->value.integer.value[0]) return 0; tosa_spk_func = ucontrol->value.integer.value[0]; tosa_ext_control(codec); return 1; } /* tosa dapm event handlers */ static int tosa_hp_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *k, int event) { gpio_set_value(TOSA_GPIO_L_MUTE, SND_SOC_DAPM_EVENT_ON(event) ? 
1 :0); return 0; } /* tosa machine dapm widgets */ static const struct snd_soc_dapm_widget tosa_dapm_widgets[] = { SND_SOC_DAPM_HP("Headphone Jack", tosa_hp_event), SND_SOC_DAPM_HP("Headset Jack", NULL), SND_SOC_DAPM_MIC("Mic (Internal)", NULL), SND_SOC_DAPM_SPK("Speaker", NULL), }; /* tosa audio map */ static const struct snd_soc_dapm_route audio_map[] = { /* headphone connected to HPOUTL, HPOUTR */ {"Headphone Jack", NULL, "HPOUTL"}, {"Headphone Jack", NULL, "HPOUTR"}, /* ext speaker connected to LOUT2, ROUT2 */ {"Speaker", NULL, "LOUT2"}, {"Speaker", NULL, "ROUT2"}, /* internal mic is connected to mic1, mic2 differential - with bias */ {"MIC1", NULL, "Mic Bias"}, {"MIC2", NULL, "Mic Bias"}, {"Mic Bias", NULL, "Mic (Internal)"}, /* headset is connected to HPOUTR, and LINEINR with bias */ {"Headset Jack", NULL, "HPOUTR"}, {"LINEINR", NULL, "Mic Bias"}, {"Mic Bias", NULL, "Headset Jack"}, }; static const char *jack_function[] = {"Headphone", "Mic", "Line", "Headset", "Off"}; static const char *spk_function[] = {"On", "Off"}; static const struct soc_enum tosa_enum[] = { SOC_ENUM_SINGLE_EXT(5, jack_function), SOC_ENUM_SINGLE_EXT(2, spk_function), }; static const struct snd_kcontrol_new tosa_controls[] = { SOC_ENUM_EXT("Jack Function", tosa_enum[0], tosa_get_jack, tosa_set_jack), SOC_ENUM_EXT("Speaker Function", tosa_enum[1], tosa_get_spk, tosa_set_spk), }; static int tosa_ac97_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; struct snd_soc_dapm_context *dapm = &codec->dapm; int err; snd_soc_dapm_nc_pin(dapm, "OUT3"); snd_soc_dapm_nc_pin(dapm, "MONOOUT"); /* add tosa specific controls */ err = snd_soc_add_controls(codec, tosa_controls, ARRAY_SIZE(tosa_controls)); if (err < 0) return err; /* add tosa specific widgets */ snd_soc_dapm_new_controls(dapm, tosa_dapm_widgets, ARRAY_SIZE(tosa_dapm_widgets)); /* set up tosa specific audio path audio_map */ snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); snd_soc_dapm_sync(dapm); 
return 0; } static struct snd_soc_dai_link tosa_dai[] = { { .name = "AC97", .stream_name = "AC97 HiFi", .cpu_dai_name = "pxa2xx-ac97", .codec_dai_name = "wm9712-hifi", .platform_name = "pxa-pcm-audio", .codec_name = "wm9712-codec", .init = tosa_ac97_init, .ops = &tosa_ops, }, { .name = "AC97 Aux", .stream_name = "AC97 Aux", .cpu_dai_name = "pxa2xx-ac97-aux", .codec_dai_name = "wm9712-aux", .platform_name = "pxa-pcm-audio", .codec_name = "wm9712-codec", .ops = &tosa_ops, }, }; static int tosa_probe(struct snd_soc_card *card) { int ret; ret = gpio_request(TOSA_GPIO_L_MUTE, "Headphone Jack"); if (ret) return ret; ret = gpio_direction_output(TOSA_GPIO_L_MUTE, 0); if (ret) gpio_free(TOSA_GPIO_L_MUTE); return ret; } static int tosa_remove(struct snd_soc_card *card) { gpio_free(TOSA_GPIO_L_MUTE); return 0; } static struct snd_soc_card tosa = { .name = "Tosa", .dai_link = tosa_dai, .num_links = ARRAY_SIZE(tosa_dai), .probe = tosa_probe, .remove = tosa_remove, }; static struct platform_device *tosa_snd_device; static int __init tosa_init(void) { int ret; if (!machine_is_tosa()) return -ENODEV; tosa_snd_device = platform_device_alloc("soc-audio", -1); if (!tosa_snd_device) { ret = -ENOMEM; goto err_alloc; } platform_set_drvdata(tosa_snd_device, &tosa); ret = platform_device_add(tosa_snd_device); if (!ret) return 0; platform_device_put(tosa_snd_device); err_alloc: return ret; } static void __exit tosa_exit(void) { platform_device_unregister(tosa_snd_device); } module_init(tosa_init); module_exit(tosa_exit); /* Module information */ MODULE_AUTHOR("Richard Purdie"); MODULE_DESCRIPTION("ALSA SoC Tosa"); MODULE_LICENSE("GPL");
gpl-2.0
airk000/kernel_htc_7x30
drivers/char/sonypi.c
3174
42248
/* * Sony Programmable I/O Control Device driver for VAIO * * Copyright (C) 2007 Mattia Dongili <malattia@linux.it> * * Copyright (C) 2001-2005 Stelian Pop <stelian@popies.net> * * Copyright (C) 2005 Narayanan R S <nars@kadamba.org> * * Copyright (C) 2001-2002 Alcôve <www.alcove.com> * * Copyright (C) 2001 Michael Ashley <m.ashley@unsw.edu.au> * * Copyright (C) 2001 Junichi Morita <jun1m@mars.dti.ne.jp> * * Copyright (C) 2000 Takaya Kinjo <t-kinjo@tc4.so-net.ne.jp> * * Copyright (C) 2000 Andrew Tridgell <tridge@valinux.com> * * Earlier work by Werner Almesberger, Paul `Rusty' Russell and Paul Mackerras. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
* */ #include <linux/module.h> #include <linux/sched.h> #include <linux/input.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/miscdevice.h> #include <linux/poll.h> #include <linux/delay.h> #include <linux/wait.h> #include <linux/acpi.h> #include <linux/dmi.h> #include <linux/err.h> #include <linux/kfifo.h> #include <linux/platform_device.h> #include <linux/gfp.h> #include <asm/uaccess.h> #include <asm/io.h> #include <asm/system.h> #include <linux/sonypi.h> #define SONYPI_DRIVER_VERSION "1.26" MODULE_AUTHOR("Stelian Pop <stelian@popies.net>"); MODULE_DESCRIPTION("Sony Programmable I/O Control Device driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(SONYPI_DRIVER_VERSION); static int minor = -1; module_param(minor, int, 0); MODULE_PARM_DESC(minor, "minor number of the misc device, default is -1 (automatic)"); static int verbose; /* = 0 */ module_param(verbose, int, 0644); MODULE_PARM_DESC(verbose, "be verbose, default is 0 (no)"); static int fnkeyinit; /* = 0 */ module_param(fnkeyinit, int, 0444); MODULE_PARM_DESC(fnkeyinit, "set this if your Fn keys do not generate any event"); static int camera; /* = 0 */ module_param(camera, int, 0444); MODULE_PARM_DESC(camera, "set this if you have a MotionEye camera (PictureBook series)"); static int compat; /* = 0 */ module_param(compat, int, 0444); MODULE_PARM_DESC(compat, "set this if you want to enable backward compatibility mode"); static unsigned long mask = 0xffffffff; module_param(mask, ulong, 0644); MODULE_PARM_DESC(mask, "set this to the mask of event you want to enable (see doc)"); static int useinput = 1; module_param(useinput, int, 0444); MODULE_PARM_DESC(useinput, "set this if you would like sonypi to feed events to the input subsystem"); static int check_ioport = 1; module_param(check_ioport, int, 0444); MODULE_PARM_DESC(check_ioport, "set this to 0 if you think the automatic ioport check for sony-laptop is wrong"); #define SONYPI_DEVICE_MODEL_TYPE1 1 #define 
SONYPI_DEVICE_MODEL_TYPE2 2 #define SONYPI_DEVICE_MODEL_TYPE3 3 /* type1 models use those */ #define SONYPI_IRQ_PORT 0x8034 #define SONYPI_IRQ_SHIFT 22 #define SONYPI_TYPE1_BASE 0x50 #define SONYPI_G10A (SONYPI_TYPE1_BASE+0x14) #define SONYPI_TYPE1_REGION_SIZE 0x08 #define SONYPI_TYPE1_EVTYPE_OFFSET 0x04 /* type2 series specifics */ #define SONYPI_SIRQ 0x9b #define SONYPI_SLOB 0x9c #define SONYPI_SHIB 0x9d #define SONYPI_TYPE2_REGION_SIZE 0x20 #define SONYPI_TYPE2_EVTYPE_OFFSET 0x12 /* type3 series specifics */ #define SONYPI_TYPE3_BASE 0x40 #define SONYPI_TYPE3_GID2 (SONYPI_TYPE3_BASE+0x48) /* 16 bits */ #define SONYPI_TYPE3_MISC (SONYPI_TYPE3_BASE+0x6d) /* 8 bits */ #define SONYPI_TYPE3_REGION_SIZE 0x20 #define SONYPI_TYPE3_EVTYPE_OFFSET 0x12 /* battery / brightness addresses */ #define SONYPI_BAT_FLAGS 0x81 #define SONYPI_LCD_LIGHT 0x96 #define SONYPI_BAT1_PCTRM 0xa0 #define SONYPI_BAT1_LEFT 0xa2 #define SONYPI_BAT1_MAXRT 0xa4 #define SONYPI_BAT2_PCTRM 0xa8 #define SONYPI_BAT2_LEFT 0xaa #define SONYPI_BAT2_MAXRT 0xac #define SONYPI_BAT1_MAXTK 0xb0 #define SONYPI_BAT1_FULL 0xb2 #define SONYPI_BAT2_MAXTK 0xb8 #define SONYPI_BAT2_FULL 0xba /* FAN0 information (reverse engineered from ACPI tables) */ #define SONYPI_FAN0_STATUS 0x93 #define SONYPI_TEMP_STATUS 0xC1 /* ioports used for brightness and type2 events */ #define SONYPI_DATA_IOPORT 0x62 #define SONYPI_CST_IOPORT 0x66 /* The set of possible ioports */ struct sonypi_ioport_list { u16 port1; u16 port2; }; static struct sonypi_ioport_list sonypi_type1_ioport_list[] = { { 0x10c0, 0x10c4 }, /* looks like the default on C1Vx */ { 0x1080, 0x1084 }, { 0x1090, 0x1094 }, { 0x10a0, 0x10a4 }, { 0x10b0, 0x10b4 }, { 0x0, 0x0 } }; static struct sonypi_ioport_list sonypi_type2_ioport_list[] = { { 0x1080, 0x1084 }, { 0x10a0, 0x10a4 }, { 0x10c0, 0x10c4 }, { 0x10e0, 0x10e4 }, { 0x0, 0x0 } }; /* same as in type 2 models */ static struct sonypi_ioport_list *sonypi_type3_ioport_list = sonypi_type2_ioport_list; /* The set of 
   possible interrupts */
struct sonypi_irq_list {
	u16	irq;
	u16	bits;
};

static struct sonypi_irq_list sonypi_type1_irq_list[] = {
	{ 11, 0x2 },	/* IRQ 11, GO22=0,GO23=1 in AML */
	{ 10, 0x1 },	/* IRQ 10, GO22=1,GO23=0 in AML */
	{  5, 0x0 },	/* IRQ  5, GO22=0,GO23=0 in AML */
	{  0, 0x3 }	/* no IRQ, GO22=1,GO23=1 in AML */
};

static struct sonypi_irq_list sonypi_type2_irq_list[] = {
	{ 11, 0x80 },	/* IRQ 11, 0x80 in SIRQ in AML */
	{ 10, 0x40 },	/* IRQ 10, 0x40 in SIRQ in AML */
	{  9, 0x20 },	/* IRQ  9, 0x20 in SIRQ in AML */
	{  6, 0x10 },	/* IRQ  6, 0x10 in SIRQ in AML */
	{  0, 0x00 }	/* no IRQ, 0x00 in SIRQ in AML */
};

/* same as in type2 models */
static struct sonypi_irq_list *sonypi_type3_irq_list = sonypi_type2_irq_list;

/* MotionEye camera sub-device register indexes and bit masks. */
#define SONYPI_CAMERA_BRIGHTNESS		0
#define SONYPI_CAMERA_CONTRAST			1
#define SONYPI_CAMERA_HUE			2
#define SONYPI_CAMERA_COLOR			3
#define SONYPI_CAMERA_SHARPNESS			4

#define SONYPI_CAMERA_PICTURE			5
#define SONYPI_CAMERA_EXPOSURE_MASK		0xC
#define SONYPI_CAMERA_WHITE_BALANCE_MASK	0x3
#define SONYPI_CAMERA_PICTURE_MODE_MASK		0x30
#define SONYPI_CAMERA_MUTE_MASK			0x40

/* the rest don't need a loop until not 0xff */
#define SONYPI_CAMERA_AGC			6
#define SONYPI_CAMERA_AGC_MASK			0x30
#define SONYPI_CAMERA_SHUTTER_MASK		0x7

#define SONYPI_CAMERA_SHUTDOWN_REQUEST		7
#define SONYPI_CAMERA_CONTROL			0x10

#define SONYPI_CAMERA_STATUS			7
#define SONYPI_CAMERA_STATUS_READY		0x2
#define SONYPI_CAMERA_STATUS_POSITION		0x4

#define SONYPI_DIRECTION_BACKWARDS		0x4

#define SONYPI_CAMERA_REVISION			8
#define SONYPI_CAMERA_ROMVERSION		9

/* Event masks (tested against the module parameter 'mask') */
#define SONYPI_JOGGER_MASK			0x00000001
#define SONYPI_CAPTURE_MASK			0x00000002
#define SONYPI_FNKEY_MASK			0x00000004
#define SONYPI_BLUETOOTH_MASK			0x00000008
#define SONYPI_PKEY_MASK			0x00000010
#define SONYPI_BACK_MASK			0x00000020
#define SONYPI_HELP_MASK			0x00000040
#define SONYPI_LID_MASK				0x00000080
#define SONYPI_ZOOM_MASK			0x00000100
#define SONYPI_THUMBPHRASE_MASK			0x00000200
#define SONYPI_MEYE_MASK			0x00000400
#define SONYPI_MEMORYSTICK_MASK			0x00000800
#define SONYPI_BATTERY_MASK			0x00001000
#define SONYPI_WIRELESS_MASK			0x00002000

/* Maps one raw data byte read from the device to a sonypi event code. */
struct sonypi_event {
	u8	data;
	u8	event;
};

/* The set of possible button release events */
static struct sonypi_event sonypi_releaseev[] = {
	{ 0x00, SONYPI_EVENT_ANYBUTTON_RELEASED },
	{ 0, 0 }
};

/* The set of possible jogger events */
static struct sonypi_event sonypi_joggerev[] = {
	{ 0x1f, SONYPI_EVENT_JOGDIAL_UP },
	{ 0x01, SONYPI_EVENT_JOGDIAL_DOWN },
	{ 0x5f, SONYPI_EVENT_JOGDIAL_UP_PRESSED },
	{ 0x41, SONYPI_EVENT_JOGDIAL_DOWN_PRESSED },
	{ 0x1e, SONYPI_EVENT_JOGDIAL_FAST_UP },
	{ 0x02, SONYPI_EVENT_JOGDIAL_FAST_DOWN },
	{ 0x5e, SONYPI_EVENT_JOGDIAL_FAST_UP_PRESSED },
	{ 0x42, SONYPI_EVENT_JOGDIAL_FAST_DOWN_PRESSED },
	{ 0x1d, SONYPI_EVENT_JOGDIAL_VFAST_UP },
	{ 0x03, SONYPI_EVENT_JOGDIAL_VFAST_DOWN },
	{ 0x5d, SONYPI_EVENT_JOGDIAL_VFAST_UP_PRESSED },
	{ 0x43, SONYPI_EVENT_JOGDIAL_VFAST_DOWN_PRESSED },
	{ 0x40, SONYPI_EVENT_JOGDIAL_PRESSED },
	{ 0, 0 }
};

/* The set of possible capture button events */
static struct sonypi_event sonypi_captureev[] = {
	{ 0x05, SONYPI_EVENT_CAPTURE_PARTIALPRESSED },
	{ 0x07, SONYPI_EVENT_CAPTURE_PRESSED },
	{ 0x01, SONYPI_EVENT_CAPTURE_PARTIALRELEASED },
	{ 0, 0 }
};

/* The set of possible fnkeys events */
static struct sonypi_event sonypi_fnkeyev[] = {
	{ 0x10, SONYPI_EVENT_FNKEY_ESC },
	{ 0x11, SONYPI_EVENT_FNKEY_F1 },
	{ 0x12, SONYPI_EVENT_FNKEY_F2 },
	{ 0x13, SONYPI_EVENT_FNKEY_F3 },
	{ 0x14, SONYPI_EVENT_FNKEY_F4 },
	{ 0x15, SONYPI_EVENT_FNKEY_F5 },
	{ 0x16, SONYPI_EVENT_FNKEY_F6 },
	{ 0x17, SONYPI_EVENT_FNKEY_F7 },
	{ 0x18, SONYPI_EVENT_FNKEY_F8 },
	{ 0x19, SONYPI_EVENT_FNKEY_F9 },
	{ 0x1a, SONYPI_EVENT_FNKEY_F10 },
	{ 0x1b, SONYPI_EVENT_FNKEY_F11 },
	{ 0x1c, SONYPI_EVENT_FNKEY_F12 },
	{ 0x1f, SONYPI_EVENT_FNKEY_RELEASED },
	{ 0x21, SONYPI_EVENT_FNKEY_1 },
	{ 0x22, SONYPI_EVENT_FNKEY_2 },
	{ 0x31, SONYPI_EVENT_FNKEY_D },
	{ 0x32, SONYPI_EVENT_FNKEY_E },
	{ 0x33, SONYPI_EVENT_FNKEY_F },
	{ 0x34, SONYPI_EVENT_FNKEY_S },
	{ 0x35, SONYPI_EVENT_FNKEY_B },
	{ 0x36, SONYPI_EVENT_FNKEY_ONLY },
	{ 0, 0 }
};

/* The set of possible program key events */
static struct sonypi_event sonypi_pkeyev[] = {
	{ 0x01, SONYPI_EVENT_PKEY_P1 },
	{ 0x02, SONYPI_EVENT_PKEY_P2 },
	{ 0x04, SONYPI_EVENT_PKEY_P3 },
	{ 0x5c, SONYPI_EVENT_PKEY_P1 },
	{ 0, 0 }
};

/* The set of possible bluetooth events */
static struct sonypi_event sonypi_blueev[] = {
	{ 0x55, SONYPI_EVENT_BLUETOOTH_PRESSED },
	{ 0x59, SONYPI_EVENT_BLUETOOTH_ON },
	{ 0x5a, SONYPI_EVENT_BLUETOOTH_OFF },
	{ 0, 0 }
};

/* The set of possible wireless events */
static struct sonypi_event sonypi_wlessev[] = {
	{ 0x59, SONYPI_EVENT_WIRELESS_ON },
	{ 0x5a, SONYPI_EVENT_WIRELESS_OFF },
	{ 0, 0 }
};

/* The set of possible back button events */
static struct sonypi_event sonypi_backev[] = {
	{ 0x20, SONYPI_EVENT_BACK_PRESSED },
	{ 0, 0 }
};

/* The set of possible help button events */
static struct sonypi_event sonypi_helpev[] = {
	{ 0x3b, SONYPI_EVENT_HELP_PRESSED },
	{ 0, 0 }
};

/* The set of possible lid events */
static struct sonypi_event sonypi_lidev[] = {
	{ 0x51, SONYPI_EVENT_LID_CLOSED },
	{ 0x50, SONYPI_EVENT_LID_OPENED },
	{ 0, 0 }
};

/* The set of possible zoom events */
static struct sonypi_event sonypi_zoomev[] = {
	{ 0x39, SONYPI_EVENT_ZOOM_PRESSED },
	{ 0, 0 }
};

/* The set of possible thumbphrase events */
static struct sonypi_event sonypi_thumbphraseev[] = {
	{ 0x3a, SONYPI_EVENT_THUMBPHRASE_PRESSED },
	{ 0, 0 }
};

/* The set of possible motioneye camera events */
static struct sonypi_event sonypi_meyeev[] = {
	{ 0x00, SONYPI_EVENT_MEYE_FACE },
	{ 0x01, SONYPI_EVENT_MEYE_OPPOSITE },
	{ 0, 0 }
};

/* The set of possible memorystick events */
static struct sonypi_event sonypi_memorystickev[] = {
	{ 0x53, SONYPI_EVENT_MEMORYSTICK_INSERT },
	{ 0x54, SONYPI_EVENT_MEMORYSTICK_EJECT },
	{ 0, 0 }
};

/* The set of possible battery events */
static struct sonypi_event sonypi_batteryev[] = {
	{ 0x20, SONYPI_EVENT_BATTERY_INSERT },
	{ 0x30, SONYPI_EVENT_BATTERY_REMOVE },
	{ 0, 0 }
};

/*
 * Per-model event dispatch table, scanned by the interrupt handler:
 * the 'data' byte is matched against the event-type port, 'mask'
 * against the module parameter, then 'events' maps the data port
 * byte to a sonypi event code.
 */
static struct sonypi_eventtypes {
	int			model;
	u8			data;
	unsigned long		mask;
	struct sonypi_event	*events;
} sonypi_eventtypes[] = {
	{ SONYPI_DEVICE_MODEL_TYPE1, 0, 0xffffffff, sonypi_releaseev },
	{ SONYPI_DEVICE_MODEL_TYPE1, 0x70, SONYPI_MEYE_MASK, sonypi_meyeev },
	{ SONYPI_DEVICE_MODEL_TYPE1, 0x30, SONYPI_LID_MASK, sonypi_lidev },
	{ SONYPI_DEVICE_MODEL_TYPE1, 0x60, SONYPI_CAPTURE_MASK, sonypi_captureev },
	{ SONYPI_DEVICE_MODEL_TYPE1, 0x10, SONYPI_JOGGER_MASK, sonypi_joggerev },
	{ SONYPI_DEVICE_MODEL_TYPE1, 0x20, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
	{ SONYPI_DEVICE_MODEL_TYPE1, 0x30, SONYPI_BLUETOOTH_MASK, sonypi_blueev },
	{ SONYPI_DEVICE_MODEL_TYPE1, 0x40, SONYPI_PKEY_MASK, sonypi_pkeyev },
	{ SONYPI_DEVICE_MODEL_TYPE1, 0x30, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
	{ SONYPI_DEVICE_MODEL_TYPE1, 0x40, SONYPI_BATTERY_MASK, sonypi_batteryev },

	{ SONYPI_DEVICE_MODEL_TYPE2, 0, 0xffffffff, sonypi_releaseev },
	{ SONYPI_DEVICE_MODEL_TYPE2, 0x38, SONYPI_LID_MASK, sonypi_lidev },
	{ SONYPI_DEVICE_MODEL_TYPE2, 0x11, SONYPI_JOGGER_MASK, sonypi_joggerev },
	{ SONYPI_DEVICE_MODEL_TYPE2, 0x61, SONYPI_CAPTURE_MASK, sonypi_captureev },
	{ SONYPI_DEVICE_MODEL_TYPE2, 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
	{ SONYPI_DEVICE_MODEL_TYPE2, 0x31, SONYPI_BLUETOOTH_MASK, sonypi_blueev },
	{ SONYPI_DEVICE_MODEL_TYPE2, 0x08, SONYPI_PKEY_MASK, sonypi_pkeyev },
	{ SONYPI_DEVICE_MODEL_TYPE2, 0x11, SONYPI_BACK_MASK, sonypi_backev },
	{ SONYPI_DEVICE_MODEL_TYPE2, 0x21, SONYPI_HELP_MASK, sonypi_helpev },
	{ SONYPI_DEVICE_MODEL_TYPE2, 0x21, SONYPI_ZOOM_MASK, sonypi_zoomev },
	{ SONYPI_DEVICE_MODEL_TYPE2, 0x20, SONYPI_THUMBPHRASE_MASK, sonypi_thumbphraseev },
	{ SONYPI_DEVICE_MODEL_TYPE2, 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
	{ SONYPI_DEVICE_MODEL_TYPE2, 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev },
	{ SONYPI_DEVICE_MODEL_TYPE2, 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev },

	{ SONYPI_DEVICE_MODEL_TYPE3, 0, 0xffffffff, sonypi_releaseev },
	{ SONYPI_DEVICE_MODEL_TYPE3, 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
	{ SONYPI_DEVICE_MODEL_TYPE3, 0x31, SONYPI_WIRELESS_MASK, sonypi_wlessev },
	{ SONYPI_DEVICE_MODEL_TYPE3, 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
	{ SONYPI_DEVICE_MODEL_TYPE3, 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev },
	{ SONYPI_DEVICE_MODEL_TYPE3, 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev },
	{ 0 }
};

#define SONYPI_BUF_SIZE	128

/* Correspondance table between sonypi events and input layer events */
static struct {
	int sonypiev;
	int inputev;
} sonypi_inputkeys[] = {
	{ SONYPI_EVENT_CAPTURE_PRESSED,		KEY_CAMERA },
	{ SONYPI_EVENT_FNKEY_ONLY,		KEY_FN },
	{ SONYPI_EVENT_FNKEY_ESC,		KEY_FN_ESC },
	{ SONYPI_EVENT_FNKEY_F1,		KEY_FN_F1 },
	{ SONYPI_EVENT_FNKEY_F2,		KEY_FN_F2 },
	{ SONYPI_EVENT_FNKEY_F3,		KEY_FN_F3 },
	{ SONYPI_EVENT_FNKEY_F4,		KEY_FN_F4 },
	{ SONYPI_EVENT_FNKEY_F5,		KEY_FN_F5 },
	{ SONYPI_EVENT_FNKEY_F6,		KEY_FN_F6 },
	{ SONYPI_EVENT_FNKEY_F7,		KEY_FN_F7 },
	{ SONYPI_EVENT_FNKEY_F8,		KEY_FN_F8 },
	{ SONYPI_EVENT_FNKEY_F9,		KEY_FN_F9 },
	{ SONYPI_EVENT_FNKEY_F10,		KEY_FN_F10 },
	{ SONYPI_EVENT_FNKEY_F11,		KEY_FN_F11 },
	{ SONYPI_EVENT_FNKEY_F12,		KEY_FN_F12 },
	{ SONYPI_EVENT_FNKEY_1,			KEY_FN_1 },
	{ SONYPI_EVENT_FNKEY_2,			KEY_FN_2 },
	{ SONYPI_EVENT_FNKEY_D,			KEY_FN_D },
	{ SONYPI_EVENT_FNKEY_E,			KEY_FN_E },
	{ SONYPI_EVENT_FNKEY_F,			KEY_FN_F },
	{ SONYPI_EVENT_FNKEY_S,			KEY_FN_S },
	{ SONYPI_EVENT_FNKEY_B,			KEY_FN_B },
	{ SONYPI_EVENT_BLUETOOTH_PRESSED,	KEY_BLUE },
	{ SONYPI_EVENT_BLUETOOTH_ON,		KEY_BLUE },
	{ SONYPI_EVENT_PKEY_P1,			KEY_PROG1 },
	{ SONYPI_EVENT_PKEY_P2,			KEY_PROG2 },
	{ SONYPI_EVENT_PKEY_P3,			KEY_PROG3 },
	{ SONYPI_EVENT_BACK_PRESSED,		KEY_BACK },
	{ SONYPI_EVENT_HELP_PRESSED,		KEY_HELP },
	{ SONYPI_EVENT_ZOOM_PRESSED,		KEY_ZOOM },
	{ SONYPI_EVENT_THUMBPHRASE_PRESSED,	BTN_THUMB },
	{ 0, 0 },
};

/* A queued key-press whose release is reported later by a workqueue. */
struct sonypi_keypress {
	struct input_dev *dev;
	int key;
};

/* Single global driver state (the hardware is a singleton). */
static struct sonypi_device {
	struct pci_dev *dev;
	u16 irq;
	u16 bits;
	u16 ioport1;
	u16 ioport2;
	u16 region_size;
	u16 evtype_offset;
	int camera_power;
	int bluetooth_power;
	struct mutex lock;		/* serializes open/release/ioctl */
	struct kfifo fifo;		/* events for /dev readers */
	spinlock_t fifo_lock;
	wait_queue_head_t fifo_proc_list;
	struct fasync_struct *fifo_async;
	int open_count;
	int model;
	struct input_dev *input_jog_dev;
	struct input_dev *input_key_dev;
	struct work_struct input_work;
	struct kfifo input_fifo;	/* pending key releases */
	spinlock_t input_fifo_lock;
} sonypi_device;

#define ITERATIONS_LONG		10000
#define ITERATIONS_SHORT	10

/*
 * Busy-wait (in udelay(1) steps) until 'command' evaluates to 0 or
 * 'iterations' attempts are exhausted; on timeout a warning is logged
 * unless 'quiet' is set (always logged when 'verbose').
 */
#define wait_on_command(quiet, command, iterations) { \
	unsigned int n = iterations; \
	while (--n && (command)) \
		udelay(1); \
	if (!n && (verbose || !quiet)) \
		printk(KERN_WARNING "sonypi command failed at %s : %s (line %d)\n", __FILE__, __func__, __LINE__); \
}

#ifdef CONFIG_ACPI
#define SONYPI_ACPI_ACTIVE (!acpi_disabled)
#else
#define SONYPI_ACPI_ACTIVE 0
#endif				/* CONFIG_ACPI */

#ifdef CONFIG_ACPI
static struct acpi_device *sonypi_acpi_device;
static int acpi_driver_registered;
#endif

/*
 * Write one byte to an embedded-controller register.  Goes through the
 * ACPI EC driver when ACPI is active, otherwise bit-bangs the EC
 * command/data ports directly.  Returns 0 (the direct path cannot
 * report failure beyond the wait_on_command warning).
 */
static int sonypi_ec_write(u8 addr, u8 value)
{
#ifdef CONFIG_ACPI
	if (SONYPI_ACPI_ACTIVE)
		return ec_write(addr, value);
#endif
	wait_on_command(1, inb_p(SONYPI_CST_IOPORT) & 3, ITERATIONS_LONG);
	outb_p(0x81, SONYPI_CST_IOPORT);	/* EC "write register" command */
	wait_on_command(0, inb_p(SONYPI_CST_IOPORT) & 2, ITERATIONS_LONG);
	outb_p(addr, SONYPI_DATA_IOPORT);
	wait_on_command(0, inb_p(SONYPI_CST_IOPORT) & 2, ITERATIONS_LONG);
	outb_p(value, SONYPI_DATA_IOPORT);
	wait_on_command(0, inb_p(SONYPI_CST_IOPORT) & 2, ITERATIONS_LONG);
	return 0;
}

/*
 * Read one byte from an embedded-controller register into *value.
 * Same ACPI / direct-port split as sonypi_ec_write().
 */
static int sonypi_ec_read(u8 addr, u8 *value)
{
#ifdef CONFIG_ACPI
	if (SONYPI_ACPI_ACTIVE)
		return ec_read(addr, value);
#endif
	wait_on_command(1, inb_p(SONYPI_CST_IOPORT) & 3, ITERATIONS_LONG);
	outb_p(0x80, SONYPI_CST_IOPORT);	/* EC "read register" command */
	wait_on_command(0, inb_p(SONYPI_CST_IOPORT) & 2, ITERATIONS_LONG);
	outb_p(addr, SONYPI_DATA_IOPORT);
	wait_on_command(0, inb_p(SONYPI_CST_IOPORT) & 2, ITERATIONS_LONG);
	*value = inb_p(SONYPI_DATA_IOPORT);
	return 0;
}

/*
 * Read a little-endian 16-bit value spread over two consecutive EC
 * registers.  Returns 0 on success, -1 if either byte read fails.
 */
static int ec_read16(u8 addr, u16 *value)
{
	u8 val_lb, val_hb;

	if (sonypi_ec_read(addr, &val_lb))
		return -1;
	if (sonypi_ec_read(addr + 1, &val_hb))
		return -1;
	*value = val_lb | (val_hb << 8);
	return 0;
}

/* Initializes the device -
   this comes from the AML code in the ACPI bios */
static void sonypi_type1_srs(void)
{
	u32 v;

	/* Program the base ioport into the G10A PCI config register. */
	pci_read_config_dword(sonypi_device.dev, SONYPI_G10A, &v);
	v = (v & 0xFFFF0000) | ((u32) sonypi_device.ioport1);
	pci_write_config_dword(sonypi_device.dev, SONYPI_G10A, v);

	pci_read_config_dword(sonypi_device.dev, SONYPI_G10A, &v);
	v = (v & 0xFFF0FFFF) |
	    (((u32) sonypi_device.ioport1 ^ sonypi_device.ioport2) << 16);
	pci_write_config_dword(sonypi_device.dev, SONYPI_G10A, v);

	/* Select the IRQ routing bits (see sonypi_type1_irq_list). */
	v = inl(SONYPI_IRQ_PORT);
	v &= ~(((u32) 0x3) << SONYPI_IRQ_SHIFT);
	v |= (((u32) sonypi_device.bits) << SONYPI_IRQ_SHIFT);
	outl(v, SONYPI_IRQ_PORT);

	pci_read_config_dword(sonypi_device.dev, SONYPI_G10A, &v);
	v = (v & 0xFF1FFFFF) | 0x00C00000;
	pci_write_config_dword(sonypi_device.dev, SONYPI_G10A, v);
}

static void sonypi_type2_srs(void)
{
	/* Hand the chosen ioport and IRQ routing to the EC. */
	if (sonypi_ec_write(SONYPI_SHIB, (sonypi_device.ioport1 & 0xFF00) >> 8))
		printk(KERN_WARNING "ec_write failed\n");
	if (sonypi_ec_write(SONYPI_SLOB, sonypi_device.ioport1 & 0x00FF))
		printk(KERN_WARNING "ec_write failed\n");
	if (sonypi_ec_write(SONYPI_SIRQ, sonypi_device.bits))
		printk(KERN_WARNING "ec_write failed\n");
	udelay(10);
}

static void sonypi_type3_srs(void)
{
	u16 v16;
	u8  v8;

	/* This model type uses the same initialiazation of
	 * the embedded controller as the type2 models. */
	sonypi_type2_srs();

	/* Initialization of PCI config space of the LPC interface bridge. */
	v16 = (sonypi_device.ioport1 & 0xFFF0) | 0x01;
	pci_write_config_word(sonypi_device.dev, SONYPI_TYPE3_GID2, v16);
	pci_read_config_byte(sonypi_device.dev, SONYPI_TYPE3_MISC, &v8);
	v8 = (v8 & 0xCF) | 0x10;
	pci_write_config_byte(sonypi_device.dev, SONYPI_TYPE3_MISC, v8);
}

/* Disables the device - this comes from the AML code in the ACPI bios */
static void sonypi_type1_dis(void)
{
	u32 v;

	pci_read_config_dword(sonypi_device.dev, SONYPI_G10A, &v);
	v = v & 0xFF3FFFFF;
	pci_write_config_dword(sonypi_device.dev, SONYPI_G10A, v);

	/* 0x3 == "no IRQ" in the type1 routing table */
	v = inl(SONYPI_IRQ_PORT);
	v |= (0x3 << SONYPI_IRQ_SHIFT);
	outl(v, SONYPI_IRQ_PORT);
}

static void sonypi_type2_dis(void)
{
	if (sonypi_ec_write(SONYPI_SHIB, 0))
		printk(KERN_WARNING "ec_write failed\n");
	if (sonypi_ec_write(SONYPI_SLOB, 0))
		printk(KERN_WARNING "ec_write failed\n");
	if (sonypi_ec_write(SONYPI_SIRQ, 0))
		printk(KERN_WARNING "ec_write failed\n");
}

static void sonypi_type3_dis(void)
{
	sonypi_type2_dis();
	udelay(10);
	pci_write_config_word(sonypi_device.dev, SONYPI_TYPE3_GID2, 0);
}

/*
 * Low-level command primitives: write a command byte (and optionally
 * one or two argument bytes) to the device ioports, polling the busy
 * bit (bit 1 of ioport2) before each write, then read back one byte.
 */
static u8 sonypi_call1(u8 dev)
{
	u8 v1, v2;

	wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG);
	outb(dev, sonypi_device.ioport2);
	v1 = inb_p(sonypi_device.ioport2);
	v2 = inb_p(sonypi_device.ioport1);
	return v2;
}

static u8 sonypi_call2(u8 dev, u8 fn)
{
	u8 v1;

	wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG);
	outb(dev, sonypi_device.ioport2);
	wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG);
	outb(fn, sonypi_device.ioport1);
	v1 = inb_p(sonypi_device.ioport1);
	return v1;
}

static u8 sonypi_call3(u8 dev, u8 fn, u8 v)
{
	u8 v1;

	wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG);
	outb(dev, sonypi_device.ioport2);
	wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG);
	outb(fn, sonypi_device.ioport1);
	wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG);
	outb(v, sonypi_device.ioport1);
	v1 = inb_p(sonypi_device.ioport1);
	return v1;
}

#if 0
/* Get brightness,
   hue etc. Unreliable... */
static u8 sonypi_read(u8 fn)
{
	u8 v1, v2;
	int n = 100;

	/* Retry until two consecutive reads agree (and are not 0xff). */
	while (n--) {
		v1 = sonypi_call2(0x8f, fn);
		v2 = sonypi_call2(0x8f, fn);
		if (v1 == v2 && v1 != 0xff)
			return v1;
	}
	return 0xff;
}
#endif

/* Set brightness, hue etc */
static void sonypi_set(u8 fn, u8 v)
{
	wait_on_command(0, sonypi_call3(0x90, fn, v), ITERATIONS_SHORT);
}

/* Tests if the camera is ready */
static int sonypi_camera_ready(void)
{
	u8 v;

	v = sonypi_call2(0x8f, SONYPI_CAMERA_STATUS);
	return (v != 0xff && (v & SONYPI_CAMERA_STATUS_READY));
}

/* Turns the camera off */
static void sonypi_camera_off(void)
{
	sonypi_set(SONYPI_CAMERA_PICTURE, SONYPI_CAMERA_MUTE_MASK);

	if (!sonypi_device.camera_power)
		return;

	sonypi_call2(0x91, 0);
	sonypi_device.camera_power = 0;
}

/* Turns the camera on */
static void sonypi_camera_on(void)
{
	int i, j;

	if (sonypi_device.camera_power)
		return;

	/* Up to 5 power-on attempts, each polling readiness for ~4s. */
	for (j = 5; j > 0; j--) {

		while (sonypi_call2(0x91, 0x1))
			msleep(10);
		sonypi_call1(0x93);

		for (i = 400; i > 0; i--) {
			if (sonypi_camera_ready())
				break;
			msleep(10);
		}
		if (i)
			break;
	}

	if (j == 0) {
		printk(KERN_WARNING "sonypi: failed to power on camera\n");
		return;
	}

	sonypi_set(0x10, 0x5a);
	sonypi_device.camera_power = 1;
}

/* sets the bluetooth subsystem power state */
static void sonypi_setbluetoothpower(u8 state)
{
	state = !!state;

	if (sonypi_device.bluetooth_power == state)
		return;

	sonypi_call2(0x96, state);
	sonypi_call1(0x82);
	sonypi_device.bluetooth_power = state;
}

/*
 * Workqueue handler: drain the input_fifo and emit the deferred key
 * release (10ms after the press) for each queued keypress.
 */
static void input_keyrelease(struct work_struct *work)
{
	struct sonypi_keypress kp;

	while (kfifo_out_locked(&sonypi_device.input_fifo, (unsigned char *)&kp,
			sizeof(kp), &sonypi_device.input_fifo_lock)
			== sizeof(kp)) {
		msleep(10);
		input_report_key(kp.dev, kp.key, 0);
		input_sync(kp.dev);
	}
}

/*
 * Translate a sonypi event into input-layer events: jog dial motion is
 * reported as REL_WHEEL immediately; key-type events report the press
 * now and queue the release for the input_keyrelease() work.
 */
static void sonypi_report_input_event(u8 event)
{
	struct input_dev *jog_dev = sonypi_device.input_jog_dev;
	struct input_dev *key_dev = sonypi_device.input_key_dev;
	struct sonypi_keypress kp = { NULL };
	int i;

	switch (event) {
	case SONYPI_EVENT_JOGDIAL_UP:
	case SONYPI_EVENT_JOGDIAL_UP_PRESSED:
		input_report_rel(jog_dev, REL_WHEEL, 1);
		input_sync(jog_dev);
		break;

	case SONYPI_EVENT_JOGDIAL_DOWN:
	case SONYPI_EVENT_JOGDIAL_DOWN_PRESSED:
		input_report_rel(jog_dev, REL_WHEEL, -1);
		input_sync(jog_dev);
		break;

	case SONYPI_EVENT_JOGDIAL_PRESSED:
		kp.key = BTN_MIDDLE;
		kp.dev = jog_dev;
		break;

	case SONYPI_EVENT_FNKEY_RELEASED:
		/* Nothing, not all VAIOs generate this event */
		break;

	default:
		for (i = 0; sonypi_inputkeys[i].sonypiev; i++)
			if (event == sonypi_inputkeys[i].sonypiev) {
				kp.dev = key_dev;
				kp.key = sonypi_inputkeys[i].inputev;
				break;
			}
		break;
	}

	if (kp.dev) {
		input_report_key(kp.dev, kp.key, 1);
		input_sync(kp.dev);
		kfifo_in_locked(&sonypi_device.input_fifo,
				(unsigned char *)&kp, sizeof(kp),
				&sonypi_device.input_fifo_lock);
		schedule_work(&sonypi_device.input_work);
	}
}

/* Interrupt handler: some event is available */
static irqreturn_t sonypi_irq(int irq, void *dev_id)
{
	u8 v1, v2, event = 0;
	int i, j;

	v1 = inb_p(sonypi_device.ioport1);
	v2 = inb_p(sonypi_device.ioport1 + sonypi_device.evtype_offset);

	for (i = 0; sonypi_eventtypes[i].model; i++) {
		if (sonypi_device.model != sonypi_eventtypes[i].model)
			continue;
		if ((v2 & sonypi_eventtypes[i].data) !=
		    sonypi_eventtypes[i].data)
			continue;
		if (!(mask & sonypi_eventtypes[i].mask))
			continue;
		for (j = 0; sonypi_eventtypes[i].events[j].event; j++) {
			if (v1 == sonypi_eventtypes[i].events[j].data) {
				event = sonypi_eventtypes[i].events[j].event;
				goto found;
			}
		}
	}

	if (verbose)
		printk(KERN_WARNING
		       "sonypi: unknown event port1=0x%02x,port2=0x%02x\n",
		       v1, v2);
	/* We need to return IRQ_HANDLED here because there *are*
	 * events belonging to the sonypi device we don't know about,
	 * but we still don't want those to pollute the logs...
	 */
	return IRQ_HANDLED;

found:
	if (verbose > 1)
		printk(KERN_INFO
		       "sonypi: event port1=0x%02x,port2=0x%02x\n", v1, v2);

	if (useinput)
		sonypi_report_input_event(event);

#ifdef CONFIG_ACPI
	if (sonypi_acpi_device)
		acpi_bus_generate_proc_event(sonypi_acpi_device, 1, event);
#endif

	/* Queue the event for /dev readers and wake them up. */
	kfifo_in_locked(&sonypi_device.fifo, (unsigned char *)&event,
			sizeof(event), &sonypi_device.fifo_lock);
	kill_fasync(&sonypi_device.fifo_async, SIGIO, POLL_IN);
	wake_up_interruptible(&sonypi_device.fifo_proc_list);

	return IRQ_HANDLED;
}

static int sonypi_misc_fasync(int fd, struct file *filp, int on)
{
	return fasync_helper(fd, filp, on, &sonypi_device.fifo_async);
}

static int sonypi_misc_release(struct inode *inode, struct file *file)
{
	mutex_lock(&sonypi_device.lock);
	sonypi_device.open_count--;
	mutex_unlock(&sonypi_device.lock);
	return 0;
}

static int sonypi_misc_open(struct inode *inode, struct file *file)
{
	mutex_lock(&sonypi_device.lock);
	/* Flush input queue on first open */
	if (!sonypi_device.open_count)
		kfifo_reset(&sonypi_device.fifo);
	sonypi_device.open_count++;
	mutex_unlock(&sonypi_device.lock);
	return 0;
}

/*
 * Read queued event bytes.  Blocks (unless O_NONBLOCK) until at least
 * one event is available, then copies as many as fit in 'count'.
 */
static ssize_t sonypi_misc_read(struct file *file, char __user *buf,
				size_t count, loff_t *pos)
{
	ssize_t ret;
	unsigned char c;

	if ((kfifo_len(&sonypi_device.fifo) == 0) &&
	    (file->f_flags & O_NONBLOCK))
		return -EAGAIN;

	ret = wait_event_interruptible(sonypi_device.fifo_proc_list,
				       kfifo_len(&sonypi_device.fifo) != 0);
	if (ret)
		return ret;

	/* ret doubles as the byte count from here on (starts at 0). */
	while (ret < count &&
	       (kfifo_out_locked(&sonypi_device.fifo, &c, sizeof(c),
				 &sonypi_device.fifo_lock) == sizeof(c))) {
		if (put_user(c, buf++))
			return -EFAULT;
		ret++;
	}

	if (ret > 0) {
		struct inode *inode = file->f_path.dentry->d_inode;
		inode->i_atime = current_fs_time(inode->i_sb);
	}

	return ret;
}

static unsigned int sonypi_misc_poll(struct file *file, poll_table *wait)
{
	poll_wait(file, &sonypi_device.fifo_proc_list, wait);
	if (kfifo_len(&sonypi_device.fifo))
		return POLLIN | POLLRDNORM;
	return 0;
}

/*
 * ioctl interface: brightness, battery capacity/charge, battery flags,
 * bluetooth power, fan control and temperature, all serialized by
 * sonypi_device.lock.
 */
static long sonypi_misc_ioctl(struct file *fp,
			     unsigned int cmd, unsigned long arg)
{
	long ret = 0;
	void __user *argp = (void __user *)arg;
	u8 val8;
	u16 val16;

	mutex_lock(&sonypi_device.lock);
	switch (cmd) {
	case SONYPI_IOCGBRT:
		if (sonypi_ec_read(SONYPI_LCD_LIGHT, &val8)) {
			ret = -EIO;
			break;
		}
		if (copy_to_user(argp, &val8, sizeof(val8)))
			ret = -EFAULT;
		break;
	case SONYPI_IOCSBRT:
		if (copy_from_user(&val8, argp, sizeof(val8))) {
			ret = -EFAULT;
			break;
		}
		if (sonypi_ec_write(SONYPI_LCD_LIGHT, val8))
			ret = -EIO;
		break;
	case SONYPI_IOCGBAT1CAP:
		if (ec_read16(SONYPI_BAT1_FULL, &val16)) {
			ret = -EIO;
			break;
		}
		if (copy_to_user(argp, &val16, sizeof(val16)))
			ret = -EFAULT;
		break;
	case SONYPI_IOCGBAT1REM:
		if (ec_read16(SONYPI_BAT1_LEFT, &val16)) {
			ret = -EIO;
			break;
		}
		if (copy_to_user(argp, &val16, sizeof(val16)))
			ret = -EFAULT;
		break;
	case SONYPI_IOCGBAT2CAP:
		if (ec_read16(SONYPI_BAT2_FULL, &val16)) {
			ret = -EIO;
			break;
		}
		if (copy_to_user(argp, &val16, sizeof(val16)))
			ret = -EFAULT;
		break;
	case SONYPI_IOCGBAT2REM:
		if (ec_read16(SONYPI_BAT2_LEFT, &val16)) {
			ret = -EIO;
			break;
		}
		if (copy_to_user(argp, &val16, sizeof(val16)))
			ret = -EFAULT;
		break;
	case SONYPI_IOCGBATFLAGS:
		if (sonypi_ec_read(SONYPI_BAT_FLAGS, &val8)) {
			ret = -EIO;
			break;
		}
		val8 &= 0x07;	/* only the low three flag bits are valid */
		if (copy_to_user(argp, &val8, sizeof(val8)))
			ret = -EFAULT;
		break;
	case SONYPI_IOCGBLUE:
		val8 = sonypi_device.bluetooth_power;
		if (copy_to_user(argp, &val8, sizeof(val8)))
			ret = -EFAULT;
		break;
	case SONYPI_IOCSBLUE:
		if (copy_from_user(&val8, argp, sizeof(val8))) {
			ret = -EFAULT;
			break;
		}
		sonypi_setbluetoothpower(val8);
		break;
	/* FAN Controls */
	case SONYPI_IOCGFAN:
		if (sonypi_ec_read(SONYPI_FAN0_STATUS, &val8)) {
			ret = -EIO;
			break;
		}
		if (copy_to_user(argp, &val8, sizeof(val8)))
			ret = -EFAULT;
		break;
	case SONYPI_IOCSFAN:
		if (copy_from_user(&val8, argp, sizeof(val8))) {
			ret = -EFAULT;
			break;
		}
		if (sonypi_ec_write(SONYPI_FAN0_STATUS, val8))
			ret = -EIO;
		break;
	/* GET Temperature (useful under APM) */
	case SONYPI_IOCGTEMP:
		if (sonypi_ec_read(SONYPI_TEMP_STATUS, &val8)) {
			ret = -EIO;
			break;
		}
		if (copy_to_user(argp, &val8, sizeof(val8)))
			ret = -EFAULT;
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&sonypi_device.lock);
	return ret;
}

static const struct file_operations sonypi_misc_fops = {
	.owner		= THIS_MODULE,
	.read		= sonypi_misc_read,
	.poll		= sonypi_misc_poll,
	.open		= sonypi_misc_open,
	.release	= sonypi_misc_release,
	.fasync		= sonypi_misc_fasync,
	.unlocked_ioctl	= sonypi_misc_ioctl,
	.llseek		= no_llseek,
};

static struct miscdevice sonypi_misc_device = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "sonypi",
	.fops		= &sonypi_misc_fops,
};

/*
 * Run the model-specific resource setup, then the common command
 * sequence enabling event generation.
 */
static void sonypi_enable(unsigned int camera_on)
{
	switch (sonypi_device.model) {
	case SONYPI_DEVICE_MODEL_TYPE1:
		sonypi_type1_srs();
		break;
	case SONYPI_DEVICE_MODEL_TYPE2:
		sonypi_type2_srs();
		break;
	case SONYPI_DEVICE_MODEL_TYPE3:
		sonypi_type3_srs();
		break;
	}

	sonypi_call1(0x82);
	sonypi_call2(0x81, 0xff);
	sonypi_call1(compat ? 0x92 : 0x82);

	/* Enable ACPI mode to get Fn key events */
	if (!SONYPI_ACPI_ACTIVE && fnkeyinit)
		outb(0xf0, 0xb2);

	if (camera && camera_on)
		sonypi_camera_on();
}

static int sonypi_disable(void)
{
	sonypi_call2(0x81, 0);	/* make sure we don't get any more events */

	if (camera)
		sonypi_camera_off();

	/* disable ACPI mode */
	if (!SONYPI_ACPI_ACTIVE && fnkeyinit)
		outb(0xf1, 0xb2);

	switch (sonypi_device.model) {
	case SONYPI_DEVICE_MODEL_TYPE1:
		sonypi_type1_dis();
		break;
	case SONYPI_DEVICE_MODEL_TYPE2:
		sonypi_type2_dis();
		break;
	case SONYPI_DEVICE_MODEL_TYPE3:
		sonypi_type3_dis();
		break;
	}

	return 0;
}

#ifdef CONFIG_ACPI
static int sonypi_acpi_add(struct acpi_device *device)
{
	sonypi_acpi_device = device;
	strcpy(acpi_device_name(device), "Sony laptop hotkeys");
	strcpy(acpi_device_class(device), "sony/hotkey");
	return 0;
}

static int sonypi_acpi_remove(struct acpi_device *device, int type)
{
	sonypi_acpi_device = NULL;
	return 0;
}

static const struct acpi_device_id sonypi_device_ids[] = {
	{"SNY6001", 0},
	{"", 0},
};

static struct acpi_driver sonypi_acpi_driver = {
	.name           = "sonypi",
	.class          = "hkey",
	.ids            = sonypi_device_ids,
	.ops            = {
		.add    = sonypi_acpi_add,
		.remove = sonypi_acpi_remove,
	},
};
#endif

/*
 * Allocate and register the jog dial and hotkey input devices.  On
 * failure everything allocated here is torn down and an errno is
 * returned.
 */
static int __devinit sonypi_create_input_devices(struct platform_device *pdev)
{
	struct input_dev *jog_dev;
	struct input_dev *key_dev;
	int i;
	int error;

	sonypi_device.input_jog_dev = jog_dev = input_allocate_device();
	if (!jog_dev)
		return -ENOMEM;

	jog_dev->name = "Sony Vaio Jogdial";
	jog_dev->id.bustype = BUS_ISA;
	jog_dev->id.vendor = PCI_VENDOR_ID_SONY;
	jog_dev->dev.parent = &pdev->dev;

	jog_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL);
	jog_dev->keybit[BIT_WORD(BTN_MOUSE)] = BIT_MASK(BTN_MIDDLE);
	jog_dev->relbit[0] = BIT_MASK(REL_WHEEL);

	sonypi_device.input_key_dev = key_dev = input_allocate_device();
	if (!key_dev) {
		error = -ENOMEM;
		goto err_free_jogdev;
	}

	key_dev->name = "Sony Vaio Keys";
	key_dev->id.bustype = BUS_ISA;
	key_dev->id.vendor = PCI_VENDOR_ID_SONY;
	key_dev->dev.parent = &pdev->dev;

	/* Initialize the Input Drivers: special keys */
	key_dev->evbit[0] = BIT_MASK(EV_KEY);
	for (i = 0; sonypi_inputkeys[i].sonypiev; i++)
		if (sonypi_inputkeys[i].inputev)
			set_bit(sonypi_inputkeys[i].inputev, key_dev->keybit);

	error = input_register_device(jog_dev);
	if (error)
		goto err_free_keydev;

	error = input_register_device(key_dev);
	if (error)
		goto err_unregister_jogdev;

	return 0;

 err_unregister_jogdev:
	input_unregister_device(jog_dev);
	/* Set to NULL so we don't free it again below */
	jog_dev = NULL;
 err_free_keydev:
	input_free_device(key_dev);
	sonypi_device.input_key_dev = NULL;
 err_free_jogdev:
	input_free_device(jog_dev);
	sonypi_device.input_jog_dev = NULL;

	return error;
}

static int __devinit sonypi_setup_ioports(struct sonypi_device *dev,
				const struct sonypi_ioport_list *ioport_list)
{
	/* try to detect if sony-laptop is being used and thus
	 * has already requested one of the known ioports.
	 * As in the deprecated check_region this is racy as we have
	 * multiple ioports available and one of them can be requested
	 * between this check and the subsequent request.
	 * Anyway, as an attempt to be some more user-friendly as we
	 * currently are, this is enough. */
	const struct sonypi_ioport_list *check = ioport_list;

	while (check_ioport && check->port1) {
		if (!request_region(check->port1,
				   sonypi_device.region_size,
				   "Sony Programmable I/O Device Check")) {
			printk(KERN_ERR
			       "sonypi: ioport 0x%.4x busy, using sony-laptop? "
			       "if not use check_ioport=0\n",
			       check->port1);
			return -EBUSY;
		}
		release_region(check->port1, sonypi_device.region_size);
		check++;
	}

	/* Claim the first free candidate port pair from the list. */
	while (ioport_list->port1) {

		if (request_region(ioport_list->port1,
				   sonypi_device.region_size,
				   "Sony Programmable I/O Device")) {
			dev->ioport1 = ioport_list->port1;
			dev->ioport2 = ioport_list->port2;
			return 0;
		}
		ioport_list++;
	}

	return -EBUSY;
}

/* Claim the first IRQ from the list that request_irq() accepts. */
static int __devinit sonypi_setup_irq(struct sonypi_device *dev,
				      const struct sonypi_irq_list *irq_list)
{
	while (irq_list->irq) {

		if (!request_irq(irq_list->irq, sonypi_irq,
				 IRQF_SHARED, "sonypi", sonypi_irq)) {
			dev->irq = irq_list->irq;
			dev->bits = irq_list->bits;
			return 0;
		}
		irq_list++;
	}

	return -EBUSY;
}

static void __devinit sonypi_display_info(void)
{
	printk(KERN_INFO "sonypi: detected type%d model, "
	       "verbose = %d, fnkeyinit = %s, camera = %s, "
	       "compat = %s, mask = 0x%08lx, useinput = %s, acpi = %s\n",
	       sonypi_device.model,
	       verbose,
	       fnkeyinit ? "on" : "off",
	       camera ? "on" : "off",
	       compat ? "on" : "off",
	       mask,
	       useinput ? "on" : "off",
	       SONYPI_ACPI_ACTIVE ? "on" : "off");
	printk(KERN_INFO
	       "sonypi: enabled at irq=%d, port1=0x%x, port2=0x%x\n",
	       sonypi_device.irq,
	       sonypi_device.ioport1, sonypi_device.ioport2);

	if (minor == -1)
		printk(KERN_INFO "sonypi: device allocated minor is %d\n",
		       sonypi_misc_device.minor);
}

/*
 * Probe: detect the model generation from the PCI southbridge, claim
 * ioports and an IRQ, register the misc device and (optionally) the
 * input devices, then enable event generation.  Errors unwind in
 * strict reverse order of acquisition.
 */
static int __devinit sonypi_probe(struct platform_device *dev)
{
	const struct sonypi_ioport_list *ioport_list;
	const struct sonypi_irq_list *irq_list;
	struct pci_dev *pcidev;
	int error;

	printk(KERN_WARNING "sonypi: please try the sony-laptop module instead "
			"and report failures, see also "
			"http://www.linux.it/~malattia/wiki/index.php/Sony_drivers\n");

	spin_lock_init(&sonypi_device.fifo_lock);
	error = kfifo_alloc(&sonypi_device.fifo, SONYPI_BUF_SIZE, GFP_KERNEL);
	if (error) {
		printk(KERN_ERR "sonypi: kfifo_alloc failed\n");
		return error;
	}

	init_waitqueue_head(&sonypi_device.fifo_proc_list);
	mutex_init(&sonypi_device.lock);
	sonypi_device.bluetooth_power = -1;	/* state unknown until first set */

	/* Southbridge PCI ID determines the model generation. */
	if ((pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
				     PCI_DEVICE_ID_INTEL_82371AB_3, NULL)))
		sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE1;
	else if ((pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
					  PCI_DEVICE_ID_INTEL_ICH6_1, NULL)))
		sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE3;
	else if ((pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
					  PCI_DEVICE_ID_INTEL_ICH7_1, NULL)))
		sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE3;
	else
		sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE2;

	if (pcidev && pci_enable_device(pcidev)) {
		printk(KERN_ERR "sonypi: pci_enable_device failed\n");
		error = -EIO;
		goto err_put_pcidev;
	}

	sonypi_device.dev = pcidev;

	if (sonypi_device.model == SONYPI_DEVICE_MODEL_TYPE1) {
		ioport_list = sonypi_type1_ioport_list;
		sonypi_device.region_size = SONYPI_TYPE1_REGION_SIZE;
		sonypi_device.evtype_offset = SONYPI_TYPE1_EVTYPE_OFFSET;
		irq_list = sonypi_type1_irq_list;
	} else if (sonypi_device.model == SONYPI_DEVICE_MODEL_TYPE2) {
		ioport_list = sonypi_type2_ioport_list;
		sonypi_device.region_size = SONYPI_TYPE2_REGION_SIZE;
		sonypi_device.evtype_offset = SONYPI_TYPE2_EVTYPE_OFFSET;
		irq_list = sonypi_type2_irq_list;
	} else {
		ioport_list = sonypi_type3_ioport_list;
		sonypi_device.region_size = SONYPI_TYPE3_REGION_SIZE;
		sonypi_device.evtype_offset = SONYPI_TYPE3_EVTYPE_OFFSET;
		irq_list = sonypi_type3_irq_list;
	}

	error = sonypi_setup_ioports(&sonypi_device, ioport_list);
	if (error) {
		printk(KERN_ERR "sonypi: failed to request ioports\n");
		goto err_disable_pcidev;
	}

	error = sonypi_setup_irq(&sonypi_device, irq_list);
	if (error) {
		printk(KERN_ERR "sonypi: request_irq failed\n");
		goto err_free_ioports;
	}

	if (minor != -1)
		sonypi_misc_device.minor = minor;
	error = misc_register(&sonypi_misc_device);
	if (error) {
		printk(KERN_ERR "sonypi: misc_register failed\n");
		goto err_free_irq;
	}

	sonypi_display_info();

	if (useinput) {

		error = sonypi_create_input_devices(dev);
		if (error) {
			printk(KERN_ERR
			       "sonypi: failed to create input devices\n");
			goto err_miscdev_unregister;
		}

		spin_lock_init(&sonypi_device.input_fifo_lock);
		error = kfifo_alloc(&sonypi_device.input_fifo, SONYPI_BUF_SIZE,
				GFP_KERNEL);
		if (error) {
			printk(KERN_ERR "sonypi: kfifo_alloc failed\n");
			goto err_inpdev_unregister;
		}

		INIT_WORK(&sonypi_device.input_work, input_keyrelease);
	}

	sonypi_enable(0);

	return 0;

 err_inpdev_unregister:
	input_unregister_device(sonypi_device.input_key_dev);
	input_unregister_device(sonypi_device.input_jog_dev);
 err_miscdev_unregister:
	misc_deregister(&sonypi_misc_device);
 err_free_irq:
	free_irq(sonypi_device.irq, sonypi_irq);
 err_free_ioports:
	release_region(sonypi_device.ioport1, sonypi_device.region_size);
 err_disable_pcidev:
	if (pcidev)
		pci_disable_device(pcidev);
 err_put_pcidev:
	pci_dev_put(pcidev);
	kfifo_free(&sonypi_device.fifo);

	return error;
}

static int __devexit sonypi_remove(struct platform_device *dev)
{
	sonypi_disable();

	synchronize_irq(sonypi_device.irq);
	flush_work_sync(&sonypi_device.input_work);

	if (useinput) {
		input_unregister_device(sonypi_device.input_key_dev);
		input_unregister_device(sonypi_device.input_jog_dev);
		kfifo_free(&sonypi_device.input_fifo);
	}

	misc_deregister(&sonypi_misc_device);

	free_irq(sonypi_device.irq, sonypi_irq);
	release_region(sonypi_device.ioport1, sonypi_device.region_size);

	if (sonypi_device.dev) {
		pci_disable_device(sonypi_device.dev);
		pci_dev_put(sonypi_device.dev);
	}

	kfifo_free(&sonypi_device.fifo);

	return 0;
}

#ifdef CONFIG_PM
/* Camera power state saved across suspend so resume can restore it. */
static int old_camera_power;

static int sonypi_suspend(struct platform_device *dev, pm_message_t state)
{
	old_camera_power = sonypi_device.camera_power;
	sonypi_disable();

	return 0;
}

static int sonypi_resume(struct platform_device *dev)
{
	sonypi_enable(old_camera_power);
	return 0;
}
#else
#define sonypi_suspend	NULL
#define sonypi_resume	NULL
#endif

static void sonypi_shutdown(struct platform_device *dev)
{
	sonypi_disable();
}

static struct platform_driver sonypi_driver = {
	.driver		= {
		.name	= "sonypi",
		.owner	= THIS_MODULE,
	},
	.probe		= sonypi_probe,
	.remove		= __devexit_p(sonypi_remove),
	.shutdown	= sonypi_shutdown,
	.suspend	= sonypi_suspend,
	.resume		= sonypi_resume,
};

static struct platform_device *sonypi_platform_device;

/* Only load on Sony machines (PCG-* and VGN-* product names). */
static struct dmi_system_id __initdata sonypi_dmi_table[] = {
	{
		.ident = "Sony Vaio",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
			DMI_MATCH(DMI_PRODUCT_NAME, "PCG-"),
		},
	},
	{
		.ident = "Sony Vaio",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
			DMI_MATCH(DMI_PRODUCT_NAME, "VGN-"),
		},
	},
	{ }
};

static int __init sonypi_init(void)
{
	int error;

	printk(KERN_INFO
		"sonypi: Sony Programmable I/O Controller Driver v%s.\n",
		SONYPI_DRIVER_VERSION);

	if (!dmi_check_system(sonypi_dmi_table))
		return -ENODEV;

	error = platform_driver_register(&sonypi_driver);
	if (error)
		return error;

	sonypi_platform_device = platform_device_alloc("sonypi", -1);
	if (!sonypi_platform_device) {
		error = -ENOMEM;
		goto err_driver_unregister;
	}

	error = platform_device_add(sonypi_platform_device);
	if (error)
		goto err_free_device;

#ifdef CONFIG_ACPI
	/* Best effort: ACPI hotkey notification is optional. */
	if (acpi_bus_register_driver(&sonypi_acpi_driver) >= 0)
		acpi_driver_registered = 1;
#endif

	return 0;

 err_free_device:
	platform_device_put(sonypi_platform_device);
 err_driver_unregister:
	platform_driver_unregister(&sonypi_driver);
	return error;
}

static void __exit sonypi_exit(void)
{
#ifdef CONFIG_ACPI
	if (acpi_driver_registered)
		acpi_bus_unregister_driver(&sonypi_acpi_driver);
#endif
	platform_device_unregister(sonypi_platform_device);
	platform_driver_unregister(&sonypi_driver);
	printk(KERN_INFO "sonypi: removed.\n");
}

module_init(sonypi_init);
module_exit(sonypi_exit);
gpl-2.0
bmourit/owl2x-linux
arch/blackfin/kernel/dumpstack.c
4198
4386
/* Provide basic stack dumping functions
 *
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later
 */

#include <linux/kernel.h>
#include <linux/thread_info.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <asm/trace.h>

/*
 * Checks to see if the address pointed to is either a
 * 16-bit CALL instruction, or a 32-bit CALL instruction
 */
static bool is_bfin_call(unsigned short *addr)
{
	unsigned int opcode;

	if (!get_instruction(&opcode, addr))
		return false;

	/* Opcode ranges for the 16-bit CALL forms and the 32-bit CALL form. */
	if ((opcode >= 0x0060 && opcode <= 0x0067) ||
	    (opcode >= 0x0070 && opcode <= 0x0077) ||
	    (opcode >= 0xE3000000 && opcode <= 0xE3FFFFFF))
		return true;

	return false;
}

/*
 * Dump the stack of @task (or the current context when both arguments
 * are NULL).  Works in two passes: first it prints the raw stack words,
 * then it prints decoded return addresses / frames found on the stack.
 */
void show_stack(struct task_struct *task, unsigned long *stack)
{
#ifdef CONFIG_PRINTK
	unsigned int *addr, *endstack, *fp = 0, *frame;
	unsigned short *ins_addr;
	char buf[150];
	unsigned int i, j, ret_addr, frame_no = 0;

	/*
	 * If we have been passed a specific stack, use that one otherwise
	 * if we have been passed a task structure, use that, otherwise
	 * use the stack of where the variable "stack" exists
	 */
	if (stack == NULL) {
		if (task) {
			/* We know this is a kernel stack, so this is the start/end */
			/* NOTE(review): assumes task->thread.ksp is a valid kernel
			 * stack pointer for a non-running task — confirm. */
			stack = (unsigned long *)task->thread.ksp;
			endstack = (unsigned int *)(((unsigned int)(stack) & ~(THREAD_SIZE - 1)) + THREAD_SIZE);
		} else {
			/* print out the existing stack info */
			stack = (unsigned long *)&stack;
			endstack = (unsigned int *)PAGE_ALIGN((unsigned int)stack);
		}
	} else
		endstack = (unsigned int *)PAGE_ALIGN((unsigned int)stack);

	printk(KERN_NOTICE "Stack info:\n");
	decode_address(buf, (unsigned int)stack);
	printk(KERN_NOTICE " SP: [0x%p] %s\n", stack, buf);

	/* Refuse to walk a range we cannot safely read. */
	if (!access_ok(VERIFY_READ, stack, (unsigned int)endstack - (unsigned int)stack)) {
		printk(KERN_NOTICE "Invalid stack pointer\n");
		return;
	}

	/* First thing is to look for a frame pointer */
	for (addr = (unsigned int *)((unsigned int)stack & ~0xF); addr < endstack; addr++) {
		/* Odd values cannot be code addresses; skip them. */
		if (*addr & 0x1)
			continue;
		ins_addr = (unsigned short *)*addr;
		ins_addr--;
		/* A word preceded by a CALL looks like a return address, so the
		 * word below it is a frame-pointer candidate. */
		if (is_bfin_call(ins_addr))
			fp = addr - 1;

		if (fp) {
			/* Let's check to see if it is a frame pointer by
			 * following the chain until it either leaves the
			 * stack (bogus) or terminates cleanly. */
			while (fp >= (addr - 1) && fp < endstack
			       && fp && ((unsigned int) fp & 0x3) == 0)
				fp = (unsigned int *)*fp;
			if (fp == 0 || fp == endstack) {
				fp = addr - 1;
				break;
			}
			fp = 0;
		}
	}
	if (fp) {
		frame = fp;
		printk(KERN_NOTICE " FP: (0x%p)\n", fp);
	} else
		frame = 0;

	/*
	 * Now that we think we know where things are, we
	 * walk the stack again, this time printing things out
	 * in case there is no frame pointer, we still look for
	 * valid return addresses
	 */

	/* First time print out data, next time, print out symbols */
	for (j = 0; j <= 1; j++) {
		if (j)
			printk(KERN_NOTICE "Return addresses in stack:\n");
		else
			printk(KERN_NOTICE " Memory from 0x%08lx to %p", ((long unsigned int)stack & ~0xF), endstack);

		fp = frame;
		frame_no = 0;

		for (addr = (unsigned int *)((unsigned int)stack & ~0xF), i = 0;
		     addr < endstack; addr++, i++) {

			ret_addr = 0;
			if (!j && i % 8 == 0)
				printk(KERN_NOTICE "%p:", addr);

			/* if it is an odd address, or zero, just skip it */
			if (*addr & 0x1 || !*addr)
				goto print;

			ins_addr = (unsigned short *)*addr;

			/* Go back one instruction, and see if it is a CALL */
			ins_addr--;
			ret_addr = is_bfin_call(ins_addr);
 print:
			if (!j && stack == (unsigned long *)addr)
				printk("[%08x]", *addr);
			else if (ret_addr)
				if (j) {
					decode_address(buf, (unsigned int)*addr);
					if (frame == addr) {
						printk(KERN_NOTICE " frame %2i : %s\n", frame_no, buf);
						continue;
					}
					printk(KERN_NOTICE " address : %s\n", buf);
				} else
					printk("<%08x>", *addr);
			else if (fp == addr) {
				/* Follow the frame-pointer chain as we print. */
				if (j)
					frame = addr+1;
				else
					printk("(%08x)", *addr);

				fp = (unsigned int *)*addr;
				frame_no++;

			} else if (!j)
				printk(" %08x ", *addr);
		}
		if (!j)
			printk("\n");
	}
#endif
}
EXPORT_SYMBOL(show_stack);

/*
 * Dump the hardware trace buffer and the current stack.  Tracing is
 * suspended around the dump so the dump itself does not pollute it.
 */
void dump_stack(void)
{
	unsigned long stack;
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
	int tflags;
#endif
	/* trace_buffer_save/restore compile away when tracing is disabled,
	 * which is why tflags may be conditionally declared. */
	trace_buffer_save(tflags);
	dump_bfin_trace_buffer();
	dump_stack_print_info(KERN_DEFAULT);
	show_stack(current, &stack);
	trace_buffer_restore(tflags);
}
EXPORT_SYMBOL(dump_stack);
gpl-2.0
playfulgod/msm-3.0
arch/um/os-Linux/sys-i386/registers.c
4710
1795
/* * Copyright (C) 2004 PathScale, Inc * Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) * Licensed under the GPL */ #include <errno.h> #include <sys/ptrace.h> #include <sys/user.h> #include "kern_constants.h" #include "longjmp.h" #include "user.h" #include "sysdep/ptrace_user.h" int save_fp_registers(int pid, unsigned long *fp_regs) { if (ptrace(PTRACE_GETFPREGS, pid, 0, fp_regs) < 0) return -errno; return 0; } int restore_fp_registers(int pid, unsigned long *fp_regs) { if (ptrace(PTRACE_SETFPREGS, pid, 0, fp_regs) < 0) return -errno; return 0; } int save_fpx_registers(int pid, unsigned long *fp_regs) { if (ptrace(PTRACE_GETFPXREGS, pid, 0, fp_regs) < 0) return -errno; return 0; } int restore_fpx_registers(int pid, unsigned long *fp_regs) { if (ptrace(PTRACE_SETFPXREGS, pid, 0, fp_regs) < 0) return -errno; return 0; } unsigned long get_thread_reg(int reg, jmp_buf *buf) { switch (reg) { case EIP: return buf[0]->__eip; case UESP: return buf[0]->__esp; case EBP: return buf[0]->__ebp; default: printk(UM_KERN_ERR "get_thread_regs - unknown register %d\n", reg); return 0; } } int have_fpx_regs = 1; int get_fp_registers(int pid, unsigned long *regs) { if (have_fpx_regs) return save_fpx_registers(pid, regs); else return save_fp_registers(pid, regs); } int put_fp_registers(int pid, unsigned long *regs) { if (have_fpx_regs) return restore_fpx_registers(pid, regs); else return restore_fp_registers(pid, regs); } void arch_init_registers(int pid) { struct user_fpxregs_struct fpx_regs; int err; err = ptrace(PTRACE_GETFPXREGS, pid, 0, &fpx_regs); if (!err) return; if (errno != EIO) panic("check_ptrace : PTRACE_GETFPXREGS failed, errno = %d", errno); have_fpx_regs = 0; }
gpl-2.0
MassStash/htc_m8_kernel_sense_5.0.2
drivers/net/wan/cycx_main.c
5734
9382
/*
 * cycx_main.c	Cyclades Cyclom 2X WAN Link Driver. Main module.
 *
 * Author:	Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 * Copyright:	(c) 1998-2003 Arnaldo Carvalho de Melo
 *
 * Based on sdlamain.c by Gene Kozin <genek@compuserve.com> &
 *			  Jaspreet Singh <jaspreet@sangoma.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 * ============================================================================
 * Please look at the bitkeeper changelog (or any other scm tool that ends up
 * importing bitkeeper changelog or that replaces bitkeeper in the future as
 * main tool for linux development).
 *
 * 2001/05/09	acme	Fix MODULE_DESC for debug, .bss nitpicks,
 *			some cleanups
 * 2000/07/13	acme	remove useless #ifdef MODULE and crap
 *			#if KERNEL_VERSION > blah
 * 2000/07/06	acme	__exit at cyclomx_cleanup
 * 2000/04/02	acme	dprintk and cycx_debug
 *			module_init/module_exit
 * 2000/01/21	acme	rename cyclomx_open to cyclomx_mod_inc_use_count
 *			and cyclomx_close to cyclomx_mod_dec_use_count
 * 2000/01/08	acme	cleanup
 * 1999/11/06	acme	cycx_down back to life (it needs to be
 *			called to iounmap the dpmbase)
 * 1999/08/09	acme	removed references to enable_tx_int
 *			use spinlocks instead of cli/sti in
 *			cyclomx_set_state
 * 1999/05/19	acme	works directly linked into the kernel
 *			init_waitqueue_head for 2.3.* kernel
 * 1999/05/18	acme	major cleanup (polling not needed), etc
 * 1998/08/28	acme	minor cleanup (ioctls for firmware deleted)
 *			queue_task activated
 * 1998/08/08	acme	Initial version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/stddef.h>	/* offsetof(), etc. */
#include <linux/errno.h>	/* return codes */
#include <linux/string.h>	/* inline memset(), etc. */
#include <linux/slab.h>		/* kmalloc(), kfree() */
#include <linux/kernel.h>	/* printk(), and other useful stuff */
#include <linux/module.h>	/* support for loadable modules */
#include <linux/ioport.h>	/* request_region(), release_region() */
#include <linux/wanrouter.h>	/* WAN router definitions */
#include <linux/cyclomx.h>	/* cyclomx common user API definitions */
#include <linux/init.h>		/* __init (when not using as a module) */
#include <linux/interrupt.h>

/* Module-wide debug level, settable via the "cycx_debug" module parameter. */
unsigned int cycx_debug;

MODULE_AUTHOR("Arnaldo Carvalho de Melo");
MODULE_DESCRIPTION("Cyclom 2X Sync Card Driver.");
MODULE_LICENSE("GPL");
module_param(cycx_debug, int, 0);
MODULE_PARM_DESC(cycx_debug, "cyclomx debug level");

/* Defines & Macros */

#define	CYCX_DRV_VERSION 0	/* version number */
#define	CYCX_DRV_RELEASE 11	/* release (minor version) number */
#define	CYCX_MAX_CARDS 1	/* max number of adapters */

#define	CONFIG_CYCX_CARDS 1

/* Function Prototypes */

/* WAN link driver entry points */
static int cycx_wan_setup(struct wan_device *wandev, wandev_conf_t *conf);
static int cycx_wan_shutdown(struct wan_device *wandev);

/* Miscellaneous functions */
static irqreturn_t cycx_isr(int irq, void *dev_id);

/* Global Data
 * Note: All data must be explicitly initialized!!!
 */

/* private data */
static const char cycx_drvname[] = "cyclomx";
static const char cycx_fullname[] = "CYCLOM 2X(tm) Sync Card Driver";
static const char cycx_copyright[] = "(c) 1998-2003 Arnaldo Carvalho de Melo "
			  "<acme@conectiva.com.br>";
static int cycx_ncards = CONFIG_CYCX_CARDS;
static struct cycx_device *cycx_card_array;	/* adapter data space */

/* Kernel Loadable Module Entry Points */

/*
 * Module 'insert' entry point.
 * o print announcement
 * o allocate adapter data space
 * o initialize static data
 * o register all cards with WAN router
 * o calibrate Cyclom 2X shared memory access delay.
 *
 * Return:	0	Ok
 *		< 0	error.
 * Context:	process
 */
static int __init cycx_init(void)
{
	int cnt, err = -ENOMEM;

	pr_info("%s v%u.%u %s\n",
		cycx_fullname, CYCX_DRV_VERSION, CYCX_DRV_RELEASE,
		cycx_copyright);

	/* Verify number of cards and allocate adapter data space */
	cycx_ncards = min_t(int, cycx_ncards, CYCX_MAX_CARDS);
	cycx_ncards = max_t(int, cycx_ncards, 1);
	cycx_card_array = kcalloc(cycx_ncards, sizeof(struct cycx_device),
				  GFP_KERNEL);
	if (!cycx_card_array)
		goto out;

	/* Register adapters with WAN router */
	for (cnt = 0; cnt < cycx_ncards; ++cnt) {
		struct cycx_device *card = &cycx_card_array[cnt];
		struct wan_device *wandev = &card->wandev;

		/* NOTE(review): sprintf into card->devname relies on the
		 * devname field being large enough for "cyclomx" plus a
		 * small index — confirm against struct cycx_device. */
		sprintf(card->devname, "%s%d", cycx_drvname, cnt + 1);
		wandev->magic    = ROUTER_MAGIC;
		wandev->name     = card->devname;
		wandev->private  = card;
		wandev->setup    = cycx_wan_setup;
		wandev->shutdown = cycx_wan_shutdown;
		err = register_wan_device(wandev);

		if (err) {
			pr_err("%s registration failed with error %d!\n",
			       card->devname, err);
			break;
		}
	}

	/* If not even one card registered, undo the allocation and fail. */
	err = -ENODEV;
	if (!cnt) {
		kfree(cycx_card_array);
		goto out;
	}
	err = 0;
	cycx_ncards = cnt;	/* adjust actual number of cards */
out:	return err;
}

/*
 * Module 'remove' entry point.
 * o unregister all adapters from the WAN router
 * o release all remaining system resources
 */
static void __exit cycx_exit(void)
{
	int i = 0;

	for (; i < cycx_ncards; ++i) {
		struct cycx_device *card = &cycx_card_array[i];

		unregister_wan_device(card->devname);
	}

	kfree(cycx_card_array);
}

/* WAN Device Driver Entry Points */

/*
 * Setup/configure WAN link driver.
 * o check adapter state
 * o make sure firmware is present in configuration
 * o allocate interrupt vector
 * o setup Cyclom 2X hardware
 * o call appropriate routine to perform protocol-specific initialization
 *
 * This function is called when router handles ROUTER_SETUP IOCTL. The
 * configuration structure is in kernel memory (including extended data, if
 * any).
 */
static int cycx_wan_setup(struct wan_device *wandev, wandev_conf_t *conf)
{
	int rc = -EFAULT;
	struct cycx_device *card;
	int irq;

	/* Sanity checks */

	if (!wandev || !wandev->private || !conf)
		goto out;

	card = wandev->private;
	rc = -EBUSY;
	if (wandev->state != WAN_UNCONFIGURED)
		goto out;

	rc = -EINVAL;
	if (!conf->data_size || !conf->data) {
		pr_err("%s: firmware not found in configuration data!\n",
		       wandev->name);
		goto out;
	}

	if (conf->irq <= 0) {
		pr_err("%s: can't configure without IRQ!\n", wandev->name);
		goto out;
	}

	/* Allocate IRQ */
	irq = conf->irq == 2 ? 9 : conf->irq;	/* IRQ2 -> IRQ9 */

	if (request_irq(irq, cycx_isr, 0, wandev->name, card)) {
		pr_err("%s: can't reserve IRQ %d!\n", wandev->name, irq);
		goto out;
	}

	/* Configure hardware, load firmware, etc. */
	memset(&card->hw, 0, sizeof(card->hw));
	card->hw.irq	 = irq;
	card->hw.dpmsize = CYCX_WINDOWSIZE;
	card->hw.fwid	 = CFID_X25_2X;
	spin_lock_init(&card->lock);
	init_waitqueue_head(&card->wait_stats);

	rc = cycx_setup(&card->hw, conf->data, conf->data_size, conf->maddr);
	if (rc)
		goto out_irq;

	/* Initialize WAN device data space */
	wandev->irq       = irq;
	wandev->dma       = wandev->ioport = 0;
	wandev->maddr     = (unsigned long)card->hw.dpmbase;
	wandev->msize     = card->hw.dpmsize;
	wandev->hw_opt[2] = 0;
	wandev->hw_opt[3] = card->hw.fwid;

	/* Protocol-specific initialization */
	switch (card->hw.fwid) {
#ifdef CONFIG_CYCLOMX_X25
	case CFID_X25_2X:
		rc = cycx_x25_wan_init(card, conf);
		break;
#endif
	default:
		pr_err("%s: this firmware is not supported!\n", wandev->name);
		rc = -EINVAL;
	}

	if (rc) {
		/* Protocol init failed: undo the hardware setup too. */
		cycx_down(&card->hw);
		goto out_irq;
	}

	rc = 0;
out:
	return rc;
out_irq:
	free_irq(irq, card);
	goto out;
}

/*
 * Shut down WAN link driver.
 * o shut down adapter hardware
 * o release system resources.
 *
 * This function is called by the router when device is being unregistered or
 * when it handles ROUTER_DOWN IOCTL.
 */
static int cycx_wan_shutdown(struct wan_device *wandev)
{
	int ret = -EFAULT;
	struct cycx_device *card;

	/* sanity checks */
	if (!wandev || !wandev->private)
		goto out;

	/* Shutting down an unconfigured device is a successful no-op. */
	ret = 0;
	if (wandev->state == WAN_UNCONFIGURED)
		goto out;

	card = wandev->private;
	wandev->state = WAN_UNCONFIGURED;
	cycx_down(&card->hw);

	pr_info("%s: irq %d being freed!\n", wandev->name, wandev->irq);
	free_irq(wandev->irq, card);
out:
	return ret;
}

/* Miscellaneous */

/*
 * Cyclom 2X Interrupt Service Routine.
 * o acknowledge Cyclom 2X hardware interrupt.
 * o call protocol-specific interrupt service routine, if any.
 */
static irqreturn_t cycx_isr(int irq, void *dev_id)
{
	struct cycx_device *card = dev_id;

	if (card->wandev.state == WAN_UNCONFIGURED)
		goto out;

	/* NOTE(review): card->in_isr is only tested here; it appears to be
	 * set/cleared by the protocol-specific ISR — confirm. */
	if (card->in_isr) {
		pr_warn("%s: interrupt re-entrancy on IRQ %d!\n",
			card->devname, card->wandev.irq);
		goto out;
	}

	if (card->isr)
		card->isr(card);
	return IRQ_HANDLED;
out:
	return IRQ_NONE;
}

/* Set WAN device state.
 * Updates card->wandev.state under card->lock and logs the transition.
 * NOTE(review): states other than WAN_CONNECTED/WAN_DISCONNECTED would
 * leave string_state NULL and print a NULL "%s" — confirm callers only
 * pass those two states.
 */
void cycx_set_state(struct cycx_device *card, int state)
{
	unsigned long flags;
	char *string_state = NULL;

	spin_lock_irqsave(&card->lock, flags);

	if (card->wandev.state != state) {
		switch (state) {
		case WAN_CONNECTED:
			string_state = "connected!";
			break;
		case WAN_DISCONNECTED:
			string_state = "disconnected!";
			break;
		}

		pr_info("%s: link %s\n", card->devname, string_state);
		card->wandev.state = state;
	}

	card->state_tick = jiffies;
	spin_unlock_irqrestore(&card->lock, flags);
}

module_init(cycx_init);
module_exit(cycx_exit);
gpl-2.0
chentz78/chentz-N4-Kernel
security/inode.c
7782
6849
/* * inode.c - securityfs * * Copyright (C) 2005 Greg Kroah-Hartman <gregkh@suse.de> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * Based on fs/debugfs/inode.c which had the following copyright notice: * Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com> * Copyright (C) 2004 IBM Inc. */ /* #define DEBUG */ #include <linux/module.h> #include <linux/fs.h> #include <linux/mount.h> #include <linux/pagemap.h> #include <linux/init.h> #include <linux/namei.h> #include <linux/security.h> #include <linux/magic.h> static struct vfsmount *mount; static int mount_count; static inline int positive(struct dentry *dentry) { return dentry->d_inode && !d_unhashed(dentry); } static int fill_super(struct super_block *sb, void *data, int silent) { static struct tree_descr files[] = {{""}}; return simple_fill_super(sb, SECURITYFS_MAGIC, files); } static struct dentry *get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_single(fs_type, flags, data, fill_super); } static struct file_system_type fs_type = { .owner = THIS_MODULE, .name = "securityfs", .mount = get_sb, .kill_sb = kill_litter_super, }; /** * securityfs_create_file - create a file in the securityfs filesystem * * @name: a pointer to a string containing the name of the file to create. * @mode: the permission that the file should have * @parent: a pointer to the parent dentry for this file. This should be a * directory dentry if set. If this parameter is %NULL, then the * file will be created in the root of the securityfs filesystem. * @data: a pointer to something that the caller will want to get to later * on. The inode.i_private pointer will point to this value on * the open() call. * @fops: a pointer to a struct file_operations that should be used for * this file. * * This is the basic "create a file" function for securityfs. 
It allows for a * wide range of flexibility in creating a file, or a directory (if you * want to create a directory, the securityfs_create_dir() function is * recommended to be used instead). * * This function returns a pointer to a dentry if it succeeds. This * pointer must be passed to the securityfs_remove() function when the file is * to be removed (no automatic cleanup happens if your module is unloaded, * you are responsible here). If an error occurs, the function will return * the erorr value (via ERR_PTR). * * If securityfs is not enabled in the kernel, the value %-ENODEV is * returned. */ struct dentry *securityfs_create_file(const char *name, umode_t mode, struct dentry *parent, void *data, const struct file_operations *fops) { struct dentry *dentry; int is_dir = S_ISDIR(mode); struct inode *dir, *inode; int error; if (!is_dir) { BUG_ON(!fops); mode = (mode & S_IALLUGO) | S_IFREG; } pr_debug("securityfs: creating file '%s'\n",name); error = simple_pin_fs(&fs_type, &mount, &mount_count); if (error) return ERR_PTR(error); if (!parent) parent = mount->mnt_root; dir = parent->d_inode; mutex_lock(&dir->i_mutex); dentry = lookup_one_len(name, parent, strlen(name)); if (IS_ERR(dentry)) goto out; if (dentry->d_inode) { error = -EEXIST; goto out1; } inode = new_inode(dir->i_sb); if (!inode) { error = -ENOMEM; goto out1; } inode->i_ino = get_next_ino(); inode->i_mode = mode; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; inode->i_private = data; if (is_dir) { inode->i_op = &simple_dir_inode_operations; inode->i_fop = &simple_dir_operations; inc_nlink(inode); inc_nlink(dir); } else { inode->i_fop = fops; } d_instantiate(dentry, inode); dget(dentry); mutex_unlock(&dir->i_mutex); return dentry; out1: dput(dentry); dentry = ERR_PTR(error); out: mutex_unlock(&dir->i_mutex); simple_release_fs(&mount, &mount_count); return dentry; } EXPORT_SYMBOL_GPL(securityfs_create_file); /** * securityfs_create_dir - create a directory in the securityfs filesystem * 
* @name: a pointer to a string containing the name of the directory to * create. * @parent: a pointer to the parent dentry for this file. This should be a * directory dentry if set. If this parameter is %NULL, then the * directory will be created in the root of the securityfs filesystem. * * This function creates a directory in securityfs with the given @name. * * This function returns a pointer to a dentry if it succeeds. This * pointer must be passed to the securityfs_remove() function when the file is * to be removed (no automatic cleanup happens if your module is unloaded, * you are responsible here). If an error occurs, %NULL will be returned. * * If securityfs is not enabled in the kernel, the value %-ENODEV is * returned. It is not wise to check for this value, but rather, check for * %NULL or !%NULL instead as to eliminate the need for #ifdef in the calling * code. */ struct dentry *securityfs_create_dir(const char *name, struct dentry *parent) { return securityfs_create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO, parent, NULL, NULL); } EXPORT_SYMBOL_GPL(securityfs_create_dir); /** * securityfs_remove - removes a file or directory from the securityfs filesystem * * @dentry: a pointer to a the dentry of the file or directory to be removed. * * This function removes a file or directory in securityfs that was previously * created with a call to another securityfs function (like * securityfs_create_file() or variants thereof.) * * This function is required to be called in order for the file to be * removed. No automatic cleanup of files will happen when a module is * removed; you are responsible here. 
*/ void securityfs_remove(struct dentry *dentry) { struct dentry *parent; if (!dentry || IS_ERR(dentry)) return; parent = dentry->d_parent; if (!parent || !parent->d_inode) return; mutex_lock(&parent->d_inode->i_mutex); if (positive(dentry)) { if (dentry->d_inode) { if (S_ISDIR(dentry->d_inode->i_mode)) simple_rmdir(parent->d_inode, dentry); else simple_unlink(parent->d_inode, dentry); dput(dentry); } } mutex_unlock(&parent->d_inode->i_mutex); simple_release_fs(&mount, &mount_count); } EXPORT_SYMBOL_GPL(securityfs_remove); static struct kobject *security_kobj; static int __init securityfs_init(void) { int retval; security_kobj = kobject_create_and_add("security", kernel_kobj); if (!security_kobj) return -EINVAL; retval = register_filesystem(&fs_type); if (retval) kobject_put(security_kobj); return retval; } core_initcall(securityfs_init); MODULE_LICENSE("GPL");
gpl-2.0
psyke83/android_kernel_samsung_msm
net/netfilter/xt_time.c
8806
7669
/*
 *	xt_time
 *	Copyright © CC Computer Consultants GmbH, 2007
 *
 *	based on ipt_time by Fabrice MARIE <fabrice@netfilter.org>
 *	This is a module which is used for time matching
 *	It is using some modified code from dietlibc (localtime() function)
 *	that you can find at http://www.fefe.de/dietlibc/
 *	This file is distributed under the terms of the GNU General Public
 *	License (GPL). Copies of the GPL can be obtained from gnu.org/gpl.
 */
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_time.h>

/* Broken-down time, filled in incrementally by localtime_1/2/3 below. */
struct xtm {
	u_int8_t month;    /* (1-12) */
	u_int8_t monthday; /* (1-31) */
	u_int8_t weekday;  /* (1-7) */
	u_int8_t hour;     /* (0-23) */
	u_int8_t minute;   /* (0-59) */
	u_int8_t second;   /* (0-59) */
	unsigned int dse;  /* days since the epoch */
};

extern struct timezone sys_tz; /* ouch */

/* Cumulative days before each month in a normal year. */
static const u_int16_t days_since_year[] = {
	0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334,
};

/* Cumulative days before each month in a leap year. */
static const u_int16_t days_since_leapyear[] = {
	0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335,
};

/*
 * Since time progresses forward, it is best to organize this array in reverse,
 * to minimize lookup time.
 */
enum {
	DSE_FIRST = 2039,
};
static const u_int16_t days_since_epoch[] = {
	/* 2039 - 2030 */
	25202, 24837, 24472, 24106, 23741, 23376, 23011, 22645, 22280, 21915,
	/* 2029 - 2020 */
	21550, 21184, 20819, 20454, 20089, 19723, 19358, 18993, 18628, 18262,
	/* 2019 - 2010 */
	17897, 17532, 17167, 16801, 16436, 16071, 15706, 15340, 14975, 14610,
	/* 2009 - 2000 */
	14245, 13879, 13514, 13149, 12784, 12418, 12053, 11688, 11323, 10957,
	/* 1999 - 1990 */
	10592, 10227, 9862, 9496, 9131, 8766, 8401, 8035, 7670, 7305,
	/* 1989 - 1980 */
	6940, 6574, 6209, 5844, 5479, 5113, 4748, 4383, 4018, 3652,
	/* 1979 - 1970 */
	3287, 2922, 2557, 2191, 1826, 1461, 1096, 730, 365, 0,
};

/* Gregorian leap-year test. */
static inline bool is_leap(unsigned int y)
{
	return y % 4 == 0 && (y % 100 != 0 || y % 400 == 0);
}

/*
 * Each network packet has a (nano)seconds-since-the-epoch (SSTE) timestamp.
 * Since we match against days and daytime, the SSTE value needs to be
 * computed back into human-readable dates.
 *
 * This is done in three separate functions so that the most expensive
 * calculations are done last, in case a "simple match" can be found earlier.
 */
static inline unsigned int localtime_1(struct xtm *r, time_t time)
{
	unsigned int v, w;

	/* Each day has 86400s, so finding the hour/minute is actually easy. */
	v         = time % 86400;
	r->second = v % 60;
	w         = v / 60;
	r->minute = w % 60;
	r->hour   = w / 60;
	return v;
}

static inline void localtime_2(struct xtm *r, time_t time)
{
	/*
	 * Here comes the rest (weekday, monthday). First, divide the SSTE
	 * by seconds-per-day to get the number of _days_ since the epoch.
	 */
	r->dse = time / 86400;

	/*
	 * 1970-01-01 (w=0) was a Thursday (4).
	 * -1 and +1 map Sunday properly onto 7.
	 */
	r->weekday = (4 + r->dse - 1) % 7 + 1;
}

static void localtime_3(struct xtm *r, time_t time)
{
	unsigned int year, i, w = r->dse;

	/*
	 * In each year, a certain number of days-since-the-epoch have passed.
	 * Find the year that is closest to said days.
	 *
	 * Consider, for example, w=21612 (2029-03-04). Loop will abort on
	 * dse[i] <= w, which happens when dse[i] == 21550. This implies
	 * year == 2009. w will then be 62.
	 */
	for (i = 0, year = DSE_FIRST; days_since_epoch[i] > w;
	    ++i, --year)
		/* just loop */;

	w -= days_since_epoch[i];

	/*
	 * By now we have the current year, and the day of the year.
	 * r->yearday = w;
	 *
	 * On to finding the month (like above). In each month, a certain
	 * number of days-since-New Year have passed, and find the closest
	 * one.
	 *
	 * Consider w=62 (in a non-leap year). Loop will abort on
	 * dsy[i] < w, which happens when dsy[i] == 31+28 (i == 2).
	 * Concludes i == 2, i.e. 3rd month => March.
	 *
	 * (A different approach to use would be to subtract a monthlength
	 * from w repeatedly while counting.)
	 */
	if (is_leap(year)) {
		/* use days_since_leapyear[] in a leap year */
		for (i = ARRAY_SIZE(days_since_leapyear) - 1;
		    i > 0 && days_since_leapyear[i] > w; --i)
			/* just loop */;
		r->monthday = w - days_since_leapyear[i] + 1;
	} else {
		for (i = ARRAY_SIZE(days_since_year) - 1;
		    i > 0 && days_since_year[i] > w; --i)
			/* just loop */;
		r->monthday = w - days_since_year[i] + 1;
	}

	r->month    = i + 1;
}

static bool time_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct xt_time_info *info = par->matchinfo;
	unsigned int packet_time;
	struct xtm current_time;
	s64 stamp;

	/*
	 * We cannot use get_seconds() instead of __net_timestamp() here.
	 * Suppose you have two rules:
	 *	1. match before 13:00
	 *	2. match after 13:00
	 * If you match against processing time (get_seconds) it
	 * may happen that the same packet matches both rules if
	 * it arrived at the right moment before 13:00.
	 */
	if (skb->tstamp.tv64 == 0)
		__net_timestamp((struct sk_buff *)skb);

	stamp = ktime_to_ns(skb->tstamp);
	stamp = div_s64(stamp, NSEC_PER_SEC);

	if (info->flags & XT_TIME_LOCAL_TZ)
		/* Adjust for local timezone */
		stamp -= 60 * sys_tz.tz_minuteswest;

	/*
	 * xt_time will match when _all_ of the following hold:
	 *   - 'now' is in the global time range date_start..date_end
	 *   - 'now' is in the monthday mask
	 *   - 'now' is in the weekday mask
	 *   - 'now' is in the daytime range time_start..time_end
	 * (and by default, libxt_time will set these so as to match)
	 */

	if (stamp < info->date_start || stamp > info->date_stop)
		return false;

	packet_time = localtime_1(&current_time, stamp);

	/* A daytime range may wrap past midnight (start > stop). */
	if (info->daytime_start < info->daytime_stop) {
		if (packet_time < info->daytime_start ||
		    packet_time > info->daytime_stop)
			return false;
	} else {
		if (packet_time < info->daytime_start &&
		    packet_time > info->daytime_stop)
			return false;
	}

	localtime_2(&current_time, stamp);

	if (!(info->weekdays_match & (1 << current_time.weekday)))
		return false;

	/* Do not spend time computing monthday if all days match anyway */
	if (info->monthdays_match != XT_TIME_ALL_MONTHDAYS) {
		localtime_3(&current_time, stamp);
		if (!(info->monthdays_match & (1 << current_time.monthday)))
			return false;
	}

	return true;
}

/* Validate userspace-supplied daytime bounds at rule-insertion time. */
static int time_mt_check(const struct xt_mtchk_param *par)
{
	const struct xt_time_info *info = par->matchinfo;

	if (info->daytime_start > XT_TIME_MAX_DAYTIME ||
	    info->daytime_stop > XT_TIME_MAX_DAYTIME) {
		pr_info("invalid argument - start or "
			"stop time greater than 23:59:59\n");
		return -EDOM;
	}

	return 0;
}

static struct xt_match xt_time_mt_reg __read_mostly = {
	.name       = "time",
	.family     = NFPROTO_UNSPEC,
	.match      = time_mt,
	.checkentry = time_mt_check,
	.matchsize  = sizeof(struct xt_time_info),
	.me         = THIS_MODULE,
};

static int __init time_mt_init(void)
{
	int minutes = sys_tz.tz_minuteswest;

	/* Log the kernel timezone so rule authors can sanity-check offsets. */
	if (minutes < 0) /* east of Greenwich */
		printk(KERN_INFO KBUILD_MODNAME
		       ": kernel timezone is +%02d%02d\n",
		       -minutes / 60, -minutes % 60);
	else /* west of Greenwich */
		printk(KERN_INFO KBUILD_MODNAME
		       ": kernel timezone is -%02d%02d\n",
		       minutes / 60, minutes % 60);

	return xt_register_match(&xt_time_mt_reg);
}

static void __exit time_mt_exit(void)
{
	xt_unregister_match(&xt_time_mt_reg);
}

module_init(time_mt_init);
module_exit(time_mt_exit);
MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
MODULE_DESCRIPTION("Xtables: time-based matching");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_time");
MODULE_ALIAS("ip6t_time");
gpl-2.0
smac0628/caf-LA.BF.1.1.2.1
drivers/tty/ipwireless/main.c
12646
8648
/* * IPWireless 3G PCMCIA Network Driver * * Original code * by Stephen Blackheath <stephen@blacksapphire.com>, * Ben Martel <benm@symmetric.co.nz> * * Copyrighted as follows: * Copyright (C) 2004 by Symmetric Systems Ltd (NZ) * * Various driver changes and rewrites, port to new kernels * Copyright (C) 2006-2007 Jiri Kosina * * Misc code cleanups and updates * Copyright (C) 2007 David Sterba */ #include "hardware.h" #include "network.h" #include "main.h" #include "tty.h" #include <linux/delay.h> #include <linux/init.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/slab.h> #include <pcmcia/cisreg.h> #include <pcmcia/device_id.h> #include <pcmcia/ss.h> #include <pcmcia/ds.h> static const struct pcmcia_device_id ipw_ids[] = { PCMCIA_DEVICE_MANF_CARD(0x02f2, 0x0100), PCMCIA_DEVICE_MANF_CARD(0x02f2, 0x0200), PCMCIA_DEVICE_NULL }; MODULE_DEVICE_TABLE(pcmcia, ipw_ids); static void ipwireless_detach(struct pcmcia_device *link); /* * Module params */ /* Debug mode: more verbose, print sent/recv bytes */ int ipwireless_debug; int ipwireless_loopback; int ipwireless_out_queue = 10; module_param_named(debug, ipwireless_debug, int, 0); module_param_named(loopback, ipwireless_loopback, int, 0); module_param_named(out_queue, ipwireless_out_queue, int, 0); MODULE_PARM_DESC(debug, "switch on debug messages [0]"); MODULE_PARM_DESC(loopback, "debug: enable ras_raw channel [0]"); MODULE_PARM_DESC(out_queue, "debug: set size of outgoing PPP queue [10]"); /* Executes in process context. */ static void signalled_reboot_work(struct work_struct *work_reboot) { struct ipw_dev *ipw = container_of(work_reboot, struct ipw_dev, work_reboot); struct pcmcia_device *link = ipw->link; pcmcia_reset_card(link->socket); } static void signalled_reboot_callback(void *callback_data) { struct ipw_dev *ipw = (struct ipw_dev *) callback_data; /* Delegate to process context. 
*/ schedule_work(&ipw->work_reboot); } static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data) { struct ipw_dev *ipw = priv_data; int ret; p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; /* 0x40 causes it to generate level mode interrupts. */ /* 0x04 enables IREQ pin. */ p_dev->config_index |= 0x44; p_dev->io_lines = 16; ret = pcmcia_request_io(p_dev); if (ret) return ret; if (!request_region(p_dev->resource[0]->start, resource_size(p_dev->resource[0]), IPWIRELESS_PCCARD_NAME)) { ret = -EBUSY; goto exit; } p_dev->resource[2]->flags |= WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM | WIN_ENABLE; ret = pcmcia_request_window(p_dev, p_dev->resource[2], 0); if (ret != 0) goto exit1; ret = pcmcia_map_mem_page(p_dev, p_dev->resource[2], p_dev->card_addr); if (ret != 0) goto exit1; ipw->is_v2_card = resource_size(p_dev->resource[2]) == 0x100; ipw->common_memory = ioremap(p_dev->resource[2]->start, resource_size(p_dev->resource[2])); if (!request_mem_region(p_dev->resource[2]->start, resource_size(p_dev->resource[2]), IPWIRELESS_PCCARD_NAME)) { ret = -EBUSY; goto exit2; } p_dev->resource[3]->flags |= WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_AM | WIN_ENABLE; p_dev->resource[3]->end = 0; /* this used to be 0x1000 */ ret = pcmcia_request_window(p_dev, p_dev->resource[3], 0); if (ret != 0) goto exit3; ret = pcmcia_map_mem_page(p_dev, p_dev->resource[3], 0); if (ret != 0) goto exit3; ipw->attr_memory = ioremap(p_dev->resource[3]->start, resource_size(p_dev->resource[3])); if (!request_mem_region(p_dev->resource[3]->start, resource_size(p_dev->resource[3]), IPWIRELESS_PCCARD_NAME)) { ret = -EBUSY; goto exit4; } return 0; exit4: iounmap(ipw->attr_memory); exit3: release_mem_region(p_dev->resource[2]->start, resource_size(p_dev->resource[2])); exit2: iounmap(ipw->common_memory); exit1: release_region(p_dev->resource[0]->start, resource_size(p_dev->resource[0])); exit: pcmcia_disable_device(p_dev); return ret; } static int 
config_ipwireless(struct ipw_dev *ipw) { struct pcmcia_device *link = ipw->link; int ret = 0; ipw->is_v2_card = 0; link->config_flags |= CONF_AUTO_SET_IO | CONF_AUTO_SET_IOMEM | CONF_ENABLE_IRQ; ret = pcmcia_loop_config(link, ipwireless_probe, ipw); if (ret != 0) return ret; INIT_WORK(&ipw->work_reboot, signalled_reboot_work); ipwireless_init_hardware_v1(ipw->hardware, link->resource[0]->start, ipw->attr_memory, ipw->common_memory, ipw->is_v2_card, signalled_reboot_callback, ipw); ret = pcmcia_request_irq(link, ipwireless_interrupt); if (ret != 0) goto exit; printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": Card type %s\n", ipw->is_v2_card ? "V2/V3" : "V1"); printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": I/O ports %pR, irq %d\n", link->resource[0], (unsigned int) link->irq); if (ipw->attr_memory && ipw->common_memory) printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": attr memory %pR, common memory %pR\n", link->resource[3], link->resource[2]); ipw->network = ipwireless_network_create(ipw->hardware); if (!ipw->network) goto exit; ipw->tty = ipwireless_tty_create(ipw->hardware, ipw->network); if (!ipw->tty) goto exit; ipwireless_init_hardware_v2_v3(ipw->hardware); /* * Do the RequestConfiguration last, because it enables interrupts. * Then we don't get any interrupts before we're ready for them. 
*/ ret = pcmcia_enable_device(link); if (ret != 0) goto exit; return 0; exit: if (ipw->common_memory) { release_mem_region(link->resource[2]->start, resource_size(link->resource[2])); iounmap(ipw->common_memory); } if (ipw->attr_memory) { release_mem_region(link->resource[3]->start, resource_size(link->resource[3])); iounmap(ipw->attr_memory); } pcmcia_disable_device(link); return -1; } static void release_ipwireless(struct ipw_dev *ipw) { release_region(ipw->link->resource[0]->start, resource_size(ipw->link->resource[0])); if (ipw->common_memory) { release_mem_region(ipw->link->resource[2]->start, resource_size(ipw->link->resource[2])); iounmap(ipw->common_memory); } if (ipw->attr_memory) { release_mem_region(ipw->link->resource[3]->start, resource_size(ipw->link->resource[3])); iounmap(ipw->attr_memory); } pcmcia_disable_device(ipw->link); } /* * ipwireless_attach() creates an "instance" of the driver, allocating * local data structures for one device (one interface). The device * is registered with Card Services. * * The pcmcia_device structure is initialized, but we don't actually * configure the card at this point -- we wait until we receive a * card insertion event. */ static int ipwireless_attach(struct pcmcia_device *link) { struct ipw_dev *ipw; int ret; ipw = kzalloc(sizeof(struct ipw_dev), GFP_KERNEL); if (!ipw) return -ENOMEM; ipw->link = link; link->priv = ipw; ipw->hardware = ipwireless_hardware_create(); if (!ipw->hardware) { kfree(ipw); return -ENOMEM; } /* RegisterClient will call config_ipwireless */ ret = config_ipwireless(ipw); if (ret != 0) { ipwireless_detach(link); return ret; } return 0; } /* * This deletes a driver "instance". The device is de-registered with * Card Services. If it has been released, all local data structures * are freed. Otherwise, the structures will be freed when the device * is released. 
*/ static void ipwireless_detach(struct pcmcia_device *link) { struct ipw_dev *ipw = link->priv; release_ipwireless(ipw); if (ipw->tty != NULL) ipwireless_tty_free(ipw->tty); if (ipw->network != NULL) ipwireless_network_free(ipw->network); if (ipw->hardware != NULL) ipwireless_hardware_free(ipw->hardware); kfree(ipw); } static struct pcmcia_driver me = { .owner = THIS_MODULE, .probe = ipwireless_attach, .remove = ipwireless_detach, .name = IPWIRELESS_PCCARD_NAME, .id_table = ipw_ids }; /* * Module insertion : initialisation of the module. * Register the card with cardmgr... */ static int __init init_ipwireless(void) { int ret; ret = ipwireless_tty_init(); if (ret != 0) return ret; ret = pcmcia_register_driver(&me); if (ret != 0) ipwireless_tty_release(); return ret; } /* * Module removal */ static void __exit exit_ipwireless(void) { pcmcia_unregister_driver(&me); ipwireless_tty_release(); } module_init(init_ipwireless); module_exit(exit_ipwireless); MODULE_AUTHOR(IPWIRELESS_PCMCIA_AUTHOR); MODULE_DESCRIPTION(IPWIRELESS_PCCARD_NAME " " IPWIRELESS_PCMCIA_VERSION); MODULE_LICENSE("GPL");
gpl-2.0
evilwombat/gopro-linux
arch/arm/mach-mmp/irq-pxa168.c
103
1287
/* * linux/arch/arm/mach-mmp/irq.c * * Generic IRQ handling, GPIO IRQ demultiplexing, etc. * * Author: Bin Yang <bin.yang@marvell.com> * Created: Sep 30, 2008 * Copyright: Marvell International Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/irq.h> #include <linux/io.h> #include <mach/regs-icu.h> #include "common.h" #define IRQ_ROUTE_TO_AP (ICU_INT_CONF_AP_INT | ICU_INT_CONF_IRQ) #define PRIORITY_DEFAULT 0x1 #define PRIORITY_NONE 0x0 /* means IRQ disabled */ static void icu_mask_irq(struct irq_data *d) { __raw_writel(PRIORITY_NONE, ICU_INT_CONF(d->irq)); } static void icu_unmask_irq(struct irq_data *d) { __raw_writel(IRQ_ROUTE_TO_AP | PRIORITY_DEFAULT, ICU_INT_CONF(d->irq)); } static struct irq_chip icu_irq_chip = { .name = "icu_irq", .irq_ack = icu_mask_irq, .irq_mask = icu_mask_irq, .irq_unmask = icu_unmask_irq, }; void __init icu_init_irq(void) { int irq; for (irq = 0; irq < 64; irq++) { icu_mask_irq(irq_get_irq_data(irq)); set_irq_chip(irq, &icu_irq_chip); set_irq_handler(irq, handle_level_irq); set_irq_flags(irq, IRQF_VALID); } }
gpl-2.0
arjen75/L3_kernel2
arch/arm/mach-pxa/mainstone.c
103
15429
/* * linux/arch/arm/mach-pxa/mainstone.c * * Support for the Intel HCDDBBVA0 Development Platform. * (go figure how they came up with such name...) * * Author: Nicolas Pitre * Created: Nov 05, 2002 * Copyright: MontaVista Software Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/platform_device.h> #include <linux/sysdev.h> #include <linux/interrupt.h> #include <linux/sched.h> #include <linux/bitops.h> #include <linux/fb.h> #include <linux/ioport.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/input.h> #include <linux/gpio_keys.h> #include <linux/pwm_backlight.h> #include <linux/smc91x.h> #include <asm/types.h> #include <asm/setup.h> #include <asm/memory.h> #include <asm/mach-types.h> #include <mach/hardware.h> #include <asm/irq.h> #include <asm/sizes.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <asm/mach/flash.h> #include <mach/pxa27x.h> #include <mach/gpio.h> #include <mach/mainstone.h> #include <mach/audio.h> #include <mach/pxafb.h> #include <plat/i2c.h> #include <mach/mmc.h> #include <mach/irda.h> #include <mach/ohci.h> #include <plat/pxa27x_keypad.h> #include <mach/smemc.h> #include "generic.h" #include "devices.h" static unsigned long mainstone_pin_config[] = { /* Chip Select */ GPIO15_nCS_1, /* LCD - 16bpp Active TFT */ GPIOxx_LCD_TFT_16BPP, GPIO16_PWM0_OUT, /* Backlight */ /* MMC */ GPIO32_MMC_CLK, GPIO112_MMC_CMD, GPIO92_MMC_DAT_0, GPIO109_MMC_DAT_1, GPIO110_MMC_DAT_2, GPIO111_MMC_DAT_3, /* USB Host Port 1 */ GPIO88_USBH1_PWR, GPIO89_USBH1_PEN, /* PC Card */ GPIO48_nPOE, GPIO49_nPWE, GPIO50_nPIOR, GPIO51_nPIOW, GPIO85_nPCE_1, GPIO54_nPCE_2, GPIO79_PSKTSEL, GPIO55_nPREG, GPIO56_nPWAIT, GPIO57_nIOIS16, /* AC97 */ GPIO28_AC97_BITCLK, GPIO29_AC97_SDATA_IN_0, GPIO30_AC97_SDATA_OUT, GPIO31_AC97_SYNC, 
GPIO45_AC97_SYSCLK, /* Keypad */ GPIO93_KP_DKIN_0, GPIO94_KP_DKIN_1, GPIO95_KP_DKIN_2, GPIO100_KP_MKIN_0 | WAKEUP_ON_LEVEL_HIGH, GPIO101_KP_MKIN_1 | WAKEUP_ON_LEVEL_HIGH, GPIO102_KP_MKIN_2 | WAKEUP_ON_LEVEL_HIGH, GPIO97_KP_MKIN_3 | WAKEUP_ON_LEVEL_HIGH, GPIO98_KP_MKIN_4 | WAKEUP_ON_LEVEL_HIGH, GPIO99_KP_MKIN_5 | WAKEUP_ON_LEVEL_HIGH, GPIO103_KP_MKOUT_0, GPIO104_KP_MKOUT_1, GPIO105_KP_MKOUT_2, GPIO106_KP_MKOUT_3, GPIO107_KP_MKOUT_4, GPIO108_KP_MKOUT_5, GPIO96_KP_MKOUT_6, /* I2C */ GPIO117_I2C_SCL, GPIO118_I2C_SDA, /* GPIO */ GPIO1_GPIO | WAKEUP_ON_EDGE_BOTH, }; static unsigned long mainstone_irq_enabled; static void mainstone_mask_irq(struct irq_data *d) { int mainstone_irq = (d->irq - MAINSTONE_IRQ(0)); MST_INTMSKENA = (mainstone_irq_enabled &= ~(1 << mainstone_irq)); } static void mainstone_unmask_irq(struct irq_data *d) { int mainstone_irq = (d->irq - MAINSTONE_IRQ(0)); /* the irq can be acknowledged only if deasserted, so it's done here */ MST_INTSETCLR &= ~(1 << mainstone_irq); MST_INTMSKENA = (mainstone_irq_enabled |= (1 << mainstone_irq)); } static struct irq_chip mainstone_irq_chip = { .name = "FPGA", .irq_ack = mainstone_mask_irq, .irq_mask = mainstone_mask_irq, .irq_unmask = mainstone_unmask_irq, }; static void mainstone_irq_handler(unsigned int irq, struct irq_desc *desc) { unsigned long pending = MST_INTSETCLR & mainstone_irq_enabled; do { /* clear useless edge notification */ desc->irq_data.chip->irq_ack(&desc->irq_data); if (likely(pending)) { irq = MAINSTONE_IRQ(0) + __ffs(pending); generic_handle_irq(irq); } pending = MST_INTSETCLR & mainstone_irq_enabled; } while (pending); } static void __init mainstone_init_irq(void) { int irq; pxa27x_init_irq(); /* setup extra Mainstone irqs */ for(irq = MAINSTONE_IRQ(0); irq <= MAINSTONE_IRQ(15); irq++) { set_irq_chip(irq, &mainstone_irq_chip); set_irq_handler(irq, handle_level_irq); if (irq == MAINSTONE_IRQ(10) || irq == MAINSTONE_IRQ(14)) set_irq_flags(irq, IRQF_VALID | IRQF_PROBE | IRQF_NOAUTOEN); else 
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); } set_irq_flags(MAINSTONE_IRQ(8), 0); set_irq_flags(MAINSTONE_IRQ(12), 0); MST_INTMSKENA = 0; MST_INTSETCLR = 0; set_irq_chained_handler(IRQ_GPIO(0), mainstone_irq_handler); set_irq_type(IRQ_GPIO(0), IRQ_TYPE_EDGE_FALLING); } #ifdef CONFIG_PM static int mainstone_irq_resume(struct sys_device *dev) { MST_INTMSKENA = mainstone_irq_enabled; return 0; } static struct sysdev_class mainstone_irq_sysclass = { .name = "cpld_irq", .resume = mainstone_irq_resume, }; static struct sys_device mainstone_irq_device = { .cls = &mainstone_irq_sysclass, }; static int __init mainstone_irq_device_init(void) { int ret = -ENODEV; if (machine_is_mainstone()) { ret = sysdev_class_register(&mainstone_irq_sysclass); if (ret == 0) ret = sysdev_register(&mainstone_irq_device); } return ret; } device_initcall(mainstone_irq_device_init); #endif static struct resource smc91x_resources[] = { [0] = { .start = (MST_ETH_PHYS + 0x300), .end = (MST_ETH_PHYS + 0xfffff), .flags = IORESOURCE_MEM, }, [1] = { .start = MAINSTONE_IRQ(3), .end = MAINSTONE_IRQ(3), .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE, } }; static struct smc91x_platdata mainstone_smc91x_info = { .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT | SMC91X_NOWAIT | SMC91X_USE_DMA, }; static struct platform_device smc91x_device = { .name = "smc91x", .id = 0, .num_resources = ARRAY_SIZE(smc91x_resources), .resource = smc91x_resources, .dev = { .platform_data = &mainstone_smc91x_info, }, }; static int mst_audio_startup(struct snd_pcm_substream *substream, void *priv) { if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) MST_MSCWR2 &= ~MST_MSCWR2_AC97_SPKROFF; return 0; } static void mst_audio_shutdown(struct snd_pcm_substream *substream, void *priv) { if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) MST_MSCWR2 |= MST_MSCWR2_AC97_SPKROFF; } static long mst_audio_suspend_mask; static void mst_audio_suspend(void *priv) { mst_audio_suspend_mask = MST_MSCWR2; MST_MSCWR2 |= 
MST_MSCWR2_AC97_SPKROFF; } static void mst_audio_resume(void *priv) { MST_MSCWR2 &= mst_audio_suspend_mask | ~MST_MSCWR2_AC97_SPKROFF; } static pxa2xx_audio_ops_t mst_audio_ops = { .startup = mst_audio_startup, .shutdown = mst_audio_shutdown, .suspend = mst_audio_suspend, .resume = mst_audio_resume, }; static struct resource flash_resources[] = { [0] = { .start = PXA_CS0_PHYS, .end = PXA_CS0_PHYS + SZ_64M - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = PXA_CS1_PHYS, .end = PXA_CS1_PHYS + SZ_64M - 1, .flags = IORESOURCE_MEM, }, }; static struct mtd_partition mainstoneflash0_partitions[] = { { .name = "Bootloader", .size = 0x00040000, .offset = 0, .mask_flags = MTD_WRITEABLE /* force read-only */ },{ .name = "Kernel", .size = 0x00400000, .offset = 0x00040000, },{ .name = "Filesystem", .size = MTDPART_SIZ_FULL, .offset = 0x00440000 } }; static struct flash_platform_data mst_flash_data[2] = { { .map_name = "cfi_probe", .parts = mainstoneflash0_partitions, .nr_parts = ARRAY_SIZE(mainstoneflash0_partitions), }, { .map_name = "cfi_probe", .parts = NULL, .nr_parts = 0, } }; static struct platform_device mst_flash_device[2] = { { .name = "pxa2xx-flash", .id = 0, .dev = { .platform_data = &mst_flash_data[0], }, .resource = &flash_resources[0], .num_resources = 1, }, { .name = "pxa2xx-flash", .id = 1, .dev = { .platform_data = &mst_flash_data[1], }, .resource = &flash_resources[1], .num_resources = 1, }, }; #if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE) static struct platform_pwm_backlight_data mainstone_backlight_data = { .pwm_id = 0, .max_brightness = 1023, .dft_brightness = 1023, .pwm_period_ns = 78770, }; static struct platform_device mainstone_backlight_device = { .name = "pwm-backlight", .dev = { .parent = &pxa27x_device_pwm0.dev, .platform_data = &mainstone_backlight_data, }, }; static void __init mainstone_backlight_register(void) { int ret = platform_device_register(&mainstone_backlight_device); if (ret) printk(KERN_ERR "mainstone: failed to register 
backlight device: %d\n", ret); } #else #define mainstone_backlight_register() do { } while (0) #endif static struct pxafb_mode_info toshiba_ltm04c380k_mode = { .pixclock = 50000, .xres = 640, .yres = 480, .bpp = 16, .hsync_len = 1, .left_margin = 0x9f, .right_margin = 1, .vsync_len = 44, .upper_margin = 0, .lower_margin = 0, .sync = FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, }; static struct pxafb_mode_info toshiba_ltm035a776c_mode = { .pixclock = 110000, .xres = 240, .yres = 320, .bpp = 16, .hsync_len = 4, .left_margin = 8, .right_margin = 20, .vsync_len = 3, .upper_margin = 1, .lower_margin = 10, .sync = FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, }; static struct pxafb_mach_info mainstone_pxafb_info = { .num_modes = 1, .lcd_conn = LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL, }; static int mainstone_mci_init(struct device *dev, irq_handler_t mstone_detect_int, void *data) { int err; /* make sure SD/Memory Stick multiplexer's signals * are routed to MMC controller */ MST_MSCWR1 &= ~MST_MSCWR1_MS_SEL; err = request_irq(MAINSTONE_MMC_IRQ, mstone_detect_int, IRQF_DISABLED, "MMC card detect", data); if (err) printk(KERN_ERR "mainstone_mci_init: MMC/SD: can't request MMC card detect IRQ\n"); return err; } static void mainstone_mci_setpower(struct device *dev, unsigned int vdd) { struct pxamci_platform_data* p_d = dev->platform_data; if (( 1 << vdd) & p_d->ocr_mask) { printk(KERN_DEBUG "%s: on\n", __func__); MST_MSCWR1 |= MST_MSCWR1_MMC_ON; MST_MSCWR1 &= ~MST_MSCWR1_MS_SEL; } else { printk(KERN_DEBUG "%s: off\n", __func__); MST_MSCWR1 &= ~MST_MSCWR1_MMC_ON; } } static void mainstone_mci_exit(struct device *dev, void *data) { free_irq(MAINSTONE_MMC_IRQ, data); } static struct pxamci_platform_data mainstone_mci_platform_data = { .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, .init = mainstone_mci_init, .setpower = mainstone_mci_setpower, .exit = mainstone_mci_exit, .gpio_card_detect = -1, .gpio_card_ro = -1, .gpio_power = -1, }; static void mainstone_irda_transceiver_mode(struct 
device *dev, int mode) { unsigned long flags; local_irq_save(flags); if (mode & IR_SIRMODE) { MST_MSCWR1 &= ~MST_MSCWR1_IRDA_FIR; } else if (mode & IR_FIRMODE) { MST_MSCWR1 |= MST_MSCWR1_IRDA_FIR; } pxa2xx_transceiver_mode(dev, mode); if (mode & IR_OFF) { MST_MSCWR1 = (MST_MSCWR1 & ~MST_MSCWR1_IRDA_MASK) | MST_MSCWR1_IRDA_OFF; } else { MST_MSCWR1 = (MST_MSCWR1 & ~MST_MSCWR1_IRDA_MASK) | MST_MSCWR1_IRDA_FULL; } local_irq_restore(flags); } static struct pxaficp_platform_data mainstone_ficp_platform_data = { .gpio_pwdown = -1, .transceiver_cap = IR_SIRMODE | IR_FIRMODE | IR_OFF, .transceiver_mode = mainstone_irda_transceiver_mode, }; static struct gpio_keys_button gpio_keys_button[] = { [0] = { .desc = "wakeup", .code = KEY_SUSPEND, .type = EV_KEY, .gpio = 1, .wakeup = 1, }, }; static struct gpio_keys_platform_data mainstone_gpio_keys = { .buttons = gpio_keys_button, .nbuttons = 1, }; static struct platform_device mst_gpio_keys_device = { .name = "gpio-keys", .id = -1, .dev = { .platform_data = &mainstone_gpio_keys, }, }; static struct platform_device *platform_devices[] __initdata = { &smc91x_device, &mst_flash_device[0], &mst_flash_device[1], &mst_gpio_keys_device, }; static struct pxaohci_platform_data mainstone_ohci_platform_data = { .port_mode = PMM_PERPORT_MODE, .flags = ENABLE_PORT_ALL | POWER_CONTROL_LOW | POWER_SENSE_LOW, }; #if defined(CONFIG_KEYBOARD_PXA27x) || defined(CONFIG_KEYBOARD_PXA27x_MODULE) static unsigned int mainstone_matrix_keys[] = { KEY(0, 0, KEY_A), KEY(1, 0, KEY_B), KEY(2, 0, KEY_C), KEY(3, 0, KEY_D), KEY(4, 0, KEY_E), KEY(5, 0, KEY_F), KEY(0, 1, KEY_G), KEY(1, 1, KEY_H), KEY(2, 1, KEY_I), KEY(3, 1, KEY_J), KEY(4, 1, KEY_K), KEY(5, 1, KEY_L), KEY(0, 2, KEY_M), KEY(1, 2, KEY_N), KEY(2, 2, KEY_O), KEY(3, 2, KEY_P), KEY(4, 2, KEY_Q), KEY(5, 2, KEY_R), KEY(0, 3, KEY_S), KEY(1, 3, KEY_T), KEY(2, 3, KEY_U), KEY(3, 3, KEY_V), KEY(4, 3, KEY_W), KEY(5, 3, KEY_X), KEY(2, 4, KEY_Y), KEY(3, 4, KEY_Z), KEY(0, 4, KEY_DOT), /* . 
*/ KEY(1, 4, KEY_CLOSE), /* @ */ KEY(4, 4, KEY_SLASH), KEY(5, 4, KEY_BACKSLASH), KEY(0, 5, KEY_HOME), KEY(1, 5, KEY_LEFTSHIFT), KEY(2, 5, KEY_SPACE), KEY(3, 5, KEY_SPACE), KEY(4, 5, KEY_ENTER), KEY(5, 5, KEY_BACKSPACE), KEY(0, 6, KEY_UP), KEY(1, 6, KEY_DOWN), KEY(2, 6, KEY_LEFT), KEY(3, 6, KEY_RIGHT), KEY(4, 6, KEY_SELECT), }; struct pxa27x_keypad_platform_data mainstone_keypad_info = { .matrix_key_rows = 6, .matrix_key_cols = 7, .matrix_key_map = mainstone_matrix_keys, .matrix_key_map_size = ARRAY_SIZE(mainstone_matrix_keys), .enable_rotary0 = 1, .rotary0_up_key = KEY_UP, .rotary0_down_key = KEY_DOWN, .debounce_interval = 30, }; static void __init mainstone_init_keypad(void) { pxa_set_keypad_info(&mainstone_keypad_info); } #else static inline void mainstone_init_keypad(void) {} #endif static void __init mainstone_init(void) { int SW7 = 0; /* FIXME: get from SCR (Mst doc section 3.2.1.1) */ pxa2xx_mfp_config(ARRAY_AND_SIZE(mainstone_pin_config)); pxa_set_ffuart_info(NULL); pxa_set_btuart_info(NULL); pxa_set_stuart_info(NULL); mst_flash_data[0].width = (__raw_readl(BOOT_DEF) & 1) ? 
2 : 4; mst_flash_data[1].width = 4; /* Compensate for SW7 which swaps the flash banks */ mst_flash_data[SW7].name = "processor-flash"; mst_flash_data[SW7 ^ 1].name = "mainboard-flash"; printk(KERN_NOTICE "Mainstone configured to boot from %s\n", mst_flash_data[0].name); /* system bus arbiter setting * - Core_Park * - LCD_wt:DMA_wt:CORE_Wt = 2:3:4 */ ARB_CNTRL = ARB_CORE_PARK | 0x234; platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices)); /* reading Mainstone's "Virtual Configuration Register" might be handy to select LCD type here */ if (0) mainstone_pxafb_info.modes = &toshiba_ltm04c380k_mode; else mainstone_pxafb_info.modes = &toshiba_ltm035a776c_mode; set_pxa_fb_info(&mainstone_pxafb_info); mainstone_backlight_register(); pxa_set_mci_info(&mainstone_mci_platform_data); pxa_set_ficp_info(&mainstone_ficp_platform_data); pxa_set_ohci_info(&mainstone_ohci_platform_data); pxa_set_i2c_info(NULL); pxa_set_ac97_info(&mst_audio_ops); mainstone_init_keypad(); } static struct map_desc mainstone_io_desc[] __initdata = { { /* CPLD */ .virtual = MST_FPGA_VIRT, .pfn = __phys_to_pfn(MST_FPGA_PHYS), .length = 0x00100000, .type = MT_DEVICE } }; static void __init mainstone_map_io(void) { pxa27x_map_io(); iotable_init(mainstone_io_desc, ARRAY_SIZE(mainstone_io_desc)); /* for use I SRAM as framebuffer. */ PSLR |= 0xF04; PCFR = 0x66; } MACHINE_START(MAINSTONE, "Intel HCDDBBVA0 Development Platform (aka Mainstone)") /* Maintainer: MontaVista Software Inc. */ .boot_params = 0xa0000100, /* BLOB boot parameter setting */ .map_io = mainstone_map_io, .nr_irqs = MAINSTONE_NR_IRQS, .init_irq = mainstone_init_irq, .timer = &pxa_timer, .init_machine = mainstone_init, MACHINE_END
gpl-2.0
bstando/limbo-android
jni/qemu/hw/lm32_juart.c
103
3712
/* * LatticeMico32 JTAG UART model. * * Copyright (c) 2010 Michael Walle <michael@walle.cc> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #include "hw.h" #include "sysbus.h" #include "trace.h" #include "qemu-char.h" #include "lm32_juart.h" enum { LM32_JUART_MIN_SAVE_VERSION = 0, LM32_JUART_CURRENT_SAVE_VERSION = 0, LM32_JUART_MAX_SAVE_VERSION = 0, }; enum { JTX_FULL = (1<<8), }; enum { JRX_FULL = (1<<8), }; struct LM32JuartState { SysBusDevice busdev; CharDriverState *chr; uint32_t jtx; uint32_t jrx; }; typedef struct LM32JuartState LM32JuartState; uint32_t lm32_juart_get_jtx(DeviceState *d) { LM32JuartState *s = container_of(d, LM32JuartState, busdev.qdev); trace_lm32_juart_get_jtx(s->jtx); return s->jtx; } uint32_t lm32_juart_get_jrx(DeviceState *d) { LM32JuartState *s = container_of(d, LM32JuartState, busdev.qdev); trace_lm32_juart_get_jrx(s->jrx); return s->jrx; } void lm32_juart_set_jtx(DeviceState *d, uint32_t jtx) { LM32JuartState *s = container_of(d, LM32JuartState, busdev.qdev); unsigned char ch = jtx & 0xff; trace_lm32_juart_set_jtx(s->jtx); s->jtx = jtx; if (s->chr) { qemu_chr_fe_write(s->chr, &ch, 1); } } void lm32_juart_set_jrx(DeviceState *d, uint32_t jtx) { LM32JuartState *s = container_of(d, LM32JuartState, busdev.qdev); trace_lm32_juart_set_jrx(s->jrx); s->jrx &= ~JRX_FULL; } static void juart_rx(void *opaque, const uint8_t *buf, int size) { 
LM32JuartState *s = opaque; s->jrx = *buf | JRX_FULL; } static int juart_can_rx(void *opaque) { LM32JuartState *s = opaque; return !(s->jrx & JRX_FULL); } static void juart_event(void *opaque, int event) { } static void juart_reset(DeviceState *d) { LM32JuartState *s = container_of(d, LM32JuartState, busdev.qdev); s->jtx = 0; s->jrx = 0; } static int lm32_juart_init(SysBusDevice *dev) { LM32JuartState *s = FROM_SYSBUS(typeof(*s), dev); s->chr = qemu_char_get_next_serial(); if (s->chr) { qemu_chr_add_handlers(s->chr, juart_can_rx, juart_rx, juart_event, s); } return 0; } static const VMStateDescription vmstate_lm32_juart = { .name = "lm32-juart", .version_id = 1, .minimum_version_id = 1, .minimum_version_id_old = 1, .fields = (VMStateField[]) { VMSTATE_UINT32(jtx, LM32JuartState), VMSTATE_UINT32(jrx, LM32JuartState), VMSTATE_END_OF_LIST() } }; static void lm32_juart_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass); k->init = lm32_juart_init; dc->reset = juart_reset; dc->vmsd = &vmstate_lm32_juart; } static TypeInfo lm32_juart_info = { .name = "lm32-juart", .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(LM32JuartState), .class_init = lm32_juart_class_init, }; static void lm32_juart_register_types(void) { type_register_static(&lm32_juart_info); } type_init(lm32_juart_register_types)
gpl-2.0
blakejwc/paparazzi
sw/airborne/arch/lpc21/test/bootloader/usbstdreq.c
103
7116
/* LPCUSB, an USB device driver for LPC microcontrollers Copyright (C) 2006 Bertrik Sikken (bertrik@sikken.nl) This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /* Standard request handler. This modules handles the 'chapter 9' processing, specifically the standard device requests in table 9-3 from the universal serial bus specification revision 2.0 Specific types of devices may specify additional requests (for example HID devices add a GET_DESCRIPTOR request for interfaces), but they will not be part of this module. */ // TODO some requests have to return a request error if device not configured: // TODO GET_INTERFACE, GET_STATUS, SET_INTERFACE, SYNCH_FRAME // TODO this applies to the following if endpoint != 0: // TODO SET_FEATURE, GET_FEATURE #include "type.h" #include "usbdebug.h" #include "usbstruct.h" #include "usbapi.h" #define MAX_DESC_HANDLERS 4 // device, interface, endpoint, other // device state info static U8 bConfiguration = 0; static TFnGetDescriptor *pfnGetDescriptor = NULL; /************************************************************************* HandleStdDeviceReq ================== Local function to handle a standard device request IN pSetup The setup packet IN/OUT *piLen Pointer to data length ppbData Data buffer. 
Returns TRUE if the request was handled successfully **************************************************************************/ static BOOL HandleStdDeviceReq(TSetupPacket *pSetup, int *piLen, U8 **ppbData) { U8 *pbData = *ppbData; switch (pSetup->bRequest) { case REQ_GET_STATUS: // bit 0: self-powered // bit 1: remote wakeup pbData[0] = 0; // TODO use bmAttributes according to configuration pbData[1] = 0; *piLen = 2; break; case REQ_SET_ADDRESS: USBHwSetAddress(pSetup->wValue); break; case REQ_GET_DESCRIPTOR: DBG("D%x", pSetup->wValue); if (pfnGetDescriptor == NULL) { return FALSE; } return pfnGetDescriptor(pSetup->wValue, pSetup->wIndex, piLen, ppbData); case REQ_GET_CONFIGURATION: // indicate if we are configured pbData[0] = bConfiguration; *piLen = 1; break; case REQ_SET_CONFIGURATION: bConfiguration = pSetup->wValue & 0xFF; // TODO use bConfigurationValue(s) USBHwConfigDevice((pSetup->wValue & 0xFF) != 0); break; case REQ_CLEAR_FEATURE: case REQ_SET_FEATURE: if (pSetup->wValue == FEA_REMOTE_WAKEUP) { // put DEVICE_REMOTE_WAKEUP code here } if (pSetup->wValue == FEA_TEST_MODE) { // put TEST_MODE code here } return FALSE; case REQ_SET_DESCRIPTOR: DBG("Device req %d not implemented\n", pSetup->bRequest); return FALSE; default: DBG("Illegal device req %d\n", pSetup->bRequest); return FALSE; } return TRUE; } /************************************************************************* HandleStdInterfaceReq ===================== Local function to handle a standard interface request IN pSetup The setup packet IN/OUT *piLen Pointer to data length ppbData Data buffer. 
Returns TRUE if the request was handled successfully **************************************************************************/ static BOOL HandleStdInterfaceReq(TSetupPacket *pSetup, int *piLen, U8 **ppbData) { U8 *pbData = *ppbData; switch (pSetup->bRequest) { case REQ_GET_STATUS: // no bits specified pbData[0] = 0; pbData[1] = 0; *piLen = 2; break; case REQ_CLEAR_FEATURE: case REQ_SET_FEATURE: // not defined for interface return FALSE; case REQ_GET_INTERFACE: // TODO use bNumInterfaces // there is only one interface, return n-1 (= 0) pbData[0] = 0; *piLen = 1; break; case REQ_SET_INTERFACE: // TODO use bNumInterfaces // there is only one interface (= 0) if (pSetup->wValue == 0) { // ACK (zero packet) will be sent automatically } else { return FALSE; } break; default: DBG("Illegal interface req %d\n", pSetup->bRequest); return FALSE; } return TRUE; } /************************************************************************* HandleStdEndPointReq ==================== Local function to handle a standard endpoint request IN pSetup The setup packet IN/OUT *piLen Pointer to data length ppbData Data buffer. Returns TRUE if the request was handled successfully **************************************************************************/ static BOOL HandleStdEndPointReq(TSetupPacket *pSetup, int *piLen, U8 **ppbData) { U8 *pbData = *ppbData; switch (pSetup->bRequest) { case REQ_GET_STATUS: // bit 0 = endpointed halted or not pbData[0] = USBHwGetEPStall(pSetup->wIndex) ? 
1 : 0; pbData[1] = 0; *piLen = 2; break; case REQ_CLEAR_FEATURE: if (pSetup->wValue == FEA_ENDPOINT_HALT) { // clear HALT by unstalling USBHwEPStall(pSetup->wIndex, FALSE); break; } // only ENDPOINT_HALT defined for endpoints return FALSE; case REQ_SET_FEATURE: if (pSetup->wValue == FEA_ENDPOINT_HALT) { // set HALT by stalling USBHwEPStall(pSetup->wIndex, TRUE); break; } // only ENDPOINT_HALT defined for endpoints return FALSE; case REQ_SYNCH_FRAME: DBG("EP req %d not implemented\n", pSetup->bRequest); return FALSE; default: DBG("Illegal EP req %d\n", pSetup->bRequest); return FALSE; } return TRUE; } /************************************************************************* USBHandleStandardRequest =================== Local function to handle a standard request IN pSetup The setup packet IN/OUT *piLen Pointer to data length ppbData Data buffer. Returns TRUE if the request was handled successfully **************************************************************************/ BOOL USBHandleStandardRequest(TSetupPacket *pSetup, int *piLen, U8 **ppbData) { switch (REQTYPE_GET_RECIP(pSetup->bmRequestType)) { case REQTYPE_RECIP_DEVICE: return HandleStdDeviceReq(pSetup, piLen, ppbData); case REQTYPE_RECIP_INTERFACE: return HandleStdInterfaceReq(pSetup, piLen, ppbData); case REQTYPE_RECIP_ENDPOINT: return HandleStdEndPointReq(pSetup, piLen, ppbData); default: return FALSE; } } /************************************************************************* USBRegisterDescriptorHandler ========================= Registers a callback for handling descriptors IN pfnGetDesc Callback function pointer **************************************************************************/ void USBRegisterDescriptorHandler(TFnGetDescriptor *pfnGetDesc) { pfnGetDescriptor = pfnGetDesc; }
gpl-2.0
allanmatthew/linux-fslc
tools/perf/builtin-evlist.c
359
1724
/*
 * Builtin evlist command: Show the list of event selectors present
 * in a perf.data file.
 */
#include "builtin.h"

#include "util/util.h"

#include <linux/list.h>

#include "perf.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/parse-events.h"
#include "util/parse-options.h"
#include "util/session.h"
#include "util/data.h"

/*
 * Open @file_name, walk its event list and print one entry per event
 * selector.  The verbosity of each entry is controlled by @details.
 * Returns 0 on success, -ENOMEM if the session cannot be created.
 */
static int __cmd_evlist(const char *file_name, struct perf_attr_details *details)
{
	struct perf_data_file file = {
		.path = file_name,
		.mode = PERF_DATA_MODE_READ,
	};
	struct perf_session *session;
	struct perf_evsel *evsel;

	session = perf_session__new(&file, 0, NULL);
	if (session == NULL)
		return -ENOMEM;

	evlist__for_each(session->evlist, evsel)
		perf_evsel__fprintf(evsel, details, stdout);

	perf_session__delete(session);
	return 0;
}

/*
 * Entry point for "perf evlist": parse the command-line options and
 * delegate to __cmd_evlist().
 */
int cmd_evlist(int argc, const char **argv, const char *prefix __maybe_unused)
{
	struct perf_attr_details details = { .verbose = false, };
	const char * const evlist_usage[] = {
		"perf evlist [<options>]",
		NULL
	};
	const struct option options[] = {
	OPT_STRING('i', "input", &input_name, "file", "Input file name"),
	OPT_BOOLEAN('F', "freq", &details.freq, "Show the sample frequency"),
	OPT_BOOLEAN('v', "verbose", &details.verbose,
		    "Show all event attr details"),
	OPT_BOOLEAN('g', "group", &details.event_group,
		    "Show event group information"),
	OPT_END()
	};

	argc = parse_options(argc, argv, options, evlist_usage, 0);
	if (argc)	/* this command takes no positional arguments */
		usage_with_options(evlist_usage, options);

	/* --group produces its own layout; it cannot be combined */
	if (details.event_group && (details.verbose || details.freq)) {
		pr_err("--group option is not compatible with other options\n");
		usage_with_options(evlist_usage, options);
	}

	return __cmd_evlist(input_name, &details);
}
gpl-2.0
btaidm/JetsonTK1-Kernel-Grinch
arch/ia64/kernel/salinfo.c
1895
19772
/* * salinfo.c * * Creates entries in /proc/sal for various system features. * * Copyright (c) 2003, 2006 Silicon Graphics, Inc. All rights reserved. * Copyright (c) 2003 Hewlett-Packard Co * Bjorn Helgaas <bjorn.helgaas@hp.com> * * 10/30/2001 jbarnes@sgi.com copied much of Stephane's palinfo * code to create this file * Oct 23 2003 kaos@sgi.com * Replace IPI with set_cpus_allowed() to read a record from the required cpu. * Redesign salinfo log processing to separate interrupt and user space * contexts. * Cache the record across multi-block reads from user space. * Support > 64 cpus. * Delete module_exit and MOD_INC/DEC_COUNT, salinfo cannot be a module. * * Jan 28 2004 kaos@sgi.com * Periodically check for outstanding MCA or INIT records. * * Dec 5 2004 kaos@sgi.com * Standardize which records are cleared automatically. * * Aug 18 2005 kaos@sgi.com * mca.c may not pass a buffer, a NULL buffer just indicates that a new * record is available in SAL. * Replace some NR_CPUS by cpus_online, for hotplug cpu. * * Jan 5 2006 kaos@sgi.com * Handle hotplug cpus coming online. * Handle hotplug cpus going offline while they still have outstanding records. * Use the cpu_* macros consistently. * Replace the counting semaphore with a mutex and a test if the cpumask is non-empty. * Modify the locking to make the test for "work to do" an atomic operation. 
*/ #include <linux/capability.h> #include <linux/cpu.h> #include <linux/types.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/module.h> #include <linux/smp.h> #include <linux/timer.h> #include <linux/vmalloc.h> #include <linux/semaphore.h> #include <asm/sal.h> #include <asm/uaccess.h> MODULE_AUTHOR("Jesse Barnes <jbarnes@sgi.com>"); MODULE_DESCRIPTION("/proc interface to IA-64 SAL features"); MODULE_LICENSE("GPL"); static const struct file_operations proc_salinfo_fops; typedef struct { const char *name; /* name of the proc entry */ unsigned long feature; /* feature bit */ struct proc_dir_entry *entry; /* registered entry (removal) */ } salinfo_entry_t; /* * List {name,feature} pairs for every entry in /proc/sal/<feature> * that this module exports */ static const salinfo_entry_t salinfo_entries[]={ { "bus_lock", IA64_SAL_PLATFORM_FEATURE_BUS_LOCK, }, { "irq_redirection", IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT, }, { "ipi_redirection", IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT, }, { "itc_drift", IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT, }, }; #define NR_SALINFO_ENTRIES ARRAY_SIZE(salinfo_entries) static char *salinfo_log_name[] = { "mca", "init", "cmc", "cpe", }; static struct proc_dir_entry *salinfo_proc_entries[ ARRAY_SIZE(salinfo_entries) + /* /proc/sal/bus_lock */ ARRAY_SIZE(salinfo_log_name) + /* /proc/sal/{mca,...} */ (2 * ARRAY_SIZE(salinfo_log_name)) + /* /proc/sal/mca/{event,data} */ 1]; /* /proc/sal */ /* Some records we get ourselves, some are accessed as saved data in buffers * that are owned by mca.c. */ struct salinfo_data_saved { u8* buffer; u64 size; u64 id; int cpu; }; /* State transitions. Actions are :- * Write "read <cpunum>" to the data file. * Write "clear <cpunum>" to the data file. * Write "oemdata <cpunum> <offset> to the data file. * Read from the data file. * Close the data file. * * Start state is NO_DATA. * * NO_DATA * write "read <cpunum>" -> NO_DATA or LOG_RECORD. * write "clear <cpunum>" -> NO_DATA or LOG_RECORD. 
 * write "oemdata <cpunum> <offset> -> return -EINVAL.
 * read data -> return EOF.
 * close -> unchanged.  Free record areas.
 *
 * LOG_RECORD
 * write "read <cpunum>" -> NO_DATA or LOG_RECORD.
 * write "clear <cpunum>" -> NO_DATA or LOG_RECORD.
 * write "oemdata <cpunum> <offset> -> format the oem data, goto OEMDATA.
 * read data -> return the INIT/MCA/CMC/CPE record.
 * close -> unchanged.  Keep record areas.
 *
 * OEMDATA
 * write "read <cpunum>" -> NO_DATA or LOG_RECORD.
 * write "clear <cpunum>" -> NO_DATA or LOG_RECORD.
 * write "oemdata <cpunum> <offset> -> format the oem data, goto OEMDATA.
 * read data -> return the formatted oemdata.
 * close -> unchanged.  Keep record areas.
 *
 * Closing the data file does not change the state.  This allows shell scripts
 * to manipulate salinfo data, each shell redirection opens the file, does one
 * action then closes it again.  The record areas are only freed at close when
 * the state is NO_DATA.
 */

/* Per-type processing state; transitions are described in the comment above. */
enum salinfo_state {
	STATE_NO_DATA,
	STATE_LOG_RECORD,
	STATE_OEMDATA,
};

/* One instance per record type (mca/init/cmc/cpe), see salinfo_data[] below. */
struct salinfo_data {
	cpumask_t		cpu_event;	/* which cpus have outstanding events */
	struct semaphore	mutex;		/* kicked by salinfo_work_to_do() to wake readers */
	u8			*log_buffer;	/* vmalloc'ed at open, filled from SAL or a saved record */
	u64			log_size;
	u8			*oemdata;	/* decoded oem data */
	u64			oemdata_size;
	int			open;		/* single-open to prevent races */
	u8			type;		/* index into salinfo_log_name[] */
	u8			saved_num;	/* using a saved record? */
	enum salinfo_state	state :8;	/* processing state */
	u8			padding;
	int			cpu_check;	/* next CPU to check */
	struct salinfo_data_saved data_saved[5];/* save last 5 records from mca.c, must be < 255 */
};

static struct salinfo_data salinfo_data[ARRAY_SIZE(salinfo_log_name)];

/* data_lock serialises the single-open check on ->open */
static DEFINE_SPINLOCK(data_lock);
/* data_saved_lock protects the data_saved[] arrays and the cpu_event masks */
static DEFINE_SPINLOCK(data_saved_lock);

/** salinfo_platform_oemdata - optional callback to decode oemdata from an error
 * record.
 * @sect_header: pointer to the start of the section to decode.
 * @oemdata: returns vmalloc area containing the decoded output.
 * @oemdata_size: returns length of decoded output (strlen).
* * Description: If user space asks for oem data to be decoded by the kernel * and/or prom and the platform has set salinfo_platform_oemdata to the address * of a platform specific routine then call that routine. salinfo_platform_oemdata * vmalloc's and formats its output area, returning the address of the text * and its strlen. Returns 0 for success, -ve for error. The callback is * invoked on the cpu that generated the error record. */ int (*salinfo_platform_oemdata)(const u8 *sect_header, u8 **oemdata, u64 *oemdata_size); struct salinfo_platform_oemdata_parms { const u8 *efi_guid; u8 **oemdata; u64 *oemdata_size; int ret; }; /* Kick the mutex that tells user space that there is work to do. Instead of * trying to track the state of the mutex across multiple cpus, in user * context, interrupt context, non-maskable interrupt context and hotplug cpu, * it is far easier just to grab the mutex if it is free then release it. * * This routine must be called with data_saved_lock held, to make the down/up * operation atomic. */ static void salinfo_work_to_do(struct salinfo_data *data) { (void)(down_trylock(&data->mutex) ?: 0); up(&data->mutex); } static void salinfo_platform_oemdata_cpu(void *context) { struct salinfo_platform_oemdata_parms *parms = context; parms->ret = salinfo_platform_oemdata(parms->efi_guid, parms->oemdata, parms->oemdata_size); } static void shift1_data_saved (struct salinfo_data *data, int shift) { memcpy(data->data_saved+shift, data->data_saved+shift+1, (ARRAY_SIZE(data->data_saved) - (shift+1)) * sizeof(data->data_saved[0])); memset(data->data_saved + ARRAY_SIZE(data->data_saved) - 1, 0, sizeof(data->data_saved[0])); } /* This routine is invoked in interrupt context. Note: mca.c enables * interrupts before calling this code for CMC/CPE. MCA and INIT events are * not irq safe, do not call any routines that use spinlocks, they may deadlock. 
* MCA and INIT records are recorded, a timer event will look for any * outstanding events and wake up the user space code. * * The buffer passed from mca.c points to the output from ia64_log_get. This is * a persistent buffer but its contents can change between the interrupt and * when user space processes the record. Save the record id to identify * changes. If the buffer is NULL then just update the bitmap. */ void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe) { struct salinfo_data *data = salinfo_data + type; struct salinfo_data_saved *data_saved; unsigned long flags = 0; int i; int saved_size = ARRAY_SIZE(data->data_saved); BUG_ON(type >= ARRAY_SIZE(salinfo_log_name)); if (irqsafe) spin_lock_irqsave(&data_saved_lock, flags); if (buffer) { for (i = 0, data_saved = data->data_saved; i < saved_size; ++i, ++data_saved) { if (!data_saved->buffer) break; } if (i == saved_size) { if (!data->saved_num) { shift1_data_saved(data, 0); data_saved = data->data_saved + saved_size - 1; } else data_saved = NULL; } if (data_saved) { data_saved->cpu = smp_processor_id(); data_saved->id = ((sal_log_record_header_t *)buffer)->id; data_saved->size = size; data_saved->buffer = buffer; } } cpu_set(smp_processor_id(), data->cpu_event); if (irqsafe) { salinfo_work_to_do(data); spin_unlock_irqrestore(&data_saved_lock, flags); } } /* Check for outstanding MCA/INIT records every minute (arbitrary) */ #define SALINFO_TIMER_DELAY (60*HZ) static struct timer_list salinfo_timer; extern void ia64_mlogbuf_dump(void); static void salinfo_timeout_check(struct salinfo_data *data) { unsigned long flags; if (!data->open) return; if (!cpus_empty(data->cpu_event)) { spin_lock_irqsave(&data_saved_lock, flags); salinfo_work_to_do(data); spin_unlock_irqrestore(&data_saved_lock, flags); } } static void salinfo_timeout (unsigned long arg) { ia64_mlogbuf_dump(); salinfo_timeout_check(salinfo_data + SAL_INFO_TYPE_MCA); salinfo_timeout_check(salinfo_data + SAL_INFO_TYPE_INIT); 
salinfo_timer.expires = jiffies + SALINFO_TIMER_DELAY; add_timer(&salinfo_timer); } static int salinfo_event_open(struct inode *inode, struct file *file) { if (!capable(CAP_SYS_ADMIN)) return -EPERM; return 0; } static ssize_t salinfo_event_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) { struct salinfo_data *data = PDE_DATA(file_inode(file)); char cmd[32]; size_t size; int i, n, cpu = -1; retry: if (cpus_empty(data->cpu_event) && down_trylock(&data->mutex)) { if (file->f_flags & O_NONBLOCK) return -EAGAIN; if (down_interruptible(&data->mutex)) return -EINTR; } n = data->cpu_check; for (i = 0; i < nr_cpu_ids; i++) { if (cpu_isset(n, data->cpu_event)) { if (!cpu_online(n)) { cpu_clear(n, data->cpu_event); continue; } cpu = n; break; } if (++n == nr_cpu_ids) n = 0; } if (cpu == -1) goto retry; ia64_mlogbuf_dump(); /* for next read, start checking at next CPU */ data->cpu_check = cpu; if (++data->cpu_check == nr_cpu_ids) data->cpu_check = 0; snprintf(cmd, sizeof(cmd), "read %d\n", cpu); size = strlen(cmd); if (size > count) size = count; if (copy_to_user(buffer, cmd, size)) return -EFAULT; return size; } static const struct file_operations salinfo_event_fops = { .open = salinfo_event_open, .read = salinfo_event_read, .llseek = noop_llseek, }; static int salinfo_log_open(struct inode *inode, struct file *file) { struct salinfo_data *data = PDE_DATA(inode); if (!capable(CAP_SYS_ADMIN)) return -EPERM; spin_lock(&data_lock); if (data->open) { spin_unlock(&data_lock); return -EBUSY; } data->open = 1; spin_unlock(&data_lock); if (data->state == STATE_NO_DATA && !(data->log_buffer = vmalloc(ia64_sal_get_state_info_size(data->type)))) { data->open = 0; return -ENOMEM; } return 0; } static int salinfo_log_release(struct inode *inode, struct file *file) { struct salinfo_data *data = PDE_DATA(inode); if (data->state == STATE_NO_DATA) { vfree(data->log_buffer); vfree(data->oemdata); data->log_buffer = NULL; data->oemdata = NULL; } spin_lock(&data_lock); 
data->open = 0; spin_unlock(&data_lock); return 0; } static void call_on_cpu(int cpu, void (*fn)(void *), void *arg) { cpumask_t save_cpus_allowed = current->cpus_allowed; set_cpus_allowed_ptr(current, cpumask_of(cpu)); (*fn)(arg); set_cpus_allowed_ptr(current, &save_cpus_allowed); } static void salinfo_log_read_cpu(void *context) { struct salinfo_data *data = context; sal_log_record_header_t *rh; data->log_size = ia64_sal_get_state_info(data->type, (u64 *) data->log_buffer); rh = (sal_log_record_header_t *)(data->log_buffer); /* Clear corrected errors as they are read from SAL */ if (rh->severity == sal_log_severity_corrected) ia64_sal_clear_state_info(data->type); } static void salinfo_log_new_read(int cpu, struct salinfo_data *data) { struct salinfo_data_saved *data_saved; unsigned long flags; int i; int saved_size = ARRAY_SIZE(data->data_saved); data->saved_num = 0; spin_lock_irqsave(&data_saved_lock, flags); retry: for (i = 0, data_saved = data->data_saved; i < saved_size; ++i, ++data_saved) { if (data_saved->buffer && data_saved->cpu == cpu) { sal_log_record_header_t *rh = (sal_log_record_header_t *)(data_saved->buffer); data->log_size = data_saved->size; memcpy(data->log_buffer, rh, data->log_size); barrier(); /* id check must not be moved */ if (rh->id == data_saved->id) { data->saved_num = i+1; break; } /* saved record changed by mca.c since interrupt, discard it */ shift1_data_saved(data, i); goto retry; } } spin_unlock_irqrestore(&data_saved_lock, flags); if (!data->saved_num) call_on_cpu(cpu, salinfo_log_read_cpu, data); if (!data->log_size) { data->state = STATE_NO_DATA; cpu_clear(cpu, data->cpu_event); } else { data->state = STATE_LOG_RECORD; } } static ssize_t salinfo_log_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) { struct salinfo_data *data = PDE_DATA(file_inode(file)); u8 *buf; u64 bufsize; if (data->state == STATE_LOG_RECORD) { buf = data->log_buffer; bufsize = data->log_size; } else if (data->state == STATE_OEMDATA) 
{ buf = data->oemdata; bufsize = data->oemdata_size; } else { buf = NULL; bufsize = 0; } return simple_read_from_buffer(buffer, count, ppos, buf, bufsize); } static void salinfo_log_clear_cpu(void *context) { struct salinfo_data *data = context; ia64_sal_clear_state_info(data->type); } static int salinfo_log_clear(struct salinfo_data *data, int cpu) { sal_log_record_header_t *rh; unsigned long flags; spin_lock_irqsave(&data_saved_lock, flags); data->state = STATE_NO_DATA; if (!cpu_isset(cpu, data->cpu_event)) { spin_unlock_irqrestore(&data_saved_lock, flags); return 0; } cpu_clear(cpu, data->cpu_event); if (data->saved_num) { shift1_data_saved(data, data->saved_num - 1); data->saved_num = 0; } spin_unlock_irqrestore(&data_saved_lock, flags); rh = (sal_log_record_header_t *)(data->log_buffer); /* Corrected errors have already been cleared from SAL */ if (rh->severity != sal_log_severity_corrected) call_on_cpu(cpu, salinfo_log_clear_cpu, data); /* clearing a record may make a new record visible */ salinfo_log_new_read(cpu, data); if (data->state == STATE_LOG_RECORD) { spin_lock_irqsave(&data_saved_lock, flags); cpu_set(cpu, data->cpu_event); salinfo_work_to_do(data); spin_unlock_irqrestore(&data_saved_lock, flags); } return 0; } static ssize_t salinfo_log_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { struct salinfo_data *data = PDE_DATA(file_inode(file)); char cmd[32]; size_t size; u32 offset; int cpu; size = sizeof(cmd); if (count < size) size = count; if (copy_from_user(cmd, buffer, size)) return -EFAULT; if (sscanf(cmd, "read %d", &cpu) == 1) { salinfo_log_new_read(cpu, data); } else if (sscanf(cmd, "clear %d", &cpu) == 1) { int ret; if ((ret = salinfo_log_clear(data, cpu))) count = ret; } else if (sscanf(cmd, "oemdata %d %d", &cpu, &offset) == 2) { if (data->state != STATE_LOG_RECORD && data->state != STATE_OEMDATA) return -EINVAL; if (offset > data->log_size - sizeof(efi_guid_t)) return -EINVAL; data->state = STATE_OEMDATA; if 
(salinfo_platform_oemdata) { struct salinfo_platform_oemdata_parms parms = { .efi_guid = data->log_buffer + offset, .oemdata = &data->oemdata, .oemdata_size = &data->oemdata_size }; call_on_cpu(cpu, salinfo_platform_oemdata_cpu, &parms); if (parms.ret) count = parms.ret; } else data->oemdata_size = 0; } else return -EINVAL; return count; } static const struct file_operations salinfo_data_fops = { .open = salinfo_log_open, .release = salinfo_log_release, .read = salinfo_log_read, .write = salinfo_log_write, .llseek = default_llseek, }; static int __cpuinit salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) { unsigned int i, cpu = (unsigned long)hcpu; unsigned long flags; struct salinfo_data *data; switch (action) { case CPU_ONLINE: case CPU_ONLINE_FROZEN: spin_lock_irqsave(&data_saved_lock, flags); for (i = 0, data = salinfo_data; i < ARRAY_SIZE(salinfo_data); ++i, ++data) { cpu_set(cpu, data->cpu_event); salinfo_work_to_do(data); } spin_unlock_irqrestore(&data_saved_lock, flags); break; case CPU_DEAD: case CPU_DEAD_FROZEN: spin_lock_irqsave(&data_saved_lock, flags); for (i = 0, data = salinfo_data; i < ARRAY_SIZE(salinfo_data); ++i, ++data) { struct salinfo_data_saved *data_saved; int j; for (j = ARRAY_SIZE(data->data_saved) - 1, data_saved = data->data_saved + j; j >= 0; --j, --data_saved) { if (data_saved->buffer && data_saved->cpu == cpu) { shift1_data_saved(data, j); } } cpu_clear(cpu, data->cpu_event); } spin_unlock_irqrestore(&data_saved_lock, flags); break; } return NOTIFY_OK; } static struct notifier_block salinfo_cpu_notifier __cpuinitdata = { .notifier_call = salinfo_cpu_callback, .priority = 0, }; static int __init salinfo_init(void) { struct proc_dir_entry *salinfo_dir; /* /proc/sal dir entry */ struct proc_dir_entry **sdir = salinfo_proc_entries; /* keeps track of every entry */ struct proc_dir_entry *dir, *entry; struct salinfo_data *data; int i, j; salinfo_dir = proc_mkdir("sal", NULL); if (!salinfo_dir) return 0; for 
(i=0; i < NR_SALINFO_ENTRIES; i++) { /* pass the feature bit in question as misc data */ *sdir++ = proc_create_data(salinfo_entries[i].name, 0, salinfo_dir, &proc_salinfo_fops, (void *)salinfo_entries[i].feature); } for (i = 0; i < ARRAY_SIZE(salinfo_log_name); i++) { data = salinfo_data + i; data->type = i; sema_init(&data->mutex, 1); dir = proc_mkdir(salinfo_log_name[i], salinfo_dir); if (!dir) continue; entry = proc_create_data("event", S_IRUSR, dir, &salinfo_event_fops, data); if (!entry) continue; *sdir++ = entry; entry = proc_create_data("data", S_IRUSR | S_IWUSR, dir, &salinfo_data_fops, data); if (!entry) continue; *sdir++ = entry; /* we missed any events before now */ for_each_online_cpu(j) cpu_set(j, data->cpu_event); *sdir++ = dir; } *sdir++ = salinfo_dir; init_timer(&salinfo_timer); salinfo_timer.expires = jiffies + SALINFO_TIMER_DELAY; salinfo_timer.function = &salinfo_timeout; add_timer(&salinfo_timer); register_hotcpu_notifier(&salinfo_cpu_notifier); return 0; } /* * 'data' contains an integer that corresponds to the feature we're * testing */ static int proc_salinfo_show(struct seq_file *m, void *v) { unsigned long data = (unsigned long)v; seq_puts(m, (sal_platform_features & data) ? "1\n" : "0\n"); return 0; } static int proc_salinfo_open(struct inode *inode, struct file *file) { return single_open(file, proc_salinfo_show, PDE_DATA(inode)); } static const struct file_operations proc_salinfo_fops = { .open = proc_salinfo_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; module_init(salinfo_init);
gpl-2.0
zeroblade1984/MotoG2k15
tools/hv/hv_vss_daemon.c
2151
5529
/*
 * An implementation of the host initiated guest snapshot for Hyper-V.
 *
 *
 * Copyright (C) 2013, Microsoft, Inc.
 * Author : K. Y. Srinivasan <kys@microsoft.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 */


#include <sys/types.h>
#include <sys/socket.h>
#include <sys/poll.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <fcntl.h>
#include <stdio.h>
#include <mntent.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <ctype.h>
#include <errno.h>
#include <arpa/inet.h>
#include <linux/fs.h>
#include <linux/connector.h>
#include <linux/hyperv.h>
#include <linux/netlink.h>
#include <syslog.h>

static char vss_recv_buffer[4096];
static char vss_send_buffer[4096];
static struct sockaddr_nl addr;

#ifndef SOL_NETLINK
#define SOL_NETLINK 270
#endif

/*
 * Issue the freeze/thaw ioctl @cmd (FIFREEZE or FITHAW) on the filesystem
 * mounted at @dir.  @fs_op is the human-readable operation name, used only
 * for logging.  Returns 0 on success, 1 on failure.
 */
static int vss_do_freeze(char *dir, unsigned int cmd, char *fs_op)
{
	int ret, fd = open(dir, O_RDONLY);

	if (fd < 0)
		return 1;
	ret = ioctl(fd, cmd, 0);
	if (ret) {
		/* save errno before close() below can clobber it */
		int keep_errno = errno;

		syslog(LOG_ERR, "VSS: %s of %s failed: %s\n",
		       fs_op, dir, strerror(keep_errno));
	} else {
		/*
		 * Fix: the previous code logged strerror(errno) here as well,
		 * but errno is stale after a *successful* ioctl, so the
		 * message reported a bogus error for every good operation.
		 */
		syslog(LOG_INFO, "VSS: %s of %s: %s\n", fs_op, dir, "done");
	}
	close(fd);
	return !!ret;
}

/*
 * Freeze or thaw (VSS_OP_FREEZE / VSS_OP_THAW) every locally mounted
 * block-device filesystem listed in /proc/mounts.  The root filesystem is
 * processed last on freeze so that /proc/mounts stays readable as long as
 * possible.  Returns 0 on success, non-zero if any mount failed, -1 for an
 * unknown operation or if /proc/mounts cannot be opened.
 */
static int vss_operate(int operation)
{
	char *fs_op;
	char match[] = "/dev/";
	FILE *mounts;
	struct mntent *ent;
	unsigned int cmd;
	int error = 0, root_seen = 0;

	switch (operation) {
	case VSS_OP_FREEZE:
		cmd = FIFREEZE;
		fs_op = "freeze";
		break;
	case VSS_OP_THAW:
		cmd = FITHAW;
		fs_op = "thaw";
		break;
	default:
		return -1;
	}

	mounts = setmntent("/proc/mounts", "r");
	if (mounts == NULL)
		return -1;

	while ((ent = getmntent(mounts))) {
		/* only real block devices; skip virtual/remote filesystems */
		if (strncmp(ent->mnt_fsname, match, strlen(match)))
			continue;
		/* read-only media cannot be frozen */
		if (strcmp(ent->mnt_type, "iso9660") == 0)
			continue;
		if (strcmp(ent->mnt_dir, "/") == 0) {
			root_seen = 1;
			continue;
		}
		error |= vss_do_freeze(ent->mnt_dir, cmd, fs_op);
	}
	endmntent(mounts);

	if (root_seen) {
		error |= vss_do_freeze("/", cmd, fs_op);
	}

	return error;
}

/*
 * Send connector message @msg to the kernel over netlink socket @fd.
 * Returns the sendmsg() result (bytes sent, or -1 on error).
 */
static int netlink_send(int fd, struct cn_msg *msg)
{
	struct nlmsghdr *nlh;
	unsigned int size;
	struct msghdr message;
	char buffer[64];
	struct iovec iov[2];

	size = NLMSG_SPACE(sizeof(struct cn_msg) + msg->len);

	nlh = (struct nlmsghdr *)buffer;
	nlh->nlmsg_seq = 0;
	nlh->nlmsg_pid = getpid();
	nlh->nlmsg_type = NLMSG_DONE;
	nlh->nlmsg_len = NLMSG_LENGTH(size - sizeof(*nlh));
	nlh->nlmsg_flags = 0;

	iov[0].iov_base = nlh;
	iov[0].iov_len = sizeof(*nlh);

	iov[1].iov_base = msg;
	iov[1].iov_len = size;

	memset(&message, 0, sizeof(message));
	message.msg_name = &addr;
	message.msg_namelen = sizeof(addr);
	message.msg_iov = iov;
	message.msg_iovlen = 2;

	return sendmsg(fd, &message, 0);
}

int main(void)
{
	int fd, len, nl_group;
	int error;
	struct cn_msg *message;
	struct pollfd pfd;
	struct nlmsghdr *incoming_msg;
	struct cn_msg	*incoming_cn_msg;
	int	op;
	struct hv_vss_msg *vss_msg;

	if (daemon(1, 0))
		return 1;
	openlog("Hyper-V VSS", 0, LOG_USER);
	syslog(LOG_INFO, "VSS starting; pid is:%d", getpid());

	fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);
	if (fd < 0) {
		syslog(LOG_ERR, "netlink socket creation failed; error:%d", fd);
		exit(EXIT_FAILURE);
	}
	addr.nl_family = AF_NETLINK;
	addr.nl_pad = 0;
	addr.nl_pid = 0;
	addr.nl_groups = 0;


	error = bind(fd, (struct sockaddr *)&addr, sizeof(addr));
	if (error < 0) {
		syslog(LOG_ERR, "bind failed; error:%d", error);
		close(fd);
		exit(EXIT_FAILURE);
	}
	nl_group = CN_VSS_IDX;
	setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
		   &nl_group, sizeof(nl_group));
	/*
	 * Register ourselves with the kernel.
	 */
	message = (struct cn_msg *)vss_send_buffer;
	message->id.idx = CN_VSS_IDX;
	message->id.val = CN_VSS_VAL;
	message->ack = 0;
	vss_msg = (struct hv_vss_msg *)message->data;
	vss_msg->vss_hdr.operation = VSS_OP_REGISTER;

	message->len = sizeof(struct hv_vss_msg);

	len = netlink_send(fd, message);
	if (len < 0) {
		syslog(LOG_ERR, "netlink_send failed; error:%d", len);
		close(fd);
		exit(EXIT_FAILURE);
	}

	pfd.fd = fd;

	while (1) {
		struct sockaddr *addr_p = (struct sockaddr *) &addr;
		socklen_t addr_l = sizeof(addr);
		pfd.events = POLLIN;
		pfd.revents = 0;
		/*
		 * Fix: the poll() result was previously ignored; retry on
		 * signal interruption instead of falling straight through.
		 */
		if (poll(&pfd, 1, -1) < 0) {
			if (errno == EINTR)
				continue;
			syslog(LOG_ERR, "poll failed; error:%d %s",
			       errno, strerror(errno));
			close(fd);
			return -1;
		}

		len = recvfrom(fd, vss_recv_buffer, sizeof(vss_recv_buffer), 0,
				addr_p, &addr_l);

		if (len < 0) {
			syslog(LOG_ERR, "recvfrom failed; pid:%u error:%d %s",
					addr.nl_pid, errno, strerror(errno));
			close(fd);
			return -1;
		}

		if (addr.nl_pid) {
			syslog(LOG_WARNING,
				"Received packet from untrusted pid:%u",
				addr.nl_pid);
			continue;
		}

		incoming_msg = (struct nlmsghdr *)vss_recv_buffer;

		if (incoming_msg->nlmsg_type != NLMSG_DONE)
			continue;

		incoming_cn_msg = (struct cn_msg *)NLMSG_DATA(incoming_msg);
		vss_msg = (struct hv_vss_msg *)incoming_cn_msg->data;
		op = vss_msg->vss_hdr.operation;
		error =  HV_S_OK;

		switch (op) {
		case VSS_OP_FREEZE:
		case VSS_OP_THAW:
			error = vss_operate(op);
			if (error) {
				/*
				 * Fix: roll back a partial freeze so the
				 * guest is not left with frozen filesystems
				 * when the snapshot is going to fail anyway.
				 */
				if (op == VSS_OP_FREEZE)
					vss_operate(VSS_OP_THAW);
				error = HV_E_FAIL;
			}
			break;
		default:
			syslog(LOG_ERR, "Illegal op:%d\n", op);
		}
		vss_msg->error = error;
		len = netlink_send(fd, incoming_cn_msg);
		if (len < 0) {
			syslog(LOG_ERR, "net_link send failed; error:%d", len);
			exit(EXIT_FAILURE);
		}
	}
}
gpl-2.0
deafnote/kernel-gigabyte-rior1
arch/arm/mach-realview/hotplug.c
2919
2565
/* * linux/arch/arm/mach-realview/hotplug.c * * Copyright (C) 2002 ARM Ltd. * All Rights Reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/smp.h> #include <asm/cacheflush.h> extern volatile int pen_release; static inline void cpu_enter_lowpower(void) { unsigned int v; flush_cache_all(); asm volatile( " mcr p15, 0, %1, c7, c5, 0\n" " mcr p15, 0, %1, c7, c10, 4\n" /* * Turn off coherency */ " mrc p15, 0, %0, c1, c0, 1\n" " bic %0, %0, #0x20\n" " mcr p15, 0, %0, c1, c0, 1\n" " mrc p15, 0, %0, c1, c0, 0\n" " bic %0, %0, %2\n" " mcr p15, 0, %0, c1, c0, 0\n" : "=&r" (v) : "r" (0), "Ir" (CR_C) : "cc"); } static inline void cpu_leave_lowpower(void) { unsigned int v; asm volatile( "mrc p15, 0, %0, c1, c0, 0\n" " orr %0, %0, %1\n" " mcr p15, 0, %0, c1, c0, 0\n" " mrc p15, 0, %0, c1, c0, 1\n" " orr %0, %0, #0x20\n" " mcr p15, 0, %0, c1, c0, 1\n" : "=&r" (v) : "Ir" (CR_C) : "cc"); } static inline void platform_do_lowpower(unsigned int cpu, int *spurious) { /* * there is no power-control hardware on this platform, so all * we can do is put the core into WFI; this is safe as the calling * code will have already disabled interrupts */ for (;;) { /* * here's the WFI */ asm(".word 0xe320f003\n" : : : "memory", "cc"); if (pen_release == cpu) { /* * OK, proper wakeup, we're done */ break; } /* * Getting here, means that we have come out of WFI without * having been woken up - this shouldn't happen * * Just note it happening - when we're woken, we can report * its occurrence. 
*/ (*spurious)++; } } int platform_cpu_kill(unsigned int cpu) { return 1; } /* * platform-specific code to shutdown a CPU * * Called with IRQs disabled */ void platform_cpu_die(unsigned int cpu) { int spurious = 0; /* * we're ready for shutdown now, so do it */ cpu_enter_lowpower(); platform_do_lowpower(cpu, &spurious); /* * bring this CPU back into the world of cache * coherency, and then restore interrupts */ cpu_leave_lowpower(); if (spurious) pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious); } int platform_cpu_disable(unsigned int cpu) { /* * we don't allow CPU 0 to be shutdown (it is still too special * e.g. clock tick interrupts) */ return cpu == 0 ? -EPERM : 0; }
gpl-2.0
varchild/vigor_aosp_kernel
drivers/ide/ide-taskfile.c
2919
16989
/* * Copyright (C) 2000-2002 Michael Cornwell <cornwell@acm.org> * Copyright (C) 2000-2002 Andre Hedrick <andre@linux-ide.org> * Copyright (C) 2001-2002 Klaus Smolin * IBM Storage Technology Division * Copyright (C) 2003-2004, 2007 Bartlomiej Zolnierkiewicz * * The big the bad and the ugly. */ #include <linux/types.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/interrupt.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/hdreg.h> #include <linux/ide.h> #include <linux/scatterlist.h> #include <linux/uaccess.h> #include <asm/io.h> void ide_tf_readback(ide_drive_t *drive, struct ide_cmd *cmd) { ide_hwif_t *hwif = drive->hwif; const struct ide_tp_ops *tp_ops = hwif->tp_ops; /* Be sure we're looking at the low order bytes */ tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS); tp_ops->tf_read(drive, &cmd->tf, cmd->valid.in.tf); if (cmd->tf_flags & IDE_TFLAG_LBA48) { tp_ops->write_devctl(hwif, ATA_HOB | ATA_DEVCTL_OBS); tp_ops->tf_read(drive, &cmd->hob, cmd->valid.in.hob); } } void ide_tf_dump(const char *s, struct ide_cmd *cmd) { #ifdef DEBUG printk("%s: tf: feat 0x%02x nsect 0x%02x lbal 0x%02x " "lbam 0x%02x lbah 0x%02x dev 0x%02x cmd 0x%02x\n", s, cmd->tf.feature, cmd->tf.nsect, cmd->tf.lbal, cmd->tf.lbam, cmd->tf.lbah, cmd->tf.device, cmd->tf.command); printk("%s: hob: nsect 0x%02x lbal 0x%02x lbam 0x%02x lbah 0x%02x\n", s, cmd->hob.nsect, cmd->hob.lbal, cmd->hob.lbam, cmd->hob.lbah); #endif } int taskfile_lib_get_identify(ide_drive_t *drive, u8 *buf) { struct ide_cmd cmd; memset(&cmd, 0, sizeof(cmd)); cmd.tf.nsect = 0x01; if (drive->media == ide_disk) cmd.tf.command = ATA_CMD_ID_ATA; else cmd.tf.command = ATA_CMD_ID_ATAPI; cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE; cmd.protocol = ATA_PROT_PIO; return ide_raw_taskfile(drive, &cmd, buf, 1); } static ide_startstop_t task_no_data_intr(ide_drive_t *); static ide_startstop_t 
pre_task_out_intr(ide_drive_t *, struct ide_cmd *); static ide_startstop_t task_pio_intr(ide_drive_t *); ide_startstop_t do_rw_taskfile(ide_drive_t *drive, struct ide_cmd *orig_cmd) { ide_hwif_t *hwif = drive->hwif; struct ide_cmd *cmd = &hwif->cmd; struct ide_taskfile *tf = &cmd->tf; ide_handler_t *handler = NULL; const struct ide_tp_ops *tp_ops = hwif->tp_ops; const struct ide_dma_ops *dma_ops = hwif->dma_ops; if (orig_cmd->protocol == ATA_PROT_PIO && (orig_cmd->tf_flags & IDE_TFLAG_MULTI_PIO) && drive->mult_count == 0) { pr_err("%s: multimode not set!\n", drive->name); return ide_stopped; } if (orig_cmd->ftf_flags & IDE_FTFLAG_FLAGGED) orig_cmd->ftf_flags |= IDE_FTFLAG_SET_IN_FLAGS; memcpy(cmd, orig_cmd, sizeof(*cmd)); if ((cmd->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) { ide_tf_dump(drive->name, cmd); tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS); if (cmd->ftf_flags & IDE_FTFLAG_OUT_DATA) { u8 data[2] = { cmd->tf.data, cmd->hob.data }; tp_ops->output_data(drive, cmd, data, 2); } if (cmd->valid.out.tf & IDE_VALID_DEVICE) { u8 HIHI = (cmd->tf_flags & IDE_TFLAG_LBA48) ? 
0xE0 : 0xEF; if (!(cmd->ftf_flags & IDE_FTFLAG_FLAGGED)) cmd->tf.device &= HIHI; cmd->tf.device |= drive->select; } tp_ops->tf_load(drive, &cmd->hob, cmd->valid.out.hob); tp_ops->tf_load(drive, &cmd->tf, cmd->valid.out.tf); } switch (cmd->protocol) { case ATA_PROT_PIO: if (cmd->tf_flags & IDE_TFLAG_WRITE) { tp_ops->exec_command(hwif, tf->command); ndelay(400); /* FIXME */ return pre_task_out_intr(drive, cmd); } handler = task_pio_intr; /* fall-through */ case ATA_PROT_NODATA: if (handler == NULL) handler = task_no_data_intr; ide_execute_command(drive, cmd, handler, WAIT_WORSTCASE); return ide_started; case ATA_PROT_DMA: if (ide_dma_prepare(drive, cmd)) return ide_stopped; hwif->expiry = dma_ops->dma_timer_expiry; ide_execute_command(drive, cmd, ide_dma_intr, 2 * WAIT_CMD); dma_ops->dma_start(drive); default: return ide_started; } } EXPORT_SYMBOL_GPL(do_rw_taskfile); static ide_startstop_t task_no_data_intr(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; struct ide_cmd *cmd = &hwif->cmd; struct ide_taskfile *tf = &cmd->tf; int custom = (cmd->tf_flags & IDE_TFLAG_CUSTOM_HANDLER) ? 1 : 0; int retries = (custom && tf->command == ATA_CMD_INIT_DEV_PARAMS) ? 
5 : 1; u8 stat; local_irq_enable_in_hardirq(); while (1) { stat = hwif->tp_ops->read_status(hwif); if ((stat & ATA_BUSY) == 0 || retries-- == 0) break; udelay(10); }; if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) { if (custom && tf->command == ATA_CMD_SET_MULTI) { drive->mult_req = drive->mult_count = 0; drive->special_flags |= IDE_SFLAG_RECALIBRATE; (void)ide_dump_status(drive, __func__, stat); return ide_stopped; } else if (custom && tf->command == ATA_CMD_INIT_DEV_PARAMS) { if ((stat & (ATA_ERR | ATA_DRQ)) == 0) { ide_set_handler(drive, &task_no_data_intr, WAIT_WORSTCASE); return ide_started; } } return ide_error(drive, "task_no_data_intr", stat); } if (custom && tf->command == ATA_CMD_SET_MULTI) drive->mult_count = drive->mult_req; if (custom == 0 || tf->command == ATA_CMD_IDLEIMMEDIATE || tf->command == ATA_CMD_CHK_POWER) { struct request *rq = hwif->rq; if (blk_pm_request(rq)) ide_complete_pm_rq(drive, rq); else ide_finish_cmd(drive, cmd, stat); } return ide_stopped; } static u8 wait_drive_not_busy(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; int retries; u8 stat; /* * Last sector was transferred, wait until device is ready. This can * take up to 6 ms on some ATAPI devices, so we will wait max 10 ms. 
*/ for (retries = 0; retries < 1000; retries++) { stat = hwif->tp_ops->read_status(hwif); if (stat & ATA_BUSY) udelay(10); else break; } if (stat & ATA_BUSY) pr_err("%s: drive still BUSY!\n", drive->name); return stat; } void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd, unsigned int write, unsigned int len) { ide_hwif_t *hwif = drive->hwif; struct scatterlist *sg = hwif->sg_table; struct scatterlist *cursg = cmd->cursg; unsigned long uninitialized_var(flags); struct page *page; unsigned int offset; u8 *buf; cursg = cmd->cursg; if (cursg == NULL) cursg = cmd->cursg = sg; while (len) { unsigned nr_bytes = min(len, cursg->length - cmd->cursg_ofs); int page_is_high; if (nr_bytes > PAGE_SIZE) nr_bytes = PAGE_SIZE; page = sg_page(cursg); offset = cursg->offset + cmd->cursg_ofs; /* get the current page and offset */ page = nth_page(page, (offset >> PAGE_SHIFT)); offset %= PAGE_SIZE; page_is_high = PageHighMem(page); if (page_is_high) local_irq_save(flags); buf = kmap_atomic(page, KM_BIO_SRC_IRQ) + offset; cmd->nleft -= nr_bytes; cmd->cursg_ofs += nr_bytes; if (cmd->cursg_ofs == cursg->length) { cursg = cmd->cursg = sg_next(cmd->cursg); cmd->cursg_ofs = 0; } /* do the actual data transfer */ if (write) hwif->tp_ops->output_data(drive, cmd, buf, nr_bytes); else hwif->tp_ops->input_data(drive, cmd, buf, nr_bytes); kunmap_atomic(buf, KM_BIO_SRC_IRQ); if (page_is_high) local_irq_restore(flags); len -= nr_bytes; } } EXPORT_SYMBOL_GPL(ide_pio_bytes); static void ide_pio_datablock(ide_drive_t *drive, struct ide_cmd *cmd, unsigned int write) { unsigned int nr_bytes; u8 saved_io_32bit = drive->io_32bit; if (cmd->tf_flags & IDE_TFLAG_FS) cmd->rq->errors = 0; if (cmd->tf_flags & IDE_TFLAG_IO_16BIT) drive->io_32bit = 0; touch_softlockup_watchdog(); if (cmd->tf_flags & IDE_TFLAG_MULTI_PIO) nr_bytes = min_t(unsigned, cmd->nleft, drive->mult_count << 9); else nr_bytes = SECTOR_SIZE; ide_pio_bytes(drive, cmd, write, nr_bytes); drive->io_32bit = saved_io_32bit; } static void 
ide_error_cmd(ide_drive_t *drive, struct ide_cmd *cmd) { if (cmd->tf_flags & IDE_TFLAG_FS) { int nr_bytes = cmd->nbytes - cmd->nleft; if (cmd->protocol == ATA_PROT_PIO && ((cmd->tf_flags & IDE_TFLAG_WRITE) || cmd->nleft == 0)) { if (cmd->tf_flags & IDE_TFLAG_MULTI_PIO) nr_bytes -= drive->mult_count << 9; else nr_bytes -= SECTOR_SIZE; } if (nr_bytes > 0) ide_complete_rq(drive, 0, nr_bytes); } } void ide_finish_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat) { struct request *rq = drive->hwif->rq; u8 err = ide_read_error(drive), nsect = cmd->tf.nsect; u8 set_xfer = !!(cmd->tf_flags & IDE_TFLAG_SET_XFER); ide_complete_cmd(drive, cmd, stat, err); rq->errors = err; if (err == 0 && set_xfer) { ide_set_xfer_rate(drive, nsect); ide_driveid_update(drive); } ide_complete_rq(drive, err ? -EIO : 0, blk_rq_bytes(rq)); } /* * Handler for command with PIO data phase. */ static ide_startstop_t task_pio_intr(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; struct ide_cmd *cmd = &drive->hwif->cmd; u8 stat = hwif->tp_ops->read_status(hwif); u8 write = !!(cmd->tf_flags & IDE_TFLAG_WRITE); if (write == 0) { /* Error? */ if (stat & ATA_ERR) goto out_err; /* Didn't want any data? Odd. */ if ((stat & ATA_DRQ) == 0) { /* Command all done? */ if (OK_STAT(stat, ATA_DRDY, ATA_BUSY)) goto out_end; /* Assume it was a spurious irq */ goto out_wait; } } else { if (!OK_STAT(stat, DRIVE_READY, drive->bad_wstat)) goto out_err; /* Deal with unexpected ATA data phase. */ if (((stat & ATA_DRQ) == 0) ^ (cmd->nleft == 0)) goto out_err; } if (write && cmd->nleft == 0) goto out_end; /* Still data left to transfer. */ ide_pio_datablock(drive, cmd, write); /* Are we done? Check status and finish transfer. */ if (write == 0 && cmd->nleft == 0) { stat = wait_drive_not_busy(drive); if (!OK_STAT(stat, 0, BAD_STAT)) goto out_err; goto out_end; } out_wait: /* Still data left to transfer. 
*/ ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE); return ide_started; out_end: if ((cmd->tf_flags & IDE_TFLAG_FS) == 0) ide_finish_cmd(drive, cmd, stat); else ide_complete_rq(drive, 0, blk_rq_sectors(cmd->rq) << 9); return ide_stopped; out_err: ide_error_cmd(drive, cmd); return ide_error(drive, __func__, stat); } static ide_startstop_t pre_task_out_intr(ide_drive_t *drive, struct ide_cmd *cmd) { ide_startstop_t startstop; if (ide_wait_stat(&startstop, drive, ATA_DRQ, drive->bad_wstat, WAIT_DRQ)) { pr_err("%s: no DRQ after issuing %sWRITE%s\n", drive->name, (cmd->tf_flags & IDE_TFLAG_MULTI_PIO) ? "MULT" : "", (drive->dev_flags & IDE_DFLAG_LBA48) ? "_EXT" : ""); return startstop; } if ((drive->dev_flags & IDE_DFLAG_UNMASK) == 0) local_irq_disable(); ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE); ide_pio_datablock(drive, cmd, 1); return ide_started; } int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf, u16 nsect) { struct request *rq; int error; int rw = !(cmd->tf_flags & IDE_TFLAG_WRITE) ? READ : WRITE; rq = blk_get_request(drive->queue, rw, __GFP_WAIT); rq->cmd_type = REQ_TYPE_ATA_TASKFILE; /* * (ks) We transfer currently only whole sectors. * This is suffient for now. But, it would be great, * if we would find a solution to transfer any size. * To support special commands like READ LONG. 
*/ if (nsect) { error = blk_rq_map_kern(drive->queue, rq, buf, nsect * SECTOR_SIZE, __GFP_WAIT); if (error) goto put_req; } rq->special = cmd; cmd->rq = rq; error = blk_execute_rq(drive->queue, NULL, rq, 0); put_req: blk_put_request(rq); return error; } EXPORT_SYMBOL(ide_raw_taskfile); int ide_no_data_taskfile(ide_drive_t *drive, struct ide_cmd *cmd) { cmd->protocol = ATA_PROT_NODATA; return ide_raw_taskfile(drive, cmd, NULL, 0); } EXPORT_SYMBOL_GPL(ide_no_data_taskfile); #ifdef CONFIG_IDE_TASK_IOCTL int ide_taskfile_ioctl(ide_drive_t *drive, unsigned long arg) { ide_task_request_t *req_task; struct ide_cmd cmd; u8 *outbuf = NULL; u8 *inbuf = NULL; u8 *data_buf = NULL; int err = 0; int tasksize = sizeof(struct ide_task_request_s); unsigned int taskin = 0; unsigned int taskout = 0; u16 nsect = 0; char __user *buf = (char __user *)arg; req_task = memdup_user(buf, tasksize); if (IS_ERR(req_task)) return PTR_ERR(req_task); taskout = req_task->out_size; taskin = req_task->in_size; if (taskin > 65536 || taskout > 65536) { err = -EINVAL; goto abort; } if (taskout) { int outtotal = tasksize; outbuf = kzalloc(taskout, GFP_KERNEL); if (outbuf == NULL) { err = -ENOMEM; goto abort; } if (copy_from_user(outbuf, buf + outtotal, taskout)) { err = -EFAULT; goto abort; } } if (taskin) { int intotal = tasksize + taskout; inbuf = kzalloc(taskin, GFP_KERNEL); if (inbuf == NULL) { err = -ENOMEM; goto abort; } if (copy_from_user(inbuf, buf + intotal, taskin)) { err = -EFAULT; goto abort; } } memset(&cmd, 0, sizeof(cmd)); memcpy(&cmd.hob, req_task->hob_ports, HDIO_DRIVE_HOB_HDR_SIZE - 2); memcpy(&cmd.tf, req_task->io_ports, HDIO_DRIVE_TASK_HDR_SIZE); cmd.valid.out.tf = IDE_VALID_DEVICE; cmd.valid.in.tf = IDE_VALID_DEVICE | IDE_VALID_IN_TF; cmd.tf_flags = IDE_TFLAG_IO_16BIT; if (drive->dev_flags & IDE_DFLAG_LBA48) { cmd.tf_flags |= IDE_TFLAG_LBA48; cmd.valid.in.hob = IDE_VALID_IN_HOB; } if (req_task->out_flags.all) { cmd.ftf_flags |= IDE_FTFLAG_FLAGGED; if (req_task->out_flags.b.data) 
cmd.ftf_flags |= IDE_FTFLAG_OUT_DATA; if (req_task->out_flags.b.nsector_hob) cmd.valid.out.hob |= IDE_VALID_NSECT; if (req_task->out_flags.b.sector_hob) cmd.valid.out.hob |= IDE_VALID_LBAL; if (req_task->out_flags.b.lcyl_hob) cmd.valid.out.hob |= IDE_VALID_LBAM; if (req_task->out_flags.b.hcyl_hob) cmd.valid.out.hob |= IDE_VALID_LBAH; if (req_task->out_flags.b.error_feature) cmd.valid.out.tf |= IDE_VALID_FEATURE; if (req_task->out_flags.b.nsector) cmd.valid.out.tf |= IDE_VALID_NSECT; if (req_task->out_flags.b.sector) cmd.valid.out.tf |= IDE_VALID_LBAL; if (req_task->out_flags.b.lcyl) cmd.valid.out.tf |= IDE_VALID_LBAM; if (req_task->out_flags.b.hcyl) cmd.valid.out.tf |= IDE_VALID_LBAH; } else { cmd.valid.out.tf |= IDE_VALID_OUT_TF; if (cmd.tf_flags & IDE_TFLAG_LBA48) cmd.valid.out.hob |= IDE_VALID_OUT_HOB; } if (req_task->in_flags.b.data) cmd.ftf_flags |= IDE_FTFLAG_IN_DATA; if (req_task->req_cmd == IDE_DRIVE_TASK_RAW_WRITE) { /* fixup data phase if needed */ if (req_task->data_phase == TASKFILE_IN_DMAQ || req_task->data_phase == TASKFILE_IN_DMA) cmd.tf_flags |= IDE_TFLAG_WRITE; } cmd.protocol = ATA_PROT_DMA; switch (req_task->data_phase) { case TASKFILE_MULTI_OUT: if (!drive->mult_count) { /* (hs): give up if multcount is not set */ pr_err("%s: %s Multimode Write multcount is not set\n", drive->name, __func__); err = -EPERM; goto abort; } cmd.tf_flags |= IDE_TFLAG_MULTI_PIO; /* fall through */ case TASKFILE_OUT: cmd.protocol = ATA_PROT_PIO; /* fall through */ case TASKFILE_OUT_DMAQ: case TASKFILE_OUT_DMA: cmd.tf_flags |= IDE_TFLAG_WRITE; nsect = taskout / SECTOR_SIZE; data_buf = outbuf; break; case TASKFILE_MULTI_IN: if (!drive->mult_count) { /* (hs): give up if multcount is not set */ pr_err("%s: %s Multimode Read multcount is not set\n", drive->name, __func__); err = -EPERM; goto abort; } cmd.tf_flags |= IDE_TFLAG_MULTI_PIO; /* fall through */ case TASKFILE_IN: cmd.protocol = ATA_PROT_PIO; /* fall through */ case TASKFILE_IN_DMAQ: case TASKFILE_IN_DMA: nsect = 
taskin / SECTOR_SIZE; data_buf = inbuf; break; case TASKFILE_NO_DATA: cmd.protocol = ATA_PROT_NODATA; break; default: err = -EFAULT; goto abort; } if (req_task->req_cmd == IDE_DRIVE_TASK_NO_DATA) nsect = 0; else if (!nsect) { nsect = (cmd.hob.nsect << 8) | cmd.tf.nsect; if (!nsect) { pr_err("%s: in/out command without data\n", drive->name); err = -EFAULT; goto abort; } } err = ide_raw_taskfile(drive, &cmd, data_buf, nsect); memcpy(req_task->hob_ports, &cmd.hob, HDIO_DRIVE_HOB_HDR_SIZE - 2); memcpy(req_task->io_ports, &cmd.tf, HDIO_DRIVE_TASK_HDR_SIZE); if ((cmd.ftf_flags & IDE_FTFLAG_SET_IN_FLAGS) && req_task->in_flags.all == 0) { req_task->in_flags.all = IDE_TASKFILE_STD_IN_FLAGS; if (drive->dev_flags & IDE_DFLAG_LBA48) req_task->in_flags.all |= (IDE_HOB_STD_IN_FLAGS << 8); } if (copy_to_user(buf, req_task, tasksize)) { err = -EFAULT; goto abort; } if (taskout) { int outtotal = tasksize; if (copy_to_user(buf + outtotal, outbuf, taskout)) { err = -EFAULT; goto abort; } } if (taskin) { int intotal = tasksize + taskout; if (copy_to_user(buf + intotal, inbuf, taskin)) { err = -EFAULT; goto abort; } } abort: kfree(req_task); kfree(outbuf); kfree(inbuf); return err; } #endif
gpl-2.0
Michael-Pizzileo/lichee-3.0.8-leaked
arch/um/os-Linux/irq.c
4711
2948
/* * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) * Licensed under the GPL */ #include <stdlib.h> #include <errno.h> #include <poll.h> #include <signal.h> #include <string.h> #include "irq_user.h" #include "kern_constants.h" #include "os.h" #include "process.h" #include "um_malloc.h" #include "user.h" /* * Locked by irq_lock in arch/um/kernel/irq.c. Changed by os_create_pollfd * and os_free_irq_by_cb, which are called under irq_lock. */ static struct pollfd *pollfds = NULL; static int pollfds_num = 0; static int pollfds_size = 0; int os_waiting_for_events(struct irq_fd *active_fds) { struct irq_fd *irq_fd; int i, n, err; n = poll(pollfds, pollfds_num, 0); if (n < 0) { err = -errno; if (errno != EINTR) printk(UM_KERN_ERR "os_waiting_for_events:" " poll returned %d, errno = %d\n", n, errno); return err; } if (n == 0) return 0; irq_fd = active_fds; for (i = 0; i < pollfds_num; i++) { if (pollfds[i].revents != 0) { irq_fd->current_events = pollfds[i].revents; pollfds[i].fd = -1; } irq_fd = irq_fd->next; } return n; } int os_create_pollfd(int fd, int events, void *tmp_pfd, int size_tmpfds) { if (pollfds_num == pollfds_size) { if (size_tmpfds <= pollfds_size * sizeof(pollfds[0])) { /* return min size needed for new pollfds area */ return (pollfds_size + 1) * sizeof(pollfds[0]); } if (pollfds != NULL) { memcpy(tmp_pfd, pollfds, sizeof(pollfds[0]) * pollfds_size); /* remove old pollfds */ kfree(pollfds); } pollfds = tmp_pfd; pollfds_size++; } else kfree(tmp_pfd); /* remove not used tmp_pfd */ pollfds[pollfds_num] = ((struct pollfd) { .fd = fd, .events = events, .revents = 0 }); pollfds_num++; return 0; } void os_free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg, struct irq_fd *active_fds, struct irq_fd ***last_irq_ptr2) { struct irq_fd **prev; int i = 0; prev = &active_fds; while (*prev != NULL) { if ((*test)(*prev, arg)) { struct irq_fd *old_fd = *prev; if ((pollfds[i].fd != -1) && (pollfds[i].fd != (*prev)->fd)) { printk(UM_KERN_ERR 
"os_free_irq_by_cb - " "mismatch between active_fds and " "pollfds, fd %d vs %d\n", (*prev)->fd, pollfds[i].fd); goto out; } pollfds_num--; /* * This moves the *whole* array after pollfds[i] * (though it doesn't spot as such)! */ memmove(&pollfds[i], &pollfds[i + 1], (pollfds_num - i) * sizeof(pollfds[0])); if (*last_irq_ptr2 == &old_fd->next) *last_irq_ptr2 = prev; *prev = (*prev)->next; if (old_fd->type == IRQ_WRITE) ignore_sigio_fd(old_fd->fd); kfree(old_fd); continue; } prev = &(*prev)->next; i++; } out: return; } int os_get_pollfd(int i) { return pollfds[i].fd; } void os_set_pollfd(int i, int fd) { pollfds[i].fd = fd; } void os_set_ioignore(void) { signal(SIGIO, SIG_IGN); }
gpl-2.0
h2o64/android_kernel_motorola_msm8226
drivers/staging/iio/cdc/ad7746.c
4967
19796
/* * AD7746 capacitive sensor driver supporting AD7745, AD7746 and AD7747 * * Copyright 2011 Analog Devices Inc. * * Licensed under the GPL-2. */ #include <linux/interrupt.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/sysfs.h> #include <linux/i2c.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/stat.h> #include "../iio.h" #include "../sysfs.h" #include "ad7746.h" /* * AD7746 Register Definition */ #define AD7746_REG_STATUS 0 #define AD7746_REG_CAP_DATA_HIGH 1 #define AD7746_REG_CAP_DATA_MID 2 #define AD7746_REG_CAP_DATA_LOW 3 #define AD7746_REG_VT_DATA_HIGH 4 #define AD7746_REG_VT_DATA_MID 5 #define AD7746_REG_VT_DATA_LOW 6 #define AD7746_REG_CAP_SETUP 7 #define AD7746_REG_VT_SETUP 8 #define AD7746_REG_EXC_SETUP 9 #define AD7746_REG_CFG 10 #define AD7746_REG_CAPDACA 11 #define AD7746_REG_CAPDACB 12 #define AD7746_REG_CAP_OFFH 13 #define AD7746_REG_CAP_OFFL 14 #define AD7746_REG_CAP_GAINH 15 #define AD7746_REG_CAP_GAINL 16 #define AD7746_REG_VOLT_GAINH 17 #define AD7746_REG_VOLT_GAINL 18 /* Status Register Bit Designations (AD7746_REG_STATUS) */ #define AD7746_STATUS_EXCERR (1 << 3) #define AD7746_STATUS_RDY (1 << 2) #define AD7746_STATUS_RDYVT (1 << 1) #define AD7746_STATUS_RDYCAP (1 << 0) /* Capacitive Channel Setup Register Bit Designations (AD7746_REG_CAP_SETUP) */ #define AD7746_CAPSETUP_CAPEN (1 << 7) #define AD7746_CAPSETUP_CIN2 (1 << 6) /* AD7746 only */ #define AD7746_CAPSETUP_CAPDIFF (1 << 5) #define AD7746_CAPSETUP_CACHOP (1 << 0) /* Voltage/Temperature Setup Register Bit Designations (AD7746_REG_VT_SETUP) */ #define AD7746_VTSETUP_VTEN (1 << 7) #define AD7746_VTSETUP_VTMD_INT_TEMP (0 << 5) #define AD7746_VTSETUP_VTMD_EXT_TEMP (1 << 5) #define AD7746_VTSETUP_VTMD_VDD_MON (2 << 5) #define AD7746_VTSETUP_VTMD_EXT_VIN (3 << 5) #define AD7746_VTSETUP_EXTREF (1 << 4) #define AD7746_VTSETUP_VTSHORT (1 << 1) #define AD7746_VTSETUP_VTCHOP (1 << 0) /* Excitation Setup Register Bit Designations 
(AD7746_REG_EXC_SETUP) */ #define AD7746_EXCSETUP_CLKCTRL (1 << 7) #define AD7746_EXCSETUP_EXCON (1 << 6) #define AD7746_EXCSETUP_EXCB (1 << 5) #define AD7746_EXCSETUP_NEXCB (1 << 4) #define AD7746_EXCSETUP_EXCA (1 << 3) #define AD7746_EXCSETUP_NEXCA (1 << 2) #define AD7746_EXCSETUP_EXCLVL(x) (((x) & 0x3) << 0) /* Config Register Bit Designations (AD7746_REG_CFG) */ #define AD7746_CONF_VTFS(x) ((x) << 6) #define AD7746_CONF_CAPFS(x) ((x) << 3) #define AD7746_CONF_MODE_IDLE (0 << 0) #define AD7746_CONF_MODE_CONT_CONV (1 << 0) #define AD7746_CONF_MODE_SINGLE_CONV (2 << 0) #define AD7746_CONF_MODE_PWRDN (3 << 0) #define AD7746_CONF_MODE_OFFS_CAL (5 << 0) #define AD7746_CONF_MODE_GAIN_CAL (6 << 0) /* CAPDAC Register Bit Designations (AD7746_REG_CAPDACx) */ #define AD7746_CAPDAC_DACEN (1 << 7) #define AD7746_CAPDAC_DACP(x) ((x) & 0x7F) /* * struct ad7746_chip_info - chip specifc information */ struct ad7746_chip_info { struct i2c_client *client; /* * Capacitive channel digital filter setup; * conversion time/update rate setup per channel */ u8 config; u8 cap_setup; u8 vt_setup; u8 capdac[2][2]; s8 capdac_set; }; enum ad7746_chan { VIN, VIN_VDD, TEMP_INT, TEMP_EXT, CIN1, CIN1_DIFF, CIN2, CIN2_DIFF, }; static const struct iio_chan_spec ad7746_channels[] = { [VIN] = { .type = IIO_VOLTAGE, .indexed = 1, .channel = 0, .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT, .address = AD7746_REG_VT_DATA_HIGH << 8 | AD7746_VTSETUP_VTMD_EXT_VIN, }, [VIN_VDD] = { .type = IIO_VOLTAGE, .indexed = 1, .channel = 1, .extend_name = "supply", .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT, .address = AD7746_REG_VT_DATA_HIGH << 8 | AD7746_VTSETUP_VTMD_VDD_MON, }, [TEMP_INT] = { .type = IIO_TEMP, .indexed = 1, .channel = 0, .processed_val = IIO_PROCESSED, .address = AD7746_REG_VT_DATA_HIGH << 8 | AD7746_VTSETUP_VTMD_INT_TEMP, }, [TEMP_EXT] = { .type = IIO_TEMP, .indexed = 1, .channel = 1, .processed_val = IIO_PROCESSED, .address = AD7746_REG_VT_DATA_HIGH << 8 | AD7746_VTSETUP_VTMD_EXT_TEMP, }, [CIN1] 
= { .type = IIO_CAPACITANCE, .indexed = 1, .channel = 0, .info_mask = IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT | IIO_CHAN_INFO_CALIBBIAS_SHARED_BIT | IIO_CHAN_INFO_OFFSET_SEPARATE_BIT | IIO_CHAN_INFO_SCALE_SHARED_BIT, .address = AD7746_REG_CAP_DATA_HIGH << 8, }, [CIN1_DIFF] = { .type = IIO_CAPACITANCE, .differential = 1, .indexed = 1, .channel = 0, .channel2 = 2, .info_mask = IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT | IIO_CHAN_INFO_CALIBBIAS_SHARED_BIT | IIO_CHAN_INFO_OFFSET_SEPARATE_BIT | IIO_CHAN_INFO_SCALE_SHARED_BIT, .address = AD7746_REG_CAP_DATA_HIGH << 8 | AD7746_CAPSETUP_CAPDIFF }, [CIN2] = { .type = IIO_CAPACITANCE, .indexed = 1, .channel = 1, .info_mask = IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT | IIO_CHAN_INFO_CALIBBIAS_SHARED_BIT | IIO_CHAN_INFO_OFFSET_SEPARATE_BIT | IIO_CHAN_INFO_SCALE_SHARED_BIT, .address = AD7746_REG_CAP_DATA_HIGH << 8 | AD7746_CAPSETUP_CIN2, }, [CIN2_DIFF] = { .type = IIO_CAPACITANCE, .differential = 1, .indexed = 1, .channel = 1, .channel2 = 3, .info_mask = IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT | IIO_CHAN_INFO_CALIBBIAS_SHARED_BIT | IIO_CHAN_INFO_OFFSET_SEPARATE_BIT | IIO_CHAN_INFO_SCALE_SHARED_BIT, .address = AD7746_REG_CAP_DATA_HIGH << 8 | AD7746_CAPSETUP_CAPDIFF | AD7746_CAPSETUP_CIN2, } }; /* Values are Update Rate (Hz), Conversion Time (ms) + 1*/ static const unsigned char ad7746_vt_filter_rate_table[][2] = { {50, 20 + 1}, {31, 32 + 1}, {16, 62 + 1}, {8, 122 + 1}, }; static const unsigned char ad7746_cap_filter_rate_table[][2] = { {91, 11 + 1}, {84, 12 + 1}, {50, 20 + 1}, {26, 38 + 1}, {16, 62 + 1}, {13, 77 + 1}, {11, 92 + 1}, {9, 110 + 1}, }; static int ad7746_select_channel(struct iio_dev *indio_dev, struct iio_chan_spec const *chan) { struct ad7746_chip_info *chip = iio_priv(indio_dev); int ret, delay; u8 vt_setup, cap_setup; switch (chan->type) { case IIO_CAPACITANCE: cap_setup = (chan->address & 0xFF) | AD7746_CAPSETUP_CAPEN; vt_setup = chip->vt_setup & ~AD7746_VTSETUP_VTEN; delay = ad7746_cap_filter_rate_table[(chip->config >> 3) 
& 0x7][1]; if (chip->capdac_set != chan->channel) { ret = i2c_smbus_write_byte_data(chip->client, AD7746_REG_CAPDACA, chip->capdac[chan->channel][0]); if (ret < 0) return ret; ret = i2c_smbus_write_byte_data(chip->client, AD7746_REG_CAPDACB, chip->capdac[chan->channel][1]); if (ret < 0) return ret; chip->capdac_set = chan->channel; } break; case IIO_VOLTAGE: case IIO_TEMP: vt_setup = (chan->address & 0xFF) | AD7746_VTSETUP_VTEN; cap_setup = chip->cap_setup & ~AD7746_CAPSETUP_CAPEN; delay = ad7746_cap_filter_rate_table[(chip->config >> 6) & 0x3][1]; break; default: return -EINVAL; } if (chip->cap_setup != cap_setup) { ret = i2c_smbus_write_byte_data(chip->client, AD7746_REG_CAP_SETUP, cap_setup); if (ret < 0) return ret; chip->cap_setup = cap_setup; } if (chip->vt_setup != vt_setup) { ret = i2c_smbus_write_byte_data(chip->client, AD7746_REG_VT_SETUP, vt_setup); if (ret < 0) return ret; chip->vt_setup = vt_setup; } return delay; } static inline ssize_t ad7746_start_calib(struct device *dev, struct device_attribute *attr, const char *buf, size_t len, u8 regval) { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct ad7746_chip_info *chip = iio_priv(indio_dev); bool doit; int ret, timeout = 10; ret = strtobool(buf, &doit); if (ret < 0) return ret; if (!doit) return 0; mutex_lock(&indio_dev->mlock); regval |= chip->config; ret = i2c_smbus_write_byte_data(chip->client, AD7746_REG_CFG, regval); if (ret < 0) { mutex_unlock(&indio_dev->mlock); return ret; } do { msleep(20); ret = i2c_smbus_read_byte_data(chip->client, AD7746_REG_CFG); if (ret < 0) { mutex_unlock(&indio_dev->mlock); return ret; } } while ((ret == regval) && timeout--); mutex_unlock(&indio_dev->mlock); return len; } static ssize_t ad7746_start_offset_calib(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *indio_dev = dev_get_drvdata(dev); int ret = ad7746_select_channel(indio_dev, &ad7746_channels[to_iio_dev_attr(attr)->address]); if (ret < 0) return ret; 
return ad7746_start_calib(dev, attr, buf, len, AD7746_CONF_MODE_OFFS_CAL); } static ssize_t ad7746_start_gain_calib(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *indio_dev = dev_get_drvdata(dev); int ret = ad7746_select_channel(indio_dev, &ad7746_channels[to_iio_dev_attr(attr)->address]); if (ret < 0) return ret; return ad7746_start_calib(dev, attr, buf, len, AD7746_CONF_MODE_GAIN_CAL); } static IIO_DEVICE_ATTR(in_capacitance0_calibbias_calibration, S_IWUSR, NULL, ad7746_start_offset_calib, CIN1); static IIO_DEVICE_ATTR(in_capacitance1_calibbias_calibration, S_IWUSR, NULL, ad7746_start_offset_calib, CIN2); static IIO_DEVICE_ATTR(in_capacitance0_calibscale_calibration, S_IWUSR, NULL, ad7746_start_gain_calib, CIN1); static IIO_DEVICE_ATTR(in_capacitance1_calibscale_calibration, S_IWUSR, NULL, ad7746_start_gain_calib, CIN2); static IIO_DEVICE_ATTR(in_voltage0_calibscale_calibration, S_IWUSR, NULL, ad7746_start_gain_calib, VIN); static ssize_t ad7746_show_cap_filter_rate_setup(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct ad7746_chip_info *chip = iio_priv(indio_dev); return sprintf(buf, "%d\n", ad7746_cap_filter_rate_table[ (chip->config >> 3) & 0x7][0]); } static ssize_t ad7746_store_cap_filter_rate_setup(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct ad7746_chip_info *chip = iio_priv(indio_dev); u8 data; int ret, i; ret = kstrtou8(buf, 10, &data); if (ret < 0) return ret; for (i = 0; i < ARRAY_SIZE(ad7746_cap_filter_rate_table); i++) if (data >= ad7746_cap_filter_rate_table[i][0]) break; if (i >= ARRAY_SIZE(ad7746_cap_filter_rate_table)) i = ARRAY_SIZE(ad7746_cap_filter_rate_table) - 1; mutex_lock(&indio_dev->mlock); chip->config &= ~AD7746_CONF_CAPFS(0x7); chip->config |= AD7746_CONF_CAPFS(i); mutex_unlock(&indio_dev->mlock); return len; } static 
ssize_t ad7746_show_vt_filter_rate_setup(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct ad7746_chip_info *chip = iio_priv(indio_dev); return sprintf(buf, "%d\n", ad7746_vt_filter_rate_table[ (chip->config >> 6) & 0x3][0]); } static ssize_t ad7746_store_vt_filter_rate_setup(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct ad7746_chip_info *chip = iio_priv(indio_dev); u8 data; int ret, i; ret = kstrtou8(buf, 10, &data); if (ret < 0) return ret; for (i = 0; i < ARRAY_SIZE(ad7746_vt_filter_rate_table); i++) if (data >= ad7746_vt_filter_rate_table[i][0]) break; if (i >= ARRAY_SIZE(ad7746_vt_filter_rate_table)) i = ARRAY_SIZE(ad7746_vt_filter_rate_table) - 1; mutex_lock(&indio_dev->mlock); chip->config &= ~AD7746_CONF_VTFS(0x3); chip->config |= AD7746_CONF_VTFS(i); mutex_unlock(&indio_dev->mlock); return len; } static IIO_DEVICE_ATTR(in_capacitance_sampling_frequency, S_IRUGO | S_IWUSR, ad7746_show_cap_filter_rate_setup, ad7746_store_cap_filter_rate_setup, 0); static IIO_DEVICE_ATTR(in_voltage_sampling_frequency, S_IRUGO | S_IWUSR, ad7746_show_vt_filter_rate_setup, ad7746_store_vt_filter_rate_setup, 0); static IIO_CONST_ATTR(in_voltage_sampling_frequency_available, "50 31 16 8"); static IIO_CONST_ATTR(in_capacitance_sampling_frequency_available, "91 84 50 26 16 13 11 9"); static struct attribute *ad7746_attributes[] = { &iio_dev_attr_in_capacitance_sampling_frequency.dev_attr.attr, &iio_dev_attr_in_voltage_sampling_frequency.dev_attr.attr, &iio_dev_attr_in_capacitance0_calibbias_calibration.dev_attr.attr, &iio_dev_attr_in_capacitance0_calibscale_calibration.dev_attr.attr, &iio_dev_attr_in_capacitance1_calibscale_calibration.dev_attr.attr, &iio_dev_attr_in_capacitance1_calibbias_calibration.dev_attr.attr, &iio_dev_attr_in_voltage0_calibscale_calibration.dev_attr.attr, 
&iio_const_attr_in_voltage_sampling_frequency_available.dev_attr.attr, &iio_const_attr_in_capacitance_sampling_frequency_available. dev_attr.attr, NULL, }; static const struct attribute_group ad7746_attribute_group = { .attrs = ad7746_attributes, }; static int ad7746_write_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int val, int val2, long mask) { struct ad7746_chip_info *chip = iio_priv(indio_dev); int ret, reg; mutex_lock(&indio_dev->mlock); switch (mask) { case IIO_CHAN_INFO_CALIBSCALE: if (val != 1) { ret = -EINVAL; goto out; } val = (val2 * 1024) / 15625; switch (chan->type) { case IIO_CAPACITANCE: reg = AD7746_REG_CAP_GAINH; break; case IIO_VOLTAGE: reg = AD7746_REG_VOLT_GAINH; break; default: ret = -EINVAL; goto out; } ret = i2c_smbus_write_word_data(chip->client, reg, swab16(val)); if (ret < 0) goto out; ret = 0; break; case IIO_CHAN_INFO_CALIBBIAS: if ((val < 0) | (val > 0xFFFF)) { ret = -EINVAL; goto out; } ret = i2c_smbus_write_word_data(chip->client, AD7746_REG_CAP_OFFH, swab16(val)); if (ret < 0) goto out; ret = 0; break; case IIO_CHAN_INFO_OFFSET: if ((val < 0) | (val > 43008000)) { /* 21pF */ ret = -EINVAL; goto out; } /* CAPDAC Scale = 21pF_typ / 127 * CIN Scale = 8.192pF / 2^24 * Offset Scale = CAPDAC Scale / CIN Scale = 338646 * */ val /= 338646; chip->capdac[chan->channel][chan->differential] = (val > 0 ? 
AD7746_CAPDAC_DACP(val) | AD7746_CAPDAC_DACEN : 0); ret = i2c_smbus_write_byte_data(chip->client, AD7746_REG_CAPDACA, chip->capdac[chan->channel][0]); if (ret < 0) goto out; ret = i2c_smbus_write_byte_data(chip->client, AD7746_REG_CAPDACB, chip->capdac[chan->channel][1]); if (ret < 0) goto out; chip->capdac_set = chan->channel; ret = 0; break; default: ret = -EINVAL; } out: mutex_unlock(&indio_dev->mlock); return ret; } static int ad7746_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long mask) { struct ad7746_chip_info *chip = iio_priv(indio_dev); int ret, delay; u8 regval, reg; union { u32 d32; u8 d8[4]; } data; mutex_lock(&indio_dev->mlock); switch (mask) { case 0: ret = ad7746_select_channel(indio_dev, chan); if (ret < 0) goto out; delay = ret; regval = chip->config | AD7746_CONF_MODE_SINGLE_CONV; ret = i2c_smbus_write_byte_data(chip->client, AD7746_REG_CFG, regval); if (ret < 0) goto out; msleep(delay); /* Now read the actual register */ ret = i2c_smbus_read_i2c_block_data(chip->client, chan->address >> 8, 3, &data.d8[1]); if (ret < 0) goto out; *val = (be32_to_cpu(data.d32) & 0xFFFFFF) - 0x800000; switch (chan->type) { case IIO_TEMP: /* temperature in milli degrees Celsius * T = ((*val / 2048) - 4096) * 1000 */ *val = (*val * 125) / 256; break; case IIO_VOLTAGE: if (chan->channel == 1) /* supply_raw*/ *val = *val * 6; break; default: break; } ret = IIO_VAL_INT; break; case IIO_CHAN_INFO_CALIBSCALE: switch (chan->type) { case IIO_CAPACITANCE: reg = AD7746_REG_CAP_GAINH; break; case IIO_VOLTAGE: reg = AD7746_REG_VOLT_GAINH; break; default: ret = -EINVAL; goto out; } ret = i2c_smbus_read_word_data(chip->client, reg); if (ret < 0) goto out; /* 1 + gain_val / 2^16 */ *val = 1; *val2 = (15625 * swab16(ret)) / 1024; ret = IIO_VAL_INT_PLUS_MICRO; break; case IIO_CHAN_INFO_CALIBBIAS: ret = i2c_smbus_read_word_data(chip->client, AD7746_REG_CAP_OFFH); if (ret < 0) goto out; *val = swab16(ret); ret = IIO_VAL_INT; break; case 
IIO_CHAN_INFO_OFFSET: *val = AD7746_CAPDAC_DACP(chip->capdac[chan->channel] [chan->differential]) * 338646; ret = IIO_VAL_INT; break; case IIO_CHAN_INFO_SCALE: switch (chan->type) { case IIO_CAPACITANCE: /* 8.192pf / 2^24 */ *val2 = 488; *val = 0; break; case IIO_VOLTAGE: /* 1170mV / 2^23 */ *val2 = 139475; *val = 0; break; default: ret = -EINVAL; goto out; } ret = IIO_VAL_INT_PLUS_NANO; break; default: ret = -EINVAL; }; out: mutex_unlock(&indio_dev->mlock); return ret; } static const struct iio_info ad7746_info = { .attrs = &ad7746_attribute_group, .read_raw = &ad7746_read_raw, .write_raw = &ad7746_write_raw, .driver_module = THIS_MODULE, }; /* * device probe and remove */ static int __devinit ad7746_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct ad7746_platform_data *pdata = client->dev.platform_data; struct ad7746_chip_info *chip; struct iio_dev *indio_dev; int ret = 0; unsigned char regval = 0; indio_dev = iio_allocate_device(sizeof(*chip)); if (indio_dev == NULL) { ret = -ENOMEM; goto error_ret; } chip = iio_priv(indio_dev); /* this is only used for device removal purposes */ i2c_set_clientdata(client, indio_dev); chip->client = client; chip->capdac_set = -1; /* Establish that the iio_dev is a child of the i2c device */ indio_dev->name = id->name; indio_dev->dev.parent = &client->dev; indio_dev->info = &ad7746_info; indio_dev->channels = ad7746_channels; if (id->driver_data == 7746) indio_dev->num_channels = ARRAY_SIZE(ad7746_channels); else indio_dev->num_channels = ARRAY_SIZE(ad7746_channels) - 2; indio_dev->num_channels = ARRAY_SIZE(ad7746_channels); indio_dev->modes = INDIO_DIRECT_MODE; if (pdata) { if (pdata->exca_en) { if (pdata->exca_inv_en) regval |= AD7746_EXCSETUP_NEXCA; else regval |= AD7746_EXCSETUP_EXCA; } if (pdata->excb_en) { if (pdata->excb_inv_en) regval |= AD7746_EXCSETUP_NEXCB; else regval |= AD7746_EXCSETUP_EXCB; } regval |= AD7746_EXCSETUP_EXCLVL(pdata->exclvl); } else { dev_warn(&client->dev, "No platform data? 
using default\n"); regval = AD7746_EXCSETUP_EXCA | AD7746_EXCSETUP_EXCB | AD7746_EXCSETUP_EXCLVL(3); } ret = i2c_smbus_write_byte_data(chip->client, AD7746_REG_EXC_SETUP, regval); if (ret < 0) goto error_free_dev; ret = iio_device_register(indio_dev); if (ret) goto error_free_dev; dev_info(&client->dev, "%s capacitive sensor registered\n", id->name); return 0; error_free_dev: iio_free_device(indio_dev); error_ret: return ret; } static int __devexit ad7746_remove(struct i2c_client *client) { struct iio_dev *indio_dev = i2c_get_clientdata(client); iio_device_unregister(indio_dev); iio_free_device(indio_dev); return 0; } static const struct i2c_device_id ad7746_id[] = { { "ad7745", 7745 }, { "ad7746", 7746 }, { "ad7747", 7747 }, {} }; MODULE_DEVICE_TABLE(i2c, ad7746_id); static struct i2c_driver ad7746_driver = { .driver = { .name = KBUILD_MODNAME, }, .probe = ad7746_probe, .remove = __devexit_p(ad7746_remove), .id_table = ad7746_id, }; module_i2c_driver(ad7746_driver); MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); MODULE_DESCRIPTION("Analog Devices AD7746/5/7 capacitive sensor driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
Biktorgj/Gear_2_Kernel_3.10
arch/x86/kernel/acpi/cstate.c
7271
5734
/* * Copyright (C) 2005 Intel Corporation * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> * - Added _PDC for SMP C-states on Intel CPUs */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/acpi.h> #include <linux/cpu.h> #include <linux/sched.h> #include <acpi/processor.h> #include <asm/acpi.h> #include <asm/mwait.h> #include <asm/special_insns.h> /* * Initialize bm_flags based on the CPU cache properties * On SMP it depends on cache configuration * - When cache is not shared among all CPUs, we flush cache * before entering C3. * - When cache is shared among all CPUs, we use bm_check * mechanism as in UP case * * This routine is called only after all the CPUs are online */ void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags, unsigned int cpu) { struct cpuinfo_x86 *c = &cpu_data(cpu); flags->bm_check = 0; if (num_online_cpus() == 1) flags->bm_check = 1; else if (c->x86_vendor == X86_VENDOR_INTEL) { /* * Today all MP CPUs that support C3 share cache. * And caches should not be flushed by software while * entering C3 type state. */ flags->bm_check = 1; } /* * On all recent Intel platforms, ARB_DISABLE is a nop. 
* So, set bm_control to zero to indicate that ARB_DISABLE * is not required while entering C3 type state on * P4, Core and beyond CPUs */ if (c->x86_vendor == X86_VENDOR_INTEL && (c->x86 > 0xf || (c->x86 == 6 && c->x86_model >= 0x0f))) flags->bm_control = 0; } EXPORT_SYMBOL(acpi_processor_power_init_bm_check); /* The code below handles cstate entry with monitor-mwait pair on Intel*/ struct cstate_entry { struct { unsigned int eax; unsigned int ecx; } states[ACPI_PROCESSOR_MAX_POWER]; }; static struct cstate_entry __percpu *cpu_cstate_entry; /* per CPU ptr */ static short mwait_supported[ACPI_PROCESSOR_MAX_POWER]; #define NATIVE_CSTATE_BEYOND_HALT (2) static long acpi_processor_ffh_cstate_probe_cpu(void *_cx) { struct acpi_processor_cx *cx = _cx; long retval; unsigned int eax, ebx, ecx, edx; unsigned int edx_part; unsigned int cstate_type; /* C-state type and not ACPI C-state type */ unsigned int num_cstate_subtype; cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx); /* Check whether this particular cx_type (in CST) is supported or not */ cstate_type = ((cx->address >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1; edx_part = edx >> (cstate_type * MWAIT_SUBSTATE_SIZE); num_cstate_subtype = edx_part & MWAIT_SUBSTATE_MASK; retval = 0; if (num_cstate_subtype < (cx->address & MWAIT_SUBSTATE_MASK)) { retval = -1; goto out; } /* mwait ecx extensions INTERRUPT_BREAK should be supported for C2/C3 */ if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) || !(ecx & CPUID5_ECX_INTERRUPT_BREAK)) { retval = -1; goto out; } if (!mwait_supported[cstate_type]) { mwait_supported[cstate_type] = 1; printk(KERN_DEBUG "Monitor-Mwait will be used to enter C-%d " "state\n", cx->type); } snprintf(cx->desc, ACPI_CX_DESC_LEN, "ACPI FFH INTEL MWAIT 0x%x", cx->address); out: return retval; } int acpi_processor_ffh_cstate_probe(unsigned int cpu, struct acpi_processor_cx *cx, struct acpi_power_register *reg) { struct cstate_entry *percpu_entry; struct cpuinfo_x86 *c = &cpu_data(cpu); long retval; if 
(!cpu_cstate_entry || c->cpuid_level < CPUID_MWAIT_LEAF) return -1; if (reg->bit_offset != NATIVE_CSTATE_BEYOND_HALT) return -1; percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu); percpu_entry->states[cx->index].eax = 0; percpu_entry->states[cx->index].ecx = 0; /* Make sure we are running on right CPU */ retval = work_on_cpu(cpu, acpi_processor_ffh_cstate_probe_cpu, cx); if (retval == 0) { /* Use the hint in CST */ percpu_entry->states[cx->index].eax = cx->address; percpu_entry->states[cx->index].ecx = MWAIT_ECX_INTERRUPT_BREAK; } /* * For _CST FFH on Intel, if GAS.access_size bit 1 is cleared, * then we should skip checking BM_STS for this C-state. * ref: "Intel Processor Vendor-Specific ACPI Interface Specification" */ if ((c->x86_vendor == X86_VENDOR_INTEL) && !(reg->access_size & 0x2)) cx->bm_sts_skip = 1; return retval; } EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe); /* * This uses new MONITOR/MWAIT instructions on P4 processors with PNI, * which can obviate IPI to trigger checking of need_resched. * We execute MONITOR against need_resched and enter optimized wait state * through MWAIT. Whenever someone changes need_resched, we would be woken * up from MWAIT (without an IPI). * * New with Core Duo processors, MWAIT can take some hints based on CPU * capability. 
*/ void mwait_idle_with_hints(unsigned long ax, unsigned long cx) { if (!need_resched()) { if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR)) clflush((void *)&current_thread_info()->flags); __monitor((void *)&current_thread_info()->flags, 0, 0); smp_mb(); if (!need_resched()) __mwait(ax, cx); } } void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx) { unsigned int cpu = smp_processor_id(); struct cstate_entry *percpu_entry; percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu); mwait_idle_with_hints(percpu_entry->states[cx->index].eax, percpu_entry->states[cx->index].ecx); } EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_enter); static int __init ffh_cstate_init(void) { struct cpuinfo_x86 *c = &boot_cpu_data; if (c->x86_vendor != X86_VENDOR_INTEL) return -1; cpu_cstate_entry = alloc_percpu(struct cstate_entry); return 0; } static void __exit ffh_cstate_exit(void) { free_percpu(cpu_cstate_entry); cpu_cstate_entry = NULL; } arch_initcall(ffh_cstate_init); __exitcall(ffh_cstate_exit);
gpl-2.0
asuradaimao/linux
drivers/pcmcia/pxa2xx_cm_x255.c
9831
3080
/* * linux/drivers/pcmcia/pxa/pxa_cm_x255.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Compulab Ltd., 2003, 2007, 2008 * Mike Rapoport <mike@compulab.co.il> * */ #include <linux/platform_device.h> #include <linux/irq.h> #include <linux/delay.h> #include <linux/gpio.h> #include <linux/export.h> #include "soc_common.h" #define GPIO_PCMCIA_SKTSEL (54) #define GPIO_PCMCIA_S0_CD_VALID (16) #define GPIO_PCMCIA_S1_CD_VALID (17) #define GPIO_PCMCIA_S0_RDYINT (6) #define GPIO_PCMCIA_S1_RDYINT (8) #define GPIO_PCMCIA_RESET (9) static int cmx255_pcmcia_hw_init(struct soc_pcmcia_socket *skt) { int ret = gpio_request(GPIO_PCMCIA_RESET, "PCCard reset"); if (ret) return ret; gpio_direction_output(GPIO_PCMCIA_RESET, 0); if (skt->nr == 0) { skt->stat[SOC_STAT_CD].gpio = GPIO_PCMCIA_S0_CD_VALID; skt->stat[SOC_STAT_CD].name = "PCMCIA0 CD"; skt->stat[SOC_STAT_RDY].gpio = GPIO_PCMCIA_S0_RDYINT; skt->stat[SOC_STAT_RDY].name = "PCMCIA0 RDY"; } else { skt->stat[SOC_STAT_CD].gpio = GPIO_PCMCIA_S1_CD_VALID; skt->stat[SOC_STAT_CD].name = "PCMCIA1 CD"; skt->stat[SOC_STAT_RDY].gpio = GPIO_PCMCIA_S1_RDYINT; skt->stat[SOC_STAT_RDY].name = "PCMCIA1 RDY"; } return 0; } static void cmx255_pcmcia_shutdown(struct soc_pcmcia_socket *skt) { gpio_free(GPIO_PCMCIA_RESET); } static void cmx255_pcmcia_socket_state(struct soc_pcmcia_socket *skt, struct pcmcia_state *state) { state->vs_3v = 0; state->vs_Xv = 0; } static int cmx255_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state) { switch (skt->nr) { case 0: if (state->flags & SS_RESET) { gpio_set_value(GPIO_PCMCIA_SKTSEL, 0); udelay(1); gpio_set_value(GPIO_PCMCIA_RESET, 1); udelay(10); gpio_set_value(GPIO_PCMCIA_RESET, 0); } break; case 1: if (state->flags & SS_RESET) { gpio_set_value(GPIO_PCMCIA_SKTSEL, 1); udelay(1); gpio_set_value(GPIO_PCMCIA_RESET, 1); udelay(10); 
gpio_set_value(GPIO_PCMCIA_RESET, 0); } break; } return 0; } static struct pcmcia_low_level cmx255_pcmcia_ops __initdata = { .owner = THIS_MODULE, .hw_init = cmx255_pcmcia_hw_init, .hw_shutdown = cmx255_pcmcia_shutdown, .socket_state = cmx255_pcmcia_socket_state, .configure_socket = cmx255_pcmcia_configure_socket, .nr = 1, }; static struct platform_device *cmx255_pcmcia_device; int __init cmx255_pcmcia_init(void) { int ret; cmx255_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1); if (!cmx255_pcmcia_device) return -ENOMEM; ret = platform_device_add_data(cmx255_pcmcia_device, &cmx255_pcmcia_ops, sizeof(cmx255_pcmcia_ops)); if (ret == 0) { printk(KERN_INFO "Registering cm-x255 PCMCIA interface.\n"); ret = platform_device_add(cmx255_pcmcia_device); } if (ret) platform_device_put(cmx255_pcmcia_device); return ret; } void __exit cmx255_pcmcia_exit(void) { platform_device_unregister(cmx255_pcmcia_device); }
gpl-2.0
crazy-canux/linux
drivers/acpi/acpica/nsload.c
360
9630
/****************************************************************************** * * Module Name: nsload - namespace loading/expanding/contracting procedures * *****************************************************************************/ /* * Copyright (C) 2000 - 2015, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acnamesp.h" #include "acdispat.h" #include "actables.h" #define _COMPONENT ACPI_NAMESPACE ACPI_MODULE_NAME("nsload") /* Local prototypes */ #ifdef ACPI_FUTURE_IMPLEMENTATION acpi_status acpi_ns_unload_namespace(acpi_handle handle); static acpi_status acpi_ns_delete_subtree(acpi_handle start_handle); #endif #ifndef ACPI_NO_METHOD_EXECUTION /******************************************************************************* * * FUNCTION: acpi_ns_load_table * * PARAMETERS: table_index - Index for table to be loaded * node - Owning NS node * * RETURN: Status * * DESCRIPTION: Load one ACPI table into the namespace * ******************************************************************************/ acpi_status acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node) { acpi_status status; ACPI_FUNCTION_TRACE(ns_load_table); /* * Parse the table and load the namespace with all named * objects found within. Control methods are NOT parsed * at this time. In fact, the control methods cannot be * parsed until the entire namespace is loaded, because * if a control method makes a forward reference (call) * to another control method, we can't continue parsing * because we don't know how many arguments to parse next! 
*/ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* If table already loaded into namespace, just return */ if (acpi_tb_is_table_loaded(table_index)) { status = AE_ALREADY_EXISTS; goto unlock; } ACPI_DEBUG_PRINT((ACPI_DB_INFO, "**** Loading table into namespace ****\n")); status = acpi_tb_allocate_owner_id(table_index); if (ACPI_FAILURE(status)) { goto unlock; } status = acpi_ns_parse_table(table_index, node); if (ACPI_SUCCESS(status)) { acpi_tb_set_table_loaded_flag(table_index, TRUE); } else { /* * On error, delete any namespace objects created by this table. * We cannot initialize these objects, so delete them. There are * a couple of expecially bad cases: * AE_ALREADY_EXISTS - namespace collision. * AE_NOT_FOUND - the target of a Scope operator does not * exist. This target of Scope must already exist in the * namespace, as per the ACPI specification. */ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); acpi_ns_delete_namespace_by_owner(acpi_gbl_root_table_list. tables[table_index].owner_id); acpi_tb_release_owner_id(table_index); return_ACPI_STATUS(status); } unlock: (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* * Now we can parse the control methods. We always parse * them here for a sanity check, and if configured for * just-in-time parsing, we delete the control method * parse trees. */ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "**** Begin Table Object Initialization\n")); status = acpi_ds_initialize_objects(table_index, node); ACPI_DEBUG_PRINT((ACPI_DB_INFO, "**** Completed Table Object Initialization\n")); return_ACPI_STATUS(status); } #ifdef ACPI_OBSOLETE_FUNCTIONS /******************************************************************************* * * FUNCTION: acpi_load_namespace * * PARAMETERS: None * * RETURN: Status * * DESCRIPTION: Load the name space from what ever is pointed to by DSDT. * (DSDT points to either the BIOS or a buffer.) 
* ******************************************************************************/ acpi_status acpi_ns_load_namespace(void) { acpi_status status; ACPI_FUNCTION_TRACE(acpi_load_name_space); /* There must be at least a DSDT installed */ if (acpi_gbl_DSDT == NULL) { ACPI_ERROR((AE_INFO, "DSDT is not in memory")); return_ACPI_STATUS(AE_NO_ACPI_TABLES); } /* * Load the namespace. The DSDT is required, * but the SSDT and PSDT tables are optional. */ status = acpi_ns_load_table_by_type(ACPI_TABLE_ID_DSDT); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Ignore exceptions from these */ (void)acpi_ns_load_table_by_type(ACPI_TABLE_ID_SSDT); (void)acpi_ns_load_table_by_type(ACPI_TABLE_ID_PSDT); ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "ACPI Namespace successfully loaded at root %p\n", acpi_gbl_root_node)); return_ACPI_STATUS(status); } #endif #ifdef ACPI_FUTURE_IMPLEMENTATION /******************************************************************************* * * FUNCTION: acpi_ns_delete_subtree * * PARAMETERS: start_handle - Handle in namespace where search begins * * RETURNS Status * * DESCRIPTION: Walks the namespace starting at the given handle and deletes * all objects, entries, and scopes in the entire subtree. * * Namespace/Interpreter should be locked or the subsystem should * be in shutdown before this routine is called. * ******************************************************************************/ static acpi_status acpi_ns_delete_subtree(acpi_handle start_handle) { acpi_status status; acpi_handle child_handle; acpi_handle parent_handle; acpi_handle next_child_handle; acpi_handle dummy; u32 level; ACPI_FUNCTION_TRACE(ns_delete_subtree); parent_handle = start_handle; child_handle = NULL; level = 1; /* * Traverse the tree of objects until we bubble back up * to where we started. 
*/ while (level > 0) { /* Attempt to get the next object in this scope */ status = acpi_get_next_object(ACPI_TYPE_ANY, parent_handle, child_handle, &next_child_handle); child_handle = next_child_handle; /* Did we get a new object? */ if (ACPI_SUCCESS(status)) { /* Check if this object has any children */ if (ACPI_SUCCESS (acpi_get_next_object (ACPI_TYPE_ANY, child_handle, NULL, &dummy))) { /* * There is at least one child of this object, * visit the object */ level++; parent_handle = child_handle; child_handle = NULL; } } else { /* * No more children in this object, go back up to * the object's parent */ level--; /* Delete all children now */ acpi_ns_delete_children(child_handle); child_handle = parent_handle; status = acpi_get_parent(parent_handle, &parent_handle); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } } } /* Now delete the starting object, and we are done */ acpi_ns_remove_node(child_handle); return_ACPI_STATUS(AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ns_unload_name_space * * PARAMETERS: handle - Root of namespace subtree to be deleted * * RETURN: Status * * DESCRIPTION: Shrinks the namespace, typically in response to an undocking * event. Deletes an entire subtree starting from (and * including) the given handle. * ******************************************************************************/ acpi_status acpi_ns_unload_namespace(acpi_handle handle) { acpi_status status; ACPI_FUNCTION_TRACE(ns_unload_name_space); /* Parameter validation */ if (!acpi_gbl_root_node) { return_ACPI_STATUS(AE_NO_NAMESPACE); } if (!handle) { return_ACPI_STATUS(AE_BAD_PARAMETER); } /* This function does the real work */ status = acpi_ns_delete_subtree(handle); return_ACPI_STATUS(status); } #endif #endif
gpl-2.0
jingr1/Linux-4.2.1-for-OK6410
drivers/input/serio/hyperv-keyboard.c
1384
10969
/* * Copyright (c) 2013, Microsoft Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include <linux/init.h> #include <linux/module.h> #include <linux/device.h> #include <linux/completion.h> #include <linux/hyperv.h> #include <linux/serio.h> #include <linux/slab.h> /* * Current version 1.0 * */ #define SYNTH_KBD_VERSION_MAJOR 1 #define SYNTH_KBD_VERSION_MINOR 0 #define SYNTH_KBD_VERSION (SYNTH_KBD_VERSION_MINOR | \ (SYNTH_KBD_VERSION_MAJOR << 16)) /* * Message types in the synthetic input protocol */ enum synth_kbd_msg_type { SYNTH_KBD_PROTOCOL_REQUEST = 1, SYNTH_KBD_PROTOCOL_RESPONSE = 2, SYNTH_KBD_EVENT = 3, SYNTH_KBD_LED_INDICATORS = 4, }; /* * Basic message structures. 
*/ struct synth_kbd_msg_hdr { __le32 type; }; struct synth_kbd_msg { struct synth_kbd_msg_hdr header; char data[]; /* Enclosed message */ }; union synth_kbd_version { __le32 version; }; /* * Protocol messages */ struct synth_kbd_protocol_request { struct synth_kbd_msg_hdr header; union synth_kbd_version version_requested; }; #define PROTOCOL_ACCEPTED BIT(0) struct synth_kbd_protocol_response { struct synth_kbd_msg_hdr header; __le32 proto_status; }; #define IS_UNICODE BIT(0) #define IS_BREAK BIT(1) #define IS_E0 BIT(2) #define IS_E1 BIT(3) struct synth_kbd_keystroke { struct synth_kbd_msg_hdr header; __le16 make_code; __le16 reserved0; __le32 info; /* Additional information */ }; #define HK_MAXIMUM_MESSAGE_SIZE 256 #define KBD_VSC_SEND_RING_BUFFER_SIZE (10 * PAGE_SIZE) #define KBD_VSC_RECV_RING_BUFFER_SIZE (10 * PAGE_SIZE) #define XTKBD_EMUL0 0xe0 #define XTKBD_EMUL1 0xe1 #define XTKBD_RELEASE 0x80 /* * Represents a keyboard device */ struct hv_kbd_dev { struct hv_device *hv_dev; struct serio *hv_serio; struct synth_kbd_protocol_request protocol_req; struct synth_kbd_protocol_response protocol_resp; /* Synchronize the request/response if needed */ struct completion wait_event; spinlock_t lock; /* protects 'started' field */ bool started; }; static void hv_kbd_on_receive(struct hv_device *hv_dev, struct synth_kbd_msg *msg, u32 msg_length) { struct hv_kbd_dev *kbd_dev = hv_get_drvdata(hv_dev); struct synth_kbd_keystroke *ks_msg; unsigned long flags; u32 msg_type = __le32_to_cpu(msg->header.type); u32 info; u16 scan_code; switch (msg_type) { case SYNTH_KBD_PROTOCOL_RESPONSE: /* * Validate the information provided by the host. * If the host is giving us a bogus packet, * drop the packet (hoping the problem * goes away). 
*/ if (msg_length < sizeof(struct synth_kbd_protocol_response)) { dev_err(&hv_dev->device, "Illegal protocol response packet (len: %d)\n", msg_length); break; } memcpy(&kbd_dev->protocol_resp, msg, sizeof(struct synth_kbd_protocol_response)); complete(&kbd_dev->wait_event); break; case SYNTH_KBD_EVENT: /* * Validate the information provided by the host. * If the host is giving us a bogus packet, * drop the packet (hoping the problem * goes away). */ if (msg_length < sizeof(struct synth_kbd_keystroke)) { dev_err(&hv_dev->device, "Illegal keyboard event packet (len: %d)\n", msg_length); break; } ks_msg = (struct synth_kbd_keystroke *)msg; info = __le32_to_cpu(ks_msg->info); /* * Inject the information through the serio interrupt. */ spin_lock_irqsave(&kbd_dev->lock, flags); if (kbd_dev->started) { if (info & IS_E0) serio_interrupt(kbd_dev->hv_serio, XTKBD_EMUL0, 0); if (info & IS_E1) serio_interrupt(kbd_dev->hv_serio, XTKBD_EMUL1, 0); scan_code = __le16_to_cpu(ks_msg->make_code); if (info & IS_BREAK) scan_code |= XTKBD_RELEASE; serio_interrupt(kbd_dev->hv_serio, scan_code, 0); } spin_unlock_irqrestore(&kbd_dev->lock, flags); /* * Only trigger a wakeup on key down, otherwise * "echo freeze > /sys/power/state" can't really enter the * state because the Enter-UP can trigger a wakeup at once. */ if (!(info & IS_BREAK)) pm_wakeup_event(&hv_dev->device, 0); break; default: dev_err(&hv_dev->device, "unhandled message type %d\n", msg_type); } } static void hv_kbd_handle_received_packet(struct hv_device *hv_dev, struct vmpacket_descriptor *desc, u32 bytes_recvd, u64 req_id) { struct synth_kbd_msg *msg; u32 msg_sz; switch (desc->type) { case VM_PKT_COMP: break; case VM_PKT_DATA_INBAND: /* * We have a packet that has "inband" data. The API used * for retrieving the packet guarantees that the complete * packet is read. So, minimally, we should be able to * parse the payload header safely (assuming that the host * can be trusted. 
Trusting the host seems to be a * reasonable assumption because in a virtualized * environment there is not whole lot you can do if you * don't trust the host. * * Nonetheless, let us validate if the host can be trusted * (in a trivial way). The interesting aspect of this * validation is how do you recover if we discover that the * host is not to be trusted? Simply dropping the packet, I * don't think is an appropriate recovery. In the interest * of failing fast, it may be better to crash the guest. * For now, I will just drop the packet! */ msg_sz = bytes_recvd - (desc->offset8 << 3); if (msg_sz <= sizeof(struct synth_kbd_msg_hdr)) { /* * Drop the packet and hope * the problem magically goes away. */ dev_err(&hv_dev->device, "Illegal packet (type: %d, tid: %llx, size: %d)\n", desc->type, req_id, msg_sz); break; } msg = (void *)desc + (desc->offset8 << 3); hv_kbd_on_receive(hv_dev, msg, msg_sz); break; default: dev_err(&hv_dev->device, "unhandled packet type %d, tid %llx len %d\n", desc->type, req_id, bytes_recvd); break; } } static void hv_kbd_on_channel_callback(void *context) { struct hv_device *hv_dev = context; void *buffer; int bufferlen = 0x100; /* Start with sensible size */ u32 bytes_recvd; u64 req_id; int error; buffer = kmalloc(bufferlen, GFP_ATOMIC); if (!buffer) return; while (1) { error = vmbus_recvpacket_raw(hv_dev->channel, buffer, bufferlen, &bytes_recvd, &req_id); switch (error) { case 0: if (bytes_recvd == 0) { kfree(buffer); return; } hv_kbd_handle_received_packet(hv_dev, buffer, bytes_recvd, req_id); break; case -ENOBUFS: kfree(buffer); /* Handle large packet */ bufferlen = bytes_recvd; buffer = kmalloc(bytes_recvd, GFP_ATOMIC); if (!buffer) return; break; } } } static int hv_kbd_connect_to_vsp(struct hv_device *hv_dev) { struct hv_kbd_dev *kbd_dev = hv_get_drvdata(hv_dev); struct synth_kbd_protocol_request *request; struct synth_kbd_protocol_response *response; u32 proto_status; int error; request = &kbd_dev->protocol_req; memset(request, 0, 
sizeof(struct synth_kbd_protocol_request)); request->header.type = __cpu_to_le32(SYNTH_KBD_PROTOCOL_REQUEST); request->version_requested.version = __cpu_to_le32(SYNTH_KBD_VERSION); error = vmbus_sendpacket(hv_dev->channel, request, sizeof(struct synth_kbd_protocol_request), (unsigned long)request, VM_PKT_DATA_INBAND, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); if (error) return error; if (!wait_for_completion_timeout(&kbd_dev->wait_event, 10 * HZ)) return -ETIMEDOUT; response = &kbd_dev->protocol_resp; proto_status = __le32_to_cpu(response->proto_status); if (!(proto_status & PROTOCOL_ACCEPTED)) { dev_err(&hv_dev->device, "synth_kbd protocol request failed (version %d)\n", SYNTH_KBD_VERSION); return -ENODEV; } return 0; } static int hv_kbd_start(struct serio *serio) { struct hv_kbd_dev *kbd_dev = serio->port_data; unsigned long flags; spin_lock_irqsave(&kbd_dev->lock, flags); kbd_dev->started = true; spin_unlock_irqrestore(&kbd_dev->lock, flags); return 0; } static void hv_kbd_stop(struct serio *serio) { struct hv_kbd_dev *kbd_dev = serio->port_data; unsigned long flags; spin_lock_irqsave(&kbd_dev->lock, flags); kbd_dev->started = false; spin_unlock_irqrestore(&kbd_dev->lock, flags); } static int hv_kbd_probe(struct hv_device *hv_dev, const struct hv_vmbus_device_id *dev_id) { struct hv_kbd_dev *kbd_dev; struct serio *hv_serio; int error; kbd_dev = kzalloc(sizeof(struct hv_kbd_dev), GFP_KERNEL); hv_serio = kzalloc(sizeof(struct serio), GFP_KERNEL); if (!kbd_dev || !hv_serio) { error = -ENOMEM; goto err_free_mem; } kbd_dev->hv_dev = hv_dev; kbd_dev->hv_serio = hv_serio; spin_lock_init(&kbd_dev->lock); init_completion(&kbd_dev->wait_event); hv_set_drvdata(hv_dev, kbd_dev); hv_serio->dev.parent = &hv_dev->device; hv_serio->id.type = SERIO_8042_XL; hv_serio->port_data = kbd_dev; strlcpy(hv_serio->name, dev_name(&hv_dev->device), sizeof(hv_serio->name)); strlcpy(hv_serio->phys, dev_name(&hv_dev->device), sizeof(hv_serio->phys)); hv_serio->start = hv_kbd_start; 
hv_serio->stop = hv_kbd_stop; error = vmbus_open(hv_dev->channel, KBD_VSC_SEND_RING_BUFFER_SIZE, KBD_VSC_RECV_RING_BUFFER_SIZE, NULL, 0, hv_kbd_on_channel_callback, hv_dev); if (error) goto err_free_mem; error = hv_kbd_connect_to_vsp(hv_dev); if (error) goto err_close_vmbus; serio_register_port(kbd_dev->hv_serio); device_init_wakeup(&hv_dev->device, true); return 0; err_close_vmbus: vmbus_close(hv_dev->channel); err_free_mem: kfree(hv_serio); kfree(kbd_dev); return error; } static int hv_kbd_remove(struct hv_device *hv_dev) { struct hv_kbd_dev *kbd_dev = hv_get_drvdata(hv_dev); device_init_wakeup(&hv_dev->device, false); serio_unregister_port(kbd_dev->hv_serio); vmbus_close(hv_dev->channel); kfree(kbd_dev); hv_set_drvdata(hv_dev, NULL); return 0; } /* * Keyboard GUID * {f912ad6d-2b17-48ea-bd65-f927a61c7684} */ #define HV_KBD_GUID \ .guid = { \ 0x6d, 0xad, 0x12, 0xf9, 0x17, 0x2b, 0xea, 0x48, \ 0xbd, 0x65, 0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84 \ } static const struct hv_vmbus_device_id id_table[] = { /* Keyboard guid */ { HV_KBD_GUID, }, { }, }; MODULE_DEVICE_TABLE(vmbus, id_table); static struct hv_driver hv_kbd_drv = { .name = KBUILD_MODNAME, .id_table = id_table, .probe = hv_kbd_probe, .remove = hv_kbd_remove, }; static int __init hv_kbd_init(void) { return vmbus_driver_register(&hv_kbd_drv); } static void __exit hv_kbd_exit(void) { vmbus_driver_unregister(&hv_kbd_drv); } MODULE_LICENSE("GPL"); module_init(hv_kbd_init); module_exit(hv_kbd_exit);
gpl-2.0
Euphoria-OS-Devices/android_kernel_lge_msm8974
drivers/gpu/ion/msm/ion_cp_common.c
1640
8423
/* * Copyright (C) 2011 Google, Inc * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/memory_alloc.h> #include <linux/slab.h> #include <linux/types.h> #include <mach/scm.h> #include <linux/highmem.h> #include "../ion_priv.h" #include "ion_cp_common.h" #define MEM_PROTECT_LOCK_ID 0x05 #define MEM_PROTECT_LOCK_ID2 0x0A struct cp2_mem_chunks { unsigned int *chunk_list; unsigned int chunk_list_size; unsigned int chunk_size; } __attribute__ ((__packed__)); struct cp2_lock2_req { struct cp2_mem_chunks chunks; unsigned int mem_usage; unsigned int lock; unsigned int flags; } __attribute__ ((__packed__)); /* SCM related code for locking down memory for content protection */ #define SCM_CP_LOCK_CMD_ID 0x1 #define SCM_CP_PROTECT 0x1 #define SCM_CP_UNPROTECT 0x0 struct cp_lock_msg { unsigned int start; unsigned int end; unsigned int permission_type; unsigned char lock; } __attribute__ ((__packed__)); static int ion_cp_protect_mem_v1(unsigned int phy_base, unsigned int size, unsigned int permission_type) { struct cp_lock_msg cmd; cmd.start = phy_base; cmd.end = phy_base + size; cmd.permission_type = permission_type; cmd.lock = SCM_CP_PROTECT; return scm_call(SCM_SVC_MP, SCM_CP_LOCK_CMD_ID, &cmd, sizeof(cmd), NULL, 0); } static int ion_cp_unprotect_mem_v1(unsigned int phy_base, unsigned int size, unsigned int permission_type) { struct cp_lock_msg cmd; cmd.start = phy_base; cmd.end = phy_base + size; cmd.permission_type = permission_type; cmd.lock = SCM_CP_UNPROTECT; return scm_call(SCM_SVC_MP, 
SCM_CP_LOCK_CMD_ID, &cmd, sizeof(cmd), NULL, 0); } #define V2_CHUNK_SIZE SZ_1M static int ion_cp_change_mem_v2(unsigned int phy_base, unsigned int size, void *data, int lock) { enum cp_mem_usage usage = (enum cp_mem_usage) data; unsigned long *chunk_list; int nchunks; int ret; int i; int chunk_list_len; phys_addr_t chunk_list_phys; if (usage < 0 || usage >= MAX_USAGE) return -EINVAL; if (!IS_ALIGNED(size, V2_CHUNK_SIZE)) { pr_err("%s: heap size is not aligned to %x\n", __func__, V2_CHUNK_SIZE); return -EINVAL; } nchunks = size / V2_CHUNK_SIZE; chunk_list_len = sizeof(unsigned long)*nchunks; chunk_list = kmalloc(chunk_list_len, GFP_KERNEL); if (!chunk_list) return -ENOMEM; chunk_list_phys = virt_to_phys(chunk_list); for (i = 0; i < nchunks; i++) chunk_list[i] = phy_base + i * V2_CHUNK_SIZE; /* * Flush the chunk list before sending the memory to the * secure environment to ensure the data is actually present * in RAM */ dmac_flush_range(chunk_list, chunk_list + chunk_list_len); outer_flush_range(chunk_list_phys, chunk_list_phys + chunk_list_len); ret = ion_cp_change_chunks_state(chunk_list_phys, nchunks, V2_CHUNK_SIZE, usage, lock); kfree(chunk_list); return ret; } int ion_cp_protect_mem(unsigned int phy_base, unsigned int size, unsigned int permission_type, int version, void *data) { switch (version) { case ION_CP_V1: return ion_cp_protect_mem_v1(phy_base, size, permission_type); case ION_CP_V2: return ion_cp_change_mem_v2(phy_base, size, data, SCM_CP_PROTECT); default: return -EINVAL; } } int ion_cp_unprotect_mem(unsigned int phy_base, unsigned int size, unsigned int permission_type, int version, void *data) { switch (version) { case ION_CP_V1: return ion_cp_unprotect_mem_v1(phy_base, size, permission_type); case ION_CP_V2: return ion_cp_change_mem_v2(phy_base, size, data, SCM_CP_UNPROTECT); default: return -EINVAL; } } int ion_cp_change_chunks_state(unsigned long chunks, unsigned int nchunks, unsigned int chunk_size, enum cp_mem_usage usage, int lock) { struct 
cp2_lock2_req request; u32 resp; request.mem_usage = usage; request.lock = lock; request.flags = 0; request.chunks.chunk_list = (unsigned int *)chunks; request.chunks.chunk_list_size = nchunks; request.chunks.chunk_size = chunk_size; kmap_flush_unused(); kmap_atomic_flush_unused(); return scm_call(SCM_SVC_MP, MEM_PROTECT_LOCK_ID2, &request, sizeof(request), &resp, sizeof(resp)); } /* Must be protected by ion_cp_buffer lock */ static int __ion_cp_protect_buffer(struct ion_buffer *buffer, int version, void *data, int flags) { struct ion_cp_buffer *buf = buffer->priv_virt; int ret_value = 0; if (atomic_inc_return(&buf->secure_cnt) == 1) { ret_value = ion_cp_protect_mem(buf->buffer, buffer->size, 0, version, data); if (ret_value) { pr_err("Failed to secure buffer %p, error %d\n", buffer, ret_value); atomic_dec(&buf->secure_cnt); } else { pr_debug("Protected buffer %p from %pa (size %x)\n", buffer, &buf->buffer, buffer->size); buf->want_delayed_unsecure |= flags & ION_UNSECURE_DELAYED ? 1 : 0; buf->data = data; buf->version = version; } } pr_debug("buffer %p protect count %d\n", buffer, atomic_read(&buf->secure_cnt)); BUG_ON(atomic_read(&buf->secure_cnt) < 0); return ret_value; } /* Must be protected by ion_cp_buffer lock */ static int __ion_cp_unprotect_buffer(struct ion_buffer *buffer, int version, void *data, int force_unsecure) { struct ion_cp_buffer *buf = buffer->priv_virt; int ret_value = 0; if (force_unsecure) { if (!buf->is_secure || atomic_read(&buf->secure_cnt) == 0) return 0; if (atomic_read(&buf->secure_cnt) != 1) { WARN(1, "Forcing unsecure of buffer with outstanding secure count %d!\n", atomic_read(&buf->secure_cnt)); atomic_set(&buf->secure_cnt, 1); } } if (atomic_dec_and_test(&buf->secure_cnt)) { ret_value = ion_cp_unprotect_mem( buf->buffer, buffer->size, 0, version, data); if (ret_value) { pr_err("Failed to unsecure buffer %p, error %d\n", buffer, ret_value); /* * If the force unsecure is happening, the buffer * is being destroyed. 
We failed to unsecure the * buffer even though the memory is given back. * Just die now rather than discovering later what * happens when trying to use the secured memory as * unsecured... */ BUG_ON(force_unsecure); /* Bump the count back up one to try again later */ atomic_inc(&buf->secure_cnt); } else { buf->version = -1; buf->data = NULL; } } pr_debug("buffer %p unprotect count %d\n", buffer, atomic_read(&buf->secure_cnt)); BUG_ON(atomic_read(&buf->secure_cnt) < 0); return ret_value; } int ion_cp_secure_buffer(struct ion_buffer *buffer, int version, void *data, int flags) { int ret_value; struct ion_cp_buffer *buf = buffer->priv_virt; mutex_lock(&buf->lock); if (!buf->is_secure) { pr_err("%s: buffer %p was not allocated as secure\n", __func__, buffer); ret_value = -EINVAL; goto out_unlock; } if (ION_IS_CACHED(buffer->flags)) { pr_err("%s: buffer %p was allocated as cached\n", __func__, buffer); ret_value = -EINVAL; goto out_unlock; } if (atomic_read(&buf->map_cnt)) { pr_err("%s: cannot secure buffer %p with outstanding mappings. Total count: %d", __func__, buffer, atomic_read(&buf->map_cnt)); ret_value = -EINVAL; goto out_unlock; } if (atomic_read(&buf->secure_cnt) && !buf->ignore_check) { if (buf->version != version || buf->data != data) { pr_err("%s: Trying to re-secure buffer with different values", __func__); pr_err("Last secured version: %d Currrent %d\n", buf->version, version); pr_err("Last secured data: %p current %p\n", buf->data, data); ret_value = -EINVAL; goto out_unlock; } } ret_value = __ion_cp_protect_buffer(buffer, version, data, flags); out_unlock: mutex_unlock(&buf->lock); return ret_value; } int ion_cp_unsecure_buffer(struct ion_buffer *buffer, int force_unsecure) { int ret_value = 0; struct ion_cp_buffer *buf = buffer->priv_virt; mutex_lock(&buf->lock); ret_value = __ion_cp_unprotect_buffer(buffer, buf->version, buf->data, force_unsecure); mutex_unlock(&buf->lock); return ret_value; }
gpl-2.0
Xanwar/android_kernel_asus_a400cg
drivers/staging/speakup/speakup_decpc.c
2152
15043
/* * This is the DECtalk PC speakup driver * * Some constants from DEC's DOS driver: * Copyright (c) by Digital Equipment Corp. * * 386BSD DECtalk PC driver: * Copyright (c) 1996 Brian Buhrow <buhrow@lothlorien.nfbcal.org> * * Linux DECtalk PC driver: * Copyright (c) 1997 Nicolas Pitre <nico@cam.org> * * speakup DECtalk PC Internal driver: * Copyright (c) 2003 David Borowski <david575@golden.net> * * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/jiffies.h> #include <linux/sched.h> #include <linux/timer.h> #include <linux/kthread.h> #include "spk_priv.h" #include "speakup.h" #define MODULE_init 0x0dec /* module in boot code */ #define MODULE_self_test 0x8800 /* module in self-test */ #define MODULE_reset 0xffff /* reinit the whole module */ #define MODE_mask 0xf000 /* mode bits in high nibble */ #define MODE_null 0x0000 #define MODE_test 0x2000 /* in testing mode */ #define MODE_status 0x8000 #define STAT_int 0x0001 /* running in interrupt mode */ #define STAT_tr_char 0x0002 /* character data to transmit */ #define STAT_rr_char 0x0004 /* ready to receive char data */ #define STAT_cmd_ready 0x0008 /* ready to accept commands */ #define STAT_dma_ready 0x0010 /* dma command ready */ #define STAT_digitized 0x0020 /* spc in digitized mode */ #define STAT_new_index 0x0040 /* new last index ready */ #define STAT_new_status 0x0080 /* new status posted */ #define STAT_dma_state 0x0100 /* dma state toggle */ #define STAT_index_valid 0x0200 /* indexs are valid */ #define STAT_flushing 0x0400 /* flush in progress */ #define STAT_self_test 0x0800 /* module in self test */ #define MODE_ready 0xc000 /* module ready for next phase */ #define READY_boot 0x0000 #define READY_kernel 0x0001 #define MODE_error 0xf000 #define CMD_mask 0xf000 /* mask for command nibble */ #define CMD_null 0x0000 /* post status */ #define CMD_control 0x1000 /* hard control command */ #define CTRL_mask 0x0F00 /* mask off control nibble */ #define CTRL_data 0x00FF /* mask to get data byte */ #define CTRL_null 0x0000 /* null control */ #define CTRL_vol_up 0x0100 /* increase volume */ #define CTRL_vol_down 0x0200 /* decrease volume */ #define CTRL_vol_set 0x0300 /* set volume */ #define CTRL_pause 0x0400 /* pause spc */ #define CTRL_resume 0x0500 /* resume spc clock */ #define CTRL_resume_spc 0x0001 /* resume spc soft pause */ #define CTRL_flush 0x0600 /* flush all buffers */ #define CTRL_int_enable 0x0700 /* enable status change 
ints */ #define CTRL_buff_free 0x0800 /* buffer remain count */ #define CTRL_buff_used 0x0900 /* buffer in use */ #define CTRL_speech 0x0a00 /* immediate speech change */ #define CTRL_SP_voice 0x0001 /* voice change */ #define CTRL_SP_rate 0x0002 /* rate change */ #define CTRL_SP_comma 0x0003 /* comma pause change */ #define CTRL_SP_period 0x0004 /* period pause change */ #define CTRL_SP_rate_delta 0x0005 /* delta rate change */ #define CTRL_SP_get_param 0x0006 /* return the desired parameter */ #define CTRL_last_index 0x0b00 /* get last index spoken */ #define CTRL_io_priority 0x0c00 /* change i/o priority */ #define CTRL_free_mem 0x0d00 /* get free paragraphs on module */ #define CTRL_get_lang 0x0e00 /* return bit mask of loaded * languages */ #define CMD_test 0x2000 /* self-test request */ #define TEST_mask 0x0F00 /* isolate test field */ #define TEST_null 0x0000 /* no test requested */ #define TEST_isa_int 0x0100 /* assert isa irq */ #define TEST_echo 0x0200 /* make data in == data out */ #define TEST_seg 0x0300 /* set peek/poke segment */ #define TEST_off 0x0400 /* set peek/poke offset */ #define TEST_peek 0x0500 /* data out == *peek */ #define TEST_poke 0x0600 /* *peek == data in */ #define TEST_sub_code 0x00FF /* user defined test sub codes */ #define CMD_id 0x3000 /* return software id */ #define ID_null 0x0000 /* null id */ #define ID_kernel 0x0100 /* kernel code executing */ #define ID_boot 0x0200 /* boot code executing */ #define CMD_dma 0x4000 /* force a dma start */ #define CMD_reset 0x5000 /* reset module status */ #define CMD_sync 0x6000 /* kernel sync command */ #define CMD_char_in 0x7000 /* single character send */ #define CMD_char_out 0x8000 /* single character get */ #define CHAR_count_1 0x0100 /* one char in cmd_low */ #define CHAR_count_2 0x0200 /* the second in data_low */ #define CHAR_count_3 0x0300 /* the third in data_high */ #define CMD_spc_mode 0x9000 /* change spc mode */ #define CMD_spc_to_text 0x0100 /* set to text mode */ #define 
CMD_spc_to_digit 0x0200 /* set to digital mode */ #define CMD_spc_rate 0x0400 /* change spc data rate */ #define CMD_error 0xf000 /* severe error */ enum { PRIMARY_DIC = 0, USER_DIC, COMMAND_DIC, ABBREV_DIC }; #define DMA_single_in 0x01 #define DMA_single_out 0x02 #define DMA_buff_in 0x03 #define DMA_buff_out 0x04 #define DMA_control 0x05 #define DT_MEM_ALLOC 0x03 #define DT_SET_DIC 0x04 #define DT_START_TASK 0x05 #define DT_LOAD_MEM 0x06 #define DT_READ_MEM 0x07 #define DT_DIGITAL_IN 0x08 #define DMA_sync 0x06 #define DMA_sync_char 0x07 #define DRV_VERSION "2.12" #define PROCSPEECH 0x0b #define SYNTH_IO_EXTENT 8 static int synth_probe(struct spk_synth *synth); static void dtpc_release(void); static const char *synth_immediate(struct spk_synth *synth, const char *buf); static void do_catch_up(struct spk_synth *synth); static void synth_flush(struct spk_synth *synth); static int synth_portlist[] = { 0x340, 0x350, 0x240, 0x250, 0 }; static int in_escape, is_flushing; static int dt_stat, dma_state; static struct var_t vars[] = { { CAPS_START, .u.s = {"[:dv ap 200]" } }, { CAPS_STOP, .u.s = {"[:dv ap 100]" } }, { RATE, .u.n = {"[:ra %d]", 9, 0, 18, 150, 25, NULL } }, { PITCH, .u.n = {"[:dv ap %d]", 80, 0, 100, 20, 0, NULL } }, { VOL, .u.n = {"[:vo se %d]", 5, 0, 9, 5, 10, NULL } }, { PUNCT, .u.n = {"[:pu %c]", 0, 0, 2, 0, 0, "nsa" } }, { VOICE, .u.n = {"[:n%c]", 0, 0, 9, 0, 0, "phfdburwkv" } }, { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, V_LAST_VAR }; /* * These attributes will appear in /sys/accessibility/speakup/decpc. 
*/ static struct kobj_attribute caps_start_attribute = __ATTR(caps_start, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute caps_stop_attribute = __ATTR(caps_stop, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute pitch_attribute = __ATTR(pitch, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute punct_attribute = __ATTR(punct, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute rate_attribute = __ATTR(rate, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute voice_attribute = __ATTR(voice, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute vol_attribute = __ATTR(vol, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute delay_time_attribute = __ATTR(delay_time, ROOT_W, spk_var_show, spk_var_store); static struct kobj_attribute direct_attribute = __ATTR(direct, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute full_time_attribute = __ATTR(full_time, ROOT_W, spk_var_show, spk_var_store); static struct kobj_attribute jiffy_delta_attribute = __ATTR(jiffy_delta, ROOT_W, spk_var_show, spk_var_store); static struct kobj_attribute trigger_time_attribute = __ATTR(trigger_time, ROOT_W, spk_var_show, spk_var_store); /* * Create a group of attributes so that we can create and destroy them all * at once. 
*/ static struct attribute *synth_attrs[] = { &caps_start_attribute.attr, &caps_stop_attribute.attr, &pitch_attribute.attr, &punct_attribute.attr, &rate_attribute.attr, &voice_attribute.attr, &vol_attribute.attr, &delay_time_attribute.attr, &direct_attribute.attr, &full_time_attribute.attr, &jiffy_delta_attribute.attr, &trigger_time_attribute.attr, NULL, /* need to NULL terminate the list of attributes */ }; static struct spk_synth synth_dec_pc = { .name = "decpc", .version = DRV_VERSION, .long_name = "Dectalk PC", .init = "[:pe -380]", .procspeech = PROCSPEECH, .delay = 500, .trigger = 50, .jiffies = 50, .full = 1000, .flags = SF_DEC, .startup = SYNTH_START, .checkval = SYNTH_CHECK, .vars = vars, .probe = synth_probe, .release = dtpc_release, .synth_immediate = synth_immediate, .catch_up = do_catch_up, .flush = synth_flush, .is_alive = spk_synth_is_alive_nop, .synth_adjust = NULL, .read_buff_add = NULL, .get_index = NULL, .indexing = { .command = NULL, .lowindex = 0, .highindex = 0, .currindex = 0, }, .attributes = { .attrs = synth_attrs, .name = "decpc", }, }; static int dt_getstatus(void) { dt_stat = inb_p(speakup_info.port_tts) | (inb_p(speakup_info.port_tts + 1) << 8); return dt_stat; } static void dt_sendcmd(u_int cmd) { outb_p(cmd & 0xFF, speakup_info.port_tts); outb_p((cmd >> 8) & 0xFF, speakup_info.port_tts+1); } static int dt_waitbit(int bit) { int timeout = 100; while (--timeout > 0) { if ((dt_getstatus() & bit) == bit) return 1; udelay(50); } return 0; } static int dt_wait_dma(void) { int timeout = 100, state = dma_state; if (!dt_waitbit(STAT_dma_ready)) return 0; while (--timeout > 0) { if ((dt_getstatus()&STAT_dma_state) == state) return 1; udelay(50); } dma_state = dt_getstatus() & STAT_dma_state; return 1; } static int dt_ctrl(u_int cmd) { int timeout = 10; if (!dt_waitbit(STAT_cmd_ready)) return -1; outb_p(0, speakup_info.port_tts+2); outb_p(0, speakup_info.port_tts+3); dt_getstatus(); dt_sendcmd(CMD_control|cmd); outb_p(0, 
speakup_info.port_tts+6); while (dt_getstatus() & STAT_cmd_ready) { udelay(20); if (--timeout == 0) break; } dt_sendcmd(CMD_null); return 0; } static void synth_flush(struct spk_synth *synth) { int timeout = 10; if (is_flushing) return; is_flushing = 4; in_escape = 0; while (dt_ctrl(CTRL_flush)) { if (--timeout == 0) break; udelay(50); } for (timeout = 0; timeout < 10; timeout++) { if (dt_waitbit(STAT_dma_ready)) break; udelay(50); } outb_p(DMA_sync, speakup_info.port_tts+4); outb_p(0, speakup_info.port_tts+4); udelay(100); for (timeout = 0; timeout < 10; timeout++) { if (!(dt_getstatus() & STAT_flushing)) break; udelay(50); } dma_state = dt_getstatus() & STAT_dma_state; dma_state ^= STAT_dma_state; is_flushing = 0; } static int dt_sendchar(char ch) { if (!dt_wait_dma()) return -1; if (!(dt_stat & STAT_rr_char)) return -2; outb_p(DMA_single_in, speakup_info.port_tts+4); outb_p(ch, speakup_info.port_tts+4); dma_state ^= STAT_dma_state; return 0; } static int testkernel(void) { int status = 0; if (dt_getstatus() == 0xffff) { status = -1; goto oops; } dt_sendcmd(CMD_sync); if (!dt_waitbit(STAT_cmd_ready)) status = -2; else if (dt_stat&0x8000) return 0; else if (dt_stat == 0x0dec) pr_warn("dec_pc at 0x%x, software not loaded\n", speakup_info.port_tts); status = -3; oops: synth_release_region(speakup_info.port_tts, SYNTH_IO_EXTENT); speakup_info.port_tts = 0; return status; } static void do_catch_up(struct spk_synth *synth) { u_char ch; static u_char last; unsigned long flags; unsigned long jiff_max; struct var_t *jiffy_delta; struct var_t *delay_time; int jiffy_delta_val; int delay_time_val; jiffy_delta = spk_get_var(JIFFY); delay_time = spk_get_var(DELAY); spk_lock(flags); jiffy_delta_val = jiffy_delta->u.n.value; spk_unlock(flags); jiff_max = jiffies + jiffy_delta_val; while (!kthread_should_stop()) { spk_lock(flags); if (speakup_info.flushing) { speakup_info.flushing = 0; spk_unlock(flags); synth->flush(synth); continue; } if (synth_buffer_empty()) { 
spk_unlock(flags); break; } ch = synth_buffer_peek(); set_current_state(TASK_INTERRUPTIBLE); delay_time_val = delay_time->u.n.value; spk_unlock(flags); if (ch == '\n') ch = 0x0D; if (dt_sendchar(ch)) { schedule_timeout(msecs_to_jiffies(delay_time_val)); continue; } set_current_state(TASK_RUNNING); spk_lock(flags); synth_buffer_getc(); spk_unlock(flags); if (ch == '[') in_escape = 1; else if (ch == ']') in_escape = 0; else if (ch <= SPACE) { if (!in_escape && strchr(",.!?;:", last)) dt_sendchar(PROCSPEECH); if (jiffies >= jiff_max) { if (!in_escape) dt_sendchar(PROCSPEECH); spk_lock(flags); jiffy_delta_val = jiffy_delta->u.n.value; delay_time_val = delay_time->u.n.value; spk_unlock(flags); schedule_timeout(msecs_to_jiffies (delay_time_val)); jiff_max = jiffies + jiffy_delta_val; } } last = ch; ch = 0; } if (!in_escape) dt_sendchar(PROCSPEECH); } static const char *synth_immediate(struct spk_synth *synth, const char *buf) { u_char ch; while ((ch = *buf)) { if (ch == '\n') ch = PROCSPEECH; if (dt_sendchar(ch)) return buf; buf++; } return 0; } static int synth_probe(struct spk_synth *synth) { int i = 0, failed = 0; pr_info("Probing for %s.\n", synth->long_name); for (i = 0; synth_portlist[i]; i++) { if (synth_request_region(synth_portlist[i], SYNTH_IO_EXTENT)) { pr_warn("request_region: failed with 0x%x, %d\n", synth_portlist[i], SYNTH_IO_EXTENT); continue; } speakup_info.port_tts = synth_portlist[i]; failed = testkernel(); if (failed == 0) break; } if (failed) { pr_info("%s: not found\n", synth->long_name); return -ENODEV; } pr_info("%s: %03x-%03x, Driver Version %s,\n", synth->long_name, speakup_info.port_tts, speakup_info.port_tts + 7, synth->version); synth->alive = 1; return 0; } static void dtpc_release(void) { if (speakup_info.port_tts) synth_release_region(speakup_info.port_tts, SYNTH_IO_EXTENT); speakup_info.port_tts = 0; } module_param_named(start, synth_dec_pc.startup, short, S_IRUGO); MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded."); 
static int __init decpc_init(void) { return synth_add(&synth_dec_pc); } static void __exit decpc_exit(void) { synth_remove(&synth_dec_pc); } module_init(decpc_init); module_exit(decpc_exit); MODULE_AUTHOR("Kirk Reiser <kirk@braille.uwo.ca>"); MODULE_AUTHOR("David Borowski"); MODULE_DESCRIPTION("Speakup support for DECtalk PC synthesizers"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION);
gpl-2.0
rfalize/endeavoru-suitcasekernel
drivers/media/video/gspca/mars.c
2920
12845
/* * Mars-Semi MR97311A library * Copyright (C) 2005 <bradlch@hotmail.com> * * V4L2 by Jean-Francois Moine <http://moinejf.free.fr> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define MODULE_NAME "mars" #include "gspca.h" #include "jpeg.h" MODULE_AUTHOR("Michel Xhaard <mxhaard@users.sourceforge.net>"); MODULE_DESCRIPTION("GSPCA/Mars USB Camera Driver"); MODULE_LICENSE("GPL"); /* controls */ enum e_ctrl { BRIGHTNESS, COLORS, GAMMA, SHARPNESS, ILLUM_TOP, ILLUM_BOT, NCTRLS /* number of controls */ }; /* specific webcam descriptor */ struct sd { struct gspca_dev gspca_dev; /* !! 
must be the first item */ struct gspca_ctrl ctrls[NCTRLS]; u8 quality; #define QUALITY_MIN 40 #define QUALITY_MAX 70 #define QUALITY_DEF 50 u8 jpeg_hdr[JPEG_HDR_SZ]; }; /* V4L2 controls supported by the driver */ static void setbrightness(struct gspca_dev *gspca_dev); static void setcolors(struct gspca_dev *gspca_dev); static void setgamma(struct gspca_dev *gspca_dev); static void setsharpness(struct gspca_dev *gspca_dev); static int sd_setilluminator1(struct gspca_dev *gspca_dev, __s32 val); static int sd_setilluminator2(struct gspca_dev *gspca_dev, __s32 val); static const struct ctrl sd_ctrls[NCTRLS] = { [BRIGHTNESS] = { { .id = V4L2_CID_BRIGHTNESS, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Brightness", .minimum = 0, .maximum = 30, .step = 1, .default_value = 15, }, .set_control = setbrightness }, [COLORS] = { { .id = V4L2_CID_SATURATION, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Color", .minimum = 1, .maximum = 255, .step = 1, .default_value = 200, }, .set_control = setcolors }, [GAMMA] = { { .id = V4L2_CID_GAMMA, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Gamma", .minimum = 0, .maximum = 3, .step = 1, .default_value = 1, }, .set_control = setgamma }, [SHARPNESS] = { { .id = V4L2_CID_SHARPNESS, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Sharpness", .minimum = 0, .maximum = 2, .step = 1, .default_value = 1, }, .set_control = setsharpness }, [ILLUM_TOP] = { { .id = V4L2_CID_ILLUMINATORS_1, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Top illuminator", .minimum = 0, .maximum = 1, .step = 1, .default_value = 0, .flags = V4L2_CTRL_FLAG_UPDATE, }, .set = sd_setilluminator1 }, [ILLUM_BOT] = { { .id = V4L2_CID_ILLUMINATORS_2, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Bottom illuminator", .minimum = 0, .maximum = 1, .step = 1, .default_value = 0, .flags = V4L2_CTRL_FLAG_UPDATE, }, .set = sd_setilluminator2 }, }; static const struct v4l2_pix_format vga_mode[] = { {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240 * 3 / 8 + 590, .colorspace = 
V4L2_COLORSPACE_JPEG, .priv = 2}, {640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480 * 3 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG, .priv = 1}, }; static const __u8 mi_data[0x20] = { /* 01 02 03 04 05 06 07 08 */ 0x48, 0x22, 0x01, 0x47, 0x10, 0x00, 0x00, 0x00, /* 09 0a 0b 0c 0d 0e 0f 10 */ 0x00, 0x01, 0x30, 0x01, 0x30, 0x01, 0x30, 0x01, /* 11 12 13 14 15 16 17 18 */ 0x30, 0x00, 0x04, 0x00, 0x06, 0x01, 0xe2, 0x02, /* 19 1a 1b 1c 1d 1e 1f 20 */ 0x82, 0x00, 0x20, 0x17, 0x80, 0x08, 0x0c, 0x00 }; /* write <len> bytes from gspca_dev->usb_buf */ static void reg_w(struct gspca_dev *gspca_dev, int len) { int alen, ret; if (gspca_dev->usb_err < 0) return; ret = usb_bulk_msg(gspca_dev->dev, usb_sndbulkpipe(gspca_dev->dev, 4), gspca_dev->usb_buf, len, &alen, 500); /* timeout in milliseconds */ if (ret < 0) { err("reg write [%02x] error %d", gspca_dev->usb_buf[0], ret); gspca_dev->usb_err = ret; } } static void mi_w(struct gspca_dev *gspca_dev, u8 addr, u8 value) { gspca_dev->usb_buf[0] = 0x1f; gspca_dev->usb_buf[1] = 0; /* control byte */ gspca_dev->usb_buf[2] = addr; gspca_dev->usb_buf[3] = value; reg_w(gspca_dev, 4); } static void setbrightness(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; gspca_dev->usb_buf[0] = 0x61; gspca_dev->usb_buf[1] = sd->ctrls[BRIGHTNESS].val; reg_w(gspca_dev, 2); } static void setcolors(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; s16 val; val = sd->ctrls[COLORS].val; gspca_dev->usb_buf[0] = 0x5f; gspca_dev->usb_buf[1] = val << 3; gspca_dev->usb_buf[2] = ((val >> 2) & 0xf8) | 0x04; reg_w(gspca_dev, 3); } static void setgamma(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; gspca_dev->usb_buf[0] = 0x06; gspca_dev->usb_buf[1] = sd->ctrls[GAMMA].val * 0x40; reg_w(gspca_dev, 2); } static void setsharpness(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; gspca_dev->usb_buf[0] = 0x67; gspca_dev->usb_buf[1] = 
sd->ctrls[SHARPNESS].val * 4 + 3; reg_w(gspca_dev, 2); } static void setilluminators(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; gspca_dev->usb_buf[0] = 0x22; if (sd->ctrls[ILLUM_TOP].val) gspca_dev->usb_buf[1] = 0x76; else if (sd->ctrls[ILLUM_BOT].val) gspca_dev->usb_buf[1] = 0x7a; else gspca_dev->usb_buf[1] = 0x7e; reg_w(gspca_dev, 2); } /* this function is called at probe time */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct sd *sd = (struct sd *) gspca_dev; struct cam *cam; cam = &gspca_dev->cam; cam->cam_mode = vga_mode; cam->nmodes = ARRAY_SIZE(vga_mode); cam->ctrls = sd->ctrls; sd->quality = QUALITY_DEF; gspca_dev->nbalt = 9; /* use the altsetting 08 */ return 0; } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { gspca_dev->ctrl_inac = (1 << ILLUM_TOP) | (1 << ILLUM_BOT); return 0; } static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; u8 *data; int i; /* create the JPEG header */ jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, 0x21); /* JPEG 422 */ jpeg_set_qual(sd->jpeg_hdr, sd->quality); data = gspca_dev->usb_buf; data[0] = 0x01; /* address */ data[1] = 0x01; reg_w(gspca_dev, 2); /* Initialize the MR97113 chip register */ data[0] = 0x00; /* address */ data[1] = 0x0c | 0x01; /* reg 0 */ data[2] = 0x01; /* reg 1 */ data[3] = gspca_dev->width / 8; /* h_size , reg 2 */ data[4] = gspca_dev->height / 8; /* v_size , reg 3 */ data[5] = 0x30; /* reg 4, MI, PAS5101 : * 0x30 for 24mhz , 0x28 for 12mhz */ data[6] = 0x02; /* reg 5, H start - was 0x04 */ data[7] = sd->ctrls[GAMMA].val * 0x40; /* reg 0x06: gamma */ data[8] = 0x01; /* reg 7, V start - was 0x03 */ /* if (h_size == 320 ) */ /* data[9]= 0x56; * reg 8, 24MHz, 2:1 scale down */ /* else */ data[9] = 0x52; /* reg 8, 24MHz, no scale down */ /*jfm: from win trace*/ data[10] = 0x18; reg_w(gspca_dev, 11); data[0] = 0x23; /* address */ 
data[1] = 0x09; /* reg 35, append frame header */ reg_w(gspca_dev, 2); data[0] = 0x3c; /* address */ /* if (gspca_dev->width == 1280) */ /* data[1] = 200; * reg 60, pc-cam frame size * (unit: 4KB) 800KB */ /* else */ data[1] = 50; /* 50 reg 60, pc-cam frame size * (unit: 4KB) 200KB */ reg_w(gspca_dev, 2); /* auto dark-gain */ data[0] = 0x5e; /* address */ data[1] = 0; /* reg 94, Y Gain (auto) */ /*jfm: from win trace*/ /* reg 0x5f/0x60 (LE) = saturation */ /* h (60): xxxx x100 * l (5f): xxxx x000 */ data[2] = sd->ctrls[COLORS].val << 3; data[3] = ((sd->ctrls[COLORS].val >> 2) & 0xf8) | 0x04; data[4] = sd->ctrls[BRIGHTNESS].val; /* reg 0x61 = brightness */ data[5] = 0x00; reg_w(gspca_dev, 6); data[0] = 0x67; /*jfm: from win trace*/ data[1] = sd->ctrls[SHARPNESS].val * 4 + 3; data[2] = 0x14; reg_w(gspca_dev, 3); data[0] = 0x69; data[1] = 0x2f; data[2] = 0x28; data[3] = 0x42; reg_w(gspca_dev, 4); data[0] = 0x63; data[1] = 0x07; reg_w(gspca_dev, 2); /*jfm: win trace - many writes here to reg 0x64*/ /* initialize the MI sensor */ for (i = 0; i < sizeof mi_data; i++) mi_w(gspca_dev, i + 1, mi_data[i]); data[0] = 0x00; data[1] = 0x4d; /* ISOC transferring enable... 
*/ reg_w(gspca_dev, 2); gspca_dev->ctrl_inac = 0; /* activate the illuminator controls */ return gspca_dev->usb_err; } static void sd_stopN(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; gspca_dev->ctrl_inac = (1 << ILLUM_TOP) | (1 << ILLUM_BOT); if (sd->ctrls[ILLUM_TOP].val || sd->ctrls[ILLUM_BOT].val) { sd->ctrls[ILLUM_TOP].val = 0; sd->ctrls[ILLUM_BOT].val = 0; setilluminators(gspca_dev); msleep(20); } gspca_dev->usb_buf[0] = 1; gspca_dev->usb_buf[1] = 0; reg_w(gspca_dev, 2); } static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* isoc packet */ int len) /* iso packet length */ { struct sd *sd = (struct sd *) gspca_dev; int p; if (len < 6) { /* gspca_dev->last_packet_type = DISCARD_PACKET; */ return; } for (p = 0; p < len - 6; p++) { if (data[0 + p] == 0xff && data[1 + p] == 0xff && data[2 + p] == 0x00 && data[3 + p] == 0xff && data[4 + p] == 0x96) { if (data[5 + p] == 0x64 || data[5 + p] == 0x65 || data[5 + p] == 0x66 || data[5 + p] == 0x67) { PDEBUG(D_PACK, "sof offset: %d len: %d", p, len); gspca_frame_add(gspca_dev, LAST_PACKET, data, p); /* put the JPEG header */ gspca_frame_add(gspca_dev, FIRST_PACKET, sd->jpeg_hdr, JPEG_HDR_SZ); data += p + 16; len -= p + 16; break; } } } gspca_frame_add(gspca_dev, INTER_PACKET, data, len); } static int sd_setilluminator1(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; /* only one illuminator may be on */ sd->ctrls[ILLUM_TOP].val = val; if (val) sd->ctrls[ILLUM_BOT].val = 0; setilluminators(gspca_dev); return gspca_dev->usb_err; } static int sd_setilluminator2(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; /* only one illuminator may be on */ sd->ctrls[ILLUM_BOT].val = val; if (val) sd->ctrls[ILLUM_TOP].val = 0; setilluminators(gspca_dev); return gspca_dev->usb_err; } static int sd_set_jcomp(struct gspca_dev *gspca_dev, struct v4l2_jpegcompression *jcomp) { struct sd *sd = (struct sd *) gspca_dev; if (jcomp->quality 
< QUALITY_MIN) sd->quality = QUALITY_MIN; else if (jcomp->quality > QUALITY_MAX) sd->quality = QUALITY_MAX; else sd->quality = jcomp->quality; if (gspca_dev->streaming) jpeg_set_qual(sd->jpeg_hdr, sd->quality); return 0; } static int sd_get_jcomp(struct gspca_dev *gspca_dev, struct v4l2_jpegcompression *jcomp) { struct sd *sd = (struct sd *) gspca_dev; memset(jcomp, 0, sizeof *jcomp); jcomp->quality = sd->quality; jcomp->jpeg_markers = V4L2_JPEG_MARKER_DHT | V4L2_JPEG_MARKER_DQT; return 0; } /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .ctrls = sd_ctrls, .nctrls = NCTRLS, .config = sd_config, .init = sd_init, .start = sd_start, .stopN = sd_stopN, .pkt_scan = sd_pkt_scan, .get_jcomp = sd_get_jcomp, .set_jcomp = sd_set_jcomp, }; /* -- module initialisation -- */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x093a, 0x050f)}, {} }; MODULE_DEVICE_TABLE(usb, device_table); /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, #endif }; /* -- module insert / remove -- */ static int __init sd_mod_init(void) { return usb_register(&sd_driver); } static void __exit sd_mod_exit(void) { usb_deregister(&sd_driver); } module_init(sd_mod_init); module_exit(sd_mod_exit);
gpl-2.0
chillstep1998/AK-OnePone
mm/huge_memory.c
3176
64948
/* * Copyright (C) 2009 Red Hat, Inc. * * This work is licensed under the terms of the GNU GPL, version 2. See * the COPYING file in the top-level directory. */ #include <linux/mm.h> #include <linux/sched.h> #include <linux/highmem.h> #include <linux/hugetlb.h> #include <linux/mmu_notifier.h> #include <linux/rmap.h> #include <linux/swap.h> #include <linux/mm_inline.h> #include <linux/kthread.h> #include <linux/khugepaged.h> #include <linux/freezer.h> #include <linux/mman.h> #include <asm/tlb.h> #include <asm/pgalloc.h> #include "internal.h" /* * By default transparent hugepage support is enabled for all mappings * and khugepaged scans all mappings. Defrag is only invoked by * khugepaged hugepage allocations and by page faults inside * MADV_HUGEPAGE regions to avoid the risk of slowing down short lived * allocations. */ unsigned long transparent_hugepage_flags __read_mostly = #ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS (1<<TRANSPARENT_HUGEPAGE_FLAG)| #endif #ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)| #endif (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)| (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG); /* default scan 8*512 pte (or vmas) every 30 second */ static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8; static unsigned int khugepaged_pages_collapsed; static unsigned int khugepaged_full_scans; static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000; /* during fragmentation poll the hugepage allocator once every minute */ static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000; static struct task_struct *khugepaged_thread __read_mostly; static DEFINE_MUTEX(khugepaged_mutex); static DEFINE_SPINLOCK(khugepaged_mm_lock); static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait); /* * default collapse hugepages if there is at least one pte mapped like * it would have happened if the vma was large enough during page * fault. 
*/ static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1; static int khugepaged(void *none); static int mm_slots_hash_init(void); static int khugepaged_slab_init(void); static void khugepaged_slab_free(void); #define MM_SLOTS_HASH_HEADS 1024 static struct hlist_head *mm_slots_hash __read_mostly; static struct kmem_cache *mm_slot_cache __read_mostly; /** * struct mm_slot - hash lookup from mm to mm_slot * @hash: hash collision list * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head * @mm: the mm that this information is valid for */ struct mm_slot { struct hlist_node hash; struct list_head mm_node; struct mm_struct *mm; }; /** * struct khugepaged_scan - cursor for scanning * @mm_head: the head of the mm list to scan * @mm_slot: the current mm_slot we are scanning * @address: the next address inside that to be scanned * * There is only the one khugepaged_scan instance of this cursor structure. */ struct khugepaged_scan { struct list_head mm_head; struct mm_slot *mm_slot; unsigned long address; }; static struct khugepaged_scan khugepaged_scan = { .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head), }; static int set_recommended_min_free_kbytes(void) { struct zone *zone; int nr_zones = 0; unsigned long recommended_min; extern int min_free_kbytes; if (!test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags) && !test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags)) return 0; for_each_populated_zone(zone) nr_zones++; /* Make sure at least 2 hugepages are free for MIGRATE_RESERVE */ recommended_min = pageblock_nr_pages * nr_zones * 2; /* * Make sure that on average at least two pageblocks are almost free * of another type, one for a migratetype to fall back to and a * second to avoid subsequent fallbacks of other types There are 3 * MIGRATE_TYPES we care about. 
*/ recommended_min += pageblock_nr_pages * nr_zones * MIGRATE_PCPTYPES * MIGRATE_PCPTYPES; /* don't ever allow to reserve more than 5% of the lowmem */ recommended_min = min(recommended_min, (unsigned long) nr_free_buffer_pages() / 20); recommended_min <<= (PAGE_SHIFT-10); if (recommended_min > min_free_kbytes) min_free_kbytes = recommended_min; setup_per_zone_wmarks(); return 0; } late_initcall(set_recommended_min_free_kbytes); static int start_khugepaged(void) { int err = 0; if (khugepaged_enabled()) { int wakeup; if (unlikely(!mm_slot_cache || !mm_slots_hash)) { err = -ENOMEM; goto out; } mutex_lock(&khugepaged_mutex); if (!khugepaged_thread) khugepaged_thread = kthread_run(khugepaged, NULL, "khugepaged"); if (unlikely(IS_ERR(khugepaged_thread))) { printk(KERN_ERR "khugepaged: kthread_run(khugepaged) failed\n"); err = PTR_ERR(khugepaged_thread); khugepaged_thread = NULL; } wakeup = !list_empty(&khugepaged_scan.mm_head); mutex_unlock(&khugepaged_mutex); if (wakeup) wake_up_interruptible(&khugepaged_wait); set_recommended_min_free_kbytes(); } else /* wakeup to exit */ wake_up_interruptible(&khugepaged_wait); out: return err; } #ifdef CONFIG_SYSFS static ssize_t double_flag_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf, enum transparent_hugepage_flag enabled, enum transparent_hugepage_flag req_madv) { if (test_bit(enabled, &transparent_hugepage_flags)) { VM_BUG_ON(test_bit(req_madv, &transparent_hugepage_flags)); return sprintf(buf, "[always] madvise never\n"); } else if (test_bit(req_madv, &transparent_hugepage_flags)) return sprintf(buf, "always [madvise] never\n"); else return sprintf(buf, "always madvise [never]\n"); } static ssize_t double_flag_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count, enum transparent_hugepage_flag enabled, enum transparent_hugepage_flag req_madv) { if (!memcmp("always", buf, min(sizeof("always")-1, count))) { set_bit(enabled, &transparent_hugepage_flags); clear_bit(req_madv, 
&transparent_hugepage_flags); } else if (!memcmp("madvise", buf, min(sizeof("madvise")-1, count))) { clear_bit(enabled, &transparent_hugepage_flags); set_bit(req_madv, &transparent_hugepage_flags); } else if (!memcmp("never", buf, min(sizeof("never")-1, count))) { clear_bit(enabled, &transparent_hugepage_flags); clear_bit(req_madv, &transparent_hugepage_flags); } else return -EINVAL; return count; } static ssize_t enabled_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return double_flag_show(kobj, attr, buf, TRANSPARENT_HUGEPAGE_FLAG, TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG); } static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { ssize_t ret; ret = double_flag_store(kobj, attr, buf, count, TRANSPARENT_HUGEPAGE_FLAG, TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG); if (ret > 0) { int err = start_khugepaged(); if (err) ret = err; } if (ret > 0 && (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags) || test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags))) set_recommended_min_free_kbytes(); return ret; } static struct kobj_attribute enabled_attr = __ATTR(enabled, 0644, enabled_show, enabled_store); static ssize_t single_flag_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf, enum transparent_hugepage_flag flag) { return sprintf(buf, "%d\n", !!test_bit(flag, &transparent_hugepage_flags)); } static ssize_t single_flag_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count, enum transparent_hugepage_flag flag) { unsigned long value; int ret; ret = kstrtoul(buf, 10, &value); if (ret < 0) return ret; if (value > 1) return -EINVAL; if (value) set_bit(flag, &transparent_hugepage_flags); else clear_bit(flag, &transparent_hugepage_flags); return count; } /* * Currently defrag only disables __GFP_NOWAIT for allocation. 
A blind * __GFP_REPEAT is too aggressive, it's never worth swapping tons of * memory just to allocate one more hugepage. */ static ssize_t defrag_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return double_flag_show(kobj, attr, buf, TRANSPARENT_HUGEPAGE_DEFRAG_FLAG, TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG); } static ssize_t defrag_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { return double_flag_store(kobj, attr, buf, count, TRANSPARENT_HUGEPAGE_DEFRAG_FLAG, TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG); } static struct kobj_attribute defrag_attr = __ATTR(defrag, 0644, defrag_show, defrag_store); #ifdef CONFIG_DEBUG_VM static ssize_t debug_cow_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return single_flag_show(kobj, attr, buf, TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG); } static ssize_t debug_cow_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { return single_flag_store(kobj, attr, buf, count, TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG); } static struct kobj_attribute debug_cow_attr = __ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store); #endif /* CONFIG_DEBUG_VM */ static struct attribute *hugepage_attr[] = { &enabled_attr.attr, &defrag_attr.attr, #ifdef CONFIG_DEBUG_VM &debug_cow_attr.attr, #endif NULL, }; static struct attribute_group hugepage_attr_group = { .attrs = hugepage_attr, }; static ssize_t scan_sleep_millisecs_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs); } static ssize_t scan_sleep_millisecs_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { unsigned long msecs; int err; err = strict_strtoul(buf, 10, &msecs); if (err || msecs > UINT_MAX) return -EINVAL; khugepaged_scan_sleep_millisecs = msecs; wake_up_interruptible(&khugepaged_wait); return count; } static struct kobj_attribute scan_sleep_millisecs_attr = 
__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show, scan_sleep_millisecs_store); static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs); } static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { unsigned long msecs; int err; err = strict_strtoul(buf, 10, &msecs); if (err || msecs > UINT_MAX) return -EINVAL; khugepaged_alloc_sleep_millisecs = msecs; wake_up_interruptible(&khugepaged_wait); return count; } static struct kobj_attribute alloc_sleep_millisecs_attr = __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show, alloc_sleep_millisecs_store); static ssize_t pages_to_scan_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%u\n", khugepaged_pages_to_scan); } static ssize_t pages_to_scan_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int err; unsigned long pages; err = strict_strtoul(buf, 10, &pages); if (err || !pages || pages > UINT_MAX) return -EINVAL; khugepaged_pages_to_scan = pages; return count; } static struct kobj_attribute pages_to_scan_attr = __ATTR(pages_to_scan, 0644, pages_to_scan_show, pages_to_scan_store); static ssize_t pages_collapsed_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%u\n", khugepaged_pages_collapsed); } static struct kobj_attribute pages_collapsed_attr = __ATTR_RO(pages_collapsed); static ssize_t full_scans_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%u\n", khugepaged_full_scans); } static struct kobj_attribute full_scans_attr = __ATTR_RO(full_scans); static ssize_t khugepaged_defrag_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return single_flag_show(kobj, attr, buf, TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG); } static ssize_t 
khugepaged_defrag_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { return single_flag_store(kobj, attr, buf, count, TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG); } static struct kobj_attribute khugepaged_defrag_attr = __ATTR(defrag, 0644, khugepaged_defrag_show, khugepaged_defrag_store); /* * max_ptes_none controls if khugepaged should collapse hugepages over * any unmapped ptes in turn potentially increasing the memory * footprint of the vmas. When max_ptes_none is 0 khugepaged will not * reduce the available free memory in the system as it * runs. Increasing max_ptes_none will instead potentially reduce the * free memory in the system during the khugepaged scan. */ static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%u\n", khugepaged_max_ptes_none); } static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int err; unsigned long max_ptes_none; err = strict_strtoul(buf, 10, &max_ptes_none); if (err || max_ptes_none > HPAGE_PMD_NR-1) return -EINVAL; khugepaged_max_ptes_none = max_ptes_none; return count; } static struct kobj_attribute khugepaged_max_ptes_none_attr = __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show, khugepaged_max_ptes_none_store); static struct attribute *khugepaged_attr[] = { &khugepaged_defrag_attr.attr, &khugepaged_max_ptes_none_attr.attr, &pages_to_scan_attr.attr, &pages_collapsed_attr.attr, &full_scans_attr.attr, &scan_sleep_millisecs_attr.attr, &alloc_sleep_millisecs_attr.attr, NULL, }; static struct attribute_group khugepaged_attr_group = { .attrs = khugepaged_attr, .name = "khugepaged", }; static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj) { int err; *hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj); if (unlikely(!*hugepage_kobj)) { printk(KERN_ERR "hugepage: failed kobject create\n"); return -ENOMEM; 
} err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group); if (err) { printk(KERN_ERR "hugepage: failed register hugeage group\n"); goto delete_obj; } err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group); if (err) { printk(KERN_ERR "hugepage: failed register hugeage group\n"); goto remove_hp_group; } return 0; remove_hp_group: sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group); delete_obj: kobject_put(*hugepage_kobj); return err; } static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj) { sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group); sysfs_remove_group(hugepage_kobj, &hugepage_attr_group); kobject_put(hugepage_kobj); } #else static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj) { return 0; } static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj) { } #endif /* CONFIG_SYSFS */ static int __init hugepage_init(void) { int err; struct kobject *hugepage_kobj; if (!has_transparent_hugepage()) { transparent_hugepage_flags = 0; return -EINVAL; } err = hugepage_init_sysfs(&hugepage_kobj); if (err) return err; err = khugepaged_slab_init(); if (err) goto out; err = mm_slots_hash_init(); if (err) { khugepaged_slab_free(); goto out; } /* * By default disable transparent hugepages on smaller systems, * where the extra memory used could hurt more than TLB overhead * is likely to save. The admin can still enable it through /sys. 
*/ if (totalram_pages < (512 << (20 - PAGE_SHIFT))) transparent_hugepage_flags = 0; start_khugepaged(); set_recommended_min_free_kbytes(); return 0; out: hugepage_exit_sysfs(hugepage_kobj); return err; } module_init(hugepage_init) static int __init setup_transparent_hugepage(char *str) { int ret = 0; if (!str) goto out; if (!strcmp(str, "always")) { set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags); clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags); ret = 1; } else if (!strcmp(str, "madvise")) { clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags); set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags); ret = 1; } else if (!strcmp(str, "never")) { clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags); clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags); ret = 1; } out: if (!ret) printk(KERN_WARNING "transparent_hugepage= cannot parse, ignored\n"); return ret; } __setup("transparent_hugepage=", setup_transparent_hugepage); static void prepare_pmd_huge_pte(pgtable_t pgtable, struct mm_struct *mm) { assert_spin_locked(&mm->page_table_lock); /* FIFO */ if (!mm->pmd_huge_pte) INIT_LIST_HEAD(&pgtable->lru); else list_add(&pgtable->lru, &mm->pmd_huge_pte->lru); mm->pmd_huge_pte = pgtable; } static inline pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) { if (likely(vma->vm_flags & VM_WRITE)) pmd = pmd_mkwrite(pmd); return pmd; } static int __do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, struct page *page) { int ret = 0; pgtable_t pgtable; VM_BUG_ON(!PageCompound(page)); pgtable = pte_alloc_one(mm, haddr); if (unlikely(!pgtable)) { mem_cgroup_uncharge_page(page); put_page(page); return VM_FAULT_OOM; } clear_huge_page(page, haddr, HPAGE_PMD_NR); __SetPageUptodate(page); spin_lock(&mm->page_table_lock); if (unlikely(!pmd_none(*pmd))) { spin_unlock(&mm->page_table_lock); 
mem_cgroup_uncharge_page(page); put_page(page); pte_free(mm, pgtable); } else { pmd_t entry; entry = mk_pmd(page, vma->vm_page_prot); entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); entry = pmd_mkhuge(entry); /* * The spinlocking to take the lru_lock inside * page_add_new_anon_rmap() acts as a full memory * barrier to be sure clear_huge_page writes become * visible after the set_pmd_at() write. */ page_add_new_anon_rmap(page, vma, haddr); set_pmd_at(mm, haddr, pmd, entry); prepare_pmd_huge_pte(pgtable, mm); add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR); mm->nr_ptes++; spin_unlock(&mm->page_table_lock); } return ret; } static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp) { return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp; } static inline struct page *alloc_hugepage_vma(int defrag, struct vm_area_struct *vma, unsigned long haddr, int nd, gfp_t extra_gfp) { return alloc_pages_vma(alloc_hugepage_gfpmask(defrag, extra_gfp), HPAGE_PMD_ORDER, vma, haddr, nd); } #ifndef CONFIG_NUMA static inline struct page *alloc_hugepage(int defrag) { return alloc_pages(alloc_hugepage_gfpmask(defrag, 0), HPAGE_PMD_ORDER); } #endif int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, unsigned int flags) { struct page *page; unsigned long haddr = address & HPAGE_PMD_MASK; pte_t *pte; if (haddr >= vma->vm_start && haddr + HPAGE_PMD_SIZE <= vma->vm_end) { if (unlikely(anon_vma_prepare(vma))) return VM_FAULT_OOM; if (unlikely(khugepaged_enter(vma))) return VM_FAULT_OOM; page = alloc_hugepage_vma(transparent_hugepage_defrag(vma), vma, haddr, numa_node_id(), 0); if (unlikely(!page)) { count_vm_event(THP_FAULT_FALLBACK); goto out; } count_vm_event(THP_FAULT_ALLOC); if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) { put_page(page); goto out; } return __do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page); } out: /* * Use __pte_alloc instead of pte_alloc_map, because we can't * run 
pte_offset_map on the pmd, if an huge pmd could * materialize from under us from a different thread. */ if (unlikely(__pte_alloc(mm, vma, pmd, address))) return VM_FAULT_OOM; /* if an huge pmd materialized from under us just retry later */ if (unlikely(pmd_trans_huge(*pmd))) return 0; /* * A regular pmd is established and it can't morph into a huge pmd * from under us anymore at this point because we hold the mmap_sem * read mode and khugepaged takes it in write mode. So now it's * safe to run pte_offset_map(). */ pte = pte_offset_map(pmd, address); return handle_pte_fault(mm, vma, address, pte, pmd, flags); } int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, struct vm_area_struct *vma) { struct page *src_page; pmd_t pmd; pgtable_t pgtable; int ret; ret = -ENOMEM; pgtable = pte_alloc_one(dst_mm, addr); if (unlikely(!pgtable)) goto out; spin_lock(&dst_mm->page_table_lock); spin_lock_nested(&src_mm->page_table_lock, SINGLE_DEPTH_NESTING); ret = -EAGAIN; pmd = *src_pmd; if (unlikely(!pmd_trans_huge(pmd))) { pte_free(dst_mm, pgtable); goto out_unlock; } if (unlikely(pmd_trans_splitting(pmd))) { /* split huge page running from under us */ spin_unlock(&src_mm->page_table_lock); spin_unlock(&dst_mm->page_table_lock); pte_free(dst_mm, pgtable); wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */ goto out; } src_page = pmd_page(pmd); VM_BUG_ON(!PageHead(src_page)); get_page(src_page); page_dup_rmap(src_page); add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR); pmdp_set_wrprotect(src_mm, addr, src_pmd); pmd = pmd_mkold(pmd_wrprotect(pmd)); set_pmd_at(dst_mm, addr, dst_pmd, pmd); prepare_pmd_huge_pte(pgtable, dst_mm); dst_mm->nr_ptes++; ret = 0; out_unlock: spin_unlock(&src_mm->page_table_lock); spin_unlock(&dst_mm->page_table_lock); out: return ret; } /* no "address" argument so destroys page coloring of some arch */ pgtable_t get_pmd_huge_pte(struct mm_struct *mm) { pgtable_t pgtable; 
assert_spin_locked(&mm->page_table_lock); /* FIFO */ pgtable = mm->pmd_huge_pte; if (list_empty(&pgtable->lru)) mm->pmd_huge_pte = NULL; else { mm->pmd_huge_pte = list_entry(pgtable->lru.next, struct page, lru); list_del(&pgtable->lru); } return pgtable; } static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, pmd_t orig_pmd, struct page *page, unsigned long haddr) { pgtable_t pgtable; pmd_t _pmd; int ret = 0, i; struct page **pages; pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR, GFP_KERNEL); if (unlikely(!pages)) { ret |= VM_FAULT_OOM; goto out; } for (i = 0; i < HPAGE_PMD_NR; i++) { pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE | __GFP_OTHER_NODE, vma, address, page_to_nid(page)); if (unlikely(!pages[i] || mem_cgroup_newpage_charge(pages[i], mm, GFP_KERNEL))) { if (pages[i]) put_page(pages[i]); mem_cgroup_uncharge_start(); while (--i >= 0) { mem_cgroup_uncharge_page(pages[i]); put_page(pages[i]); } mem_cgroup_uncharge_end(); kfree(pages); ret |= VM_FAULT_OOM; goto out; } } for (i = 0; i < HPAGE_PMD_NR; i++) { copy_user_highpage(pages[i], page + i, haddr + PAGE_SIZE * i, vma); __SetPageUptodate(pages[i]); cond_resched(); } spin_lock(&mm->page_table_lock); if (unlikely(!pmd_same(*pmd, orig_pmd))) goto out_free_pages; VM_BUG_ON(!PageHead(page)); pmdp_clear_flush_notify(vma, haddr, pmd); /* leave pmd empty until pte is filled */ pgtable = get_pmd_huge_pte(mm); pmd_populate(mm, &_pmd, pgtable); for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { pte_t *pte, entry; entry = mk_pte(pages[i], vma->vm_page_prot); entry = maybe_mkwrite(pte_mkdirty(entry), vma); page_add_new_anon_rmap(pages[i], vma, haddr); pte = pte_offset_map(&_pmd, haddr); VM_BUG_ON(!pte_none(*pte)); set_pte_at(mm, haddr, pte, entry); pte_unmap(pte); } kfree(pages); smp_wmb(); /* make pte visible before pmd */ pmd_populate(mm, pmd, pgtable); page_remove_rmap(page); spin_unlock(&mm->page_table_lock); ret |= 
VM_FAULT_WRITE; put_page(page); out: return ret; out_free_pages: spin_unlock(&mm->page_table_lock); mem_cgroup_uncharge_start(); for (i = 0; i < HPAGE_PMD_NR; i++) { mem_cgroup_uncharge_page(pages[i]); put_page(pages[i]); } mem_cgroup_uncharge_end(); kfree(pages); goto out; } int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, pmd_t orig_pmd) { int ret = 0; struct page *page, *new_page; unsigned long haddr; VM_BUG_ON(!vma->anon_vma); spin_lock(&mm->page_table_lock); if (unlikely(!pmd_same(*pmd, orig_pmd))) goto out_unlock; page = pmd_page(orig_pmd); VM_BUG_ON(!PageCompound(page) || !PageHead(page)); haddr = address & HPAGE_PMD_MASK; if (page_mapcount(page) == 1) { pmd_t entry; entry = pmd_mkyoung(orig_pmd); entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); if (pmdp_set_access_flags(vma, haddr, pmd, entry, 1)) update_mmu_cache(vma, address, entry); ret |= VM_FAULT_WRITE; goto out_unlock; } get_page(page); spin_unlock(&mm->page_table_lock); if (transparent_hugepage_enabled(vma) && !transparent_hugepage_debug_cow()) new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma), vma, haddr, numa_node_id(), 0); else new_page = NULL; if (unlikely(!new_page)) { count_vm_event(THP_FAULT_FALLBACK); ret = do_huge_pmd_wp_page_fallback(mm, vma, address, pmd, orig_pmd, page, haddr); put_page(page); goto out; } count_vm_event(THP_FAULT_ALLOC); if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) { put_page(new_page); put_page(page); ret |= VM_FAULT_OOM; goto out; } copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR); __SetPageUptodate(new_page); spin_lock(&mm->page_table_lock); put_page(page); if (unlikely(!pmd_same(*pmd, orig_pmd))) { mem_cgroup_uncharge_page(new_page); put_page(new_page); } else { pmd_t entry; VM_BUG_ON(!PageHead(page)); entry = mk_pmd(new_page, vma->vm_page_prot); entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); entry = pmd_mkhuge(entry); pmdp_clear_flush_notify(vma, haddr, 
pmd); page_add_new_anon_rmap(new_page, vma, haddr); set_pmd_at(mm, haddr, pmd, entry); update_mmu_cache(vma, address, entry); page_remove_rmap(page); put_page(page); ret |= VM_FAULT_WRITE; } out_unlock: spin_unlock(&mm->page_table_lock); out: return ret; } struct page *follow_trans_huge_pmd(struct mm_struct *mm, unsigned long addr, pmd_t *pmd, unsigned int flags) { struct page *page = NULL; assert_spin_locked(&mm->page_table_lock); if (flags & FOLL_WRITE && !pmd_write(*pmd)) goto out; page = pmd_page(*pmd); VM_BUG_ON(!PageHead(page)); if (flags & FOLL_TOUCH) { pmd_t _pmd; /* * We should set the dirty bit only for FOLL_WRITE but * for now the dirty bit in the pmd is meaningless. * And if the dirty bit will become meaningful and * we'll only set it with FOLL_WRITE, an atomic * set_bit will be required on the pmd to set the * young bit, instead of the current set_pmd_at. */ _pmd = pmd_mkyoung(pmd_mkdirty(*pmd)); set_pmd_at(mm, addr & HPAGE_PMD_MASK, pmd, _pmd); } page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT; VM_BUG_ON(!PageCompound(page)); if (flags & FOLL_GET) get_page_foll(page); out: return page; } int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr) { int ret = 0; if (__pmd_trans_huge_lock(pmd, vma) == 1) { struct page *page; pgtable_t pgtable; pgtable = get_pmd_huge_pte(tlb->mm); page = pmd_page(*pmd); pmd_clear(pmd); tlb_remove_pmd_tlb_entry(tlb, pmd, addr); page_remove_rmap(page); VM_BUG_ON(page_mapcount(page) < 0); add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR); VM_BUG_ON(!PageHead(page)); tlb->mm->nr_ptes--; spin_unlock(&tlb->mm->page_table_lock); tlb_remove_page(tlb, page); pte_free(tlb->mm, pgtable); ret = 1; } return ret; } int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, unsigned char *vec) { int ret = 0; if (__pmd_trans_huge_lock(pmd, vma) == 1) { /* * All logical pages in the range are present * if backed by a huge page. 
*/ spin_unlock(&vma->vm_mm->page_table_lock); memset(vec, 1, (end - addr) >> PAGE_SHIFT); ret = 1; } return ret; } int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma, unsigned long old_addr, unsigned long new_addr, unsigned long old_end, pmd_t *old_pmd, pmd_t *new_pmd) { int ret = 0; pmd_t pmd; struct mm_struct *mm = vma->vm_mm; if ((old_addr & ~HPAGE_PMD_MASK) || (new_addr & ~HPAGE_PMD_MASK) || old_end - old_addr < HPAGE_PMD_SIZE || (new_vma->vm_flags & VM_NOHUGEPAGE)) goto out; /* * The destination pmd shouldn't be established, free_pgtables() * should have release it. */ if (WARN_ON(!pmd_none(*new_pmd))) { VM_BUG_ON(pmd_trans_huge(*new_pmd)); goto out; } ret = __pmd_trans_huge_lock(old_pmd, vma); if (ret == 1) { pmd = pmdp_get_and_clear(mm, old_addr, old_pmd); VM_BUG_ON(!pmd_none(*new_pmd)); set_pmd_at(mm, new_addr, new_pmd, pmd); spin_unlock(&mm->page_table_lock); } out: return ret; } int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, pgprot_t newprot) { struct mm_struct *mm = vma->vm_mm; int ret = 0; if (__pmd_trans_huge_lock(pmd, vma) == 1) { pmd_t entry; entry = pmdp_get_and_clear(mm, addr, pmd); entry = pmd_modify(entry, newprot); set_pmd_at(mm, addr, pmd, entry); spin_unlock(&vma->vm_mm->page_table_lock); ret = 1; } return ret; } /* * Returns 1 if a given pmd maps a stable (not under splitting) thp. * Returns -1 if it maps a thp under splitting. Returns 0 otherwise. * * Note that if it returns 1, this routine returns without unlocking page * table locks. So callers must unlock them. */ int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) { spin_lock(&vma->vm_mm->page_table_lock); if (likely(pmd_trans_huge(*pmd))) { if (unlikely(pmd_trans_splitting(*pmd))) { spin_unlock(&vma->vm_mm->page_table_lock); wait_split_huge_page(vma->anon_vma, pmd); return -1; } else { /* Thp mapped by 'pmd' is stable, so we can * handle it as it is. 
*/ return 1; } } spin_unlock(&vma->vm_mm->page_table_lock); return 0; } pmd_t *page_check_address_pmd(struct page *page, struct mm_struct *mm, unsigned long address, enum page_check_address_pmd_flag flag) { pgd_t *pgd; pud_t *pud; pmd_t *pmd, *ret = NULL; if (address & ~HPAGE_PMD_MASK) goto out; pgd = pgd_offset(mm, address); if (!pgd_present(*pgd)) goto out; pud = pud_offset(pgd, address); if (!pud_present(*pud)) goto out; pmd = pmd_offset(pud, address); if (pmd_none(*pmd)) goto out; if (pmd_page(*pmd) != page) goto out; /* * split_vma() may create temporary aliased mappings. There is * no risk as long as all huge pmd are found and have their * splitting bit set before __split_huge_page_refcount * runs. Finding the same huge pmd more than once during the * same rmap walk is not a problem. */ if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG && pmd_trans_splitting(*pmd)) goto out; if (pmd_trans_huge(*pmd)) { VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG && !pmd_trans_splitting(*pmd)); ret = pmd; } out: return ret; } static int __split_huge_page_splitting(struct page *page, struct vm_area_struct *vma, unsigned long address) { struct mm_struct *mm = vma->vm_mm; pmd_t *pmd; int ret = 0; spin_lock(&mm->page_table_lock); pmd = page_check_address_pmd(page, mm, address, PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG); if (pmd) { /* * We can't temporarily set the pmd to null in order * to split it, the pmd must remain marked huge at all * times or the VM won't take the pmd_trans_huge paths * and it won't wait on the anon_vma->root->mutex to * serialize against split_huge_page*. 
*/ pmdp_splitting_flush_notify(vma, address, pmd); ret = 1; } spin_unlock(&mm->page_table_lock); return ret; } static void __split_huge_page_refcount(struct page *page) { int i; struct zone *zone = page_zone(page); int tail_count = 0; /* prevent PageLRU to go away from under us, and freeze lru stats */ spin_lock_irq(&zone->lru_lock); compound_lock(page); /* complete memcg works before add pages to LRU */ mem_cgroup_split_huge_fixup(page); for (i = HPAGE_PMD_NR - 1; i >= 1; i--) { struct page *page_tail = page + i; /* tail_page->_mapcount cannot change */ BUG_ON(page_mapcount(page_tail) < 0); tail_count += page_mapcount(page_tail); /* check for overflow */ BUG_ON(tail_count < 0); BUG_ON(atomic_read(&page_tail->_count) != 0); /* * tail_page->_count is zero and not changing from * under us. But get_page_unless_zero() may be running * from under us on the tail_page. If we used * atomic_set() below instead of atomic_add(), we * would then run atomic_set() concurrently with * get_page_unless_zero(), and atomic_set() is * implemented in C not using locked ops. spin_unlock * on x86 sometime uses locked ops because of PPro * errata 66, 92, so unless somebody can guarantee * atomic_set() here would be safe on all archs (and * not only on x86), it's safer to use atomic_add(). */ atomic_add(page_mapcount(page) + page_mapcount(page_tail) + 1, &page_tail->_count); /* after clearing PageTail the gup refcount can be released */ smp_mb(); /* * retain hwpoison flag of the poisoned tail page: * fix for the unsuitable process killed on Guest Machine(KVM) * by the memory-failure. 
*/ page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON; page_tail->flags |= (page->flags & ((1L << PG_referenced) | (1L << PG_swapbacked) | (1L << PG_mlocked) | (1L << PG_uptodate))); page_tail->flags |= (1L << PG_dirty); /* clear PageTail before overwriting first_page */ smp_wmb(); /* * __split_huge_page_splitting() already set the * splitting bit in all pmd that could map this * hugepage, that will ensure no CPU can alter the * mapcount on the head page. The mapcount is only * accounted in the head page and it has to be * transferred to all tail pages in the below code. So * for this code to be safe, the split the mapcount * can't change. But that doesn't mean userland can't * keep changing and reading the page contents while * we transfer the mapcount, so the pmd splitting * status is achieved setting a reserved bit in the * pmd, not by clearing the present bit. */ page_tail->_mapcount = page->_mapcount; BUG_ON(page_tail->mapping); page_tail->mapping = page->mapping; page_tail->index = page->index + i; BUG_ON(!PageAnon(page_tail)); BUG_ON(!PageUptodate(page_tail)); BUG_ON(!PageDirty(page_tail)); BUG_ON(!PageSwapBacked(page_tail)); lru_add_page_tail(zone, page, page_tail); } atomic_sub(tail_count, &page->_count); BUG_ON(atomic_read(&page->_count) <= 0); __dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES); __mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR); ClearPageCompound(page); compound_unlock(page); spin_unlock_irq(&zone->lru_lock); for (i = 1; i < HPAGE_PMD_NR; i++) { struct page *page_tail = page + i; BUG_ON(page_count(page_tail) <= 0); /* * Tail pages may be freed if there wasn't any mapping * like if add_to_swap() is running on a lru page that * had its mapping zapped. And freeing these pages * requires taking the lru_lock so we do the put_page * of the tail pages after the split is complete. */ put_page(page_tail); } /* * Only the head page (now become a regular page) is required * to be pinned by the caller. 
*/ BUG_ON(page_count(page) <= 0); } static int __split_huge_page_map(struct page *page, struct vm_area_struct *vma, unsigned long address) { struct mm_struct *mm = vma->vm_mm; pmd_t *pmd, _pmd; int ret = 0, i; pgtable_t pgtable; unsigned long haddr; spin_lock(&mm->page_table_lock); pmd = page_check_address_pmd(page, mm, address, PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG); if (pmd) { pgtable = get_pmd_huge_pte(mm); pmd_populate(mm, &_pmd, pgtable); for (i = 0, haddr = address; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { pte_t *pte, entry; BUG_ON(PageCompound(page+i)); entry = mk_pte(page + i, vma->vm_page_prot); entry = maybe_mkwrite(pte_mkdirty(entry), vma); if (!pmd_write(*pmd)) entry = pte_wrprotect(entry); else BUG_ON(page_mapcount(page) != 1); if (!pmd_young(*pmd)) entry = pte_mkold(entry); pte = pte_offset_map(&_pmd, haddr); BUG_ON(!pte_none(*pte)); set_pte_at(mm, haddr, pte, entry); pte_unmap(pte); } smp_wmb(); /* make pte visible before pmd */ /* * Up to this point the pmd is present and huge and * userland has the whole access to the hugepage * during the split (which happens in place). If we * overwrite the pmd with the not-huge version * pointing to the pte here (which of course we could * if all CPUs were bug free), userland could trigger * a small page size TLB miss on the small sized TLB * while the hugepage TLB entry is still established * in the huge TLB. Some CPU doesn't like that. See * http://support.amd.com/us/Processor_TechDocs/41322.pdf, * Erratum 383 on page 93. Intel should be safe but is * also warns that it's only safe if the permission * and cache attributes of the two entries loaded in * the two TLB is identical (which should be the case * here). But it is generally safer to never allow * small and huge TLB entries for the same virtual * address to be loaded simultaneously. 
So instead of * doing "pmd_populate(); flush_tlb_range();" we first * mark the current pmd notpresent (atomically because * here the pmd_trans_huge and pmd_trans_splitting * must remain set at all times on the pmd until the * split is complete for this pmd), then we flush the * SMP TLB and finally we write the non-huge version * of the pmd entry with pmd_populate. */ set_pmd_at(mm, address, pmd, pmd_mknotpresent(*pmd)); flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); pmd_populate(mm, pmd, pgtable); ret = 1; } spin_unlock(&mm->page_table_lock); return ret; } /* must be called with anon_vma->root->mutex hold */ static void __split_huge_page(struct page *page, struct anon_vma *anon_vma) { int mapcount, mapcount2; struct anon_vma_chain *avc; BUG_ON(!PageHead(page)); BUG_ON(PageTail(page)); mapcount = 0; list_for_each_entry(avc, &anon_vma->head, same_anon_vma) { struct vm_area_struct *vma = avc->vma; unsigned long addr = vma_address(page, vma); BUG_ON(is_vma_temporary_stack(vma)); if (addr == -EFAULT) continue; mapcount += __split_huge_page_splitting(page, vma, addr); } /* * It is critical that new vmas are added to the tail of the * anon_vma list. This guarantes that if copy_huge_pmd() runs * and establishes a child pmd before * __split_huge_page_splitting() freezes the parent pmd (so if * we fail to prevent copy_huge_pmd() from running until the * whole __split_huge_page() is complete), we will still see * the newly established pmd of the child later during the * walk, to be able to set it as pmd_trans_splitting too. 
*/ if (mapcount != page_mapcount(page)) printk(KERN_ERR "mapcount %d page_mapcount %d\n", mapcount, page_mapcount(page)); BUG_ON(mapcount != page_mapcount(page)); __split_huge_page_refcount(page); mapcount2 = 0; list_for_each_entry(avc, &anon_vma->head, same_anon_vma) { struct vm_area_struct *vma = avc->vma; unsigned long addr = vma_address(page, vma); BUG_ON(is_vma_temporary_stack(vma)); if (addr == -EFAULT) continue; mapcount2 += __split_huge_page_map(page, vma, addr); } if (mapcount != mapcount2) printk(KERN_ERR "mapcount %d mapcount2 %d page_mapcount %d\n", mapcount, mapcount2, page_mapcount(page)); BUG_ON(mapcount != mapcount2); } int split_huge_page(struct page *page) { struct anon_vma *anon_vma; int ret = 1; BUG_ON(!PageAnon(page)); anon_vma = page_lock_anon_vma(page); if (!anon_vma) goto out; ret = 0; if (!PageCompound(page)) goto out_unlock; BUG_ON(!PageSwapBacked(page)); __split_huge_page(page, anon_vma); count_vm_event(THP_SPLIT); BUG_ON(PageCompound(page)); out_unlock: page_unlock_anon_vma(anon_vma); out: return ret; } #define VM_NO_THP (VM_SPECIAL|VM_INSERTPAGE|VM_MIXEDMAP|VM_SAO| \ VM_HUGETLB|VM_SHARED|VM_MAYSHARE) int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags, int advice) { switch (advice) { case MADV_HUGEPAGE: /* * Be somewhat over-protective like KSM for now! */ if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP)) return -EINVAL; *vm_flags &= ~VM_NOHUGEPAGE; *vm_flags |= VM_HUGEPAGE; /* * If the vma become good for khugepaged to scan, * register it here without waiting a page fault that * may not happen any time soon. */ if (unlikely(khugepaged_enter_vma_merge(vma))) return -ENOMEM; break; case MADV_NOHUGEPAGE: /* * Be somewhat over-protective like KSM for now! 
*/ if (*vm_flags & (VM_NOHUGEPAGE | VM_NO_THP)) return -EINVAL; *vm_flags &= ~VM_HUGEPAGE; *vm_flags |= VM_NOHUGEPAGE; /* * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning * this vma even if we leave the mm registered in khugepaged if * it got registered before VM_NOHUGEPAGE was set. */ break; } return 0; } static int __init khugepaged_slab_init(void) { mm_slot_cache = kmem_cache_create("khugepaged_mm_slot", sizeof(struct mm_slot), __alignof__(struct mm_slot), 0, NULL); if (!mm_slot_cache) return -ENOMEM; return 0; } static void __init khugepaged_slab_free(void) { kmem_cache_destroy(mm_slot_cache); mm_slot_cache = NULL; } static inline struct mm_slot *alloc_mm_slot(void) { if (!mm_slot_cache) /* initialization failed */ return NULL; return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL); } static inline void free_mm_slot(struct mm_slot *mm_slot) { kmem_cache_free(mm_slot_cache, mm_slot); } static int __init mm_slots_hash_init(void) { mm_slots_hash = kzalloc(MM_SLOTS_HASH_HEADS * sizeof(struct hlist_head), GFP_KERNEL); if (!mm_slots_hash) return -ENOMEM; return 0; } #if 0 static void __init mm_slots_hash_free(void) { kfree(mm_slots_hash); mm_slots_hash = NULL; } #endif static struct mm_slot *get_mm_slot(struct mm_struct *mm) { struct mm_slot *mm_slot; struct hlist_head *bucket; struct hlist_node *node; bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct)) % MM_SLOTS_HASH_HEADS]; hlist_for_each_entry(mm_slot, node, bucket, hash) { if (mm == mm_slot->mm) return mm_slot; } return NULL; } static void insert_to_mm_slots_hash(struct mm_struct *mm, struct mm_slot *mm_slot) { struct hlist_head *bucket; bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct)) % MM_SLOTS_HASH_HEADS]; mm_slot->mm = mm; hlist_add_head(&mm_slot->hash, bucket); } static inline int khugepaged_test_exit(struct mm_struct *mm) { return atomic_read(&mm->mm_users) == 0; } int __khugepaged_enter(struct mm_struct *mm) { struct mm_slot *mm_slot; int wakeup; mm_slot 
= alloc_mm_slot(); if (!mm_slot) return -ENOMEM; /* __khugepaged_exit() must not run from under us */ VM_BUG_ON(khugepaged_test_exit(mm)); if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) { free_mm_slot(mm_slot); return 0; } spin_lock(&khugepaged_mm_lock); insert_to_mm_slots_hash(mm, mm_slot); /* * Insert just behind the scanning cursor, to let the area settle * down a little. */ wakeup = list_empty(&khugepaged_scan.mm_head); list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head); spin_unlock(&khugepaged_mm_lock); atomic_inc(&mm->mm_count); if (wakeup) wake_up_interruptible(&khugepaged_wait); return 0; } int khugepaged_enter_vma_merge(struct vm_area_struct *vma) { unsigned long hstart, hend; if (!vma->anon_vma) /* * Not yet faulted in so we will register later in the * page fault if needed. */ return 0; if (vma->vm_ops) /* khugepaged not yet working on file or special mappings */ return 0; /* * If is_pfn_mapping() is true is_learn_pfn_mapping() must be * true too, verify it here. */ VM_BUG_ON(is_linear_pfn_mapping(vma) || vma->vm_flags & VM_NO_THP); hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; hend = vma->vm_end & HPAGE_PMD_MASK; if (hstart < hend) return khugepaged_enter(vma); return 0; } void __khugepaged_exit(struct mm_struct *mm) { struct mm_slot *mm_slot; int free = 0; spin_lock(&khugepaged_mm_lock); mm_slot = get_mm_slot(mm); if (mm_slot && khugepaged_scan.mm_slot != mm_slot) { hlist_del(&mm_slot->hash); list_del(&mm_slot->mm_node); free = 1; } spin_unlock(&khugepaged_mm_lock); if (free) { clear_bit(MMF_VM_HUGEPAGE, &mm->flags); free_mm_slot(mm_slot); mmdrop(mm); } else if (mm_slot) { /* * This is required to serialize against * khugepaged_test_exit() (which is guaranteed to run * under mmap sem read mode). Stop here (after we * return all pagetables will be destroyed) until * khugepaged has finished working on the pagetables * under the mmap_sem. 
*/ down_write(&mm->mmap_sem); up_write(&mm->mmap_sem); } } static void release_pte_page(struct page *page) { /* 0 stands for page_is_file_cache(page) == false */ dec_zone_page_state(page, NR_ISOLATED_ANON + 0); unlock_page(page); putback_lru_page(page); } static void release_pte_pages(pte_t *pte, pte_t *_pte) { while (--_pte >= pte) { pte_t pteval = *_pte; if (!pte_none(pteval)) release_pte_page(pte_page(pteval)); } } static void release_all_pte_pages(pte_t *pte) { release_pte_pages(pte, pte + HPAGE_PMD_NR); } static int __collapse_huge_page_isolate(struct vm_area_struct *vma, unsigned long address, pte_t *pte) { struct page *page; pte_t *_pte; int referenced = 0, isolated = 0, none = 0; for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++, address += PAGE_SIZE) { pte_t pteval = *_pte; if (pte_none(pteval)) { if (++none <= khugepaged_max_ptes_none) continue; else { release_pte_pages(pte, _pte); goto out; } } if (!pte_present(pteval) || !pte_write(pteval)) { release_pte_pages(pte, _pte); goto out; } page = vm_normal_page(vma, address, pteval); if (unlikely(!page)) { release_pte_pages(pte, _pte); goto out; } VM_BUG_ON(PageCompound(page)); BUG_ON(!PageAnon(page)); VM_BUG_ON(!PageSwapBacked(page)); /* cannot use mapcount: can't collapse if there's a gup pin */ if (page_count(page) != 1) { release_pte_pages(pte, _pte); goto out; } /* * We can do it before isolate_lru_page because the * page can't be freed from under us. NOTE: PG_lock * is needed to serialize against split_huge_page * when invoked from the VM. */ if (!trylock_page(page)) { release_pte_pages(pte, _pte); goto out; } /* * Isolate the page to avoid collapsing an hugepage * currently in use by the VM. 
*/ if (isolate_lru_page(page)) { unlock_page(page); release_pte_pages(pte, _pte); goto out; } /* 0 stands for page_is_file_cache(page) == false */ inc_zone_page_state(page, NR_ISOLATED_ANON + 0); VM_BUG_ON(!PageLocked(page)); VM_BUG_ON(PageLRU(page)); /* If there is no mapped pte young don't collapse the page */ if (pte_young(pteval) || PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm, address)) referenced = 1; } if (unlikely(!referenced)) release_all_pte_pages(pte); else isolated = 1; out: return isolated; } static void __collapse_huge_page_copy(pte_t *pte, struct page *page, struct vm_area_struct *vma, unsigned long address, spinlock_t *ptl) { pte_t *_pte; for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) { pte_t pteval = *_pte; struct page *src_page; if (pte_none(pteval)) { clear_user_highpage(page, address); add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1); } else { src_page = pte_page(pteval); copy_user_highpage(page, src_page, address, vma); VM_BUG_ON(page_mapcount(src_page) != 1); VM_BUG_ON(page_count(src_page) != 2); release_pte_page(src_page); /* * ptl mostly unnecessary, but preempt has to * be disabled to update the per-cpu stats * inside page_remove_rmap(). */ spin_lock(ptl); /* * paravirt calls inside pte_clear here are * superfluous. 
*/ pte_clear(vma->vm_mm, address, _pte); page_remove_rmap(src_page); spin_unlock(ptl); free_page_and_swap_cache(src_page); } address += PAGE_SIZE; page++; } } static void collapse_huge_page(struct mm_struct *mm, unsigned long address, struct page **hpage, struct vm_area_struct *vma, int node) { pgd_t *pgd; pud_t *pud; pmd_t *pmd, _pmd; pte_t *pte; pgtable_t pgtable; struct page *new_page; spinlock_t *ptl; int isolated; unsigned long hstart, hend; VM_BUG_ON(address & ~HPAGE_PMD_MASK); #ifndef CONFIG_NUMA up_read(&mm->mmap_sem); VM_BUG_ON(!*hpage); new_page = *hpage; #else VM_BUG_ON(*hpage); /* * Allocate the page while the vma is still valid and under * the mmap_sem read mode so there is no memory allocation * later when we take the mmap_sem in write mode. This is more * friendly behavior (OTOH it may actually hide bugs) to * filesystems in userland with daemons allocating memory in * the userland I/O paths. Allocating memory with the * mmap_sem in read mode is good idea also to allow greater * scalability. */ new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address, node, __GFP_OTHER_NODE); /* * After allocating the hugepage, release the mmap_sem read lock in * preparation for taking it in write mode. */ up_read(&mm->mmap_sem); if (unlikely(!new_page)) { count_vm_event(THP_COLLAPSE_ALLOC_FAILED); *hpage = ERR_PTR(-ENOMEM); return; } #endif count_vm_event(THP_COLLAPSE_ALLOC); if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) { #ifdef CONFIG_NUMA put_page(new_page); #endif return; } /* * Prevent all access to pagetables with the exception of * gup_fast later hanlded by the ptep_clear_flush and the VM * handled by the anon_vma lock + PG_lock. 
*/ down_write(&mm->mmap_sem); if (unlikely(khugepaged_test_exit(mm))) goto out; vma = find_vma(mm, address); hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; hend = vma->vm_end & HPAGE_PMD_MASK; if (address < hstart || address + HPAGE_PMD_SIZE > hend) goto out; if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) || (vma->vm_flags & VM_NOHUGEPAGE)) goto out; if (!vma->anon_vma || vma->vm_ops) goto out; if (is_vma_temporary_stack(vma)) goto out; /* * If is_pfn_mapping() is true is_learn_pfn_mapping() must be * true too, verify it here. */ VM_BUG_ON(is_linear_pfn_mapping(vma) || vma->vm_flags & VM_NO_THP); pgd = pgd_offset(mm, address); if (!pgd_present(*pgd)) goto out; pud = pud_offset(pgd, address); if (!pud_present(*pud)) goto out; pmd = pmd_offset(pud, address); /* pmd can't go away or become huge under us */ if (!pmd_present(*pmd) || pmd_trans_huge(*pmd)) goto out; anon_vma_lock(vma->anon_vma); pte = pte_offset_map(pmd, address); ptl = pte_lockptr(mm, pmd); spin_lock(&mm->page_table_lock); /* probably unnecessary */ /* * After this gup_fast can't run anymore. This also removes * any huge TLB entry from the CPU so we won't allow * huge and small TLB entries for the same virtual address * to avoid the risk of CPU bugs in that area. */ _pmd = pmdp_clear_flush_notify(vma, address, pmd); spin_unlock(&mm->page_table_lock); spin_lock(ptl); isolated = __collapse_huge_page_isolate(vma, address, pte); spin_unlock(ptl); if (unlikely(!isolated)) { pte_unmap(pte); spin_lock(&mm->page_table_lock); BUG_ON(!pmd_none(*pmd)); set_pmd_at(mm, address, pmd, _pmd); spin_unlock(&mm->page_table_lock); anon_vma_unlock(vma->anon_vma); goto out; } /* * All pages are isolated and locked so anon_vma rmap * can't run anymore. 
*/ anon_vma_unlock(vma->anon_vma); __collapse_huge_page_copy(pte, new_page, vma, address, ptl); pte_unmap(pte); __SetPageUptodate(new_page); pgtable = pmd_pgtable(_pmd); VM_BUG_ON(page_count(pgtable) != 1); VM_BUG_ON(page_mapcount(pgtable) != 0); _pmd = mk_pmd(new_page, vma->vm_page_prot); _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma); _pmd = pmd_mkhuge(_pmd); /* * spin_lock() below is not the equivalent of smp_wmb(), so * this is needed to avoid the copy_huge_page writes to become * visible after the set_pmd_at() write. */ smp_wmb(); spin_lock(&mm->page_table_lock); BUG_ON(!pmd_none(*pmd)); page_add_new_anon_rmap(new_page, vma, address); set_pmd_at(mm, address, pmd, _pmd); update_mmu_cache(vma, address, _pmd); prepare_pmd_huge_pte(pgtable, mm); spin_unlock(&mm->page_table_lock); #ifndef CONFIG_NUMA *hpage = NULL; #endif khugepaged_pages_collapsed++; out_up_write: up_write(&mm->mmap_sem); return; out: mem_cgroup_uncharge_page(new_page); #ifdef CONFIG_NUMA put_page(new_page); #endif goto out_up_write; } static int khugepaged_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, struct page **hpage) { pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte, *_pte; int ret = 0, referenced = 0, none = 0; struct page *page; unsigned long _address; spinlock_t *ptl; int node = -1; VM_BUG_ON(address & ~HPAGE_PMD_MASK); pgd = pgd_offset(mm, address); if (!pgd_present(*pgd)) goto out; pud = pud_offset(pgd, address); if (!pud_present(*pud)) goto out; pmd = pmd_offset(pud, address); if (!pmd_present(*pmd) || pmd_trans_huge(*pmd)) goto out; pte = pte_offset_map_lock(mm, pmd, address, &ptl); for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++, _address += PAGE_SIZE) { pte_t pteval = *_pte; if (pte_none(pteval)) { if (++none <= khugepaged_max_ptes_none) continue; else goto out_unmap; } if (!pte_present(pteval) || !pte_write(pteval)) goto out_unmap; page = vm_normal_page(vma, _address, pteval); if (unlikely(!page)) goto out_unmap; /* * Chose 
the node of the first page. This could * be more sophisticated and look at more pages, * but isn't for now. */ if (node == -1) node = page_to_nid(page); VM_BUG_ON(PageCompound(page)); if (!PageLRU(page) || PageLocked(page) || !PageAnon(page)) goto out_unmap; /* cannot use mapcount: can't collapse if there's a gup pin */ if (page_count(page) != 1) goto out_unmap; if (pte_young(pteval) || PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm, address)) referenced = 1; } if (referenced) ret = 1; out_unmap: pte_unmap_unlock(pte, ptl); if (ret) /* collapse_huge_page will return with the mmap_sem released */ collapse_huge_page(mm, address, hpage, vma, node); out: return ret; } static void collect_mm_slot(struct mm_slot *mm_slot) { struct mm_struct *mm = mm_slot->mm; VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock)); if (khugepaged_test_exit(mm)) { /* free mm_slot */ hlist_del(&mm_slot->hash); list_del(&mm_slot->mm_node); /* * Not strictly needed because the mm exited already. 
* * clear_bit(MMF_VM_HUGEPAGE, &mm->flags); */ /* khugepaged_mm_lock actually not necessary for the below */ free_mm_slot(mm_slot); mmdrop(mm); } } static unsigned int khugepaged_scan_mm_slot(unsigned int pages, struct page **hpage) __releases(&khugepaged_mm_lock) __acquires(&khugepaged_mm_lock) { struct mm_slot *mm_slot; struct mm_struct *mm; struct vm_area_struct *vma; int progress = 0; VM_BUG_ON(!pages); VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock)); if (khugepaged_scan.mm_slot) mm_slot = khugepaged_scan.mm_slot; else { mm_slot = list_entry(khugepaged_scan.mm_head.next, struct mm_slot, mm_node); khugepaged_scan.address = 0; khugepaged_scan.mm_slot = mm_slot; } spin_unlock(&khugepaged_mm_lock); mm = mm_slot->mm; down_read(&mm->mmap_sem); if (unlikely(khugepaged_test_exit(mm))) vma = NULL; else vma = find_vma(mm, khugepaged_scan.address); progress++; for (; vma; vma = vma->vm_next) { unsigned long hstart, hend; cond_resched(); if (unlikely(khugepaged_test_exit(mm))) { progress++; break; } if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) || (vma->vm_flags & VM_NOHUGEPAGE)) { skip: progress++; continue; } if (!vma->anon_vma || vma->vm_ops) goto skip; if (is_vma_temporary_stack(vma)) goto skip; /* * If is_pfn_mapping() is true is_learn_pfn_mapping() * must be true too, verify it here. 
*/ VM_BUG_ON(is_linear_pfn_mapping(vma) || vma->vm_flags & VM_NO_THP); hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; hend = vma->vm_end & HPAGE_PMD_MASK; if (hstart >= hend) goto skip; if (khugepaged_scan.address > hend) goto skip; if (khugepaged_scan.address < hstart) khugepaged_scan.address = hstart; VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK); while (khugepaged_scan.address < hend) { int ret; cond_resched(); if (unlikely(khugepaged_test_exit(mm))) goto breakouterloop; VM_BUG_ON(khugepaged_scan.address < hstart || khugepaged_scan.address + HPAGE_PMD_SIZE > hend); ret = khugepaged_scan_pmd(mm, vma, khugepaged_scan.address, hpage); /* move to next address */ khugepaged_scan.address += HPAGE_PMD_SIZE; progress += HPAGE_PMD_NR; if (ret) /* we released mmap_sem so break loop */ goto breakouterloop_mmap_sem; if (progress >= pages) goto breakouterloop; } } breakouterloop: up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */ breakouterloop_mmap_sem: spin_lock(&khugepaged_mm_lock); VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot); /* * Release the current mm_slot if this mm is about to die, or * if we scanned all vmas of this mm. */ if (khugepaged_test_exit(mm) || !vma) { /* * Make sure that if mm_users is reaching zero while * khugepaged runs here, khugepaged_exit will find * mm_slot not pointing to the exiting mm. 
*/ if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) { khugepaged_scan.mm_slot = list_entry( mm_slot->mm_node.next, struct mm_slot, mm_node); khugepaged_scan.address = 0; } else { khugepaged_scan.mm_slot = NULL; khugepaged_full_scans++; } collect_mm_slot(mm_slot); } return progress; } static int khugepaged_has_work(void) { return !list_empty(&khugepaged_scan.mm_head) && khugepaged_enabled(); } static int khugepaged_wait_event(void) { return !list_empty(&khugepaged_scan.mm_head) || !khugepaged_enabled(); } static void khugepaged_do_scan(struct page **hpage) { unsigned int progress = 0, pass_through_head = 0; unsigned int pages = khugepaged_pages_to_scan; barrier(); /* write khugepaged_pages_to_scan to local stack */ while (progress < pages) { cond_resched(); #ifndef CONFIG_NUMA if (!*hpage) { *hpage = alloc_hugepage(khugepaged_defrag()); if (unlikely(!*hpage)) { count_vm_event(THP_COLLAPSE_ALLOC_FAILED); break; } count_vm_event(THP_COLLAPSE_ALLOC); } #else if (IS_ERR(*hpage)) break; #endif if (unlikely(kthread_should_stop() || freezing(current))) break; spin_lock(&khugepaged_mm_lock); if (!khugepaged_scan.mm_slot) pass_through_head++; if (khugepaged_has_work() && pass_through_head < 2) progress += khugepaged_scan_mm_slot(pages - progress, hpage); else progress = pages; spin_unlock(&khugepaged_mm_lock); } } static void khugepaged_alloc_sleep(void) { wait_event_freezable_timeout(khugepaged_wait, false, msecs_to_jiffies(khugepaged_alloc_sleep_millisecs)); } #ifndef CONFIG_NUMA static struct page *khugepaged_alloc_hugepage(void) { struct page *hpage; do { hpage = alloc_hugepage(khugepaged_defrag()); if (!hpage) { count_vm_event(THP_COLLAPSE_ALLOC_FAILED); khugepaged_alloc_sleep(); } else count_vm_event(THP_COLLAPSE_ALLOC); } while (unlikely(!hpage) && likely(khugepaged_enabled())); return hpage; } #endif static void khugepaged_loop(void) { struct page *hpage; #ifdef CONFIG_NUMA hpage = NULL; #endif while (likely(khugepaged_enabled())) { #ifndef CONFIG_NUMA hpage = 
khugepaged_alloc_hugepage(); if (unlikely(!hpage)) break; #else if (IS_ERR(hpage)) { khugepaged_alloc_sleep(); hpage = NULL; } #endif khugepaged_do_scan(&hpage); #ifndef CONFIG_NUMA if (hpage) put_page(hpage); #endif try_to_freeze(); if (unlikely(kthread_should_stop())) break; if (khugepaged_has_work()) { if (!khugepaged_scan_sleep_millisecs) continue; wait_event_freezable_timeout(khugepaged_wait, false, msecs_to_jiffies(khugepaged_scan_sleep_millisecs)); } else if (khugepaged_enabled()) wait_event_freezable(khugepaged_wait, khugepaged_wait_event()); } } static int khugepaged(void *none) { struct mm_slot *mm_slot; set_freezable(); set_user_nice(current, 19); /* serialize with start_khugepaged() */ mutex_lock(&khugepaged_mutex); for (;;) { mutex_unlock(&khugepaged_mutex); VM_BUG_ON(khugepaged_thread != current); khugepaged_loop(); VM_BUG_ON(khugepaged_thread != current); mutex_lock(&khugepaged_mutex); if (!khugepaged_enabled()) break; if (unlikely(kthread_should_stop())) break; } spin_lock(&khugepaged_mm_lock); mm_slot = khugepaged_scan.mm_slot; khugepaged_scan.mm_slot = NULL; if (mm_slot) collect_mm_slot(mm_slot); spin_unlock(&khugepaged_mm_lock); khugepaged_thread = NULL; mutex_unlock(&khugepaged_mutex); return 0; } void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd) { struct page *page; spin_lock(&mm->page_table_lock); if (unlikely(!pmd_trans_huge(*pmd))) { spin_unlock(&mm->page_table_lock); return; } page = pmd_page(*pmd); VM_BUG_ON(!page_count(page)); get_page(page); spin_unlock(&mm->page_table_lock); split_huge_page(page); put_page(page); BUG_ON(pmd_trans_huge(*pmd)); } static void split_huge_page_address(struct mm_struct *mm, unsigned long address) { pgd_t *pgd; pud_t *pud; pmd_t *pmd; VM_BUG_ON(!(address & ~HPAGE_PMD_MASK)); pgd = pgd_offset(mm, address); if (!pgd_present(*pgd)) return; pud = pud_offset(pgd, address); if (!pud_present(*pud)) return; pmd = pmd_offset(pud, address); if (!pmd_present(*pmd)) return; /* * Caller holds the mmap_sem write 
mode, so a huge pmd cannot * materialize from under us. */ split_huge_page_pmd(mm, pmd); } void __vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start, unsigned long end, long adjust_next) { /* * If the new start address isn't hpage aligned and it could * previously contain an hugepage: check if we need to split * an huge pmd. */ if (start & ~HPAGE_PMD_MASK && (start & HPAGE_PMD_MASK) >= vma->vm_start && (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) split_huge_page_address(vma->vm_mm, start); /* * If the new end address isn't hpage aligned and it could * previously contain an hugepage: check if we need to split * an huge pmd. */ if (end & ~HPAGE_PMD_MASK && (end & HPAGE_PMD_MASK) >= vma->vm_start && (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) split_huge_page_address(vma->vm_mm, end); /* * If we're also updating the vma->vm_next->vm_start, if the new * vm_next->vm_start isn't page aligned and it could previously * contain an hugepage: check if we need to split an huge pmd. */ if (adjust_next > 0) { struct vm_area_struct *next = vma->vm_next; unsigned long nstart = next->vm_start; nstart += adjust_next << PAGE_SHIFT; if (nstart & ~HPAGE_PMD_MASK && (nstart & HPAGE_PMD_MASK) >= next->vm_start && (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end) split_huge_page_address(next->vm_mm, nstart); } }
gpl-2.0
NamelessRom/android_kernel_google_msm
drivers/hwspinlock/u8500_hsem.c
5224
5046
/* * u8500 HWSEM driver * * Copyright (C) 2010-2011 ST-Ericsson * * Implements u8500 semaphore handling for protocol 1, no interrupts. * * Author: Mathieu Poirier <mathieu.poirier@linaro.org> * Heavily borrowed from the work of : * Simon Que <sque@ti.com> * Hari Kanigeri <h-kanigeri2@ti.com> * Ohad Ben-Cohen <ohad@wizery.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include <linux/module.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/pm_runtime.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/hwspinlock.h> #include <linux/platform_device.h> #include "hwspinlock_internal.h" /* * Implementation of STE's HSem protocol 1 without interrutps. * The only masterID we allow is '0x01' to force people to use * HSems for synchronisation between processors rather than processes * on the ARM core. */ #define U8500_MAX_SEMAPHORE 32 /* a total of 32 semaphore */ #define RESET_SEMAPHORE (0) /* free */ /* * CPU ID for master running u8500 kernel. * Hswpinlocks should only be used to synchonise operations * between the Cortex A9 core and the other CPUs. Hence * forcing the masterID to a preset value. */ #define HSEM_MASTER_ID 0x01 #define HSEM_REGISTER_OFFSET 0x08 #define HSEM_CTRL_REG 0x00 #define HSEM_ICRALL 0x90 #define HSEM_PROTOCOL_1 0x01 static int u8500_hsem_trylock(struct hwspinlock *lock) { void __iomem *lock_addr = lock->priv; writel(HSEM_MASTER_ID, lock_addr); /* get only first 4 bit and compare to masterID. * if equal, we have the semaphore, otherwise * someone else has it. 
*/ return (HSEM_MASTER_ID == (0x0F & readl(lock_addr))); } static void u8500_hsem_unlock(struct hwspinlock *lock) { void __iomem *lock_addr = lock->priv; /* release the lock by writing 0 to it */ writel(RESET_SEMAPHORE, lock_addr); } /* * u8500: what value is recommended here ? */ static void u8500_hsem_relax(struct hwspinlock *lock) { ndelay(50); } static const struct hwspinlock_ops u8500_hwspinlock_ops = { .trylock = u8500_hsem_trylock, .unlock = u8500_hsem_unlock, .relax = u8500_hsem_relax, }; static int __devinit u8500_hsem_probe(struct platform_device *pdev) { struct hwspinlock_pdata *pdata = pdev->dev.platform_data; struct hwspinlock_device *bank; struct hwspinlock *hwlock; struct resource *res; void __iomem *io_base; int i, ret, num_locks = U8500_MAX_SEMAPHORE; ulong val; if (!pdata) return -ENODEV; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENODEV; io_base = ioremap(res->start, resource_size(res)); if (!io_base) return -ENOMEM; /* make sure protocol 1 is selected */ val = readl(io_base + HSEM_CTRL_REG); writel((val & ~HSEM_PROTOCOL_1), io_base + HSEM_CTRL_REG); /* clear all interrupts */ writel(0xFFFF, io_base + HSEM_ICRALL); bank = kzalloc(sizeof(*bank) + num_locks * sizeof(*hwlock), GFP_KERNEL); if (!bank) { ret = -ENOMEM; goto iounmap_base; } platform_set_drvdata(pdev, bank); for (i = 0, hwlock = &bank->lock[0]; i < num_locks; i++, hwlock++) hwlock->priv = io_base + HSEM_REGISTER_OFFSET + sizeof(u32) * i; /* no pm needed for HSem but required to comply with hwspilock core */ pm_runtime_enable(&pdev->dev); ret = hwspin_lock_register(bank, &pdev->dev, &u8500_hwspinlock_ops, pdata->base_id, num_locks); if (ret) goto reg_fail; return 0; reg_fail: pm_runtime_disable(&pdev->dev); kfree(bank); iounmap_base: iounmap(io_base); return ret; } static int __devexit u8500_hsem_remove(struct platform_device *pdev) { struct hwspinlock_device *bank = platform_get_drvdata(pdev); void __iomem *io_base = bank->lock[0].priv - 
HSEM_REGISTER_OFFSET; int ret; /* clear all interrupts */ writel(0xFFFF, io_base + HSEM_ICRALL); ret = hwspin_lock_unregister(bank); if (ret) { dev_err(&pdev->dev, "%s failed: %d\n", __func__, ret); return ret; } pm_runtime_disable(&pdev->dev); iounmap(io_base); kfree(bank); return 0; } static struct platform_driver u8500_hsem_driver = { .probe = u8500_hsem_probe, .remove = __devexit_p(u8500_hsem_remove), .driver = { .name = "u8500_hsem", .owner = THIS_MODULE, }, }; static int __init u8500_hsem_init(void) { return platform_driver_register(&u8500_hsem_driver); } /* board init code might need to reserve hwspinlocks for predefined purposes */ postcore_initcall(u8500_hsem_init); static void __exit u8500_hsem_exit(void) { platform_driver_unregister(&u8500_hsem_driver); } module_exit(u8500_hsem_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Hardware Spinlock driver for u8500"); MODULE_AUTHOR("Mathieu Poirier <mathieu.poirier@linaro.org>");
gpl-2.0
devcreations07/spirit_cancro
arch/x86/xen/vga.c
7784
2473
#include <linux/screen_info.h> #include <linux/init.h> #include <asm/bootparam.h> #include <asm/setup.h> #include <xen/interface/xen.h> #include "xen-ops.h" void __init xen_init_vga(const struct dom0_vga_console_info *info, size_t size) { struct screen_info *screen_info = &boot_params.screen_info; /* This is drawn from a dump from vgacon:startup in * standard Linux. */ screen_info->orig_video_mode = 3; screen_info->orig_video_isVGA = 1; screen_info->orig_video_lines = 25; screen_info->orig_video_cols = 80; screen_info->orig_video_ega_bx = 3; screen_info->orig_video_points = 16; screen_info->orig_y = screen_info->orig_video_lines - 1; switch (info->video_type) { case XEN_VGATYPE_TEXT_MODE_3: if (size < offsetof(struct dom0_vga_console_info, u.text_mode_3) + sizeof(info->u.text_mode_3)) break; screen_info->orig_video_lines = info->u.text_mode_3.rows; screen_info->orig_video_cols = info->u.text_mode_3.columns; screen_info->orig_x = info->u.text_mode_3.cursor_x; screen_info->orig_y = info->u.text_mode_3.cursor_y; screen_info->orig_video_points = info->u.text_mode_3.font_height; break; case XEN_VGATYPE_VESA_LFB: if (size < offsetof(struct dom0_vga_console_info, u.vesa_lfb.gbl_caps)) break; screen_info->orig_video_isVGA = VIDEO_TYPE_VLFB; screen_info->lfb_width = info->u.vesa_lfb.width; screen_info->lfb_height = info->u.vesa_lfb.height; screen_info->lfb_depth = info->u.vesa_lfb.bits_per_pixel; screen_info->lfb_base = info->u.vesa_lfb.lfb_base; screen_info->lfb_size = info->u.vesa_lfb.lfb_size; screen_info->lfb_linelength = info->u.vesa_lfb.bytes_per_line; screen_info->red_size = info->u.vesa_lfb.red_size; screen_info->red_pos = info->u.vesa_lfb.red_pos; screen_info->green_size = info->u.vesa_lfb.green_size; screen_info->green_pos = info->u.vesa_lfb.green_pos; screen_info->blue_size = info->u.vesa_lfb.blue_size; screen_info->blue_pos = info->u.vesa_lfb.blue_pos; screen_info->rsvd_size = info->u.vesa_lfb.rsvd_size; screen_info->rsvd_pos = info->u.vesa_lfb.rsvd_pos; if 
(size >= offsetof(struct dom0_vga_console_info, u.vesa_lfb.gbl_caps) + sizeof(info->u.vesa_lfb.gbl_caps)) screen_info->capabilities = info->u.vesa_lfb.gbl_caps; if (size >= offsetof(struct dom0_vga_console_info, u.vesa_lfb.mode_attrs) + sizeof(info->u.vesa_lfb.mode_attrs)) screen_info->vesa_attributes = info->u.vesa_lfb.mode_attrs; break; } }
gpl-2.0
ShinySide/HispAsian_S5
arch/x86/xen/vga.c
7784
2473
#include <linux/screen_info.h> #include <linux/init.h> #include <asm/bootparam.h> #include <asm/setup.h> #include <xen/interface/xen.h> #include "xen-ops.h" void __init xen_init_vga(const struct dom0_vga_console_info *info, size_t size) { struct screen_info *screen_info = &boot_params.screen_info; /* This is drawn from a dump from vgacon:startup in * standard Linux. */ screen_info->orig_video_mode = 3; screen_info->orig_video_isVGA = 1; screen_info->orig_video_lines = 25; screen_info->orig_video_cols = 80; screen_info->orig_video_ega_bx = 3; screen_info->orig_video_points = 16; screen_info->orig_y = screen_info->orig_video_lines - 1; switch (info->video_type) { case XEN_VGATYPE_TEXT_MODE_3: if (size < offsetof(struct dom0_vga_console_info, u.text_mode_3) + sizeof(info->u.text_mode_3)) break; screen_info->orig_video_lines = info->u.text_mode_3.rows; screen_info->orig_video_cols = info->u.text_mode_3.columns; screen_info->orig_x = info->u.text_mode_3.cursor_x; screen_info->orig_y = info->u.text_mode_3.cursor_y; screen_info->orig_video_points = info->u.text_mode_3.font_height; break; case XEN_VGATYPE_VESA_LFB: if (size < offsetof(struct dom0_vga_console_info, u.vesa_lfb.gbl_caps)) break; screen_info->orig_video_isVGA = VIDEO_TYPE_VLFB; screen_info->lfb_width = info->u.vesa_lfb.width; screen_info->lfb_height = info->u.vesa_lfb.height; screen_info->lfb_depth = info->u.vesa_lfb.bits_per_pixel; screen_info->lfb_base = info->u.vesa_lfb.lfb_base; screen_info->lfb_size = info->u.vesa_lfb.lfb_size; screen_info->lfb_linelength = info->u.vesa_lfb.bytes_per_line; screen_info->red_size = info->u.vesa_lfb.red_size; screen_info->red_pos = info->u.vesa_lfb.red_pos; screen_info->green_size = info->u.vesa_lfb.green_size; screen_info->green_pos = info->u.vesa_lfb.green_pos; screen_info->blue_size = info->u.vesa_lfb.blue_size; screen_info->blue_pos = info->u.vesa_lfb.blue_pos; screen_info->rsvd_size = info->u.vesa_lfb.rsvd_size; screen_info->rsvd_pos = info->u.vesa_lfb.rsvd_pos; if 
(size >= offsetof(struct dom0_vga_console_info, u.vesa_lfb.gbl_caps) + sizeof(info->u.vesa_lfb.gbl_caps)) screen_info->capabilities = info->u.vesa_lfb.gbl_caps; if (size >= offsetof(struct dom0_vga_console_info, u.vesa_lfb.mode_attrs) + sizeof(info->u.vesa_lfb.mode_attrs)) screen_info->vesa_attributes = info->u.vesa_lfb.mode_attrs; break; } }
gpl-2.0
juston-li/hammerhead
drivers/ata/pata_ninja32.c
8040
5498
/* * pata_ninja32.c - Ninja32 PATA for new ATA layer * (C) 2007 Red Hat Inc * * Note: The controller like many controllers has shared timings for * PIO and DMA. We thus flip to the DMA timings in dma_start and flip back * in the dma_stop function. Thus we actually don't need a set_dmamode * method as the PIO method is always called and will set the right PIO * timing parameters. * * The Ninja32 Cardbus is not a generic SFF controller. Instead it is * laid out as follows off BAR 0. This is based upon Mark Lord's delkin * driver and the extensive analysis done by the BSD developers, notably * ITOH Yasufumi. * * Base + 0x00 IRQ Status * Base + 0x01 IRQ control * Base + 0x02 Chipset control * Base + 0x03 Unknown * Base + 0x04 VDMA and reset control + wait bits * Base + 0x08 BMIMBA * Base + 0x0C DMA Length * Base + 0x10 Taskfile * Base + 0x18 BMDMA Status ? * Base + 0x1C * Base + 0x1D Bus master control * bit 0 = enable * bit 1 = 0 write/1 read * bit 2 = 1 sgtable * bit 3 = go * bit 4-6 wait bits * bit 7 = done * Base + 0x1E AltStatus * Base + 0x1F timing register */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #define DRV_NAME "pata_ninja32" #define DRV_VERSION "0.1.5" /** * ninja32_set_piomode - set initial PIO mode data * @ap: ATA interface * @adev: ATA device * * Called to do the PIO mode setup. Our timing registers are shared * but we want to set the PIO timing by default. 
*/ static void ninja32_set_piomode(struct ata_port *ap, struct ata_device *adev) { static u16 pio_timing[5] = { 0xd6, 0x85, 0x44, 0x33, 0x13 }; iowrite8(pio_timing[adev->pio_mode - XFER_PIO_0], ap->ioaddr.bmdma_addr + 0x1f); ap->private_data = adev; } static void ninja32_dev_select(struct ata_port *ap, unsigned int device) { struct ata_device *adev = &ap->link.device[device]; if (ap->private_data != adev) { iowrite8(0xd6, ap->ioaddr.bmdma_addr + 0x1f); ata_sff_dev_select(ap, device); ninja32_set_piomode(ap, adev); } } static struct scsi_host_template ninja32_sht = { ATA_BMDMA_SHT(DRV_NAME), }; static struct ata_port_operations ninja32_port_ops = { .inherits = &ata_bmdma_port_ops, .sff_dev_select = ninja32_dev_select, .cable_detect = ata_cable_40wire, .set_piomode = ninja32_set_piomode, .sff_data_xfer = ata_sff_data_xfer32 }; static void ninja32_program(void __iomem *base) { iowrite8(0x05, base + 0x01); /* Enable interrupt lines */ iowrite8(0xBE, base + 0x02); /* Burst, ?? setup */ iowrite8(0x01, base + 0x03); /* Unknown */ iowrite8(0x20, base + 0x04); /* WAIT0 */ iowrite8(0x8f, base + 0x05); /* Unknown */ iowrite8(0xa4, base + 0x1c); /* Unknown */ iowrite8(0x83, base + 0x1d); /* BMDMA control: WAIT0 */ } static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id) { struct ata_host *host; struct ata_port *ap; void __iomem *base; int rc; host = ata_host_alloc(&dev->dev, 1); if (!host) return -ENOMEM; ap = host->ports[0]; /* Set up the PCI device */ rc = pcim_enable_device(dev); if (rc) return rc; rc = pcim_iomap_regions(dev, 1 << 0, DRV_NAME); if (rc == -EBUSY) pcim_pin_device(dev); if (rc) return rc; host->iomap = pcim_iomap_table(dev); rc = pci_set_dma_mask(dev, ATA_DMA_MASK); if (rc) return rc; rc = pci_set_consistent_dma_mask(dev, ATA_DMA_MASK); if (rc) return rc; pci_set_master(dev); /* Set up the register mappings. 
We use the I/O mapping as only the older chips also have MMIO on BAR 1 */ base = host->iomap[0]; if (!base) return -ENOMEM; ap->ops = &ninja32_port_ops; ap->pio_mask = ATA_PIO4; ap->flags |= ATA_FLAG_SLAVE_POSS; ap->ioaddr.cmd_addr = base + 0x10; ap->ioaddr.ctl_addr = base + 0x1E; ap->ioaddr.altstatus_addr = base + 0x1E; ap->ioaddr.bmdma_addr = base; ata_sff_std_ports(&ap->ioaddr); ap->pflags = ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE; ninja32_program(base); /* FIXME: Should we disable them at remove ? */ return ata_host_activate(host, dev->irq, ata_bmdma_interrupt, IRQF_SHARED, &ninja32_sht); } #ifdef CONFIG_PM static int ninja32_reinit_one(struct pci_dev *pdev) { struct ata_host *host = dev_get_drvdata(&pdev->dev); int rc; rc = ata_pci_device_do_resume(pdev); if (rc) return rc; ninja32_program(host->iomap[0]); ata_host_resume(host); return 0; } #endif static const struct pci_device_id ninja32[] = { { 0x10FC, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { 0x1145, 0x8008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { 0x1145, 0xf008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { 0x1145, 0xf021, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { 0x1145, 0xf024, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { 0x1145, 0xf02C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { }, }; static struct pci_driver ninja32_pci_driver = { .name = DRV_NAME, .id_table = ninja32, .probe = ninja32_init_one, .remove = ata_pci_remove_one, #ifdef CONFIG_PM .suspend = ata_pci_device_suspend, .resume = ninja32_reinit_one, #endif }; static int __init ninja32_init(void) { return pci_register_driver(&ninja32_pci_driver); } static void __exit ninja32_exit(void) { pci_unregister_driver(&ninja32_pci_driver); } MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("low-level driver for Ninja32 ATA"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, ninja32); MODULE_VERSION(DRV_VERSION); module_init(ninja32_init); module_exit(ninja32_exit);
gpl-2.0
jaaron/linux-mips-ip30
drivers/ata/pata_hpt3x2n.c
8040
16160
/* * Libata driver for the HighPoint 371N, 372N, and 302N UDMA66 ATA controllers. * * This driver is heavily based upon: * * linux/drivers/ide/pci/hpt366.c Version 0.36 April 25, 2003 * * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org> * Portions Copyright (C) 2001 Sun Microsystems, Inc. * Portions Copyright (C) 2003 Red Hat Inc * Portions Copyright (C) 2005-2010 MontaVista Software, Inc. * * * TODO * Work out best PLL policy */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #define DRV_NAME "pata_hpt3x2n" #define DRV_VERSION "0.3.15" enum { HPT_PCI_FAST = (1 << 31), PCI66 = (1 << 1), USE_DPLL = (1 << 0) }; struct hpt_clock { u8 xfer_speed; u32 timing; }; struct hpt_chip { const char *name; struct hpt_clock *clocks[3]; }; /* key for bus clock timings * bit * 0:3 data_high_time. Inactive time of DIOW_/DIOR_ for PIO and MW DMA. * cycles = value + 1 * 4:8 data_low_time. Active time of DIOW_/DIOR_ for PIO and MW DMA. * cycles = value + 1 * 9:12 cmd_high_time. Inactive time of DIOW_/DIOR_ during task file * register access. * 13:17 cmd_low_time. Active time of DIOW_/DIOR_ during task file * register access. * 18:20 udma_cycle_time. Clock cycles for UDMA xfer. * 21 CLK frequency for UDMA: 0=ATA clock, 1=dual ATA clock. * 22:24 pre_high_time. Time to initialize 1st cycle for PIO and MW DMA xfer. * 25:27 cmd_pre_high_time. Time to initialize 1st PIO cycle for task file * register access. * 28 UDMA enable. * 29 DMA enable. * 30 PIO_MST enable. If set, the chip is in bus master mode during * PIO xfer. * 31 FIFO enable. Only for PIO. 
*/ /* 66MHz DPLL clocks */ static struct hpt_clock hpt3x2n_clocks[] = { { XFER_UDMA_7, 0x1c869c62 }, { XFER_UDMA_6, 0x1c869c62 }, { XFER_UDMA_5, 0x1c8a9c62 }, { XFER_UDMA_4, 0x1c8a9c62 }, { XFER_UDMA_3, 0x1c8e9c62 }, { XFER_UDMA_2, 0x1c929c62 }, { XFER_UDMA_1, 0x1c9a9c62 }, { XFER_UDMA_0, 0x1c829c62 }, { XFER_MW_DMA_2, 0x2c829c62 }, { XFER_MW_DMA_1, 0x2c829c66 }, { XFER_MW_DMA_0, 0x2c829d2e }, { XFER_PIO_4, 0x0c829c62 }, { XFER_PIO_3, 0x0c829c84 }, { XFER_PIO_2, 0x0c829ca6 }, { XFER_PIO_1, 0x0d029d26 }, { XFER_PIO_0, 0x0d029d5e }, }; /** * hpt3x2n_find_mode - reset the hpt3x2n bus * @ap: ATA port * @speed: transfer mode * * Return the 32bit register programming information for this channel * that matches the speed provided. For the moment the clocks table * is hard coded but easy to change. This will be needed if we use * different DPLLs */ static u32 hpt3x2n_find_mode(struct ata_port *ap, int speed) { struct hpt_clock *clocks = hpt3x2n_clocks; while (clocks->xfer_speed) { if (clocks->xfer_speed == speed) return clocks->timing; clocks++; } BUG(); return 0xffffffffU; /* silence compiler warning */ } /** * hpt372n_filter - mode selection filter * @adev: ATA device * @mask: mode mask * * The Marvell bridge chips used on the HighPoint SATA cards do not seem * to support the UltraDMA modes 1, 2, and 3 as well as any MWDMA modes... 
*/ static unsigned long hpt372n_filter(struct ata_device *adev, unsigned long mask) { if (ata_id_is_sata(adev->id)) mask &= ~((0xE << ATA_SHIFT_UDMA) | ATA_MASK_MWDMA); return mask; } /** * hpt3x2n_cable_detect - Detect the cable type * @ap: ATA port to detect on * * Return the cable type attached to this port */ static int hpt3x2n_cable_detect(struct ata_port *ap) { u8 scr2, ata66; struct pci_dev *pdev = to_pci_dev(ap->host->dev); pci_read_config_byte(pdev, 0x5B, &scr2); pci_write_config_byte(pdev, 0x5B, scr2 & ~0x01); udelay(10); /* debounce */ /* Cable register now active */ pci_read_config_byte(pdev, 0x5A, &ata66); /* Restore state */ pci_write_config_byte(pdev, 0x5B, scr2); if (ata66 & (2 >> ap->port_no)) return ATA_CBL_PATA40; else return ATA_CBL_PATA80; } /** * hpt3x2n_pre_reset - reset the hpt3x2n bus * @link: ATA link to reset * @deadline: deadline jiffies for the operation * * Perform the initial reset handling for the 3x2n series controllers. * Reset the hardware and state machine, */ static int hpt3x2n_pre_reset(struct ata_link *link, unsigned long deadline) { struct ata_port *ap = link->ap; struct pci_dev *pdev = to_pci_dev(ap->host->dev); /* Reset the state machine */ pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37); udelay(100); return ata_sff_prereset(link, deadline); } static void hpt3x2n_set_mode(struct ata_port *ap, struct ata_device *adev, u8 mode) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); u32 addr1, addr2; u32 reg, timing, mask; u8 fast; addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no); addr2 = 0x51 + 4 * ap->port_no; /* Fast interrupt prediction disable, hold off interrupt disable */ pci_read_config_byte(pdev, addr2, &fast); fast &= ~0x07; pci_write_config_byte(pdev, addr2, fast); /* Determine timing mask and find matching mode entry */ if (mode < XFER_MW_DMA_0) mask = 0xcfc3ffff; else if (mode < XFER_UDMA_0) mask = 0x31c001ff; else mask = 0x303c0000; timing = hpt3x2n_find_mode(ap, mode); pci_read_config_dword(pdev, addr1, 
&reg); reg = (reg & ~mask) | (timing & mask); pci_write_config_dword(pdev, addr1, reg); } /** * hpt3x2n_set_piomode - PIO setup * @ap: ATA interface * @adev: device on the interface * * Perform PIO mode setup. */ static void hpt3x2n_set_piomode(struct ata_port *ap, struct ata_device *adev) { hpt3x2n_set_mode(ap, adev, adev->pio_mode); } /** * hpt3x2n_set_dmamode - DMA timing setup * @ap: ATA interface * @adev: Device being configured * * Set up the channel for MWDMA or UDMA modes. */ static void hpt3x2n_set_dmamode(struct ata_port *ap, struct ata_device *adev) { hpt3x2n_set_mode(ap, adev, adev->dma_mode); } /** * hpt3x2n_bmdma_end - DMA engine stop * @qc: ATA command * * Clean up after the HPT3x2n and later DMA engine */ static void hpt3x2n_bmdma_stop(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct pci_dev *pdev = to_pci_dev(ap->host->dev); int mscreg = 0x50 + 2 * ap->port_no; u8 bwsr_stat, msc_stat; pci_read_config_byte(pdev, 0x6A, &bwsr_stat); pci_read_config_byte(pdev, mscreg, &msc_stat); if (bwsr_stat & (1 << ap->port_no)) pci_write_config_byte(pdev, mscreg, msc_stat | 0x30); ata_bmdma_stop(qc); } /** * hpt3x2n_set_clock - clock control * @ap: ATA port * @source: 0x21 or 0x23 for PLL or PCI sourced clock * * Switch the ATA bus clock between the PLL and PCI clock sources * while correctly isolating the bus and resetting internal logic * * We must use the DPLL for * - writing * - second channel UDMA7 (SATA ports) or higher * - 66MHz PCI * * or we will underclock the device and get reduced performance. 
*/ static void hpt3x2n_set_clock(struct ata_port *ap, int source) { void __iomem *bmdma = ap->ioaddr.bmdma_addr - ap->port_no * 8; /* Tristate the bus */ iowrite8(0x80, bmdma+0x73); iowrite8(0x80, bmdma+0x77); /* Switch clock and reset channels */ iowrite8(source, bmdma+0x7B); iowrite8(0xC0, bmdma+0x79); /* Reset state machines, avoid enabling the disabled channels */ iowrite8(ioread8(bmdma+0x70) | 0x32, bmdma+0x70); iowrite8(ioread8(bmdma+0x74) | 0x32, bmdma+0x74); /* Complete reset */ iowrite8(0x00, bmdma+0x79); /* Reconnect channels to bus */ iowrite8(0x00, bmdma+0x73); iowrite8(0x00, bmdma+0x77); } static int hpt3x2n_use_dpll(struct ata_port *ap, int writing) { long flags = (long)ap->host->private_data; /* See if we should use the DPLL */ if (writing) return USE_DPLL; /* Needed for write */ if (flags & PCI66) return USE_DPLL; /* Needed at 66Mhz */ return 0; } static int hpt3x2n_qc_defer(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct ata_port *alt = ap->host->ports[ap->port_no ^ 1]; int rc, flags = (long)ap->host->private_data; int dpll = hpt3x2n_use_dpll(ap, qc->tf.flags & ATA_TFLAG_WRITE); /* First apply the usual rules */ rc = ata_std_qc_defer(qc); if (rc != 0) return rc; if ((flags & USE_DPLL) != dpll && alt->qc_active) return ATA_DEFER_PORT; return 0; } static unsigned int hpt3x2n_qc_issue(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; int flags = (long)ap->host->private_data; int dpll = hpt3x2n_use_dpll(ap, qc->tf.flags & ATA_TFLAG_WRITE); if ((flags & USE_DPLL) != dpll) { flags &= ~USE_DPLL; flags |= dpll; ap->host->private_data = (void *)(long)flags; hpt3x2n_set_clock(ap, dpll ? 0x21 : 0x23); } return ata_bmdma_qc_issue(qc); } static struct scsi_host_template hpt3x2n_sht = { ATA_BMDMA_SHT(DRV_NAME), }; /* * Configuration for HPT302N/371N. 
*/ static struct ata_port_operations hpt3xxn_port_ops = { .inherits = &ata_bmdma_port_ops, .bmdma_stop = hpt3x2n_bmdma_stop, .qc_defer = hpt3x2n_qc_defer, .qc_issue = hpt3x2n_qc_issue, .cable_detect = hpt3x2n_cable_detect, .set_piomode = hpt3x2n_set_piomode, .set_dmamode = hpt3x2n_set_dmamode, .prereset = hpt3x2n_pre_reset, }; /* * Configuration for HPT372N. Same as 302N/371N but we have a mode filter. */ static struct ata_port_operations hpt372n_port_ops = { .inherits = &hpt3xxn_port_ops, .mode_filter = &hpt372n_filter, }; /** * hpt3xn_calibrate_dpll - Calibrate the DPLL loop * @dev: PCI device * * Perform a calibration cycle on the HPT3xN DPLL. Returns 1 if this * succeeds */ static int hpt3xn_calibrate_dpll(struct pci_dev *dev) { u8 reg5b; u32 reg5c; int tries; for (tries = 0; tries < 0x5000; tries++) { udelay(50); pci_read_config_byte(dev, 0x5b, &reg5b); if (reg5b & 0x80) { /* See if it stays set */ for (tries = 0; tries < 0x1000; tries++) { pci_read_config_byte(dev, 0x5b, &reg5b); /* Failed ? 
*/ if ((reg5b & 0x80) == 0) return 0; } /* Turn off tuning, we have the DPLL set */ pci_read_config_dword(dev, 0x5c, &reg5c); pci_write_config_dword(dev, 0x5c, reg5c & ~0x100); return 1; } } /* Never went stable */ return 0; } static int hpt3x2n_pci_clock(struct pci_dev *pdev) { unsigned long freq; u32 fcnt; unsigned long iobase = pci_resource_start(pdev, 4); fcnt = inl(iobase + 0x90); /* Not PCI readable for some chips */ if ((fcnt >> 12) != 0xABCDE) { int i; u16 sr; u32 total = 0; pr_warn("BIOS clock data not set\n"); /* This is the process the HPT371 BIOS is reported to use */ for (i = 0; i < 128; i++) { pci_read_config_word(pdev, 0x78, &sr); total += sr & 0x1FF; udelay(15); } fcnt = total / 128; } fcnt &= 0x1FF; freq = (fcnt * 77) / 192; /* Clamp to bands */ if (freq < 40) return 33; if (freq < 45) return 40; if (freq < 55) return 50; return 66; } /** * hpt3x2n_init_one - Initialise an HPT37X/302 * @dev: PCI device * @id: Entry in match table * * Initialise an HPT3x2n device. There are some interesting complications * here. Firstly the chip may report 366 and be one of several variants. * Secondly all the timings depend on the clock for the chip which we must * detect and look up * * This is the known chip mappings. It may be missing a couple of later * releases. 
* * Chip version PCI Rev Notes * HPT372 4 (HPT366) 5 Other driver * HPT372N 4 (HPT366) 6 UDMA133 * HPT372 5 (HPT372) 1 Other driver * HPT372N 5 (HPT372) 2 UDMA133 * HPT302 6 (HPT302) * Other driver * HPT302N 6 (HPT302) > 1 UDMA133 * HPT371 7 (HPT371) * Other driver * HPT371N 7 (HPT371) > 1 UDMA133 * HPT374 8 (HPT374) * Other driver * HPT372N 9 (HPT372N) * UDMA133 * * (1) UDMA133 support depends on the bus clock */ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id) { /* HPT372N - UDMA133 */ static const struct ata_port_info info_hpt372n = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA6, .port_ops = &hpt372n_port_ops }; /* HPT302N and HPT371N - UDMA133 */ static const struct ata_port_info info_hpt3xxn = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA6, .port_ops = &hpt3xxn_port_ops }; const struct ata_port_info *ppi[] = { &info_hpt3xxn, NULL }; u8 rev = dev->revision; u8 irqmask; unsigned int pci_mhz; unsigned int f_low, f_high; int adjust; unsigned long iobase = pci_resource_start(dev, 4); void *hpriv = (void *)USE_DPLL; int rc; rc = pcim_enable_device(dev); if (rc) return rc; switch (dev->device) { case PCI_DEVICE_ID_TTI_HPT366: /* 372N if rev >= 6 */ if (rev < 6) return -ENODEV; goto hpt372n; case PCI_DEVICE_ID_TTI_HPT371: /* 371N if rev >= 2 */ if (rev < 2) return -ENODEV; break; case PCI_DEVICE_ID_TTI_HPT372: /* 372N if rev >= 2 */ if (rev < 2) return -ENODEV; goto hpt372n; case PCI_DEVICE_ID_TTI_HPT302: /* 302N if rev >= 2 */ if (rev < 2) return -ENODEV; break; case PCI_DEVICE_ID_TTI_HPT372N: hpt372n: ppi[0] = &info_hpt372n; break; default: pr_err("PCI table is bogus, please report (%d)\n", dev->device); return -ENODEV; } /* Ok so this is a chip we support */ pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4)); pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78); pci_write_config_byte(dev, PCI_MIN_GNT, 
0x08); pci_write_config_byte(dev, PCI_MAX_LAT, 0x08); pci_read_config_byte(dev, 0x5A, &irqmask); irqmask &= ~0x10; pci_write_config_byte(dev, 0x5a, irqmask); /* * HPT371 chips physically have only one channel, the secondary one, * but the primary channel registers do exist! Go figure... * So, we manually disable the non-existing channel here * (if the BIOS hasn't done this already). */ if (dev->device == PCI_DEVICE_ID_TTI_HPT371) { u8 mcr1; pci_read_config_byte(dev, 0x50, &mcr1); mcr1 &= ~0x04; pci_write_config_byte(dev, 0x50, mcr1); } /* * Tune the PLL. HPT recommend using 75 for SATA, 66 for UDMA133 or * 50 for UDMA100. Right now we always use 66 */ pci_mhz = hpt3x2n_pci_clock(dev); f_low = (pci_mhz * 48) / 66; /* PCI Mhz for 66Mhz DPLL */ f_high = f_low + 2; /* Tolerance */ pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low | 0x100); /* PLL clock */ pci_write_config_byte(dev, 0x5B, 0x21); /* Unlike the 37x we don't try jiggling the frequency */ for (adjust = 0; adjust < 8; adjust++) { if (hpt3xn_calibrate_dpll(dev)) break; pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low); } if (adjust == 8) { pr_err("DPLL did not stabilize!\n"); return -ENODEV; } pr_info("bus clock %dMHz, using 66MHz DPLL\n", pci_mhz); /* * Set our private data up. We only need a few flags * so we use it directly. */ if (pci_mhz > 60) hpriv = (void *)(PCI66 | USE_DPLL); /* * On HPT371N, if ATA clock is 66 MHz we must set bit 2 in * the MISC. register to stretch the UltraDMA Tss timing. * NOTE: This register is only writeable via I/O space. 
*/ if (dev->device == PCI_DEVICE_ID_TTI_HPT371) outb(inb(iobase + 0x9c) | 0x04, iobase + 0x9c); /* Now kick off ATA set up */ return ata_pci_bmdma_init_one(dev, ppi, &hpt3x2n_sht, hpriv, 0); } static const struct pci_device_id hpt3x2n[] = { { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), }, { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT371), }, { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372), }, { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT302), }, { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372N), }, { }, }; static struct pci_driver hpt3x2n_pci_driver = { .name = DRV_NAME, .id_table = hpt3x2n, .probe = hpt3x2n_init_one, .remove = ata_pci_remove_one }; static int __init hpt3x2n_init(void) { return pci_register_driver(&hpt3x2n_pci_driver); } static void __exit hpt3x2n_exit(void) { pci_unregister_driver(&hpt3x2n_pci_driver); } MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("low-level driver for the Highpoint HPT3xxN"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, hpt3x2n); MODULE_VERSION(DRV_VERSION); module_init(hpt3x2n_init); module_exit(hpt3x2n_exit);
gpl-2.0
h8rift/android_kernel_htc_msm8960_evita-h8x
drivers/ata/pata_hpt3x2n.c
8040
16160
/* * Libata driver for the HighPoint 371N, 372N, and 302N UDMA66 ATA controllers. * * This driver is heavily based upon: * * linux/drivers/ide/pci/hpt366.c Version 0.36 April 25, 2003 * * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org> * Portions Copyright (C) 2001 Sun Microsystems, Inc. * Portions Copyright (C) 2003 Red Hat Inc * Portions Copyright (C) 2005-2010 MontaVista Software, Inc. * * * TODO * Work out best PLL policy */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #define DRV_NAME "pata_hpt3x2n" #define DRV_VERSION "0.3.15" enum { HPT_PCI_FAST = (1 << 31), PCI66 = (1 << 1), USE_DPLL = (1 << 0) }; struct hpt_clock { u8 xfer_speed; u32 timing; }; struct hpt_chip { const char *name; struct hpt_clock *clocks[3]; }; /* key for bus clock timings * bit * 0:3 data_high_time. Inactive time of DIOW_/DIOR_ for PIO and MW DMA. * cycles = value + 1 * 4:8 data_low_time. Active time of DIOW_/DIOR_ for PIO and MW DMA. * cycles = value + 1 * 9:12 cmd_high_time. Inactive time of DIOW_/DIOR_ during task file * register access. * 13:17 cmd_low_time. Active time of DIOW_/DIOR_ during task file * register access. * 18:20 udma_cycle_time. Clock cycles for UDMA xfer. * 21 CLK frequency for UDMA: 0=ATA clock, 1=dual ATA clock. * 22:24 pre_high_time. Time to initialize 1st cycle for PIO and MW DMA xfer. * 25:27 cmd_pre_high_time. Time to initialize 1st PIO cycle for task file * register access. * 28 UDMA enable. * 29 DMA enable. * 30 PIO_MST enable. If set, the chip is in bus master mode during * PIO xfer. * 31 FIFO enable. Only for PIO. 
*/ /* 66MHz DPLL clocks */ static struct hpt_clock hpt3x2n_clocks[] = { { XFER_UDMA_7, 0x1c869c62 }, { XFER_UDMA_6, 0x1c869c62 }, { XFER_UDMA_5, 0x1c8a9c62 }, { XFER_UDMA_4, 0x1c8a9c62 }, { XFER_UDMA_3, 0x1c8e9c62 }, { XFER_UDMA_2, 0x1c929c62 }, { XFER_UDMA_1, 0x1c9a9c62 }, { XFER_UDMA_0, 0x1c829c62 }, { XFER_MW_DMA_2, 0x2c829c62 }, { XFER_MW_DMA_1, 0x2c829c66 }, { XFER_MW_DMA_0, 0x2c829d2e }, { XFER_PIO_4, 0x0c829c62 }, { XFER_PIO_3, 0x0c829c84 }, { XFER_PIO_2, 0x0c829ca6 }, { XFER_PIO_1, 0x0d029d26 }, { XFER_PIO_0, 0x0d029d5e }, }; /** * hpt3x2n_find_mode - reset the hpt3x2n bus * @ap: ATA port * @speed: transfer mode * * Return the 32bit register programming information for this channel * that matches the speed provided. For the moment the clocks table * is hard coded but easy to change. This will be needed if we use * different DPLLs */ static u32 hpt3x2n_find_mode(struct ata_port *ap, int speed) { struct hpt_clock *clocks = hpt3x2n_clocks; while (clocks->xfer_speed) { if (clocks->xfer_speed == speed) return clocks->timing; clocks++; } BUG(); return 0xffffffffU; /* silence compiler warning */ } /** * hpt372n_filter - mode selection filter * @adev: ATA device * @mask: mode mask * * The Marvell bridge chips used on the HighPoint SATA cards do not seem * to support the UltraDMA modes 1, 2, and 3 as well as any MWDMA modes... 
*/ static unsigned long hpt372n_filter(struct ata_device *adev, unsigned long mask) { if (ata_id_is_sata(adev->id)) mask &= ~((0xE << ATA_SHIFT_UDMA) | ATA_MASK_MWDMA); return mask; } /** * hpt3x2n_cable_detect - Detect the cable type * @ap: ATA port to detect on * * Return the cable type attached to this port */ static int hpt3x2n_cable_detect(struct ata_port *ap) { u8 scr2, ata66; struct pci_dev *pdev = to_pci_dev(ap->host->dev); pci_read_config_byte(pdev, 0x5B, &scr2); pci_write_config_byte(pdev, 0x5B, scr2 & ~0x01); udelay(10); /* debounce */ /* Cable register now active */ pci_read_config_byte(pdev, 0x5A, &ata66); /* Restore state */ pci_write_config_byte(pdev, 0x5B, scr2); if (ata66 & (2 >> ap->port_no)) return ATA_CBL_PATA40; else return ATA_CBL_PATA80; } /** * hpt3x2n_pre_reset - reset the hpt3x2n bus * @link: ATA link to reset * @deadline: deadline jiffies for the operation * * Perform the initial reset handling for the 3x2n series controllers. * Reset the hardware and state machine, */ static int hpt3x2n_pre_reset(struct ata_link *link, unsigned long deadline) { struct ata_port *ap = link->ap; struct pci_dev *pdev = to_pci_dev(ap->host->dev); /* Reset the state machine */ pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37); udelay(100); return ata_sff_prereset(link, deadline); } static void hpt3x2n_set_mode(struct ata_port *ap, struct ata_device *adev, u8 mode) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); u32 addr1, addr2; u32 reg, timing, mask; u8 fast; addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no); addr2 = 0x51 + 4 * ap->port_no; /* Fast interrupt prediction disable, hold off interrupt disable */ pci_read_config_byte(pdev, addr2, &fast); fast &= ~0x07; pci_write_config_byte(pdev, addr2, fast); /* Determine timing mask and find matching mode entry */ if (mode < XFER_MW_DMA_0) mask = 0xcfc3ffff; else if (mode < XFER_UDMA_0) mask = 0x31c001ff; else mask = 0x303c0000; timing = hpt3x2n_find_mode(ap, mode); pci_read_config_dword(pdev, addr1, 
&reg); reg = (reg & ~mask) | (timing & mask); pci_write_config_dword(pdev, addr1, reg); } /** * hpt3x2n_set_piomode - PIO setup * @ap: ATA interface * @adev: device on the interface * * Perform PIO mode setup. */ static void hpt3x2n_set_piomode(struct ata_port *ap, struct ata_device *adev) { hpt3x2n_set_mode(ap, adev, adev->pio_mode); } /** * hpt3x2n_set_dmamode - DMA timing setup * @ap: ATA interface * @adev: Device being configured * * Set up the channel for MWDMA or UDMA modes. */ static void hpt3x2n_set_dmamode(struct ata_port *ap, struct ata_device *adev) { hpt3x2n_set_mode(ap, adev, adev->dma_mode); } /** * hpt3x2n_bmdma_end - DMA engine stop * @qc: ATA command * * Clean up after the HPT3x2n and later DMA engine */ static void hpt3x2n_bmdma_stop(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct pci_dev *pdev = to_pci_dev(ap->host->dev); int mscreg = 0x50 + 2 * ap->port_no; u8 bwsr_stat, msc_stat; pci_read_config_byte(pdev, 0x6A, &bwsr_stat); pci_read_config_byte(pdev, mscreg, &msc_stat); if (bwsr_stat & (1 << ap->port_no)) pci_write_config_byte(pdev, mscreg, msc_stat | 0x30); ata_bmdma_stop(qc); } /** * hpt3x2n_set_clock - clock control * @ap: ATA port * @source: 0x21 or 0x23 for PLL or PCI sourced clock * * Switch the ATA bus clock between the PLL and PCI clock sources * while correctly isolating the bus and resetting internal logic * * We must use the DPLL for * - writing * - second channel UDMA7 (SATA ports) or higher * - 66MHz PCI * * or we will underclock the device and get reduced performance. 
*/ static void hpt3x2n_set_clock(struct ata_port *ap, int source) { void __iomem *bmdma = ap->ioaddr.bmdma_addr - ap->port_no * 8; /* Tristate the bus */ iowrite8(0x80, bmdma+0x73); iowrite8(0x80, bmdma+0x77); /* Switch clock and reset channels */ iowrite8(source, bmdma+0x7B); iowrite8(0xC0, bmdma+0x79); /* Reset state machines, avoid enabling the disabled channels */ iowrite8(ioread8(bmdma+0x70) | 0x32, bmdma+0x70); iowrite8(ioread8(bmdma+0x74) | 0x32, bmdma+0x74); /* Complete reset */ iowrite8(0x00, bmdma+0x79); /* Reconnect channels to bus */ iowrite8(0x00, bmdma+0x73); iowrite8(0x00, bmdma+0x77); } static int hpt3x2n_use_dpll(struct ata_port *ap, int writing) { long flags = (long)ap->host->private_data; /* See if we should use the DPLL */ if (writing) return USE_DPLL; /* Needed for write */ if (flags & PCI66) return USE_DPLL; /* Needed at 66Mhz */ return 0; } static int hpt3x2n_qc_defer(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct ata_port *alt = ap->host->ports[ap->port_no ^ 1]; int rc, flags = (long)ap->host->private_data; int dpll = hpt3x2n_use_dpll(ap, qc->tf.flags & ATA_TFLAG_WRITE); /* First apply the usual rules */ rc = ata_std_qc_defer(qc); if (rc != 0) return rc; if ((flags & USE_DPLL) != dpll && alt->qc_active) return ATA_DEFER_PORT; return 0; } static unsigned int hpt3x2n_qc_issue(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; int flags = (long)ap->host->private_data; int dpll = hpt3x2n_use_dpll(ap, qc->tf.flags & ATA_TFLAG_WRITE); if ((flags & USE_DPLL) != dpll) { flags &= ~USE_DPLL; flags |= dpll; ap->host->private_data = (void *)(long)flags; hpt3x2n_set_clock(ap, dpll ? 0x21 : 0x23); } return ata_bmdma_qc_issue(qc); } static struct scsi_host_template hpt3x2n_sht = { ATA_BMDMA_SHT(DRV_NAME), }; /* * Configuration for HPT302N/371N. 
*/ static struct ata_port_operations hpt3xxn_port_ops = { .inherits = &ata_bmdma_port_ops, .bmdma_stop = hpt3x2n_bmdma_stop, .qc_defer = hpt3x2n_qc_defer, .qc_issue = hpt3x2n_qc_issue, .cable_detect = hpt3x2n_cable_detect, .set_piomode = hpt3x2n_set_piomode, .set_dmamode = hpt3x2n_set_dmamode, .prereset = hpt3x2n_pre_reset, }; /* * Configuration for HPT372N. Same as 302N/371N but we have a mode filter. */ static struct ata_port_operations hpt372n_port_ops = { .inherits = &hpt3xxn_port_ops, .mode_filter = &hpt372n_filter, }; /** * hpt3xn_calibrate_dpll - Calibrate the DPLL loop * @dev: PCI device * * Perform a calibration cycle on the HPT3xN DPLL. Returns 1 if this * succeeds */ static int hpt3xn_calibrate_dpll(struct pci_dev *dev) { u8 reg5b; u32 reg5c; int tries; for (tries = 0; tries < 0x5000; tries++) { udelay(50); pci_read_config_byte(dev, 0x5b, &reg5b); if (reg5b & 0x80) { /* See if it stays set */ for (tries = 0; tries < 0x1000; tries++) { pci_read_config_byte(dev, 0x5b, &reg5b); /* Failed ? 
*/ if ((reg5b & 0x80) == 0) return 0; } /* Turn off tuning, we have the DPLL set */ pci_read_config_dword(dev, 0x5c, &reg5c); pci_write_config_dword(dev, 0x5c, reg5c & ~0x100); return 1; } } /* Never went stable */ return 0; } static int hpt3x2n_pci_clock(struct pci_dev *pdev) { unsigned long freq; u32 fcnt; unsigned long iobase = pci_resource_start(pdev, 4); fcnt = inl(iobase + 0x90); /* Not PCI readable for some chips */ if ((fcnt >> 12) != 0xABCDE) { int i; u16 sr; u32 total = 0; pr_warn("BIOS clock data not set\n"); /* This is the process the HPT371 BIOS is reported to use */ for (i = 0; i < 128; i++) { pci_read_config_word(pdev, 0x78, &sr); total += sr & 0x1FF; udelay(15); } fcnt = total / 128; } fcnt &= 0x1FF; freq = (fcnt * 77) / 192; /* Clamp to bands */ if (freq < 40) return 33; if (freq < 45) return 40; if (freq < 55) return 50; return 66; } /** * hpt3x2n_init_one - Initialise an HPT37X/302 * @dev: PCI device * @id: Entry in match table * * Initialise an HPT3x2n device. There are some interesting complications * here. Firstly the chip may report 366 and be one of several variants. * Secondly all the timings depend on the clock for the chip which we must * detect and look up * * This is the known chip mappings. It may be missing a couple of later * releases. 
* * Chip version PCI Rev Notes * HPT372 4 (HPT366) 5 Other driver * HPT372N 4 (HPT366) 6 UDMA133 * HPT372 5 (HPT372) 1 Other driver * HPT372N 5 (HPT372) 2 UDMA133 * HPT302 6 (HPT302) * Other driver * HPT302N 6 (HPT302) > 1 UDMA133 * HPT371 7 (HPT371) * Other driver * HPT371N 7 (HPT371) > 1 UDMA133 * HPT374 8 (HPT374) * Other driver * HPT372N 9 (HPT372N) * UDMA133 * * (1) UDMA133 support depends on the bus clock */ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id) { /* HPT372N - UDMA133 */ static const struct ata_port_info info_hpt372n = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA6, .port_ops = &hpt372n_port_ops }; /* HPT302N and HPT371N - UDMA133 */ static const struct ata_port_info info_hpt3xxn = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA6, .port_ops = &hpt3xxn_port_ops }; const struct ata_port_info *ppi[] = { &info_hpt3xxn, NULL }; u8 rev = dev->revision; u8 irqmask; unsigned int pci_mhz; unsigned int f_low, f_high; int adjust; unsigned long iobase = pci_resource_start(dev, 4); void *hpriv = (void *)USE_DPLL; int rc; rc = pcim_enable_device(dev); if (rc) return rc; switch (dev->device) { case PCI_DEVICE_ID_TTI_HPT366: /* 372N if rev >= 6 */ if (rev < 6) return -ENODEV; goto hpt372n; case PCI_DEVICE_ID_TTI_HPT371: /* 371N if rev >= 2 */ if (rev < 2) return -ENODEV; break; case PCI_DEVICE_ID_TTI_HPT372: /* 372N if rev >= 2 */ if (rev < 2) return -ENODEV; goto hpt372n; case PCI_DEVICE_ID_TTI_HPT302: /* 302N if rev >= 2 */ if (rev < 2) return -ENODEV; break; case PCI_DEVICE_ID_TTI_HPT372N: hpt372n: ppi[0] = &info_hpt372n; break; default: pr_err("PCI table is bogus, please report (%d)\n", dev->device); return -ENODEV; } /* Ok so this is a chip we support */ pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4)); pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78); pci_write_config_byte(dev, PCI_MIN_GNT, 
0x08); pci_write_config_byte(dev, PCI_MAX_LAT, 0x08); pci_read_config_byte(dev, 0x5A, &irqmask); irqmask &= ~0x10; pci_write_config_byte(dev, 0x5a, irqmask); /* * HPT371 chips physically have only one channel, the secondary one, * but the primary channel registers do exist! Go figure... * So, we manually disable the non-existing channel here * (if the BIOS hasn't done this already). */ if (dev->device == PCI_DEVICE_ID_TTI_HPT371) { u8 mcr1; pci_read_config_byte(dev, 0x50, &mcr1); mcr1 &= ~0x04; pci_write_config_byte(dev, 0x50, mcr1); } /* * Tune the PLL. HPT recommend using 75 for SATA, 66 for UDMA133 or * 50 for UDMA100. Right now we always use 66 */ pci_mhz = hpt3x2n_pci_clock(dev); f_low = (pci_mhz * 48) / 66; /* PCI Mhz for 66Mhz DPLL */ f_high = f_low + 2; /* Tolerance */ pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low | 0x100); /* PLL clock */ pci_write_config_byte(dev, 0x5B, 0x21); /* Unlike the 37x we don't try jiggling the frequency */ for (adjust = 0; adjust < 8; adjust++) { if (hpt3xn_calibrate_dpll(dev)) break; pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low); } if (adjust == 8) { pr_err("DPLL did not stabilize!\n"); return -ENODEV; } pr_info("bus clock %dMHz, using 66MHz DPLL\n", pci_mhz); /* * Set our private data up. We only need a few flags * so we use it directly. */ if (pci_mhz > 60) hpriv = (void *)(PCI66 | USE_DPLL); /* * On HPT371N, if ATA clock is 66 MHz we must set bit 2 in * the MISC. register to stretch the UltraDMA Tss timing. * NOTE: This register is only writeable via I/O space. 
*/ if (dev->device == PCI_DEVICE_ID_TTI_HPT371) outb(inb(iobase + 0x9c) | 0x04, iobase + 0x9c); /* Now kick off ATA set up */ return ata_pci_bmdma_init_one(dev, ppi, &hpt3x2n_sht, hpriv, 0); } static const struct pci_device_id hpt3x2n[] = { { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), }, { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT371), }, { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372), }, { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT302), }, { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372N), }, { }, }; static struct pci_driver hpt3x2n_pci_driver = { .name = DRV_NAME, .id_table = hpt3x2n, .probe = hpt3x2n_init_one, .remove = ata_pci_remove_one }; static int __init hpt3x2n_init(void) { return pci_register_driver(&hpt3x2n_pci_driver); } static void __exit hpt3x2n_exit(void) { pci_unregister_driver(&hpt3x2n_pci_driver); } MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("low-level driver for the Highpoint HPT3xxN"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, hpt3x2n); MODULE_VERSION(DRV_VERSION); module_init(hpt3x2n_init); module_exit(hpt3x2n_exit);
gpl-2.0
Hogman500/ouya_1_1-kernel
drivers/macintosh/nvram.c
9320
2552
/* * /dev/nvram driver for Power Macintosh. */ #define NVRAM_VERSION "1.0" #include <linux/module.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/miscdevice.h> #include <linux/fcntl.h> #include <linux/nvram.h> #include <linux/init.h> #include <asm/uaccess.h> #include <asm/nvram.h> #define NVRAM_SIZE 8192 static loff_t nvram_llseek(struct file *file, loff_t offset, int origin) { switch (origin) { case 0: break; case 1: offset += file->f_pos; break; case 2: offset += NVRAM_SIZE; break; default: offset = -1; } if (offset < 0) return -EINVAL; file->f_pos = offset; return file->f_pos; } static ssize_t read_nvram(struct file *file, char __user *buf, size_t count, loff_t *ppos) { unsigned int i; char __user *p = buf; if (!access_ok(VERIFY_WRITE, buf, count)) return -EFAULT; if (*ppos >= NVRAM_SIZE) return 0; for (i = *ppos; count > 0 && i < NVRAM_SIZE; ++i, ++p, --count) if (__put_user(nvram_read_byte(i), p)) return -EFAULT; *ppos = i; return p - buf; } static ssize_t write_nvram(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { unsigned int i; const char __user *p = buf; char c; if (!access_ok(VERIFY_READ, buf, count)) return -EFAULT; if (*ppos >= NVRAM_SIZE) return 0; for (i = *ppos; count > 0 && i < NVRAM_SIZE; ++i, ++p, --count) { if (__get_user(c, p)) return -EFAULT; nvram_write_byte(c, i); } *ppos = i; return p - buf; } static long nvram_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { switch(cmd) { case PMAC_NVRAM_GET_OFFSET: { int part, offset; if (copy_from_user(&part, (void __user*)arg, sizeof(part)) != 0) return -EFAULT; if (part < pmac_nvram_OF || part > pmac_nvram_NR) return -EINVAL; offset = pmac_get_partition(part); if (copy_to_user((void __user*)arg, &offset, sizeof(offset)) != 0) return -EFAULT; break; } default: return -EINVAL; } return 0; } const struct file_operations nvram_fops = { .owner = THIS_MODULE, .llseek = nvram_llseek, .read = read_nvram, .write = write_nvram, 
.unlocked_ioctl = nvram_ioctl, }; static struct miscdevice nvram_dev = { NVRAM_MINOR, "nvram", &nvram_fops }; int __init nvram_init(void) { printk(KERN_INFO "Macintosh non-volatile memory driver v%s\n", NVRAM_VERSION); return misc_register(&nvram_dev); } void __exit nvram_cleanup(void) { misc_deregister( &nvram_dev ); } module_init(nvram_init); module_exit(nvram_cleanup); MODULE_LICENSE("GPL");
gpl-2.0
venkatkamesh/lg_ally_kernel-2.6.XX
net/llc/llc_s_st.c
13416
5815
/* * llc_s_st.c - Defines SAP component state machine transitions. * * The followed transitions are SAP component state machine transitions * which are described in 802.2 LLC protocol standard document. * * Copyright (c) 1997 by Procom Technology, Inc. * 2001-2003 by Arnaldo Carvalho de Melo <acme@conectiva.com.br> * * This program can be redistributed or modified under the terms of the * GNU General Public License as published by the Free Software Foundation. * This program is distributed without any warranty or implied warranty * of merchantability or fitness for a particular purpose. * * See the GNU General Public License for more details. */ #include <linux/types.h> #include <net/llc_if.h> #include <net/llc_s_ev.h> #include <net/llc_s_ac.h> #include <net/llc_s_st.h> /* dummy last-transition indicator; common to all state transition groups * last entry for this state * all members are zeros, .bss zeroes it */ static struct llc_sap_state_trans llc_sap_state_trans_end; /* state LLC_SAP_STATE_INACTIVE transition for * LLC_SAP_EV_ACTIVATION_REQ event */ static llc_sap_action_t llc_sap_inactive_state_actions_1[] = { [0] = llc_sap_action_report_status, [1] = NULL, }; static struct llc_sap_state_trans llc_sap_inactive_state_trans_1 = { .ev = llc_sap_ev_activation_req, .next_state = LLC_SAP_STATE_ACTIVE, .ev_actions = llc_sap_inactive_state_actions_1, }; /* array of pointers; one to each transition */ static struct llc_sap_state_trans *llc_sap_inactive_state_transitions[] = { [0] = &llc_sap_inactive_state_trans_1, [1] = &llc_sap_state_trans_end, }; /* state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_RX_UI event */ static llc_sap_action_t llc_sap_active_state_actions_1[] = { [0] = llc_sap_action_unitdata_ind, [1] = NULL, }; static struct llc_sap_state_trans llc_sap_active_state_trans_1 = { .ev = llc_sap_ev_rx_ui, .next_state = LLC_SAP_STATE_ACTIVE, .ev_actions = llc_sap_active_state_actions_1, }; /* state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_UNITDATA_REQ 
event */ static llc_sap_action_t llc_sap_active_state_actions_2[] = { [0] = llc_sap_action_send_ui, [1] = NULL, }; static struct llc_sap_state_trans llc_sap_active_state_trans_2 = { .ev = llc_sap_ev_unitdata_req, .next_state = LLC_SAP_STATE_ACTIVE, .ev_actions = llc_sap_active_state_actions_2, }; /* state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_XID_REQ event */ static llc_sap_action_t llc_sap_active_state_actions_3[] = { [0] = llc_sap_action_send_xid_c, [1] = NULL, }; static struct llc_sap_state_trans llc_sap_active_state_trans_3 = { .ev = llc_sap_ev_xid_req, .next_state = LLC_SAP_STATE_ACTIVE, .ev_actions = llc_sap_active_state_actions_3, }; /* state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_RX_XID_C event */ static llc_sap_action_t llc_sap_active_state_actions_4[] = { [0] = llc_sap_action_send_xid_r, [1] = NULL, }; static struct llc_sap_state_trans llc_sap_active_state_trans_4 = { .ev = llc_sap_ev_rx_xid_c, .next_state = LLC_SAP_STATE_ACTIVE, .ev_actions = llc_sap_active_state_actions_4, }; /* state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_RX_XID_R event */ static llc_sap_action_t llc_sap_active_state_actions_5[] = { [0] = llc_sap_action_xid_ind, [1] = NULL, }; static struct llc_sap_state_trans llc_sap_active_state_trans_5 = { .ev = llc_sap_ev_rx_xid_r, .next_state = LLC_SAP_STATE_ACTIVE, .ev_actions = llc_sap_active_state_actions_5, }; /* state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_TEST_REQ event */ static llc_sap_action_t llc_sap_active_state_actions_6[] = { [0] = llc_sap_action_send_test_c, [1] = NULL, }; static struct llc_sap_state_trans llc_sap_active_state_trans_6 = { .ev = llc_sap_ev_test_req, .next_state = LLC_SAP_STATE_ACTIVE, .ev_actions = llc_sap_active_state_actions_6, }; /* state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_RX_TEST_C event */ static llc_sap_action_t llc_sap_active_state_actions_7[] = { [0] = llc_sap_action_send_test_r, [1] = NULL, }; static struct llc_sap_state_trans llc_sap_active_state_trans_7 = { .ev = 
llc_sap_ev_rx_test_c, .next_state = LLC_SAP_STATE_ACTIVE, .ev_actions = llc_sap_active_state_actions_7 }; /* state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_RX_TEST_R event */ static llc_sap_action_t llc_sap_active_state_actions_8[] = { [0] = llc_sap_action_test_ind, [1] = NULL, }; static struct llc_sap_state_trans llc_sap_active_state_trans_8 = { .ev = llc_sap_ev_rx_test_r, .next_state = LLC_SAP_STATE_ACTIVE, .ev_actions = llc_sap_active_state_actions_8, }; /* state LLC_SAP_STATE_ACTIVE transition for * LLC_SAP_EV_DEACTIVATION_REQ event */ static llc_sap_action_t llc_sap_active_state_actions_9[] = { [0] = llc_sap_action_report_status, [1] = NULL, }; static struct llc_sap_state_trans llc_sap_active_state_trans_9 = { .ev = llc_sap_ev_deactivation_req, .next_state = LLC_SAP_STATE_INACTIVE, .ev_actions = llc_sap_active_state_actions_9 }; /* array of pointers; one to each transition */ static struct llc_sap_state_trans *llc_sap_active_state_transitions[] = { [0] = &llc_sap_active_state_trans_2, [1] = &llc_sap_active_state_trans_1, [2] = &llc_sap_active_state_trans_3, [3] = &llc_sap_active_state_trans_4, [4] = &llc_sap_active_state_trans_5, [5] = &llc_sap_active_state_trans_6, [6] = &llc_sap_active_state_trans_7, [7] = &llc_sap_active_state_trans_8, [8] = &llc_sap_active_state_trans_9, [9] = &llc_sap_state_trans_end, }; /* SAP state transition table */ struct llc_sap_state llc_sap_state_table[LLC_NR_SAP_STATES] = { [LLC_SAP_STATE_INACTIVE - 1] = { .curr_state = LLC_SAP_STATE_INACTIVE, .transitions = llc_sap_inactive_state_transitions, }, [LLC_SAP_STATE_ACTIVE - 1] = { .curr_state = LLC_SAP_STATE_ACTIVE, .transitions = llc_sap_active_state_transitions, }, };
gpl-2.0
makerbot/linux-Birdwing
fs/ncpfs/mmap.c
105
2972
/* * mmap.c * * Copyright (C) 1995, 1996 by Volker Lendecke * Modified 1997 Peter Waltenberg, Bill Hawes, David Woodhouse for 2.1 dcache * */ #include <linux/stat.h> #include <linux/time.h> #include <linux/kernel.h> #include <linux/gfp.h> #include <linux/mm.h> #include <linux/shm.h> #include <linux/errno.h> #include <linux/mman.h> #include <linux/string.h> #include <linux/fcntl.h> #include <linux/memcontrol.h> #include <asm/uaccess.h> #include "ncp_fs.h" /* * Fill in the supplied page for mmap * XXX: how are we excluding truncate/invalidate here? Maybe need to lock * page? */ static int ncp_file_mmap_fault(struct vm_area_struct *area, struct vm_fault *vmf) { struct file *file = area->vm_file; struct dentry *dentry = file->f_path.dentry; struct inode *inode = dentry->d_inode; char *pg_addr; unsigned int already_read; unsigned int count; int bufsize; int pos; /* XXX: loff_t ? */ /* * ncpfs has nothing against high pages as long * as recvmsg and memset works on it */ vmf->page = alloc_page(GFP_HIGHUSER); if (!vmf->page) return VM_FAULT_OOM; pg_addr = kmap(vmf->page); pos = vmf->pgoff << PAGE_SHIFT; count = PAGE_SIZE; /* what we can read in one go */ bufsize = NCP_SERVER(inode)->buffer_size; already_read = 0; if (ncp_make_open(inode, O_RDONLY) >= 0) { while (already_read < count) { int read_this_time; int to_read; to_read = bufsize - (pos % bufsize); to_read = min_t(unsigned int, to_read, count - already_read); if (ncp_read_kernel(NCP_SERVER(inode), NCP_FINFO(inode)->file_handle, pos, to_read, pg_addr + already_read, &read_this_time) != 0) { read_this_time = 0; } pos += read_this_time; already_read += read_this_time; if (read_this_time < to_read) { break; } } ncp_inode_close(inode); } if (already_read < PAGE_SIZE) memset(pg_addr + already_read, 0, PAGE_SIZE - already_read); flush_dcache_page(vmf->page); kunmap(vmf->page); /* * If I understand ncp_read_kernel() properly, the above always * fetches from the network, here the analogue of disk. 
* -- nyc */ count_vm_event(PGMAJFAULT); mem_cgroup_count_vm_event(area->vm_mm, PGMAJFAULT); return VM_FAULT_MAJOR; } static const struct vm_operations_struct ncp_file_mmap = { .fault = ncp_file_mmap_fault, }; /* This is used for a general mmap of a ncp file */ int ncp_mmap(struct file *file, struct vm_area_struct *vma) { struct inode *inode = file->f_path.dentry->d_inode; DPRINTK("ncp_mmap: called\n"); if (!ncp_conn_valid(NCP_SERVER(inode))) return -EIO; /* only PAGE_COW or read-only supported now */ if (vma->vm_flags & VM_SHARED) return -EINVAL; /* we do not support files bigger than 4GB... We eventually supports just 4GB... */ if (((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff > (1U << (32 - PAGE_SHIFT))) return -EFBIG; vma->vm_ops = &ncp_file_mmap; file_accessed(file); return 0; }
gpl-2.0
jiangdapeng/btrfs-next
drivers/net/usb/mcs7830.c
105
18838
/* * MOSCHIP MCS7830 based (7730/7830/7832) USB 2.0 Ethernet Devices * * based on usbnet.c, asix.c and the vendor provided mcs7830 driver * * Copyright (C) 2010 Andreas Mohr <andi@lisas.de> * Copyright (C) 2006 Arnd Bergmann <arnd@arndb.de> * Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com> * Copyright (C) 2005 Phil Chang <pchang23@sbcglobal.net> * Copyright (c) 2002-2003 TiVo Inc. * * Definitions gathered from MOSCHIP, Data Sheet_7830DA.pdf (thanks!). * * 2010-12-19: add 7832 USB PID ("functionality same as MCS7830"), * per active notification by manufacturer * * TODO: * - support HIF_REG_CONFIG_SLEEPMODE/HIF_REG_CONFIG_TXENABLE (via autopm?) * - implement ethtool_ops get_pauseparam/set_pauseparam * via HIF_REG_PAUSE_THRESHOLD (>= revision C only!) * - implement get_eeprom/[set_eeprom] * - switch PHY on/off on ifup/ifdown (perhaps in usbnet.c, via MII) * - mcs7830_get_regs() handling is weird: for rev 2 we return 32 regs, * can access only ~ 24, remaining user buffer is uninitialized garbage * - anything else? * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/crc32.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/init.h> #include <linux/mii.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/slab.h> #include <linux/usb.h> #include <linux/usb/usbnet.h> /* requests */ #define MCS7830_RD_BMREQ (USB_DIR_IN | USB_TYPE_VENDOR | \ USB_RECIP_DEVICE) #define MCS7830_WR_BMREQ (USB_DIR_OUT | USB_TYPE_VENDOR | \ USB_RECIP_DEVICE) #define MCS7830_RD_BREQ 0x0E #define MCS7830_WR_BREQ 0x0D #define MCS7830_CTRL_TIMEOUT 1000 #define MCS7830_MAX_MCAST 64 #define MCS7830_VENDOR_ID 0x9710 #define MCS7832_PRODUCT_ID 0x7832 #define MCS7830_PRODUCT_ID 0x7830 #define MCS7730_PRODUCT_ID 0x7730 #define SITECOM_VENDOR_ID 0x0DF6 #define LN_030_PRODUCT_ID 0x0021 #define MCS7830_MII_ADVERTISE (ADVERTISE_PAUSE_CAP | ADVERTISE_100FULL | \ ADVERTISE_100HALF | ADVERTISE_10FULL | \ ADVERTISE_10HALF | ADVERTISE_CSMA) /* HIF_REG_XX corresponding index value */ enum { HIF_REG_MULTICAST_HASH = 0x00, HIF_REG_PACKET_GAP1 = 0x08, HIF_REG_PACKET_GAP2 = 0x09, HIF_REG_PHY_DATA = 0x0a, HIF_REG_PHY_CMD1 = 0x0c, HIF_REG_PHY_CMD1_READ = 0x40, HIF_REG_PHY_CMD1_WRITE = 0x20, HIF_REG_PHY_CMD1_PHYADDR = 0x01, HIF_REG_PHY_CMD2 = 0x0d, HIF_REG_PHY_CMD2_PEND_FLAG_BIT = 0x80, HIF_REG_PHY_CMD2_READY_FLAG_BIT = 0x40, HIF_REG_CONFIG = 0x0e, /* hmm, spec sez: "R/W", "Except bit 3" (likely TXENABLE). 
*/ HIF_REG_CONFIG_CFG = 0x80, HIF_REG_CONFIG_SPEED100 = 0x40, HIF_REG_CONFIG_FULLDUPLEX_ENABLE = 0x20, HIF_REG_CONFIG_RXENABLE = 0x10, HIF_REG_CONFIG_TXENABLE = 0x08, HIF_REG_CONFIG_SLEEPMODE = 0x04, HIF_REG_CONFIG_ALLMULTICAST = 0x02, HIF_REG_CONFIG_PROMISCUOUS = 0x01, HIF_REG_ETHERNET_ADDR = 0x0f, HIF_REG_FRAME_DROP_COUNTER = 0x15, /* 0..ff; reset: 0 */ HIF_REG_PAUSE_THRESHOLD = 0x16, HIF_REG_PAUSE_THRESHOLD_DEFAULT = 0, }; /* Trailing status byte in Ethernet Rx frame */ enum { MCS7830_RX_SHORT_FRAME = 0x01, /* < 64 bytes */ MCS7830_RX_LENGTH_ERROR = 0x02, /* framelen != Ethernet length field */ MCS7830_RX_ALIGNMENT_ERROR = 0x04, /* non-even number of nibbles */ MCS7830_RX_CRC_ERROR = 0x08, MCS7830_RX_LARGE_FRAME = 0x10, /* > 1518 bytes */ MCS7830_RX_FRAME_CORRECT = 0x20, /* frame is correct */ /* [7:6] reserved */ }; struct mcs7830_data { u8 multi_filter[8]; u8 config; }; static const char driver_name[] = "MOSCHIP usb-ethernet driver"; static int mcs7830_get_reg(struct usbnet *dev, u16 index, u16 size, void *data) { struct usb_device *xdev = dev->udev; int ret; void *buffer; buffer = kmalloc(size, GFP_NOIO); if (buffer == NULL) return -ENOMEM; ret = usb_control_msg(xdev, usb_rcvctrlpipe(xdev, 0), MCS7830_RD_BREQ, MCS7830_RD_BMREQ, 0x0000, index, buffer, size, MCS7830_CTRL_TIMEOUT); memcpy(data, buffer, size); kfree(buffer); return ret; } static int mcs7830_set_reg(struct usbnet *dev, u16 index, u16 size, const void *data) { struct usb_device *xdev = dev->udev; int ret; void *buffer; buffer = kmemdup(data, size, GFP_NOIO); if (buffer == NULL) return -ENOMEM; ret = usb_control_msg(xdev, usb_sndctrlpipe(xdev, 0), MCS7830_WR_BREQ, MCS7830_WR_BMREQ, 0x0000, index, buffer, size, MCS7830_CTRL_TIMEOUT); kfree(buffer); return ret; } static void mcs7830_async_cmd_callback(struct urb *urb) { struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context; int status = urb->status; if (status < 0) printk(KERN_DEBUG "%s() failed with %d\n", __func__, status); 
kfree(req); usb_free_urb(urb); } static void mcs7830_set_reg_async(struct usbnet *dev, u16 index, u16 size, void *data) { struct usb_ctrlrequest *req; int ret; struct urb *urb; urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) { dev_dbg(&dev->udev->dev, "Error allocating URB in write_cmd_async!\n"); return; } req = kmalloc(sizeof *req, GFP_ATOMIC); if (!req) { dev_err(&dev->udev->dev, "Failed to allocate memory for control request\n"); goto out; } req->bRequestType = MCS7830_WR_BMREQ; req->bRequest = MCS7830_WR_BREQ; req->wValue = 0; req->wIndex = cpu_to_le16(index); req->wLength = cpu_to_le16(size); usb_fill_control_urb(urb, dev->udev, usb_sndctrlpipe(dev->udev, 0), (void *)req, data, size, mcs7830_async_cmd_callback, req); ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret < 0) { dev_err(&dev->udev->dev, "Error submitting the control message: ret=%d\n", ret); goto out; } return; out: kfree(req); usb_free_urb(urb); } static int mcs7830_hif_get_mac_address(struct usbnet *dev, unsigned char *addr) { int ret = mcs7830_get_reg(dev, HIF_REG_ETHERNET_ADDR, ETH_ALEN, addr); if (ret < 0) return ret; return 0; } static int mcs7830_hif_set_mac_address(struct usbnet *dev, unsigned char *addr) { int ret = mcs7830_set_reg(dev, HIF_REG_ETHERNET_ADDR, ETH_ALEN, addr); if (ret < 0) return ret; return 0; } static int mcs7830_set_mac_address(struct net_device *netdev, void *p) { int ret; struct usbnet *dev = netdev_priv(netdev); struct sockaddr *addr = p; if (netif_running(netdev)) return -EBUSY; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; ret = mcs7830_hif_set_mac_address(dev, addr->sa_data); if (ret < 0) return ret; /* it worked --> adopt it on netdev side */ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); return 0; } static int mcs7830_read_phy(struct usbnet *dev, u8 index) { int ret; int i; __le16 val; u8 cmd[2] = { HIF_REG_PHY_CMD1_READ | HIF_REG_PHY_CMD1_PHYADDR, HIF_REG_PHY_CMD2_PEND_FLAG_BIT | index, }; mutex_lock(&dev->phy_mutex); /* write the MII 
command */ ret = mcs7830_set_reg(dev, HIF_REG_PHY_CMD1, 2, cmd); if (ret < 0) goto out; /* wait for the data to become valid, should be within < 1ms */ for (i = 0; i < 10; i++) { ret = mcs7830_get_reg(dev, HIF_REG_PHY_CMD1, 2, cmd); if ((ret < 0) || (cmd[1] & HIF_REG_PHY_CMD2_READY_FLAG_BIT)) break; ret = -EIO; msleep(1); } if (ret < 0) goto out; /* read actual register contents */ ret = mcs7830_get_reg(dev, HIF_REG_PHY_DATA, 2, &val); if (ret < 0) goto out; ret = le16_to_cpu(val); dev_dbg(&dev->udev->dev, "read PHY reg %02x: %04x (%d tries)\n", index, val, i); out: mutex_unlock(&dev->phy_mutex); return ret; } static int mcs7830_write_phy(struct usbnet *dev, u8 index, u16 val) { int ret; int i; __le16 le_val; u8 cmd[2] = { HIF_REG_PHY_CMD1_WRITE | HIF_REG_PHY_CMD1_PHYADDR, HIF_REG_PHY_CMD2_PEND_FLAG_BIT | (index & 0x1F), }; mutex_lock(&dev->phy_mutex); /* write the new register contents */ le_val = cpu_to_le16(val); ret = mcs7830_set_reg(dev, HIF_REG_PHY_DATA, 2, &le_val); if (ret < 0) goto out; /* write the MII command */ ret = mcs7830_set_reg(dev, HIF_REG_PHY_CMD1, 2, cmd); if (ret < 0) goto out; /* wait for the command to be accepted by the PHY */ for (i = 0; i < 10; i++) { ret = mcs7830_get_reg(dev, HIF_REG_PHY_CMD1, 2, cmd); if ((ret < 0) || (cmd[1] & HIF_REG_PHY_CMD2_READY_FLAG_BIT)) break; ret = -EIO; msleep(1); } if (ret < 0) goto out; ret = 0; dev_dbg(&dev->udev->dev, "write PHY reg %02x: %04x (%d tries)\n", index, val, i); out: mutex_unlock(&dev->phy_mutex); return ret; } /* * This algorithm comes from the original mcs7830 version 1.4 driver, * not sure if it is needed. 
*/ static int mcs7830_set_autoneg(struct usbnet *dev, int ptrUserPhyMode) { int ret; /* Enable all media types */ ret = mcs7830_write_phy(dev, MII_ADVERTISE, MCS7830_MII_ADVERTISE); /* First reset BMCR */ if (!ret) ret = mcs7830_write_phy(dev, MII_BMCR, 0x0000); /* Enable Auto Neg */ if (!ret) ret = mcs7830_write_phy(dev, MII_BMCR, BMCR_ANENABLE); /* Restart Auto Neg (Keep the Enable Auto Neg Bit Set) */ if (!ret) ret = mcs7830_write_phy(dev, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART ); return ret; } /* * if we can read register 22, the chip revision is C or higher */ static int mcs7830_get_rev(struct usbnet *dev) { u8 dummy[2]; int ret; ret = mcs7830_get_reg(dev, HIF_REG_FRAME_DROP_COUNTER, 2, dummy); if (ret > 0) return 2; /* Rev C or later */ return 1; /* earlier revision */ } /* * On rev. C we need to set the pause threshold */ static void mcs7830_rev_C_fixup(struct usbnet *dev) { u8 pause_threshold = HIF_REG_PAUSE_THRESHOLD_DEFAULT; int retry; for (retry = 0; retry < 2; retry++) { if (mcs7830_get_rev(dev) == 2) { dev_info(&dev->udev->dev, "applying rev.C fixup\n"); mcs7830_set_reg(dev, HIF_REG_PAUSE_THRESHOLD, 1, &pause_threshold); } msleep(1); } } static int mcs7830_mdio_read(struct net_device *netdev, int phy_id, int location) { struct usbnet *dev = netdev_priv(netdev); return mcs7830_read_phy(dev, location); } static void mcs7830_mdio_write(struct net_device *netdev, int phy_id, int location, int val) { struct usbnet *dev = netdev_priv(netdev); mcs7830_write_phy(dev, location, val); } static int mcs7830_ioctl(struct net_device *net, struct ifreq *rq, int cmd) { struct usbnet *dev = netdev_priv(net); return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL); } static inline struct mcs7830_data *mcs7830_get_data(struct usbnet *dev) { return (struct mcs7830_data *)&dev->data; } static void mcs7830_hif_update_multicast_hash(struct usbnet *dev) { struct mcs7830_data *data = mcs7830_get_data(dev); mcs7830_set_reg_async(dev, HIF_REG_MULTICAST_HASH, sizeof 
data->multi_filter, data->multi_filter); } static void mcs7830_hif_update_config(struct usbnet *dev) { /* implementation specific to data->config (argument needs to be heap-based anyway - USB DMA!) */ struct mcs7830_data *data = mcs7830_get_data(dev); mcs7830_set_reg_async(dev, HIF_REG_CONFIG, 1, &data->config); } static void mcs7830_data_set_multicast(struct net_device *net) { struct usbnet *dev = netdev_priv(net); struct mcs7830_data *data = mcs7830_get_data(dev); memset(data->multi_filter, 0, sizeof data->multi_filter); data->config = HIF_REG_CONFIG_TXENABLE; /* this should not be needed, but it doesn't work otherwise */ data->config |= HIF_REG_CONFIG_ALLMULTICAST; if (net->flags & IFF_PROMISC) { data->config |= HIF_REG_CONFIG_PROMISCUOUS; } else if (net->flags & IFF_ALLMULTI || netdev_mc_count(net) > MCS7830_MAX_MCAST) { data->config |= HIF_REG_CONFIG_ALLMULTICAST; } else if (netdev_mc_empty(net)) { /* just broadcast and directed */ } else { /* We use the 20 byte dev->data * for our 8 byte filter buffer * to avoid allocating memory that * is tricky to free later */ struct netdev_hw_addr *ha; u32 crc_bits; /* Build the multicast hash filter. */ netdev_for_each_mc_addr(ha, net) { crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26; data->multi_filter[crc_bits >> 3] |= 1 << (crc_bits & 7); } } } static int mcs7830_apply_base_config(struct usbnet *dev) { int ret; /* re-configure known MAC (suspend case etc.) 
*/ ret = mcs7830_hif_set_mac_address(dev, dev->net->dev_addr); if (ret) { dev_info(&dev->udev->dev, "Cannot set MAC address\n"); goto out; } /* Set up PHY */ ret = mcs7830_set_autoneg(dev, 0); if (ret) { dev_info(&dev->udev->dev, "Cannot set autoneg\n"); goto out; } mcs7830_hif_update_multicast_hash(dev); mcs7830_hif_update_config(dev); mcs7830_rev_C_fixup(dev); ret = 0; out: return ret; } /* credits go to asix_set_multicast */ static void mcs7830_set_multicast(struct net_device *net) { struct usbnet *dev = netdev_priv(net); mcs7830_data_set_multicast(net); mcs7830_hif_update_multicast_hash(dev); mcs7830_hif_update_config(dev); } static int mcs7830_get_regs_len(struct net_device *net) { struct usbnet *dev = netdev_priv(net); switch (mcs7830_get_rev(dev)) { case 1: return 21; case 2: return 32; } return 0; } static void mcs7830_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *drvinfo) { usbnet_get_drvinfo(net, drvinfo); drvinfo->regdump_len = mcs7830_get_regs_len(net); } static void mcs7830_get_regs(struct net_device *net, struct ethtool_regs *regs, void *data) { struct usbnet *dev = netdev_priv(net); regs->version = mcs7830_get_rev(dev); mcs7830_get_reg(dev, 0, regs->len, data); } static const struct ethtool_ops mcs7830_ethtool_ops = { .get_drvinfo = mcs7830_get_drvinfo, .get_regs_len = mcs7830_get_regs_len, .get_regs = mcs7830_get_regs, /* common usbnet calls */ .get_link = usbnet_get_link, .get_msglevel = usbnet_get_msglevel, .set_msglevel = usbnet_set_msglevel, .get_settings = usbnet_get_settings, .set_settings = usbnet_set_settings, .nway_reset = usbnet_nway_reset, }; static const struct net_device_ops mcs7830_netdev_ops = { .ndo_open = usbnet_open, .ndo_stop = usbnet_stop, .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, .ndo_change_mtu = usbnet_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_do_ioctl = mcs7830_ioctl, .ndo_set_rx_mode = mcs7830_set_multicast, .ndo_set_mac_address = mcs7830_set_mac_address, }; static 
int mcs7830_bind(struct usbnet *dev, struct usb_interface *udev) { struct net_device *net = dev->net; int ret; int retry; /* Initial startup: Gather MAC address setting from EEPROM */ ret = -EINVAL; for (retry = 0; retry < 5 && ret; retry++) ret = mcs7830_hif_get_mac_address(dev, net->dev_addr); if (ret) { dev_warn(&dev->udev->dev, "Cannot read MAC address\n"); goto out; } mcs7830_data_set_multicast(net); ret = mcs7830_apply_base_config(dev); if (ret) goto out; net->ethtool_ops = &mcs7830_ethtool_ops; net->netdev_ops = &mcs7830_netdev_ops; /* reserve space for the status byte on rx */ dev->rx_urb_size = ETH_FRAME_LEN + 1; dev->mii.mdio_read = mcs7830_mdio_read; dev->mii.mdio_write = mcs7830_mdio_write; dev->mii.dev = net; dev->mii.phy_id_mask = 0x3f; dev->mii.reg_num_mask = 0x1f; dev->mii.phy_id = *((u8 *) net->dev_addr + 1); ret = usbnet_get_endpoints(dev, udev); out: return ret; } /* The chip always appends a status byte that we need to strip */ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb) { u8 status; if (skb->len == 0) { dev_err(&dev->udev->dev, "unexpected empty rx frame\n"); return 0; } skb_trim(skb, skb->len - 1); status = skb->data[skb->len]; if (status != MCS7830_RX_FRAME_CORRECT) { dev_dbg(&dev->udev->dev, "rx fixup status %x\n", status); /* hmm, perhaps usbnet.c already sees a globally visible frame error and increments rx_errors on its own already? 
*/ dev->net->stats.rx_errors++; if (status & (MCS7830_RX_SHORT_FRAME |MCS7830_RX_LENGTH_ERROR |MCS7830_RX_LARGE_FRAME)) dev->net->stats.rx_length_errors++; if (status & MCS7830_RX_ALIGNMENT_ERROR) dev->net->stats.rx_frame_errors++; if (status & MCS7830_RX_CRC_ERROR) dev->net->stats.rx_crc_errors++; } return skb->len > 0; } static void mcs7830_status(struct usbnet *dev, struct urb *urb) { u8 *buf = urb->transfer_buffer; bool link; if (urb->actual_length < 16) return; link = !(buf[1] & 0x20); if (netif_carrier_ok(dev->net) != link) { if (link) { netif_carrier_on(dev->net); usbnet_defer_kevent(dev, EVENT_LINK_RESET); } else netif_carrier_off(dev->net); netdev_dbg(dev->net, "Link Status is: %d\n", link); } } static const struct driver_info moschip_info = { .description = "MOSCHIP 7830/7832/7730 usb-NET adapter", .bind = mcs7830_bind, .rx_fixup = mcs7830_rx_fixup, .flags = FLAG_ETHER | FLAG_LINK_INTR, .status = mcs7830_status, .in = 1, .out = 2, }; static const struct driver_info sitecom_info = { .description = "Sitecom LN-30 usb-NET adapter", .bind = mcs7830_bind, .rx_fixup = mcs7830_rx_fixup, .flags = FLAG_ETHER | FLAG_LINK_INTR, .status = mcs7830_status, .in = 1, .out = 2, }; static const struct usb_device_id products[] = { { USB_DEVICE(MCS7830_VENDOR_ID, MCS7832_PRODUCT_ID), .driver_info = (unsigned long) &moschip_info, }, { USB_DEVICE(MCS7830_VENDOR_ID, MCS7830_PRODUCT_ID), .driver_info = (unsigned long) &moschip_info, }, { USB_DEVICE(MCS7830_VENDOR_ID, MCS7730_PRODUCT_ID), .driver_info = (unsigned long) &moschip_info, }, { USB_DEVICE(SITECOM_VENDOR_ID, LN_030_PRODUCT_ID), .driver_info = (unsigned long) &sitecom_info, }, {}, }; MODULE_DEVICE_TABLE(usb, products); static int mcs7830_reset_resume (struct usb_interface *intf) { /* YES, this function is successful enough that ethtool -d does show same output pre-/post-suspend */ struct usbnet *dev = usb_get_intfdata(intf); mcs7830_apply_base_config(dev); usbnet_resume(intf); return 0; } static struct usb_driver 
mcs7830_driver = { .name = driver_name, .id_table = products, .probe = usbnet_probe, .disconnect = usbnet_disconnect, .suspend = usbnet_suspend, .resume = usbnet_resume, .reset_resume = mcs7830_reset_resume, .disable_hub_initiated_lpm = 1, }; module_usb_driver(mcs7830_driver); MODULE_DESCRIPTION("USB to network adapter MCS7830)"); MODULE_LICENSE("GPL");
gpl-2.0
neobuddy89/hammerhead
drivers/media/platform/msm/camera_v1/flash/msm_flash.c
361
14006
/* Copyright (c) 2009-2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/hrtimer.h> #include <linux/export.h> #include <linux/of.h> #include <mach/pmic.h> #include <mach/camera.h> #include <mach/gpio.h> #include "msm_flash.h" #include "msm.h" static struct timer_list timer_flash; enum msm_cam_flash_stat { MSM_CAM_FLASH_OFF, MSM_CAM_FLASH_ON, }; static int config_flash_gpio_table(enum msm_cam_flash_stat stat, struct msm_camera_sensor_strobe_flash_data *sfdata) { int rc = 0, i = 0; int msm_cam_flash_gpio_tbl[][2] = { {sfdata->flash_trigger, 1}, {sfdata->flash_charge, 1}, {sfdata->flash_charge_done, 0} }; if (stat == MSM_CAM_FLASH_ON) { for (i = 0; i < ARRAY_SIZE(msm_cam_flash_gpio_tbl); i++) { rc = gpio_request(msm_cam_flash_gpio_tbl[i][0], "CAM_FLASH_GPIO"); if (unlikely(rc < 0)) { pr_err("%s not able to get gpio\n", __func__); for (i--; i >= 0; i--) gpio_free(msm_cam_flash_gpio_tbl[i][0]); break; } if (msm_cam_flash_gpio_tbl[i][1]) gpio_direction_output( msm_cam_flash_gpio_tbl[i][0], 0); else gpio_direction_input( msm_cam_flash_gpio_tbl[i][0]); } } else { for (i = 0; i < ARRAY_SIZE(msm_cam_flash_gpio_tbl); i++) { gpio_direction_input(msm_cam_flash_gpio_tbl[i][0]); gpio_free(msm_cam_flash_gpio_tbl[i][0]); } } return rc; } static int msm_strobe_flash_xenon_charge(int32_t flash_charge, int32_t charge_enable, uint32_t flash_recharge_duration) { gpio_set_value_cansleep(flash_charge, charge_enable); if (charge_enable) { timer_flash.expires = jiffies + 
msecs_to_jiffies(flash_recharge_duration); /* add timer for the recharge */ if (!timer_pending(&timer_flash)) add_timer(&timer_flash); } else del_timer_sync(&timer_flash); return 0; } static void strobe_flash_xenon_recharge_handler(unsigned long data) { unsigned long flags; struct msm_camera_sensor_strobe_flash_data *sfdata = (struct msm_camera_sensor_strobe_flash_data *)data; spin_lock_irqsave(&sfdata->timer_lock, flags); msm_strobe_flash_xenon_charge(sfdata->flash_charge, 1, sfdata->flash_recharge_duration); spin_unlock_irqrestore(&sfdata->timer_lock, flags); return; } static irqreturn_t strobe_flash_charge_ready_irq(int irq_num, void *data) { struct msm_camera_sensor_strobe_flash_data *sfdata = (struct msm_camera_sensor_strobe_flash_data *)data; /* put the charge signal to low */ gpio_set_value_cansleep(sfdata->flash_charge, 0); return IRQ_HANDLED; } static int msm_strobe_flash_xenon_init( struct msm_camera_sensor_strobe_flash_data *sfdata) { unsigned long flags; int rc = 0; spin_lock_irqsave(&sfdata->spin_lock, flags); if (!sfdata->state) { rc = config_flash_gpio_table(MSM_CAM_FLASH_ON, sfdata); if (rc < 0) { pr_err("%s: gpio_request failed\n", __func__); goto go_out; } rc = request_irq(sfdata->irq, strobe_flash_charge_ready_irq, IRQF_TRIGGER_RISING, "charge_ready", sfdata); if (rc < 0) { pr_err("%s: request_irq failed %d\n", __func__, rc); goto go_out; } spin_lock_init(&sfdata->timer_lock); /* setup timer */ init_timer(&timer_flash); timer_flash.function = strobe_flash_xenon_recharge_handler; timer_flash.data = (unsigned long)sfdata; } sfdata->state++; go_out: spin_unlock_irqrestore(&sfdata->spin_lock, flags); return rc; } static int msm_strobe_flash_xenon_release (struct msm_camera_sensor_strobe_flash_data *sfdata, int32_t final_release) { unsigned long flags; spin_lock_irqsave(&sfdata->spin_lock, flags); if (sfdata->state > 0) { if (final_release) sfdata->state = 0; else sfdata->state--; if (!sfdata->state) { free_irq(sfdata->irq, sfdata); 
config_flash_gpio_table(MSM_CAM_FLASH_OFF, sfdata); if (timer_pending(&timer_flash)) del_timer_sync(&timer_flash); } } spin_unlock_irqrestore(&sfdata->spin_lock, flags); return 0; } static int msm_strobe_flash_ctrl( struct msm_camera_sensor_strobe_flash_data *sfdata, struct strobe_flash_ctrl_data *strobe_ctrl) { int rc = 0; switch (strobe_ctrl->type) { case STROBE_FLASH_CTRL_INIT: if (!sfdata) return -ENODEV; rc = msm_strobe_flash_xenon_init(sfdata); break; case STROBE_FLASH_CTRL_CHARGE: rc = msm_strobe_flash_xenon_charge(sfdata->flash_charge, strobe_ctrl->charge_en, sfdata->flash_recharge_duration); break; case STROBE_FLASH_CTRL_RELEASE: if (sfdata) rc = msm_strobe_flash_xenon_release(sfdata, 0); break; default: pr_err("Invalid Strobe Flash State\n"); rc = -EINVAL; } return rc; } int msm_flash_led_init(struct msm_flash_ctrl_t *fctrl) { int rc = 0; struct msm_camera_sensor_flash_external *external = NULL; CDBG("%s:%d called\n", __func__, __LINE__); if (!fctrl) { pr_err("%s:%d fctrl NULL\n", __func__, __LINE__); return -EINVAL; } external = &fctrl->flash_data->flash_src->_fsrc.ext_driver_src; if (external->expander_info && !fctrl->expander_client) { struct i2c_adapter *adapter = i2c_get_adapter(external->expander_info->bus_id); if (adapter) fctrl->expander_client = i2c_new_device(adapter, external->expander_info->board_info); if (!fctrl->expander_client || !adapter) { pr_err("fctrl->expander_client is not available\n"); rc = -ENOTSUPP; return rc; } i2c_put_adapter(adapter); } rc = msm_camera_init_gpio_table( fctrl->flash_data->flash_src->init_gpio_tbl, fctrl->flash_data->flash_src->init_gpio_tbl_size, 1); if (rc < 0) pr_err("%s:%d failed\n", __func__, __LINE__); return rc; } int msm_flash_led_release(struct msm_flash_ctrl_t *fctrl) { struct msm_camera_sensor_flash_external *external = NULL; CDBG("%s:%d called\n", __func__, __LINE__); if (!fctrl) { pr_err("%s:%d fctrl NULL\n", __func__, __LINE__); return -EINVAL; } external = 
&fctrl->flash_data->flash_src->_fsrc.ext_driver_src; msm_camera_set_gpio_table( fctrl->flash_data->flash_src->set_gpio_tbl, fctrl->flash_data->flash_src->set_gpio_tbl_size, 0); msm_camera_init_gpio_table( fctrl->flash_data->flash_src->init_gpio_tbl, fctrl->flash_data->flash_src->init_gpio_tbl_size, 0); if (external->expander_info && fctrl->expander_client) { i2c_unregister_device(fctrl->expander_client); fctrl->expander_client = NULL; } return 0; } int msm_flash_led_off(struct msm_flash_ctrl_t *fctrl) { int rc = 0; struct msm_camera_sensor_flash_external *external = NULL; CDBG("%s:%d called\n", __func__, __LINE__); if (!fctrl) { pr_err("%s:%d fctrl NULL\n", __func__, __LINE__); return -EINVAL; } external = &fctrl->flash_data->flash_src->_fsrc.ext_driver_src; if (fctrl->flash_i2c_client && fctrl->reg_setting) { rc = msm_camera_i2c_write_tbl( fctrl->flash_i2c_client, fctrl->reg_setting->off_setting, fctrl->reg_setting->off_setting_size, fctrl->reg_setting->default_data_type); if (rc < 0) pr_err("%s:%d failed\n", __func__, __LINE__); } msm_camera_set_gpio_table( fctrl->flash_data->flash_src->set_gpio_tbl, fctrl->flash_data->flash_src->set_gpio_tbl_size, 0); return rc; } int msm_flash_led_low(struct msm_flash_ctrl_t *fctrl) { int rc = 0; struct msm_camera_sensor_flash_external *external = NULL; CDBG("%s:%d called\n", __func__, __LINE__); if (!fctrl) { pr_err("%s:%d fctrl NULL\n", __func__, __LINE__); return -EINVAL; } external = &fctrl->flash_data->flash_src->_fsrc.ext_driver_src; msm_camera_set_gpio_table( fctrl->flash_data->flash_src->set_gpio_tbl, fctrl->flash_data->flash_src->set_gpio_tbl_size, 1); if (fctrl->flash_i2c_client && fctrl->reg_setting) { rc = msm_camera_i2c_write_tbl( fctrl->flash_i2c_client, fctrl->reg_setting->low_setting, fctrl->reg_setting->low_setting_size, fctrl->reg_setting->default_data_type); if (rc < 0) pr_err("%s:%d failed\n", __func__, __LINE__); } return rc; } int msm_flash_led_high(struct msm_flash_ctrl_t *fctrl) { int rc = 0; struct 
msm_camera_sensor_flash_external *external = NULL; CDBG("%s:%d called\n", __func__, __LINE__); if (!fctrl) { pr_err("%s:%d fctrl NULL\n", __func__, __LINE__); return -EINVAL; } external = &fctrl->flash_data->flash_src->_fsrc.ext_driver_src; msm_camera_set_gpio_table( fctrl->flash_data->flash_src->set_gpio_tbl, fctrl->flash_data->flash_src->set_gpio_tbl_size, 1); if (fctrl->flash_i2c_client && fctrl->reg_setting) { rc = msm_camera_i2c_write_tbl( fctrl->flash_i2c_client, fctrl->reg_setting->high_setting, fctrl->reg_setting->high_setting_size, fctrl->reg_setting->default_data_type); if (rc < 0) pr_err("%s:%d failed\n", __func__, __LINE__); } return rc; } int msm_camera_flash_led_config(struct msm_flash_ctrl_t *fctrl, uint8_t led_state) { int rc = 0; CDBG("%s:%d called\n", __func__, __LINE__); if (!fctrl->func_tbl) { pr_err("%s flash func tbl NULL\n", __func__); return 0; } switch (led_state) { case MSM_CAMERA_LED_INIT: if (fctrl->func_tbl->flash_led_init) rc = fctrl->func_tbl->flash_led_init(fctrl); break; case MSM_CAMERA_LED_RELEASE: if (fctrl->func_tbl->flash_led_release) rc = fctrl->func_tbl-> flash_led_release(fctrl); break; case MSM_CAMERA_LED_OFF: if (fctrl->func_tbl->flash_led_off) rc = fctrl->func_tbl->flash_led_off(fctrl); break; case MSM_CAMERA_LED_LOW: if (fctrl->func_tbl->flash_led_low) rc = fctrl->func_tbl->flash_led_low(fctrl); break; case MSM_CAMERA_LED_HIGH: if (fctrl->func_tbl->flash_led_high) rc = fctrl->func_tbl->flash_led_high(fctrl); break; default: rc = -EFAULT; break; } return rc; } static struct msm_flash_ctrl_t *get_fctrl(struct v4l2_subdev *sd) { return container_of(sd, struct msm_flash_ctrl_t, v4l2_sdev); } static long msm_flash_config(struct msm_flash_ctrl_t *fctrl, void __user *argp) { long rc = 0; struct flash_ctrl_data flash_info; if (!argp) { pr_err("%s argp NULL\n", __func__); return -EINVAL; } if (copy_from_user(&flash_info, argp, sizeof(flash_info))) { pr_err("%s:%d failed\n", __func__, __LINE__); return -EFAULT; } switch 
(flash_info.flashtype) { case LED_FLASH: if (fctrl->func_tbl->flash_led_config) rc = fctrl->func_tbl->flash_led_config(fctrl, flash_info.ctrl_data.led_state); if (rc < 0) pr_err("%s:%d failed\n", __func__, __LINE__); break; case STROBE_FLASH: rc = msm_strobe_flash_ctrl(fctrl->strobe_flash_data, &(flash_info.ctrl_data.strobe_ctrl)); break; default: pr_err("Invalid Flash MODE\n"); rc = -EINVAL; } return rc; } static long msm_flash_subdev_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg) { struct msm_flash_ctrl_t *fctrl = NULL; void __user *argp = (void __user *)arg; if (!sd) { pr_err("%s:%d sd NULL\n", __func__, __LINE__); return -EINVAL; } fctrl = get_fctrl(sd); if (!fctrl) { pr_err("%s:%d fctrl NULL\n", __func__, __LINE__); return -EINVAL; } switch (cmd) { case VIDIOC_MSM_FLASH_LED_DATA_CFG: fctrl->flash_data = (struct msm_camera_sensor_flash_data *)argp; return 0; case VIDIOC_MSM_FLASH_STROBE_DATA_CFG: fctrl->strobe_flash_data = (struct msm_camera_sensor_strobe_flash_data *)argp; return 0; case VIDIOC_MSM_FLASH_CFG: return msm_flash_config(fctrl, argp); default: return -ENOIOCTLCMD; } } static struct v4l2_subdev_core_ops msm_flash_subdev_core_ops = { .ioctl = msm_flash_subdev_ioctl, }; static struct v4l2_subdev_ops msm_flash_subdev_ops = { .core = &msm_flash_subdev_core_ops, }; int msm_flash_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) { int rc = 0; struct msm_flash_ctrl_t *fctrl = NULL; CDBG("%s:%d called\n", __func__, __LINE__); if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { pr_err("i2c_check_functionality failed\n"); goto probe_failure; } fctrl = (struct msm_flash_ctrl_t *)(id->driver_data); if (fctrl->flash_i2c_client) fctrl->flash_i2c_client->client = client; /* Assign name for sub device */ snprintf(fctrl->v4l2_sdev.name, sizeof(fctrl->v4l2_sdev.name), "%s", id->name); /* Initialize sub device */ v4l2_i2c_subdev_init(&fctrl->v4l2_sdev, client, &msm_flash_subdev_ops); CDBG("%s:%d probe success\n", __func__, 
__LINE__); return 0; probe_failure: CDBG("%s:%d probe failed\n", __func__, __LINE__); return rc; } int msm_flash_platform_probe(struct platform_device *pdev, void *data) { struct msm_flash_ctrl_t *fctrl = (struct msm_flash_ctrl_t *)data; struct msm_cam_subdev_info sd_info; CDBG("%s:%d called\n", __func__, __LINE__); if (!fctrl) { pr_err("%s fctrl NULL\n", __func__); return -EINVAL; } /* Initialize sub device */ v4l2_subdev_init(&fctrl->v4l2_sdev, &msm_flash_subdev_ops); /* Assign name for sub device */ snprintf(fctrl->v4l2_sdev.name, sizeof(fctrl->v4l2_sdev.name), "%s", "msm_flash"); fctrl->pdev = pdev; sd_info.sdev_type = FLASH_DEV; sd_info.sd_index = pdev->id; msm_cam_register_subdev_node(&fctrl->v4l2_sdev, &sd_info); CDBG("%s:%d probe success\n", __func__, __LINE__); return 0; } int msm_flash_create_v4l2_subdev(void *data, uint8_t sd_index) { struct msm_flash_ctrl_t *fctrl = (struct msm_flash_ctrl_t *)data; struct msm_cam_subdev_info sd_info; CDBG("%s:%d called\n", __func__, __LINE__); /* Initialize sub device */ v4l2_subdev_init(&fctrl->v4l2_sdev, &msm_flash_subdev_ops); /* Assign name for sub device */ snprintf(fctrl->v4l2_sdev.name, sizeof(fctrl->v4l2_sdev.name), "%s", "msm_flash"); sd_info.sdev_type = FLASH_DEV; sd_info.sd_index = sd_index; msm_cam_register_subdev_node(&fctrl->v4l2_sdev, &sd_info); CDBG("%s:%d probe success\n", __func__, __LINE__); return 0; }
gpl-2.0
googlehim/linux
drivers/scsi/bfa/bfad_im.c
617
34641
/* * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. * All rights reserved * www.brocade.com * * Linux driver for Brocade Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as * published by the Free Software Foundation * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ /* * bfad_im.c Linux driver IM module. */ #include <linux/export.h> #include "bfad_drv.h" #include "bfad_im.h" #include "bfa_fcs.h" BFA_TRC_FILE(LDRV, IM); DEFINE_IDR(bfad_im_port_index); struct scsi_transport_template *bfad_im_scsi_transport_template; struct scsi_transport_template *bfad_im_scsi_vport_transport_template; static void bfad_im_itnim_work_handler(struct work_struct *work); static int bfad_im_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmnd); static int bfad_im_slave_alloc(struct scsi_device *sdev); static void bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, struct bfad_itnim_s *itnim); void bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio, enum bfi_ioim_status io_status, u8 scsi_status, int sns_len, u8 *sns_info, s32 residue) { struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio; struct bfad_s *bfad = drv; struct bfad_itnim_data_s *itnim_data; struct bfad_itnim_s *itnim; u8 host_status = DID_OK; switch (io_status) { case BFI_IOIM_STS_OK: bfa_trc(bfad, scsi_status); scsi_set_resid(cmnd, 0); if (sns_len > 0) { bfa_trc(bfad, sns_len); if (sns_len > SCSI_SENSE_BUFFERSIZE) sns_len = SCSI_SENSE_BUFFERSIZE; memcpy(cmnd->sense_buffer, sns_info, sns_len); } if (residue > 0) { bfa_trc(bfad, residue); scsi_set_resid(cmnd, residue); if (!sns_len && (scsi_status == SAM_STAT_GOOD) && (scsi_bufflen(cmnd) - residue) < cmnd->underflow) { bfa_trc(bfad, 0); 
host_status = DID_ERROR; } } cmnd->result = ScsiResult(host_status, scsi_status); break; case BFI_IOIM_STS_TIMEDOUT: host_status = DID_TIME_OUT; cmnd->result = ScsiResult(host_status, 0); break; case BFI_IOIM_STS_PATHTOV: host_status = DID_TRANSPORT_DISRUPTED; cmnd->result = ScsiResult(host_status, 0); break; default: host_status = DID_ERROR; cmnd->result = ScsiResult(host_status, 0); } /* Unmap DMA, if host is NULL, it means a scsi passthru cmd */ if (cmnd->device->host != NULL) scsi_dma_unmap(cmnd); cmnd->host_scribble = NULL; bfa_trc(bfad, cmnd->result); itnim_data = cmnd->device->hostdata; if (itnim_data) { itnim = itnim_data->itnim; if (!cmnd->result && itnim && (bfa_lun_queue_depth > cmnd->device->queue_depth)) { /* Queue depth adjustment for good status completion */ bfad_ramp_up_qdepth(itnim, cmnd->device); } else if (cmnd->result == SAM_STAT_TASK_SET_FULL && itnim) { /* qfull handling */ bfad_handle_qfull(itnim, cmnd->device); } } cmnd->scsi_done(cmnd); } void bfa_cb_ioim_good_comp(void *drv, struct bfad_ioim_s *dio) { struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio; struct bfad_itnim_data_s *itnim_data; struct bfad_itnim_s *itnim; cmnd->result = ScsiResult(DID_OK, SCSI_STATUS_GOOD); /* Unmap DMA, if host is NULL, it means a scsi passthru cmd */ if (cmnd->device->host != NULL) scsi_dma_unmap(cmnd); cmnd->host_scribble = NULL; /* Queue depth adjustment */ if (bfa_lun_queue_depth > cmnd->device->queue_depth) { itnim_data = cmnd->device->hostdata; if (itnim_data) { itnim = itnim_data->itnim; if (itnim) bfad_ramp_up_qdepth(itnim, cmnd->device); } } cmnd->scsi_done(cmnd); } void bfa_cb_ioim_abort(void *drv, struct bfad_ioim_s *dio) { struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio; struct bfad_s *bfad = drv; cmnd->result = ScsiResult(DID_ERROR, 0); /* Unmap DMA, if host is NULL, it means a scsi passthru cmd */ if (cmnd->device->host != NULL) scsi_dma_unmap(cmnd); bfa_trc(bfad, cmnd->result); cmnd->host_scribble = NULL; } void bfa_cb_tskim_done(void *bfad, 
struct bfad_tskim_s *dtsk, enum bfi_tskim_status tsk_status) { struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dtsk; wait_queue_head_t *wq; cmnd->SCp.Status |= tsk_status << 1; set_bit(IO_DONE_BIT, (unsigned long *)&cmnd->SCp.Status); wq = (wait_queue_head_t *) cmnd->SCp.ptr; cmnd->SCp.ptr = NULL; if (wq) wake_up(wq); } /* * Scsi_Host_template SCSI host template */ /* * Scsi_Host template entry, returns BFAD PCI info. */ static const char * bfad_im_info(struct Scsi_Host *shost) { static char bfa_buf[256]; struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; memset(bfa_buf, 0, sizeof(bfa_buf)); snprintf(bfa_buf, sizeof(bfa_buf), "Brocade FC/FCOE Adapter, " "hwpath: %s driver: %s", bfad->pci_name, BFAD_DRIVER_VERSION); return bfa_buf; } /* * Scsi_Host template entry, aborts the specified SCSI command. * * Returns: SUCCESS or FAILED. */ static int bfad_im_abort_handler(struct scsi_cmnd *cmnd) { struct Scsi_Host *shost = cmnd->device->host; struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; struct bfa_ioim_s *hal_io; unsigned long flags; u32 timeout; int rc = FAILED; spin_lock_irqsave(&bfad->bfad_lock, flags); hal_io = (struct bfa_ioim_s *) cmnd->host_scribble; if (!hal_io) { /* IO has been completed, return success */ rc = SUCCESS; goto out; } if (hal_io->dio != (struct bfad_ioim_s *) cmnd) { rc = FAILED; goto out; } bfa_trc(bfad, hal_io->iotag); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "scsi%d: abort cmnd %p iotag %x\n", im_port->shost->host_no, cmnd, hal_io->iotag); (void) bfa_ioim_abort(hal_io); spin_unlock_irqrestore(&bfad->bfad_lock, flags); /* Need to wait until the command get aborted */ timeout = 10; while ((struct bfa_ioim_s *) cmnd->host_scribble == hal_io) { set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(timeout); if (timeout < 4 * HZ) timeout *= 2; } cmnd->scsi_done(cmnd); bfa_trc(bfad, hal_io->iotag); BFA_LOG(KERN_INFO, 
bfad, bfa_log_level, "scsi%d: complete abort 0x%p iotag 0x%x\n", im_port->shost->host_no, cmnd, hal_io->iotag); return SUCCESS; out: spin_unlock_irqrestore(&bfad->bfad_lock, flags); return rc; } static bfa_status_t bfad_im_target_reset_send(struct bfad_s *bfad, struct scsi_cmnd *cmnd, struct bfad_itnim_s *itnim) { struct bfa_tskim_s *tskim; struct bfa_itnim_s *bfa_itnim; bfa_status_t rc = BFA_STATUS_OK; struct scsi_lun scsilun; tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd); if (!tskim) { BFA_LOG(KERN_ERR, bfad, bfa_log_level, "target reset, fail to allocate tskim\n"); rc = BFA_STATUS_FAILED; goto out; } /* * Set host_scribble to NULL to avoid aborting a task command if * happens. */ cmnd->host_scribble = NULL; cmnd->SCp.Status = 0; bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim); memset(&scsilun, 0, sizeof(scsilun)); bfa_tskim_start(tskim, bfa_itnim, scsilun, FCP_TM_TARGET_RESET, BFAD_TARGET_RESET_TMO); out: return rc; } /* * Scsi_Host template entry, resets a LUN and abort its all commands. * * Returns: SUCCESS or FAILED. 
* */ static int bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd) { struct Scsi_Host *shost = cmnd->device->host; struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_itnim_data_s *itnim_data = cmnd->device->hostdata; struct bfad_s *bfad = im_port->bfad; struct bfa_tskim_s *tskim; struct bfad_itnim_s *itnim; struct bfa_itnim_s *bfa_itnim; DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); int rc = SUCCESS; unsigned long flags; enum bfi_tskim_status task_status; struct scsi_lun scsilun; spin_lock_irqsave(&bfad->bfad_lock, flags); itnim = itnim_data->itnim; if (!itnim) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); rc = FAILED; goto out; } tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd); if (!tskim) { BFA_LOG(KERN_ERR, bfad, bfa_log_level, "LUN reset, fail to allocate tskim"); spin_unlock_irqrestore(&bfad->bfad_lock, flags); rc = FAILED; goto out; } /* * Set host_scribble to NULL to avoid aborting a task command * if happens. */ cmnd->host_scribble = NULL; cmnd->SCp.ptr = (char *)&wq; cmnd->SCp.Status = 0; bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim); int_to_scsilun(cmnd->device->lun, &scsilun); bfa_tskim_start(tskim, bfa_itnim, scsilun, FCP_TM_LUN_RESET, BFAD_LUN_RESET_TMO); spin_unlock_irqrestore(&bfad->bfad_lock, flags); wait_event(wq, test_bit(IO_DONE_BIT, (unsigned long *)&cmnd->SCp.Status)); task_status = cmnd->SCp.Status >> 1; if (task_status != BFI_TSKIM_STS_OK) { BFA_LOG(KERN_ERR, bfad, bfa_log_level, "LUN reset failure, status: %d\n", task_status); rc = FAILED; } out: return rc; } /* * Scsi_Host template entry, resets the bus and abort all commands. 
*/ static int bfad_im_reset_bus_handler(struct scsi_cmnd *cmnd) { struct Scsi_Host *shost = cmnd->device->host; struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; struct bfad_itnim_s *itnim; unsigned long flags; u32 i, rc, err_cnt = 0; DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); enum bfi_tskim_status task_status; spin_lock_irqsave(&bfad->bfad_lock, flags); for (i = 0; i < MAX_FCP_TARGET; i++) { itnim = bfad_get_itnim(im_port, i); if (itnim) { cmnd->SCp.ptr = (char *)&wq; rc = bfad_im_target_reset_send(bfad, cmnd, itnim); if (rc != BFA_STATUS_OK) { err_cnt++; continue; } /* wait target reset to complete */ spin_unlock_irqrestore(&bfad->bfad_lock, flags); wait_event(wq, test_bit(IO_DONE_BIT, (unsigned long *)&cmnd->SCp.Status)); spin_lock_irqsave(&bfad->bfad_lock, flags); task_status = cmnd->SCp.Status >> 1; if (task_status != BFI_TSKIM_STS_OK) { BFA_LOG(KERN_ERR, bfad, bfa_log_level, "target reset failure," " status: %d\n", task_status); err_cnt++; } } } spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (err_cnt) return FAILED; return SUCCESS; } /* * Scsi_Host template entry slave_destroy. */ static void bfad_im_slave_destroy(struct scsi_device *sdev) { sdev->hostdata = NULL; return; } /* * BFA FCS itnim callbacks */ /* * BFA FCS itnim alloc callback, after successful PRLI * Context: Interrupt */ void bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim, struct bfad_itnim_s **itnim_drv) { *itnim_drv = kzalloc(sizeof(struct bfad_itnim_s), GFP_ATOMIC); if (*itnim_drv == NULL) return; (*itnim_drv)->im = bfad->im; *itnim = &(*itnim_drv)->fcs_itnim; (*itnim_drv)->state = ITNIM_STATE_NONE; /* * Initiaze the itnim_work */ INIT_WORK(&(*itnim_drv)->itnim_work, bfad_im_itnim_work_handler); bfad->bfad_flags |= BFAD_RPORT_ONLINE; } /* * BFA FCS itnim free callback. * Context: Interrupt. 
bfad_lock is held */ void bfa_fcb_itnim_free(struct bfad_s *bfad, struct bfad_itnim_s *itnim_drv) { struct bfad_port_s *port; wwn_t wwpn; u32 fcid; char wwpn_str[32], fcid_str[16]; struct bfad_im_s *im = itnim_drv->im; /* online to free state transtion should not happen */ WARN_ON(itnim_drv->state == ITNIM_STATE_ONLINE); itnim_drv->queue_work = 1; /* offline request is not yet done, use the same request to free */ if (itnim_drv->state == ITNIM_STATE_OFFLINE_PENDING) itnim_drv->queue_work = 0; itnim_drv->state = ITNIM_STATE_FREE; port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim); itnim_drv->im_port = port->im_port; wwpn = bfa_fcs_itnim_get_pwwn(&itnim_drv->fcs_itnim); fcid = bfa_fcs_itnim_get_fcid(&itnim_drv->fcs_itnim); wwn2str(wwpn_str, wwpn); fcid2str(fcid_str, fcid); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "ITNIM FREE scsi%d: FCID: %s WWPN: %s\n", port->im_port->shost->host_no, fcid_str, wwpn_str); /* ITNIM processing */ if (itnim_drv->queue_work) queue_work(im->drv_workq, &itnim_drv->itnim_work); } /* * BFA FCS itnim online callback. * Context: Interrupt. bfad_lock is held */ void bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv) { struct bfad_port_s *port; struct bfad_im_s *im = itnim_drv->im; itnim_drv->bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim_drv->fcs_itnim); port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim); itnim_drv->state = ITNIM_STATE_ONLINE; itnim_drv->queue_work = 1; itnim_drv->im_port = port->im_port; /* ITNIM processing */ if (itnim_drv->queue_work) queue_work(im->drv_workq, &itnim_drv->itnim_work); } /* * BFA FCS itnim offline callback. * Context: Interrupt. 
bfad_lock is held */
void
bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv)
{
	struct bfad_port_s    *port;
	struct bfad_s         *bfad;
	struct bfad_im_s	*im = itnim_drv->im;

	port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim);
	bfad = port->bfad;
	/* If the port (or the whole pport) is being deleted there is no
	 * point queueing offline work; mark the itnim offline directly. */
	if ((bfad->pport.flags & BFAD_PORT_DELETE) ||
		 (port->flags & BFAD_PORT_DELETE)) {
		itnim_drv->state = ITNIM_STATE_OFFLINE;
		return;
	}
	itnim_drv->im_port = port->im_port;
	itnim_drv->state = ITNIM_STATE_OFFLINE_PENDING;
	itnim_drv->queue_work = 1;

	/* ITNIM processing */
	if (itnim_drv->queue_work)
		queue_work(im->drv_workq, &itnim_drv->itnim_work);
}

/*
 * Allocate a Scsi_Host for a port.
 *
 * Reserves an idr slot for the im_port, allocates the Scsi_Host via
 * bfad_scsi_host_alloc(), fills in the host limits and transport
 * template, then registers it with the SCSI midlayer.  On any failure
 * the already-acquired resources are unwound via the goto ladder.
 * Returns 0 on success, non-zero otherwise (note: the alloc-failure
 * path returns 1, not a negative errno).
 */
int
bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
			struct device *dev)
{
	int error = 1;

	/* bfad_mutex serializes access to the global bfad_im_port_index idr */
	mutex_lock(&bfad_mutex);
	error = idr_alloc(&bfad_im_port_index, im_port, 0, 0, GFP_KERNEL);
	if (error < 0) {
		mutex_unlock(&bfad_mutex);
		printk(KERN_WARNING "idr_alloc failure\n");
		goto out;
	}
	im_port->idr_id = error;
	mutex_unlock(&bfad_mutex);

	im_port->shost = bfad_scsi_host_alloc(im_port, bfad);
	if (!im_port->shost) {
		error = 1;
		goto out_free_idr;
	}

	/* hostdata[0] carries the im_port back-pointer for template entries */
	im_port->shost->hostdata[0] = (unsigned long)im_port;
	im_port->shost->unique_id = im_port->idr_id;
	im_port->shost->this_id = -1;
	im_port->shost->max_id = MAX_FCP_TARGET;
	im_port->shost->max_lun = MAX_FCP_LUN;
	im_port->shost->max_cmd_len = 16;
	im_port->shost->can_queue = bfad->cfg_data.ioc_queue_depth;
	/* Physical base port and NPIV vports use different FC transports */
	if (im_port->port->pvb_type == BFAD_PORT_PHYS_BASE)
		im_port->shost->transportt = bfad_im_scsi_transport_template;
	else
		im_port->shost->transportt =
				bfad_im_scsi_vport_transport_template;

	error = scsi_add_host_with_dma(im_port->shost, dev, &bfad->pcidev->dev);
	if (error) {
		printk(KERN_WARNING "scsi_add_host failure %d\n", error);
		goto out_fc_rel;
	}

	return 0;

out_fc_rel:
	scsi_host_put(im_port->shost);
	im_port->shost = NULL;
out_free_idr:
	mutex_lock(&bfad_mutex);
	idr_remove(&bfad_im_port_index, im_port->idr_id);
	mutex_unlock(&bfad_mutex);
out:
	return error;
}

/* Unregister and release the Scsi_Host and return its idr slot. */
void
bfad_im_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
{
	bfa_trc(bfad, bfad->inst_no);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Free scsi%d\n",
			im_port->shost->host_no);

	fc_remove_host(im_port->shost);

	scsi_remove_host(im_port->shost);
	scsi_host_put(im_port->shost);

	mutex_lock(&bfad_mutex);
	idr_remove(&bfad_im_port_index, im_port->idr_id);
	mutex_unlock(&bfad_mutex);
}

/* Workqueue handler: terminate the FC vport of a non-physical port. */
static void
bfad_im_port_delete_handler(struct work_struct *work)
{
	struct bfad_im_port_s *im_port =
		container_of(work, struct bfad_im_port_s, port_delete_work);

	if (im_port->port->pvb_type != BFAD_PORT_PHYS_BASE) {
		im_port->flags |= BFAD_PORT_DELETE;
		fc_vport_terminate(im_port->fc_vport);
	}
}

/*
 * Allocate and initialize the initiator-mode port object for @port.
 * GFP_ATOMIC because this may run in a context that must not sleep.
 * Returns BFA_STATUS_OK or BFA_STATUS_ENOMEM.
 */
bfa_status_t
bfad_im_port_new(struct bfad_s *bfad, struct bfad_port_s *port)
{
	int rc = BFA_STATUS_OK;
	struct bfad_im_port_s *im_port;

	im_port = kzalloc(sizeof(struct bfad_im_port_s), GFP_ATOMIC);
	if (im_port == NULL) {
		rc = BFA_STATUS_ENOMEM;
		goto ext;
	}

	port->im_port = im_port;
	im_port->port = port;
	im_port->bfad = bfad;

	INIT_WORK(&im_port->port_delete_work, bfad_im_port_delete_handler);
	INIT_LIST_HEAD(&im_port->itnim_mapped_list);
	INIT_LIST_HEAD(&im_port->binding_list);

ext:
	return rc;
}

/* Schedule port deletion; actual work runs in the driver workqueue. */
void
bfad_im_port_delete(struct bfad_s *bfad, struct bfad_port_s *port)
{
	struct bfad_im_port_s *im_port = port->im_port;

	queue_work(bfad->im->drv_workq,
				&im_port->port_delete_work);
}

/* Free the FCP binding list; the itnim mapped list must already be empty. */
void
bfad_im_port_clean(struct bfad_im_port_s *im_port)
{
	struct bfad_fcp_binding *bp, *bp_new;
	unsigned long flags;
	struct bfad_s *bfad =  im_port->bfad;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	list_for_each_entry_safe(bp, bp_new, &im_port->binding_list,
					list_entry) {
		list_del(&bp->list_entry);
		kfree(bp);
	}

	/* the itnim_mapped_list must be empty at this time */
	WARN_ON(!list_empty(&im_port->itnim_mapped_list));

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}

/*
 * Workqueue handler: drain the active AEN (asynchronous event
 * notification) queue and forward each entry to the FC transport as a
 * vendor event, then recycle the entry onto the free queue.  The
 * spinlock is dropped around fc_host_post_vendor_event().
 */
static void bfad_aen_im_notify_handler(struct work_struct *work)
{
	struct bfad_im_s *im =
		container_of(work, struct bfad_im_s, aen_im_notify_work);
	struct bfa_aen_entry_s *aen_entry;
	struct bfad_s *bfad = im->bfad;
	struct Scsi_Host *shost = bfad->pport.im_port->shost;
	void *event_data;
	unsigned long flags;

	while (!list_empty(&bfad->active_aen_q)) {
		spin_lock_irqsave(&bfad->bfad_aen_spinlock, flags);
		bfa_q_deq(&bfad->active_aen_q, &aen_entry);
		spin_unlock_irqrestore(&bfad->bfad_aen_spinlock, flags);
		/* payload starts right after the embedded list_head */
		event_data = (char *)aen_entry + sizeof(struct list_head);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(struct bfa_aen_entry_s) -
				sizeof(struct list_head),
				(char *)event_data, BFAD_NL_VENDOR_ID);
		spin_lock_irqsave(&bfad->bfad_aen_spinlock, flags);
		list_add_tail(&aen_entry->qe, &bfad->free_aen_q);
		spin_unlock_irqrestore(&bfad->bfad_aen_spinlock, flags);
	}
}

/* Allocate the IM module state and its single-threaded workqueue. */
bfa_status_t
bfad_im_probe(struct bfad_s *bfad)
{
	struct bfad_im_s      *im;

	im = kzalloc(sizeof(struct bfad_im_s), GFP_KERNEL);
	if (im == NULL)
		return BFA_STATUS_ENOMEM;

	bfad->im = im;
	im->bfad = bfad;

	if (bfad_thread_workq(bfad) != BFA_STATUS_OK) {
		kfree(im);
		return BFA_STATUS_FAILED;
	}

	INIT_WORK(&im->aen_im_notify_work, bfad_aen_im_notify_handler);
	return BFA_STATUS_OK;
}

/* Undo bfad_im_probe(): destroy the workqueue and free the module state. */
void
bfad_im_probe_undo(struct bfad_s *bfad)
{
	if (bfad->im) {
		bfad_destroy_workq(bfad->im);
		kfree(bfad->im);
		bfad->im = NULL;
	}
}

/*
 * Pick the host template (physical vs. vport), apply the module-level
 * max_xfer_size override and per-adapter SG limit, then allocate the
 * Scsi_Host with one unsigned long of hostdata (the im_port pointer).
 */
struct Scsi_Host *
bfad_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
{
	struct scsi_host_template *sht;

	if (im_port->port->pvb_type == BFAD_PORT_PHYS_BASE)
		sht = &bfad_im_scsi_host_template;
	else
		sht = &bfad_im_vport_template;

	if (max_xfer_size != BFAD_MAX_SECTORS >> 1)
		sht->max_sectors = max_xfer_size << 1;

	sht->sg_tablesize = bfad->cfg_data.io_max_sge;

	return scsi_host_alloc(sht, sizeof(unsigned long));
}

/* Tear down a host: flush pending work (unless deleting), free and clean. */
void
bfad_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
{
	if (!(im_port->flags & BFAD_PORT_DELETE))
		flush_workqueue(bfad->im->drv_workq);
	bfad_im_scsi_host_free(im_port->bfad, im_port);
	bfad_im_port_clean(im_port);
	kfree(im_port);
}

/* Flush and destroy the driver workqueue, if one was created. */
void
bfad_destroy_workq(struct bfad_im_s *im)
{
	if (im && im->drv_workq) {
		flush_workqueue(im->drv_workq);
		destroy_workqueue(im->drv_workq);
		im->drv_workq = NULL;
	}
}

/* Create the per-instance single-threaded workqueue ("bfad_wq_<n>"). */
bfa_status_t
bfad_thread_workq(struct bfad_s *bfad)
{
	struct bfad_im_s      *im = bfad->im;

	bfa_trc(bfad, 0);
	snprintf(im->drv_workq_name, KOBJ_NAME_LEN, "bfad_wq_%d",
		 bfad->inst_no);
	im->drv_workq = create_singlethread_workqueue(im->drv_workq_name);
	if (!im->drv_workq)
		return BFA_STATUS_FAILED;

	return BFA_STATUS_OK;
}

/*
 * Scsi_Host template entry.
 *
 * Description:
 * OS entry point to adjust the queue_depths on a per-device basis.
 * Called once per device during the bus scan.
 * Return non-zero if fails.
 */
static int
bfad_im_slave_configure(struct scsi_device *sdev)
{
	scsi_change_queue_depth(sdev, bfa_lun_queue_depth);
	return 0;
}

/* Host template for the physical base port. */
struct scsi_host_template bfad_im_scsi_host_template = {
	.module = THIS_MODULE,
	.name = BFAD_DRIVER_NAME,
	.info = bfad_im_info,
	.queuecommand = bfad_im_queuecommand,
	.eh_abort_handler = bfad_im_abort_handler,
	.eh_device_reset_handler = bfad_im_reset_lun_handler,
	.eh_bus_reset_handler = bfad_im_reset_bus_handler,

	.slave_alloc = bfad_im_slave_alloc,
	.slave_configure = bfad_im_slave_configure,
	.slave_destroy = bfad_im_slave_destroy,

	.this_id = -1,
	.sg_tablesize = BFAD_IO_MAX_SGE,
	.cmd_per_lun = 3,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = bfad_im_host_attrs,
	.max_sectors = BFAD_MAX_SECTORS,
	.vendor_id = BFA_PCI_VENDOR_ID_BROCADE,
	.use_blk_tags = 1,
};

/* Host template for NPIV vports; differs only in shost_attrs/vendor_id. */
struct scsi_host_template bfad_im_vport_template = {
	.module = THIS_MODULE,
	.name = BFAD_DRIVER_NAME,
	.info = bfad_im_info,
	.queuecommand = bfad_im_queuecommand,
	.eh_abort_handler = bfad_im_abort_handler,
	.eh_device_reset_handler = bfad_im_reset_lun_handler,
	.eh_bus_reset_handler = bfad_im_reset_bus_handler,

	.slave_alloc = bfad_im_slave_alloc,
	.slave_configure = bfad_im_slave_configure,
	.slave_destroy = bfad_im_slave_destroy,

	.this_id = -1,
	.sg_tablesize = BFAD_IO_MAX_SGE,
	.cmd_per_lun = 3,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = bfad_im_vport_attrs,
	.max_sectors = BFAD_MAX_SECTORS,
	.use_blk_tags = 1,
};

/* Attach the FC transport templates for base port and vports. */
bfa_status_t
bfad_im_module_init(void)
{
	bfad_im_scsi_transport_template =
		fc_attach_transport(&bfad_im_fc_function_template);
	if (!bfad_im_scsi_transport_template)
		return BFA_STATUS_ENOMEM;

	bfad_im_scsi_vport_transport_template =
		fc_attach_transport(&bfad_im_vport_fc_function_template);
	if (!bfad_im_scsi_vport_transport_template) {
		fc_release_transport(bfad_im_scsi_transport_template);
		return BFA_STATUS_ENOMEM;
	}

	return BFA_STATUS_OK;
}

/* Release both FC transport templates, if they were attached. */
void
bfad_im_module_exit(void)
{
	if (bfad_im_scsi_transport_template)
		fc_release_transport(bfad_im_scsi_transport_template);

	if (bfad_im_scsi_vport_transport_template)
		fc_release_transport(bfad_im_scsi_vport_transport_template);
}

/*
 * Slowly raise the queue depth of devices on this target, but only
 * after BFA_QUEUE_FULL_RAMP_UP_TIME has passed since both the last
 * ramp-up and the last queue-full event.
 */
void
bfad_ramp_up_qdepth(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
{
	struct scsi_device *tmp_sdev;

	if (((jiffies - itnim->last_ramp_up_time) >
		BFA_QUEUE_FULL_RAMP_UP_TIME * HZ) &&
		((jiffies - itnim->last_queue_full_time) >
		BFA_QUEUE_FULL_RAMP_UP_TIME * HZ)) {
		shost_for_each_device(tmp_sdev, sdev->host) {
			if (bfa_lun_queue_depth > tmp_sdev->queue_depth) {
				if (tmp_sdev->id != sdev->id)
					continue;
				scsi_change_queue_depth(tmp_sdev,
					tmp_sdev->queue_depth + 1);

				itnim->last_ramp_up_time = jiffies;
			}
		}
	}
}

/* QUEUE_FULL response: track and shrink queue depth on all LUNs of target. */
void
bfad_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
{
	struct scsi_device *tmp_sdev;

	itnim->last_queue_full_time = jiffies;

	shost_for_each_device(tmp_sdev, sdev->host) {
		if (tmp_sdev->id != sdev->id)
			continue;
		scsi_track_queue_full(tmp_sdev, tmp_sdev->queue_depth - 1);
	}
}

/* Look up the itnim mapped to SCSI target @id, or NULL if not mapped. */
struct bfad_itnim_s *
bfad_get_itnim(struct bfad_im_port_s *im_port, int id)
{
	struct bfad_itnim_s   *itnim = NULL;

	/* Search the mapped list for this target ID */
	list_for_each_entry(itnim, &im_port->itnim_mapped_list, list_entry) {
		if (id == itnim->scsi_tgt_id)
			return itnim;
	}

	return NULL;
}

/*
 * Function is invoked from the SCSI Host Template slave_alloc() entry point.
 * Has the logic to query the LUN Mask database to check if this LUN needs to
 * be made visible to the SCSI mid-layer or not.
 *
 * Returns BFA_STATUS_OK if this LUN needs to be added to the OS stack.
 * Returns -ENXIO to notify SCSI mid-layer to not add this LUN to the OS stack.
 */
static int
bfad_im_check_if_make_lun_visible(struct scsi_device *sdev,
				  struct fc_rport *rport)
{
	struct bfad_itnim_data_s *itnim_data =
				(struct bfad_itnim_data_s *) rport->dd_data;
	struct bfa_s *bfa = itnim_data->itnim->bfa_itnim->bfa;
	struct bfa_rport_s *bfa_rport = itnim_data->itnim->bfa_itnim->rport;
	struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(bfa);
	int i = 0, ret = -ENXIO;

	for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
		if (lun_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE &&
		    scsilun_to_int(&lun_list[i].lun) == sdev->lun &&
		    lun_list[i].rp_tag == bfa_rport->rport_tag &&
		    lun_list[i].lp_tag == (u8)bfa_rport->rport_info.lp_tag) {
			ret = BFA_STATUS_OK;
			break;
		}
	}
	return ret;
}

/*
 * Scsi_Host template entry slave_alloc
 */
static int
bfad_im_slave_alloc(struct scsi_device *sdev)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	struct bfad_itnim_data_s *itnim_data;
	struct bfa_s *bfa;

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	itnim_data = (struct bfad_itnim_data_s *) rport->dd_data;
	bfa = itnim_data->itnim->bfa_itnim->bfa;

	if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_ENABLED) {
		/*
		 * We should not mask LUN 0 - since this will translate
		 * to no LUN / TARGET for SCSI ml resulting no scan.
		 */
		if (sdev->lun == 0) {
			sdev->sdev_bflags |= BLIST_NOREPORTLUN |
					     BLIST_SPARSELUN;
			goto done;
		}

		/*
		 * Query LUN Mask configuration - to expose this LUN
		 * to the SCSI mid-layer or to mask it.
		 */
		if (bfad_im_check_if_make_lun_visible(sdev, rport) !=
							BFA_STATUS_OK)
			return -ENXIO;
	}
done:
	sdev->hostdata = rport->dd_data;

	return 0;
}

/*
 * Map the adapter's maximum link speed onto the FC transport's
 * FC_PORTSPEED_* capability mask.  Returns 0 on allocation failure.
 */
u32
bfad_im_supported_speeds(struct bfa_s *bfa)
{
	struct bfa_ioc_attr_s *ioc_attr;
	u32 supported_speed = 0;

	ioc_attr = kzalloc(sizeof(struct bfa_ioc_attr_s), GFP_KERNEL);
	if (!ioc_attr)
		return 0;

	bfa_ioc_get_attr(&bfa->ioc, ioc_attr);
	if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_16GBPS)
		supported_speed |=  FC_PORTSPEED_16GBIT | FC_PORTSPEED_8GBIT |
				FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT;
	else if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_8GBPS) {
		/* mezzanine cards additionally report 1G capability */
		if (ioc_attr->adapter_attr.is_mezz) {
			supported_speed |= FC_PORTSPEED_8GBIT |
				FC_PORTSPEED_4GBIT |
				FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
		} else {
			supported_speed |= FC_PORTSPEED_8GBIT |
				FC_PORTSPEED_4GBIT |
				FC_PORTSPEED_2GBIT;
		}
	} else if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_4GBPS) {
		supported_speed |=  FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
				FC_PORTSPEED_1GBIT;
	} else if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_10GBPS) {
		supported_speed |= FC_PORTSPEED_10GBIT;
	}
	kfree(ioc_attr);
	return supported_speed;
}

/* Populate FC transport host attributes (WWNs, FC4 types, speeds, ...). */
void
bfad_fc_host_init(struct bfad_im_port_s *im_port)
{
	struct Scsi_Host *host = im_port->shost;
	struct bfad_s         *bfad = im_port->bfad;
	struct bfad_port_s    *port = im_port->port;
	char symname[BFA_SYMNAME_MAXLEN];
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);

	fc_host_node_name(host) =
		cpu_to_be64((bfa_fcs_lport_get_nwwn(port->fcs_port)));
	fc_host_port_name(host) =
		cpu_to_be64((bfa_fcs_lport_get_pwwn(port->fcs_port)));
	fc_host_max_npiv_vports(host) = bfa_lps_get_max_vport(&bfad->bfa);

	fc_host_supported_classes(host) = FC_COS_CLASS3;

	memset(fc_host_supported_fc4s(host), 0,
	       sizeof(fc_host_supported_fc4s(host)));
	if (supported_fc4s & BFA_LPORT_ROLE_FCP_IM)
		/* For FCP type 0x08 */
		fc_host_supported_fc4s(host)[2] = 1;
	/* For fibre channel services type 0x20 */
	fc_host_supported_fc4s(host)[7] = 1;

	strlcpy(symname, bfad->bfa_fcs.fabric.bport.port_cfg.sym_name.symname,
		BFA_SYMNAME_MAXLEN);
	sprintf(fc_host_symbolic_name(host), "%s", symname);

	fc_host_supported_speeds(host) = bfad_im_supported_speeds(&bfad->bfa);
	fc_host_maxframe_size(host) = fcport->cfg.maxfrsize;
}

/*
 * Register an FC remote port for @itnim with the transport, record its
 * identity, promote it to FCP target role, and cache the SCSI target id
 * assigned by the midlayer.  Silently returns if fc_remote_port_add()
 * fails (itnim->fc_rport stays NULL).
 */
static void
bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, struct bfad_itnim_s *itnim)
{
	struct fc_rport_identifiers rport_ids;
	struct fc_rport *fc_rport;
	struct bfad_itnim_data_s *itnim_data;

	rport_ids.node_name =
		cpu_to_be64(bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim));
	rport_ids.port_name =
		cpu_to_be64(bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim));
	rport_ids.port_id =
		bfa_hton3b(bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim));
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;

	itnim->fc_rport = fc_rport =
		fc_remote_port_add(im_port->shost, 0, &rport_ids);

	if (!fc_rport)
		return;

	fc_rport->maxframe_size =
		bfa_fcs_itnim_get_maxfrsize(&itnim->fcs_itnim);
	fc_rport->supported_classes = bfa_fcs_itnim_get_cos(&itnim->fcs_itnim);

	itnim_data = fc_rport->dd_data;
	itnim_data->itnim = itnim;

	rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;

	if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
		fc_remote_port_rolechg(fc_rport, rport_ids.roles);

	if ((fc_rport->scsi_target_id != -1)
	    && (fc_rport->scsi_target_id < MAX_FCP_TARGET))
		itnim->scsi_tgt_id = fc_rport->scsi_target_id;

	itnim->channel = fc_rport->channel;

	return;
}

/*
 * Work queue handler using FC transport service
 * Context: kernel
 *
 * State machine for itnim (initiator-target nexus) lifecycle: adds or
 * removes the FC remote port and maintains im_port->itnim_mapped_list.
 * bfad_lock is dropped around the sleeping FC transport calls
 * (fc_remote_port_add/delete) and re-taken afterwards.
 */
static void
bfad_im_itnim_work_handler(struct work_struct *work)
{
	struct bfad_itnim_s   *itnim = container_of(work, struct bfad_itnim_s,
						   itnim_work);
	struct bfad_im_s      *im = itnim->im;
	struct bfad_s         *bfad = im->bfad;
	struct bfad_im_port_s *im_port;
	unsigned long   flags;
	struct fc_rport *fc_rport;
	wwn_t wwpn;
	u32 fcid;
	char wwpn_str[32], fcid_str[16];

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	im_port = itnim->im_port;
	bfa_trc(bfad, itnim->state);
	switch (itnim->state) {
	case ITNIM_STATE_ONLINE:
		if (!itnim->fc_rport) {
			spin_unlock_irqrestore(&bfad->bfad_lock, flags);
			bfad_im_fc_rport_add(im_port, itnim);
			spin_lock_irqsave(&bfad->bfad_lock, flags);
			wwpn = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim);
			fcid = bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim);
			wwn2str(wwpn_str, wwpn);
			fcid2str(fcid_str, fcid);
			list_add_tail(&itnim->list_entry,
				&im_port->itnim_mapped_list);
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"ITNIM ONLINE Target: %d:0:%d "
			"FCID: %s WWPN: %s\n",
			im_port->shost->host_no,
			itnim->scsi_tgt_id,
			fcid_str, wwpn_str);
		} else {
			printk(KERN_WARNING
				"%s: itnim %llx is already in online state\n",
				__func__,
				bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim));
		}

		break;
	case ITNIM_STATE_OFFLINE_PENDING:
		itnim->state = ITNIM_STATE_OFFLINE;
		if (itnim->fc_rport) {
			fc_rport = itnim->fc_rport;
			((struct bfad_itnim_data_s *)
				fc_rport->dd_data)->itnim = NULL;
			itnim->fc_rport = NULL;
			if (!(im_port->port->flags & BFAD_PORT_DELETE)) {
				spin_unlock_irqrestore(&bfad->bfad_lock, flags);
				fc_rport->dev_loss_tmo =
					bfa_fcpim_path_tov_get(&bfad->bfa) + 1;
				fc_remote_port_delete(fc_rport);
				spin_lock_irqsave(&bfad->bfad_lock, flags);
			}
			wwpn = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim);
			fcid = bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim);
			wwn2str(wwpn_str, wwpn);
			fcid2str(fcid_str, fcid);
			list_del(&itnim->list_entry);
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"ITNIM OFFLINE Target: %d:0:%d "
				"FCID: %s WWPN: %s\n",
				im_port->shost->host_no,
				itnim->scsi_tgt_id,
				fcid_str, wwpn_str);
		}
		break;
	case ITNIM_STATE_FREE:
		if (itnim->fc_rport) {
			fc_rport = itnim->fc_rport;
			((struct bfad_itnim_data_s *)
				fc_rport->dd_data)->itnim = NULL;
			itnim->fc_rport = NULL;
			if (!(im_port->port->flags & BFAD_PORT_DELETE)) {
				spin_unlock_irqrestore(&bfad->bfad_lock, flags);
				fc_rport->dev_loss_tmo =
					bfa_fcpim_path_tov_get(&bfad->bfa) + 1;
				fc_remote_port_delete(fc_rport);
				spin_lock_irqsave(&bfad->bfad_lock, flags);
			}
			list_del(&itnim->list_entry);
		}

		kfree(itnim);
		break;
	default:
		WARN_ON(1);
		break;
	}

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}

/*
 * Scsi_Host template entry, queue a SCSI command to the BFAD.
 */
static int
bfad_im_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
{
	struct bfad_im_port_s *im_port =
		(struct bfad_im_port_s *) cmnd->device->host->hostdata[0];
	struct bfad_s         *bfad = im_port->bfad;
	struct bfad_itnim_data_s *itnim_data = cmnd->device->hostdata;
	struct bfad_itnim_s   *itnim;
	struct bfa_ioim_s *hal_io;
	unsigned long   flags;
	int             rc;
	int       sg_cnt = 0;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));

	rc = fc_remote_port_chkready(rport);
	if (rc) {
		cmnd->result = rc;
		done(cmnd);
		return 0;
	}

	/* complete (don't queue) commands while EEH recovery is in flight */
	if (bfad->bfad_flags & BFAD_EEH_BUSY) {
		if (bfad->bfad_flags & BFAD_EEH_PCI_CHANNEL_IO_PERM_FAILURE)
			cmnd->result = DID_NO_CONNECT << 16;
		else
			cmnd->result = DID_REQUEUE << 16;
		done(cmnd);
		return 0;
	}

	sg_cnt = scsi_dma_map(cmnd);
	if (sg_cnt < 0)
		return SCSI_MLQUEUE_HOST_BUSY;

	cmnd->scsi_done = done;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (!(bfad->bfad_flags & BFAD_HAL_START_DONE)) {
		printk(KERN_WARNING
			"bfad%d, queuecommand %p %x failed, BFA stopped\n",
		       bfad->inst_no, cmnd, cmnd->cmnd[0]);
		cmnd->result = ScsiResult(DID_NO_CONNECT, 0);
		goto out_fail_cmd;
	}

	itnim = itnim_data->itnim;
	if (!itnim) {
		cmnd->result = ScsiResult(DID_IMM_RETRY, 0);
		goto out_fail_cmd;
	}

	hal_io = bfa_ioim_alloc(&bfad->bfa, (struct bfad_ioim_s *) cmnd,
				    itnim->bfa_itnim, sg_cnt);
	if (!hal_io) {
		printk(KERN_WARNING "hal_io failure\n");
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		scsi_dma_unmap(cmnd);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	/* host_scribble carries the HAL I/O context through completion */
	cmnd->host_scribble = (char *)hal_io;
	bfa_ioim_start(hal_io);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;

out_fail_cmd:
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	scsi_dma_unmap(cmnd);
	if (done)
		done(cmnd);

	return 0;
}

static DEF_SCSI_QCMD(bfad_im_queuecommand)

/*
 * Poll (in 1s steps, bounded by bfa_linkup_delay) for the port and then
 * the remote ports to come online, then allow a final settle delay.
 */
void
bfad_rport_online_wait(struct bfad_s *bfad)
{
	int i;
	int rport_delay = 10;

	for (i = 0; !(bfad->bfad_flags & BFAD_PORT_ONLINE)
		&& i < bfa_linkup_delay; i++) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(HZ);
	}

	if (bfad->bfad_flags & BFAD_PORT_ONLINE) {
		rport_delay = rport_delay < bfa_linkup_delay ?
			rport_delay : bfa_linkup_delay;
		for (i = 0; !(bfad->bfad_flags & BFAD_RPORT_ONLINE)
			&& i < rport_delay; i++) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(HZ);
		}

		if (rport_delay > 0 && (bfad->bfad_flags & BFAD_RPORT_ONLINE)) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(rport_delay * HZ);
		}
	}
}

/* Choose the link-up delay: 30s when boot-over-SAN WWNs are in flash. */
int
bfad_get_linkup_delay(struct bfad_s *bfad)
{
	u8		nwwns = 0;
	wwn_t		wwns[BFA_PREBOOT_BOOTLUN_MAX];
	int		linkup_delay;

	/*
	 * Querying for the boot target port wwns
	 * -- read from boot information in flash.
	 * If nwwns > 0 => boot over SAN and set linkup_delay = 30
	 * else => local boot machine set linkup_delay = 0
	 */

	bfa_iocfc_get_bootwwns(&bfad->bfa, &nwwns, wwns);

	if (nwwns > 0)
		/* If Boot over SAN set linkup_delay = 30sec */
		linkup_delay = 30;
	else
		/* If local boot; no linkup_delay */
		linkup_delay = 0;

	return linkup_delay;
}
gpl-2.0
rlnelson-git/linux-nvme
drivers/net/wireless/ath/carl9170/debug.c
873
24144
/*
 * Atheros CARL9170 driver
 *
 * debug(fs) probing
 *
 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
 * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, see
 * http://www.gnu.org/licenses/.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 * Copyright (c) 2008-2009 Atheros Communications, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include "carl9170.h"
#include "cmd.h"

/* Append a formatted string at buf[off] and advance off (bounded by max). */
#define ADD(buf, off, max, fmt, args...)				\
	off += snprintf(&buf[off], max - off, fmt, ##args);

/* Per-file debugfs descriptor: read/write callbacks, buffer size, mode,
 * and the device state (req_dev_state) required to service an access. */
struct carl9170_debugfs_fops {
	unsigned int read_bufsize;
	umode_t attr;
	char *(*read)(struct ar9170 *ar, char *buf, size_t bufsize,
		      ssize_t *len);
	ssize_t (*write)(struct ar9170 *aru, const char *buf, size_t size);
	const struct file_operations fops;

	enum carl9170_device_state req_dev_state;
};

/*
 * Generic debugfs read dispatcher: allocates the per-file buffer, checks
 * the required device state under ar->mutex, invokes the file's read
 * callback and copies the produced text to userspace.  The callback may
 * return a buffer different from the one passed in only when
 * read_bufsize is 0 (asserted via WARN_ON_ONCE); the returned buffer is
 * always vfree()d here.
 */
static ssize_t carl9170_debugfs_read(struct file *file, char __user *userbuf,
				     size_t count, loff_t *ppos)
{
	struct carl9170_debugfs_fops *dfops;
	struct ar9170 *ar;
	char *buf = NULL, *res_buf = NULL;
	ssize_t ret = 0;
	int err = 0;

	if (!count)
		return 0;

	ar = file->private_data;

	if (!ar)
		return -ENODEV;

	dfops = container_of(file->f_op, struct carl9170_debugfs_fops, fops);

	if (!dfops->read)
		return -ENOSYS;

	if (dfops->read_bufsize) {
		buf = vmalloc(dfops->read_bufsize);
		if (!buf)
			return -ENOMEM;
	}

	mutex_lock(&ar->mutex);
	if (!CHK_DEV_STATE(ar, dfops->req_dev_state)) {
		err = -ENODEV;
		res_buf = buf;
		goto out_free;
	}

	res_buf = dfops->read(ar, buf, dfops->read_bufsize, &ret);

	if (ret > 0)
		err = simple_read_from_buffer(userbuf, count, ppos,
					      res_buf, ret);
	else
		err = ret;

	WARN_ON_ONCE(dfops->read_bufsize && (res_buf != buf));

out_free:
	vfree(res_buf);
	mutex_unlock(&ar->mutex);
	return err;
}

/*
 * Generic debugfs write dispatcher: copies up to PAGE_SIZE from
 * userspace, takes ar->mutex non-blockingly (-EAGAIN if contended),
 * verifies the required device state and calls the file's write hook.
 */
static ssize_t carl9170_debugfs_write(struct file *file,
	const char __user *userbuf, size_t count, loff_t *ppos)
{
	struct carl9170_debugfs_fops *dfops;
	struct ar9170 *ar;
	char *buf = NULL;
	int err = 0;

	if (!count)
		return 0;

	if (count > PAGE_SIZE)
		return -E2BIG;

	ar = file->private_data;

	if (!ar)
		return -ENODEV;

	dfops = container_of(file->f_op, struct carl9170_debugfs_fops, fops);

	if (!dfops->write)
		return -ENOSYS;

	buf = vmalloc(count);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, userbuf, count)) {
		err = -EFAULT;
		goto out_free;
	}

	if (mutex_trylock(&ar->mutex) == 0) {
		err = -EAGAIN;
		goto out_free;
	}

	if (!CHK_DEV_STATE(ar, dfops->req_dev_state)) {
		err = -ENODEV;
		goto out_unlock;
	}

	err = dfops->write(ar, buf, count);
	if (err)
		goto out_unlock;

out_unlock:
	mutex_unlock(&ar->mutex);

out_free:
	vfree(buf);
	return err;
}

/* Declare a carl9170_debugfs_fops instance wired to the dispatchers above. */
#define __DEBUGFS_DECLARE_FILE(name, _read, _write, _read_bufsize,	\
			       _attr, _dstate)				\
static const struct carl9170_debugfs_fops carl_debugfs_##name ##_ops = {\
	.read_bufsize = _read_bufsize,					\
	.read = _read,							\
	.write = _write,						\
	.attr = _attr,							\
	.req_dev_state = _dstate,					\
	.fops = {							\
		.open	= simple_open,					\
		.read	= carl9170_debugfs_read,			\
		.write	= carl9170_debugfs_write,			\
		.owner	= THIS_MODULE					\
	},								\
}

#define DEBUGFS_DECLARE_FILE(name, _read, _write, _read_bufsize, _attr) \
	__DEBUGFS_DECLARE_FILE(name, _read, _write, _read_bufsize,	\
			       _attr, CARL9170_STARTED)			\

#define DEBUGFS_DECLARE_RO_FILE(name, _read_bufsize)			\
	DEBUGFS_DECLARE_FILE(name, carl9170_debugfs_##name ##_read,	\
			     NULL, _read_bufsize, S_IRUSR)

#define DEBUGFS_DECLARE_WO_FILE(name)					\
	DEBUGFS_DECLARE_FILE(name, NULL, carl9170_debugfs_##name ##_write,\
			     0, S_IWUSR)

#define DEBUGFS_DECLARE_RW_FILE(name, _read_bufsize)			\
	DEBUGFS_DECLARE_FILE(name, carl9170_debugfs_##name ##_read,	\
			     carl9170_debugfs_##name ##_write,		\
			     _read_bufsize, S_IRUSR | S_IWUSR)

#define __DEBUGFS_DECLARE_RW_FILE(name, _read_bufsize, _dstate)		\
	__DEBUGFS_DECLARE_FILE(name, carl9170_debugfs_##name ##_read,	\
			       carl9170_debugfs_##name ##_write,	\
			       _read_bufsize, S_IRUSR | S_IWUSR, _dstate)

/* One-shot read-only file printing a single formatted value. */
#define DEBUGFS_READONLY_FILE(name, _read_bufsize, fmt, value...)	\
static char *carl9170_debugfs_ ##name ## _read(struct ar9170 *ar,	\
					       char *buf, size_t buf_size,\
					       ssize_t *len)		\
{									\
	ADD(buf, *len, buf_size, fmt "\n", ##value);			\
	return buf;							\
}									\
DEBUGFS_DECLARE_RO_FILE(name, _read_bufsize)

/* Dump the firmware memory-block bitmap plus cookie/memory statistics. */
static char *carl9170_debugfs_mem_usage_read(struct ar9170 *ar, char *buf,
					     size_t bufsize, ssize_t *len)
{
	ADD(buf, *len, bufsize, "jar: [");

	spin_lock_bh(&ar->mem_lock);

	*len += bitmap_scnprintf(&buf[*len], bufsize - *len,
				 ar->mem_bitmap, ar->fw.mem_blocks);

	ADD(buf, *len, bufsize, "]\n");

	ADD(buf, *len, bufsize, "cookies: used:%3d / total:%3d, allocs:%d\n",
	    bitmap_weight(ar->mem_bitmap, ar->fw.mem_blocks),
	    ar->fw.mem_blocks, atomic_read(&ar->mem_allocs));

	ADD(buf, *len, bufsize, "memory: free:%3d (%3d KiB) / total:%3d KiB)\n",
	    atomic_read(&ar->mem_free_blocks),
	    (atomic_read(&ar->mem_free_blocks) * ar->fw.mem_block_size) / 1024,
	    (ar->fw.mem_blocks * ar->fw.mem_block_size) / 1024);

	spin_unlock_bh(&ar->mem_lock);

	return buf;
}
DEBUGFS_DECLARE_RO_FILE(mem_usage, 512);

/* Per-AC queue statistics: lengths/limits, totals, pending/wait counts. */
static char *carl9170_debugfs_qos_stat_read(struct ar9170 *ar, char *buf,
					    size_t bufsize, ssize_t *len)
{
	ADD(buf, *len, bufsize, "%s QoS AC\n", modparam_noht ?
	    "Hardware" : "Software");

	ADD(buf, *len, bufsize, "[ VO VI " " BE BK ]\n");

	spin_lock_bh(&ar->tx_stats_lock);
	ADD(buf, *len, bufsize, "[length/limit length/limit "
	    "length/limit length/limit ]\n"
	    "[ %3d/%3d %3d/%3d "
	    " %3d/%3d %3d/%3d ]\n\n",
	    ar->tx_stats[0].len, ar->tx_stats[0].limit,
	    ar->tx_stats[1].len, ar->tx_stats[1].limit,
	    ar->tx_stats[2].len, ar->tx_stats[2].limit,
	    ar->tx_stats[3].len, ar->tx_stats[3].limit);

	ADD(buf, *len, bufsize, "[ total total "
	    " total total ]\n"
	    "[%10d %10d %10d %10d ]\n\n",
	    ar->tx_stats[0].count, ar->tx_stats[1].count,
	    ar->tx_stats[2].count, ar->tx_stats[3].count);

	spin_unlock_bh(&ar->tx_stats_lock);

	ADD(buf, *len, bufsize, "[ pend/waittx pend/waittx "
	    " pend/waittx pend/waittx]\n"
	    "[ %3d/%3d %3d/%3d "
	    " %3d/%3d %3d/%3d ]\n\n",
	    skb_queue_len(&ar->tx_pending[0]),
	    skb_queue_len(&ar->tx_status[0]),
	    skb_queue_len(&ar->tx_pending[1]),
	    skb_queue_len(&ar->tx_status[1]),
	    skb_queue_len(&ar->tx_pending[2]),
	    skb_queue_len(&ar->tx_status[2]),
	    skb_queue_len(&ar->tx_pending[3]),
	    skb_queue_len(&ar->tx_status[3]));

	return buf;
}
DEBUGFS_DECLARE_RO_FILE(qos_stat, 512);

/* Append a one-line summary (cookie, DA, seq, mac/phy control, age) of a
 * queued tx superframe. */
static void carl9170_debugfs_format_frame(struct ar9170 *ar,
	struct sk_buff *skb, const char *prefix, char *buf,
	ssize_t *off, ssize_t bufsize)
{
	struct _carl9170_tx_superframe *txc = (void *) skb->data;
	struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
	struct carl9170_tx_info *arinfo = (void *) txinfo->rate_driver_data;
	struct ieee80211_hdr *hdr = (void *) txc->frame_data;

	ADD(buf, *off, bufsize, "%s %p, c:%2x, DA:%pM, sq:%4d, mc:%.4x, "
	    "pc:%.8x, to:%d ms\n", prefix, skb, txc->s.cookie,
	    ieee80211_get_DA(hdr), get_seq_h(hdr),
	    le16_to_cpu(txc->f.mac_control), le32_to_cpu(txc->f.phy_control),
	    jiffies_to_msecs(jiffies - arinfo->timeout));
}

/* Walk the A-MPDU session list (RCU) and dump each TID's block-ack
 * window state plus every frame waiting in the reorder buffer. */
static char *carl9170_debugfs_ampdu_state_read(struct ar9170 *ar, char *buf,
					       size_t bufsize, ssize_t *len)
{
	struct carl9170_sta_tid *iter;
	struct sk_buff *skb;
	int cnt = 0, fc;
	int offset;

	rcu_read_lock();
	list_for_each_entry_rcu(iter, &ar->tx_ampdu_list, list) {
		spin_lock_bh(&iter->lock);
		ADD(buf, *len, bufsize, "Entry: #%2d TID:%1d, BSN:%4d, "
		    "SNX:%4d, HSN:%4d, BAW:%2d, state:%1d, toggles:%d\n",
		    cnt, iter->tid, iter->bsn, iter->snx, iter->hsn,
		    iter->max, iter->state, iter->counter);

		ADD(buf, *len, bufsize, "\tWindow: [");

		*len += bitmap_scnprintf(&buf[*len], bufsize - *len,
					 iter->bitmap, CARL9170_BAW_BITS);

/* Column offset of sequence-number @offset within the printed bitmap
 * (accounts for the separators bitmap_scnprintf emits). */
#define BM_STR_OFF(offset)					\
	((CARL9170_BAW_BITS - (offset) - 1) / 4 +		\
	 (CARL9170_BAW_BITS - (offset) - 1) / 32 + 1)

		ADD(buf, *len, bufsize, ",W]\n");

		offset = BM_STR_OFF(0);
		ADD(buf, *len, bufsize, "\tBase Seq: %*s\n", offset, "T");

		offset = BM_STR_OFF(SEQ_DIFF(iter->snx, iter->bsn));
		ADD(buf, *len, bufsize, "\tNext Seq: %*s\n", offset, "W");

		offset = BM_STR_OFF(((int)iter->hsn - (int)iter->bsn) %
				    CARL9170_BAW_BITS);
		ADD(buf, *len, bufsize, "\tLast Seq: %*s\n", offset, "N");

		ADD(buf, *len, bufsize, "\tPre-Aggregation reorder buffer: "
		    " currently queued:%d\n", skb_queue_len(&iter->queue));

		fc = 0;
		skb_queue_walk(&iter->queue, skb) {
			char prefix[32];

			snprintf(prefix, sizeof(prefix), "\t\t%3d :", fc);
			carl9170_debugfs_format_frame(ar, skb, prefix, buf,
						      len, bufsize);
			fc++;
		}
		spin_unlock_bh(&iter->lock);
		cnt++;
	}
	rcu_read_unlock();

	return buf;
}
DEBUGFS_DECLARE_RO_FILE(ampdu_state, 8000);

/* Dump every frame in @queue, one formatted line per skb. */
static void carl9170_debugfs_queue_dump(struct ar9170 *ar, char *buf,
					ssize_t *len, size_t bufsize,
					struct sk_buff_head *queue)
{
	struct sk_buff *skb;
	char prefix[16];
	int fc = 0;

	spin_lock_bh(&queue->lock);
	skb_queue_walk(queue, skb) {
		snprintf(prefix, sizeof(prefix), "%3d :", fc);
		carl9170_debugfs_format_frame(ar, skb, prefix, buf,
					      len, bufsize);
		fc++;
	}
	spin_unlock_bh(&queue->lock);
}

/* Read-only debugfs file dumping queue ar->q[qi]. */
#define DEBUGFS_QUEUE_DUMP(q, qi)					\
static char *carl9170_debugfs_##q ##_##qi ##_read(struct ar9170 *ar,	\
	char *buf, size_t bufsize, ssize_t *len)			\
{									\
	carl9170_debugfs_queue_dump(ar, buf, len, bufsize, &ar->q[qi]);	\
	return buf;							\
}									\
DEBUGFS_DECLARE_RO_FILE(q##_##qi, 8000);

/* Power-save state: mode, sleep duration, time since last transitions. */
static char *carl9170_debugfs_sta_psm_read(struct ar9170 *ar, char *buf,
					   size_t bufsize, ssize_t *len)
{
	ADD(buf, *len, bufsize, "psm state: %s\n", (ar->ps.off_override ?
	    "FORCE CAM" : (ar->ps.state ? "PSM" : "CAM")));

	ADD(buf, *len, bufsize, "sleep duration: %d ms.\n", ar->ps.sleep_ms);
	ADD(buf, *len, bufsize, "last power-state transition: %d ms ago.\n",
	    jiffies_to_msecs(jiffies - ar->ps.last_action));
	ADD(buf, *len, bufsize, "last CAM->PSM transition: %d ms ago.\n",
	    jiffies_to_msecs(jiffies - ar->ps.last_slept));

	return buf;
}
DEBUGFS_DECLARE_RO_FILE(sta_psm, 160);

/* Per-queue stall report; note: reading resets the recorded maxima. */
static char *carl9170_debugfs_tx_stuck_read(struct ar9170 *ar, char *buf,
					    size_t bufsize, ssize_t *len)
{
	int i;

	for (i = 0; i < ar->hw->queues; i++) {
		ADD(buf, *len, bufsize, "TX queue [%d]: %10d max:%10d ms.\n",
		    i, ieee80211_queue_stopped(ar->hw, i) ?
		    jiffies_to_msecs(jiffies - ar->queue_stop_timeout[i]) : 0,
		    jiffies_to_msecs(ar->max_queue_stop_timeout[i]));

		ar->max_queue_stop_timeout[i] = 0;
	}

	return buf;
}
DEBUGFS_DECLARE_RO_FILE(tx_stuck, 180);

/* Query and print the noise floor for both chains (main + ext. channel). */
static char *carl9170_debugfs_phy_noise_read(struct ar9170 *ar, char *buf,
					     size_t bufsize, ssize_t *len)
{
	int err;

	err = carl9170_get_noisefloor(ar);
	if (err) {
		*len = err;
		return buf;
	}

	ADD(buf, *len, bufsize, "Chain 0: %10d dBm, ext. chan.:%10d dBm\n",
	    ar->noise[0], ar->noise[2]);
	ADD(buf, *len, bufsize, "Chain 2: %10d dBm, ext. chan.:%10d dBm\n",
	    ar->noise[1], ar->noise[3]);

	return buf;
}
DEBUGFS_DECLARE_RO_FILE(phy_noise, 180);

/* List the registered virtual interfaces and the vif id bitmap. */
static char *carl9170_debugfs_vif_dump_read(struct ar9170 *ar, char *buf,
					    size_t bufsize, ssize_t *len)
{
	struct carl9170_vif_info *iter;
	int i = 0;

	ADD(buf, *len, bufsize, "registered VIFs:%d \\ %d\n",
	    ar->vifs, ar->fw.vif_num);

	ADD(buf, *len, bufsize, "VIF bitmap: [");

	*len += bitmap_scnprintf(&buf[*len], bufsize - *len,
				 &ar->vif_bitmap, ar->fw.vif_num);

	ADD(buf, *len, bufsize, "]\n");

	rcu_read_lock();
	list_for_each_entry_rcu(iter, &ar->vif_list, list) {
		struct ieee80211_vif *vif = carl9170_get_vif(iter);
		ADD(buf, *len, bufsize, "\t%d = [%s VIF, id:%d, type:%x "
		    " mac:%pM %s]\n", i, (carl9170_get_main_vif(ar) == vif ?
		    "Master" : " Slave"), iter->id, vif->type, vif->addr,
		    iter->enable_beacon ? "beaconing " : "");
		i++;
	}
	rcu_read_unlock();

	return buf;
}
DEBUGFS_DECLARE_RO_FILE(vif_dump, 8000);

/* Re-read the hardware counter registers for <name> into
 * ar->debug.stats.<name>_counter; evaluates to 0 or a -errno. */
#define UPDATE_COUNTER(ar, name)	({				\
	u32 __tmp[ARRAY_SIZE(name##_regs)];				\
	unsigned int __i, __err = -ENODEV;				\
									\
	for (__i = 0; __i < ARRAY_SIZE(name##_regs); __i++) {		\
		__tmp[__i] = name##_regs[__i].reg;			\
		ar->debug.stats.name##_counter[__i] = 0;		\
	}								\
									\
	if (IS_STARTED(ar))						\
		__err = carl9170_read_mreg(ar, ARRAY_SIZE(name##_regs),	\
					   __tmp, ar->debug.stats.name##_counter);\
	(__err); })

/* Accumulate the freshly read counters into the running sums. */
#define TALLY_SUM_UP(ar, name)	do {					\
	unsigned int __i;						\
									\
	for (__i = 0; __i < ARRAY_SIZE(name##_regs); __i++) {		\
		ar->debug.stats.name##_sum[__i] +=			\
			ar->debug.stats.name##_counter[__i];		\
	}								\
} while (0)

/* Read-only file printing running sum and delta for each tally register. */
#define DEBUGFS_HW_TALLY_FILE(name, f)					\
static char *carl9170_debugfs_##name ## _read(struct ar9170 *ar,	\
	 char *dum, size_t bufsize, ssize_t *ret)			\
{									\
	char *buf;							\
	int i, max_len, err;						\
									\
	max_len = ARRAY_SIZE(name##_regs) * 80;				\
	buf = vmalloc(max_len);						\
	if (!buf)							\
		return NULL;						\
									\
	err = UPDATE_COUNTER(ar, name);					\
	if (err) {							\
		*ret = err;						\
		return buf;						\
	}								\
									\
	TALLY_SUM_UP(ar, name);						\
									\
	for (i = 0; i < ARRAY_SIZE(name##_regs); i++) {			\
		ADD(buf, *ret, max_len, "%22s = %" f "[+%" f "]\n",	\
		    name##_regs[i].nreg, ar->debug.stats.name ##_sum[i],\
		    ar->debug.stats.name ##_counter[i]);		\
	}								\
									\
	return buf;							\
}									\
DEBUGFS_DECLARE_RO_FILE(name, 0);

/* Read-only file printing the current value of each register. */
#define DEBUGFS_HW_REG_FILE(name, f)					\
static char *carl9170_debugfs_##name ## _read(struct ar9170 *ar,	\
	char *dum, size_t bufsize, ssize_t *ret)			\
{									\
	char *buf;							\
	int i, max_len, err;						\
									\
	max_len = ARRAY_SIZE(name##_regs) * 80;				\
	buf = vmalloc(max_len);						\
	if (!buf)							\
		return NULL;						\
									\
	err = UPDATE_COUNTER(ar, name);					\
	if (err) {							\
		*ret = err;						\
		return buf;						\
	}								\
									\
	for (i = 0; i < ARRAY_SIZE(name##_regs); i++) {			\
		ADD(buf, *ret, max_len, "%22s = %" f "\n",		\
		    name##_regs[i].nreg,				\
		    ar->debug.stats.name##_counter[i]);			\
	}								\
									\
	return buf;							\
}									\
DEBUGFS_DECLARE_RO_FILE(name, 0);

/*
 * "0x<reg> [n]" -> read up to 15 word-aligned registers below 0x280000
 * and append the results to the debug ring buffer for hw_ioread32_read.
 */
static ssize_t carl9170_debugfs_hw_ioread32_write(struct ar9170 *ar,
	const char *buf, size_t count)
{
	int err = 0, i, n = 0, max_len = 32, res;
	unsigned int reg, tmp;

	if (!count)
		return 0;

	if (count > max_len)
		return -E2BIG;

	res = sscanf(buf, "0x%X %d", &reg, &n);
	if (res < 1) {
		err = -EINVAL;
		goto out;
	}

	if (res == 1)
		n = 1;

	if (n > 15) {
		err = -EMSGSIZE;
		goto out;
	}

	if ((reg >= 0x280000) || ((reg + (n << 2)) >= 0x280000)) {
		err = -EADDRNOTAVAIL;
		goto out;
	}

	if (reg & 3) {
		err = -EINVAL;
		goto out;
	}

	for (i = 0; i < n; i++) {
		err = carl9170_read_reg(ar, reg + (i << 2), &tmp);
		if (err)
			goto out;

		ar->debug.ring[ar->debug.ring_tail].reg = reg + (i << 2);
		ar->debug.ring[ar->debug.ring_tail].value = tmp;
		ar->debug.ring_tail++;
		ar->debug.ring_tail %= CARL9170_DEBUG_RING_SIZE;
	}

out:
	return err ? err : count;
}

/* Drain the register-read ring (max 64 entries per read) as "reg = val". */
static char *carl9170_debugfs_hw_ioread32_read(struct ar9170 *ar, char *buf,
					       size_t bufsize, ssize_t *ret)
{
	int i = 0;

	while (ar->debug.ring_head != ar->debug.ring_tail) {
		ADD(buf, *ret, bufsize, "%.8x = %.8x\n",
		    ar->debug.ring[ar->debug.ring_head].reg,
		    ar->debug.ring[ar->debug.ring_head].value);

		ar->debug.ring_head++;
		ar->debug.ring_head %= CARL9170_DEBUG_RING_SIZE;

		if (i++ == 64)
			break;
	}
	ar->debug.ring_head = ar->debug.ring_tail;
	return buf;
}
DEBUGFS_DECLARE_RW_FILE(hw_ioread32, CARL9170_DEBUG_RING_SIZE * 40);

/*
 * Fault-injection / recovery control: 'F'ull usb reset, 'R'estart,
 * 'M'ac reset or 'P'hy reinit, selected by the first written byte.
 */
static ssize_t carl9170_debugfs_bug_write(struct ar9170 *ar, const char *buf,
					  size_t count)
{
	int err;

	if (count < 1)
		return -EINVAL;

	switch (buf[0]) {
	case 'F':
		ar->needs_full_reset = true;
		break;

	case 'R':
		if (!IS_STARTED(ar)) {
			err = -EAGAIN;
			goto out;
		}

		ar->needs_full_reset = false;
		break;

	case 'M':
		err = carl9170_mac_reset(ar);
		if (err < 0)
			count = err;

		goto out;

	case 'P':
		err = carl9170_set_channel(ar, ar->hw->conf.chandef.chan,
			cfg80211_get_chandef_type(&ar->hw->conf.chandef));
		if (err < 0)
			count = err;

		goto out;

	default:
		return -EINVAL;
	}

	carl9170_restart(ar, CARL9170_RR_USER_REQUEST);

out:
	return count;
}

/* Usage help plus restart/error statistics for the "bug" file. */
static char *carl9170_debugfs_bug_read(struct ar9170 *ar, char *buf,
				       size_t bufsize, ssize_t *ret)
{
	ADD(buf, *ret, bufsize, "[P]hy reinit, [R]estart, [F]ull usb reset, "
	    "[M]ac reset\n");
	ADD(buf, *ret, bufsize, "firmware restarts:%d, last reason:%d\n",
	    ar->restart_counter, ar->last_reason);
	ADD(buf, *ret, bufsize, "phy reinit errors:%d (%d)\n",
	    ar->total_chan_fail, ar->chan_fail);
	ADD(buf, *ret, bufsize, "reported firmware errors:%d\n",
	    ar->fw.err_counter);
	ADD(buf, *ret, bufsize, "reported firmware BUGs:%d\n",
	    ar->fw.bug_counter);
	ADD(buf, *ret, bufsize, "pending restart requests:%d\n",
	    atomic_read(&ar->pending_restarts));
	return buf;
}
__DEBUGFS_DECLARE_RW_FILE(bug, 400, CARL9170_STOPPED);

/* Human-readable names for the CARL9170_ERP_* protection modes. */
static const char *const erp_modes[] = {
	[CARL9170_ERP_INVALID] = "INVALID",
	[CARL9170_ERP_AUTO] = "Automatic",
	[CARL9170_ERP_MAC80211] = "Set by MAC80211",
	[CARL9170_ERP_OFF] = "Force Off",
	[CARL9170_ERP_RTS] = "Force RTS",
	[CARL9170_ERP_CTS] = "Force CTS"
};

static char *carl9170_debugfs_erp_read(struct ar9170 *ar, char *buf,
				       size_t bufsize, ssize_t *ret)
{
	ADD(buf, *ret, bufsize, "ERP Setting: (%d) -> %s\n", ar->erp_mode,
	    erp_modes[ar->erp_mode]);
	return buf;
}

/* Accept a decimal mode index within (CARL9170_ERP_INVALID, __CARL9170_ERP_NUM). */
static ssize_t carl9170_debugfs_erp_write(struct ar9170 *ar, const char *buf,
					  size_t count)
{
	int res, val;

	if (count < 1)
		return -EINVAL;

	res = sscanf(buf, "%d", &val);
	if (res != 1)
		return -EINVAL;

	if (!((val > CARL9170_ERP_INVALID) &&
	      (val < __CARL9170_ERP_NUM)))
		return -EINVAL;

	ar->erp_mode = val;
	return count;
}
DEBUGFS_DECLARE_RW_FILE(erp, 80);

/* "0x<reg> 0x<val>" -> write one word-aligned register in (0x100000, 0x280000). */
static ssize_t carl9170_debugfs_hw_iowrite32_write(struct ar9170 *ar,
	const char *buf, size_t count)
{
	int err = 0, max_len = 22, res;
	u32 reg, val;

	if (!count)
		return 0;

	if (count > max_len)
		return -E2BIG;

	res = sscanf(buf, "0x%X 0x%X", &reg, &val);
	if (res != 2) {
		err = -EINVAL;
		goto out;
	}

	if (reg <= 0x100000 || reg >= 0x280000) {
		err = -EADDRNOTAVAIL;
		goto out;
	}

	if (reg & 3) {
		err = -EINVAL;
		goto out;
	}

	err = carl9170_write_reg(ar, reg, val);
	if (err)
		goto out;

out:
	return err ? err : count;
}
DEBUGFS_DECLARE_WO_FILE(hw_iowrite32);

DEBUGFS_HW_TALLY_FILE(hw_tx_tally, "u");
DEBUGFS_HW_TALLY_FILE(hw_rx_tally, "u");
DEBUGFS_HW_TALLY_FILE(hw_phy_errors, "u");
DEBUGFS_HW_REG_FILE(hw_wlan_queue, ".8x");
DEBUGFS_HW_REG_FILE(hw_pta_queue, ".8x");
DEBUGFS_HW_REG_FILE(hw_ampdu_info, ".8x");
DEBUGFS_QUEUE_DUMP(tx_status, 0);
DEBUGFS_QUEUE_DUMP(tx_status, 1);
DEBUGFS_QUEUE_DUMP(tx_status, 2);
DEBUGFS_QUEUE_DUMP(tx_status, 3);
DEBUGFS_QUEUE_DUMP(tx_pending, 0);
DEBUGFS_QUEUE_DUMP(tx_pending, 1);
DEBUGFS_QUEUE_DUMP(tx_pending, 2);
DEBUGFS_QUEUE_DUMP(tx_pending, 3);
DEBUGFS_READONLY_FILE(usb_tx_anch_urbs, 20, "%d",
		      atomic_read(&ar->tx_anch_urbs));
DEBUGFS_READONLY_FILE(usb_rx_anch_urbs, 20, "%d",
		      atomic_read(&ar->rx_anch_urbs));
DEBUGFS_READONLY_FILE(usb_rx_work_urbs, 20, "%d",
		      atomic_read(&ar->rx_work_urbs));
DEBUGFS_READONLY_FILE(usb_rx_pool_urbs, 20, "%d",
		      atomic_read(&ar->rx_pool_urbs));
DEBUGFS_READONLY_FILE(tx_total_queued, 20, "%d",
		      atomic_read(&ar->tx_total_queued));
DEBUGFS_READONLY_FILE(tx_ampdu_scheduler, 20, "%d",
		      atomic_read(&ar->tx_ampdu_scheduler));
DEBUGFS_READONLY_FILE(tx_total_pending, 20, "%d",
		      atomic_read(&ar->tx_total_pending));
DEBUGFS_READONLY_FILE(tx_ampdu_list_len, 20, "%d",
		      ar->tx_ampdu_list_len);
DEBUGFS_READONLY_FILE(tx_ampdu_upload, 20, "%d",
		      atomic_read(&ar->tx_ampdu_upload));
DEBUGFS_READONLY_FILE(tx_janitor_last_run, 64, "last run:%d ms ago",
		      jiffies_to_msecs(jiffies - ar->tx_janitor_last_run));
DEBUGFS_READONLY_FILE(tx_dropped, 20, "%d", ar->tx_dropped);
DEBUGFS_READONLY_FILE(rx_dropped, 20, "%d", ar->rx_dropped);
DEBUGFS_READONLY_FILE(sniffer_enabled, 20, "%d", ar->sniffer_enabled);
DEBUGFS_READONLY_FILE(rx_software_decryption, 20, "%d",
		      ar->rx_software_decryption);
DEBUGFS_READONLY_FILE(ampdu_factor, 20, "%d", ar->current_factor);
DEBUGFS_READONLY_FILE(ampdu_density, 20, "%d", ar->current_density);
DEBUGFS_READONLY_FILE(beacon_int, 20, "%d TU", ar->global_beacon_int);
DEBUGFS_READONLY_FILE(pretbtt, 20, "%d TU",
err : count; } DEBUGFS_DECLARE_WO_FILE(hw_iowrite32); DEBUGFS_HW_TALLY_FILE(hw_tx_tally, "u"); DEBUGFS_HW_TALLY_FILE(hw_rx_tally, "u"); DEBUGFS_HW_TALLY_FILE(hw_phy_errors, "u"); DEBUGFS_HW_REG_FILE(hw_wlan_queue, ".8x"); DEBUGFS_HW_REG_FILE(hw_pta_queue, ".8x"); DEBUGFS_HW_REG_FILE(hw_ampdu_info, ".8x"); DEBUGFS_QUEUE_DUMP(tx_status, 0); DEBUGFS_QUEUE_DUMP(tx_status, 1); DEBUGFS_QUEUE_DUMP(tx_status, 2); DEBUGFS_QUEUE_DUMP(tx_status, 3); DEBUGFS_QUEUE_DUMP(tx_pending, 0); DEBUGFS_QUEUE_DUMP(tx_pending, 1); DEBUGFS_QUEUE_DUMP(tx_pending, 2); DEBUGFS_QUEUE_DUMP(tx_pending, 3); DEBUGFS_READONLY_FILE(usb_tx_anch_urbs, 20, "%d", atomic_read(&ar->tx_anch_urbs)); DEBUGFS_READONLY_FILE(usb_rx_anch_urbs, 20, "%d", atomic_read(&ar->rx_anch_urbs)); DEBUGFS_READONLY_FILE(usb_rx_work_urbs, 20, "%d", atomic_read(&ar->rx_work_urbs)); DEBUGFS_READONLY_FILE(usb_rx_pool_urbs, 20, "%d", atomic_read(&ar->rx_pool_urbs)); DEBUGFS_READONLY_FILE(tx_total_queued, 20, "%d", atomic_read(&ar->tx_total_queued)); DEBUGFS_READONLY_FILE(tx_ampdu_scheduler, 20, "%d", atomic_read(&ar->tx_ampdu_scheduler)); DEBUGFS_READONLY_FILE(tx_total_pending, 20, "%d", atomic_read(&ar->tx_total_pending)); DEBUGFS_READONLY_FILE(tx_ampdu_list_len, 20, "%d", ar->tx_ampdu_list_len); DEBUGFS_READONLY_FILE(tx_ampdu_upload, 20, "%d", atomic_read(&ar->tx_ampdu_upload)); DEBUGFS_READONLY_FILE(tx_janitor_last_run, 64, "last run:%d ms ago", jiffies_to_msecs(jiffies - ar->tx_janitor_last_run)); DEBUGFS_READONLY_FILE(tx_dropped, 20, "%d", ar->tx_dropped); DEBUGFS_READONLY_FILE(rx_dropped, 20, "%d", ar->rx_dropped); DEBUGFS_READONLY_FILE(sniffer_enabled, 20, "%d", ar->sniffer_enabled); DEBUGFS_READONLY_FILE(rx_software_decryption, 20, "%d", ar->rx_software_decryption); DEBUGFS_READONLY_FILE(ampdu_factor, 20, "%d", ar->current_factor); DEBUGFS_READONLY_FILE(ampdu_density, 20, "%d", ar->current_density); DEBUGFS_READONLY_FILE(beacon_int, 20, "%d TU", ar->global_beacon_int); DEBUGFS_READONLY_FILE(pretbtt, 20, "%d TU", 
ar->global_pretbtt); void carl9170_debugfs_register(struct ar9170 *ar) { ar->debug_dir = debugfs_create_dir(KBUILD_MODNAME, ar->hw->wiphy->debugfsdir); #define DEBUGFS_ADD(name) \ debugfs_create_file(#name, carl_debugfs_##name ##_ops.attr, \ ar->debug_dir, ar, \ &carl_debugfs_##name ## _ops.fops); DEBUGFS_ADD(usb_tx_anch_urbs); DEBUGFS_ADD(usb_rx_pool_urbs); DEBUGFS_ADD(usb_rx_anch_urbs); DEBUGFS_ADD(usb_rx_work_urbs); DEBUGFS_ADD(tx_total_queued); DEBUGFS_ADD(tx_total_pending); DEBUGFS_ADD(tx_dropped); DEBUGFS_ADD(tx_stuck); DEBUGFS_ADD(tx_ampdu_upload); DEBUGFS_ADD(tx_ampdu_scheduler); DEBUGFS_ADD(tx_ampdu_list_len); DEBUGFS_ADD(rx_dropped); DEBUGFS_ADD(sniffer_enabled); DEBUGFS_ADD(rx_software_decryption); DEBUGFS_ADD(mem_usage); DEBUGFS_ADD(qos_stat); DEBUGFS_ADD(sta_psm); DEBUGFS_ADD(ampdu_state); DEBUGFS_ADD(hw_tx_tally); DEBUGFS_ADD(hw_rx_tally); DEBUGFS_ADD(hw_phy_errors); DEBUGFS_ADD(phy_noise); DEBUGFS_ADD(hw_wlan_queue); DEBUGFS_ADD(hw_pta_queue); DEBUGFS_ADD(hw_ampdu_info); DEBUGFS_ADD(ampdu_density); DEBUGFS_ADD(ampdu_factor); DEBUGFS_ADD(tx_janitor_last_run); DEBUGFS_ADD(tx_status_0); DEBUGFS_ADD(tx_status_1); DEBUGFS_ADD(tx_status_2); DEBUGFS_ADD(tx_status_3); DEBUGFS_ADD(tx_pending_0); DEBUGFS_ADD(tx_pending_1); DEBUGFS_ADD(tx_pending_2); DEBUGFS_ADD(tx_pending_3); DEBUGFS_ADD(hw_ioread32); DEBUGFS_ADD(hw_iowrite32); DEBUGFS_ADD(bug); DEBUGFS_ADD(erp); DEBUGFS_ADD(vif_dump); DEBUGFS_ADD(beacon_int); DEBUGFS_ADD(pretbtt); #undef DEBUGFS_ADD } void carl9170_debugfs_unregister(struct ar9170 *ar) { debugfs_remove_recursive(ar->debug_dir); }
gpl-2.0
devttys1/linux-fslc
drivers/gpu/drm/r128/r128_cce.c
1129
24259
/* r128_cce.c -- ATI Rage 128 driver -*- linux-c -*- * Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com */ /* * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Authors: * Gareth Hughes <gareth@valinux.com> */ #include <linux/firmware.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/module.h> #include <drm/drmP.h> #include <drm/r128_drm.h> #include "r128_drv.h" #define R128_FIFO_DEBUG 0 #define FIRMWARE_NAME "r128/r128_cce.bin" MODULE_FIRMWARE(FIRMWARE_NAME); static int R128_READ_PLL(struct drm_device *dev, int addr) { drm_r128_private_t *dev_priv = dev->dev_private; R128_WRITE8(R128_CLOCK_CNTL_INDEX, addr & 0x1f); return R128_READ(R128_CLOCK_CNTL_DATA); } #if R128_FIFO_DEBUG static void r128_status(drm_r128_private_t *dev_priv) { printk("GUI_STAT = 0x%08x\n", (unsigned int)R128_READ(R128_GUI_STAT)); printk("PM4_STAT = 0x%08x\n", (unsigned int)R128_READ(R128_PM4_STAT)); printk("PM4_BUFFER_DL_WPTR = 0x%08x\n", (unsigned int)R128_READ(R128_PM4_BUFFER_DL_WPTR)); printk("PM4_BUFFER_DL_RPTR = 0x%08x\n", (unsigned int)R128_READ(R128_PM4_BUFFER_DL_RPTR)); printk("PM4_MICRO_CNTL = 0x%08x\n", (unsigned int)R128_READ(R128_PM4_MICRO_CNTL)); printk("PM4_BUFFER_CNTL = 0x%08x\n", (unsigned int)R128_READ(R128_PM4_BUFFER_CNTL)); } #endif /* ================================================================ * Engine, FIFO control */ static int r128_do_pixcache_flush(drm_r128_private_t *dev_priv) { u32 tmp; int i; tmp = R128_READ(R128_PC_NGUI_CTLSTAT) | R128_PC_FLUSH_ALL; R128_WRITE(R128_PC_NGUI_CTLSTAT, tmp); for (i = 0; i < dev_priv->usec_timeout; i++) { if (!(R128_READ(R128_PC_NGUI_CTLSTAT) & R128_PC_BUSY)) return 0; DRM_UDELAY(1); } #if R128_FIFO_DEBUG DRM_ERROR("failed!\n"); #endif return -EBUSY; } static int r128_do_wait_for_fifo(drm_r128_private_t *dev_priv, int entries) { int i; for (i = 0; i < dev_priv->usec_timeout; i++) { int slots = R128_READ(R128_GUI_STAT) & R128_GUI_FIFOCNT_MASK; if (slots >= entries) return 0; DRM_UDELAY(1); } #if R128_FIFO_DEBUG DRM_ERROR("failed!\n"); #endif return -EBUSY; } static int r128_do_wait_for_idle(drm_r128_private_t *dev_priv) { int i, ret; ret = 
r128_do_wait_for_fifo(dev_priv, 64); if (ret) return ret; for (i = 0; i < dev_priv->usec_timeout; i++) { if (!(R128_READ(R128_GUI_STAT) & R128_GUI_ACTIVE)) { r128_do_pixcache_flush(dev_priv); return 0; } DRM_UDELAY(1); } #if R128_FIFO_DEBUG DRM_ERROR("failed!\n"); #endif return -EBUSY; } /* ================================================================ * CCE control, initialization */ /* Load the microcode for the CCE */ static int r128_cce_load_microcode(drm_r128_private_t *dev_priv) { struct platform_device *pdev; const struct firmware *fw; const __be32 *fw_data; int rc, i; DRM_DEBUG("\n"); pdev = platform_device_register_simple("r128_cce", 0, NULL, 0); if (IS_ERR(pdev)) { printk(KERN_ERR "r128_cce: Failed to register firmware\n"); return PTR_ERR(pdev); } rc = request_firmware(&fw, FIRMWARE_NAME, &pdev->dev); platform_device_unregister(pdev); if (rc) { printk(KERN_ERR "r128_cce: Failed to load firmware \"%s\"\n", FIRMWARE_NAME); return rc; } if (fw->size != 256 * 8) { printk(KERN_ERR "r128_cce: Bogus length %zu in firmware \"%s\"\n", fw->size, FIRMWARE_NAME); rc = -EINVAL; goto out_release; } r128_do_wait_for_idle(dev_priv); fw_data = (const __be32 *)fw->data; R128_WRITE(R128_PM4_MICROCODE_ADDR, 0); for (i = 0; i < 256; i++) { R128_WRITE(R128_PM4_MICROCODE_DATAH, be32_to_cpup(&fw_data[i * 2])); R128_WRITE(R128_PM4_MICROCODE_DATAL, be32_to_cpup(&fw_data[i * 2 + 1])); } out_release: release_firmware(fw); return rc; } /* Flush any pending commands to the CCE. This should only be used just * prior to a wait for idle, as it informs the engine that the command * stream is ending. */ static void r128_do_cce_flush(drm_r128_private_t *dev_priv) { u32 tmp; tmp = R128_READ(R128_PM4_BUFFER_DL_WPTR) | R128_PM4_BUFFER_DL_DONE; R128_WRITE(R128_PM4_BUFFER_DL_WPTR, tmp); } /* Wait for the CCE to go idle. 
*/ int r128_do_cce_idle(drm_r128_private_t *dev_priv) { int i; for (i = 0; i < dev_priv->usec_timeout; i++) { if (GET_RING_HEAD(dev_priv) == dev_priv->ring.tail) { int pm4stat = R128_READ(R128_PM4_STAT); if (((pm4stat & R128_PM4_FIFOCNT_MASK) >= dev_priv->cce_fifo_size) && !(pm4stat & (R128_PM4_BUSY | R128_PM4_GUI_ACTIVE))) { return r128_do_pixcache_flush(dev_priv); } } DRM_UDELAY(1); } #if R128_FIFO_DEBUG DRM_ERROR("failed!\n"); r128_status(dev_priv); #endif return -EBUSY; } /* Start the Concurrent Command Engine. */ static void r128_do_cce_start(drm_r128_private_t *dev_priv) { r128_do_wait_for_idle(dev_priv); R128_WRITE(R128_PM4_BUFFER_CNTL, dev_priv->cce_mode | dev_priv->ring.size_l2qw | R128_PM4_BUFFER_CNTL_NOUPDATE); R128_READ(R128_PM4_BUFFER_ADDR); /* as per the sample code */ R128_WRITE(R128_PM4_MICRO_CNTL, R128_PM4_MICRO_FREERUN); dev_priv->cce_running = 1; } /* Reset the Concurrent Command Engine. This will not flush any pending * commands, so you must wait for the CCE command stream to complete * before calling this routine. */ static void r128_do_cce_reset(drm_r128_private_t *dev_priv) { R128_WRITE(R128_PM4_BUFFER_DL_WPTR, 0); R128_WRITE(R128_PM4_BUFFER_DL_RPTR, 0); dev_priv->ring.tail = 0; } /* Stop the Concurrent Command Engine. This will not flush any pending * commands, so you must flush the command stream and wait for the CCE * to go idle before calling this routine. */ static void r128_do_cce_stop(drm_r128_private_t *dev_priv) { R128_WRITE(R128_PM4_MICRO_CNTL, 0); R128_WRITE(R128_PM4_BUFFER_CNTL, R128_PM4_NONPM4 | R128_PM4_BUFFER_CNTL_NOUPDATE); dev_priv->cce_running = 0; } /* Reset the engine. This will stop the CCE if it is running. 
*/ static int r128_do_engine_reset(struct drm_device *dev) { drm_r128_private_t *dev_priv = dev->dev_private; u32 clock_cntl_index, mclk_cntl, gen_reset_cntl; r128_do_pixcache_flush(dev_priv); clock_cntl_index = R128_READ(R128_CLOCK_CNTL_INDEX); mclk_cntl = R128_READ_PLL(dev, R128_MCLK_CNTL); R128_WRITE_PLL(R128_MCLK_CNTL, mclk_cntl | R128_FORCE_GCP | R128_FORCE_PIPE3D_CP); gen_reset_cntl = R128_READ(R128_GEN_RESET_CNTL); /* Taken from the sample code - do not change */ R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl | R128_SOFT_RESET_GUI); R128_READ(R128_GEN_RESET_CNTL); R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl & ~R128_SOFT_RESET_GUI); R128_READ(R128_GEN_RESET_CNTL); R128_WRITE_PLL(R128_MCLK_CNTL, mclk_cntl); R128_WRITE(R128_CLOCK_CNTL_INDEX, clock_cntl_index); R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl); /* Reset the CCE ring */ r128_do_cce_reset(dev_priv); /* The CCE is no longer running after an engine reset */ dev_priv->cce_running = 0; /* Reset any pending vertex, indirect buffers */ r128_freelist_reset(dev); return 0; } static void r128_cce_init_ring_buffer(struct drm_device *dev, drm_r128_private_t *dev_priv) { u32 ring_start; u32 tmp; DRM_DEBUG("\n"); /* The manual (p. 2) says this address is in "VM space". This * means it's an offset from the start of AGP space. */ #if __OS_HAS_AGP if (!dev_priv->is_pci) ring_start = dev_priv->cce_ring->offset - dev->agp->base; else #endif ring_start = dev_priv->cce_ring->offset - (unsigned long)dev->sg->virtual; R128_WRITE(R128_PM4_BUFFER_OFFSET, ring_start | R128_AGP_OFFSET); R128_WRITE(R128_PM4_BUFFER_DL_WPTR, 0); R128_WRITE(R128_PM4_BUFFER_DL_RPTR, 0); /* Set watermark control */ R128_WRITE(R128_PM4_BUFFER_WM_CNTL, ((R128_WATERMARK_L / 4) << R128_WMA_SHIFT) | ((R128_WATERMARK_M / 4) << R128_WMB_SHIFT) | ((R128_WATERMARK_N / 4) << R128_WMC_SHIFT) | ((R128_WATERMARK_K / 64) << R128_WB_WM_SHIFT)); /* Force read. Why? Because it's in the examples... 
*/ R128_READ(R128_PM4_BUFFER_ADDR); /* Turn on bus mastering */ tmp = R128_READ(R128_BUS_CNTL) & ~R128_BUS_MASTER_DIS; R128_WRITE(R128_BUS_CNTL, tmp); } static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init) { drm_r128_private_t *dev_priv; int rc; DRM_DEBUG("\n"); if (dev->dev_private) { DRM_DEBUG("called when already initialized\n"); return -EINVAL; } dev_priv = kzalloc(sizeof(drm_r128_private_t), GFP_KERNEL); if (dev_priv == NULL) return -ENOMEM; dev_priv->is_pci = init->is_pci; if (dev_priv->is_pci && !dev->sg) { DRM_ERROR("PCI GART memory not allocated!\n"); dev->dev_private = (void *)dev_priv; r128_do_cleanup_cce(dev); return -EINVAL; } dev_priv->usec_timeout = init->usec_timeout; if (dev_priv->usec_timeout < 1 || dev_priv->usec_timeout > R128_MAX_USEC_TIMEOUT) { DRM_DEBUG("TIMEOUT problem!\n"); dev->dev_private = (void *)dev_priv; r128_do_cleanup_cce(dev); return -EINVAL; } dev_priv->cce_mode = init->cce_mode; /* GH: Simple idle check. */ atomic_set(&dev_priv->idle_count, 0); /* We don't support anything other than bus-mastering ring mode, * but the ring can be in either AGP or PCI space for the ring * read pointer. 
*/ if ((init->cce_mode != R128_PM4_192BM) && (init->cce_mode != R128_PM4_128BM_64INDBM) && (init->cce_mode != R128_PM4_64BM_128INDBM) && (init->cce_mode != R128_PM4_64BM_64VCBM_64INDBM)) { DRM_DEBUG("Bad cce_mode!\n"); dev->dev_private = (void *)dev_priv; r128_do_cleanup_cce(dev); return -EINVAL; } switch (init->cce_mode) { case R128_PM4_NONPM4: dev_priv->cce_fifo_size = 0; break; case R128_PM4_192PIO: case R128_PM4_192BM: dev_priv->cce_fifo_size = 192; break; case R128_PM4_128PIO_64INDBM: case R128_PM4_128BM_64INDBM: dev_priv->cce_fifo_size = 128; break; case R128_PM4_64PIO_128INDBM: case R128_PM4_64BM_128INDBM: case R128_PM4_64PIO_64VCBM_64INDBM: case R128_PM4_64BM_64VCBM_64INDBM: case R128_PM4_64PIO_64VCPIO_64INDPIO: dev_priv->cce_fifo_size = 64; break; } switch (init->fb_bpp) { case 16: dev_priv->color_fmt = R128_DATATYPE_RGB565; break; case 32: default: dev_priv->color_fmt = R128_DATATYPE_ARGB8888; break; } dev_priv->front_offset = init->front_offset; dev_priv->front_pitch = init->front_pitch; dev_priv->back_offset = init->back_offset; dev_priv->back_pitch = init->back_pitch; switch (init->depth_bpp) { case 16: dev_priv->depth_fmt = R128_DATATYPE_RGB565; break; case 24: case 32: default: dev_priv->depth_fmt = R128_DATATYPE_ARGB8888; break; } dev_priv->depth_offset = init->depth_offset; dev_priv->depth_pitch = init->depth_pitch; dev_priv->span_offset = init->span_offset; dev_priv->front_pitch_offset_c = (((dev_priv->front_pitch / 8) << 21) | (dev_priv->front_offset >> 5)); dev_priv->back_pitch_offset_c = (((dev_priv->back_pitch / 8) << 21) | (dev_priv->back_offset >> 5)); dev_priv->depth_pitch_offset_c = (((dev_priv->depth_pitch / 8) << 21) | (dev_priv->depth_offset >> 5) | R128_DST_TILE); dev_priv->span_pitch_offset_c = (((dev_priv->depth_pitch / 8) << 21) | (dev_priv->span_offset >> 5)); dev_priv->sarea = drm_legacy_getsarea(dev); if (!dev_priv->sarea) { DRM_ERROR("could not find sarea!\n"); dev->dev_private = (void *)dev_priv; r128_do_cleanup_cce(dev); 
return -EINVAL; } dev_priv->mmio = drm_legacy_findmap(dev, init->mmio_offset); if (!dev_priv->mmio) { DRM_ERROR("could not find mmio region!\n"); dev->dev_private = (void *)dev_priv; r128_do_cleanup_cce(dev); return -EINVAL; } dev_priv->cce_ring = drm_legacy_findmap(dev, init->ring_offset); if (!dev_priv->cce_ring) { DRM_ERROR("could not find cce ring region!\n"); dev->dev_private = (void *)dev_priv; r128_do_cleanup_cce(dev); return -EINVAL; } dev_priv->ring_rptr = drm_legacy_findmap(dev, init->ring_rptr_offset); if (!dev_priv->ring_rptr) { DRM_ERROR("could not find ring read pointer!\n"); dev->dev_private = (void *)dev_priv; r128_do_cleanup_cce(dev); return -EINVAL; } dev->agp_buffer_token = init->buffers_offset; dev->agp_buffer_map = drm_legacy_findmap(dev, init->buffers_offset); if (!dev->agp_buffer_map) { DRM_ERROR("could not find dma buffer region!\n"); dev->dev_private = (void *)dev_priv; r128_do_cleanup_cce(dev); return -EINVAL; } if (!dev_priv->is_pci) { dev_priv->agp_textures = drm_legacy_findmap(dev, init->agp_textures_offset); if (!dev_priv->agp_textures) { DRM_ERROR("could not find agp texture region!\n"); dev->dev_private = (void *)dev_priv; r128_do_cleanup_cce(dev); return -EINVAL; } } dev_priv->sarea_priv = (drm_r128_sarea_t *) ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset); #if __OS_HAS_AGP if (!dev_priv->is_pci) { drm_legacy_ioremap_wc(dev_priv->cce_ring, dev); drm_legacy_ioremap_wc(dev_priv->ring_rptr, dev); drm_legacy_ioremap_wc(dev->agp_buffer_map, dev); if (!dev_priv->cce_ring->handle || !dev_priv->ring_rptr->handle || !dev->agp_buffer_map->handle) { DRM_ERROR("Could not ioremap agp regions!\n"); dev->dev_private = (void *)dev_priv; r128_do_cleanup_cce(dev); return -ENOMEM; } } else #endif { dev_priv->cce_ring->handle = (void *)(unsigned long)dev_priv->cce_ring->offset; dev_priv->ring_rptr->handle = (void *)(unsigned long)dev_priv->ring_rptr->offset; dev->agp_buffer_map->handle = (void *)(unsigned long)dev->agp_buffer_map->offset; } 
#if __OS_HAS_AGP if (!dev_priv->is_pci) dev_priv->cce_buffers_offset = dev->agp->base; else #endif dev_priv->cce_buffers_offset = (unsigned long)dev->sg->virtual; dev_priv->ring.start = (u32 *) dev_priv->cce_ring->handle; dev_priv->ring.end = ((u32 *) dev_priv->cce_ring->handle + init->ring_size / sizeof(u32)); dev_priv->ring.size = init->ring_size; dev_priv->ring.size_l2qw = order_base_2(init->ring_size / 8); dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1; dev_priv->ring.high_mark = 128; dev_priv->sarea_priv->last_frame = 0; R128_WRITE(R128_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame); dev_priv->sarea_priv->last_dispatch = 0; R128_WRITE(R128_LAST_DISPATCH_REG, dev_priv->sarea_priv->last_dispatch); #if __OS_HAS_AGP if (dev_priv->is_pci) { #endif dev_priv->gart_info.table_mask = DMA_BIT_MASK(32); dev_priv->gart_info.gart_table_location = DRM_ATI_GART_MAIN; dev_priv->gart_info.table_size = R128_PCIGART_TABLE_SIZE; dev_priv->gart_info.addr = NULL; dev_priv->gart_info.bus_addr = 0; dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI; if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) { DRM_ERROR("failed to init PCI GART!\n"); dev->dev_private = (void *)dev_priv; r128_do_cleanup_cce(dev); return -ENOMEM; } R128_WRITE(R128_PCI_GART_PAGE, dev_priv->gart_info.bus_addr); #if __OS_HAS_AGP } #endif r128_cce_init_ring_buffer(dev, dev_priv); rc = r128_cce_load_microcode(dev_priv); dev->dev_private = (void *)dev_priv; r128_do_engine_reset(dev); if (rc) { DRM_ERROR("Failed to load firmware!\n"); r128_do_cleanup_cce(dev); } return rc; } int r128_do_cleanup_cce(struct drm_device *dev) { /* Make sure interrupts are disabled here because the uninstall ioctl * may not have been called from userspace and after dev_private * is freed, it's too late. 
*/ if (dev->irq_enabled) drm_irq_uninstall(dev); if (dev->dev_private) { drm_r128_private_t *dev_priv = dev->dev_private; #if __OS_HAS_AGP if (!dev_priv->is_pci) { if (dev_priv->cce_ring != NULL) drm_legacy_ioremapfree(dev_priv->cce_ring, dev); if (dev_priv->ring_rptr != NULL) drm_legacy_ioremapfree(dev_priv->ring_rptr, dev); if (dev->agp_buffer_map != NULL) { drm_legacy_ioremapfree(dev->agp_buffer_map, dev); dev->agp_buffer_map = NULL; } } else #endif { if (dev_priv->gart_info.bus_addr) if (!drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info)) DRM_ERROR ("failed to cleanup PCI GART!\n"); } kfree(dev->dev_private); dev->dev_private = NULL; } return 0; } int r128_cce_init(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_r128_init_t *init = data; DRM_DEBUG("\n"); LOCK_TEST_WITH_RETURN(dev, file_priv); switch (init->func) { case R128_INIT_CCE: return r128_do_init_cce(dev, init); case R128_CLEANUP_CCE: return r128_do_cleanup_cce(dev); } return -EINVAL; } int r128_cce_start(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_r128_private_t *dev_priv = dev->dev_private; DRM_DEBUG("\n"); LOCK_TEST_WITH_RETURN(dev, file_priv); DEV_INIT_TEST_WITH_RETURN(dev_priv); if (dev_priv->cce_running || dev_priv->cce_mode == R128_PM4_NONPM4) { DRM_DEBUG("while CCE running\n"); return 0; } r128_do_cce_start(dev_priv); return 0; } /* Stop the CCE. The engine must have been idled before calling this * routine. */ int r128_cce_stop(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_r128_private_t *dev_priv = dev->dev_private; drm_r128_cce_stop_t *stop = data; int ret; DRM_DEBUG("\n"); LOCK_TEST_WITH_RETURN(dev, file_priv); DEV_INIT_TEST_WITH_RETURN(dev_priv); /* Flush any pending CCE commands. This ensures any outstanding * commands are exectuted by the engine before we turn it off. 
*/ if (stop->flush) r128_do_cce_flush(dev_priv); /* If we fail to make the engine go idle, we return an error * code so that the DRM ioctl wrapper can try again. */ if (stop->idle) { ret = r128_do_cce_idle(dev_priv); if (ret) return ret; } /* Finally, we can turn off the CCE. If the engine isn't idle, * we will get some dropped triangles as they won't be fully * rendered before the CCE is shut down. */ r128_do_cce_stop(dev_priv); /* Reset the engine */ r128_do_engine_reset(dev); return 0; } /* Just reset the CCE ring. Called as part of an X Server engine reset. */ int r128_cce_reset(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_r128_private_t *dev_priv = dev->dev_private; DRM_DEBUG("\n"); LOCK_TEST_WITH_RETURN(dev, file_priv); DEV_INIT_TEST_WITH_RETURN(dev_priv); r128_do_cce_reset(dev_priv); /* The CCE is no longer running after an engine reset */ dev_priv->cce_running = 0; return 0; } int r128_cce_idle(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_r128_private_t *dev_priv = dev->dev_private; DRM_DEBUG("\n"); LOCK_TEST_WITH_RETURN(dev, file_priv); DEV_INIT_TEST_WITH_RETURN(dev_priv); if (dev_priv->cce_running) r128_do_cce_flush(dev_priv); return r128_do_cce_idle(dev_priv); } int r128_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv) { DRM_DEBUG("\n"); LOCK_TEST_WITH_RETURN(dev, file_priv); DEV_INIT_TEST_WITH_RETURN(dev->dev_private); return r128_do_engine_reset(dev); } int r128_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv) { return -EINVAL; } /* ================================================================ * Freelist management */ #define R128_BUFFER_USED 0xffffffff #define R128_BUFFER_FREE 0 #if 0 static int r128_freelist_init(struct drm_device *dev) { struct drm_device_dma *dma = dev->dma; drm_r128_private_t *dev_priv = dev->dev_private; struct drm_buf *buf; drm_r128_buf_priv_t *buf_priv; drm_r128_freelist_t *entry; int i; dev_priv->head = 
kzalloc(sizeof(drm_r128_freelist_t), GFP_KERNEL); if (dev_priv->head == NULL) return -ENOMEM; dev_priv->head->age = R128_BUFFER_USED; for (i = 0; i < dma->buf_count; i++) { buf = dma->buflist[i]; buf_priv = buf->dev_private; entry = kmalloc(sizeof(drm_r128_freelist_t), GFP_KERNEL); if (!entry) return -ENOMEM; entry->age = R128_BUFFER_FREE; entry->buf = buf; entry->prev = dev_priv->head; entry->next = dev_priv->head->next; if (!entry->next) dev_priv->tail = entry; buf_priv->discard = 0; buf_priv->dispatched = 0; buf_priv->list_entry = entry; dev_priv->head->next = entry; if (dev_priv->head->next) dev_priv->head->next->prev = entry; } return 0; } #endif static struct drm_buf *r128_freelist_get(struct drm_device * dev) { struct drm_device_dma *dma = dev->dma; drm_r128_private_t *dev_priv = dev->dev_private; drm_r128_buf_priv_t *buf_priv; struct drm_buf *buf; int i, t; /* FIXME: Optimize -- use freelist code */ for (i = 0; i < dma->buf_count; i++) { buf = dma->buflist[i]; buf_priv = buf->dev_private; if (!buf->file_priv) return buf; } for (t = 0; t < dev_priv->usec_timeout; t++) { u32 done_age = R128_READ(R128_LAST_DISPATCH_REG); for (i = 0; i < dma->buf_count; i++) { buf = dma->buflist[i]; buf_priv = buf->dev_private; if (buf->pending && buf_priv->age <= done_age) { /* The buffer has been processed, so it * can now be used. 
*/ buf->pending = 0; return buf; } } DRM_UDELAY(1); } DRM_DEBUG("returning NULL!\n"); return NULL; } void r128_freelist_reset(struct drm_device *dev) { struct drm_device_dma *dma = dev->dma; int i; for (i = 0; i < dma->buf_count; i++) { struct drm_buf *buf = dma->buflist[i]; drm_r128_buf_priv_t *buf_priv = buf->dev_private; buf_priv->age = 0; } } /* ================================================================ * CCE command submission */ int r128_wait_ring(drm_r128_private_t *dev_priv, int n) { drm_r128_ring_buffer_t *ring = &dev_priv->ring; int i; for (i = 0; i < dev_priv->usec_timeout; i++) { r128_update_ring_snapshot(dev_priv); if (ring->space >= n) return 0; DRM_UDELAY(1); } /* FIXME: This is being ignored... */ DRM_ERROR("failed!\n"); return -EBUSY; } static int r128_cce_get_buffers(struct drm_device *dev, struct drm_file *file_priv, struct drm_dma *d) { int i; struct drm_buf *buf; for (i = d->granted_count; i < d->request_count; i++) { buf = r128_freelist_get(dev); if (!buf) return -EAGAIN; buf->file_priv = file_priv; if (copy_to_user(&d->request_indices[i], &buf->idx, sizeof(buf->idx))) return -EFAULT; if (copy_to_user(&d->request_sizes[i], &buf->total, sizeof(buf->total))) return -EFAULT; d->granted_count++; } return 0; } int r128_cce_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_device_dma *dma = dev->dma; int ret = 0; struct drm_dma *d = data; LOCK_TEST_WITH_RETURN(dev, file_priv); /* Please don't send us buffers. */ if (d->send_count != 0) { DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", DRM_CURRENTPID, d->send_count); return -EINVAL; } /* We'll send you buffers. */ if (d->request_count < 0 || d->request_count > dma->buf_count) { DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", DRM_CURRENTPID, d->request_count, dma->buf_count); return -EINVAL; } d->granted_count = 0; if (d->request_count) ret = r128_cce_get_buffers(dev, file_priv, d); return ret; }
gpl-2.0
davidmueller13/valexKernel-lt03wifi
lib/kobject.c
1385
23914
/* * kobject.c - library routines for handling generic kernel objects * * Copyright (c) 2002-2003 Patrick Mochel <mochel@osdl.org> * Copyright (c) 2006-2007 Greg Kroah-Hartman <greg@kroah.com> * Copyright (c) 2006-2007 Novell Inc. * * This file is released under the GPLv2. * * * Please see the file Documentation/kobject.txt for critical information * about using the kobject interface. */ #include <linux/kobject.h> #include <linux/string.h> #include <linux/export.h> #include <linux/stat.h> #include <linux/slab.h> /* * populate_dir - populate directory with attributes. * @kobj: object we're working on. * * Most subsystems have a set of default attributes that are associated * with an object that registers with them. This is a helper called during * object registration that loops through the default attributes of the * subsystem and creates attributes files for them in sysfs. */ static int populate_dir(struct kobject *kobj) { struct kobj_type *t = get_ktype(kobj); struct attribute *attr; int error = 0; int i; if (t && t->default_attrs) { for (i = 0; (attr = t->default_attrs[i]) != NULL; i++) { error = sysfs_create_file(kobj, attr); if (error) break; } } return error; } static int create_dir(struct kobject *kobj) { int error = 0; if (kobject_name(kobj)) { error = sysfs_create_dir(kobj); if (!error) { error = populate_dir(kobj); if (error) sysfs_remove_dir(kobj); } } return error; } static int get_kobj_path_length(struct kobject *kobj) { int length = 1; struct kobject *parent = kobj; /* walk up the ancestors until we hit the one pointing to the * root. * Add 1 to strlen for leading '/' of each level. 
*/ do { if (kobject_name(parent) == NULL) return 0; length += strlen(kobject_name(parent)) + 1; parent = parent->parent; } while (parent); return length; } static void fill_kobj_path(struct kobject *kobj, char *path, int length) { struct kobject *parent; --length; for (parent = kobj; parent; parent = parent->parent) { int cur = strlen(kobject_name(parent)); /* back up enough to print this name with '/' */ length -= cur; strncpy(path + length, kobject_name(parent), cur); *(path + --length) = '/'; } pr_debug("kobject: '%s' (%p): %s: path = '%s'\n", kobject_name(kobj), kobj, __func__, path); } /** * kobject_get_path - generate and return the path associated with a given kobj and kset pair. * * @kobj: kobject in question, with which to build the path * @gfp_mask: the allocation type used to allocate the path * * The result must be freed by the caller with kfree(). */ char *kobject_get_path(struct kobject *kobj, gfp_t gfp_mask) { char *path; int len; len = get_kobj_path_length(kobj); if (len == 0) return NULL; path = kzalloc(len, gfp_mask); if (!path) return NULL; fill_kobj_path(kobj, path, len); return path; } EXPORT_SYMBOL_GPL(kobject_get_path); /* add the kobject to its kset's list */ static void kobj_kset_join(struct kobject *kobj) { if (!kobj->kset) return; kset_get(kobj->kset); spin_lock(&kobj->kset->list_lock); list_add_tail(&kobj->entry, &kobj->kset->list); spin_unlock(&kobj->kset->list_lock); } /* remove the kobject from its kset's list */ static void kobj_kset_leave(struct kobject *kobj) { if (!kobj->kset) return; spin_lock(&kobj->kset->list_lock); list_del_init(&kobj->entry); spin_unlock(&kobj->kset->list_lock); kset_put(kobj->kset); } static void kobject_init_internal(struct kobject *kobj) { if (!kobj) return; kref_init(&kobj->kref); INIT_LIST_HEAD(&kobj->entry); kobj->state_in_sysfs = 0; kobj->state_add_uevent_sent = 0; kobj->state_remove_uevent_sent = 0; kobj->state_initialized = 1; } static int kobject_add_internal(struct kobject *kobj) { int error = 0; 
struct kobject *parent; if (!kobj) return -ENOENT; if (!kobj->name || !kobj->name[0]) { WARN(1, "kobject: (%p): attempted to be registered with empty " "name!\n", kobj); return -EINVAL; } parent = kobject_get(kobj->parent); /* join kset if set, use it as parent if we do not already have one */ if (kobj->kset) { if (!parent) parent = kobject_get(&kobj->kset->kobj); kobj_kset_join(kobj); kobj->parent = parent; } pr_debug("kobject: '%s' (%p): %s: parent: '%s', set: '%s'\n", kobject_name(kobj), kobj, __func__, parent ? kobject_name(parent) : "<NULL>", kobj->kset ? kobject_name(&kobj->kset->kobj) : "<NULL>"); error = create_dir(kobj); if (error) { kobj_kset_leave(kobj); kobject_put(parent); kobj->parent = NULL; /* be noisy on error issues */ if (error == -EEXIST) WARN(1, "%s failed for %s with " "-EEXIST, don't try to register things with " "the same name in the same directory.\n", __func__, kobject_name(kobj)); else WARN(1, "%s failed for %s (error: %d parent: %s)\n", __func__, kobject_name(kobj), error, parent ? kobject_name(parent) : "'none'"); } else kobj->state_in_sysfs = 1; return error; } /** * kobject_set_name_vargs - Set the name of an kobject * @kobj: struct kobject to set the name of * @fmt: format string used to build the name * @vargs: vargs to format the string. */ int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list vargs) { const char *old_name = kobj->name; char *s; if (kobj->name && !fmt) return 0; kobj->name = kvasprintf(GFP_KERNEL, fmt, vargs); if (!kobj->name) return -ENOMEM; /* ewww... some of these buggers have '/' in the name ... */ while ((s = strchr(kobj->name, '/'))) s[0] = '!'; kfree(old_name); return 0; } /** * kobject_set_name - Set the name of a kobject * @kobj: struct kobject to set the name of * @fmt: format string used to build the name * * This sets the name of the kobject. If you have already added the * kobject to the system, you must call kobject_rename() in order to * change the name of the kobject. 
 */
int kobject_set_name(struct kobject *kobj, const char *fmt, ...)
{
	va_list vargs;
	int retval;

	va_start(vargs, fmt);
	retval = kobject_set_name_vargs(kobj, fmt, vargs);
	va_end(vargs);

	return retval;
}
EXPORT_SYMBOL(kobject_set_name);

/**
 * kobject_init - initialize a kobject structure
 * @kobj: pointer to the kobject to initialize
 * @ktype: pointer to the ktype for this kobject.
 *
 * This function will properly initialize a kobject such that it can then
 * be passed to the kobject_add() call.
 *
 * After this function is called, the kobject MUST be cleaned up by a call
 * to kobject_put(), not by a call to kfree directly to ensure that all of
 * the memory is cleaned up properly.
 */
void kobject_init(struct kobject *kobj, struct kobj_type *ktype)
{
	char *err_str;

	if (!kobj) {
		err_str = "invalid kobject pointer!";
		goto error;
	}
	if (!ktype) {
		err_str = "must have a ktype to be initialized properly!\n";
		goto error;
	}
	if (kobj->state_initialized) {
		/* do not error out as sometimes we can recover */
		printk(KERN_ERR "kobject (%p): tried to init an initialized "
		       "object, something is seriously wrong.\n", kobj);
		dump_stack();
	}

	kobject_init_internal(kobj);
	kobj->ktype = ktype;
	return;

error:
	printk(KERN_ERR "kobject (%p): %s\n", kobj, err_str);
	dump_stack();
}
EXPORT_SYMBOL(kobject_init);

/* set the name from @fmt/@vargs, then hook @kobj under @parent in sysfs */
static int kobject_add_varg(struct kobject *kobj, struct kobject *parent,
			    const char *fmt, va_list vargs)
{
	int retval;

	retval = kobject_set_name_vargs(kobj, fmt, vargs);
	if (retval) {
		printk(KERN_ERR "kobject: can not set name properly!\n");
		return retval;
	}
	kobj->parent = parent;
	return kobject_add_internal(kobj);
}

/**
 * kobject_add - the main kobject add function
 * @kobj: the kobject to add
 * @parent: pointer to the parent of the kobject.
 * @fmt: format to name the kobject with.
 *
 * The kobject name is set and added to the kobject hierarchy in this
 * function.
 *
 * If @parent is set, then the parent of the @kobj will be set to it.
 * If @parent is NULL, then the parent of the @kobj will be set to the
 * kobject associted with the kset assigned to this kobject.  If no kset
 * is assigned to the kobject, then the kobject will be located in the
 * root of the sysfs tree.
 *
 * If this function returns an error, kobject_put() must be called to
 * properly clean up the memory associated with the object.
 * Under no instance should the kobject that is passed to this function
 * be directly freed with a call to kfree(), that can leak memory.
 *
 * Note, no "add" uevent will be created with this call, the caller should set
 * up all of the necessary sysfs files for the object and then call
 * kobject_uevent() with the UEVENT_ADD parameter to ensure that
 * userspace is properly notified of this kobject's creation.
 */
int kobject_add(struct kobject *kobj, struct kobject *parent,
		const char *fmt, ...)
{
	va_list args;
	int retval;

	if (!kobj)
		return -EINVAL;

	if (!kobj->state_initialized) {
		printk(KERN_ERR "kobject '%s' (%p): tried to add an "
		       "uninitialized object, something is seriously wrong.\n",
		       kobject_name(kobj), kobj);
		dump_stack();
		return -EINVAL;
	}
	va_start(args, fmt);
	retval = kobject_add_varg(kobj, parent, fmt, args);
	va_end(args);

	return retval;
}
EXPORT_SYMBOL(kobject_add);

/**
 * kobject_init_and_add - initialize a kobject structure and add it to the kobject hierarchy
 * @kobj: pointer to the kobject to initialize
 * @ktype: pointer to the ktype for this kobject.
 * @parent: pointer to the parent of this kobject.
 * @fmt: the name of the kobject.
 *
 * This function combines the call to kobject_init() and
 * kobject_add().  The same type of error handling after a call to
 * kobject_add() and kobject lifetime rules are the same here.
 */
int kobject_init_and_add(struct kobject *kobj, struct kobj_type *ktype,
			 struct kobject *parent, const char *fmt, ...)
{
	va_list args;
	int retval;

	kobject_init(kobj, ktype);

	va_start(args, fmt);
	retval = kobject_add_varg(kobj, parent, fmt, args);
	va_end(args);

	return retval;
}
EXPORT_SYMBOL_GPL(kobject_init_and_add);

/**
 * kobject_rename - change the name of an object
 * @kobj: object in question.
 * @new_name: object's new name
 *
 * It is the responsibility of the caller to provide mutual
 * exclusion between two different calls of kobject_rename
 * on the same kobject and to ensure that new_name is valid and
 * won't conflict with other kobjects.
 */
int kobject_rename(struct kobject *kobj, const char *new_name)
{
	int error = 0;
	const char *devpath = NULL;
	const char *dup_name = NULL, *name;
	char *devpath_string = NULL;
	char *envp[2];

	kobj = kobject_get(kobj);
	if (!kobj)
		return -EINVAL;
	if (!kobj->parent)
		return -EINVAL;

	devpath = kobject_get_path(kobj, GFP_KERNEL);
	if (!devpath) {
		error = -ENOMEM;
		goto out;
	}
	/* 15 extra bytes cover "DEVPATH_OLD=" plus the terminating NUL */
	devpath_string = kmalloc(strlen(devpath) + 15, GFP_KERNEL);
	if (!devpath_string) {
		error = -ENOMEM;
		goto out;
	}
	sprintf(devpath_string, "DEVPATH_OLD=%s", devpath);
	envp[0] = devpath_string;
	envp[1] = NULL;

	name = dup_name = kstrdup(new_name, GFP_KERNEL);
	if (!name) {
		error = -ENOMEM;
		goto out;
	}

	error = sysfs_rename_dir(kobj, new_name);
	if (error)
		goto out;

	/* Install the new kobject name */
	dup_name = kobj->name;
	kobj->name = name;

	/* This function is mostly/only used for network interface.
	 * Some hotplug package track interfaces by their name and
	 * therefore want to know when the name is changed by the user. */
	kobject_uevent_env(kobj, KOBJ_MOVE, envp);

out:
	kfree(dup_name);
	kfree(devpath_string);
	kfree(devpath);
	kobject_put(kobj);

	return error;
}
EXPORT_SYMBOL_GPL(kobject_rename);

/**
 * kobject_move - move object to another parent
 * @kobj: object in question.
 * @new_parent: object's new parent (can be NULL)
 */
int kobject_move(struct kobject *kobj, struct kobject *new_parent)
{
	int error;
	struct kobject *old_parent;
	const char *devpath = NULL;
	char *devpath_string = NULL;
	char *envp[2];

	kobj = kobject_get(kobj);
	if (!kobj)
		return -EINVAL;
	new_parent = kobject_get(new_parent);
	if (!new_parent) {
		/* fall back to the kset's embedded kobject as parent */
		if (kobj->kset)
			new_parent = kobject_get(&kobj->kset->kobj);
	}

	/* old object path */
	devpath = kobject_get_path(kobj, GFP_KERNEL);
	if (!devpath) {
		error = -ENOMEM;
		goto out;
	}
	devpath_string = kmalloc(strlen(devpath) + 15, GFP_KERNEL);
	if (!devpath_string) {
		error = -ENOMEM;
		goto out;
	}
	sprintf(devpath_string, "DEVPATH_OLD=%s", devpath);
	envp[0] = devpath_string;
	envp[1] = NULL;
	error = sysfs_move_dir(kobj, new_parent);
	if (error)
		goto out;
	old_parent = kobj->parent;
	kobj->parent = new_parent;
	new_parent = NULL;
	kobject_put(old_parent);
	kobject_uevent_env(kobj, KOBJ_MOVE, envp);
out:
	kobject_put(new_parent);
	kobject_put(kobj);
	kfree(devpath_string);
	kfree(devpath);
	return error;
}

/**
 * kobject_del - unlink kobject from hierarchy.
 * @kobj: object.
 */
void kobject_del(struct kobject *kobj)
{
	if (!kobj)
		return;

	sysfs_remove_dir(kobj);
	kobj->state_in_sysfs = 0;
	kobj_kset_leave(kobj);
	kobject_put(kobj->parent);
	kobj->parent = NULL;
}

/**
 * kobject_get - increment refcount for object.
 * @kobj: object.
 */
struct kobject *kobject_get(struct kobject *kobj)
{
	if (kobj)
		kref_get(&kobj->kref);
	return kobj;
}

/* take a reference only if the refcount has not already dropped to zero */
static struct kobject *kobject_get_unless_zero(struct kobject *kobj)
{
	if (!kref_get_unless_zero(&kobj->kref))
		kobj = NULL;
	return kobj;
}

/*
 * kobject_cleanup - free kobject resources.
 * @kobj: object to cleanup
 */
static void kobject_cleanup(struct kobject *kobj)
{
	struct kobj_type *t = get_ktype(kobj);
	const char *name = kobj->name;

	pr_debug("kobject: '%s' (%p): %s\n",
		 kobject_name(kobj), kobj, __func__);

	if (t && !t->release)
		pr_debug("kobject: '%s' (%p): does not have a release() "
			 "function, it is broken and must be fixed.\n",
			 kobject_name(kobj), kobj);

	/* send "remove" if the caller did not do it but sent "add" */
	if (kobj->state_add_uevent_sent && !kobj->state_remove_uevent_sent) {
		pr_debug("kobject: '%s' (%p): auto cleanup 'remove' event\n",
			 kobject_name(kobj), kobj);
		kobject_uevent(kobj, KOBJ_REMOVE);
	}

	/* remove from sysfs if the caller did not do it */
	if (kobj->state_in_sysfs) {
		pr_debug("kobject: '%s' (%p): auto cleanup kobject_del\n",
			 kobject_name(kobj), kobj);
		kobject_del(kobj);
	}

	if (t && t->release) {
		pr_debug("kobject: '%s' (%p): calling ktype release\n",
			 kobject_name(kobj), kobj);
		t->release(kobj);
	}

	/* free name if we allocated it */
	if (name) {
		pr_debug("kobject: '%s': free name\n", name);
		kfree(name);
	}
}

/* kref release callback: runs once the last reference has been dropped */
static void kobject_release(struct kref *kref)
{
	kobject_cleanup(container_of(kref, struct kobject, kref));
}

/**
 * kobject_put - decrement refcount for object.
 * @kobj: object.
 *
 * Decrement the refcount, and if 0, call kobject_cleanup().
 */
void kobject_put(struct kobject *kobj)
{
	if (kobj) {
		if (!kobj->state_initialized)
			WARN(1, KERN_WARNING "kobject: '%s' (%p): is not "
			       "initialized, yet kobject_put() is being "
			       "called.\n", kobject_name(kobj), kobj);
		kref_put(&kobj->kref, kobject_release);
	}
}

/* release for kobjects allocated by kobject_create(): just free them */
static void dynamic_kobj_release(struct kobject *kobj)
{
	pr_debug("kobject: (%p): %s\n", kobj, __func__);
	kfree(kobj);
}

static struct kobj_type dynamic_kobj_ktype = {
	.release	= dynamic_kobj_release,
	.sysfs_ops	= &kobj_sysfs_ops,
};

/**
 * kobject_create - create a struct kobject dynamically
 *
 * This function creates a kobject structure dynamically and sets it up
 * to be a "dynamic" kobject with a default release function set up.
 *
 * If the kobject was not able to be created, NULL will be returned.
 * The kobject structure returned from here must be cleaned up with a
 * call to kobject_put() and not kfree(), as kobject_init() has
 * already been called on this structure.
 */
struct kobject *kobject_create(void)
{
	struct kobject *kobj;

	kobj = kzalloc(sizeof(*kobj), GFP_KERNEL);
	if (!kobj)
		return NULL;

	kobject_init(kobj, &dynamic_kobj_ktype);
	return kobj;
}

/**
 * kobject_create_and_add - create a struct kobject dynamically and register it with sysfs
 *
 * @name: the name for the kset
 * @parent: the parent kobject of this kobject, if any.
 *
 * This function creates a kobject structure dynamically and registers it
 * with sysfs.  When you are finished with this structure, call
 * kobject_put() and the structure will be dynamically freed when
 * it is no longer being used.
 *
 * If the kobject was not able to be created, NULL will be returned.
 */
struct kobject *kobject_create_and_add(const char *name, struct kobject *parent)
{
	struct kobject *kobj;
	int retval;

	kobj = kobject_create();
	if (!kobj)
		return NULL;

	retval = kobject_add(kobj, parent, "%s", name);
	if (retval) {
		printk(KERN_WARNING "%s: kobject_add error: %d\n",
		       __func__, retval);
		kobject_put(kobj);
		kobj = NULL;
	}
	return kobj;
}
EXPORT_SYMBOL_GPL(kobject_create_and_add);

/**
 * kset_init - initialize a kset for use
 * @k: kset
 */
void kset_init(struct kset *k)
{
	kobject_init_internal(&k->kobj);
	INIT_LIST_HEAD(&k->list);
	spin_lock_init(&k->list_lock);
}

/* default kobject attribute operations */
static ssize_t kobj_attr_show(struct kobject *kobj, struct attribute *attr,
			      char *buf)
{
	struct kobj_attribute *kattr;
	ssize_t ret = -EIO;

	kattr = container_of(attr, struct kobj_attribute, attr);
	if (kattr->show)
		ret = kattr->show(kobj, kattr, buf);
	return ret;
}

static ssize_t kobj_attr_store(struct kobject *kobj, struct attribute *attr,
			       const char *buf, size_t count)
{
	struct kobj_attribute *kattr;
	ssize_t ret = -EIO;

	kattr = container_of(attr, struct kobj_attribute, attr);
	if (kattr->store)
		ret = kattr->store(kobj, kattr, buf, count);
	return ret;
}

const struct sysfs_ops kobj_sysfs_ops = {
	.show	= kobj_attr_show,
	.store	= kobj_attr_store,
};

/**
 * kset_register - initialize and add a kset.
 * @k: kset.
 */
int kset_register(struct kset *k)
{
	int err;

	if (!k)
		return -EINVAL;

	kset_init(k);
	err = kobject_add_internal(&k->kobj);
	if (err)
		return err;
	kobject_uevent(&k->kobj, KOBJ_ADD);
	return 0;
}

/**
 * kset_unregister - remove a kset.
 * @k: kset.
 */
void kset_unregister(struct kset *k)
{
	if (!k)
		return;
	kobject_put(&k->kobj);
}

/**
 * kset_find_obj - search for object in kset.
 * @kset: kset we're looking in.
 * @name: object's name.
 *
 * Lock kset via @kset->subsys, and iterate over @kset->list,
 * looking for a matching kobject.  If matching object is found
 * take a reference and return the object.
 */
struct kobject *kset_find_obj(struct kset *kset, const char *name)
{
	struct kobject *k;
	struct kobject *ret = NULL;

	spin_lock(&kset->list_lock);

	list_for_each_entry(k, &kset->list, entry) {
		if (kobject_name(k) && !strcmp(kobject_name(k), name)) {
			ret = kobject_get_unless_zero(k);
			break;
		}
	}

	spin_unlock(&kset->list_lock);
	return ret;
}

/* release for dynamically created ksets: free the containing kset */
static void kset_release(struct kobject *kobj)
{
	struct kset *kset = container_of(kobj, struct kset, kobj);
	pr_debug("kobject: '%s' (%p): %s\n",
		 kobject_name(kobj), kobj, __func__);
	kfree(kset);
}

static struct kobj_type kset_ktype = {
	.sysfs_ops	= &kobj_sysfs_ops,
	.release	= kset_release,
};

/**
 * kset_create - create a struct kset dynamically
 *
 * @name: the name for the kset
 * @uevent_ops: a struct kset_uevent_ops for the kset
 * @parent_kobj: the parent kobject of this kset, if any.
 *
 * This function creates a kset structure dynamically.  This structure can
 * then be registered with the system and show up in sysfs with a call to
 * kset_register().
When you are finished with this structure, if * kset_register() has been called, call kset_unregister() and the * structure will be dynamically freed when it is no longer being used. * * If the kset was not able to be created, NULL will be returned. */ static struct kset *kset_create(const char *name, const struct kset_uevent_ops *uevent_ops, struct kobject *parent_kobj) { struct kset *kset; int retval; kset = kzalloc(sizeof(*kset), GFP_KERNEL); if (!kset) return NULL; retval = kobject_set_name(&kset->kobj, name); if (retval) { kfree(kset); return NULL; } kset->uevent_ops = uevent_ops; kset->kobj.parent = parent_kobj; /* * The kobject of this kset will have a type of kset_ktype and belong to * no kset itself. That way we can properly free it when it is * finished being used. */ kset->kobj.ktype = &kset_ktype; kset->kobj.kset = NULL; return kset; } /** * kset_create_and_add - create a struct kset dynamically and add it to sysfs * * @name: the name for the kset * @uevent_ops: a struct kset_uevent_ops for the kset * @parent_kobj: the parent kobject of this kset, if any. * * This function creates a kset structure dynamically and registers it * with sysfs. When you are finished with this structure, call * kset_unregister() and the structure will be dynamically freed when it * is no longer being used. * * If the kset was not able to be created, NULL will be returned. 
*/ struct kset *kset_create_and_add(const char *name, const struct kset_uevent_ops *uevent_ops, struct kobject *parent_kobj) { struct kset *kset; int error; kset = kset_create(name, uevent_ops, parent_kobj); if (!kset) return NULL; error = kset_register(kset); if (error) { kfree(kset); return NULL; } return kset; } EXPORT_SYMBOL_GPL(kset_create_and_add); static DEFINE_SPINLOCK(kobj_ns_type_lock); static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES]; int kobj_ns_type_register(const struct kobj_ns_type_operations *ops) { enum kobj_ns_type type = ops->type; int error; spin_lock(&kobj_ns_type_lock); error = -EINVAL; if (type >= KOBJ_NS_TYPES) goto out; error = -EINVAL; if (type <= KOBJ_NS_TYPE_NONE) goto out; error = -EBUSY; if (kobj_ns_ops_tbl[type]) goto out; error = 0; kobj_ns_ops_tbl[type] = ops; out: spin_unlock(&kobj_ns_type_lock); return error; } int kobj_ns_type_registered(enum kobj_ns_type type) { int registered = 0; spin_lock(&kobj_ns_type_lock); if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES)) registered = kobj_ns_ops_tbl[type] != NULL; spin_unlock(&kobj_ns_type_lock); return registered; } const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent) { const struct kobj_ns_type_operations *ops = NULL; if (parent && parent->ktype->child_ns_type) ops = parent->ktype->child_ns_type(parent); return ops; } const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj) { return kobj_child_ns_ops(kobj->parent); } void *kobj_ns_grab_current(enum kobj_ns_type type) { void *ns = NULL; spin_lock(&kobj_ns_type_lock); if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) && kobj_ns_ops_tbl[type]) ns = kobj_ns_ops_tbl[type]->grab_current_ns(); spin_unlock(&kobj_ns_type_lock); return ns; } const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk) { const void *ns = NULL; spin_lock(&kobj_ns_type_lock); if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) && kobj_ns_ops_tbl[type]) ns = 
kobj_ns_ops_tbl[type]->netlink_ns(sk); spin_unlock(&kobj_ns_type_lock); return ns; } const void *kobj_ns_initial(enum kobj_ns_type type) { const void *ns = NULL; spin_lock(&kobj_ns_type_lock); if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) && kobj_ns_ops_tbl[type]) ns = kobj_ns_ops_tbl[type]->initial_ns(); spin_unlock(&kobj_ns_type_lock); return ns; } void kobj_ns_drop(enum kobj_ns_type type, void *ns) { spin_lock(&kobj_ns_type_lock); if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) && kobj_ns_ops_tbl[type] && kobj_ns_ops_tbl[type]->drop_ns) kobj_ns_ops_tbl[type]->drop_ns(ns); spin_unlock(&kobj_ns_type_lock); } EXPORT_SYMBOL(kobject_get); EXPORT_SYMBOL(kobject_put); EXPORT_SYMBOL(kobject_del); EXPORT_SYMBOL(kset_register); EXPORT_SYMBOL(kset_unregister);
gpl-2.0
Cryptoo/kernel
arch/arm/mach-omap2/clockdomains3xxx_data.c
1897
13411
/*
 * OMAP3xxx clockdomains
 *
 * Copyright (C) 2008-2011 Texas Instruments, Inc.
 * Copyright (C) 2008-2010 Nokia Corporation
 *
 * Paul Walmsley, Jouni Högander
 *
 * This file contains clockdomains and clockdomain wakeup/sleep
 * dependencies for the OMAP3xxx chips.  Some notes:
 *
 * A useful validation rule for struct clockdomain: Any clockdomain
 * referenced by a wkdep_srcs or sleepdep_srcs array must have a
 * dep_bit assigned.  So wkdep_srcs/sleepdep_srcs are really just
 * software-controllable dependencies.  Non-software-controllable
 * dependencies do exist, but they are not encoded below (yet).
 *
 * The overly-specific dep_bit names are due to a bit name collision
 * with CM_FCLKEN_{DSP,IVA2}.  The DSP/IVA2 PM_WKDEP and CM_SLEEPDEP shift
 * value are the same for all powerdomains: 2
 *
 * XXX should dep_bit be a mask, so we can test to see if it is 0 as a
 * sanity check?
 * XXX encode hardware fixed wakeup dependencies -- esp. for 3430 CORE
 */

/*
 * To-Do List
 * -> Port the Sleep/Wakeup dependencies for the domains
 *    from the Power domain framework
 */

#include <linux/kernel.h>
#include <linux/io.h>

#include "soc.h"
#include "clockdomain.h"
#include "prm2xxx_3xxx.h"
#include "cm2xxx_3xxx.h"
#include "cm-regbits-34xx.h"
#include "prm-regbits-34xx.h"

/*
 * Clockdomain dependencies for wkdeps/sleepdeps
 *
 * XXX Hardware dependencies (e.g., dependencies that cannot be
 * changed in software) are not included here yet, but should be.
 */

/* OMAP3-specific possible dependencies */

/*
 * 3430ES1 PM_WKDEP_GFX: adds IVA2, removes CORE
 * 3430ES2 PM_WKDEP_SGX: adds IVA2, removes CORE
 */
static struct clkdm_dep gfx_sgx_3xxx_wkdeps[] = {
	{ .clkdm_name = "iva2_clkdm" },
	{ .clkdm_name = "mpu_clkdm" },
	{ .clkdm_name = "wkup_clkdm" },
	{ NULL },
};

/* AM35x variant: same as above but without IVA2 (no IVA2 on AM35x) */
static struct clkdm_dep gfx_sgx_am35x_wkdeps[] = {
	{ .clkdm_name = "mpu_clkdm" },
	{ .clkdm_name = "wkup_clkdm" },
	{ NULL },
};

/* 3430: PM_WKDEP_PER: CORE, IVA2, MPU, WKUP */
static struct clkdm_dep per_wkdeps[] = {
	{ .clkdm_name = "core_l3_clkdm" },
	{ .clkdm_name = "core_l4_clkdm" },
	{ .clkdm_name = "iva2_clkdm" },
	{ .clkdm_name = "mpu_clkdm" },
	{ .clkdm_name = "wkup_clkdm" },
	{ NULL },
};

static struct clkdm_dep per_am35x_wkdeps[] = {
	{ .clkdm_name = "core_l3_clkdm" },
	{ .clkdm_name = "core_l4_clkdm" },
	{ .clkdm_name = "mpu_clkdm" },
	{ .clkdm_name = "wkup_clkdm" },
	{ NULL },
};

/* 3430ES2: PM_WKDEP_USBHOST: CORE, IVA2, MPU, WKUP */
static struct clkdm_dep usbhost_wkdeps[] = {
	{ .clkdm_name = "core_l3_clkdm" },
	{ .clkdm_name = "core_l4_clkdm" },
	{ .clkdm_name = "iva2_clkdm" },
	{ .clkdm_name = "mpu_clkdm" },
	{ .clkdm_name = "wkup_clkdm" },
	{ NULL },
};

static struct clkdm_dep usbhost_am35x_wkdeps[] = {
	{ .clkdm_name = "core_l3_clkdm" },
	{ .clkdm_name = "core_l4_clkdm" },
	{ .clkdm_name = "mpu_clkdm" },
	{ .clkdm_name = "wkup_clkdm" },
	{ NULL },
};

/* 3430 PM_WKDEP_MPU: CORE, IVA2, DSS, PER */
static struct clkdm_dep mpu_3xxx_wkdeps[] = {
	{ .clkdm_name = "core_l3_clkdm" },
	{ .clkdm_name = "core_l4_clkdm" },
	{ .clkdm_name = "iva2_clkdm" },
	{ .clkdm_name = "dss_clkdm" },
	{ .clkdm_name = "per_clkdm" },
	{ NULL },
};

static struct clkdm_dep mpu_am35x_wkdeps[] = {
	{ .clkdm_name = "core_l3_clkdm" },
	{ .clkdm_name = "core_l4_clkdm" },
	{ .clkdm_name = "dss_clkdm" },
	{ .clkdm_name = "per_clkdm" },
	{ NULL },
};

/* 3430 PM_WKDEP_IVA2: CORE, MPU, WKUP, DSS, PER */
static struct clkdm_dep iva2_wkdeps[] = {
	{ .clkdm_name = "core_l3_clkdm" },
	{ .clkdm_name = "core_l4_clkdm" },
	{ .clkdm_name = "mpu_clkdm" },
	{ .clkdm_name = "wkup_clkdm" },
	{ .clkdm_name = "dss_clkdm" },
	{ .clkdm_name = "per_clkdm" },
	{ NULL },
};

/* 3430 PM_WKDEP_CAM: IVA2, MPU, WKUP */
static struct clkdm_dep cam_wkdeps[] = {
	{ .clkdm_name = "iva2_clkdm" },
	{ .clkdm_name = "mpu_clkdm" },
	{ .clkdm_name = "wkup_clkdm" },
	{ NULL },
};

/* 3430 PM_WKDEP_DSS: IVA2, MPU, WKUP */
static struct clkdm_dep dss_wkdeps[] = {
	{ .clkdm_name = "iva2_clkdm" },
	{ .clkdm_name = "mpu_clkdm" },
	{ .clkdm_name = "wkup_clkdm" },
	{ NULL },
};

static struct clkdm_dep dss_am35x_wkdeps[] = {
	{ .clkdm_name = "mpu_clkdm" },
	{ .clkdm_name = "wkup_clkdm" },
	{ NULL },
};

/* 3430: PM_WKDEP_NEON: MPU */
static struct clkdm_dep neon_wkdeps[] = {
	{ .clkdm_name = "mpu_clkdm" },
	{ NULL },
};

/* Sleep dependency source arrays for OMAP3-specific clkdms */

/* 3430: CM_SLEEPDEP_DSS: MPU, IVA */
static struct clkdm_dep dss_sleepdeps[] = {
	{ .clkdm_name = "mpu_clkdm" },
	{ .clkdm_name = "iva2_clkdm" },
	{ NULL },
};

static struct clkdm_dep dss_am35x_sleepdeps[] = {
	{ .clkdm_name = "mpu_clkdm" },
	{ NULL },
};

/* 3430: CM_SLEEPDEP_PER: MPU, IVA */
static struct clkdm_dep per_sleepdeps[] = {
	{ .clkdm_name = "mpu_clkdm" },
	{ .clkdm_name = "iva2_clkdm" },
	{ NULL },
};

static struct clkdm_dep per_am35x_sleepdeps[] = {
	{ .clkdm_name = "mpu_clkdm" },
	{ NULL },
};

/* 3430ES2: CM_SLEEPDEP_USBHOST: MPU, IVA */
static struct clkdm_dep usbhost_sleepdeps[] = {
	{ .clkdm_name = "mpu_clkdm" },
	{ .clkdm_name = "iva2_clkdm" },
	{ NULL },
};

static struct clkdm_dep usbhost_am35x_sleepdeps[] = {
	{ .clkdm_name = "mpu_clkdm" },
	{ NULL },
};

/* 3430: CM_SLEEPDEP_CAM: MPU */
static struct clkdm_dep cam_sleepdeps[] = {
	{ .clkdm_name = "mpu_clkdm" },
	{ NULL },
};

/*
 * 3430ES1: CM_SLEEPDEP_GFX: MPU
 * 3430ES2: CM_SLEEPDEP_SGX: MPU
 * These can share data since they will never be present simultaneously
 * on the same device.
 */
static struct clkdm_dep gfx_sgx_sleepdeps[] = {
	{ .clkdm_name = "mpu_clkdm" },
	{ NULL },
};

/*
 * OMAP3 clockdomains
 */

static struct clockdomain mpu_3xxx_clkdm = {
	.name		= "mpu_clkdm",
	.pwrdm		= { .name = "mpu_pwrdm" },
	.flags		= CLKDM_CAN_HWSUP | CLKDM_CAN_FORCE_WAKEUP,
	.dep_bit	= OMAP3430_EN_MPU_SHIFT,
	.wkdep_srcs	= mpu_3xxx_wkdeps,
	.clktrctrl_mask = OMAP3430_CLKTRCTRL_MPU_MASK,
};

static struct clockdomain mpu_am35x_clkdm = {
	.name		= "mpu_clkdm",
	.pwrdm		= { .name = "mpu_pwrdm" },
	.flags		= CLKDM_CAN_HWSUP | CLKDM_CAN_FORCE_WAKEUP,
	.dep_bit	= OMAP3430_EN_MPU_SHIFT,
	.wkdep_srcs	= mpu_am35x_wkdeps,
	.clktrctrl_mask = OMAP3430_CLKTRCTRL_MPU_MASK,
};

static struct clockdomain neon_clkdm = {
	.name		= "neon_clkdm",
	.pwrdm		= { .name = "neon_pwrdm" },
	.flags		= CLKDM_CAN_HWSUP_SWSUP,
	.wkdep_srcs	= neon_wkdeps,
	.clktrctrl_mask = OMAP3430_CLKTRCTRL_NEON_MASK,
};

static struct clockdomain iva2_clkdm = {
	.name		= "iva2_clkdm",
	.pwrdm		= { .name = "iva2_pwrdm" },
	.flags		= CLKDM_CAN_SWSUP,
	.dep_bit	= OMAP3430_PM_WKDEP_MPU_EN_IVA2_SHIFT,
	.wkdep_srcs	= iva2_wkdeps,
	.clktrctrl_mask = OMAP3430_CLKTRCTRL_IVA2_MASK,
};

static struct clockdomain gfx_3430es1_clkdm = {
	.name		= "gfx_clkdm",
	.pwrdm		= { .name = "gfx_pwrdm" },
	.flags		= CLKDM_CAN_HWSUP_SWSUP,
	.wkdep_srcs	= gfx_sgx_3xxx_wkdeps,
	.sleepdep_srcs	= gfx_sgx_sleepdeps,
	.clktrctrl_mask = OMAP3430ES1_CLKTRCTRL_GFX_MASK,
};

static struct clockdomain sgx_clkdm = {
	.name		= "sgx_clkdm",
	.pwrdm		= { .name = "sgx_pwrdm" },
	.flags		= CLKDM_CAN_HWSUP_SWSUP,
	.wkdep_srcs	= gfx_sgx_3xxx_wkdeps,
	.sleepdep_srcs	= gfx_sgx_sleepdeps,
	.clktrctrl_mask = OMAP3430ES2_CLKTRCTRL_SGX_MASK,
};

static struct clockdomain sgx_am35x_clkdm = {
	.name		= "sgx_clkdm",
	.pwrdm		= { .name = "sgx_pwrdm" },
	.flags		= CLKDM_CAN_HWSUP_SWSUP,
	.wkdep_srcs	= gfx_sgx_am35x_wkdeps,
	.sleepdep_srcs	= gfx_sgx_sleepdeps,
	.clktrctrl_mask = OMAP3430ES2_CLKTRCTRL_SGX_MASK,
};

/*
 * The die-to-die clockdomain was documented in the 34xx ES1 TRM, but
 * then that information was removed from the 34xx ES2+ TRM.  It is
 * unclear whether the core is still there, but the clockdomain logic
 * is there, and must be programmed to an appropriate state if the
 * CORE clockdomain is to become inactive.
 */
static struct clockdomain d2d_clkdm = {
	.name		= "d2d_clkdm",
	.pwrdm		= { .name = "core_pwrdm" },
	.flags		= CLKDM_CAN_HWSUP_SWSUP,
	.clktrctrl_mask = OMAP3430ES1_CLKTRCTRL_D2D_MASK,
};

/*
 * XXX add usecounting for clkdm dependencies, otherwise the presence
 * of a single dep bit for core_l3_3xxx_clkdm and core_l4_3xxx_clkdm
 * could cause trouble
 */
static struct clockdomain core_l3_3xxx_clkdm = {
	.name		= "core_l3_clkdm",
	.pwrdm		= { .name = "core_pwrdm" },
	.flags		= CLKDM_CAN_HWSUP,
	.dep_bit	= OMAP3430_EN_CORE_SHIFT,
	.clktrctrl_mask = OMAP3430_CLKTRCTRL_L3_MASK,
};

/*
 * XXX add usecounting for clkdm dependencies, otherwise the presence
 * of a single dep bit for core_l3_3xxx_clkdm and core_l4_3xxx_clkdm
 * could cause trouble
 */
static struct clockdomain core_l4_3xxx_clkdm = {
	.name		= "core_l4_clkdm",
	.pwrdm		= { .name = "core_pwrdm" },
	.flags		= CLKDM_CAN_HWSUP,
	.dep_bit	= OMAP3430_EN_CORE_SHIFT,
	.clktrctrl_mask = OMAP3430_CLKTRCTRL_L4_MASK,
};

/* Another case of bit name collisions between several registers: EN_DSS */
static struct clockdomain dss_3xxx_clkdm = {
	.name		= "dss_clkdm",
	.pwrdm		= { .name = "dss_pwrdm" },
	.flags		= CLKDM_CAN_HWSUP_SWSUP,
	.dep_bit	= OMAP3430_PM_WKDEP_MPU_EN_DSS_SHIFT,
	.wkdep_srcs	= dss_wkdeps,
	.sleepdep_srcs	= dss_sleepdeps,
	.clktrctrl_mask = OMAP3430_CLKTRCTRL_DSS_MASK,
};

static struct clockdomain dss_am35x_clkdm = {
	.name		= "dss_clkdm",
	.pwrdm		= { .name = "dss_pwrdm" },
	.flags		= CLKDM_CAN_HWSUP_SWSUP,
	.dep_bit	= OMAP3430_PM_WKDEP_MPU_EN_DSS_SHIFT,
	.wkdep_srcs	= dss_am35x_wkdeps,
	.sleepdep_srcs	= dss_am35x_sleepdeps,
	.clktrctrl_mask = OMAP3430_CLKTRCTRL_DSS_MASK,
};

static struct clockdomain cam_clkdm = {
	.name		= "cam_clkdm",
	.pwrdm		= { .name = "cam_pwrdm" },
	.flags		= CLKDM_CAN_HWSUP_SWSUP,
	.wkdep_srcs	= cam_wkdeps,
	.sleepdep_srcs	= cam_sleepdeps,
	.clktrctrl_mask = OMAP3430_CLKTRCTRL_CAM_MASK,
};

static struct clockdomain usbhost_clkdm = {
	.name		= "usbhost_clkdm",
	.pwrdm		= { .name = "usbhost_pwrdm" },
	.flags		= CLKDM_CAN_HWSUP_SWSUP,
	.wkdep_srcs	= usbhost_wkdeps,
	.sleepdep_srcs	= usbhost_sleepdeps,
	.clktrctrl_mask = OMAP3430ES2_CLKTRCTRL_USBHOST_MASK,
};

static struct clockdomain usbhost_am35x_clkdm = {
	.name		= "usbhost_clkdm",
	.pwrdm		= { .name = "core_pwrdm" },
	.flags		= CLKDM_CAN_HWSUP_SWSUP,
	.wkdep_srcs	= usbhost_am35x_wkdeps,
	.sleepdep_srcs	= usbhost_am35x_sleepdeps,
	.clktrctrl_mask = OMAP3430ES2_CLKTRCTRL_USBHOST_MASK,
};

static struct clockdomain per_clkdm = {
	.name		= "per_clkdm",
	.pwrdm		= { .name = "per_pwrdm" },
	.flags		= CLKDM_CAN_HWSUP_SWSUP,
	.dep_bit	= OMAP3430_EN_PER_SHIFT,
	.wkdep_srcs	= per_wkdeps,
	.sleepdep_srcs	= per_sleepdeps,
	.clktrctrl_mask = OMAP3430_CLKTRCTRL_PER_MASK,
};

static struct clockdomain per_am35x_clkdm = {
	.name		= "per_clkdm",
	.pwrdm		= { .name = "per_pwrdm" },
	.flags		= CLKDM_CAN_HWSUP_SWSUP,
	.dep_bit	= OMAP3430_EN_PER_SHIFT,
	.wkdep_srcs	= per_am35x_wkdeps,
	.sleepdep_srcs	= per_am35x_sleepdeps,
	.clktrctrl_mask = OMAP3430_CLKTRCTRL_PER_MASK,
};

static struct clockdomain emu_clkdm = {
	.name		= "emu_clkdm",
	.pwrdm		= { .name = "emu_pwrdm" },
	.flags		= (CLKDM_CAN_ENABLE_AUTO | CLKDM_CAN_SWSUP |
			   CLKDM_MISSING_IDLE_REPORTING),
	.clktrctrl_mask = OMAP3430_CLKTRCTRL_EMU_MASK,
};

static struct clockdomain dpll1_clkdm = {
	.name		= "dpll1_clkdm",
	.pwrdm		= { .name = "dpll1_pwrdm" },
};

static struct clockdomain dpll2_clkdm = {
	.name		= "dpll2_clkdm",
	.pwrdm		= { .name = "dpll2_pwrdm" },
};

static struct clockdomain dpll3_clkdm = {
	.name		= "dpll3_clkdm",
	.pwrdm		= { .name = "dpll3_pwrdm" },
};

static struct clockdomain dpll4_clkdm = {
	.name		= "dpll4_clkdm",
	.pwrdm		= { .name = "dpll4_pwrdm" },
};

static struct clockdomain dpll5_clkdm = {
	.name		= "dpll5_clkdm",
	.pwrdm		= { .name = "dpll5_pwrdm" },
};

/*
 * Clockdomain hwsup dependencies
 */

static struct clkdm_autodep clkdm_autodeps[] = {
	{
		.clkdm = { .name = "mpu_clkdm" },
	},
	{
		.clkdm = { .name = "iva2_clkdm" },
	},
	{
		.clkdm = { .name = NULL },
	}
};

static struct clkdm_autodep clkdm_am35x_autodeps[] = {
	{
		.clkdm = { .name = "mpu_clkdm" },
	},
	{
		.clkdm = { .name = NULL },
	}
};

/*
 * Clockdomains common to all OMAP3-family variants.
 */

static struct clockdomain *clockdomains_common[] __initdata = {
	&wkup_common_clkdm,
	&neon_clkdm,
	&core_l3_3xxx_clkdm,
	&core_l4_3xxx_clkdm,
	&emu_clkdm,
	&dpll1_clkdm,
	&dpll3_clkdm,
	&dpll4_clkdm,
	NULL
};

static struct clockdomain *clockdomains_omap3430[] __initdata = {
	&mpu_3xxx_clkdm,
	&iva2_clkdm,
	&d2d_clkdm,
	&dss_3xxx_clkdm,
	&cam_clkdm,
	&per_clkdm,
	&dpll2_clkdm,
	NULL
};

static struct clockdomain *clockdomains_omap3430es1[] __initdata = {
	&gfx_3430es1_clkdm,
	NULL,
};

static struct clockdomain *clockdomains_omap3430es2plus[] __initdata = {
	&sgx_clkdm,
	&dpll5_clkdm,
	&usbhost_clkdm,
	NULL,
};

static struct clockdomain *clockdomains_am35x[] __initdata = {
	&mpu_am35x_clkdm,
	&sgx_am35x_clkdm,
	&dss_am35x_clkdm,
	&per_am35x_clkdm,
	&usbhost_am35x_clkdm,
	&dpll5_clkdm,
	NULL
};

/*
 * omap3xxx_clockdomains_init - register the clockdomain set matching the
 * detected SoC revision (AM35x vs. OMAP3430 ES1 vs. ES2+) with the
 * clockdomain framework.  No-op on non-OMAP34xx parts.
 */
void __init omap3xxx_clockdomains_init(void)
{
	struct clockdomain **sc;
	unsigned int rev;

	if (!cpu_is_omap34xx())
		return;

	clkdm_register_platform_funcs(&omap3_clkdm_operations);
	clkdm_register_clkdms(clockdomains_common);

	rev = omap_rev();

	if (rev == AM35XX_REV_ES1_0 || rev == AM35XX_REV_ES1_1) {
		clkdm_register_clkdms(clockdomains_am35x);
		clkdm_register_autodeps(clkdm_am35x_autodeps);
	} else {
		clkdm_register_clkdms(clockdomains_omap3430);

		/* GFX exists only on ES1; SGX/DPLL5/USBHOST only on ES2+ */
		sc = (rev == OMAP3430_REV_ES1_0) ?
			clockdomains_omap3430es1 : clockdomains_omap3430es2plus;

		clkdm_register_clkdms(sc);
		clkdm_register_autodeps(clkdm_autodeps);
	}

	clkdm_complete_init();
}
gpl-2.0
Surge1223/android_kernel_moto_shamu
drivers/rtc/rtc-tx4939.c
2153
8660
/* * TX4939 internal RTC driver * Based on RBTX49xx patch from CELF patch archive. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * (C) Copyright TOSHIBA CORPORATION 2005-2007 */ #include <linux/rtc.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/io.h> #include <linux/gfp.h> #include <asm/txx9/tx4939.h> struct tx4939rtc_plat_data { struct rtc_device *rtc; struct tx4939_rtc_reg __iomem *rtcreg; spinlock_t lock; }; static struct tx4939rtc_plat_data *get_tx4939rtc_plat_data(struct device *dev) { return platform_get_drvdata(to_platform_device(dev)); } static int tx4939_rtc_cmd(struct tx4939_rtc_reg __iomem *rtcreg, int cmd) { int i = 0; __raw_writel(cmd, &rtcreg->ctl); /* This might take 30us (next 32.768KHz clock) */ while (__raw_readl(&rtcreg->ctl) & TX4939_RTCCTL_BUSY) { /* timeout on approx. 100us (@ GBUS200MHz) */ if (i++ > 200 * 100) return -EBUSY; cpu_relax(); } return 0; } static int tx4939_rtc_set_mmss(struct device *dev, unsigned long secs) { struct tx4939rtc_plat_data *pdata = get_tx4939rtc_plat_data(dev); struct tx4939_rtc_reg __iomem *rtcreg = pdata->rtcreg; int i, ret; unsigned char buf[6]; buf[0] = 0; buf[1] = 0; buf[2] = secs; buf[3] = secs >> 8; buf[4] = secs >> 16; buf[5] = secs >> 24; spin_lock_irq(&pdata->lock); __raw_writel(0, &rtcreg->adr); for (i = 0; i < 6; i++) __raw_writel(buf[i], &rtcreg->dat); ret = tx4939_rtc_cmd(rtcreg, TX4939_RTCCTL_COMMAND_SETTIME | (__raw_readl(&rtcreg->ctl) & TX4939_RTCCTL_ALME)); spin_unlock_irq(&pdata->lock); return ret; } static int tx4939_rtc_read_time(struct device *dev, struct rtc_time *tm) { struct tx4939rtc_plat_data *pdata = get_tx4939rtc_plat_data(dev); struct tx4939_rtc_reg __iomem *rtcreg = pdata->rtcreg; int i, ret; unsigned long sec; unsigned char buf[6]; spin_lock_irq(&pdata->lock); ret = tx4939_rtc_cmd(rtcreg, 
TX4939_RTCCTL_COMMAND_GETTIME | (__raw_readl(&rtcreg->ctl) & TX4939_RTCCTL_ALME)); if (ret) { spin_unlock_irq(&pdata->lock); return ret; } __raw_writel(2, &rtcreg->adr); for (i = 2; i < 6; i++) buf[i] = __raw_readl(&rtcreg->dat); spin_unlock_irq(&pdata->lock); sec = (buf[5] << 24) | (buf[4] << 16) | (buf[3] << 8) | buf[2]; rtc_time_to_tm(sec, tm); return rtc_valid_tm(tm); } static int tx4939_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) { struct tx4939rtc_plat_data *pdata = get_tx4939rtc_plat_data(dev); struct tx4939_rtc_reg __iomem *rtcreg = pdata->rtcreg; int i, ret; unsigned long sec; unsigned char buf[6]; if (alrm->time.tm_sec < 0 || alrm->time.tm_min < 0 || alrm->time.tm_hour < 0 || alrm->time.tm_mday < 0 || alrm->time.tm_mon < 0 || alrm->time.tm_year < 0) return -EINVAL; rtc_tm_to_time(&alrm->time, &sec); buf[0] = 0; buf[1] = 0; buf[2] = sec; buf[3] = sec >> 8; buf[4] = sec >> 16; buf[5] = sec >> 24; spin_lock_irq(&pdata->lock); __raw_writel(0, &rtcreg->adr); for (i = 0; i < 6; i++) __raw_writel(buf[i], &rtcreg->dat); ret = tx4939_rtc_cmd(rtcreg, TX4939_RTCCTL_COMMAND_SETALARM | (alrm->enabled ? TX4939_RTCCTL_ALME : 0)); spin_unlock_irq(&pdata->lock); return ret; } static int tx4939_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) { struct tx4939rtc_plat_data *pdata = get_tx4939rtc_plat_data(dev); struct tx4939_rtc_reg __iomem *rtcreg = pdata->rtcreg; int i, ret; unsigned long sec; unsigned char buf[6]; u32 ctl; spin_lock_irq(&pdata->lock); ret = tx4939_rtc_cmd(rtcreg, TX4939_RTCCTL_COMMAND_GETALARM | (__raw_readl(&rtcreg->ctl) & TX4939_RTCCTL_ALME)); if (ret) { spin_unlock_irq(&pdata->lock); return ret; } __raw_writel(2, &rtcreg->adr); for (i = 2; i < 6; i++) buf[i] = __raw_readl(&rtcreg->dat); ctl = __raw_readl(&rtcreg->ctl); alrm->enabled = (ctl & TX4939_RTCCTL_ALME) ? 1 : 0; alrm->pending = (ctl & TX4939_RTCCTL_ALMD) ? 
1 : 0; spin_unlock_irq(&pdata->lock); sec = (buf[5] << 24) | (buf[4] << 16) | (buf[3] << 8) | buf[2]; rtc_time_to_tm(sec, &alrm->time); return rtc_valid_tm(&alrm->time); } static int tx4939_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) { struct tx4939rtc_plat_data *pdata = get_tx4939rtc_plat_data(dev); spin_lock_irq(&pdata->lock); tx4939_rtc_cmd(pdata->rtcreg, TX4939_RTCCTL_COMMAND_NOP | (enabled ? TX4939_RTCCTL_ALME : 0)); spin_unlock_irq(&pdata->lock); return 0; } static irqreturn_t tx4939_rtc_interrupt(int irq, void *dev_id) { struct tx4939rtc_plat_data *pdata = get_tx4939rtc_plat_data(dev_id); struct tx4939_rtc_reg __iomem *rtcreg = pdata->rtcreg; unsigned long events = RTC_IRQF; spin_lock(&pdata->lock); if (__raw_readl(&rtcreg->ctl) & TX4939_RTCCTL_ALMD) { events |= RTC_AF; tx4939_rtc_cmd(rtcreg, TX4939_RTCCTL_COMMAND_NOP); } spin_unlock(&pdata->lock); if (likely(pdata->rtc)) rtc_update_irq(pdata->rtc, 1, events); return IRQ_HANDLED; } static const struct rtc_class_ops tx4939_rtc_ops = { .read_time = tx4939_rtc_read_time, .read_alarm = tx4939_rtc_read_alarm, .set_alarm = tx4939_rtc_set_alarm, .set_mmss = tx4939_rtc_set_mmss, .alarm_irq_enable = tx4939_rtc_alarm_irq_enable, }; static ssize_t tx4939_rtc_nvram_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t pos, size_t size) { struct device *dev = container_of(kobj, struct device, kobj); struct tx4939rtc_plat_data *pdata = get_tx4939rtc_plat_data(dev); struct tx4939_rtc_reg __iomem *rtcreg = pdata->rtcreg; ssize_t count; spin_lock_irq(&pdata->lock); for (count = 0; size > 0 && pos < TX4939_RTC_REG_RAMSIZE; count++, size--) { __raw_writel(pos++, &rtcreg->adr); *buf++ = __raw_readl(&rtcreg->dat); } spin_unlock_irq(&pdata->lock); return count; } static ssize_t tx4939_rtc_nvram_write(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t pos, size_t size) { struct device *dev = container_of(kobj, struct device, kobj); 
struct tx4939rtc_plat_data *pdata = get_tx4939rtc_plat_data(dev); struct tx4939_rtc_reg __iomem *rtcreg = pdata->rtcreg; ssize_t count; spin_lock_irq(&pdata->lock); for (count = 0; size > 0 && pos < TX4939_RTC_REG_RAMSIZE; count++, size--) { __raw_writel(pos++, &rtcreg->adr); __raw_writel(*buf++, &rtcreg->dat); } spin_unlock_irq(&pdata->lock); return count; } static struct bin_attribute tx4939_rtc_nvram_attr = { .attr = { .name = "nvram", .mode = S_IRUGO | S_IWUSR, }, .size = TX4939_RTC_REG_RAMSIZE, .read = tx4939_rtc_nvram_read, .write = tx4939_rtc_nvram_write, }; static int __init tx4939_rtc_probe(struct platform_device *pdev) { struct rtc_device *rtc; struct tx4939rtc_plat_data *pdata; struct resource *res; int irq, ret; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENODEV; irq = platform_get_irq(pdev, 0); if (irq < 0) return -ENODEV; pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) return -ENOMEM; platform_set_drvdata(pdev, pdata); if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res), pdev->name)) return -EBUSY; pdata->rtcreg = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!pdata->rtcreg) return -EBUSY; spin_lock_init(&pdata->lock); tx4939_rtc_cmd(pdata->rtcreg, TX4939_RTCCTL_COMMAND_NOP); if (devm_request_irq(&pdev->dev, irq, tx4939_rtc_interrupt, 0, pdev->name, &pdev->dev) < 0) return -EBUSY; rtc = devm_rtc_device_register(&pdev->dev, pdev->name, &tx4939_rtc_ops, THIS_MODULE); if (IS_ERR(rtc)) return PTR_ERR(rtc); pdata->rtc = rtc; ret = sysfs_create_bin_file(&pdev->dev.kobj, &tx4939_rtc_nvram_attr); return ret; } static int __exit tx4939_rtc_remove(struct platform_device *pdev) { struct tx4939rtc_plat_data *pdata = platform_get_drvdata(pdev); sysfs_remove_bin_file(&pdev->dev.kobj, &tx4939_rtc_nvram_attr); spin_lock_irq(&pdata->lock); tx4939_rtc_cmd(pdata->rtcreg, TX4939_RTCCTL_COMMAND_NOP); spin_unlock_irq(&pdata->lock); return 0; } static struct platform_driver 
tx4939_rtc_driver = { .remove = __exit_p(tx4939_rtc_remove), .driver = { .name = "tx4939rtc", .owner = THIS_MODULE, }, }; module_platform_driver_probe(tx4939_rtc_driver, tx4939_rtc_probe); MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>"); MODULE_DESCRIPTION("TX4939 internal RTC driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:tx4939rtc");
gpl-2.0
DevriesL/SM-G9208_ImageBreaker
drivers/gpu/drm/nouveau/dispnv04/hw.c
2153
27513
/* * Copyright 2006 Dave Airlie * Copyright 2007 Maarten Maathuis * Copyright 2007-2009 Stuart Bennett * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <drm/drmP.h> #include "nouveau_drm.h" #include "hw.h" #include <subdev/bios/pll.h> #include <subdev/clock.h> #include <subdev/timer.h> #define CHIPSET_NFORCE 0x01a0 #define CHIPSET_NFORCE2 0x01f0 /* * misc hw access wrappers/control functions */ void NVWriteVgaSeq(struct drm_device *dev, int head, uint8_t index, uint8_t value) { NVWritePRMVIO(dev, head, NV_PRMVIO_SRX, index); NVWritePRMVIO(dev, head, NV_PRMVIO_SR, value); } uint8_t NVReadVgaSeq(struct drm_device *dev, int head, uint8_t index) { NVWritePRMVIO(dev, head, NV_PRMVIO_SRX, index); return NVReadPRMVIO(dev, head, NV_PRMVIO_SR); } void NVWriteVgaGr(struct drm_device *dev, int head, uint8_t index, uint8_t value) { NVWritePRMVIO(dev, head, NV_PRMVIO_GRX, index); NVWritePRMVIO(dev, head, NV_PRMVIO_GX, value); } uint8_t NVReadVgaGr(struct drm_device *dev, int head, uint8_t index) { NVWritePRMVIO(dev, head, NV_PRMVIO_GRX, index); return NVReadPRMVIO(dev, head, NV_PRMVIO_GX); } /* CR44 takes values 0 (head A), 3 (head B) and 4 (heads tied) * it affects only the 8 bit vga io regs, which we access using mmio at * 0xc{0,2}3c*, 0x60{1,3}3*, and 0x68{1,3}3d* * in general, the set value of cr44 does not matter: reg access works as * expected and values can be set for the appropriate head by using a 0x2000 * offset as required * however: * a) pre nv40, the head B range of PRMVIO regs at 0xc23c* was not exposed and * cr44 must be set to 0 or 3 for accessing values on the correct head * through the common 0xc03c* addresses * b) in tied mode (4) head B is programmed to the values set on head A, and * access using the head B addresses can have strange results, ergo we leave * tied mode in init once we know to what cr44 should be restored on exit * * the owner parameter is slightly abused: * 0 and 1 are treated as head values and so the set value is (owner * 3) * other values are treated as literal values to set */ void NVSetOwner(struct drm_device *dev, int owner) { struct nouveau_drm *drm = nouveau_drm(dev); if 
(owner == 1) owner *= 3; if (nv_device(drm->device)->chipset == 0x11) { /* This might seem stupid, but the blob does it and * omitting it often locks the system up. */ NVReadVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX); NVReadVgaCrtc(dev, 1, NV_CIO_SR_LOCK_INDEX); } /* CR44 is always changed on CRTC0 */ NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_44, owner); if (nv_device(drm->device)->chipset == 0x11) { /* set me harder */ NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner); NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner); } } void NVBlankScreen(struct drm_device *dev, int head, bool blank) { unsigned char seq1; if (nv_two_heads(dev)) NVSetOwner(dev, head); seq1 = NVReadVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX); NVVgaSeqReset(dev, head, true); if (blank) NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 | 0x20); else NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 & ~0x20); NVVgaSeqReset(dev, head, false); } /* * PLL getting */ static void nouveau_hw_decode_pll(struct drm_device *dev, uint32_t reg1, uint32_t pll1, uint32_t pll2, struct nouveau_pll_vals *pllvals) { struct nouveau_drm *drm = nouveau_drm(dev); /* to force parsing as single stage (i.e. 
nv40 vplls) pass pll2 as 0 */ /* log2P is & 0x7 as never more than 7, and nv30/35 only uses 3 bits */ pllvals->log2P = (pll1 >> 16) & 0x7; pllvals->N2 = pllvals->M2 = 1; if (reg1 <= 0x405c) { pllvals->NM1 = pll2 & 0xffff; /* single stage NVPLL and VPLLs use 1 << 8, MPLL uses 1 << 12 */ if (!(pll1 & 0x1100)) pllvals->NM2 = pll2 >> 16; } else { pllvals->NM1 = pll1 & 0xffff; if (nv_two_reg_pll(dev) && pll2 & NV31_RAMDAC_ENABLE_VCO2) pllvals->NM2 = pll2 & 0xffff; else if (nv_device(drm->device)->chipset == 0x30 || nv_device(drm->device)->chipset == 0x35) { pllvals->M1 &= 0xf; /* only 4 bits */ if (pll1 & NV30_RAMDAC_ENABLE_VCO2) { pllvals->M2 = (pll1 >> 4) & 0x7; pllvals->N2 = ((pll1 >> 21) & 0x18) | ((pll1 >> 19) & 0x7); } } } } int nouveau_hw_get_pllvals(struct drm_device *dev, enum nvbios_pll_type plltype, struct nouveau_pll_vals *pllvals) { struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_device *device = nv_device(drm->device); struct nouveau_bios *bios = nouveau_bios(device); uint32_t reg1, pll1, pll2 = 0; struct nvbios_pll pll_lim; int ret; ret = nvbios_pll_parse(bios, plltype, &pll_lim); if (ret || !(reg1 = pll_lim.reg)) return -ENOENT; pll1 = nv_rd32(device, reg1); if (reg1 <= 0x405c) pll2 = nv_rd32(device, reg1 + 4); else if (nv_two_reg_pll(dev)) { uint32_t reg2 = reg1 + (reg1 == NV_RAMDAC_VPLL2 ? 
0x5c : 0x70); pll2 = nv_rd32(device, reg2); } if (nv_device(drm->device)->card_type == 0x40 && reg1 >= NV_PRAMDAC_VPLL_COEFF) { uint32_t ramdac580 = NVReadRAMDAC(dev, 0, NV_PRAMDAC_580); /* check whether vpll has been forced into single stage mode */ if (reg1 == NV_PRAMDAC_VPLL_COEFF) { if (ramdac580 & NV_RAMDAC_580_VPLL1_ACTIVE) pll2 = 0; } else if (ramdac580 & NV_RAMDAC_580_VPLL2_ACTIVE) pll2 = 0; } nouveau_hw_decode_pll(dev, reg1, pll1, pll2, pllvals); pllvals->refclk = pll_lim.refclk; return 0; } int nouveau_hw_pllvals_to_clk(struct nouveau_pll_vals *pv) { /* Avoid divide by zero if called at an inappropriate time */ if (!pv->M1 || !pv->M2) return 0; return pv->N1 * pv->N2 * pv->refclk / (pv->M1 * pv->M2) >> pv->log2P; } int nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype) { struct nouveau_pll_vals pllvals; int ret; if (plltype == PLL_MEMORY && (dev->pci_device & 0x0ff0) == CHIPSET_NFORCE) { uint32_t mpllP; pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP); if (!mpllP) mpllP = 4; return 400000 / mpllP; } else if (plltype == PLL_MEMORY && (dev->pci_device & 0xff0) == CHIPSET_NFORCE2) { uint32_t clock; pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock); return clock; } ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals); if (ret) return ret; return nouveau_hw_pllvals_to_clk(&pllvals); } static void nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head) { /* the vpll on an unused head can come up with a random value, way * beyond the pll limits. for some reason this causes the chip to * lock up when reading the dac palette regs, so set a valid pll here * when such a condition detected. only seen on nv11 to date */ struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_device *device = nv_device(drm->device); struct nouveau_clock *clk = nouveau_clock(device); struct nouveau_bios *bios = nouveau_bios(device); struct nvbios_pll pll_lim; struct nouveau_pll_vals pv; enum nvbios_pll_type pll = head ? 
PLL_VPLL1 : PLL_VPLL0; if (nvbios_pll_parse(bios, pll, &pll_lim)) return; nouveau_hw_get_pllvals(dev, pll, &pv); if (pv.M1 >= pll_lim.vco1.min_m && pv.M1 <= pll_lim.vco1.max_m && pv.N1 >= pll_lim.vco1.min_n && pv.N1 <= pll_lim.vco1.max_n && pv.log2P <= pll_lim.max_p) return; NV_WARN(drm, "VPLL %d outwith limits, attempting to fix\n", head + 1); /* set lowest clock within static limits */ pv.M1 = pll_lim.vco1.max_m; pv.N1 = pll_lim.vco1.min_n; pv.log2P = pll_lim.max_p_usable; clk->pll_prog(clk, pll_lim.reg, &pv); } /* * vga font save/restore */ static void nouveau_vga_font_io(struct drm_device *dev, void __iomem *iovram, bool save, unsigned plane) { unsigned i; NVWriteVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX, 1 << plane); NVWriteVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX, plane); for (i = 0; i < 16384; i++) { if (save) { nv04_display(dev)->saved_vga_font[plane][i] = ioread32_native(iovram + i * 4); } else { iowrite32_native(nv04_display(dev)->saved_vga_font[plane][i], iovram + i * 4); } } } void nouveau_hw_save_vga_fonts(struct drm_device *dev, bool save) { struct nouveau_drm *drm = nouveau_drm(dev); uint8_t misc, gr4, gr5, gr6, seq2, seq4; bool graphicsmode; unsigned plane; void __iomem *iovram; if (nv_two_heads(dev)) NVSetOwner(dev, 0); NVSetEnablePalette(dev, 0, true); graphicsmode = NVReadVgaAttr(dev, 0, NV_CIO_AR_MODE_INDEX) & 1; NVSetEnablePalette(dev, 0, false); if (graphicsmode) /* graphics mode => framebuffer => no need to save */ return; NV_INFO(drm, "%sing VGA fonts\n", save ? 
"Sav" : "Restor"); /* map first 64KiB of VRAM, holds VGA fonts etc */ iovram = ioremap(pci_resource_start(dev->pdev, 1), 65536); if (!iovram) { NV_ERROR(drm, "Failed to map VRAM, " "cannot save/restore VGA fonts.\n"); return; } if (nv_two_heads(dev)) NVBlankScreen(dev, 1, true); NVBlankScreen(dev, 0, true); /* save control regs */ misc = NVReadPRMVIO(dev, 0, NV_PRMVIO_MISC__READ); seq2 = NVReadVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX); seq4 = NVReadVgaSeq(dev, 0, NV_VIO_SR_MEM_MODE_INDEX); gr4 = NVReadVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX); gr5 = NVReadVgaGr(dev, 0, NV_VIO_GX_MODE_INDEX); gr6 = NVReadVgaGr(dev, 0, NV_VIO_GX_MISC_INDEX); NVWritePRMVIO(dev, 0, NV_PRMVIO_MISC__WRITE, 0x67); NVWriteVgaSeq(dev, 0, NV_VIO_SR_MEM_MODE_INDEX, 0x6); NVWriteVgaGr(dev, 0, NV_VIO_GX_MODE_INDEX, 0x0); NVWriteVgaGr(dev, 0, NV_VIO_GX_MISC_INDEX, 0x5); /* store font in planes 0..3 */ for (plane = 0; plane < 4; plane++) nouveau_vga_font_io(dev, iovram, save, plane); /* restore control regs */ NVWritePRMVIO(dev, 0, NV_PRMVIO_MISC__WRITE, misc); NVWriteVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX, gr4); NVWriteVgaGr(dev, 0, NV_VIO_GX_MODE_INDEX, gr5); NVWriteVgaGr(dev, 0, NV_VIO_GX_MISC_INDEX, gr6); NVWriteVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX, seq2); NVWriteVgaSeq(dev, 0, NV_VIO_SR_MEM_MODE_INDEX, seq4); if (nv_two_heads(dev)) NVBlankScreen(dev, 1, false); NVBlankScreen(dev, 0, false); iounmap(iovram); } /* * mode state save/load */ static void rd_cio_state(struct drm_device *dev, int head, struct nv04_crtc_reg *crtcstate, int index) { crtcstate->CRTC[index] = NVReadVgaCrtc(dev, head, index); } static void wr_cio_state(struct drm_device *dev, int head, struct nv04_crtc_reg *crtcstate, int index) { NVWriteVgaCrtc(dev, head, index, crtcstate->CRTC[index]); } static void nv_save_state_ramdac(struct drm_device *dev, int head, struct nv04_mode_state *state) { struct nouveau_drm *drm = nouveau_drm(dev); struct nv04_crtc_reg *regp = &state->crtc_reg[head]; int i; if 
(nv_device(drm->device)->card_type >= NV_10) regp->nv10_cursync = NVReadRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC); nouveau_hw_get_pllvals(dev, head ? PLL_VPLL1 : PLL_VPLL0, &regp->pllvals); state->pllsel = NVReadRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT); if (nv_two_heads(dev)) state->sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK); if (nv_device(drm->device)->chipset == 0x11) regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11); regp->ramdac_gen_ctrl = NVReadRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL); if (nv_gf4_disp_arch(dev)) regp->ramdac_630 = NVReadRAMDAC(dev, head, NV_PRAMDAC_630); if (nv_device(drm->device)->chipset >= 0x30) regp->ramdac_634 = NVReadRAMDAC(dev, head, NV_PRAMDAC_634); regp->tv_setup = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP); regp->tv_vtotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VTOTAL); regp->tv_vskew = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VSKEW); regp->tv_vsync_delay = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VSYNC_DELAY); regp->tv_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HTOTAL); regp->tv_hskew = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSKEW); regp->tv_hsync_delay = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY); regp->tv_hsync_delay2 = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY2); for (i = 0; i < 7; i++) { uint32_t ramdac_reg = NV_PRAMDAC_FP_VDISPLAY_END + (i * 4); regp->fp_vert_regs[i] = NVReadRAMDAC(dev, head, ramdac_reg); regp->fp_horiz_regs[i] = NVReadRAMDAC(dev, head, ramdac_reg + 0x20); } if (nv_gf4_disp_arch(dev)) { regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_FP_DITHER); for (i = 0; i < 3; i++) { regp->dither_regs[i] = NVReadRAMDAC(dev, head, NV_PRAMDAC_850 + i * 4); regp->dither_regs[i + 3] = NVReadRAMDAC(dev, head, NV_PRAMDAC_85C + i * 4); } } regp->fp_control = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL); regp->fp_debug_0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_0); if (!nv_gf4_disp_arch(dev) && head == 0) { /* early chips don't allow access to PRAMDAC_TMDS_* without * the head 
A FPCLK on (nv11 even locks up) */ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_FP_DEBUG_0, regp->fp_debug_0 & ~NV_PRAMDAC_FP_DEBUG_0_PWRDOWN_FPCLK); } regp->fp_debug_1 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1); regp->fp_debug_2 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_2); regp->fp_margin_color = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_MARGIN_COLOR); if (nv_gf4_disp_arch(dev)) regp->ramdac_8c0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_8C0); if (nv_device(drm->device)->card_type == NV_40) { regp->ramdac_a20 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A20); regp->ramdac_a24 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A24); regp->ramdac_a34 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A34); for (i = 0; i < 38; i++) regp->ctv_regs[i] = NVReadRAMDAC(dev, head, NV_PRAMDAC_CTV + 4*i); } } static void nv_load_state_ramdac(struct drm_device *dev, int head, struct nv04_mode_state *state) { struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_clock *clk = nouveau_clock(drm->device); struct nv04_crtc_reg *regp = &state->crtc_reg[head]; uint32_t pllreg = head ? 
NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF; int i; if (nv_device(drm->device)->card_type >= NV_10) NVWriteRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC, regp->nv10_cursync); clk->pll_prog(clk, pllreg, &regp->pllvals); NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel); if (nv_two_heads(dev)) NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, state->sel_clk); if (nv_device(drm->device)->chipset == 0x11) NVWriteRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11, regp->dither); NVWriteRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL, regp->ramdac_gen_ctrl); if (nv_gf4_disp_arch(dev)) NVWriteRAMDAC(dev, head, NV_PRAMDAC_630, regp->ramdac_630); if (nv_device(drm->device)->chipset >= 0x30) NVWriteRAMDAC(dev, head, NV_PRAMDAC_634, regp->ramdac_634); NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP, regp->tv_setup); NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VTOTAL, regp->tv_vtotal); NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VSKEW, regp->tv_vskew); NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VSYNC_DELAY, regp->tv_vsync_delay); NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HTOTAL, regp->tv_htotal); NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSKEW, regp->tv_hskew); NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY, regp->tv_hsync_delay); NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY2, regp->tv_hsync_delay2); for (i = 0; i < 7; i++) { uint32_t ramdac_reg = NV_PRAMDAC_FP_VDISPLAY_END + (i * 4); NVWriteRAMDAC(dev, head, ramdac_reg, regp->fp_vert_regs[i]); NVWriteRAMDAC(dev, head, ramdac_reg + 0x20, regp->fp_horiz_regs[i]); } if (nv_gf4_disp_arch(dev)) { NVWriteRAMDAC(dev, head, NV_RAMDAC_FP_DITHER, regp->dither); for (i = 0; i < 3; i++) { NVWriteRAMDAC(dev, head, NV_PRAMDAC_850 + i * 4, regp->dither_regs[i]); NVWriteRAMDAC(dev, head, NV_PRAMDAC_85C + i * 4, regp->dither_regs[i + 3]); } } NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, regp->fp_control); NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_0, regp->fp_debug_0); NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1, regp->fp_debug_1); NVWriteRAMDAC(dev, head, 
NV_PRAMDAC_FP_DEBUG_2, regp->fp_debug_2); NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_MARGIN_COLOR, regp->fp_margin_color); if (nv_gf4_disp_arch(dev)) NVWriteRAMDAC(dev, head, NV_PRAMDAC_8C0, regp->ramdac_8c0); if (nv_device(drm->device)->card_type == NV_40) { NVWriteRAMDAC(dev, head, NV_PRAMDAC_A20, regp->ramdac_a20); NVWriteRAMDAC(dev, head, NV_PRAMDAC_A24, regp->ramdac_a24); NVWriteRAMDAC(dev, head, NV_PRAMDAC_A34, regp->ramdac_a34); for (i = 0; i < 38; i++) NVWriteRAMDAC(dev, head, NV_PRAMDAC_CTV + 4*i, regp->ctv_regs[i]); } } static void nv_save_state_vga(struct drm_device *dev, int head, struct nv04_mode_state *state) { struct nv04_crtc_reg *regp = &state->crtc_reg[head]; int i; regp->MiscOutReg = NVReadPRMVIO(dev, head, NV_PRMVIO_MISC__READ); for (i = 0; i < 25; i++) rd_cio_state(dev, head, regp, i); NVSetEnablePalette(dev, head, true); for (i = 0; i < 21; i++) regp->Attribute[i] = NVReadVgaAttr(dev, head, i); NVSetEnablePalette(dev, head, false); for (i = 0; i < 9; i++) regp->Graphics[i] = NVReadVgaGr(dev, head, i); for (i = 0; i < 5; i++) regp->Sequencer[i] = NVReadVgaSeq(dev, head, i); } static void nv_load_state_vga(struct drm_device *dev, int head, struct nv04_mode_state *state) { struct nv04_crtc_reg *regp = &state->crtc_reg[head]; int i; NVWritePRMVIO(dev, head, NV_PRMVIO_MISC__WRITE, regp->MiscOutReg); for (i = 0; i < 5; i++) NVWriteVgaSeq(dev, head, i, regp->Sequencer[i]); nv_lock_vga_crtc_base(dev, head, false); for (i = 0; i < 25; i++) wr_cio_state(dev, head, regp, i); nv_lock_vga_crtc_base(dev, head, true); for (i = 0; i < 9; i++) NVWriteVgaGr(dev, head, i, regp->Graphics[i]); NVSetEnablePalette(dev, head, true); for (i = 0; i < 21; i++) NVWriteVgaAttr(dev, head, i, regp->Attribute[i]); NVSetEnablePalette(dev, head, false); } static void nv_save_state_ext(struct drm_device *dev, int head, struct nv04_mode_state *state) { struct nouveau_drm *drm = nouveau_drm(dev); struct nv04_crtc_reg *regp = &state->crtc_reg[head]; int i; rd_cio_state(dev, head, 
regp, NV_CIO_CRE_LCD__INDEX); rd_cio_state(dev, head, regp, NV_CIO_CRE_RPC0_INDEX); rd_cio_state(dev, head, regp, NV_CIO_CRE_RPC1_INDEX); rd_cio_state(dev, head, regp, NV_CIO_CRE_LSR_INDEX); rd_cio_state(dev, head, regp, NV_CIO_CRE_PIXEL_INDEX); rd_cio_state(dev, head, regp, NV_CIO_CRE_HEB__INDEX); rd_cio_state(dev, head, regp, NV_CIO_CRE_ENH_INDEX); rd_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX); rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX); rd_cio_state(dev, head, regp, NV_CIO_CRE_21); if (nv_device(drm->device)->card_type >= NV_20) rd_cio_state(dev, head, regp, NV_CIO_CRE_47); if (nv_device(drm->device)->card_type >= NV_30) rd_cio_state(dev, head, regp, 0x9f); rd_cio_state(dev, head, regp, NV_CIO_CRE_49); rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX); rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX); rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX); rd_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX); if (nv_device(drm->device)->card_type >= NV_10) { regp->crtc_830 = NVReadCRTC(dev, head, NV_PCRTC_830); regp->crtc_834 = NVReadCRTC(dev, head, NV_PCRTC_834); if (nv_device(drm->device)->card_type >= NV_30) regp->gpio_ext = NVReadCRTC(dev, head, NV_PCRTC_GPIO_EXT); if (nv_device(drm->device)->card_type == NV_40) regp->crtc_850 = NVReadCRTC(dev, head, NV_PCRTC_850); if (nv_two_heads(dev)) regp->crtc_eng_ctrl = NVReadCRTC(dev, head, NV_PCRTC_ENGINE_CTRL); regp->cursor_cfg = NVReadCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG); } regp->crtc_cfg = NVReadCRTC(dev, head, NV_PCRTC_CONFIG); rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX); rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX); if (nv_device(drm->device)->card_type >= NV_10) { rd_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX); rd_cio_state(dev, head, regp, NV_CIO_CRE_CSB); rd_cio_state(dev, head, regp, NV_CIO_CRE_4B); rd_cio_state(dev, head, regp, NV_CIO_CRE_TVOUT_LATENCY); } /* NV11 and NV20 don't have this, they stop at 0x52. 
*/ if (nv_gf4_disp_arch(dev)) { rd_cio_state(dev, head, regp, NV_CIO_CRE_42); rd_cio_state(dev, head, regp, NV_CIO_CRE_53); rd_cio_state(dev, head, regp, NV_CIO_CRE_54); for (i = 0; i < 0x10; i++) regp->CR58[i] = NVReadVgaCrtc5758(dev, head, i); rd_cio_state(dev, head, regp, NV_CIO_CRE_59); rd_cio_state(dev, head, regp, NV_CIO_CRE_5B); rd_cio_state(dev, head, regp, NV_CIO_CRE_85); rd_cio_state(dev, head, regp, NV_CIO_CRE_86); } regp->fb_start = NVReadCRTC(dev, head, NV_PCRTC_START); } static void nv_load_state_ext(struct drm_device *dev, int head, struct nv04_mode_state *state) { struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_device *device = nv_device(drm->device); struct nouveau_timer *ptimer = nouveau_timer(device); struct nv04_crtc_reg *regp = &state->crtc_reg[head]; uint32_t reg900; int i; if (nv_device(drm->device)->card_type >= NV_10) { if (nv_two_heads(dev)) /* setting ENGINE_CTRL (EC) *must* come before * CIO_CRE_LCD, as writing CRE_LCD sets bits 16 & 17 in * EC that should not be overwritten by writing stale EC */ NVWriteCRTC(dev, head, NV_PCRTC_ENGINE_CTRL, regp->crtc_eng_ctrl); nv_wr32(device, NV_PVIDEO_STOP, 1); nv_wr32(device, NV_PVIDEO_INTR_EN, 0); nv_wr32(device, NV_PVIDEO_OFFSET_BUFF(0), 0); nv_wr32(device, NV_PVIDEO_OFFSET_BUFF(1), 0); nv_wr32(device, NV_PVIDEO_LIMIT(0), 0); //drm->fb_available_size - 1); nv_wr32(device, NV_PVIDEO_LIMIT(1), 0); //drm->fb_available_size - 1); nv_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), 0); //drm->fb_available_size - 1); nv_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), 0); //drm->fb_available_size - 1); nv_wr32(device, NV_PBUS_POWERCTRL_2, 0); NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg); NVWriteCRTC(dev, head, NV_PCRTC_830, regp->crtc_830); NVWriteCRTC(dev, head, NV_PCRTC_834, regp->crtc_834); if (nv_device(drm->device)->card_type >= NV_30) NVWriteCRTC(dev, head, NV_PCRTC_GPIO_EXT, regp->gpio_ext); if (nv_device(drm->device)->card_type == NV_40) { NVWriteCRTC(dev, head, NV_PCRTC_850, 
regp->crtc_850); reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900); if (regp->crtc_cfg == NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC) NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 | 0x10000); else NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 & ~0x10000); } } NVWriteCRTC(dev, head, NV_PCRTC_CONFIG, regp->crtc_cfg); wr_cio_state(dev, head, regp, NV_CIO_CRE_RPC0_INDEX); wr_cio_state(dev, head, regp, NV_CIO_CRE_RPC1_INDEX); wr_cio_state(dev, head, regp, NV_CIO_CRE_LSR_INDEX); wr_cio_state(dev, head, regp, NV_CIO_CRE_PIXEL_INDEX); wr_cio_state(dev, head, regp, NV_CIO_CRE_LCD__INDEX); wr_cio_state(dev, head, regp, NV_CIO_CRE_HEB__INDEX); wr_cio_state(dev, head, regp, NV_CIO_CRE_ENH_INDEX); wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX); wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX); if (nv_device(drm->device)->card_type >= NV_20) wr_cio_state(dev, head, regp, NV_CIO_CRE_47); if (nv_device(drm->device)->card_type >= NV_30) wr_cio_state(dev, head, regp, 0x9f); wr_cio_state(dev, head, regp, NV_CIO_CRE_49); wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX); wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX); wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX); if (nv_device(drm->device)->card_type == NV_40) nv_fix_nv40_hw_cursor(dev, head); wr_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX); wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX); wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX); if (nv_device(drm->device)->card_type >= NV_10) { wr_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX); wr_cio_state(dev, head, regp, NV_CIO_CRE_CSB); wr_cio_state(dev, head, regp, NV_CIO_CRE_4B); wr_cio_state(dev, head, regp, NV_CIO_CRE_TVOUT_LATENCY); } /* NV11 and NV20 stop at 0x52. */ if (nv_gf4_disp_arch(dev)) { if (nv_device(drm->device)->card_type == NV_10) { /* Not waiting for vertical retrace before modifying CRE_53/CRE_54 causes lockups. 
*/ nouveau_timer_wait_eq(ptimer, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8); nouveau_timer_wait_eq(ptimer, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0); } wr_cio_state(dev, head, regp, NV_CIO_CRE_42); wr_cio_state(dev, head, regp, NV_CIO_CRE_53); wr_cio_state(dev, head, regp, NV_CIO_CRE_54); for (i = 0; i < 0x10; i++) NVWriteVgaCrtc5758(dev, head, i, regp->CR58[i]); wr_cio_state(dev, head, regp, NV_CIO_CRE_59); wr_cio_state(dev, head, regp, NV_CIO_CRE_5B); wr_cio_state(dev, head, regp, NV_CIO_CRE_85); wr_cio_state(dev, head, regp, NV_CIO_CRE_86); } NVWriteCRTC(dev, head, NV_PCRTC_START, regp->fb_start); } static void nv_save_state_palette(struct drm_device *dev, int head, struct nv04_mode_state *state) { struct nouveau_device *device = nouveau_dev(dev); int head_offset = head * NV_PRMDIO_SIZE, i; nv_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset, NV_PRMDIO_PIXEL_MASK_MASK); nv_wr08(device, NV_PRMDIO_READ_MODE_ADDRESS + head_offset, 0x0); for (i = 0; i < 768; i++) { state->crtc_reg[head].DAC[i] = nv_rd08(device, NV_PRMDIO_PALETTE_DATA + head_offset); } NVSetEnablePalette(dev, head, false); } void nouveau_hw_load_state_palette(struct drm_device *dev, int head, struct nv04_mode_state *state) { struct nouveau_device *device = nouveau_dev(dev); int head_offset = head * NV_PRMDIO_SIZE, i; nv_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset, NV_PRMDIO_PIXEL_MASK_MASK); nv_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS + head_offset, 0x0); for (i = 0; i < 768; i++) { nv_wr08(device, NV_PRMDIO_PALETTE_DATA + head_offset, state->crtc_reg[head].DAC[i]); } NVSetEnablePalette(dev, head, false); } void nouveau_hw_save_state(struct drm_device *dev, int head, struct nv04_mode_state *state) { struct nouveau_drm *drm = nouveau_drm(dev); if (nv_device(drm->device)->chipset == 0x11) /* NB: no attempt is made to restore the bad pll later on */ nouveau_hw_fix_bad_vpll(dev, head); nv_save_state_ramdac(dev, head, state); nv_save_state_vga(dev, head, state); nv_save_state_palette(dev, head, 
state); nv_save_state_ext(dev, head, state); } void nouveau_hw_load_state(struct drm_device *dev, int head, struct nv04_mode_state *state) { NVVgaProtect(dev, head, true); nv_load_state_ramdac(dev, head, state); nv_load_state_ext(dev, head, state); nouveau_hw_load_state_palette(dev, head, state); nv_load_state_vga(dev, head, state); NVVgaProtect(dev, head, false); }
gpl-2.0
cwxda/android_kernel_xiaomi_armani_caf
drivers/gpio/gpio-msm-v1.c
2153
25938
/* linux/arch/arm/mach-msm/gpio.c * * Copyright (C) 2007 Google, Inc. * Copyright (c) 2009-2013, The Linux Foundation. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/bitops.h> #include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/module.h> #include <linux/platform_device.h> #include <asm/mach/irq.h> #include <mach/gpiomux.h> #include <mach/msm_iomap.h> #include <mach/msm_smem.h> #include <mach/proc_comm.h> /* see 80-VA736-2 Rev C pp 695-751 ** ** These are actually the *shadow* gpio registers, since the ** real ones (which allow full access) are only available to the ** ARM9 side of the world. ** ** Since the _BASE need to be page-aligned when we're mapping them ** to virtual addresses, adjust for the additional offset in these ** macros. 
*/ #if defined(CONFIG_ARCH_MSM7X30) #define MSM_GPIO1_REG(off) (MSM_GPIO1_BASE + (off)) #define MSM_GPIO2_REG(off) (MSM_GPIO2_BASE + 0x400 + (off)) #else #define MSM_GPIO1_REG(off) (MSM_GPIO1_BASE + 0x800 + (off)) #define MSM_GPIO2_REG(off) (MSM_GPIO2_BASE + 0xC00 + (off)) #endif #if defined(CONFIG_ARCH_MSM7X00A) || defined(CONFIG_ARCH_MSM7X25) ||\ defined(CONFIG_ARCH_MSM7X27) /* output value */ #define MSM_GPIO_OUT_0 MSM_GPIO1_REG(0x00) /* gpio 15-0 */ #define MSM_GPIO_OUT_1 MSM_GPIO2_REG(0x00) /* gpio 42-16 */ #define MSM_GPIO_OUT_2 MSM_GPIO1_REG(0x04) /* gpio 67-43 */ #define MSM_GPIO_OUT_3 MSM_GPIO1_REG(0x08) /* gpio 94-68 */ #define MSM_GPIO_OUT_4 MSM_GPIO1_REG(0x0C) /* gpio 106-95 */ #define MSM_GPIO_OUT_5 MSM_GPIO1_REG(0x50) /* gpio 107-121 */ /* same pin map as above, output enable */ #define MSM_GPIO_OE_0 MSM_GPIO1_REG(0x10) #define MSM_GPIO_OE_1 MSM_GPIO2_REG(0x08) #define MSM_GPIO_OE_2 MSM_GPIO1_REG(0x14) #define MSM_GPIO_OE_3 MSM_GPIO1_REG(0x18) #define MSM_GPIO_OE_4 MSM_GPIO1_REG(0x1C) #define MSM_GPIO_OE_5 MSM_GPIO1_REG(0x54) /* same pin map as above, input read */ #define MSM_GPIO_IN_0 MSM_GPIO1_REG(0x34) #define MSM_GPIO_IN_1 MSM_GPIO2_REG(0x20) #define MSM_GPIO_IN_2 MSM_GPIO1_REG(0x38) #define MSM_GPIO_IN_3 MSM_GPIO1_REG(0x3C) #define MSM_GPIO_IN_4 MSM_GPIO1_REG(0x40) #define MSM_GPIO_IN_5 MSM_GPIO1_REG(0x44) /* same pin map as above, 1=edge 0=level interrup */ #define MSM_GPIO_INT_EDGE_0 MSM_GPIO1_REG(0x60) #define MSM_GPIO_INT_EDGE_1 MSM_GPIO2_REG(0x50) #define MSM_GPIO_INT_EDGE_2 MSM_GPIO1_REG(0x64) #define MSM_GPIO_INT_EDGE_3 MSM_GPIO1_REG(0x68) #define MSM_GPIO_INT_EDGE_4 MSM_GPIO1_REG(0x6C) #define MSM_GPIO_INT_EDGE_5 MSM_GPIO1_REG(0xC0) /* same pin map as above, 1=positive 0=negative */ #define MSM_GPIO_INT_POS_0 MSM_GPIO1_REG(0x70) #define MSM_GPIO_INT_POS_1 MSM_GPIO2_REG(0x58) #define MSM_GPIO_INT_POS_2 MSM_GPIO1_REG(0x74) #define MSM_GPIO_INT_POS_3 MSM_GPIO1_REG(0x78) #define MSM_GPIO_INT_POS_4 MSM_GPIO1_REG(0x7C) #define 
MSM_GPIO_INT_POS_5 MSM_GPIO1_REG(0xBC) /* same pin map as above, interrupt enable */ #define MSM_GPIO_INT_EN_0 MSM_GPIO1_REG(0x80) #define MSM_GPIO_INT_EN_1 MSM_GPIO2_REG(0x60) #define MSM_GPIO_INT_EN_2 MSM_GPIO1_REG(0x84) #define MSM_GPIO_INT_EN_3 MSM_GPIO1_REG(0x88) #define MSM_GPIO_INT_EN_4 MSM_GPIO1_REG(0x8C) #define MSM_GPIO_INT_EN_5 MSM_GPIO1_REG(0xB8) /* same pin map as above, write 1 to clear interrupt */ #define MSM_GPIO_INT_CLEAR_0 MSM_GPIO1_REG(0x90) #define MSM_GPIO_INT_CLEAR_1 MSM_GPIO2_REG(0x68) #define MSM_GPIO_INT_CLEAR_2 MSM_GPIO1_REG(0x94) #define MSM_GPIO_INT_CLEAR_3 MSM_GPIO1_REG(0x98) #define MSM_GPIO_INT_CLEAR_4 MSM_GPIO1_REG(0x9C) #define MSM_GPIO_INT_CLEAR_5 MSM_GPIO1_REG(0xB4) /* same pin map as above, 1=interrupt pending */ #define MSM_GPIO_INT_STATUS_0 MSM_GPIO1_REG(0xA0) #define MSM_GPIO_INT_STATUS_1 MSM_GPIO2_REG(0x70) #define MSM_GPIO_INT_STATUS_2 MSM_GPIO1_REG(0xA4) #define MSM_GPIO_INT_STATUS_3 MSM_GPIO1_REG(0xA8) #define MSM_GPIO_INT_STATUS_4 MSM_GPIO1_REG(0xAC) #define MSM_GPIO_INT_STATUS_5 MSM_GPIO1_REG(0xB0) #endif #if defined(CONFIG_ARCH_MSM7X30) /* output value */ #define MSM_GPIO_OUT_0 MSM_GPIO1_REG(0x00) /* gpio 15-0 */ #define MSM_GPIO_OUT_1 MSM_GPIO2_REG(0x00) /* gpio 43-16 */ #define MSM_GPIO_OUT_2 MSM_GPIO1_REG(0x04) /* gpio 67-44 */ #define MSM_GPIO_OUT_3 MSM_GPIO1_REG(0x08) /* gpio 94-68 */ #define MSM_GPIO_OUT_4 MSM_GPIO1_REG(0x0C) /* gpio 106-95 */ #define MSM_GPIO_OUT_5 MSM_GPIO1_REG(0x50) /* gpio 133-107 */ #define MSM_GPIO_OUT_6 MSM_GPIO1_REG(0xC4) /* gpio 150-134 */ #define MSM_GPIO_OUT_7 MSM_GPIO1_REG(0x214) /* gpio 181-151 */ /* same pin map as above, output enable */ #define MSM_GPIO_OE_0 MSM_GPIO1_REG(0x10) #define MSM_GPIO_OE_1 MSM_GPIO2_REG(0x08) #define MSM_GPIO_OE_2 MSM_GPIO1_REG(0x14) #define MSM_GPIO_OE_3 MSM_GPIO1_REG(0x18) #define MSM_GPIO_OE_4 MSM_GPIO1_REG(0x1C) #define MSM_GPIO_OE_5 MSM_GPIO1_REG(0x54) #define MSM_GPIO_OE_6 MSM_GPIO1_REG(0xC8) #define MSM_GPIO_OE_7 MSM_GPIO1_REG(0x218) /* same pin 
map as above, input read */ #define MSM_GPIO_IN_0 MSM_GPIO1_REG(0x34) #define MSM_GPIO_IN_1 MSM_GPIO2_REG(0x20) #define MSM_GPIO_IN_2 MSM_GPIO1_REG(0x38) #define MSM_GPIO_IN_3 MSM_GPIO1_REG(0x3C) #define MSM_GPIO_IN_4 MSM_GPIO1_REG(0x40) #define MSM_GPIO_IN_5 MSM_GPIO1_REG(0x44) #define MSM_GPIO_IN_6 MSM_GPIO1_REG(0xCC) #define MSM_GPIO_IN_7 MSM_GPIO1_REG(0x21C) /* same pin map as above, 1=edge 0=level interrup */ #define MSM_GPIO_INT_EDGE_0 MSM_GPIO1_REG(0x60) #define MSM_GPIO_INT_EDGE_1 MSM_GPIO2_REG(0x50) #define MSM_GPIO_INT_EDGE_2 MSM_GPIO1_REG(0x64) #define MSM_GPIO_INT_EDGE_3 MSM_GPIO1_REG(0x68) #define MSM_GPIO_INT_EDGE_4 MSM_GPIO1_REG(0x6C) #define MSM_GPIO_INT_EDGE_5 MSM_GPIO1_REG(0xC0) #define MSM_GPIO_INT_EDGE_6 MSM_GPIO1_REG(0xD0) #define MSM_GPIO_INT_EDGE_7 MSM_GPIO1_REG(0x240) /* same pin map as above, 1=positive 0=negative */ #define MSM_GPIO_INT_POS_0 MSM_GPIO1_REG(0x70) #define MSM_GPIO_INT_POS_1 MSM_GPIO2_REG(0x58) #define MSM_GPIO_INT_POS_2 MSM_GPIO1_REG(0x74) #define MSM_GPIO_INT_POS_3 MSM_GPIO1_REG(0x78) #define MSM_GPIO_INT_POS_4 MSM_GPIO1_REG(0x7C) #define MSM_GPIO_INT_POS_5 MSM_GPIO1_REG(0xBC) #define MSM_GPIO_INT_POS_6 MSM_GPIO1_REG(0xD4) #define MSM_GPIO_INT_POS_7 MSM_GPIO1_REG(0x228) /* same pin map as above, interrupt enable */ #define MSM_GPIO_INT_EN_0 MSM_GPIO1_REG(0x80) #define MSM_GPIO_INT_EN_1 MSM_GPIO2_REG(0x60) #define MSM_GPIO_INT_EN_2 MSM_GPIO1_REG(0x84) #define MSM_GPIO_INT_EN_3 MSM_GPIO1_REG(0x88) #define MSM_GPIO_INT_EN_4 MSM_GPIO1_REG(0x8C) #define MSM_GPIO_INT_EN_5 MSM_GPIO1_REG(0xB8) #define MSM_GPIO_INT_EN_6 MSM_GPIO1_REG(0xD8) #define MSM_GPIO_INT_EN_7 MSM_GPIO1_REG(0x22C) /* same pin map as above, write 1 to clear interrupt */ #define MSM_GPIO_INT_CLEAR_0 MSM_GPIO1_REG(0x90) #define MSM_GPIO_INT_CLEAR_1 MSM_GPIO2_REG(0x68) #define MSM_GPIO_INT_CLEAR_2 MSM_GPIO1_REG(0x94) #define MSM_GPIO_INT_CLEAR_3 MSM_GPIO1_REG(0x98) #define MSM_GPIO_INT_CLEAR_4 MSM_GPIO1_REG(0x9C) #define MSM_GPIO_INT_CLEAR_5 MSM_GPIO1_REG(0xB4) 
#define MSM_GPIO_INT_CLEAR_6 MSM_GPIO1_REG(0xDC) #define MSM_GPIO_INT_CLEAR_7 MSM_GPIO1_REG(0x230) /* same pin map as above, 1=interrupt pending */ #define MSM_GPIO_INT_STATUS_0 MSM_GPIO1_REG(0xA0) #define MSM_GPIO_INT_STATUS_1 MSM_GPIO2_REG(0x70) #define MSM_GPIO_INT_STATUS_2 MSM_GPIO1_REG(0xA4) #define MSM_GPIO_INT_STATUS_3 MSM_GPIO1_REG(0xA8) #define MSM_GPIO_INT_STATUS_4 MSM_GPIO1_REG(0xAC) #define MSM_GPIO_INT_STATUS_5 MSM_GPIO1_REG(0xB0) #define MSM_GPIO_INT_STATUS_6 MSM_GPIO1_REG(0xE0) #define MSM_GPIO_INT_STATUS_7 MSM_GPIO1_REG(0x234) #endif enum { GPIO_DEBUG_SLEEP = 1U << 0, }; static int msm_gpio_debug_mask; module_param_named(debug_mask, msm_gpio_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP); #define FIRST_GPIO_IRQ MSM_GPIO_TO_INT(0) #define MSM_GPIO_BANK(bank, first, last) \ { \ .regs = { \ .out = MSM_GPIO_OUT_##bank, \ .in = MSM_GPIO_IN_##bank, \ .int_status = MSM_GPIO_INT_STATUS_##bank, \ .int_clear = MSM_GPIO_INT_CLEAR_##bank, \ .int_en = MSM_GPIO_INT_EN_##bank, \ .int_edge = MSM_GPIO_INT_EDGE_##bank, \ .int_pos = MSM_GPIO_INT_POS_##bank, \ .oe = MSM_GPIO_OE_##bank, \ }, \ .chip = { \ .base = (first), \ .ngpio = (last) - (first) + 1, \ .get = msm_gpio_get, \ .set = msm_gpio_set, \ .direction_input = msm_gpio_direction_input, \ .direction_output = msm_gpio_direction_output, \ .to_irq = msm_gpio_to_irq, \ .request = msm_gpio_request, \ .free = msm_gpio_free, \ } \ } #define MSM_GPIO_BROKEN_INT_CLEAR 1 struct msm_gpio_regs { void __iomem *out; void __iomem *in; void __iomem *int_status; void __iomem *int_clear; void __iomem *int_en; void __iomem *int_edge; void __iomem *int_pos; void __iomem *oe; }; struct msm_gpio_chip { spinlock_t lock; struct gpio_chip chip; struct msm_gpio_regs regs; #if MSM_GPIO_BROKEN_INT_CLEAR unsigned int_status_copy; #endif unsigned int both_edge_detect; unsigned int int_enable[2]; /* 0: awake, 1: sleep */ }; static int msm_gpio_write(struct msm_gpio_chip *msm_chip, unsigned offset, unsigned on) { unsigned mask = BIT(offset); 
unsigned val; val = __raw_readl(msm_chip->regs.out); if (on) __raw_writel(val | mask, msm_chip->regs.out); else __raw_writel(val & ~mask, msm_chip->regs.out); return 0; } static void msm_gpio_update_both_edge_detect(struct msm_gpio_chip *msm_chip) { int loop_limit = 100; unsigned pol, val, val2, intstat; do { val = __raw_readl(msm_chip->regs.in); pol = __raw_readl(msm_chip->regs.int_pos); pol = (pol & ~msm_chip->both_edge_detect) | (~val & msm_chip->both_edge_detect); __raw_writel(pol, msm_chip->regs.int_pos); intstat = __raw_readl(msm_chip->regs.int_status); val2 = __raw_readl(msm_chip->regs.in); if (((val ^ val2) & msm_chip->both_edge_detect & ~intstat) == 0) return; } while (loop_limit-- > 0); printk(KERN_ERR "msm_gpio_update_both_edge_detect, " "failed to reach stable state %x != %x\n", val, val2); } static int msm_gpio_clear_detect_status(struct msm_gpio_chip *msm_chip, unsigned offset) { unsigned bit = BIT(offset); #if MSM_GPIO_BROKEN_INT_CLEAR /* Save interrupts that already triggered before we loose them. */ /* Any interrupt that triggers between the read of int_status */ /* and the write to int_clear will still be lost though. 
*/ msm_chip->int_status_copy |= __raw_readl(msm_chip->regs.int_status); msm_chip->int_status_copy &= ~bit; #endif __raw_writel(bit, msm_chip->regs.int_clear); msm_gpio_update_both_edge_detect(msm_chip); return 0; } static int msm_gpio_direction_input(struct gpio_chip *chip, unsigned offset) { struct msm_gpio_chip *msm_chip; unsigned long irq_flags; msm_chip = container_of(chip, struct msm_gpio_chip, chip); spin_lock_irqsave(&msm_chip->lock, irq_flags); __raw_writel(__raw_readl(msm_chip->regs.oe) & ~BIT(offset), msm_chip->regs.oe); mb(); spin_unlock_irqrestore(&msm_chip->lock, irq_flags); return 0; } static int msm_gpio_direction_output(struct gpio_chip *chip, unsigned offset, int value) { struct msm_gpio_chip *msm_chip; unsigned long irq_flags; msm_chip = container_of(chip, struct msm_gpio_chip, chip); spin_lock_irqsave(&msm_chip->lock, irq_flags); msm_gpio_write(msm_chip, offset, value); __raw_writel(__raw_readl(msm_chip->regs.oe) | BIT(offset), msm_chip->regs.oe); mb(); spin_unlock_irqrestore(&msm_chip->lock, irq_flags); return 0; } static int msm_gpio_get(struct gpio_chip *chip, unsigned offset) { struct msm_gpio_chip *msm_chip; int rc; msm_chip = container_of(chip, struct msm_gpio_chip, chip); rc = (__raw_readl(msm_chip->regs.in) & (1U << offset)) ? 
1 : 0; mb(); return rc; } static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int value) { struct msm_gpio_chip *msm_chip; unsigned long irq_flags; msm_chip = container_of(chip, struct msm_gpio_chip, chip); spin_lock_irqsave(&msm_chip->lock, irq_flags); msm_gpio_write(msm_chip, offset, value); mb(); spin_unlock_irqrestore(&msm_chip->lock, irq_flags); } static int msm_gpio_to_irq(struct gpio_chip *chip, unsigned offset) { return MSM_GPIO_TO_INT(chip->base + offset); } #ifdef CONFIG_MSM_GPIOMUX static int msm_gpio_request(struct gpio_chip *chip, unsigned offset) { return msm_gpiomux_get(chip->base + offset); } static void msm_gpio_free(struct gpio_chip *chip, unsigned offset) { msm_gpiomux_put(chip->base + offset); } #else #define msm_gpio_request NULL #define msm_gpio_free NULL #endif struct msm_gpio_chip msm_gpio_chips[] = { #if defined(CONFIG_ARCH_MSM7X00A) MSM_GPIO_BANK(0, 0, 15), MSM_GPIO_BANK(1, 16, 42), MSM_GPIO_BANK(2, 43, 67), MSM_GPIO_BANK(3, 68, 94), MSM_GPIO_BANK(4, 95, 106), MSM_GPIO_BANK(5, 107, 121), #elif defined(CONFIG_ARCH_MSM7X25) || defined(CONFIG_ARCH_MSM7X27) MSM_GPIO_BANK(0, 0, 15), MSM_GPIO_BANK(1, 16, 42), MSM_GPIO_BANK(2, 43, 67), MSM_GPIO_BANK(3, 68, 94), MSM_GPIO_BANK(4, 95, 106), MSM_GPIO_BANK(5, 107, 132), #elif defined(CONFIG_ARCH_MSM7X30) MSM_GPIO_BANK(0, 0, 15), MSM_GPIO_BANK(1, 16, 43), MSM_GPIO_BANK(2, 44, 67), MSM_GPIO_BANK(3, 68, 94), MSM_GPIO_BANK(4, 95, 106), MSM_GPIO_BANK(5, 107, 133), MSM_GPIO_BANK(6, 134, 150), MSM_GPIO_BANK(7, 151, 181), #elif defined(CONFIG_ARCH_QSD8X50) MSM_GPIO_BANK(0, 0, 15), MSM_GPIO_BANK(1, 16, 42), MSM_GPIO_BANK(2, 43, 67), MSM_GPIO_BANK(3, 68, 94), MSM_GPIO_BANK(4, 95, 103), MSM_GPIO_BANK(5, 104, 121), MSM_GPIO_BANK(6, 122, 152), MSM_GPIO_BANK(7, 153, 164), #endif }; static void msm_gpio_irq_ack(struct irq_data *d) { unsigned long irq_flags; struct msm_gpio_chip *msm_chip = irq_get_chip_data(d->irq); spin_lock_irqsave(&msm_chip->lock, irq_flags); msm_gpio_clear_detect_status(msm_chip, 
d->irq - gpio_to_irq(msm_chip->chip.base)); spin_unlock_irqrestore(&msm_chip->lock, irq_flags); } static void msm_gpio_irq_mask(struct irq_data *d) { unsigned long irq_flags; struct msm_gpio_chip *msm_chip = irq_get_chip_data(d->irq); unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base); spin_lock_irqsave(&msm_chip->lock, irq_flags); /* level triggered interrupts are also latched */ if (!(__raw_readl(msm_chip->regs.int_edge) & BIT(offset))) msm_gpio_clear_detect_status(msm_chip, offset); msm_chip->int_enable[0] &= ~BIT(offset); __raw_writel(msm_chip->int_enable[0], msm_chip->regs.int_en); mb(); spin_unlock_irqrestore(&msm_chip->lock, irq_flags); } static void msm_gpio_irq_unmask(struct irq_data *d) { unsigned long irq_flags; struct msm_gpio_chip *msm_chip = irq_get_chip_data(d->irq); unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base); spin_lock_irqsave(&msm_chip->lock, irq_flags); /* level triggered interrupts are also latched */ if (!(__raw_readl(msm_chip->regs.int_edge) & BIT(offset))) msm_gpio_clear_detect_status(msm_chip, offset); msm_chip->int_enable[0] |= BIT(offset); __raw_writel(msm_chip->int_enable[0], msm_chip->regs.int_en); mb(); spin_unlock_irqrestore(&msm_chip->lock, irq_flags); } static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on) { unsigned long irq_flags; struct msm_gpio_chip *msm_chip = irq_get_chip_data(d->irq); unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base); spin_lock_irqsave(&msm_chip->lock, irq_flags); if (on) msm_chip->int_enable[1] |= BIT(offset); else msm_chip->int_enable[1] &= ~BIT(offset); spin_unlock_irqrestore(&msm_chip->lock, irq_flags); return 0; } static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type) { unsigned long irq_flags; struct msm_gpio_chip *msm_chip = irq_get_chip_data(d->irq); unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base); unsigned val, mask = BIT(offset); spin_lock_irqsave(&msm_chip->lock, irq_flags); val = 
__raw_readl(msm_chip->regs.int_edge); if (flow_type & IRQ_TYPE_EDGE_BOTH) { __raw_writel(val | mask, msm_chip->regs.int_edge); __irq_set_handler_locked(d->irq, handle_edge_irq); } else { __raw_writel(val & ~mask, msm_chip->regs.int_edge); __irq_set_handler_locked(d->irq, handle_level_irq); } if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) { msm_chip->both_edge_detect |= mask; msm_gpio_update_both_edge_detect(msm_chip); } else { msm_chip->both_edge_detect &= ~mask; val = __raw_readl(msm_chip->regs.int_pos); if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_HIGH)) __raw_writel(val | mask, msm_chip->regs.int_pos); else __raw_writel(val & ~mask, msm_chip->regs.int_pos); } mb(); spin_unlock_irqrestore(&msm_chip->lock, irq_flags); return 0; } static void msm_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) { int i, j, mask; unsigned val; struct irq_chip *chip = irq_desc_get_chip(desc); chained_irq_enter(chip, desc); for (i = 0; i < ARRAY_SIZE(msm_gpio_chips); i++) { struct msm_gpio_chip *msm_chip = &msm_gpio_chips[i]; val = __raw_readl(msm_chip->regs.int_status); val &= msm_chip->int_enable[0]; while (val) { mask = val & -val; j = fls(mask) - 1; /* printk("%s %08x %08x bit %d gpio %d irq %d\n", __func__, v, m, j, msm_chip->chip.start + j, FIRST_GPIO_IRQ + msm_chip->chip.start + j); */ val &= ~mask; generic_handle_irq(FIRST_GPIO_IRQ + msm_chip->chip.base + j); } } chained_irq_exit(chip, desc); } static struct irq_chip msm_gpio_irq_chip = { .name = "msmgpio", .irq_ack = msm_gpio_irq_ack, .irq_mask = msm_gpio_irq_mask, .irq_unmask = msm_gpio_irq_unmask, .irq_set_wake = msm_gpio_irq_set_wake, .irq_set_type = msm_gpio_irq_set_type, }; #define NUM_GPIO_SMEM_BANKS 6 #define GPIO_SMEM_NUM_GROUPS 2 #define GPIO_SMEM_MAX_PC_INTERRUPTS 8 struct tramp_gpio_smem { uint16_t num_fired[GPIO_SMEM_NUM_GROUPS]; uint16_t fired[GPIO_SMEM_NUM_GROUPS][GPIO_SMEM_MAX_PC_INTERRUPTS]; uint32_t enabled[NUM_GPIO_SMEM_BANKS]; uint32_t detection[NUM_GPIO_SMEM_BANKS]; uint32_t 
polarity[NUM_GPIO_SMEM_BANKS]; }; static void msm_gpio_sleep_int(unsigned long arg) { int i, j; struct tramp_gpio_smem *smem_gpio; BUILD_BUG_ON(NR_GPIO_IRQS > NUM_GPIO_SMEM_BANKS * 32); smem_gpio = smem_alloc(SMEM_GPIO_INT, sizeof(*smem_gpio)); if (smem_gpio == NULL) return; local_irq_disable(); for (i = 0; i < GPIO_SMEM_NUM_GROUPS; i++) { int count = smem_gpio->num_fired[i]; for (j = 0; j < count; j++) { /* TODO: Check mask */ generic_handle_irq( MSM_GPIO_TO_INT(smem_gpio->fired[i][j])); } } local_irq_enable(); } static DECLARE_TASKLET(msm_gpio_sleep_int_tasklet, msm_gpio_sleep_int, 0); void msm_gpio_enter_sleep(int from_idle) { int i; struct tramp_gpio_smem *smem_gpio; smem_gpio = smem_alloc(SMEM_GPIO_INT, sizeof(*smem_gpio)); if (smem_gpio) { for (i = 0; i < ARRAY_SIZE(smem_gpio->enabled); i++) { smem_gpio->enabled[i] = 0; smem_gpio->detection[i] = 0; smem_gpio->polarity[i] = 0; } } for (i = 0; i < ARRAY_SIZE(msm_gpio_chips); i++) { __raw_writel(msm_gpio_chips[i].int_enable[!from_idle], msm_gpio_chips[i].regs.int_en); if (smem_gpio) { uint32_t tmp; int start, index, shiftl, shiftr; start = msm_gpio_chips[i].chip.base; index = start / 32; shiftl = start % 32; shiftr = 32 - shiftl; tmp = msm_gpio_chips[i].int_enable[!from_idle]; smem_gpio->enabled[index] |= tmp << shiftl; smem_gpio->enabled[index+1] |= tmp >> shiftr; smem_gpio->detection[index] |= __raw_readl(msm_gpio_chips[i].regs.int_edge) << shiftl; smem_gpio->detection[index+1] |= __raw_readl(msm_gpio_chips[i].regs.int_edge) >> shiftr; smem_gpio->polarity[index] |= __raw_readl(msm_gpio_chips[i].regs.int_pos) << shiftl; smem_gpio->polarity[index+1] |= __raw_readl(msm_gpio_chips[i].regs.int_pos) >> shiftr; } } mb(); if (smem_gpio) { if (msm_gpio_debug_mask & GPIO_DEBUG_SLEEP) for (i = 0; i < ARRAY_SIZE(smem_gpio->enabled); i++) { printk("msm_gpio_enter_sleep gpio %d-%d: enable" " %08x, edge %08x, polarity %08x\n", i * 32, i * 32 + 31, smem_gpio->enabled[i], smem_gpio->detection[i], smem_gpio->polarity[i]); } for 
(i = 0; i < GPIO_SMEM_NUM_GROUPS; i++) smem_gpio->num_fired[i] = 0; } } void msm_gpio_exit_sleep(void) { int i; struct tramp_gpio_smem *smem_gpio; smem_gpio = smem_alloc(SMEM_GPIO_INT, sizeof(*smem_gpio)); for (i = 0; i < ARRAY_SIZE(msm_gpio_chips); i++) { __raw_writel(msm_gpio_chips[i].int_enable[0], msm_gpio_chips[i].regs.int_en); } mb(); if (smem_gpio && (smem_gpio->num_fired[0] || smem_gpio->num_fired[1])) { if (msm_gpio_debug_mask & GPIO_DEBUG_SLEEP) printk(KERN_INFO "gpio: fired %x %x\n", smem_gpio->num_fired[0], smem_gpio->num_fired[1]); tasklet_schedule(&msm_gpio_sleep_int_tasklet); } } int gpio_tlmm_config(unsigned config, unsigned disable) { return msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &config, &disable); } EXPORT_SYMBOL(gpio_tlmm_config); int msm_gpios_request_enable(const struct msm_gpio *table, int size) { int rc = msm_gpios_request(table, size); if (rc) return rc; rc = msm_gpios_enable(table, size); if (rc) msm_gpios_free(table, size); return rc; } EXPORT_SYMBOL(msm_gpios_request_enable); void msm_gpios_disable_free(const struct msm_gpio *table, int size) { msm_gpios_disable(table, size); msm_gpios_free(table, size); } EXPORT_SYMBOL(msm_gpios_disable_free); int msm_gpios_request(const struct msm_gpio *table, int size) { int rc; int i; const struct msm_gpio *g; for (i = 0; i < size; i++) { g = table + i; rc = gpio_request(GPIO_PIN(g->gpio_cfg), g->label); if (rc) { pr_err("gpio_request(%d) <%s> failed: %d\n", GPIO_PIN(g->gpio_cfg), g->label ?: "?", rc); goto err; } } return 0; err: msm_gpios_free(table, i); return rc; } EXPORT_SYMBOL(msm_gpios_request); void msm_gpios_free(const struct msm_gpio *table, int size) { int i; const struct msm_gpio *g; for (i = size-1; i >= 0; i--) { g = table + i; gpio_free(GPIO_PIN(g->gpio_cfg)); } } EXPORT_SYMBOL(msm_gpios_free); int msm_gpios_enable(const struct msm_gpio *table, int size) { int rc; int i; const struct msm_gpio *g; for (i = 0; i < size; i++) { g = table + i; rc = gpio_tlmm_config(g->gpio_cfg, 
GPIO_CFG_ENABLE); if (rc) { pr_err("gpio_tlmm_config(0x%08x, GPIO_CFG_ENABLE)" " <%s> failed: %d\n", g->gpio_cfg, g->label ?: "?", rc); pr_err("pin %d func %d dir %d pull %d drvstr %d\n", GPIO_PIN(g->gpio_cfg), GPIO_FUNC(g->gpio_cfg), GPIO_DIR(g->gpio_cfg), GPIO_PULL(g->gpio_cfg), GPIO_DRVSTR(g->gpio_cfg)); goto err; } } return 0; err: msm_gpios_disable(table, i); return rc; } EXPORT_SYMBOL(msm_gpios_enable); int msm_gpios_disable(const struct msm_gpio *table, int size) { int rc = 0; int i; const struct msm_gpio *g; for (i = size-1; i >= 0; i--) { int tmp; g = table + i; tmp = gpio_tlmm_config(g->gpio_cfg, GPIO_CFG_DISABLE); if (tmp) { pr_err("gpio_tlmm_config(0x%08x, GPIO_CFG_DISABLE)" " <%s> failed: %d\n", g->gpio_cfg, g->label ?: "?", rc); pr_err("pin %d func %d dir %d pull %d drvstr %d\n", GPIO_PIN(g->gpio_cfg), GPIO_FUNC(g->gpio_cfg), GPIO_DIR(g->gpio_cfg), GPIO_PULL(g->gpio_cfg), GPIO_DRVSTR(g->gpio_cfg)); if (!rc) rc = tmp; } } return rc; } EXPORT_SYMBOL(msm_gpios_disable); /* Locate the GPIO_OUT register for the given GPIO and return its address * and the bit position of the gpio's bit within the register. * * This function is used by gpiomux-v1 in order to support output transitions. 
*/ void msm_gpio_find_out(const unsigned gpio, void __iomem **out, unsigned *offset) { struct msm_gpio_chip *msm_chip = msm_gpio_chips; while (gpio >= msm_chip->chip.base + msm_chip->chip.ngpio) ++msm_chip; *out = msm_chip->regs.out; *offset = gpio - msm_chip->chip.base; } static int __devinit msm_gpio_probe(struct platform_device *dev) { int i, j = 0; int grp_irq; for (i = FIRST_GPIO_IRQ; i < FIRST_GPIO_IRQ + NR_GPIO_IRQS; i++) { if (i - FIRST_GPIO_IRQ >= msm_gpio_chips[j].chip.base + msm_gpio_chips[j].chip.ngpio) j++; irq_set_chip_data(i, &msm_gpio_chips[j]); irq_set_chip_and_handler(i, &msm_gpio_irq_chip, handle_edge_irq); set_irq_flags(i, IRQF_VALID); } for (i = 0; i < dev->num_resources; i++) { grp_irq = platform_get_irq(dev, i); if (grp_irq < 0) return -ENXIO; irq_set_chained_handler(grp_irq, msm_gpio_irq_handler); irq_set_irq_wake(grp_irq, (i + 1)); } for (i = 0; i < ARRAY_SIZE(msm_gpio_chips); i++) { spin_lock_init(&msm_gpio_chips[i].lock); __raw_writel(0, msm_gpio_chips[i].regs.int_en); gpiochip_add(&msm_gpio_chips[i].chip); } mb(); return 0; } static struct platform_driver msm_gpio_driver = { .probe = msm_gpio_probe, .driver = { .name = "msmgpio", .owner = THIS_MODULE, }, }; static int __init msm_gpio_init(void) { return platform_driver_register(&msm_gpio_driver); } postcore_initcall(msm_gpio_init);
gpl-2.0
Mazout360/hammerhead-3.10
drivers/net/ethernet/pasemi/pasemi_mac.c
2153
48346
/* * Copyright (C) 2006-2007 PA Semi, Inc * * Driver for the PA Semi PWRficient onchip 1G/10G Ethernet MACs * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/dmaengine.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/of_mdio.h> #include <linux/etherdevice.h> #include <asm/dma-mapping.h> #include <linux/in.h> #include <linux/skbuff.h> #include <linux/ip.h> #include <linux/tcp.h> #include <net/checksum.h> #include <linux/inet_lro.h> #include <linux/prefetch.h> #include <asm/irq.h> #include <asm/firmware.h> #include <asm/pasemi_dma.h> #include "pasemi_mac.h" /* We have our own align, since ppc64 in general has it at 0 because * of design flaws in some of the server bridge chips. However, for * PWRficient doing the unaligned copies is more expensive than doing * unaligned DMA, so make sure the data is aligned instead. 
*/ #define LOCAL_SKB_ALIGN 2 /* TODO list * * - Multicast support * - Large MTU support * - SW LRO * - Multiqueue RX/TX */ #define LRO_MAX_AGGR 64 #define PE_MIN_MTU 64 #define PE_MAX_MTU 9000 #define PE_DEF_MTU ETH_DATA_LEN #define DEFAULT_MSG_ENABLE \ (NETIF_MSG_DRV | \ NETIF_MSG_PROBE | \ NETIF_MSG_LINK | \ NETIF_MSG_TIMER | \ NETIF_MSG_IFDOWN | \ NETIF_MSG_IFUP | \ NETIF_MSG_RX_ERR | \ NETIF_MSG_TX_ERR) MODULE_LICENSE("GPL"); MODULE_AUTHOR ("Olof Johansson <olof@lixom.net>"); MODULE_DESCRIPTION("PA Semi PWRficient Ethernet driver"); static int debug = -1; /* -1 == use DEFAULT_MSG_ENABLE as value */ module_param(debug, int, 0); MODULE_PARM_DESC(debug, "PA Semi MAC bitmapped debugging message enable value"); extern const struct ethtool_ops pasemi_mac_ethtool_ops; static int translation_enabled(void) { #if defined(CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE) return 1; #else return firmware_has_feature(FW_FEATURE_LPAR); #endif } static void write_iob_reg(unsigned int reg, unsigned int val) { pasemi_write_iob_reg(reg, val); } static unsigned int read_mac_reg(const struct pasemi_mac *mac, unsigned int reg) { return pasemi_read_mac_reg(mac->dma_if, reg); } static void write_mac_reg(const struct pasemi_mac *mac, unsigned int reg, unsigned int val) { pasemi_write_mac_reg(mac->dma_if, reg, val); } static unsigned int read_dma_reg(unsigned int reg) { return pasemi_read_dma_reg(reg); } static void write_dma_reg(unsigned int reg, unsigned int val) { pasemi_write_dma_reg(reg, val); } static struct pasemi_mac_rxring *rx_ring(const struct pasemi_mac *mac) { return mac->rx; } static struct pasemi_mac_txring *tx_ring(const struct pasemi_mac *mac) { return mac->tx; } static inline void prefetch_skb(const struct sk_buff *skb) { const void *d = skb; prefetch(d); prefetch(d+64); prefetch(d+128); prefetch(d+192); } static int mac_to_intf(struct pasemi_mac *mac) { struct pci_dev *pdev = mac->pdev; u32 tmp; int nintf, off, i, j; int devfn = pdev->devfn; tmp = read_dma_reg(PAS_DMA_CAP_IFI); nintf 
= (tmp & PAS_DMA_CAP_IFI_NIN_M) >> PAS_DMA_CAP_IFI_NIN_S; off = (tmp & PAS_DMA_CAP_IFI_IOFF_M) >> PAS_DMA_CAP_IFI_IOFF_S; /* IOFF contains the offset to the registers containing the * DMA interface-to-MAC-pci-id mappings, and NIN contains number * of total interfaces. Each register contains 4 devfns. * Just do a linear search until we find the devfn of the MAC * we're trying to look up. */ for (i = 0; i < (nintf+3)/4; i++) { tmp = read_dma_reg(off+4*i); for (j = 0; j < 4; j++) { if (((tmp >> (8*j)) & 0xff) == devfn) return i*4 + j; } } return -1; } static void pasemi_mac_intf_disable(struct pasemi_mac *mac) { unsigned int flags; flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG); flags &= ~PAS_MAC_CFG_PCFG_PE; write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags); } static void pasemi_mac_intf_enable(struct pasemi_mac *mac) { unsigned int flags; flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG); flags |= PAS_MAC_CFG_PCFG_PE; write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags); } static int pasemi_get_mac_addr(struct pasemi_mac *mac) { struct pci_dev *pdev = mac->pdev; struct device_node *dn = pci_device_to_OF_node(pdev); int len; const u8 *maddr; u8 addr[6]; if (!dn) { dev_dbg(&pdev->dev, "No device node for mac, not configuring\n"); return -ENOENT; } maddr = of_get_property(dn, "local-mac-address", &len); if (maddr && len == 6) { memcpy(mac->mac_addr, maddr, 6); return 0; } /* Some old versions of firmware mistakenly uses mac-address * (and as a string) instead of a byte array in local-mac-address. 
*/ if (maddr == NULL) maddr = of_get_property(dn, "mac-address", NULL); if (maddr == NULL) { dev_warn(&pdev->dev, "no mac address in device tree, not configuring\n"); return -ENOENT; } if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &addr[0], &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) != 6) { dev_warn(&pdev->dev, "can't parse mac address, not configuring\n"); return -EINVAL; } memcpy(mac->mac_addr, addr, 6); return 0; } static int pasemi_mac_set_mac_addr(struct net_device *dev, void *p) { struct pasemi_mac *mac = netdev_priv(dev); struct sockaddr *addr = p; unsigned int adr0, adr1; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); adr0 = dev->dev_addr[2] << 24 | dev->dev_addr[3] << 16 | dev->dev_addr[4] << 8 | dev->dev_addr[5]; adr1 = read_mac_reg(mac, PAS_MAC_CFG_ADR1); adr1 &= ~0xffff; adr1 |= dev->dev_addr[0] << 8 | dev->dev_addr[1]; pasemi_mac_intf_disable(mac); write_mac_reg(mac, PAS_MAC_CFG_ADR0, adr0); write_mac_reg(mac, PAS_MAC_CFG_ADR1, adr1); pasemi_mac_intf_enable(mac); return 0; } static int get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph, u64 *hdr_flags, void *data) { u64 macrx = (u64) data; unsigned int ip_len; struct iphdr *iph; /* IPv4 header checksum failed */ if ((macrx & XCT_MACRX_HTY_M) != XCT_MACRX_HTY_IPV4_OK) return -1; /* non tcp packet */ skb_reset_network_header(skb); iph = ip_hdr(skb); if (iph->protocol != IPPROTO_TCP) return -1; ip_len = ip_hdrlen(skb); skb_set_transport_header(skb, ip_len); *tcph = tcp_hdr(skb); /* check if ip header and tcp header are complete */ if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb)) return -1; *hdr_flags = LRO_IPV4 | LRO_TCP; *iphdr = iph; return 0; } static int pasemi_mac_unmap_tx_skb(struct pasemi_mac *mac, const int nfrags, struct sk_buff *skb, const dma_addr_t *dmas) { int f; struct pci_dev *pdev = mac->dma_pdev; pci_unmap_single(pdev, dmas[0], skb_headlen(skb), PCI_DMA_TODEVICE); for (f = 0; f < nfrags; f++) { 
        const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

        pci_unmap_page(pdev, dmas[f+1], skb_frag_size(frag),
                   PCI_DMA_TODEVICE);
    }
    dev_kfree_skb_irq(skb);

    /* Freed descriptor slot + main SKB ptr + nfrags additional ptrs,
     * aligned up to a power of 2
     */
    return (nfrags + 3) & ~1;
}

/*
 * Allocate and program one checksum-offload function channel:
 * a TX DMA channel, its descriptor ring, two event flags used to
 * handshake with the real TX channel, and a function engine slot.
 * Returns the new ring, or NULL on any allocation failure (all
 * partially acquired resources are released via the goto chain).
 */
static struct pasemi_mac_csring *pasemi_mac_setup_csring(struct pasemi_mac *mac)
{
    struct pasemi_mac_csring *ring;
    u32 val;
    unsigned int cfg;
    int chno;

    ring = pasemi_dma_alloc_chan(TXCHAN, sizeof(struct pasemi_mac_csring),
                     offsetof(struct pasemi_mac_csring, chan));

    if (!ring) {
        dev_err(&mac->pdev->dev, "Can't allocate checksum channel\n");
        goto out_chan;
    }

    chno = ring->chan.chno;

    ring->size = CS_RING_SIZE;
    ring->next_to_fill = 0;

    /* Allocate descriptors */
    if (pasemi_dma_alloc_ring(&ring->chan, CS_RING_SIZE))
        goto out_ring_desc;

    /* Program the ring base address (low/high) and size */
    write_dma_reg(PAS_DMA_TXCHAN_BASEL(chno),
              PAS_DMA_TXCHAN_BASEL_BRBL(ring->chan.ring_dma));
    val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32);
    val |= PAS_DMA_TXCHAN_BASEU_SIZ(CS_RING_SIZE >> 3);

    write_dma_reg(PAS_DMA_TXCHAN_BASEU(chno), val);

    /* Two alternating event flags for handshaking with the TX channel */
    ring->events[0] = pasemi_dma_alloc_flag();
    ring->events[1] = pasemi_dma_alloc_flag();
    if (ring->events[0] < 0 || ring->events[1] < 0)
        goto out_flags;

    pasemi_dma_clear_flag(ring->events[0]);
    pasemi_dma_clear_flag(ring->events[1]);

    ring->fun = pasemi_dma_alloc_fun();
    if (ring->fun < 0)
        goto out_fun;

    cfg = PAS_DMA_TXCHAN_CFG_TY_FUNC | PAS_DMA_TXCHAN_CFG_UP |
          PAS_DMA_TXCHAN_CFG_TATTR(ring->fun) |
          PAS_DMA_TXCHAN_CFG_LPSQ | PAS_DMA_TXCHAN_CFG_LPDQ;

    if (translation_enabled())
        cfg |= PAS_DMA_TXCHAN_CFG_TRD | PAS_DMA_TXCHAN_CFG_TRR;

    write_dma_reg(PAS_DMA_TXCHAN_CFG(chno), cfg);

    /* enable channel */
    pasemi_dma_start_chan(&ring->chan, PAS_DMA_TXCHAN_TCMDSTA_SZ |
                      PAS_DMA_TXCHAN_TCMDSTA_DB |
                      PAS_DMA_TXCHAN_TCMDSTA_DE |
                      PAS_DMA_TXCHAN_TCMDSTA_DA);

    return ring;

out_fun:
out_flags:
    /* Flags may be partially allocated; free only valid ones */
    if (ring->events[0] >= 0)
        pasemi_dma_free_flag(ring->events[0]);
    if (ring->events[1] >= 0)
        pasemi_dma_free_flag(ring->events[1]);
    pasemi_dma_free_ring(&ring->chan);
out_ring_desc:
    pasemi_dma_free_chan(&ring->chan);
out_chan:
    return NULL;
}

/*
 * Allocate the checksum channels for this MAC: one for GMAC, two for
 * XAUI (10G) interfaces.  mac->num_cs is set to the number that were
 * successfully created (possibly zero).
 */
static void pasemi_mac_setup_csrings(struct pasemi_mac *mac)
{
    int i;

    mac->cs[0] = pasemi_mac_setup_csring(mac);
    if (mac->type == MAC_TYPE_XAUI)
        mac->cs[1] = pasemi_mac_setup_csring(mac);
    else
        mac->cs[1] = 0;

    for (i = 0; i < MAX_CS; i++)
        if (mac->cs[i])
            mac->num_cs++;
}

/* Stop a checksum channel and release every resource setup_csring took. */
static void pasemi_mac_free_csring(struct pasemi_mac_csring *csring)
{
    pasemi_dma_stop_chan(&csring->chan);
    pasemi_dma_free_flag(csring->events[0]);
    pasemi_dma_free_flag(csring->events[1]);
    pasemi_dma_free_ring(&csring->chan);
    pasemi_dma_free_chan(&csring->chan);
    pasemi_dma_free_fun(csring->fun);
}

/*
 * Allocate and program the RX DMA channel, its descriptor ring, the
 * per-slot bookkeeping array and the coherent buffer-pointer ring, then
 * configure both the RX channel and the RX interface registers.
 * Returns 0 on success, -ENOMEM on any allocation failure.
 */
static int pasemi_mac_setup_rx_resources(const struct net_device *dev)
{
    struct pasemi_mac_rxring *ring;
    struct pasemi_mac *mac = netdev_priv(dev);
    int chno;
    unsigned int cfg;

    ring = pasemi_dma_alloc_chan(RXCHAN, sizeof(struct pasemi_mac_rxring),
                     offsetof(struct pasemi_mac_rxring, chan));

    if (!ring) {
        dev_err(&mac->pdev->dev, "Can't allocate RX channel\n");
        goto out_chan;
    }
    chno = ring->chan.chno;

    spin_lock_init(&ring->lock);

    ring->size = RX_RING_SIZE;
    ring->ring_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
                  RX_RING_SIZE,
                  GFP_KERNEL);

    if (!ring->ring_info)
        goto out_ring_info;

    /* Allocate descriptors */
    if (pasemi_dma_alloc_ring(&ring->chan, RX_RING_SIZE))
        goto out_ring_desc;

    /* Coherent ring of buffer pointers handed to the hardware */
    ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev,
                       RX_RING_SIZE * sizeof(u64),
                       &ring->buf_dma,
                       GFP_KERNEL | __GFP_ZERO);
    if (!ring->buffers)
        goto out_ring_desc;

    write_dma_reg(PAS_DMA_RXCHAN_BASEL(chno),
              PAS_DMA_RXCHAN_BASEL_BRBL(ring->chan.ring_dma));

    write_dma_reg(PAS_DMA_RXCHAN_BASEU(chno),
              PAS_DMA_RXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32) |
              PAS_DMA_RXCHAN_BASEU_SIZ(RX_RING_SIZE >> 3));

    cfg = PAS_DMA_RXCHAN_CFG_HBU(2);

    if (translation_enabled())
        cfg |= PAS_DMA_RXCHAN_CFG_CTR;

    write_dma_reg(PAS_DMA_RXCHAN_CFG(chno), cfg);

    write_dma_reg(PAS_DMA_RXINT_BASEL(mac->dma_if),
              PAS_DMA_RXINT_BASEL_BRBL(ring->buf_dma));

    write_dma_reg(PAS_DMA_RXINT_BASEU(mac->dma_if),
              PAS_DMA_RXINT_BASEU_BRBH(ring->buf_dma >> 32) |
              PAS_DMA_RXINT_BASEU_SIZ(RX_RING_SIZE >> 3));

    cfg = PAS_DMA_RXINT_CFG_DHL(2) | PAS_DMA_RXINT_CFG_L2 |
          PAS_DMA_RXINT_CFG_LW | PAS_DMA_RXINT_CFG_RBP |
          PAS_DMA_RXINT_CFG_HEN;

    if (translation_enabled())
        cfg |= PAS_DMA_RXINT_CFG_ITRR | PAS_DMA_RXINT_CFG_ITR;

    write_dma_reg(PAS_DMA_RXINT_CFG(mac->dma_if), cfg);

    ring->next_to_fill = 0;
    ring->next_to_clean = 0;
    ring->mac = mac;
    mac->rx = ring;

    return 0;

out_ring_desc:
    kfree(ring->ring_info);
out_ring_info:
    pasemi_dma_free_chan(&ring->chan);
out_chan:
    return -ENOMEM;
}

/*
 * Allocate and program a TX DMA channel plus its descriptor ring and
 * per-slot bookkeeping.  Returns the new ring, or NULL on failure.
 */
static struct pasemi_mac_txring *
pasemi_mac_setup_tx_resources(const struct net_device *dev)
{
    struct pasemi_mac *mac = netdev_priv(dev);
    u32 val;
    struct pasemi_mac_txring *ring;
    unsigned int cfg;
    int chno;

    ring = pasemi_dma_alloc_chan(TXCHAN, sizeof(struct pasemi_mac_txring),
                     offsetof(struct pasemi_mac_txring, chan));
    if (!ring) {
        dev_err(&mac->pdev->dev, "Can't allocate TX channel\n");
        goto out_chan;
    }

    chno = ring->chan.chno;

    spin_lock_init(&ring->lock);

    ring->size = TX_RING_SIZE;
    ring->ring_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
                  TX_RING_SIZE,
                  GFP_KERNEL);
    if (!ring->ring_info)
        goto out_ring_info;

    /* Allocate descriptors */
    if (pasemi_dma_alloc_ring(&ring->chan, TX_RING_SIZE))
        goto out_ring_desc;

    write_dma_reg(PAS_DMA_TXCHAN_BASEL(chno),
              PAS_DMA_TXCHAN_BASEL_BRBL(ring->chan.ring_dma));
    val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32);
    val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 3);

    write_dma_reg(PAS_DMA_TXCHAN_BASEU(chno), val);

    cfg = PAS_DMA_TXCHAN_CFG_TY_IFACE |
          PAS_DMA_TXCHAN_CFG_TATTR(mac->dma_if) |
          PAS_DMA_TXCHAN_CFG_UP |
          PAS_DMA_TXCHAN_CFG_WT(4);

    if (translation_enabled())
        cfg |= PAS_DMA_TXCHAN_CFG_TRD | PAS_DMA_TXCHAN_CFG_TRR;

    write_dma_reg(PAS_DMA_TXCHAN_CFG(chno), cfg);

    ring->next_to_fill = 0;
    ring->next_to_clean = 0;
    ring->mac = mac;

    return ring;

out_ring_desc:
    kfree(ring->ring_info);
out_ring_info:
pasemi_dma_free_chan(&ring->chan); out_chan: return NULL; } static void pasemi_mac_free_tx_resources(struct pasemi_mac *mac) { struct pasemi_mac_txring *txring = tx_ring(mac); unsigned int i, j; struct pasemi_mac_buffer *info; dma_addr_t dmas[MAX_SKB_FRAGS+1]; int freed, nfrags; int start, limit; start = txring->next_to_clean; limit = txring->next_to_fill; /* Compensate for when fill has wrapped and clean has not */ if (start > limit) limit += TX_RING_SIZE; for (i = start; i < limit; i += freed) { info = &txring->ring_info[(i+1) & (TX_RING_SIZE-1)]; if (info->dma && info->skb) { nfrags = skb_shinfo(info->skb)->nr_frags; for (j = 0; j <= nfrags; j++) dmas[j] = txring->ring_info[(i+1+j) & (TX_RING_SIZE-1)].dma; freed = pasemi_mac_unmap_tx_skb(mac, nfrags, info->skb, dmas); } else { freed = 2; } } kfree(txring->ring_info); pasemi_dma_free_chan(&txring->chan); } static void pasemi_mac_free_rx_buffers(struct pasemi_mac *mac) { struct pasemi_mac_rxring *rx = rx_ring(mac); unsigned int i; struct pasemi_mac_buffer *info; for (i = 0; i < RX_RING_SIZE; i++) { info = &RX_DESC_INFO(rx, i); if (info->skb && info->dma) { pci_unmap_single(mac->dma_pdev, info->dma, info->skb->len, PCI_DMA_FROMDEVICE); dev_kfree_skb_any(info->skb); } info->dma = 0; info->skb = NULL; } for (i = 0; i < RX_RING_SIZE; i++) RX_BUFF(rx, i) = 0; } static void pasemi_mac_free_rx_resources(struct pasemi_mac *mac) { pasemi_mac_free_rx_buffers(mac); dma_free_coherent(&mac->dma_pdev->dev, RX_RING_SIZE * sizeof(u64), rx_ring(mac)->buffers, rx_ring(mac)->buf_dma); kfree(rx_ring(mac)->ring_info); pasemi_dma_free_chan(&rx_ring(mac)->chan); mac->rx = NULL; } static void pasemi_mac_replenish_rx_ring(struct net_device *dev, const int limit) { const struct pasemi_mac *mac = netdev_priv(dev); struct pasemi_mac_rxring *rx = rx_ring(mac); int fill, count; if (limit <= 0) return; fill = rx_ring(mac)->next_to_fill; for (count = 0; count < limit; count++) { struct pasemi_mac_buffer *info = &RX_DESC_INFO(rx, fill); u64 *buff 
= &RX_BUFF(rx, fill); struct sk_buff *skb; dma_addr_t dma; /* Entry in use? */ WARN_ON(*buff); skb = netdev_alloc_skb(dev, mac->bufsz); skb_reserve(skb, LOCAL_SKB_ALIGN); if (unlikely(!skb)) break; dma = pci_map_single(mac->dma_pdev, skb->data, mac->bufsz - LOCAL_SKB_ALIGN, PCI_DMA_FROMDEVICE); if (unlikely(pci_dma_mapping_error(mac->dma_pdev, dma))) { dev_kfree_skb_irq(info->skb); break; } info->skb = skb; info->dma = dma; *buff = XCT_RXB_LEN(mac->bufsz) | XCT_RXB_ADDR(dma); fill++; } wmb(); write_dma_reg(PAS_DMA_RXINT_INCR(mac->dma_if), count); rx_ring(mac)->next_to_fill = (rx_ring(mac)->next_to_fill + count) & (RX_RING_SIZE - 1); } static void pasemi_mac_restart_rx_intr(const struct pasemi_mac *mac) { struct pasemi_mac_rxring *rx = rx_ring(mac); unsigned int reg, pcnt; /* Re-enable packet count interrupts: finally * ack the packet count interrupt we got in rx_intr. */ pcnt = *rx->chan.status & PAS_STATUS_PCNT_M; reg = PAS_IOB_DMA_RXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_RXCH_RESET_PINTC; if (*rx->chan.status & PAS_STATUS_TIMER) reg |= PAS_IOB_DMA_RXCH_RESET_TINTC; write_iob_reg(PAS_IOB_DMA_RXCH_RESET(mac->rx->chan.chno), reg); } static void pasemi_mac_restart_tx_intr(const struct pasemi_mac *mac) { unsigned int reg, pcnt; /* Re-enable packet count interrupts */ pcnt = *tx_ring(mac)->chan.status & PAS_STATUS_PCNT_M; reg = PAS_IOB_DMA_TXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_TXCH_RESET_PINTC; write_iob_reg(PAS_IOB_DMA_TXCH_RESET(tx_ring(mac)->chan.chno), reg); } static inline void pasemi_mac_rx_error(const struct pasemi_mac *mac, const u64 macrx) { unsigned int rcmdsta, ccmdsta; struct pasemi_dmachan *chan = &rx_ring(mac)->chan; if (!netif_msg_rx_err(mac)) return; rcmdsta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if)); ccmdsta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(chan->chno)); printk(KERN_ERR "pasemi_mac: rx error. 
macrx %016llx, rx status %llx\n", macrx, *chan->status); printk(KERN_ERR "pasemi_mac: rcmdsta %08x ccmdsta %08x\n", rcmdsta, ccmdsta); } static inline void pasemi_mac_tx_error(const struct pasemi_mac *mac, const u64 mactx) { unsigned int cmdsta; struct pasemi_dmachan *chan = &tx_ring(mac)->chan; if (!netif_msg_tx_err(mac)) return; cmdsta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(chan->chno)); printk(KERN_ERR "pasemi_mac: tx error. mactx 0x%016llx, "\ "tx status 0x%016llx\n", mactx, *chan->status); printk(KERN_ERR "pasemi_mac: tcmdsta 0x%08x\n", cmdsta); } static int pasemi_mac_clean_rx(struct pasemi_mac_rxring *rx, const int limit) { const struct pasemi_dmachan *chan = &rx->chan; struct pasemi_mac *mac = rx->mac; struct pci_dev *pdev = mac->dma_pdev; unsigned int n; int count, buf_index, tot_bytes, packets; struct pasemi_mac_buffer *info; struct sk_buff *skb; unsigned int len; u64 macrx, eval; dma_addr_t dma; tot_bytes = 0; packets = 0; spin_lock(&rx->lock); n = rx->next_to_clean; prefetch(&RX_DESC(rx, n)); for (count = 0; count < limit; count++) { macrx = RX_DESC(rx, n); prefetch(&RX_DESC(rx, n+4)); if ((macrx & XCT_MACRX_E) || (*chan->status & PAS_STATUS_ERROR)) pasemi_mac_rx_error(mac, macrx); if (!(macrx & XCT_MACRX_O)) break; info = NULL; BUG_ON(!(macrx & XCT_MACRX_RR_8BRES)); eval = (RX_DESC(rx, n+1) & XCT_RXRES_8B_EVAL_M) >> XCT_RXRES_8B_EVAL_S; buf_index = eval-1; dma = (RX_DESC(rx, n+2) & XCT_PTR_ADDR_M); info = &RX_DESC_INFO(rx, buf_index); skb = info->skb; prefetch_skb(skb); len = (macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S; pci_unmap_single(pdev, dma, mac->bufsz - LOCAL_SKB_ALIGN, PCI_DMA_FROMDEVICE); if (macrx & XCT_MACRX_CRC) { /* CRC error flagged */ mac->netdev->stats.rx_errors++; mac->netdev->stats.rx_crc_errors++; /* No need to free skb, it'll be reused */ goto next; } info->skb = NULL; info->dma = 0; if (likely((macrx & XCT_MACRX_HTY_M) == XCT_MACRX_HTY_IPV4_OK)) { skb->ip_summed = CHECKSUM_UNNECESSARY; skb->csum = (macrx & XCT_MACRX_CSUM_M) >> 
                XCT_MACRX_CSUM_S;
        } else {
            skb_checksum_none_assert(skb);
        }

        packets++;
        tot_bytes += len;

        /* Don't include CRC */
        skb_put(skb, len-4);

        skb->protocol = eth_type_trans(skb, mac->netdev);
        lro_receive_skb(&mac->lro_mgr, skb, (void *)macrx);

next:
        RX_DESC(rx, n) = 0;
        RX_DESC(rx, n+1) = 0;

        /* Need to zero it out since hardware doesn't, since the
         * replenish loop uses it to tell when it's done.
         */
        RX_BUFF(rx, buf_index) = 0;

        n += 4;
    }

    if (n > RX_RING_SIZE) {
        /* Errata 5971 workaround: L2 target of headers */
        write_iob_reg(PAS_IOB_COM_PKTHDRCNT, 0);
        n &= (RX_RING_SIZE-1);
    }

    rx_ring(mac)->next_to_clean = n;

    lro_flush_all(&mac->lro_mgr);

    /* Increase is in number of 16-byte entries, and since each descriptor
     * with an 8BRES takes up 3x8 bytes (padded to 4x8), increase with
     * count*2.
     */
    write_dma_reg(PAS_DMA_RXCHAN_INCR(mac->rx->chan.chno), count << 1);

    pasemi_mac_replenish_rx_ring(mac->netdev, count);

    mac->netdev->stats.rx_bytes += tot_bytes;
    mac->netdev->stats.rx_packets += packets;

    spin_unlock(&rx_ring(mac)->lock);

    return count;
}

/* Can't make this too large or we blow the kernel stack limits */
#define TX_CLEAN_BATCHSIZE (128/MAX_SKB_FRAGS)

/*
 * Reclaim completed TX descriptors in batches: gather the skb and DMA
 * mapping info for each completed packet while holding the ring lock,
 * then unmap/free outside the lock.  Returns the total number of
 * packets reclaimed.
 */
static int pasemi_mac_clean_tx(struct pasemi_mac_txring *txring)
{
    struct pasemi_dmachan *chan = &txring->chan;
    struct pasemi_mac *mac = txring->mac;
    int i, j;
    unsigned int start, descr_count, buf_count, batch_limit;
    unsigned int ring_limit;
    unsigned int total_count;
    unsigned long flags;
    struct sk_buff *skbs[TX_CLEAN_BATCHSIZE];
    dma_addr_t dmas[TX_CLEAN_BATCHSIZE][MAX_SKB_FRAGS+1];
    int nf[TX_CLEAN_BATCHSIZE];
    int nr_frags;

    total_count = 0;
    batch_limit = TX_CLEAN_BATCHSIZE;

restart:
    spin_lock_irqsave(&txring->lock, flags);

    start = txring->next_to_clean;
    ring_limit = txring->next_to_fill;

    prefetch(&TX_DESC_INFO(txring, start+1).skb);

    /* Compensate for when fill has wrapped but clean has not */
    if (start > ring_limit)
        ring_limit += TX_RING_SIZE;

    buf_count = 0;
    descr_count = 0;

    for (i = start;
         descr_count < batch_limit && i < ring_limit;
         i += buf_count) {
        u64 mactx = TX_DESC(txring, i);
        struct sk_buff *skb;

        if ((mactx & XCT_MACTX_E) ||
            (*chan->status & PAS_STATUS_ERROR))
            pasemi_mac_tx_error(mac, mactx);

        /* Skip over control descriptors */
        if (!(mactx & XCT_MACTX_LLEN_M)) {
            TX_DESC(txring, i) = 0;
            TX_DESC(txring, i+1) = 0;
            buf_count = 2;
            continue;
        }

        skb = TX_DESC_INFO(txring, i+1).skb;
        /* .dma of the first slot doubles as the fragment count */
        nr_frags = TX_DESC_INFO(txring, i).dma;

        if (unlikely(mactx & XCT_MACTX_O))
            /* Not yet transmitted */
            break;

        buf_count = 2 + nr_frags;
        /* Since we always fill with an even number of entries, make
         * sure we skip any unused one at the end as well.
         */
        if (buf_count & 1)
            buf_count++;

        for (j = 0; j <= nr_frags; j++)
            dmas[descr_count][j] = TX_DESC_INFO(txring, i+1+j).dma;

        skbs[descr_count] = skb;
        nf[descr_count] = nr_frags;

        TX_DESC(txring, i) = 0;
        TX_DESC(txring, i+1) = 0;

        descr_count++;
    }
    txring->next_to_clean = i & (TX_RING_SIZE-1);

    spin_unlock_irqrestore(&txring->lock, flags);
    netif_wake_queue(mac->netdev);

    /* Unmap and free outside the lock */
    for (i = 0; i < descr_count; i++)
        pasemi_mac_unmap_tx_skb(mac, nf[i], skbs[i], dmas[i]);

    total_count += descr_count;

    /* If the batch was full, try to clean more */
    if (descr_count == batch_limit)
        goto restart;

    return total_count;
}

/*
 * RX interrupt handler: acknowledge soft/error causes and kick NAPI.
 * The packet-count cause is deliberately left set (see comment below)
 * and is acked later by pasemi_mac_restart_rx_intr().
 */
static irqreturn_t pasemi_mac_rx_intr(int irq, void *data)
{
    const struct pasemi_mac_rxring *rxring = data;
    struct pasemi_mac *mac = rxring->mac;
    const struct pasemi_dmachan *chan = &rxring->chan;
    unsigned int reg;

    if (!(*chan->status & PAS_STATUS_CAUSE_M))
        return IRQ_NONE;

    /* Don't reset packet count so it won't fire again but clear
     * all others.
     */
    reg = 0;
    if (*chan->status & PAS_STATUS_SOFT)
        reg |= PAS_IOB_DMA_RXCH_RESET_SINTC;
    if (*chan->status & PAS_STATUS_ERROR)
        reg |= PAS_IOB_DMA_RXCH_RESET_DINTC;

    napi_schedule(&mac->napi);

    write_iob_reg(PAS_IOB_DMA_RXCH_RESET(chan->chno), reg);

    return IRQ_HANDLED;
}

#define TX_CLEAN_INTERVAL HZ

/*
 * Periodic TX cleanup timer: reclaim completed descriptors, re-arm
 * ourselves, and re-enable the TX packet-count interrupt.
 */
static void pasemi_mac_tx_timer(unsigned long data)
{
    struct pasemi_mac_txring *txring = (struct pasemi_mac_txring *)data;
    struct pasemi_mac *mac = txring->mac;

    pasemi_mac_clean_tx(txring);

    mod_timer(&txring->clean_timer, jiffies + TX_CLEAN_INTERVAL);

    pasemi_mac_restart_tx_intr(mac);
}

/*
 * TX interrupt handler: acknowledge soft/error causes, push the clean
 * timer out (the interrupt path will do the cleaning), and kick NAPI.
 */
static irqreturn_t pasemi_mac_tx_intr(int irq, void *data)
{
    struct pasemi_mac_txring *txring = data;
    const struct pasemi_dmachan *chan = &txring->chan;
    struct pasemi_mac *mac = txring->mac;
    unsigned int reg;

    if (!(*chan->status & PAS_STATUS_CAUSE_M))
        return IRQ_NONE;

    reg = 0;

    if (*chan->status & PAS_STATUS_SOFT)
        reg |= PAS_IOB_DMA_TXCH_RESET_SINTC;
    if (*chan->status & PAS_STATUS_ERROR)
        reg |= PAS_IOB_DMA_TXCH_RESET_DINTC;

    mod_timer(&txring->clean_timer, jiffies + (TX_CLEAN_INTERVAL)*2);

    napi_schedule(&mac->napi);

    if (reg)
        write_iob_reg(PAS_IOB_DMA_TXCH_RESET(chan->chno), reg);

    return IRQ_HANDLED;
}

/*
 * phylib adjust_link callback: mirror the PHY's link/speed/duplex state
 * into the MAC configuration register and the netdev carrier state.
 */
static void pasemi_adjust_link(struct net_device *dev)
{
    struct pasemi_mac *mac = netdev_priv(dev);
    int msg;
    unsigned int flags;
    unsigned int new_flags;

    if (!mac->phydev->link) {
        /* If no link, MAC speed settings don't matter. Just report
         * link down and return.
*/ if (mac->link && netif_msg_link(mac)) printk(KERN_INFO "%s: Link is down.\n", dev->name); netif_carrier_off(dev); pasemi_mac_intf_disable(mac); mac->link = 0; return; } else { pasemi_mac_intf_enable(mac); netif_carrier_on(dev); } flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG); new_flags = flags & ~(PAS_MAC_CFG_PCFG_HD | PAS_MAC_CFG_PCFG_SPD_M | PAS_MAC_CFG_PCFG_TSR_M); if (!mac->phydev->duplex) new_flags |= PAS_MAC_CFG_PCFG_HD; switch (mac->phydev->speed) { case 1000: new_flags |= PAS_MAC_CFG_PCFG_SPD_1G | PAS_MAC_CFG_PCFG_TSR_1G; break; case 100: new_flags |= PAS_MAC_CFG_PCFG_SPD_100M | PAS_MAC_CFG_PCFG_TSR_100M; break; case 10: new_flags |= PAS_MAC_CFG_PCFG_SPD_10M | PAS_MAC_CFG_PCFG_TSR_10M; break; default: printk("Unsupported speed %d\n", mac->phydev->speed); } /* Print on link or speed/duplex change */ msg = mac->link != mac->phydev->link || flags != new_flags; mac->duplex = mac->phydev->duplex; mac->speed = mac->phydev->speed; mac->link = mac->phydev->link; if (new_flags != flags) write_mac_reg(mac, PAS_MAC_CFG_PCFG, new_flags); if (msg && netif_msg_link(mac)) printk(KERN_INFO "%s: Link is up at %d Mbps, %s duplex.\n", dev->name, mac->speed, mac->duplex ? 
"full" : "half"); } static int pasemi_mac_phy_init(struct net_device *dev) { struct pasemi_mac *mac = netdev_priv(dev); struct device_node *dn, *phy_dn; struct phy_device *phydev; dn = pci_device_to_OF_node(mac->pdev); phy_dn = of_parse_phandle(dn, "phy-handle", 0); of_node_put(phy_dn); mac->link = 0; mac->speed = 0; mac->duplex = -1; phydev = of_phy_connect(dev, phy_dn, &pasemi_adjust_link, 0, PHY_INTERFACE_MODE_SGMII); if (!phydev) { printk(KERN_ERR "%s: Could not attach to phy\n", dev->name); return -ENODEV; } mac->phydev = phydev; return 0; } static int pasemi_mac_open(struct net_device *dev) { struct pasemi_mac *mac = netdev_priv(dev); unsigned int flags; int i, ret; flags = PAS_MAC_CFG_TXP_FCE | PAS_MAC_CFG_TXP_FPC(3) | PAS_MAC_CFG_TXP_SL(3) | PAS_MAC_CFG_TXP_COB(0xf) | PAS_MAC_CFG_TXP_TIFT(8) | PAS_MAC_CFG_TXP_TIFG(12); write_mac_reg(mac, PAS_MAC_CFG_TXP, flags); ret = pasemi_mac_setup_rx_resources(dev); if (ret) goto out_rx_resources; mac->tx = pasemi_mac_setup_tx_resources(dev); if (!mac->tx) goto out_tx_ring; /* We might already have allocated rings in case mtu was changed * before interface was brought up. 
*/ if (dev->mtu > 1500 && !mac->num_cs) { pasemi_mac_setup_csrings(mac); if (!mac->num_cs) goto out_tx_ring; } /* Zero out rmon counters */ for (i = 0; i < 32; i++) write_mac_reg(mac, PAS_MAC_RMON(i), 0); /* 0x3ff with 33MHz clock is about 31us */ write_iob_reg(PAS_IOB_DMA_COM_TIMEOUTCFG, PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(0x3ff)); write_iob_reg(PAS_IOB_DMA_RXCH_CFG(mac->rx->chan.chno), PAS_IOB_DMA_RXCH_CFG_CNTTH(256)); write_iob_reg(PAS_IOB_DMA_TXCH_CFG(mac->tx->chan.chno), PAS_IOB_DMA_TXCH_CFG_CNTTH(32)); write_mac_reg(mac, PAS_MAC_IPC_CHNL, PAS_MAC_IPC_CHNL_DCHNO(mac->rx->chan.chno) | PAS_MAC_IPC_CHNL_BCH(mac->rx->chan.chno)); /* enable rx if */ write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), PAS_DMA_RXINT_RCMDSTA_EN | PAS_DMA_RXINT_RCMDSTA_DROPS_M | PAS_DMA_RXINT_RCMDSTA_BP | PAS_DMA_RXINT_RCMDSTA_OO | PAS_DMA_RXINT_RCMDSTA_BT); /* enable rx channel */ pasemi_dma_start_chan(&rx_ring(mac)->chan, PAS_DMA_RXCHAN_CCMDSTA_DU | PAS_DMA_RXCHAN_CCMDSTA_OD | PAS_DMA_RXCHAN_CCMDSTA_FD | PAS_DMA_RXCHAN_CCMDSTA_DT); /* enable tx channel */ pasemi_dma_start_chan(&tx_ring(mac)->chan, PAS_DMA_TXCHAN_TCMDSTA_SZ | PAS_DMA_TXCHAN_TCMDSTA_DB | PAS_DMA_TXCHAN_TCMDSTA_DE | PAS_DMA_TXCHAN_TCMDSTA_DA); pasemi_mac_replenish_rx_ring(dev, RX_RING_SIZE); write_dma_reg(PAS_DMA_RXCHAN_INCR(rx_ring(mac)->chan.chno), RX_RING_SIZE>>1); /* Clear out any residual packet count state from firmware */ pasemi_mac_restart_rx_intr(mac); pasemi_mac_restart_tx_intr(mac); flags = PAS_MAC_CFG_PCFG_S1 | PAS_MAC_CFG_PCFG_PR | PAS_MAC_CFG_PCFG_CE; if (mac->type == MAC_TYPE_GMAC) flags |= PAS_MAC_CFG_PCFG_TSR_1G | PAS_MAC_CFG_PCFG_SPD_1G; else flags |= PAS_MAC_CFG_PCFG_TSR_10G | PAS_MAC_CFG_PCFG_SPD_10G; /* Enable interface in MAC */ write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags); ret = pasemi_mac_phy_init(dev); if (ret) { /* Since we won't get link notification, just enable RX */ pasemi_mac_intf_enable(mac); if (mac->type == MAC_TYPE_GMAC) { /* Warn for missing PHY on SGMII (1Gig) ports */ 
dev_warn(&mac->pdev->dev, "PHY init failed: %d.\n", ret); dev_warn(&mac->pdev->dev, "Defaulting to 1Gbit full duplex\n"); } } netif_start_queue(dev); napi_enable(&mac->napi); snprintf(mac->tx_irq_name, sizeof(mac->tx_irq_name), "%s tx", dev->name); ret = request_irq(mac->tx->chan.irq, pasemi_mac_tx_intr, IRQF_DISABLED, mac->tx_irq_name, mac->tx); if (ret) { dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n", mac->tx->chan.irq, ret); goto out_tx_int; } snprintf(mac->rx_irq_name, sizeof(mac->rx_irq_name), "%s rx", dev->name); ret = request_irq(mac->rx->chan.irq, pasemi_mac_rx_intr, IRQF_DISABLED, mac->rx_irq_name, mac->rx); if (ret) { dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n", mac->rx->chan.irq, ret); goto out_rx_int; } if (mac->phydev) phy_start(mac->phydev); init_timer(&mac->tx->clean_timer); mac->tx->clean_timer.function = pasemi_mac_tx_timer; mac->tx->clean_timer.data = (unsigned long)mac->tx; mac->tx->clean_timer.expires = jiffies+HZ; add_timer(&mac->tx->clean_timer); return 0; out_rx_int: free_irq(mac->tx->chan.irq, mac->tx); out_tx_int: napi_disable(&mac->napi); netif_stop_queue(dev); out_tx_ring: if (mac->tx) pasemi_mac_free_tx_resources(mac); pasemi_mac_free_rx_resources(mac); out_rx_resources: return ret; } #define MAX_RETRIES 5000 static void pasemi_mac_pause_txchan(struct pasemi_mac *mac) { unsigned int sta, retries; int txch = tx_ring(mac)->chan.chno; write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch), PAS_DMA_TXCHAN_TCMDSTA_ST); for (retries = 0; retries < MAX_RETRIES; retries++) { sta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch)); if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)) break; cond_resched(); } if (sta & PAS_DMA_TXCHAN_TCMDSTA_ACT) dev_err(&mac->dma_pdev->dev, "Failed to stop tx channel, tcmdsta %08x\n", sta); write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch), 0); } static void pasemi_mac_pause_rxchan(struct pasemi_mac *mac) { unsigned int sta, retries; int rxch = rx_ring(mac)->chan.chno; write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch), 
PAS_DMA_RXCHAN_CCMDSTA_ST); for (retries = 0; retries < MAX_RETRIES; retries++) { sta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch)); if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)) break; cond_resched(); } if (sta & PAS_DMA_RXCHAN_CCMDSTA_ACT) dev_err(&mac->dma_pdev->dev, "Failed to stop rx channel, ccmdsta 08%x\n", sta); write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch), 0); } static void pasemi_mac_pause_rxint(struct pasemi_mac *mac) { unsigned int sta, retries; write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), PAS_DMA_RXINT_RCMDSTA_ST); for (retries = 0; retries < MAX_RETRIES; retries++) { sta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if)); if (!(sta & PAS_DMA_RXINT_RCMDSTA_ACT)) break; cond_resched(); } if (sta & PAS_DMA_RXINT_RCMDSTA_ACT) dev_err(&mac->dma_pdev->dev, "Failed to stop rx interface, rcmdsta %08x\n", sta); write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0); } static int pasemi_mac_close(struct net_device *dev) { struct pasemi_mac *mac = netdev_priv(dev); unsigned int sta; int rxch, txch, i; rxch = rx_ring(mac)->chan.chno; txch = tx_ring(mac)->chan.chno; if (mac->phydev) { phy_stop(mac->phydev); phy_disconnect(mac->phydev); } del_timer_sync(&mac->tx->clean_timer); netif_stop_queue(dev); napi_disable(&mac->napi); sta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if)); if (sta & (PAS_DMA_RXINT_RCMDSTA_BP | PAS_DMA_RXINT_RCMDSTA_OO | PAS_DMA_RXINT_RCMDSTA_BT)) printk(KERN_DEBUG "pasemi_mac: rcmdsta error: 0x%08x\n", sta); sta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch)); if (sta & (PAS_DMA_RXCHAN_CCMDSTA_DU | PAS_DMA_RXCHAN_CCMDSTA_OD | PAS_DMA_RXCHAN_CCMDSTA_FD | PAS_DMA_RXCHAN_CCMDSTA_DT)) printk(KERN_DEBUG "pasemi_mac: ccmdsta error: 0x%08x\n", sta); sta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch)); if (sta & (PAS_DMA_TXCHAN_TCMDSTA_SZ | PAS_DMA_TXCHAN_TCMDSTA_DB | PAS_DMA_TXCHAN_TCMDSTA_DE | PAS_DMA_TXCHAN_TCMDSTA_DA)) printk(KERN_DEBUG "pasemi_mac: tcmdsta error: 0x%08x\n", sta); /* Clean out any pending buffers */ pasemi_mac_clean_tx(tx_ring(mac)); 
    pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE);

    /* Quiesce hardware before freeing anything */
    pasemi_mac_pause_txchan(mac);
    pasemi_mac_pause_rxint(mac);
    pasemi_mac_pause_rxchan(mac);
    pasemi_mac_intf_disable(mac);

    free_irq(mac->tx->chan.irq, mac->tx);
    free_irq(mac->rx->chan.irq, mac->rx);

    for (i = 0; i < mac->num_cs; i++) {
        pasemi_mac_free_csring(mac->cs[i]);
        mac->cs[i] = NULL;
    }

    mac->num_cs = 0;

    /* Free resources */
    pasemi_mac_free_rx_resources(mac);
    pasemi_mac_free_tx_resources(mac);

    return 0;
}

/*
 * Queue descriptors on a checksum function channel to compute a TCP or
 * UDP checksum for "skb" and patch the result back into the packet,
 * plus the event descriptors that make the TX channel wait for the
 * checksum to complete before transmitting.  Used for packets too big
 * for the MAC's inline checksum path (> 1540 bytes).
 */
static void pasemi_mac_queue_csdesc(const struct sk_buff *skb,
                    const dma_addr_t *map,
                    const unsigned int *map_size,
                    struct pasemi_mac_txring *txring,
                    struct pasemi_mac_csring *csring)
{
    u64 fund;
    dma_addr_t cs_dest;
    const int nh_off = skb_network_offset(skb);
    const int nh_len = skb_network_header_len(skb);
    const int nfrags = skb_shinfo(skb)->nr_frags;
    int cs_size, i, fill, hdr, cpyhdr, evt;
    dma_addr_t csdma;

    fund = XCT_FUN_ST | XCT_FUN_RR_8BRES | XCT_FUN_O | XCT_FUN_FUN(csring->fun) |
           XCT_FUN_CRM_SIG | XCT_FUN_LLEN(skb->len - nh_off) |
           XCT_FUN_SHL(nh_len >> 2) | XCT_FUN_SE;

    switch (ip_hdr(skb)->protocol) {
    case IPPROTO_TCP:
        fund |= XCT_FUN_SIG_TCP4;
        /* TCP checksum is 16 bytes into the header */
        cs_dest = map[0] + skb_transport_offset(skb) + 16;
        break;
    case IPPROTO_UDP:
        fund |= XCT_FUN_SIG_UDP4;
        /* UDP checksum is 6 bytes into the header */
        cs_dest = map[0] + skb_transport_offset(skb) + 6;
        break;
    default:
        BUG();
    }

    /* Do the checksum offloaded */
    fill = csring->next_to_fill;
    hdr = fill;

    CS_DESC(csring, fill++) = fund;
    /* Room for 8BRES. Checksum result is really 2 bytes into it */
    csdma = csring->chan.ring_dma + (fill & (CS_RING_SIZE-1)) * 8 + 2;
    CS_DESC(csring, fill++) = 0;

    CS_DESC(csring, fill) = XCT_PTR_LEN(map_size[0]-nh_off) |
                XCT_PTR_ADDR(map[0]+nh_off);
    for (i = 1; i <= nfrags; i++)
        CS_DESC(csring, fill+i) = XCT_PTR_LEN(map_size[i]) |
                      XCT_PTR_ADDR(map[i]);

    fill += i;

    /* Keep the fill pointer even-aligned */
    if (fill & 1)
        fill++;

    /* Copy the result into the TCP packet */
    cpyhdr = fill;
    CS_DESC(csring, fill++) = XCT_FUN_O | XCT_FUN_FUN(csring->fun) |
                  XCT_FUN_LLEN(2) | XCT_FUN_SE;
    CS_DESC(csring, fill++) = XCT_PTR_LEN(2) | XCT_PTR_ADDR(cs_dest) | XCT_PTR_T;
    CS_DESC(csring, fill++) = XCT_PTR_LEN(2) | XCT_PTR_ADDR(csdma);
    fill++;

    /* Alternate between the two event flags each packet */
    evt = !csring->last_event;
    csring->last_event = evt;

    /* Event handshaking with MAC TX */
    CS_DESC(csring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O |
                  CTRL_CMD_ETYPE_SET | CTRL_CMD_REG(csring->events[evt]);
    CS_DESC(csring, fill++) = 0;
    CS_DESC(csring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O |
                  CTRL_CMD_ETYPE_WCLR | CTRL_CMD_REG(csring->events[!evt]);
    CS_DESC(csring, fill++) = 0;
    csring->next_to_fill = fill & (CS_RING_SIZE-1);

    cs_size = fill - hdr;
    write_dma_reg(PAS_DMA_TXCHAN_INCR(csring->chan.chno), (cs_size) >> 1);

    /* TX-side event handshaking */
    fill = txring->next_to_fill;
    TX_DESC(txring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O |
                  CTRL_CMD_ETYPE_WSET | CTRL_CMD_REG(csring->events[evt]);
    TX_DESC(txring, fill++) = 0;
    TX_DESC(txring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O |
                  CTRL_CMD_ETYPE_CLR | CTRL_CMD_REG(csring->events[!evt]);
    TX_DESC(txring, fill++) = 0;
    txring->next_to_fill = fill;

    write_dma_reg(PAS_DMA_TXCHAN_INCR(txring->chan.chno), 2);
}

/*
 * ndo_start_xmit handler: DMA-map the skb head and fragments, set up
 * inline (<= 1540 byte) or function-channel checksum offload, fill the
 * TX descriptor ring and kick the channel.  On mapping failure or a
 * full ring, the mappings are undone and NETDEV_TX_BUSY is returned.
 */
static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
{
    struct pasemi_mac * const mac = netdev_priv(dev);
    struct pasemi_mac_txring * const txring = tx_ring(mac);
    struct pasemi_mac_csring *csring;
    u64 dflags = 0;
    u64 mactx;
    dma_addr_t map[MAX_SKB_FRAGS+1];
    unsigned int map_size[MAX_SKB_FRAGS+1];
    unsigned long flags;
    int i, nfrags;
    int fill;
    const int nh_off = skb_network_offset(skb);
    const int nh_len = skb_network_header_len(skb);

    prefetch(&txring->ring_info);

    dflags = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_CRC_PAD;

    nfrags = skb_shinfo(skb)->nr_frags;

    map[0] = pci_map_single(mac->dma_pdev, skb->data, skb_headlen(skb),
                PCI_DMA_TODEVICE);
    map_size[0] = skb_headlen(skb);
    if (pci_dma_mapping_error(mac->dma_pdev, map[0]))
        goto out_err_nolock;

    for (i = 0; i < nfrags; i++) {
        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

        map[i + 1] = skb_frag_dma_map(&mac->dma_pdev->dev, frag, 0,
                          skb_frag_size(frag), DMA_TO_DEVICE);
        map_size[i+1] = skb_frag_size(frag);
        if (dma_mapping_error(&mac->dma_pdev->dev, map[i + 1])) {
            /* Unwind only the mappings made so far */
            nfrags = i;
            goto out_err_nolock;
        }
    }

    /* Small packets use the MAC's inline checksum engine */
    if (skb->ip_summed == CHECKSUM_PARTIAL && skb->len <= 1540) {
        switch (ip_hdr(skb)->protocol) {
        case IPPROTO_TCP:
            dflags |= XCT_MACTX_CSUM_TCP;
            dflags |= XCT_MACTX_IPH(nh_len >> 2);
            dflags |= XCT_MACTX_IPO(nh_off);
            break;
        case IPPROTO_UDP:
            dflags |= XCT_MACTX_CSUM_UDP;
            dflags |= XCT_MACTX_IPH(nh_len >> 2);
            dflags |= XCT_MACTX_IPO(nh_off);
            break;
        default:
            WARN_ON(1);
        }
    }

    mactx = dflags | XCT_MACTX_LLEN(skb->len);

    spin_lock_irqsave(&txring->lock, flags);

    /* Avoid stepping on the same cache line that the DMA controller
     * is currently about to send, so leave at least 8 words available.
     * Total free space needed is mactx + fragments + 8
     */
    if (RING_AVAIL(txring) < nfrags + 14) {
        /* no room -- stop the queue and wait for tx intr */
        netif_stop_queue(dev);
        goto out_err;
    }

    /* Queue up checksum + event descriptors, if needed */
    if (mac->num_cs && skb->ip_summed == CHECKSUM_PARTIAL && skb->len > 1540) {
        /* Round-robin over the available checksum channels */
        csring = mac->cs[mac->last_cs];
        mac->last_cs = (mac->last_cs + 1) % mac->num_cs;

        pasemi_mac_queue_csdesc(skb, map, map_size, txring, csring);
    }

    fill = txring->next_to_fill;
    TX_DESC(txring, fill) = mactx;
    /* First slot's .dma doubles as the fragment count (see clean_tx) */
    TX_DESC_INFO(txring, fill).dma = nfrags;
    fill++;
    TX_DESC_INFO(txring, fill).skb = skb;
    for (i = 0; i <= nfrags; i++) {
        TX_DESC(txring, fill+i) = XCT_PTR_LEN(map_size[i]) |
                      XCT_PTR_ADDR(map[i]);
        TX_DESC_INFO(txring, fill+i).dma = map[i];
    }

    /* We have to add an even number of 8-byte entries to the ring
     * even if the last one is unused. That means always an odd number
     * of pointers + one mactx descriptor.
     */
    if (nfrags & 1)
        nfrags++;

    txring->next_to_fill = (fill + nfrags + 1) & (TX_RING_SIZE-1);

    dev->stats.tx_packets++;
    dev->stats.tx_bytes += skb->len;

    spin_unlock_irqrestore(&txring->lock, flags);

    write_dma_reg(PAS_DMA_TXCHAN_INCR(txring->chan.chno), (nfrags+2) >> 1);

    return NETDEV_TX_OK;

out_err:
    spin_unlock_irqrestore(&txring->lock, flags);
out_err_nolock:
    while (nfrags--)
        pci_unmap_single(mac->dma_pdev, map[nfrags], map_size[nfrags],
                 PCI_DMA_TODEVICE);

    return NETDEV_TX_BUSY;
}

/* ndo_set_rx_mode handler: mirror IFF_PROMISC into the MAC config. */
static void pasemi_mac_set_rx_mode(struct net_device *dev)
{
    const struct pasemi_mac *mac = netdev_priv(dev);
    unsigned int flags;

    flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);

    /* Set promiscuous */
    if (dev->flags & IFF_PROMISC)
        flags |= PAS_MAC_CFG_PCFG_PR;
    else
        flags &= ~PAS_MAC_CFG_PCFG_PR;

    write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);

}

/* NAPI poll: reclaim TX completions, then process up to "budget" RX
 * packets.  (Continues past the end of this view.) */
static int pasemi_mac_poll(struct napi_struct *napi, int budget)
{
    struct pasemi_mac *mac = container_of(napi, struct pasemi_mac, napi);
    int pkts;

    pasemi_mac_clean_tx(tx_ring(mac));
    pkts = pasemi_mac_clean_rx(rx_ring(mac), budget);
    if (pkts <
budget) { /* all done, no more packets present */ napi_complete(napi); pasemi_mac_restart_rx_intr(mac); pasemi_mac_restart_tx_intr(mac); } return pkts; } #ifdef CONFIG_NET_POLL_CONTROLLER /* * Polling 'interrupt' - used by things like netconsole to send skbs * without having to re-enable interrupts. It's not called while * the interrupt routine is executing. */ static void pasemi_mac_netpoll(struct net_device *dev) { const struct pasemi_mac *mac = netdev_priv(dev); disable_irq(mac->tx->chan.irq); pasemi_mac_tx_intr(mac->tx->chan.irq, mac->tx); enable_irq(mac->tx->chan.irq); disable_irq(mac->rx->chan.irq); pasemi_mac_rx_intr(mac->rx->chan.irq, mac->rx); enable_irq(mac->rx->chan.irq); } #endif static int pasemi_mac_change_mtu(struct net_device *dev, int new_mtu) { struct pasemi_mac *mac = netdev_priv(dev); unsigned int reg; unsigned int rcmdsta = 0; int running; int ret = 0; if (new_mtu < PE_MIN_MTU || new_mtu > PE_MAX_MTU) return -EINVAL; running = netif_running(dev); if (running) { /* Need to stop the interface, clean out all already * received buffers, free all unused buffers on the RX * interface ring, then finally re-fill the rx ring with * the new-size buffers and restart. */ napi_disable(&mac->napi); netif_tx_disable(dev); pasemi_mac_intf_disable(mac); rcmdsta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if)); pasemi_mac_pause_rxint(mac); pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE); pasemi_mac_free_rx_buffers(mac); } /* Setup checksum channels if large MTU and none already allocated */ if (new_mtu > 1500 && !mac->num_cs) { pasemi_mac_setup_csrings(mac); if (!mac->num_cs) { ret = -ENOMEM; goto out; } } /* Change maxf, i.e. what size frames are accepted. 
* Need room for ethernet header and CRC word */ reg = read_mac_reg(mac, PAS_MAC_CFG_MACCFG); reg &= ~PAS_MAC_CFG_MACCFG_MAXF_M; reg |= PAS_MAC_CFG_MACCFG_MAXF(new_mtu + ETH_HLEN + 4); write_mac_reg(mac, PAS_MAC_CFG_MACCFG, reg); dev->mtu = new_mtu; /* MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */ mac->bufsz = new_mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128; out: if (running) { write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), rcmdsta | PAS_DMA_RXINT_RCMDSTA_EN); rx_ring(mac)->next_to_fill = 0; pasemi_mac_replenish_rx_ring(dev, RX_RING_SIZE-1); napi_enable(&mac->napi); netif_start_queue(dev); pasemi_mac_intf_enable(mac); } return ret; } static const struct net_device_ops pasemi_netdev_ops = { .ndo_open = pasemi_mac_open, .ndo_stop = pasemi_mac_close, .ndo_start_xmit = pasemi_mac_start_tx, .ndo_set_rx_mode = pasemi_mac_set_rx_mode, .ndo_set_mac_address = pasemi_mac_set_mac_addr, .ndo_change_mtu = pasemi_mac_change_mtu, .ndo_validate_addr = eth_validate_addr, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = pasemi_mac_netpoll, #endif }; static int pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *dev; struct pasemi_mac *mac; int err, ret; err = pci_enable_device(pdev); if (err) return err; dev = alloc_etherdev(sizeof(struct pasemi_mac)); if (dev == NULL) { err = -ENOMEM; goto out_disable_device; } pci_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); mac = netdev_priv(dev); mac->pdev = pdev; mac->netdev = dev; netif_napi_add(dev, &mac->napi, pasemi_mac_poll, 64); dev->features = NETIF_F_IP_CSUM | NETIF_F_LLTX | NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_GSO; mac->lro_mgr.max_aggr = LRO_MAX_AGGR; mac->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS; mac->lro_mgr.lro_arr = mac->lro_desc; mac->lro_mgr.get_skb_header = get_skb_hdr; mac->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID; mac->lro_mgr.dev = mac->netdev; mac->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY; mac->lro_mgr.ip_summed_aggr = 
CHECKSUM_UNNECESSARY; mac->dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL); if (!mac->dma_pdev) { dev_err(&mac->pdev->dev, "Can't find DMA Controller\n"); err = -ENODEV; goto out; } mac->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL); if (!mac->iob_pdev) { dev_err(&mac->pdev->dev, "Can't find I/O Bridge\n"); err = -ENODEV; goto out; } /* get mac addr from device tree */ if (pasemi_get_mac_addr(mac) || !is_valid_ether_addr(mac->mac_addr)) { err = -ENODEV; goto out; } memcpy(dev->dev_addr, mac->mac_addr, sizeof(mac->mac_addr)); ret = mac_to_intf(mac); if (ret < 0) { dev_err(&mac->pdev->dev, "Can't map DMA interface\n"); err = -ENODEV; goto out; } mac->dma_if = ret; switch (pdev->device) { case 0xa005: mac->type = MAC_TYPE_GMAC; break; case 0xa006: mac->type = MAC_TYPE_XAUI; break; default: err = -ENODEV; goto out; } dev->netdev_ops = &pasemi_netdev_ops; dev->mtu = PE_DEF_MTU; /* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */ mac->bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128; dev->ethtool_ops = &pasemi_mac_ethtool_ops; if (err) goto out; mac->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); /* Enable most messages by default */ mac->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; err = register_netdev(dev); if (err) { dev_err(&mac->pdev->dev, "register_netdev failed with error %d\n", err); goto out; } else if (netif_msg_probe(mac)) { printk(KERN_INFO "%s: PA Semi %s: intf %d, hw addr %pM\n", dev->name, mac->type == MAC_TYPE_GMAC ? 
"GMAC" : "XAUI", mac->dma_if, dev->dev_addr); } return err; out: if (mac->iob_pdev) pci_dev_put(mac->iob_pdev); if (mac->dma_pdev) pci_dev_put(mac->dma_pdev); free_netdev(dev); out_disable_device: pci_disable_device(pdev); return err; } static void pasemi_mac_remove(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct pasemi_mac *mac; if (!netdev) return; mac = netdev_priv(netdev); unregister_netdev(netdev); pci_disable_device(pdev); pci_dev_put(mac->dma_pdev); pci_dev_put(mac->iob_pdev); pasemi_dma_free_chan(&mac->tx->chan); pasemi_dma_free_chan(&mac->rx->chan); pci_set_drvdata(pdev, NULL); free_netdev(netdev); } static DEFINE_PCI_DEVICE_TABLE(pasemi_mac_pci_tbl) = { { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa005) }, { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa006) }, { }, }; MODULE_DEVICE_TABLE(pci, pasemi_mac_pci_tbl); static struct pci_driver pasemi_mac_driver = { .name = "pasemi_mac", .id_table = pasemi_mac_pci_tbl, .probe = pasemi_mac_probe, .remove = pasemi_mac_remove, }; static void __exit pasemi_mac_cleanup_module(void) { pci_unregister_driver(&pasemi_mac_driver); } int pasemi_mac_init_module(void) { int err; err = pasemi_dma_init(); if (err) return err; return pci_register_driver(&pasemi_mac_driver); } module_init(pasemi_mac_init_module); module_exit(pasemi_mac_cleanup_module);
gpl-2.0
kernel-hut/android_kernel_xiaomi_cancro
drivers/media/video/gspca/tv8532.c
4969
10863
/* * Quickcam cameras initialization data * * V4L2 by Jean-Francois Moine <http://moinejf.free.fr> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #define MODULE_NAME "tv8532" #include "gspca.h" MODULE_AUTHOR("Michel Xhaard <mxhaard@users.sourceforge.net>"); MODULE_DESCRIPTION("TV8532 USB Camera Driver"); MODULE_LICENSE("GPL"); /* specific webcam descriptor */ struct sd { struct gspca_dev gspca_dev; /* !! 
must be the first item */ __u16 exposure; __u16 gain; __u8 packet; }; /* V4L2 controls supported by the driver */ static int sd_setexposure(struct gspca_dev *gspca_dev, __s32 val); static int sd_getexposure(struct gspca_dev *gspca_dev, __s32 *val); static int sd_setgain(struct gspca_dev *gspca_dev, __s32 val); static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val); static const struct ctrl sd_ctrls[] = { { { .id = V4L2_CID_EXPOSURE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Exposure", .minimum = 1, .maximum = 0x18f, .step = 1, #define EXPOSURE_DEF 0x18f .default_value = EXPOSURE_DEF, }, .set = sd_setexposure, .get = sd_getexposure, }, { { .id = V4L2_CID_GAIN, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Gain", .minimum = 0, .maximum = 0x7ff, .step = 1, #define GAIN_DEF 0x100 .default_value = GAIN_DEF, }, .set = sd_setgain, .get = sd_getgain, }, }; static const struct v4l2_pix_format sif_mode[] = { {176, 144, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .bytesperline = 176, .sizeimage = 176 * 144, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 1}, {352, 288, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .bytesperline = 352, .sizeimage = 352 * 288, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0}, }; /* TV-8532A (ICM532A) registers (LE) */ #define R00_PART_CONTROL 0x00 #define LATENT_CHANGE 0x80 #define EXPO_CHANGE 0x04 #define R01_TIMING_CONTROL_LOW 0x01 #define CMD_EEprom_Open 0x30 #define CMD_EEprom_Close 0x29 #define R03_TABLE_ADDR 0x03 #define R04_WTRAM_DATA_L 0x04 #define R05_WTRAM_DATA_M 0x05 #define R06_WTRAM_DATA_H 0x06 #define R07_TABLE_LEN 0x07 #define R08_RAM_WRITE_ACTION 0x08 #define R0C_AD_WIDTHL 0x0c #define R0D_AD_WIDTHH 0x0d #define R0E_AD_HEIGHTL 0x0e #define R0F_AD_HEIGHTH 0x0f #define R10_AD_COL_BEGINL 0x10 #define R11_AD_COL_BEGINH 0x11 #define MIRROR 0x04 /* [10] */ #define R14_AD_ROW_BEGINL 0x14 #define R15_AD_ROWBEGINH 0x15 #define R1C_AD_EXPOSE_TIMEL 0x1c #define R20_GAIN_G1L 0x20 #define R21_GAIN_G1H 0x21 #define R22_GAIN_RL 0x22 #define R23_GAIN_RH 0x23 
#define R24_GAIN_BL 0x24 #define R25_GAIN_BH 0x25 #define R26_GAIN_G2L 0x26 #define R27_GAIN_G2H 0x27 #define R28_QUANT 0x28 #define R29_LINE 0x29 #define R2C_POLARITY 0x2c #define R2D_POINT 0x2d #define R2E_POINTH 0x2e #define R2F_POINTB 0x2f #define R30_POINTBH 0x30 #define R31_UPD 0x31 #define R2A_HIGH_BUDGET 0x2a #define R2B_LOW_BUDGET 0x2b #define R34_VID 0x34 #define R35_VIDH 0x35 #define R36_PID 0x36 #define R37_PIDH 0x37 #define R39_Test1 0x39 /* GPIO */ #define R3B_Test3 0x3b /* GPIO */ #define R83_AD_IDH 0x83 #define R91_AD_SLOPEREG 0x91 #define R94_AD_BITCONTROL 0x94 static const u8 eeprom_data[][3] = { /* dataH dataM dataL */ {0x01, 0x00, 0x01}, {0x01, 0x80, 0x11}, {0x05, 0x00, 0x14}, {0x05, 0x00, 0x1c}, {0x0d, 0x00, 0x1e}, {0x05, 0x00, 0x1f}, {0x05, 0x05, 0x19}, {0x05, 0x01, 0x1b}, {0x05, 0x09, 0x1e}, {0x0d, 0x89, 0x2e}, {0x05, 0x89, 0x2f}, {0x05, 0x0d, 0xd9}, {0x05, 0x09, 0xf1}, }; /* write 1 byte */ static void reg_w1(struct gspca_dev *gspca_dev, __u16 index, __u8 value) { gspca_dev->usb_buf[0] = value; usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), 0x02, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, /* value */ index, gspca_dev->usb_buf, 1, 500); } /* write 2 bytes */ static void reg_w2(struct gspca_dev *gspca_dev, u16 index, u16 value) { gspca_dev->usb_buf[0] = value; gspca_dev->usb_buf[1] = value >> 8; usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), 0x02, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, /* value */ index, gspca_dev->usb_buf, 2, 500); } static void tv_8532WriteEEprom(struct gspca_dev *gspca_dev) { int i; reg_w1(gspca_dev, R01_TIMING_CONTROL_LOW, CMD_EEprom_Open); for (i = 0; i < ARRAY_SIZE(eeprom_data); i++) { reg_w1(gspca_dev, R03_TABLE_ADDR, i); reg_w1(gspca_dev, R04_WTRAM_DATA_L, eeprom_data[i][2]); reg_w1(gspca_dev, R05_WTRAM_DATA_M, eeprom_data[i][1]); reg_w1(gspca_dev, R06_WTRAM_DATA_H, eeprom_data[i][0]); reg_w1(gspca_dev, R08_RAM_WRITE_ACTION, 0); } reg_w1(gspca_dev, 
R07_TABLE_LEN, i); reg_w1(gspca_dev, R01_TIMING_CONTROL_LOW, CMD_EEprom_Close); } /* this function is called at probe time */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct sd *sd = (struct sd *) gspca_dev; struct cam *cam; cam = &gspca_dev->cam; cam->cam_mode = sif_mode; cam->nmodes = ARRAY_SIZE(sif_mode); sd->exposure = EXPOSURE_DEF; sd->gain = GAIN_DEF; return 0; } static void tv_8532_setReg(struct gspca_dev *gspca_dev) { reg_w1(gspca_dev, R3B_Test3, 0x0a); /* Test0Sel = 10 */ /******************************************************/ reg_w1(gspca_dev, R0E_AD_HEIGHTL, 0x90); reg_w1(gspca_dev, R0F_AD_HEIGHTH, 0x01); reg_w2(gspca_dev, R1C_AD_EXPOSE_TIMEL, 0x018f); reg_w1(gspca_dev, R10_AD_COL_BEGINL, 0x44); /* begin active line */ reg_w1(gspca_dev, R11_AD_COL_BEGINH, 0x00); /* mirror and digital gain */ reg_w1(gspca_dev, R14_AD_ROW_BEGINL, 0x0a); reg_w1(gspca_dev, R94_AD_BITCONTROL, 0x02); reg_w1(gspca_dev, R91_AD_SLOPEREG, 0x00); reg_w1(gspca_dev, R00_PART_CONTROL, LATENT_CHANGE | EXPO_CHANGE); /* = 0x84 */ } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { tv_8532WriteEEprom(gspca_dev); return 0; } static void setexposure(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; reg_w2(gspca_dev, R1C_AD_EXPOSE_TIMEL, sd->exposure); reg_w1(gspca_dev, R00_PART_CONTROL, LATENT_CHANGE | EXPO_CHANGE); /* 0x84 */ } static void setgain(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; reg_w2(gspca_dev, R20_GAIN_G1L, sd->gain); reg_w2(gspca_dev, R22_GAIN_RL, sd->gain); reg_w2(gspca_dev, R24_GAIN_BL, sd->gain); reg_w2(gspca_dev, R26_GAIN_G2L, sd->gain); } /* -- start the camera -- */ static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; reg_w1(gspca_dev, R0C_AD_WIDTHL, 0xe8); /* 0x20; 0x0c */ reg_w1(gspca_dev, R0D_AD_WIDTHH, 0x03); /************************************************/ reg_w1(gspca_dev, 
R28_QUANT, 0x90); /* 0x72 compressed mode 0x28 */ if (gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv) { /* 176x144 */ reg_w1(gspca_dev, R29_LINE, 0x41); /* CIF - 2 lines/packet */ } else { /* 352x288 */ reg_w1(gspca_dev, R29_LINE, 0x81); /* CIF - 2 lines/packet */ } /************************************************/ reg_w1(gspca_dev, R2C_POLARITY, 0x10); /* slow clock */ reg_w1(gspca_dev, R2D_POINT, 0x14); reg_w1(gspca_dev, R2E_POINTH, 0x01); reg_w1(gspca_dev, R2F_POINTB, 0x12); reg_w1(gspca_dev, R30_POINTBH, 0x01); tv_8532_setReg(gspca_dev); setexposure(gspca_dev); setgain(gspca_dev); /************************************************/ reg_w1(gspca_dev, R31_UPD, 0x01); /* update registers */ msleep(200); reg_w1(gspca_dev, R31_UPD, 0x00); /* end update */ gspca_dev->empty_packet = 0; /* check the empty packets */ sd->packet = 0; /* ignore the first packets */ return 0; } static void sd_stopN(struct gspca_dev *gspca_dev) { reg_w1(gspca_dev, R3B_Test3, 0x0b); /* Test0Sel = 11 = GPIO */ } static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* isoc packet */ int len) /* iso packet length */ { struct sd *sd = (struct sd *) gspca_dev; int packet_type0, packet_type1; packet_type0 = packet_type1 = INTER_PACKET; if (gspca_dev->empty_packet) { gspca_dev->empty_packet = 0; sd->packet = gspca_dev->height / 2; packet_type0 = FIRST_PACKET; } else if (sd->packet == 0) return; /* 2 more lines in 352x288 ! 
*/ sd->packet--; if (sd->packet == 0) packet_type1 = LAST_PACKET; /* each packet contains: * - header 2 bytes * - RGRG line * - 4 bytes * - GBGB line * - 4 bytes */ gspca_frame_add(gspca_dev, packet_type0, data + 2, gspca_dev->width); gspca_frame_add(gspca_dev, packet_type1, data + gspca_dev->width + 5, gspca_dev->width); } static int sd_setexposure(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; sd->exposure = val; if (gspca_dev->streaming) setexposure(gspca_dev); return 0; } static int sd_getexposure(struct gspca_dev *gspca_dev, __s32 *val) { struct sd *sd = (struct sd *) gspca_dev; *val = sd->exposure; return 0; } static int sd_setgain(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; sd->gain = val; if (gspca_dev->streaming) setgain(gspca_dev); return 0; } static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val) { struct sd *sd = (struct sd *) gspca_dev; *val = sd->gain; return 0; } /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .ctrls = sd_ctrls, .nctrls = ARRAY_SIZE(sd_ctrls), .config = sd_config, .init = sd_init, .start = sd_start, .stopN = sd_stopN, .pkt_scan = sd_pkt_scan, }; /* -- module initialisation -- */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x046d, 0x0920)}, {USB_DEVICE(0x046d, 0x0921)}, {USB_DEVICE(0x0545, 0x808b)}, {USB_DEVICE(0x0545, 0x8333)}, {USB_DEVICE(0x0923, 0x010f)}, {} }; MODULE_DEVICE_TABLE(usb, device_table); /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, #endif }; module_usb_driver(sd_driver);
gpl-2.0
wan-qy/linux
arch/powerpc/boot/treeboot-currituck.c
8809
3066
/* * Copyright © 2011 Tony Breeds IBM Corporation * * Based on earlier code: * Copyright (C) Paul Mackerras 1997. * * Matt Porter <mporter@kernel.crashing.org> * Copyright 2002-2005 MontaVista Software Inc. * * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net> * Copyright (c) 2003, 2004 Zultys Technologies * * Copyright 2007 David Gibson, IBM Corporation. * Copyright 2010 Ben. Herrenschmidt, IBM Corporation. * Copyright © 2011 David Kleikamp IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <stdarg.h> #include <stddef.h> #include "types.h" #include "elf.h" #include "string.h" #include "stdio.h" #include "page.h" #include "ops.h" #include "reg.h" #include "io.h" #include "dcr.h" #include "4xx.h" #include "44x.h" #include "libfdt.h" BSS_STACK(4096); #define MAX_RANKS 0x4 #define DDR3_MR0CF 0x80010011U static unsigned long long ibm_currituck_memsize; static unsigned long long ibm_currituck_detect_memsize(void) { u32 reg; unsigned i; unsigned long long memsize = 0; for(i = 0; i < MAX_RANKS; i++){ reg = mfdcrx(DDR3_MR0CF + i); if (!(reg & 1)) continue; reg &= 0x0000f000; reg >>= 12; memsize += (0x800000ULL << reg); } return memsize; } static void ibm_currituck_fixups(void) { void *devp = finddevice("/"); u32 dma_ranges[7]; dt_fixup_memory(0x0ULL, ibm_currituck_memsize); while ((devp = find_node_by_devtype(devp, "pci"))) { if (getprop(devp, "dma-ranges", dma_ranges, sizeof(dma_ranges)) < 0) { printf("%s: Failed to get dma-ranges\r\n", __func__); continue; } dma_ranges[5] = ibm_currituck_memsize >> 32; dma_ranges[6] = ibm_currituck_memsize & 0xffffffffUL; setprop(devp, "dma-ranges", dma_ranges, sizeof(dma_ranges)); } } #define SPRN_PIR 0x11E /* Processor Indentification Register */ void platform_init(void) { unsigned long 
end_of_ram, avail_ram; u32 pir_reg; int node, size; const u32 *timebase; ibm_currituck_memsize = ibm_currituck_detect_memsize(); if (ibm_currituck_memsize >> 32) end_of_ram = ~0UL; else end_of_ram = ibm_currituck_memsize; avail_ram = end_of_ram - (unsigned long)_end; simple_alloc_init(_end, avail_ram, 128, 64); platform_ops.fixups = ibm_currituck_fixups; platform_ops.exit = ibm44x_dbcr_reset; pir_reg = mfspr(SPRN_PIR); /* Make sure FDT blob is sane */ if (fdt_check_header(_dtb_start) != 0) fatal("Invalid device tree blob\n"); node = fdt_node_offset_by_prop_value(_dtb_start, -1, "device_type", "cpu", sizeof("cpu")); if (!node) fatal("Cannot find cpu node\n"); timebase = fdt_getprop(_dtb_start, node, "timebase-frequency", &size); if (timebase && (size == 4)) timebase_period_ns = 1000000000 / *timebase; fdt_set_boot_cpuid_phys(_dtb_start, pir_reg); fdt_init(_dtb_start); serial_console_init(); }
gpl-2.0
domintech/PandaBoard-LinuxKernel
arch/mips/fw/arc/init.c
11881
1267
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * PROM library initialisation code. * * Copyright (C) 1996 David S. Miller (davem@davemloft.net) */ #include <linux/init.h> #include <linux/kernel.h> #include <asm/bootinfo.h> #include <asm/sgialib.h> #include <asm/smp-ops.h> #undef DEBUG_PROM_INIT /* Master romvec interface. */ struct linux_romvec *romvec; int prom_argc; LONG *_prom_argv, *_prom_envp; void __init prom_init(void) { PSYSTEM_PARAMETER_BLOCK pb = PROMBLOCK; romvec = ROMVECTOR; prom_argc = fw_arg0; _prom_argv = (LONG *) fw_arg1; _prom_envp = (LONG *) fw_arg2; if (pb->magic != 0x53435241) { printk(KERN_CRIT "Aieee, bad prom vector magic %08lx\n", (unsigned long) pb->magic); while(1) ; } prom_init_cmdline(); prom_identify_arch(); printk(KERN_INFO "PROMLIB: ARC firmware Version %d Revision %d\n", pb->ver, pb->rev); prom_meminit(); #ifdef DEBUG_PROM_INIT pr_info("Press a key to reboot\n"); ArcRead(0, &c, 1, &cnt); ArcEnterInteractiveMode(); #endif #ifdef CONFIG_SGI_IP27 { extern struct plat_smp_ops ip27_smp_ops; register_smp_ops(&ip27_smp_ops); } #endif }
gpl-2.0
XVilka/qemu
hw/framebuffer.c
106
3544
/* * Framebuffer device helper routines * * Copyright (c) 2009 CodeSourcery * Written by Paul Brook <paul@codesourcery.com> * * This code is licensed under the GNU GPLv2. */ /* TODO: - Do something similar for framebuffers with local ram - Handle rotation here instead of hacking dest_pitch - Use common pixel conversion routines instead of per-device drawfn - Remove all DisplayState knowledge from devices. */ #include "hw.h" #include "console.h" #include "framebuffer.h" /* Render an image from a shared memory framebuffer. */ void framebuffer_update_display( DisplayState *ds, target_phys_addr_t base, int cols, /* Width in pixels. */ int rows, /* Leight in pixels. */ int src_width, /* Length of source line, in bytes. */ int dest_row_pitch, /* Bytes between adjacent horizontal output pixels. */ int dest_col_pitch, /* Bytes between adjacent vertical output pixels. */ int invalidate, /* nonzero to redraw the whole image. */ drawfn fn, void *opaque, int *first_row, /* Input and output. */ int *last_row /* Output only */) { target_phys_addr_t src_len; uint8_t *dest; uint8_t *src; uint8_t *src_base; int first, last = 0; int dirty; int i; ram_addr_t addr; ram_addr_t pd; ram_addr_t pd2; i = *first_row; *first_row = -1; src_len = src_width * rows; cpu_physical_sync_dirty_bitmap(base, base + src_len); pd = cpu_get_physical_page_desc(base); pd2 = cpu_get_physical_page_desc(base + src_len - 1); /* We should reall check that this is a continuous ram region. Instead we just check that the first and last pages are both ram, and the right distance apart. */ if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM || (pd2 & ~TARGET_PAGE_MASK) > IO_MEM_ROM) { return; } pd = (pd & TARGET_PAGE_MASK) + (base & ~TARGET_PAGE_MASK); if (((pd + src_len - 1) & TARGET_PAGE_MASK) != (pd2 & TARGET_PAGE_MASK)) { return; } src_base = cpu_physical_memory_map(base, &src_len, 0); /* If we can't map the framebuffer then bail. 
We could try harder, but it's not really worth it as dirty flag tracking will probably already have failed above. */ if (!src_base) return; if (src_len != src_width * rows) { cpu_physical_memory_unmap(src_base, src_len, 0, 0); return; } src = src_base; dest = ds_get_data(ds); if (dest_col_pitch < 0) dest -= dest_col_pitch * (cols - 1); first = -1; addr = pd; addr += i * src_width; src += i * src_width; dest += i * dest_row_pitch; for (; i < rows; i++) { target_phys_addr_t dirty_offset; dirty = 0; dirty_offset = 0; while (addr + dirty_offset < TARGET_PAGE_ALIGN(addr + src_width)) { dirty |= cpu_physical_memory_get_dirty(addr + dirty_offset, VGA_DIRTY_FLAG); dirty_offset += TARGET_PAGE_SIZE; } if (dirty || invalidate) { fn(opaque, dest, src, cols, dest_col_pitch); if (first == -1) first = i; last = i; } addr += src_width; src += src_width; dest += dest_row_pitch; } cpu_physical_memory_unmap(src_base, src_len, 0, 0); if (first < 0) { return; } cpu_physical_memory_reset_dirty(pd, pd + src_len, VGA_DIRTY_FLAG); *first_row = first; *last_row = last; return; }
gpl-2.0
ibkim/uml
drivers/mmc/core/mmc_ops.c
362
12620
/* * linux/drivers/mmc/core/mmc_ops.h * * Copyright 2006-2007 Pierre Ossman * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. */ #include <linux/slab.h> #include <linux/export.h> #include <linux/types.h> #include <linux/scatterlist.h> #include <linux/mmc/host.h> #include <linux/mmc/card.h> #include <linux/mmc/mmc.h> #include "core.h" #include "mmc_ops.h" static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card) { int err; struct mmc_command cmd = {0}; BUG_ON(!host); cmd.opcode = MMC_SELECT_CARD; if (card) { cmd.arg = card->rca << 16; cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; } else { cmd.arg = 0; cmd.flags = MMC_RSP_NONE | MMC_CMD_AC; } err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES); if (err) return err; return 0; } int mmc_select_card(struct mmc_card *card) { BUG_ON(!card); return _mmc_select_card(card->host, card); } int mmc_deselect_cards(struct mmc_host *host) { return _mmc_select_card(host, NULL); } int mmc_card_sleepawake(struct mmc_host *host, int sleep) { struct mmc_command cmd = {0}; struct mmc_card *card = host->card; int err; if (sleep) mmc_deselect_cards(host); cmd.opcode = MMC_SLEEP_AWAKE; cmd.arg = card->rca << 16; if (sleep) cmd.arg |= 1 << 15; cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; err = mmc_wait_for_cmd(host, &cmd, 0); if (err) return err; /* * If the host does not wait while the card signals busy, then we will * will have to wait the sleep/awake timeout. Note, we cannot use the * SEND_STATUS command to poll the status because that command (and most * others) is invalid while the card sleeps. 
*/ if (!(host->caps & MMC_CAP_WAIT_WHILE_BUSY)) mmc_delay(DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000)); if (!sleep) err = mmc_select_card(card); return err; } int mmc_go_idle(struct mmc_host *host) { int err; struct mmc_command cmd = {0}; /* * Non-SPI hosts need to prevent chipselect going active during * GO_IDLE; that would put chips into SPI mode. Remind them of * that in case of hardware that won't pull up DAT3/nCS otherwise. * * SPI hosts ignore ios.chip_select; it's managed according to * rules that must accommodate non-MMC slaves which this layer * won't even know about. */ if (!mmc_host_is_spi(host)) { mmc_set_chip_select(host, MMC_CS_HIGH); mmc_delay(1); } cmd.opcode = MMC_GO_IDLE_STATE; cmd.arg = 0; cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC; err = mmc_wait_for_cmd(host, &cmd, 0); mmc_delay(1); if (!mmc_host_is_spi(host)) { mmc_set_chip_select(host, MMC_CS_DONTCARE); mmc_delay(1); } host->use_spi_crc = 0; return err; } int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr) { struct mmc_command cmd = {0}; int i, err = 0; BUG_ON(!host); cmd.opcode = MMC_SEND_OP_COND; cmd.arg = mmc_host_is_spi(host) ? 
0 : ocr; cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR; for (i = 100; i; i--) { err = mmc_wait_for_cmd(host, &cmd, 0); if (err) break; /* if we're just probing, do a single pass */ if (ocr == 0) break; /* otherwise wait until reset completes */ if (mmc_host_is_spi(host)) { if (!(cmd.resp[0] & R1_SPI_IDLE)) break; } else { if (cmd.resp[0] & MMC_CARD_BUSY) break; } err = -ETIMEDOUT; mmc_delay(10); } if (rocr && !mmc_host_is_spi(host)) *rocr = cmd.resp[0]; return err; } int mmc_all_send_cid(struct mmc_host *host, u32 *cid) { int err; struct mmc_command cmd = {0}; BUG_ON(!host); BUG_ON(!cid); cmd.opcode = MMC_ALL_SEND_CID; cmd.arg = 0; cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR; err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES); if (err) return err; memcpy(cid, cmd.resp, sizeof(u32) * 4); return 0; } int mmc_set_relative_addr(struct mmc_card *card) { int err; struct mmc_command cmd = {0}; BUG_ON(!card); BUG_ON(!card->host); cmd.opcode = MMC_SET_RELATIVE_ADDR; cmd.arg = card->rca << 16; cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES); if (err) return err; return 0; } static int mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode) { int err; struct mmc_command cmd = {0}; BUG_ON(!host); BUG_ON(!cxd); cmd.opcode = opcode; cmd.arg = arg; cmd.flags = MMC_RSP_R2 | MMC_CMD_AC; err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES); if (err) return err; memcpy(cxd, cmd.resp, sizeof(u32) * 4); return 0; } static int mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host, u32 opcode, void *buf, unsigned len) { struct mmc_request mrq = {NULL}; struct mmc_command cmd = {0}; struct mmc_data data = {0}; struct scatterlist sg; void *data_buf; /* dma onto stack is unsafe/nonportable, but callers to this * routine normally provide temporary on-stack buffers ... 
*/ data_buf = kmalloc(len, GFP_KERNEL); if (data_buf == NULL) return -ENOMEM; mrq.cmd = &cmd; mrq.data = &data; cmd.opcode = opcode; cmd.arg = 0; /* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we * rely on callers to never use this with "native" calls for reading * CSD or CID. Native versions of those commands use the R2 type, * not R1 plus a data block. */ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; data.blksz = len; data.blocks = 1; data.flags = MMC_DATA_READ; data.sg = &sg; data.sg_len = 1; sg_init_one(&sg, data_buf, len); if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) { /* * The spec states that CSR and CID accesses have a timeout * of 64 clock cycles. */ data.timeout_ns = 0; data.timeout_clks = 64; } else mmc_set_data_timeout(&data, card); mmc_wait_for_req(host, &mrq); memcpy(buf, data_buf, len); kfree(data_buf); if (cmd.error) return cmd.error; if (data.error) return data.error; return 0; } int mmc_send_csd(struct mmc_card *card, u32 *csd) { int ret, i; if (!mmc_host_is_spi(card->host)) return mmc_send_cxd_native(card->host, card->rca << 16, csd, MMC_SEND_CSD); ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd, 16); if (ret) return ret; for (i = 0;i < 4;i++) csd[i] = be32_to_cpu(csd[i]); return 0; } int mmc_send_cid(struct mmc_host *host, u32 *cid) { int ret, i; if (!mmc_host_is_spi(host)) { if (!host->card) return -EINVAL; return mmc_send_cxd_native(host, host->card->rca << 16, cid, MMC_SEND_CID); } ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid, 16); if (ret) return ret; for (i = 0;i < 4;i++) cid[i] = be32_to_cpu(cid[i]); return 0; } int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd) { return mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd, 512); } int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp) { struct mmc_command cmd = {0}; int err; cmd.opcode = MMC_SPI_READ_OCR; cmd.arg = highcap ? 
(1 << 30) : 0; cmd.flags = MMC_RSP_SPI_R3; err = mmc_wait_for_cmd(host, &cmd, 0); *ocrp = cmd.resp[1]; return err; } int mmc_spi_set_crc(struct mmc_host *host, int use_crc) { struct mmc_command cmd = {0}; int err; cmd.opcode = MMC_SPI_CRC_ON_OFF; cmd.flags = MMC_RSP_SPI_R1; cmd.arg = use_crc; err = mmc_wait_for_cmd(host, &cmd, 0); if (!err) host->use_spi_crc = use_crc; return err; } /** * mmc_switch - modify EXT_CSD register * @card: the MMC card associated with the data transfer * @set: cmd set values * @index: EXT_CSD register index * @value: value to program into EXT_CSD register * @timeout_ms: timeout (ms) for operation performed by register write, * timeout of zero implies maximum possible timeout * * Modifies the EXT_CSD register for selected card. */ int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, unsigned int timeout_ms) { int err; struct mmc_command cmd = {0}; u32 status; BUG_ON(!card); BUG_ON(!card->host); cmd.opcode = MMC_SWITCH; cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | (index << 16) | (value << 8) | set; cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; cmd.cmd_timeout_ms = timeout_ms; err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES); if (err) return err; /* Must check status to be sure of no errors */ do { err = mmc_send_status(card, &status); if (err) return err; if (card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) break; if (mmc_host_is_spi(card->host)) break; } while (R1_CURRENT_STATE(status) == R1_STATE_PRG); if (mmc_host_is_spi(card->host)) { if (status & R1_SPI_ILLEGAL_COMMAND) return -EBADMSG; } else { if (status & 0xFDFFA000) pr_warning("%s: unexpected status %#x after " "switch", mmc_hostname(card->host), status); if (status & R1_SWITCH_ERROR) return -EBADMSG; } return 0; } EXPORT_SYMBOL_GPL(mmc_switch); int mmc_send_status(struct mmc_card *card, u32 *status) { int err; struct mmc_command cmd = {0}; BUG_ON(!card); BUG_ON(!card->host); cmd.opcode = MMC_SEND_STATUS; if (!mmc_host_is_spi(card->host)) cmd.arg = 
card->rca << 16; cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC; err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES); if (err) return err; /* NOTE: callers are required to understand the difference * between "native" and SPI format status words! */ if (status) *status = cmd.resp[0]; return 0; } static int mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode, u8 len) { struct mmc_request mrq = {NULL}; struct mmc_command cmd = {0}; struct mmc_data data = {0}; struct scatterlist sg; u8 *data_buf; u8 *test_buf; int i, err; static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 }; static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 }; /* dma onto stack is unsafe/nonportable, but callers to this * routine normally provide temporary on-stack buffers ... */ data_buf = kmalloc(len, GFP_KERNEL); if (!data_buf) return -ENOMEM; if (len == 8) test_buf = testdata_8bit; else if (len == 4) test_buf = testdata_4bit; else { pr_err("%s: Invalid bus_width %d\n", mmc_hostname(host), len); kfree(data_buf); return -EINVAL; } if (opcode == MMC_BUS_TEST_W) memcpy(data_buf, test_buf, len); mrq.cmd = &cmd; mrq.data = &data; cmd.opcode = opcode; cmd.arg = 0; /* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we * rely on callers to never use this with "native" calls for reading * CSD or CID. Native versions of those commands use the R2 type, * not R1 plus a data block. 
*/ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; data.blksz = len; data.blocks = 1; if (opcode == MMC_BUS_TEST_R) data.flags = MMC_DATA_READ; else data.flags = MMC_DATA_WRITE; data.sg = &sg; data.sg_len = 1; sg_init_one(&sg, data_buf, len); mmc_wait_for_req(host, &mrq); err = 0; if (opcode == MMC_BUS_TEST_R) { for (i = 0; i < len / 4; i++) if ((test_buf[i] ^ data_buf[i]) != 0xff) { err = -EIO; break; } } kfree(data_buf); if (cmd.error) return cmd.error; if (data.error) return data.error; return err; } int mmc_bus_test(struct mmc_card *card, u8 bus_width) { int err, width; if (bus_width == MMC_BUS_WIDTH_8) width = 8; else if (bus_width == MMC_BUS_WIDTH_4) width = 4; else if (bus_width == MMC_BUS_WIDTH_1) return 0; /* no need for test */ else return -EINVAL; /* * Ignore errors from BUS_TEST_W. BUS_TEST_R will fail if there * is a problem. This improves chances that the test will work. */ mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width); err = mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width); return err; } int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status) { struct mmc_command cmd = {0}; unsigned int opcode; int err; if (!card->ext_csd.hpi) { pr_warning("%s: Card didn't support HPI command\n", mmc_hostname(card->host)); return -EINVAL; } opcode = card->ext_csd.hpi_cmd; if (opcode == MMC_STOP_TRANSMISSION) cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; else if (opcode == MMC_SEND_STATUS) cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; cmd.opcode = opcode; cmd.arg = card->rca << 16 | 1; cmd.cmd_timeout_ms = card->ext_csd.out_of_int_time; err = mmc_wait_for_cmd(card->host, &cmd, 0); if (err) { pr_warn("%s: error %d interrupting operation. " "HPI command response %#x\n", mmc_hostname(card->host), err, cmd.resp[0]); return err; } if (status) *status = cmd.resp[0]; return 0; }
gpl-2.0
BanBxda/Sense_4.3
fs/fat/namei_vfat.c
362
21985
/* * linux/fs/vfat/namei.c * * Written 1992,1993 by Werner Almesberger * * Windows95/Windows NT compatible extended MSDOS filesystem * by Gordon Chaffee Copyright (C) 1995. Send bug reports for the * VFAT filesystem to <chaffee@cs.berkeley.edu>. Specify * what file operation caused you trouble and if you can duplicate * the problem, send a script that demonstrates it. * * Short name translation 1999, 2001 by Wolfram Pienkoss <wp@bszh.de> * * Support Multibyte characters and cleanup by * OGAWA Hirofumi <hirofumi@mail.parknet.co.jp> */ #include <linux/module.h> #include <linux/jiffies.h> #include <linux/ctype.h> #include <linux/slab.h> #include <linux/buffer_head.h> #include <linux/namei.h> #include "fat.h" static int vfat_revalidate_shortname(struct dentry *dentry) { int ret = 1; spin_lock(&dentry->d_lock); if (dentry->d_time != dentry->d_parent->d_inode->i_version) ret = 0; spin_unlock(&dentry->d_lock); return ret; } static int vfat_revalidate(struct dentry *dentry, struct nameidata *nd) { if (nd && nd->flags & LOOKUP_RCU) return -ECHILD; if (dentry->d_inode) return 1; return vfat_revalidate_shortname(dentry); } static int vfat_revalidate_ci(struct dentry *dentry, struct nameidata *nd) { if (nd && nd->flags & LOOKUP_RCU) return -ECHILD; if (dentry->d_inode) return 1; if (!nd) return 0; if (nd->flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET)) return 0; return vfat_revalidate_shortname(dentry); } static unsigned int __vfat_striptail_len(unsigned int len, const char *name) { while (len && name[len - 1] == '.') len--; return len; } static unsigned int vfat_striptail_len(const struct qstr *qstr) { return __vfat_striptail_len(qstr->len, qstr->name); } static int vfat_hash(const struct dentry *dentry, const struct inode *inode, struct qstr *qstr) { qstr->hash = full_name_hash(qstr->name, vfat_striptail_len(qstr)); return 0; } static int vfat_hashi(const struct dentry *dentry, const struct inode *inode, struct qstr *qstr) { struct nls_table *t = 
MSDOS_SB(dentry->d_sb)->nls_io; const unsigned char *name; unsigned int len; unsigned long hash; name = qstr->name; len = vfat_striptail_len(qstr); hash = init_name_hash(); while (len--) hash = partial_name_hash(nls_tolower(t, *name++), hash); qstr->hash = end_name_hash(hash); return 0; } static int vfat_cmpi(const struct dentry *parent, const struct inode *pinode, const struct dentry *dentry, const struct inode *inode, unsigned int len, const char *str, const struct qstr *name) { struct nls_table *t = MSDOS_SB(parent->d_sb)->nls_io; unsigned int alen, blen; alen = vfat_striptail_len(name); blen = __vfat_striptail_len(len, str); if (alen == blen) { if (nls_strnicmp(t, name->name, str, alen) == 0) return 0; } return 1; } static int vfat_cmp(const struct dentry *parent, const struct inode *pinode, const struct dentry *dentry, const struct inode *inode, unsigned int len, const char *str, const struct qstr *name) { unsigned int alen, blen; alen = vfat_striptail_len(name); blen = __vfat_striptail_len(len, str); if (alen == blen) { if (strncmp(name->name, str, alen) == 0) return 0; } return 1; } static const struct dentry_operations vfat_ci_dentry_ops = { .d_revalidate = vfat_revalidate_ci, .d_hash = vfat_hashi, .d_compare = vfat_cmpi, }; static const struct dentry_operations vfat_dentry_ops = { .d_revalidate = vfat_revalidate, .d_hash = vfat_hash, .d_compare = vfat_cmp, }; static inline wchar_t vfat_bad_char(wchar_t w) { return (w < 0x0020) || (w == '*') || (w == '?') || (w == '<') || (w == '>') || (w == '|') || (w == '"') || (w == ':') || (w == '/') || (w == '\\'); } static inline wchar_t vfat_replace_char(wchar_t w) { return (w == '[') || (w == ']') || (w == ';') || (w == ',') || (w == '+') || (w == '='); } static wchar_t vfat_skip_char(wchar_t w) { return (w == '.') || (w == ' '); } static inline int vfat_is_used_badchars(const wchar_t *s, int len) { int i; for (i = 0; i < len; i++) if (vfat_bad_char(s[i])) return -EINVAL; if (s[i - 1] == ' ') return 
-FAT_CHARSET_ERROR; return 0; } static int vfat_find_form(struct inode *dir, unsigned char *name) { struct fat_slot_info sinfo; int err = fat_scan(dir, name, &sinfo); if (err) return -ENOENT; brelse(sinfo.bh); return 0; } struct shortname_info { unsigned char lower:1, upper:1, valid:1; }; #define INIT_SHORTNAME_INFO(x) do { \ (x)->lower = 1; \ (x)->upper = 1; \ (x)->valid = 1; \ } while (0) static inline int to_shortname_char(struct nls_table *nls, unsigned char *buf, int buf_size, wchar_t *src, struct shortname_info *info) { int len; if (vfat_skip_char(*src)) { info->valid = 0; return 0; } if (vfat_replace_char(*src)) { info->valid = 0; buf[0] = '_'; return 1; } len = nls->uni2char(*src, buf, buf_size); if (len <= 0) { info->valid = 0; buf[0] = '_'; len = 1; } else if (len == 1) { unsigned char prev = buf[0]; if (buf[0] >= 0x7F) { info->lower = 0; info->upper = 0; } buf[0] = nls_toupper(nls, buf[0]); if (isalpha(buf[0])) { if (buf[0] == prev) info->lower = 0; else info->upper = 0; } } else { info->lower = 0; info->upper = 0; } return len; } static int vfat_create_shortname(struct inode *dir, struct nls_table *nls, wchar_t *uname, int ulen, unsigned char *name_res, unsigned char *lcase) { struct fat_mount_options *opts = &MSDOS_SB(dir->i_sb)->options; wchar_t *ip, *ext_start, *end, *name_start; unsigned char base[9], ext[4], buf[5], *p; unsigned char charbuf[NLS_MAX_CHARSET_SIZE]; int chl, chi; int sz = 0, extlen, baselen, i, numtail_baselen, numtail2_baselen; int is_shortname; struct shortname_info base_info, ext_info; is_shortname = 1; INIT_SHORTNAME_INFO(&base_info); INIT_SHORTNAME_INFO(&ext_info); ext_start = end = &uname[ulen]; while (--ext_start >= uname) { if (*ext_start == 0x002E) { if (ext_start == end - 1) { sz = ulen; ext_start = NULL; } break; } } if (ext_start == uname - 1) { sz = ulen; ext_start = NULL; } else if (ext_start) { name_start = &uname[0]; while (name_start < ext_start) { if (!vfat_skip_char(*name_start)) break; name_start++; } if 
(name_start != ext_start) { sz = ext_start - uname; ext_start++; } else { sz = ulen; ext_start = NULL; } } numtail_baselen = 6; numtail2_baselen = 2; for (baselen = i = 0, p = base, ip = uname; i < sz; i++, ip++) { chl = to_shortname_char(nls, charbuf, sizeof(charbuf), ip, &base_info); if (chl == 0) continue; if (baselen < 2 && (baselen + chl) > 2) numtail2_baselen = baselen; if (baselen < 6 && (baselen + chl) > 6) numtail_baselen = baselen; for (chi = 0; chi < chl; chi++) { *p++ = charbuf[chi]; baselen++; if (baselen >= 8) break; } if (baselen >= 8) { if ((chi < chl - 1) || (ip + 1) - uname < sz) is_shortname = 0; break; } } if (baselen == 0) { return -EINVAL; } extlen = 0; if (ext_start) { for (p = ext, ip = ext_start; extlen < 3 && ip < end; ip++) { chl = to_shortname_char(nls, charbuf, sizeof(charbuf), ip, &ext_info); if (chl == 0) continue; if ((extlen + chl) > 3) { is_shortname = 0; break; } for (chi = 0; chi < chl; chi++) { *p++ = charbuf[chi]; extlen++; } if (extlen >= 3) { if (ip + 1 != end) is_shortname = 0; break; } } } ext[extlen] = '\0'; base[baselen] = '\0'; if (base[0] == DELETED_FLAG) base[0] = 0x05; memset(name_res, ' ', MSDOS_NAME); memcpy(name_res, base, baselen); memcpy(name_res + 8, ext, extlen); *lcase = 0; if (is_shortname && base_info.valid && ext_info.valid) { if (vfat_find_form(dir, name_res) == 0) return -EEXIST; if (opts->shortname & VFAT_SFN_CREATE_WIN95) { return (base_info.upper && ext_info.upper); } else if (opts->shortname & VFAT_SFN_CREATE_WINNT) { if ((base_info.upper || base_info.lower) && (ext_info.upper || ext_info.lower)) { if (!base_info.upper && base_info.lower) *lcase |= CASE_LOWER_BASE; if (!ext_info.upper && ext_info.lower) *lcase |= CASE_LOWER_EXT; return 1; } return 0; } else { BUG(); } } if (opts->numtail == 0) if (vfat_find_form(dir, name_res) < 0) return 0; if (baselen > 6) { baselen = numtail_baselen; name_res[7] = ' '; } name_res[baselen] = '~'; for (i = 1; i < 10; i++) { name_res[baselen + 1] = i + '0'; if 
(vfat_find_form(dir, name_res) < 0) return 0; } i = jiffies; sz = (jiffies >> 16) & 0x7; if (baselen > 2) { baselen = numtail2_baselen; name_res[7] = ' '; } name_res[baselen + 4] = '~'; name_res[baselen + 5] = '1' + sz; while (1) { snprintf(buf, sizeof(buf), "%04X", i & 0xffff); memcpy(&name_res[baselen], buf, 4); if (vfat_find_form(dir, name_res) < 0) break; i -= 11; } return 0; } static int xlate_to_uni(const unsigned char *name, int len, unsigned char *outname, int *longlen, int *outlen, int escape, int utf8, struct nls_table *nls) { const unsigned char *ip; unsigned char nc; unsigned char *op; unsigned int ec; int i, k, fill; int charlen; if (utf8) { *outlen = utf8s_to_utf16s(name, len, UTF16_HOST_ENDIAN, (wchar_t *) outname, FAT_LFN_LEN + 2); if (*outlen < 0) return *outlen; else if (*outlen > FAT_LFN_LEN) return -ENAMETOOLONG; op = &outname[*outlen * sizeof(wchar_t)]; } else { for (i = 0, ip = name, op = outname, *outlen = 0; i < len && *outlen < FAT_LFN_LEN; *outlen += 1) { if (escape && (*ip == ':')) { if (i > len - 5) return -EINVAL; ec = 0; for (k = 1; k < 5; k++) { nc = ip[k]; ec <<= 4; if (nc >= '0' && nc <= '9') { ec |= nc - '0'; continue; } if (nc >= 'a' && nc <= 'f') { ec |= nc - ('a' - 10); continue; } if (nc >= 'A' && nc <= 'F') { ec |= nc - ('A' - 10); continue; } return -EINVAL; } *op++ = ec & 0xFF; *op++ = ec >> 8; ip += 5; i += 5; } else { charlen = nls->char2uni(ip, len - i, (wchar_t *)op); if (charlen < 0) return -EINVAL; ip += charlen; i += charlen; op += 2; } } if (i < len) return -ENAMETOOLONG; } *longlen = *outlen; if (*outlen % 13) { *op++ = 0; *op++ = 0; *outlen += 1; if (*outlen % 13) { fill = 13 - (*outlen % 13); for (i = 0; i < fill; i++) { *op++ = 0xff; *op++ = 0xff; } *outlen += fill; } } return 0; } static int vfat_build_slots(struct inode *dir, const unsigned char *name, int len, int is_dir, int cluster, struct timespec *ts, struct msdos_dir_slot *slots, int *nr_slots) { struct msdos_sb_info *sbi = MSDOS_SB(dir->i_sb); struct 
fat_mount_options *opts = &sbi->options; struct msdos_dir_slot *ps; struct msdos_dir_entry *de; unsigned char cksum, lcase; unsigned char msdos_name[MSDOS_NAME]; wchar_t *uname; __le16 time, date; u8 time_cs; int err, ulen, usize, i; loff_t offset; *nr_slots = 0; uname = __getname(); if (!uname) return -ENOMEM; err = xlate_to_uni(name, len, (unsigned char *)uname, &ulen, &usize, opts->unicode_xlate, opts->utf8, sbi->nls_io); if (err) goto out_free; err = vfat_is_used_badchars(uname, ulen); if (err) { if (err == -FAT_CHARSET_ERROR) { if (uname[ulen - 2] == ' ') goto out_free; pr_info("%s MTP mkdir \"%s\" workaround\n", __func__, name); } else goto out_free; } err = vfat_create_shortname(dir, sbi->nls_disk, uname, ulen, msdos_name, &lcase); if (err < 0) goto out_free; else if (err == 1) { de = (struct msdos_dir_entry *)slots; err = 0; goto shortname; } cksum = fat_checksum(msdos_name); *nr_slots = usize / 13; for (ps = slots, i = *nr_slots; i > 0; i--, ps++) { ps->id = i; ps->attr = ATTR_EXT; ps->reserved = 0; ps->alias_checksum = cksum; ps->start = 0; offset = (i - 1) * 13; fatwchar_to16(ps->name0_4, uname + offset, 5); fatwchar_to16(ps->name5_10, uname + offset + 5, 6); fatwchar_to16(ps->name11_12, uname + offset + 11, 2); } slots[0].id |= 0x40; de = (struct msdos_dir_entry *)ps; shortname: (*nr_slots)++; memcpy(de->name, msdos_name, MSDOS_NAME); de->attr = is_dir ? 
ATTR_DIR : ATTR_ARCH; de->lcase = lcase; fat_time_unix2fat(sbi, ts, &time, &date, &time_cs); de->time = de->ctime = time; de->date = de->cdate = de->adate = date; de->ctime_cs = time_cs; de->start = cpu_to_le16(cluster); de->starthi = cpu_to_le16(cluster >> 16); de->size = 0; out_free: __putname(uname); return err; } static int vfat_add_entry(struct inode *dir, struct qstr *qname, int is_dir, int cluster, struct timespec *ts, struct fat_slot_info *sinfo) { struct msdos_dir_slot *slots; unsigned int len; int err, nr_slots; len = vfat_striptail_len(qname); if (len == 0) return -ENOENT; slots = kmalloc(sizeof(*slots) * MSDOS_SLOTS, GFP_NOFS); if (slots == NULL) return -ENOMEM; err = vfat_build_slots(dir, qname->name, len, is_dir, cluster, ts, slots, &nr_slots); if (err) goto cleanup; err = fat_add_entries(dir, slots, nr_slots, sinfo); if (err) goto cleanup; dir->i_ctime = dir->i_mtime = dir->i_atime = *ts; if (IS_DIRSYNC(dir)) (void)fat_sync_inode(dir); else mark_inode_dirty(dir); cleanup: kfree(slots); return err; } static int vfat_find(struct inode *dir, struct qstr *qname, struct fat_slot_info *sinfo) { unsigned int len = vfat_striptail_len(qname); if (len == 0) return -ENOENT; return fat_search_long(dir, qname->name, len, sinfo); } static int vfat_d_anon_disconn(struct dentry *dentry) { return IS_ROOT(dentry) && (dentry->d_flags & DCACHE_DISCONNECTED); } static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { struct super_block *sb = dir->i_sb; struct fat_slot_info sinfo; struct inode *inode; struct dentry *alias; int err; lock_super(sb); err = vfat_find(dir, &dentry->d_name, &sinfo); if (err) { if (err == -ENOENT) { inode = NULL; goto out; } goto error; } inode = fat_build_inode(sb, sinfo.de, sinfo.i_pos); brelse(sinfo.bh); if (IS_ERR(inode)) { err = PTR_ERR(inode); goto error; } alias = d_find_alias(inode); if (alias && !vfat_d_anon_disconn(alias)) { BUG_ON(d_unhashed(alias)); if (!S_ISDIR(inode->i_mode)) d_move(alias, 
dentry); iput(inode); unlock_super(sb); return alias; } else dput(alias); out: unlock_super(sb); dentry->d_time = dentry->d_parent->d_inode->i_version; dentry = d_splice_alias(inode, dentry); if (dentry) dentry->d_time = dentry->d_parent->d_inode->i_version; return dentry; error: unlock_super(sb); return ERR_PTR(err); } static int vfat_create(struct inode *dir, struct dentry *dentry, umode_t mode, struct nameidata *nd) { struct super_block *sb = dir->i_sb; struct inode *inode; struct fat_slot_info sinfo; struct timespec ts; int err; lock_super(sb); ts = CURRENT_TIME_SEC; err = vfat_add_entry(dir, &dentry->d_name, 0, 0, &ts, &sinfo); if (err) goto out; dir->i_version++; inode = fat_build_inode(sb, sinfo.de, sinfo.i_pos); brelse(sinfo.bh); if (IS_ERR(inode)) { err = PTR_ERR(inode); goto out; } inode->i_version++; inode->i_mtime = inode->i_atime = inode->i_ctime = ts; /* timestamp is already written, so mark_inode_dirty() is unneeded. */ dentry->d_time = dentry->d_parent->d_inode->i_version; d_instantiate(dentry, inode); out: unlock_super(sb); return err; } static int vfat_rmdir(struct inode *dir, struct dentry *dentry) { struct inode *inode = dentry->d_inode; struct super_block *sb = dir->i_sb; struct fat_slot_info sinfo; int err; lock_super(sb); err = fat_dir_empty(inode); if (err) goto out; err = vfat_find(dir, &dentry->d_name, &sinfo); if (err) goto out; err = fat_remove_entries(dir, &sinfo); if (err) goto out; drop_nlink(dir); clear_nlink(inode); inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC; fat_detach(inode); out: unlock_super(sb); return err; } static int vfat_unlink(struct inode *dir, struct dentry *dentry) { struct inode *inode = dentry->d_inode; struct super_block *sb = dir->i_sb; struct fat_slot_info sinfo; int err; lock_super(sb); err = vfat_find(dir, &dentry->d_name, &sinfo); if (err) goto out; err = fat_remove_entries(dir, &sinfo); if (err) goto out; clear_nlink(inode); inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC; fat_detach(inode); out: 
unlock_super(sb); return err; } static int vfat_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) { struct super_block *sb = dir->i_sb; struct inode *inode; struct fat_slot_info sinfo; struct timespec ts; int err, cluster; lock_super(sb); ts = CURRENT_TIME_SEC; cluster = fat_alloc_new_dir(dir, &ts); if (cluster < 0) { err = cluster; goto out; } err = vfat_add_entry(dir, &dentry->d_name, 1, cluster, &ts, &sinfo); if (err) goto out_free; dir->i_version++; inc_nlink(dir); inode = fat_build_inode(sb, sinfo.de, sinfo.i_pos); brelse(sinfo.bh); if (IS_ERR(inode)) { err = PTR_ERR(inode); goto out; } inode->i_version++; set_nlink(inode, 2); inode->i_mtime = inode->i_atime = inode->i_ctime = ts; /* timestamp is already written, so mark_inode_dirty() is unneeded. */ dentry->d_time = dentry->d_parent->d_inode->i_version; d_instantiate(dentry, inode); unlock_super(sb); return 0; out_free: fat_free_clusters(dir, cluster); out: unlock_super(sb); return err; } static int vfat_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { struct buffer_head *dotdot_bh; struct msdos_dir_entry *dotdot_de; struct inode *old_inode, *new_inode; struct fat_slot_info old_sinfo, sinfo; struct timespec ts; loff_t dotdot_i_pos, new_i_pos; int err, is_dir, update_dotdot, corrupt = 0; struct super_block *sb = old_dir->i_sb; old_sinfo.bh = sinfo.bh = dotdot_bh = NULL; old_inode = old_dentry->d_inode; new_inode = new_dentry->d_inode; lock_super(sb); err = vfat_find(old_dir, &old_dentry->d_name, &old_sinfo); if (err) goto out; is_dir = S_ISDIR(old_inode->i_mode); update_dotdot = (is_dir && old_dir != new_dir); if (update_dotdot) { if (fat_get_dotdot_entry(old_inode, &dotdot_bh, &dotdot_de, &dotdot_i_pos) < 0) { err = -EIO; goto out; } } ts = CURRENT_TIME_SEC; if (new_inode) { if (is_dir) { err = fat_dir_empty(new_inode); if (err) goto out; } new_i_pos = MSDOS_I(new_inode)->i_pos; fat_detach(new_inode); } else { err = 
vfat_add_entry(new_dir, &new_dentry->d_name, is_dir, 0, &ts, &sinfo); if (err) goto out; new_i_pos = sinfo.i_pos; } new_dir->i_version++; fat_detach(old_inode); fat_attach(old_inode, new_i_pos); if (IS_DIRSYNC(new_dir)) { err = fat_sync_inode(old_inode); if (err) goto error_inode; } else mark_inode_dirty(old_inode); if (update_dotdot) { int start = MSDOS_I(new_dir)->i_logstart; dotdot_de->start = cpu_to_le16(start); dotdot_de->starthi = cpu_to_le16(start >> 16); mark_buffer_dirty_inode(dotdot_bh, old_inode); if (IS_DIRSYNC(new_dir)) { err = sync_dirty_buffer(dotdot_bh); if (err) goto error_dotdot; } drop_nlink(old_dir); if (!new_inode) inc_nlink(new_dir); } err = fat_remove_entries(old_dir, &old_sinfo); old_sinfo.bh = NULL; if (err) goto error_dotdot; old_dir->i_version++; old_dir->i_ctime = old_dir->i_mtime = ts; if (IS_DIRSYNC(old_dir)) (void)fat_sync_inode(old_dir); else mark_inode_dirty(old_dir); if (new_inode) { drop_nlink(new_inode); if (is_dir) drop_nlink(new_inode); new_inode->i_ctime = ts; } out: brelse(sinfo.bh); brelse(dotdot_bh); brelse(old_sinfo.bh); unlock_super(sb); return err; error_dotdot: corrupt = 1; if (update_dotdot) { int start = MSDOS_I(old_dir)->i_logstart; dotdot_de->start = cpu_to_le16(start); dotdot_de->starthi = cpu_to_le16(start >> 16); mark_buffer_dirty_inode(dotdot_bh, old_inode); corrupt |= sync_dirty_buffer(dotdot_bh); } error_inode: fat_detach(old_inode); fat_attach(old_inode, old_sinfo.i_pos); if (new_inode) { fat_attach(new_inode, new_i_pos); if (corrupt) corrupt |= fat_sync_inode(new_inode); } else { int err2 = fat_remove_entries(new_dir, &sinfo); if (corrupt) corrupt |= err2; sinfo.bh = NULL; } if (corrupt < 0) { fat_fs_error(new_dir->i_sb, "%s: Filesystem corrupted (i_pos %lld)", __func__, sinfo.i_pos); } goto out; } static const struct inode_operations vfat_dir_inode_operations = { .create = vfat_create, .lookup = vfat_lookup, .unlink = vfat_unlink, .mkdir = vfat_mkdir, .rmdir = vfat_rmdir, .rename = vfat_rename, .setattr = 
fat_setattr, .getattr = fat_getattr, }; static void setup(struct super_block *sb) { MSDOS_SB(sb)->dir_ops = &vfat_dir_inode_operations; if (MSDOS_SB(sb)->options.name_check != 's') sb->s_d_op = &vfat_ci_dentry_ops; else sb->s_d_op = &vfat_dentry_ops; } static int vfat_fill_super(struct super_block *sb, void *data, int silent) { return fat_fill_super(sb, data, silent, 1, setup); } static struct dentry *vfat_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_bdev(fs_type, flags, dev_name, data, vfat_fill_super); } static struct file_system_type vfat_fs_type = { .owner = THIS_MODULE, .name = "vfat", .mount = vfat_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; static int __init init_vfat_fs(void) { return register_filesystem(&vfat_fs_type); } static void __exit exit_vfat_fs(void) { unregister_filesystem(&vfat_fs_type); } MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("VFAT filesystem support"); MODULE_AUTHOR("Gordon Chaffee"); module_init(init_vfat_fs) module_exit(exit_vfat_fs)
gpl-2.0
Lembed/uClinux-Cortex
security/keys/permission.c
618
2872
/* permission.c: key permission determination * * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/security.h> #include "internal.h" /*****************************************************************************/ /** * key_task_permission - Check a key can be used * @key_ref: The key to check * @cred: The credentials to use * @perm: The permissions to check for * * Check to see whether permission is granted to use a key in the desired way, * but permit the security modules to override. * * The caller must hold either a ref on cred or must hold the RCU readlock or a * spinlock. */ int key_task_permission(const key_ref_t key_ref, const struct cred *cred, key_perm_t perm) { struct key *key; key_perm_t kperm; int ret; key = key_ref_to_ptr(key_ref); if (key->user->user_ns != cred->user->user_ns) goto use_other_perms; /* use the second 8-bits of permissions for keys the caller owns */ if (key->uid == cred->fsuid) { kperm = key->perm >> 16; goto use_these_perms; } /* use the third 8-bits of permissions for keys the caller has a group * membership in common with */ if (key->gid != -1 && key->perm & KEY_GRP_ALL) { if (key->gid == cred->fsgid) { kperm = key->perm >> 8; goto use_these_perms; } ret = groups_search(cred->group_info, key->gid); if (ret) { kperm = key->perm >> 8; goto use_these_perms; } } use_other_perms: /* otherwise use the least-significant 8-bits */ kperm = key->perm; use_these_perms: /* use the top 8-bits of permissions for keys the caller possesses * - possessor permissions are additive with other permissions */ if (is_key_possessed(key_ref)) kperm |= key->perm >> 24; kperm = kperm & perm & KEY_ALL; if (kperm != 
perm) return -EACCES; /* let LSM be the final arbiter */ return security_key_permission(key_ref, cred, perm); } /* end key_task_permission() */ EXPORT_SYMBOL(key_task_permission); /*****************************************************************************/ /* * validate a key */ int key_validate(struct key *key) { struct timespec now; int ret = 0; if (key) { /* check it's still accessible */ ret = -EKEYREVOKED; if (test_bit(KEY_FLAG_REVOKED, &key->flags) || test_bit(KEY_FLAG_DEAD, &key->flags)) goto error; /* check it hasn't expired */ ret = 0; if (key->expiry) { now = current_kernel_time(); if (now.tv_sec >= key->expiry) ret = -EKEYEXPIRED; } } error: return ret; } /* end key_validate() */ EXPORT_SYMBOL(key_validate);
gpl-2.0
rafal-krypa/smack-backport
drivers/dma/dma-jz4740.c
874
15871
/* * Copyright (C) 2013, Lars-Peter Clausen <lars@metafoo.de> * JZ4740 DMAC support * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/dmaengine.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/init.h> #include <linux/list.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/irq.h> #include <linux/clk.h> #include <asm/mach-jz4740/dma.h> #include "virt-dma.h" #define JZ_DMA_NR_CHANS 6 #define JZ_REG_DMA_SRC_ADDR(x) (0x00 + (x) * 0x20) #define JZ_REG_DMA_DST_ADDR(x) (0x04 + (x) * 0x20) #define JZ_REG_DMA_TRANSFER_COUNT(x) (0x08 + (x) * 0x20) #define JZ_REG_DMA_REQ_TYPE(x) (0x0C + (x) * 0x20) #define JZ_REG_DMA_STATUS_CTRL(x) (0x10 + (x) * 0x20) #define JZ_REG_DMA_CMD(x) (0x14 + (x) * 0x20) #define JZ_REG_DMA_DESC_ADDR(x) (0x18 + (x) * 0x20) #define JZ_REG_DMA_CTRL 0x300 #define JZ_REG_DMA_IRQ 0x304 #define JZ_REG_DMA_DOORBELL 0x308 #define JZ_REG_DMA_DOORBELL_SET 0x30C #define JZ_DMA_STATUS_CTRL_NO_DESC BIT(31) #define JZ_DMA_STATUS_CTRL_DESC_INV BIT(6) #define JZ_DMA_STATUS_CTRL_ADDR_ERR BIT(4) #define JZ_DMA_STATUS_CTRL_TRANSFER_DONE BIT(3) #define JZ_DMA_STATUS_CTRL_HALT BIT(2) #define JZ_DMA_STATUS_CTRL_COUNT_TERMINATE BIT(1) #define JZ_DMA_STATUS_CTRL_ENABLE BIT(0) #define JZ_DMA_CMD_SRC_INC BIT(23) #define JZ_DMA_CMD_DST_INC BIT(22) #define JZ_DMA_CMD_RDIL_MASK (0xf << 16) #define JZ_DMA_CMD_SRC_WIDTH_MASK (0x3 << 14) #define JZ_DMA_CMD_DST_WIDTH_MASK (0x3 << 12) #define JZ_DMA_CMD_INTERVAL_LENGTH_MASK (0x7 << 8) #define JZ_DMA_CMD_BLOCK_MODE BIT(7) #define JZ_DMA_CMD_DESC_VALID BIT(4) #define JZ_DMA_CMD_DESC_VALID_MODE BIT(3) #define JZ_DMA_CMD_VALID_IRQ_ENABLE BIT(2) #define JZ_DMA_CMD_TRANSFER_IRQ_ENABLE BIT(1) #define JZ_DMA_CMD_LINK_ENABLE 
BIT(0) #define JZ_DMA_CMD_FLAGS_OFFSET 22 #define JZ_DMA_CMD_RDIL_OFFSET 16 #define JZ_DMA_CMD_SRC_WIDTH_OFFSET 14 #define JZ_DMA_CMD_DST_WIDTH_OFFSET 12 #define JZ_DMA_CMD_TRANSFER_SIZE_OFFSET 8 #define JZ_DMA_CMD_MODE_OFFSET 7 #define JZ_DMA_CTRL_PRIORITY_MASK (0x3 << 8) #define JZ_DMA_CTRL_HALT BIT(3) #define JZ_DMA_CTRL_ADDRESS_ERROR BIT(2) #define JZ_DMA_CTRL_ENABLE BIT(0) enum jz4740_dma_width { JZ4740_DMA_WIDTH_32BIT = 0, JZ4740_DMA_WIDTH_8BIT = 1, JZ4740_DMA_WIDTH_16BIT = 2, }; enum jz4740_dma_transfer_size { JZ4740_DMA_TRANSFER_SIZE_4BYTE = 0, JZ4740_DMA_TRANSFER_SIZE_1BYTE = 1, JZ4740_DMA_TRANSFER_SIZE_2BYTE = 2, JZ4740_DMA_TRANSFER_SIZE_16BYTE = 3, JZ4740_DMA_TRANSFER_SIZE_32BYTE = 4, }; enum jz4740_dma_flags { JZ4740_DMA_SRC_AUTOINC = 0x2, JZ4740_DMA_DST_AUTOINC = 0x1, }; enum jz4740_dma_mode { JZ4740_DMA_MODE_SINGLE = 0, JZ4740_DMA_MODE_BLOCK = 1, }; struct jz4740_dma_sg { dma_addr_t addr; unsigned int len; }; struct jz4740_dma_desc { struct virt_dma_desc vdesc; enum dma_transfer_direction direction; bool cyclic; unsigned int num_sgs; struct jz4740_dma_sg sg[]; }; struct jz4740_dmaengine_chan { struct virt_dma_chan vchan; unsigned int id; dma_addr_t fifo_addr; unsigned int transfer_shift; struct jz4740_dma_desc *desc; unsigned int next_sg; }; struct jz4740_dma_dev { struct dma_device ddev; void __iomem *base; struct clk *clk; struct jz4740_dmaengine_chan chan[JZ_DMA_NR_CHANS]; }; static struct jz4740_dma_dev *jz4740_dma_chan_get_dev( struct jz4740_dmaengine_chan *chan) { return container_of(chan->vchan.chan.device, struct jz4740_dma_dev, ddev); } static struct jz4740_dmaengine_chan *to_jz4740_dma_chan(struct dma_chan *c) { return container_of(c, struct jz4740_dmaengine_chan, vchan.chan); } static struct jz4740_dma_desc *to_jz4740_dma_desc(struct virt_dma_desc *vdesc) { return container_of(vdesc, struct jz4740_dma_desc, vdesc); } static inline uint32_t jz4740_dma_read(struct jz4740_dma_dev *dmadev, unsigned int reg) { return readl(dmadev->base + reg); } 
static inline void jz4740_dma_write(struct jz4740_dma_dev *dmadev, unsigned reg, uint32_t val) { writel(val, dmadev->base + reg); } static inline void jz4740_dma_write_mask(struct jz4740_dma_dev *dmadev, unsigned int reg, uint32_t val, uint32_t mask) { uint32_t tmp; tmp = jz4740_dma_read(dmadev, reg); tmp &= ~mask; tmp |= val; jz4740_dma_write(dmadev, reg, tmp); } static struct jz4740_dma_desc *jz4740_dma_alloc_desc(unsigned int num_sgs) { return kzalloc(sizeof(struct jz4740_dma_desc) + sizeof(struct jz4740_dma_sg) * num_sgs, GFP_ATOMIC); } static enum jz4740_dma_width jz4740_dma_width(enum dma_slave_buswidth width) { switch (width) { case DMA_SLAVE_BUSWIDTH_1_BYTE: return JZ4740_DMA_WIDTH_8BIT; case DMA_SLAVE_BUSWIDTH_2_BYTES: return JZ4740_DMA_WIDTH_16BIT; case DMA_SLAVE_BUSWIDTH_4_BYTES: return JZ4740_DMA_WIDTH_32BIT; default: return JZ4740_DMA_WIDTH_32BIT; } } static enum jz4740_dma_transfer_size jz4740_dma_maxburst(u32 maxburst) { if (maxburst <= 1) return JZ4740_DMA_TRANSFER_SIZE_1BYTE; else if (maxburst <= 3) return JZ4740_DMA_TRANSFER_SIZE_2BYTE; else if (maxburst <= 15) return JZ4740_DMA_TRANSFER_SIZE_4BYTE; else if (maxburst <= 31) return JZ4740_DMA_TRANSFER_SIZE_16BYTE; return JZ4740_DMA_TRANSFER_SIZE_32BYTE; } static int jz4740_dma_slave_config(struct dma_chan *c, struct dma_slave_config *config) { struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c); struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan); enum jz4740_dma_width src_width; enum jz4740_dma_width dst_width; enum jz4740_dma_transfer_size transfer_size; enum jz4740_dma_flags flags; uint32_t cmd; switch (config->direction) { case DMA_MEM_TO_DEV: flags = JZ4740_DMA_SRC_AUTOINC; transfer_size = jz4740_dma_maxburst(config->dst_maxburst); chan->fifo_addr = config->dst_addr; break; case DMA_DEV_TO_MEM: flags = JZ4740_DMA_DST_AUTOINC; transfer_size = jz4740_dma_maxburst(config->src_maxburst); chan->fifo_addr = config->src_addr; break; default: return -EINVAL; } src_width = 
jz4740_dma_width(config->src_addr_width); dst_width = jz4740_dma_width(config->dst_addr_width); switch (transfer_size) { case JZ4740_DMA_TRANSFER_SIZE_2BYTE: chan->transfer_shift = 1; break; case JZ4740_DMA_TRANSFER_SIZE_4BYTE: chan->transfer_shift = 2; break; case JZ4740_DMA_TRANSFER_SIZE_16BYTE: chan->transfer_shift = 4; break; case JZ4740_DMA_TRANSFER_SIZE_32BYTE: chan->transfer_shift = 5; break; default: chan->transfer_shift = 0; break; } cmd = flags << JZ_DMA_CMD_FLAGS_OFFSET; cmd |= src_width << JZ_DMA_CMD_SRC_WIDTH_OFFSET; cmd |= dst_width << JZ_DMA_CMD_DST_WIDTH_OFFSET; cmd |= transfer_size << JZ_DMA_CMD_TRANSFER_SIZE_OFFSET; cmd |= JZ4740_DMA_MODE_SINGLE << JZ_DMA_CMD_MODE_OFFSET; cmd |= JZ_DMA_CMD_TRANSFER_IRQ_ENABLE; jz4740_dma_write(dmadev, JZ_REG_DMA_CMD(chan->id), cmd); jz4740_dma_write(dmadev, JZ_REG_DMA_STATUS_CTRL(chan->id), 0); jz4740_dma_write(dmadev, JZ_REG_DMA_REQ_TYPE(chan->id), config->slave_id); return 0; } static int jz4740_dma_terminate_all(struct dma_chan *c) { struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c); struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan); unsigned long flags; LIST_HEAD(head); spin_lock_irqsave(&chan->vchan.lock, flags); jz4740_dma_write_mask(dmadev, JZ_REG_DMA_STATUS_CTRL(chan->id), 0, JZ_DMA_STATUS_CTRL_ENABLE); chan->desc = NULL; vchan_get_all_descriptors(&chan->vchan, &head); spin_unlock_irqrestore(&chan->vchan.lock, flags); vchan_dma_desc_free_list(&chan->vchan, &head); return 0; } static int jz4740_dma_start_transfer(struct jz4740_dmaengine_chan *chan) { struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan); dma_addr_t src_addr, dst_addr; struct virt_dma_desc *vdesc; struct jz4740_dma_sg *sg; jz4740_dma_write_mask(dmadev, JZ_REG_DMA_STATUS_CTRL(chan->id), 0, JZ_DMA_STATUS_CTRL_ENABLE); if (!chan->desc) { vdesc = vchan_next_desc(&chan->vchan); if (!vdesc) return 0; chan->desc = to_jz4740_dma_desc(vdesc); chan->next_sg = 0; } if (chan->next_sg == chan->desc->num_sgs) chan->next_sg = 
0; sg = &chan->desc->sg[chan->next_sg]; if (chan->desc->direction == DMA_MEM_TO_DEV) { src_addr = sg->addr; dst_addr = chan->fifo_addr; } else { src_addr = chan->fifo_addr; dst_addr = sg->addr; } jz4740_dma_write(dmadev, JZ_REG_DMA_SRC_ADDR(chan->id), src_addr); jz4740_dma_write(dmadev, JZ_REG_DMA_DST_ADDR(chan->id), dst_addr); jz4740_dma_write(dmadev, JZ_REG_DMA_TRANSFER_COUNT(chan->id), sg->len >> chan->transfer_shift); chan->next_sg++; jz4740_dma_write_mask(dmadev, JZ_REG_DMA_STATUS_CTRL(chan->id), JZ_DMA_STATUS_CTRL_NO_DESC | JZ_DMA_STATUS_CTRL_ENABLE, JZ_DMA_STATUS_CTRL_HALT | JZ_DMA_STATUS_CTRL_NO_DESC | JZ_DMA_STATUS_CTRL_ENABLE); jz4740_dma_write_mask(dmadev, JZ_REG_DMA_CTRL, JZ_DMA_CTRL_ENABLE, JZ_DMA_CTRL_HALT | JZ_DMA_CTRL_ENABLE); return 0; } static void jz4740_dma_chan_irq(struct jz4740_dmaengine_chan *chan) { spin_lock(&chan->vchan.lock); if (chan->desc) { if (chan->desc->cyclic) { vchan_cyclic_callback(&chan->desc->vdesc); } else { if (chan->next_sg == chan->desc->num_sgs) { list_del(&chan->desc->vdesc.node); vchan_cookie_complete(&chan->desc->vdesc); chan->desc = NULL; } } } jz4740_dma_start_transfer(chan); spin_unlock(&chan->vchan.lock); } static irqreturn_t jz4740_dma_irq(int irq, void *devid) { struct jz4740_dma_dev *dmadev = devid; uint32_t irq_status; unsigned int i; irq_status = readl(dmadev->base + JZ_REG_DMA_IRQ); for (i = 0; i < 6; ++i) { if (irq_status & (1 << i)) { jz4740_dma_write_mask(dmadev, JZ_REG_DMA_STATUS_CTRL(i), 0, JZ_DMA_STATUS_CTRL_ENABLE | JZ_DMA_STATUS_CTRL_TRANSFER_DONE); jz4740_dma_chan_irq(&dmadev->chan[i]); } } return IRQ_HANDLED; } static void jz4740_dma_issue_pending(struct dma_chan *c) { struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c); unsigned long flags; spin_lock_irqsave(&chan->vchan.lock, flags); if (vchan_issue_pending(&chan->vchan) && !chan->desc) jz4740_dma_start_transfer(chan); spin_unlock_irqrestore(&chan->vchan.lock, flags); } static struct dma_async_tx_descriptor *jz4740_dma_prep_slave_sg( struct 
dma_chan *c, struct scatterlist *sgl, unsigned int sg_len, enum dma_transfer_direction direction, unsigned long flags, void *context) { struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c); struct jz4740_dma_desc *desc; struct scatterlist *sg; unsigned int i; desc = jz4740_dma_alloc_desc(sg_len); if (!desc) return NULL; for_each_sg(sgl, sg, sg_len, i) { desc->sg[i].addr = sg_dma_address(sg); desc->sg[i].len = sg_dma_len(sg); } desc->num_sgs = sg_len; desc->direction = direction; desc->cyclic = false; return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); } static struct dma_async_tx_descriptor *jz4740_dma_prep_dma_cyclic( struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len, size_t period_len, enum dma_transfer_direction direction, unsigned long flags) { struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c); struct jz4740_dma_desc *desc; unsigned int num_periods, i; if (buf_len % period_len) return NULL; num_periods = buf_len / period_len; desc = jz4740_dma_alloc_desc(num_periods); if (!desc) return NULL; for (i = 0; i < num_periods; i++) { desc->sg[i].addr = buf_addr; desc->sg[i].len = period_len; buf_addr += period_len; } desc->num_sgs = num_periods; desc->direction = direction; desc->cyclic = true; return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); } static size_t jz4740_dma_desc_residue(struct jz4740_dmaengine_chan *chan, struct jz4740_dma_desc *desc, unsigned int next_sg) { struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan); unsigned int residue, count; unsigned int i; residue = 0; for (i = next_sg; i < desc->num_sgs; i++) residue += desc->sg[i].len; if (next_sg != 0) { count = jz4740_dma_read(dmadev, JZ_REG_DMA_TRANSFER_COUNT(chan->id)); residue += count << chan->transfer_shift; } return residue; } static enum dma_status jz4740_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, struct dma_tx_state *state) { struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c); struct virt_dma_desc *vdesc; enum dma_status status; 
unsigned long flags; status = dma_cookie_status(c, cookie, state); if (status == DMA_COMPLETE || !state) return status; spin_lock_irqsave(&chan->vchan.lock, flags); vdesc = vchan_find_desc(&chan->vchan, cookie); if (cookie == chan->desc->vdesc.tx.cookie) { state->residue = jz4740_dma_desc_residue(chan, chan->desc, chan->next_sg); } else if (vdesc) { state->residue = jz4740_dma_desc_residue(chan, to_jz4740_dma_desc(vdesc), 0); } else { state->residue = 0; } spin_unlock_irqrestore(&chan->vchan.lock, flags); return status; } static void jz4740_dma_free_chan_resources(struct dma_chan *c) { vchan_free_chan_resources(to_virt_chan(c)); } static void jz4740_dma_desc_free(struct virt_dma_desc *vdesc) { kfree(container_of(vdesc, struct jz4740_dma_desc, vdesc)); } #define JZ4740_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) static int jz4740_dma_probe(struct platform_device *pdev) { struct jz4740_dmaengine_chan *chan; struct jz4740_dma_dev *dmadev; struct dma_device *dd; unsigned int i; struct resource *res; int ret; int irq; dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL); if (!dmadev) return -EINVAL; dd = &dmadev->ddev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); dmadev->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(dmadev->base)) return PTR_ERR(dmadev->base); dmadev->clk = clk_get(&pdev->dev, "dma"); if (IS_ERR(dmadev->clk)) return PTR_ERR(dmadev->clk); clk_prepare_enable(dmadev->clk); dma_cap_set(DMA_SLAVE, dd->cap_mask); dma_cap_set(DMA_CYCLIC, dd->cap_mask); dd->device_free_chan_resources = jz4740_dma_free_chan_resources; dd->device_tx_status = jz4740_dma_tx_status; dd->device_issue_pending = jz4740_dma_issue_pending; dd->device_prep_slave_sg = jz4740_dma_prep_slave_sg; dd->device_prep_dma_cyclic = jz4740_dma_prep_dma_cyclic; dd->device_config = jz4740_dma_slave_config; dd->device_terminate_all = jz4740_dma_terminate_all; dd->src_addr_widths = 
JZ4740_DMA_BUSWIDTHS; dd->dst_addr_widths = JZ4740_DMA_BUSWIDTHS; dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; dd->dev = &pdev->dev; INIT_LIST_HEAD(&dd->channels); for (i = 0; i < JZ_DMA_NR_CHANS; i++) { chan = &dmadev->chan[i]; chan->id = i; chan->vchan.desc_free = jz4740_dma_desc_free; vchan_init(&chan->vchan, dd); } ret = dma_async_device_register(dd); if (ret) return ret; irq = platform_get_irq(pdev, 0); ret = request_irq(irq, jz4740_dma_irq, 0, dev_name(&pdev->dev), dmadev); if (ret) goto err_unregister; platform_set_drvdata(pdev, dmadev); return 0; err_unregister: dma_async_device_unregister(dd); return ret; } static int jz4740_dma_remove(struct platform_device *pdev) { struct jz4740_dma_dev *dmadev = platform_get_drvdata(pdev); int irq = platform_get_irq(pdev, 0); free_irq(irq, dmadev); dma_async_device_unregister(&dmadev->ddev); clk_disable_unprepare(dmadev->clk); return 0; } static struct platform_driver jz4740_dma_driver = { .probe = jz4740_dma_probe, .remove = jz4740_dma_remove, .driver = { .name = "jz4740-dma", }, }; module_platform_driver(jz4740_dma_driver); MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>"); MODULE_DESCRIPTION("JZ4740 DMA driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
zephiK/android_kernel_moto_shamuLP
arch/arm/plat-orion/time.c
1386
5123
/* * arch/arm/plat-orion/time.c * * Marvell Orion SoC timer handling. * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. * * Timer 0 is used as free-running clocksource, while timer 1 is * used as clock_event_device. */ #include <linux/kernel.h> #include <linux/timer.h> #include <linux/clockchips.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/sched_clock.h> /* * MBus bridge block registers. */ #define BRIDGE_CAUSE_OFF 0x0110 #define BRIDGE_MASK_OFF 0x0114 #define BRIDGE_INT_TIMER0 0x0002 #define BRIDGE_INT_TIMER1 0x0004 /* * Timer block registers. */ #define TIMER_CTRL_OFF 0x0000 #define TIMER0_EN 0x0001 #define TIMER0_RELOAD_EN 0x0002 #define TIMER1_EN 0x0004 #define TIMER1_RELOAD_EN 0x0008 #define TIMER0_RELOAD_OFF 0x0010 #define TIMER0_VAL_OFF 0x0014 #define TIMER1_RELOAD_OFF 0x0018 #define TIMER1_VAL_OFF 0x001c /* * SoC-specific data. */ static void __iomem *bridge_base; static u32 bridge_timer1_clr_mask; static void __iomem *timer_base; /* * Number of timer ticks per jiffy. */ static u32 ticks_per_jiffy; /* * Orion's sched_clock implementation. It has a resolution of * at least 7.5ns (133MHz TCLK). */ static u32 notrace orion_read_sched_clock(void) { return ~readl(timer_base + TIMER0_VAL_OFF); } /* * Clockevent handling. */ static int orion_clkevt_next_event(unsigned long delta, struct clock_event_device *dev) { unsigned long flags; u32 u; if (delta == 0) return -ETIME; local_irq_save(flags); /* * Clear and enable clockevent timer interrupt. */ writel(bridge_timer1_clr_mask, bridge_base + BRIDGE_CAUSE_OFF); u = readl(bridge_base + BRIDGE_MASK_OFF); u |= BRIDGE_INT_TIMER1; writel(u, bridge_base + BRIDGE_MASK_OFF); /* * Setup new clockevent timer value. */ writel(delta, timer_base + TIMER1_VAL_OFF); /* * Enable the timer. 
*/ u = readl(timer_base + TIMER_CTRL_OFF); u = (u & ~TIMER1_RELOAD_EN) | TIMER1_EN; writel(u, timer_base + TIMER_CTRL_OFF); local_irq_restore(flags); return 0; } static void orion_clkevt_mode(enum clock_event_mode mode, struct clock_event_device *dev) { unsigned long flags; u32 u; local_irq_save(flags); if (mode == CLOCK_EVT_MODE_PERIODIC) { /* * Setup timer to fire at 1/HZ intervals. */ writel(ticks_per_jiffy - 1, timer_base + TIMER1_RELOAD_OFF); writel(ticks_per_jiffy - 1, timer_base + TIMER1_VAL_OFF); /* * Enable timer interrupt. */ u = readl(bridge_base + BRIDGE_MASK_OFF); writel(u | BRIDGE_INT_TIMER1, bridge_base + BRIDGE_MASK_OFF); /* * Enable timer. */ u = readl(timer_base + TIMER_CTRL_OFF); writel(u | TIMER1_EN | TIMER1_RELOAD_EN, timer_base + TIMER_CTRL_OFF); } else { /* * Disable timer. */ u = readl(timer_base + TIMER_CTRL_OFF); writel(u & ~TIMER1_EN, timer_base + TIMER_CTRL_OFF); /* * Disable timer interrupt. */ u = readl(bridge_base + BRIDGE_MASK_OFF); writel(u & ~BRIDGE_INT_TIMER1, bridge_base + BRIDGE_MASK_OFF); /* * ACK pending timer interrupt. */ writel(bridge_timer1_clr_mask, bridge_base + BRIDGE_CAUSE_OFF); } local_irq_restore(flags); } static struct clock_event_device orion_clkevt = { .name = "orion_tick", .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC, .rating = 300, .set_next_event = orion_clkevt_next_event, .set_mode = orion_clkevt_mode, }; static irqreturn_t orion_timer_interrupt(int irq, void *dev_id) { /* * ACK timer interrupt and call event handler. 
*/ writel(bridge_timer1_clr_mask, bridge_base + BRIDGE_CAUSE_OFF); orion_clkevt.event_handler(&orion_clkevt); return IRQ_HANDLED; } static struct irqaction orion_timer_irq = { .name = "orion_tick", .flags = IRQF_DISABLED | IRQF_TIMER, .handler = orion_timer_interrupt }; void __init orion_time_set_base(void __iomem *_timer_base) { timer_base = _timer_base; } void __init orion_time_init(void __iomem *_bridge_base, u32 _bridge_timer1_clr_mask, unsigned int irq, unsigned int tclk) { u32 u; /* * Set SoC-specific data. */ bridge_base = _bridge_base; bridge_timer1_clr_mask = _bridge_timer1_clr_mask; ticks_per_jiffy = (tclk + HZ/2) / HZ; /* * Set scale and timer for sched_clock. */ setup_sched_clock(orion_read_sched_clock, 32, tclk); /* * Setup free-running clocksource timer (interrupts * disabled). */ writel(0xffffffff, timer_base + TIMER0_VAL_OFF); writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF); u = readl(bridge_base + BRIDGE_MASK_OFF); writel(u & ~BRIDGE_INT_TIMER0, bridge_base + BRIDGE_MASK_OFF); u = readl(timer_base + TIMER_CTRL_OFF); writel(u | TIMER0_EN | TIMER0_RELOAD_EN, timer_base + TIMER_CTRL_OFF); clocksource_mmio_init(timer_base + TIMER0_VAL_OFF, "orion_clocksource", tclk, 300, 32, clocksource_mmio_readl_down); /* * Setup clockevent timer (interrupt-driven). */ setup_irq(irq, &orion_timer_irq); orion_clkevt.cpumask = cpumask_of(0); clockevents_config_and_register(&orion_clkevt, tclk, 1, 0xfffffffe); }
gpl-2.0
bju2000/android_kernel_lge_msm8994
drivers/net/irda/pxaficp_ir.c
2154
23705
/* * linux/drivers/net/irda/pxaficp_ir.c * * Based on sa1100_ir.c by Russell King * * Changes copyright (C) 2003-2005 MontaVista Software, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Infra-red driver (SIR/FIR) for the PXA2xx embedded microprocessor * */ #include <linux/dma-mapping.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/gpio.h> #include <linux/slab.h> #include <net/irda/irda.h> #include <net/irda/irmod.h> #include <net/irda/wrapper.h> #include <net/irda/irda_device.h> #include <mach/dma.h> #include <linux/platform_data/irda-pxaficp.h> #include <mach/regs-ost.h> #include <mach/regs-uart.h> #define FICP __REG(0x40800000) /* Start of FICP area */ #define ICCR0 __REG(0x40800000) /* ICP Control Register 0 */ #define ICCR1 __REG(0x40800004) /* ICP Control Register 1 */ #define ICCR2 __REG(0x40800008) /* ICP Control Register 2 */ #define ICDR __REG(0x4080000c) /* ICP Data Register */ #define ICSR0 __REG(0x40800014) /* ICP Status Register 0 */ #define ICSR1 __REG(0x40800018) /* ICP Status Register 1 */ #define ICCR0_AME (1 << 7) /* Address match enable */ #define ICCR0_TIE (1 << 6) /* Transmit FIFO interrupt enable */ #define ICCR0_RIE (1 << 5) /* Receive FIFO interrupt enable */ #define ICCR0_RXE (1 << 4) /* Receive enable */ #define ICCR0_TXE (1 << 3) /* Transmit enable */ #define ICCR0_TUS (1 << 2) /* Transmit FIFO underrun select */ #define ICCR0_LBM (1 << 1) /* Loopback mode */ #define ICCR0_ITR (1 << 0) /* IrDA transmission */ #define ICCR2_RXP (1 << 3) /* Receive Pin Polarity select */ #define ICCR2_TXP (1 << 2) /* Transmit Pin Polarity select */ #define ICCR2_TRIG (3 << 0) /* Receive FIFO Trigger threshold */ #define ICCR2_TRIG_8 (0 << 0) /* >= 8 bytes */ #define 
ICCR2_TRIG_16 (1 << 0) /* >= 16 bytes */ #define ICCR2_TRIG_32 (2 << 0) /* >= 32 bytes */ #ifdef CONFIG_PXA27x #define ICSR0_EOC (1 << 6) /* DMA End of Descriptor Chain */ #endif #define ICSR0_FRE (1 << 5) /* Framing error */ #define ICSR0_RFS (1 << 4) /* Receive FIFO service request */ #define ICSR0_TFS (1 << 3) /* Transnit FIFO service request */ #define ICSR0_RAB (1 << 2) /* Receiver abort */ #define ICSR0_TUR (1 << 1) /* Trunsmit FIFO underun */ #define ICSR0_EIF (1 << 0) /* End/Error in FIFO */ #define ICSR1_ROR (1 << 6) /* Receiver FIFO underrun */ #define ICSR1_CRE (1 << 5) /* CRC error */ #define ICSR1_EOF (1 << 4) /* End of frame */ #define ICSR1_TNF (1 << 3) /* Transmit FIFO not full */ #define ICSR1_RNE (1 << 2) /* Receive FIFO not empty */ #define ICSR1_TBY (1 << 1) /* Tramsmiter busy flag */ #define ICSR1_RSY (1 << 0) /* Recevier synchronized flag */ #define IrSR_RXPL_NEG_IS_ZERO (1<<4) #define IrSR_RXPL_POS_IS_ZERO 0x0 #define IrSR_TXPL_NEG_IS_ZERO (1<<3) #define IrSR_TXPL_POS_IS_ZERO 0x0 #define IrSR_XMODE_PULSE_1_6 (1<<2) #define IrSR_XMODE_PULSE_3_16 0x0 #define IrSR_RCVEIR_IR_MODE (1<<1) #define IrSR_RCVEIR_UART_MODE 0x0 #define IrSR_XMITIR_IR_MODE (1<<0) #define IrSR_XMITIR_UART_MODE 0x0 #define IrSR_IR_RECEIVE_ON (\ IrSR_RXPL_NEG_IS_ZERO | \ IrSR_TXPL_POS_IS_ZERO | \ IrSR_XMODE_PULSE_3_16 | \ IrSR_RCVEIR_IR_MODE | \ IrSR_XMITIR_UART_MODE) #define IrSR_IR_TRANSMIT_ON (\ IrSR_RXPL_NEG_IS_ZERO | \ IrSR_TXPL_POS_IS_ZERO | \ IrSR_XMODE_PULSE_3_16 | \ IrSR_RCVEIR_UART_MODE | \ IrSR_XMITIR_IR_MODE) struct pxa_irda { int speed; int newspeed; unsigned long last_oscr; unsigned char *dma_rx_buff; unsigned char *dma_tx_buff; dma_addr_t dma_rx_buff_phy; dma_addr_t dma_tx_buff_phy; unsigned int dma_tx_buff_len; int txdma; int rxdma; int uart_irq; int icp_irq; struct irlap_cb *irlap; struct qos_info qos; iobuff_t tx_buff; iobuff_t rx_buff; struct device *dev; struct pxaficp_platform_data *pdata; struct clk *fir_clk; struct clk *sir_clk; struct clk *cur_clk; }; 
static inline void pxa_irda_disable_clk(struct pxa_irda *si) { if (si->cur_clk) clk_disable_unprepare(si->cur_clk); si->cur_clk = NULL; } static inline void pxa_irda_enable_firclk(struct pxa_irda *si) { si->cur_clk = si->fir_clk; clk_prepare_enable(si->fir_clk); } static inline void pxa_irda_enable_sirclk(struct pxa_irda *si) { si->cur_clk = si->sir_clk; clk_prepare_enable(si->sir_clk); } #define IS_FIR(si) ((si)->speed >= 4000000) #define IRDA_FRAME_SIZE_LIMIT 2047 inline static void pxa_irda_fir_dma_rx_start(struct pxa_irda *si) { DCSR(si->rxdma) = DCSR_NODESC; DSADR(si->rxdma) = __PREG(ICDR); DTADR(si->rxdma) = si->dma_rx_buff_phy; DCMD(si->rxdma) = DCMD_INCTRGADDR | DCMD_FLOWSRC | DCMD_WIDTH1 | DCMD_BURST32 | IRDA_FRAME_SIZE_LIMIT; DCSR(si->rxdma) |= DCSR_RUN; } inline static void pxa_irda_fir_dma_tx_start(struct pxa_irda *si) { DCSR(si->txdma) = DCSR_NODESC; DSADR(si->txdma) = si->dma_tx_buff_phy; DTADR(si->txdma) = __PREG(ICDR); DCMD(si->txdma) = DCMD_INCSRCADDR | DCMD_FLOWTRG | DCMD_ENDIRQEN | DCMD_WIDTH1 | DCMD_BURST32 | si->dma_tx_buff_len; DCSR(si->txdma) |= DCSR_RUN; } /* * Set the IrDA communications mode. */ static void pxa_irda_set_mode(struct pxa_irda *si, int mode) { if (si->pdata->transceiver_mode) si->pdata->transceiver_mode(si->dev, mode); else { if (gpio_is_valid(si->pdata->gpio_pwdown)) gpio_set_value(si->pdata->gpio_pwdown, !(mode & IR_OFF) ^ !si->pdata->gpio_pwdown_inverted); pxa2xx_transceiver_mode(si->dev, mode); } } /* * Set the IrDA communications speed. 
*/ static int pxa_irda_set_speed(struct pxa_irda *si, int speed) { unsigned long flags; unsigned int divisor; switch (speed) { case 9600: case 19200: case 38400: case 57600: case 115200: /* refer to PXA250/210 Developer's Manual 10-7 */ /* BaudRate = 14.7456 MHz / (16*Divisor) */ divisor = 14745600 / (16 * speed); local_irq_save(flags); if (IS_FIR(si)) { /* stop RX DMA */ DCSR(si->rxdma) &= ~DCSR_RUN; /* disable FICP */ ICCR0 = 0; pxa_irda_disable_clk(si); /* set board transceiver to SIR mode */ pxa_irda_set_mode(si, IR_SIRMODE); /* enable the STUART clock */ pxa_irda_enable_sirclk(si); } /* disable STUART first */ STIER = 0; /* access DLL & DLH */ STLCR |= LCR_DLAB; STDLL = divisor & 0xff; STDLH = divisor >> 8; STLCR &= ~LCR_DLAB; si->speed = speed; STISR = IrSR_IR_RECEIVE_ON | IrSR_XMODE_PULSE_1_6; STIER = IER_UUE | IER_RLSE | IER_RAVIE | IER_RTIOE; local_irq_restore(flags); break; case 4000000: local_irq_save(flags); /* disable STUART */ STIER = 0; STISR = 0; pxa_irda_disable_clk(si); /* disable FICP first */ ICCR0 = 0; /* set board transceiver to FIR mode */ pxa_irda_set_mode(si, IR_FIRMODE); /* enable the FICP clock */ pxa_irda_enable_firclk(si); si->speed = speed; pxa_irda_fir_dma_rx_start(si); ICCR0 = ICCR0_ITR | ICCR0_RXE; local_irq_restore(flags); break; default: return -EINVAL; } return 0; } /* SIR interrupt service routine. 
*/ static irqreturn_t pxa_irda_sir_irq(int irq, void *dev_id) { struct net_device *dev = dev_id; struct pxa_irda *si = netdev_priv(dev); int iir, lsr, data; iir = STIIR; switch (iir & 0x0F) { case 0x06: /* Receiver Line Status */ lsr = STLSR; while (lsr & LSR_FIFOE) { data = STRBR; if (lsr & (LSR_OE | LSR_PE | LSR_FE | LSR_BI)) { printk(KERN_DEBUG "pxa_ir: sir receiving error\n"); dev->stats.rx_errors++; if (lsr & LSR_FE) dev->stats.rx_frame_errors++; if (lsr & LSR_OE) dev->stats.rx_fifo_errors++; } else { dev->stats.rx_bytes++; async_unwrap_char(dev, &dev->stats, &si->rx_buff, data); } lsr = STLSR; } si->last_oscr = readl_relaxed(OSCR); break; case 0x04: /* Received Data Available */ /* forth through */ case 0x0C: /* Character Timeout Indication */ do { dev->stats.rx_bytes++; async_unwrap_char(dev, &dev->stats, &si->rx_buff, STRBR); } while (STLSR & LSR_DR); si->last_oscr = readl_relaxed(OSCR); break; case 0x02: /* Transmit FIFO Data Request */ while ((si->tx_buff.len) && (STLSR & LSR_TDRQ)) { STTHR = *si->tx_buff.data++; si->tx_buff.len -= 1; } if (si->tx_buff.len == 0) { dev->stats.tx_packets++; dev->stats.tx_bytes += si->tx_buff.data - si->tx_buff.head; /* We need to ensure that the transmitter has finished. */ while ((STLSR & LSR_TEMT) == 0) cpu_relax(); si->last_oscr = readl_relaxed(OSCR); /* * Ok, we've finished transmitting. Now enable * the receiver. Sometimes we get a receive IRQ * immediately after a transmit... */ if (si->newspeed) { pxa_irda_set_speed(si, si->newspeed); si->newspeed = 0; } else { /* enable IR Receiver, disable IR Transmitter */ STISR = IrSR_IR_RECEIVE_ON | IrSR_XMODE_PULSE_1_6; /* enable STUART and receive interrupts */ STIER = IER_UUE | IER_RLSE | IER_RAVIE | IER_RTIOE; } /* I'm hungry! 
*/ netif_wake_queue(dev); } break; } return IRQ_HANDLED; } /* FIR Receive DMA interrupt handler */ static void pxa_irda_fir_dma_rx_irq(int channel, void *data) { int dcsr = DCSR(channel); DCSR(channel) = dcsr & ~DCSR_RUN; printk(KERN_DEBUG "pxa_ir: fir rx dma bus error %#x\n", dcsr); } /* FIR Transmit DMA interrupt handler */ static void pxa_irda_fir_dma_tx_irq(int channel, void *data) { struct net_device *dev = data; struct pxa_irda *si = netdev_priv(dev); int dcsr; dcsr = DCSR(channel); DCSR(channel) = dcsr & ~DCSR_RUN; if (dcsr & DCSR_ENDINTR) { dev->stats.tx_packets++; dev->stats.tx_bytes += si->dma_tx_buff_len; } else { dev->stats.tx_errors++; } while (ICSR1 & ICSR1_TBY) cpu_relax(); si->last_oscr = readl_relaxed(OSCR); /* * HACK: It looks like the TBY bit is dropped too soon. * Without this delay things break. */ udelay(120); if (si->newspeed) { pxa_irda_set_speed(si, si->newspeed); si->newspeed = 0; } else { int i = 64; ICCR0 = 0; pxa_irda_fir_dma_rx_start(si); while ((ICSR1 & ICSR1_RNE) && i--) (void)ICDR; ICCR0 = ICCR0_ITR | ICCR0_RXE; if (i < 0) printk(KERN_ERR "pxa_ir: cannot clear Rx FIFO!\n"); } netif_wake_queue(dev); } /* EIF(Error in FIFO/End in Frame) handler for FIR */ static void pxa_irda_fir_irq_eif(struct pxa_irda *si, struct net_device *dev, int icsr0) { unsigned int len, stat, data; /* Get the current data position. */ len = DTADR(si->rxdma) - si->dma_rx_buff_phy; do { /* Read Status, and then Data. */ stat = ICSR1; rmb(); data = ICDR; if (stat & (ICSR1_CRE | ICSR1_ROR)) { dev->stats.rx_errors++; if (stat & ICSR1_CRE) { printk(KERN_DEBUG "pxa_ir: fir receive CRC error\n"); dev->stats.rx_crc_errors++; } if (stat & ICSR1_ROR) { printk(KERN_DEBUG "pxa_ir: fir receive overrun\n"); dev->stats.rx_over_errors++; } } else { si->dma_rx_buff[len++] = data; } /* If we hit the end of frame, there's no point in continuing. */ if (stat & ICSR1_EOF) break; } while (ICSR0 & ICSR0_EIF); if (stat & ICSR1_EOF) { /* end of frame. 
*/ struct sk_buff *skb; if (icsr0 & ICSR0_FRE) { printk(KERN_ERR "pxa_ir: dropping erroneous frame\n"); dev->stats.rx_dropped++; return; } skb = alloc_skb(len+1,GFP_ATOMIC); if (!skb) { printk(KERN_ERR "pxa_ir: fir out of memory for receive skb\n"); dev->stats.rx_dropped++; return; } /* Align IP header to 20 bytes */ skb_reserve(skb, 1); skb_copy_to_linear_data(skb, si->dma_rx_buff, len); skb_put(skb, len); /* Feed it to IrLAP */ skb->dev = dev; skb_reset_mac_header(skb); skb->protocol = htons(ETH_P_IRDA); netif_rx(skb); dev->stats.rx_packets++; dev->stats.rx_bytes += len; } } /* FIR interrupt handler */ static irqreturn_t pxa_irda_fir_irq(int irq, void *dev_id) { struct net_device *dev = dev_id; struct pxa_irda *si = netdev_priv(dev); int icsr0, i = 64; /* stop RX DMA */ DCSR(si->rxdma) &= ~DCSR_RUN; si->last_oscr = readl_relaxed(OSCR); icsr0 = ICSR0; if (icsr0 & (ICSR0_FRE | ICSR0_RAB)) { if (icsr0 & ICSR0_FRE) { printk(KERN_DEBUG "pxa_ir: fir receive frame error\n"); dev->stats.rx_frame_errors++; } else { printk(KERN_DEBUG "pxa_ir: fir receive abort\n"); dev->stats.rx_errors++; } ICSR0 = icsr0 & (ICSR0_FRE | ICSR0_RAB); } if (icsr0 & ICSR0_EIF) { /* An error in FIFO occurred, or there is a end of frame */ pxa_irda_fir_irq_eif(si, dev, icsr0); } ICCR0 = 0; pxa_irda_fir_dma_rx_start(si); while ((ICSR1 & ICSR1_RNE) && i--) (void)ICDR; ICCR0 = ICCR0_ITR | ICCR0_RXE; if (i < 0) printk(KERN_ERR "pxa_ir: cannot clear Rx FIFO!\n"); return IRQ_HANDLED; } /* hard_xmit interface of irda device */ static int pxa_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev) { struct pxa_irda *si = netdev_priv(dev); int speed = irda_get_next_speed(skb); /* * Does this packet contain a request to change the interface * speed? If so, remember it until we complete the transmission * of this frame. */ if (speed != si->speed && speed != -1) si->newspeed = speed; /* * If this is an empty frame, we can bypass a lot. 
*/ if (skb->len == 0) { if (si->newspeed) { si->newspeed = 0; pxa_irda_set_speed(si, speed); } dev_kfree_skb(skb); return NETDEV_TX_OK; } netif_stop_queue(dev); if (!IS_FIR(si)) { si->tx_buff.data = si->tx_buff.head; si->tx_buff.len = async_wrap_skb(skb, si->tx_buff.data, si->tx_buff.truesize); /* Disable STUART interrupts and switch to transmit mode. */ STIER = 0; STISR = IrSR_IR_TRANSMIT_ON | IrSR_XMODE_PULSE_1_6; /* enable STUART and transmit interrupts */ STIER = IER_UUE | IER_TIE; } else { unsigned long mtt = irda_get_mtt(skb); si->dma_tx_buff_len = skb->len; skb_copy_from_linear_data(skb, si->dma_tx_buff, skb->len); if (mtt) while ((unsigned)(readl_relaxed(OSCR) - si->last_oscr)/4 < mtt) cpu_relax(); /* stop RX DMA, disable FICP */ DCSR(si->rxdma) &= ~DCSR_RUN; ICCR0 = 0; pxa_irda_fir_dma_tx_start(si); ICCR0 = ICCR0_ITR | ICCR0_TXE; } dev_kfree_skb(skb); return NETDEV_TX_OK; } static int pxa_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd) { struct if_irda_req *rq = (struct if_irda_req *)ifreq; struct pxa_irda *si = netdev_priv(dev); int ret; switch (cmd) { case SIOCSBANDWIDTH: ret = -EPERM; if (capable(CAP_NET_ADMIN)) { /* * We are unable to set the speed if the * device is not running. */ if (netif_running(dev)) { ret = pxa_irda_set_speed(si, rq->ifr_baudrate); } else { printk(KERN_INFO "pxa_ir: SIOCSBANDWIDTH: !netif_running\n"); ret = 0; } } break; case SIOCSMEDIABUSY: ret = -EPERM; if (capable(CAP_NET_ADMIN)) { irda_device_set_media_busy(dev, TRUE); ret = 0; } break; case SIOCGRECEIVING: ret = 0; rq->ifr_receiving = IS_FIR(si) ? 0 : si->rx_buff.state != OUTSIDE_FRAME; break; default: ret = -EOPNOTSUPP; break; } return ret; } static void pxa_irda_startup(struct pxa_irda *si) { /* Disable STUART interrupts */ STIER = 0; /* enable STUART interrupt to the processor */ STMCR = MCR_OUT2; /* configure SIR frame format: StartBit - Data 7 ... 
Data 0 - Stop Bit */ STLCR = LCR_WLS0 | LCR_WLS1; /* enable FIFO, we use FIFO to improve performance */ STFCR = FCR_TRFIFOE | FCR_ITL_32; /* disable FICP */ ICCR0 = 0; /* configure FICP ICCR2 */ ICCR2 = ICCR2_TXP | ICCR2_TRIG_32; /* configure DMAC */ DRCMR(17) = si->rxdma | DRCMR_MAPVLD; DRCMR(18) = si->txdma | DRCMR_MAPVLD; /* force SIR reinitialization */ si->speed = 4000000; pxa_irda_set_speed(si, 9600); printk(KERN_DEBUG "pxa_ir: irda startup\n"); } static void pxa_irda_shutdown(struct pxa_irda *si) { unsigned long flags; local_irq_save(flags); /* disable STUART and interrupt */ STIER = 0; /* disable STUART SIR mode */ STISR = 0; /* disable DMA */ DCSR(si->txdma) &= ~DCSR_RUN; DCSR(si->rxdma) &= ~DCSR_RUN; /* disable FICP */ ICCR0 = 0; /* disable the STUART or FICP clocks */ pxa_irda_disable_clk(si); DRCMR(17) = 0; DRCMR(18) = 0; local_irq_restore(flags); /* power off board transceiver */ pxa_irda_set_mode(si, IR_OFF); printk(KERN_DEBUG "pxa_ir: irda shutdown\n"); } static int pxa_irda_start(struct net_device *dev) { struct pxa_irda *si = netdev_priv(dev); int err; si->speed = 9600; err = request_irq(si->uart_irq, pxa_irda_sir_irq, 0, dev->name, dev); if (err) goto err_irq1; err = request_irq(si->icp_irq, pxa_irda_fir_irq, 0, dev->name, dev); if (err) goto err_irq2; /* * The interrupt must remain disabled for now. 
*/ disable_irq(si->uart_irq); disable_irq(si->icp_irq); err = -EBUSY; si->rxdma = pxa_request_dma("FICP_RX",DMA_PRIO_LOW, pxa_irda_fir_dma_rx_irq, dev); if (si->rxdma < 0) goto err_rx_dma; si->txdma = pxa_request_dma("FICP_TX",DMA_PRIO_LOW, pxa_irda_fir_dma_tx_irq, dev); if (si->txdma < 0) goto err_tx_dma; err = -ENOMEM; si->dma_rx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, &si->dma_rx_buff_phy, GFP_KERNEL); if (!si->dma_rx_buff) goto err_dma_rx_buff; si->dma_tx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, &si->dma_tx_buff_phy, GFP_KERNEL); if (!si->dma_tx_buff) goto err_dma_tx_buff; /* Setup the serial port for the initial speed. */ pxa_irda_startup(si); /* * Open a new IrLAP layer instance. */ si->irlap = irlap_open(dev, &si->qos, "pxa"); err = -ENOMEM; if (!si->irlap) goto err_irlap; /* * Now enable the interrupt and start the queue */ enable_irq(si->uart_irq); enable_irq(si->icp_irq); netif_start_queue(dev); printk(KERN_DEBUG "pxa_ir: irda driver opened\n"); return 0; err_irlap: pxa_irda_shutdown(si); dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_tx_buff, si->dma_tx_buff_phy); err_dma_tx_buff: dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_rx_buff, si->dma_rx_buff_phy); err_dma_rx_buff: pxa_free_dma(si->txdma); err_tx_dma: pxa_free_dma(si->rxdma); err_rx_dma: free_irq(si->icp_irq, dev); err_irq2: free_irq(si->uart_irq, dev); err_irq1: return err; } static int pxa_irda_stop(struct net_device *dev) { struct pxa_irda *si = netdev_priv(dev); netif_stop_queue(dev); pxa_irda_shutdown(si); /* Stop IrLAP */ if (si->irlap) { irlap_close(si->irlap); si->irlap = NULL; } free_irq(si->uart_irq, dev); free_irq(si->icp_irq, dev); pxa_free_dma(si->rxdma); pxa_free_dma(si->txdma); if (si->dma_rx_buff) dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_tx_buff, si->dma_tx_buff_phy); if (si->dma_tx_buff) dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_rx_buff, si->dma_rx_buff_phy); printk(KERN_DEBUG "pxa_ir: irda 
driver closed\n"); return 0; } static int pxa_irda_suspend(struct platform_device *_dev, pm_message_t state) { struct net_device *dev = platform_get_drvdata(_dev); struct pxa_irda *si; if (dev && netif_running(dev)) { si = netdev_priv(dev); netif_device_detach(dev); pxa_irda_shutdown(si); } return 0; } static int pxa_irda_resume(struct platform_device *_dev) { struct net_device *dev = platform_get_drvdata(_dev); struct pxa_irda *si; if (dev && netif_running(dev)) { si = netdev_priv(dev); pxa_irda_startup(si); netif_device_attach(dev); netif_wake_queue(dev); } return 0; } static int pxa_irda_init_iobuf(iobuff_t *io, int size) { io->head = kmalloc(size, GFP_KERNEL | GFP_DMA); if (io->head != NULL) { io->truesize = size; io->in_frame = FALSE; io->state = OUTSIDE_FRAME; io->data = io->head; } return io->head ? 0 : -ENOMEM; } static const struct net_device_ops pxa_irda_netdev_ops = { .ndo_open = pxa_irda_start, .ndo_stop = pxa_irda_stop, .ndo_start_xmit = pxa_irda_hard_xmit, .ndo_do_ioctl = pxa_irda_ioctl, }; static int pxa_irda_probe(struct platform_device *pdev) { struct net_device *dev; struct pxa_irda *si; unsigned int baudrate_mask; int err; if (!pdev->dev.platform_data) return -ENODEV; err = request_mem_region(__PREG(STUART), 0x24, "IrDA") ? 0 : -EBUSY; if (err) goto err_mem_1; err = request_mem_region(__PREG(FICP), 0x1c, "IrDA") ? 0 : -EBUSY; if (err) goto err_mem_2; dev = alloc_irdadev(sizeof(struct pxa_irda)); if (!dev) { err = -ENOMEM; goto err_mem_3; } SET_NETDEV_DEV(dev, &pdev->dev); si = netdev_priv(dev); si->dev = &pdev->dev; si->pdata = pdev->dev.platform_data; si->uart_irq = platform_get_irq(pdev, 0); si->icp_irq = platform_get_irq(pdev, 1); si->sir_clk = clk_get(&pdev->dev, "UARTCLK"); si->fir_clk = clk_get(&pdev->dev, "FICPCLK"); if (IS_ERR(si->sir_clk) || IS_ERR(si->fir_clk)) { err = PTR_ERR(IS_ERR(si->sir_clk) ? 
si->sir_clk : si->fir_clk); goto err_mem_4; } /* * Initialise the SIR buffers */ err = pxa_irda_init_iobuf(&si->rx_buff, 14384); if (err) goto err_mem_4; err = pxa_irda_init_iobuf(&si->tx_buff, 4000); if (err) goto err_mem_5; if (gpio_is_valid(si->pdata->gpio_pwdown)) { err = gpio_request(si->pdata->gpio_pwdown, "IrDA switch"); if (err) goto err_startup; err = gpio_direction_output(si->pdata->gpio_pwdown, !si->pdata->gpio_pwdown_inverted); if (err) { gpio_free(si->pdata->gpio_pwdown); goto err_startup; } } if (si->pdata->startup) { err = si->pdata->startup(si->dev); if (err) goto err_startup; } if (gpio_is_valid(si->pdata->gpio_pwdown) && si->pdata->startup) dev_warn(si->dev, "gpio_pwdown and startup() both defined!\n"); dev->netdev_ops = &pxa_irda_netdev_ops; irda_init_max_qos_capabilies(&si->qos); baudrate_mask = 0; if (si->pdata->transceiver_cap & IR_SIRMODE) baudrate_mask |= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200; if (si->pdata->transceiver_cap & IR_FIRMODE) baudrate_mask |= IR_4000000 << 8; si->qos.baud_rate.bits &= baudrate_mask; si->qos.min_turn_time.bits = 7; /* 1ms or more */ irda_qos_bits_to_value(&si->qos); err = register_netdev(dev); if (err == 0) dev_set_drvdata(&pdev->dev, dev); if (err) { if (si->pdata->shutdown) si->pdata->shutdown(si->dev); err_startup: kfree(si->tx_buff.head); err_mem_5: kfree(si->rx_buff.head); err_mem_4: if (si->sir_clk && !IS_ERR(si->sir_clk)) clk_put(si->sir_clk); if (si->fir_clk && !IS_ERR(si->fir_clk)) clk_put(si->fir_clk); free_netdev(dev); err_mem_3: release_mem_region(__PREG(FICP), 0x1c); err_mem_2: release_mem_region(__PREG(STUART), 0x24); } err_mem_1: return err; } static int pxa_irda_remove(struct platform_device *_dev) { struct net_device *dev = platform_get_drvdata(_dev); if (dev) { struct pxa_irda *si = netdev_priv(dev); unregister_netdev(dev); if (gpio_is_valid(si->pdata->gpio_pwdown)) gpio_free(si->pdata->gpio_pwdown); if (si->pdata->shutdown) si->pdata->shutdown(si->dev); kfree(si->tx_buff.head); 
kfree(si->rx_buff.head); clk_put(si->fir_clk); clk_put(si->sir_clk); free_netdev(dev); } release_mem_region(__PREG(STUART), 0x24); release_mem_region(__PREG(FICP), 0x1c); return 0; } static struct platform_driver pxa_ir_driver = { .driver = { .name = "pxa2xx-ir", .owner = THIS_MODULE, }, .probe = pxa_irda_probe, .remove = pxa_irda_remove, .suspend = pxa_irda_suspend, .resume = pxa_irda_resume, }; module_platform_driver(pxa_ir_driver); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:pxa2xx-ir");
gpl-2.0
TeamRegular/android_kernel_tcl_msm8916
drivers/media/platform/s3c-camif/camif-core.c
2154
16013
/* * s3c24xx/s3c64xx SoC series Camera Interface (CAMIF) driver * * Copyright (C) 2012 Sylwester Nawrocki <sylvester.nawrocki@gmail.com> * Copyright (C) 2012 Tomasz Figa <tomasz.figa@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 2 of the License, * or (at your option) any later version. */ #define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__ #include <linux/bug.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/errno.h> #include <linux/gpio.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/version.h> #include <media/media-device.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-ioctl.h> #include <media/videobuf2-core.h> #include <media/videobuf2-dma-contig.h> #include "camif-core.h" static char *camif_clocks[CLK_MAX_NUM] = { /* HCLK CAMIF clock */ [CLK_GATE] = "camif", /* CAMIF / external camera sensor master clock */ [CLK_CAM] = "camera", }; static const struct camif_fmt camif_formats[] = { { .name = "YUV 4:2:2 planar, Y/Cb/Cr", .fourcc = V4L2_PIX_FMT_YUV422P, .depth = 16, .ybpp = 1, .color = IMG_FMT_YCBCR422P, .colplanes = 3, .flags = FMT_FL_S3C24XX_CODEC | FMT_FL_S3C64XX, }, { .name = "YUV 4:2:0 planar, Y/Cb/Cr", .fourcc = V4L2_PIX_FMT_YUV420, .depth = 12, .ybpp = 1, .color = IMG_FMT_YCBCR420, .colplanes = 3, .flags = FMT_FL_S3C24XX_CODEC | FMT_FL_S3C64XX, }, { .name = "YVU 4:2:0 planar, Y/Cr/Cb", .fourcc = V4L2_PIX_FMT_YVU420, .depth = 12, .ybpp = 1, .color = IMG_FMT_YCRCB420, .colplanes = 3, .flags = FMT_FL_S3C24XX_CODEC | FMT_FL_S3C64XX, }, { .name = "RGB565, 16 bpp", .fourcc = V4L2_PIX_FMT_RGB565X, .depth = 16, .ybpp = 2, .color = 
IMG_FMT_RGB565, .colplanes = 1, .flags = FMT_FL_S3C24XX_PREVIEW | FMT_FL_S3C64XX, }, { .name = "XRGB8888, 32 bpp", .fourcc = V4L2_PIX_FMT_RGB32, .depth = 32, .ybpp = 4, .color = IMG_FMT_XRGB8888, .colplanes = 1, .flags = FMT_FL_S3C24XX_PREVIEW | FMT_FL_S3C64XX, }, { .name = "BGR666", .fourcc = V4L2_PIX_FMT_BGR666, .depth = 32, .ybpp = 4, .color = IMG_FMT_RGB666, .colplanes = 1, .flags = FMT_FL_S3C64XX, } }; /** * s3c_camif_find_format() - lookup camif color format by fourcc or an index * @pixelformat: fourcc to match, ignored if null * @index: index to the camif_formats array, ignored if negative */ const struct camif_fmt *s3c_camif_find_format(struct camif_vp *vp, const u32 *pixelformat, int index) { const struct camif_fmt *fmt, *def_fmt = NULL; unsigned int i; int id = 0; if (index >= (int)ARRAY_SIZE(camif_formats)) return NULL; for (i = 0; i < ARRAY_SIZE(camif_formats); ++i) { fmt = &camif_formats[i]; if (vp && !(vp->fmt_flags & fmt->flags)) continue; if (pixelformat && fmt->fourcc == *pixelformat) return fmt; if (index == id) def_fmt = fmt; id++; } return def_fmt; } static int camif_get_scaler_factor(u32 src, u32 tar, u32 *ratio, u32 *shift) { unsigned int sh = 6; if (src >= 64 * tar) return -EINVAL; while (sh--) { unsigned int tmp = 1 << sh; if (src >= tar * tmp) { *shift = sh, *ratio = tmp; return 0; } } *shift = 0, *ratio = 1; return 0; } int s3c_camif_get_scaler_config(struct camif_vp *vp, struct camif_scaler *scaler) { struct v4l2_rect *camif_crop = &vp->camif->camif_crop; int source_x = camif_crop->width; int source_y = camif_crop->height; int target_x = vp->out_frame.rect.width; int target_y = vp->out_frame.rect.height; int ret; if (vp->rotation == 90 || vp->rotation == 270) swap(target_x, target_y); ret = camif_get_scaler_factor(source_x, target_x, &scaler->pre_h_ratio, &scaler->h_shift); if (ret < 0) return ret; ret = camif_get_scaler_factor(source_y, target_y, &scaler->pre_v_ratio, &scaler->v_shift); if (ret < 0) return ret; scaler->pre_dst_width = 
source_x / scaler->pre_h_ratio; scaler->pre_dst_height = source_y / scaler->pre_v_ratio; scaler->main_h_ratio = (source_x << 8) / (target_x << scaler->h_shift); scaler->main_v_ratio = (source_y << 8) / (target_y << scaler->v_shift); scaler->scaleup_h = (target_x >= source_x); scaler->scaleup_v = (target_y >= source_y); scaler->copy = 0; pr_debug("H: ratio: %u, shift: %u. V: ratio: %u, shift: %u.\n", scaler->pre_h_ratio, scaler->h_shift, scaler->pre_v_ratio, scaler->v_shift); pr_debug("Source: %dx%d, Target: %dx%d, scaleup_h/v: %d/%d\n", source_x, source_y, target_x, target_y, scaler->scaleup_h, scaler->scaleup_v); return 0; } static int camif_register_sensor(struct camif_dev *camif) { struct s3c_camif_sensor_info *sensor = &camif->pdata.sensor; struct v4l2_device *v4l2_dev = &camif->v4l2_dev; struct i2c_adapter *adapter; struct v4l2_subdev_format format; struct v4l2_subdev *sd; int ret; camif->sensor.sd = NULL; if (sensor->i2c_board_info.addr == 0) return -EINVAL; adapter = i2c_get_adapter(sensor->i2c_bus_num); if (adapter == NULL) { v4l2_warn(v4l2_dev, "failed to get I2C adapter %d\n", sensor->i2c_bus_num); return -EPROBE_DEFER; } sd = v4l2_i2c_new_subdev_board(v4l2_dev, adapter, &sensor->i2c_board_info, NULL); if (sd == NULL) { i2c_put_adapter(adapter); v4l2_warn(v4l2_dev, "failed to acquire subdev %s\n", sensor->i2c_board_info.type); return -EPROBE_DEFER; } camif->sensor.sd = sd; v4l2_info(v4l2_dev, "registered sensor subdevice %s\n", sd->name); /* Get initial pixel format and set it at the camif sink pad */ format.pad = 0; format.which = V4L2_SUBDEV_FORMAT_ACTIVE; ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &format); if (ret < 0) return 0; format.pad = CAMIF_SD_PAD_SINK; v4l2_subdev_call(&camif->subdev, pad, set_fmt, NULL, &format); v4l2_info(sd, "Initial format from sensor: %dx%d, %#x\n", format.format.width, format.format.height, format.format.code); return 0; } static void camif_unregister_sensor(struct camif_dev *camif) { struct v4l2_subdev *sd = 
camif->sensor.sd; struct i2c_client *client = sd ? v4l2_get_subdevdata(sd) : NULL; struct i2c_adapter *adapter; if (client == NULL) return; adapter = client->adapter; v4l2_device_unregister_subdev(sd); camif->sensor.sd = NULL; i2c_unregister_device(client); if (adapter) i2c_put_adapter(adapter); } static int camif_create_media_links(struct camif_dev *camif) { int i, ret; ret = media_entity_create_link(&camif->sensor.sd->entity, 0, &camif->subdev.entity, CAMIF_SD_PAD_SINK, MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED); if (ret) return ret; for (i = 1; i < CAMIF_SD_PADS_NUM && !ret; i++) { ret = media_entity_create_link(&camif->subdev.entity, i, &camif->vp[i - 1].vdev.entity, 0, MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED); } return ret; } static int camif_register_video_nodes(struct camif_dev *camif) { int ret = s3c_camif_register_video_node(camif, VP_CODEC); if (ret < 0) return ret; return s3c_camif_register_video_node(camif, VP_PREVIEW); } static void camif_unregister_video_nodes(struct camif_dev *camif) { s3c_camif_unregister_video_node(camif, VP_CODEC); s3c_camif_unregister_video_node(camif, VP_PREVIEW); } static void camif_unregister_media_entities(struct camif_dev *camif) { camif_unregister_video_nodes(camif); camif_unregister_sensor(camif); s3c_camif_unregister_subdev(camif); } /* * Media device */ static int camif_media_dev_register(struct camif_dev *camif) { struct media_device *md = &camif->media_dev; struct v4l2_device *v4l2_dev = &camif->v4l2_dev; unsigned int ip_rev = camif->variant->ip_revision; int ret; memset(md, 0, sizeof(*md)); snprintf(md->model, sizeof(md->model), "SAMSUNG S3C%s CAMIF", ip_rev == S3C6410_CAMIF_IP_REV ? 
"6410" : "244X"); strlcpy(md->bus_info, "platform", sizeof(md->bus_info)); md->hw_revision = ip_rev; md->driver_version = KERNEL_VERSION(1, 0, 0); md->dev = camif->dev; strlcpy(v4l2_dev->name, "s3c-camif", sizeof(v4l2_dev->name)); v4l2_dev->mdev = md; ret = v4l2_device_register(camif->dev, v4l2_dev); if (ret < 0) return ret; ret = media_device_register(md); if (ret < 0) v4l2_device_unregister(v4l2_dev); return ret; } static void camif_clk_put(struct camif_dev *camif) { int i; for (i = 0; i < CLK_MAX_NUM; i++) { if (IS_ERR_OR_NULL(camif->clock[i])) continue; clk_unprepare(camif->clock[i]); clk_put(camif->clock[i]); } } static int camif_clk_get(struct camif_dev *camif) { int ret, i; for (i = 0; i < CLK_MAX_NUM; i++) { camif->clock[i] = clk_get(camif->dev, camif_clocks[i]); if (IS_ERR(camif->clock[i])) { ret = PTR_ERR(camif->clock[i]); goto err; } ret = clk_prepare(camif->clock[i]); if (ret < 0) { clk_put(camif->clock[i]); camif->clock[i] = NULL; goto err; } } return 0; err: camif_clk_put(camif); dev_err(camif->dev, "failed to get clock: %s\n", camif_clocks[i]); return ret; } /* * The CAMIF device has two relatively independent data processing paths * that can source data from memory or the common camera input frontend. * Register interrupts for each data processing path (camif_vp). 
*/ static int camif_request_irqs(struct platform_device *pdev, struct camif_dev *camif) { int irq, ret, i; for (i = 0; i < CAMIF_VP_NUM; i++) { struct camif_vp *vp = &camif->vp[i]; init_waitqueue_head(&vp->irq_queue); irq = platform_get_irq(pdev, i); if (irq <= 0) { dev_err(&pdev->dev, "failed to get IRQ %d\n", i); return -ENXIO; } ret = devm_request_irq(&pdev->dev, irq, s3c_camif_irq_handler, 0, dev_name(&pdev->dev), vp); if (ret < 0) { dev_err(&pdev->dev, "failed to install IRQ: %d\n", ret); break; } } return ret; } static int s3c_camif_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct s3c_camif_plat_data *pdata = dev->platform_data; struct s3c_camif_drvdata *drvdata; struct camif_dev *camif; struct resource *mres; int ret = 0; camif = devm_kzalloc(dev, sizeof(*camif), GFP_KERNEL); if (!camif) return -ENOMEM; spin_lock_init(&camif->slock); mutex_init(&camif->lock); camif->dev = dev; if (!pdata || !pdata->gpio_get || !pdata->gpio_put) { dev_err(dev, "wrong platform data\n"); return -EINVAL; } camif->pdata = *pdata; drvdata = (void *)platform_get_device_id(pdev)->driver_data; camif->variant = drvdata->variant; mres = platform_get_resource(pdev, IORESOURCE_MEM, 0); camif->io_base = devm_ioremap_resource(dev, mres); if (IS_ERR(camif->io_base)) return PTR_ERR(camif->io_base); ret = camif_request_irqs(pdev, camif); if (ret < 0) return ret; ret = pdata->gpio_get(); if (ret < 0) return ret; ret = s3c_camif_create_subdev(camif); if (ret < 0) goto err_sd; ret = camif_clk_get(camif); if (ret < 0) goto err_clk; platform_set_drvdata(pdev, camif); clk_set_rate(camif->clock[CLK_CAM], camif->pdata.sensor.clock_frequency); dev_info(dev, "sensor clock frequency: %lu\n", clk_get_rate(camif->clock[CLK_CAM])); /* * Set initial pixel format, resolution and crop rectangle. * Must be done before a sensor subdev is registered as some * settings are overrode with values from sensor subdev. 
*/ s3c_camif_set_defaults(camif); pm_runtime_enable(dev); ret = pm_runtime_get_sync(dev); if (ret < 0) goto err_pm; /* Initialize contiguous memory allocator */ camif->alloc_ctx = vb2_dma_contig_init_ctx(dev); if (IS_ERR(camif->alloc_ctx)) { ret = PTR_ERR(camif->alloc_ctx); goto err_alloc; } ret = camif_media_dev_register(camif); if (ret < 0) goto err_mdev; ret = camif_register_sensor(camif); if (ret < 0) goto err_sens; ret = v4l2_device_register_subdev(&camif->v4l2_dev, &camif->subdev); if (ret < 0) goto err_sens; mutex_lock(&camif->media_dev.graph_mutex); ret = v4l2_device_register_subdev_nodes(&camif->v4l2_dev); if (ret < 0) goto err_unlock; ret = camif_register_video_nodes(camif); if (ret < 0) goto err_unlock; ret = camif_create_media_links(camif); if (ret < 0) goto err_unlock; mutex_unlock(&camif->media_dev.graph_mutex); pm_runtime_put(dev); return 0; err_unlock: mutex_unlock(&camif->media_dev.graph_mutex); err_sens: v4l2_device_unregister(&camif->v4l2_dev); media_device_unregister(&camif->media_dev); camif_unregister_media_entities(camif); err_mdev: vb2_dma_contig_cleanup_ctx(camif->alloc_ctx); err_alloc: pm_runtime_put(dev); pm_runtime_disable(dev); err_pm: camif_clk_put(camif); err_clk: s3c_camif_unregister_subdev(camif); err_sd: pdata->gpio_put(); return ret; } static int s3c_camif_remove(struct platform_device *pdev) { struct camif_dev *camif = platform_get_drvdata(pdev); struct s3c_camif_plat_data *pdata = &camif->pdata; media_device_unregister(&camif->media_dev); camif_unregister_media_entities(camif); v4l2_device_unregister(&camif->v4l2_dev); pm_runtime_disable(&pdev->dev); camif_clk_put(camif); pdata->gpio_put(); return 0; } static int s3c_camif_runtime_resume(struct device *dev) { struct camif_dev *camif = dev_get_drvdata(dev); clk_enable(camif->clock[CLK_GATE]); /* null op on s3c244x */ clk_enable(camif->clock[CLK_CAM]); return 0; } static int s3c_camif_runtime_suspend(struct device *dev) { struct camif_dev *camif = dev_get_drvdata(dev); /* null op 
on s3c244x */ clk_disable(camif->clock[CLK_CAM]); clk_disable(camif->clock[CLK_GATE]); return 0; } static const struct s3c_camif_variant s3c244x_camif_variant = { .vp_pix_limits = { [VP_CODEC] = { .max_out_width = 4096, .max_sc_out_width = 2048, .out_width_align = 16, .min_out_width = 16, .max_height = 4096, }, [VP_PREVIEW] = { .max_out_width = 640, .max_sc_out_width = 640, .out_width_align = 16, .min_out_width = 16, .max_height = 480, } }, .pix_limits = { .win_hor_offset_align = 8, }, .ip_revision = S3C244X_CAMIF_IP_REV, }; static struct s3c_camif_drvdata s3c244x_camif_drvdata = { .variant = &s3c244x_camif_variant, .bus_clk_freq = 24000000UL, }; static const struct s3c_camif_variant s3c6410_camif_variant = { .vp_pix_limits = { [VP_CODEC] = { .max_out_width = 4096, .max_sc_out_width = 2048, .out_width_align = 16, .min_out_width = 16, .max_height = 4096, }, [VP_PREVIEW] = { .max_out_width = 4096, .max_sc_out_width = 720, .out_width_align = 16, .min_out_width = 16, .max_height = 4096, } }, .pix_limits = { .win_hor_offset_align = 8, }, .ip_revision = S3C6410_CAMIF_IP_REV, .has_img_effect = 1, .vp_offset = 0x20, }; static struct s3c_camif_drvdata s3c6410_camif_drvdata = { .variant = &s3c6410_camif_variant, .bus_clk_freq = 133000000UL, }; static struct platform_device_id s3c_camif_driver_ids[] = { { .name = "s3c2440-camif", .driver_data = (unsigned long)&s3c244x_camif_drvdata, }, { .name = "s3c6410-camif", .driver_data = (unsigned long)&s3c6410_camif_drvdata, }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(platform, s3c_camif_driver_ids); static const struct dev_pm_ops s3c_camif_pm_ops = { .runtime_suspend = s3c_camif_runtime_suspend, .runtime_resume = s3c_camif_runtime_resume, }; static struct platform_driver s3c_camif_driver = { .probe = s3c_camif_probe, .remove = s3c_camif_remove, .id_table = s3c_camif_driver_ids, .driver = { .name = S3C_CAMIF_DRIVER_NAME, .owner = THIS_MODULE, .pm = &s3c_camif_pm_ops, } }; module_platform_driver(s3c_camif_driver); 
MODULE_AUTHOR("Sylwester Nawrocki <sylvester.nawrocki@gmail.com>"); MODULE_AUTHOR("Tomasz Figa <tomasz.figa@gmail.com>"); MODULE_DESCRIPTION("S3C24XX/S3C64XX SoC camera interface driver"); MODULE_LICENSE("GPL");
gpl-2.0
ashyx/Samsung_Galaxy_Tab_A_kernel
drivers/gpio/gpio-ep93xx.c
2410
10914
/*
 * Generic EP93xx GPIO handling
 *
 * Copyright (c) 2008 Ryan Mallon
 * Copyright (c) 2011 H Hartley Sweeten <hsweeten@visionengravers.com>
 *
 * Based on code originally from:
 * linux/arch/arm/mach-ep93xx/core.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/basic_mmio_gpio.h>

#include <mach/hardware.h>
#include <mach/gpio-ep93xx.h>

#define irq_to_gpio(irq)	((irq) - gpio_to_irq(0))

struct ep93xx_gpio {
	void __iomem		*mmio_base;
	struct bgpio_chip	bgc[8];
};

/*************************************************************************
 * Interrupt handling for EP93xx on-chip GPIOs
 *************************************************************************/
/* Cached per-port interrupt configuration; index 0/1/2 = port A/B/F.
 * The hardware registers are write-only mirrors of this state. */
static unsigned char gpio_int_unmasked[3];
static unsigned char gpio_int_enabled[3];
static unsigned char gpio_int_type1[3];
static unsigned char gpio_int_type2[3];
static unsigned char gpio_int_debounce[3];

/* Port ordering is: A B F */
static const u8 int_type1_register_offset[3]	= { 0x90, 0xac, 0x4c };
static const u8 int_type2_register_offset[3]	= { 0x94, 0xb0, 0x50 };
static const u8 eoi_register_offset[3]		= { 0x98, 0xb4, 0x54 };
static const u8 int_en_register_offset[3]	= { 0x9c, 0xb8, 0x58 };
static const u8 int_debounce_register_offset[3]	= { 0xa8, 0xc4, 0x64 };

/*
 * Write the cached type1/type2/enable state for @port out to the
 * hardware.  Interrupts for the port are disabled (enable register
 * written as 0) while the type registers are updated, then re-enabled
 * for lines that are both enabled and unmasked.
 */
static void ep93xx_gpio_update_int_params(unsigned port)
{
	BUG_ON(port > 2);

	__raw_writeb(0, EP93XX_GPIO_REG(int_en_register_offset[port]));

	__raw_writeb(gpio_int_type2[port],
		EP93XX_GPIO_REG(int_type2_register_offset[port]));

	__raw_writeb(gpio_int_type1[port],
		EP93XX_GPIO_REG(int_type1_register_offset[port]));

	__raw_writeb(gpio_int_unmasked[port] & gpio_int_enabled[port],
		EP93XX_GPIO_REG(int_en_register_offset[port]));
}

/* Enable or disable hardware debounce for the GPIO line behind @irq. */
static void ep93xx_gpio_int_debounce(unsigned int irq, bool enable)
{
	int line = irq_to_gpio(irq);
	int port = line >> 3;
	int port_mask = 1 << (line & 7);

	if (enable)
		gpio_int_debounce[port] |= port_mask;
	else
		gpio_int_debounce[port] &= ~port_mask;

	__raw_writeb(gpio_int_debounce[port],
		EP93XX_GPIO_REG(int_debounce_register_offset[port]));
}

/*
 * Demultiplex the combined port A/B interrupt: read each port's status
 * register and dispatch the per-line GPIO interrupts that are pending.
 */
static void ep93xx_gpio_ab_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	unsigned char status;
	int i;

	status = __raw_readb(EP93XX_GPIO_A_INT_STATUS);
	for (i = 0; i < 8; i++) {
		if (status & (1 << i)) {
			int gpio_irq = gpio_to_irq(EP93XX_GPIO_LINE_A(0)) + i;
			generic_handle_irq(gpio_irq);
		}
	}

	status = __raw_readb(EP93XX_GPIO_B_INT_STATUS);
	for (i = 0; i < 8; i++) {
		if (status & (1 << i)) {
			int gpio_irq = gpio_to_irq(EP93XX_GPIO_LINE_B(0)) + i;
			generic_handle_irq(gpio_irq);
		}
	}
}

/* Port F lines each have a dedicated (non-contiguous) hardware IRQ. */
static void ep93xx_gpio_f_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	/*
	 * map discontiguous hw irq range to continuous sw irq range:
	 *
	 * IRQ_EP93XX_GPIO{0..7}MUX -> gpio_to_irq(EP93XX_GPIO_LINE_F({0..7})
	 */
	int port_f_idx = ((irq + 1) & 7) ^ 4; /* {19..22,47..50} -> {0..7} */
	int gpio_irq = gpio_to_irq(EP93XX_GPIO_LINE_F(0)) + port_f_idx;

	generic_handle_irq(gpio_irq);
}

/* Acknowledge: write the line's EOI bit; for both-edge interrupts flip
 * the polarity first so the opposite edge is caught next. */
static void ep93xx_gpio_irq_ack(struct irq_data *d)
{
	int line = irq_to_gpio(d->irq);
	int port = line >> 3;
	int port_mask = 1 << (line & 7);

	if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) {
		gpio_int_type2[port] ^= port_mask; /* switch edge direction */
		ep93xx_gpio_update_int_params(port);
	}

	__raw_writeb(port_mask, EP93XX_GPIO_REG(eoi_register_offset[port]));
}

/* Mask + acknowledge in one step (used by the flow handler). */
static void ep93xx_gpio_irq_mask_ack(struct irq_data *d)
{
	int line = irq_to_gpio(d->irq);
	int port = line >> 3;
	int port_mask = 1 << (line & 7);

	if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH)
		gpio_int_type2[port] ^= port_mask; /* switch edge direction */

	gpio_int_unmasked[port] &= ~port_mask;
	ep93xx_gpio_update_int_params(port);

	__raw_writeb(port_mask, EP93XX_GPIO_REG(eoi_register_offset[port]));
}

static void
ep93xx_gpio_irq_mask(struct irq_data *d) { int line = irq_to_gpio(d->irq); int port = line >> 3; gpio_int_unmasked[port] &= ~(1 << (line & 7)); ep93xx_gpio_update_int_params(port); } static void ep93xx_gpio_irq_unmask(struct irq_data *d) { int line = irq_to_gpio(d->irq); int port = line >> 3; gpio_int_unmasked[port] |= 1 << (line & 7); ep93xx_gpio_update_int_params(port); } /* * gpio_int_type1 controls whether the interrupt is level (0) or * edge (1) triggered, while gpio_int_type2 controls whether it * triggers on low/falling (0) or high/rising (1). */ static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type) { const int gpio = irq_to_gpio(d->irq); const int port = gpio >> 3; const int port_mask = 1 << (gpio & 7); irq_flow_handler_t handler; gpio_direction_input(gpio); switch (type) { case IRQ_TYPE_EDGE_RISING: gpio_int_type1[port] |= port_mask; gpio_int_type2[port] |= port_mask; handler = handle_edge_irq; break; case IRQ_TYPE_EDGE_FALLING: gpio_int_type1[port] |= port_mask; gpio_int_type2[port] &= ~port_mask; handler = handle_edge_irq; break; case IRQ_TYPE_LEVEL_HIGH: gpio_int_type1[port] &= ~port_mask; gpio_int_type2[port] |= port_mask; handler = handle_level_irq; break; case IRQ_TYPE_LEVEL_LOW: gpio_int_type1[port] &= ~port_mask; gpio_int_type2[port] &= ~port_mask; handler = handle_level_irq; break; case IRQ_TYPE_EDGE_BOTH: gpio_int_type1[port] |= port_mask; /* set initial polarity based on current input level */ if (gpio_get_value(gpio)) gpio_int_type2[port] &= ~port_mask; /* falling */ else gpio_int_type2[port] |= port_mask; /* rising */ handler = handle_edge_irq; break; default: return -EINVAL; } __irq_set_handler_locked(d->irq, handler); gpio_int_enabled[port] |= port_mask; ep93xx_gpio_update_int_params(port); return 0; } static struct irq_chip ep93xx_gpio_irq_chip = { .name = "GPIO", .irq_ack = ep93xx_gpio_irq_ack, .irq_mask_ack = ep93xx_gpio_irq_mask_ack, .irq_mask = ep93xx_gpio_irq_mask, .irq_unmask = ep93xx_gpio_irq_unmask, .irq_set_type = 
ep93xx_gpio_irq_type, }; static void ep93xx_gpio_init_irq(void) { int gpio_irq; for (gpio_irq = gpio_to_irq(0); gpio_irq <= gpio_to_irq(EP93XX_GPIO_LINE_MAX_IRQ); ++gpio_irq) { irq_set_chip_and_handler(gpio_irq, &ep93xx_gpio_irq_chip, handle_level_irq); set_irq_flags(gpio_irq, IRQF_VALID); } irq_set_chained_handler(IRQ_EP93XX_GPIO_AB, ep93xx_gpio_ab_irq_handler); irq_set_chained_handler(IRQ_EP93XX_GPIO0MUX, ep93xx_gpio_f_irq_handler); irq_set_chained_handler(IRQ_EP93XX_GPIO1MUX, ep93xx_gpio_f_irq_handler); irq_set_chained_handler(IRQ_EP93XX_GPIO2MUX, ep93xx_gpio_f_irq_handler); irq_set_chained_handler(IRQ_EP93XX_GPIO3MUX, ep93xx_gpio_f_irq_handler); irq_set_chained_handler(IRQ_EP93XX_GPIO4MUX, ep93xx_gpio_f_irq_handler); irq_set_chained_handler(IRQ_EP93XX_GPIO5MUX, ep93xx_gpio_f_irq_handler); irq_set_chained_handler(IRQ_EP93XX_GPIO6MUX, ep93xx_gpio_f_irq_handler); irq_set_chained_handler(IRQ_EP93XX_GPIO7MUX, ep93xx_gpio_f_irq_handler); } /************************************************************************* * gpiolib interface for EP93xx on-chip GPIOs *************************************************************************/ struct ep93xx_gpio_bank { const char *label; int data; int dir; int base; bool has_debounce; }; #define EP93XX_GPIO_BANK(_label, _data, _dir, _base, _debounce) \ { \ .label = _label, \ .data = _data, \ .dir = _dir, \ .base = _base, \ .has_debounce = _debounce, \ } static struct ep93xx_gpio_bank ep93xx_gpio_banks[] = { EP93XX_GPIO_BANK("A", 0x00, 0x10, 0, true), EP93XX_GPIO_BANK("B", 0x04, 0x14, 8, true), EP93XX_GPIO_BANK("C", 0x08, 0x18, 40, false), EP93XX_GPIO_BANK("D", 0x0c, 0x1c, 24, false), EP93XX_GPIO_BANK("E", 0x20, 0x24, 32, false), EP93XX_GPIO_BANK("F", 0x30, 0x34, 16, true), EP93XX_GPIO_BANK("G", 0x38, 0x3c, 48, false), EP93XX_GPIO_BANK("H", 0x40, 0x44, 56, false), }; static int ep93xx_gpio_set_debounce(struct gpio_chip *chip, unsigned offset, unsigned debounce) { int gpio = chip->base + offset; int irq = gpio_to_irq(gpio); if (irq 
< 0) return -EINVAL; ep93xx_gpio_int_debounce(irq, debounce ? true : false); return 0; } /* * Map GPIO A0..A7 (0..7) to irq 64..71, * B0..B7 (7..15) to irq 72..79, and * F0..F7 (16..24) to irq 80..87. */ static int ep93xx_gpio_to_irq(struct gpio_chip *chip, unsigned offset) { int gpio = chip->base + offset; if (gpio > EP93XX_GPIO_LINE_MAX_IRQ) return -EINVAL; return 64 + gpio; } static int ep93xx_gpio_add_bank(struct bgpio_chip *bgc, struct device *dev, void __iomem *mmio_base, struct ep93xx_gpio_bank *bank) { void __iomem *data = mmio_base + bank->data; void __iomem *dir = mmio_base + bank->dir; int err; err = bgpio_init(bgc, dev, 1, data, NULL, NULL, dir, NULL, 0); if (err) return err; bgc->gc.label = bank->label; bgc->gc.base = bank->base; if (bank->has_debounce) { bgc->gc.set_debounce = ep93xx_gpio_set_debounce; bgc->gc.to_irq = ep93xx_gpio_to_irq; } return gpiochip_add(&bgc->gc); } static int ep93xx_gpio_probe(struct platform_device *pdev) { struct ep93xx_gpio *ep93xx_gpio; struct resource *res; void __iomem *mmio; int i; int ret; ep93xx_gpio = kzalloc(sizeof(*ep93xx_gpio), GFP_KERNEL); if (!ep93xx_gpio) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { ret = -ENXIO; goto exit_free; } if (!request_mem_region(res->start, resource_size(res), pdev->name)) { ret = -EBUSY; goto exit_free; } mmio = ioremap(res->start, resource_size(res)); if (!mmio) { ret = -ENXIO; goto exit_release; } ep93xx_gpio->mmio_base = mmio; for (i = 0; i < ARRAY_SIZE(ep93xx_gpio_banks); i++) { struct bgpio_chip *bgc = &ep93xx_gpio->bgc[i]; struct ep93xx_gpio_bank *bank = &ep93xx_gpio_banks[i]; if (ep93xx_gpio_add_bank(bgc, &pdev->dev, mmio, bank)) dev_warn(&pdev->dev, "Unable to add gpio bank %s\n", bank->label); } ep93xx_gpio_init_irq(); return 0; exit_release: release_mem_region(res->start, resource_size(res)); exit_free: kfree(ep93xx_gpio); dev_info(&pdev->dev, "%s failed with errno %d\n", __func__, ret); return ret; } static struct platform_driver 
ep93xx_gpio_driver = { .driver = { .name = "gpio-ep93xx", .owner = THIS_MODULE, }, .probe = ep93xx_gpio_probe, }; static int __init ep93xx_gpio_init(void) { return platform_driver_register(&ep93xx_gpio_driver); } postcore_initcall(ep93xx_gpio_init); MODULE_AUTHOR("Ryan Mallon <ryan@bluewatersys.com> " "H Hartley Sweeten <hsweeten@visionengravers.com>"); MODULE_DESCRIPTION("EP93XX GPIO driver"); MODULE_LICENSE("GPL");
gpl-2.0
rhtu/linux
arch/sh/mm/tlb-sh5.c
2666
5614
/* * arch/sh/mm/tlb-sh5.c * * Copyright (C) 2003 Paul Mundt <lethal@linux-sh.org> * Copyright (C) 2003 Richard Curnow <richard.curnow@superh.com> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/mm.h> #include <linux/init.h> #include <asm/page.h> #include <asm/tlb.h> #include <asm/mmu_context.h> /** * sh64_tlb_init - Perform initial setup for the DTLB and ITLB. */ int sh64_tlb_init(void) { /* Assign some sane DTLB defaults */ cpu_data->dtlb.entries = 64; cpu_data->dtlb.step = 0x10; cpu_data->dtlb.first = DTLB_FIXED | cpu_data->dtlb.step; cpu_data->dtlb.next = cpu_data->dtlb.first; cpu_data->dtlb.last = DTLB_FIXED | ((cpu_data->dtlb.entries - 1) * cpu_data->dtlb.step); /* And again for the ITLB */ cpu_data->itlb.entries = 64; cpu_data->itlb.step = 0x10; cpu_data->itlb.first = ITLB_FIXED | cpu_data->itlb.step; cpu_data->itlb.next = cpu_data->itlb.first; cpu_data->itlb.last = ITLB_FIXED | ((cpu_data->itlb.entries - 1) * cpu_data->itlb.step); return 0; } /** * sh64_next_free_dtlb_entry - Find the next available DTLB entry */ unsigned long long sh64_next_free_dtlb_entry(void) { return cpu_data->dtlb.next; } /** * sh64_get_wired_dtlb_entry - Allocate a wired (locked-in) entry in the DTLB */ unsigned long long sh64_get_wired_dtlb_entry(void) { unsigned long long entry = sh64_next_free_dtlb_entry(); cpu_data->dtlb.first += cpu_data->dtlb.step; cpu_data->dtlb.next += cpu_data->dtlb.step; return entry; } /** * sh64_put_wired_dtlb_entry - Free a wired (locked-in) entry in the DTLB. * * @entry: Address of TLB slot. * * Works like a stack, last one to allocate must be first one to free. */ int sh64_put_wired_dtlb_entry(unsigned long long entry) { __flush_tlb_slot(entry); /* * We don't do any particularly useful tracking of wired entries, * so this approach works like a stack .. 
last one to be allocated * has to be the first one to be freed. * * We could potentially load wired entries into a list and work on * rebalancing the list periodically (which also entails moving the * contents of a TLB entry) .. though I have a feeling that this is * more trouble than it's worth. */ /* * Entry must be valid .. we don't want any ITLB addresses! */ if (entry <= DTLB_FIXED) return -EINVAL; /* * Next, check if we're within range to be freed. (ie, must be the * entry beneath the first 'free' entry! */ if (entry < (cpu_data->dtlb.first - cpu_data->dtlb.step)) return -EINVAL; /* If we are, then bring this entry back into the list */ cpu_data->dtlb.first -= cpu_data->dtlb.step; cpu_data->dtlb.next = entry; return 0; } /** * sh64_setup_tlb_slot - Load up a translation in a wired slot. * * @config_addr: Address of TLB slot. * @eaddr: Virtual address. * @asid: Address Space Identifier. * @paddr: Physical address. * * Load up a virtual<->physical translation for @eaddr<->@paddr in the * pre-allocated TLB slot @config_addr (see sh64_get_wired_dtlb_entry). */ void sh64_setup_tlb_slot(unsigned long long config_addr, unsigned long eaddr, unsigned long asid, unsigned long paddr) { unsigned long long pteh, ptel; pteh = neff_sign_extend(eaddr); pteh &= PAGE_MASK; pteh |= (asid << PTEH_ASID_SHIFT) | PTEH_VALID; ptel = neff_sign_extend(paddr); ptel &= PAGE_MASK; ptel |= (_PAGE_CACHABLE | _PAGE_READ | _PAGE_WRITE); asm volatile("putcfg %0, 1, %1\n\t" "putcfg %0, 0, %2\n" : : "r" (config_addr), "r" (ptel), "r" (pteh)); } /** * sh64_teardown_tlb_slot - Teardown a translation. * * @config_addr: Address of TLB slot. * * Teardown any existing mapping in the TLB slot @config_addr. 
*/ void sh64_teardown_tlb_slot(unsigned long long config_addr) __attribute__ ((alias("__flush_tlb_slot"))); static int dtlb_entry; static unsigned long long dtlb_entries[64]; void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte) { unsigned long long entry; unsigned long paddr, flags; BUG_ON(dtlb_entry == ARRAY_SIZE(dtlb_entries)); local_irq_save(flags); entry = sh64_get_wired_dtlb_entry(); dtlb_entries[dtlb_entry++] = entry; paddr = pte_val(pte) & _PAGE_FLAGS_HARDWARE_MASK; paddr &= ~PAGE_MASK; sh64_setup_tlb_slot(entry, addr, get_asid(), paddr); local_irq_restore(flags); } void tlb_unwire_entry(void) { unsigned long long entry; unsigned long flags; BUG_ON(!dtlb_entry); local_irq_save(flags); entry = dtlb_entries[dtlb_entry--]; sh64_teardown_tlb_slot(entry); sh64_put_wired_dtlb_entry(entry); local_irq_restore(flags); } void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) { unsigned long long ptel; unsigned long long pteh=0; struct tlb_info *tlbp; unsigned long long next; unsigned int fault_code = get_thread_fault_code(); /* Get PTEL first */ ptel = pte.pte_low; /* * Set PTEH register */ pteh = neff_sign_extend(address & MMU_VPN_MASK); /* Set the ASID. */ pteh |= get_asid() << PTEH_ASID_SHIFT; pteh |= PTEH_VALID; /* Set PTEL register, set_pte has performed the sign extension */ ptel &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */ if (fault_code & FAULT_CODE_ITLB) tlbp = &cpu_data->itlb; else tlbp = &cpu_data->dtlb; next = tlbp->next; __flush_tlb_slot(next); asm volatile ("putcfg %0,1,%2\n\n\t" "putcfg %0,0,%1\n" : : "r" (next), "r" (pteh), "r" (ptel) ); next += TLB_STEP; if (next > tlbp->last) next = tlbp->first; tlbp->next = next; }
gpl-2.0
Ezekeel/android-3.0
sound/soc/samsung/s3c24xx_simtec_tlv320aic23.c
2922
3332
/* sound/soc/samsung/s3c24xx_simtec_tlv320aic23.c * * Copyright 2009 Simtec Electronics * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <sound/soc.h> #include "s3c24xx_simtec.h" /* supported machines: * * Machine Connections AMP * ------- ----------- --- * BAST MIC, HPOUT, LOUT, LIN TPA2001D1 (HPOUTL,R) (gain hardwired) * VR1000 HPOUT, LIN None * VR2000 LIN, LOUT, MIC, HP LM4871 (HPOUTL,R) * DePicture LIN, LOUT, MIC, HP LM4871 (HPOUTL,R) * Anubis LIN, LOUT, MIC, HP TPA2001D1 (HPOUTL,R) */ static const struct snd_soc_dapm_widget dapm_widgets[] = { SND_SOC_DAPM_HP("Headphone Jack", NULL), SND_SOC_DAPM_LINE("Line In", NULL), SND_SOC_DAPM_LINE("Line Out", NULL), SND_SOC_DAPM_MIC("Mic Jack", NULL), }; static const struct snd_soc_dapm_route base_map[] = { { "Headphone Jack", NULL, "LHPOUT"}, { "Headphone Jack", NULL, "RHPOUT"}, { "Line Out", NULL, "LOUT" }, { "Line Out", NULL, "ROUT" }, { "LLINEIN", NULL, "Line In"}, { "RLINEIN", NULL, "Line In"}, { "MICIN", NULL, "Mic Jack"}, }; /** * simtec_tlv320aic23_init - initialise and add controls * @codec; The codec instance to attach to. * * Attach our controls and configure the necessary codec * mappings for our sound card instance. 
*/ static int simtec_tlv320aic23_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; struct snd_soc_dapm_context *dapm = &codec->dapm; snd_soc_dapm_new_controls(dapm, dapm_widgets, ARRAY_SIZE(dapm_widgets)); snd_soc_dapm_add_routes(dapm, base_map, ARRAY_SIZE(base_map)); snd_soc_dapm_enable_pin(dapm, "Headphone Jack"); snd_soc_dapm_enable_pin(dapm, "Line In"); snd_soc_dapm_enable_pin(dapm, "Line Out"); snd_soc_dapm_enable_pin(dapm, "Mic Jack"); simtec_audio_init(rtd); snd_soc_dapm_sync(dapm); return 0; } static struct snd_soc_dai_link simtec_dai_aic23 = { .name = "tlv320aic23", .stream_name = "TLV320AIC23", .codec_name = "tlv320aic3x-codec.0-001a", .cpu_dai_name = "s3c24xx-iis", .codec_dai_name = "tlv320aic3x-hifi", .platform_name = "samsung-audio", .init = simtec_tlv320aic23_init, }; /* simtec audio machine driver */ static struct snd_soc_card snd_soc_machine_simtec_aic23 = { .name = "Simtec", .dai_link = &simtec_dai_aic23, .num_links = 1, }; static int __devinit simtec_audio_tlv320aic23_probe(struct platform_device *pd) { return simtec_audio_core_probe(pd, &snd_soc_machine_simtec_aic23); } static struct platform_driver simtec_audio_tlv320aic23_platdrv = { .driver = { .owner = THIS_MODULE, .name = "s3c24xx-simtec-tlv320aic23", .pm = simtec_audio_pm, }, .probe = simtec_audio_tlv320aic23_probe, .remove = __devexit_p(simtec_audio_remove), }; MODULE_ALIAS("platform:s3c24xx-simtec-tlv320aic23"); static int __init simtec_tlv320aic23_modinit(void) { return platform_driver_register(&simtec_audio_tlv320aic23_platdrv); } static void __exit simtec_tlv320aic23_modexit(void) { platform_driver_unregister(&simtec_audio_tlv320aic23_platdrv); } module_init(simtec_tlv320aic23_modinit); module_exit(simtec_tlv320aic23_modexit); MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); MODULE_DESCRIPTION("ALSA SoC Simtec Audio support"); MODULE_LICENSE("GPL");
gpl-2.0
TeamRegular/android_kernel_hisense_m470
drivers/isdn/hisax/st5481_usb.c
3178
15277
/* * Driver for ST5481 USB ISDN modem * * Author Frode Isaksen * Copyright 2001 by Frode Isaksen <fisaksen@bewan.com> * 2001 by Kai Germaschewski <kai.germaschewski@gmx.de> * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * */ #include <linux/init.h> #include <linux/usb.h> #include <linux/slab.h> #include "st5481.h" static int st5481_isoc_flatten(struct urb *urb); /* ====================================================================== * control pipe */ /* * Send the next endpoint 0 request stored in the FIFO. * Called either by the completion or by usb_ctrl_msg. */ static void usb_next_ctrl_msg(struct urb *urb, struct st5481_adapter *adapter) { struct st5481_ctrl *ctrl = &adapter->ctrl; int r_index; if (test_and_set_bit(0, &ctrl->busy)) { return; } if ((r_index = fifo_remove(&ctrl->msg_fifo.f)) < 0) { test_and_clear_bit(0,&ctrl->busy); return; } urb->setup_packet = (unsigned char *)&ctrl->msg_fifo.data[r_index]; DBG(1,"request=0x%02x,value=0x%04x,index=%x", ((struct ctrl_msg *)urb->setup_packet)->dr.bRequest, ((struct ctrl_msg *)urb->setup_packet)->dr.wValue, ((struct ctrl_msg *)urb->setup_packet)->dr.wIndex); // Prepare the URB urb->dev = adapter->usb_dev; SUBMIT_URB(urb, GFP_ATOMIC); } /* * Asynchronous endpoint 0 request (async version of usb_control_msg). * The request will be queued up in a FIFO if the endpoint is busy. 
*/ static void usb_ctrl_msg(struct st5481_adapter *adapter, u8 request, u8 requesttype, u16 value, u16 index, ctrl_complete_t complete, void *context) { struct st5481_ctrl *ctrl = &adapter->ctrl; int w_index; struct ctrl_msg *ctrl_msg; if ((w_index = fifo_add(&ctrl->msg_fifo.f)) < 0) { WARNING("control msg FIFO full"); return; } ctrl_msg = &ctrl->msg_fifo.data[w_index]; ctrl_msg->dr.bRequestType = requesttype; ctrl_msg->dr.bRequest = request; ctrl_msg->dr.wValue = cpu_to_le16p(&value); ctrl_msg->dr.wIndex = cpu_to_le16p(&index); ctrl_msg->dr.wLength = 0; ctrl_msg->complete = complete; ctrl_msg->context = context; usb_next_ctrl_msg(ctrl->urb, adapter); } /* * Asynchronous endpoint 0 device request. */ void st5481_usb_device_ctrl_msg(struct st5481_adapter *adapter, u8 request, u16 value, ctrl_complete_t complete, void *context) { usb_ctrl_msg(adapter, request, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, 0, complete, context); } /* * Asynchronous pipe reset (async version of usb_clear_halt). */ void st5481_usb_pipe_reset(struct st5481_adapter *adapter, u_char pipe, ctrl_complete_t complete, void *context) { DBG(1,"pipe=%02x",pipe); usb_ctrl_msg(adapter, USB_REQ_CLEAR_FEATURE, USB_DIR_OUT | USB_RECIP_ENDPOINT, 0, pipe, complete, context); } /* Physical level functions */ void st5481_ph_command(struct st5481_adapter *adapter, unsigned int command) { DBG(8,"command=%s", ST5481_CMD_string(command)); st5481_usb_device_ctrl_msg(adapter, TXCI, command, NULL, NULL); } /* * The request on endpoint 0 has completed. * Call the user provided completion routine and try * to send the next request. 
*/ static void usb_ctrl_complete(struct urb *urb) { struct st5481_adapter *adapter = urb->context; struct st5481_ctrl *ctrl = &adapter->ctrl; struct ctrl_msg *ctrl_msg; if (unlikely(urb->status < 0)) { switch (urb->status) { case -ENOENT: case -ESHUTDOWN: case -ECONNRESET: DBG(1,"urb killed status %d", urb->status); return; // Give up default: WARNING("urb status %d",urb->status); break; } } ctrl_msg = (struct ctrl_msg *)urb->setup_packet; if (ctrl_msg->dr.bRequest == USB_REQ_CLEAR_FEATURE) { /* Special case handling for pipe reset */ le16_to_cpus(&ctrl_msg->dr.wIndex); usb_reset_endpoint(adapter->usb_dev, ctrl_msg->dr.wIndex); } if (ctrl_msg->complete) ctrl_msg->complete(ctrl_msg->context); clear_bit(0, &ctrl->busy); // Try to send next control message usb_next_ctrl_msg(urb, adapter); return; } /* ====================================================================== * interrupt pipe */ /* * The interrupt endpoint will be called when any * of the 6 registers changes state (depending on masks). * Decode the register values and schedule a private event. * Called at interrupt. 
*/ static void usb_int_complete(struct urb *urb) { u8 *data = urb->transfer_buffer; u8 irqbyte; struct st5481_adapter *adapter = urb->context; int j; int status; switch (urb->status) { case 0: /* success */ break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* this urb is terminated, clean up */ DBG(2, "urb shutting down with status: %d", urb->status); return; default: WARNING("nonzero urb status received: %d", urb->status); goto exit; } DBG_PACKET(2, data, INT_PKT_SIZE); if (urb->actual_length == 0) { goto exit; } irqbyte = data[MPINT]; if (irqbyte & DEN_INT) FsmEvent(&adapter->d_out.fsm, EV_DOUT_DEN, NULL); if (irqbyte & DCOLL_INT) FsmEvent(&adapter->d_out.fsm, EV_DOUT_COLL, NULL); irqbyte = data[FFINT_D]; if (irqbyte & OUT_UNDERRUN) FsmEvent(&adapter->d_out.fsm, EV_DOUT_UNDERRUN, NULL); if (irqbyte & OUT_DOWN) ;// printk("OUT_DOWN\n"); irqbyte = data[MPINT]; if (irqbyte & RXCI_INT) FsmEvent(&adapter->l1m, data[CCIST] & 0x0f, NULL); for (j = 0; j < 2; j++) adapter->bcs[j].b_out.flow_event |= data[FFINT_B1 + j]; urb->actual_length = 0; exit: status = usb_submit_urb (urb, GFP_ATOMIC); if (status) WARNING("usb_submit_urb failed with result %d", status); } /* ====================================================================== * initialization */ int st5481_setup_usb(struct st5481_adapter *adapter) { struct usb_device *dev = adapter->usb_dev; struct st5481_ctrl *ctrl = &adapter->ctrl; struct st5481_intr *intr = &adapter->intr; struct usb_interface *intf; struct usb_host_interface *altsetting = NULL; struct usb_host_endpoint *endpoint; int status; struct urb *urb; u8 *buf; DBG(2,""); if ((status = usb_reset_configuration (dev)) < 0) { WARNING("reset_configuration failed,status=%d",status); return status; } intf = usb_ifnum_to_if(dev, 0); if (intf) altsetting = usb_altnum_to_altsetting(intf, 3); if (!altsetting) return -ENXIO; // Check if the config is sane if ( altsetting->desc.bNumEndpoints != 7 ) { WARNING("expecting 7 got %d endpoints!", 
altsetting->desc.bNumEndpoints); return -EINVAL; } // The descriptor is wrong for some early samples of the ST5481 chip altsetting->endpoint[3].desc.wMaxPacketSize = __constant_cpu_to_le16(32); altsetting->endpoint[4].desc.wMaxPacketSize = __constant_cpu_to_le16(32); // Use alternative setting 3 on interface 0 to have 2B+D if ((status = usb_set_interface (dev, 0, 3)) < 0) { WARNING("usb_set_interface failed,status=%d",status); return status; } // Allocate URB for control endpoint urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { return -ENOMEM; } ctrl->urb = urb; // Fill the control URB usb_fill_control_urb (urb, dev, usb_sndctrlpipe(dev, 0), NULL, NULL, 0, usb_ctrl_complete, adapter); fifo_init(&ctrl->msg_fifo.f, ARRAY_SIZE(ctrl->msg_fifo.data)); // Allocate URBs and buffers for interrupt endpoint urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { return -ENOMEM; } intr->urb = urb; buf = kmalloc(INT_PKT_SIZE, GFP_KERNEL); if (!buf) { return -ENOMEM; } endpoint = &altsetting->endpoint[EP_INT-1]; // Fill the interrupt URB usb_fill_int_urb(urb, dev, usb_rcvintpipe(dev, endpoint->desc.bEndpointAddress), buf, INT_PKT_SIZE, usb_int_complete, adapter, endpoint->desc.bInterval); return 0; } /* * Release buffers and URBs for the interrupt and control * endpoint. */ void st5481_release_usb(struct st5481_adapter *adapter) { struct st5481_intr *intr = &adapter->intr; struct st5481_ctrl *ctrl = &adapter->ctrl; DBG(1,""); // Stop and free Control and Interrupt URBs usb_kill_urb(ctrl->urb); kfree(ctrl->urb->transfer_buffer); usb_free_urb(ctrl->urb); ctrl->urb = NULL; usb_kill_urb(intr->urb); kfree(intr->urb->transfer_buffer); usb_free_urb(intr->urb); intr->urb = NULL; } /* * Initialize the adapter. 
*/ void st5481_start(struct st5481_adapter *adapter) { static const u8 init_cmd_table[]={ SET_DEFAULT,0, STT,0, SDA_MIN,0x0d, SDA_MAX,0x29, SDELAY_VALUE,0x14, GPIO_DIR,0x01, GPIO_OUT,RED_LED, // FFCTRL_OUT_D,4, // FFCTRH_OUT_D,12, FFCTRL_OUT_B1,6, FFCTRH_OUT_B1,20, FFCTRL_OUT_B2,6, FFCTRH_OUT_B2,20, MPMSK,RXCI_INT+DEN_INT+DCOLL_INT, 0 }; struct st5481_intr *intr = &adapter->intr; int i = 0; u8 request,value; DBG(8,""); adapter->leds = RED_LED; // Start receiving on the interrupt endpoint SUBMIT_URB(intr->urb, GFP_KERNEL); while ((request = init_cmd_table[i++])) { value = init_cmd_table[i++]; st5481_usb_device_ctrl_msg(adapter, request, value, NULL, NULL); } st5481_ph_command(adapter, ST5481_CMD_PUP); } /* * Reset the adapter to default values. */ void st5481_stop(struct st5481_adapter *adapter) { DBG(8,""); st5481_usb_device_ctrl_msg(adapter, SET_DEFAULT, 0, NULL, NULL); } /* ====================================================================== * isochronous USB helpers */ static void fill_isoc_urb(struct urb *urb, struct usb_device *dev, unsigned int pipe, void *buf, int num_packets, int packet_size, usb_complete_t complete, void *context) { int k; urb->dev=dev; urb->pipe=pipe; urb->interval = 1; urb->transfer_buffer=buf; urb->number_of_packets = num_packets; urb->transfer_buffer_length=num_packets*packet_size; urb->actual_length = 0; urb->complete=complete; urb->context=context; urb->transfer_flags=URB_ISO_ASAP; for (k = 0; k < num_packets; k++) { urb->iso_frame_desc[k].offset = packet_size * k; urb->iso_frame_desc[k].length = packet_size; urb->iso_frame_desc[k].actual_length = 0; } } int st5481_setup_isocpipes(struct urb* urb[2], struct usb_device *dev, unsigned int pipe, int num_packets, int packet_size, int buf_size, usb_complete_t complete, void *context) { int j, retval; unsigned char *buf; for (j = 0; j < 2; j++) { retval = -ENOMEM; urb[j] = usb_alloc_urb(num_packets, GFP_KERNEL); if (!urb[j]) goto err; // Allocate memory for 2000bytes/sec (16Kb/s) buf = 
kmalloc(buf_size, GFP_KERNEL); if (!buf) goto err; // Fill the isochronous URB fill_isoc_urb(urb[j], dev, pipe, buf, num_packets, packet_size, complete, context); } return 0; err: for (j = 0; j < 2; j++) { if (urb[j]) { kfree(urb[j]->transfer_buffer); urb[j]->transfer_buffer = NULL; usb_free_urb(urb[j]); urb[j] = NULL; } } return retval; } void st5481_release_isocpipes(struct urb* urb[2]) { int j; for (j = 0; j < 2; j++) { usb_kill_urb(urb[j]); kfree(urb[j]->transfer_buffer); usb_free_urb(urb[j]); urb[j] = NULL; } } /* * Decode frames received on the B/D channel. * Note that this function will be called continuously * with 64Kbit/s / 16Kbit/s of data and hence it will be * called 50 times per second with 20 ISOC descriptors. * Called at interrupt. */ static void usb_in_complete(struct urb *urb) { struct st5481_in *in = urb->context; unsigned char *ptr; struct sk_buff *skb; int len, count, status; if (unlikely(urb->status < 0)) { switch (urb->status) { case -ENOENT: case -ESHUTDOWN: case -ECONNRESET: DBG(1,"urb killed status %d", urb->status); return; // Give up default: WARNING("urb status %d",urb->status); break; } } DBG_ISO_PACKET(0x80,urb); len = st5481_isoc_flatten(urb); ptr = urb->transfer_buffer; while (len > 0) { if (in->mode == L1_MODE_TRANS) { memcpy(in->rcvbuf, ptr, len); status = len; len = 0; } else { status = isdnhdlc_decode(&in->hdlc_state, ptr, len, &count, in->rcvbuf, in->bufsize); ptr += count; len -= count; } if (status > 0) { // Good frame received DBG(4,"count=%d",status); DBG_PACKET(0x400, in->rcvbuf, status); if (!(skb = dev_alloc_skb(status))) { WARNING("receive out of memory\n"); break; } memcpy(skb_put(skb, status), in->rcvbuf, status); in->hisax_if->l1l2(in->hisax_if, PH_DATA | INDICATION, skb); } else if (status == -HDLC_CRC_ERROR) { INFO("CRC error"); } else if (status == -HDLC_FRAMING_ERROR) { INFO("framing error"); } else if (status == -HDLC_LENGTH_ERROR) { INFO("length error"); } } // Prepare URB for next transfer urb->dev = 
in->adapter->usb_dev; urb->actual_length = 0; SUBMIT_URB(urb, GFP_ATOMIC); } int st5481_setup_in(struct st5481_in *in) { struct usb_device *dev = in->adapter->usb_dev; int retval; DBG(4,""); in->rcvbuf = kmalloc(in->bufsize, GFP_KERNEL); retval = -ENOMEM; if (!in->rcvbuf) goto err; retval = st5481_setup_isocpipes(in->urb, dev, usb_rcvisocpipe(dev, in->ep), in->num_packets, in->packet_size, in->num_packets * in->packet_size, usb_in_complete, in); if (retval) goto err_free; return 0; err_free: kfree(in->rcvbuf); err: return retval; } void st5481_release_in(struct st5481_in *in) { DBG(2,""); st5481_release_isocpipes(in->urb); } /* * Make the transfer_buffer contiguous by * copying from the iso descriptors if necessary. */ static int st5481_isoc_flatten(struct urb *urb) { struct usb_iso_packet_descriptor *pipd,*pend; unsigned char *src,*dst; unsigned int len; if (urb->status < 0) { return urb->status; } for (pipd = &urb->iso_frame_desc[0], pend = &urb->iso_frame_desc[urb->number_of_packets], dst = urb->transfer_buffer; pipd < pend; pipd++) { if (pipd->status < 0) { return (pipd->status); } len = pipd->actual_length; pipd->actual_length = 0; src = urb->transfer_buffer+pipd->offset; if (src != dst) { // Need to copy since isoc buffers not full while (len--) { *dst++ = *src++; } } else { // No need to copy, just update destination buffer dst += len; } } // Return size of flattened buffer return (dst - (unsigned char *)urb->transfer_buffer); } static void st5481_start_rcv(void *context) { struct st5481_in *in = context; struct st5481_adapter *adapter = in->adapter; DBG(4,""); in->urb[0]->dev = adapter->usb_dev; SUBMIT_URB(in->urb[0], GFP_KERNEL); in->urb[1]->dev = adapter->usb_dev; SUBMIT_URB(in->urb[1], GFP_KERNEL); } void st5481_in_mode(struct st5481_in *in, int mode) { if (in->mode == mode) return; in->mode = mode; usb_unlink_urb(in->urb[0]); usb_unlink_urb(in->urb[1]); if (in->mode != L1_MODE_NULL) { if (in->mode != L1_MODE_TRANS) { u32 features = HDLC_BITREVERSE; if 
(in->mode == L1_MODE_HDLC_56K) features |= HDLC_56KBIT; isdnhdlc_rcv_init(&in->hdlc_state, features); } st5481_usb_pipe_reset(in->adapter, in->ep, NULL, NULL); st5481_usb_device_ctrl_msg(in->adapter, in->counter, in->packet_size, NULL, NULL); st5481_start_rcv(in); } else { st5481_usb_device_ctrl_msg(in->adapter, in->counter, 0, NULL, NULL); } }
gpl-2.0
humberos/android_kernel_samsung_aries
drivers/base/dma-coherent.c
3178
4540
/* * Coherent per-device memory handling. * Borrowed from i386 */ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/dma-mapping.h> struct dma_coherent_mem { void *virt_base; dma_addr_t device_base; int size; int flags; unsigned long *bitmap; }; int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, dma_addr_t device_addr, size_t size, int flags) { void __iomem *mem_base = NULL; int pages = size >> PAGE_SHIFT; int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long); if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0) goto out; if (!size) goto out; if (dev->dma_mem) goto out; /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */ mem_base = ioremap(bus_addr, size); if (!mem_base) goto out; dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL); if (!dev->dma_mem) goto out; dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL); if (!dev->dma_mem->bitmap) goto free1_out; dev->dma_mem->virt_base = mem_base; dev->dma_mem->device_base = device_addr; dev->dma_mem->size = pages; dev->dma_mem->flags = flags; if (flags & DMA_MEMORY_MAP) return DMA_MEMORY_MAP; return DMA_MEMORY_IO; free1_out: kfree(dev->dma_mem); out: if (mem_base) iounmap(mem_base); return 0; } EXPORT_SYMBOL(dma_declare_coherent_memory); void dma_release_declared_memory(struct device *dev) { struct dma_coherent_mem *mem = dev->dma_mem; if (!mem) return; dev->dma_mem = NULL; iounmap(mem->virt_base); kfree(mem->bitmap); kfree(mem); } EXPORT_SYMBOL(dma_release_declared_memory); void *dma_mark_declared_memory_occupied(struct device *dev, dma_addr_t device_addr, size_t size) { struct dma_coherent_mem *mem = dev->dma_mem; int pos, err; size += device_addr & ~PAGE_MASK; if (!mem) return ERR_PTR(-EINVAL); pos = (device_addr - mem->device_base) >> PAGE_SHIFT; err = bitmap_allocate_region(mem->bitmap, pos, get_order(size)); if (err != 0) return ERR_PTR(err); return mem->virt_base + (pos << PAGE_SHIFT); } EXPORT_SYMBOL(dma_mark_declared_memory_occupied); /** * 
dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area
 *
 * @dev:	device from which we allocate memory
 * @size:	size of requested memory area
 * @dma_handle:	This will be filled with the correct dma handle
 * @ret:	This pointer will be filled with the virtual address
 *		to allocated area.
 *
 * This function should be only called from per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 * (On the !0 path, @ret may still be NULL — that means the per-device
 * pool is exhausted AND the region was declared DMA_MEMORY_EXCLUSIVE,
 * so falling back to generic memory is forbidden.)
 */
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
				       dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem;
	/* Allocations are made in power-of-two page chunks. */
	int order = get_order(size);
	int pageno;

	/* No device, or no declared per-device pool: use generic memory. */
	if (!dev)
		return 0;
	mem = dev->dma_mem;
	if (!mem)
		return 0;

	*ret = NULL;

	/* A request larger than the whole pool can never succeed here. */
	if (unlikely(size > (mem->size << PAGE_SHIFT)))
		goto err;

	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
	if (unlikely(pageno < 0))
		goto err;

	/*
	 * Memory was found in the per-device area.
	 */
	*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
	*ret = mem->virt_base + (pageno << PAGE_SHIFT);
	/* Coherent allocations are handed out zeroed. */
	memset(*ret, 0, size);
	return 1;

err:
	/*
	 * In the case where the allocation can not be satisfied from the
	 * per-device area, try to fall back to generic memory if the
	 * constraints allow it.  A nonzero return (EXCLUSIVE set) tells
	 * the caller to give up instead; *ret is already NULL.
	 */
	return mem->flags & DMA_MEMORY_EXCLUSIVE;
}
EXPORT_SYMBOL(dma_alloc_from_coherent);

/**
 * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
 * @dev:	device from which the memory was allocated
 * @order:	the order of pages allocated
 * @vaddr:	virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if
 * dma_release_coherent() should proceed with releasing memory from
 * generic pools.
*/ int dma_release_from_coherent(struct device *dev, int order, void *vaddr) { struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL; if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) { int page = (vaddr - mem->virt_base) >> PAGE_SHIFT; bitmap_release_region(mem->bitmap, page, order); return 1; } return 0; } EXPORT_SYMBOL(dma_release_from_coherent);
gpl-2.0