| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
shumashv1/hp-kernel-tenderloin | arch/microblaze/kernel/sys_microblaze.c | 768 | 2568 | /*
* Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2007-2009 PetaLogix
* Copyright (C) 2007 John Williams <john.williams@petalogix.com>
*
* Copyright (C) 2006 Atmark Techno, Inc.
* Yasushi SHOJI <yashi@atmark-techno.com>
* Tetsuya OHKAWA <tetsuya@atmark-techno.com>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/syscalls.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/sys.h>
#include <linux/ipc.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/semaphore.h>
#include <linux/uaccess.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <asm/syscalls.h>
asmlinkage long microblaze_vfork(struct pt_regs *regs)
{
return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->r1,
regs, 0, NULL, NULL);
}
asmlinkage long microblaze_clone(int flags, unsigned long stack, struct pt_regs *regs)
{
if (!stack)
stack = regs->r1;
return do_fork(flags, stack, regs, 0, NULL, NULL);
}
asmlinkage long microblaze_execve(char __user *filenamei, char __user *__user *argv,
char __user *__user *envp, struct pt_regs *regs)
{
int error;
char *filename;
filename = getname(filenamei);
error = PTR_ERR(filename);
if (IS_ERR(filename))
goto out;
error = do_execve(filename, argv, envp, regs);
putname(filename);
out:
return error;
}
asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
unsigned long fd, off_t pgoff)
{
if (pgoff & ~PAGE_MASK)
return -EINVAL;
return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff >> PAGE_SHIFT);
}
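/*
* Worked example (illustrative, not from the original source, assuming
* 4 KiB pages so PAGE_SHIFT == 12): the pgoff argument here is a byte
* offset, and ~PAGE_MASK keeps its low 12 bits. A caller-supplied
* offset of 8192 passes the alignment check and is forwarded as page
* number 8192 >> 12 == 2, while an offset of 8200 leaves a residue of
* 8 and fails with -EINVAL.
*/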
/*
* Do a system call from kernel instead of calling sys_execve so we
* end up with proper pt_regs.
*/
int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
register const char *__a __asm__("r5") = filename;
register const void *__b __asm__("r6") = argv;
register const void *__c __asm__("r7") = envp;
register unsigned long __syscall __asm__("r12") = __NR_execve;
register unsigned long __ret __asm__("r3");
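/*
* MicroBlaze syscall convention, as encoded by the register variables
* above: r12 carries the syscall number, r5-r7 carry the first three
* arguments, and "brki r14, 0x8" traps into the kernel; the result
* comes back in r3. The clobber list names the remaining registers the
* trap handler may scribble on.
*/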
__asm__ __volatile__ ("brki r14, 0x8"
: "=r" (__ret), "=r" (__syscall)
: "1" (__syscall), "r" (__a), "r" (__b), "r" (__c)
: "r4", "r8", "r9",
"r10", "r11", "r14", "cc", "memory");
return __ret;
}
| gpl-2.0 |
safarend/android_kernel_samsung_exynos5420 | fs/xfs/xfs_log_recover.c | 1536 | 106555 | /*
* Copyright (c) 2000-2006 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_log_priv.h"
#include "xfs_buf_item.h"
#include "xfs_log_recover.h"
#include "xfs_extfree_item.h"
#include "xfs_trans_priv.h"
#include "xfs_quota.h"
#include "xfs_rw.h"
#include "xfs_utils.h"
#include "xfs_trace.h"
STATIC int xlog_find_zeroed(xlog_t *, xfs_daddr_t *);
STATIC int xlog_clear_stale_blocks(xlog_t *, xfs_lsn_t);
#if defined(DEBUG)
STATIC void xlog_recover_check_summary(xlog_t *);
#else
#define xlog_recover_check_summary(log)
#endif
/*
* This structure is used during recovery to record the buf log items which
* have been canceled and should not be replayed.
*/
struct xfs_buf_cancel {
xfs_daddr_t bc_blkno;
uint bc_len;
int bc_refcount;
struct list_head bc_list;
};
/*
* Sector aligned buffer routines for buffer create/read/write/access
*/
/*
* Verify that the given count of basic blocks is a valid number of blocks
* to specify for an operation involving the given XFS log buffer.
* Returns nonzero if the count is valid, 0 otherwise.
*/
static inline int
xlog_buf_bbcount_valid(
xlog_t *log,
int bbcount)
{
return bbcount > 0 && bbcount <= log->l_logBBsize;
}
/*
* Allocate a buffer to hold log data. The buffer needs to be able
* to map to a range of nbblks basic blocks at any valid (basic
* block) offset within the log.
*/
STATIC xfs_buf_t *
xlog_get_bp(
xlog_t *log,
int nbblks)
{
struct xfs_buf *bp;
if (!xlog_buf_bbcount_valid(log, nbblks)) {
xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
nbblks);
XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
return NULL;
}
/*
* We do log I/O in units of log sectors (a power-of-2
* multiple of the basic block size), so we round up the
* requested size to accommodate the basic blocks required
* for complete log sectors.
*
* In addition, the buffer may be used for a non-sector-
* aligned block offset, in which case an I/O of the
* requested size could extend beyond the end of the
* buffer. If the requested size is only 1 basic block it
* will never straddle a sector boundary, so this won't be
* an issue. Nor will this be a problem if the log I/O is
* done in basic blocks (sector size 1). But otherwise we
* extend the buffer by one extra log sector to ensure
* there's space to accommodate this possibility.
*/
if (nbblks > 1 && log->l_sectBBsize > 1)
nbblks += log->l_sectBBsize;
nbblks = round_up(nbblks, log->l_sectBBsize);
bp = xfs_buf_get_uncached(log->l_mp->m_logdev_targp, BBTOB(nbblks), 0);
if (bp)
xfs_buf_unlock(bp);
return bp;
}
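/*
* Illustrative sizing (not from the original source): with a log
* sector of 8 basic blocks (l_sectBBsize == 8), a request for 5 blocks
* first grows to 13 to cover a non-sector-aligned start, then rounds
* up to 16 blocks, i.e. two full sectors. A 1-block request skips the
* padding and becomes round_up(1, 8) == 8.
*/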
STATIC void
xlog_put_bp(
xfs_buf_t *bp)
{
xfs_buf_free(bp);
}
/*
* Return the address of the start of the given block number's data
* in a log buffer. The buffer covers a log sector-aligned region.
*/
STATIC xfs_caddr_t
xlog_align(
xlog_t *log,
xfs_daddr_t blk_no,
int nbblks,
xfs_buf_t *bp)
{
xfs_daddr_t offset = blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1);
ASSERT(BBTOB(offset + nbblks) <= XFS_BUF_SIZE(bp));
return bp->b_addr + BBTOB(offset);
}
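/*
* Worked example (illustrative): with l_sectBBsize == 8, a read of
* blk_no == 21 via xlog_bread() pulls in the sector starting at block
* 16, so the offset within the buffer is 21 & 7 == 5 basic blocks and
* the data begins BBTOB(5) == 5 * 512 bytes into b_addr.
*/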
/*
* nbblks should be uint, but oh well. Just want to catch that 32-bit length.
*/
STATIC int
xlog_bread_noalign(
xlog_t *log,
xfs_daddr_t blk_no,
int nbblks,
xfs_buf_t *bp)
{
int error;
if (!xlog_buf_bbcount_valid(log, nbblks)) {
xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
nbblks);
XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
return EFSCORRUPTED;
}
blk_no = round_down(blk_no, log->l_sectBBsize);
nbblks = round_up(nbblks, log->l_sectBBsize);
ASSERT(nbblks > 0);
ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp));
XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
XFS_BUF_READ(bp);
XFS_BUF_SET_COUNT(bp, BBTOB(nbblks));
xfsbdstrat(log->l_mp, bp);
error = xfs_buf_iowait(bp);
if (error)
xfs_buf_ioerror_alert(bp, __func__);
return error;
}
STATIC int
xlog_bread(
xlog_t *log,
xfs_daddr_t blk_no,
int nbblks,
xfs_buf_t *bp,
xfs_caddr_t *offset)
{
int error;
error = xlog_bread_noalign(log, blk_no, nbblks, bp);
if (error)
return error;
*offset = xlog_align(log, blk_no, nbblks, bp);
return 0;
}
/*
* Read at an offset into the buffer. Returns with the buffer in its original
* state regardless of the result of the read.
*/
STATIC int
xlog_bread_offset(
xlog_t *log,
xfs_daddr_t blk_no, /* block to read from */
int nbblks, /* blocks to read */
xfs_buf_t *bp,
xfs_caddr_t offset)
{
xfs_caddr_t orig_offset = bp->b_addr;
int orig_len = bp->b_buffer_length;
int error, error2;
error = xfs_buf_associate_memory(bp, offset, BBTOB(nbblks));
if (error)
return error;
error = xlog_bread_noalign(log, blk_no, nbblks, bp);
/* must reset buffer pointer even on error */
error2 = xfs_buf_associate_memory(bp, orig_offset, orig_len);
if (error)
return error;
return error2;
}
/*
* Write out the buffer at the given block for the given number of blocks.
* The buffer is kept locked across the write and is returned locked.
* This can only be used for synchronous log writes.
*/
STATIC int
xlog_bwrite(
xlog_t *log,
xfs_daddr_t blk_no,
int nbblks,
xfs_buf_t *bp)
{
int error;
if (!xlog_buf_bbcount_valid(log, nbblks)) {
xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
nbblks);
XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
return EFSCORRUPTED;
}
blk_no = round_down(blk_no, log->l_sectBBsize);
nbblks = round_up(nbblks, log->l_sectBBsize);
ASSERT(nbblks > 0);
ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp));
XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
XFS_BUF_ZEROFLAGS(bp);
xfs_buf_hold(bp);
xfs_buf_lock(bp);
XFS_BUF_SET_COUNT(bp, BBTOB(nbblks));
error = xfs_bwrite(bp);
if (error)
xfs_buf_ioerror_alert(bp, __func__);
xfs_buf_relse(bp);
return error;
}
#ifdef DEBUG
/*
* dump debug superblock and log record information
*/
STATIC void
xlog_header_check_dump(
xfs_mount_t *mp,
xlog_rec_header_t *head)
{
xfs_debug(mp, "%s: SB : uuid = %pU, fmt = %d\n",
__func__, &mp->m_sb.sb_uuid, XLOG_FMT);
xfs_debug(mp, " log : uuid = %pU, fmt = %d\n",
&head->h_fs_uuid, be32_to_cpu(head->h_fmt));
}
#else
#define xlog_header_check_dump(mp, head)
#endif
/*
* check log record header for recovery
*/
STATIC int
xlog_header_check_recover(
xfs_mount_t *mp,
xlog_rec_header_t *head)
{
ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
/*
* IRIX doesn't write the h_fmt field and leaves it zeroed
* (XLOG_FMT_UNKNOWN). This stops us from trying to recover
* a dirty log created in IRIX.
*/
if (unlikely(head->h_fmt != cpu_to_be32(XLOG_FMT))) {
xfs_warn(mp,
"dirty log written in incompatible format - can't recover");
xlog_header_check_dump(mp, head);
XFS_ERROR_REPORT("xlog_header_check_recover(1)",
XFS_ERRLEVEL_HIGH, mp);
return XFS_ERROR(EFSCORRUPTED);
} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
xfs_warn(mp,
"dirty log entry has mismatched uuid - can't recover");
xlog_header_check_dump(mp, head);
XFS_ERROR_REPORT("xlog_header_check_recover(2)",
XFS_ERRLEVEL_HIGH, mp);
return XFS_ERROR(EFSCORRUPTED);
}
return 0;
}
/*
* read the head block of the log and check the header
*/
STATIC int
xlog_header_check_mount(
xfs_mount_t *mp,
xlog_rec_header_t *head)
{
ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
if (uuid_is_nil(&head->h_fs_uuid)) {
/*
* IRIX doesn't write the h_fs_uuid or h_fmt fields. If
* h_fs_uuid is nil, we assume this log was last mounted
* by IRIX and continue.
*/
xfs_warn(mp, "nil uuid in log - IRIX style log");
} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
xfs_warn(mp, "log has mismatched uuid - can't recover");
xlog_header_check_dump(mp, head);
XFS_ERROR_REPORT("xlog_header_check_mount",
XFS_ERRLEVEL_HIGH, mp);
return XFS_ERROR(EFSCORRUPTED);
}
return 0;
}
STATIC void
xlog_recover_iodone(
struct xfs_buf *bp)
{
if (bp->b_error) {
/*
* We're not going to bother about retrying
* this during recovery. One strike!
*/
xfs_buf_ioerror_alert(bp, __func__);
xfs_force_shutdown(bp->b_target->bt_mount,
SHUTDOWN_META_IO_ERROR);
}
bp->b_iodone = NULL;
xfs_buf_ioend(bp, 0);
}
/*
* This routine finds (to an approximation) the first block in the physical
* log which contains the given cycle. It uses a binary search algorithm.
* Note that the algorithm cannot be perfect because the disk will not
* necessarily be perfect.
*/
STATIC int
xlog_find_cycle_start(
xlog_t *log,
xfs_buf_t *bp,
xfs_daddr_t first_blk,
xfs_daddr_t *last_blk,
uint cycle)
{
xfs_caddr_t offset;
xfs_daddr_t mid_blk;
xfs_daddr_t end_blk;
uint mid_cycle;
int error;
end_blk = *last_blk;
mid_blk = BLK_AVG(first_blk, end_blk);
while (mid_blk != first_blk && mid_blk != end_blk) {
error = xlog_bread(log, mid_blk, 1, bp, &offset);
if (error)
return error;
mid_cycle = xlog_get_cycle(offset);
if (mid_cycle == cycle)
end_blk = mid_blk; /* last_half_cycle == mid_cycle */
else
first_blk = mid_blk; /* first_half_cycle == mid_cycle */
mid_blk = BLK_AVG(first_blk, end_blk);
}
ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
(mid_blk == end_blk && mid_blk-1 == first_blk));
*last_blk = end_blk;
return 0;
}
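/*
* Illustrative trace (not from the original source): take first_blk ==
* 0 and *last_blk == 15, with blocks 0-9 stamped with cycle 9 and
* blocks 10-15 with cycle 8, searching for cycle == 8. The midpoints
* probe blocks 7 (cycle 9, raise first_blk), 11 (cycle 8, lower
* end_blk), 9, then 10; the loop exits with first_blk == 9 and
* end_blk == 10, so *last_blk returns 10, the first block of the
* target cycle.
*/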
/*
* Check that a range of blocks does not contain stop_on_cycle_no.
* Fill in *new_blk with the block offset where such a block is
* found, or with -1 (an invalid block number) if there is no such
* block in the range. The scan needs to occur from front to back
* and the pointer into the region must be updated since a later
* routine will need to perform another test.
*/
STATIC int
xlog_find_verify_cycle(
xlog_t *log,
xfs_daddr_t start_blk,
int nbblks,
uint stop_on_cycle_no,
xfs_daddr_t *new_blk)
{
xfs_daddr_t i, j;
uint cycle;
xfs_buf_t *bp;
xfs_daddr_t bufblks;
xfs_caddr_t buf = NULL;
int error = 0;
/*
* Greedily allocate a buffer big enough to handle the full
* range of basic blocks we'll be examining. If that fails,
* try a smaller size. We need to be able to read at least
* a log sector, or we're out of luck.
*/
bufblks = 1 << ffs(nbblks);
while (!(bp = xlog_get_bp(log, bufblks))) {
bufblks >>= 1;
if (bufblks < log->l_sectBBsize)
return ENOMEM;
}
for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
int bcount;
bcount = min(bufblks, (start_blk + nbblks - i));
error = xlog_bread(log, i, bcount, bp, &buf);
if (error)
goto out;
for (j = 0; j < bcount; j++) {
cycle = xlog_get_cycle(buf);
if (cycle == stop_on_cycle_no) {
*new_blk = i+j;
goto out;
}
buf += BBSIZE;
}
}
*new_blk = -1;
out:
xlog_put_bp(bp);
return error;
}
/*
* Potentially backup over partial log record write.
*
* In the typical case, last_blk is the number of the block directly after
* a good log record. Therefore, we subtract one to get the block number
* of the last block in the given buffer. extra_bblks contains the number
* of blocks we would have read on a previous read. This happens when the
* last log record is split over the end of the physical log.
*
* extra_bblks is the number of blocks potentially verified on a previous
* call to this routine.
*/
STATIC int
xlog_find_verify_log_record(
xlog_t *log,
xfs_daddr_t start_blk,
xfs_daddr_t *last_blk,
int extra_bblks)
{
xfs_daddr_t i;
xfs_buf_t *bp;
xfs_caddr_t offset = NULL;
xlog_rec_header_t *head = NULL;
int error = 0;
int smallmem = 0;
int num_blks = *last_blk - start_blk;
int xhdrs;
ASSERT(start_blk != 0 || *last_blk != start_blk);
if (!(bp = xlog_get_bp(log, num_blks))) {
if (!(bp = xlog_get_bp(log, 1)))
return ENOMEM;
smallmem = 1;
} else {
error = xlog_bread(log, start_blk, num_blks, bp, &offset);
if (error)
goto out;
offset += ((num_blks - 1) << BBSHIFT);
}
for (i = (*last_blk) - 1; i >= 0; i--) {
if (i < start_blk) {
/* valid log record not found */
xfs_warn(log->l_mp,
"Log inconsistent (didn't find previous header)");
ASSERT(0);
error = XFS_ERROR(EIO);
goto out;
}
if (smallmem) {
error = xlog_bread(log, i, 1, bp, &offset);
if (error)
goto out;
}
head = (xlog_rec_header_t *)offset;
if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
break;
if (!smallmem)
offset -= BBSIZE;
}
/*
* We hit the beginning of the physical log & still no header. Return
* to caller. If caller can handle a return of -1, then this routine
* will be called again for the end of the physical log.
*/
if (i == -1) {
error = -1;
goto out;
}
/*
* We have the final block of the good log (the first block of the
* log record _before_ the head), so we check the uuid.
*/
if ((error = xlog_header_check_mount(log->l_mp, head)))
goto out;
/*
* We may have found a log record header before we expected one.
* last_blk will be the 1st block # with a given cycle #. We may end
* up reading an entire log record. In this case, we don't want to
* reset last_blk. Only when last_blk points in the middle of a log
* record do we update last_blk.
*/
if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
uint h_size = be32_to_cpu(head->h_size);
xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
if (h_size % XLOG_HEADER_CYCLE_SIZE)
xhdrs++;
} else {
xhdrs = 1;
}
if (*last_blk - i + extra_bblks !=
BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
*last_blk = i;
out:
xlog_put_bp(bp);
return error;
}
/*
* Head is defined to be the point of the log where the next log write
* could go. This means that incomplete LR writes at the end are
* eliminated when calculating the head. We aren't guaranteed that previous
* LRs have complete transactions. We only know that a cycle number of
* current cycle number -1 won't be present in the log if we start writing
* from our current block number.
*
* last_blk contains the block number of the first block with a given
* cycle number.
*
* Return: zero if normal, non-zero if error.
*/
STATIC int
xlog_find_head(
xlog_t *log,
xfs_daddr_t *return_head_blk)
{
xfs_buf_t *bp;
xfs_caddr_t offset;
xfs_daddr_t new_blk, first_blk, start_blk, last_blk, head_blk;
int num_scan_bblks;
uint first_half_cycle, last_half_cycle;
uint stop_on_cycle;
int error, log_bbnum = log->l_logBBsize;
/* Is the end of the log device zeroed? */
if ((error = xlog_find_zeroed(log, &first_blk)) == -1) {
*return_head_blk = first_blk;
/* Is the whole lot zeroed? */
if (!first_blk) {
/* Linux XFS shouldn't generate totally zeroed logs -
* mkfs etc write a dummy unmount record to a fresh
* log so we can store the uuid in there
*/
xfs_warn(log->l_mp, "totally zeroed log");
}
return 0;
} else if (error) {
xfs_warn(log->l_mp, "empty log check failed");
return error;
}
first_blk = 0; /* get cycle # of 1st block */
bp = xlog_get_bp(log, 1);
if (!bp)
return ENOMEM;
error = xlog_bread(log, 0, 1, bp, &offset);
if (error)
goto bp_err;
first_half_cycle = xlog_get_cycle(offset);
last_blk = head_blk = log_bbnum - 1; /* get cycle # of last block */
error = xlog_bread(log, last_blk, 1, bp, &offset);
if (error)
goto bp_err;
last_half_cycle = xlog_get_cycle(offset);
ASSERT(last_half_cycle != 0);
/*
* If the 1st half cycle number is equal to the last half cycle number,
* then the entire log is stamped with the same cycle number. In this
* case, head_blk can't be set to zero (which makes sense). The below
* math doesn't work out properly with head_blk equal to zero. Instead,
* we set it to log_bbnum which is an invalid block number, but this
* value makes the math correct. If head_blk doesn't change through
* all the tests below, *head_blk is set to zero at the very end rather
* than log_bbnum. In a sense, log_bbnum and zero are the same block
* in a circular file.
*/
if (first_half_cycle == last_half_cycle) {
/*
* In this case we believe that the entire log should have
* cycle number last_half_cycle. We need to scan backwards
* from the end verifying that there are no holes still
* containing last_half_cycle - 1. If we find such a hole,
* then the start of that hole will be the new head. The
* simple case looks like
* x | x ... | x - 1 | x
* Another case that fits this picture would be
* x | x + 1 | x ... | x
* In this case the head really is somewhere at the end of the
* log, as one of the latest writes at the beginning was
* incomplete.
* One more case is
* x | x + 1 | x ... | x - 1 | x
* This is really the combination of the above two cases, and
* the head has to end up at the start of the x-1 hole at the
* end of the log.
*
* In the 256k log case, we will read from the beginning to the
* end of the log and search for cycle numbers equal to x-1.
* We don't worry about the x+1 blocks that we encounter,
* because we know that they cannot be the head since the log
* started with x.
*/
head_blk = log_bbnum;
stop_on_cycle = last_half_cycle - 1;
} else {
/*
* In this case we want to find the first block with cycle
* number matching last_half_cycle. We expect the log to be
* some variation on
* x + 1 ... | x ... | x
* The first block with cycle number x (last_half_cycle) will
* be where the new head belongs. First we do a binary search
* for the first occurrence of last_half_cycle. The binary
* search may not be totally accurate, so then we scan back
* from there looking for occurrences of last_half_cycle before
* us. If that backwards scan wraps around the beginning of
* the log, then we look for occurrences of last_half_cycle - 1
* at the end of the log. The cases we're looking for look
* like
* v binary search stopped here
* x + 1 ... | x | x + 1 | x ... | x
* ^ but we want to locate this spot
* or
* <---------> less than scan distance
* x + 1 ... | x ... | x - 1 | x
* ^ we want to locate this spot
*/
stop_on_cycle = last_half_cycle;
if ((error = xlog_find_cycle_start(log, bp, first_blk,
&head_blk, last_half_cycle)))
goto bp_err;
}
/*
* Now validate the answer. Scan back some number of maximum possible
* blocks and make sure each one has the expected cycle number. The
* maximum is determined by the total possible amount of buffering
* in the in-core log. The following number can be made tighter if
* we actually look at the block size of the filesystem.
*/
num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
if (head_blk >= num_scan_bblks) {
/*
* We are guaranteed that the entire check can be performed
* in one buffer.
*/
start_blk = head_blk - num_scan_bblks;
if ((error = xlog_find_verify_cycle(log,
start_blk, num_scan_bblks,
stop_on_cycle, &new_blk)))
goto bp_err;
if (new_blk != -1)
head_blk = new_blk;
} else { /* need to read 2 parts of log */
/*
* We are going to scan backwards in the log in two parts.
* First we scan the physical end of the log. In this part
* of the log, we are looking for blocks with cycle number
* last_half_cycle - 1.
* If we find one, then we know that the log starts there, as
* we've found a hole that didn't get written in going around
* the end of the physical log. The simple case for this is
* x + 1 ... | x ... | x - 1 | x
* <---------> less than scan distance
* If all of the blocks at the end of the log have cycle number
* last_half_cycle, then we check the blocks at the start of
* the log looking for occurrences of last_half_cycle. If we
* find one, then our current estimate for the location of the
* first occurrence of last_half_cycle is wrong and we move
* back to the hole we've found. This case looks like
* x + 1 ... | x | x + 1 | x ...
* ^ binary search stopped here
* Another case we need to handle that only occurs in 256k
* logs is
* x + 1 ... | x ... | x+1 | x ...
* ^ binary search stops here
* In a 256k log, the scan at the end of the log will see the
* x + 1 blocks. We need to skip past those since that is
* certainly not the head of the log. By searching for
* last_half_cycle-1 we accomplish that.
*/
ASSERT(head_blk <= INT_MAX &&
(xfs_daddr_t) num_scan_bblks >= head_blk);
start_blk = log_bbnum - (num_scan_bblks - head_blk);
if ((error = xlog_find_verify_cycle(log, start_blk,
num_scan_bblks - (int)head_blk,
(stop_on_cycle - 1), &new_blk)))
goto bp_err;
if (new_blk != -1) {
head_blk = new_blk;
goto validate_head;
}
/*
* Scan beginning of log now. The last part of the physical
* log is good. This scan needs to verify that it doesn't find
* the last_half_cycle.
*/
start_blk = 0;
ASSERT(head_blk <= INT_MAX);
if ((error = xlog_find_verify_cycle(log,
start_blk, (int)head_blk,
stop_on_cycle, &new_blk)))
goto bp_err;
if (new_blk != -1)
head_blk = new_blk;
}
validate_head:
/*
* Now we need to make sure head_blk is not pointing to a block in
* the middle of a log record.
*/
num_scan_bblks = XLOG_REC_SHIFT(log);
if (head_blk >= num_scan_bblks) {
start_blk = head_blk - num_scan_bblks; /* don't read head_blk */
/* start ptr at last block ptr before head_blk */
if ((error = xlog_find_verify_log_record(log, start_blk,
&head_blk, 0)) == -1) {
error = XFS_ERROR(EIO);
goto bp_err;
} else if (error)
goto bp_err;
} else {
start_blk = 0;
ASSERT(head_blk <= INT_MAX);
if ((error = xlog_find_verify_log_record(log, start_blk,
&head_blk, 0)) == -1) {
/* We hit the beginning of the log during our search */
start_blk = log_bbnum - (num_scan_bblks - head_blk);
new_blk = log_bbnum;
ASSERT(start_blk <= INT_MAX &&
(xfs_daddr_t) log_bbnum-start_blk >= 0);
ASSERT(head_blk <= INT_MAX);
if ((error = xlog_find_verify_log_record(log,
start_blk, &new_blk,
(int)head_blk)) == -1) {
error = XFS_ERROR(EIO);
goto bp_err;
} else if (error)
goto bp_err;
if (new_blk != log_bbnum)
head_blk = new_blk;
} else if (error)
goto bp_err;
}
xlog_put_bp(bp);
if (head_blk == log_bbnum)
*return_head_blk = 0;
else
*return_head_blk = head_blk;
/*
* When returning here, we have a good block number. A bad block here
* would mean that during a previous crash, we didn't have a clean break
* from cycle number N to cycle number N-1. In this case, we need
* to find the first block with cycle number N-1.
*/
return 0;
bp_err:
xlog_put_bp(bp);
if (error)
xfs_warn(log->l_mp, "failed to find log head");
return error;
}
/*
* Find the sync block number or the tail of the log.
*
* This will be the block number of the last record to have its
* associated buffers synced to disk. Every log record header has
* a sync lsn embedded in it. LSNs hold block numbers, so it is easy
* to get a sync block number. The only concern is to figure out which
* log record header to believe.
*
* The following algorithm uses the log record header with the largest
* lsn. The entire log record does not need to be valid. We only care
* that the header is valid.
*
* We could speed up the search by using the current head_blk buffer, but
* it is not available.
*/
STATIC int
xlog_find_tail(
xlog_t *log,
xfs_daddr_t *head_blk,
xfs_daddr_t *tail_blk)
{
xlog_rec_header_t *rhead;
xlog_op_header_t *op_head;
xfs_caddr_t offset = NULL;
xfs_buf_t *bp;
int error, i, found;
xfs_daddr_t umount_data_blk;
xfs_daddr_t after_umount_blk;
xfs_lsn_t tail_lsn;
int hblks;
found = 0;
/*
* Find previous log record
*/
if ((error = xlog_find_head(log, head_blk)))
return error;
bp = xlog_get_bp(log, 1);
if (!bp)
return ENOMEM;
if (*head_blk == 0) { /* special case */
error = xlog_bread(log, 0, 1, bp, &offset);
if (error)
goto done;
if (xlog_get_cycle(offset) == 0) {
*tail_blk = 0;
/* leave all other log inited values alone */
goto done;
}
}
/*
* Search backwards looking for log record header block
*/
ASSERT(*head_blk < INT_MAX);
for (i = (int)(*head_blk) - 1; i >= 0; i--) {
error = xlog_bread(log, i, 1, bp, &offset);
if (error)
goto done;
if (*(__be32 *)offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
found = 1;
break;
}
}
/*
* If we haven't found the log record header block, start looking
* again from the end of the physical log. XXXmiken: There should be
* a check here to make sure we didn't search more than N blocks in
* the previous code.
*/
if (!found) {
for (i = log->l_logBBsize - 1; i >= (int)(*head_blk); i--) {
error = xlog_bread(log, i, 1, bp, &offset);
if (error)
goto done;
if (*(__be32 *)offset ==
cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
found = 2;
break;
}
}
}
if (!found) {
xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
ASSERT(0);
return XFS_ERROR(EIO);
}
/* find blk_no of tail of log */
rhead = (xlog_rec_header_t *)offset;
*tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));
/*
* Reset log values according to the state of the log when we
* crashed. In the case where head_blk == 0, we bump curr_cycle
* one because the next write starts a new cycle rather than
* continuing the cycle of the last good log record. At this
* point we have guaranteed that all partial log records have been
* accounted for. Therefore, we know that the last good log record
* written was complete and ended exactly on the end boundary
* of the physical log.
*/
log->l_prev_block = i;
log->l_curr_block = (int)*head_blk;
log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
if (found == 2)
log->l_curr_cycle++;
atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
BBTOB(log->l_curr_block));
xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
BBTOB(log->l_curr_block));
/*
* Look for unmount record. If we find it, then we know there
* was a clean unmount. Since 'i' could be the last block in
* the physical log, we convert to a log block before comparing
* to the head_blk.
*
* Save the current tail lsn to use to pass to
* xlog_clear_stale_blocks() below. We won't want to clear the
* unmount record if there is one, so we pass the lsn of the
* unmount record rather than the block after it.
*/
if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
int h_size = be32_to_cpu(rhead->h_size);
int h_version = be32_to_cpu(rhead->h_version);
if ((h_version & XLOG_VERSION_2) &&
(h_size > XLOG_HEADER_CYCLE_SIZE)) {
hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
if (h_size % XLOG_HEADER_CYCLE_SIZE)
hblks++;
} else {
hblks = 1;
}
} else {
hblks = 1;
}
after_umount_blk = (i + hblks + (int)
BTOBB(be32_to_cpu(rhead->h_len))) % log->l_logBBsize;
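/*
* Illustrative arithmetic (not from the original source): with the
* record header found at i == 100, hblks == 1 and h_len == 5120
* bytes, BTOBB gives 10 basic blocks of payload, so after_umount_blk
* is (100 + 1 + 10) % l_logBBsize == 111; the modulo handles a record
* that wraps around the end of the physical log.
*/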
tail_lsn = atomic64_read(&log->l_tail_lsn);
if (*head_blk == after_umount_blk &&
be32_to_cpu(rhead->h_num_logops) == 1) {
umount_data_blk = (i + hblks) % log->l_logBBsize;
error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
if (error)
goto done;
op_head = (xlog_op_header_t *)offset;
if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
/*
* Set tail and last sync so that newly written
* log records will point recovery to after the
* current unmount record.
*/
xlog_assign_atomic_lsn(&log->l_tail_lsn,
log->l_curr_cycle, after_umount_blk);
xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
log->l_curr_cycle, after_umount_blk);
*tail_blk = after_umount_blk;
/*
* Note that the unmount was clean. If the unmount
* was not clean, we need to know this to rebuild the
* superblock counters from the perag headers if we
* have a filesystem using non-persistent counters.
*/
log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
}
}
/*
* Make sure that there are no blocks in front of the head
* with the same cycle number as the head. This can happen
* because we allow multiple outstanding log writes concurrently,
* and the later writes might make it out before earlier ones.
*
* We use the lsn from before modifying it so that we'll never
* overwrite the unmount record after a clean unmount.
*
* Do this only if we are going to recover the filesystem
*
* NOTE: This used to say "if (!readonly)"
* However on Linux, we can & do recover a read-only filesystem.
* We only skip recovery if NORECOVERY is specified on mount,
* in which case we would not be here.
*
* But... if the -device- itself is readonly, just skip this.
* We can't recover this device anyway, so it won't matter.
*/
if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp))
error = xlog_clear_stale_blocks(log, tail_lsn);
done:
xlog_put_bp(bp);
if (error)
xfs_warn(log->l_mp, "failed to locate log tail");
return error;
}
/*
* Is the log zeroed at all?
*
* The last binary search should be changed to perform an X block read
* once X becomes small enough. You can then search linearly through
* the X blocks. This will cut down on the number of reads we need to do.
*
* If the log is partially zeroed, this routine will pass back the blkno
* of the first block with cycle number 0. It won't have a complete LR
* preceding it.
*
* Return:
* 0 => the log is completely written to
* -1 => use *blk_no as the first block of the log
* >0 => error has occurred
*/
STATIC int
xlog_find_zeroed(
xlog_t *log,
xfs_daddr_t *blk_no)
{
xfs_buf_t *bp;
xfs_caddr_t offset;
uint first_cycle, last_cycle;
xfs_daddr_t new_blk, last_blk, start_blk;
xfs_daddr_t num_scan_bblks;
int error, log_bbnum = log->l_logBBsize;
*blk_no = 0;
/* check totally zeroed log */
bp = xlog_get_bp(log, 1);
if (!bp)
return ENOMEM;
error = xlog_bread(log, 0, 1, bp, &offset);
if (error)
goto bp_err;
first_cycle = xlog_get_cycle(offset);
if (first_cycle == 0) { /* completely zeroed log */
*blk_no = 0;
xlog_put_bp(bp);
return -1;
}
/* check partially zeroed log */
error = xlog_bread(log, log_bbnum-1, 1, bp, &offset);
if (error)
goto bp_err;
last_cycle = xlog_get_cycle(offset);
if (last_cycle != 0) { /* log completely written to */
xlog_put_bp(bp);
return 0;
} else if (first_cycle != 1) {
/*
* If the cycle of the last block is zero, the cycle of
* the first block must be 1. If it's not, maybe we're
* not looking at a log... Bail out.
*/
xfs_warn(log->l_mp,
"Log inconsistent or not a log (last==0, first!=1)");
return XFS_ERROR(EINVAL);
}
/* we have a partially zeroed log */
last_blk = log_bbnum-1;
if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0)))
goto bp_err;
/*
* Validate the answer. Because there is no way to guarantee that
* the entire log is made up of log records which are the same size,
* we scan over the defined maximum blocks. At this point, the maximum
* is not chosen to mean anything special. XXXmiken
*/
num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
ASSERT(num_scan_bblks <= INT_MAX);
if (last_blk < num_scan_bblks)
num_scan_bblks = last_blk;
start_blk = last_blk - num_scan_bblks;
/*
* We search for any instances of cycle number 0 that occur before
* our current estimate of the head. What we're trying to detect is
* 1 ... | 0 | 1 | 0...
* ^ binary search ends here
*/
if ((error = xlog_find_verify_cycle(log, start_blk,
(int)num_scan_bblks, 0, &new_blk)))
goto bp_err;
if (new_blk != -1)
last_blk = new_blk;
/*
* Potentially backup over partial log record write. We don't need
* to search the end of the log because we know it is zero.
*/
if ((error = xlog_find_verify_log_record(log, start_blk,
&last_blk, 0)) == -1) {
error = XFS_ERROR(EIO);
goto bp_err;
} else if (error)
goto bp_err;
*blk_no = last_blk;
bp_err:
xlog_put_bp(bp);
if (error)
return error;
return -1;
}
/*
* These are simple subroutines used by xlog_clear_stale_blocks() below
* to initialize a buffer full of empty log record headers and write
* them into the log.
*/
STATIC void
xlog_add_record(
xlog_t *log,
xfs_caddr_t buf,
int cycle,
int block,
int tail_cycle,
int tail_block)
{
xlog_rec_header_t *recp = (xlog_rec_header_t *)buf;
memset(buf, 0, BBSIZE);
recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
recp->h_cycle = cpu_to_be32(cycle);
recp->h_version = cpu_to_be32(
xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
recp->h_fmt = cpu_to_be32(XLOG_FMT);
memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
}
STATIC int
xlog_write_log_records(
xlog_t *log,
int cycle,
int start_block,
int blocks,
int tail_cycle,
int tail_block)
{
xfs_caddr_t offset;
xfs_buf_t *bp;
int balign, ealign;
int sectbb = log->l_sectBBsize;
int end_block = start_block + blocks;
int bufblks;
int error = 0;
int i, j = 0;
/*
* Greedily allocate a buffer big enough to handle the full
* range of basic blocks to be written. If that fails, try
* a smaller size. We need to be able to write at least a
* log sector, or we're out of luck.
*/
bufblks = 1 << ffs(blocks);
while (!(bp = xlog_get_bp(log, bufblks))) {
bufblks >>= 1;
if (bufblks < sectbb)
return ENOMEM;
}
/* We may need to do a read at the start to fill in part of
* the buffer in the starting sector not covered by the first
* write below.
*/
balign = round_down(start_block, sectbb);
if (balign != start_block) {
error = xlog_bread_noalign(log, start_block, 1, bp);
if (error)
goto out_put_bp;
j = start_block - balign;
}
for (i = start_block; i < end_block; i += bufblks) {
int bcount, endcount;
bcount = min(bufblks, end_block - start_block);
endcount = bcount - j;
/* We may need to do a read at the end to fill in part of
* the buffer in the final sector not covered by the write.
* If this is the same sector as the above read, skip it.
*/
ealign = round_down(end_block, sectbb);
if (j == 0 && (start_block + endcount > ealign)) {
offset = bp->b_addr + BBTOB(ealign - start_block);
error = xlog_bread_offset(log, ealign, sectbb,
bp, offset);
if (error)
break;
}
offset = xlog_align(log, start_block, endcount, bp);
for (; j < endcount; j++) {
xlog_add_record(log, offset, cycle, i+j,
tail_cycle, tail_block);
offset += BBSIZE;
}
error = xlog_bwrite(log, start_block, endcount, bp);
if (error)
break;
start_block += endcount;
j = 0;
}
out_put_bp:
xlog_put_bp(bp);
return error;
}
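/*
* Worked example (illustrative, assuming the common 512-byte log
* sector case, so sectbb == 1, balign == start_block and j == 0):
* clearing 12 blocks from block 21 gives bufblks == 1 << ffs(12) == 8,
* so blocks 21-28 and then 29-32 are stamped and written in two
* passes, with start_block advancing by endcount each time. The
* pre-reads above only matter for larger sectors, where the partial
* first and last sectors must be filled from disk before the
* sector-aligned write goes out.
*/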
/*
* This routine is called to blow away any incomplete log writes out
* in front of the log head. We do this so that we won't become confused
* if we come up, write only a little bit more, and then crash again.
* If we leave the partial log records out there, this situation could
* cause us to think those partial writes are valid blocks since they
* have the current cycle number. We get rid of them by overwriting them
* with empty log records with the old cycle number rather than the
* current one.
*
* The tail lsn is passed in rather than taken from
* the log so that we will not write over the unmount record after a
* clean unmount in a 512 block log. Doing so would leave the log without
* any valid log records in it until a new one was written. If we crashed
* during that time we would not be able to recover.
*/
STATIC int
xlog_clear_stale_blocks(
xlog_t *log,
xfs_lsn_t tail_lsn)
{
int tail_cycle, head_cycle;
int tail_block, head_block;
int tail_distance, max_distance;
int distance;
int error;
tail_cycle = CYCLE_LSN(tail_lsn);
tail_block = BLOCK_LSN(tail_lsn);
head_cycle = log->l_curr_cycle;
head_block = log->l_curr_block;
/*
* Figure out the distance between the new head of the log
* and the tail. We want to write over any blocks beyond the
* head that we may have written just before the crash, but
* we don't want to overwrite the tail of the log.
*/
if (head_cycle == tail_cycle) {
/*
* The tail is behind the head in the physical log,
* so the distance from the head to the tail is the
* distance from the head to the end of the log plus
* the distance from the beginning of the log to the
* tail.
*/
if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
XFS_ERRLEVEL_LOW, log->l_mp);
return XFS_ERROR(EFSCORRUPTED);
}
tail_distance = tail_block + (log->l_logBBsize - head_block);
} else {
/*
* The head is behind the tail in the physical log,
* so the distance from the head to the tail is just
* the tail block minus the head block.
*/
if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
XFS_ERRLEVEL_LOW, log->l_mp);
return XFS_ERROR(EFSCORRUPTED);
}
tail_distance = tail_block - head_block;
}
/*
* If the head is right up against the tail, we can't clear
* anything.
*/
if (tail_distance <= 0) {
ASSERT(tail_distance == 0);
return 0;
}
max_distance = XLOG_TOTAL_REC_SHIFT(log);
/*
* Take the smaller of the maximum amount of outstanding I/O
* we could have and the distance to the tail to clear out.
* We take the smaller so that we don't overwrite the tail and
* we don't waste all day writing from the head to the tail
* for no reason.
*/
max_distance = MIN(max_distance, tail_distance);
if ((head_block + max_distance) <= log->l_logBBsize) {
/*
* We can stomp all the blocks we need to without
* wrapping around the end of the log. Just do it
* in a single write. Use the cycle number of the
* current cycle minus one so that the log will look like:
* n ... | n - 1 ...
*/
error = xlog_write_log_records(log, (head_cycle - 1),
head_block, max_distance, tail_cycle,
tail_block);
if (error)
return error;
} else {
/*
* We need to wrap around the end of the physical log in
* order to clear all the blocks. Do it in two separate
* I/Os. The first write should be from the head to the
* end of the physical log, and it should use the current
* cycle number minus one just like above.
*/
distance = log->l_logBBsize - head_block;
error = xlog_write_log_records(log, (head_cycle - 1),
head_block, distance, tail_cycle,
tail_block);
if (error)
return error;
/*
* Now write the blocks at the start of the physical log.
* This writes the remainder of the blocks we want to clear.
* It uses the current cycle number since we're now on the
* same cycle as the head so that we get:
* n ... n ... | n - 1 ...
* ^^^^^ blocks we're writing
*/
distance = max_distance - (log->l_logBBsize - head_block);
error = xlog_write_log_records(log, head_cycle, 0, distance,
tail_cycle, tail_block);
if (error)
return error;
}
return 0;
}
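/*
* Illustrative numbers (not from the original source): with
* l_logBBsize == 1000, head_block == 900 and max_distance == 300,
* head_block + max_distance overruns the physical log, so the first
* call stamps blocks 900-999 with cycle head_cycle - 1 and the second
* stamps blocks 0-199 with head_cycle, producing the
* n ... n ... | n - 1 ... picture described above.
*/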
/******************************************************************************
*
* Log recover routines
*
******************************************************************************
*/
STATIC xlog_recover_t *
xlog_recover_find_tid(
struct hlist_head *head,
xlog_tid_t tid)
{
xlog_recover_t *trans;
struct hlist_node *n;
hlist_for_each_entry(trans, n, head, r_list) {
if (trans->r_log_tid == tid)
return trans;
}
return NULL;
}
STATIC void
xlog_recover_new_tid(
struct hlist_head *head,
xlog_tid_t tid,
xfs_lsn_t lsn)
{
xlog_recover_t *trans;
trans = kmem_zalloc(sizeof(xlog_recover_t), KM_SLEEP);
trans->r_log_tid = tid;
trans->r_lsn = lsn;
INIT_LIST_HEAD(&trans->r_itemq);
INIT_HLIST_NODE(&trans->r_list);
hlist_add_head(&trans->r_list, head);
}
STATIC void
xlog_recover_add_item(
struct list_head *head)
{
xlog_recover_item_t *item;
item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
INIT_LIST_HEAD(&item->ri_list);
list_add_tail(&item->ri_list, head);
}
STATIC int
xlog_recover_add_to_cont_trans(
struct log *log,
xlog_recover_t *trans,
xfs_caddr_t dp,
int len)
{
xlog_recover_item_t *item;
xfs_caddr_t ptr, old_ptr;
int old_len;
if (list_empty(&trans->r_itemq)) {
/* finish copying rest of trans header */
xlog_recover_add_item(&trans->r_itemq);
ptr = (xfs_caddr_t) &trans->r_theader +
sizeof(xfs_trans_header_t) - len;
memcpy(ptr, dp, len); /* d, s, l */
return 0;
}
/* take the tail entry */
item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
old_len = item->ri_buf[item->ri_cnt-1].i_len;
ptr = kmem_realloc(old_ptr, len+old_len, old_len, KM_SLEEP);
memcpy(&ptr[old_len], dp, len); /* d, s, l */
item->ri_buf[item->ri_cnt-1].i_len += len;
item->ri_buf[item->ri_cnt-1].i_addr = ptr;
trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
return 0;
}
/*
* The next region to add is the start of a new region. It could be
* a whole region or it could be the first part of a new region. Because
* of this, the assumption here is that the type and size fields of all
* format structures fit into the first 32 bits of the structure.
*
* This works because all regions must be 32 bit aligned. Therefore, we
* either have both fields or we have neither field. In the case we have
* neither field, the data part of the region is zero length. We only have
* a log_op_header and can throw away the header since a new one will appear
* later. If we have at least 4 bytes, then we can determine how many regions
* will appear in the current log item.
*/
STATIC int
xlog_recover_add_to_trans(
struct log *log,
xlog_recover_t *trans,
xfs_caddr_t dp,
int len)
{
xfs_inode_log_format_t *in_f; /* any will do */
xlog_recover_item_t *item;
xfs_caddr_t ptr;
if (!len)
return 0;
if (list_empty(&trans->r_itemq)) {
/* we need to catch log corruptions here */
if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
xfs_warn(log->l_mp, "%s: bad header magic number",
__func__);
ASSERT(0);
return XFS_ERROR(EIO);
}
if (len == sizeof(xfs_trans_header_t))
xlog_recover_add_item(&trans->r_itemq);
memcpy(&trans->r_theader, dp, len); /* d, s, l */
return 0;
}
ptr = kmem_alloc(len, KM_SLEEP);
memcpy(ptr, dp, len);
in_f = (xfs_inode_log_format_t *)ptr;
/* take the tail entry */
item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
if (item->ri_total != 0 &&
item->ri_total == item->ri_cnt) {
/* tail item is in use, get a new one */
xlog_recover_add_item(&trans->r_itemq);
item = list_entry(trans->r_itemq.prev,
xlog_recover_item_t, ri_list);
}
if (item->ri_total == 0) { /* first region to be added */
if (in_f->ilf_size == 0 ||
in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
xfs_warn(log->l_mp,
"bad number of regions (%d) in inode log format",
in_f->ilf_size);
ASSERT(0);
return XFS_ERROR(EIO);
}
item->ri_total = in_f->ilf_size;
item->ri_buf =
kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
KM_SLEEP);
}
ASSERT(item->ri_total > item->ri_cnt);
/* Description region is ri_buf[0] */
item->ri_buf[item->ri_cnt].i_addr = ptr;
item->ri_buf[item->ri_cnt].i_len = len;
item->ri_cnt++;
trace_xfs_log_recover_item_add(log, trans, item, 0);
return 0;
}
/*
* Sort the log items in the transaction. Cancelled buffers need
* to be put first so they are processed before any items that might
* modify the buffers. If they are cancelled, then the modifications
* don't need to be replayed.
*/
STATIC int
xlog_recover_reorder_trans(
struct log *log,
xlog_recover_t *trans,
int pass)
{
xlog_recover_item_t *item, *n;
LIST_HEAD(sort_list);
list_splice_init(&trans->r_itemq, &sort_list);
list_for_each_entry_safe(item, n, &sort_list, ri_list) {
xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
switch (ITEM_TYPE(item)) {
case XFS_LI_BUF:
if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
trace_xfs_log_recover_item_reorder_head(log,
trans, item, pass);
list_move(&item->ri_list, &trans->r_itemq);
break;
}
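/* fall through */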
case XFS_LI_INODE:
case XFS_LI_DQUOT:
case XFS_LI_QUOTAOFF:
case XFS_LI_EFD:
case XFS_LI_EFI:
trace_xfs_log_recover_item_reorder_tail(log,
trans, item, pass);
list_move_tail(&item->ri_list, &trans->r_itemq);
break;
default:
xfs_warn(log->l_mp,
"%s: unrecognized type of log operation",
__func__);
ASSERT(0);
return XFS_ERROR(EIO);
}
}
ASSERT(list_empty(&sort_list));
return 0;
}
/*
* Build up the table of buf cancel records so that we don't replay
* cancelled data in the second pass. For buffer records that are
* not cancel records, there is nothing to do here so we just return.
*
* If we get a cancel record which is already in the table, this indicates
* that the buffer was cancelled multiple times. In order to ensure
* that during pass 2 we keep the record in the table until we reach its
* last occurrence in the log, we keep a reference count in the cancel
* record in the table to tell us how many times we expect to see this
* record during the second pass.
*/
STATIC int
xlog_recover_buffer_pass1(
struct log *log,
xlog_recover_item_t *item)
{
xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
struct list_head *bucket;
struct xfs_buf_cancel *bcp;
/*
* If this isn't a cancel buffer item, then just return.
*/
if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
trace_xfs_log_recover_buf_not_cancel(log, buf_f);
return 0;
}
/*
* Insert an xfs_buf_cancel record into the hash table of them.
* If there is already an identical record, bump its reference count.
*/
bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno);
list_for_each_entry(bcp, bucket, bc_list) {
if (bcp->bc_blkno == buf_f->blf_blkno &&
bcp->bc_len == buf_f->blf_len) {
bcp->bc_refcount++;
trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
return 0;
}
}
bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), KM_SLEEP);
bcp->bc_blkno = buf_f->blf_blkno;
bcp->bc_len = buf_f->blf_len;
bcp->bc_refcount = 1;
list_add_tail(&bcp->bc_list, bucket);
trace_xfs_log_recover_buf_cancel_add(log, buf_f);
return 0;
}
/*
* Check to see whether the buffer being recovered has a corresponding
* entry in the buffer cancel record table. If it does then return 1
* so that it will be cancelled, otherwise return 0. If the buffer is
* actually a buffer cancel item (XFS_BLF_CANCEL is set), then decrement
* the refcount on the entry in the table and remove it from the table
* if this is the last reference.
*
* We remove the cancel record from the table when we encounter its
* last occurrence in the log so that if the same buffer is re-used
* again after its last cancellation we actually replay the changes
* made at that point.
*/
STATIC int
xlog_check_buffer_cancelled(
struct log *log,
xfs_daddr_t blkno,
uint len,
ushort flags)
{
struct list_head *bucket;
struct xfs_buf_cancel *bcp;
if (log->l_buf_cancel_table == NULL) {
/*
* There is nothing in the table built in pass one,
* so this buffer must not be cancelled.
*/
ASSERT(!(flags & XFS_BLF_CANCEL));
return 0;
}
/*
* Search for an entry in the cancel table that matches our buffer.
*/
bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
list_for_each_entry(bcp, bucket, bc_list) {
if (bcp->bc_blkno == blkno && bcp->bc_len == len)
goto found;
}
/*
* We didn't find a corresponding entry in the table, so return 0 so
* that the buffer is NOT cancelled.
*/
ASSERT(!(flags & XFS_BLF_CANCEL));
return 0;
found:
/*
* We've got a match, so return 1 so that the recovery of this buffer
* is cancelled. If this buffer is actually a buffer cancel log
* item, then decrement the refcount on the one in the table and
* remove it if this is the last reference.
*/
if (flags & XFS_BLF_CANCEL) {
if (--bcp->bc_refcount == 0) {
list_del(&bcp->bc_list);
kmem_free(bcp);
}
}
return 1;
}
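/*
* Illustrative scenario (not from the original source): a buffer that
* is cancelled twice in the log ends pass 1 with bc_refcount == 2.
* During pass 2, every replay attempt against that block range
* returns 1 here, and the two cancel items themselves decrement the
* refcount so the record is freed exactly at its last occurrence,
* letting any later reuse of the same blocks replay normally.
*/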
/*
* Perform recovery for a buffer full of inodes. In these buffers, the only
* data which should be recovered is that which corresponds to the
* di_next_unlinked pointers in the on disk inode structures. The rest of the
* data for the inodes is always logged through the inodes themselves rather
* than the inode buffer and is recovered in xlog_recover_inode_pass2().
*
* The only time when buffers full of inodes are fully recovered is when the
* buffer is full of newly allocated inodes. In this case the buffer will
* not be marked as an inode buffer and so will be sent to
* xlog_recover_do_reg_buffer() below during recovery.
*/
STATIC int
xlog_recover_do_inode_buffer(
struct xfs_mount *mp,
xlog_recover_item_t *item,
struct xfs_buf *bp,
xfs_buf_log_format_t *buf_f)
{
int i;
int item_index = 0;
int bit = 0;
int nbits = 0;
int reg_buf_offset = 0;
int reg_buf_bytes = 0;
int next_unlinked_offset;
int inodes_per_buf;
xfs_agino_t *logged_nextp;
xfs_agino_t *buffer_nextp;
trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
inodes_per_buf = XFS_BUF_COUNT(bp) >> mp->m_sb.sb_inodelog;
for (i = 0; i < inodes_per_buf; i++) {
next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
offsetof(xfs_dinode_t, di_next_unlinked);
while (next_unlinked_offset >=
(reg_buf_offset + reg_buf_bytes)) {
/*
* The next di_next_unlinked field is beyond
* the current logged region. Find the next
* logged region that contains or is beyond
* the current di_next_unlinked field.
*/
bit += nbits;
bit = xfs_next_bit(buf_f->blf_data_map,
buf_f->blf_map_size, bit);
/*
* If there are no more logged regions in the
* buffer, then we're done.
*/
if (bit == -1)
return 0;
nbits = xfs_contig_bits(buf_f->blf_data_map,
buf_f->blf_map_size, bit);
ASSERT(nbits > 0);
reg_buf_offset = bit << XFS_BLF_SHIFT;
reg_buf_bytes = nbits << XFS_BLF_SHIFT;
item_index++;
}
/*
* If the current logged region starts after the current
* di_next_unlinked field, then move on to the next
* di_next_unlinked field.
*/
if (next_unlinked_offset < reg_buf_offset)
continue;
ASSERT(item->ri_buf[item_index].i_addr != NULL);
ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
ASSERT((reg_buf_offset + reg_buf_bytes) <= XFS_BUF_COUNT(bp));
/*
* The current logged region contains a copy of the
* current di_next_unlinked field. Extract its value
* and copy it to the buffer copy.
*/
logged_nextp = item->ri_buf[item_index].i_addr +
next_unlinked_offset - reg_buf_offset;
if (unlikely(*logged_nextp == 0)) {
xfs_alert(mp,
"Bad inode buffer log record (ptr = 0x%p, bp = 0x%p). "
"Trying to replay bad (0) inode di_next_unlinked field.",
item, bp);
XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
XFS_ERRLEVEL_LOW, mp);
return XFS_ERROR(EFSCORRUPTED);
}
buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp,
next_unlinked_offset);
*buffer_nextp = *logged_nextp;
}
return 0;
}
/*
* Perform a 'normal' buffer recovery. Each logged region of the
* buffer should be copied over the corresponding region in the
* given buffer. The bitmap in the buf log format structure indicates
* where to place the logged data.
*/
STATIC void
xlog_recover_do_reg_buffer(
struct xfs_mount *mp,
xlog_recover_item_t *item,
struct xfs_buf *bp,
xfs_buf_log_format_t *buf_f)
{
int i;
int bit;
int nbits;
int error;
trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
bit = 0;
i = 1; /* 0 is the buf format structure */
while (1) {
bit = xfs_next_bit(buf_f->blf_data_map,
buf_f->blf_map_size, bit);
if (bit == -1)
break;
nbits = xfs_contig_bits(buf_f->blf_data_map,
buf_f->blf_map_size, bit);
ASSERT(nbits > 0);
ASSERT(item->ri_buf[i].i_addr != NULL);
ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
ASSERT(XFS_BUF_COUNT(bp) >=
((uint)bit << XFS_BLF_SHIFT)+(nbits<<XFS_BLF_SHIFT));
/*
* Do a sanity check if this is a dquot buffer. Just checking
* the first dquot in the buffer should do. XXXThis is
* probably a good thing to do for other buf types also.
*/
error = 0;
if (buf_f->blf_flags &
(XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
if (item->ri_buf[i].i_addr == NULL) {
xfs_alert(mp,
"XFS: NULL dquot in %s.", __func__);
goto next;
}
if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) {
xfs_alert(mp,
"XFS: dquot too small (%d) in %s.",
item->ri_buf[i].i_len, __func__);
goto next;
}
error = xfs_qm_dqcheck(mp, item->ri_buf[i].i_addr,
-1, 0, XFS_QMOPT_DOWARN,
"dquot_buf_recover");
if (error)
goto next;
}
memcpy(xfs_buf_offset(bp,
(uint)bit << XFS_BLF_SHIFT), /* dest */
item->ri_buf[i].i_addr, /* source */
nbits<<XFS_BLF_SHIFT); /* length */
next:
i++;
bit += nbits;
}
/* Shouldn't be any more regions */
ASSERT(i == item->ri_total);
}
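/*
* Worked example (illustrative, assuming the usual 128-byte logging
* chunks, i.e. XFS_BLF_CHUNK == 128 and XFS_BLF_SHIFT == 7): a run of
* set bits starting at bit == 2 with nbits == 3 copies
* 3 << 7 == 384 bytes of logged data into the buffer at byte offset
* 2 << 7 == 256.
*/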
/*
* Do some primitive error checking on ondisk dquot data structures.
*/
int
xfs_qm_dqcheck(
struct xfs_mount *mp,
xfs_disk_dquot_t *ddq,
xfs_dqid_t id,
uint type, /* used only when IO_dorepair is true */
uint flags,
char *str)
{
xfs_dqblk_t *d = (xfs_dqblk_t *)ddq;
int errs = 0;
/*
* We can encounter an uninitialized dquot buffer for 2 reasons:
* 1. If we crash while deleting the quotainode(s), and those blks got
* used for user data. This is because we take the path of regular
* file deletion; however, the size field of quotainodes is never
* updated, so all the tricks that we play in itruncate_finish
* don't quite matter.
*
* 2. We don't play the quota buffers when there's a quotaoff logitem.
* But the allocation will be replayed so we'll end up with an
* uninitialized quota block.
*
* This is all fine; things are still consistent, and we haven't lost
* any quota information. Just don't complain about bad dquot blks.
*/
if (ddq->d_magic != cpu_to_be16(XFS_DQUOT_MAGIC)) {
if (flags & XFS_QMOPT_DOWARN)
xfs_alert(mp,
"%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x",
str, id, be16_to_cpu(ddq->d_magic), XFS_DQUOT_MAGIC);
errs++;
}
if (ddq->d_version != XFS_DQUOT_VERSION) {
if (flags & XFS_QMOPT_DOWARN)
xfs_alert(mp,
"%s : XFS dquot ID 0x%x, version 0x%x != 0x%x",
str, id, ddq->d_version, XFS_DQUOT_VERSION);
errs++;
}
if (ddq->d_flags != XFS_DQ_USER &&
ddq->d_flags != XFS_DQ_PROJ &&
ddq->d_flags != XFS_DQ_GROUP) {
if (flags & XFS_QMOPT_DOWARN)
xfs_alert(mp,
"%s : XFS dquot ID 0x%x, unknown flags 0x%x",
str, id, ddq->d_flags);
errs++;
}
if (id != -1 && id != be32_to_cpu(ddq->d_id)) {
if (flags & XFS_QMOPT_DOWARN)
xfs_alert(mp,
"%s : ondisk-dquot 0x%p, ID mismatch: "
"0x%x expected, found id 0x%x",
str, ddq, id, be32_to_cpu(ddq->d_id));
errs++;
}
if (!errs && ddq->d_id) {
if (ddq->d_blk_softlimit &&
be64_to_cpu(ddq->d_bcount) >
be64_to_cpu(ddq->d_blk_softlimit)) {
if (!ddq->d_btimer) {
if (flags & XFS_QMOPT_DOWARN)
xfs_alert(mp,
"%s : Dquot ID 0x%x (0x%p) BLK TIMER NOT STARTED",
str, (int)be32_to_cpu(ddq->d_id), ddq);
errs++;
}
}
if (ddq->d_ino_softlimit &&
be64_to_cpu(ddq->d_icount) >
be64_to_cpu(ddq->d_ino_softlimit)) {
if (!ddq->d_itimer) {
if (flags & XFS_QMOPT_DOWARN)
xfs_alert(mp,
"%s : Dquot ID 0x%x (0x%p) INODE TIMER NOT STARTED",
str, (int)be32_to_cpu(ddq->d_id), ddq);
errs++;
}
}
if (ddq->d_rtb_softlimit &&
be64_to_cpu(ddq->d_rtbcount) >
be64_to_cpu(ddq->d_rtb_softlimit)) {
if (!ddq->d_rtbtimer) {
if (flags & XFS_QMOPT_DOWARN)
xfs_alert(mp,
"%s : Dquot ID 0x%x (0x%p) RTBLK TIMER NOT STARTED",
str, (int)be32_to_cpu(ddq->d_id), ddq);
errs++;
}
}
}
if (!errs || !(flags & XFS_QMOPT_DQREPAIR))
return errs;
if (flags & XFS_QMOPT_DOWARN)
xfs_notice(mp, "Re-initializing dquot ID 0x%x", id);
/*
* Typically, a repair is only requested by quotacheck.
*/
ASSERT(id != -1);
ASSERT(flags & XFS_QMOPT_DQREPAIR);
memset(d, 0, sizeof(xfs_dqblk_t));
d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
d->dd_diskdq.d_flags = type;
d->dd_diskdq.d_id = cpu_to_be32(id);
return errs;
}
/*
* Perform a dquot buffer recovery.
* Simple algorithm: if we have found a QUOTAOFF logitem of the same type
* (ie. USR or GRP), then just toss this buffer away; don't recover it.
* Else, treat it as a regular buffer and do recovery.
*/
STATIC void
xlog_recover_do_dquot_buffer(
xfs_mount_t *mp,
xlog_t *log,
xlog_recover_item_t *item,
xfs_buf_t *bp,
xfs_buf_log_format_t *buf_f)
{
uint type;
trace_xfs_log_recover_buf_dquot_buf(log, buf_f);
/*
* Filesystems are required to send in quota flags at mount time.
*/
if (mp->m_qflags == 0) {
return;
}
type = 0;
if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF)
type |= XFS_DQ_USER;
if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF)
type |= XFS_DQ_PROJ;
if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF)
type |= XFS_DQ_GROUP;
/*
 * This type of quota was turned off, so ignore this buffer
*/
if (log->l_quotaoffs_flag & type)
return;
xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
}
/*
* This routine replays a modification made to a buffer at runtime.
* There are actually two types of buffer, regular and inode, which
* are handled differently. Inode buffers are handled differently
* in that we only recover a specific set of data from them, namely
* the inode di_next_unlinked fields. This is because all other inode
* data is actually logged via inode records and any data we replay
* here which overlaps that may be stale.
*
* When meta-data buffers are freed at run time we log a buffer item
* with the XFS_BLF_CANCEL bit set to indicate that previous copies
* of the buffer in the log should not be replayed at recovery time.
* This is so that if the blocks covered by the buffer are reused for
* file data before we crash we don't end up replaying old, freed
* meta-data into a user's file.
*
* To handle the cancellation of buffer log items, we make two passes
* over the log during recovery. During the first we build a table of
* those buffers which have been cancelled, and during the second we
* only replay those buffers which do not have corresponding cancel
* records in the table. See xlog_recover_do_buffer_pass[1,2] above
* for more details on the implementation of the table of cancel records.
*/
STATIC int
xlog_recover_buffer_pass2(
xlog_t *log,
xlog_recover_item_t *item)
{
xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
xfs_mount_t *mp = log->l_mp;
xfs_buf_t *bp;
int error;
uint buf_flags;
/*
* In this pass we only want to recover all the buffers which have
* not been cancelled and are not cancellation buffers themselves.
*/
if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno,
buf_f->blf_len, buf_f->blf_flags)) {
trace_xfs_log_recover_buf_cancel(log, buf_f);
return 0;
}
trace_xfs_log_recover_buf_recover(log, buf_f);
buf_flags = XBF_LOCK;
if (!(buf_f->blf_flags & XFS_BLF_INODE_BUF))
buf_flags |= XBF_MAPPED;
bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
buf_flags);
if (!bp)
return XFS_ERROR(ENOMEM);
error = bp->b_error;
if (error) {
xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#1)");
xfs_buf_relse(bp);
return error;
}
if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
} else if (buf_f->blf_flags &
(XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
} else {
xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
}
	if (error) {
		xfs_buf_relse(bp);
		return XFS_ERROR(error);
	}
/*
* Perform delayed write on the buffer. Asynchronous writes will be
* slower when taking into account all the buffers to be flushed.
*
* Also make sure that only inode buffers with good sizes stay in
* the buffer cache. The kernel moves inodes in buffers of 1 block
* or XFS_INODE_CLUSTER_SIZE bytes, whichever is bigger. The inode
* buffers in the log can be a different size if the log was generated
* by an older kernel using unclustered inode buffers or a newer kernel
 * running with a different inode cluster size. Regardless, if
 * the inode buffer size isn't MAX(blocksize, XFS_INODE_CLUSTER_SIZE)
* for *our* value of XFS_INODE_CLUSTER_SIZE, then we need to keep
* the buffer out of the buffer cache so that the buffer won't
* overlap with future reads of those inodes.
*/
if (XFS_DINODE_MAGIC ==
be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
(XFS_BUF_COUNT(bp) != MAX(log->l_mp->m_sb.sb_blocksize,
(__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) {
xfs_buf_stale(bp);
error = xfs_bwrite(bp);
} else {
ASSERT(bp->b_target->bt_mount == mp);
bp->b_iodone = xlog_recover_iodone;
xfs_buf_delwri_queue(bp);
}
xfs_buf_relse(bp);
return error;
}
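/*
 * Illustrative sketch, not part of the original file: the two-pass
 * cancellation scheme described above reduces to "pass 1 records the
 * (blkno, len) of every cancelled buffer; pass 2 consults that table and
 * skips matching replays". A toy fixed-size table with linear search shows
 * the shape of it (the real code hashes into per-bucket lists). All demo_*
 * names are hypothetical; build separately from this file.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define DEMO_TABLE_MAX	64

struct demo_cancel {
	uint64_t	blkno;
	uint32_t	len;
};

static struct demo_cancel demo_table[DEMO_TABLE_MAX];
static int demo_entries;

/* Pass 1: remember a buffer whose log item carried the cancel flag. */
static void demo_pass1_note_cancel(uint64_t blkno, uint32_t len)
{
	if (demo_entries < DEMO_TABLE_MAX) {
		demo_table[demo_entries].blkno = blkno;
		demo_table[demo_entries].len = len;
		demo_entries++;
	}
}

/* Pass 2: should this buffer be replayed, or was it cancelled? */
static int demo_pass2_is_cancelled(uint64_t blkno, uint32_t len)
{
	int i;

	for (i = 0; i < demo_entries; i++)
		if (demo_table[i].blkno == blkno && demo_table[i].len == len)
			return 1;
	return 0;
}

int main(void)
{
	demo_pass1_note_cancel(100, 8);		/* freed before the crash */
	printf("replay (100,8)? %s\n",
	       demo_pass2_is_cancelled(100, 8) ? "no, cancelled" : "yes");
	printf("replay (200,8)? %s\n",
	       demo_pass2_is_cancelled(200, 8) ? "no, cancelled" : "yes");
	return 0;
}
#endif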
STATIC int
xlog_recover_inode_pass2(
xlog_t *log,
xlog_recover_item_t *item)
{
xfs_inode_log_format_t *in_f;
xfs_mount_t *mp = log->l_mp;
xfs_buf_t *bp;
xfs_dinode_t *dip;
int len;
xfs_caddr_t src;
xfs_caddr_t dest;
int error;
int attr_index;
uint fields;
xfs_icdinode_t *dicp;
int need_free = 0;
if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) {
in_f = item->ri_buf[0].i_addr;
} else {
in_f = kmem_alloc(sizeof(xfs_inode_log_format_t), KM_SLEEP);
need_free = 1;
error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
if (error)
goto error;
}
/*
* Inode buffers can be freed, look out for it,
* and do not replay the inode.
*/
if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno,
in_f->ilf_len, 0)) {
error = 0;
trace_xfs_log_recover_inode_cancel(log, in_f);
goto error;
}
trace_xfs_log_recover_inode_recover(log, in_f);
bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len,
XBF_LOCK);
if (!bp) {
error = ENOMEM;
goto error;
}
error = bp->b_error;
if (error) {
xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#2)");
xfs_buf_relse(bp);
goto error;
}
ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
dip = (xfs_dinode_t *)xfs_buf_offset(bp, in_f->ilf_boffset);
/*
* Make sure the place we're flushing out to really looks
* like an inode!
*/
if (unlikely(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))) {
xfs_buf_relse(bp);
xfs_alert(mp,
"%s: Bad inode magic number, dip = 0x%p, dino bp = 0x%p, ino = %Ld",
__func__, dip, bp, in_f->ilf_ino);
XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)",
XFS_ERRLEVEL_LOW, mp);
error = EFSCORRUPTED;
goto error;
}
dicp = item->ri_buf[1].i_addr;
if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) {
xfs_buf_relse(bp);
xfs_alert(mp,
"%s: Bad inode log record, rec ptr 0x%p, ino %Ld",
__func__, item, in_f->ilf_ino);
XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)",
XFS_ERRLEVEL_LOW, mp);
error = EFSCORRUPTED;
goto error;
}
/* Skip replay when the on disk inode is newer than the log one */
if (dicp->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
/*
 * Deal with the wrap case: once the counter wraps,
 * DI_MAX_FLUSH counts as older than small values
*/
if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
dicp->di_flushiter < (DI_MAX_FLUSH >> 1)) {
/* do nothing */
} else {
xfs_buf_relse(bp);
trace_xfs_log_recover_inode_skip(log, in_f);
error = 0;
goto error;
}
}
/* Take the opportunity to reset the flush iteration count */
dicp->di_flushiter = 0;
if (unlikely(S_ISREG(dicp->di_mode))) {
if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
(dicp->di_format != XFS_DINODE_FMT_BTREE)) {
XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
XFS_ERRLEVEL_LOW, mp, dicp);
xfs_buf_relse(bp);
xfs_alert(mp,
"%s: Bad regular inode log record, rec ptr 0x%p, "
"ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
__func__, item, dip, bp, in_f->ilf_ino);
error = EFSCORRUPTED;
goto error;
}
} else if (unlikely(S_ISDIR(dicp->di_mode))) {
if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
(dicp->di_format != XFS_DINODE_FMT_BTREE) &&
(dicp->di_format != XFS_DINODE_FMT_LOCAL)) {
XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)",
XFS_ERRLEVEL_LOW, mp, dicp);
xfs_buf_relse(bp);
xfs_alert(mp,
"%s: Bad dir inode log record, rec ptr 0x%p, "
"ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
__func__, item, dip, bp, in_f->ilf_ino);
error = EFSCORRUPTED;
goto error;
}
}
if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)){
XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)",
XFS_ERRLEVEL_LOW, mp, dicp);
xfs_buf_relse(bp);
xfs_alert(mp,
"%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
"dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
__func__, item, dip, bp, in_f->ilf_ino,
dicp->di_nextents + dicp->di_anextents,
dicp->di_nblocks);
error = EFSCORRUPTED;
goto error;
}
if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) {
XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)",
XFS_ERRLEVEL_LOW, mp, dicp);
xfs_buf_relse(bp);
xfs_alert(mp,
"%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
"dino bp 0x%p, ino %Ld, forkoff 0x%x", __func__,
item, dip, bp, in_f->ilf_ino, dicp->di_forkoff);
error = EFSCORRUPTED;
goto error;
}
if (unlikely(item->ri_buf[1].i_len > sizeof(struct xfs_icdinode))) {
XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
XFS_ERRLEVEL_LOW, mp, dicp);
xfs_buf_relse(bp);
xfs_alert(mp,
"%s: Bad inode log record length %d, rec ptr 0x%p",
__func__, item->ri_buf[1].i_len, item);
error = EFSCORRUPTED;
goto error;
}
/* The core is in in-core format */
xfs_dinode_to_disk(dip, item->ri_buf[1].i_addr);
/* the rest is in on-disk format */
if (item->ri_buf[1].i_len > sizeof(struct xfs_icdinode)) {
memcpy((xfs_caddr_t) dip + sizeof(struct xfs_icdinode),
item->ri_buf[1].i_addr + sizeof(struct xfs_icdinode),
item->ri_buf[1].i_len - sizeof(struct xfs_icdinode));
}
fields = in_f->ilf_fields;
switch (fields & (XFS_ILOG_DEV | XFS_ILOG_UUID)) {
case XFS_ILOG_DEV:
xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev);
break;
case XFS_ILOG_UUID:
memcpy(XFS_DFORK_DPTR(dip),
&in_f->ilf_u.ilfu_uuid,
sizeof(uuid_t));
break;
}
if (in_f->ilf_size == 2)
goto write_inode_buffer;
len = item->ri_buf[2].i_len;
src = item->ri_buf[2].i_addr;
ASSERT(in_f->ilf_size <= 4);
ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
ASSERT(!(fields & XFS_ILOG_DFORK) ||
(len == in_f->ilf_dsize));
switch (fields & XFS_ILOG_DFORK) {
case XFS_ILOG_DDATA:
case XFS_ILOG_DEXT:
memcpy(XFS_DFORK_DPTR(dip), src, len);
break;
case XFS_ILOG_DBROOT:
xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len,
(xfs_bmdr_block_t *)XFS_DFORK_DPTR(dip),
XFS_DFORK_DSIZE(dip, mp));
break;
default:
/*
* There are no data fork flags set.
*/
ASSERT((fields & XFS_ILOG_DFORK) == 0);
break;
}
/*
* If we logged any attribute data, recover it. There may or
* may not have been any other non-core data logged in this
* transaction.
*/
if (in_f->ilf_fields & XFS_ILOG_AFORK) {
if (in_f->ilf_fields & XFS_ILOG_DFORK) {
attr_index = 3;
} else {
attr_index = 2;
}
len = item->ri_buf[attr_index].i_len;
src = item->ri_buf[attr_index].i_addr;
ASSERT(len == in_f->ilf_asize);
switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
case XFS_ILOG_ADATA:
case XFS_ILOG_AEXT:
dest = XFS_DFORK_APTR(dip);
ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
memcpy(dest, src, len);
break;
case XFS_ILOG_ABROOT:
dest = XFS_DFORK_APTR(dip);
xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src,
len, (xfs_bmdr_block_t*)dest,
XFS_DFORK_ASIZE(dip, mp));
break;
default:
xfs_warn(log->l_mp, "%s: Invalid flag", __func__);
ASSERT(0);
xfs_buf_relse(bp);
error = EIO;
goto error;
}
}
write_inode_buffer:
ASSERT(bp->b_target->bt_mount == mp);
bp->b_iodone = xlog_recover_iodone;
xfs_buf_delwri_queue(bp);
xfs_buf_relse(bp);
error:
if (need_free)
kmem_free(in_f);
return XFS_ERROR(error);
}
/*
* Recover QUOTAOFF records. We simply make a note of it in the xlog_t
 * structure, so that we know not to do any dquot item or dquot buffer recovery
 * of that type.
*/
STATIC int
xlog_recover_quotaoff_pass1(
xlog_t *log,
xlog_recover_item_t *item)
{
xfs_qoff_logformat_t *qoff_f = item->ri_buf[0].i_addr;
ASSERT(qoff_f);
/*
* The logitem format's flag tells us if this was user quotaoff,
* group/project quotaoff or both.
*/
if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
log->l_quotaoffs_flag |= XFS_DQ_USER;
if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
log->l_quotaoffs_flag |= XFS_DQ_PROJ;
if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
log->l_quotaoffs_flag |= XFS_DQ_GROUP;
return (0);
}
/*
* Recover a dquot record
*/
STATIC int
xlog_recover_dquot_pass2(
xlog_t *log,
xlog_recover_item_t *item)
{
xfs_mount_t *mp = log->l_mp;
xfs_buf_t *bp;
struct xfs_disk_dquot *ddq, *recddq;
int error;
xfs_dq_logformat_t *dq_f;
uint type;
/*
* Filesystems are required to send in quota flags at mount time.
*/
if (mp->m_qflags == 0)
return (0);
recddq = item->ri_buf[1].i_addr;
if (recddq == NULL) {
xfs_alert(log->l_mp, "NULL dquot in %s.", __func__);
return XFS_ERROR(EIO);
}
if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) {
xfs_alert(log->l_mp, "dquot too small (%d) in %s.",
item->ri_buf[1].i_len, __func__);
return XFS_ERROR(EIO);
}
/*
 * This type of quota was turned off, so ignore this record.
*/
type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
ASSERT(type);
if (log->l_quotaoffs_flag & type)
return (0);
/*
* At this point we know that quota was _not_ turned off.
* Since the mount flags are not indicating to us otherwise, this
* must mean that quota is on, and the dquot needs to be replayed.
* Remember that we may not have fully recovered the superblock yet,
* so we can't do the usual trick of looking at the SB quota bits.
*
* The other possibility, of course, is that the quota subsystem was
* removed since the last mount - ENOSYS.
*/
dq_f = item->ri_buf[0].i_addr;
ASSERT(dq_f);
error = xfs_qm_dqcheck(mp, recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
"xlog_recover_dquot_pass2 (log copy)");
if (error)
return XFS_ERROR(EIO);
ASSERT(dq_f->qlf_len == 1);
error = xfs_read_buf(mp, mp->m_ddev_targp,
dq_f->qlf_blkno,
XFS_FSB_TO_BB(mp, dq_f->qlf_len),
0, &bp);
if (error) {
xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#3)");
return error;
}
ASSERT(bp);
ddq = (xfs_disk_dquot_t *)xfs_buf_offset(bp, dq_f->qlf_boffset);
/*
* At least the magic num portion should be on disk because this
* was among a chunk of dquots created earlier, and we did some
* minimal initialization then.
*/
error = xfs_qm_dqcheck(mp, ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
"xlog_recover_dquot_pass2");
if (error) {
xfs_buf_relse(bp);
return XFS_ERROR(EIO);
}
memcpy(ddq, recddq, item->ri_buf[1].i_len);
ASSERT(dq_f->qlf_size == 2);
ASSERT(bp->b_target->bt_mount == mp);
bp->b_iodone = xlog_recover_iodone;
xfs_buf_delwri_queue(bp);
xfs_buf_relse(bp);
return (0);
}
/*
* This routine is called to create an in-core extent free intent
* item from the efi format structure which was logged on disk.
* It allocates an in-core efi, copies the extents from the format
* structure into it, and adds the efi to the AIL with the given
* LSN.
*/
STATIC int
xlog_recover_efi_pass2(
xlog_t *log,
xlog_recover_item_t *item,
xfs_lsn_t lsn)
{
int error;
xfs_mount_t *mp = log->l_mp;
xfs_efi_log_item_t *efip;
xfs_efi_log_format_t *efi_formatp;
efi_formatp = item->ri_buf[0].i_addr;
efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
if ((error = xfs_efi_copy_format(&(item->ri_buf[0]),
&(efip->efi_format)))) {
xfs_efi_item_free(efip);
return error;
}
atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents);
spin_lock(&log->l_ailp->xa_lock);
/*
* xfs_trans_ail_update() drops the AIL lock.
*/
xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn);
return 0;
}
/*
* This routine is called when an efd format structure is found in
 * a committed transaction in the log. Its purpose is to cancel
* the corresponding efi if it was still in the log. To do this
* it searches the AIL for the efi with an id equal to that in the
* efd format structure. If we find it, we remove the efi from the
* AIL and free it.
*/
STATIC int
xlog_recover_efd_pass2(
xlog_t *log,
xlog_recover_item_t *item)
{
xfs_efd_log_format_t *efd_formatp;
xfs_efi_log_item_t *efip = NULL;
xfs_log_item_t *lip;
__uint64_t efi_id;
struct xfs_ail_cursor cur;
struct xfs_ail *ailp = log->l_ailp;
efd_formatp = item->ri_buf[0].i_addr;
ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
(item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t)))));
efi_id = efd_formatp->efd_efi_id;
/*
* Search for the efi with the id in the efd format structure
* in the AIL.
*/
spin_lock(&ailp->xa_lock);
lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
while (lip != NULL) {
if (lip->li_type == XFS_LI_EFI) {
efip = (xfs_efi_log_item_t *)lip;
if (efip->efi_format.efi_id == efi_id) {
/*
* xfs_trans_ail_delete() drops the
* AIL lock.
*/
xfs_trans_ail_delete(ailp, lip);
xfs_efi_item_free(efip);
spin_lock(&ailp->xa_lock);
break;
}
}
lip = xfs_trans_ail_cursor_next(ailp, &cur);
}
xfs_trans_ail_cursor_done(ailp, &cur);
spin_unlock(&ailp->xa_lock);
return 0;
}
/*
* Free up any resources allocated by the transaction
*
* Remember that EFIs, EFDs, and IUNLINKs are handled later.
*/
STATIC void
xlog_recover_free_trans(
struct xlog_recover *trans)
{
xlog_recover_item_t *item, *n;
int i;
list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
/* Free the regions in the item. */
list_del(&item->ri_list);
for (i = 0; i < item->ri_cnt; i++)
kmem_free(item->ri_buf[i].i_addr);
/* Free the item itself */
kmem_free(item->ri_buf);
kmem_free(item);
}
/* Free the transaction recover structure */
kmem_free(trans);
}
STATIC int
xlog_recover_commit_pass1(
struct log *log,
struct xlog_recover *trans,
xlog_recover_item_t *item)
{
trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1);
switch (ITEM_TYPE(item)) {
case XFS_LI_BUF:
return xlog_recover_buffer_pass1(log, item);
case XFS_LI_QUOTAOFF:
return xlog_recover_quotaoff_pass1(log, item);
case XFS_LI_INODE:
case XFS_LI_EFI:
case XFS_LI_EFD:
case XFS_LI_DQUOT:
/* nothing to do in pass 1 */
return 0;
default:
xfs_warn(log->l_mp, "%s: invalid item type (%d)",
__func__, ITEM_TYPE(item));
ASSERT(0);
return XFS_ERROR(EIO);
}
}
STATIC int
xlog_recover_commit_pass2(
struct log *log,
struct xlog_recover *trans,
xlog_recover_item_t *item)
{
trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);
switch (ITEM_TYPE(item)) {
case XFS_LI_BUF:
return xlog_recover_buffer_pass2(log, item);
case XFS_LI_INODE:
return xlog_recover_inode_pass2(log, item);
case XFS_LI_EFI:
return xlog_recover_efi_pass2(log, item, trans->r_lsn);
case XFS_LI_EFD:
return xlog_recover_efd_pass2(log, item);
case XFS_LI_DQUOT:
return xlog_recover_dquot_pass2(log, item);
case XFS_LI_QUOTAOFF:
/* nothing to do in pass2 */
return 0;
default:
xfs_warn(log->l_mp, "%s: invalid item type (%d)",
__func__, ITEM_TYPE(item));
ASSERT(0);
return XFS_ERROR(EIO);
}
}
/*
* Perform the transaction.
*
* If the transaction modifies a buffer or inode, do it now. Otherwise,
* EFIs and EFDs get queued up by adding entries into the AIL for them.
*/
STATIC int
xlog_recover_commit_trans(
struct log *log,
struct xlog_recover *trans,
int pass)
{
int error = 0;
xlog_recover_item_t *item;
hlist_del(&trans->r_list);
error = xlog_recover_reorder_trans(log, trans, pass);
if (error)
return error;
list_for_each_entry(item, &trans->r_itemq, ri_list) {
if (pass == XLOG_RECOVER_PASS1)
error = xlog_recover_commit_pass1(log, trans, item);
else
error = xlog_recover_commit_pass2(log, trans, item);
if (error)
return error;
}
xlog_recover_free_trans(trans);
return 0;
}
STATIC int
xlog_recover_unmount_trans(
struct log *log,
xlog_recover_t *trans)
{
/* Do nothing now */
xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
return 0;
}
/*
* There are two valid states of the r_state field. 0 indicates that the
* transaction structure is in a normal state. We have either seen the
* start of the transaction or the last operation we added was not a partial
* operation. If the last operation we added to the transaction was a
* partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
*
* NOTE: skip LRs with 0 data length.
*/
STATIC int
xlog_recover_process_data(
xlog_t *log,
struct hlist_head rhash[],
xlog_rec_header_t *rhead,
xfs_caddr_t dp,
int pass)
{
xfs_caddr_t lp;
int num_logops;
xlog_op_header_t *ohead;
xlog_recover_t *trans;
xlog_tid_t tid;
int error;
unsigned long hash;
uint flags;
lp = dp + be32_to_cpu(rhead->h_len);
num_logops = be32_to_cpu(rhead->h_num_logops);
/* check the log format matches our own - else we can't recover */
if (xlog_header_check_recover(log->l_mp, rhead))
return (XFS_ERROR(EIO));
while ((dp < lp) && num_logops) {
ASSERT(dp + sizeof(xlog_op_header_t) <= lp);
ohead = (xlog_op_header_t *)dp;
dp += sizeof(xlog_op_header_t);
if (ohead->oh_clientid != XFS_TRANSACTION &&
ohead->oh_clientid != XFS_LOG) {
xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
__func__, ohead->oh_clientid);
ASSERT(0);
return (XFS_ERROR(EIO));
}
tid = be32_to_cpu(ohead->oh_tid);
hash = XLOG_RHASH(tid);
trans = xlog_recover_find_tid(&rhash[hash], tid);
if (trans == NULL) { /* not found; add new tid */
if (ohead->oh_flags & XLOG_START_TRANS)
xlog_recover_new_tid(&rhash[hash], tid,
be64_to_cpu(rhead->h_lsn));
} else {
if (dp + be32_to_cpu(ohead->oh_len) > lp) {
xfs_warn(log->l_mp, "%s: bad length 0x%x",
__func__, be32_to_cpu(ohead->oh_len));
WARN_ON(1);
return (XFS_ERROR(EIO));
}
flags = ohead->oh_flags & ~XLOG_END_TRANS;
if (flags & XLOG_WAS_CONT_TRANS)
flags &= ~XLOG_CONTINUE_TRANS;
switch (flags) {
case XLOG_COMMIT_TRANS:
error = xlog_recover_commit_trans(log,
trans, pass);
break;
case XLOG_UNMOUNT_TRANS:
error = xlog_recover_unmount_trans(log, trans);
break;
case XLOG_WAS_CONT_TRANS:
error = xlog_recover_add_to_cont_trans(log,
trans, dp,
be32_to_cpu(ohead->oh_len));
break;
case XLOG_START_TRANS:
xfs_warn(log->l_mp, "%s: bad transaction",
__func__);
ASSERT(0);
error = XFS_ERROR(EIO);
break;
case 0:
case XLOG_CONTINUE_TRANS:
error = xlog_recover_add_to_trans(log, trans,
dp, be32_to_cpu(ohead->oh_len));
break;
default:
xfs_warn(log->l_mp, "%s: bad flag 0x%x",
__func__, flags);
ASSERT(0);
error = XFS_ERROR(EIO);
break;
}
if (error)
return error;
}
dp += be32_to_cpu(ohead->oh_len);
num_logops--;
}
return 0;
}
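/*
 * Illustrative sketch, not part of the original file: the loop above is a
 * classic walk over variable-length records -- a fixed header carries a
 * payload length, the cursor advances by header plus payload, and every
 * dereference is bounds-checked first. A toy big-endian framing, with
 * hypothetical demo_* names; build separately from this file.
 */
#if 0
#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_op_header {
	uint32_t	oh_len;		/* big-endian payload length */
};

static int demo_walk(const unsigned char *dp, size_t total)
{
	const unsigned char *lp = dp + total;

	while (dp < lp) {
		struct demo_op_header ohead;
		uint32_t len;

		if ((size_t)(lp - dp) < sizeof(ohead))
			return -1;	/* truncated header */
		memcpy(&ohead, dp, sizeof(ohead));
		dp += sizeof(ohead);
		len = be32toh(ohead.oh_len);
		if ((size_t)(lp - dp) < len)
			return -1;	/* bad length, as checked above */
		printf("op with %u payload bytes\n", (unsigned)len);
		dp += len;
	}
	return 0;
}

int main(void)
{
	unsigned char buf[4 + 3 + 4];	/* two ops: 3-byte and 0-byte payloads */
	uint32_t len0 = htobe32(3), len1 = htobe32(0);

	memset(buf, 0, sizeof(buf));
	memcpy(buf, &len0, 4);
	memcpy(buf + 4 + 3, &len1, 4);
	return demo_walk(buf, sizeof(buf));
}
#endif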
/*
* Process an extent free intent item that was recovered from
* the log. We need to free the extents that it describes.
*/
STATIC int
xlog_recover_process_efi(
xfs_mount_t *mp,
xfs_efi_log_item_t *efip)
{
xfs_efd_log_item_t *efdp;
xfs_trans_t *tp;
int i;
int error = 0;
xfs_extent_t *extp;
xfs_fsblock_t startblock_fsb;
ASSERT(!test_bit(XFS_EFI_RECOVERED, &efip->efi_flags));
/*
* First check the validity of the extents described by the
* EFI. If any are bad, then assume that all are bad and
* just toss the EFI.
*/
for (i = 0; i < efip->efi_format.efi_nextents; i++) {
extp = &(efip->efi_format.efi_extents[i]);
startblock_fsb = XFS_BB_TO_FSB(mp,
XFS_FSB_TO_DADDR(mp, extp->ext_start));
if ((startblock_fsb == 0) ||
(extp->ext_len == 0) ||
(startblock_fsb >= mp->m_sb.sb_dblocks) ||
(extp->ext_len >= mp->m_sb.sb_agblocks)) {
/*
* This will pull the EFI from the AIL and
* free the memory associated with it.
*/
xfs_efi_release(efip, efip->efi_format.efi_nextents);
return XFS_ERROR(EIO);
}
}
tp = xfs_trans_alloc(mp, 0);
error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, 0, 0);
if (error)
goto abort_error;
efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
for (i = 0; i < efip->efi_format.efi_nextents; i++) {
extp = &(efip->efi_format.efi_extents[i]);
error = xfs_free_extent(tp, extp->ext_start, extp->ext_len);
if (error)
goto abort_error;
xfs_trans_log_efd_extent(tp, efdp, extp->ext_start,
extp->ext_len);
}
set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
error = xfs_trans_commit(tp, 0);
return error;
abort_error:
xfs_trans_cancel(tp, XFS_TRANS_ABORT);
return error;
}
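/*
 * Illustrative sketch, not part of the original file: the sanity loop
 * above rejects the whole intent if any single extent looks impossible --
 * zero start, zero length, or start/length beyond the filesystem's
 * advertised limits. The same check in isolation, with hypothetical
 * demo_* names; build separately from this file.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

struct demo_extent {
	uint64_t	start;
	uint32_t	len;
};

/* Returns 1 when every extent is plausible, 0 when any one is not. */
static int demo_extents_valid(const struct demo_extent *ext, int n,
			      uint64_t fs_blocks, uint32_t ag_blocks)
{
	int i;

	for (i = 0; i < n; i++) {
		if (ext[i].start == 0 || ext[i].len == 0 ||
		    ext[i].start >= fs_blocks || ext[i].len >= ag_blocks)
			return 0;	/* one bad extent taints them all */
	}
	return 1;
}

int main(void)
{
	struct demo_extent good[] = { { 100, 8 }, { 900, 16 } };
	struct demo_extent bad[]  = { { 100, 8 }, { 0, 16 } };

	printf("good set valid? %d\n", demo_extents_valid(good, 2, 1000, 256));
	printf("bad set valid?  %d\n", demo_extents_valid(bad, 2, 1000, 256));
	return 0;
}
#endif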
/*
* When this is called, all of the EFIs which did not have
* corresponding EFDs should be in the AIL. What we do now
* is free the extents associated with each one.
*
* Since we process the EFIs in normal transactions, they
* will be removed at some point after the commit. This prevents
* us from just walking down the list processing each one.
* We'll use a flag in the EFI to skip those that we've already
* processed and use the AIL iteration mechanism's generation
* count to try to speed this up at least a bit.
*
* When we start, we know that the EFIs are the only things in
* the AIL. As we process them, however, other items are added
* to the AIL. Since everything added to the AIL must come after
* everything already in the AIL, we stop processing as soon as
* we see something other than an EFI in the AIL.
*/
STATIC int
xlog_recover_process_efis(
xlog_t *log)
{
xfs_log_item_t *lip;
xfs_efi_log_item_t *efip;
int error = 0;
struct xfs_ail_cursor cur;
struct xfs_ail *ailp;
ailp = log->l_ailp;
spin_lock(&ailp->xa_lock);
lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
while (lip != NULL) {
/*
* We're done when we see something other than an EFI.
* There should be no EFIs left in the AIL now.
*/
if (lip->li_type != XFS_LI_EFI) {
#ifdef DEBUG
for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
ASSERT(lip->li_type != XFS_LI_EFI);
#endif
break;
}
/*
* Skip EFIs that we've already processed.
*/
efip = (xfs_efi_log_item_t *)lip;
if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags)) {
lip = xfs_trans_ail_cursor_next(ailp, &cur);
continue;
}
spin_unlock(&ailp->xa_lock);
error = xlog_recover_process_efi(log->l_mp, efip);
spin_lock(&ailp->xa_lock);
if (error)
goto out;
lip = xfs_trans_ail_cursor_next(ailp, &cur);
}
out:
xfs_trans_ail_cursor_done(ailp, &cur);
spin_unlock(&ailp->xa_lock);
return error;
}
/*
* This routine performs a transaction to null out a bad inode pointer
* in an agi unlinked inode hash bucket.
*/
STATIC void
xlog_recover_clear_agi_bucket(
xfs_mount_t *mp,
xfs_agnumber_t agno,
int bucket)
{
xfs_trans_t *tp;
xfs_agi_t *agi;
xfs_buf_t *agibp;
int offset;
int error;
tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET);
error = xfs_trans_reserve(tp, 0, XFS_CLEAR_AGI_BUCKET_LOG_RES(mp),
0, 0, 0);
if (error)
goto out_abort;
error = xfs_read_agi(mp, tp, agno, &agibp);
if (error)
goto out_abort;
agi = XFS_BUF_TO_AGI(agibp);
agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
offset = offsetof(xfs_agi_t, agi_unlinked) +
(sizeof(xfs_agino_t) * bucket);
xfs_trans_log_buf(tp, agibp, offset,
(offset + sizeof(xfs_agino_t) - 1));
error = xfs_trans_commit(tp, 0);
if (error)
goto out_error;
return;
out_abort:
xfs_trans_cancel(tp, XFS_TRANS_ABORT);
out_error:
xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno);
return;
}
STATIC xfs_agino_t
xlog_recover_process_one_iunlink(
struct xfs_mount *mp,
xfs_agnumber_t agno,
xfs_agino_t agino,
int bucket)
{
struct xfs_buf *ibp;
struct xfs_dinode *dip;
struct xfs_inode *ip;
xfs_ino_t ino;
int error;
ino = XFS_AGINO_TO_INO(mp, agno, agino);
error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
if (error)
goto fail;
/*
* Get the on disk inode to find the next inode in the bucket.
*/
error = xfs_itobp(mp, NULL, ip, &dip, &ibp, XBF_LOCK);
if (error)
goto fail_iput;
ASSERT(ip->i_d.di_nlink == 0);
ASSERT(ip->i_d.di_mode != 0);
/* setup for the next pass */
agino = be32_to_cpu(dip->di_next_unlinked);
xfs_buf_relse(ibp);
/*
* Prevent any DMAPI event from being sent when the reference on
* the inode is dropped.
*/
ip->i_d.di_dmevmask = 0;
IRELE(ip);
return agino;
fail_iput:
IRELE(ip);
fail:
/*
* We can't read in the inode this bucket points to, or this inode
* is messed up. Just ditch this bucket of inodes. We will lose
* some inodes and space, but at least we won't hang.
*
* Call xlog_recover_clear_agi_bucket() to perform a transaction to
* clear the inode pointer in the bucket.
*/
xlog_recover_clear_agi_bucket(mp, agno, bucket);
return NULLAGINO;
}
/*
* xlog_iunlink_recover
*
* This is called during recovery to process any inodes which
 * we unlinked but did not free when the system crashed. These
* inodes will be on the lists in the AGI blocks. What we do
* here is scan all the AGIs and fully truncate and free any
* inodes found on the lists. Each inode is removed from the
* lists when it has been fully truncated and is freed. The
* freeing of the inode and its removal from the list must be
* atomic.
*/
STATIC void
xlog_recover_process_iunlinks(
xlog_t *log)
{
xfs_mount_t *mp;
xfs_agnumber_t agno;
xfs_agi_t *agi;
xfs_buf_t *agibp;
xfs_agino_t agino;
int bucket;
int error;
uint mp_dmevmask;
mp = log->l_mp;
/*
* Prevent any DMAPI event from being sent while in this function.
*/
mp_dmevmask = mp->m_dmevmask;
mp->m_dmevmask = 0;
for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
/*
* Find the agi for this ag.
*/
error = xfs_read_agi(mp, NULL, agno, &agibp);
if (error) {
/*
* AGI is b0rked. Don't process it.
*
* We should probably mark the filesystem as corrupt
* after we've recovered all the ag's we can....
*/
continue;
}
/*
* Unlock the buffer so that it can be acquired in the normal
* course of the transaction to truncate and free each inode.
* Because we are not racing with anyone else here for the AGI
* buffer, we don't even need to hold it locked to read the
* initial unlinked bucket entries out of the buffer. We keep
* buffer reference though, so that it stays pinned in memory
* while we need the buffer.
*/
agi = XFS_BUF_TO_AGI(agibp);
xfs_buf_unlock(agibp);
for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
agino = be32_to_cpu(agi->agi_unlinked[bucket]);
while (agino != NULLAGINO) {
agino = xlog_recover_process_one_iunlink(mp,
agno, agino, bucket);
}
}
xfs_buf_rele(agibp);
}
mp->m_dmevmask = mp_dmevmask;
}
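/*
 * Illustrative sketch, not part of the original file: the pair of routines
 * above walk an on-disk singly linked list headed by each AGI bucket,
 * threaded through di_next_unlinked and terminated by NULLAGINO, ditching
 * the bucket when an inode cannot be read. A toy array of "inodes" holding
 * a next index shows the traversal; demo_* names are hypothetical and it
 * builds separately from this file.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define DEMO_NULLAGINO	((uint32_t)-1)

struct demo_dinode {
	uint32_t	next_unlinked;
	int		readable;	/* 0 simulates an I/O failure */
};

/* Bucket chain: 0 -> 3, but inode 3 is unreadable, so we ditch there. */
static struct demo_dinode demo_inodes[4] = {
	{ 3, 1 }, { DEMO_NULLAGINO, 1 }, { 1, 1 }, { DEMO_NULLAGINO, 0 },
};

/* Mirrors the shape of xlog_recover_process_one_iunlink above. */
static uint32_t demo_process_one(uint32_t agino)
{
	if (!demo_inodes[agino].readable) {
		printf("inode %u unreadable, ditching bucket\n",
		       (unsigned)agino);
		return DEMO_NULLAGINO;
	}
	printf("processing unlinked inode %u\n", (unsigned)agino);
	return demo_inodes[agino].next_unlinked;
}

int main(void)
{
	uint32_t agino = 0;	/* bucket head */

	while (agino != DEMO_NULLAGINO)
		agino = demo_process_one(agino);
	return 0;
}
#endif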
#ifdef DEBUG
STATIC void
xlog_pack_data_checksum(
xlog_t *log,
xlog_in_core_t *iclog,
int size)
{
int i;
__be32 *up;
uint chksum = 0;
up = (__be32 *)iclog->ic_datap;
/* divide length by 4 to get # words */
for (i = 0; i < (size >> 2); i++) {
chksum ^= be32_to_cpu(*up);
up++;
}
iclog->ic_header.h_chksum = cpu_to_be32(chksum);
}
#else
#define xlog_pack_data_checksum(log, iclog, size)
#endif
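/*
 * Illustrative sketch, not part of the original file: the DEBUG checksum
 * above is an XOR fold over the record body taken as big-endian 32-bit
 * words (the >> 2 assumes a length that is a multiple of four). The same
 * fold in userspace, built separately from this file:
 */
#if 0
#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t demo_pack_checksum(const void *data, int size)
{
	const unsigned char *p = data;
	uint32_t chksum = 0;
	int i;

	for (i = 0; i < (size >> 2); i++) {	/* size / 4 = word count */
		uint32_t word;

		memcpy(&word, p + 4 * i, sizeof(word));
		chksum ^= be32toh(word);
	}
	return chksum;
}

int main(void)
{
	unsigned char buf[8] = { 0, 0, 0, 1, 0, 0, 0, 3 };

	/* big-endian words 1 and 3: 1 ^ 3 == 2 */
	printf("chksum = %u\n", (unsigned)demo_pack_checksum(buf, sizeof(buf)));
	return 0;
}
#endif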
/*
* Stamp cycle number in every block
*/
void
xlog_pack_data(
xlog_t *log,
xlog_in_core_t *iclog,
int roundoff)
{
int i, j, k;
int size = iclog->ic_offset + roundoff;
__be32 cycle_lsn;
xfs_caddr_t dp;
xlog_pack_data_checksum(log, iclog, size);
cycle_lsn = CYCLE_LSN_DISK(iclog->ic_header.h_lsn);
dp = iclog->ic_datap;
for (i = 0; i < BTOBB(size) &&
i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
iclog->ic_header.h_cycle_data[i] = *(__be32 *)dp;
*(__be32 *)dp = cycle_lsn;
dp += BBSIZE;
}
if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
xlog_in_core_2_t *xhdr = iclog->ic_data;
for ( ; i < BTOBB(size); i++) {
j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
xhdr[j].hic_xheader.xh_cycle_data[k] = *(__be32 *)dp;
*(__be32 *)dp = cycle_lsn;
dp += BBSIZE;
}
for (i = 1; i < log->l_iclog_heads; i++) {
xhdr[i].hic_xheader.xh_cycle = cycle_lsn;
}
}
}
STATIC void
xlog_unpack_data(
xlog_rec_header_t *rhead,
xfs_caddr_t dp,
xlog_t *log)
{
int i, j, k;
for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
*(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
dp += BBSIZE;
}
if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
*(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
dp += BBSIZE;
}
}
}
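/*
 * Illustrative sketch, not part of the original file: pack and unpack
 * above are inverses -- packing saves the first 32-bit word of every
 * 512-byte basic block into the record header and overwrites it with the
 * cycle number (so a torn write shows up as a cycle mismatch), and
 * unpacking restores the saved words. A toy round trip over two blocks,
 * with hypothetical demo_* names, built separately from this file:
 */
#if 0
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_BBSIZE	512
#define DEMO_NBLKS	2

static uint32_t demo_saved[DEMO_NBLKS];	/* stands in for h_cycle_data[] */

static void demo_pack(unsigned char *dp, uint32_t cycle)
{
	int i;

	for (i = 0; i < DEMO_NBLKS; i++) {
		memcpy(&demo_saved[i], dp, 4);	/* save the first word */
		memcpy(dp, &cycle, 4);		/* stamp the cycle number */
		dp += DEMO_BBSIZE;
	}
}

static void demo_unpack(unsigned char *dp)
{
	int i;

	for (i = 0; i < DEMO_NBLKS; i++) {
		memcpy(dp, &demo_saved[i], 4);	/* restore the saved word */
		dp += DEMO_BBSIZE;
	}
}

int main(void)
{
	static unsigned char blk[DEMO_BBSIZE * DEMO_NBLKS];

	blk[0] = 0xAA;
	blk[DEMO_BBSIZE] = 0xBB;
	demo_pack(blk, 7);
	demo_unpack(blk);
	printf("round trip ok: %d\n",
	       blk[0] == 0xAA && blk[DEMO_BBSIZE] == 0xBB);
	return 0;
}
#endif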
STATIC int
xlog_valid_rec_header(
xlog_t *log,
xlog_rec_header_t *rhead,
xfs_daddr_t blkno)
{
int hlen;
if (unlikely(rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) {
XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
XFS_ERRLEVEL_LOW, log->l_mp);
return XFS_ERROR(EFSCORRUPTED);
}
if (unlikely(
(!rhead->h_version ||
(be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
__func__, be32_to_cpu(rhead->h_version));
return XFS_ERROR(EIO);
}
/* LR body must have data or it wouldn't have been written */
hlen = be32_to_cpu(rhead->h_len);
	if (unlikely(hlen <= 0 || hlen > INT_MAX)) {
XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
XFS_ERRLEVEL_LOW, log->l_mp);
return XFS_ERROR(EFSCORRUPTED);
}
	if (unlikely(blkno > log->l_logBBsize || blkno > INT_MAX)) {
XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
XFS_ERRLEVEL_LOW, log->l_mp);
return XFS_ERROR(EFSCORRUPTED);
}
return 0;
}
/*
* Read the log from tail to head and process the log records found.
* Handle the two cases where the tail and head are in the same cycle
* and where the active portion of the log wraps around the end of
* the physical log separately. The pass parameter is passed through
* to the routines called to process the data and is not looked at
* here.
*/
STATIC int
xlog_do_recovery_pass(
xlog_t *log,
xfs_daddr_t head_blk,
xfs_daddr_t tail_blk,
int pass)
{
xlog_rec_header_t *rhead;
xfs_daddr_t blk_no;
xfs_caddr_t offset;
xfs_buf_t *hbp, *dbp;
int error = 0, h_size;
int bblks, split_bblks;
int hblks, split_hblks, wrapped_hblks;
struct hlist_head rhash[XLOG_RHASH_SIZE];
ASSERT(head_blk != tail_blk);
/*
* Read the header of the tail block and get the iclog buffer size from
* h_size. Use this to tell how many sectors make up the log header.
*/
if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
/*
* When using variable length iclogs, read first sector of
* iclog header and extract the header size from it. Get a
* new hbp that is the correct size.
*/
hbp = xlog_get_bp(log, 1);
if (!hbp)
return ENOMEM;
error = xlog_bread(log, tail_blk, 1, hbp, &offset);
if (error)
goto bread_err1;
rhead = (xlog_rec_header_t *)offset;
error = xlog_valid_rec_header(log, rhead, tail_blk);
if (error)
goto bread_err1;
h_size = be32_to_cpu(rhead->h_size);
if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
(h_size > XLOG_HEADER_CYCLE_SIZE)) {
hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
if (h_size % XLOG_HEADER_CYCLE_SIZE)
hblks++;
xlog_put_bp(hbp);
hbp = xlog_get_bp(log, hblks);
} else {
hblks = 1;
}
} else {
ASSERT(log->l_sectBBsize == 1);
hblks = 1;
hbp = xlog_get_bp(log, 1);
h_size = XLOG_BIG_RECORD_BSIZE;
}
if (!hbp)
return ENOMEM;
dbp = xlog_get_bp(log, BTOBB(h_size));
if (!dbp) {
xlog_put_bp(hbp);
return ENOMEM;
}
memset(rhash, 0, sizeof(rhash));
if (tail_blk <= head_blk) {
for (blk_no = tail_blk; blk_no < head_blk; ) {
error = xlog_bread(log, blk_no, hblks, hbp, &offset);
if (error)
goto bread_err2;
rhead = (xlog_rec_header_t *)offset;
error = xlog_valid_rec_header(log, rhead, blk_no);
if (error)
goto bread_err2;
/* blocks in data section */
bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
error = xlog_bread(log, blk_no + hblks, bblks, dbp,
&offset);
if (error)
goto bread_err2;
xlog_unpack_data(rhead, offset, log);
if ((error = xlog_recover_process_data(log,
rhash, rhead, offset, pass)))
goto bread_err2;
blk_no += bblks + hblks;
}
} else {
/*
* Perform recovery around the end of the physical log.
* When the head is not on the same cycle number as the tail,
* we can't do a sequential recovery as above.
*/
blk_no = tail_blk;
while (blk_no < log->l_logBBsize) {
/*
* Check for header wrapping around physical end-of-log
*/
offset = hbp->b_addr;
split_hblks = 0;
wrapped_hblks = 0;
if (blk_no + hblks <= log->l_logBBsize) {
/* Read header in one read */
error = xlog_bread(log, blk_no, hblks, hbp,
&offset);
if (error)
goto bread_err2;
} else {
/* This LR is split across physical log end */
if (blk_no != log->l_logBBsize) {
/* some data before physical log end */
ASSERT(blk_no <= INT_MAX);
split_hblks = log->l_logBBsize - (int)blk_no;
ASSERT(split_hblks > 0);
error = xlog_bread(log, blk_no,
split_hblks, hbp,
&offset);
if (error)
goto bread_err2;
}
/*
* Note: this black magic still works with
* large sector sizes (non-512) only because:
* - we increased the buffer size originally
* by 1 sector giving us enough extra space
* for the second read;
* - the log start is guaranteed to be sector
* aligned;
* - we read the log end (LR header start)
* _first_, then the log start (LR header end)
* - order is important.
*/
wrapped_hblks = hblks - split_hblks;
error = xlog_bread_offset(log, 0,
wrapped_hblks, hbp,
offset + BBTOB(split_hblks));
if (error)
goto bread_err2;
}
rhead = (xlog_rec_header_t *)offset;
error = xlog_valid_rec_header(log, rhead,
split_hblks ? blk_no : 0);
if (error)
goto bread_err2;
bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
blk_no += hblks;
/* Read in data for log record */
if (blk_no + bblks <= log->l_logBBsize) {
error = xlog_bread(log, blk_no, bblks, dbp,
&offset);
if (error)
goto bread_err2;
} else {
/* This log record is split across the
* physical end of log */
offset = dbp->b_addr;
split_bblks = 0;
if (blk_no != log->l_logBBsize) {
/* some data is before the physical
* end of log */
ASSERT(!wrapped_hblks);
ASSERT(blk_no <= INT_MAX);
split_bblks =
log->l_logBBsize - (int)blk_no;
ASSERT(split_bblks > 0);
error = xlog_bread(log, blk_no,
split_bblks, dbp,
&offset);
if (error)
goto bread_err2;
}
/*
* Note: this black magic still works with
* large sector sizes (non-512) only because:
* - we increased the buffer size originally
* by 1 sector giving us enough extra space
* for the second read;
* - the log start is guaranteed to be sector
* aligned;
* - we read the log end (LR header start)
* _first_, then the log start (LR header end)
* - order is important.
*/
error = xlog_bread_offset(log, 0,
bblks - split_bblks, dbp,
offset + BBTOB(split_bblks));
if (error)
goto bread_err2;
}
xlog_unpack_data(rhead, offset, log);
if ((error = xlog_recover_process_data(log, rhash,
rhead, offset, pass)))
goto bread_err2;
blk_no += bblks;
}
ASSERT(blk_no >= log->l_logBBsize);
blk_no -= log->l_logBBsize;
/* read first part of physical log */
while (blk_no < head_blk) {
error = xlog_bread(log, blk_no, hblks, hbp, &offset);
if (error)
goto bread_err2;
rhead = (xlog_rec_header_t *)offset;
error = xlog_valid_rec_header(log, rhead, blk_no);
if (error)
goto bread_err2;
bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
error = xlog_bread(log, blk_no+hblks, bblks, dbp,
&offset);
if (error)
goto bread_err2;
xlog_unpack_data(rhead, offset, log);
if ((error = xlog_recover_process_data(log, rhash,
rhead, offset, pass)))
goto bread_err2;
blk_no += bblks + hblks;
}
}
bread_err2:
xlog_put_bp(dbp);
bread_err1:
xlog_put_bp(hbp);
return error;
}
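/*
 * Illustrative sketch, not part of the original file: the wrap handling
 * above is the standard circular-buffer split read -- copy what fits
 * before the physical end, then continue from offset zero. The same
 * two-step in userspace, built separately from this file:
 */
#if 0
#include <stdio.h>
#include <string.h>

/* Read len bytes from a circular log of log_size bytes, starting at pos. */
static void demo_circular_read(unsigned char *dst, const unsigned char *log,
			       size_t log_size, size_t pos, size_t len)
{
	size_t first = log_size - pos;	/* bytes before the physical end */

	if (first >= len) {
		memcpy(dst, log + pos, len);		/* one contiguous read */
	} else {
		memcpy(dst, log + pos, first);		/* tail piece first */
		memcpy(dst + first, log, len - first);	/* then the wrapped piece */
	}
}

int main(void)
{
	const unsigned char log[8] = { 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H' };
	unsigned char out[4];

	demo_circular_read(out, log, sizeof(log), 6, 4);	/* GH, then AB */
	printf("%.4s\n", (const char *)out);	/* prints GHAB */
	return 0;
}
#endif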
/*
* Do the recovery of the log. We actually do this in two phases.
* The two passes are necessary in order to implement the function
* of cancelling a record written into the log. The first pass
* determines those things which have been cancelled, and the
* second pass replays log items normally except for those which
* have been cancelled. The handling of the replay and cancellations
* takes place in the log item type specific routines.
*
* The table of items which have cancel records in the log is allocated
* and freed at this level, since only here do we know when all of
* the log recovery has been completed.
*/
STATIC int
xlog_do_log_recovery(
xlog_t *log,
xfs_daddr_t head_blk,
xfs_daddr_t tail_blk)
{
int error, i;
ASSERT(head_blk != tail_blk);
/*
* First do a pass to find all of the cancelled buf log items.
* Store them in the buf_cancel_table for use in the second pass.
*/
log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
sizeof(struct list_head),
KM_SLEEP);
for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
error = xlog_do_recovery_pass(log, head_blk, tail_blk,
XLOG_RECOVER_PASS1);
if (error != 0) {
kmem_free(log->l_buf_cancel_table);
log->l_buf_cancel_table = NULL;
return error;
}
/*
* Then do a second pass to actually recover the items in the log.
* When it is complete free the table of buf cancel items.
*/
error = xlog_do_recovery_pass(log, head_blk, tail_blk,
XLOG_RECOVER_PASS2);
#ifdef DEBUG
if (!error) {
int i;
for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
ASSERT(list_empty(&log->l_buf_cancel_table[i]));
}
#endif /* DEBUG */
kmem_free(log->l_buf_cancel_table);
log->l_buf_cancel_table = NULL;
return error;
}
/*
* Do the actual recovery
*/
STATIC int
xlog_do_recover(
xlog_t *log,
xfs_daddr_t head_blk,
xfs_daddr_t tail_blk)
{
int error;
xfs_buf_t *bp;
xfs_sb_t *sbp;
/*
* First replay the images in the log.
*/
error = xlog_do_log_recovery(log, head_blk, tail_blk);
if (error) {
return error;
}
xfs_flush_buftarg(log->l_mp->m_ddev_targp, 1);
/*
* If IO errors happened during recovery, bail out.
*/
if (XFS_FORCED_SHUTDOWN(log->l_mp)) {
return (EIO);
}
/*
* We now update the tail_lsn since much of the recovery has completed
 * and there may be space available to use. If there were no extent frees
 * or iunlinks, we can free up the entire log and set the tail_lsn to
* be the last_sync_lsn. This was set in xlog_find_tail to be the
* lsn of the last known good LR on disk. If there are extent frees
* or iunlinks they will have some entries in the AIL; so we look at
* the AIL to determine how to set the tail_lsn.
*/
xlog_assign_tail_lsn(log->l_mp);
/*
* Now that we've finished replaying all buffer and inode
* updates, re-read in the superblock.
*/
bp = xfs_getsb(log->l_mp, 0);
XFS_BUF_UNDONE(bp);
ASSERT(!(XFS_BUF_ISWRITE(bp)));
ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
XFS_BUF_READ(bp);
XFS_BUF_UNASYNC(bp);
xfsbdstrat(log->l_mp, bp);
error = xfs_buf_iowait(bp);
if (error) {
xfs_buf_ioerror_alert(bp, __func__);
ASSERT(0);
xfs_buf_relse(bp);
return error;
}
/* Convert superblock from on-disk format */
sbp = &log->l_mp->m_sb;
xfs_sb_from_disk(log->l_mp, XFS_BUF_TO_SBP(bp));
ASSERT(sbp->sb_magicnum == XFS_SB_MAGIC);
ASSERT(xfs_sb_good_version(sbp));
xfs_buf_relse(bp);
/* We've re-read the superblock so re-initialize per-cpu counters */
xfs_icsb_reinit_counters(log->l_mp);
xlog_recover_check_summary(log);
/* Normal transactions can now occur */
log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
return 0;
}
/*
* Perform recovery and re-initialize some log variables in xlog_find_tail.
*
* Return error or zero.
*/
int
xlog_recover(
xlog_t *log)
{
xfs_daddr_t head_blk, tail_blk;
int error;
/* find the tail of the log */
if ((error = xlog_find_tail(log, &head_blk, &tail_blk)))
return error;
if (tail_blk != head_blk) {
/* There used to be a comment here:
*
* disallow recovery on read-only mounts. note -- mount
* checks for ENOSPC and turns it into an intelligent
* error message.
* ...but this is no longer true. Now, unless you specify
* NORECOVERY (in which case this function would never be
* called), we just go ahead and recover. We do this all
* under the vfs layer, so we can get away with it unless
* the device itself is read-only, in which case we fail.
*/
if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
return error;
}
xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
log->l_mp->m_logname ? log->l_mp->m_logname
: "internal");
error = xlog_do_recover(log, head_blk, tail_blk);
log->l_flags |= XLOG_RECOVERY_NEEDED;
}
return error;
}
/*
* In the first part of recovery we replay inodes and buffers and build
* up the list of extent free items which need to be processed. Here
* we process the extent free items and clean up the on disk unlinked
* inode lists. This is separated from the first part of recovery so
* that the root and real-time bitmap inodes can be read in from disk in
* between the two stages. This is necessary so that we can free space
* in the real-time portion of the file system.
*/
int
xlog_recover_finish(
xlog_t *log)
{
/*
* Now we're ready to do the transactions needed for the
* rest of recovery. Start with completing all the extent
* free intent records and then process the unlinked inode
* lists. At this point, we essentially run in normal mode
* except that we're still performing recovery actions
* rather than accepting new requests.
*/
if (log->l_flags & XLOG_RECOVERY_NEEDED) {
int error;
error = xlog_recover_process_efis(log);
if (error) {
xfs_alert(log->l_mp, "Failed to recover EFIs");
return error;
}
/*
* Sync the log to get all the EFIs out of the AIL.
* This isn't absolutely necessary, but it helps in
* case the unlink transactions would have problems
* pushing the EFIs out of the way.
*/
xfs_log_force(log->l_mp, XFS_LOG_SYNC);
xlog_recover_process_iunlinks(log);
xlog_recover_check_summary(log);
xfs_notice(log->l_mp, "Ending recovery (logdev: %s)",
log->l_mp->m_logname ? log->l_mp->m_logname
: "internal");
log->l_flags &= ~XLOG_RECOVERY_NEEDED;
} else {
xfs_info(log->l_mp, "Ending clean mount");
}
return 0;
}
#if defined(DEBUG)
/*
* Read all of the agf and agi counters and check that they
* are consistent with the superblock counters.
*/
void
xlog_recover_check_summary(
xlog_t *log)
{
xfs_mount_t *mp;
xfs_agf_t *agfp;
xfs_buf_t *agfbp;
xfs_buf_t *agibp;
xfs_agnumber_t agno;
__uint64_t freeblks;
__uint64_t itotal;
__uint64_t ifree;
int error;
mp = log->l_mp;
freeblks = 0LL;
itotal = 0LL;
ifree = 0LL;
for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
if (error) {
xfs_alert(mp, "%s agf read failed agno %d error %d",
__func__, agno, error);
} else {
agfp = XFS_BUF_TO_AGF(agfbp);
freeblks += be32_to_cpu(agfp->agf_freeblks) +
be32_to_cpu(agfp->agf_flcount);
xfs_buf_relse(agfbp);
}
error = xfs_read_agi(mp, NULL, agno, &agibp);
if (error) {
xfs_alert(mp, "%s agi read failed agno %d error %d",
__func__, agno, error);
} else {
struct xfs_agi *agi = XFS_BUF_TO_AGI(agibp);
itotal += be32_to_cpu(agi->agi_count);
ifree += be32_to_cpu(agi->agi_freecount);
xfs_buf_relse(agibp);
}
}
}
#endif /* DEBUG */
| gpl-2.0 |
zeeshanhussain/inazuma-msm8916 | drivers/staging/ozwpan/ozusbsvc.c | 2048 | 7676 | /* -----------------------------------------------------------------------------
* Copyright (c) 2011 Ozmo Inc
* Released under the GNU General Public License Version 2 (GPLv2).
*
 * This file provides the protocol-independent part of the implementation of the
 * USB service for a PD.
 * The implementation of this service is split into two parts, the first of which
 * is protocol independent while the second contains the protocol-specific details.
 * This split is to allow alternative protocols to be defined.
* The implementation of this service uses ozhcd.c to implement a USB HCD.
* -----------------------------------------------------------------------------
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/netdevice.h>
#include <linux/errno.h>
#include <linux/input.h>
#include <asm/unaligned.h>
#include "ozconfig.h"
#include "ozprotocol.h"
#include "ozeltbuf.h"
#include "ozpd.h"
#include "ozproto.h"
#include "ozusbif.h"
#include "ozhcd.h"
#include "oztrace.h"
#include "ozusbsvc.h"
#include "ozevent.h"
/*------------------------------------------------------------------------------
* This is called once when the driver is loaded to initialise the USB service.
* Context: process
*/
int oz_usb_init(void)
{
oz_event_log(OZ_EVT_SERVICE, 1, OZ_APPID_USB, NULL, 0);
return oz_hcd_init();
}
/*------------------------------------------------------------------------------
* This is called once when the driver is unloaded to terminate the USB service.
* Context: process
*/
void oz_usb_term(void)
{
oz_event_log(OZ_EVT_SERVICE, 2, OZ_APPID_USB, NULL, 0);
oz_hcd_term();
}
/*------------------------------------------------------------------------------
* This is called when the USB service is started or resumed for a PD.
* Context: softirq
*/
int oz_usb_start(struct oz_pd *pd, int resume)
{
int rc = 0;
struct oz_usb_ctx *usb_ctx;
struct oz_usb_ctx *old_ctx;
oz_event_log(OZ_EVT_SERVICE, 3, OZ_APPID_USB, NULL, resume);
if (resume) {
oz_trace("USB service resumed.\n");
return 0;
}
oz_trace("USB service started.\n");
/* Create a USB context in case we need one. If we find the PD already
* has a USB context then we will destroy it.
*/
usb_ctx = kzalloc(sizeof(struct oz_usb_ctx), GFP_ATOMIC);
if (usb_ctx == NULL)
return -ENOMEM;
atomic_set(&usb_ctx->ref_count, 1);
usb_ctx->pd = pd;
usb_ctx->stopped = 0;
/* Install the USB context if the PD doesn't already have one.
* If it does already have one then destroy the one we have just
* created.
*/
spin_lock_bh(&pd->app_lock[OZ_APPID_USB-1]);
old_ctx = pd->app_ctx[OZ_APPID_USB-1];
if (old_ctx == NULL)
pd->app_ctx[OZ_APPID_USB-1] = usb_ctx;
oz_usb_get(pd->app_ctx[OZ_APPID_USB-1]);
spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]);
if (old_ctx) {
oz_trace("Already have USB context.\n");
kfree(usb_ctx);
usb_ctx = old_ctx;
} else if (usb_ctx) {
/* Take a reference to the PD. This will be released when
* the USB context is destroyed.
*/
oz_pd_get(pd);
}
/* If we already had a USB context and had obtained a port from
* the USB HCD then just reset the port. If we didn't have a port
* then report the arrival to the USB HCD so we get one.
*/
if (usb_ctx->hport) {
oz_hcd_pd_reset(usb_ctx, usb_ctx->hport);
} else {
usb_ctx->hport = oz_hcd_pd_arrived(usb_ctx);
if (usb_ctx->hport == NULL) {
oz_trace("USB hub returned null port.\n");
spin_lock_bh(&pd->app_lock[OZ_APPID_USB-1]);
pd->app_ctx[OZ_APPID_USB-1] = NULL;
spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]);
oz_usb_put(usb_ctx);
rc = -1;
}
}
oz_usb_put(usb_ctx);
return rc;
}
/*------------------------------------------------------------------------------
* This is called when the USB service is stopped or paused for a PD.
* Context: softirq or process
*/
void oz_usb_stop(struct oz_pd *pd, int pause)
{
struct oz_usb_ctx *usb_ctx;
oz_event_log(OZ_EVT_SERVICE, 4, OZ_APPID_USB, NULL, pause);
if (pause) {
oz_trace("USB service paused.\n");
return;
}
spin_lock_bh(&pd->app_lock[OZ_APPID_USB-1]);
usb_ctx = (struct oz_usb_ctx *)pd->app_ctx[OZ_APPID_USB-1];
pd->app_ctx[OZ_APPID_USB-1] = NULL;
spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]);
if (usb_ctx) {
unsigned long tout = jiffies + HZ;
oz_trace("USB service stopping...\n");
usb_ctx->stopped = 1;
/* At this point the reference count on the usb context should
* be 2 - one from when we created it and one from the hcd
* which claims a reference. Since stopped = 1 no one else
* should get in but someone may already be in. So wait
* until they leave but timeout after 1 second.
*/
while ((atomic_read(&usb_ctx->ref_count) > 2) &&
time_before(jiffies, tout))
;
oz_trace("USB service stopped.\n");
oz_hcd_pd_departed(usb_ctx->hport);
/* Release the reference taken in oz_usb_start.
*/
oz_usb_put(usb_ctx);
}
}
/*------------------------------------------------------------------------------
* This increments the reference count of the context area for a specific PD.
* This ensures this context area does not disappear while still in use.
* Context: softirq
*/
void oz_usb_get(void *hpd)
{
struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
atomic_inc(&usb_ctx->ref_count);
}
/*------------------------------------------------------------------------------
* This decrements the reference count of the context area for a specific PD
* and destroys the context area if the reference count becomes zero.
* Context: softirq or process
*/
void oz_usb_put(void *hpd)
{
struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
if (atomic_dec_and_test(&usb_ctx->ref_count)) {
oz_trace("Dealloc USB context.\n");
oz_pd_put(usb_ctx->pd);
kfree(usb_ctx);
}
}
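/*
 * Illustrative sketch, not part of the original file: oz_usb_get/oz_usb_put
 * are the usual atomic reference-count pair, where the last put frees the
 * object. The same pattern with C11 atomics and hypothetical demo_* names,
 * built separately from this file:
 */
#if 0
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_ctx {
	atomic_int	ref_count;
	int		payload;
};

static struct demo_ctx *demo_ctx_new(int payload)
{
	struct demo_ctx *ctx = calloc(1, sizeof(*ctx));

	if (ctx) {
		atomic_init(&ctx->ref_count, 1);	/* creator's reference */
		ctx->payload = payload;
	}
	return ctx;
}

static void demo_ctx_get(struct demo_ctx *ctx)
{
	atomic_fetch_add(&ctx->ref_count, 1);
}

static void demo_ctx_put(struct demo_ctx *ctx)
{
	/* fetch_sub returns the old value; 1 means we dropped the last ref */
	if (atomic_fetch_sub(&ctx->ref_count, 1) == 1) {
		printf("last put, freeing ctx (payload %d)\n", ctx->payload);
		free(ctx);
	}
}

int main(void)
{
	struct demo_ctx *ctx = demo_ctx_new(42);

	if (!ctx)
		return 1;
	demo_ctx_get(ctx);	/* a second user takes a reference */
	demo_ctx_put(ctx);	/* first drop: object survives */
	demo_ctx_put(ctx);	/* last drop: object is freed */
	return 0;
}
#endif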
/*------------------------------------------------------------------------------
* Context: softirq
*/
int oz_usb_heartbeat(struct oz_pd *pd)
{
struct oz_usb_ctx *usb_ctx;
int rc = 0;
spin_lock_bh(&pd->app_lock[OZ_APPID_USB-1]);
usb_ctx = (struct oz_usb_ctx *)pd->app_ctx[OZ_APPID_USB-1];
if (usb_ctx)
oz_usb_get(usb_ctx);
spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]);
if (usb_ctx == NULL)
return rc;
if (usb_ctx->stopped)
goto done;
if (usb_ctx->hport)
if (oz_hcd_heartbeat(usb_ctx->hport))
rc = 1;
done:
oz_usb_put(usb_ctx);
return rc;
}
/*------------------------------------------------------------------------------
* Context: softirq
*/
int oz_usb_stream_create(void *hpd, u8 ep_num)
{
struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
struct oz_pd *pd = usb_ctx->pd;
oz_trace("oz_usb_stream_create(0x%x)\n", ep_num);
if (pd->mode & OZ_F_ISOC_NO_ELTS) {
oz_isoc_stream_create(pd, ep_num);
} else {
oz_pd_get(pd);
if (oz_elt_stream_create(&pd->elt_buff, ep_num,
4*pd->max_tx_size)) {
oz_pd_put(pd);
return -1;
}
}
return 0;
}
/*------------------------------------------------------------------------------
* Context: softirq
*/
int oz_usb_stream_delete(void *hpd, u8 ep_num)
{
struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
if (usb_ctx) {
struct oz_pd *pd = usb_ctx->pd;
if (pd) {
oz_trace("oz_usb_stream_delete(0x%x)\n", ep_num);
if (pd->mode & OZ_F_ISOC_NO_ELTS) {
oz_isoc_stream_delete(pd, ep_num);
} else {
if (oz_elt_stream_delete(&pd->elt_buff, ep_num))
return -1;
oz_pd_put(pd);
}
}
}
return 0;
}
/*------------------------------------------------------------------------------
* Context: softirq or process
*/
void oz_usb_request_heartbeat(void *hpd)
{
struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
if (usb_ctx && usb_ctx->pd)
oz_pd_request_heartbeat(usb_ctx->pd);
}
| gpl-2.0 |
abyssxsy/linux-tk1 | arch/x86/kernel/microcode_core.c | 2048 | 15828 | /*
* Intel CPU Microcode Update Driver for Linux
*
* Copyright (C) 2000-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
* 2006 Shaohua Li <shaohua.li@intel.com>
*
 * This driver allows upgrading microcode on Intel processors
 * belonging to the IA-32 family - PentiumPro, Pentium II,
* Pentium III, Xeon, Pentium 4, etc.
*
 * Reference: Section 8.11 of Volume 3a, IA-32 Intel® Architecture
* Software Developer's Manual
* Order Number 253668 or free download from:
*
* http://developer.intel.com/Assets/PDF/manual/253668.pdf
*
* For more information, go to http://www.urbanmyth.org/microcode
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* 1.0 16 Feb 2000, Tigran Aivazian <tigran@sco.com>
* Initial release.
* 1.01 18 Feb 2000, Tigran Aivazian <tigran@sco.com>
* Added read() support + cleanups.
* 1.02 21 Feb 2000, Tigran Aivazian <tigran@sco.com>
* Added 'device trimming' support. open(O_WRONLY) zeroes
* and frees the saved copy of applied microcode.
* 1.03 29 Feb 2000, Tigran Aivazian <tigran@sco.com>
* Made to use devfs (/dev/cpu/microcode) + cleanups.
* 1.04 06 Jun 2000, Simon Trimmer <simon@veritas.com>
* Added misc device support (now uses both devfs and misc).
* Added MICROCODE_IOCFREE ioctl to clear memory.
* 1.05 09 Jun 2000, Simon Trimmer <simon@veritas.com>
* Messages for error cases (non Intel & no suitable microcode).
* 1.06 03 Aug 2000, Tigran Aivazian <tigran@veritas.com>
* Removed ->release(). Removed exclusive open and status bitmap.
* Added microcode_rwsem to serialize read()/write()/ioctl().
* Removed global kernel lock usage.
* 1.07 07 Sep 2000, Tigran Aivazian <tigran@veritas.com>
* Write 0 to 0x8B msr and then cpuid before reading revision,
* so that it works even if there were no update done by the
* BIOS. Otherwise, reading from 0x8B gives junk (which happened
* to be 0 on my machine which is why it worked even when I
* disabled update by the BIOS)
* Thanks to Eric W. Biederman <ebiederman@lnxi.com> for the fix.
* 1.08 11 Dec 2000, Richard Schaal <richard.schaal@intel.com> and
* Tigran Aivazian <tigran@veritas.com>
* Intel Pentium 4 processor support and bugfixes.
* 1.09 30 Oct 2001, Tigran Aivazian <tigran@veritas.com>
* Bugfix for HT (Hyper-Threading) enabled processors
* whereby processor resources are shared by all logical processors
* in a single CPU package.
* 1.10 28 Feb 2002 Asit K Mallick <asit.k.mallick@intel.com> and
* Tigran Aivazian <tigran@veritas.com>,
* Serialize updates as required on HT processors due to
* speculative nature of implementation.
* 1.11 22 Mar 2002 Tigran Aivazian <tigran@veritas.com>
* Fix the panic when writing zero-length microcode chunk.
* 1.12 29 Sep 2003 Nitin Kamble <nitin.a.kamble@intel.com>,
* Jun Nakajima <jun.nakajima@intel.com>
* Support for the microcode updates in the new format.
* 1.13 10 Oct 2003 Tigran Aivazian <tigran@veritas.com>
* Removed ->read() method and obsoleted MICROCODE_IOCFREE ioctl
* because we no longer hold a copy of applied microcode
* in kernel memory.
* 1.14 25 Jun 2004 Tigran Aivazian <tigran@veritas.com>
* Fix sigmatch() macro to handle old CPUs with pf == 0.
* Thanks to Stuart Swales for pointing out this bug.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/platform_device.h>
#include <linux/miscdevice.h>
#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/cpu.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/syscore_ops.h>
#include <asm/microcode.h>
#include <asm/processor.h>
#include <asm/cpu_device_id.h>
#include <asm/perf_event.h>
MODULE_DESCRIPTION("Microcode Update Driver");
MODULE_AUTHOR("Tigran Aivazian <tigran@aivazian.fsnet.co.uk>");
MODULE_LICENSE("GPL");
#define MICROCODE_VERSION "2.00"
static struct microcode_ops *microcode_ops;
/*
* Synchronization.
*
* All non cpu-hotplug-callback call sites use:
*
* - microcode_mutex to synchronize with each other;
* - get/put_online_cpus() to synchronize with
* the cpu-hotplug-callback call sites.
*
* We guarantee that only a single cpu is being
* updated at any particular moment of time.
*/
static DEFINE_MUTEX(microcode_mutex);
struct ucode_cpu_info ucode_cpu_info[NR_CPUS];
EXPORT_SYMBOL_GPL(ucode_cpu_info);
/*
* Operations that are run on a target cpu:
*/
struct cpu_info_ctx {
struct cpu_signature *cpu_sig;
int err;
};
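/*
 * smp_call_function_single() only reports whether the cross-CPU call
 * was delivered; the callee's own result travels back through this
 * context structure.
 */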
static void collect_cpu_info_local(void *arg)
{
struct cpu_info_ctx *ctx = arg;
ctx->err = microcode_ops->collect_cpu_info(smp_processor_id(),
ctx->cpu_sig);
}
static int collect_cpu_info_on_target(int cpu, struct cpu_signature *cpu_sig)
{
struct cpu_info_ctx ctx = { .cpu_sig = cpu_sig, .err = 0 };
int ret;
ret = smp_call_function_single(cpu, collect_cpu_info_local, &ctx, 1);
if (!ret)
ret = ctx.err;
return ret;
}
static int collect_cpu_info(int cpu)
{
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
int ret;
memset(uci, 0, sizeof(*uci));
ret = collect_cpu_info_on_target(cpu, &uci->cpu_sig);
if (!ret)
uci->valid = 1;
return ret;
}
struct apply_microcode_ctx {
int err;
};
static void apply_microcode_local(void *arg)
{
struct apply_microcode_ctx *ctx = arg;
ctx->err = microcode_ops->apply_microcode(smp_processor_id());
}
static int apply_microcode_on_target(int cpu)
{
struct apply_microcode_ctx ctx = { .err = 0 };
int ret;
ret = smp_call_function_single(cpu, apply_microcode_local, &ctx, 1);
if (!ret)
ret = ctx.err;
return ret;
}
#ifdef CONFIG_MICROCODE_OLD_INTERFACE
static int do_microcode_update(const void __user *buf, size_t size)
{
int error = 0;
int cpu;
for_each_online_cpu(cpu) {
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
enum ucode_state ustate;
if (!uci->valid)
continue;
ustate = microcode_ops->request_microcode_user(cpu, buf, size);
if (ustate == UCODE_ERROR) {
error = -1;
break;
} else if (ustate == UCODE_OK)
apply_microcode_on_target(cpu);
}
return error;
}
static int microcode_open(struct inode *inode, struct file *file)
{
return capable(CAP_SYS_RAWIO) ? nonseekable_open(inode, file) : -EPERM;
}
static ssize_t microcode_write(struct file *file, const char __user *buf,
size_t len, loff_t *ppos)
{
ssize_t ret = -EINVAL;
if ((len >> PAGE_SHIFT) > totalram_pages) {
pr_err("too much data (max %ld pages)\n", totalram_pages);
return ret;
}
get_online_cpus();
	mutex_lock(&microcode_mutex);
	if (do_microcode_update(buf, len) == 0)
		ret = (ssize_t)len;
	if (ret > 0)
		perf_check_microcode();
	mutex_unlock(&microcode_mutex);
put_online_cpus();
return ret;
}
static const struct file_operations microcode_fops = {
.owner = THIS_MODULE,
.write = microcode_write,
.open = microcode_open,
.llseek = no_llseek,
};
static struct miscdevice microcode_dev = {
.minor = MICROCODE_MINOR,
.name = "microcode",
.nodename = "cpu/microcode",
	.fops = &microcode_fops,
};
static int __init microcode_dev_init(void)
{
int error;
	error = misc_register(&microcode_dev);
if (error) {
pr_err("can't misc_register on minor=%d\n", MICROCODE_MINOR);
return error;
}
return 0;
}
static void __exit microcode_dev_exit(void)
{
	misc_deregister(&microcode_dev);
}
MODULE_ALIAS_MISCDEV(MICROCODE_MINOR);
MODULE_ALIAS("devname:cpu/microcode");
#else
#define microcode_dev_init() 0
#define microcode_dev_exit() do { } while (0)
#endif
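/*
 * Usage sketch for the old interface above (a minimal userspace
 * illustration, not shipped tooling): a process with CAP_SYS_RAWIO
 * writes a raw microcode image straight to the device node:
 *
 *	int fd = open("/dev/cpu/microcode", O_WRONLY);
 *	if (write(fd, image, image_len) != image_len)
 *		perror("microcode update");
 *	close(fd);
 */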
/* fake device for request_firmware */
static struct platform_device *microcode_pdev;
static int reload_for_cpu(int cpu)
{
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
enum ucode_state ustate;
int err = 0;
if (!uci->valid)
return err;
	ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, true);
if (ustate == UCODE_OK)
apply_microcode_on_target(cpu);
	else if (ustate == UCODE_ERROR)
		err = -EINVAL;
return err;
}
static ssize_t reload_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
unsigned long val;
int cpu;
ssize_t ret = 0, tmp_ret;
ret = kstrtoul(buf, 0, &val);
if (ret)
return ret;
if (val != 1)
return size;
get_online_cpus();
	mutex_lock(&microcode_mutex);
	for_each_online_cpu(cpu) {
		tmp_ret = reload_for_cpu(cpu);
		if (tmp_ret != 0)
			pr_warn("Error reloading microcode on CPU %d\n", cpu);
		/* save retval of the first encountered reload error */
		if (!ret)
			ret = tmp_ret;
	}
	if (!ret)
		perf_check_microcode();
	mutex_unlock(&microcode_mutex);
put_online_cpus();
if (!ret)
ret = size;
return ret;
}
static ssize_t version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;
return sprintf(buf, "0x%x\n", uci->cpu_sig.rev);
}
static ssize_t pf_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;
return sprintf(buf, "0x%x\n", uci->cpu_sig.pf);
}
static DEVICE_ATTR(reload, 0200, NULL, reload_store);
static DEVICE_ATTR(version, 0400, version_show, NULL);
static DEVICE_ATTR(processor_flags, 0400, pf_show, NULL);
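/*
 * Sysfs usage sketch for the attributes above (assuming sysfs is
 * mounted at /sys):
 *
 *	echo 1 > /sys/devices/system/cpu/microcode/reload	- reload on all CPUs
 *	cat /sys/devices/system/cpu/cpu0/microcode/version	- applied revision
 *	cat /sys/devices/system/cpu/cpu0/microcode/processor_flags
 */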
static struct attribute *mc_default_attrs[] = {
&dev_attr_version.attr,
&dev_attr_processor_flags.attr,
NULL
};
static struct attribute_group mc_attr_group = {
.attrs = mc_default_attrs,
.name = "microcode",
};
static void microcode_fini_cpu(int cpu)
{
microcode_ops->microcode_fini_cpu(cpu);
}
static enum ucode_state microcode_resume_cpu(int cpu)
{
pr_debug("CPU%d updated upon resume\n", cpu);
if (apply_microcode_on_target(cpu))
return UCODE_ERROR;
return UCODE_OK;
}
static enum ucode_state microcode_init_cpu(int cpu, bool refresh_fw)
{
enum ucode_state ustate;
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
if (uci && uci->valid)
return UCODE_OK;
if (collect_cpu_info(cpu))
return UCODE_ERROR;
	/* System is not fully up yet, so firmware cannot be requested now; trigger a delayed update? */
if (system_state != SYSTEM_RUNNING)
return UCODE_NFOUND;
	ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev,
refresh_fw);
if (ustate == UCODE_OK) {
pr_debug("CPU%d updated upon init\n", cpu);
apply_microcode_on_target(cpu);
}
return ustate;
}
static enum ucode_state microcode_update_cpu(int cpu)
{
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
if (uci->valid)
return microcode_resume_cpu(cpu);
return microcode_init_cpu(cpu, false);
}
static int mc_device_add(struct device *dev, struct subsys_interface *sif)
{
int err, cpu = dev->id;
if (!cpu_online(cpu))
return 0;
pr_debug("CPU%d added\n", cpu);
err = sysfs_create_group(&dev->kobj, &mc_attr_group);
if (err)
return err;
if (microcode_init_cpu(cpu, true) == UCODE_ERROR)
return -EINVAL;
return err;
}
static int mc_device_remove(struct device *dev, struct subsys_interface *sif)
{
int cpu = dev->id;
if (!cpu_online(cpu))
return 0;
pr_debug("CPU%d removed\n", cpu);
microcode_fini_cpu(cpu);
sysfs_remove_group(&dev->kobj, &mc_attr_group);
return 0;
}
static struct subsys_interface mc_cpu_interface = {
.name = "microcode",
.subsys = &cpu_subsys,
.add_dev = mc_device_add,
.remove_dev = mc_device_remove,
};
/**
* mc_bp_resume - Update boot CPU microcode during resume.
*/
static void mc_bp_resume(void)
{
int cpu = smp_processor_id();
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
if (uci->valid && uci->mc)
microcode_ops->apply_microcode(cpu);
}
static struct syscore_ops mc_syscore_ops = {
.resume = mc_bp_resume,
};
static __cpuinit int
mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
struct device *dev;
dev = get_cpu_device(cpu);
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
microcode_update_cpu(cpu);
pr_debug("CPU%d added\n", cpu);
/*
* "break" is missing on purpose here because we want to fall
* through in order to create the sysfs group.
*/
case CPU_DOWN_FAILED:
if (sysfs_create_group(&dev->kobj, &mc_attr_group))
pr_err("Failed to create group for CPU%d\n", cpu);
break;
case CPU_DOWN_PREPARE:
/* Suspend is in progress, only remove the interface */
sysfs_remove_group(&dev->kobj, &mc_attr_group);
pr_debug("CPU%d removed\n", cpu);
break;
/*
* case CPU_DEAD:
*
* When a CPU goes offline, don't free up or invalidate the copy of
* the microcode in kernel memory, so that we can reuse it when the
* CPU comes back online without unnecessarily requesting the userspace
* for it again.
*/
}
/* The CPU refused to come up during a system resume */
if (action == CPU_UP_CANCELED_FROZEN)
microcode_fini_cpu(cpu);
return NOTIFY_OK;
}
static struct notifier_block __refdata mc_cpu_notifier = {
.notifier_call = mc_cpu_callback,
};
#ifdef MODULE
/* Autoload on Intel and AMD systems */
static const struct x86_cpu_id __initconst microcode_id[] = {
#ifdef CONFIG_MICROCODE_INTEL
{ X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, },
#endif
#ifdef CONFIG_MICROCODE_AMD
{ X86_VENDOR_AMD, X86_FAMILY_ANY, X86_MODEL_ANY, },
#endif
{}
};
MODULE_DEVICE_TABLE(x86cpu, microcode_id);
#endif
static struct attribute *cpu_root_microcode_attrs[] = {
&dev_attr_reload.attr,
NULL
};
static struct attribute_group cpu_root_microcode_group = {
.name = "microcode",
.attrs = cpu_root_microcode_attrs,
};
static int __init microcode_init(void)
{
struct cpuinfo_x86 *c = &cpu_data(0);
int error;
if (c->x86_vendor == X86_VENDOR_INTEL)
microcode_ops = init_intel_microcode();
else if (c->x86_vendor == X86_VENDOR_AMD)
microcode_ops = init_amd_microcode();
else
pr_err("no support for this CPU vendor\n");
if (!microcode_ops)
return -ENODEV;
microcode_pdev = platform_device_register_simple("microcode", -1,
NULL, 0);
if (IS_ERR(microcode_pdev))
return PTR_ERR(microcode_pdev);
get_online_cpus();
	mutex_lock(&microcode_mutex);
	error = subsys_interface_register(&mc_cpu_interface);
	if (!error)
		perf_check_microcode();
	mutex_unlock(&microcode_mutex);
put_online_cpus();
if (error)
goto out_pdev;
error = sysfs_create_group(&cpu_subsys.dev_root->kobj,
&cpu_root_microcode_group);
if (error) {
pr_err("Error creating microcode group!\n");
goto out_driver;
}
error = microcode_dev_init();
if (error)
goto out_ucode_group;
register_syscore_ops(&mc_syscore_ops);
register_hotcpu_notifier(&mc_cpu_notifier);
pr_info("Microcode Update Driver: v" MICROCODE_VERSION
" <tigran@aivazian.fsnet.co.uk>, Peter Oruba\n");
return 0;
out_ucode_group:
sysfs_remove_group(&cpu_subsys.dev_root->kobj,
&cpu_root_microcode_group);
out_driver:
get_online_cpus();
	mutex_lock(&microcode_mutex);
	subsys_interface_unregister(&mc_cpu_interface);
	mutex_unlock(&microcode_mutex);
put_online_cpus();
out_pdev:
platform_device_unregister(microcode_pdev);
return error;
}
module_init(microcode_init);
static void __exit microcode_exit(void)
{
struct cpuinfo_x86 *c = &cpu_data(0);
microcode_dev_exit();
unregister_hotcpu_notifier(&mc_cpu_notifier);
unregister_syscore_ops(&mc_syscore_ops);
sysfs_remove_group(&cpu_subsys.dev_root->kobj,
&cpu_root_microcode_group);
get_online_cpus();
	mutex_lock(&microcode_mutex);
	subsys_interface_unregister(&mc_cpu_interface);
	mutex_unlock(&microcode_mutex);
put_online_cpus();
platform_device_unregister(microcode_pdev);
microcode_ops = NULL;
if (c->x86_vendor == X86_VENDOR_AMD)
exit_amd_microcode();
pr_info("Microcode Update Driver: v" MICROCODE_VERSION " removed.\n");
}
module_exit(microcode_exit);
| gpl-2.0 |
estiko/android_kernel_cyanogen_msm8916 | drivers/staging/ozwpan/ozpd.c | 2048 | 26026 | /* -----------------------------------------------------------------------------
* Copyright (c) 2011 Ozmo Inc
* Released under the GNU General Public License Version 2 (GPLv2).
* -----------------------------------------------------------------------------
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/netdevice.h>
#include <linux/errno.h>
#include "ozconfig.h"
#include "ozprotocol.h"
#include "ozeltbuf.h"
#include "ozpd.h"
#include "ozproto.h"
#include "oztrace.h"
#include "ozevent.h"
#include "ozcdev.h"
#include "ozusbsvc.h"
#include <asm/unaligned.h>
#include <linux/uaccess.h>
#include <net/psnap.h>
/*------------------------------------------------------------------------------
*/
#define OZ_MAX_TX_POOL_SIZE 6
/*------------------------------------------------------------------------------
*/
static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd);
static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f);
static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f);
static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f);
static int oz_send_isoc_frame(struct oz_pd *pd);
static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f);
static void oz_isoc_stream_free(struct oz_isoc_stream *st);
static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data);
static void oz_isoc_destructor(struct sk_buff *skb);
static int oz_def_app_init(void);
static void oz_def_app_term(void);
static int oz_def_app_start(struct oz_pd *pd, int resume);
static void oz_def_app_stop(struct oz_pd *pd, int pause);
static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt);
/*------------------------------------------------------------------------------
* Counts the uncompleted isoc frames submitted to netcard.
*/
static atomic_t g_submitted_isoc = ATOMIC_INIT(0);
/* Application handler functions.
*/
static const struct oz_app_if g_app_if[OZ_APPID_MAX] = {
{oz_usb_init,
oz_usb_term,
oz_usb_start,
oz_usb_stop,
oz_usb_rx,
oz_usb_heartbeat,
oz_usb_farewell,
OZ_APPID_USB},
{oz_def_app_init,
oz_def_app_term,
oz_def_app_start,
oz_def_app_stop,
oz_def_app_rx,
NULL,
NULL,
OZ_APPID_UNUSED1},
{oz_def_app_init,
oz_def_app_term,
oz_def_app_start,
oz_def_app_stop,
oz_def_app_rx,
NULL,
NULL,
OZ_APPID_UNUSED2},
{oz_cdev_init,
oz_cdev_term,
oz_cdev_start,
oz_cdev_stop,
oz_cdev_rx,
NULL,
NULL,
OZ_APPID_SERIAL},
};
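/*
 * Note: oz_handle_app_elt() indexes this table with app_id - 1, so the
 * entries above must stay in application-id order.
 */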
/*------------------------------------------------------------------------------
* Context: process
*/
static int oz_def_app_init(void)
{
return 0;
}
/*------------------------------------------------------------------------------
* Context: process
*/
static void oz_def_app_term(void)
{
}
/*------------------------------------------------------------------------------
* Context: softirq
*/
static int oz_def_app_start(struct oz_pd *pd, int resume)
{
return 0;
}
/*------------------------------------------------------------------------------
* Context: softirq
*/
static void oz_def_app_stop(struct oz_pd *pd, int pause)
{
}
/*------------------------------------------------------------------------------
* Context: softirq
*/
static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt)
{
}
/*------------------------------------------------------------------------------
* Context: softirq or process
*/
void oz_pd_set_state(struct oz_pd *pd, unsigned state)
{
pd->state = state;
oz_event_log(OZ_EVT_PD_STATE, 0, 0, NULL, state);
#ifdef WANT_TRACE
switch (state) {
case OZ_PD_S_IDLE:
oz_trace("PD State: OZ_PD_S_IDLE\n");
break;
case OZ_PD_S_CONNECTED:
oz_trace("PD State: OZ_PD_S_CONNECTED\n");
break;
case OZ_PD_S_STOPPED:
oz_trace("PD State: OZ_PD_S_STOPPED\n");
break;
case OZ_PD_S_SLEEP:
oz_trace("PD State: OZ_PD_S_SLEEP\n");
break;
}
#endif /* WANT_TRACE */
}
/*------------------------------------------------------------------------------
* Context: softirq or process
*/
void oz_pd_get(struct oz_pd *pd)
{
atomic_inc(&pd->ref_count);
}
/*------------------------------------------------------------------------------
* Context: softirq or process
*/
void oz_pd_put(struct oz_pd *pd)
{
if (atomic_dec_and_test(&pd->ref_count))
oz_pd_destroy(pd);
}
/*------------------------------------------------------------------------------
* Context: softirq-serialized
*/
struct oz_pd *oz_pd_alloc(const u8 *mac_addr)
{
struct oz_pd *pd = kzalloc(sizeof(struct oz_pd), GFP_ATOMIC);
if (pd) {
int i;
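		/*
		 * Start with two references: one owned by the protocol's PD
		 * list (see oz_pd_stop(), which drops it) and one returned
		 * to the caller.
		 */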
atomic_set(&pd->ref_count, 2);
for (i = 0; i < OZ_APPID_MAX; i++)
spin_lock_init(&pd->app_lock[i]);
pd->last_rx_pkt_num = 0xffffffff;
oz_pd_set_state(pd, OZ_PD_S_IDLE);
pd->max_tx_size = OZ_MAX_TX_SIZE;
memcpy(pd->mac_addr, mac_addr, ETH_ALEN);
if (0 != oz_elt_buf_init(&pd->elt_buff)) {
kfree(pd);
pd = NULL;
}
spin_lock_init(&pd->tx_frame_lock);
INIT_LIST_HEAD(&pd->tx_queue);
INIT_LIST_HEAD(&pd->farewell_list);
pd->last_sent_frame = &pd->tx_queue;
spin_lock_init(&pd->stream_lock);
INIT_LIST_HEAD(&pd->stream_list);
}
return pd;
}
/*------------------------------------------------------------------------------
* Context: softirq or process
*/
void oz_pd_destroy(struct oz_pd *pd)
{
struct list_head *e;
struct oz_tx_frame *f;
struct oz_isoc_stream *st;
struct oz_farewell *fwell;
oz_trace("Destroying PD\n");
/* Delete any streams.
*/
e = pd->stream_list.next;
while (e != &pd->stream_list) {
st = container_of(e, struct oz_isoc_stream, link);
e = e->next;
oz_isoc_stream_free(st);
}
/* Free any queued tx frames.
*/
e = pd->tx_queue.next;
while (e != &pd->tx_queue) {
f = container_of(e, struct oz_tx_frame, link);
e = e->next;
if (f->skb != NULL)
kfree_skb(f->skb);
oz_retire_frame(pd, f);
}
oz_elt_buf_term(&pd->elt_buff);
/* Free any farewells.
*/
e = pd->farewell_list.next;
while (e != &pd->farewell_list) {
fwell = container_of(e, struct oz_farewell, link);
e = e->next;
kfree(fwell);
}
/* Deallocate all frames in tx pool.
*/
while (pd->tx_pool) {
e = pd->tx_pool;
pd->tx_pool = e->next;
kfree(container_of(e, struct oz_tx_frame, link));
}
if (pd->net_dev)
dev_put(pd->net_dev);
kfree(pd);
}
/*------------------------------------------------------------------------------
* Context: softirq-serialized
*/
int oz_services_start(struct oz_pd *pd, u16 apps, int resume)
{
const struct oz_app_if *ai;
int rc = 0;
oz_trace("oz_services_start(0x%x) resume(%d)\n", apps, resume);
for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
if (apps & (1<<ai->app_id)) {
if (ai->start(pd, resume)) {
rc = -1;
oz_trace("Unabled to start service %d\n",
ai->app_id);
break;
}
oz_polling_lock_bh();
pd->total_apps |= (1<<ai->app_id);
if (resume)
pd->paused_apps &= ~(1<<ai->app_id);
oz_polling_unlock_bh();
}
}
return rc;
}
/*------------------------------------------------------------------------------
* Context: softirq or process
*/
void oz_services_stop(struct oz_pd *pd, u16 apps, int pause)
{
const struct oz_app_if *ai;
oz_trace("oz_stop_services(0x%x) pause(%d)\n", apps, pause);
for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
if (apps & (1<<ai->app_id)) {
oz_polling_lock_bh();
if (pause) {
pd->paused_apps |= (1<<ai->app_id);
} else {
pd->total_apps &= ~(1<<ai->app_id);
pd->paused_apps &= ~(1<<ai->app_id);
}
oz_polling_unlock_bh();
ai->stop(pd, pause);
}
}
}
/*------------------------------------------------------------------------------
* Context: softirq
*/
void oz_pd_heartbeat(struct oz_pd *pd, u16 apps)
{
const struct oz_app_if *ai;
int more = 0;
for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
if (ai->heartbeat && (apps & (1<<ai->app_id))) {
if (ai->heartbeat(pd))
more = 1;
}
}
if (more)
oz_pd_request_heartbeat(pd);
if (pd->mode & OZ_F_ISOC_ANYTIME) {
int count = 8;
while (count-- && (oz_send_isoc_frame(pd) >= 0))
;
}
}
/*------------------------------------------------------------------------------
* Context: softirq or process
*/
void oz_pd_stop(struct oz_pd *pd)
{
u16 stop_apps = 0;
oz_trace("oz_pd_stop() State = 0x%x\n", pd->state);
oz_pd_indicate_farewells(pd);
oz_polling_lock_bh();
stop_apps = pd->total_apps;
pd->total_apps = 0;
pd->paused_apps = 0;
oz_polling_unlock_bh();
oz_services_stop(pd, stop_apps, 0);
oz_polling_lock_bh();
oz_pd_set_state(pd, OZ_PD_S_STOPPED);
/* Remove from PD list.*/
list_del(&pd->link);
oz_polling_unlock_bh();
oz_trace("pd ref count = %d\n", atomic_read(&pd->ref_count));
oz_timer_delete(pd, 0);
oz_pd_put(pd);
}
/*------------------------------------------------------------------------------
* Context: softirq
*/
int oz_pd_sleep(struct oz_pd *pd)
{
int do_stop = 0;
u16 stop_apps = 0;
oz_polling_lock_bh();
if (pd->state & (OZ_PD_S_SLEEP | OZ_PD_S_STOPPED)) {
oz_polling_unlock_bh();
return 0;
}
if (pd->keep_alive_j && pd->session_id) {
oz_pd_set_state(pd, OZ_PD_S_SLEEP);
pd->pulse_time_j = jiffies + pd->keep_alive_j;
oz_trace("Sleep Now %lu until %lu\n",
jiffies, pd->pulse_time_j);
} else {
do_stop = 1;
}
stop_apps = pd->total_apps;
oz_polling_unlock_bh();
if (do_stop) {
oz_pd_stop(pd);
} else {
oz_services_stop(pd, stop_apps, 1);
oz_timer_add(pd, OZ_TIMER_STOP, jiffies + pd->keep_alive_j, 1);
}
return do_stop;
}
/*------------------------------------------------------------------------------
* Context: softirq
*/
static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd)
{
struct oz_tx_frame *f = NULL;
spin_lock_bh(&pd->tx_frame_lock);
if (pd->tx_pool) {
f = container_of(pd->tx_pool, struct oz_tx_frame, link);
pd->tx_pool = pd->tx_pool->next;
pd->tx_pool_count--;
}
spin_unlock_bh(&pd->tx_frame_lock);
if (f == NULL)
f = kmalloc(sizeof(struct oz_tx_frame), GFP_ATOMIC);
if (f) {
f->total_size = sizeof(struct oz_hdr);
INIT_LIST_HEAD(&f->link);
INIT_LIST_HEAD(&f->elt_list);
}
return f;
}
/*------------------------------------------------------------------------------
* Context: softirq or process
*/
static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f)
{
pd->nb_queued_isoc_frames--;
list_del_init(&f->link);
if (pd->tx_pool_count < OZ_MAX_TX_POOL_SIZE) {
f->link.next = pd->tx_pool;
pd->tx_pool = &f->link;
pd->tx_pool_count++;
} else {
kfree(f);
}
oz_trace2(OZ_TRACE_TX_FRAMES, "Releasing ISOC Frame isoc_nb= %d\n",
pd->nb_queued_isoc_frames);
}
/*------------------------------------------------------------------------------
* Context: softirq or process
*/
static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f)
{
spin_lock_bh(&pd->tx_frame_lock);
if (pd->tx_pool_count < OZ_MAX_TX_POOL_SIZE) {
f->link.next = pd->tx_pool;
pd->tx_pool = &f->link;
pd->tx_pool_count++;
f = NULL;
}
spin_unlock_bh(&pd->tx_frame_lock);
kfree(f);
}
/*------------------------------------------------------------------------------
* Context: softirq-serialized
*/
static void oz_set_more_bit(struct sk_buff *skb)
{
struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);
oz_hdr->control |= OZ_F_MORE_DATA;
}
/*------------------------------------------------------------------------------
* Context: softirq-serialized
*/
static void oz_set_last_pkt_nb(struct oz_pd *pd, struct sk_buff *skb)
{
struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);
oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
}
/*------------------------------------------------------------------------------
* Context: softirq
*/
int oz_prepare_frame(struct oz_pd *pd, int empty)
{
struct oz_tx_frame *f;
if ((pd->mode & OZ_MODE_MASK) != OZ_MODE_TRIGGERED)
return -1;
if (pd->nb_queued_frames >= OZ_MAX_QUEUED_FRAMES)
return -1;
if (!empty && !oz_are_elts_available(&pd->elt_buff))
return -1;
f = oz_tx_frame_alloc(pd);
if (f == NULL)
return -1;
f->skb = NULL;
f->hdr.control =
(OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ACK_REQUESTED;
++pd->last_tx_pkt_num;
put_unaligned(cpu_to_le32(pd->last_tx_pkt_num), &f->hdr.pkt_num);
if (empty == 0) {
oz_select_elts_for_tx(&pd->elt_buff, 0, &f->total_size,
pd->max_tx_size, &f->elt_list);
}
spin_lock(&pd->tx_frame_lock);
list_add_tail(&f->link, &pd->tx_queue);
pd->nb_queued_frames++;
spin_unlock(&pd->tx_frame_lock);
return 0;
}
/*------------------------------------------------------------------------------
* Context: softirq-serialized
*/
static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f)
{
struct sk_buff *skb;
struct net_device *dev = pd->net_dev;
struct oz_hdr *oz_hdr;
struct oz_elt *elt;
struct list_head *e;
/* Allocate skb with enough space for the lower layers as well
* as the space we need.
*/
skb = alloc_skb(f->total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
if (skb == NULL)
return NULL;
/* Reserve the head room for lower layers.
*/
skb_reserve(skb, LL_RESERVED_SPACE(dev));
skb_reset_network_header(skb);
skb->dev = dev;
skb->protocol = htons(OZ_ETHERTYPE);
if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
dev->dev_addr, skb->len) < 0)
goto fail;
/* Push the tail to the end of the area we are going to copy to.
*/
oz_hdr = (struct oz_hdr *)skb_put(skb, f->total_size);
f->hdr.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
memcpy(oz_hdr, &f->hdr, sizeof(struct oz_hdr));
/* Copy the elements into the frame body.
*/
elt = (struct oz_elt *)(oz_hdr+1);
for (e = f->elt_list.next; e != &f->elt_list; e = e->next) {
struct oz_elt_info *ei;
ei = container_of(e, struct oz_elt_info, link);
memcpy(elt, ei->data, ei->length);
elt = oz_next_elt(elt);
}
return skb;
fail:
kfree_skb(skb);
return NULL;
}
/*------------------------------------------------------------------------------
* Context: softirq or process
*/
static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f)
{
struct list_head *e;
struct oz_elt_info *ei;
e = f->elt_list.next;
while (e != &f->elt_list) {
ei = container_of(e, struct oz_elt_info, link);
e = e->next;
list_del_init(&ei->link);
if (ei->callback)
ei->callback(pd, ei->context);
spin_lock_bh(&pd->elt_buff.lock);
oz_elt_info_free(&pd->elt_buff, ei);
spin_unlock_bh(&pd->elt_buff.lock);
}
oz_tx_frame_free(pd, f);
if (pd->elt_buff.free_elts > pd->elt_buff.max_free_elts)
oz_trim_elt_pool(&pd->elt_buff);
}
/*------------------------------------------------------------------------------
* Context: softirq-serialized
*/
static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data)
{
struct sk_buff *skb;
struct oz_tx_frame *f;
struct list_head *e;
spin_lock(&pd->tx_frame_lock);
e = pd->last_sent_frame->next;
if (e == &pd->tx_queue) {
spin_unlock(&pd->tx_frame_lock);
return -1;
}
f = container_of(e, struct oz_tx_frame, link);
if (f->skb != NULL) {
skb = f->skb;
oz_tx_isoc_free(pd, f);
spin_unlock(&pd->tx_frame_lock);
if (more_data)
oz_set_more_bit(skb);
oz_set_last_pkt_nb(pd, skb);
if ((int)atomic_read(&g_submitted_isoc) <
OZ_MAX_SUBMITTED_ISOC) {
if (dev_queue_xmit(skb) < 0) {
oz_trace2(OZ_TRACE_TX_FRAMES,
"Dropping ISOC Frame\n");
oz_event_log(OZ_EVT_TX_ISOC_DROP, 0, 0, NULL, 0);
return -1;
}
atomic_inc(&g_submitted_isoc);
oz_trace2(OZ_TRACE_TX_FRAMES,
"Sending ISOC Frame, nb_isoc= %d\n",
pd->nb_queued_isoc_frames);
return 0;
} else {
kfree_skb(skb);
oz_trace2(OZ_TRACE_TX_FRAMES, "Dropping ISOC Frame>\n");
oz_event_log(OZ_EVT_TX_ISOC_DROP, 0, 0, NULL, 0);
return -1;
}
}
pd->last_sent_frame = e;
skb = oz_build_frame(pd, f);
spin_unlock(&pd->tx_frame_lock);
	oz_trace2(OZ_TRACE_TX_FRAMES, "TX frame PN=0x%x\n", f->hdr.pkt_num);
	if (skb) {
		/* oz_build_frame() can fail, so only touch the skb if we got one. */
		if (more_data)
			oz_set_more_bit(skb);
oz_event_log(OZ_EVT_TX_FRAME,
0,
(((u16)f->hdr.control)<<8)|f->hdr.last_pkt_num,
NULL, f->hdr.pkt_num);
if (dev_queue_xmit(skb) < 0)
return -1;
}
return 0;
}
/*------------------------------------------------------------------------------
* Context: softirq-serialized
*/
void oz_send_queued_frames(struct oz_pd *pd, int backlog)
{
while (oz_prepare_frame(pd, 0) >= 0)
backlog++;
switch (pd->mode & (OZ_F_ISOC_NO_ELTS | OZ_F_ISOC_ANYTIME)) {
case OZ_F_ISOC_NO_ELTS: {
backlog += pd->nb_queued_isoc_frames;
if (backlog <= 0)
goto out;
if (backlog > OZ_MAX_SUBMITTED_ISOC)
backlog = OZ_MAX_SUBMITTED_ISOC;
break;
}
case OZ_NO_ELTS_ANYTIME: {
if ((backlog <= 0) && (pd->isoc_sent == 0))
goto out;
break;
}
default: {
if (backlog <= 0)
goto out;
break;
}
}
while (backlog--) {
if (oz_send_next_queued_frame(pd, backlog) < 0)
break;
}
return;
out: oz_prepare_frame(pd, 1);
oz_send_next_queued_frame(pd, 0);
}
/*------------------------------------------------------------------------------
* Context: softirq
*/
static int oz_send_isoc_frame(struct oz_pd *pd)
{
struct sk_buff *skb;
struct net_device *dev = pd->net_dev;
struct oz_hdr *oz_hdr;
struct oz_elt *elt;
struct list_head *e;
struct list_head list;
int total_size = sizeof(struct oz_hdr);
INIT_LIST_HEAD(&list);
oz_select_elts_for_tx(&pd->elt_buff, 1, &total_size,
pd->max_tx_size, &list);
if (list.next == &list)
return 0;
skb = alloc_skb(total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
if (skb == NULL) {
oz_trace("Cannot alloc skb\n");
oz_elt_info_free_chain(&pd->elt_buff, &list);
return -1;
}
skb_reserve(skb, LL_RESERVED_SPACE(dev));
skb_reset_network_header(skb);
skb->dev = dev;
skb->protocol = htons(OZ_ETHERTYPE);
if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
dev->dev_addr, skb->len) < 0) {
kfree_skb(skb);
return -1;
}
oz_hdr = (struct oz_hdr *)skb_put(skb, total_size);
oz_hdr->control = (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
elt = (struct oz_elt *)(oz_hdr+1);
for (e = list.next; e != &list; e = e->next) {
struct oz_elt_info *ei;
ei = container_of(e, struct oz_elt_info, link);
memcpy(elt, ei->data, ei->length);
elt = oz_next_elt(elt);
}
oz_event_log(OZ_EVT_TX_ISOC, 0, 0, NULL, 0);
dev_queue_xmit(skb);
oz_elt_info_free_chain(&pd->elt_buff, &list);
return 0;
}
/*------------------------------------------------------------------------------
* Context: softirq-serialized
*/
void oz_retire_tx_frames(struct oz_pd *pd, u8 lpn)
{
struct list_head *e;
struct oz_tx_frame *f;
struct list_head *first = NULL;
struct list_head *last = NULL;
u8 diff;
u32 pkt_num;
spin_lock(&pd->tx_frame_lock);
e = pd->tx_queue.next;
while (e != &pd->tx_queue) {
f = container_of(e, struct oz_tx_frame, link);
pkt_num = le32_to_cpu(get_unaligned(&f->hdr.pkt_num));
diff = (lpn - (pkt_num & OZ_LAST_PN_MASK)) & OZ_LAST_PN_MASK;
if ((diff > OZ_LAST_PN_HALF_CYCLE) || (pkt_num == 0))
break;
oz_trace2(OZ_TRACE_TX_FRAMES, "Releasing pkt_num= %u, nb= %d\n",
pkt_num, pd->nb_queued_frames);
if (first == NULL)
first = e;
last = e;
e = e->next;
pd->nb_queued_frames--;
}
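	/*
	 * Splice the whole run of acknowledged frames [first, last] out of
	 * the queue in one go; they are retired below, after the lock is
	 * dropped.
	 */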
if (first) {
last->next->prev = &pd->tx_queue;
pd->tx_queue.next = last->next;
last->next = NULL;
}
pd->last_sent_frame = &pd->tx_queue;
spin_unlock(&pd->tx_frame_lock);
while (first) {
f = container_of(first, struct oz_tx_frame, link);
first = first->next;
oz_retire_frame(pd, f);
}
}
/*------------------------------------------------------------------------------
* Precondition: stream_lock must be held.
* Context: softirq
*/
static struct oz_isoc_stream *pd_stream_find(struct oz_pd *pd, u8 ep_num)
{
struct list_head *e;
struct oz_isoc_stream *st;
list_for_each(e, &pd->stream_list) {
st = container_of(e, struct oz_isoc_stream, link);
if (st->ep_num == ep_num)
return st;
}
return NULL;
}
/*------------------------------------------------------------------------------
* Context: softirq
*/
int oz_isoc_stream_create(struct oz_pd *pd, u8 ep_num)
{
struct oz_isoc_stream *st =
kzalloc(sizeof(struct oz_isoc_stream), GFP_ATOMIC);
if (!st)
return -ENOMEM;
st->ep_num = ep_num;
spin_lock_bh(&pd->stream_lock);
if (!pd_stream_find(pd, ep_num)) {
list_add(&st->link, &pd->stream_list);
st = NULL;
}
spin_unlock_bh(&pd->stream_lock);
kfree(st);
return 0;
}
/*------------------------------------------------------------------------------
* Context: softirq or process
*/
static void oz_isoc_stream_free(struct oz_isoc_stream *st)
{
kfree_skb(st->skb);
kfree(st);
}
/*------------------------------------------------------------------------------
* Context: softirq
*/
int oz_isoc_stream_delete(struct oz_pd *pd, u8 ep_num)
{
struct oz_isoc_stream *st;
spin_lock_bh(&pd->stream_lock);
st = pd_stream_find(pd, ep_num);
if (st)
list_del(&st->link);
spin_unlock_bh(&pd->stream_lock);
if (st)
oz_isoc_stream_free(st);
return 0;
}
/*------------------------------------------------------------------------------
* Context: any
*/
static void oz_isoc_destructor(struct sk_buff *skb)
{
atomic_dec(&g_submitted_isoc);
oz_event_log(OZ_EVT_TX_ISOC_DONE, atomic_read(&g_submitted_isoc),
0, skb, 0);
}
/*------------------------------------------------------------------------------
* Context: softirq
*/
int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, const u8 *data, int len)
{
struct net_device *dev = pd->net_dev;
struct oz_isoc_stream *st;
u8 nb_units = 0;
struct sk_buff *skb = NULL;
struct oz_hdr *oz_hdr = NULL;
int size = 0;
spin_lock_bh(&pd->stream_lock);
st = pd_stream_find(pd, ep_num);
if (st) {
skb = st->skb;
st->skb = NULL;
nb_units = st->nb_units;
st->nb_units = 0;
oz_hdr = st->oz_hdr;
size = st->size;
}
spin_unlock_bh(&pd->stream_lock);
if (!st)
return 0;
if (!skb) {
/* Allocate enough space for max size frame. */
skb = alloc_skb(pd->max_tx_size + OZ_ALLOCATED_SPACE(dev),
GFP_ATOMIC);
if (skb == NULL)
return 0;
/* Reserve the head room for lower layers. */
skb_reserve(skb, LL_RESERVED_SPACE(dev));
skb_reset_network_header(skb);
skb->dev = dev;
skb->protocol = htons(OZ_ETHERTYPE);
/* For audio packet set priority to AC_VO */
skb->priority = 0x7;
size = sizeof(struct oz_hdr) + sizeof(struct oz_isoc_large);
oz_hdr = (struct oz_hdr *)skb_put(skb, size);
}
memcpy(skb_put(skb, len), data, len);
size += len;
if (++nb_units < pd->ms_per_isoc) {
spin_lock_bh(&pd->stream_lock);
st->skb = skb;
st->nb_units = nb_units;
st->oz_hdr = oz_hdr;
st->size = size;
spin_unlock_bh(&pd->stream_lock);
} else {
struct oz_hdr oz;
struct oz_isoc_large iso;
spin_lock_bh(&pd->stream_lock);
iso.frame_number = st->frame_num;
st->frame_num += nb_units;
spin_unlock_bh(&pd->stream_lock);
oz.control =
(OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
oz.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
oz.pkt_num = 0;
iso.endpoint = ep_num;
iso.format = OZ_DATA_F_ISOC_LARGE;
iso.ms_data = nb_units;
memcpy(oz_hdr, &oz, sizeof(oz));
memcpy(oz_hdr+1, &iso, sizeof(iso));
if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
dev->dev_addr, skb->len) < 0)
goto out;
skb->destructor = oz_isoc_destructor;
		/* Queue for transmit if mode is not ANYTIME */
if (!(pd->mode & OZ_F_ISOC_ANYTIME)) {
struct oz_tx_frame *isoc_unit = NULL;
int nb = pd->nb_queued_isoc_frames;
if (nb >= pd->isoc_latency) {
oz_trace2(OZ_TRACE_TX_FRAMES,
"Dropping ISOC Unit nb= %d\n",
nb);
goto out;
}
isoc_unit = oz_tx_frame_alloc(pd);
if (isoc_unit == NULL)
goto out;
isoc_unit->hdr = oz;
isoc_unit->skb = skb;
spin_lock_bh(&pd->tx_frame_lock);
list_add_tail(&isoc_unit->link, &pd->tx_queue);
pd->nb_queued_isoc_frames++;
spin_unlock_bh(&pd->tx_frame_lock);
oz_trace2(OZ_TRACE_TX_FRAMES,
"Added ISOC Frame to Tx Queue isoc_nb= %d, nb= %d\n",
pd->nb_queued_isoc_frames, pd->nb_queued_frames);
oz_event_log(OZ_EVT_TX_ISOC, nb_units, iso.frame_number,
skb, atomic_read(&g_submitted_isoc));
return 0;
}
		/* In ANYTIME mode, transmit the unit immediately */
if (atomic_read(&g_submitted_isoc) < OZ_MAX_SUBMITTED_ISOC) {
atomic_inc(&g_submitted_isoc);
oz_event_log(OZ_EVT_TX_ISOC, nb_units, iso.frame_number,
skb, atomic_read(&g_submitted_isoc));
if (dev_queue_xmit(skb) < 0) {
oz_event_log(OZ_EVT_TX_ISOC_DROP, 0, 0, NULL, 0);
return -1;
} else
return 0;
}
out: oz_event_log(OZ_EVT_TX_ISOC_DROP, 0, 0, NULL, 0);
kfree_skb(skb);
return -1;
}
return 0;
}
/*------------------------------------------------------------------------------
* Context: process
*/
void oz_apps_init(void)
{
int i;
for (i = 0; i < OZ_APPID_MAX; i++)
if (g_app_if[i].init)
g_app_if[i].init();
}
/*------------------------------------------------------------------------------
* Context: process
*/
void oz_apps_term(void)
{
int i;
/* Terminate all the apps. */
for (i = 0; i < OZ_APPID_MAX; i++)
if (g_app_if[i].term)
g_app_if[i].term();
}
/*------------------------------------------------------------------------------
* Context: softirq-serialized
*/
void oz_handle_app_elt(struct oz_pd *pd, u8 app_id, struct oz_elt *elt)
{
const struct oz_app_if *ai;
if (app_id == 0 || app_id > OZ_APPID_MAX)
return;
ai = &g_app_if[app_id-1];
ai->rx(pd, elt);
}
/*------------------------------------------------------------------------------
* Context: softirq or process
*/
void oz_pd_indicate_farewells(struct oz_pd *pd)
{
struct oz_farewell *f;
const struct oz_app_if *ai = &g_app_if[OZ_APPID_USB-1];
while (1) {
oz_polling_lock_bh();
if (list_empty(&pd->farewell_list)) {
oz_polling_unlock_bh();
break;
}
f = list_first_entry(&pd->farewell_list,
struct oz_farewell, link);
list_del(&f->link);
oz_polling_unlock_bh();
if (ai->farewell)
ai->farewell(pd, f->ep_num, f->report, f->len);
kfree(f);
}
}
| gpl-2.0 |
Chad0989/Vigor-Incredikernel | drivers/ata/pata_platform.c | 3584 | 7445 | /*
* Generic platform device PATA driver
*
* Copyright (C) 2006 - 2007 Paul Mundt
*
* Based on pata_pcmcia:
*
* Copyright 2005-2006 Red Hat Inc, all rights reserved.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <scsi/scsi_host.h>
#include <linux/ata.h>
#include <linux/libata.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#define DRV_NAME "pata_platform"
#define DRV_VERSION "1.2"
static int pio_mask = 1;
/*
* Provide our own set_mode() as we don't want to change anything that has
 * already been configured.
*/
static int pata_platform_set_mode(struct ata_link *link, struct ata_device **unused)
{
struct ata_device *dev;
ata_for_each_dev(dev, link, ENABLED) {
/* We don't really care */
dev->pio_mode = dev->xfer_mode = XFER_PIO_0;
dev->xfer_shift = ATA_SHIFT_PIO;
dev->flags |= ATA_DFLAG_PIO;
ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
}
return 0;
}
static struct scsi_host_template pata_platform_sht = {
ATA_PIO_SHT(DRV_NAME),
};
static struct ata_port_operations pata_platform_port_ops = {
.inherits = &ata_sff_port_ops,
.sff_data_xfer = ata_sff_data_xfer_noirq,
.cable_detect = ata_cable_unknown,
.set_mode = pata_platform_set_mode,
};
static void pata_platform_setup_port(struct ata_ioports *ioaddr,
unsigned int shift)
{
/* Fixup the port shift for platforms that need it */
ioaddr->data_addr = ioaddr->cmd_addr + (ATA_REG_DATA << shift);
ioaddr->error_addr = ioaddr->cmd_addr + (ATA_REG_ERR << shift);
ioaddr->feature_addr = ioaddr->cmd_addr + (ATA_REG_FEATURE << shift);
ioaddr->nsect_addr = ioaddr->cmd_addr + (ATA_REG_NSECT << shift);
ioaddr->lbal_addr = ioaddr->cmd_addr + (ATA_REG_LBAL << shift);
ioaddr->lbam_addr = ioaddr->cmd_addr + (ATA_REG_LBAM << shift);
ioaddr->lbah_addr = ioaddr->cmd_addr + (ATA_REG_LBAH << shift);
ioaddr->device_addr = ioaddr->cmd_addr + (ATA_REG_DEVICE << shift);
ioaddr->status_addr = ioaddr->cmd_addr + (ATA_REG_STATUS << shift);
ioaddr->command_addr = ioaddr->cmd_addr + (ATA_REG_CMD << shift);
}
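/*
 * Example: with shift == 1 the registers sit on 16-bit boundaries, so
 * ATA_REG_ERR (offset 1) lands at cmd_addr + 2, ATA_REG_NSECT (offset 2)
 * at cmd_addr + 4, and so on.
 */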
/**
* __pata_platform_probe - attach a platform interface
* @dev: device
* @io_res: Resource representing I/O base
* @ctl_res: Resource representing CTL base
* @irq_res: Resource representing IRQ and its flags
* @ioport_shift: I/O port shift
* @__pio_mask: PIO mask
*
* Register a platform bus IDE interface. Such interfaces are PIO and we
* assume do not support IRQ sharing.
*
* Platform devices are expected to contain at least 2 resources per port:
*
* - I/O Base (IORESOURCE_IO or IORESOURCE_MEM)
* - CTL Base (IORESOURCE_IO or IORESOURCE_MEM)
*
* and optionally:
*
* - IRQ (IORESOURCE_IRQ)
*
* If the base resources are both mem types, the ioremap() is handled
* here. For IORESOURCE_IO, it's assumed that there's no remapping
* necessary.
*
* If no IRQ resource is present, PIO polling mode is used instead.
*/
int __devinit __pata_platform_probe(struct device *dev,
struct resource *io_res,
struct resource *ctl_res,
struct resource *irq_res,
unsigned int ioport_shift,
int __pio_mask)
{
struct ata_host *host;
struct ata_port *ap;
unsigned int mmio;
int irq = 0;
int irq_flags = 0;
/*
* Check for MMIO
*/
	mmio = ((io_res->flags == IORESOURCE_MEM) &&
(ctl_res->flags == IORESOURCE_MEM));
/*
* And the IRQ
*/
if (irq_res && irq_res->start > 0) {
irq = irq_res->start;
irq_flags = irq_res->flags;
}
/*
* Now that that's out of the way, wire up the port..
*/
host = ata_host_alloc(dev, 1);
if (!host)
return -ENOMEM;
ap = host->ports[0];
ap->ops = &pata_platform_port_ops;
ap->pio_mask = __pio_mask;
ap->flags |= ATA_FLAG_SLAVE_POSS;
/*
* Use polling mode if there's no IRQ
*/
if (!irq) {
ap->flags |= ATA_FLAG_PIO_POLLING;
ata_port_desc(ap, "no IRQ, using PIO polling");
}
/*
* Handle the MMIO case
*/
if (mmio) {
ap->ioaddr.cmd_addr = devm_ioremap(dev, io_res->start,
resource_size(io_res));
ap->ioaddr.ctl_addr = devm_ioremap(dev, ctl_res->start,
resource_size(ctl_res));
} else {
ap->ioaddr.cmd_addr = devm_ioport_map(dev, io_res->start,
resource_size(io_res));
ap->ioaddr.ctl_addr = devm_ioport_map(dev, ctl_res->start,
resource_size(ctl_res));
}
if (!ap->ioaddr.cmd_addr || !ap->ioaddr.ctl_addr) {
dev_err(dev, "failed to map IO/CTL base\n");
return -ENOMEM;
}
ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr;
pata_platform_setup_port(&ap->ioaddr, ioport_shift);
ata_port_desc(ap, "%s cmd 0x%llx ctl 0x%llx", mmio ? "mmio" : "ioport",
(unsigned long long)io_res->start,
(unsigned long long)ctl_res->start);
/* activate */
return ata_host_activate(host, irq, irq ? ata_sff_interrupt : NULL,
irq_flags, &pata_platform_sht);
}
EXPORT_SYMBOL_GPL(__pata_platform_probe);
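/*
 * Illustrative board-file sketch (not part of this driver): one way to
 * register a "pata_platform" device with the two mandatory resources
 * and the optional IRQ described above.  The addresses, IRQ number and
 * shift value are made up for the example, which is why it is guarded
 * out of the build.
 */
#if 0	/* example only */
static struct resource example_pata_resources[] = {
	[0] = {			/* I/O base */
		.start	= 0x10000000,
		.end	= 0x1000001f,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {			/* CTL base */
		.start	= 0x10000020,
		.end	= 0x10000023,
		.flags	= IORESOURCE_MEM,
	},
	[2] = {			/* optional IRQ */
		.start	= 42,
		.end	= 42,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct pata_platform_info example_pata_info = {
	.ioport_shift	= 1,	/* registers on 16-bit boundaries */
};

static struct platform_device example_pata_device = {
	.name		= "pata_platform",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(example_pata_resources),
	.resource	= example_pata_resources,
	.dev = {
		.platform_data = &example_pata_info,
	},
};
#endif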
/**
* __pata_platform_remove - unplug a platform interface
* @dev: device
*
* A platform bus ATA device has been unplugged. Perform the needed
* cleanup. Also called on module unload for any active devices.
*/
int __pata_platform_remove(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
ata_host_detach(host);
return 0;
}
EXPORT_SYMBOL_GPL(__pata_platform_remove);
static int __devinit pata_platform_probe(struct platform_device *pdev)
{
struct resource *io_res;
struct resource *ctl_res;
struct resource *irq_res;
struct pata_platform_info *pp_info = pdev->dev.platform_data;
/*
* Simple resource validation ..
*/
if ((pdev->num_resources != 3) && (pdev->num_resources != 2)) {
dev_err(&pdev->dev, "invalid number of resources\n");
return -EINVAL;
}
/*
* Get the I/O base first
*/
io_res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (io_res == NULL) {
io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (unlikely(io_res == NULL))
return -EINVAL;
}
/*
* Then the CTL base
*/
ctl_res = platform_get_resource(pdev, IORESOURCE_IO, 1);
if (ctl_res == NULL) {
ctl_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (unlikely(ctl_res == NULL))
return -EINVAL;
}
/*
* And the IRQ
*/
irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (irq_res)
irq_res->flags = pp_info ? pp_info->irq_flags : 0;
return __pata_platform_probe(&pdev->dev, io_res, ctl_res, irq_res,
pp_info ? pp_info->ioport_shift : 0,
pio_mask);
}
static int __devexit pata_platform_remove(struct platform_device *pdev)
{
return __pata_platform_remove(&pdev->dev);
}
static struct platform_driver pata_platform_driver = {
.probe = pata_platform_probe,
.remove = __devexit_p(pata_platform_remove),
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
},
};
static int __init pata_platform_init(void)
{
return platform_driver_register(&pata_platform_driver);
}
static void __exit pata_platform_exit(void)
{
platform_driver_unregister(&pata_platform_driver);
}
module_init(pata_platform_init);
module_exit(pata_platform_exit);
module_param(pio_mask, int, 0);
MODULE_AUTHOR("Paul Mundt");
MODULE_DESCRIPTION("low-level driver for platform device ATA");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);
| gpl-2.0 |
jyh0082007/sigTaint | arch/sh/boards/mach-se/7724/irq.c | 3840 | 3566 | /*
* linux/arch/sh/boards/se/7724/irq.c
*
* Copyright (C) 2009 Renesas Solutions Corp.
*
* Kuninori Morimoto <morimoto.kuninori@renesas.com>
*
* Based on linux/arch/sh/boards/se/7722/irq.c
* Copyright (C) 2007 Nobuhiro Iwamatsu
*
* Hitachi UL SolutionEngine 7724 Support.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/topology.h>
#include <linux/io.h>
#include <linux/err.h>
#include <mach-se/mach/se7724.h>
struct fpga_irq {
unsigned long sraddr;
unsigned long mraddr;
unsigned short mask;
unsigned int base;
};
static unsigned int fpga2irq(unsigned int irq)
{
if (irq >= IRQ0_BASE &&
irq <= IRQ0_END)
return IRQ0_IRQ;
else if (irq >= IRQ1_BASE &&
irq <= IRQ1_END)
return IRQ1_IRQ;
else
return IRQ2_IRQ;
}
static struct fpga_irq get_fpga_irq(unsigned int irq)
{
struct fpga_irq set;
switch (irq) {
case IRQ0_IRQ:
set.sraddr = IRQ0_SR;
set.mraddr = IRQ0_MR;
set.mask = IRQ0_MASK;
set.base = IRQ0_BASE;
break;
case IRQ1_IRQ:
set.sraddr = IRQ1_SR;
set.mraddr = IRQ1_MR;
set.mask = IRQ1_MASK;
set.base = IRQ1_BASE;
break;
default:
set.sraddr = IRQ2_SR;
set.mraddr = IRQ2_MR;
set.mask = IRQ2_MASK;
set.base = IRQ2_BASE;
break;
}
return set;
}
static void disable_se7724_irq(struct irq_data *data)
{
unsigned int irq = data->irq;
struct fpga_irq set = get_fpga_irq(fpga2irq(irq));
unsigned int bit = irq - set.base;
__raw_writew(__raw_readw(set.mraddr) | 0x0001 << bit, set.mraddr);
}
static void enable_se7724_irq(struct irq_data *data)
{
unsigned int irq = data->irq;
struct fpga_irq set = get_fpga_irq(fpga2irq(irq));
unsigned int bit = irq - set.base;
__raw_writew(__raw_readw(set.mraddr) & ~(0x0001 << bit), set.mraddr);
}
static struct irq_chip se7724_irq_chip __read_mostly = {
.name = "SE7724-FPGA",
.irq_mask = disable_se7724_irq,
.irq_unmask = enable_se7724_irq,
};
static void se7724_irq_demux(unsigned int irq, struct irq_desc *desc)
{
struct fpga_irq set = get_fpga_irq(irq);
unsigned short intv = __raw_readw(set.sraddr);
unsigned int ext_irq = set.base;
intv &= set.mask;
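	/* Walk the pending status bits lowest-first and hand each one to
	 * its mapped Linux irq. */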
for (; intv; intv >>= 1, ext_irq++) {
if (!(intv & 1))
continue;
generic_handle_irq(ext_irq);
}
}
/*
* Initialize IRQ setting
*/
void __init init_se7724_IRQ(void)
{
int irq_base, i;
__raw_writew(0xffff, IRQ0_MR); /* mask all */
__raw_writew(0xffff, IRQ1_MR); /* mask all */
__raw_writew(0xffff, IRQ2_MR); /* mask all */
__raw_writew(0x0000, IRQ0_SR); /* clear irq */
__raw_writew(0x0000, IRQ1_SR); /* clear irq */
__raw_writew(0x0000, IRQ2_SR); /* clear irq */
__raw_writew(0x002a, IRQ_MODE); /* set irq type */
irq_base = irq_alloc_descs(SE7724_FPGA_IRQ_BASE, SE7724_FPGA_IRQ_BASE,
SE7724_FPGA_IRQ_NR, numa_node_id());
if (IS_ERR_VALUE(irq_base)) {
pr_err("%s: failed hooking irqs for FPGA\n", __func__);
return;
}
for (i = 0; i < SE7724_FPGA_IRQ_NR; i++)
irq_set_chip_and_handler_name(irq_base + i, &se7724_irq_chip,
handle_level_irq, "level");
irq_set_chained_handler(IRQ0_IRQ, se7724_irq_demux);
irq_set_irq_type(IRQ0_IRQ, IRQ_TYPE_LEVEL_LOW);
irq_set_chained_handler(IRQ1_IRQ, se7724_irq_demux);
irq_set_irq_type(IRQ1_IRQ, IRQ_TYPE_LEVEL_LOW);
irq_set_chained_handler(IRQ2_IRQ, se7724_irq_demux);
irq_set_irq_type(IRQ2_IRQ, IRQ_TYPE_LEVEL_LOW);
}
| gpl-2.0 |
BanBxda/m7-3.4.10-4-4-2 | drivers/hid/hid-roccat-kovaplus.c | 4096 | 20932 | /*
* Roccat Kova[+] driver for Linux
*
* Copyright (c) 2011 Stefan Achatz <erazor_de@users.sourceforge.net>
*/
/*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
/*
* Roccat Kova[+] is a bigger version of the Pyra with two more side buttons.
*/
#include <linux/device.h>
#include <linux/input.h>
#include <linux/hid.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/hid-roccat.h>
#include "hid-ids.h"
#include "hid-roccat-common.h"
#include "hid-roccat-kovaplus.h"
static uint profile_numbers[5] = {0, 1, 2, 3, 4};
static struct class *kovaplus_class;
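/*
 * The mouse appears to report the cpi level sparsely (1, 2, 4 or 7);
 * fold the two sparse codes onto consecutive level numbers
 * (4 -> 3, 7 -> 4).
 */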
static uint kovaplus_convert_event_cpi(uint value)
{
return (value == 7 ? 4 : (value == 4 ? 3 : value));
}
static void kovaplus_profile_activated(struct kovaplus_device *kovaplus,
uint new_profile_index)
{
kovaplus->actual_profile = new_profile_index;
kovaplus->actual_cpi = kovaplus->profile_settings[new_profile_index].cpi_startup_level;
kovaplus->actual_x_sensitivity = kovaplus->profile_settings[new_profile_index].sensitivity_x;
kovaplus->actual_y_sensitivity = kovaplus->profile_settings[new_profile_index].sensitivity_y;
}
static int kovaplus_send_control(struct usb_device *usb_dev, uint value,
enum kovaplus_control_requests request)
{
int retval;
struct kovaplus_control control;
if ((request == KOVAPLUS_CONTROL_REQUEST_PROFILE_SETTINGS ||
request == KOVAPLUS_CONTROL_REQUEST_PROFILE_BUTTONS) &&
value > 4)
return -EINVAL;
control.command = KOVAPLUS_COMMAND_CONTROL;
control.value = value;
control.request = request;
retval = roccat_common_send(usb_dev, KOVAPLUS_COMMAND_CONTROL,
&control, sizeof(struct kovaplus_control));
return retval;
}
static int kovaplus_receive_control_status(struct usb_device *usb_dev)
{
int retval;
struct kovaplus_control control;
do {
retval = roccat_common_receive(usb_dev, KOVAPLUS_COMMAND_CONTROL,
&control, sizeof(struct kovaplus_control));
/* check if we get a completely wrong answer */
if (retval)
return retval;
if (control.value == KOVAPLUS_CONTROL_REQUEST_STATUS_OK)
return 0;
/* indicates that hardware needs some more time to complete action */
if (control.value == KOVAPLUS_CONTROL_REQUEST_STATUS_WAIT) {
msleep(500); /* windows driver uses 1000 */
continue;
}
/* seems to be critical - replug necessary */
if (control.value == KOVAPLUS_CONTROL_REQUEST_STATUS_OVERLOAD)
return -EINVAL;
		hid_err(usb_dev, "kovaplus_receive_control_status: "
"unknown response value 0x%x\n", control.value);
return -EINVAL;
} while (1);
}
static int kovaplus_send(struct usb_device *usb_dev, uint command,
void const *buf, uint size)
{
int retval;
retval = roccat_common_send(usb_dev, command, buf, size);
if (retval)
return retval;
msleep(100);
return kovaplus_receive_control_status(usb_dev);
}
static int kovaplus_select_profile(struct usb_device *usb_dev, uint number,
enum kovaplus_control_requests request)
{
return kovaplus_send_control(usb_dev, number, request);
}
static int kovaplus_get_info(struct usb_device *usb_dev,
struct kovaplus_info *buf)
{
return roccat_common_receive(usb_dev, KOVAPLUS_COMMAND_INFO,
buf, sizeof(struct kovaplus_info));
}
static int kovaplus_get_profile_settings(struct usb_device *usb_dev,
struct kovaplus_profile_settings *buf, uint number)
{
int retval;
retval = kovaplus_select_profile(usb_dev, number,
KOVAPLUS_CONTROL_REQUEST_PROFILE_SETTINGS);
if (retval)
return retval;
return roccat_common_receive(usb_dev, KOVAPLUS_COMMAND_PROFILE_SETTINGS,
buf, sizeof(struct kovaplus_profile_settings));
}
static int kovaplus_set_profile_settings(struct usb_device *usb_dev,
struct kovaplus_profile_settings const *settings)
{
return kovaplus_send(usb_dev, KOVAPLUS_COMMAND_PROFILE_SETTINGS,
settings, sizeof(struct kovaplus_profile_settings));
}
static int kovaplus_get_profile_buttons(struct usb_device *usb_dev,
struct kovaplus_profile_buttons *buf, int number)
{
int retval;
retval = kovaplus_select_profile(usb_dev, number,
KOVAPLUS_CONTROL_REQUEST_PROFILE_BUTTONS);
if (retval)
return retval;
return roccat_common_receive(usb_dev, KOVAPLUS_COMMAND_PROFILE_BUTTONS,
buf, sizeof(struct kovaplus_profile_buttons));
}
static int kovaplus_set_profile_buttons(struct usb_device *usb_dev,
struct kovaplus_profile_buttons const *buttons)
{
return kovaplus_send(usb_dev, KOVAPLUS_COMMAND_PROFILE_BUTTONS,
buttons, sizeof(struct kovaplus_profile_buttons));
}
/* retval is 0-4 on success, < 0 on error */
static int kovaplus_get_actual_profile(struct usb_device *usb_dev)
{
struct kovaplus_actual_profile buf;
int retval;
retval = roccat_common_receive(usb_dev, KOVAPLUS_COMMAND_ACTUAL_PROFILE,
&buf, sizeof(struct kovaplus_actual_profile));
return retval ? retval : buf.actual_profile;
}
static int kovaplus_set_actual_profile(struct usb_device *usb_dev,
int new_profile)
{
struct kovaplus_actual_profile buf;
buf.command = KOVAPLUS_COMMAND_ACTUAL_PROFILE;
buf.size = sizeof(struct kovaplus_actual_profile);
buf.actual_profile = new_profile;
return kovaplus_send(usb_dev, KOVAPLUS_COMMAND_ACTUAL_PROFILE,
&buf, sizeof(struct kovaplus_actual_profile));
}
static ssize_t kovaplus_sysfs_read_profilex_settings(struct file *fp,
struct kobject *kobj, struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct device *dev =
container_of(kobj, struct device, kobj)->parent->parent;
struct kovaplus_device *kovaplus = hid_get_drvdata(dev_get_drvdata(dev));
if (off >= sizeof(struct kovaplus_profile_settings))
return 0;
if (off + count > sizeof(struct kovaplus_profile_settings))
count = sizeof(struct kovaplus_profile_settings) - off;
mutex_lock(&kovaplus->kovaplus_lock);
memcpy(buf, ((char const *)&kovaplus->profile_settings[*(uint *)(attr->private)]) + off,
count);
mutex_unlock(&kovaplus->kovaplus_lock);
return count;
}
static ssize_t kovaplus_sysfs_write_profile_settings(struct file *fp,
struct kobject *kobj, struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct device *dev =
container_of(kobj, struct device, kobj)->parent->parent;
struct kovaplus_device *kovaplus = hid_get_drvdata(dev_get_drvdata(dev));
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
int retval = 0;
int difference;
int profile_index;
struct kovaplus_profile_settings *profile_settings;
if (off != 0 || count != sizeof(struct kovaplus_profile_settings))
return -EINVAL;
profile_index = ((struct kovaplus_profile_settings const *)buf)->profile_index;
profile_settings = &kovaplus->profile_settings[profile_index];
mutex_lock(&kovaplus->kovaplus_lock);
difference = memcmp(buf, profile_settings,
sizeof(struct kovaplus_profile_settings));
if (difference) {
retval = kovaplus_set_profile_settings(usb_dev,
(struct kovaplus_profile_settings const *)buf);
if (!retval)
memcpy(profile_settings, buf,
sizeof(struct kovaplus_profile_settings));
}
mutex_unlock(&kovaplus->kovaplus_lock);
if (retval)
return retval;
return sizeof(struct kovaplus_profile_settings);
}
static ssize_t kovaplus_sysfs_read_profilex_buttons(struct file *fp,
struct kobject *kobj, struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct device *dev =
container_of(kobj, struct device, kobj)->parent->parent;
struct kovaplus_device *kovaplus = hid_get_drvdata(dev_get_drvdata(dev));
if (off >= sizeof(struct kovaplus_profile_buttons))
return 0;
if (off + count > sizeof(struct kovaplus_profile_buttons))
count = sizeof(struct kovaplus_profile_buttons) - off;
mutex_lock(&kovaplus->kovaplus_lock);
memcpy(buf, ((char const *)&kovaplus->profile_buttons[*(uint *)(attr->private)]) + off,
count);
mutex_unlock(&kovaplus->kovaplus_lock);
return count;
}
static ssize_t kovaplus_sysfs_write_profile_buttons(struct file *fp,
struct kobject *kobj, struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct device *dev =
container_of(kobj, struct device, kobj)->parent->parent;
struct kovaplus_device *kovaplus = hid_get_drvdata(dev_get_drvdata(dev));
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
int retval = 0;
int difference;
uint profile_index;
struct kovaplus_profile_buttons *profile_buttons;
if (off != 0 || count != sizeof(struct kovaplus_profile_buttons))
return -EINVAL;
profile_index = ((struct kovaplus_profile_buttons const *)buf)->profile_index;
profile_buttons = &kovaplus->profile_buttons[profile_index];
mutex_lock(&kovaplus->kovaplus_lock);
difference = memcmp(buf, profile_buttons,
sizeof(struct kovaplus_profile_buttons));
if (difference) {
retval = kovaplus_set_profile_buttons(usb_dev,
(struct kovaplus_profile_buttons const *)buf);
if (!retval)
memcpy(profile_buttons, buf,
sizeof(struct kovaplus_profile_buttons));
}
mutex_unlock(&kovaplus->kovaplus_lock);
if (retval)
return retval;
return sizeof(struct kovaplus_profile_buttons);
}
static ssize_t kovaplus_sysfs_show_actual_profile(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct kovaplus_device *kovaplus =
hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
return snprintf(buf, PAGE_SIZE, "%d\n", kovaplus->actual_profile);
}
static ssize_t kovaplus_sysfs_set_actual_profile(struct device *dev,
struct device_attribute *attr, char const *buf, size_t size)
{
struct kovaplus_device *kovaplus;
struct usb_device *usb_dev;
unsigned long profile;
int retval;
struct kovaplus_roccat_report roccat_report;
dev = dev->parent->parent;
kovaplus = hid_get_drvdata(dev_get_drvdata(dev));
usb_dev = interface_to_usbdev(to_usb_interface(dev));
retval = strict_strtoul(buf, 10, &profile);
if (retval)
return retval;
if (profile >= 5)
return -EINVAL;
mutex_lock(&kovaplus->kovaplus_lock);
retval = kovaplus_set_actual_profile(usb_dev, profile);
if (retval) {
mutex_unlock(&kovaplus->kovaplus_lock);
return retval;
}
kovaplus_profile_activated(kovaplus, profile);
roccat_report.type = KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_PROFILE_1;
roccat_report.profile = profile + 1;
roccat_report.button = 0;
roccat_report.data1 = profile + 1;
roccat_report.data2 = 0;
roccat_report_event(kovaplus->chrdev_minor,
(uint8_t const *)&roccat_report);
mutex_unlock(&kovaplus->kovaplus_lock);
return size;
}
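/*
 * Usage sketch (the leading path is assumed -- it depends on where the
 * roccat class device lands on a given system):
 *
 *	echo 2 > /sys/class/kovaplus/.../actual_profile
 *
 * selects the zero-based profile index 2 and reports the switch on the
 * roccat character device.
 */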
static ssize_t kovaplus_sysfs_show_actual_cpi(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct kovaplus_device *kovaplus =
hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
return snprintf(buf, PAGE_SIZE, "%d\n", kovaplus->actual_cpi);
}
static ssize_t kovaplus_sysfs_show_actual_sensitivity_x(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct kovaplus_device *kovaplus =
hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
return snprintf(buf, PAGE_SIZE, "%d\n", kovaplus->actual_x_sensitivity);
}
static ssize_t kovaplus_sysfs_show_actual_sensitivity_y(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct kovaplus_device *kovaplus =
hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
return snprintf(buf, PAGE_SIZE, "%d\n", kovaplus->actual_y_sensitivity);
}
static ssize_t kovaplus_sysfs_show_firmware_version(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct kovaplus_device *kovaplus =
hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
return snprintf(buf, PAGE_SIZE, "%d\n", kovaplus->info.firmware_version);
}
static struct device_attribute kovaplus_attributes[] = {
__ATTR(actual_cpi, 0440,
kovaplus_sysfs_show_actual_cpi, NULL),
__ATTR(firmware_version, 0440,
kovaplus_sysfs_show_firmware_version, NULL),
__ATTR(actual_profile, 0660,
kovaplus_sysfs_show_actual_profile,
kovaplus_sysfs_set_actual_profile),
__ATTR(actual_sensitivity_x, 0440,
kovaplus_sysfs_show_actual_sensitivity_x, NULL),
__ATTR(actual_sensitivity_y, 0440,
kovaplus_sysfs_show_actual_sensitivity_y, NULL),
__ATTR_NULL
};
static struct bin_attribute kovaplus_bin_attributes[] = {
{
.attr = { .name = "profile_settings", .mode = 0220 },
.size = sizeof(struct kovaplus_profile_settings),
.write = kovaplus_sysfs_write_profile_settings
},
{
.attr = { .name = "profile1_settings", .mode = 0440 },
.size = sizeof(struct kovaplus_profile_settings),
.read = kovaplus_sysfs_read_profilex_settings,
.private = &profile_numbers[0]
},
{
.attr = { .name = "profile2_settings", .mode = 0440 },
.size = sizeof(struct kovaplus_profile_settings),
.read = kovaplus_sysfs_read_profilex_settings,
.private = &profile_numbers[1]
},
{
.attr = { .name = "profile3_settings", .mode = 0440 },
.size = sizeof(struct kovaplus_profile_settings),
.read = kovaplus_sysfs_read_profilex_settings,
.private = &profile_numbers[2]
},
{
.attr = { .name = "profile4_settings", .mode = 0440 },
.size = sizeof(struct kovaplus_profile_settings),
.read = kovaplus_sysfs_read_profilex_settings,
.private = &profile_numbers[3]
},
{
.attr = { .name = "profile5_settings", .mode = 0440 },
.size = sizeof(struct kovaplus_profile_settings),
.read = kovaplus_sysfs_read_profilex_settings,
.private = &profile_numbers[4]
},
{
.attr = { .name = "profile_buttons", .mode = 0220 },
.size = sizeof(struct kovaplus_profile_buttons),
.write = kovaplus_sysfs_write_profile_buttons
},
{
.attr = { .name = "profile1_buttons", .mode = 0440 },
.size = sizeof(struct kovaplus_profile_buttons),
.read = kovaplus_sysfs_read_profilex_buttons,
.private = &profile_numbers[0]
},
{
.attr = { .name = "profile2_buttons", .mode = 0440 },
.size = sizeof(struct kovaplus_profile_buttons),
.read = kovaplus_sysfs_read_profilex_buttons,
.private = &profile_numbers[1]
},
{
.attr = { .name = "profile3_buttons", .mode = 0440 },
.size = sizeof(struct kovaplus_profile_buttons),
.read = kovaplus_sysfs_read_profilex_buttons,
.private = &profile_numbers[2]
},
{
.attr = { .name = "profile4_buttons", .mode = 0440 },
.size = sizeof(struct kovaplus_profile_buttons),
.read = kovaplus_sysfs_read_profilex_buttons,
.private = &profile_numbers[3]
},
{
.attr = { .name = "profile5_buttons", .mode = 0440 },
.size = sizeof(struct kovaplus_profile_buttons),
.read = kovaplus_sysfs_read_profilex_buttons,
.private = &profile_numbers[4]
},
__ATTR_NULL
};
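/*
 * Illustrative userspace sketch (assumption, not part of the driver):
 * each profileX_buttons binary attribute above returns exactly
 * sizeof(struct kovaplus_profile_buttons) bytes per read. The path and
 * the buffer bound are hypothetical.
 */
#if 0 /* userspace example only */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
int main(void)
{
unsigned char buf[64]; /* assumed upper bound on the struct size */
ssize_t n;
int fd = open("/sys/bus/usb/devices/1-1:1.0/profile1_buttons",
O_RDONLY); /* hypothetical path */
if (fd < 0)
return 1;
n = read(fd, buf, sizeof(buf));
printf("read %zd bytes of button data\n", n);
close(fd);
return 0;
}
#endif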
static int kovaplus_init_kovaplus_device_struct(struct usb_device *usb_dev,
struct kovaplus_device *kovaplus)
{
int retval, i;
static uint wait = 70; /* device will freeze with just 60 */
mutex_init(&kovaplus->kovaplus_lock);
retval = kovaplus_get_info(usb_dev, &kovaplus->info);
if (retval)
return retval;
for (i = 0; i < 5; ++i) {
msleep(wait);
retval = kovaplus_get_profile_settings(usb_dev,
&kovaplus->profile_settings[i], i);
if (retval)
return retval;
msleep(wait);
retval = kovaplus_get_profile_buttons(usb_dev,
&kovaplus->profile_buttons[i], i);
if (retval)
return retval;
}
msleep(wait);
retval = kovaplus_get_actual_profile(usb_dev);
if (retval < 0)
return retval;
kovaplus_profile_activated(kovaplus, retval);
return 0;
}
static int kovaplus_init_specials(struct hid_device *hdev)
{
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
struct usb_device *usb_dev = interface_to_usbdev(intf);
struct kovaplus_device *kovaplus;
int retval;
if (intf->cur_altsetting->desc.bInterfaceProtocol
== USB_INTERFACE_PROTOCOL_MOUSE) {
kovaplus = kzalloc(sizeof(*kovaplus), GFP_KERNEL);
if (!kovaplus) {
hid_err(hdev, "can't alloc device descriptor\n");
return -ENOMEM;
}
hid_set_drvdata(hdev, kovaplus);
retval = kovaplus_init_kovaplus_device_struct(usb_dev, kovaplus);
if (retval) {
hid_err(hdev, "couldn't init struct kovaplus_device\n");
goto exit_free;
}
retval = roccat_connect(kovaplus_class, hdev,
sizeof(struct kovaplus_roccat_report));
if (retval < 0) {
hid_err(hdev, "couldn't init char dev\n");
} else {
kovaplus->chrdev_minor = retval;
kovaplus->roccat_claimed = 1;
}
} else {
hid_set_drvdata(hdev, NULL);
}
return 0;
exit_free:
kfree(kovaplus);
return retval;
}
static void kovaplus_remove_specials(struct hid_device *hdev)
{
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
struct kovaplus_device *kovaplus;
if (intf->cur_altsetting->desc.bInterfaceProtocol
== USB_INTERFACE_PROTOCOL_MOUSE) {
kovaplus = hid_get_drvdata(hdev);
if (kovaplus->roccat_claimed)
roccat_disconnect(kovaplus->chrdev_minor);
kfree(kovaplus);
}
}
static int kovaplus_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
int retval;
retval = hid_parse(hdev);
if (retval) {
hid_err(hdev, "parse failed\n");
goto exit;
}
retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (retval) {
hid_err(hdev, "hw start failed\n");
goto exit;
}
retval = kovaplus_init_specials(hdev);
if (retval) {
hid_err(hdev, "couldn't install mouse\n");
goto exit_stop;
}
return 0;
exit_stop:
hid_hw_stop(hdev);
exit:
return retval;
}
static void kovaplus_remove(struct hid_device *hdev)
{
kovaplus_remove_specials(hdev);
hid_hw_stop(hdev);
}
static void kovaplus_keep_values_up_to_date(struct kovaplus_device *kovaplus,
u8 const *data)
{
struct kovaplus_mouse_report_button const *button_report;
if (data[0] != KOVAPLUS_MOUSE_REPORT_NUMBER_BUTTON)
return;
button_report = (struct kovaplus_mouse_report_button const *)data;
switch (button_report->type) {
case KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_PROFILE_1:
kovaplus_profile_activated(kovaplus, button_report->data1 - 1);
break;
case KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_CPI:
kovaplus->actual_cpi = kovaplus_convert_event_cpi(button_report->data1);
break; /* don't fall through and clobber the sensitivity values */
case KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_SENSITIVITY:
kovaplus->actual_x_sensitivity = button_report->data1;
kovaplus->actual_y_sensitivity = button_report->data2;
}
}
static void kovaplus_report_to_chrdev(struct kovaplus_device const *kovaplus,
u8 const *data)
{
struct kovaplus_roccat_report roccat_report;
struct kovaplus_mouse_report_button const *button_report;
if (data[0] != KOVAPLUS_MOUSE_REPORT_NUMBER_BUTTON)
return;
button_report = (struct kovaplus_mouse_report_button const *)data;
if (button_report->type == KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_PROFILE_2)
return;
roccat_report.type = button_report->type;
roccat_report.profile = kovaplus->actual_profile + 1;
if (roccat_report.type == KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_MACRO ||
roccat_report.type == KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_SHORTCUT ||
roccat_report.type == KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_QUICKLAUNCH ||
roccat_report.type == KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_TIMER)
roccat_report.button = button_report->data1;
else
roccat_report.button = 0;
if (roccat_report.type == KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_CPI)
roccat_report.data1 = kovaplus_convert_event_cpi(button_report->data1);
else
roccat_report.data1 = button_report->data1;
roccat_report.data2 = button_report->data2;
roccat_report_event(kovaplus->chrdev_minor,
(uint8_t const *)&roccat_report);
}
static int kovaplus_raw_event(struct hid_device *hdev,
struct hid_report *report, u8 *data, int size)
{
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
struct kovaplus_device *kovaplus = hid_get_drvdata(hdev);
if (intf->cur_altsetting->desc.bInterfaceProtocol
!= USB_INTERFACE_PROTOCOL_MOUSE)
return 0;
if (kovaplus == NULL)
return 0;
kovaplus_keep_values_up_to_date(kovaplus, data);
if (kovaplus->roccat_claimed)
kovaplus_report_to_chrdev(kovaplus, data);
return 0;
}
static const struct hid_device_id kovaplus_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KOVAPLUS) },
{ }
};
MODULE_DEVICE_TABLE(hid, kovaplus_devices);
static struct hid_driver kovaplus_driver = {
.name = "kovaplus",
.id_table = kovaplus_devices,
.probe = kovaplus_probe,
.remove = kovaplus_remove,
.raw_event = kovaplus_raw_event
};
static int __init kovaplus_init(void)
{
int retval;
kovaplus_class = class_create(THIS_MODULE, "kovaplus");
if (IS_ERR(kovaplus_class))
return PTR_ERR(kovaplus_class);
kovaplus_class->dev_attrs = kovaplus_attributes;
kovaplus_class->dev_bin_attrs = kovaplus_bin_attributes;
retval = hid_register_driver(&kovaplus_driver);
if (retval)
class_destroy(kovaplus_class);
return retval;
}
static void __exit kovaplus_exit(void)
{
hid_unregister_driver(&kovaplus_driver);
class_destroy(kovaplus_class);
}
module_init(kovaplus_init);
module_exit(kovaplus_exit);
MODULE_AUTHOR("Stefan Achatz");
MODULE_DESCRIPTION("USB Roccat Kova[+] driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
MilysTW/linux-sunxi-cb2 | drivers/staging/iio/magnetometer/hmc5843.c | 4864 | 16358 | /* Copyright (C) 2010 Texas Instruments
Author: Shubhrajyoti Datta <shubhrajyoti@ti.com>
Acknowledgement: Jonathan Cameron <jic23@cam.ac.uk> for valuable inputs.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/types.h>
#include "../iio.h"
#include "../sysfs.h"
#define HMC5843_I2C_ADDRESS 0x1E
#define HMC5843_CONFIG_REG_A 0x00
#define HMC5843_CONFIG_REG_B 0x01
#define HMC5843_MODE_REG 0x02
#define HMC5843_DATA_OUT_X_MSB_REG 0x03
#define HMC5843_DATA_OUT_X_LSB_REG 0x04
#define HMC5843_DATA_OUT_Y_MSB_REG 0x05
#define HMC5843_DATA_OUT_Y_LSB_REG 0x06
#define HMC5843_DATA_OUT_Z_MSB_REG 0x07
#define HMC5843_DATA_OUT_Z_LSB_REG 0x08
#define HMC5843_STATUS_REG 0x09
#define HMC5843_ID_REG_A 0x0A
#define HMC5843_ID_REG_B 0x0B
#define HMC5843_ID_REG_C 0x0C
#define HMC5843_ID_REG_LENGTH 0x03
#define HMC5843_ID_STRING "H43"
/*
* Range settings in (+-)Ga
*/
#define RANGE_GAIN_OFFSET 0x05
#define RANGE_0_7 0x00
#define RANGE_1_0 0x01 /* default */
#define RANGE_1_5 0x02
#define RANGE_2_0 0x03
#define RANGE_3_2 0x04
#define RANGE_3_8 0x05
#define RANGE_4_5 0x06
#define RANGE_6_5 0x07 /* Not recommended */
/*
* Device status
*/
#define DATA_READY 0x01
#define DATA_OUTPUT_LOCK 0x02
#define VOLTAGE_REGULATOR_ENABLED 0x04
/*
* Mode register configuration
*/
#define MODE_CONVERSION_CONTINUOUS 0x00
#define MODE_CONVERSION_SINGLE 0x01
#define MODE_IDLE 0x02
#define MODE_SLEEP 0x03
/* Minimum Data Output Rate in 1/10 Hz */
#define RATE_OFFSET 0x02
#define RATE_BITMASK 0x1C
#define RATE_5 0x00
#define RATE_10 0x01
#define RATE_20 0x02
#define RATE_50 0x03
#define RATE_100 0x04
#define RATE_200 0x05
#define RATE_500 0x06
#define RATE_NOT_USED 0x07
/*
* Device Configuration
*/
#define CONF_NORMAL 0x00
#define CONF_POSITIVE_BIAS 0x01
#define CONF_NEGATIVE_BIAS 0x02
#define CONF_NOT_USED 0x03
#define MEAS_CONF_MASK 0x03
static int hmc5843_regval_to_nanoscale[] = {
6173, 7692, 10309, 12821, 18868, 21739, 25641, 35714
};
static const int regval_to_input_field_mg[] = {
700,
1000,
1500,
2000,
3200,
3800,
4500,
6500
};
static const char * const regval_to_samp_freq[] = {
"0.5",
"1",
"2",
"5",
"10",
"20",
"50",
};
/* Addresses to scan: 0x1E */
static const unsigned short normal_i2c[] = { HMC5843_I2C_ADDRESS,
I2C_CLIENT_END };
/* Each client has this additional data */
struct hmc5843_data {
struct mutex lock;
u8 rate;
u8 meas_conf;
u8 operating_mode;
u8 range;
};
static void hmc5843_init_client(struct i2c_client *client);
static s32 hmc5843_configure(struct i2c_client *client,
u8 operating_mode)
{
/* The lower two bits contain the current conversion mode */
return i2c_smbus_write_byte_data(client,
HMC5843_MODE_REG,
(operating_mode & 0x03));
}
/* Return the measurement value from the specified channel */
static int hmc5843_read_measurement(struct iio_dev *indio_dev,
int address,
int *val)
{
struct i2c_client *client = to_i2c_client(indio_dev->dev.parent);
struct hmc5843_data *data = iio_priv(indio_dev);
s32 result;
mutex_lock(&data->lock);
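/* Busy-wait for the data-ready bit; note there is no timeout here, so
 * a wedged part would spin forever. */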
result = i2c_smbus_read_byte_data(client, HMC5843_STATUS_REG);
while (!(result & DATA_READY))
result = i2c_smbus_read_byte_data(client, HMC5843_STATUS_REG);
result = i2c_smbus_read_word_data(client, address);
mutex_unlock(&data->lock);
if (result < 0)
return -EINVAL;
*val = (s16)swab16((u16)result);
return IIO_VAL_INT;
}
/*
* From the datasheet
* 0 - Continuous-Conversion Mode: In continuous-conversion mode, the
* device continuously performs conversions and places the result in the
* data register.
*
* 1 - Single-Conversion Mode: the device performs a single measurement,
* sets RDY high and returns to sleep mode
*
* 2 - Idle Mode : Device is placed in idle mode.
*
* 3 - Sleep Mode. Device is placed in sleep mode.
*
*/
static ssize_t hmc5843_show_operating_mode(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct hmc5843_data *data = iio_priv(indio_dev);
return sprintf(buf, "%d\n", data->operating_mode);
}
static ssize_t hmc5843_set_operating_mode(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct i2c_client *client = to_i2c_client(indio_dev->dev.parent);
struct hmc5843_data *data = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
unsigned long operating_mode = 0;
s32 status;
int error;
mutex_lock(&data->lock);
error = strict_strtoul(buf, 10, &operating_mode);
if (error) {
count = error;
goto exit;
}
dev_dbg(dev, "set Conversion mode to %lu\n", operating_mode);
if (operating_mode > MODE_SLEEP) {
count = -EINVAL;
goto exit;
}
status = i2c_smbus_write_byte_data(client, this_attr->address,
operating_mode);
if (status) {
count = -EINVAL;
goto exit;
}
data->operating_mode = operating_mode;
exit:
mutex_unlock(&data->lock);
return count;
}
static IIO_DEVICE_ATTR(operating_mode,
S_IWUSR | S_IRUGO,
hmc5843_show_operating_mode,
hmc5843_set_operating_mode,
HMC5843_MODE_REG);
/*
* API for setting the measurement configuration to
* Normal, Positive bias and Negative bias
* From the datasheet
*
* Normal measurement configuration (default): In normal measurement
* configuration the device follows normal measurement flow. Pins BP and BN
* are left floating and high impedance.
*
* Positive bias configuration: In positive bias configuration, a positive
* current is forced across the resistive load on pins BP and BN.
*
* Negative bias configuration. In negative bias configuration, a negative
* current is forced across the resistive load on pins BP and BN.
*
*/
static s32 hmc5843_set_meas_conf(struct i2c_client *client,
u8 meas_conf)
{
struct hmc5843_data *data = i2c_get_clientdata(client);
u8 reg_val;
reg_val = (meas_conf & MEAS_CONF_MASK) | (data->rate << RATE_OFFSET);
return i2c_smbus_write_byte_data(client, HMC5843_CONFIG_REG_A, reg_val);
}
static ssize_t hmc5843_show_measurement_configuration(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct hmc5843_data *data = iio_priv(indio_dev);
return sprintf(buf, "%d\n", data->meas_conf);
}
static ssize_t hmc5843_set_measurement_configuration(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct i2c_client *client = to_i2c_client(indio_dev->dev.parent);
struct hmc5843_data *data = i2c_get_clientdata(client);
unsigned long meas_conf = 0;
int error = strict_strtoul(buf, 10, &meas_conf);
if (error)
return error;
mutex_lock(&data->lock);
dev_dbg(dev, "set mode to %lu\n", meas_conf);
if (hmc5843_set_meas_conf(client, meas_conf)) {
count = -EINVAL;
goto exit;
}
data->meas_conf = meas_conf;
exit:
mutex_unlock(&data->lock);
return count;
}
static IIO_DEVICE_ATTR(meas_conf,
S_IWUSR | S_IRUGO,
hmc5843_show_measurement_configuration,
hmc5843_set_measurement_configuration,
0);
/*
* From Datasheet
* The table below shows the minimum data output rates
* Value | Minimum data output rate(Hz)
* 0 | 0.5
* 1 | 1
* 2 | 2
* 3 | 5
* 4 | 10 (default)
* 5 | 20
* 6 | 50
* 7 | Not used
*/
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("0.5 1 2 5 10 20 50");
static s32 hmc5843_set_rate(struct i2c_client *client,
u8 rate)
{
struct hmc5843_data *data = i2c_get_clientdata(client);
u8 reg_val;
reg_val = (data->meas_conf) | (rate << RATE_OFFSET);
if (rate >= RATE_NOT_USED) {
dev_err(&client->dev,
"This data output rate is not supported\n");
return -EINVAL;
}
return i2c_smbus_write_byte_data(client, HMC5843_CONFIG_REG_A, reg_val);
}
static ssize_t set_sampling_frequency(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct i2c_client *client = to_i2c_client(indio_dev->dev.parent);
struct hmc5843_data *data = iio_priv(indio_dev);
unsigned long rate = 0;
/* Match the longer tokens first so "10" is not mistaken for "1",
 * "20" for "2" and "50" for "5". */
if (strncmp(buf, "0.5", 3) == 0)
rate = RATE_5;
else if (strncmp(buf, "10", 2) == 0)
rate = RATE_100;
else if (strncmp(buf, "20", 2) == 0)
rate = RATE_200;
else if (strncmp(buf, "50", 2) == 0)
rate = RATE_500;
else if (strncmp(buf, "1", 1) == 0)
rate = RATE_10;
else if (strncmp(buf, "2", 1) == 0)
rate = RATE_20;
else if (strncmp(buf, "5", 1) == 0)
rate = RATE_50;
else
return -EINVAL;
mutex_lock(&data->lock);
dev_dbg(dev, "set rate to %lu\n", rate);
if (hmc5843_set_rate(client, rate)) {
count = -EINVAL;
goto exit;
}
data->rate = rate;
exit:
mutex_unlock(&data->lock);
return count;
}
static ssize_t show_sampling_frequency(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct i2c_client *client = to_i2c_client(indio_dev->dev.parent);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
s32 rate;
rate = i2c_smbus_read_byte_data(client, this_attr->address);
if (rate < 0)
return rate;
rate = (rate & RATE_BITMASK) >> RATE_OFFSET;
return sprintf(buf, "%s\n", regval_to_samp_freq[rate]);
}
static IIO_DEVICE_ATTR(sampling_frequency,
S_IWUSR | S_IRUGO,
show_sampling_frequency,
set_sampling_frequency,
HMC5843_CONFIG_REG_A);
/*
* From Datasheet
* Nominal gain settings
* Value | Sensor Input Field Range (Ga) | Gain (counts/milli-gauss)
*   0   | (+-)0.7                       | 1620
*   1   | (+-)1.0                       | 1300
*   2   | (+-)1.5                       |  970
*   3   | (+-)2.0                       |  780
*   4   | (+-)3.2                       |  530
*   5   | (+-)3.8                       |  460
*   6   | (+-)4.5                       |  390
*   7   | (+-)6.5                       |  280
*/
static ssize_t show_range(struct device *dev,
struct device_attribute *attr,
char *buf)
{
u8 range;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct hmc5843_data *data = iio_priv(indio_dev);
range = data->range;
return sprintf(buf, "%d\n", regval_to_input_field_mg[range]);
}
static ssize_t set_range(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct i2c_client *client = to_i2c_client(indio_dev->dev.parent);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
struct hmc5843_data *data = iio_priv(indio_dev);
unsigned long range = 0;
int error;
mutex_lock(&data->lock);
error = strict_strtoul(buf, 10, &range);
if (error) {
count = error;
goto exit;
}
dev_dbg(dev, "set range to %lu\n", range);
if (range > RANGE_6_5) {
count = -EINVAL;
goto exit;
}
data->range = range;
range = range << RANGE_GAIN_OFFSET;
if (i2c_smbus_write_byte_data(client, this_attr->address, range))
count = -EINVAL;
exit:
mutex_unlock(&data->lock);
return count;
}
static IIO_DEVICE_ATTR(in_magn_range,
S_IWUSR | S_IRUGO,
show_range,
set_range,
HMC5843_CONFIG_REG_B);
static int hmc5843_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2,
long mask)
{
struct hmc5843_data *data = iio_priv(indio_dev);
switch (mask) {
case 0:
return hmc5843_read_measurement(indio_dev,
chan->address,
val);
case IIO_CHAN_INFO_SCALE:
*val = 0;
*val2 = hmc5843_regval_to_nanoscale[data->range];
return IIO_VAL_INT_PLUS_NANO;
}
return -EINVAL;
}
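/*
 * Illustrative sketch (not part of the driver): how a consumer combines
 * the IIO_VAL_INT_PLUS_NANO pair returned above into one value. For
 * example at RANGE_1_0 a raw count of 1000 maps to
 * 1000 * 0.000007692 ~= 0.0077 Ga.
 */
#if 0 /* userspace illustration only */
static double hmc5843_scale_reading(int raw, int scale_int, int scale_nano)
{
return raw * ((double)scale_int + (double)scale_nano / 1e9);
}
#endif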
#define HMC5843_CHANNEL(axis, add) \
{ \
.type = IIO_MAGN, \
.modified = 1, \
.channel2 = IIO_MOD_##axis, \
.info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT, \
.address = add \
}
static const struct iio_chan_spec hmc5843_channels[] = {
HMC5843_CHANNEL(X, HMC5843_DATA_OUT_X_MSB_REG),
HMC5843_CHANNEL(Y, HMC5843_DATA_OUT_Y_MSB_REG),
HMC5843_CHANNEL(Z, HMC5843_DATA_OUT_Z_MSB_REG),
};
static struct attribute *hmc5843_attributes[] = {
&iio_dev_attr_meas_conf.dev_attr.attr,
&iio_dev_attr_operating_mode.dev_attr.attr,
&iio_dev_attr_sampling_frequency.dev_attr.attr,
&iio_dev_attr_in_magn_range.dev_attr.attr,
&iio_const_attr_sampling_frequency_available.dev_attr.attr,
NULL
};
static const struct attribute_group hmc5843_group = {
.attrs = hmc5843_attributes,
};
static int hmc5843_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
unsigned char id_str[HMC5843_ID_REG_LENGTH];
if (client->addr != HMC5843_I2C_ADDRESS)
return -ENODEV;
if (i2c_smbus_read_i2c_block_data(client, HMC5843_ID_REG_A,
HMC5843_ID_REG_LENGTH, id_str)
!= HMC5843_ID_REG_LENGTH)
return -ENODEV;
if (0 != strncmp(id_str, HMC5843_ID_STRING, HMC5843_ID_REG_LENGTH))
return -ENODEV;
return 0;
}
/* Called when we have found a new HMC5843. */
static void hmc5843_init_client(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct hmc5843_data *data = iio_priv(indio_dev);
hmc5843_set_meas_conf(client, data->meas_conf);
hmc5843_set_rate(client, data->rate);
hmc5843_configure(client, data->operating_mode);
i2c_smbus_write_byte_data(client, HMC5843_CONFIG_REG_B, data->range);
mutex_init(&data->lock);
pr_info("HMC5843 initialized\n");
}
static const struct iio_info hmc5843_info = {
.attrs = &hmc5843_group,
.read_raw = &hmc5843_read_raw,
.driver_module = THIS_MODULE,
};
static int hmc5843_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct hmc5843_data *data;
struct iio_dev *indio_dev;
int err = 0;
indio_dev = iio_allocate_device(sizeof(*data));
if (indio_dev == NULL) {
err = -ENOMEM;
goto exit;
}
data = iio_priv(indio_dev);
/* default settings at probe */
data->meas_conf = CONF_NORMAL;
data->range = RANGE_1_0;
data->operating_mode = MODE_CONVERSION_CONTINUOUS;
i2c_set_clientdata(client, indio_dev);
/* Initialize the HMC5843 chip */
hmc5843_init_client(client);
indio_dev->info = &hmc5843_info;
indio_dev->name = id->name;
indio_dev->channels = hmc5843_channels;
indio_dev->num_channels = ARRAY_SIZE(hmc5843_channels);
indio_dev->dev.parent = &client->dev;
indio_dev->modes = INDIO_DIRECT_MODE;
err = iio_device_register(indio_dev);
if (err)
goto exit_free2;
return 0;
exit_free2:
iio_free_device(indio_dev);
exit:
return err;
}
static int hmc5843_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
iio_device_unregister(indio_dev);
/* sleep mode to save power */
hmc5843_configure(client, MODE_SLEEP);
iio_free_device(indio_dev);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int hmc5843_suspend(struct device *dev)
{
hmc5843_configure(to_i2c_client(dev), MODE_SLEEP);
return 0;
}
static int hmc5843_resume(struct device *dev)
{
struct hmc5843_data *data = i2c_get_clientdata(to_i2c_client(dev));
hmc5843_configure(to_i2c_client(dev), data->operating_mode);
return 0;
}
static SIMPLE_DEV_PM_OPS(hmc5843_pm_ops, hmc5843_suspend, hmc5843_resume);
#define HMC5843_PM_OPS (&hmc5843_pm_ops)
#else
#define HMC5843_PM_OPS NULL
#endif
static const struct i2c_device_id hmc5843_id[] = {
{ "hmc5843", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, hmc5843_id);
static struct i2c_driver hmc5843_driver = {
.driver = {
.name = "hmc5843",
.pm = HMC5843_PM_OPS,
},
.id_table = hmc5843_id,
.probe = hmc5843_probe,
.remove = hmc5843_remove,
.detect = hmc5843_detect,
.address_list = normal_i2c,
};
module_i2c_driver(hmc5843_driver);
MODULE_AUTHOR("Shubhrajyoti Datta <shubhrajyoti@ti.com>");
MODULE_DESCRIPTION("HMC5843 driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
basr/Hammerhead | arch/arm/mach-orion5x/pci.c | 4864 | 15288 | /*
* arch/arm/mach-orion5x/pci.c
*
* PCI and PCIe functions for Marvell Orion System On Chip
*
* Maintainer: Tzachi Perelstein <tzachi@marvell.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/mbus.h>
#include <video/vga.h>
#include <asm/irq.h>
#include <asm/mach/pci.h>
#include <plat/pcie.h>
#include <plat/addr-map.h>
#include <mach/orion5x.h>
#include "common.h"
/*****************************************************************************
* Orion has one PCIe controller and one PCI controller.
*
* Note1: The local PCIe bus number is '0'. The local PCI bus number
* follows the scanned PCIe bridged busses, if any.
*
* Note2: It is possible for PCI/PCIe agents to access many subsystem's
* space, by configuring BARs and Address Decode Windows, e.g. flashes on
** device bus, Orion registers, etc. However, this code only enables
** access to the DDR banks.
****************************************************************************/
/*****************************************************************************
* PCIe controller
****************************************************************************/
#define PCIE_BASE ((void __iomem *)ORION5X_PCIE_VIRT_BASE)
void __init orion5x_pcie_id(u32 *dev, u32 *rev)
{
*dev = orion_pcie_dev_id(PCIE_BASE);
*rev = orion_pcie_rev(PCIE_BASE);
}
static int pcie_valid_config(int bus, int dev)
{
/*
* Don't go out on the bus when trying to access --
* 1. a nonexistent device on the local bus
* 2. any device when there's no link (nothing connected)
*/
if (bus == 0 && dev == 0)
return 1;
if (!orion_pcie_link_up(PCIE_BASE))
return 0;
if (bus == 0 && dev != 1)
return 0;
return 1;
}
/*
* PCIe config cycles are done by programming the PCIE_CONF_ADDR register
* and then reading the PCIE_CONF_DATA register. Need to make sure these
* transactions are atomic.
*/
static DEFINE_SPINLOCK(orion5x_pcie_lock);
static int pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
int size, u32 *val)
{
unsigned long flags;
int ret;
if (pcie_valid_config(bus->number, PCI_SLOT(devfn)) == 0) {
*val = 0xffffffff;
return PCIBIOS_DEVICE_NOT_FOUND;
}
spin_lock_irqsave(&orion5x_pcie_lock, flags);
ret = orion_pcie_rd_conf(PCIE_BASE, bus, devfn, where, size, val);
spin_unlock_irqrestore(&orion5x_pcie_lock, flags);
return ret;
}
static int pcie_rd_conf_wa(struct pci_bus *bus, u32 devfn,
int where, int size, u32 *val)
{
int ret;
if (pcie_valid_config(bus->number, PCI_SLOT(devfn)) == 0) {
*val = 0xffffffff;
return PCIBIOS_DEVICE_NOT_FOUND;
}
/*
* We only support access to the non-extended configuration
* space when using the WA access method (or we would have to
* sacrifice 256M of CPU virtual address space.)
*/
if (where >= 0x100) {
*val = 0xffffffff;
return PCIBIOS_DEVICE_NOT_FOUND;
}
ret = orion_pcie_rd_conf_wa((void __iomem *)ORION5X_PCIE_WA_VIRT_BASE,
bus, devfn, where, size, val);
return ret;
}
static int pcie_wr_conf(struct pci_bus *bus, u32 devfn,
int where, int size, u32 val)
{
unsigned long flags;
int ret;
if (pcie_valid_config(bus->number, PCI_SLOT(devfn)) == 0)
return PCIBIOS_DEVICE_NOT_FOUND;
spin_lock_irqsave(&orion5x_pcie_lock, flags);
ret = orion_pcie_wr_conf(PCIE_BASE, bus, devfn, where, size, val);
spin_unlock_irqrestore(&orion5x_pcie_lock, flags);
return ret;
}
static struct pci_ops pcie_ops = {
.read = pcie_rd_conf,
.write = pcie_wr_conf,
};
static int __init pcie_setup(struct pci_sys_data *sys)
{
struct resource *res;
int dev;
/*
* Generic PCIe unit setup.
*/
orion_pcie_setup(PCIE_BASE);
/*
* Check whether to apply Orion-1/Orion-NAS PCIe config
* read transaction workaround.
*/
dev = orion_pcie_dev_id(PCIE_BASE);
if (dev == MV88F5181_DEV_ID || dev == MV88F5182_DEV_ID) {
printk(KERN_NOTICE "Applying Orion-1/Orion-NAS PCIe config "
"read transaction workaround\n");
orion5x_setup_pcie_wa_win(ORION5X_PCIE_WA_PHYS_BASE,
ORION5X_PCIE_WA_SIZE);
pcie_ops.read = pcie_rd_conf_wa;
}
/*
* Request resources.
*/
res = kzalloc(sizeof(struct resource) * 2, GFP_KERNEL);
if (!res)
panic("pcie_setup unable to alloc resources");
/*
* IORESOURCE_IO
*/
sys->io_offset = 0;
res[0].name = "PCIe I/O Space";
res[0].flags = IORESOURCE_IO;
res[0].start = ORION5X_PCIE_IO_BUS_BASE;
res[0].end = res[0].start + ORION5X_PCIE_IO_SIZE - 1;
if (request_resource(&ioport_resource, &res[0]))
panic("Request PCIe IO resource failed\n");
pci_add_resource_offset(&sys->resources, &res[0], sys->io_offset);
/*
* IORESOURCE_MEM
*/
res[1].name = "PCIe Memory Space";
res[1].flags = IORESOURCE_MEM;
res[1].start = ORION5X_PCIE_MEM_PHYS_BASE;
res[1].end = res[1].start + ORION5X_PCIE_MEM_SIZE - 1;
if (request_resource(&iomem_resource, &res[1]))
panic("Request PCIe Memory resource failed\n");
pci_add_resource_offset(&sys->resources, &res[1], sys->mem_offset);
return 1;
}
/*****************************************************************************
* PCI controller
****************************************************************************/
#define ORION5X_PCI_REG(x) (ORION5X_PCI_VIRT_BASE | (x))
#define PCI_MODE ORION5X_PCI_REG(0xd00)
#define PCI_CMD ORION5X_PCI_REG(0xc00)
#define PCI_P2P_CONF ORION5X_PCI_REG(0x1d14)
#define PCI_CONF_ADDR ORION5X_PCI_REG(0xc78)
#define PCI_CONF_DATA ORION5X_PCI_REG(0xc7c)
/*
* PCI_MODE bits
*/
#define PCI_MODE_64BIT (1 << 2)
#define PCI_MODE_PCIX ((1 << 4) | (1 << 5))
/*
* PCI_CMD bits
*/
#define PCI_CMD_HOST_REORDER (1 << 29)
/*
* PCI_P2P_CONF bits
*/
#define PCI_P2P_BUS_OFFS 16
#define PCI_P2P_BUS_MASK (0xff << PCI_P2P_BUS_OFFS)
#define PCI_P2P_DEV_OFFS 24
#define PCI_P2P_DEV_MASK (0x1f << PCI_P2P_DEV_OFFS)
/*
* PCI_CONF_ADDR bits
*/
#define PCI_CONF_REG(reg) ((reg) & 0xfc)
#define PCI_CONF_FUNC(func) (((func) & 0x3) << 8)
#define PCI_CONF_DEV(dev) (((dev) & 0x1f) << 11)
#define PCI_CONF_BUS(bus) (((bus) & 0xff) << 16)
#define PCI_CONF_ADDR_EN (1 << 31)
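/*
 * Worked example (illustration): a config read of the vendor ID of
 * bus 0, device 1, function 0 programs PCI_CONF_ADDR with
 * PCI_CONF_BUS(0) | PCI_CONF_DEV(1) | PCI_CONF_FUNC(0) |
 * PCI_CONF_REG(0x00) | PCI_CONF_ADDR_EN == 0x80000800.
 */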
/*
* Internal configuration space
*/
#define PCI_CONF_FUNC_STAT_CMD 0
#define PCI_CONF_REG_STAT_CMD 4
#define PCIX_STAT 0x64
#define PCIX_STAT_BUS_OFFS 8
#define PCIX_STAT_BUS_MASK (0xff << PCIX_STAT_BUS_OFFS)
/*
* PCI Address Decode Windows registers
*/
#define PCI_BAR_SIZE_DDR_CS(n) (((n) == 0) ? ORION5X_PCI_REG(0xc08) : \
((n) == 1) ? ORION5X_PCI_REG(0xd08) : \
((n) == 2) ? ORION5X_PCI_REG(0xc0c) : \
((n) == 3) ? ORION5X_PCI_REG(0xd0c) : 0)
#define PCI_BAR_REMAP_DDR_CS(n) (((n) == 0) ? ORION5X_PCI_REG(0xc48) : \
((n) == 1) ? ORION5X_PCI_REG(0xd48) : \
((n) == 2) ? ORION5X_PCI_REG(0xc4c) : \
((n) == 3) ? ORION5X_PCI_REG(0xd4c) : 0)
#define PCI_BAR_ENABLE ORION5X_PCI_REG(0xc3c)
#define PCI_ADDR_DECODE_CTRL ORION5X_PCI_REG(0xd3c)
/*
* PCI configuration helpers for BAR settings
*/
#define PCI_CONF_FUNC_BAR_CS(n) ((n) >> 1)
#define PCI_CONF_REG_BAR_LO_CS(n) (((n) & 1) ? 0x18 : 0x10)
#define PCI_CONF_REG_BAR_HI_CS(n) (((n) & 1) ? 0x1c : 0x14)
/*
* PCI config cycles are done by programming the PCI_CONF_ADDR register
* and then reading the PCI_CONF_DATA register. Need to make sure these
* transactions are atomic.
*/
static DEFINE_SPINLOCK(orion5x_pci_lock);
static int orion5x_pci_cardbus_mode;
static int orion5x_pci_local_bus_nr(void)
{
u32 conf = readl(PCI_P2P_CONF);
return((conf & PCI_P2P_BUS_MASK) >> PCI_P2P_BUS_OFFS);
}
static int orion5x_pci_hw_rd_conf(int bus, int dev, u32 func,
u32 where, u32 size, u32 *val)
{
unsigned long flags;
spin_lock_irqsave(&orion5x_pci_lock, flags);
writel(PCI_CONF_BUS(bus) |
PCI_CONF_DEV(dev) | PCI_CONF_REG(where) |
PCI_CONF_FUNC(func) | PCI_CONF_ADDR_EN, PCI_CONF_ADDR);
*val = readl(PCI_CONF_DATA);
if (size == 1)
*val = (*val >> (8*(where & 0x3))) & 0xff;
else if (size == 2)
*val = (*val >> (8*(where & 0x3))) & 0xffff;
spin_unlock_irqrestore(&orion5x_pci_lock, flags);
return PCIBIOS_SUCCESSFUL;
}
static int orion5x_pci_hw_wr_conf(int bus, int dev, u32 func,
u32 where, u32 size, u32 val)
{
unsigned long flags;
int ret = PCIBIOS_SUCCESSFUL;
spin_lock_irqsave(&orion5x_pci_lock, flags);
writel(PCI_CONF_BUS(bus) |
PCI_CONF_DEV(dev) | PCI_CONF_REG(where) |
PCI_CONF_FUNC(func) | PCI_CONF_ADDR_EN, PCI_CONF_ADDR);
if (size == 4) {
__raw_writel(val, PCI_CONF_DATA);
} else if (size == 2) {
__raw_writew(val, PCI_CONF_DATA + (where & 0x3));
} else if (size == 1) {
__raw_writeb(val, PCI_CONF_DATA + (where & 0x3));
} else {
ret = PCIBIOS_BAD_REGISTER_NUMBER;
}
spin_unlock_irqrestore(&orion5x_pci_lock, flags);
return ret;
}
static int orion5x_pci_valid_config(int bus, u32 devfn)
{
if (bus == orion5x_pci_local_bus_nr()) {
/*
* Don't go out for local device
*/
if (PCI_SLOT(devfn) == 0 && PCI_FUNC(devfn) != 0)
return 0;
/*
* When the PCI signals are directly connected to a
* Cardbus slot, ignore all but device IDs 0 and 1.
*/
if (orion5x_pci_cardbus_mode && PCI_SLOT(devfn) > 1)
return 0;
}
return 1;
}
static int orion5x_pci_rd_conf(struct pci_bus *bus, u32 devfn,
int where, int size, u32 *val)
{
if (!orion5x_pci_valid_config(bus->number, devfn)) {
*val = 0xffffffff;
return PCIBIOS_DEVICE_NOT_FOUND;
}
return orion5x_pci_hw_rd_conf(bus->number, PCI_SLOT(devfn),
PCI_FUNC(devfn), where, size, val);
}
static int orion5x_pci_wr_conf(struct pci_bus *bus, u32 devfn,
int where, int size, u32 val)
{
if (!orion5x_pci_valid_config(bus->number, devfn))
return PCIBIOS_DEVICE_NOT_FOUND;
return orion5x_pci_hw_wr_conf(bus->number, PCI_SLOT(devfn),
PCI_FUNC(devfn), where, size, val);
}
static struct pci_ops pci_ops = {
.read = orion5x_pci_rd_conf,
.write = orion5x_pci_wr_conf,
};
static void __init orion5x_pci_set_bus_nr(int nr)
{
u32 p2p = readl(PCI_P2P_CONF);
if (readl(PCI_MODE) & PCI_MODE_PCIX) {
/*
* PCI-X mode
*/
u32 pcix_status, bus, dev;
bus = (p2p & PCI_P2P_BUS_MASK) >> PCI_P2P_BUS_OFFS;
dev = (p2p & PCI_P2P_DEV_MASK) >> PCI_P2P_DEV_OFFS;
orion5x_pci_hw_rd_conf(bus, dev, 0, PCIX_STAT, 4, &pcix_status);
pcix_status &= ~PCIX_STAT_BUS_MASK;
pcix_status |= (nr << PCIX_STAT_BUS_OFFS);
orion5x_pci_hw_wr_conf(bus, dev, 0, PCIX_STAT, 4, pcix_status);
} else {
/*
* PCI Conventional mode
*/
p2p &= ~PCI_P2P_BUS_MASK;
p2p |= (nr << PCI_P2P_BUS_OFFS);
writel(p2p, PCI_P2P_CONF);
}
}
static void __init orion5x_pci_master_slave_enable(void)
{
int bus_nr, func, reg;
u32 val;
bus_nr = orion5x_pci_local_bus_nr();
func = PCI_CONF_FUNC_STAT_CMD;
reg = PCI_CONF_REG_STAT_CMD;
orion5x_pci_hw_rd_conf(bus_nr, 0, func, reg, 4, &val);
val |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
orion5x_pci_hw_wr_conf(bus_nr, 0, func, reg, 4, val | 0x7);
}
static void __init orion5x_setup_pci_wins(struct mbus_dram_target_info *dram)
{
u32 win_enable;
int bus;
int i;
/*
* First, disable windows.
*/
win_enable = 0xffffffff;
writel(win_enable, PCI_BAR_ENABLE);
/*
* Setup windows for DDR banks.
*/
bus = orion5x_pci_local_bus_nr();
for (i = 0; i < dram->num_cs; i++) {
struct mbus_dram_window *cs = dram->cs + i;
u32 func = PCI_CONF_FUNC_BAR_CS(cs->cs_index);
u32 reg;
u32 val;
/*
* Write DRAM bank base address register.
*/
reg = PCI_CONF_REG_BAR_LO_CS(cs->cs_index);
orion5x_pci_hw_rd_conf(bus, 0, func, reg, 4, &val);
val = (cs->base & 0xfffff000) | (val & 0xfff);
orion5x_pci_hw_wr_conf(bus, 0, func, reg, 4, val);
/*
* Write DRAM bank size register.
*/
reg = PCI_CONF_REG_BAR_HI_CS(cs->cs_index);
orion5x_pci_hw_wr_conf(bus, 0, func, reg, 4, 0);
writel((cs->size - 1) & 0xfffff000,
PCI_BAR_SIZE_DDR_CS(cs->cs_index));
writel(cs->base & 0xfffff000,
PCI_BAR_REMAP_DDR_CS(cs->cs_index));
/*
* Enable decode window for this chip select.
*/
win_enable &= ~(1 << cs->cs_index);
}
/*
* Re-enable decode windows.
*/
writel(win_enable, PCI_BAR_ENABLE);
/*
* Disable automatic update of address remapping when writing to BARs.
*/
orion5x_setbits(PCI_ADDR_DECODE_CTRL, 1);
}
static int __init pci_setup(struct pci_sys_data *sys)
{
struct resource *res;
/*
* Point PCI unit MBUS decode windows to DRAM space.
*/
orion5x_setup_pci_wins(&orion_mbus_dram_info);
/*
* Master + Slave enable
*/
orion5x_pci_master_slave_enable();
/*
* Force ordering
*/
orion5x_setbits(PCI_CMD, PCI_CMD_HOST_REORDER);
/*
* Request resources
*/
res = kzalloc(sizeof(struct resource) * 2, GFP_KERNEL);
if (!res)
panic("pci_setup unable to alloc resources");
/*
* IORESOURCE_IO
*/
sys->io_offset = 0;
res[0].name = "PCI I/O Space";
res[0].flags = IORESOURCE_IO;
res[0].start = ORION5X_PCI_IO_BUS_BASE;
res[0].end = res[0].start + ORION5X_PCI_IO_SIZE - 1;
if (request_resource(&ioport_resource, &res[0]))
panic("Request PCI IO resource failed\n");
pci_add_resource_offset(&sys->resources, &res[0], sys->io_offset);
/*
* IORESOURCE_MEM
*/
res[1].name = "PCI Memory Space";
res[1].flags = IORESOURCE_MEM;
res[1].start = ORION5X_PCI_MEM_PHYS_BASE;
res[1].end = res[1].start + ORION5X_PCI_MEM_SIZE - 1;
if (request_resource(&iomem_resource, &res[1]))
panic("Request PCI Memory resource failed\n");
pci_add_resource_offset(&sys->resources, &res[1], sys->mem_offset);
return 1;
}
/*****************************************************************************
* General PCIe + PCI
****************************************************************************/
static void __devinit rc_pci_fixup(struct pci_dev *dev)
{
/*
* Prevent enumeration of root complex.
*/
if (dev->bus->parent == NULL && dev->devfn == 0) {
int i;
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
dev->resource[i].start = 0;
dev->resource[i].end = 0;
dev->resource[i].flags = 0;
}
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL, PCI_ANY_ID, rc_pci_fixup);
static int orion5x_pci_disabled __initdata;
void __init orion5x_pci_disable(void)
{
orion5x_pci_disabled = 1;
}
void __init orion5x_pci_set_cardbus_mode(void)
{
orion5x_pci_cardbus_mode = 1;
}
int __init orion5x_pci_sys_setup(int nr, struct pci_sys_data *sys)
{
int ret = 0;
vga_base = ORION5X_PCIE_MEM_PHYS_BASE;
if (nr == 0) {
orion_pcie_set_local_bus_nr(PCIE_BASE, sys->busnr);
ret = pcie_setup(sys);
} else if (nr == 1 && !orion5x_pci_disabled) {
orion5x_pci_set_bus_nr(sys->busnr);
ret = pci_setup(sys);
}
return ret;
}
struct pci_bus __init *orion5x_pci_sys_scan_bus(int nr, struct pci_sys_data *sys)
{
struct pci_bus *bus;
if (nr == 0) {
bus = pci_scan_root_bus(NULL, sys->busnr, &pcie_ops, sys,
&sys->resources);
} else if (nr == 1 && !orion5x_pci_disabled) {
bus = pci_scan_root_bus(NULL, sys->busnr, &pci_ops, sys,
&sys->resources);
} else {
bus = NULL;
BUG();
}
return bus;
}
int __init orion5x_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
int bus = dev->bus->number;
/*
* PCIe endpoint?
*/
if (orion5x_pci_disabled || bus < orion5x_pci_local_bus_nr())
return IRQ_ORION5X_PCIE0_INT;
return -1;
}
| gpl-2.0 |
tepelmann/linux-perf-cumulate | arch/sh/kernel/io_trapped.c | 6400 | 6544 | /*
* Trapped io support
*
* Copyright (C) 2008 Magnus Damm
*
* Intercept io operations by trapping.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/init.h>
#include <asm/mmu_context.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/io_trapped.h>
#define TRAPPED_PAGES_MAX 16
#ifdef CONFIG_HAS_IOPORT
LIST_HEAD(trapped_io);
EXPORT_SYMBOL_GPL(trapped_io);
#endif
#ifdef CONFIG_HAS_IOMEM
LIST_HEAD(trapped_mem);
EXPORT_SYMBOL_GPL(trapped_mem);
#endif
static DEFINE_SPINLOCK(trapped_lock);
static int trapped_io_disable __read_mostly;
static int __init trapped_io_setup(char *__unused)
{
trapped_io_disable = 1;
return 1;
}
__setup("noiotrap", trapped_io_setup);
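/* Booting with "noiotrap" on the kernel command line disables the
 * trapped io machinery entirely. */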
int register_trapped_io(struct trapped_io *tiop)
{
struct resource *res;
unsigned long len = 0, flags = 0;
struct page *pages[TRAPPED_PAGES_MAX];
int k, n;
if (unlikely(trapped_io_disable))
return 0;
/* structure must be page aligned */
if ((unsigned long)tiop & (PAGE_SIZE - 1))
goto bad;
for (k = 0; k < tiop->num_resources; k++) {
res = tiop->resource + k;
len += roundup(resource_size(res), PAGE_SIZE);
flags |= res->flags;
}
/* support IORESOURCE_IO _or_ MEM, not both */
if (hweight_long(flags) != 1)
goto bad;
n = len >> PAGE_SHIFT;
if (n >= TRAPPED_PAGES_MAX)
goto bad;
for (k = 0; k < n; k++)
pages[k] = virt_to_page(tiop);
tiop->virt_base = vmap(pages, n, VM_MAP, PAGE_NONE);
if (!tiop->virt_base)
goto bad;
len = 0;
for (k = 0; k < tiop->num_resources; k++) {
res = tiop->resource + k;
pr_info("trapped io 0x%08lx overrides %s 0x%08lx\n",
(unsigned long)(tiop->virt_base + len),
res->flags & IORESOURCE_IO ? "io" : "mmio",
(unsigned long)res->start);
len += roundup(resource_size(res), PAGE_SIZE);
}
tiop->magic = IO_TRAPPED_MAGIC;
INIT_LIST_HEAD(&tiop->list);
spin_lock_irq(&trapped_lock);
#ifdef CONFIG_HAS_IOPORT
if (flags & IORESOURCE_IO)
list_add(&tiop->list, &trapped_io);
#endif
#ifdef CONFIG_HAS_IOMEM
if (flags & IORESOURCE_MEM)
list_add(&tiop->list, &trapped_mem);
#endif
spin_unlock_irq(&trapped_lock);
return 0;
bad:
pr_warning("unable to install trapped io filter\n");
return -1;
}
EXPORT_SYMBOL_GPL(register_trapped_io);
void __iomem *match_trapped_io_handler(struct list_head *list,
unsigned long offset,
unsigned long size)
{
unsigned long voffs;
struct trapped_io *tiop;
struct resource *res;
int k, len;
unsigned long flags;
spin_lock_irqsave(&trapped_lock, flags);
list_for_each_entry(tiop, list, list) {
voffs = 0;
for (k = 0; k < tiop->num_resources; k++) {
res = tiop->resource + k;
if (res->start == offset) {
spin_unlock_irqrestore(&trapped_lock, flags);
return tiop->virt_base + voffs;
}
len = resource_size(res);
voffs += roundup(len, PAGE_SIZE);
}
}
spin_unlock_irqrestore(&trapped_lock, flags);
return NULL;
}
EXPORT_SYMBOL_GPL(match_trapped_io_handler);
static struct trapped_io *lookup_tiop(unsigned long address)
{
pgd_t *pgd_k;
pud_t *pud_k;
pmd_t *pmd_k;
pte_t *pte_k;
pte_t entry;
pgd_k = swapper_pg_dir + pgd_index(address);
if (!pgd_present(*pgd_k))
return NULL;
pud_k = pud_offset(pgd_k, address);
if (!pud_present(*pud_k))
return NULL;
pmd_k = pmd_offset(pud_k, address);
if (!pmd_present(*pmd_k))
return NULL;
pte_k = pte_offset_kernel(pmd_k, address);
entry = *pte_k;
return pfn_to_kaddr(pte_pfn(entry));
}
static unsigned long lookup_address(struct trapped_io *tiop,
unsigned long address)
{
struct resource *res;
unsigned long vaddr = (unsigned long)tiop->virt_base;
unsigned long len;
int k;
for (k = 0; k < tiop->num_resources; k++) {
res = tiop->resource + k;
len = roundup(resource_size(res), PAGE_SIZE);
if (address < (vaddr + len))
return res->start + (address - vaddr);
vaddr += len;
}
return 0;
}
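/*
 * Move a single access between the trapped alias and the real device
 * address, reading src_len bytes and writing dst_len bytes; the value
 * moved is returned so callers can trace it.
 */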
static unsigned long long copy_word(unsigned long src_addr, int src_len,
unsigned long dst_addr, int dst_len)
{
unsigned long long tmp = 0;
switch (src_len) {
case 1:
tmp = __raw_readb(src_addr);
break;
case 2:
tmp = __raw_readw(src_addr);
break;
case 4:
tmp = __raw_readl(src_addr);
break;
case 8:
tmp = __raw_readq(src_addr);
break;
}
switch (dst_len) {
case 1:
__raw_writeb(tmp, dst_addr);
break;
case 2:
__raw_writew(tmp, dst_addr);
break;
case 4:
__raw_writel(tmp, dst_addr);
break;
case 8:
__raw_writeq(tmp, dst_addr);
break;
}
return tmp;
}
static unsigned long from_device(void *dst, const void *src, unsigned long cnt)
{
struct trapped_io *tiop;
unsigned long src_addr = (unsigned long)src;
unsigned long long tmp;
pr_debug("trapped io read 0x%08lx (%ld)\n", src_addr, cnt);
tiop = lookup_tiop(src_addr);
WARN_ON(!tiop || (tiop->magic != IO_TRAPPED_MAGIC));
src_addr = lookup_address(tiop, src_addr);
if (!src_addr)
return cnt;
tmp = copy_word(src_addr,
max_t(unsigned long, cnt,
(tiop->minimum_bus_width / 8)),
(unsigned long)dst, cnt);
pr_debug("trapped io read 0x%08lx -> 0x%08llx\n", src_addr, tmp);
return 0;
}
static unsigned long to_device(void *dst, const void *src, unsigned long cnt)
{
struct trapped_io *tiop;
unsigned long dst_addr = (unsigned long)dst;
unsigned long long tmp;
pr_debug("trapped io write 0x%08lx (%ld)\n", dst_addr, cnt);
tiop = lookup_tiop(dst_addr);
WARN_ON(!tiop || (tiop->magic != IO_TRAPPED_MAGIC));
dst_addr = lookup_address(tiop, dst_addr);
if (!dst_addr)
return cnt;
tmp = copy_word((unsigned long)src, cnt,
dst_addr, max_t(unsigned long, cnt,
(tiop->minimum_bus_width / 8)));
pr_debug("trapped io write 0x%08lx -> 0x%08llx\n", dst_addr, tmp);
return 0;
}
static struct mem_access trapped_io_access = {
from_device,
to_device,
};
int handle_trapped_io(struct pt_regs *regs, unsigned long address)
{
mm_segment_t oldfs;
insn_size_t instruction;
int tmp;
if (trapped_io_disable)
return 0;
if (!lookup_tiop(address))
return 0;
WARN_ON(user_mode(regs));
oldfs = get_fs();
set_fs(KERNEL_DS);
if (copy_from_user(&instruction, (void *)(regs->pc),
sizeof(instruction))) {
set_fs(oldfs);
return 0;
}
tmp = handle_unaligned_access(instruction, regs,
&trapped_io_access, 1, address);
set_fs(oldfs);
return tmp == 0;
}
| gpl-2.0 |
keyser84/android_kernel_motorola_msm8226 | arch/ia64/hp/common/sba_iommu.c | 6656 | 60275 | /*
** IA64 System Bus Adapter (SBA) I/O MMU manager
**
** (c) Copyright 2002-2005 Alex Williamson
** (c) Copyright 2002-2003 Grant Grundler
** (c) Copyright 2002-2005 Hewlett-Packard Company
**
** Portions (c) 2000 Grant Grundler (from parisc I/O MMU code)
** Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code)
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
**
**
** This module initializes the IOC (I/O Controller) found on HP
** McKinley machines and their successors.
**
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/nodemask.h>
#include <linux/bitops.h> /* hweight64() */
#include <linux/crash_dump.h>
#include <linux/iommu-helper.h>
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>
#include <asm/delay.h> /* ia64_get_itc() */
#include <asm/io.h>
#include <asm/page.h> /* PAGE_OFFSET */
#include <asm/dma.h>
#include <asm/acpi-ext.h>
extern int swiotlb_late_init_with_default_size (size_t size);
#define PFX "IOC: "
/*
** Enables timing of searches of the pdir resource map; output in /proc.
** Disabled by default to optimize performance.
*/
#undef PDIR_SEARCH_TIMING
/*
** This option allows cards capable of 64bit DMA to bypass the IOMMU. If
** not defined, all DMA will be 32bit and go through the TLB.
** There's potentially a conflict in the bio merge code with us
** advertising an iommu, but then bypassing it. Since I/O MMU bypassing
** appears to give more performance than bio-level virtual merging, we'll
** do the former for now. NOTE: BYPASS_SG also needs to be undef'd to
** completely restrict DMA to the IOMMU.
*/
#define ALLOW_IOV_BYPASS
/*
** This option specifically allows/disallows bypassing scatterlists with
** multiple entries. Coalescing these entries can allow better DMA streaming
** and in some cases shows better performance than entirely bypassing the
** IOMMU. Performance increase on the order of 1-2% sequential output/input
** using bonnie++ on a RAID0 MD device (sym2 & mpt).
*/
#undef ALLOW_IOV_BYPASS_SG
/*
** If a device prefetches beyond the end of a valid pdir entry, it will cause
** a hard failure, ie. MCA. Version 3.0 and later of the zx1 LBA should
** disconnect on 4k boundaries and prevent such issues. If the device is
** particularly aggressive, this option will keep the entire pdir valid such
** that prefetching will hit a valid address. This could severely impact
** error containment, and is therefore off by default. The page that is
** used for spill-over is poisoned, so that should help debugging somewhat.
*/
#undef FULL_VALID_PDIR
#define ENABLE_MARK_CLEAN
/*
** The number of debug flags is a clue - this code is fragile. NOTE: since
** tightening the use of res_lock the resource bitmap and actual pdir are no
** longer guaranteed to stay in sync. The sanity checking code isn't going to
** like that.
*/
#undef DEBUG_SBA_INIT
#undef DEBUG_SBA_RUN
#undef DEBUG_SBA_RUN_SG
#undef DEBUG_SBA_RESOURCE
#undef ASSERT_PDIR_SANITY
#undef DEBUG_LARGE_SG_ENTRIES
#undef DEBUG_BYPASS
#if defined(FULL_VALID_PDIR) && defined(ASSERT_PDIR_SANITY)
#error FULL_VALID_PDIR and ASSERT_PDIR_SANITY are mutually exclusive
#endif
#define SBA_INLINE __inline__
/* #define SBA_INLINE */
#ifdef DEBUG_SBA_INIT
#define DBG_INIT(x...) printk(x)
#else
#define DBG_INIT(x...)
#endif
#ifdef DEBUG_SBA_RUN
#define DBG_RUN(x...) printk(x)
#else
#define DBG_RUN(x...)
#endif
#ifdef DEBUG_SBA_RUN_SG
#define DBG_RUN_SG(x...) printk(x)
#else
#define DBG_RUN_SG(x...)
#endif
#ifdef DEBUG_SBA_RESOURCE
#define DBG_RES(x...) printk(x)
#else
#define DBG_RES(x...)
#endif
#ifdef DEBUG_BYPASS
#define DBG_BYPASS(x...) printk(x)
#else
#define DBG_BYPASS(x...)
#endif
#ifdef ASSERT_PDIR_SANITY
#define ASSERT(expr) \
if(!(expr)) { \
printk( "\n" __FILE__ ":%d: Assertion " #expr " failed!\n",__LINE__); \
panic(#expr); \
}
#else
#define ASSERT(expr)
#endif
/*
** The number of pdir entries to "free" before issuing
** a read to PCOM register to flush out PCOM writes.
** Interacts with allocation granularity (ie 4 or 8 entries
** allocated and free'd/purged at a time might make this
** less interesting).
*/
#define DELAYED_RESOURCE_CNT 64
#define PCI_DEVICE_ID_HP_SX2000_IOC 0x12ec
#define ZX1_IOC_ID ((PCI_DEVICE_ID_HP_ZX1_IOC << 16) | PCI_VENDOR_ID_HP)
#define ZX2_IOC_ID ((PCI_DEVICE_ID_HP_ZX2_IOC << 16) | PCI_VENDOR_ID_HP)
#define REO_IOC_ID ((PCI_DEVICE_ID_HP_REO_IOC << 16) | PCI_VENDOR_ID_HP)
#define SX1000_IOC_ID ((PCI_DEVICE_ID_HP_SX1000_IOC << 16) | PCI_VENDOR_ID_HP)
#define SX2000_IOC_ID ((PCI_DEVICE_ID_HP_SX2000_IOC << 16) | PCI_VENDOR_ID_HP)
#define ZX1_IOC_OFFSET 0x1000 /* ACPI reports SBA, we want IOC */
#define IOC_FUNC_ID 0x000
#define IOC_FCLASS 0x008 /* function class, bist, header, rev... */
#define IOC_IBASE 0x300 /* IO TLB */
#define IOC_IMASK 0x308
#define IOC_PCOM 0x310
#define IOC_TCNFG 0x318
#define IOC_PDIR_BASE 0x320
#define IOC_ROPE0_CFG 0x500
#define IOC_ROPE_AO 0x10 /* Allow "Relaxed Ordering" */
/* AGP GART driver looks for this */
#define ZX1_SBA_IOMMU_COOKIE 0x0000badbadc0ffeeUL
/*
** The zx1 IOC supports 4/8/16/64KB page sizes (see TCNFG register)
**
** Some IOCs (sx1000) can run at the above page sizes, but are
** really only supported when the IOC runs at a 4k page size.
**
** iovp_size could only be greater than PAGE_SIZE if we are
** confident the drivers really only touch the next physical
** page iff that driver instance owns it.
*/
static unsigned long iovp_size;
static unsigned long iovp_shift;
static unsigned long iovp_mask;
struct ioc {
void __iomem *ioc_hpa; /* I/O MMU base address */
char *res_map; /* resource map, bit == pdir entry */
u64 *pdir_base; /* physical base address */
unsigned long ibase; /* pdir IOV Space base */
unsigned long imask; /* pdir IOV Space mask */
unsigned long *res_hint; /* next avail IOVP - circular search */
unsigned long dma_mask;
spinlock_t res_lock; /* protects the resource bitmap, but must be held when */
/* clearing pdir to prevent races with allocations. */
unsigned int res_bitshift; /* from the RIGHT! */
unsigned int res_size; /* size of resource map in bytes */
#ifdef CONFIG_NUMA
unsigned int node; /* node where this IOC lives */
#endif
#if DELAYED_RESOURCE_CNT > 0
spinlock_t saved_lock; /* may want to try to get this on a separate cacheline */
/* than res_lock for bigger systems. */
int saved_cnt;
struct sba_dma_pair {
dma_addr_t iova;
size_t size;
} saved[DELAYED_RESOURCE_CNT];
#endif
#ifdef PDIR_SEARCH_TIMING
#define SBA_SEARCH_SAMPLE 0x100
unsigned long avg_search[SBA_SEARCH_SAMPLE];
unsigned long avg_idx; /* current index into avg_search */
#endif
/* Stuff we don't need in performance path */
struct ioc *next; /* list of IOC's in system */
acpi_handle handle; /* for multiple IOC's */
const char *name;
unsigned int func_id;
unsigned int rev; /* HW revision of chip */
u32 iov_size;
unsigned int pdir_size; /* in bytes, determined by IOV Space size */
struct pci_dev *sac_only_dev;
};
static struct ioc *ioc_list;
static int reserve_sba_gart = 1;
static SBA_INLINE void sba_mark_invalid(struct ioc *, dma_addr_t, size_t);
static SBA_INLINE void sba_free_range(struct ioc *, dma_addr_t, size_t);
#define sba_sg_address(sg) sg_virt((sg))
#ifdef FULL_VALID_PDIR
static u64 prefetch_spill_page;
#endif
#ifdef CONFIG_PCI
# define GET_IOC(dev) (((dev)->bus == &pci_bus_type) \
? ((struct ioc *) PCI_CONTROLLER(to_pci_dev(dev))->iommu) : NULL)
#else
# define GET_IOC(dev) NULL
#endif
/*
** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up
** (or rather not merge) DMAs into manageable chunks.
** On parisc, this is more of the software/tuning constraint
** rather than the HW. I/O MMU allocation algorithms can be
** faster with smaller sizes (to some degree).
*/
#define DMA_CHUNK_SIZE (BITS_PER_LONG*iovp_size)
#define ROUNDUP(x,y) ((x + ((y)-1)) & ~((y)-1))
/************************************
** SBA register read and write support
**
** BE WARNED: register writes are posted.
** (ie follow writes which must reach HW with a read)
**
*/
#define READ_REG(addr) __raw_readq(addr)
#define WRITE_REG(val, addr) __raw_writeq(val, addr)
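/*
 * Illustrative helper (not in the original source): the read-back idiom
 * the warning above prescribes. Reading the register after a posted
 * write forces the write out to the hardware.
 */
static inline void sba_write_flushed(u64 val, void __iomem *addr)
{
WRITE_REG(val, addr);
(void) READ_REG(addr); /* read-back flushes the posted write */
}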
#ifdef DEBUG_SBA_INIT
/**
* sba_dump_tlb - debugging only - print IOMMU operating parameters
* @hpa: base address of the IOMMU
*
* Print the size/location of the IO MMU PDIR.
*/
static void
sba_dump_tlb(char *hpa)
{
DBG_INIT("IO TLB at 0x%p\n", (void *)hpa);
DBG_INIT("IOC_IBASE : %016lx\n", READ_REG(hpa+IOC_IBASE));
DBG_INIT("IOC_IMASK : %016lx\n", READ_REG(hpa+IOC_IMASK));
DBG_INIT("IOC_TCNFG : %016lx\n", READ_REG(hpa+IOC_TCNFG));
DBG_INIT("IOC_PDIR_BASE: %016lx\n", READ_REG(hpa+IOC_PDIR_BASE));
DBG_INIT("\n");
}
#endif
#ifdef ASSERT_PDIR_SANITY
/**
* sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry
* @ioc: IO MMU structure which owns the pdir we are interested in.
* @msg: text to print on the output line.
* @pide: pdir index.
*
* Print one entry of the IO MMU PDIR in human readable form.
*/
static void
sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
{
/* start printing from lowest pde in rval */
u64 *ptr = &ioc->pdir_base[pide & ~(BITS_PER_LONG - 1)];
unsigned long *rptr = (unsigned long *) &ioc->res_map[(pide >>3) & -sizeof(unsigned long)];
uint rcnt;
printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n",
msg, rptr, pide & (BITS_PER_LONG - 1), *rptr);
rcnt = 0;
while (rcnt < BITS_PER_LONG) {
printk(KERN_DEBUG "%s %2d %p %016Lx\n",
(rcnt == (pide & (BITS_PER_LONG - 1)))
? " -->" : " ",
rcnt, ptr, (unsigned long long) *ptr );
rcnt++;
ptr++;
}
printk(KERN_DEBUG "%s", msg);
}
/**
* sba_check_pdir - debugging only - consistency checker
* @ioc: IO MMU structure which owns the pdir we are interested in.
* @msg: text to print on the output line.
*
* Verify the resource map and pdir state is consistent
*/
static int
sba_check_pdir(struct ioc *ioc, char *msg)
{
u64 *rptr_end = (u64 *) &(ioc->res_map[ioc->res_size]);
u64 *rptr = (u64 *) ioc->res_map; /* resource map ptr */
u64 *pptr = ioc->pdir_base; /* pdir ptr */
uint pide = 0;
while (rptr < rptr_end) {
u64 rval;
int rcnt; /* number of bits we might check */
rval = *rptr;
rcnt = 64;
while (rcnt) {
/* Get last byte and highest bit from that */
u32 pde = ((u32)((*pptr >> (63)) & 0x1));
if ((rval & 0x1) ^ pde)
{
/*
** BUMMER! -- res_map != pdir --
** Dump rval and matching pdir entries
*/
sba_dump_pdir_entry(ioc, msg, pide);
return(1);
}
rcnt--;
rval >>= 1; /* try the next bit */
pptr++;
pide++;
}
rptr++; /* look at next word of res_map */
}
/* It'd be nice if we always got here :^) */
return 0;
}
/**
* sba_dump_sg - debugging only - print Scatter-Gather list
* @ioc: IO MMU structure which owns the pdir we are interested in.
* @startsg: head of the SG list
* @nents: number of entries in SG list
*
* print the SG list so we can verify it's correct by hand.
*/
static void
sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
{
while (nents-- > 0) {
printk(KERN_DEBUG " %d : DMA %08lx/%05x CPU %p\n", nents,
startsg->dma_address, startsg->dma_length,
sba_sg_address(startsg));
startsg = sg_next(startsg);
}
}
static void
sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
{
struct scatterlist *the_sg = startsg;
int the_nents = nents;
while (the_nents-- > 0) {
if (sba_sg_address(the_sg) == 0x0UL)
sba_dump_sg(NULL, startsg, nents);
the_sg = sg_next(the_sg);
}
}
#endif /* ASSERT_PDIR_SANITY */
/**************************************************************
*
* I/O Pdir Resource Management
*
* Bits set in the resource map are in use.
* Each bit can represent a number of pages.
* LSbs represent lower addresses (IOVA's).
*
***************************************************************/
#define PAGES_PER_RANGE 1 /* could increase this to 4 or 8 if needed */
/* Convert from IOVP to IOVA and vice versa. */
#define SBA_IOVA(ioc,iovp,offset) ((ioc->ibase) | (iovp) | (offset))
#define SBA_IOVP(ioc,iova) ((iova) & ~(ioc->ibase))
#define PDIR_ENTRY_SIZE sizeof(u64)
#define PDIR_INDEX(iovp) ((iovp)>>iovp_shift)
#define RESMAP_MASK(n) ~(~0UL << (n))
#define RESMAP_IDX_MASK (sizeof(unsigned long) - 1)
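/*
** Worked example, assuming illustrative values ioc->ibase == 0x40000000
** and iovp_shift == 12: a mapping at pdir index 5 with byte offset 0x123
** gives
**	iovp = 5 << iovp_shift         == 0x5000
**	SBA_IOVA(ioc, iovp, 0x123)     == 0x40005123
** and the unmap path recovers the pieces:
**	SBA_IOVP(ioc, 0x40005123)      == 0x5123
**	PDIR_INDEX(0x5123)             == 5
*/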
/*
 * For most cases the normal get_order is sufficient; however, it limits us
 * to PAGE_SIZE being the minimum mapping alignment and TC flush granularity.
 * Using this variant with the static iovp_shift costs only about one extra
 * clock cycle and makes the code more intuitive.
*/
static SBA_INLINE int
get_iovp_order (unsigned long size)
{
long double d = size - 1;
long order;
order = ia64_getf_exp(d);
order = order - iovp_shift - 0xffff + 1;
if (order < 0)
order = 0;
return order;
}
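/*
** For example, with iovp_shift == 12 (4K IOTLB pages):
**	get_iovp_order(0x1000) == 0	(one page)
**	get_iovp_order(0x1001) == 1	(rounds up to two pages)
**	get_iovp_order(0x4000) == 2	(four pages)
** ia64_getf_exp() returns the biased exponent (bias 0xffff) of size-1
** as a long double, hence the "- 0xffff + 1" above.
*/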
static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
unsigned int bitshiftcnt)
{
return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
+ bitshiftcnt;
}
/**
* sba_search_bitmap - find free space in IO PDIR resource bitmap
* @ioc: IO MMU structure which owns the pdir we are interested in.
* @bits_wanted: number of entries we need.
* @use_hint: use res_hint to indicate where to start looking
*
* Find consecutive free bits in resource bitmap.
* Each bit represents one entry in the IO Pdir.
* Cool perf optimization: search for log2(size) bits at a time.
*/
static SBA_INLINE unsigned long
sba_search_bitmap(struct ioc *ioc, struct device *dev,
unsigned long bits_wanted, int use_hint)
{
unsigned long *res_ptr;
unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
unsigned long flags, pide = ~0UL, tpide;
unsigned long boundary_size;
unsigned long shift;
int ret;
ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0);
ASSERT(res_ptr < res_end);
boundary_size = (unsigned long long)dma_get_seg_boundary(dev) + 1;
boundary_size = ALIGN(boundary_size, 1ULL << iovp_shift) >> iovp_shift;
BUG_ON(ioc->ibase & ~iovp_mask);
shift = ioc->ibase >> iovp_shift;
spin_lock_irqsave(&ioc->res_lock, flags);
/* Allow caller to force a search through the entire resource space */
if (likely(use_hint)) {
res_ptr = ioc->res_hint;
} else {
res_ptr = (ulong *)ioc->res_map;
ioc->res_bitshift = 0;
}
/*
* N.B. REO/Grande defect AR2305 can cause TLB fetch timeouts
* if a TLB entry is purged while in use. sba_mark_invalid()
* purges IOTLB entries in power-of-two sizes, so we also
* allocate IOVA space in power-of-two sizes.
*/
bits_wanted = 1UL << get_iovp_order(bits_wanted << iovp_shift);
if (likely(bits_wanted == 1)) {
unsigned int bitshiftcnt;
for(; res_ptr < res_end ; res_ptr++) {
if (likely(*res_ptr != ~0UL)) {
bitshiftcnt = ffz(*res_ptr);
*res_ptr |= (1UL << bitshiftcnt);
pide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
ioc->res_bitshift = bitshiftcnt + bits_wanted;
goto found_it;
}
}
goto not_found;
}
if (likely(bits_wanted <= BITS_PER_LONG/2)) {
/*
** Search the resource bit map on well-aligned values.
** "o" is the alignment.
** We need the alignment to invalidate I/O TLB using
** SBA HW features in the unmap path.
*/
unsigned long o = 1 << get_iovp_order(bits_wanted << iovp_shift);
uint bitshiftcnt = ROUNDUP(ioc->res_bitshift, o);
unsigned long mask, base_mask;
base_mask = RESMAP_MASK(bits_wanted);
mask = base_mask << bitshiftcnt;
DBG_RES("%s() o %ld %p", __func__, o, res_ptr);
for(; res_ptr < res_end ; res_ptr++)
{
DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr);
ASSERT(0 != mask);
for (; mask ; mask <<= o, bitshiftcnt += o) {
tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
ret = iommu_is_span_boundary(tpide, bits_wanted,
shift,
boundary_size);
if ((0 == ((*res_ptr) & mask)) && !ret) {
*res_ptr |= mask; /* mark resources busy! */
pide = tpide;
ioc->res_bitshift = bitshiftcnt + bits_wanted;
goto found_it;
}
}
bitshiftcnt = 0;
mask = base_mask;
}
} else {
int qwords, bits, i;
unsigned long *end;
qwords = bits_wanted >> 6; /* /64 */
bits = bits_wanted - (qwords * BITS_PER_LONG);
end = res_end - qwords;
for (; res_ptr < end; res_ptr++) {
tpide = ptr_to_pide(ioc, res_ptr, 0);
ret = iommu_is_span_boundary(tpide, bits_wanted,
shift, boundary_size);
if (ret)
goto next_ptr;
for (i = 0 ; i < qwords ; i++) {
if (res_ptr[i] != 0)
goto next_ptr;
}
if (bits && res_ptr[i] && (__ffs(res_ptr[i]) < bits))
continue;
/* Found it, mark it */
for (i = 0 ; i < qwords ; i++)
res_ptr[i] = ~0UL;
res_ptr[i] |= RESMAP_MASK(bits);
pide = tpide;
res_ptr += qwords;
ioc->res_bitshift = bits;
goto found_it;
next_ptr:
;
}
}
not_found:
prefetch(ioc->res_map);
ioc->res_hint = (unsigned long *) ioc->res_map;
ioc->res_bitshift = 0;
spin_unlock_irqrestore(&ioc->res_lock, flags);
return (pide);
found_it:
ioc->res_hint = res_ptr;
spin_unlock_irqrestore(&ioc->res_lock, flags);
return (pide);
}
/**
* sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap
* @ioc: IO MMU structure which owns the pdir we are interested in.
* @size: number of bytes to create a mapping for
*
 * Given a size, find consecutive unmarked bits and then mark them in the
 * resource bit map.
*/
static int
sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
{
unsigned int pages_needed = size >> iovp_shift;
#ifdef PDIR_SEARCH_TIMING
unsigned long itc_start;
#endif
unsigned long pide;
ASSERT(pages_needed);
ASSERT(0 == (size & ~iovp_mask));
#ifdef PDIR_SEARCH_TIMING
itc_start = ia64_get_itc();
#endif
/*
** "seek and ye shall find"...praying never hurts either...
*/
pide = sba_search_bitmap(ioc, dev, pages_needed, 1);
if (unlikely(pide >= (ioc->res_size << 3))) {
pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
if (unlikely(pide >= (ioc->res_size << 3))) {
#if DELAYED_RESOURCE_CNT > 0
unsigned long flags;
/*
** With delayed resource freeing, we can give this one more shot. We're
** getting close to being in trouble here, so do what we can to make this
** one count.
*/
spin_lock_irqsave(&ioc->saved_lock, flags);
if (ioc->saved_cnt > 0) {
struct sba_dma_pair *d;
int cnt = ioc->saved_cnt;
d = &(ioc->saved[ioc->saved_cnt - 1]);
spin_lock(&ioc->res_lock);
while (cnt--) {
sba_mark_invalid(ioc, d->iova, d->size);
sba_free_range(ioc, d->iova, d->size);
d--;
}
ioc->saved_cnt = 0;
READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
spin_unlock(&ioc->res_lock);
}
spin_unlock_irqrestore(&ioc->saved_lock, flags);
pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
if (unlikely(pide >= (ioc->res_size << 3))) {
printk(KERN_WARNING "%s: I/O MMU @ %p is"
"out of mapping resources, %u %u %lx\n",
__func__, ioc->ioc_hpa, ioc->res_size,
pages_needed, dma_get_seg_boundary(dev));
return -1;
}
#else
printk(KERN_WARNING "%s: I/O MMU @ %p is"
"out of mapping resources, %u %u %lx\n",
__func__, ioc->ioc_hpa, ioc->res_size,
pages_needed, dma_get_seg_boundary(dev));
return -1;
#endif
}
}
#ifdef PDIR_SEARCH_TIMING
ioc->avg_search[ioc->avg_idx++] = (ia64_get_itc() - itc_start) / pages_needed;
ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;
#endif
prefetchw(&(ioc->pdir_base[pide]));
#ifdef ASSERT_PDIR_SANITY
/* verify the first enable bit is clear */
if(0x00 != ((u8 *) ioc->pdir_base)[pide*PDIR_ENTRY_SIZE + 7]) {
sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
}
#endif
DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
__func__, size, pages_needed, pide,
(uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
ioc->res_bitshift );
return (pide);
}
/**
* sba_free_range - unmark bits in IO PDIR resource bitmap
* @ioc: IO MMU structure which owns the pdir we are interested in.
* @iova: IO virtual address which was previously allocated.
* @size: number of bytes to create a mapping for
*
* clear bits in the ioc's resource map
*/
static SBA_INLINE void
sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
{
unsigned long iovp = SBA_IOVP(ioc, iova);
unsigned int pide = PDIR_INDEX(iovp);
unsigned int ridx = pide >> 3; /* convert bit to byte address */
unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);
int bits_not_wanted = size >> iovp_shift;
unsigned long m;
/* Round up to power-of-two size: see AR2305 note above */
bits_not_wanted = 1UL << get_iovp_order(bits_not_wanted << iovp_shift);
for (; bits_not_wanted > 0 ; res_ptr++) {
if (unlikely(bits_not_wanted > BITS_PER_LONG)) {
/* these mappings start 64bit aligned */
*res_ptr = 0UL;
bits_not_wanted -= BITS_PER_LONG;
pide += BITS_PER_LONG;
} else {
/* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */
			m = RESMAP_MASK(bits_not_wanted) << (pide & (BITS_PER_LONG - 1));
			DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n", __func__, (uint) iova, size,
			        bits_not_wanted, m, pide, res_ptr, *res_ptr);
			ASSERT(m != 0);
			ASSERT(bits_not_wanted);	/* checked before clearing, so it is meaningful */
			ASSERT((*res_ptr & m) == m); /* verify same bits are set */
			bits_not_wanted = 0;	/* terminates the loop */
			*res_ptr &= ~m;
}
}
}
/**************************************************************
*
* "Dynamic DMA Mapping" support (aka "Coherent I/O")
*
***************************************************************/
/**
* sba_io_pdir_entry - fill in one IO PDIR entry
* @pdir_ptr: pointer to IO PDIR entry
* @vba: Virtual CPU address of buffer to map
*
* SBA Mapping Routine
*
* Given a virtual address (vba, arg1) sba_io_pdir_entry()
* loads the I/O PDIR entry pointed to by pdir_ptr (arg0).
* Each IO Pdir entry consists of 8 bytes as shown below
* (LSB == bit 0):
*
* 63 40 11 7 0
* +-+---------------------+----------------------------------+----+--------+
* |V| U | PPN[39:12] | U | FF |
* +-+---------------------+----------------------------------+----+--------+
*
* V == Valid Bit
* U == Unused
* PPN == Physical Page Number
*
* The physical address fields are filled with the results of virt_to_phys()
* on the vba.
*/
#if 1
#define sba_io_pdir_entry(pdir_ptr, vba) *pdir_ptr = ((vba & ~0xE000000000000FFFULL) \
| 0x8000000000000000ULL)
#else
void SBA_INLINE
sba_io_pdir_entry(u64 *pdir_ptr, unsigned long vba)
{
*pdir_ptr = ((vba & ~0xE000000000000FFFULL) | 0x80000000000000FFULL);
}
#endif
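/*
** Worked example for the enabled macro variant above, assuming a
** region-7 identity-mapped vba such as 0xe000000012345678: masking with
** ~0xE000000000000FFFULL strips the region bits and the page offset,
** leaving 0x0000000012345000; OR'ing in bit 63 sets V, so the entry
** becomes 0x8000000012345000, i.e. PPN 0x12345 marked valid.
*/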
#ifdef ENABLE_MARK_CLEAN
/**
* Since DMA is i-cache coherent, any (complete) pages that were written via
* DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
* flush them when they get mapped into an executable vm-area.
*/
static void
mark_clean (void *addr, size_t size)
{
unsigned long pg_addr, end;
pg_addr = PAGE_ALIGN((unsigned long) addr);
end = (unsigned long) addr + size;
while (pg_addr + PAGE_SIZE <= end) {
struct page *page = virt_to_page((void *)pg_addr);
set_bit(PG_arch_1, &page->flags);
pg_addr += PAGE_SIZE;
}
}
#endif
/**
* sba_mark_invalid - invalidate one or more IO PDIR entries
* @ioc: IO MMU structure which owns the pdir we are interested in.
* @iova: IO Virtual Address mapped earlier
* @byte_cnt: number of bytes this mapping covers.
*
 * Mark the IO PDIR entry(ies) as invalid and purge the
 * corresponding IO TLB entries. The PCOM (Purge Command Register)
 * is used to purge stale entries from the IO TLB when unmapping.
*
 * The PCOM register supports purging of multiple pages, with a minimum
* of 1 page and a maximum of 2GB. Hardware requires the address be
* aligned to the size of the range being purged. The size of the range
* must be a power of 2. The "Cool perf optimization" in the
* allocation routine helps keep that true.
*/
static SBA_INLINE void
sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
u32 iovp = (u32) SBA_IOVP(ioc,iova);
int off = PDIR_INDEX(iovp);
/* Must be non-zero and rounded up */
ASSERT(byte_cnt > 0);
ASSERT(0 == (byte_cnt & ~iovp_mask));
#ifdef ASSERT_PDIR_SANITY
/* Assert first pdir entry is set */
if (!(ioc->pdir_base[off] >> 60)) {
sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp));
}
#endif
if (byte_cnt <= iovp_size)
{
ASSERT(off < ioc->pdir_size);
iovp |= iovp_shift; /* set "size" field for PCOM */
#ifndef FULL_VALID_PDIR
/*
** clear I/O PDIR entry "valid" bit
** Do NOT clear the rest - save it for debugging.
** We should only clear bits that have previously
** been enabled.
*/
ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
#else
/*
** If we want to maintain the PDIR as valid, put in
** the spill page so devices prefetching won't
** cause a hard fail.
*/
ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
#endif
} else {
u32 t = get_iovp_order(byte_cnt) + iovp_shift;
iovp |= t;
ASSERT(t <= 31); /* 2GB! Max value of "size" field */
do {
/* verify this pdir entry is enabled */
ASSERT(ioc->pdir_base[off] >> 63);
#ifndef FULL_VALID_PDIR
/* clear I/O Pdir entry "valid" bit first */
ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
#else
ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
#endif
off++;
byte_cnt -= iovp_size;
} while (byte_cnt > 0);
}
WRITE_REG(iovp | ioc->ibase, ioc->ioc_hpa+IOC_PCOM);
}
/**
 * sba_map_page - map one buffer and return IOVA for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @page: page holding the driver buffer to map.
 * @poff: offset of the buffer within @page.
 * @size: number of bytes to map in driver buffer.
* @dir: R/W or both.
* @attrs: optional dma attributes
*
* See Documentation/DMA-API-HOWTO.txt
*/
static dma_addr_t sba_map_page(struct device *dev, struct page *page,
unsigned long poff, size_t size,
enum dma_data_direction dir,
struct dma_attrs *attrs)
{
struct ioc *ioc;
void *addr = page_address(page) + poff;
dma_addr_t iovp;
dma_addr_t offset;
u64 *pdir_start;
int pide;
#ifdef ASSERT_PDIR_SANITY
unsigned long flags;
#endif
#ifdef ALLOW_IOV_BYPASS
unsigned long pci_addr = virt_to_phys(addr);
#endif
#ifdef ALLOW_IOV_BYPASS
ASSERT(to_pci_dev(dev)->dma_mask);
/*
** Check if the PCI device can DMA to ptr... if so, just return ptr
*/
if (likely((pci_addr & ~to_pci_dev(dev)->dma_mask) == 0)) {
/*
		** The device's DMA mask covers the buffer's physical address...
		** just return the PCI address of ptr
*/
DBG_BYPASS("sba_map_single_attrs() bypass mask/addr: "
"0x%lx/0x%lx\n",
to_pci_dev(dev)->dma_mask, pci_addr);
return pci_addr;
}
#endif
ioc = GET_IOC(dev);
ASSERT(ioc);
prefetch(ioc->res_hint);
ASSERT(size > 0);
ASSERT(size <= DMA_CHUNK_SIZE);
/* save offset bits */
offset = ((dma_addr_t) (long) addr) & ~iovp_mask;
/* round up to nearest iovp_size */
size = (size + offset + ~iovp_mask) & iovp_mask;
#ifdef ASSERT_PDIR_SANITY
spin_lock_irqsave(&ioc->res_lock, flags);
if (sba_check_pdir(ioc,"Check before sba_map_single_attrs()"))
panic("Sanity check failed");
spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif
pide = sba_alloc_range(ioc, dev, size);
if (pide < 0)
return 0;
iovp = (dma_addr_t) pide << iovp_shift;
DBG_RUN("%s() 0x%p -> 0x%lx\n", __func__, addr, (long) iovp | offset);
pdir_start = &(ioc->pdir_base[pide]);
while (size > 0) {
ASSERT(((u8 *)pdir_start)[7] == 0); /* verify availability */
sba_io_pdir_entry(pdir_start, (unsigned long) addr);
DBG_RUN(" pdir 0x%p %lx\n", pdir_start, *pdir_start);
addr += iovp_size;
size -= iovp_size;
pdir_start++;
}
/* force pdir update */
wmb();
/* form complete address */
#ifdef ASSERT_PDIR_SANITY
spin_lock_irqsave(&ioc->res_lock, flags);
sba_check_pdir(ioc,"Check after sba_map_single_attrs()");
spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif
return SBA_IOVA(ioc, iovp, offset);
}
static dma_addr_t sba_map_single_attrs(struct device *dev, void *addr,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
return sba_map_page(dev, virt_to_page(addr),
(unsigned long)addr & ~PAGE_MASK, size, dir, attrs);
}
#ifdef ENABLE_MARK_CLEAN
static SBA_INLINE void
sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
{
u32 iovp = (u32) SBA_IOVP(ioc,iova);
int off = PDIR_INDEX(iovp);
void *addr;
if (size <= iovp_size) {
addr = phys_to_virt(ioc->pdir_base[off] &
~0xE000000000000FFFULL);
mark_clean(addr, size);
} else {
do {
addr = phys_to_virt(ioc->pdir_base[off] &
~0xE000000000000FFFULL);
mark_clean(addr, min(size, iovp_size));
off++;
size -= iovp_size;
} while (size > 0);
}
}
#endif
/**
 * sba_unmap_page - unmap one IOVA and free resources
* @dev: instance of PCI owned by the driver that's asking.
* @iova: IOVA of driver buffer previously mapped.
* @size: number of bytes mapped in driver buffer.
* @dir: R/W or both.
* @attrs: optional dma attributes
*
* See Documentation/DMA-API-HOWTO.txt
*/
static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
enum dma_data_direction dir, struct dma_attrs *attrs)
{
struct ioc *ioc;
#if DELAYED_RESOURCE_CNT > 0
struct sba_dma_pair *d;
#endif
unsigned long flags;
dma_addr_t offset;
ioc = GET_IOC(dev);
ASSERT(ioc);
#ifdef ALLOW_IOV_BYPASS
if (likely((iova & ioc->imask) != ioc->ibase)) {
/*
** Address does not fall w/in IOVA, must be bypassing
*/
DBG_BYPASS("sba_unmap_single_attrs() bypass addr: 0x%lx\n",
iova);
#ifdef ENABLE_MARK_CLEAN
if (dir == DMA_FROM_DEVICE) {
mark_clean(phys_to_virt(iova), size);
}
#endif
return;
}
#endif
offset = iova & ~iovp_mask;
DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);
iova ^= offset; /* clear offset bits */
size += offset;
size = ROUNDUP(size, iovp_size);
#ifdef ENABLE_MARK_CLEAN
if (dir == DMA_FROM_DEVICE)
sba_mark_clean(ioc, iova, size);
#endif
#if DELAYED_RESOURCE_CNT > 0
spin_lock_irqsave(&ioc->saved_lock, flags);
d = &(ioc->saved[ioc->saved_cnt]);
d->iova = iova;
d->size = size;
if (unlikely(++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT)) {
int cnt = ioc->saved_cnt;
spin_lock(&ioc->res_lock);
while (cnt--) {
sba_mark_invalid(ioc, d->iova, d->size);
sba_free_range(ioc, d->iova, d->size);
d--;
}
ioc->saved_cnt = 0;
READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
spin_unlock(&ioc->res_lock);
}
spin_unlock_irqrestore(&ioc->saved_lock, flags);
#else /* DELAYED_RESOURCE_CNT == 0 */
spin_lock_irqsave(&ioc->res_lock, flags);
sba_mark_invalid(ioc, iova, size);
sba_free_range(ioc, iova, size);
READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif /* DELAYED_RESOURCE_CNT == 0 */
}
void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
enum dma_data_direction dir, struct dma_attrs *attrs)
{
sba_unmap_page(dev, iova, size, dir, attrs);
}
/**
* sba_alloc_coherent - allocate/map shared mem for DMA
* @dev: instance of PCI owned by the driver that's asking.
* @size: number of bytes mapped in driver buffer.
* @dma_handle: IOVA of new buffer.
*
* See Documentation/DMA-API-HOWTO.txt
*/
static void *
sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t flags, struct dma_attrs *attrs)
{
struct ioc *ioc;
void *addr;
ioc = GET_IOC(dev);
ASSERT(ioc);
#ifdef CONFIG_NUMA
{
struct page *page;
page = alloc_pages_exact_node(ioc->node == MAX_NUMNODES ?
numa_node_id() : ioc->node, flags,
get_order(size));
if (unlikely(!page))
return NULL;
addr = page_address(page);
}
#else
addr = (void *) __get_free_pages(flags, get_order(size));
#endif
if (unlikely(!addr))
return NULL;
memset(addr, 0, size);
*dma_handle = virt_to_phys(addr);
#ifdef ALLOW_IOV_BYPASS
ASSERT(dev->coherent_dma_mask);
/*
** Check if the PCI device can DMA to ptr... if so, just return ptr
*/
if (likely((*dma_handle & ~dev->coherent_dma_mask) == 0)) {
DBG_BYPASS("sba_alloc_coherent() bypass mask/addr: 0x%lx/0x%lx\n",
dev->coherent_dma_mask, *dma_handle);
return addr;
}
#endif
/*
* If device can't bypass or bypass is disabled, pass the 32bit fake
* device to map single to get an iova mapping.
*/
*dma_handle = sba_map_single_attrs(&ioc->sac_only_dev->dev, addr,
size, 0, NULL);
return addr;
}
/**
* sba_free_coherent - free/unmap shared mem for DMA
* @dev: instance of PCI owned by the driver that's asking.
* @size: number of bytes mapped in driver buffer.
 * @vaddr: kernel virtual address of the "consistent" buffer.
 * @dma_handle: IO virtual address of the "consistent" buffer.
*
* See Documentation/DMA-API-HOWTO.txt
*/
static void sba_free_coherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, struct dma_attrs *attrs)
{
sba_unmap_single_attrs(dev, dma_handle, size, 0, NULL);
free_pages((unsigned long) vaddr, get_order(size));
}
/*
** Since 0 is a valid pdir_base index value, can't use that
** to determine if a value is valid or not. Use a flag to indicate
** the SG list entry contains a valid pdir index.
*/
#define PIDE_FLAG 0x1UL
#ifdef DEBUG_LARGE_SG_ENTRIES
int dump_run_sg = 0;
#endif
/**
* sba_fill_pdir - write allocated SG entries into IO PDIR
* @ioc: IO MMU structure which owns the pdir we are interested in.
* @startsg: list of IOVA/size pairs
* @nents: number of entries in startsg list
*
* Take preprocessed SG list and write corresponding entries
* in the IO PDIR.
*/
static SBA_INLINE int
sba_fill_pdir(
struct ioc *ioc,
struct scatterlist *startsg,
int nents)
{
struct scatterlist *dma_sg = startsg; /* pointer to current DMA */
int n_mappings = 0;
u64 *pdirp = NULL;
unsigned long dma_offset = 0;
while (nents-- > 0) {
int cnt = startsg->dma_length;
startsg->dma_length = 0;
#ifdef DEBUG_LARGE_SG_ENTRIES
if (dump_run_sg)
printk(" %2d : %08lx/%05x %p\n",
nents, startsg->dma_address, cnt,
sba_sg_address(startsg));
#else
DBG_RUN_SG(" %d : %08lx/%05x %p\n",
nents, startsg->dma_address, cnt,
sba_sg_address(startsg));
#endif
/*
** Look for the start of a new DMA stream
*/
if (startsg->dma_address & PIDE_FLAG) {
u32 pide = startsg->dma_address & ~PIDE_FLAG;
dma_offset = (unsigned long) pide & ~iovp_mask;
startsg->dma_address = 0;
if (n_mappings)
dma_sg = sg_next(dma_sg);
dma_sg->dma_address = pide | ioc->ibase;
pdirp = &(ioc->pdir_base[pide >> iovp_shift]);
n_mappings++;
}
/*
** Look for a VCONTIG chunk
*/
if (cnt) {
unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
ASSERT(pdirp);
/* Since multiple Vcontig blocks could make up
** one DMA stream, *add* cnt to dma_len.
*/
dma_sg->dma_length += cnt;
cnt += dma_offset;
dma_offset=0; /* only want offset on first chunk */
cnt = ROUNDUP(cnt, iovp_size);
do {
sba_io_pdir_entry(pdirp, vaddr);
vaddr += iovp_size;
cnt -= iovp_size;
pdirp++;
} while (cnt > 0);
}
startsg = sg_next(startsg);
}
/* force pdir update */
wmb();
#ifdef DEBUG_LARGE_SG_ENTRIES
dump_run_sg = 0;
#endif
return(n_mappings);
}
/*
** Two address ranges are DMA contiguous *iff* "end of prev" and
** "start of next" are both on an IOV page boundary.
**
** (shift left is a quick trick to mask off upper bits)
*/
#define DMA_CONTIG(__X, __Y) \
(((((unsigned long) __X) | ((unsigned long) __Y)) << (BITS_PER_LONG - iovp_shift)) == 0UL)
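/*
** For example, with iovp_shift == 12 and BITS_PER_LONG == 64:
**	DMA_CONTIG(0x7000, 0x8000): (0xF000 << 52) == 0 -> contiguous
**	DMA_CONTIG(0x7800, 0x8000): bit 11 of 0x7800 survives the shift,
**	so the result is nonzero -> not contiguous
*/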
/**
* sba_coalesce_chunks - preprocess the SG list
* @ioc: IO MMU structure which owns the pdir we are interested in.
* @startsg: list of IOVA/size pairs
* @nents: number of entries in startsg list
*
* First pass is to walk the SG list and determine where the breaks are
* in the DMA stream. Allocates PDIR entries but does not fill them.
* Returns the number of DMA chunks.
*
* Doing the fill separate from the coalescing/allocation keeps the
* code simpler. Future enhancement could make one pass through
* the sglist do both.
*/
static SBA_INLINE int
sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
struct scatterlist *startsg,
int nents)
{
struct scatterlist *vcontig_sg; /* VCONTIG chunk head */
unsigned long vcontig_len; /* len of VCONTIG chunk */
unsigned long vcontig_end;
struct scatterlist *dma_sg; /* next DMA stream head */
unsigned long dma_offset, dma_len; /* start/len of DMA stream */
int n_mappings = 0;
unsigned int max_seg_size = dma_get_max_seg_size(dev);
int idx;
while (nents > 0) {
unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
/*
** Prepare for first/next DMA stream
*/
dma_sg = vcontig_sg = startsg;
dma_len = vcontig_len = vcontig_end = startsg->length;
vcontig_end += vaddr;
dma_offset = vaddr & ~iovp_mask;
/* PARANOID: clear entries */
startsg->dma_address = startsg->dma_length = 0;
/*
** This loop terminates one iteration "early" since
** it's always looking one "ahead".
*/
while (--nents > 0) {
unsigned long vaddr; /* tmp */
startsg = sg_next(startsg);
/* PARANOID */
startsg->dma_address = startsg->dma_length = 0;
/* catch brokenness in SCSI layer */
ASSERT(startsg->length <= DMA_CHUNK_SIZE);
/*
** First make sure current dma stream won't
** exceed DMA_CHUNK_SIZE if we coalesce the
** next entry.
*/
if (((dma_len + dma_offset + startsg->length + ~iovp_mask) & iovp_mask)
> DMA_CHUNK_SIZE)
break;
if (dma_len + startsg->length > max_seg_size)
break;
/*
** Then look for virtually contiguous blocks.
**
** append the next transaction?
*/
vaddr = (unsigned long) sba_sg_address(startsg);
if (vcontig_end == vaddr)
{
vcontig_len += startsg->length;
vcontig_end += startsg->length;
dma_len += startsg->length;
continue;
}
#ifdef DEBUG_LARGE_SG_ENTRIES
dump_run_sg = (vcontig_len > iovp_size);
#endif
/*
** Not virtually contiguous.
** Terminate prev chunk.
** Start a new chunk.
**
** Once we start a new VCONTIG chunk, dma_offset
** can't change. And we need the offset from the first
			** chunk - not the last one. Ergo, successive chunks
			** must start on page boundaries and dovetail
			** with their predecessors.
*/
vcontig_sg->dma_length = vcontig_len;
vcontig_sg = startsg;
vcontig_len = startsg->length;
/*
** 3) do the entries end/start on page boundaries?
** Don't update vcontig_end until we've checked.
*/
if (DMA_CONTIG(vcontig_end, vaddr))
{
vcontig_end = vcontig_len + vaddr;
dma_len += vcontig_len;
continue;
} else {
break;
}
}
/*
** End of DMA Stream
** Terminate last VCONTIG block.
** Allocate space for DMA stream.
*/
vcontig_sg->dma_length = vcontig_len;
dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask;
ASSERT(dma_len <= DMA_CHUNK_SIZE);
idx = sba_alloc_range(ioc, dev, dma_len);
if (idx < 0) {
dma_sg->dma_length = 0;
return -1;
}
dma_sg->dma_address = (dma_addr_t)(PIDE_FLAG | (idx << iovp_shift)
| dma_offset);
n_mappings++;
}
return n_mappings;
}
static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
int nents, enum dma_data_direction dir,
struct dma_attrs *attrs);
/**
 * sba_map_sg_attrs - map Scatter/Gather list
* @dev: instance of PCI owned by the driver that's asking.
* @sglist: array of buffer/length pairs
* @nents: number of entries in list
* @dir: R/W or both.
* @attrs: optional dma attributes
*
* See Documentation/DMA-API-HOWTO.txt
*/
static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
int nents, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
struct ioc *ioc;
int coalesced, filled = 0;
#ifdef ASSERT_PDIR_SANITY
unsigned long flags;
#endif
#ifdef ALLOW_IOV_BYPASS_SG
struct scatterlist *sg;
#endif
DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
ioc = GET_IOC(dev);
ASSERT(ioc);
#ifdef ALLOW_IOV_BYPASS_SG
ASSERT(to_pci_dev(dev)->dma_mask);
if (likely((ioc->dma_mask & ~to_pci_dev(dev)->dma_mask) == 0)) {
for_each_sg(sglist, sg, nents, filled) {
sg->dma_length = sg->length;
sg->dma_address = virt_to_phys(sba_sg_address(sg));
}
return filled;
}
#endif
/* Fast path single entry scatterlists. */
if (nents == 1) {
sglist->dma_length = sglist->length;
sglist->dma_address = sba_map_single_attrs(dev, sba_sg_address(sglist), sglist->length, dir, attrs);
return 1;
}
#ifdef ASSERT_PDIR_SANITY
spin_lock_irqsave(&ioc->res_lock, flags);
if (sba_check_pdir(ioc,"Check before sba_map_sg_attrs()"))
{
sba_dump_sg(ioc, sglist, nents);
panic("Check before sba_map_sg_attrs()");
}
spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif
prefetch(ioc->res_hint);
/*
** First coalesce the chunks and allocate I/O pdir space
**
** If this is one DMA stream, we can properly map using the
** correct virtual address associated with each DMA page.
** w/o this association, we wouldn't have coherent DMA!
** Access to the virtual address is what forces a two pass algorithm.
*/
coalesced = sba_coalesce_chunks(ioc, dev, sglist, nents);
if (coalesced < 0) {
sba_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
return 0;
}
/*
** Program the I/O Pdir
**
** map the virtual addresses to the I/O Pdir
** o dma_address will contain the pdir index
** o dma_len will contain the number of bytes to map
** o address contains the virtual address.
*/
filled = sba_fill_pdir(ioc, sglist, nents);
#ifdef ASSERT_PDIR_SANITY
spin_lock_irqsave(&ioc->res_lock, flags);
if (sba_check_pdir(ioc,"Check after sba_map_sg_attrs()"))
{
sba_dump_sg(ioc, sglist, nents);
panic("Check after sba_map_sg_attrs()\n");
}
spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif
ASSERT(coalesced == filled);
DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);
return filled;
}
/**
* sba_unmap_sg_attrs - unmap Scatter/Gather list
* @dev: instance of PCI owned by the driver that's asking.
* @sglist: array of buffer/length pairs
* @nents: number of entries in list
* @dir: R/W or both.
* @attrs: optional dma attributes
*
* See Documentation/DMA-API-HOWTO.txt
*/
static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
int nents, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
#ifdef ASSERT_PDIR_SANITY
struct ioc *ioc;
unsigned long flags;
#endif
DBG_RUN_SG("%s() START %d entries, %p,%x\n",
__func__, nents, sba_sg_address(sglist), sglist->length);
#ifdef ASSERT_PDIR_SANITY
ioc = GET_IOC(dev);
ASSERT(ioc);
spin_lock_irqsave(&ioc->res_lock, flags);
sba_check_pdir(ioc,"Check before sba_unmap_sg_attrs()");
spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif
while (nents && sglist->dma_length) {
sba_unmap_single_attrs(dev, sglist->dma_address,
sglist->dma_length, dir, attrs);
sglist = sg_next(sglist);
nents--;
}
DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
#ifdef ASSERT_PDIR_SANITY
spin_lock_irqsave(&ioc->res_lock, flags);
sba_check_pdir(ioc,"Check after sba_unmap_sg_attrs()");
spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif
}
/**************************************************************
*
* Initialization and claim
*
***************************************************************/
static void __init
ioc_iova_init(struct ioc *ioc)
{
int tcnfg;
int agp_found = 0;
struct pci_dev *device = NULL;
#ifdef FULL_VALID_PDIR
unsigned long index;
#endif
/*
** Firmware programs the base and size of a "safe IOVA space"
** (one that doesn't overlap memory or LMMIO space) in the
** IBASE and IMASK registers.
*/
ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1UL;
ioc->imask = READ_REG(ioc->ioc_hpa + IOC_IMASK) | 0xFFFFFFFF00000000UL;
ioc->iov_size = ~ioc->imask + 1;
DBG_INIT("%s() hpa %p IOV base 0x%lx mask 0x%lx (%dMB)\n",
__func__, ioc->ioc_hpa, ioc->ibase, ioc->imask,
ioc->iov_size >> 20);
switch (iovp_size) {
case 4*1024: tcnfg = 0; break;
case 8*1024: tcnfg = 1; break;
case 16*1024: tcnfg = 2; break;
case 64*1024: tcnfg = 3; break;
default:
panic(PFX "Unsupported IOTLB page size %ldK",
iovp_size >> 10);
break;
}
WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);
ioc->pdir_size = (ioc->iov_size / iovp_size) * PDIR_ENTRY_SIZE;
ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
get_order(ioc->pdir_size));
if (!ioc->pdir_base)
panic(PFX "Couldn't allocate I/O Page Table\n");
memset(ioc->pdir_base, 0, ioc->pdir_size);
DBG_INIT("%s() IOV page size %ldK pdir %p size %x\n", __func__,
iovp_size >> 10, ioc->pdir_base, ioc->pdir_size);
ASSERT(ALIGN((unsigned long) ioc->pdir_base, 4*1024) == (unsigned long) ioc->pdir_base);
WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
/*
** If an AGP device is present, only use half of the IOV space
** for PCI DMA. Unfortunately we can't know ahead of time
** whether GART support will actually be used, for now we
** can just key on an AGP device found in the system.
** We program the next pdir index after we stop w/ a key for
** the GART code to handshake on.
*/
for_each_pci_dev(device)
agp_found |= pci_find_capability(device, PCI_CAP_ID_AGP);
if (agp_found && reserve_sba_gart) {
printk(KERN_INFO PFX "reserving %dMb of IOVA space at 0x%lx for agpgart\n",
ioc->iov_size/2 >> 20, ioc->ibase + ioc->iov_size/2);
ioc->pdir_size /= 2;
((u64 *)ioc->pdir_base)[PDIR_INDEX(ioc->iov_size/2)] = ZX1_SBA_IOMMU_COOKIE;
}
#ifdef FULL_VALID_PDIR
/*
	** Check to see if the spill page has already been allocated; we don't
	** need more than one across multiple SBAs.
*/
if (!prefetch_spill_page) {
char *spill_poison = "SBAIOMMU POISON";
int poison_size = 16;
void *poison_addr, *addr;
addr = (void *)__get_free_pages(GFP_KERNEL, get_order(iovp_size));
if (!addr)
panic(PFX "Couldn't allocate PDIR spill page\n");
poison_addr = addr;
for ( ; (u64) poison_addr < addr + iovp_size; poison_addr += poison_size)
memcpy(poison_addr, spill_poison, poison_size);
prefetch_spill_page = virt_to_phys(addr);
DBG_INIT("%s() prefetch spill addr: 0x%lx\n", __func__, prefetch_spill_page);
}
/*
** Set all the PDIR entries valid w/ the spill page as the target
*/
for (index = 0 ; index < (ioc->pdir_size / PDIR_ENTRY_SIZE) ; index++)
((u64 *)ioc->pdir_base)[index] = (0x80000000000000FF | prefetch_spill_page);
#endif
/* Clear I/O TLB of any possible entries */
WRITE_REG(ioc->ibase | (get_iovp_order(ioc->iov_size) + iovp_shift), ioc->ioc_hpa + IOC_PCOM);
READ_REG(ioc->ioc_hpa + IOC_PCOM);
/* Enable IOVA translation */
WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);
READ_REG(ioc->ioc_hpa + IOC_IBASE);
}
static void __init
ioc_resource_init(struct ioc *ioc)
{
spin_lock_init(&ioc->res_lock);
#if DELAYED_RESOURCE_CNT > 0
spin_lock_init(&ioc->saved_lock);
#endif
/* resource map size dictated by pdir_size */
ioc->res_size = ioc->pdir_size / PDIR_ENTRY_SIZE; /* entries */
ioc->res_size >>= 3; /* convert bit count to byte count */
DBG_INIT("%s() res_size 0x%x\n", __func__, ioc->res_size);
ioc->res_map = (char *) __get_free_pages(GFP_KERNEL,
get_order(ioc->res_size));
if (!ioc->res_map)
panic(PFX "Couldn't allocate resource map\n");
memset(ioc->res_map, 0, ioc->res_size);
/* next available IOVP - circular search */
ioc->res_hint = (unsigned long *) ioc->res_map;
#ifdef ASSERT_PDIR_SANITY
/* Mark first bit busy - ie no IOVA 0 */
ioc->res_map[0] = 0x1;
ioc->pdir_base[0] = 0x8000000000000000ULL | ZX1_SBA_IOMMU_COOKIE;
#endif
#ifdef FULL_VALID_PDIR
/* Mark the last resource used so we don't prefetch beyond IOVA space */
ioc->res_map[ioc->res_size - 1] |= 0x80UL; /* res_map is chars */
ioc->pdir_base[(ioc->pdir_size / PDIR_ENTRY_SIZE) - 1] = (0x80000000000000FF
| prefetch_spill_page);
#endif
DBG_INIT("%s() res_map %x %p\n", __func__,
ioc->res_size, (void *) ioc->res_map);
}
static void __init
ioc_sac_init(struct ioc *ioc)
{
struct pci_dev *sac = NULL;
struct pci_controller *controller = NULL;
/*
* pci_alloc_coherent() must return a DMA address which is
* SAC (single address cycle) addressable, so allocate a
* pseudo-device to enforce that.
*/
sac = kzalloc(sizeof(*sac), GFP_KERNEL);
if (!sac)
panic(PFX "Couldn't allocate struct pci_dev");
controller = kzalloc(sizeof(*controller), GFP_KERNEL);
if (!controller)
panic(PFX "Couldn't allocate struct pci_controller");
controller->iommu = ioc;
sac->sysdata = controller;
sac->dma_mask = 0xFFFFFFFFUL;
#ifdef CONFIG_PCI
sac->dev.bus = &pci_bus_type;
#endif
ioc->sac_only_dev = sac;
}
static void __init
ioc_zx1_init(struct ioc *ioc)
{
unsigned long rope_config;
unsigned int i;
if (ioc->rev < 0x20)
panic(PFX "IOC 2.0 or later required for IOMMU support\n");
/* 38 bit memory controller + extra bit for range displaced by MMIO */
ioc->dma_mask = (0x1UL << 39) - 1;
/*
** Clear ROPE(N)_CONFIG AO bit.
** Disables "NT Ordering" (~= !"Relaxed Ordering")
** Overrides bit 1 in DMA Hint Sets.
** Improves netperf UDP_STREAM by ~10% for tg3 on bcm5701.
*/
for (i=0; i<(8*8); i+=8) {
rope_config = READ_REG(ioc->ioc_hpa + IOC_ROPE0_CFG + i);
rope_config &= ~IOC_ROPE_AO;
WRITE_REG(rope_config, ioc->ioc_hpa + IOC_ROPE0_CFG + i);
}
}
typedef void (initfunc)(struct ioc *);
struct ioc_iommu {
u32 func_id;
char *name;
initfunc *init;
};
static struct ioc_iommu ioc_iommu_info[] __initdata = {
{ ZX1_IOC_ID, "zx1", ioc_zx1_init },
{ ZX2_IOC_ID, "zx2", NULL },
{ SX1000_IOC_ID, "sx1000", NULL },
{ SX2000_IOC_ID, "sx2000", NULL },
};
static struct ioc * __init
ioc_init(unsigned long hpa, void *handle)
{
struct ioc *ioc;
struct ioc_iommu *info;
ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
if (!ioc)
return NULL;
ioc->next = ioc_list;
ioc_list = ioc;
ioc->handle = handle;
ioc->ioc_hpa = ioremap(hpa, 0x1000);
ioc->func_id = READ_REG(ioc->ioc_hpa + IOC_FUNC_ID);
ioc->rev = READ_REG(ioc->ioc_hpa + IOC_FCLASS) & 0xFFUL;
ioc->dma_mask = 0xFFFFFFFFFFFFFFFFUL; /* conservative */
for (info = ioc_iommu_info; info < ioc_iommu_info + ARRAY_SIZE(ioc_iommu_info); info++) {
if (ioc->func_id == info->func_id) {
ioc->name = info->name;
if (info->init)
(info->init)(ioc);
}
}
iovp_size = (1 << iovp_shift);
iovp_mask = ~(iovp_size - 1);
DBG_INIT("%s: PAGE_SIZE %ldK, iovp_size %ldK\n", __func__,
PAGE_SIZE >> 10, iovp_size >> 10);
if (!ioc->name) {
ioc->name = kmalloc(24, GFP_KERNEL);
if (ioc->name)
sprintf((char *) ioc->name, "Unknown (%04x:%04x)",
ioc->func_id & 0xFFFF, (ioc->func_id >> 16) & 0xFFFF);
else
ioc->name = "Unknown";
}
ioc_iova_init(ioc);
ioc_resource_init(ioc);
ioc_sac_init(ioc);
if ((long) ~iovp_mask > (long) ia64_max_iommu_merge_mask)
ia64_max_iommu_merge_mask = ~iovp_mask;
printk(KERN_INFO PFX
"%s %d.%d HPA 0x%lx IOVA space %dMb at 0x%lx\n",
ioc->name, (ioc->rev >> 4) & 0xF, ioc->rev & 0xF,
hpa, ioc->iov_size >> 20, ioc->ibase);
return ioc;
}
/**************************************************************************
**
** SBA initialization code (HW and SW)
**
** o identify SBA chip itself
** o FIXME: initialize DMA hints for reasonable defaults
**
**************************************************************************/
#ifdef CONFIG_PROC_FS
static void *
ioc_start(struct seq_file *s, loff_t *pos)
{
struct ioc *ioc;
loff_t n = *pos;
for (ioc = ioc_list; ioc; ioc = ioc->next)
if (!n--)
return ioc;
return NULL;
}
static void *
ioc_next(struct seq_file *s, void *v, loff_t *pos)
{
struct ioc *ioc = v;
++*pos;
return ioc->next;
}
static void
ioc_stop(struct seq_file *s, void *v)
{
}
static int
ioc_show(struct seq_file *s, void *v)
{
struct ioc *ioc = v;
unsigned long *res_ptr = (unsigned long *)ioc->res_map;
int i, used = 0;
seq_printf(s, "Hewlett Packard %s IOC rev %d.%d\n",
ioc->name, ((ioc->rev >> 4) & 0xF), (ioc->rev & 0xF));
#ifdef CONFIG_NUMA
if (ioc->node != MAX_NUMNODES)
seq_printf(s, "NUMA node : %d\n", ioc->node);
#endif
seq_printf(s, "IOVA size : %ld MB\n", ((ioc->pdir_size >> 3) * iovp_size)/(1024*1024));
seq_printf(s, "IOVA page size : %ld kb\n", iovp_size/1024);
for (i = 0; i < (ioc->res_size / sizeof(unsigned long)); ++i, ++res_ptr)
used += hweight64(*res_ptr);
seq_printf(s, "PDIR size : %d entries\n", ioc->pdir_size >> 3);
seq_printf(s, "PDIR used : %d entries\n", used);
#ifdef PDIR_SEARCH_TIMING
{
unsigned long i = 0, avg = 0, min, max;
min = max = ioc->avg_search[0];
for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
avg += ioc->avg_search[i];
if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
}
avg /= SBA_SEARCH_SAMPLE;
seq_printf(s, "Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles/IOVA page)\n",
min, avg, max);
}
#endif
#ifndef ALLOW_IOV_BYPASS
seq_printf(s, "IOVA bypass disabled\n");
#endif
return 0;
}
static const struct seq_operations ioc_seq_ops = {
.start = ioc_start,
.next = ioc_next,
.stop = ioc_stop,
.show = ioc_show
};
static int
ioc_open(struct inode *inode, struct file *file)
{
return seq_open(file, &ioc_seq_ops);
}
static const struct file_operations ioc_fops = {
.open = ioc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release
};
static void __init
ioc_proc_init(void)
{
struct proc_dir_entry *dir;
dir = proc_mkdir("bus/mckinley", NULL);
if (!dir)
return;
proc_create(ioc_list->name, 0, dir, &ioc_fops);
}
#endif
static void
sba_connect_bus(struct pci_bus *bus)
{
acpi_handle handle, parent;
acpi_status status;
struct ioc *ioc;
if (!PCI_CONTROLLER(bus))
panic(PFX "no sysdata on bus %d!\n", bus->number);
if (PCI_CONTROLLER(bus)->iommu)
return;
handle = PCI_CONTROLLER(bus)->acpi_handle;
if (!handle)
return;
/*
* The IOC scope encloses PCI root bridges in the ACPI
* namespace, so work our way out until we find an IOC we
* claimed previously.
*/
do {
for (ioc = ioc_list; ioc; ioc = ioc->next)
if (ioc->handle == handle) {
PCI_CONTROLLER(bus)->iommu = ioc;
return;
}
status = acpi_get_parent(handle, &parent);
handle = parent;
} while (ACPI_SUCCESS(status));
printk(KERN_WARNING "No IOC for PCI Bus %04x:%02x in ACPI\n", pci_domain_nr(bus), bus->number);
}
#ifdef CONFIG_NUMA
static void __init
sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle)
{
unsigned int node;
int pxm;
ioc->node = MAX_NUMNODES;
pxm = acpi_get_pxm(handle);
if (pxm < 0)
return;
node = pxm_to_node(pxm);
if (node >= MAX_NUMNODES || !node_online(node))
return;
ioc->node = node;
return;
}
#else
#define sba_map_ioc_to_node(ioc, handle)
#endif
static int __init
acpi_sba_ioc_add(struct acpi_device *device)
{
struct ioc *ioc;
acpi_status status;
u64 hpa, length;
struct acpi_device_info *adi;
status = hp_acpi_csr_space(device->handle, &hpa, &length);
if (ACPI_FAILURE(status))
return 1;
status = acpi_get_object_info(device->handle, &adi);
if (ACPI_FAILURE(status))
return 1;
/*
* For HWP0001, only SBA appears in ACPI namespace. It encloses the PCI
* root bridges, and its CSR space includes the IOC function.
*/
if (strncmp("HWP0001", adi->hardware_id.string, 7) == 0) {
hpa += ZX1_IOC_OFFSET;
/* zx1 based systems default to kernel page size iommu pages */
if (!iovp_shift)
iovp_shift = min(PAGE_SHIFT, 16);
}
kfree(adi);
/*
* default anything not caught above or specified on cmdline to 4k
* iommu page size
*/
if (!iovp_shift)
iovp_shift = 12;
ioc = ioc_init(hpa, device->handle);
if (!ioc)
return 1;
/* setup NUMA node association */
sba_map_ioc_to_node(ioc, device->handle);
return 0;
}
static const struct acpi_device_id hp_ioc_iommu_device_ids[] = {
{"HWP0001", 0},
{"HWP0004", 0},
{"", 0},
};
static struct acpi_driver acpi_sba_ioc_driver = {
.name = "IOC IOMMU Driver",
.ids = hp_ioc_iommu_device_ids,
.ops = {
.add = acpi_sba_ioc_add,
},
};
extern struct dma_map_ops swiotlb_dma_ops;
static int __init
sba_init(void)
{
if (!ia64_platform_is("hpzx1") && !ia64_platform_is("hpzx1_swiotlb"))
return 0;
#if defined(CONFIG_IA64_GENERIC)
/* If we are booting a kdump kernel, the sba_iommu will
* cause devices that were not shutdown properly to MCA
* as soon as they are turned back on. Our only option for
* a successful kdump kernel boot is to use the swiotlb.
*/
if (is_kdump_kernel()) {
dma_ops = &swiotlb_dma_ops;
if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
panic("Unable to initialize software I/O TLB:"
" Try machvec=dig boot option");
machvec_init("dig");
return 0;
}
#endif
acpi_bus_register_driver(&acpi_sba_ioc_driver);
if (!ioc_list) {
#ifdef CONFIG_IA64_GENERIC
/*
* If we didn't find something sba_iommu can claim, we
* need to setup the swiotlb and switch to the dig machvec.
*/
dma_ops = &swiotlb_dma_ops;
if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
panic("Unable to find SBA IOMMU or initialize "
"software I/O TLB: Try machvec=dig boot option");
machvec_init("dig");
#else
panic("Unable to find SBA IOMMU: Try a generic or DIG kernel");
#endif
return 0;
}
#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_HP_ZX1_SWIOTLB)
/*
* hpzx1_swiotlb needs to have a fairly small swiotlb bounce
* buffer setup to support devices with smaller DMA masks than
* sba_iommu can handle.
*/
if (ia64_platform_is("hpzx1_swiotlb")) {
extern void hwsw_init(void);
hwsw_init();
}
#endif
#ifdef CONFIG_PCI
{
struct pci_bus *b = NULL;
while ((b = pci_find_next_bus(b)) != NULL)
sba_connect_bus(b);
}
#endif
#ifdef CONFIG_PROC_FS
ioc_proc_init();
#endif
return 0;
}
subsys_initcall(sba_init); /* must be initialized after ACPI etc., but before any drivers... */
static int __init
nosbagart(char *str)
{
reserve_sba_gart = 0;
return 1;
}
static int sba_dma_supported (struct device *dev, u64 mask)
{
/* make sure it's at least 32bit capable */
return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL);
}
static int sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return 0;
}
__setup("nosbagart", nosbagart);
static int __init
sba_page_override(char *str)
{
unsigned long page_size;
page_size = memparse(str, &str);
switch (page_size) {
case 4096:
case 8192:
case 16384:
case 65536:
iovp_shift = ffs(page_size) - 1;
break;
default:
printk("%s: unknown/unsupported iommu page size %ld\n",
__func__, page_size);
}
return 1;
}
__setup("sbapagesize=",sba_page_override);
struct dma_map_ops sba_dma_ops = {
.alloc = sba_alloc_coherent,
.free = sba_free_coherent,
.map_page = sba_map_page,
.unmap_page = sba_unmap_page,
.map_sg = sba_map_sg_attrs,
.unmap_sg = sba_unmap_sg_attrs,
.sync_single_for_cpu = machvec_dma_sync_single,
.sync_sg_for_cpu = machvec_dma_sync_sg,
.sync_single_for_device = machvec_dma_sync_single,
.sync_sg_for_device = machvec_dma_sync_sg,
.dma_supported = sba_dma_supported,
.mapping_error = sba_dma_mapping_error,
};
void sba_dma_init(void)
{
dma_ops = &sba_dma_ops;
}
| gpl-2.0 |
jfdsmabalot/kernel_samsung_klte | arch/ia64/kernel/salinfo.c | 7680 | 19958 | /*
* salinfo.c
*
* Creates entries in /proc/sal for various system features.
*
* Copyright (c) 2003, 2006 Silicon Graphics, Inc. All rights reserved.
* Copyright (c) 2003 Hewlett-Packard Co
* Bjorn Helgaas <bjorn.helgaas@hp.com>
*
* 10/30/2001 jbarnes@sgi.com copied much of Stephane's palinfo
* code to create this file
* Oct 23 2003 kaos@sgi.com
* Replace IPI with set_cpus_allowed() to read a record from the required cpu.
* Redesign salinfo log processing to separate interrupt and user space
* contexts.
* Cache the record across multi-block reads from user space.
* Support > 64 cpus.
* Delete module_exit and MOD_INC/DEC_COUNT, salinfo cannot be a module.
*
* Jan 28 2004 kaos@sgi.com
* Periodically check for outstanding MCA or INIT records.
*
* Dec 5 2004 kaos@sgi.com
* Standardize which records are cleared automatically.
*
* Aug 18 2005 kaos@sgi.com
* mca.c may not pass a buffer, a NULL buffer just indicates that a new
* record is available in SAL.
* Replace some NR_CPUS by cpus_online, for hotplug cpu.
*
* Jan 5 2006 kaos@sgi.com
* Handle hotplug cpus coming online.
* Handle hotplug cpus going offline while they still have outstanding records.
* Use the cpu_* macros consistently.
* Replace the counting semaphore with a mutex and a test if the cpumask is non-empty.
* Modify the locking to make the test for "work to do" an atomic operation.
*/
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/proc_fs.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/semaphore.h>
#include <asm/sal.h>
#include <asm/uaccess.h>
MODULE_AUTHOR("Jesse Barnes <jbarnes@sgi.com>");
MODULE_DESCRIPTION("/proc interface to IA-64 SAL features");
MODULE_LICENSE("GPL");
static int salinfo_read(char *page, char **start, off_t off, int count, int *eof, void *data);
typedef struct {
const char *name; /* name of the proc entry */
unsigned long feature; /* feature bit */
struct proc_dir_entry *entry; /* registered entry (removal) */
} salinfo_entry_t;
/*
* List {name,feature} pairs for every entry in /proc/sal/<feature>
* that this module exports
*/
static salinfo_entry_t salinfo_entries[]={
{ "bus_lock", IA64_SAL_PLATFORM_FEATURE_BUS_LOCK, },
{ "irq_redirection", IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT, },
{ "ipi_redirection", IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT, },
{ "itc_drift", IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT, },
};
#define NR_SALINFO_ENTRIES ARRAY_SIZE(salinfo_entries)
static char *salinfo_log_name[] = {
"mca",
"init",
"cmc",
"cpe",
};
static struct proc_dir_entry *salinfo_proc_entries[
ARRAY_SIZE(salinfo_entries) + /* /proc/sal/bus_lock */
ARRAY_SIZE(salinfo_log_name) + /* /proc/sal/{mca,...} */
(2 * ARRAY_SIZE(salinfo_log_name)) + /* /proc/sal/mca/{event,data} */
1]; /* /proc/sal */
/* Some records we get ourselves, some are accessed as saved data in buffers
* that are owned by mca.c.
*/
struct salinfo_data_saved {
u8* buffer;
u64 size;
u64 id;
int cpu;
};
/* State transitions. Actions are :-
* Write "read <cpunum>" to the data file.
* Write "clear <cpunum>" to the data file.
* Write "oemdata <cpunum> <offset> to the data file.
* Read from the data file.
* Close the data file.
*
* Start state is NO_DATA.
*
* NO_DATA
* write "read <cpunum>" -> NO_DATA or LOG_RECORD.
* write "clear <cpunum>" -> NO_DATA or LOG_RECORD.
* write "oemdata <cpunum> <offset> -> return -EINVAL.
* read data -> return EOF.
* close -> unchanged. Free record areas.
*
* LOG_RECORD
* write "read <cpunum>" -> NO_DATA or LOG_RECORD.
* write "clear <cpunum>" -> NO_DATA or LOG_RECORD.
* write "oemdata <cpunum> <offset> -> format the oem data, goto OEMDATA.
* read data -> return the INIT/MCA/CMC/CPE record.
* close -> unchanged. Keep record areas.
*
* OEMDATA
* write "read <cpunum>" -> NO_DATA or LOG_RECORD.
* write "clear <cpunum>" -> NO_DATA or LOG_RECORD.
* write "oemdata <cpunum> <offset> -> format the oem data, goto OEMDATA.
* read data -> return the formatted oemdata.
* close -> unchanged. Keep record areas.
*
* Closing the data file does not change the state. This allows shell scripts
* to manipulate salinfo data, each shell redirection opens the file, does one
* action then closes it again. The record areas are only freed at close when
* the state is NO_DATA.
*/
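/* A sketch of typical user space usage against these states, assuming an
 * outstanding MCA record on some cpu (the event file blocks until it can
 * name one, and prints e.g. "read 2"):
 *
 *	cpu=$(awk '{print $2}' /proc/sal/mca/event)
 *	echo "read $cpu"  > /proc/sal/mca/data
 *	cat /proc/sal/mca/data > mca.$cpu.bin
 *	echo "clear $cpu" > /proc/sal/mca/data
 */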
enum salinfo_state {
STATE_NO_DATA,
STATE_LOG_RECORD,
STATE_OEMDATA,
};
struct salinfo_data {
cpumask_t cpu_event; /* which cpus have outstanding events */
struct semaphore mutex;
u8 *log_buffer;
u64 log_size;
u8 *oemdata; /* decoded oem data */
u64 oemdata_size;
int open; /* single-open to prevent races */
u8 type;
u8 saved_num; /* using a saved record? */
enum salinfo_state state :8; /* processing state */
u8 padding;
int cpu_check; /* next CPU to check */
struct salinfo_data_saved data_saved[5];/* save last 5 records from mca.c, must be < 255 */
};
static struct salinfo_data salinfo_data[ARRAY_SIZE(salinfo_log_name)];
static DEFINE_SPINLOCK(data_lock);
static DEFINE_SPINLOCK(data_saved_lock);
/** salinfo_platform_oemdata - optional callback to decode oemdata from an error
* record.
* @sect_header: pointer to the start of the section to decode.
* @oemdata: returns vmalloc area containing the decoded output.
* @oemdata_size: returns length of decoded output (strlen).
*
* Description: If user space asks for oem data to be decoded by the kernel
* and/or prom and the platform has set salinfo_platform_oemdata to the address
* of a platform specific routine then call that routine. salinfo_platform_oemdata
* vmalloc's and formats its output area, returning the address of the text
* and its strlen. Returns 0 for success, -ve for error. The callback is
* invoked on the cpu that generated the error record.
*/
int (*salinfo_platform_oemdata)(const u8 *sect_header, u8 **oemdata, u64 *oemdata_size);
struct salinfo_platform_oemdata_parms {
const u8 *efi_guid;
u8 **oemdata;
u64 *oemdata_size;
int ret;
};
/* Kick the mutex that tells user space that there is work to do. Instead of
* trying to track the state of the mutex across multiple cpus, in user
* context, interrupt context, non-maskable interrupt context and hotplug cpu,
* it is far easier just to grab the mutex if it is free then release it.
*
* This routine must be called with data_saved_lock held, to make the down/up
* operation atomic.
*/
static void
salinfo_work_to_do(struct salinfo_data *data)
{
(void)(down_trylock(&data->mutex) ?: 0);
up(&data->mutex);
}
static void
salinfo_platform_oemdata_cpu(void *context)
{
struct salinfo_platform_oemdata_parms *parms = context;
parms->ret = salinfo_platform_oemdata(parms->efi_guid, parms->oemdata, parms->oemdata_size);
}
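/* Drop data_saved[shift], slide the later entries down one slot and
 * zero the vacated last slot.
 */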
static void
shift1_data_saved (struct salinfo_data *data, int shift)
{
memcpy(data->data_saved+shift, data->data_saved+shift+1,
(ARRAY_SIZE(data->data_saved) - (shift+1)) * sizeof(data->data_saved[0]));
memset(data->data_saved + ARRAY_SIZE(data->data_saved) - 1, 0,
sizeof(data->data_saved[0]));
}
/* This routine is invoked in interrupt context. Note: mca.c enables
* interrupts before calling this code for CMC/CPE. MCA and INIT events are
* not irq safe, do not call any routines that use spinlocks, they may deadlock.
* MCA and INIT records are recorded, a timer event will look for any
* outstanding events and wake up the user space code.
*
* The buffer passed from mca.c points to the output from ia64_log_get. This is
* a persistent buffer but its contents can change between the interrupt and
* when user space processes the record. Save the record id to identify
* changes. If the buffer is NULL then just update the bitmap.
*/
void
salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe)
{
struct salinfo_data *data = salinfo_data + type;
struct salinfo_data_saved *data_saved;
unsigned long flags = 0;
int i;
int saved_size = ARRAY_SIZE(data->data_saved);
BUG_ON(type >= ARRAY_SIZE(salinfo_log_name));
if (irqsafe)
spin_lock_irqsave(&data_saved_lock, flags);
if (buffer) {
for (i = 0, data_saved = data->data_saved; i < saved_size; ++i, ++data_saved) {
if (!data_saved->buffer)
break;
}
if (i == saved_size) {
if (!data->saved_num) {
shift1_data_saved(data, 0);
data_saved = data->data_saved + saved_size - 1;
} else
data_saved = NULL;
}
if (data_saved) {
data_saved->cpu = smp_processor_id();
data_saved->id = ((sal_log_record_header_t *)buffer)->id;
data_saved->size = size;
data_saved->buffer = buffer;
}
}
cpu_set(smp_processor_id(), data->cpu_event);
if (irqsafe) {
salinfo_work_to_do(data);
spin_unlock_irqrestore(&data_saved_lock, flags);
}
}
/* Check for outstanding MCA/INIT records every minute (arbitrary) */
#define SALINFO_TIMER_DELAY (60*HZ)
static struct timer_list salinfo_timer;
extern void ia64_mlogbuf_dump(void);
static void
salinfo_timeout_check(struct salinfo_data *data)
{
unsigned long flags;
if (!data->open)
return;
if (!cpus_empty(data->cpu_event)) {
spin_lock_irqsave(&data_saved_lock, flags);
salinfo_work_to_do(data);
spin_unlock_irqrestore(&data_saved_lock, flags);
}
}
static void
salinfo_timeout (unsigned long arg)
{
ia64_mlogbuf_dump();
salinfo_timeout_check(salinfo_data + SAL_INFO_TYPE_MCA);
salinfo_timeout_check(salinfo_data + SAL_INFO_TYPE_INIT);
salinfo_timer.expires = jiffies + SALINFO_TIMER_DELAY;
add_timer(&salinfo_timer);
}
static int
salinfo_event_open(struct inode *inode, struct file *file)
{
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
return 0;
}
static ssize_t
salinfo_event_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
struct inode *inode = file->f_path.dentry->d_inode;
struct proc_dir_entry *entry = PDE(inode);
struct salinfo_data *data = entry->data;
char cmd[32];
size_t size;
int i, n, cpu = -1;
retry:
if (cpus_empty(data->cpu_event) && down_trylock(&data->mutex)) {
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
if (down_interruptible(&data->mutex))
return -EINTR;
}
n = data->cpu_check;
for (i = 0; i < nr_cpu_ids; i++) {
if (cpu_isset(n, data->cpu_event)) {
if (!cpu_online(n)) {
cpu_clear(n, data->cpu_event);
continue;
}
cpu = n;
break;
}
if (++n == nr_cpu_ids)
n = 0;
}
if (cpu == -1)
goto retry;
ia64_mlogbuf_dump();
/* for next read, start checking at next CPU */
data->cpu_check = cpu;
if (++data->cpu_check == nr_cpu_ids)
data->cpu_check = 0;
snprintf(cmd, sizeof(cmd), "read %d\n", cpu);
size = strlen(cmd);
if (size > count)
size = count;
if (copy_to_user(buffer, cmd, size))
return -EFAULT;
return size;
}
static const struct file_operations salinfo_event_fops = {
.open = salinfo_event_open,
.read = salinfo_event_read,
.llseek = noop_llseek,
};
static int
salinfo_log_open(struct inode *inode, struct file *file)
{
struct proc_dir_entry *entry = PDE(inode);
struct salinfo_data *data = entry->data;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
spin_lock(&data_lock);
if (data->open) {
spin_unlock(&data_lock);
return -EBUSY;
}
data->open = 1;
spin_unlock(&data_lock);
if (data->state == STATE_NO_DATA &&
!(data->log_buffer = vmalloc(ia64_sal_get_state_info_size(data->type)))) {
data->open = 0;
return -ENOMEM;
}
return 0;
}
static int
salinfo_log_release(struct inode *inode, struct file *file)
{
struct proc_dir_entry *entry = PDE(inode);
struct salinfo_data *data = entry->data;
if (data->state == STATE_NO_DATA) {
vfree(data->log_buffer);
vfree(data->oemdata);
data->log_buffer = NULL;
data->oemdata = NULL;
}
spin_lock(&data_lock);
data->open = 0;
spin_unlock(&data_lock);
return 0;
}
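/*
 * SAL state info must be read on the cpu that logged the event, so
 * temporarily pin the current task to the target cpu, run fn there,
 * then restore the caller's original cpu affinity.
 */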
static void
call_on_cpu(int cpu, void (*fn)(void *), void *arg)
{
cpumask_t save_cpus_allowed = current->cpus_allowed;
set_cpus_allowed_ptr(current, cpumask_of(cpu));
(*fn)(arg);
set_cpus_allowed_ptr(current, &save_cpus_allowed);
}
static void
salinfo_log_read_cpu(void *context)
{
struct salinfo_data *data = context;
sal_log_record_header_t *rh;
data->log_size = ia64_sal_get_state_info(data->type, (u64 *) data->log_buffer);
rh = (sal_log_record_header_t *)(data->log_buffer);
/* Clear corrected errors as they are read from SAL */
if (rh->severity == sal_log_severity_corrected)
ia64_sal_clear_state_info(data->type);
}
static void
salinfo_log_new_read(int cpu, struct salinfo_data *data)
{
struct salinfo_data_saved *data_saved;
unsigned long flags;
int i;
int saved_size = ARRAY_SIZE(data->data_saved);
data->saved_num = 0;
spin_lock_irqsave(&data_saved_lock, flags);
retry:
for (i = 0, data_saved = data->data_saved; i < saved_size; ++i, ++data_saved) {
if (data_saved->buffer && data_saved->cpu == cpu) {
sal_log_record_header_t *rh = (sal_log_record_header_t *)(data_saved->buffer);
data->log_size = data_saved->size;
memcpy(data->log_buffer, rh, data->log_size);
barrier(); /* id check must not be moved */
if (rh->id == data_saved->id) {
data->saved_num = i+1;
break;
}
/* saved record changed by mca.c since interrupt, discard it */
shift1_data_saved(data, i);
goto retry;
}
}
spin_unlock_irqrestore(&data_saved_lock, flags);
if (!data->saved_num)
call_on_cpu(cpu, salinfo_log_read_cpu, data);
if (!data->log_size) {
data->state = STATE_NO_DATA;
cpu_clear(cpu, data->cpu_event);
} else {
data->state = STATE_LOG_RECORD;
}
}
static ssize_t
salinfo_log_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
struct inode *inode = file->f_path.dentry->d_inode;
struct proc_dir_entry *entry = PDE(inode);
struct salinfo_data *data = entry->data;
u8 *buf;
u64 bufsize;
if (data->state == STATE_LOG_RECORD) {
buf = data->log_buffer;
bufsize = data->log_size;
} else if (data->state == STATE_OEMDATA) {
buf = data->oemdata;
bufsize = data->oemdata_size;
} else {
buf = NULL;
bufsize = 0;
}
return simple_read_from_buffer(buffer, count, ppos, buf, bufsize);
}
static void
salinfo_log_clear_cpu(void *context)
{
struct salinfo_data *data = context;
ia64_sal_clear_state_info(data->type);
}
static int
salinfo_log_clear(struct salinfo_data *data, int cpu)
{
sal_log_record_header_t *rh;
unsigned long flags;
spin_lock_irqsave(&data_saved_lock, flags);
data->state = STATE_NO_DATA;
if (!cpu_isset(cpu, data->cpu_event)) {
spin_unlock_irqrestore(&data_saved_lock, flags);
return 0;
}
cpu_clear(cpu, data->cpu_event);
if (data->saved_num) {
shift1_data_saved(data, data->saved_num - 1);
data->saved_num = 0;
}
spin_unlock_irqrestore(&data_saved_lock, flags);
rh = (sal_log_record_header_t *)(data->log_buffer);
/* Corrected errors have already been cleared from SAL */
if (rh->severity != sal_log_severity_corrected)
call_on_cpu(cpu, salinfo_log_clear_cpu, data);
/* clearing a record may make a new record visible */
salinfo_log_new_read(cpu, data);
if (data->state == STATE_LOG_RECORD) {
spin_lock_irqsave(&data_saved_lock, flags);
cpu_set(cpu, data->cpu_event);
salinfo_work_to_do(data);
spin_unlock_irqrestore(&data_saved_lock, flags);
}
return 0;
}
static ssize_t
salinfo_log_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
struct inode *inode = file->f_path.dentry->d_inode;
struct proc_dir_entry *entry = PDE(inode);
struct salinfo_data *data = entry->data;
char cmd[32];
size_t size;
u32 offset;
int cpu;
size = sizeof(cmd);
if (count < size)
size = count;
if (copy_from_user(cmd, buffer, size))
return -EFAULT;
if (sscanf(cmd, "read %d", &cpu) == 1) {
salinfo_log_new_read(cpu, data);
} else if (sscanf(cmd, "clear %d", &cpu) == 1) {
int ret;
if ((ret = salinfo_log_clear(data, cpu)))
count = ret;
} else if (sscanf(cmd, "oemdata %d %d", &cpu, &offset) == 2) {
if (data->state != STATE_LOG_RECORD && data->state != STATE_OEMDATA)
return -EINVAL;
if (offset > data->log_size - sizeof(efi_guid_t))
return -EINVAL;
data->state = STATE_OEMDATA;
if (salinfo_platform_oemdata) {
struct salinfo_platform_oemdata_parms parms = {
.efi_guid = data->log_buffer + offset,
.oemdata = &data->oemdata,
.oemdata_size = &data->oemdata_size
};
call_on_cpu(cpu, salinfo_platform_oemdata_cpu, &parms);
if (parms.ret)
count = parms.ret;
} else
data->oemdata_size = 0;
} else
return -EINVAL;
return count;
}
static const struct file_operations salinfo_data_fops = {
.open = salinfo_log_open,
.release = salinfo_log_release,
.read = salinfo_log_read,
.write = salinfo_log_write,
.llseek = default_llseek,
};
static int __cpuinit
salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
{
unsigned int i, cpu = (unsigned long)hcpu;
unsigned long flags;
struct salinfo_data *data;
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
spin_lock_irqsave(&data_saved_lock, flags);
for (i = 0, data = salinfo_data;
i < ARRAY_SIZE(salinfo_data);
++i, ++data) {
cpu_set(cpu, data->cpu_event);
salinfo_work_to_do(data);
}
spin_unlock_irqrestore(&data_saved_lock, flags);
break;
case CPU_DEAD:
case CPU_DEAD_FROZEN:
spin_lock_irqsave(&data_saved_lock, flags);
for (i = 0, data = salinfo_data;
i < ARRAY_SIZE(salinfo_data);
++i, ++data) {
struct salinfo_data_saved *data_saved;
int j;
for (j = ARRAY_SIZE(data->data_saved) - 1, data_saved = data->data_saved + j;
j >= 0;
--j, --data_saved) {
if (data_saved->buffer && data_saved->cpu == cpu) {
shift1_data_saved(data, j);
}
}
cpu_clear(cpu, data->cpu_event);
}
spin_unlock_irqrestore(&data_saved_lock, flags);
break;
}
return NOTIFY_OK;
}
static struct notifier_block salinfo_cpu_notifier __cpuinitdata =
{
.notifier_call = salinfo_cpu_callback,
.priority = 0,
};
static int __init
salinfo_init(void)
{
struct proc_dir_entry *salinfo_dir; /* /proc/sal dir entry */
struct proc_dir_entry **sdir = salinfo_proc_entries; /* keeps track of every entry */
struct proc_dir_entry *dir, *entry;
struct salinfo_data *data;
int i, j;
salinfo_dir = proc_mkdir("sal", NULL);
if (!salinfo_dir)
return 0;
for (i=0; i < NR_SALINFO_ENTRIES; i++) {
/* pass the feature bit in question as misc data */
*sdir++ = create_proc_read_entry (salinfo_entries[i].name, 0, salinfo_dir,
salinfo_read, (void *)salinfo_entries[i].feature);
}
for (i = 0; i < ARRAY_SIZE(salinfo_log_name); i++) {
data = salinfo_data + i;
data->type = i;
sema_init(&data->mutex, 1);
dir = proc_mkdir(salinfo_log_name[i], salinfo_dir);
if (!dir)
continue;
entry = proc_create_data("event", S_IRUSR, dir,
&salinfo_event_fops, data);
if (!entry)
continue;
*sdir++ = entry;
entry = proc_create_data("data", S_IRUSR | S_IWUSR, dir,
&salinfo_data_fops, data);
if (!entry)
continue;
*sdir++ = entry;
/* we missed any events before now */
for_each_online_cpu(j)
cpu_set(j, data->cpu_event);
*sdir++ = dir;
}
*sdir++ = salinfo_dir;
init_timer(&salinfo_timer);
salinfo_timer.expires = jiffies + SALINFO_TIMER_DELAY;
salinfo_timer.function = &salinfo_timeout;
add_timer(&salinfo_timer);
register_hotcpu_notifier(&salinfo_cpu_notifier);
return 0;
}
/*
* 'data' contains an integer that corresponds to the feature we're
* testing
*/
static int
salinfo_read(char *page, char **start, off_t off, int count, int *eof, void *data)
{
int len = 0;
len = sprintf(page, (sal_platform_features & (unsigned long)data) ? "1\n" : "0\n");
if (len <= off+count) *eof = 1;
*start = page + off;
len -= off;
if (len>count) len = count;
if (len<0) len = 0;
return len;
}
module_init(salinfo_init);
| gpl-2.0 |
md5555/S6-UniKernel | arch/parisc/kernel/unwind.c | 9984 | 11526 | /*
* Kernel unwinding support
*
* (c) 2002-2004 Randolph Chung <tausq@debian.org>
*
* Derived partially from the IA64 implementation. The PA-RISC
* Runtime Architecture Document is also a useful reference to
* understand what is happening here
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kallsyms.h>
#include <linux/sort.h>
#include <asm/uaccess.h>
#include <asm/assembly.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/unwind.h>
/* #define DEBUG 1 */
#ifdef DEBUG
#define dbg(x...) printk(x)
#else
#define dbg(x...)
#endif
#define KERNEL_START (KERNEL_BINARY_TEXT_START)
extern struct unwind_table_entry __start___unwind[];
extern struct unwind_table_entry __stop___unwind[];
static spinlock_t unwind_lock;
/*
* the kernel unwind block is not dynamically allocated so that
* we can call unwind_init as early in the bootup process as
* possible (before the slab allocator is initialized)
*/
static struct unwind_table kernel_unwind_table __read_mostly;
static LIST_HEAD(unwind_tables);
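/*
 * Binary search: the table is sorted by region_start, so home in on the
 * entry whose [region_start, region_end] range contains addr.
 */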
static inline const struct unwind_table_entry *
find_unwind_entry_in_table(const struct unwind_table *table, unsigned long addr)
{
const struct unwind_table_entry *e = NULL;
unsigned long lo, hi, mid;
lo = 0;
hi = table->length - 1;
while (lo <= hi) {
mid = (hi - lo) / 2 + lo;
e = &table->table[mid];
if (addr < e->region_start)
hi = mid - 1;
else if (addr > e->region_end)
lo = mid + 1;
else
return e;
}
return NULL;
}
static const struct unwind_table_entry *
find_unwind_entry(unsigned long addr)
{
struct unwind_table *table;
const struct unwind_table_entry *e = NULL;
if (addr >= kernel_unwind_table.start &&
addr <= kernel_unwind_table.end)
e = find_unwind_entry_in_table(&kernel_unwind_table, addr);
else
list_for_each_entry(table, &unwind_tables, list) {
if (addr >= table->start &&
addr <= table->end)
e = find_unwind_entry_in_table(table, addr);
if (e) {
/* Move-to-front to exploit common traces */
list_move(&table->list, &unwind_tables);
break;
}
}
return e;
}
static void
unwind_table_init(struct unwind_table *table, const char *name,
unsigned long base_addr, unsigned long gp,
void *table_start, void *table_end)
{
struct unwind_table_entry *start = table_start;
struct unwind_table_entry *end =
(struct unwind_table_entry *)table_end - 1;
table->name = name;
table->base_addr = base_addr;
table->gp = gp;
table->start = base_addr + start->region_start;
table->end = base_addr + end->region_end;
table->table = (struct unwind_table_entry *)table_start;
table->length = end - start + 1;
INIT_LIST_HEAD(&table->list);
for (; start <= end; start++) {
if (start < end &&
start->region_end > (start+1)->region_start) {
printk("WARNING: Out of order unwind entry! %p and %p\n", start, start+1);
}
start->region_start += base_addr;
start->region_end += base_addr;
}
}
static int cmp_unwind_table_entry(const void *a, const void *b)
{
return ((const struct unwind_table_entry *)a)->region_start
- ((const struct unwind_table_entry *)b)->region_start;
}
static void
unwind_table_sort(struct unwind_table_entry *start,
struct unwind_table_entry *finish)
{
sort(start, finish - start, sizeof(struct unwind_table_entry),
cmp_unwind_table_entry, NULL);
}
struct unwind_table *
unwind_table_add(const char *name, unsigned long base_addr,
unsigned long gp,
void *start, void *end)
{
struct unwind_table *table;
unsigned long flags;
struct unwind_table_entry *s = (struct unwind_table_entry *)start;
struct unwind_table_entry *e = (struct unwind_table_entry *)end;
unwind_table_sort(s, e);
table = kmalloc(sizeof(struct unwind_table), GFP_USER);
if (table == NULL)
return NULL;
unwind_table_init(table, name, base_addr, gp, start, end);
spin_lock_irqsave(&unwind_lock, flags);
list_add_tail(&table->list, &unwind_tables);
spin_unlock_irqrestore(&unwind_lock, flags);
return table;
}
void unwind_table_remove(struct unwind_table *table)
{
unsigned long flags;
spin_lock_irqsave(&unwind_lock, flags);
list_del(&table->list);
spin_unlock_irqrestore(&unwind_lock, flags);
kfree(table);
}
/* Called from setup_arch to import the kernel unwind info */
int unwind_init(void)
{
long start, stop;
register unsigned long gp __asm__ ("r27");
start = (long)&__start___unwind[0];
stop = (long)&__stop___unwind[0];
spin_lock_init(&unwind_lock);
printk("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n",
start, stop,
(stop - start) / sizeof(struct unwind_table_entry));
unwind_table_init(&kernel_unwind_table, "kernel", KERNEL_START,
gp,
&__start___unwind[0], &__stop___unwind[0]);
#if 0
{
int i;
for (i = 0; i < 10; i++)
{
printk("region 0x%x-0x%x\n",
__start___unwind[i].region_start,
__start___unwind[i].region_end);
}
}
#endif
return 0;
}
#ifdef CONFIG_64BIT
#define get_func_addr(fptr) fptr[2]
#else
#define get_func_addr(fptr) fptr[0]
#endif
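/*
 * On 64-bit parisc a function pointer refers to a function descriptor
 * rather than to the code itself; the entry address lives two words into
 * the descriptor, hence fptr[2]. On 32-bit the pointer is the address.
 */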
static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int frame_size)
{
extern void handle_interruption(int, struct pt_regs *);
static unsigned long *hi = (unsigned long *)&handle_interruption;
if (pc == get_func_addr(hi)) {
struct pt_regs *regs = (struct pt_regs *)(info->sp - frame_size - PT_SZ_ALGN);
dbg("Unwinding through handle_interruption()\n");
info->prev_sp = regs->gr[30];
info->prev_ip = regs->iaoq[0];
return 1;
}
return 0;
}
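/*
 * Core unwinder: when unwind info exists for info->ip, scan the function
 * prologue forward from region_start, decoding stack-adjust and rp-save
 * instructions until the frame size and rp slot are known; with no unwind
 * info, fall back to a blind stack walk.
 */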
static void unwind_frame_regs(struct unwind_frame_info *info)
{
const struct unwind_table_entry *e;
unsigned long npc;
unsigned int insn;
long frame_size = 0;
int looking_for_rp, rpoffset = 0;
e = find_unwind_entry(info->ip);
if (e == NULL) {
unsigned long sp;
extern char _stext[], _etext[];
dbg("Cannot find unwind entry for 0x%lx; forced unwinding\n", info->ip);
#ifdef CONFIG_KALLSYMS
/* Handle some frequent special cases.... */
{
char symname[KSYM_NAME_LEN];
char *modname;
kallsyms_lookup(info->ip, NULL, NULL, &modname,
symname);
dbg("info->ip = 0x%lx, name = %s\n", info->ip, symname);
if (strcmp(symname, "_switch_to_ret") == 0) {
info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
dbg("_switch_to_ret @ %lx - setting "
"prev_sp=%lx prev_ip=%lx\n",
info->ip, info->prev_sp,
info->prev_ip);
return;
} else if (strcmp(symname, "ret_from_kernel_thread") == 0 ||
strcmp(symname, "syscall_exit") == 0) {
info->prev_ip = info->prev_sp = 0;
return;
}
}
#endif
/* Since we are doing the unwinding blind, we don't know if
we are adjusting the stack correctly or extracting the rp
correctly. The rp is checked to see if it belongs to the
kernel text section; if not, we assume we don't have a
correct stack frame and we continue to unwind the stack.
This is not quite correct, and will fail for loadable
modules. */
sp = info->sp & ~63;
do {
unsigned long tmp;
info->prev_sp = sp - 64;
info->prev_ip = 0;
if (get_user(tmp, (unsigned long *)(info->prev_sp - RP_OFFSET)))
break;
info->prev_ip = tmp;
sp = info->prev_sp;
} while (info->prev_ip < (unsigned long)_stext ||
info->prev_ip > (unsigned long)_etext);
info->rp = 0;
dbg("analyzing func @ %lx with no unwind info, setting "
"prev_sp=%lx prev_ip=%lx\n", info->ip,
info->prev_sp, info->prev_ip);
} else {
dbg("e->start = 0x%x, e->end = 0x%x, Save_SP = %d, "
"Save_RP = %d, Millicode = %d size = %u\n",
e->region_start, e->region_end, e->Save_SP, e->Save_RP,
e->Millicode, e->Total_frame_size);
looking_for_rp = e->Save_RP;
for (npc = e->region_start;
(frame_size < (e->Total_frame_size << 3) ||
looking_for_rp) &&
npc < info->ip;
npc += 4) {
insn = *(unsigned int *)npc;
if ((insn & 0xffffc000) == 0x37de0000 ||
(insn & 0xffe00000) == 0x6fc00000) {
/* ldo X(sp), sp, or stwm X,D(sp) */
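/* decode the im14 immediate: bit 0 is the sign, bits 13..1 the value */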
frame_size += (insn & 0x1 ? -1 << 13 : 0) |
((insn & 0x3fff) >> 1);
dbg("analyzing func @ %lx, insn=%08x @ "
"%lx, frame_size = %ld\n", info->ip,
insn, npc, frame_size);
} else if ((insn & 0xffe00008) == 0x73c00008) {
/* std,ma X,D(sp) */
frame_size += (insn & 0x1 ? -1 << 13 : 0) |
(((insn >> 4) & 0x3ff) << 3);
dbg("analyzing func @ %lx, insn=%08x @ "
"%lx, frame_size = %ld\n", info->ip,
insn, npc, frame_size);
} else if (insn == 0x6bc23fd9) {
/* stw rp,-20(sp) */
rpoffset = 20;
looking_for_rp = 0;
dbg("analyzing func @ %lx, insn=stw rp,"
"-20(sp) @ %lx\n", info->ip, npc);
} else if (insn == 0x0fc212c1) {
/* std rp,-16(sr0,sp) */
rpoffset = 16;
looking_for_rp = 0;
dbg("analyzing func @ %lx, insn=std rp,"
"-16(sp) @ %lx\n", info->ip, npc);
}
}
if (!unwind_special(info, e->region_start, frame_size)) {
info->prev_sp = info->sp - frame_size;
if (e->Millicode)
info->rp = info->r31;
else if (rpoffset)
info->rp = *(unsigned long *)(info->prev_sp - rpoffset);
info->prev_ip = info->rp;
info->rp = 0;
}
dbg("analyzing func @ %lx, setting prev_sp=%lx "
"prev_ip=%lx npc=%lx\n", info->ip, info->prev_sp,
info->prev_ip, npc);
}
}
void unwind_frame_init(struct unwind_frame_info *info, struct task_struct *t,
struct pt_regs *regs)
{
memset(info, 0, sizeof(struct unwind_frame_info));
info->t = t;
info->sp = regs->gr[30];
info->ip = regs->iaoq[0];
info->rp = regs->gr[2];
info->r31 = regs->gr[31];
dbg("(%d) Start unwind from sp=%08lx ip=%08lx\n",
t ? (int)t->pid : -1, info->sp, info->ip);
}
void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct task_struct *t)
{
struct pt_regs *r = &t->thread.regs;
struct pt_regs *r2;
r2 = kmalloc(sizeof(struct pt_regs), GFP_ATOMIC);
if (!r2)
return;
*r2 = *r;
r2->gr[30] = r->ksp;
r2->iaoq[0] = r->kpc;
unwind_frame_init(info, t, r2);
kfree(r2);
}
void unwind_frame_init_running(struct unwind_frame_info *info, struct pt_regs *regs)
{
unwind_frame_init(info, current, regs);
}
int unwind_once(struct unwind_frame_info *next_frame)
{
unwind_frame_regs(next_frame);
if (next_frame->prev_sp == 0 ||
next_frame->prev_ip == 0)
return -1;
next_frame->sp = next_frame->prev_sp;
next_frame->ip = next_frame->prev_ip;
next_frame->prev_sp = 0;
next_frame->prev_ip = 0;
dbg("(%d) Continue unwind to sp=%08lx ip=%08lx\n",
next_frame->t ? (int)next_frame->t->pid : -1,
next_frame->sp, next_frame->ip);
return 0;
}
int unwind_to_user(struct unwind_frame_info *info)
{
int ret;
do {
ret = unwind_once(info);
} while (!ret && !(info->ip & 3));
return ret;
}
unsigned long return_address(unsigned int level)
{
struct unwind_frame_info info;
struct pt_regs r;
unsigned long sp;
/* initialize unwind info */
asm volatile ("copy %%r30, %0" : "=r"(sp));
memset(&r, 0, sizeof(struct pt_regs));
r.iaoq[0] = (unsigned long) current_text_addr();
r.gr[2] = (unsigned long) __builtin_return_address(0);
r.gr[30] = sp;
unwind_frame_init(&info, current, &r);
/* unwind stack */
++level;
do {
if (unwind_once(&info) < 0 || info.ip == 0)
return 0;
if (!__kernel_text_address(info.ip)) {
return 0;
}
} while (info.ip && level--);
return info.ip;
}
| gpl-2.0 |
bju2000/kernel_lge_msm8994 | net/ceph/crush/hash.c | 12032 | 3181 |
#include <linux/types.h>
#include <linux/crush/hash.h>
/*
* Robert Jenkins' function for mixing 32-bit values
* http://burtleburtle.net/bob/hash/evahash.html
* a, b = random bits, c = input and output
*/
#define crush_hashmix(a, b, c) do { \
a = a-b; a = a-c; a = a^(c>>13); \
b = b-c; b = b-a; b = b^(a<<8); \
c = c-a; c = c-b; c = c^(b>>13); \
a = a-b; a = a-c; a = a^(c>>12); \
b = b-c; b = b-a; b = b^(a<<16); \
c = c-a; c = c-b; c = c^(b>>5); \
a = a-b; a = a-c; a = a^(c>>3); \
b = b-c; b = b-a; b = b^(a<<10); \
c = c-a; c = c-b; c = c^(b>>15); \
} while (0)
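/*
 * One round of the Jenkins 96-bit mix: after a few rounds every output
 * word depends on every input bit, which is what lets the fixed-width
 * hashes below serve as pseudo-random placement functions.
 */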
#define crush_hash_seed 1315423911
static __u32 crush_hash32_rjenkins1(__u32 a)
{
__u32 hash = crush_hash_seed ^ a;
__u32 b = a;
__u32 x = 231232;
__u32 y = 1232;
crush_hashmix(b, x, hash);
crush_hashmix(y, a, hash);
return hash;
}
static __u32 crush_hash32_rjenkins1_2(__u32 a, __u32 b)
{
__u32 hash = crush_hash_seed ^ a ^ b;
__u32 x = 231232;
__u32 y = 1232;
crush_hashmix(a, b, hash);
crush_hashmix(x, a, hash);
crush_hashmix(b, y, hash);
return hash;
}
static __u32 crush_hash32_rjenkins1_3(__u32 a, __u32 b, __u32 c)
{
__u32 hash = crush_hash_seed ^ a ^ b ^ c;
__u32 x = 231232;
__u32 y = 1232;
crush_hashmix(a, b, hash);
crush_hashmix(c, x, hash);
crush_hashmix(y, a, hash);
crush_hashmix(b, x, hash);
crush_hashmix(y, c, hash);
return hash;
}
static __u32 crush_hash32_rjenkins1_4(__u32 a, __u32 b, __u32 c, __u32 d)
{
__u32 hash = crush_hash_seed ^ a ^ b ^ c ^ d;
__u32 x = 231232;
__u32 y = 1232;
crush_hashmix(a, b, hash);
crush_hashmix(c, d, hash);
crush_hashmix(a, x, hash);
crush_hashmix(y, b, hash);
crush_hashmix(c, x, hash);
crush_hashmix(y, d, hash);
return hash;
}
static __u32 crush_hash32_rjenkins1_5(__u32 a, __u32 b, __u32 c, __u32 d,
__u32 e)
{
__u32 hash = crush_hash_seed ^ a ^ b ^ c ^ d ^ e;
__u32 x = 231232;
__u32 y = 1232;
crush_hashmix(a, b, hash);
crush_hashmix(c, d, hash);
crush_hashmix(e, x, hash);
crush_hashmix(y, a, hash);
crush_hashmix(b, x, hash);
crush_hashmix(y, c, hash);
crush_hashmix(d, x, hash);
crush_hashmix(y, e, hash);
return hash;
}
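/*
 * The fixed x/y pad constants keep every variant fully deterministic,
 * so independent clients computing a CRUSH placement from the same
 * inputs arrive at the same answer.
 */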
__u32 crush_hash32(int type, __u32 a)
{
switch (type) {
case CRUSH_HASH_RJENKINS1:
return crush_hash32_rjenkins1(a);
default:
return 0;
}
}
__u32 crush_hash32_2(int type, __u32 a, __u32 b)
{
switch (type) {
case CRUSH_HASH_RJENKINS1:
return crush_hash32_rjenkins1_2(a, b);
default:
return 0;
}
}
__u32 crush_hash32_3(int type, __u32 a, __u32 b, __u32 c)
{
switch (type) {
case CRUSH_HASH_RJENKINS1:
return crush_hash32_rjenkins1_3(a, b, c);
default:
return 0;
}
}
__u32 crush_hash32_4(int type, __u32 a, __u32 b, __u32 c, __u32 d)
{
switch (type) {
case CRUSH_HASH_RJENKINS1:
return crush_hash32_rjenkins1_4(a, b, c, d);
default:
return 0;
}
}
__u32 crush_hash32_5(int type, __u32 a, __u32 b, __u32 c, __u32 d, __u32 e)
{
switch (type) {
case CRUSH_HASH_RJENKINS1:
return crush_hash32_rjenkins1_5(a, b, c, d, e);
default:
return 0;
}
}
const char *crush_hash_name(int type)
{
switch (type) {
case CRUSH_HASH_RJENKINS1:
return "rjenkins1";
default:
return "unknown";
}
}
| gpl-2.0 |
DESHONOR/android_kernel_huawei_g620s_Eloy | arch/ia64/kernel/esi.c | 13312 | 4576 | /*
* Extensible SAL Interface (ESI) support routines.
*
* Copyright (C) 2006 Hewlett-Packard Co
* Alex Williamson <alex.williamson@hp.com>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <asm/esi.h>
#include <asm/sal.h>
MODULE_AUTHOR("Alex Williamson <alex.williamson@hp.com>");
MODULE_DESCRIPTION("Extensible SAL Interface (ESI) support");
MODULE_LICENSE("GPL");
#define MODULE_NAME "esi"
#define ESI_TABLE_GUID \
EFI_GUID(0x43EA58DC, 0xCF28, 0x4b06, 0xB3, \
0x91, 0xB7, 0x50, 0x59, 0x34, 0x2B, 0xD4)
enum esi_systab_entry_type {
ESI_DESC_ENTRY_POINT = 0
};
/*
 * Entry type:   Size:
 *     0          48
 */
#define ESI_DESC_SIZE(type) "\060"[(unsigned) (type)]
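/*
 * "\060" is the octal escape for '0' (decimal 48), so the string literal
 * is the byte array { 48, 0 }; indexing it with the entry type yields the
 * entry size in bytes -- 48 for type 0, the only type defined.
 */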
typedef struct ia64_esi_desc_entry_point {
u8 type;
u8 reserved1[15];
u64 esi_proc;
u64 gp;
efi_guid_t guid;
} ia64_esi_desc_entry_point_t;
struct pdesc {
void *addr;
void *gp;
};
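/*
 * On ia64 a function pointer is really a pointer to an {entry, gp} pair,
 * so filling in a pdesc and casting its address to ia64_sal_handler (as
 * done below) calls an arbitrary ESI entry point with the right gp.
 */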
static struct ia64_sal_systab *esi_systab;
static int __init esi_init (void)
{
efi_config_table_t *config_tables;
struct ia64_sal_systab *systab;
unsigned long esi = 0;
char *p;
int i;
config_tables = __va(efi.systab->tables);
for (i = 0; i < (int) efi.systab->nr_tables; ++i) {
if (efi_guidcmp(config_tables[i].guid, ESI_TABLE_GUID) == 0) {
esi = config_tables[i].table;
break;
}
}
if (!esi)
return -ENODEV;
systab = __va(esi);
if (strncmp(systab->signature, "ESIT", 4) != 0) {
printk(KERN_ERR "bad signature in ESI system table!\n");
return -ENODEV;
}
p = (char *) (systab + 1);
for (i = 0; i < systab->entry_count; i++) {
/*
* The first byte of each entry type contains the type
* descriptor.
*/
switch (*p) {
case ESI_DESC_ENTRY_POINT:
break;
default:
printk(KERN_WARNING "Unknown table type %d found in "
"ESI table, ignoring rest of table\n", *p);
return -ENODEV;
}
p += ESI_DESC_SIZE(*p);
}
esi_systab = systab;
return 0;
}
int ia64_esi_call (efi_guid_t guid, struct ia64_sal_retval *isrvp,
enum esi_proc_type proc_type, u64 func,
u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6,
u64 arg7)
{
struct ia64_fpreg fr[6];
unsigned long flags = 0;
int i;
char *p;
if (!esi_systab)
return -1;
p = (char *) (esi_systab + 1);
for (i = 0; i < esi_systab->entry_count; i++) {
if (*p == ESI_DESC_ENTRY_POINT) {
ia64_esi_desc_entry_point_t *esi = (void *)p;
if (!efi_guidcmp(guid, esi->guid)) {
ia64_sal_handler esi_proc;
struct pdesc pdesc;
pdesc.addr = __va(esi->esi_proc);
pdesc.gp = __va(esi->gp);
esi_proc = (ia64_sal_handler) &pdesc;
ia64_save_scratch_fpregs(fr);
if (proc_type == ESI_PROC_SERIALIZED)
spin_lock_irqsave(&sal_lock, flags);
else if (proc_type == ESI_PROC_MP_SAFE)
local_irq_save(flags);
else
preempt_disable();
*isrvp = (*esi_proc)(func, arg1, arg2, arg3,
arg4, arg5, arg6, arg7);
if (proc_type == ESI_PROC_SERIALIZED)
spin_unlock_irqrestore(&sal_lock,
flags);
else if (proc_type == ESI_PROC_MP_SAFE)
local_irq_restore(flags);
else
preempt_enable();
ia64_load_scratch_fpregs(fr);
return 0;
}
}
p += ESI_DESC_SIZE(*p);
}
return -1;
}
EXPORT_SYMBOL_GPL(ia64_esi_call);
int ia64_esi_call_phys (efi_guid_t guid, struct ia64_sal_retval *isrvp,
u64 func, u64 arg1, u64 arg2, u64 arg3, u64 arg4,
u64 arg5, u64 arg6, u64 arg7)
{
struct ia64_fpreg fr[6];
unsigned long flags;
u64 esi_params[8];
char *p;
int i;
if (!esi_systab)
return -1;
p = (char *) (esi_systab + 1);
for (i = 0; i < esi_systab->entry_count; i++) {
if (*p == ESI_DESC_ENTRY_POINT) {
ia64_esi_desc_entry_point_t *esi = (void *)p;
if (!efi_guidcmp(guid, esi->guid)) {
ia64_sal_handler esi_proc;
struct pdesc pdesc;
pdesc.addr = (void *)esi->esi_proc;
pdesc.gp = (void *)esi->gp;
esi_proc = (ia64_sal_handler) &pdesc;
esi_params[0] = func;
esi_params[1] = arg1;
esi_params[2] = arg2;
esi_params[3] = arg3;
esi_params[4] = arg4;
esi_params[5] = arg5;
esi_params[6] = arg6;
esi_params[7] = arg7;
ia64_save_scratch_fpregs(fr);
spin_lock_irqsave(&sal_lock, flags);
*isrvp = esi_call_phys(esi_proc, esi_params);
spin_unlock_irqrestore(&sal_lock, flags);
ia64_load_scratch_fpregs(fr);
return 0;
}
}
p += ESI_DESC_SIZE(*p);
}
return -1;
}
EXPORT_SYMBOL_GPL(ia64_esi_call_phys);
static void __exit esi_exit (void)
{
}
module_init(esi_init);
module_exit(esi_exit); /* makes module removable... */
| gpl-2.0 |
qayshp/TestDisk | src/file_itu.c | 1 | 2268 | /*
File: file_itu.c
Copyright (C) 2006-2007 Christophe GRENIER <grenier@cgsecurity.org>
This software is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write the Free Software Foundation, Inc., 51
Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#ifdef HAVE_STRING_H
#include <string.h>
#endif
#include <stdio.h>
#include "types.h"
#include "filegen.h"
static void register_header_check_itunes(file_stat_t *file_stat);
const file_hint_t file_hint_itunes= {
.extension="itu",
.description="iTunes",
.min_header_distance=0,
.max_filesize=PHOTOREC_MAX_FILE_SIZE,
.recover=1,
.enable_by_default=1,
.register_header_check=®ister_header_check_itunes
};
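/*
 * The "mhbd" header stores the database length as a 32-bit little-endian
 * value at offset 8; the bytes are assembled by hand so the check behaves
 * identically on big-endian hosts.
 */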
static int header_check_itunes(const unsigned char *buffer, const unsigned int buffer_size, const unsigned int safe_header_only, const file_recovery_t *file_recovery, file_recovery_t *file_recovery_new)
{
const uint64_t size= (uint64_t)buffer[8] +
(((uint64_t)buffer[9])<<8) + (((uint64_t)buffer[10])<<16) + (((uint64_t)buffer[11])<<24);
if(size < 12)
return 0;
/* mhbd */
reset_file_recovery(file_recovery_new);
file_recovery_new->extension=file_hint_itunes.extension;
file_recovery_new->min_filesize=0x68;
file_recovery_new->calculated_file_size=size;
file_recovery_new->data_check=&data_check_size;
file_recovery_new->file_check=&file_check_size;
return 1;
}
static void register_header_check_itunes(file_stat_t *file_stat)
{
static const unsigned char itunes_header[8]= {'m', 'h', 'b', 'd', 0x68, 0x00, 0x00, 0x00};
register_header_check(0, itunes_header,sizeof(itunes_header), &header_check_itunes, file_stat);
}
| gpl-2.0 |
leemgs/OptimusOneKernel-KandroidCommunity | scripts/dtc/treesource.c | 1 | 4924 |
#include "dtc.h"
#include "srcpos.h"
extern FILE *yyin;
extern int yyparse(void);
struct boot_info *the_boot_info;
int treesource_error;
struct boot_info *dt_from_source(const char *fname)
{
the_boot_info = NULL;
treesource_error = 0;
srcpos_file = dtc_open_file(fname, NULL);
yyin = srcpos_file->file;
if (yyparse() != 0)
die("Unable to parse input tree\n");
if (treesource_error)
die("Syntax error parsing input tree\n");
return the_boot_info;
}
static void write_prefix(FILE *f, int level)
{
int i;
for (i = 0; i < level; i++)
fputc('\t', f);
}
static int isstring(char c)
{
return (isprint(c)
|| (c == '\0')
|| strchr("\a\b\t\n\v\f\r", c));
}
static void write_propval_string(FILE *f, struct data val)
{
const char *str = val.val;
int i;
int newchunk = 1;
struct marker *m = val.markers;
assert(str[val.len-1] == '\0');
for (i = 0; i < (val.len-1); i++) {
char c = str[i];
if (newchunk) {
while (m && (m->offset <= i)) {
if (m->type == LABEL) {
assert(m->offset == i);
fprintf(f, "%s: ", m->ref);
}
m = m->next;
}
fprintf(f, "\"");
newchunk = 0;
}
switch (c) {
case '\a':
fprintf(f, "\\a");
break;
case '\b':
fprintf(f, "\\b");
break;
case '\t':
fprintf(f, "\\t");
break;
case '\n':
fprintf(f, "\\n");
break;
case '\v':
fprintf(f, "\\v");
break;
case '\f':
fprintf(f, "\\f");
break;
case '\r':
fprintf(f, "\\r");
break;
case '\\':
fprintf(f, "\\\\");
break;
case '\"':
fprintf(f, "\\\"");
break;
case '\0':
fprintf(f, "\", ");
newchunk = 1;
break;
default:
if (isprint(c))
fprintf(f, "%c", c);
else
fprintf(f, "\\x%02hhx", c);
}
}
fprintf(f, "\"");
for_each_marker_of_type(m, LABEL) {
assert (m->offset == val.len);
fprintf(f, " %s:", m->ref);
}
}
static void write_propval_cells(FILE *f, struct data val)
{
void *propend = val.val + val.len;
cell_t *cp = (cell_t *)val.val;
struct marker *m = val.markers;
fprintf(f, "<");
for (;;) {
while (m && (m->offset <= ((char *)cp - val.val))) {
if (m->type == LABEL) {
assert(m->offset == ((char *)cp - val.val));
fprintf(f, "%s: ", m->ref);
}
m = m->next;
}
fprintf(f, "0x%x", fdt32_to_cpu(*cp++));
if ((void *)cp >= propend)
break;
fprintf(f, " ");
}
for_each_marker_of_type(m, LABEL) {
assert (m->offset == val.len);
fprintf(f, " %s:", m->ref);
}
fprintf(f, ">");
}
static void write_propval_bytes(FILE *f, struct data val)
{
void *propend = val.val + val.len;
const char *bp = val.val;
struct marker *m = val.markers;
fprintf(f, "[");
for (;;) {
while (m && (m->offset == (bp-val.val))) {
if (m->type == LABEL)
fprintf(f, "%s: ", m->ref);
m = m->next;
}
fprintf(f, "%02hhx", *bp++);
if ((const void *)bp >= propend)
break;
fprintf(f, " ");
}
for_each_marker_of_type(m, LABEL) {
assert (m->offset == val.len);
fprintf(f, " %s:", m->ref);
}
fprintf(f, "]");
}
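/*
 * Pick the most readable dts representation for a property value: a
 * NUL-terminated run of printable characters is written as "strings",
 * a multiple of the cell size with cell-aligned labels as <cells>, and
 * anything else as a [byte array].
 */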
static void write_propval(FILE *f, struct property *prop)
{
int len = prop->val.len;
const char *p = prop->val.val;
struct marker *m = prop->val.markers;
int nnotstring = 0, nnul = 0;
int nnotstringlbl = 0, nnotcelllbl = 0;
int i;
if (len == 0) {
fprintf(f, ";\n");
return;
}
for (i = 0; i < len; i++) {
if (! isstring(p[i]))
nnotstring++;
if (p[i] == '\0')
nnul++;
}
for_each_marker_of_type(m, LABEL) {
if ((m->offset > 0) && (prop->val.val[m->offset - 1] != '\0'))
nnotstringlbl++;
if ((m->offset % sizeof(cell_t)) != 0)
nnotcelllbl++;
}
fprintf(f, " = ");
if ((p[len-1] == '\0') && (nnotstring == 0) && (nnul < (len-nnul))
&& (nnotstringlbl == 0)) {
write_propval_string(f, prop->val);
} else if (((len % sizeof(cell_t)) == 0) && (nnotcelllbl == 0)) {
write_propval_cells(f, prop->val);
} else {
write_propval_bytes(f, prop->val);
}
fprintf(f, ";\n");
}
static void write_tree_source_node(FILE *f, struct node *tree, int level)
{
struct property *prop;
struct node *child;
write_prefix(f, level);
if (tree->label)
fprintf(f, "%s: ", tree->label);
if (tree->name && (*tree->name))
fprintf(f, "%s {\n", tree->name);
else
fprintf(f, "/ {\n");
for_each_property(tree, prop) {
write_prefix(f, level+1);
if (prop->label)
fprintf(f, "%s: ", prop->label);
fprintf(f, "%s", prop->name);
write_propval(f, prop);
}
for_each_child(tree, child) {
fprintf(f, "\n");
write_tree_source_node(f, child, level+1);
}
write_prefix(f, level);
fprintf(f, "};\n");
}
void dt_to_source(FILE *f, struct boot_info *bi)
{
struct reserve_info *re;
fprintf(f, "/dts-v1/;\n\n");
for (re = bi->reservelist; re; re = re->next) {
if (re->label)
fprintf(f, "%s: ", re->label);
fprintf(f, "/memreserve/\t0x%016llx 0x%016llx;\n",
(unsigned long long)re->re.address,
(unsigned long long)re->re.size);
}
write_tree_source_node(f, bi->dt, 0);
}
| gpl-2.0 |
eglaysher/vitaminsee | Components/ImageMetadata/Exiv2/datasets.cpp | 1 | 22336 | // ***************************************************************** -*- C++ -*-
/*
* Copyright (C) 2004 Andreas Huggel <ahuggel@gmx.net>
*
* This program is part of the Exiv2 distribution.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
/*
File: datasets.cpp
Version: $Rev: 445 $
Author(s): Brad Schick (brad) <brad@robotbattle.com>
History: 24-Jul-04, brad: created
*/
// *****************************************************************************
#include "rcsid.hpp"
EXIV2_RCSID("@(#) $Id: datasets.cpp 445 2004-12-10 18:02:31Z ahuggel $");
// *****************************************************************************
// included header files
#include "datasets.hpp"
#include "error.hpp"
#include "types.hpp"
#include "value.hpp"
#include "metadatum.hpp"
#include <iostream>
#include <iomanip>
#include <sstream>
// *****************************************************************************
// class member definitions
namespace Exiv2 {
DataSet::DataSet(
uint16_t number,
const char* name,
const char* desc,
bool mandatory,
bool repeatable,
uint32_t minbytes,
uint32_t maxbytes,
TypeId type,
uint16_t recordId,
const char* photoshop
)
: number_(number), name_(name), desc_(desc), mandatory_(mandatory),
repeatable_(repeatable), minbytes_(minbytes), maxbytes_(maxbytes),
type_(type), recordId_(recordId), photoshop_(photoshop)
{
}
RecordInfo::RecordInfo(
uint16_t recordId,
const char* name,
const char* desc
)
: recordId_(recordId), name_(name), desc_(desc)
{
}
const RecordInfo IptcDataSets::recordInfo_[] = {
RecordInfo(IptcDataSets::invalidRecord, "(invalid)", "(invalid)"),
RecordInfo(IptcDataSets::envelope, "Envelope", "IIM envelope record"),
RecordInfo(IptcDataSets::application2, "Application2", "IIM application record 2"),
};
static const DataSet envelopeRecord[] = {
DataSet(IptcDataSets::ModelVersion, "ModelVersion", "Version of IIM part 1", true, false, 2, 2, Exiv2::unsignedShort, IptcDataSets::envelope, ""),
DataSet(IptcDataSets::Destination, "Destination", "Routing information", false, true, 0, 1024, Exiv2::string, IptcDataSets::envelope, ""),
DataSet(IptcDataSets::FileFormat, "FileFormat", "IIM appendix A file format", true, false, 2, 2, Exiv2::unsignedShort, IptcDataSets::envelope, ""),
DataSet(IptcDataSets::FileVersion, "FileVersion", "File format version", true, false, 2, 2, Exiv2::unsignedShort, IptcDataSets::envelope, ""),
DataSet(IptcDataSets::ServiceId, "ServiceId", "Identifies the provider and product", true, false, 0, 10, Exiv2::string, IptcDataSets::envelope, ""),
DataSet(IptcDataSets::EnvelopeNumber, "EnvelopeNumber", "Combined unique identification", true, false, 8, 8, Exiv2::string, IptcDataSets::envelope, ""),
DataSet(IptcDataSets::ProductId, "ProductId", "Identifies service subset", false, true, 0, 32, Exiv2::string, IptcDataSets::envelope, ""),
DataSet(IptcDataSets::EnvelopePriority, "EnvelopePriority", "Envelope handling priority", false, false, 1, 1, Exiv2::string, IptcDataSets::envelope, ""),
DataSet(IptcDataSets::DateSent, "DateSent", "Date material was sent", true, false, 8, 8, Exiv2::date, IptcDataSets::envelope, ""),
DataSet(IptcDataSets::TimeSent, "TimeSent", "Time material was sent", false, false, 11, 11, Exiv2::time, IptcDataSets::envelope, ""),
DataSet(IptcDataSets::CharacterSet, "CharacterSet", "Specifies character sets", false, false, 0, 32, Exiv2::undefined, IptcDataSets::envelope, ""),
DataSet(IptcDataSets::UNO, "UNO", "Unique Name of Object", false, false, 14, 80, Exiv2::string, IptcDataSets::envelope, ""),
DataSet(IptcDataSets::ARMId, "ARMId", "Abstract Relationship Method identifier", false, false, 2, 2, Exiv2::unsignedShort, IptcDataSets::envelope, ""),
DataSet(IptcDataSets::ARMVersion, "ARMVersion", "Abstract Relationship Method version", false, false, 2, 2, Exiv2::unsignedShort, IptcDataSets::envelope, ""),
DataSet(0xffff, "(Invalid)", "(Invalid)", false, false, 0, 0, Exiv2::unsignedShort, IptcDataSets::envelope, "")
};
static const DataSet application2Record[] = {
DataSet(IptcDataSets::RecordVersion, "RecordVersion", "Version of IIM part 2", true, false, 2, 2, Exiv2::unsignedShort, IptcDataSets::application2, ""),
DataSet(IptcDataSets::ObjectType, "ObjectType", "IIM appendix G object type", false, false, 3, 67, Exiv2::string, IptcDataSets::application2, ""),
DataSet(IptcDataSets::ObjectAttribute, "ObjectAttribute", "IIM appendix G object attribute", false, true, 4, 68, Exiv2::string, IptcDataSets::application2, ""),
DataSet(IptcDataSets::ObjectName, "ObjectName", "Shorthand reference of content", false, false, 0, 64, Exiv2::string, IptcDataSets::application2, "Document title"),
DataSet(IptcDataSets::EditStatus, "EditStatus", "Content status", false, false, 0, 64, Exiv2::string, IptcDataSets::application2, ""),
DataSet(IptcDataSets::EditorialUpdate, "EditorialUpdate", "Indicates the type of update", false, false, 2, 2, Exiv2::string, IptcDataSets::application2, ""),
DataSet(IptcDataSets::Urgency, "Urgency", "Editorial urgency of content", false, false, 1, 1, Exiv2::string, IptcDataSets::application2, "Urgency"),
DataSet(IptcDataSets::Subject, "Subject", "Structured definition of the subject", false, true, 13, 236, Exiv2::string, IptcDataSets::application2, ""),
DataSet(IptcDataSets::Category, "Category", "Identifies the subject", false, false, 0, 3, Exiv2::string, IptcDataSets::application2, "Category"),
DataSet(IptcDataSets::SuppCategory, "SuppCategory", "Refines the subject", false, true, 0, 32, Exiv2::string, IptcDataSets::application2, "Supplemental Categories"),
DataSet(IptcDataSets::FixtureId, "FixtureId", "Identifies content that recurs", false, false, 0, 32, Exiv2::string, IptcDataSets::application2, ""),
DataSet(IptcDataSets::Keywords, "Keywords", "Information retrieval words", false, true, 0, 64, Exiv2::string, IptcDataSets::application2, "Keywords"),
DataSet(IptcDataSets::LocationCode, "LocationCode", "ISO country code for content", false, true, 3, 3, Exiv2::string, IptcDataSets::application2, ""),
DataSet(IptcDataSets::LocationName, "LocationName", "Full country name for content", false, true, 0, 64, Exiv2::string, IptcDataSets::application2, ""),
DataSet(IptcDataSets::ReleaseDate, "ReleaseDate", "Earliest intended usable date", false, false, 8, 8, Exiv2::date, IptcDataSets::application2, ""),
DataSet(IptcDataSets::ReleaseTime, "ReleaseTime", "Earliest intended usable time", false, false, 11, 11, Exiv2::time, IptcDataSets::application2, ""),
DataSet(IptcDataSets::ExpirationDate, "ExpirationDate", "Latest intended usable date", false, false, 8, 8, Exiv2::date, IptcDataSets::application2, ""),
DataSet(IptcDataSets::ExpirationTime, "ExpirationTime", "Latest intended usable time", false, false, 11, 11, Exiv2::time, IptcDataSets::application2, ""),
DataSet(IptcDataSets::SpecialInstructions, "SpecialInstructions", "Editorial usage instructions", false, false, 0, 256, Exiv2::string, IptcDataSets::application2, "Instructions"),
DataSet(IptcDataSets::ActionAdvised, "ActionAdvised", "Action provided to previous data", false, false, 2, 2, Exiv2::string, IptcDataSets::application2, ""),
DataSet(IptcDataSets::ReferenceService, "ReferenceService", "Service Identifier of a prior envelope", false, true, 0, 10, Exiv2::string, IptcDataSets::application2, ""),
DataSet(IptcDataSets::ReferenceDate, "ReferenceDate", "Date of a prior envelope", false, true, 8, 8, Exiv2::date, IptcDataSets::application2, ""),
DataSet(IptcDataSets::ReferenceNumber, "ReferenceNumber", "Envelope Number of a prior envelope", false, true, 8, 8, Exiv2::string, IptcDataSets::application2, ""),
DataSet(IptcDataSets::DateCreated, "DateCreated", "Creation date of intellectual content", false, false, 8, 8, Exiv2::date, IptcDataSets::application2, "Date created"),
DataSet(IptcDataSets::TimeCreated, "TimeCreated", "Creation time of intellectual content", false, false, 11, 11, Exiv2::time, IptcDataSets::application2, ""),
DataSet(IptcDataSets::DigitizationDate, "DigitizationDate", "Creation date of digital representation", false, false, 8, 8, Exiv2::date, IptcDataSets::application2, ""),
DataSet(IptcDataSets::DigitizationTime, "DigitizationTime", "Creation time of digital representation", false, false, 11, 11, Exiv2::time, IptcDataSets::application2, ""),
DataSet(IptcDataSets::Program, "Program", "Content creation program", false, false, 0, 32, Exiv2::string, IptcDataSets::application2, ""),
DataSet(IptcDataSets::ProgramVersion, "ProgramVersion", "Content creation program version", false, false, 0, 10, Exiv2::string, IptcDataSets::application2, ""),
DataSet(IptcDataSets::ObjectCycle, "ObjectCycle", "Morning, evening, or both", false, false, 1, 1, Exiv2::string, IptcDataSets::application2, ""),
DataSet(IptcDataSets::Byline, "Byline", "Name of content creator", false, true, 0, 32, Exiv2::string, IptcDataSets::application2, "Author"),
DataSet(IptcDataSets::BylineTitle, "BylineTitle", "Title of content creator", false, true, 0, 32, Exiv2::string, IptcDataSets::application2, "Authors Position"),
DataSet(IptcDataSets::City, "City", "City of content origin", false, false, 0, 32, Exiv2::string, IptcDataSets::application2, "City"),
DataSet(IptcDataSets::SubLocation, "SubLocation", "Location within city", false, false, 0, 32, Exiv2::string, IptcDataSets::application2, ""),
DataSet(IptcDataSets::ProvinceState, "ProvinceState", "Province/State of content origin", false, false, 0, 32, Exiv2::string, IptcDataSets::application2, "State/Province"),
DataSet(IptcDataSets::CountryCode, "CountryCode", "ISO country code of content origin", false, false, 3, 3, Exiv2::string, IptcDataSets::application2, ""),
DataSet(IptcDataSets::CountryName, "CountryName", "Full country name of content origin", false, false, 0, 64, Exiv2::string, IptcDataSets::application2, "Country"),
DataSet(IptcDataSets::TransmissionReference, "TransmissionReference", "Location of original transmission", false, false, 0, 32, Exiv2::string, IptcDataSets::application2, "Transmission Reference"),
DataSet(IptcDataSets::Headline, "Headline", "Content synopsis", false, false, 0, 256, Exiv2::string, IptcDataSets::application2, "Headline"),
DataSet(IptcDataSets::Credit, "Credit", "Content provider", false, false, 0, 32, Exiv2::string, IptcDataSets::application2, "Credit"),
DataSet(IptcDataSets::Source, "Source", "Original owner of content", false, false, 0, 32, Exiv2::string, IptcDataSets::application2, "Source"),
DataSet(IptcDataSets::Copyright, "Copyright", "Necessary copyright notice", false, false, 0, 128, Exiv2::string, IptcDataSets::application2, "Copyright notice"),
DataSet(IptcDataSets::Contact, "Contact", "Person or organisation to contact", false, true, 0, 128, Exiv2::string, IptcDataSets::application2, ""),
DataSet(IptcDataSets::Caption, "Caption", "Content description", false, false, 0, 2000, Exiv2::string, IptcDataSets::application2, "Description"),
DataSet(IptcDataSets::Writer, "Writer", "Person responsible for caption", false, true, 0, 32, Exiv2::string, IptcDataSets::application2, "Description writer"),
DataSet(IptcDataSets::RasterizedCaption, "RasterizedCaption", "Black and white caption image", false, false, 7360, 7360, Exiv2::undefined, IptcDataSets::application2, ""),
DataSet(IptcDataSets::ImageType, "ImageType", "Color components in an image", false, false, 2, 2, Exiv2::string, IptcDataSets::application2, ""),
DataSet(IptcDataSets::ImageOrientation, "ImageOrientation", "Indicates the layout of an image", false, false, 1, 1, Exiv2::string, IptcDataSets::application2, ""),
DataSet(IptcDataSets::Language, "Language", "ISO 639:1988 language code", false, false, 2, 3, Exiv2::string, IptcDataSets::application2, ""),
DataSet(IptcDataSets::AudioType, "AudioType", "Information about audio content", false, false, 2, 2, Exiv2::string, IptcDataSets::application2, ""),
DataSet(IptcDataSets::AudioRate, "AudioRate", "Sampling rate of audio content", false, false, 6, 6, Exiv2::string, IptcDataSets::application2, ""),
DataSet(IptcDataSets::AudioResolution, "AudioResolution", "Sampling resolution of audio content", false, false, 2, 2, Exiv2::string, IptcDataSets::application2, ""),
DataSet(IptcDataSets::AudioDuration, "AudioDuration", "Duration of audio content", false, false, 6, 6, Exiv2::string, IptcDataSets::application2, ""),
DataSet(IptcDataSets::AudioOutcue, "AudioOutcue", "Final words or sounds of audio content", false, false, 0, 64, Exiv2::string, IptcDataSets::application2, ""),
DataSet(IptcDataSets::PreviewFormat, "PreviewFormat", "IIM appendix A file format of preview", false, false, 2, 2, Exiv2::unsignedShort, IptcDataSets::application2, ""),
DataSet(IptcDataSets::PreviewVersion, "PreviewVersion", "File format version of preview", false, false, 2, 2, Exiv2::unsignedShort, IptcDataSets::application2, ""),
DataSet(IptcDataSets::Preview, "Preview", "Binary preview data", false, false, 0, 256000, Exiv2::undefined, IptcDataSets::application2, ""),
DataSet(0xffff, "(Invalid)", "(Invalid)", false, false, 0, 0, Exiv2::unsignedShort, IptcDataSets::application2, "")
};
static const DataSet unknownDataSet(0xffff, "Unknown dataset", "Unknown dataset", false, true, 0, 0xffffffff, Exiv2::string, IptcDataSets::invalidRecord, "Unknown dataset");
// Dataset lookup lists.This is an array with pointers to one list per IIM4 Record.
// The record id is used as the index into the array.
const DataSet* IptcDataSets::records_[] = {
0,
envelopeRecord, application2Record,
0
};
int IptcDataSets::dataSetIdx(uint16_t number, uint16_t recordId)
{
if( recordId != envelope && recordId != application2 ) return -1;
const DataSet* dataSet = records_[recordId];
if (dataSet == 0) return -1;
int idx;
for (idx = 0; dataSet[idx].number_ != number; ++idx) {
if (dataSet[idx].number_ == 0xffff) return -1;
}
return idx;
}
int IptcDataSets::dataSetIdx(const std::string& dataSetName, uint16_t recordId)
{
if( recordId != envelope && recordId != application2 ) return -1;
const DataSet* dataSet = records_[recordId];
if (dataSet == 0) return -1;
int idx;
for (idx = 0; dataSet[idx].name_ != dataSetName; ++idx) {
if (dataSet[idx].number_ == 0xffff) return -1;
}
return idx;
}
TypeId IptcDataSets::dataSetType(uint16_t number, uint16_t recordId)
{
int idx = dataSetIdx(number, recordId);
if (idx == -1) return unknownDataSet.type_;
return records_[recordId][idx].type_;
}
std::string IptcDataSets::dataSetName(uint16_t number, uint16_t recordId)
{
int idx = dataSetIdx(number, recordId);
if (idx != -1) return records_[recordId][idx].name_;
std::ostringstream os;
os << "0x" << std::setw(4) << std::setfill('0') << std::right
<< std::hex << number;
return os.str();
}
const char* IptcDataSets::dataSetDesc(uint16_t number, uint16_t recordId)
{
int idx = dataSetIdx(number, recordId);
if (idx == -1) return unknownDataSet.desc_;
return records_[recordId][idx].desc_;
}
const char* IptcDataSets::dataSetPsName(uint16_t number, uint16_t recordId)
{
int idx = dataSetIdx(number, recordId);
if (idx == -1) return unknownDataSet.photoshop_;
return records_[recordId][idx].photoshop_;
}
bool IptcDataSets::dataSetRepeatable(uint16_t number, uint16_t recordId)
{
int idx = dataSetIdx(number, recordId);
if (idx == -1) return unknownDataSet.repeatable_;
return records_[recordId][idx].repeatable_;
}
uint16_t IptcDataSets::dataSet(const std::string& dataSetName,
uint16_t recordId)
{
uint16_t dataSet;
int idx = dataSetIdx(dataSetName, recordId);
if (idx != -1) {
// dataSetIdx checks the range of recordId
dataSet = records_[recordId][idx].number_;
}
else {
if (!isHex(dataSetName, 4, "0x")) throw Error("Invalid dataset name");
std::istringstream is(dataSetName);
is >> std::hex >> dataSet;
}
return dataSet;
}
std::string IptcDataSets::recordName(uint16_t recordId)
{
if (recordId == envelope || recordId == application2) {
return recordInfo_[recordId].name_;
}
std::ostringstream os;
os << "0x" << std::setw(4) << std::setfill('0') << std::right
<< std::hex << recordId;
return os.str();
}
const char* IptcDataSets::recordDesc(uint16_t recordId)
{
if (recordId != envelope && recordId != application2) {
return unknownDataSet.desc_;
}
return recordInfo_[recordId].desc_;
}
uint16_t IptcDataSets::recordId(const std::string& recordName)
{
uint16_t i;
for (i = application2; i > 0; --i) {
if (recordInfo_[i].name_ == recordName) break;
}
if (i == 0) {
if (!isHex(recordName, 4, "0x")) throw Error("Invalid record name");
std::istringstream is(recordName);
is >> std::hex >> i;
}
return i;
}
void IptcDataSets::dataSetList(std::ostream& os)
{
const int count = sizeof(records_)/sizeof(records_[0]);
for (int i=0; i < count; ++i) {
const DataSet *record = records_[i];
for (int j=0; record != 0 && record[j].number_ != 0xffff; ++j) {
os << record[j] << "\n";
}
}
} // IptcDataSets::dataSetList
const char* IptcKey::familyName_ = "Iptc";
IptcKey::IptcKey(const std::string& key)
: key_(key)
{
decomposeKey();
}
IptcKey::IptcKey(uint16_t tag, uint16_t record)
: tag_(tag), record_(record)
{
makeKey();
}
IptcKey::IptcKey(const IptcKey& rhs)
: tag_(rhs.tag_), record_(rhs.record_), key_(rhs.key_)
{
}
IptcKey& IptcKey::operator=(const IptcKey& rhs)
{
if (this == &rhs) return *this;
Key::operator=(rhs);
tag_ = rhs.tag_;
record_ = rhs.record_;
key_ = rhs.key_;
return *this;
}
IptcKey::AutoPtr IptcKey::clone() const
{
return AutoPtr(clone_());
}
IptcKey* IptcKey::clone_() const
{
return new IptcKey(*this);
}
void IptcKey::decomposeKey()
{
// Get the family name, record name and dataSet name parts of the key
std::string::size_type pos1 = key_.find('.');
if (pos1 == std::string::npos) throw Error("Invalid key");
std::string familyName = key_.substr(0, pos1);
if (familyName != std::string(familyName_)) {
throw Error("Invalid key");
}
std::string::size_type pos0 = pos1 + 1;
pos1 = key_.find('.', pos0);
if (pos1 == std::string::npos) throw Error("Invalid key");
std::string recordName = key_.substr(pos0, pos1 - pos0);
if (recordName == "") throw Error("Invalid key");
std::string dataSetName = key_.substr(pos1 + 1);
if (dataSetName == "") throw Error("Invalid key");
// Use the parts of the key to find dataSet and recordId
uint16_t recId = IptcDataSets::recordId(recordName);
uint16_t dataSet = IptcDataSets::dataSet(dataSetName, recId);
// Possibly translate hex name parts (0xabcd) to real names
recordName = IptcDataSets::recordName(recId);
dataSetName = IptcDataSets::dataSetName(dataSet, recId);
tag_ = dataSet;
record_ = recId;
key_ = familyName + "." + recordName + "." + dataSetName;
} // IptcKey::decomposeKey
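// Hedged usage sketch (assumes the tag()/record() accessors declared in
// datasets.hpp; the key string follows the grammar parsed above):
//
//   Exiv2::IptcKey key("Iptc.Application2.Keywords");
//   key.tag();     // IptcDataSets::Keywords
//   key.record();  // IptcDataSets::application2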
void IptcKey::makeKey()
{
key_ = std::string(familyName_)
+ "." + IptcDataSets::recordName(record_)
+ "." + IptcDataSets::dataSetName(tag_, record_);
}
// *************************************************************************
// free functions
std::ostream& operator<<(std::ostream& os, const DataSet& dataSet)
{
IptcKey iptcKey(dataSet.number_, dataSet.recordId_);
return os << dataSet.name_ << ", "
<< std::dec << dataSet.number_ << ", "
<< "0x" << std::setw(4) << std::setfill('0')
<< std::right << std::hex << dataSet.number_ << ", "
<< IptcDataSets::recordName(dataSet.recordId_) << ", "
<< std::boolalpha << dataSet.mandatory_ << ", "
<< dataSet.repeatable_ << ", "
<< std::dec << dataSet.minbytes_ << ", "
<< dataSet.maxbytes_ << ", "
<< iptcKey.key() << ", "
<< dataSet.desc_;
}
} // namespace Exiv2
| gpl-2.0 |
sarnobat/knoppix | net/ipv4/netfilter/ip_nat_ftp.c | 1 | 9512 | /* FTP extension for TCP NAT alteration. */
#include <linux/module.h>
#include <linux/netfilter_ipv4.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/tcp.h>
#include <linux/netfilter_ipv4/ip_nat.h>
#include <linux/netfilter_ipv4/ip_nat_helper.h>
#include <linux/netfilter_ipv4/ip_nat_rule.h>
#include <linux/netfilter_ipv4/ip_conntrack_ftp.h>
#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Rusty Russell <rusty@rustcorp.com.au>");
MODULE_DESCRIPTION("ftp NAT helper");
#if 0
#define DEBUGP printk
#else
#define DEBUGP(format, args...)
#endif
#define MAX_PORTS 8
static int ports[MAX_PORTS];
static int ports_c;
#ifdef MODULE_PARM
MODULE_PARM(ports, "1-" __MODULE_STRING(MAX_PORTS) "i");
#endif
DECLARE_LOCK_EXTERN(ip_ftp_lock);
/* FIXME: Time out? --RR */
static unsigned int
ftp_nat_expected(struct sk_buff **pskb,
unsigned int hooknum,
struct ip_conntrack *ct,
struct ip_nat_info *info)
{
struct ip_nat_multi_range mr;
u_int32_t newdstip, newsrcip, newip;
struct ip_ct_ftp_expect *exp_ftp_info;
struct ip_conntrack *master = master_ct(ct);
IP_NF_ASSERT(info);
IP_NF_ASSERT(master);
IP_NF_ASSERT(!(info->initialized & (1<<HOOK2MANIP(hooknum))));
DEBUGP("nat_expected: We have a connection!\n");
exp_ftp_info = &ct->master->help.exp_ftp_info;
LOCK_BH(&ip_ftp_lock);
if (exp_ftp_info->ftptype == IP_CT_FTP_PORT
|| exp_ftp_info->ftptype == IP_CT_FTP_EPRT) {
/* PORT command: make connection go to the client. */
newdstip = master->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip;
newsrcip = master->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip;
DEBUGP("nat_expected: PORT cmd. %u.%u.%u.%u->%u.%u.%u.%u\n",
NIPQUAD(newsrcip), NIPQUAD(newdstip));
} else {
/* PASV command: make the connection go to the server */
newdstip = master->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip;
newsrcip = master->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip;
DEBUGP("nat_expected: PASV cmd. %u.%u.%u.%u->%u.%u.%u.%u\n",
NIPQUAD(newsrcip), NIPQUAD(newdstip));
}
UNLOCK_BH(&ip_ftp_lock);
if (HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC)
newip = newsrcip;
else
newip = newdstip;
DEBUGP("nat_expected: IP to %u.%u.%u.%u\n", NIPQUAD(newip));
mr.rangesize = 1;
/* We don't want to manip the per-protocol, just the IPs... */
mr.range[0].flags = IP_NAT_RANGE_MAP_IPS;
mr.range[0].min_ip = mr.range[0].max_ip = newip;
/* ... unless we're doing a MANIP_DST, in which case, make
sure we map to the correct port */
if (HOOK2MANIP(hooknum) == IP_NAT_MANIP_DST) {
mr.range[0].flags |= IP_NAT_RANGE_PROTO_SPECIFIED;
mr.range[0].min = mr.range[0].max
= ((union ip_conntrack_manip_proto)
{ .tcp = { htons(exp_ftp_info->port) } });
}
return ip_nat_setup_info(ct, &mr, hooknum);
}
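/*
 * RFC 959 encodes the PORT/PASV argument as six decimal numbers: the four
 * address octets followed by the port split into high and low bytes.
 * Worked example: 192.168.1.2 port 6275 -> "192,168,1,2,24,131"
 * (6275 == 24*256 + 131).
 */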
static int
mangle_rfc959_packet(struct sk_buff **pskb,
u_int32_t newip,
u_int16_t port,
unsigned int matchoff,
unsigned int matchlen,
struct ip_conntrack *ct,
enum ip_conntrack_info ctinfo)
{
char buffer[sizeof("nnn,nnn,nnn,nnn,nnn,nnn")];
MUST_BE_LOCKED(&ip_ftp_lock);
sprintf(buffer, "%u,%u,%u,%u,%u,%u",
NIPQUAD(newip), port>>8, port&0xFF);
DEBUGP("calling ip_nat_mangle_tcp_packet\n");
return ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff,
matchlen, buffer, strlen(buffer));
}
/* |1|132.235.1.2|6275| */
static int
mangle_eprt_packet(struct sk_buff **pskb,
u_int32_t newip,
u_int16_t port,
unsigned int matchoff,
unsigned int matchlen,
struct ip_conntrack *ct,
enum ip_conntrack_info ctinfo)
{
char buffer[sizeof("|1|255.255.255.255|65535|")];
MUST_BE_LOCKED(&ip_ftp_lock);
sprintf(buffer, "|1|%u.%u.%u.%u|%u|", NIPQUAD(newip), port);
DEBUGP("calling ip_nat_mangle_tcp_packet\n");
return ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff,
matchlen, buffer, strlen(buffer));
}
/* (|||6275|) -- an EPSV reply carries only the port, no address */
static int
mangle_epsv_packet(struct sk_buff **pskb,
u_int32_t newip,
u_int16_t port,
unsigned int matchoff,
unsigned int matchlen,
struct ip_conntrack *ct,
enum ip_conntrack_info ctinfo)
{
char buffer[sizeof("|||65535|")];
MUST_BE_LOCKED(&ip_ftp_lock);
sprintf(buffer, "|||%u|", port);
DEBUGP("calling ip_nat_mangle_tcp_packet\n");
return ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff,
matchlen, buffer, strlen(buffer));
}
static int (*mangle[])(struct sk_buff **, u_int32_t, u_int16_t,
unsigned int,
unsigned int,
struct ip_conntrack *,
enum ip_conntrack_info)
= { [IP_CT_FTP_PORT] = mangle_rfc959_packet,
[IP_CT_FTP_PASV] = mangle_rfc959_packet,
[IP_CT_FTP_EPRT] = mangle_eprt_packet,
[IP_CT_FTP_EPSV] = mangle_epsv_packet
};
static int ftp_data_fixup(const struct ip_ct_ftp_expect *ct_ftp_info,
struct ip_conntrack *ct,
struct sk_buff **pskb,
enum ip_conntrack_info ctinfo,
struct ip_conntrack_expect *expect)
{
u_int32_t newip;
struct iphdr *iph = (*pskb)->nh.iph;
struct tcphdr *tcph = (void *)iph + iph->ihl*4;
u_int16_t port;
struct ip_conntrack_tuple newtuple;
MUST_BE_LOCKED(&ip_ftp_lock);
DEBUGP("FTP_NAT: seq %u + %u in %u\n",
expect->seq, ct_ftp_info->len,
ntohl(tcph->seq));
/* Change address inside packet to match way we're mapping
this connection. */
if (ct_ftp_info->ftptype == IP_CT_FTP_PASV
|| ct_ftp_info->ftptype == IP_CT_FTP_EPSV) {
/* PASV/EPSV response: must be where client thinks server
is */
newip = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip;
/* Expect something from client->server */
newtuple.src.ip =
ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip;
newtuple.dst.ip =
ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip;
} else {
/* PORT command: must be where server thinks client is */
newip = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip;
/* Expect something from server->client */
newtuple.src.ip =
ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip;
newtuple.dst.ip =
ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip;
}
newtuple.dst.protonum = IPPROTO_TCP;
newtuple.src.u.tcp.port = expect->tuple.src.u.tcp.port;
/* Try to get same port: if not, try to change it. */
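/* Note: 'port' is a u_int16_t, so the increment wraps to 0 after
 * 65535; the loop ends either on a free port or on wrap-around,
 * in which case 0 is returned below. */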
for (port = ct_ftp_info->port; port != 0; port++) {
newtuple.dst.u.tcp.port = htons(port);
if (ip_conntrack_change_expect(expect, &newtuple) == 0)
break;
}
if (port == 0)
return 0;
if (!mangle[ct_ftp_info->ftptype](pskb, newip, port,
expect->seq - ntohl(tcph->seq),
ct_ftp_info->len, ct, ctinfo))
return 0;
return 1;
}
static unsigned int help(struct ip_conntrack *ct,
struct ip_conntrack_expect *exp,
struct ip_nat_info *info,
enum ip_conntrack_info ctinfo,
unsigned int hooknum,
struct sk_buff **pskb)
{
struct iphdr *iph = (*pskb)->nh.iph;
struct tcphdr *tcph = (void *)iph + iph->ihl*4;
unsigned int datalen;
int dir;
struct ip_ct_ftp_expect *ct_ftp_info;
if (!exp)
DEBUGP("ip_nat_ftp: no exp!!");
ct_ftp_info = &exp->help.exp_ftp_info;
/* Only mangle things once: original direction in POST_ROUTING
and reply direction on PRE_ROUTING. */
dir = CTINFO2DIR(ctinfo);
if (!((hooknum == NF_IP_POST_ROUTING && dir == IP_CT_DIR_ORIGINAL)
|| (hooknum == NF_IP_PRE_ROUTING && dir == IP_CT_DIR_REPLY))) {
DEBUGP("nat_ftp: Not touching dir %s at hook %s\n",
dir == IP_CT_DIR_ORIGINAL ? "ORIG" : "REPLY",
hooknum == NF_IP_POST_ROUTING ? "POSTROUTING"
: hooknum == NF_IP_PRE_ROUTING ? "PREROUTING"
: hooknum == NF_IP_LOCAL_OUT ? "OUTPUT" : "???");
return NF_ACCEPT;
}
datalen = (*pskb)->len - iph->ihl * 4 - tcph->doff * 4;
LOCK_BH(&ip_ftp_lock);
/* If it's in the right range... */
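/* i.e. the previously recorded match, starting at exp->seq and
 * ct_ftp_info->len bytes long, must end inside this packet's
 * payload window [seq, seq + datalen]. */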
if (between(exp->seq + ct_ftp_info->len,
ntohl(tcph->seq),
ntohl(tcph->seq) + datalen)) {
if (!ftp_data_fixup(ct_ftp_info, ct, pskb, ctinfo, exp)) {
UNLOCK_BH(&ip_ftp_lock);
return NF_DROP;
}
} else {
/* Half a match? This means a partial retransmission.
It's a cracker being funky. */
if (net_ratelimit()) {
printk("FTP_NAT: partial packet %u/%u in %u/%u\n",
exp->seq, ct_ftp_info->len,
ntohl(tcph->seq),
ntohl(tcph->seq) + datalen);
}
UNLOCK_BH(&ip_ftp_lock);
return NF_DROP;
}
UNLOCK_BH(&ip_ftp_lock);
return NF_ACCEPT;
}
static struct ip_nat_helper ftp[MAX_PORTS];
static char ftp_names[MAX_PORTS][10];
/* Not __exit: called from init() */
static void fini(void)
{
int i;
for (i = 0; i < ports_c; i++) {
DEBUGP("ip_nat_ftp: unregistering port %d\n", ports[i]);
ip_nat_helper_unregister(&ftp[i]);
}
}
static int __init init(void)
{
int i, ret = 0;
char *tmpname;
if (ports[0] == 0)
ports[0] = FTP_PORT;
for (i = 0; (i < MAX_PORTS) && ports[i]; i++) {
ftp[i].tuple.dst.protonum = IPPROTO_TCP;
ftp[i].tuple.src.u.tcp.port = htons(ports[i]);
ftp[i].mask.dst.protonum = 0xFFFF;
ftp[i].mask.src.u.tcp.port = 0xFFFF;
ftp[i].help = help;
ftp[i].me = THIS_MODULE;
ftp[i].flags = 0;
ftp[i].expect = ftp_nat_expected;
tmpname = &ftp_names[i][0];
if (ports[i] == FTP_PORT)
sprintf(tmpname, "ftp");
else
sprintf(tmpname, "ftp-%d", i);
ftp[i].name = tmpname;
DEBUGP("ip_nat_ftp: Trying to register for port %d\n",
ports[i]);
ret = ip_nat_helper_register(&ftp[i]);
if (ret) {
printk("ip_nat_ftp: error registering "
"helper for port %d\n", ports[i]);
fini();
return ret;
}
ports_c++;
}
return ret;
}
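/*
 * Usage note (not in the original source): with the MODULE_PARM
 * declaration above, the helper can presumably be bound to extra
 * ports at load time, e.g.
 *
 *	modprobe ip_nat_ftp ports=21,2121
 *
 * If no ports are given, init() falls back to FTP_PORT (21).
 */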
NEEDS_CONNTRACK(ftp);
module_init(init);
module_exit(fini);
| gpl-2.0 |
OS2World/DEV-UTIL-gawk | extension/dl.c | 1 | 2091 | /*
* dl.c - Example of adding a new builtin function to gawk.
*
* Christos Zoulas, Thu Jun 29 17:40:41 EDT 1995
* Arnold Robbins, update for 3.1, Wed Sep 13 09:38:56 2000
*/
/*
* Copyright (C) 1995 - 2001 the Free Software Foundation, Inc.
*
* This file is part of GAWK, the GNU implementation of the
* AWK Programming Language.
*
* GAWK is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* GAWK is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
#include "awk.h"
#include <dlfcn.h>
static void *sdl = NULL;
static NODE *
zaxxon(tree)
NODE *tree;
{
NODE *obj;
int i;
int comma = 0;
/*
* Print the arguments
*/
printf("External linkage %s(", tree->param);
for (i = 0; i < tree->param_cnt; i++) {
obj = get_argument(tree, i);
if (obj == NULL)
break;
force_string(obj);
printf(comma ? ", %s" : "%s", obj->stptr);
free_temp(obj);
comma = 1;
}
printf(");\n");
/*
* Do something useful
*/
obj = get_argument(tree, 0);
if (obj != NULL) {
force_string(obj);
if (strcmp(obj->stptr, "unload") == 0 && sdl) {
/*
* XXX: How to clean up the function?
* I would like the ability to remove a function...
*/
dlclose(sdl);
sdl = NULL;
}
free_temp(obj);
}
/* Set the return value */
set_value(tmp_number((AWKNUM) 3.14));
/* Just to make the interpreter happy */
return tmp_number((AWKNUM) 0);
}
NODE *
dlload(tree, dl)
NODE *tree;
void *dl;
{
sdl = dl;
make_builtin("zaxxon", zaxxon, 4);
return tmp_number((AWKNUM) 0);
}
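/*
 * Hedged usage sketch (not part of the original file): with a gawk 3.1
 * built for dynamic extensions, this module would typically be loaded
 * and exercised from awk source along these lines; the library path is
 * hypothetical.
 *
 *	gawk 'BEGIN {
 *		extension("./dl.so", "dlload")
 *		zaxxon("unload", "demo")
 *	}'
 *
 * zaxxon() prints its arguments, arranges a return value of 3.14 via
 * set_value(), and dlclose()s the shared object when its first
 * argument is "unload".
 */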
| gpl-2.0 |
meizuosc/m75 | mediatek/platform/mt6595/kernel/drivers/dispsys/ddp_rdma.c | 1 | 23966 | #define LOG_TAG "RDMA"
#include "ddp_log.h"
#include <mach/mt_clkmgr.h>
#include <linux/delay.h>
#include "ddp_info.h"
#include "ddp_reg.h"
#include "ddp_matrix_para.h"
#include "ddp_rdma.h"
#include "ddp_dump.h"
enum RDMA_INPUT_FORMAT {
RDMA_INPUT_FORMAT_BGR565 = 0,
RDMA_INPUT_FORMAT_RGB888 = 1,
RDMA_INPUT_FORMAT_RGBA8888 = 2,
RDMA_INPUT_FORMAT_ARGB8888 = 3,
RDMA_INPUT_FORMAT_VYUY = 4,
RDMA_INPUT_FORMAT_YVYU = 5,
RDMA_INPUT_FORMAT_RGB565 = 6,
RDMA_INPUT_FORMAT_BGR888 = 7,
RDMA_INPUT_FORMAT_BGRA8888 = 8,
RDMA_INPUT_FORMAT_ABGR8888 = 9,
RDMA_INPUT_FORMAT_UYVY = 10,
RDMA_INPUT_FORMAT_YUYV = 11,
RDMA_INPUT_FORMAT_UNKNOW = 32,
};
static unsigned int rdma_fps[RDMA_INSTANCES]={60,60,60};
static enum RDMA_INPUT_FORMAT rdma_input_format_convert(DpColorFormat fmt)
{
enum RDMA_INPUT_FORMAT rdma_fmt = RDMA_INPUT_FORMAT_RGB565;
switch(fmt)
{
case eBGR565 :
rdma_fmt = RDMA_INPUT_FORMAT_BGR565 ; break;
case eRGB888 :
rdma_fmt = RDMA_INPUT_FORMAT_RGB888 ; break;
case eRGBA8888 :
rdma_fmt = RDMA_INPUT_FORMAT_RGBA8888 ; break;
case eARGB8888 :
rdma_fmt = RDMA_INPUT_FORMAT_ARGB8888 ; break;
case eVYUY :
rdma_fmt = RDMA_INPUT_FORMAT_VYUY ; break;
case eYVYU :
rdma_fmt = RDMA_INPUT_FORMAT_YVYU ; break;
case eRGB565 :
rdma_fmt = RDMA_INPUT_FORMAT_RGB565 ; break;
case eBGR888 :
rdma_fmt = RDMA_INPUT_FORMAT_BGR888 ; break;
case eBGRA8888 :
rdma_fmt = RDMA_INPUT_FORMAT_BGRA8888 ; break;
case eABGR8888 :
rdma_fmt = RDMA_INPUT_FORMAT_ABGR8888 ; break;
case eUYVY :
rdma_fmt = RDMA_INPUT_FORMAT_UYVY ; break;
case eYUY2 :
rdma_fmt = RDMA_INPUT_FORMAT_YUYV ; break;
default:
DDPERR("rdma_fmt_convert fmt=%d, rdma_fmt=%d \n", fmt, rdma_fmt);
}
return rdma_fmt;
}
static unsigned int rdma_input_format_byte_swap(enum RDMA_INPUT_FORMAT inputFormat)
{
int input_swap = 0;
switch(inputFormat)
{
case RDMA_INPUT_FORMAT_BGR565:
case RDMA_INPUT_FORMAT_RGB888:
case RDMA_INPUT_FORMAT_RGBA8888:
case RDMA_INPUT_FORMAT_ARGB8888:
case RDMA_INPUT_FORMAT_VYUY:
case RDMA_INPUT_FORMAT_YVYU:
input_swap = 1;
break;
case RDMA_INPUT_FORMAT_RGB565:
case RDMA_INPUT_FORMAT_BGR888:
case RDMA_INPUT_FORMAT_BGRA8888:
case RDMA_INPUT_FORMAT_ABGR8888:
case RDMA_INPUT_FORMAT_UYVY:
case RDMA_INPUT_FORMAT_YUYV:
input_swap = 0;
break;
default:
DDPERR("unknow RDMA input format is %d\n", inputFormat);
ASSERT(0);
}
return input_swap;
}
static unsigned int rdma_input_format_bpp(enum RDMA_INPUT_FORMAT inputFormat)
{
int bpp = 0;
switch(inputFormat)
{
case RDMA_INPUT_FORMAT_BGR565:
case RDMA_INPUT_FORMAT_RGB565:
case RDMA_INPUT_FORMAT_VYUY:
case RDMA_INPUT_FORMAT_UYVY:
case RDMA_INPUT_FORMAT_YVYU:
case RDMA_INPUT_FORMAT_YUYV:
bpp = 2;
break;
case RDMA_INPUT_FORMAT_RGB888:
case RDMA_INPUT_FORMAT_BGR888:
bpp = 3;
break;
case RDMA_INPUT_FORMAT_ARGB8888:
case RDMA_INPUT_FORMAT_ABGR8888:
case RDMA_INPUT_FORMAT_RGBA8888:
case RDMA_INPUT_FORMAT_BGRA8888:
bpp = 4;
break;
default:
DDPERR("unknown RDMA input format = %d\n", inputFormat);
ASSERT(0);
}
return bpp;
}
static unsigned int rdma_input_format_color_space(enum RDMA_INPUT_FORMAT inputFormat)
{
int space = 0;
switch(inputFormat)
{
case RDMA_INPUT_FORMAT_BGR565:
case RDMA_INPUT_FORMAT_RGB565:
case RDMA_INPUT_FORMAT_RGB888:
case RDMA_INPUT_FORMAT_BGR888:
case RDMA_INPUT_FORMAT_RGBA8888:
case RDMA_INPUT_FORMAT_BGRA8888:
case RDMA_INPUT_FORMAT_ARGB8888:
case RDMA_INPUT_FORMAT_ABGR8888:
space = 0;
break;
case RDMA_INPUT_FORMAT_VYUY:
case RDMA_INPUT_FORMAT_UYVY:
case RDMA_INPUT_FORMAT_YVYU:
case RDMA_INPUT_FORMAT_YUYV:
space = 1;
break;
default:
DDPERR("unknown RDMA input format = %d\n", inputFormat);
ASSERT(0);
}
return space;
}
static unsigned int rdma_input_format_reg_value(enum RDMA_INPUT_FORMAT inputFormat)
{
int reg_value = 0;
switch(inputFormat)
{
case RDMA_INPUT_FORMAT_BGR565:
case RDMA_INPUT_FORMAT_RGB565:
reg_value = 0x0;
break;
case RDMA_INPUT_FORMAT_RGB888:
case RDMA_INPUT_FORMAT_BGR888:
reg_value = 0x1;
break;
case RDMA_INPUT_FORMAT_RGBA8888:
case RDMA_INPUT_FORMAT_BGRA8888:
reg_value = 0x2;
break;
case RDMA_INPUT_FORMAT_ARGB8888:
case RDMA_INPUT_FORMAT_ABGR8888:
reg_value = 0x3;
break;
case RDMA_INPUT_FORMAT_VYUY:
case RDMA_INPUT_FORMAT_UYVY:
reg_value = 0x4;
break;
case RDMA_INPUT_FORMAT_YVYU:
case RDMA_INPUT_FORMAT_YUYV:
reg_value = 0x5;
break;
default:
DDPERR("unknow RDMA input format is %d\n", inputFormat);
ASSERT(0);
}
return reg_value;
}
static char* rdma_intput_format_name(enum RDMA_INPUT_FORMAT fmt, int swap)
{
switch(fmt)
{
case RDMA_INPUT_FORMAT_BGR565 :
{
return swap ? "eBGR565" : "eRGB565";
}
case RDMA_INPUT_FORMAT_RGB565 :
{
return "eRGB565";
}
case RDMA_INPUT_FORMAT_RGB888 :
{
return swap ? "eRGB888" : "eBGR888";
}
case RDMA_INPUT_FORMAT_BGR888 :
{
return "eBGR888";
}
case RDMA_INPUT_FORMAT_RGBA8888 :
{
return swap ? "eRGBA888" : "eBGRA888";
}
case RDMA_INPUT_FORMAT_BGRA8888 :
{
return "eBGRA888";
}
case RDMA_INPUT_FORMAT_ARGB8888 :
{
return swap ? "eARGB8888" : "eABGR8888";
}
case RDMA_INPUT_FORMAT_ABGR8888 :
{
return "eABGR8888";
}
case RDMA_INPUT_FORMAT_VYUY :
{
return swap ? "eVYUY" : "eUYVY";
}
case RDMA_INPUT_FORMAT_UYVY :
{
return "eUYVY";
}
case RDMA_INPUT_FORMAT_YVYU :
{
return swap ? "eYVYU" : "eYUY2";
}
case RDMA_INPUT_FORMAT_YUYV :
{
return "eYUY2";
}
default:
{
DDPERR("rdma_intput_format_name unknow fmt=%d, swap=%d \n", fmt, swap);
break;
}
}
return "unknow";
}
static unsigned int rdma_index(DISP_MODULE_ENUM module)
{
int idx = 0;
switch(module) {
case DISP_MODULE_RDMA0:
idx = 0;
break;
case DISP_MODULE_RDMA1:
idx = 1;
break;
case DISP_MODULE_RDMA2:
idx = 2;
break;
default:
DDPERR("invalid rdma module=%d \n", module);// invalid module
ASSERT(0);
}
return idx;
}
int rdma_start(DISP_MODULE_ENUM module, void * handle) {
unsigned int idx = rdma_index(module);
ASSERT(idx <= 2);
if(module == DISP_MODULE_RDMA0)
{
DISP_REG_SET(handle,idx*DISP_RDMA_INDEX_OFFSET+ DISP_REG_RDMA_INT_ENABLE, 0x02);
}
else
{
DISP_REG_SET(handle,idx*DISP_RDMA_INDEX_OFFSET+ DISP_REG_RDMA_INT_ENABLE, 0x1E);
}
DISP_REG_SET_FIELD(handle,GLOBAL_CON_FLD_ENGINE_EN, idx*DISP_RDMA_INDEX_OFFSET+ DISP_REG_RDMA_GLOBAL_CON, 1);
return 0;
}
int rdma_stop(DISP_MODULE_ENUM module,void * handle) {
unsigned int idx = rdma_index(module);
ASSERT(idx <= 2);
DISP_REG_SET_FIELD(handle,GLOBAL_CON_FLD_ENGINE_EN, idx*DISP_RDMA_INDEX_OFFSET+ DISP_REG_RDMA_GLOBAL_CON, 0);
DISP_REG_SET(handle,idx*DISP_RDMA_INDEX_OFFSET+ DISP_REG_RDMA_INT_ENABLE, 0);
DISP_REG_SET(handle,idx*DISP_RDMA_INDEX_OFFSET+ DISP_REG_RDMA_INT_STATUS, 0);
return 0;
}
int rdma_reset(DISP_MODULE_ENUM module,void * handle) {
unsigned int delay_cnt=0;
int ret =0;
unsigned int idx = rdma_index(module);
ASSERT(idx <= 2);
DISP_REG_SET_FIELD(handle,GLOBAL_CON_FLD_SOFT_RESET, idx*DISP_RDMA_INDEX_OFFSET+ DISP_REG_RDMA_GLOBAL_CON, 1);
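/* Poll the state field in GLOBAL_CON (mask 0x700, presumably the
 * soft-reset state machine): first wait for it to leave state 0x100
 * after asserting reset, then wait for it to return to 0x100 after
 * de-asserting reset below. */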
while((DISP_REG_GET(idx*DISP_RDMA_INDEX_OFFSET+ DISP_REG_RDMA_GLOBAL_CON)&0x700)==0x100)
{
delay_cnt++;
udelay(10);
if(delay_cnt>10000)
{
ret = -1;
DDPERR("rdma%d_reset timeout, stage 1! DISP_REG_RDMA_GLOBAL_CON=0x%x \n", idx, DISP_REG_GET(idx*DISP_RDMA_INDEX_OFFSET+ DISP_REG_RDMA_GLOBAL_CON));
break;
}
}
DISP_REG_SET_FIELD(handle,GLOBAL_CON_FLD_SOFT_RESET, idx*DISP_RDMA_INDEX_OFFSET+ DISP_REG_RDMA_GLOBAL_CON, 0);
delay_cnt =0;
while((DISP_REG_GET(idx*DISP_RDMA_INDEX_OFFSET+ DISP_REG_RDMA_GLOBAL_CON)&0x700)!=0x100)
{
delay_cnt++;
udelay(10);
if(delay_cnt>10000)
{
ret = -1;
DDPERR("rdma%d_reset timeout, stage 2! DISP_REG_RDMA_GLOBAL_CON=0x%x \n", idx, DISP_REG_GET(idx*DISP_RDMA_INDEX_OFFSET+ DISP_REG_RDMA_GLOBAL_CON));
break;
}
}
return ret;
}
// set ultra registers
void rdma_set_ultra(unsigned int idx, unsigned int width, unsigned int height, unsigned int bpp, unsigned int frame_rate,void * handle)
{
// constant
static const unsigned int blank_overhead = 115; //i.e. 1.15; divided by 100 later
static const unsigned int rdma_fifo_width = 16; //in unit of byte
static const unsigned int ultra_low_time = 5; //in unit of us
static const unsigned int pre_ultra_low_time = 7; //in unit of us
static const unsigned int pre_ultra_high_time = 8; //in unit of us
static const unsigned int fifo_size = 512;
static const unsigned int fifo_valid_line_ratio = 125; //valid size 1/8 line;
static const unsigned int fifo_min_size = 24;
// working variables
unsigned int consume_levels_per_sec;
unsigned int ultra_low_level;
unsigned int pre_ultra_low_level;
unsigned int pre_ultra_high_level;
unsigned int ultra_high_ofs;
unsigned int pre_ultra_low_ofs;
unsigned int pre_ultra_high_ofs;
unsigned int fifo_valid_size = 16;
/* compute fifo valid size */
fifo_valid_size = (width * bpp * fifo_valid_line_ratio) / (rdma_fifo_width * 1000);
fifo_valid_size = fifo_valid_size > fifo_min_size ? fifo_valid_size : fifo_min_size;
//consume_levels_per_sec = ((long long unsigned int)width*height*frame_rate*blank_overhead*bpp)/rdma_fifo_width/100;
//change calculation order to prevent overflow of unsigned int
consume_levels_per_sec = (width*height*frame_rate*bpp/rdma_fifo_width/100)*blank_overhead;
// /1000000 for ultra_low_time in unit of us
ultra_low_level = (unsigned int)(ultra_low_time * consume_levels_per_sec / 1000000 );
pre_ultra_low_level = (unsigned int)(pre_ultra_low_time * consume_levels_per_sec / 1000000 );
pre_ultra_high_level = (unsigned int)(pre_ultra_high_time * consume_levels_per_sec / 1000000 );
pre_ultra_low_ofs = pre_ultra_low_level - ultra_low_level;
ultra_high_ofs = 1;
pre_ultra_high_ofs = pre_ultra_high_level - pre_ultra_low_level ;
//write ultra_low_level, ultra_high_ofs, pre_ultra_low_ofs, pre_ultra_high_ofs into register DISP_RDMA_MEM_GMC_SETTING_0
DISP_REG_SET(handle,idx*DISP_RDMA_INDEX_OFFSET+ DISP_REG_RDMA_MEM_GMC_SETTING_0,
ultra_low_level|(pre_ultra_low_ofs<<8)|(ultra_high_ofs<<16)|(pre_ultra_high_ofs<<24));
DISP_REG_SET_FIELD(handle,FIFO_CON_FLD_OUTPUT_VALID_FIFO_THRESHOLD, idx*DISP_RDMA_INDEX_OFFSET+ DISP_REG_RDMA_FIFO_CON, fifo_valid_size);
DISP_REG_SET_FIELD(handle,FIFO_CON_FLD_FIFO_UNDERFLOW_EN, idx*DISP_RDMA_INDEX_OFFSET+ DISP_REG_RDMA_FIFO_CON, 1);//add this
DDPDBG("FIFO_VALID_Size = 0x%03x = %d\n", fifo_valid_size , fifo_valid_size );
DDPDBG("ultra_low_level = 0x%03x = %d\n", ultra_low_level , ultra_low_level );
DDPDBG("pre_ultra_low_level = 0x%03x = %d\n", pre_ultra_low_level , pre_ultra_low_level );
DDPDBG("pre_ultra_high_level = 0x%03x = %d\n", pre_ultra_high_level, pre_ultra_high_level);
DDPDBG("ultra_high_ofs = 0x%03x = %d\n", ultra_high_ofs , ultra_high_ofs );
DDPDBG("pre_ultra_low_ofs = 0x%03x = %d\n", pre_ultra_low_ofs , pre_ultra_low_ofs );
DDPDBG("pre_ultra_high_ofs = 0x%03x = %d\n", pre_ultra_high_ofs , pre_ultra_high_ofs );
}
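/*
 * Worked example (not in the original source, all values assumed): the
 * threshold arithmetic above for a hypothetical 1080x1920 RGB888 layer
 * at 60 fps, written as stand-alone user-space C.
 */
#ifdef RDMA_ULTRA_EXAMPLE
#include <stdio.h>
static void rdma_ultra_example(void)
{
	unsigned int width = 1080, height = 1920, bpp = 3, fps = 60;
	unsigned int levels, ultra_low, pre_low, pre_high, fifo_valid;

	/* Same evaluation order as rdma_set_ultra() to stay within
	 * 32 bits: 1080*1920*60*3 = 373,248,000 still fits. */
	levels = (width * height * fps * bpp / 16 / 100) * 115; /* 26,827,200 */
	ultra_low = 5 * levels / 1000000;	/* 5 us -> 134 levels */
	pre_low   = 7 * levels / 1000000;	/* 7 us -> 187 levels */
	pre_high  = 8 * levels / 1000000;	/* 8 us -> 214 levels */
	fifo_valid = (width * bpp * 125) / (16 * 1000); /* 1/8 line -> 25 */
	if (fifo_valid < 24)
		fifo_valid = 24;

	/* GMC register packs: 134 | 53<<8 | 1<<16 | 27<<24 */
	printf("low=%u pre_low_ofs=%u ultra_high_ofs=1 pre_high_ofs=%u fifo=%u\n",
	       ultra_low, pre_low - ultra_low, pre_high - pre_low, fifo_valid);
}
#endif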
// fixme: spec has no RDMA format, fix enum definition here
int rdma_config(DISP_MODULE_ENUM module,
enum RDMA_MODE mode,
unsigned long address,
DpColorFormat inFormat,
unsigned pitch,
unsigned width,
unsigned height,
void * handle) // output setting
{
unsigned int output_is_yuv = 0;
enum RDMA_INPUT_FORMAT inputFormat = rdma_input_format_convert(inFormat);
unsigned int bpp = rdma_input_format_bpp(inputFormat);
unsigned int input_is_yuv = rdma_input_format_color_space(inputFormat);
unsigned int input_swap = rdma_input_format_byte_swap(inputFormat);
unsigned int input_format_reg = rdma_input_format_reg_value(inputFormat);
unsigned int color_matrix = 0x4; //0100 MTX_JPEG_TO_RGB (YUV FUll TO RGB)
unsigned int idx = rdma_index(module);
DDPDBG("RDMAConfig idx %d, mode %d, address 0x%x, inputformat %s, pitch %u, width %u, height %u\n",
idx, mode, address, rdma_intput_format_name(inputFormat,input_swap), pitch,width, height);
ASSERT(idx <= 2);
if((width > RDMA_MAX_WIDTH) || (height > RDMA_MAX_HEIGHT))
{
DDPERR("RDMA input overflow, w=%d, h=%d, max_w=%d, max_h=%d\n", width, height, RDMA_MAX_WIDTH, RDMA_MAX_HEIGHT);
}
if(input_is_yuv==1 && output_is_yuv==0)
{
DISP_REG_SET_FIELD(handle,SIZE_CON_0_FLD_MATRIX_ENABLE, idx * DISP_RDMA_INDEX_OFFSET + DISP_REG_RDMA_SIZE_CON_0, 1);
DISP_REG_SET_FIELD(handle,SIZE_CON_0_FLD_MATRIX_INT_MTX_SEL, idx * DISP_RDMA_INDEX_OFFSET + DISP_REG_RDMA_SIZE_CON_0, color_matrix);
}
else if(input_is_yuv==0 && output_is_yuv==1)
{
color_matrix = 0x2; //0x0010, RGB_TO_BT601
DISP_REG_SET_FIELD(handle,SIZE_CON_0_FLD_MATRIX_ENABLE, idx * DISP_RDMA_INDEX_OFFSET + DISP_REG_RDMA_SIZE_CON_0, 1);
DISP_REG_SET_FIELD(handle,SIZE_CON_0_FLD_MATRIX_INT_MTX_SEL, idx * DISP_RDMA_INDEX_OFFSET + DISP_REG_RDMA_SIZE_CON_0, color_matrix);
}
else
{
DISP_REG_SET_FIELD(handle,SIZE_CON_0_FLD_MATRIX_ENABLE, idx * DISP_RDMA_INDEX_OFFSET + DISP_REG_RDMA_SIZE_CON_0, 0);
DISP_REG_SET_FIELD(handle,SIZE_CON_0_FLD_MATRIX_INT_MTX_SEL, idx * DISP_RDMA_INDEX_OFFSET + DISP_REG_RDMA_SIZE_CON_0, 0);
}
DISP_REG_SET_FIELD(handle,GLOBAL_CON_FLD_MODE_SEL, idx*DISP_RDMA_INDEX_OFFSET+ DISP_REG_RDMA_GLOBAL_CON, mode);
// FORMAT & SWAP only works when RDMA memory mode, set both to 0 when RDMA direct link mode.
DISP_REG_SET_FIELD(handle,MEM_CON_FLD_MEM_MODE_INPUT_FORMAT, idx*DISP_RDMA_INDEX_OFFSET+ DISP_REG_RDMA_MEM_CON, ((mode == RDMA_MODE_DIRECT_LINK) ? 0 : input_format_reg&0xf));
DISP_REG_SET_FIELD(handle,MEM_CON_FLD_MEM_MODE_INPUT_SWAP, idx*DISP_RDMA_INDEX_OFFSET+ DISP_REG_RDMA_MEM_CON, ((mode == RDMA_MODE_DIRECT_LINK) ? 0 : input_swap));
DISP_REG_SET(handle,idx*DISP_RDMA_INDEX_OFFSET+ DISP_REG_RDMA_MEM_START_ADDR, address);
DISP_REG_SET(handle,idx*DISP_RDMA_INDEX_OFFSET+ DISP_REG_RDMA_MEM_SRC_PITCH, pitch);
DISP_REG_SET(handle,idx*DISP_RDMA_INDEX_OFFSET+ DISP_REG_RDMA_INT_ENABLE, 0x1F);
DISP_REG_SET_FIELD(handle,SIZE_CON_0_FLD_OUTPUT_FRAME_WIDTH, idx*DISP_RDMA_INDEX_OFFSET+ DISP_REG_RDMA_SIZE_CON_0, width);
DISP_REG_SET_FIELD(handle,SIZE_CON_1_FLD_OUTPUT_FRAME_HEIGHT, idx*DISP_RDMA_INDEX_OFFSET+ DISP_REG_RDMA_SIZE_CON_1, height);
rdma_set_ultra(idx, width, height, bpp, rdma_fps[idx], handle);
return 0;
}
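/*
 * Hedged usage sketch (not part of the driver): configuring RDMA0 in
 * memory mode for an assumed 1080x1920 ARGB8888 buffer. The physical
 * address is a placeholder; a NULL handle presumably means the
 * registers are written directly by the CPU rather than via a command
 * queue.
 */
#if 0
	rdma_config(DISP_MODULE_RDMA0,
		    RDMA_MODE_MEMORY,	/* read from DRAM, not direct link */
		    0x80000000UL,	/* hypothetical frame buffer PA */
		    eARGB8888,		/* 4 bytes per pixel */
		    1080 * 4,		/* pitch = width * bytes-per-pixel */
		    1080, 1920,		/* layer width / height */
		    NULL);
#endif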
void rdma_set_target_line(DISP_MODULE_ENUM module, unsigned int line,void * handle)
{
unsigned int idx = rdma_index(module);
DISP_REG_SET(handle,idx*DISP_RDMA_INDEX_OFFSET+ DISP_REG_RDMA_TARGET_LINE, line);
}
static int rdma_clock_on(DISP_MODULE_ENUM module,void * handle)
{
unsigned int idx = rdma_index(module);
enable_clock(MT_CG_DISP0_DISP_RDMA0+idx , "DDP");
DDPMSG("rdma_%d_clock_on CG 0x%x \n",idx, DISP_REG_GET(DISP_REG_CONFIG_MMSYS_CG_CON0));
return 0;
}
static int rdma_clock_off(DISP_MODULE_ENUM module,void * handle)
{
unsigned int idx = rdma_index(module);
disable_clock(MT_CG_DISP0_DISP_RDMA0+idx , "DDP");
DDPMSG("rdma_%d_clock_off CG 0x%x \n",idx, DISP_REG_GET(DISP_REG_CONFIG_MMSYS_CG_CON0));
return 0;
}
static int rdma_init(DISP_MODULE_ENUM module,void * handle)
{
unsigned int idx = rdma_index(module);
enable_clock(MT_CG_DISP0_DISP_RDMA0+idx , "DDP");
DDPMSG("rdma%d_init CG 0x%x \n",idx, DISP_REG_GET(DISP_REG_CONFIG_MMSYS_CG_CON0));
return 0;
}
static int rdma_deinit(DISP_MODULE_ENUM module,void * handle)
{
unsigned int idx = rdma_index(module);
disable_clock(MT_CG_DISP0_DISP_RDMA0+idx , "DDP");
DDPMSG("rdma%d_deinit CG 0x%x \n",idx, DISP_REG_GET(DISP_REG_CONFIG_MMSYS_CG_CON0));
return 0;
}
void rdma_dump_reg(DISP_MODULE_ENUM module)
{
unsigned int idx = rdma_index(module);
unsigned int off_st = DISP_RDMA_INDEX_OFFSET*idx;
DDPDUMP("== DISP RDMA%d REGS ==\n", idx);
DDPDUMP("RDMA:0x000=0x%08x,0x004=0x%08x,0x010=0x%08x,0x014=0x%08x\n",
DISP_REG_GET(DISP_REG_RDMA_INT_ENABLE + off_st),
DISP_REG_GET(DISP_REG_RDMA_INT_STATUS + off_st),
DISP_REG_GET(DISP_REG_RDMA_GLOBAL_CON + off_st),
DISP_REG_GET(DISP_REG_RDMA_SIZE_CON_0 + off_st));
DDPDUMP("RDMA:0x018=0x%08x,0x01c=0x%08x,0x024=0x%08x,0xf00=0x%08x\n",
DISP_REG_GET(DISP_REG_RDMA_SIZE_CON_1 + off_st),
DISP_REG_GET(DISP_REG_RDMA_TARGET_LINE + off_st),
DISP_REG_GET(DISP_REG_RDMA_MEM_CON + off_st),
DISP_REG_GET(DISP_REG_RDMA_MEM_START_ADDR + off_st));
DDPDUMP("RDMA:0x02c=0x%08x,0x030=0x%08x,0x034=0x%08x,0x038=0x%08x\n",
DISP_REG_GET(DISP_REG_RDMA_MEM_SRC_PITCH + off_st),
DISP_REG_GET(DISP_REG_RDMA_MEM_GMC_SETTING_0 + off_st),
DISP_REG_GET(DISP_REG_RDMA_MEM_SLOW_CON + off_st),
DISP_REG_GET(DISP_REG_RDMA_MEM_GMC_SETTING_1 + off_st));
DDPDUMP("RDMA:0x040=0x%08x,0x044=0x%08x,0x078=0x%08x,0x07c=0x%08x\n",
DISP_REG_GET(DISP_REG_RDMA_FIFO_CON + off_st),
DISP_REG_GET(DISP_REG_RDMA_FIFO_LOG + off_st),
DISP_REG_GET(DISP_REG_RDMA_PRE_ADD_0 + off_st),
DISP_REG_GET(DISP_REG_RDMA_PRE_ADD_1 + off_st));
DDPDUMP("RDMA:0x080=0x%08x,0x084=0x%08x,0x088=0x%08x,0x08c=0x%08x\n",
DISP_REG_GET(DISP_REG_RDMA_PRE_ADD_2 + off_st),
DISP_REG_GET(DISP_REG_RDMA_POST_ADD_0 + off_st),
DISP_REG_GET(DISP_REG_RDMA_POST_ADD_1 + off_st),
DISP_REG_GET(DISP_REG_RDMA_POST_ADD_2 + off_st));
DDPDUMP("RDMA:0x090=0x%08x,0x094=0x%08x,0x094=0x%08x\n",
DISP_REG_GET(DISP_REG_RDMA_DUMMY + off_st),
DISP_REG_GET(DISP_REG_RDMA_DEBUG_OUT_SEL + off_st),
DISP_REG_GET(DISP_REG_RDMA_MEM_START_ADDR + off_st));
return;
}
extern unsigned long long rdma_start_time[3];
extern unsigned long long rdma_end_time[3];
void rdma_dump_analysis(DISP_MODULE_ENUM module)
{
unsigned int idx = rdma_index(module);
DDPDUMP("==DISP RDMA%d ANALYSIS==\n", idx);
DDPDUMP("rdma%d: en=%d, w=%d, h=%d, pitch=%d, addr=0x%x, fmt=%s, fifo_min=%d rdma_start_time=%lld ns,rdma_end_time=%lld ns\n",
idx,
DISP_REG_GET(DISP_REG_RDMA_GLOBAL_CON+DISP_RDMA_INDEX_OFFSET*idx)&0x1,
DISP_REG_GET(DISP_REG_RDMA_SIZE_CON_0+DISP_RDMA_INDEX_OFFSET*idx)&0xfff,
DISP_REG_GET(DISP_REG_RDMA_SIZE_CON_1+DISP_RDMA_INDEX_OFFSET*idx)&0xfffff,
DISP_REG_GET(DISP_REG_RDMA_MEM_SRC_PITCH+DISP_RDMA_INDEX_OFFSET*idx),
DISP_REG_GET(DISP_REG_RDMA_MEM_START_ADDR+DISP_RDMA_INDEX_OFFSET*idx),
rdma_intput_format_name(
(DISP_REG_GET(DISP_REG_RDMA_MEM_CON+DISP_RDMA_INDEX_OFFSET*idx)>>4)&0xf,
(DISP_REG_GET(DISP_REG_RDMA_MEM_CON+DISP_RDMA_INDEX_OFFSET*idx)>>8)&0x1
),
DISP_REG_GET(DISP_REG_RDMA_FIFO_LOG+DISP_RDMA_INDEX_OFFSET*idx),
rdma_start_time[idx],
rdma_end_time[idx]
);
return ;
}
static int rdma_dump(DISP_MODULE_ENUM module,int level)
{
rdma_dump_analysis(module);
rdma_dump_reg(module);
return 0;
}
void rdma_get_address(DISP_MODULE_ENUM module, unsigned long * addr)
{
unsigned int idx = rdma_index(module);
*addr = DISP_REG_GET(DISP_REG_RDMA_MEM_START_ADDR+DISP_RDMA_INDEX_OFFSET*idx);
return ;
}
void rdma_get_info(int idx, RDMA_BASIC_STRUCT * info)
{
RDMA_BASIC_STRUCT *p = info;
p->addr = DISP_REG_GET(DISP_REG_RDMA_MEM_START_ADDR+DISP_RDMA_INDEX_OFFSET*idx);
p->src_w = DISP_REG_GET(DISP_REG_RDMA_SIZE_CON_0+DISP_RDMA_INDEX_OFFSET*idx)&0xfff;
p->src_h = DISP_REG_GET(DISP_REG_RDMA_SIZE_CON_1+DISP_RDMA_INDEX_OFFSET*idx)&0xfffff,
p->bpp = rdma_input_format_bpp((DISP_REG_GET(DISP_REG_RDMA_MEM_CON+DISP_RDMA_INDEX_OFFSET*idx)>>4)&0xf);
return ;
}
static int rdma_config_l(DISP_MODULE_ENUM module, disp_ddp_path_config* pConfig, void* handle)
{
RDMA_CONFIG_STRUCT * r_config = & pConfig->rdma_config;
enum RDMA_MODE mode = r_config->address ? RDMA_MODE_MEMORY : RDMA_MODE_DIRECT_LINK;
if(pConfig->dst_dirty)
{
if(pConfig->fps)
{
rdma_fps[rdma_index(module)] = pConfig->fps/100;
}
//config to direct link mode
rdma_config(module,
RDMA_MODE_DIRECT_LINK, // link mode
0, // address
eRGB888, // inputFormat
0, // pitch
pConfig->dst_w, // width
pConfig->dst_h, // height
handle);
}
else if(pConfig->rdma_dirty)
{
// decouple mode may use
rdma_config(module,
mode, // link mode
(mode == RDMA_MODE_DIRECT_LINK) ? 0 : r_config->address, // address
(mode == RDMA_MODE_DIRECT_LINK) ? eRGB888 : r_config->inputFormat, // inputFormat
(mode == RDMA_MODE_DIRECT_LINK) ? 0 : r_config->pitch, // pitch
r_config->width, // width
r_config->height, // height
handle);
}
return 0;
}
DDP_MODULE_DRIVER ddp_driver_rdma =
{
.init = rdma_init,
.deinit = rdma_deinit,
.config = rdma_config_l,
.start = rdma_start,
.trigger = NULL,
.stop = rdma_stop,
.reset = rdma_reset,
.power_on = rdma_clock_on,
.power_off = rdma_clock_off,
.is_idle = NULL,
.is_busy = NULL,
.dump_info = rdma_dump,
.bypass = NULL,
.build_cmdq = NULL,
.set_lcm_utils = NULL,
};
| gpl-2.0 |
OpenClovis/linux_tipc | net/tipc/discover.c | 1 | 12424 | /*
* net/tipc/discover.c
*
* Copyright (c) 2003-2006, Ericsson AB
* Copyright (c) 2005-2006, 2010-2011, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "core.h"
#include "link.h"
#include "discover.h"
#define TIPC_LINK_REQ_INIT 125 /* min delay during bearer start up */
#define TIPC_LINK_REQ_FAST 1000 /* max delay if bearer has no links */
#define TIPC_LINK_REQ_SLOW 60000 /* max delay if bearer has links */
#define TIPC_LINK_REQ_INACTIVE 0xffffffff /* indicates no timer in use */
/**
* struct tipc_link_req - information about an ongoing link setup request
* @bearer: bearer issuing requests
* @dest: destination address for request messages
* @domain: network domain to which links can be established
* @num_nodes: number of nodes currently discovered (i.e. with an active link)
* @lock: spinlock for controlling access to requests
* @buf: request message to be (repeatedly) sent
* @timer: timer governing period between requests
* @timer_intv: current interval between requests (in ms)
*/
struct tipc_link_req {
struct tipc_bearer *bearer;
struct tipc_media_addr dest;
u32 domain;
int num_nodes;
spinlock_t lock;
struct sk_buff *buf;
struct timer_list timer;
unsigned int timer_intv;
};
/**
* tipc_disc_init_msg - initialize a link setup message
* @type: message type (request or response)
* @dest_domain: network domain of node(s) which should respond to message
* @b_ptr: ptr to bearer issuing message
*/
static struct sk_buff *tipc_disc_init_msg(u32 type, u32 dest_domain,
struct tipc_bearer *b_ptr)
{
struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE);
struct tipc_msg *msg;
if ((buf) || (buf = tipc_mem_mgmt_get_buf())) {
msg = buf_msg(buf);
tipc_msg_init(msg, LINK_CONFIG, type, INT_H_SIZE, dest_domain);
msg_set_non_seq(msg, 1);
msg_set_node_sig(msg, tipc_random);
msg_set_dest_domain(msg, dest_domain);
msg_set_bc_netid(msg, tipc_net_id);
b_ptr->media->addr2msg(&b_ptr->addr, msg_media_addr(msg));
}
return buf;
}
/**
* disc_dupl_alert - issue node address duplication alert
* @b_ptr: pointer to bearer detecting duplication
* @node_addr: duplicated node address
* @media_addr: media address advertised by duplicated node
*/
static void disc_dupl_alert(struct tipc_bearer *b_ptr, u32 node_addr,
struct tipc_media_addr *media_addr)
{
char node_addr_str[16];
char media_addr_str[64];
tipc_addr_string_fill(node_addr_str, node_addr);
tipc_media_addr_printf(media_addr_str, sizeof(media_addr_str),
media_addr);
pr_warn("Duplicate %s using %s seen on <%s>\n", node_addr_str,
media_addr_str, b_ptr->name);
}
/**
* tipc_disc_rcv - handle incoming link setup message (request or response)
* @buf: buffer containing message
* @b_ptr: bearer that message arrived on
*/
void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *b_ptr)
{
struct tipc_node *n_ptr;
struct tipc_link *link;
struct tipc_media_addr media_addr;
struct sk_buff *rbuf;
struct tipc_msg *msg = buf_msg(buf);
u32 dest = msg_dest_domain(msg);
u32 orig = msg_prevnode(msg);
u32 net_id = msg_bc_netid(msg);
u32 type = msg_type(msg);
u32 signature = msg_node_sig(msg);
int addr_mismatch;
int link_fully_up;
media_addr.broadcast = 1;
b_ptr->media->msg2addr(b_ptr, &media_addr, msg_media_addr(msg));
kfree_skb(buf);
/* Ensure message from node is valid and communication is permitted */
if (net_id != tipc_net_id){
drop_log("Didsovry Packet Rcvd with unknow network-Id[0x%x]\n", net_id);
return;
}
if (media_addr.broadcast)
return;
if (!tipc_addr_domain_valid(dest)){
drop_log("Didsovry Packet Rcvd with Invalid Domain-Id[0x%x]\n", dest);
return;
}
if (!tipc_addr_node_valid(orig)){
drop_log("Didsovry Packet Rcvd with Invalid origin node-Id[0x%x]\n", orig);
return;
}
if (orig == tipc_own_addr) {
if (memcmp(&media_addr, &b_ptr->addr, sizeof(media_addr)))
disc_dupl_alert(b_ptr, tipc_own_addr, &media_addr);
return;
}
if (!tipc_in_scope(dest, tipc_own_addr)){
drop_log("Discovery Packet Rcvd destination addr[0x%x] is not my address scope\n", dest);
return;
}
if (!tipc_in_scope(b_ptr->link_req->domain, orig)){
drop_log("Discovery Packet Rcvd origin address[0x%x] is not link requrest Domain Scope\n", orig);
return;
}
/* Locate structure corresponding to requesting node */
n_ptr = tipc_node_find(orig);
if (!n_ptr) {
n_ptr = tipc_node_create(orig);
if (!n_ptr){
drop_log("Failed to create New node with Origin address[0x%x]\n", orig);
return;
}
}
tipc_node_lock(n_ptr);
/* Prepare to validate requesting node's signature and media address */
link = n_ptr->links[b_ptr->identity];
addr_mismatch = (link != NULL) &&
memcmp(&link->media_addr, &media_addr, sizeof(media_addr));
/*
* Ensure discovery message's signature is correct
*
* If signature is incorrect and there is no working link to the node,
* accept the new signature but invalidate all existing links to the
* node so they won't re-activate without a new discovery message.
*
* If signature is incorrect and the requested link to the node is
* working, accept the new signature. (This is an instance of delayed
* rediscovery, where a link endpoint was able to re-establish contact
* with its peer endpoint on a node that rebooted before receiving a
* discovery message from that node.)
*
* If signature is incorrect and there is a working link to the node
* that is not the requested link, reject the request (must be from
* a duplicate node).
*/
if (signature != n_ptr->signature) {
if (n_ptr->working_links == 0) {
struct tipc_link *curr_link;
int i;
for (i = 0; i < MAX_BEARERS; i++) {
curr_link = n_ptr->links[i];
if (curr_link) {
memset(&curr_link->media_addr, 0,
sizeof(media_addr));
tipc_link_reset(curr_link);
}
}
addr_mismatch = (link != NULL);
} else if (tipc_link_is_up(link) && !addr_mismatch) {
/* delayed rediscovery */
} else {
disc_dupl_alert(b_ptr, orig, &media_addr);
tipc_node_unlock(n_ptr);
return;
}
n_ptr->signature = signature;
}
/*
* Ensure requesting node's media address is correct
*
* If media address doesn't match and the link is working, reject the
* request (must be from a duplicate node).
*
* If media address doesn't match and the link is not working, accept
* the new media address and reset the link to ensure it starts up
* cleanly.
*/
if (addr_mismatch) {
if (tipc_link_is_up(link)) {
disc_dupl_alert(b_ptr, orig, &media_addr);
tipc_node_unlock(n_ptr);
return;
} else {
memcpy(&link->media_addr, &media_addr,
sizeof(media_addr));
tipc_link_reset(link);
}
}
/* Create a link endpoint for this bearer, if necessary */
if (!link) {
link = tipc_link_create(n_ptr, b_ptr, &media_addr);
if (!link) {
tipc_node_unlock(n_ptr);
drop_log("Failed to create Link End Point\n");
return;
}
}
/* Accept discovery message & send response, if necessary */
link_fully_up = link_working_working(link);
if ((type == DSC_REQ_MSG) && !link_fully_up) {
rbuf = tipc_disc_init_msg(DSC_RESP_MSG, orig, b_ptr);
if (rbuf) {
tipc_bearer_send(b_ptr, rbuf, &media_addr);
kfree_skb(rbuf);
}
}
tipc_node_unlock(n_ptr);
}
/**
* disc_update - update frequency of periodic link setup requests
* @req: ptr to link request structure
*
* Reinitiates discovery process if discovery object has no associated nodes
* and is either not currently searching or is searching at a slow rate
*/
static void disc_update(struct tipc_link_req *req)
{
if (!req->num_nodes) {
if ((req->timer_intv == TIPC_LINK_REQ_INACTIVE) ||
(req->timer_intv > TIPC_LINK_REQ_FAST)) {
req->timer_intv = TIPC_LINK_REQ_INIT;
k_start_timer(&req->timer, req->timer_intv);
}
}
}
/**
* tipc_disc_add_dest - increment set of discovered nodes
* @req: ptr to link request structure
*/
void tipc_disc_add_dest(struct tipc_link_req *req)
{
spin_lock_bh(&req->lock);
req->num_nodes++;
spin_unlock_bh(&req->lock);
}
/**
* tipc_disc_remove_dest - decrement set of discovered nodes
* @req: ptr to link request structure
*/
void tipc_disc_remove_dest(struct tipc_link_req *req)
{
spin_lock_bh(&req->lock);
req->num_nodes--;
disc_update(req);
spin_unlock_bh(&req->lock);
}
/**
* disc_timeout - send a periodic link setup request
* @req: ptr to link request structure
*
* Called whenever a link setup request timer associated with a bearer expires.
*/
static void disc_timeout(struct tipc_link_req *req)
{
int max_delay;
spin_lock_bh(&req->lock);
/* Stop searching if only desired node has been found */
if (tipc_node(req->domain) && req->num_nodes) {
req->timer_intv = TIPC_LINK_REQ_INACTIVE;
goto exit;
}
/*
* Send discovery message, then update discovery timer
*
* Keep doubling time between requests until limit is reached;
* hold at fast polling rate if don't have any associated nodes,
* otherwise hold at slow polling rate
*/
tipc_bearer_send(req->bearer, req->buf, &req->dest);
req->timer_intv *= 2;
if (req->num_nodes)
max_delay = TIPC_LINK_REQ_SLOW;
else
max_delay = TIPC_LINK_REQ_FAST;
if (req->timer_intv > max_delay)
req->timer_intv = max_delay;
k_start_timer(&req->timer, req->timer_intv);
exit:
spin_unlock_bh(&req->lock);
}
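/*
 * Minimal sketch (not in the original file) of the pacing implemented
 * above: the interval doubles on every timeout and is clamped at the
 * fast limit while no nodes have answered, or at the slow limit once
 * links exist.
 */
#if 0
	unsigned int intv = TIPC_LINK_REQ_INIT;		/* 125 ms */
	int have_nodes = 0;

	for (;;) {
		/* send discovery request ... */
		intv *= 2;
		if (intv > (have_nodes ? TIPC_LINK_REQ_SLOW
				       : TIPC_LINK_REQ_FAST))
			intv = have_nodes ? TIPC_LINK_REQ_SLOW
					  : TIPC_LINK_REQ_FAST;
		/* no nodes: 125 -> 250 -> 500 -> 1000 ms, then hold;
		 * with nodes: keeps doubling up to 60000 ms. */
	}
#endif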
/**
* tipc_disc_create - create object to send periodic link setup requests
* @b_ptr: ptr to bearer issuing requests
* @dest: destination address for request messages
* @dest_domain: network domain to which links can be established
*
* Returns 0 if successful, otherwise -errno.
*/
int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest,
u32 dest_domain)
{
struct tipc_link_req *req;
req = kmalloc(sizeof(*req), GFP_ATOMIC);
if (!req){
drop_log("[%s:%d]Discovery msg creation failed, no memory\n", __FUNCTION__, __LINE__);
return -ENOMEM;
}
req->buf = tipc_disc_init_msg(DSC_REQ_MSG, dest_domain, b_ptr);
if (!req->buf) {
kfree(req);
drop_log("[%s:%d] Discovery Msg Init Failed \n", __FUNCTION__, __LINE__);
return -ENOMSG;
}
memcpy(&req->dest, dest, sizeof(*dest));
req->bearer = b_ptr;
req->domain = dest_domain;
req->num_nodes = 0;
req->timer_intv = TIPC_LINK_REQ_INIT;
spin_lock_init(&req->lock);
k_init_timer(&req->timer, (Handler)disc_timeout, (unsigned long)req);
k_start_timer(&req->timer, req->timer_intv);
b_ptr->link_req = req;
tipc_bearer_send(req->bearer, req->buf, &req->dest);
return 0;
}
/**
* tipc_disc_delete - destroy object sending periodic link setup requests
* @req: ptr to link request structure
*/
void tipc_disc_delete(struct tipc_link_req *req)
{
k_cancel_timer(&req->timer);
k_term_timer(&req->timer);
kfree_skb(req->buf);
kfree(req);
}
| gpl-2.0 |
xerpi/3ds-arm9-linux | linux-2.6.x/fs/jffs2/nodemgmt.c | 1 | 32095 | /*
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright (C) 2001-2003 Red Hat, Inc.
*
* Created by David Woodhouse <dwmw2@infradead.org>
*
* For licensing information, see the file 'LICENCE' in this directory.
*
* $Id: nodemgmt.c 573 2006-02-20 17:09:11Z stsp2 $
*
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include <linux/sched.h> /* For cond_resched() */
#include "nodelist.h"
/**
* jffs2_reserve_space - request physical space to write nodes to flash
* @c: superblock info
* @minsize: Minimum acceptable size of allocation
* @ofs: Returned value of node offset
* @len: Returned value of allocation length
* @prio: Allocation type - ALLOC_{NORMAL,DELETION}
*
* Requests a block of physical space on the flash. Returns zero for success
* and puts 'ofs' and 'len' into the appropriate place, or returns -ENOSPC
* or other error if appropriate.
*
* If it returns zero, jffs2_reserve_space() also downs the per-filesystem
* allocation semaphore, to prevent more than one allocation from being
* active at any time. The semaphore is later released by jffs2_commit_allocation()
*
* jffs2_reserve_space() may trigger garbage collection in order to make room
* for the requested allocation.
*/
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len);
int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len, int prio)
{
int ret = -EAGAIN;
int blocksneeded = c->resv_blocks_write;
/* align it */
minsize = PAD(minsize);
D1(printk(KERN_DEBUG "jffs2_reserve_space(): Requested 0x%x bytes\n", minsize));
down(&c->alloc_sem);
D1(printk(KERN_DEBUG "jffs2_reserve_space(): alloc sem got\n"));
spin_lock(&c->erase_completion_lock);
/* this needs a little more thought (true <tglx> :)) */
while(ret == -EAGAIN) {
while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
int ret;
uint32_t dirty, avail;
/* calculate real dirty size
* dirty_size contains blocks on erase_pending_list
* those blocks are counted in c->nr_erasing_blocks.
* If one block is actually erased, it is no longer counted as dirty_space
* but it is counted in c->nr_erasing_blocks, so we add it and subtract it
* with c->nr_erasing_blocks * c->sector_size again.
* Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks
* This helps us to force gc and pick eventually a clean block to spread the load.
* We add unchecked_size here, as we hopefully will find some space to use.
* This will affect the sum only once, as gc first finishes checking
* of nodes.
*/
dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
if (dirty < c->nospc_dirty_size) {
if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on dirty space to GC, but it's a deletion. Allowing...\n"));
break;
}
D1(printk(KERN_DEBUG "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
dirty, c->unchecked_size, c->sector_size));
spin_unlock(&c->erase_completion_lock);
up(&c->alloc_sem);
return -ENOSPC;
}
/* Calc possibly available space. Possibly available means that we
* don't know whether the unchecked size contains obsoleted nodes, which could give us
* some more usable space. This will affect the sum only once, as gc first finishes
* checking of nodes.
* Return -ENOSPC if the maximum possibly available space is less than or equal to
* blocksneeded * sector_size.
* This prevents endless gc looping on a filesystem which is nearly full, even if
* the check above passes.
*/
avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
if ( (avail / c->sector_size) <= blocksneeded) {
if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on possibly available space, but it's a deletion. Allowing...\n"));
break;
}
D1(printk(KERN_DEBUG "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
avail, blocksneeded * c->sector_size));
spin_unlock(&c->erase_completion_lock);
up(&c->alloc_sem);
return -ENOSPC;
}
up(&c->alloc_sem);
D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size,
c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size));
spin_unlock(&c->erase_completion_lock);
ret = jffs2_garbage_collect_pass(c);
if (ret)
return ret;
cond_resched();
if (signal_pending(current))
return -EINTR;
down(&c->alloc_sem);
spin_lock(&c->erase_completion_lock);
}
ret = jffs2_do_reserve_space(c, minsize, ofs, len);
if (ret) {
D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret));
}
}
spin_unlock(&c->erase_completion_lock);
if (ret)
up(&c->alloc_sem);
return ret;
}
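/*
 * Hedged usage sketch (not from this file): the caller-side lifecycle
 * implied by the comment above. Error handling trimmed; 'c', 'raw',
 * 'write_len' and the flash write step are placeholders.
 */
#if 0
	uint32_t ofs, len;

	if (jffs2_reserve_space(c, write_len, &ofs, &len, ALLOC_NORMAL))
		return;				/* -ENOSPC, -EINTR, ... */
	/* ... write the new node to flash at 'ofs' ... */
	jffs2_add_physical_node_ref(c, raw);	/* account for the node */
	jffs2_complete_reservation(c);		/* releases alloc_sem */
#endif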
int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len)
{
int ret = -EAGAIN;
minsize = PAD(minsize);
D1(printk(KERN_DEBUG "jffs2_reserve_space_gc(): Requested 0x%x bytes\n", minsize));
spin_lock(&c->erase_completion_lock);
while(ret == -EAGAIN) {
ret = jffs2_do_reserve_space(c, minsize, ofs, len);
if (ret) {
D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret));
}
}
spin_unlock(&c->erase_completion_lock);
return ret;
}
/* Called with alloc sem _and_ erase_completion_lock */
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len)
{
struct jffs2_eraseblock *jeb = c->nextblock;
restart:
if (jeb && minsize > jeb->free_size) {
/* Skip the end of this block and file it as having some dirty space */
/* If there's a pending write to it, flush now */
if (jffs2_wbuf_dirty(c)) {
spin_unlock(&c->erase_completion_lock);
D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
jffs2_flush_wbuf_pad(c);
spin_lock(&c->erase_completion_lock);
jeb = c->nextblock;
goto restart;
}
c->wasted_size += jeb->free_size;
c->free_size -= jeb->free_size;
jeb->wasted_size += jeb->free_size;
jeb->free_size = 0;
/* Check, if we have a dirty block now, or if it was dirty already */
if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) {
c->dirty_size += jeb->wasted_size;
c->wasted_size -= jeb->wasted_size;
jeb->dirty_size += jeb->wasted_size;
jeb->wasted_size = 0;
if (VERYDIRTY(c, jeb->dirty_size)) {
D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
list_add_tail(&jeb->list, &c->very_dirty_list);
} else {
D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
list_add_tail(&jeb->list, &c->dirty_list);
}
} else {
D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
list_add_tail(&jeb->list, &c->clean_list);
}
c->nextblock = jeb = NULL;
}
if (!jeb) {
struct list_head *next;
/* Take the next block off the 'free' list */
if (list_empty(&c->free_list)) {
if (!c->nr_erasing_blocks &&
!list_empty(&c->erasable_list)) {
struct jffs2_eraseblock *ejeb;
ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
list_del(&ejeb->list);
list_add_tail(&ejeb->list, &c->erase_pending_list);
c->nr_erasing_blocks++;
jffs2_erase_pending_trigger(c);
D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Triggering erase of erasable block at 0x%08x\n",
ejeb->offset));
}
if (!c->nr_erasing_blocks &&
!list_empty(&c->erasable_pending_wbuf_list)) {
D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
/* c->nextblock is NULL, no update to c->nextblock allowed */
spin_unlock(&c->erase_completion_lock);
jffs2_flush_wbuf_pad(c);
spin_lock(&c->erase_completion_lock);
/* Have another go. It'll be on the erasable_list now */
return -EAGAIN;
}
if (!c->nr_erasing_blocks) {
/* Ouch. We're in GC, or we wouldn't have got here.
And there's no space left. At all. */
printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no",
list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no");
return -ENOSPC;
}
spin_unlock(&c->erase_completion_lock);
/* Don't wait for it; just erase one right now */
jffs2_erase_pending_blocks(c, 1);
spin_lock(&c->erase_completion_lock);
/* An erase may have failed, decreasing the
amount of free space available. So we must
restart from the beginning */
return -EAGAIN;
}
next = c->free_list.next;
list_del(next);
c->nextblock = jeb = list_entry(next, struct jffs2_eraseblock, list);
c->nr_free_blocks--;
if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size);
goto restart;
}
}
/* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
enough space */
*ofs = jeb->offset + (c->sector_size - jeb->free_size);
*len = jeb->free_size;
if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
!jeb->first_node->next_in_ino) {
/* Only node in it beforehand was a CLEANMARKER node (we think).
So mark it obsolete now that there's going to be another node
in the block. This will reduce used_size to zero but we've
already set c->nextblock so that jffs2_mark_node_obsolete()
won't try to refile it to the dirty_list.
*/
spin_unlock(&c->erase_completion_lock);
jffs2_mark_node_obsolete(c, jeb->first_node);
spin_lock(&c->erase_completion_lock);
}
D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n", *len, *ofs));
return 0;
}
/**
* jffs2_add_physical_node_ref - add a physical node reference to the list
* @c: superblock info
* @new: new node reference to add
* @len: length of this physical node
* @dirty: dirty flag for new node
*
* Should only be used to report nodes for which space has been allocated
* by jffs2_reserve_space.
*
* Must be called with the alloc_sem held.
*/
int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new)
{
struct jffs2_eraseblock *jeb;
uint32_t len;
jeb = &c->blocks[new->flash_offset / c->sector_size];
len = ref_totlen(c, jeb, new);
D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n", ref_offset(new), ref_flags(new), len));
#if 1
/* we could get some obsolete nodes after nextblock was refiled
in wbuf.c */
if ((c->nextblock || !ref_obsolete(new))
&&(jeb != c->nextblock || ref_offset(new) != jeb->offset + (c->sector_size - jeb->free_size))) {
printk(KERN_WARNING "argh. node added in wrong place\n");
jffs2_free_raw_node_ref(new);
return -EINVAL;
}
#endif
spin_lock(&c->erase_completion_lock);
if (!jeb->first_node)
jeb->first_node = new;
if (jeb->last_node)
jeb->last_node->next_phys = new;
jeb->last_node = new;
jeb->free_size -= len;
c->free_size -= len;
if (ref_obsolete(new)) {
jeb->dirty_size += len;
c->dirty_size += len;
} else {
jeb->used_size += len;
c->used_size += len;
}
if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
/* If it lives on the dirty_list, jffs2_reserve_space will put it there */
D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
if (jffs2_wbuf_dirty(c)) {
/* Flush the last write in the block if it's outstanding */
spin_unlock(&c->erase_completion_lock);
jffs2_flush_wbuf_pad(c);
spin_lock(&c->erase_completion_lock);
}
list_add_tail(&jeb->list, &c->clean_list);
c->nextblock = NULL;
}
ACCT_SANITY_CHECK(c,jeb);
D1(ACCT_PARANOIA_CHECK(jeb));
spin_unlock(&c->erase_completion_lock);
return 0;
}
void jffs2_complete_reservation(struct jffs2_sb_info *c)
{
D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n"));
jffs2_garbage_collect_trigger(c);
up(&c->alloc_sem);
}
static inline int on_list(struct list_head *obj, struct list_head *head)
{
struct list_head *this;
list_for_each(this, head) {
if (this == obj) {
D1(printk("%p is on list at %p\n", obj, head));
return 1;
}
}
return 0;
}
void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
{
struct jffs2_eraseblock *jeb;
int blocknr;
struct jffs2_unknown_node n;
int ret, addedsize;
size_t retlen;
if(!ref) {
printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
return;
}
if (ref_obsolete(ref)) {
D1(printk(KERN_DEBUG "jffs2_mark_node_obsolete called with already obsolete node at 0x%08x\n", ref_offset(ref)));
return;
}
blocknr = ref->flash_offset / c->sector_size;
if (blocknr >= c->nr_blocks) {
printk(KERN_NOTICE "raw node at 0x%08x is off the end of device!\n", ref->flash_offset);
BUG();
}
jeb = &c->blocks[blocknr];
if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
!(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) {
/* Hm. This may confuse static lock analysis. If any of the above
three conditions is false, we're going to return from this
function without actually obliterating any nodes or freeing
any jffs2_raw_node_refs. So we don't need to stop erases from
happening, or protect against people holding an obsolete
jffs2_raw_node_ref without the erase_completion_lock. */
down(&c->erase_free_sem);
}
spin_lock(&c->erase_completion_lock);
if (ref_flags(ref) == REF_UNCHECKED) {
D1(if (unlikely(jeb->unchecked_size < ref_totlen(c, jeb, ref))) {
printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->used_size);
BUG();
})
D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), ref_totlen(c, jeb, ref)));
jeb->unchecked_size -= ref_totlen(c, jeb, ref);
c->unchecked_size -= ref_totlen(c, jeb, ref);
} else {
D1(if (unlikely(jeb->used_size < ref_totlen(c, jeb, ref))) {
printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->used_size);
BUG();
})
D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %x: ", ref_offset(ref), ref_totlen(c, jeb, ref)));
jeb->used_size -= ref_totlen(c, jeb, ref);
c->used_size -= ref_totlen(c, jeb, ref);
}
// Take care that wasted size is taken into account
if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + ref_totlen(c, jeb, ref))) && jeb != c->nextblock) {
D1(printk(KERN_DEBUG "Dirtying\n"));
addedsize = ref_totlen(c, jeb, ref);
jeb->dirty_size += ref_totlen(c, jeb, ref);
c->dirty_size += ref_totlen(c, jeb, ref);
/* Convert wasted space to dirty, if not a bad block */
if (jeb->wasted_size) {
if (on_list(&jeb->list, &c->bad_used_list)) {
D1(printk(KERN_DEBUG "Leaving block at %08x on the bad_used_list\n",
jeb->offset));
addedsize = 0; /* To fool the refiling code later */
} else {
D1(printk(KERN_DEBUG "Converting %d bytes of wasted space to dirty in block at %08x\n",
jeb->wasted_size, jeb->offset));
addedsize += jeb->wasted_size;
jeb->dirty_size += jeb->wasted_size;
c->dirty_size += jeb->wasted_size;
c->wasted_size -= jeb->wasted_size;
jeb->wasted_size = 0;
}
}
} else {
D1(printk(KERN_DEBUG "Wasting\n"));
addedsize = 0;
jeb->wasted_size += ref_totlen(c, jeb, ref);
c->wasted_size += ref_totlen(c, jeb, ref);
}
ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;
ACCT_SANITY_CHECK(c, jeb);
D1(ACCT_PARANOIA_CHECK(jeb));
if (c->flags & JFFS2_SB_FLAG_SCANNING) {
/* Flash scanning is in progress. Don't muck about with the block
lists because they're not ready yet, and don't actually
obliterate nodes that look obsolete. If they weren't
marked obsolete on the flash at the time they _became_
obsolete, there was probably a reason for that. */
spin_unlock(&c->erase_completion_lock);
/* We didn't lock the erase_free_sem */
return;
}
if (jeb == c->nextblock) {
D2(printk(KERN_DEBUG "Not moving nextblock 0x%08x to dirty/erase_pending list\n", jeb->offset));
} else if (!jeb->used_size && !jeb->unchecked_size) {
if (jeb == c->gcblock) {
D1(printk(KERN_DEBUG "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", jeb->offset));
c->gcblock = NULL;
} else {
D1(printk(KERN_DEBUG "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", jeb->offset));
list_del(&jeb->list);
}
if (jffs2_wbuf_dirty(c)) {
D1(printk(KERN_DEBUG "...and adding to erasable_pending_wbuf_list\n"));
list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
} else {
if (jiffies & 127) {
/* Most of the time, we just erase it immediately. Otherwise we
spend ages scanning it on mount, etc. */
D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
list_add_tail(&jeb->list, &c->erase_pending_list);
c->nr_erasing_blocks++;
jffs2_erase_pending_trigger(c);
} else {
/* Sometimes, however, we leave it elsewhere so it doesn't get
immediately reused, and we spread the load a bit. */
D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
list_add_tail(&jeb->list, &c->erasable_list);
}
}
D1(printk(KERN_DEBUG "Done OK\n"));
} else if (jeb == c->gcblock) {
D2(printk(KERN_DEBUG "Not moving gcblock 0x%08x to dirty_list\n", jeb->offset));
} else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n", jeb->offset));
list_del(&jeb->list);
D1(printk(KERN_DEBUG "...and adding to dirty_list\n"));
list_add_tail(&jeb->list, &c->dirty_list);
} else if (VERYDIRTY(c, jeb->dirty_size) &&
!VERYDIRTY(c, jeb->dirty_size - addedsize)) {
D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", jeb->offset));
list_del(&jeb->list);
D1(printk(KERN_DEBUG "...and adding to very_dirty_list\n"));
list_add_tail(&jeb->list, &c->very_dirty_list);
} else {
D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
}
spin_unlock(&c->erase_completion_lock);
if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c) ||
(c->flags & JFFS2_SB_FLAG_BUILDING)) {
/* We didn't lock the erase_free_sem */
return;
}
/* The erase_free_sem is locked, and has been since before we marked the node obsolete
and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
by jffs2_free_all_node_refs() in erase.c. Which is nice. */
D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref)));
ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
if (ret) {
printk(KERN_WARNING "Read error reading from obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
goto out_erase_sem;
}
if (retlen != sizeof(n)) {
printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
goto out_erase_sem;
}
if (PAD(je32_to_cpu(n.totlen)) != PAD(ref_totlen(c, jeb, ref))) {
printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), ref_totlen(c, jeb, ref));
goto out_erase_sem;
}
if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
D1(printk(KERN_DEBUG "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n", ref_offset(ref), je16_to_cpu(n.nodetype)));
goto out_erase_sem;
}
/* XXX FIXME: This is ugly now */
n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
if (ret) {
printk(KERN_WARNING "Write error in obliterating obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
goto out_erase_sem;
}
if (retlen != sizeof(n)) {
printk(KERN_WARNING "Short write in obliterating obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
goto out_erase_sem;
}
/* Nodes which have been marked obsolete no longer need to be
associated with any inode. Remove them from the per-inode list.
Note we can't do this for NAND at the moment because we need
obsolete dirent nodes to stay on the lists, because of the
horridness in jffs2_garbage_collect_deletion_dirent(). Also
because we delete the inocache, and on NAND we need that to
stay around until all the nodes are actually erased, in order
to stop us from giving the same inode number to another newly
created inode. */
if (ref->next_in_ino) {
struct jffs2_inode_cache *ic;
struct jffs2_raw_node_ref **p;
spin_lock(&c->erase_completion_lock);
ic = jffs2_raw_ref_to_ic(ref);
for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
;
*p = ref->next_in_ino;
ref->next_in_ino = NULL;
if (ic->nodes == (void *)ic && ic->nlink == 0)
jffs2_del_ino_cache(c, ic);
spin_unlock(&c->erase_completion_lock);
}
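/* Note on the unlink above: walking with a pointer-to-pointer ('p' holds the
address of the link field rather than the node itself) removes an element
from a singly-linked list without special-casing the head. A minimal sketch
of the same idiom, with hypothetical names:

struct node { struct node *next; };
static void unlink_node(struct node **head, struct node *target)
{
struct node **p;
for (p = head; *p != target; p = &(*p)->next)
;
*p = target->next; // works whether target is the head or not
}
*/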
/* Merge with the next node in the physical list, if there is one
and if it's also obsolete and if it doesn't belong to any inode */
if (ref->next_phys && ref_obsolete(ref->next_phys) &&
!ref->next_phys->next_in_ino) {
struct jffs2_raw_node_ref *n = ref->next_phys;
spin_lock(&c->erase_completion_lock);
ref->__totlen += n->__totlen;
ref->next_phys = n->next_phys;
if (jeb->last_node == n) jeb->last_node = ref;
if (jeb->gc_node == n) {
/* gc will be happy continuing gc on this node */
jeb->gc_node=ref;
}
spin_unlock(&c->erase_completion_lock);
jffs2_free_raw_node_ref(n);
}
/* Also merge with the previous node in the list, if there is one
and that one is obsolete */
if (ref != jeb->first_node ) {
struct jffs2_raw_node_ref *p = jeb->first_node;
spin_lock(&c->erase_completion_lock);
while (p->next_phys != ref)
p = p->next_phys;
if (ref_obsolete(p) && !ref->next_in_ino) {
p->__totlen += ref->__totlen;
if (jeb->last_node == ref) {
jeb->last_node = p;
}
if (jeb->gc_node == ref) {
/* gc will be happy continuing gc on this node */
jeb->gc_node=p;
}
p->next_phys = ref->next_phys;
jffs2_free_raw_node_ref(ref);
}
spin_unlock(&c->erase_completion_lock);
}
out_erase_sem:
up(&c->erase_free_sem);
}
#if CONFIG_JFFS2_FS_DEBUG >= 2
void jffs2_dump_block_lists(struct jffs2_sb_info *c)
{
printk(KERN_DEBUG "jffs2_dump_block_lists:\n");
printk(KERN_DEBUG "flash_size: %08x\n", c->flash_size);
printk(KERN_DEBUG "used_size: %08x\n", c->used_size);
printk(KERN_DEBUG "dirty_size: %08x\n", c->dirty_size);
printk(KERN_DEBUG "wasted_size: %08x\n", c->wasted_size);
printk(KERN_DEBUG "unchecked_size: %08x\n", c->unchecked_size);
printk(KERN_DEBUG "free_size: %08x\n", c->free_size);
printk(KERN_DEBUG "erasing_size: %08x\n", c->erasing_size);
printk(KERN_DEBUG "bad_size: %08x\n", c->bad_size);
printk(KERN_DEBUG "sector_size: %08x\n", c->sector_size);
printk(KERN_DEBUG "jffs2_reserved_blocks size: %08x\n",c->sector_size * c->resv_blocks_write);
if (c->nextblock) {
printk(KERN_DEBUG "nextblock: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
c->nextblock->offset, c->nextblock->used_size, c->nextblock->dirty_size, c->nextblock->wasted_size, c->nextblock->unchecked_size, c->nextblock->free_size);
} else {
printk(KERN_DEBUG "nextblock: NULL\n");
}
if (c->gcblock) {
printk(KERN_DEBUG "gcblock: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
c->gcblock->offset, c->gcblock->used_size, c->gcblock->dirty_size, c->gcblock->wasted_size, c->gcblock->unchecked_size, c->gcblock->free_size);
} else {
printk(KERN_DEBUG "gcblock: NULL\n");
}
if (list_empty(&c->clean_list)) {
printk(KERN_DEBUG "clean_list: empty\n");
} else {
struct list_head *this;
int numblocks = 0;
uint32_t dirty = 0;
list_for_each(this, &c->clean_list) {
struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
numblocks ++;
dirty += jeb->wasted_size;
printk(KERN_DEBUG "clean_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
}
printk (KERN_DEBUG "Contains %d blocks with total wasted size %u, average wasted size: %u\n", numblocks, dirty, dirty / numblocks);
}
if (list_empty(&c->very_dirty_list)) {
printk(KERN_DEBUG "very_dirty_list: empty\n");
} else {
struct list_head *this;
int numblocks = 0;
uint32_t dirty = 0;
list_for_each(this, &c->very_dirty_list) {
struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
numblocks ++;
dirty += jeb->dirty_size;
printk(KERN_DEBUG "very_dirty_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
}
printk (KERN_DEBUG "Contains %d blocks with total dirty size %u, average dirty size: %u\n",
numblocks, dirty, dirty / numblocks);
}
if (list_empty(&c->dirty_list)) {
printk(KERN_DEBUG "dirty_list: empty\n");
} else {
struct list_head *this;
int numblocks = 0;
uint32_t dirty = 0;
list_for_each(this, &c->dirty_list) {
struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
numblocks ++;
dirty += jeb->dirty_size;
printk(KERN_DEBUG "dirty_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
}
printk (KERN_DEBUG "Contains %d blocks with total dirty size %u, average dirty size: %u\n",
numblocks, dirty, dirty / numblocks);
}
if (list_empty(&c->erasable_list)) {
printk(KERN_DEBUG "erasable_list: empty\n");
} else {
struct list_head *this;
list_for_each(this, &c->erasable_list) {
struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
printk(KERN_DEBUG "erasable_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
}
}
if (list_empty(&c->erasing_list)) {
printk(KERN_DEBUG "erasing_list: empty\n");
} else {
struct list_head *this;
list_for_each(this, &c->erasing_list) {
struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
printk(KERN_DEBUG "erasing_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
}
}
if (list_empty(&c->erase_pending_list)) {
printk(KERN_DEBUG "erase_pending_list: empty\n");
} else {
struct list_head *this;
list_for_each(this, &c->erase_pending_list) {
struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
printk(KERN_DEBUG "erase_pending_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
}
}
if (list_empty(&c->erasable_pending_wbuf_list)) {
printk(KERN_DEBUG "erasable_pending_wbuf_list: empty\n");
} else {
struct list_head *this;
list_for_each(this, &c->erasable_pending_wbuf_list) {
struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
printk(KERN_DEBUG "erasable_pending_wbuf_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
}
}
if (list_empty(&c->free_list)) {
printk(KERN_DEBUG "free_list: empty\n");
} else {
struct list_head *this;
list_for_each(this, &c->free_list) {
struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
printk(KERN_DEBUG "free_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
}
}
if (list_empty(&c->bad_list)) {
printk(KERN_DEBUG "bad_list: empty\n");
} else {
struct list_head *this;
list_for_each(this, &c->bad_list) {
struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
printk(KERN_DEBUG "bad_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
}
}
if (list_empty(&c->bad_used_list)) {
printk(KERN_DEBUG "bad_used_list: empty\n");
} else {
struct list_head *this;
list_for_each(this, &c->bad_used_list) {
struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
printk(KERN_DEBUG "bad_used_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
}
}
}
#endif /* CONFIG_JFFS2_FS_DEBUG */
int jffs2_thread_should_wake(struct jffs2_sb_info *c)
{
int ret = 0;
uint32_t dirty;
if (c->unchecked_size) {
D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
c->unchecked_size, c->checked_ino));
return 1;
}
/* dirty_size counts blocks on the erase_pending_list;
* those blocks are also counted in c->nr_erasing_blocks.
* Once a block is actually erased, it is no longer counted as dirty_space,
* but it is still counted in c->nr_erasing_blocks, so we add c->erasing_size
* and then subtract c->nr_erasing_blocks * c->sector_size again.
* Blocks on the erasable_list are counted in dirty_size, but not in c->nr_erasing_blocks.
* This helps us to force gc and eventually pick a clean block to spread the load.
*/
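/* Illustrative worked example (hypothetical numbers): with sector_size 0x10000
and two fully-dirty blocks on the erase_pending_list (nr_erasing_blocks = 2,
erasing_size = 0), dirty_size = 0x30000 gives
dirty = 0x30000 + 0 - 2 * 0x10000 = 0x10000,
i.e. only the dirty space outside blocks already bound for erasure is
compared against c->nospc_dirty_size below. */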
dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;
if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
(dirty > c->nospc_dirty_size))
ret = 1;
D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x: %s\n",
c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, ret?"yes":"no"));
return ret;
}
| gpl-2.0 |
build3r/Builderremote | jni/portaudio/hostapi/wasapi/pa_win_wasapi.c | 1 | 154124 | /*
* Portable Audio I/O Library WASAPI implementation
* Copyright (c) 2006-2010 David Viens, Dmitry Kostjuchenko
*
* Based on the Open Source API proposed by Ross Bencina
* Copyright (c) 1999-2002 Ross Bencina, Phil Burk
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files
* (the "Software"), to deal in the Software without restriction,
* including without limitation the rights to use, copy, modify, merge,
* publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/*
* The text above constitutes the entire PortAudio license; however,
* the PortAudio community also makes the following non-binding requests:
*
* Any person wishing to distribute modifications to the Software is
* requested to send the modifications to the original developer so that
* they can be incorporated into the canonical version. It is also
* requested that these non-binding requests be included along with the
* license above.
*/
/** @file
@ingroup hostapi_src
@brief WASAPI implementation of support for a host API.
@note pa_wasapi currently requires minimum VC 2005, and the latest Vista SDK
*/
#define WIN32_LEAN_AND_MEAN // exclude rare headers
#include <windows.h>
#include <stdio.h>
#include <process.h>
#include <assert.h>
#include <mmsystem.h>
#include <mmreg.h> // must be before other Wasapi headers
#if defined(_MSC_VER) && (_MSC_VER >= 1400)
#include <Avrt.h>
#define COBJMACROS
#include <Audioclient.h>
#include <endpointvolume.h>
#define INITGUID // Avoid additional linkage of static libs, excessive code will be optimized out by the compiler
#include <mmdeviceapi.h>
#include <functiondiscoverykeys.h>
#include <devicetopology.h> // Used to get IKsJackDescription interface
#undef INITGUID
#endif
#ifndef __MWERKS__
#include <malloc.h>
#include <memory.h>
#endif /* __MWERKS__ */
#include "pa_util.h"
#include "pa_allocation.h"
#include "pa_hostapi.h"
#include "pa_stream.h"
#include "pa_cpuload.h"
#include "pa_process.h"
#include "pa_debugprint.h"
#include "pa_win_wasapi.h"
#ifndef NTDDI_VERSION
#undef WINVER
#undef _WIN32_WINNT
#define WINVER 0x0600 // VISTA
#define _WIN32_WINNT WINVER
#ifndef _AVRT_ //<< fix MinGW dummy compile by defining missing type: AVRT_PRIORITY
typedef enum _AVRT_PRIORITY
{
AVRT_PRIORITY_LOW = -1,
AVRT_PRIORITY_NORMAL,
AVRT_PRIORITY_HIGH,
AVRT_PRIORITY_CRITICAL
} AVRT_PRIORITY, *PAVRT_PRIORITY;
#endif
#include <basetyps.h> // << for IID/CLSID
#include <rpcsal.h>
#include <sal.h>
#ifndef __LPCGUID_DEFINED__
#define __LPCGUID_DEFINED__
typedef const GUID *LPCGUID;
#endif
#ifndef PROPERTYKEY_DEFINED
#define PROPERTYKEY_DEFINED
typedef struct _tagpropertykey
{
GUID fmtid;
DWORD pid;
} PROPERTYKEY;
#endif
#ifdef __midl_proxy
#define __MIDL_CONST
#else
#define __MIDL_CONST const
#endif
#ifdef WIN64
#include <wtypes.h>
typedef LONG NTSTATUS;
#define FASTCALL
#include <oleidl.h>
#include <objidl.h>
#else
typedef struct _BYTE_BLOB
{
unsigned long clSize;
unsigned char abData[ 1 ];
} BYTE_BLOB;
typedef /* [unique] */ __RPC_unique_pointer BYTE_BLOB *UP_BYTE_BLOB;
typedef LONGLONG REFERENCE_TIME;
#define NONAMELESSUNION
#endif
#ifndef WAVE_FORMAT_IEEE_FLOAT
#define WAVE_FORMAT_IEEE_FLOAT 0x0003 // 32-bit floating-point
#endif
#ifndef __MINGW_EXTENSION
#if defined(__GNUC__) || defined(__GNUG__)
#define __MINGW_EXTENSION __extension__
#else
#define __MINGW_EXTENSION
#endif
#endif
#include <sdkddkver.h>
#include <propkeydef.h>
#define COBJMACROS
#define INITGUID // Avoid additional linkage of static libs, excessive code will be optimized out by the compiler
#include <audioclient.h>
#include <mmdeviceapi.h>
#include <endpointvolume.h>
#include <functiondiscoverykeys.h>
#include <devicetopology.h> // Used to get IKsJackDescription interface
#undef INITGUID
#endif // NTDDI_VERSION
#ifndef GUID_SECT
#define GUID_SECT
#endif
#define __DEFINE_GUID(n,l,w1,w2,b1,b2,b3,b4,b5,b6,b7,b8) static const GUID n GUID_SECT = {l,w1,w2,{b1,b2,b3,b4,b5,b6,b7,b8}}
#define __DEFINE_IID(n,l,w1,w2,b1,b2,b3,b4,b5,b6,b7,b8) static const IID n GUID_SECT = {l,w1,w2,{b1,b2,b3,b4,b5,b6,b7,b8}}
#define __DEFINE_CLSID(n,l,w1,w2,b1,b2,b3,b4,b5,b6,b7,b8) static const CLSID n GUID_SECT = {l,w1,w2,{b1,b2,b3,b4,b5,b6,b7,b8}}
#define PA_DEFINE_CLSID(className, l, w1, w2, b1, b2, b3, b4, b5, b6, b7, b8) \
__DEFINE_CLSID(pa_CLSID_##className, 0x##l, 0x##w1, 0x##w2, 0x##b1, 0x##b2, 0x##b3, 0x##b4, 0x##b5, 0x##b6, 0x##b7, 0x##b8)
#define PA_DEFINE_IID(interfaceName, l, w1, w2, b1, b2, b3, b4, b5, b6, b7, b8) \
__DEFINE_IID(pa_IID_##interfaceName, 0x##l, 0x##w1, 0x##w2, 0x##b1, 0x##b2, 0x##b3, 0x##b4, 0x##b5, 0x##b6, 0x##b7, 0x##b8)
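// For reference, an invocation such as
// PA_DEFINE_IID(IAudioClient, 1cb9ad4c, dbfa, 4c32, b1, 78, c2, f5, 68, a7, 03, b2)
// expands through __DEFINE_IID to:
// static const IID pa_IID_IAudioClient GUID_SECT =
// {0x1cb9ad4c, 0xdbfa, 0x4c32, {0xb1, 0x78, 0xc2, 0xf5, 0x68, 0xa7, 0x03, 0xb2}};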
// "1CB9AD4C-DBFA-4c32-B178-C2F568A703B2"
PA_DEFINE_IID(IAudioClient, 1cb9ad4c, dbfa, 4c32, b1, 78, c2, f5, 68, a7, 03, b2);
// "1BE09788-6894-4089-8586-9A2A6C265AC5"
PA_DEFINE_IID(IMMEndpoint, 1be09788, 6894, 4089, 85, 86, 9a, 2a, 6c, 26, 5a, c5);
// "A95664D2-9614-4F35-A746-DE8DB63617E6"
PA_DEFINE_IID(IMMDeviceEnumerator, a95664d2, 9614, 4f35, a7, 46, de, 8d, b6, 36, 17, e6);
// "BCDE0395-E52F-467C-8E3D-C4579291692E"
PA_DEFINE_CLSID(IMMDeviceEnumerator,bcde0395, e52f, 467c, 8e, 3d, c4, 57, 92, 91, 69, 2e);
// "F294ACFC-3146-4483-A7BF-ADDCA7C260E2"
PA_DEFINE_IID(IAudioRenderClient, f294acfc, 3146, 4483, a7, bf, ad, dc, a7, c2, 60, e2);
// "C8ADBD64-E71E-48a0-A4DE-185C395CD317"
PA_DEFINE_IID(IAudioCaptureClient, c8adbd64, e71e, 48a0, a4, de, 18, 5c, 39, 5c, d3, 17);
// *2A07407E-6497-4A18-9787-32F79BD0D98F* Or this??
PA_DEFINE_IID(IDeviceTopology, 2A07407E, 6497, 4A18, 97, 87, 32, f7, 9b, d0, d9, 8f);
// *AE2DE0E4-5BCA-4F2D-AA46-5D13F8FDB3A9*
PA_DEFINE_IID(IPart, AE2DE0E4, 5BCA, 4F2D, aa, 46, 5d, 13, f8, fd, b3, a9);
// *4509F757-2D46-4637-8E62-CE7DB944F57B*
PA_DEFINE_IID(IKsJackDescription, 4509F757, 2D46, 4637, 8e, 62, ce, 7d, b9, 44, f5, 7b);
// Media formats:
__DEFINE_GUID(pa_KSDATAFORMAT_SUBTYPE_PCM, 0x00000001, 0x0000, 0x0010, 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71 );
__DEFINE_GUID(pa_KSDATAFORMAT_SUBTYPE_ADPCM, 0x00000002, 0x0000, 0x0010, 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71 );
__DEFINE_GUID(pa_KSDATAFORMAT_SUBTYPE_IEEE_FLOAT, 0x00000003, 0x0000, 0x0010, 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71 );
/* use CreateThread for CYGWIN/Windows Mobile, _beginthreadex for all others */
#if !defined(__CYGWIN__) && !defined(_WIN32_WCE)
#define CREATE_THREAD(PROC) (HANDLE)_beginthreadex( NULL, 0, (PROC), stream, 0, &stream->dwThreadId )
#define PA_THREAD_FUNC static unsigned WINAPI
#define PA_THREAD_ID unsigned
#else
#define CREATE_THREAD(PROC) CreateThread( NULL, 0, (PROC), stream, 0, &stream->dwThreadId )
#define PA_THREAD_FUNC static DWORD WINAPI
#define PA_THREAD_ID DWORD
#endif
// Thread function forward decl.
PA_THREAD_FUNC ProcThreadEvent(void *param);
PA_THREAD_FUNC ProcThreadPoll(void *param);
// Available from Windows 7
#ifndef AUDCLNT_E_BUFFER_ERROR
#define AUDCLNT_E_BUFFER_ERROR AUDCLNT_ERR(0x018)
#endif
#ifndef AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED
#define AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED AUDCLNT_ERR(0x019)
#endif
#ifndef AUDCLNT_E_INVALID_DEVICE_PERIOD
#define AUDCLNT_E_INVALID_DEVICE_PERIOD AUDCLNT_ERR(0x020)
#endif
#define MAX_STR_LEN 512
enum { S_INPUT = 0, S_OUTPUT = 1, S_COUNT = 2, S_FULLDUPLEX = 0 };
#define STATIC_ARRAY_SIZE(array) (sizeof(array)/sizeof(array[0]))
#define PRINT(x) PA_DEBUG(x);
#define PA_SKELETON_SET_LAST_HOST_ERROR( errorCode, errorText ) \
PaUtil_SetLastHostErrorInfo( paWASAPI, errorCode, errorText )
#define PA_WASAPI__IS_FULLDUPLEX(STREAM) ((STREAM)->in.client && (STREAM)->out.client)
#ifndef IF_FAILED_JUMP
#define IF_FAILED_JUMP(hr, label) if(FAILED(hr)) goto label;
#endif
#define SAFE_CLOSE(h) if ((h) != NULL) { CloseHandle((h)); (h) = NULL; }
#define SAFE_RELEASE(punk) if ((punk) != NULL) { (punk)->lpVtbl->Release((punk)); (punk) = NULL; }
// Mixer function
typedef void (*MixMonoToStereoF) (void *__to, void *__from, UINT32 count);
// AVRT is the new "multimedia scheduling stuff"
typedef BOOL (WINAPI *FAvRtCreateThreadOrderingGroup) (PHANDLE,PLARGE_INTEGER,GUID*,PLARGE_INTEGER);
typedef BOOL (WINAPI *FAvRtDeleteThreadOrderingGroup) (HANDLE);
typedef BOOL (WINAPI *FAvRtWaitOnThreadOrderingGroup) (HANDLE);
typedef HANDLE (WINAPI *FAvSetMmThreadCharacteristics) (LPCTSTR,LPDWORD);
typedef BOOL (WINAPI *FAvRevertMmThreadCharacteristics)(HANDLE);
typedef BOOL (WINAPI *FAvSetMmThreadPriority) (HANDLE,AVRT_PRIORITY);
static HMODULE hDInputDLL = 0;
FAvRtCreateThreadOrderingGroup pAvRtCreateThreadOrderingGroup = NULL;
FAvRtDeleteThreadOrderingGroup pAvRtDeleteThreadOrderingGroup = NULL;
FAvRtWaitOnThreadOrderingGroup pAvRtWaitOnThreadOrderingGroup = NULL;
FAvSetMmThreadCharacteristics pAvSetMmThreadCharacteristics = NULL;
FAvRevertMmThreadCharacteristics pAvRevertMmThreadCharacteristics = NULL;
FAvSetMmThreadPriority pAvSetMmThreadPriority = NULL;
#define _GetProc(fun, type, name) { \
fun = (type) GetProcAddress(hDInputDLL,name); \
if (fun == NULL) { \
PRINT(("GetProcAddr failed for %s" ,name)); \
return FALSE; \
} \
} \
// ------------------------------------------------------------------------------------------
/* prototypes for functions declared in this file */
#ifdef __cplusplus
extern "C"
{
#endif /* __cplusplus */
PaError PaWasapi_Initialize( PaUtilHostApiRepresentation **hostApi, PaHostApiIndex index );
#ifdef __cplusplus
}
#endif /* __cplusplus */
// dummy entry point for other compilers and sdks
// currently built using RC1 SDK (5600)
//#if _MSC_VER < 1400
//PaError PaWasapi_Initialize( PaUtilHostApiRepresentation **hostApi, PaHostApiIndex hostApiIndex )
//{
//return paNoError;
//}
//#else
// ------------------------------------------------------------------------------------------
static void Terminate( struct PaUtilHostApiRepresentation *hostApi );
static PaError IsFormatSupported( struct PaUtilHostApiRepresentation *hostApi,
const PaStreamParameters *inputParameters,
const PaStreamParameters *outputParameters,
double sampleRate );
static PaError OpenStream( struct PaUtilHostApiRepresentation *hostApi,
PaStream** s,
const PaStreamParameters *inputParameters,
const PaStreamParameters *outputParameters,
double sampleRate,
unsigned long framesPerBuffer,
PaStreamFlags streamFlags,
PaStreamCallback *streamCallback,
void *userData );
static PaError CloseStream( PaStream* stream );
static PaError StartStream( PaStream *stream );
static PaError StopStream( PaStream *stream );
static PaError AbortStream( PaStream *stream );
static PaError IsStreamStopped( PaStream *s );
static PaError IsStreamActive( PaStream *stream );
static PaTime GetStreamTime( PaStream *stream );
static double GetStreamCpuLoad( PaStream* stream );
static PaError ReadStream( PaStream* stream, void *buffer, unsigned long frames );
static PaError WriteStream( PaStream* stream, const void *buffer, unsigned long frames );
static signed long GetStreamReadAvailable( PaStream* stream );
static signed long GetStreamWriteAvailable( PaStream* stream );
// ------------------------------------------------------------------------------------------
/*
These are fields that can be gathered from IDevice and IAudioDevice PRIOR to Initialize, and
done in the first pass. I assume that neither of these will cause the driver to "load", but again,
who knows how they implement their stuff
*/
typedef struct PaWasapiDeviceInfo
{
// Device
IMMDevice *device;
// from GetId
WCHAR szDeviceID[MAX_STR_LEN];
// from GetState
DWORD state;
// Fields filled from IMMEndpoint's GetDataFlow
EDataFlow flow;
// Fields filled from IAudioDevice (_prior_ to Initialize)
// from GetDevicePeriod()
REFERENCE_TIME DefaultDevicePeriod;
REFERENCE_TIME MinimumDevicePeriod;
// from GetMixFormat
// WAVEFORMATEX *MixFormat;//needs to be CoTaskMemFree'd after use!
// Default format (setup through Control Panel by user)
WAVEFORMATEXTENSIBLE DefaultFormat;
// Formfactor
EndpointFormFactor formFactor;
}
PaWasapiDeviceInfo;
// ------------------------------------------------------------------------------------------
/* PaWasapiHostApiRepresentation - host api datastructure specific to this implementation */
typedef struct
{
PaUtilHostApiRepresentation inheritedHostApiRep;
PaUtilStreamInterface callbackStreamInterface;
PaUtilStreamInterface blockingStreamInterface;
PaUtilAllocationGroup *allocations;
/* implementation specific data goes here */
//in case we later need the synch
IMMDeviceEnumerator *enumerator;
//this is the REAL number of devices, whether they are useful to PA or not!
UINT32 deviceCount;
WCHAR defaultRenderer [MAX_STR_LEN];
WCHAR defaultCapturer [MAX_STR_LEN];
PaWasapiDeviceInfo *devInfo;
// Is true when WOW64 Vista/7 Workaround is needed
BOOL useWOW64Workaround;
}
PaWasapiHostApiRepresentation;
// ------------------------------------------------------------------------------------------
/* PaWasapiStream - a stream data structure specifically for this implementation */
typedef struct PaWasapiSubStream
{
IAudioClient *client;
WAVEFORMATEXTENSIBLE wavex;
UINT32 bufferSize;
REFERENCE_TIME device_latency;
REFERENCE_TIME period;
double latency_seconds;
UINT32 framesPerHostCallback;
AUDCLNT_SHAREMODE shareMode;
UINT32 streamFlags; // AUDCLNT_STREAMFLAGS_EVENTCALLBACK, ...
UINT32 flags;
// Buffers
UINT32 buffers; //!< number of buffers used (from host side)
UINT32 framesPerBuffer; //!< number of frames per 1 buffer
BOOL userBufferAndHostMatch;
// Used by blocking interface:
UINT32 prevTime; // time ms between calls of WriteStream
UINT32 prevSleep; // time ms to sleep from frames written in previous call
// Used for Mono >> Stereo workaround, if driver does not support it
// (in Exclusive mode WASAPI usually refuses to operate with Mono (1-ch))
void *monoBuffer; //!< pointer to buffer
UINT32 monoBufferSize; //!< buffer size in bytes
MixMonoToStereoF monoMixer; //!< pointer to mixer function
}
PaWasapiSubStream;
// ------------------------------------------------------------------------------------------
/* PaWasapiHostProcessor - redirects processing data */
typedef struct PaWasapiHostProcessor
{
PaWasapiHostProcessorCallback processor;
void *userData;
}
PaWasapiHostProcessor;
// ------------------------------------------------------------------------------------------
typedef struct PaWasapiStream
{
/* IMPLEMENT ME: rename this */
PaUtilStreamRepresentation streamRepresentation;
PaUtilCpuLoadMeasurer cpuLoadMeasurer;
PaUtilBufferProcessor bufferProcessor;
// input
PaWasapiSubStream in;
IAudioCaptureClient *cclient;
IAudioEndpointVolume *inVol;
// output
PaWasapiSubStream out;
IAudioRenderClient *rclient;
IAudioEndpointVolume *outVol;
// event handles for event-driven processing mode
HANDLE event[S_COUNT];
// buffer mode
PaUtilHostBufferSizeMode bufferMode;
// must be volatile to avoid race condition on user query while
// thread is being started
volatile BOOL running;
PA_THREAD_ID dwThreadId;
HANDLE hThread;
HANDLE hCloseRequest;
HANDLE hThreadStart; //!< signalled by thread on start
HANDLE hThreadExit; //!< signalled by thread on exit
HANDLE hBlockingOpStreamRD;
HANDLE hBlockingOpStreamWR;
// Host callback Output overrider
PaWasapiHostProcessor hostProcessOverrideOutput;
// Host callback Input overrider
PaWasapiHostProcessor hostProcessOverrideInput;
// Defines blocking/callback interface used
BOOL bBlocking;
// Av Task (MM thread management)
HANDLE hAvTask;
// Thread priority level
PaWasapiThreadPriority nThreadPriority;
}
PaWasapiStream;
// Local stream methods
static void _OnStreamStop(PaWasapiStream *stream);
static void _FinishStream(PaWasapiStream *stream);
// Local statics
static volatile BOOL g_WasapiCOMInit = FALSE;
static volatile DWORD g_WasapiInitThread = 0;
// ------------------------------------------------------------------------------------------
#define LogHostError(HRES) __LogHostError(HRES, __FUNCTION__, __FILE__, __LINE__)
static HRESULT __LogHostError(HRESULT res, const char *func, const char *file, int line)
{
const char *text = NULL;
switch (res)
{
case S_OK: return res;
case E_POINTER :text ="E_POINTER"; break;
case E_INVALIDARG :text ="E_INVALIDARG"; break;
case AUDCLNT_E_NOT_INITIALIZED :text ="AUDCLNT_E_NOT_INITIALIZED"; break;
case AUDCLNT_E_ALREADY_INITIALIZED :text ="AUDCLNT_E_ALREADY_INITIALIZED"; break;
case AUDCLNT_E_WRONG_ENDPOINT_TYPE :text ="AUDCLNT_E_WRONG_ENDPOINT_TYPE"; break;
case AUDCLNT_E_DEVICE_INVALIDATED :text ="AUDCLNT_E_DEVICE_INVALIDATED"; break;
case AUDCLNT_E_NOT_STOPPED :text ="AUDCLNT_E_NOT_STOPPED"; break;
case AUDCLNT_E_BUFFER_TOO_LARGE :text ="AUDCLNT_E_BUFFER_TOO_LARGE"; break;
case AUDCLNT_E_OUT_OF_ORDER :text ="AUDCLNT_E_OUT_OF_ORDER"; break;
case AUDCLNT_E_UNSUPPORTED_FORMAT :text ="AUDCLNT_E_UNSUPPORTED_FORMAT"; break;
case AUDCLNT_E_INVALID_SIZE :text ="AUDCLNT_E_INVALID_SIZE"; break;
case AUDCLNT_E_DEVICE_IN_USE :text ="AUDCLNT_E_DEVICE_IN_USE"; break;
case AUDCLNT_E_BUFFER_OPERATION_PENDING :text ="AUDCLNT_E_BUFFER_OPERATION_PENDING"; break;
case AUDCLNT_E_THREAD_NOT_REGISTERED :text ="AUDCLNT_E_THREAD_NOT_REGISTERED"; break;
case AUDCLNT_E_EXCLUSIVE_MODE_NOT_ALLOWED :text ="AUDCLNT_E_EXCLUSIVE_MODE_NOT_ALLOWED"; break;
case AUDCLNT_E_ENDPOINT_CREATE_FAILED :text ="AUDCLNT_E_ENDPOINT_CREATE_FAILED"; break;
case AUDCLNT_E_SERVICE_NOT_RUNNING :text ="AUDCLNT_E_SERVICE_NOT_RUNNING"; break;
case AUDCLNT_E_EVENTHANDLE_NOT_EXPECTED :text ="AUDCLNT_E_EVENTHANDLE_NOT_EXPECTED"; break;
case AUDCLNT_E_EXCLUSIVE_MODE_ONLY :text ="AUDCLNT_E_EXCLUSIVE_MODE_ONLY"; break;
case AUDCLNT_E_BUFDURATION_PERIOD_NOT_EQUAL :text ="AUDCLNT_E_BUFDURATION_PERIOD_NOT_EQUAL"; break;
case AUDCLNT_E_EVENTHANDLE_NOT_SET :text ="AUDCLNT_E_EVENTHANDLE_NOT_SET"; break;
case AUDCLNT_E_INCORRECT_BUFFER_SIZE :text ="AUDCLNT_E_INCORRECT_BUFFER_SIZE"; break;
case AUDCLNT_E_BUFFER_SIZE_ERROR :text ="AUDCLNT_E_BUFFER_SIZE_ERROR"; break;
case AUDCLNT_E_CPUUSAGE_EXCEEDED :text ="AUDCLNT_E_CPUUSAGE_EXCEEDED"; break;
case AUDCLNT_E_BUFFER_ERROR :text ="AUDCLNT_E_BUFFER_ERROR"; break;
case AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED :text ="AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED"; break;
case AUDCLNT_E_INVALID_DEVICE_PERIOD :text ="AUDCLNT_E_INVALID_DEVICE_PERIOD"; break;
case AUDCLNT_S_BUFFER_EMPTY :text ="AUDCLNT_S_BUFFER_EMPTY"; break;
case AUDCLNT_S_THREAD_ALREADY_REGISTERED :text ="AUDCLNT_S_THREAD_ALREADY_REGISTERED"; break;
case AUDCLNT_S_POSITION_STALLED :text ="AUDCLNT_S_POSITION_STALLED"; break;
// other windows common errors:
case CO_E_NOTINITIALIZED :text ="CO_E_NOTINITIALIZED: you must call CoInitialize() before Pa_OpenStream()"; break;
default:
text = "UNKNOWN ERROR";
}
PRINT(("WASAPI ERROR HRESULT: 0x%X : %s\n [FUNCTION: %s FILE: %s {LINE: %d}]\n", res, text, func, file, line));
PA_SKELETON_SET_LAST_HOST_ERROR(res, text);
return res;
}
// ------------------------------------------------------------------------------------------
#define LogPaError(PAERR) __LogPaError(PAERR, __FUNCTION__, __FILE__, __LINE__)
static PaError __LogPaError(PaError err, const char *func, const char *file, int line)
{
if (err == paNoError)
return err;
PRINT(("WASAPI ERROR PAERROR: %i : %s\n [FUNCTION: %s FILE: %s {LINE: %d}]\n", err, Pa_GetErrorText(err), func, file, line));
return err;
}
// ------------------------------------------------------------------------------------------
/*static double nano100ToMillis(REFERENCE_TIME ref)
{
// 1 nano = 0.000000001 seconds
//100 nano = 0.0000001 seconds
//100 nano = 0.0001 milliseconds
return ((double)ref)*0.0001;
}*/
// ------------------------------------------------------------------------------------------
static double nano100ToSeconds(REFERENCE_TIME ref)
{
// 1 nano = 0.000000001 seconds
//100 nano = 0.0000001 seconds
//100 nano = 0.0001 milliseconds
return ((double)ref)*0.0000001;
}
// ------------------------------------------------------------------------------------------
/*static REFERENCE_TIME MillisTonano100(double ref)
{
// 1 nano = 0.000000001 seconds
//100 nano = 0.0000001 seconds
//100 nano = 0.0001 milliseconds
return (REFERENCE_TIME)(ref/0.0001);
}*/
// ------------------------------------------------------------------------------------------
static REFERENCE_TIME SecondsTonano100(double ref)
{
// 1 nano = 0.000000001 seconds
//100 nano = 0.0000001 seconds
//100 nano = 0.0001 milliseconds
return (REFERENCE_TIME)(ref/0.0000001);
}
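// Example: SecondsTonano100(0.01) == 100000 (10 ms in 100-ns units) and
// nano100ToSeconds(100000) == 0.01, so the two conversions round-trip.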
// ------------------------------------------------------------------------------------------
// Makes Hns period from frames and sample rate
static REFERENCE_TIME MakeHnsPeriod(UINT32 nFrames, DWORD nSamplesPerSec)
{
return (REFERENCE_TIME)((10000.0 * 1000 / nSamplesPerSec * nFrames) + 0.5);
}
// ------------------------------------------------------------------------------------------
// Converts PaSampleFormat to bits per sample value
static WORD PaSampleFormatToBitsPerSample(PaSampleFormat format_id)
{
switch (format_id & ~paNonInterleaved)
{
case paFloat32:
case paInt32: return 32;
case paInt24: return 24;
case paInt16: return 16;
case paInt8:
case paUInt8: return 8;
}
return 0;
}
// ------------------------------------------------------------------------------------------
// Converts PaSampleFormat to bits per sample value
/*static WORD PaSampleFormatToBytesPerSample(PaSampleFormat format_id)
{
return PaSampleFormatToBitsPerSample(format_id) >> 3; // 'bits/8'
}*/
// ------------------------------------------------------------------------------------------
// Converts Hns period into number of frames
static UINT32 MakeFramesFromHns(REFERENCE_TIME hnsPeriod, UINT32 nSamplesPerSec)
{
UINT32 nFrames = (UINT32)( // frames =
1.0 * hnsPeriod * // hns *
nSamplesPerSec / // (frames / s) /
1000 / // (ms / s) /
10000 // (hns / s) /
+ 0.5 // rounding
);
return nFrames;
}
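// Example: MakeHnsPeriod(480, 48000) == 100000 (10 ms in 100-ns units) and
// MakeFramesFromHns(100000, 48000) == 480, so for exact values the two
// helpers are inverses of each other.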
// Aligning function type
typedef UINT32 (*ALIGN_FUNC) (UINT32 v, UINT32 align);
// ------------------------------------------------------------------------------------------
// Aligns 'v' backwards
static UINT32 ALIGN_BWD(UINT32 v, UINT32 align)
{
return ((v - (align ? v % align : 0)));
}
// ------------------------------------------------------------------------------------------
// Aligns 'v' forward
static UINT32 ALIGN_FWD(UINT32 v, UINT32 align)
{
UINT32 remainder = (align ? (v % align) : 0);
if (remainder == 0)
return v;
return v + (align - remainder);
}
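// Examples: ALIGN_BWD(1000, 128) == 896, ALIGN_FWD(1000, 128) == 1024;
// already-aligned values are returned unchanged by both functions.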
// ------------------------------------------------------------------------------------------
// Aligns WASAPI buffer to a 128-byte packet boundary. HD Audio will fail to play if the buffer
// is misaligned. This problem was solved in Windows 7, where AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED
// is returned, although we must align for Vista anyway.
static UINT32 AlignFramesPerBuffer(UINT32 nFrames, UINT32 nSamplesPerSec, UINT32 nBlockAlign,
ALIGN_FUNC pAlignFunc)
{
#define HDA_PACKET_SIZE (128)
long frame_bytes = nFrames * nBlockAlign;
long packets;
// align to packet size
frame_bytes = pAlignFunc(frame_bytes, HDA_PACKET_SIZE); // use ALIGN_FWD if a bigger but safer period is preferred
// at least one full packet (and thus at least 1 frame) must be available
if (frame_bytes < HDA_PACKET_SIZE)
frame_bytes = HDA_PACKET_SIZE;
nFrames = frame_bytes / nBlockAlign;
packets = frame_bytes / HDA_PACKET_SIZE;
frame_bytes = packets * HDA_PACKET_SIZE;
nFrames = frame_bytes / nBlockAlign;
return nFrames;
#undef HDA_PACKET_SIZE
}
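// Worked example (illustrative): nFrames = 441 with nBlockAlign = 4
// (16-bit stereo) gives frame_bytes = 1764; ALIGN_BWD to the 128-byte
// packet size yields 1664 bytes = 13 packets, so 416 frames are returned.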
// ------------------------------------------------------------------------------------------
static UINT32 GetFramesSleepTime(UINT32 nFrames, UINT32 nSamplesPerSec)
{
REFERENCE_TIME nDuration;
if (nSamplesPerSec == 0)
return 0;
#define REFTIMES_PER_SEC 10000000
#define REFTIMES_PER_MILLISEC 10000
// Calculate the actual duration of the allocated buffer.
nDuration = (REFERENCE_TIME)((double)REFTIMES_PER_SEC * nFrames / nSamplesPerSec);
return (UINT32)(nDuration/REFTIMES_PER_MILLISEC/2);
}
// ------------------------------------------------------------------------------------------
static UINT32 GetFramesSleepTimeMicroseconds(UINT32 nFrames, UINT32 nSamplesPerSec)
{
REFERENCE_TIME nDuration;
if (nSamplesPerSec == 0)
return 0;
#define REFTIMES_PER_SEC 10000000
#define REFTIMES_PER_MILLISEC 10000
// Calculate the actual duration of the allocated buffer.
nDuration = (REFERENCE_TIME)((double)REFTIMES_PER_SEC * nFrames / nSamplesPerSec);
return (UINT32)(nDuration/10/2);
}
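// Example for both sleep helpers above: 480 frames at 48000 Hz last 10 ms,
// so GetFramesSleepTime(480, 48000) == 5 (half the duration, in ms) and
// GetFramesSleepTimeMicroseconds(480, 48000) == 5000.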
// ------------------------------------------------------------------------------------------
static BOOL SetupAVRT()
{
hDInputDLL = LoadLibraryA("avrt.dll");
if (hDInputDLL == NULL)
return FALSE;
_GetProc(pAvRtCreateThreadOrderingGroup, FAvRtCreateThreadOrderingGroup, "AvRtCreateThreadOrderingGroup");
_GetProc(pAvRtDeleteThreadOrderingGroup, FAvRtDeleteThreadOrderingGroup, "AvRtDeleteThreadOrderingGroup");
_GetProc(pAvRtWaitOnThreadOrderingGroup, FAvRtWaitOnThreadOrderingGroup, "AvRtWaitOnThreadOrderingGroup");
_GetProc(pAvSetMmThreadCharacteristics, FAvSetMmThreadCharacteristics, "AvSetMmThreadCharacteristicsA");
_GetProc(pAvRevertMmThreadCharacteristics,FAvRevertMmThreadCharacteristics,"AvRevertMmThreadCharacteristics");
_GetProc(pAvSetMmThreadPriority, FAvSetMmThreadPriority, "AvSetMmThreadPriority");
return pAvRtCreateThreadOrderingGroup &&
pAvRtDeleteThreadOrderingGroup &&
pAvRtWaitOnThreadOrderingGroup &&
pAvSetMmThreadCharacteristics &&
pAvRevertMmThreadCharacteristics &&
pAvSetMmThreadPriority;
}
// ------------------------------------------------------------------------------------------
static void CloseAVRT()
{
if (hDInputDLL != NULL)
FreeLibrary(hDInputDLL);
hDInputDLL = NULL;
}
// ------------------------------------------------------------------------------------------
static BOOL IsWow64()
{
// http://msdn.microsoft.com/en-us/library/ms684139(VS.85).aspx
typedef BOOL (WINAPI *LPFN_ISWOW64PROCESS) (HANDLE, PBOOL);
LPFN_ISWOW64PROCESS fnIsWow64Process;
BOOL bIsWow64 = FALSE;
// IsWow64Process is not available on all supported versions of Windows.
// Use GetModuleHandle to get a handle to the DLL that contains the function
// and GetProcAddress to get a pointer to the function if available.
fnIsWow64Process = (LPFN_ISWOW64PROCESS) GetProcAddress(
GetModuleHandle(TEXT("kernel32")), TEXT("IsWow64Process"));
if (fnIsWow64Process == NULL)
return FALSE;
if (!fnIsWow64Process(GetCurrentProcess(), &bIsWow64))
return FALSE;
return bIsWow64;
}
// ------------------------------------------------------------------------------------------
typedef enum EWindowsVersion
{
WINDOWS_UNKNOWN = 0,
WINDOWS_VISTA_SERVER2008 = (1 << 0),
WINDOWS_7_SERVER2008R2 = (1 << 1),
WINDOWS_FUTURE = (1 << 2)
}
EWindowsVersion;
// Defines Windows 7/Windows Server 2008 R2 and up (future versions)
#define WINDOWS_7_SERVER2008R2_AND_UP (WINDOWS_7_SERVER2008R2|WINDOWS_FUTURE)
// The function is limited to Vista/7 mostly, as we just need to detect the Vista/WOW64
// combination in order to enable the WASAPI WOW64 workarounds.
static UINT32 GetWindowsVersion()
{
static UINT32 version = WINDOWS_UNKNOWN;
if (version == WINDOWS_UNKNOWN)
{
DWORD dwVersion = 0;
DWORD dwMajorVersion = 0;
DWORD dwMinorVersion = 0;
DWORD dwBuild = 0;
typedef DWORD (WINAPI *LPFN_GETVERSION)(VOID);
LPFN_GETVERSION fnGetVersion;
fnGetVersion = (LPFN_GETVERSION) GetProcAddress(GetModuleHandle(TEXT("kernel32")), TEXT("GetVersion"));
if (fnGetVersion == NULL)
return WINDOWS_UNKNOWN;
dwVersion = fnGetVersion();
// Get the Windows version
dwMajorVersion = (DWORD)(LOBYTE(LOWORD(dwVersion)));
dwMinorVersion = (DWORD)(HIBYTE(LOWORD(dwVersion)));
// Get the build number
if (dwVersion < 0x80000000)
dwBuild = (DWORD)(HIWORD(dwVersion));
switch (dwMajorVersion)
{
case 0:
case 1:
case 2:
case 3:
case 4:
case 5:
break; // skip lower
case 6:
switch (dwMinorVersion)
{
case 0:
version |= WINDOWS_VISTA_SERVER2008;
break;
case 1:
version |= WINDOWS_7_SERVER2008R2;
break;
default:
version |= WINDOWS_FUTURE;
}
break;
default:
version |= WINDOWS_FUTURE;
}
}
return version;
}
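// Decoding example (value is illustrative): for Windows 7 (6.1, build 7600)
// GetVersion() returns 0x1DB00106, so LOBYTE(LOWORD) == 0x06 (major),
// HIBYTE(LOWORD) == 0x01 (minor) and HIWORD == 0x1DB0 == 7600 (build),
// and the function reports WINDOWS_7_SERVER2008R2.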
// ------------------------------------------------------------------------------------------
static BOOL UseWOW64Workaround()
{
// note: the WOW64 bug is common to Windows Vista x64, thus we fall back to the safe
// poll-driven method. Windows 7 x64 seems to have the WOW64 bug fixed.
return (IsWow64() && (GetWindowsVersion() & WINDOWS_VISTA_SERVER2008));
}
// ------------------------------------------------------------------------------------------
typedef enum EMixerDir { MIX_DIR__1TO2, MIX_DIR__2TO1 } EMixerDir;
// ------------------------------------------------------------------------------------------
#define _WASAPI_MONO_TO_STEREO_MIXER_1_TO_2(TYPE)\
TYPE * __restrict to = __to;\
TYPE * __restrict from = __from;\
TYPE * __restrict end = from + count;\
while (from != end)\
{\
*to ++ = *from;\
*to ++ = *from;\
++ from;\
}
// ------------------------------------------------------------------------------------------
#define _WASAPI_MONO_TO_STEREO_MIXER_2_TO_1(TYPE)\
TYPE * __restrict to = (TYPE *)__to;\
TYPE * __restrict from = (TYPE *)__from;\
TYPE * __restrict end = to + count;\
while (to != end)\
{\
*to ++ = (TYPE)((float)(from[0] + from[1]) * 0.5f);\
from += 2;\
}
// ------------------------------------------------------------------------------------------
static void _MixMonoToStereo_1TO2_8(void *__to, void *__from, UINT32 count) { _WASAPI_MONO_TO_STEREO_MIXER_1_TO_2(BYTE); }
static void _MixMonoToStereo_1TO2_16(void *__to, void *__from, UINT32 count) { _WASAPI_MONO_TO_STEREO_MIXER_1_TO_2(short); }
static void _MixMonoToStereo_1TO2_24(void *__to, void *__from, UINT32 count) { _WASAPI_MONO_TO_STEREO_MIXER_1_TO_2(int); /* !!! int24 data is contained in 32-bit containers*/ }
static void _MixMonoToStereo_1TO2_32(void *__to, void *__from, UINT32 count) { _WASAPI_MONO_TO_STEREO_MIXER_1_TO_2(int); }
static void _MixMonoToStereo_1TO2_32f(void *__to, void *__from, UINT32 count) { _WASAPI_MONO_TO_STEREO_MIXER_1_TO_2(float); }
// ------------------------------------------------------------------------------------------
static void _MixMonoToStereo_2TO1_8(void *__to, void *__from, UINT32 count) { _WASAPI_MONO_TO_STEREO_MIXER_2_TO_1(BYTE); }
static void _MixMonoToStereo_2TO1_16(void *__to, void *__from, UINT32 count) { _WASAPI_MONO_TO_STEREO_MIXER_2_TO_1(short); }
static void _MixMonoToStereo_2TO1_24(void *__to, void *__from, UINT32 count) { _WASAPI_MONO_TO_STEREO_MIXER_2_TO_1(int); /* !!! int24 data is contained in 32-bit containers*/ }
static void _MixMonoToStereo_2TO1_32(void *__to, void *__from, UINT32 count) { _WASAPI_MONO_TO_STEREO_MIXER_2_TO_1(int); }
static void _MixMonoToStereo_2TO1_32f(void *__to, void *__from, UINT32 count) { _WASAPI_MONO_TO_STEREO_MIXER_2_TO_1(float); }
// ------------------------------------------------------------------------------------------
static MixMonoToStereoF _GetMonoToStereoMixer(PaSampleFormat format, EMixerDir dir)
{
switch (dir)
{
case MIX_DIR__1TO2:
switch (format & ~paNonInterleaved)
{
case paUInt8: return _MixMonoToStereo_1TO2_8;
case paInt16: return _MixMonoToStereo_1TO2_16;
case paInt24: return _MixMonoToStereo_1TO2_24;
case paInt32: return _MixMonoToStereo_1TO2_32;
case paFloat32: return _MixMonoToStereo_1TO2_32f;
}
break;
case MIX_DIR__2TO1:
switch (format & ~paNonInterleaved)
{
case paUInt8: return _MixMonoToStereo_2TO1_8;
case paInt16: return _MixMonoToStereo_2TO1_16;
case paInt24: return _MixMonoToStereo_2TO1_24;
case paInt32: return _MixMonoToStereo_2TO1_32;
case paFloat32: return _MixMonoToStereo_2TO1_32f;
}
break;
}
return NULL;
}
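// Usage sketch (buffer names are hypothetical): the stream setup code keeps
// the selected mixer in PaWasapiSubStream::monoMixer and invokes it per buffer:
//
// MixMonoToStereoF mixer = _GetMonoToStereoMixer(paFloat32, MIX_DIR__1TO2);
// if (mixer != NULL)
// mixer(stereoOut, monoIn, monoSampleCount); // writes 2*count samples to stereoOut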
// ------------------------------------------------------------------------------------------
PaError PaWasapi_Initialize( PaUtilHostApiRepresentation **hostApi, PaHostApiIndex hostApiIndex )
{
PaError result = paNoError;
PaWasapiHostApiRepresentation *paWasapi;
PaDeviceInfo *deviceInfoArray;
HRESULT hr = S_OK;
IMMDeviceCollection* pEndPoints = NULL;
UINT i;
if (!SetupAVRT())
{
PRINT(("WASAPI: No AVRT! (not VISTA?)"));
return paNoError;
}
/*
If COM is already initialized, CoInitializeEx will either return
S_FALSE, or RPC_E_CHANGED_MODE if it was initialized in a different
threading mode. In either case we shouldn't consider it an error
but we need to be careful to not call CoUninitialize() if
RPC_E_CHANGED_MODE was returned.
*/
hr = CoInitializeEx(NULL, COINIT_APARTMENTTHREADED);
if (FAILED(hr) && (hr != RPC_E_CHANGED_MODE))
{
PRINT(("WASAPI: failed CoInitialize"));
return paUnanticipatedHostError;
}
if (hr != RPC_E_CHANGED_MODE)
g_WasapiCOMInit = TRUE;
// Memorize the calling thread id and report a warning on Uninitialize if the calling thread
// differs, as CoInitialize must be matched by CoUninitialize on the same thread.
g_WasapiInitThread = GetCurrentThreadId();
paWasapi = (PaWasapiHostApiRepresentation *)PaUtil_AllocateMemory( sizeof(PaWasapiHostApiRepresentation) );
if (paWasapi == NULL)
{
result = paInsufficientMemory;
goto error;
}
paWasapi->allocations = PaUtil_CreateAllocationGroup();
if (paWasapi->allocations == NULL)
{
result = paInsufficientMemory;
goto error;
}
*hostApi = &paWasapi->inheritedHostApiRep;
(*hostApi)->info.structVersion = 1;
(*hostApi)->info.type = paWASAPI;
(*hostApi)->info.name = "Windows WASAPI";
(*hostApi)->info.deviceCount = 0;
(*hostApi)->info.defaultInputDevice = paNoDevice;
(*hostApi)->info.defaultOutputDevice = paNoDevice;
paWasapi->enumerator = NULL;
hr = CoCreateInstance(&pa_CLSID_IMMDeviceEnumerator, NULL, CLSCTX_INPROC_SERVER,
&pa_IID_IMMDeviceEnumerator, (void **)&paWasapi->enumerator);
IF_FAILED_JUMP(hr, error);
// getting default device ids in the eMultimedia "role"
{
{
IMMDevice *defaultRenderer = NULL;
hr = IMMDeviceEnumerator_GetDefaultAudioEndpoint(paWasapi->enumerator, eRender, eMultimedia, &defaultRenderer);
if (hr != S_OK)
{
if (hr != E_NOTFOUND)
IF_FAILED_JUMP(hr, error);
}
else
{
WCHAR *pszDeviceId = NULL;
hr = IMMDevice_GetId(defaultRenderer, &pszDeviceId);
IF_FAILED_JUMP(hr, error);
wcsncpy(paWasapi->defaultRenderer, pszDeviceId, MAX_STR_LEN-1);
CoTaskMemFree(pszDeviceId);
IMMDevice_Release(defaultRenderer);
}
}
{
IMMDevice *defaultCapturer = NULL;
hr = IMMDeviceEnumerator_GetDefaultAudioEndpoint(paWasapi->enumerator, eCapture, eMultimedia, &defaultCapturer);
if (hr != S_OK)
{
if (hr != E_NOTFOUND)
IF_FAILED_JUMP(hr, error);
}
else
{
WCHAR *pszDeviceId = NULL;
hr = IMMDevice_GetId(defaultCapturer, &pszDeviceId);
IF_FAILED_JUMP(hr, error);
wcsncpy(paWasapi->defaultCapturer, pszDeviceId, MAX_STR_LEN-1);
CoTaskMemFree(pszDeviceId);
IMMDevice_Release(defaultCapturer);
}
}
}
hr = IMMDeviceEnumerator_EnumAudioEndpoints(paWasapi->enumerator, eAll, DEVICE_STATE_ACTIVE, &pEndPoints);
IF_FAILED_JUMP(hr, error);
hr = IMMDeviceCollection_GetCount(pEndPoints, &paWasapi->deviceCount);
IF_FAILED_JUMP(hr, error);
paWasapi->devInfo = (PaWasapiDeviceInfo *)malloc(sizeof(PaWasapiDeviceInfo) * paWasapi->deviceCount);
if (paWasapi->devInfo == NULL)
{
paWasapi->deviceCount = 0; // keep Terminate() in the error path safe
result = paInsufficientMemory;
goto error;
}
for (i = 0; i < paWasapi->deviceCount; ++i)
memset(&paWasapi->devInfo[i], 0, sizeof(PaWasapiDeviceInfo));
if (paWasapi->deviceCount > 0)
{
(*hostApi)->deviceInfos = (PaDeviceInfo **)PaUtil_GroupAllocateMemory(
paWasapi->allocations, sizeof(PaDeviceInfo *) * paWasapi->deviceCount);
if ((*hostApi)->deviceInfos == NULL)
{
result = paInsufficientMemory;
goto error;
}
/* allocate all device info structs in a contiguous block */
deviceInfoArray = (PaDeviceInfo *)PaUtil_GroupAllocateMemory(
paWasapi->allocations, sizeof(PaDeviceInfo) * paWasapi->deviceCount);
if (deviceInfoArray == NULL)
{
result = paInsufficientMemory;
goto error;
}
for (i = 0; i < paWasapi->deviceCount; ++i)
{
PaDeviceInfo *deviceInfo = &deviceInfoArray[i];
deviceInfo->structVersion = 2;
deviceInfo->hostApi = hostApiIndex;
PA_DEBUG(("WASAPI: device idx: %02d\n", i));
PA_DEBUG(("WASAPI: ---------------\n"));
hr = IMMDeviceCollection_Item(pEndPoints, i, &paWasapi->devInfo[i].device);
IF_FAILED_JUMP(hr, error);
// getting ID
{
WCHAR *pszDeviceId = NULL;
hr = IMMDevice_GetId(paWasapi->devInfo[i].device, &pszDeviceId);
IF_FAILED_JUMP(hr, error);
wcsncpy(paWasapi->devInfo[i].szDeviceID, pszDeviceId, MAX_STR_LEN-1);
CoTaskMemFree(pszDeviceId);
if (lstrcmpW(paWasapi->devInfo[i].szDeviceID, paWasapi->defaultCapturer) == 0)
{// we found the default input!
(*hostApi)->info.defaultInputDevice = (*hostApi)->info.deviceCount;
}
if (lstrcmpW(paWasapi->devInfo[i].szDeviceID, paWasapi->defaultRenderer) == 0)
{// we found the default output!
(*hostApi)->info.defaultOutputDevice = (*hostApi)->info.deviceCount;
}
}
hr = IMMDevice_GetState(paWasapi->devInfo[i].device, &paWasapi->devInfo[i].state);
IF_FAILED_JUMP(hr, error);
if (paWasapi->devInfo[i].state != DEVICE_STATE_ACTIVE)
{
PRINT(("WASAPI device: %d is not currently available (state:%d)\n",i,state));
}
{
IPropertyStore *pProperty;
hr = IMMDevice_OpenPropertyStore(paWasapi->devInfo[i].device, STGM_READ, &pProperty);
IF_FAILED_JUMP(hr, error);
// "Friendly" Name
{
char *deviceName;
PROPVARIANT value;
PropVariantInit(&value);
hr = IPropertyStore_GetValue(pProperty, &PKEY_Device_FriendlyName, &value);
IF_FAILED_JUMP(hr, error);
deviceInfo->name = NULL;
deviceName = (char *)PaUtil_GroupAllocateMemory(paWasapi->allocations, MAX_STR_LEN + 1);
if (deviceName == NULL)
{
result = paInsufficientMemory;
goto error;
}
if (value.pwszVal)
wcstombs(deviceName, value.pwszVal, MAX_STR_LEN-1);
else
_snprintf(deviceName, MAX_STR_LEN-1, "baddev%d", i);
deviceInfo->name = deviceName;
PropVariantClear(&value);
PA_DEBUG(("WASAPI:%d| name[%s]\n", i, deviceInfo->name));
}
// Default format
{
PROPVARIANT value;
PropVariantInit(&value);
hr = IPropertyStore_GetValue(pProperty, &PKEY_AudioEngine_DeviceFormat, &value);
IF_FAILED_JUMP(hr, error);
memcpy(&paWasapi->devInfo[i].DefaultFormat, value.blob.pBlobData, min(sizeof(paWasapi->devInfo[i].DefaultFormat), value.blob.cbSize));
// cleanup
PropVariantClear(&value);
}
// Formfactor
{
PROPVARIANT value;
PropVariantInit(&value);
hr = IPropertyStore_GetValue(pProperty, &PKEY_AudioEndpoint_FormFactor, &value);
IF_FAILED_JUMP(hr, error);
// set
#if defined(DUMMYUNIONNAME) && defined(NONAMELESSUNION)
// avoid breaking strict-aliasing rules in a line such as: (EndpointFormFactor)(*((UINT *)(((WORD *)&value.wReserved3)+1)));
UINT v;
memcpy(&v, (((WORD *)&value.wReserved3)+1), sizeof(v));
paWasapi->devInfo[i].formFactor = (EndpointFormFactor)v;
#else
paWasapi->devInfo[i].formFactor = (EndpointFormFactor)value.uintVal;
#endif
PA_DEBUG(("WASAPI:%d| form-factor[%d]\n", i, paWasapi->devInfo[i].formFactor));
// cleanup
PropVariantClear(&value);
}
SAFE_RELEASE(pProperty);
}
// Endpoint data
{
IMMEndpoint *endpoint = NULL;
hr = IMMDevice_QueryInterface(paWasapi->devInfo[i].device, &pa_IID_IMMEndpoint, (void **)&endpoint);
if (SUCCEEDED(hr))
{
hr = IMMEndpoint_GetDataFlow(endpoint, &paWasapi->devInfo[i].flow);
SAFE_RELEASE(endpoint);
}
}
// Getting a temporary IAudioClient for more fields
// we make sure NOT to call Initialize yet!
{
IAudioClient *tmpClient = NULL;
hr = IMMDevice_Activate(paWasapi->devInfo[i].device, &pa_IID_IAudioClient,
CLSCTX_INPROC_SERVER, NULL, (void **)&tmpClient);
IF_FAILED_JUMP(hr, error);
hr = IAudioClient_GetDevicePeriod(tmpClient,
&paWasapi->devInfo[i].DefaultDevicePeriod,
&paWasapi->devInfo[i].MinimumDevicePeriod);
IF_FAILED_JUMP(hr, error);
//hr = tmpClient->GetMixFormat(&paWasapi->devInfo[i].MixFormat);
// Release client
SAFE_RELEASE(tmpClient);
if (hr != S_OK)
{
//davidv: this happened with my hardware, previously for that same device in DirectSound:
//Digital Output (Realtek AC'97 Audio)'s GUID: {0x38f2cf50,0x7b4c,0x4740,0x86,0xeb,0xd4,0x38,0x66,0xd8,0xc8, 0x9f}
//so something must be _really_ wrong with this device, TODO handle this better. We kind of need GetMixFormat
LogHostError(hr);
goto error;
}
}
// we can now fill in portaudio device data
deviceInfo->maxInputChannels = 0;
deviceInfo->maxOutputChannels = 0;
deviceInfo->defaultSampleRate = paWasapi->devInfo[i].DefaultFormat.Format.nSamplesPerSec;
switch (paWasapi->devInfo[i].flow)
{
case eRender: {
deviceInfo->maxOutputChannels = paWasapi->devInfo[i].DefaultFormat.Format.nChannels;
deviceInfo->defaultHighOutputLatency = nano100ToSeconds(paWasapi->devInfo[i].DefaultDevicePeriod);
deviceInfo->defaultLowOutputLatency = nano100ToSeconds(paWasapi->devInfo[i].MinimumDevicePeriod);
PA_DEBUG(("WASAPI:%d| def.SR[%d] max.CH[%d] latency{hi[%f] lo[%f]}\n", i, (UINT32)deviceInfo->defaultSampleRate,
deviceInfo->maxOutputChannels, (float)deviceInfo->defaultHighOutputLatency, (float)deviceInfo->defaultLowOutputLatency));
break;}
case eCapture: {
deviceInfo->maxInputChannels = paWasapi->devInfo[i].DefaultFormat.Format.nChannels;
deviceInfo->defaultHighInputLatency = nano100ToSeconds(paWasapi->devInfo[i].DefaultDevicePeriod);
deviceInfo->defaultLowInputLatency = nano100ToSeconds(paWasapi->devInfo[i].MinimumDevicePeriod);
PA_DEBUG(("WASAPI:%d| def.SR[%d] max.CH[%d] latency{hi[%f] lo[%f]}\n", i, (UINT32)deviceInfo->defaultSampleRate,
deviceInfo->maxInputChannels, (float)deviceInfo->defaultHighInputLatency, (float)deviceInfo->defaultLowInputLatency));
break; }
default:
PRINT(("WASAPI:%d| bad Data Flow!\n", i));
//continue; // do not skip from list, allow to initialize
break;
}
(*hostApi)->deviceInfos[i] = deviceInfo;
++(*hostApi)->info.deviceCount;
}
}
(*hostApi)->Terminate = Terminate;
(*hostApi)->OpenStream = OpenStream;
(*hostApi)->IsFormatSupported = IsFormatSupported;
PaUtil_InitializeStreamInterface( &paWasapi->callbackStreamInterface, CloseStream, StartStream,
StopStream, AbortStream, IsStreamStopped, IsStreamActive,
GetStreamTime, GetStreamCpuLoad,
PaUtil_DummyRead, PaUtil_DummyWrite,
PaUtil_DummyGetReadAvailable, PaUtil_DummyGetWriteAvailable );
PaUtil_InitializeStreamInterface( &paWasapi->blockingStreamInterface, CloseStream, StartStream,
StopStream, AbortStream, IsStreamStopped, IsStreamActive,
GetStreamTime, PaUtil_DummyGetCpuLoad,
ReadStream, WriteStream, GetStreamReadAvailable, GetStreamWriteAvailable );
// find out whether the platform workaround is required
paWasapi->useWOW64Workaround = UseWOW64Workaround();
SAFE_RELEASE(pEndPoints);
PRINT(("WASAPI: initialized ok\n"));
return paNoError;
error:
PRINT(("WASAPI: failed %s error[%d|%s]\n", __FUNCTION__, result, Pa_GetErrorText(result)));
SAFE_RELEASE(pEndPoints);
Terminate((PaUtilHostApiRepresentation *)paWasapi);
return result;
}
// ------------------------------------------------------------------------------------------
static void Terminate( PaUtilHostApiRepresentation *hostApi )
{
UINT i;
PaWasapiHostApiRepresentation *paWasapi = (PaWasapiHostApiRepresentation*)hostApi;
if (paWasapi == NULL)
return;
// Release IMMDeviceEnumerator
SAFE_RELEASE(paWasapi->enumerator);
for (i = 0; i < paWasapi->deviceCount; ++i)
{
PaWasapiDeviceInfo *info = &paWasapi->devInfo[i];
SAFE_RELEASE(info->device);
//if (info->MixFormat)
// CoTaskMemFree(info->MixFormat);
}
free(paWasapi->devInfo);
if (paWasapi->allocations)
{
PaUtil_FreeAllAllocations(paWasapi->allocations);
PaUtil_DestroyAllocationGroup(paWasapi->allocations);
}
PaUtil_FreeMemory(paWasapi);
// Close AVRT
CloseAVRT();
// Uninitialize COM (by checking the calling thread we make sure we won't uninitialize
// the user's COM if uninitialization is mistakenly requested from a thread other than
// the one that initialized it)
if (g_WasapiCOMInit)
{
DWORD calling_thread_id = GetCurrentThreadId();
if (g_WasapiInitThread != calling_thread_id)
{
PRINT(("WASAPI: failed CoUninitializes calling thread[%d] does not match initializing thread[%d]\n",
calling_thread_id, g_WasapiInitThread));
}
else
{
CoUninitialize();
}
}
}
// ------------------------------------------------------------------------------------------
static PaWasapiHostApiRepresentation *_GetHostApi(PaError *_error)
{
PaError error;
PaUtilHostApiRepresentation *pApi;
if ((error = PaUtil_GetHostApiRepresentation(&pApi, paWASAPI)) != paNoError)
{
if (_error != NULL)
(*_error) = error;
return NULL;
}
return (PaWasapiHostApiRepresentation *)pApi;
}
// ------------------------------------------------------------------------------------------
int PaWasapi_GetDeviceDefaultFormat( void *pFormat, unsigned int nFormatSize, PaDeviceIndex nDevice )
{
PaError ret;
PaWasapiHostApiRepresentation *paWasapi;
UINT32 size;
PaDeviceIndex index;
if (pFormat == NULL)
return paBadBufferPtr;
if (nFormatSize <= 0)
return paBufferTooSmall;
// Get API
paWasapi = _GetHostApi(&ret);
if (paWasapi == NULL)
return ret;
// Get device index
ret = PaUtil_DeviceIndexToHostApiDeviceIndex(&index, nDevice, &paWasapi->inheritedHostApiRep);
if (ret != paNoError)
return ret;
// Validate index
if ((UINT32)index >= paWasapi->deviceCount)
return paInvalidDevice;
size = min(nFormatSize, (UINT32)sizeof(paWasapi->devInfo[ index ].DefaultFormat));
memcpy(pFormat, &paWasapi->devInfo[ index ].DefaultFormat, size);
return size;
}
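// Usage sketch (assumes PortAudio is initialized and 'device' is a valid
// PaDeviceIndex):
//
// WAVEFORMATEXTENSIBLE format;
// int bytes = PaWasapi_GetDeviceDefaultFormat(&format, sizeof(format), device);
// // 'bytes' is the number of bytes copied on success, or a negative
// // PaError code on failure.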
// ------------------------------------------------------------------------------------------
int PaWasapi_GetDeviceRole( PaDeviceIndex nDevice )
{
PaError ret;
PaDeviceIndex index;
// Get API
PaWasapiHostApiRepresentation *paWasapi = _GetHostApi(&ret);
if (paWasapi == NULL)
return paNotInitialized;
// Get device index
ret = PaUtil_DeviceIndexToHostApiDeviceIndex(&index, nDevice, &paWasapi->inheritedHostApiRep);
if (ret != paNoError)
return ret;
// Validate index
if ((UINT32)index >= paWasapi->deviceCount)
return paInvalidDevice;
return paWasapi->devInfo[ index ].formFactor;
}
// ------------------------------------------------------------------------------------------
PaError PaWasapi_GetFramesPerHostBuffer( PaStream *pStream, unsigned int *nInput, unsigned int *nOutput )
{
PaWasapiStream *stream = (PaWasapiStream *)pStream;
if (stream == NULL)
return paBadStreamPtr;
if (nInput != NULL)
(*nInput) = stream->in.framesPerHostCallback;
if (nOutput != NULL)
(*nOutput) = stream->out.framesPerHostCallback;
return paNoError;
}
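/* Illustrative usage sketch (assumes 'stream' is a PaStream* already opened on this
host API; the surrounding variable names are hypothetical):

    unsigned int inFrames = 0, outFrames = 0;
    if (PaWasapi_GetFramesPerHostBuffer(stream, &inFrames, &outFrames) == paNoError)
        printf("host buffers: in=%u out=%u frames\n", inFrames, outFrames);

Either output pointer may be NULL if only one direction is of interest. */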
// ------------------------------------------------------------------------------------------
static void LogWAVEFORMATEXTENSIBLE(const WAVEFORMATEXTENSIBLE *in)
{
const WAVEFORMATEX *old = (WAVEFORMATEX *)in;
switch (old->wFormatTag)
{
case WAVE_FORMAT_EXTENSIBLE: {
PRINT(("wFormatTag =WAVE_FORMAT_EXTENSIBLE\n"));
if (IsEqualGUID(&in->SubFormat, &pa_KSDATAFORMAT_SUBTYPE_IEEE_FLOAT))
{
PRINT(("SubFormat =KSDATAFORMAT_SUBTYPE_IEEE_FLOAT\n"));
}
else
if (IsEqualGUID(&in->SubFormat, &pa_KSDATAFORMAT_SUBTYPE_PCM))
{
PRINT(("SubFormat =KSDATAFORMAT_SUBTYPE_PCM\n"));
}
else
{
PRINT(("SubFormat =CUSTOM GUID{%d:%d:%d:%d%d%d%d%d%d%d%d}\n",
in->SubFormat.Data1,
in->SubFormat.Data2,
in->SubFormat.Data3,
(int)in->SubFormat.Data4[0],
(int)in->SubFormat.Data4[1],
(int)in->SubFormat.Data4[2],
(int)in->SubFormat.Data4[3],
(int)in->SubFormat.Data4[4],
(int)in->SubFormat.Data4[5],
(int)in->SubFormat.Data4[6],
(int)in->SubFormat.Data4[7]));
}
PRINT(("Samples.wValidBitsPerSample =%d\n", in->Samples.wValidBitsPerSample));
PRINT(("dwChannelMask =0x%X\n",in->dwChannelMask));
break; }
case WAVE_FORMAT_PCM: PRINT(("wFormatTag =WAVE_FORMAT_PCM\n")); break;
case WAVE_FORMAT_IEEE_FLOAT: PRINT(("wFormatTag =WAVE_FORMAT_IEEE_FLOAT\n")); break;
default:
PRINT(("wFormatTag =UNKNOWN(%d)\n",old->wFormatTag)); break;
}
PRINT(("nChannels =%d\n",old->nChannels));
PRINT(("nSamplesPerSec =%d\n",old->nSamplesPerSec));
PRINT(("nAvgBytesPerSec=%d\n",old->nAvgBytesPerSec));
PRINT(("nBlockAlign =%d\n",old->nBlockAlign));
PRINT(("wBitsPerSample =%d\n",old->wBitsPerSample));
PRINT(("cbSize =%d\n",old->cbSize));
}
// ------------------------------------------------------------------------------------------
static PaSampleFormat WaveToPaFormat(const WAVEFORMATEXTENSIBLE *in)
{
const WAVEFORMATEX *old = (WAVEFORMATEX *)in;
switch (old->wFormatTag)
{
case WAVE_FORMAT_EXTENSIBLE: {
if (IsEqualGUID(&in->SubFormat, &pa_KSDATAFORMAT_SUBTYPE_IEEE_FLOAT))
{
if (in->Samples.wValidBitsPerSample == 32)
return paFloat32;
}
else
if (IsEqualGUID(&in->SubFormat, &pa_KSDATAFORMAT_SUBTYPE_PCM))
{
switch (old->wBitsPerSample)
{
case 32: return paInt32;
case 24: return paInt24;
case 8: return paUInt8;
case 16: return paInt16;
}
}
break; }
case WAVE_FORMAT_IEEE_FLOAT:
return paFloat32;
case WAVE_FORMAT_PCM: {
switch (old->wBitsPerSample)
{
case 32: return paInt32;
case 24: return paInt24;
case 8: return paUInt8;
case 16: return paInt16;
}
break; }
}
return paCustomFormat;
}
// ------------------------------------------------------------------------------------------
static PaError MakeWaveFormatFromParams(WAVEFORMATEXTENSIBLE *wavex, const PaStreamParameters *params,
double sampleRate)
{
WORD bitsPerSample;
WAVEFORMATEX *old;
DWORD channelMask = 0;
PaWasapiStreamInfo *streamInfo = (PaWasapiStreamInfo *)params->hostApiSpecificStreamInfo;
// Get user assigned channel mask
if ((streamInfo != NULL) && (streamInfo->flags & paWinWasapiUseChannelMask))
channelMask = streamInfo->channelMask;
// Convert PaSampleFormat to bits per sample
if ((bitsPerSample = PaSampleFormatToBitsPerSample(params->sampleFormat)) == 0)
return paSampleFormatNotSupported;
memset(wavex, 0, sizeof(*wavex));
old = (WAVEFORMATEX *)wavex;
old->nChannels = (WORD)params->channelCount;
old->nSamplesPerSec = (DWORD)sampleRate;
if ((old->wBitsPerSample = bitsPerSample) > 16)
{
old->wBitsPerSample = 32; // 20 or 24 bits must go in 32 bit containers (ints)
}
old->nBlockAlign = (old->nChannels * (old->wBitsPerSample/8));
old->nAvgBytesPerSec = (old->nSamplesPerSec * old->nBlockAlign);
// WAVEFORMATEX
if ((params->channelCount <= 2) && ((bitsPerSample == 16) || (bitsPerSample == 8)))
{
old->cbSize = 0;
old->wFormatTag = WAVE_FORMAT_PCM;
}
// WAVEFORMATEXTENSIBLE
else
{
old->wFormatTag = WAVE_FORMAT_EXTENSIBLE;
old->cbSize = sizeof(WAVEFORMATEXTENSIBLE) - sizeof(WAVEFORMATEX);
if ((params->sampleFormat & ~paNonInterleaved) == paFloat32)
wavex->SubFormat = pa_KSDATAFORMAT_SUBTYPE_IEEE_FLOAT;
else
wavex->SubFormat = pa_KSDATAFORMAT_SUBTYPE_PCM;
wavex->Samples.wValidBitsPerSample = bitsPerSample; //no extra padding!
// Set channel mask
if (channelMask != 0)
{
wavex->dwChannelMask = channelMask;
}
else
{
switch (params->channelCount)
{
case 1: wavex->dwChannelMask = KSAUDIO_SPEAKER_MONO; break;
case 2: wavex->dwChannelMask = KSAUDIO_SPEAKER_STEREO; break;
case 3: wavex->dwChannelMask = KSAUDIO_SPEAKER_STEREO|SPEAKER_LOW_FREQUENCY; break;
case 4: wavex->dwChannelMask = KSAUDIO_SPEAKER_QUAD; break;
case 5: wavex->dwChannelMask = KSAUDIO_SPEAKER_QUAD|SPEAKER_LOW_FREQUENCY; break;
#ifdef KSAUDIO_SPEAKER_5POINT1_SURROUND
case 6: wavex->dwChannelMask = KSAUDIO_SPEAKER_5POINT1_SURROUND; break;
#else
case 6: wavex->dwChannelMask = KSAUDIO_SPEAKER_5POINT1; break;
#endif
#ifdef KSAUDIO_SPEAKER_5POINT1_SURROUND
case 7: wavex->dwChannelMask = KSAUDIO_SPEAKER_5POINT1_SURROUND|SPEAKER_BACK_CENTER; break;
#else
case 7: wavex->dwChannelMask = KSAUDIO_SPEAKER_5POINT1|SPEAKER_BACK_CENTER; break;
#endif
#ifdef KSAUDIO_SPEAKER_7POINT1_SURROUND
case 8: wavex->dwChannelMask = KSAUDIO_SPEAKER_7POINT1_SURROUND; break;
#else
case 8: wavex->dwChannelMask = KSAUDIO_SPEAKER_7POINT1; break;
#endif
default: wavex->dwChannelMask = 0;
}
}
}
return paNoError;
}
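/* Worked example of the conversion above (numbers only, for orientation):
params = { channelCount = 6, sampleFormat = paFloat32 } at sampleRate = 48000 yields
wFormatTag = WAVE_FORMAT_EXTENSIBLE, wBitsPerSample = 32, nBlockAlign = 6 * 32/8 = 24,
nAvgBytesPerSec = 48000 * 24 = 1152000, SubFormat = IEEE_FLOAT and
dwChannelMask = KSAUDIO_SPEAKER_5POINT1(_SURROUND). */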
// ------------------------------------------------------------------------------------------
/*static void wasapiFillWFEXT( WAVEFORMATEXTENSIBLE* pwfext, PaSampleFormat sampleFormat, double sampleRate, int channelCount)
{
PA_DEBUG(( "sampleFormat = %lx\n" , sampleFormat ));
PA_DEBUG(( "sampleRate = %f\n" , sampleRate ));
PA_DEBUG(( "chanelCount = %d\n", channelCount ));
pwfext->Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
pwfext->Format.nChannels = (WORD)channelCount;
pwfext->Format.nSamplesPerSec = (DWORD)sampleRate;
if(channelCount == 1)
pwfext->dwChannelMask = KSAUDIO_SPEAKER_DIRECTOUT;
else
pwfext->dwChannelMask = KSAUDIO_SPEAKER_STEREO;
if(sampleFormat == paFloat32)
{
pwfext->Format.nBlockAlign = (WORD)(channelCount * 4);
pwfext->Format.wBitsPerSample = 32;
pwfext->Format.cbSize = sizeof(WAVEFORMATEXTENSIBLE)-sizeof(WAVEFORMATEX);
pwfext->Samples.wValidBitsPerSample = 32;
pwfext->SubFormat = pa_KSDATAFORMAT_SUBTYPE_IEEE_FLOAT;
}
else if(sampleFormat == paInt32)
{
pwfext->Format.nBlockAlign = (WORD)(channelCount * 4);
pwfext->Format.wBitsPerSample = 32;
pwfext->Format.cbSize = sizeof(WAVEFORMATEXTENSIBLE)-sizeof(WAVEFORMATEX);
pwfext->Samples.wValidBitsPerSample = 32;
pwfext->SubFormat = pa_KSDATAFORMAT_SUBTYPE_PCM;
}
else if(sampleFormat == paInt24)
{
pwfext->Format.nBlockAlign = (WORD)(channelCount * 4);
pwfext->Format.wBitsPerSample = 32; // 24-bit in 32-bit int container
pwfext->Format.cbSize = sizeof(WAVEFORMATEXTENSIBLE)-sizeof(WAVEFORMATEX);
pwfext->Samples.wValidBitsPerSample = 24;
pwfext->SubFormat = pa_KSDATAFORMAT_SUBTYPE_PCM;
}
else if(sampleFormat == paInt16)
{
pwfext->Format.nBlockAlign = (WORD)(channelCount * 2);
pwfext->Format.wBitsPerSample = 16;
pwfext->Format.cbSize = sizeof(WAVEFORMATEXTENSIBLE)-sizeof(WAVEFORMATEX);
pwfext->Samples.wValidBitsPerSample = 16;
pwfext->SubFormat = pa_KSDATAFORMAT_SUBTYPE_PCM;
}
pwfext->Format.nAvgBytesPerSec = pwfext->Format.nSamplesPerSec * pwfext->Format.nBlockAlign;
}*/
// ------------------------------------------------------------------------------------------
static PaError GetClosestFormat(IAudioClient *myClient, double sampleRate,
const PaStreamParameters *_params, AUDCLNT_SHAREMODE shareMode, WAVEFORMATEXTENSIBLE *outWavex,
BOOL output)
{
PaError answer = paInvalidSampleRate;
WAVEFORMATEX *sharedClosestMatch = NULL;
HRESULT hr = !S_OK;
PaStreamParameters params = (*_params);
/* It was noticed that 24-bit input produces no output even though the device accepts
this format. To work around the issue we ask for 32 bits and let the PA converters
turn host 32-bit data into 24-bit for user space. The bug concerns Vista; if
Windows 7 supports 24 bits for input, please report to the PortAudio developers
so Windows 7 can be excluded.
*/
if ((params.sampleFormat == paInt24) && (output == FALSE))
params.sampleFormat = paFloat32;
MakeWaveFormatFromParams(outWavex, &params, sampleRate);
hr = IAudioClient_IsFormatSupported(myClient, shareMode, &outWavex->Format, (shareMode == AUDCLNT_SHAREMODE_SHARED ? &sharedClosestMatch : NULL));
if (hr == S_OK)
answer = paFormatIsSupported;
else
if (sharedClosestMatch)
{
WORD bitsPerSample;
WAVEFORMATEXTENSIBLE *ext = (WAVEFORMATEXTENSIBLE*)sharedClosestMatch;
GUID subf_guid = GUID_NULL;
if (sharedClosestMatch->wFormatTag == WAVE_FORMAT_EXTENSIBLE)
{
memcpy(outWavex, sharedClosestMatch, sizeof(WAVEFORMATEXTENSIBLE));
subf_guid = ext->SubFormat;
}
else
memcpy(outWavex, sharedClosestMatch, sizeof(WAVEFORMATEX));
CoTaskMemFree(sharedClosestMatch);
// Make supported by default
answer = paFormatIsSupported;
// Validate SampleRate
if ((DWORD)sampleRate != outWavex->Format.nSamplesPerSec)
return paInvalidSampleRate;
// Validate Channel count
if ((WORD)params.channelCount != outWavex->Format.nChannels)
{
// If mono was requested but the driver does not support 1 channel, we use an internal
// workaround: a tiny software mixer that exposes a 1-channel buffer to the user
// but mixes to/from 2 channels for the device buffer
if ((params.channelCount == 1) && (outWavex->Format.nChannels == 2))
return paFormatIsSupported;
else
return paInvalidChannelCount;
}
// Validate Sample format
if ((bitsPerSample = PaSampleFormatToBitsPerSample(params.sampleFormat)) == 0)
return paSampleFormatNotSupported;
// Validate Sample format: bit size (WASAPI does not limit 'bit size')
//if (bitsPerSample != outWavex->Format.wBitsPerSample)
// return paSampleFormatNotSupported;
// Validate Sample format: paFloat32 (WASAPI does not limit 'bit type')
//if ((params->sampleFormat == paFloat32) && (subf_guid != KSDATAFORMAT_SUBTYPE_IEEE_FLOAT))
// return paSampleFormatNotSupported;
// Validate Sample format: paInt32 (WASAPI does not limit 'bit type')
//if ((params->sampleFormat == paInt32) && (subf_guid != KSDATAFORMAT_SUBTYPE_PCM))
// return paSampleFormatNotSupported;
}
else
{
static const int BestToWorst[] = { paFloat32, paInt24, paInt16 };
int i;
// Try the stereo combination; the built-in mono-stereo mixer will be used then
if (params.channelCount == 1)
{
WAVEFORMATEXTENSIBLE stereo = { 0 };
PaStreamParameters stereo_params = params;
stereo_params.channelCount = 2;
MakeWaveFormatFromParams(&stereo, &stereo_params, sampleRate);
hr = IAudioClient_IsFormatSupported(myClient, shareMode, &stereo.Format, (shareMode == AUDCLNT_SHAREMODE_SHARED ? &sharedClosestMatch : NULL));
if (hr == S_OK)
{
memcpy(outWavex, &stereo, sizeof(WAVEFORMATEXTENSIBLE));
CoTaskMemFree(sharedClosestMatch);
return (answer = paFormatIsSupported);
}
// Try selecting suitable sample type
for (i = 0; i < STATIC_ARRAY_SIZE(BestToWorst); ++i)
{
WAVEFORMATEXTENSIBLE sample = { 0 };
PaStreamParameters sample_params = stereo_params;
sample_params.sampleFormat = BestToWorst[i];
MakeWaveFormatFromParams(&sample, &sample_params, sampleRate);
hr = IAudioClient_IsFormatSupported(myClient, shareMode, &sample.Format, (shareMode == AUDCLNT_SHAREMODE_SHARED ? &sharedClosestMatch : NULL));
if (hr == S_OK)
{
memcpy(outWavex, &sample, sizeof(WAVEFORMATEXTENSIBLE));
CoTaskMemFree(sharedClosestMatch);
return (answer = paFormatIsSupported);
}
}
}
// Try selecting suitable sample type
for (i = 0; i < STATIC_ARRAY_SIZE(BestToWorst); ++i)
{
WAVEFORMATEXTENSIBLE spfmt = { 0 };
PaStreamParameters spfmt_params = params;
spfmt_params.sampleFormat = BestToWorst[i];
MakeWaveFormatFromParams(&spfmt, &spfmt_params, sampleRate);
hr = IAudioClient_IsFormatSupported(myClient, shareMode, &spfmt.Format, (shareMode == AUDCLNT_SHAREMODE_SHARED ? &sharedClosestMatch : NULL));
if (hr == S_OK)
{
memcpy(outWavex, &spfmt, sizeof(WAVEFORMATEXTENSIBLE));
CoTaskMemFree(sharedClosestMatch);
answer = paFormatIsSupported;
break;
}
}
// Nothing helped
LogHostError(hr);
}
return answer;
}
// ------------------------------------------------------------------------------------------
static PaError IsStreamParamsValid(struct PaUtilHostApiRepresentation *hostApi,
const PaStreamParameters *inputParameters,
const PaStreamParameters *outputParameters,
double sampleRate)
{
if (hostApi == NULL)
return paHostApiNotFound;
if ((UINT32)sampleRate == 0)
return paInvalidSampleRate;
if (inputParameters != NULL)
{
/* all standard sample formats are supported by the buffer adapter,
this implementation doesn't support any custom sample formats */
if (inputParameters->sampleFormat & paCustomFormat)
return paSampleFormatNotSupported;
/* unless alternate device specification is supported, reject the use of
paUseHostApiSpecificDeviceSpecification */
if (inputParameters->device == paUseHostApiSpecificDeviceSpecification)
return paInvalidDevice;
/* check that input device can support inputChannelCount */
if (inputParameters->channelCount > hostApi->deviceInfos[ inputParameters->device ]->maxInputChannels)
return paInvalidChannelCount;
/* validate inputStreamInfo */
if (inputParameters->hostApiSpecificStreamInfo)
{
PaWasapiStreamInfo *inputStreamInfo = (PaWasapiStreamInfo *)inputParameters->hostApiSpecificStreamInfo;
if ((inputStreamInfo->size != sizeof(PaWasapiStreamInfo)) ||
(inputStreamInfo->version != 1) ||
(inputStreamInfo->hostApiType != paWASAPI))
{
return paIncompatibleHostApiSpecificStreamInfo;
}
}
return paNoError;
}
if (outputParameters != NULL)
{
/* all standard sample formats are supported by the buffer adapter,
this implementation doesn't support any custom sample formats */
if (outputParameters->sampleFormat & paCustomFormat)
return paSampleFormatNotSupported;
/* unless alternate device specification is supported, reject the use of
paUseHostApiSpecificDeviceSpecification */
if (outputParameters->device == paUseHostApiSpecificDeviceSpecification)
return paInvalidDevice;
/* check that output device can support outputChannelCount */
if (outputParameters->channelCount > hostApi->deviceInfos[ outputParameters->device ]->maxOutputChannels)
return paInvalidChannelCount;
/* validate outputStreamInfo */
if(outputParameters->hostApiSpecificStreamInfo)
{
PaWasapiStreamInfo *outputStreamInfo = (PaWasapiStreamInfo *)outputParameters->hostApiSpecificStreamInfo;
if ((outputStreamInfo->size != sizeof(PaWasapiStreamInfo)) ||
(outputStreamInfo->version != 1) ||
(outputStreamInfo->hostApiType != paWASAPI))
{
return paIncompatibleHostApiSpecificStreamInfo;
}
}
return paNoError;
}
return (inputParameters || outputParameters ? paNoError : paInternalError);
}
// ------------------------------------------------------------------------------------------
static PaError IsFormatSupported( struct PaUtilHostApiRepresentation *hostApi,
const PaStreamParameters *inputParameters,
const PaStreamParameters *outputParameters,
double sampleRate )
{
IAudioClient *tmpClient = NULL;
PaWasapiHostApiRepresentation *paWasapi = (PaWasapiHostApiRepresentation*)hostApi;
PaWasapiStreamInfo *inputStreamInfo = NULL, *outputStreamInfo = NULL;
// Validate PaStreamParameters
PaError error;
if ((error = IsStreamParamsValid(hostApi, inputParameters, outputParameters, sampleRate)) != paNoError)
return error;
if (inputParameters != NULL)
{
WAVEFORMATEXTENSIBLE wavex;
HRESULT hr;
PaError answer;
AUDCLNT_SHAREMODE shareMode = AUDCLNT_SHAREMODE_SHARED;
inputStreamInfo = (PaWasapiStreamInfo *)inputParameters->hostApiSpecificStreamInfo;
if (inputStreamInfo && (inputStreamInfo->flags & paWinWasapiExclusive))
shareMode = AUDCLNT_SHAREMODE_EXCLUSIVE;
hr = IMMDevice_Activate(paWasapi->devInfo[inputParameters->device].device,
&pa_IID_IAudioClient, CLSCTX_INPROC_SERVER, NULL, (void **)&tmpClient);
if (hr != S_OK)
{
LogHostError(hr);
return paInvalidDevice;
}
answer = GetClosestFormat(tmpClient, sampleRate, inputParameters, shareMode, &wavex, FALSE);
SAFE_RELEASE(tmpClient);
if (answer != paFormatIsSupported)
return answer;
}
if (outputParameters != NULL)
{
HRESULT hr;
WAVEFORMATEXTENSIBLE wavex;
PaError answer;
AUDCLNT_SHAREMODE shareMode = AUDCLNT_SHAREMODE_SHARED;
outputStreamInfo = (PaWasapiStreamInfo *)outputParameters->hostApiSpecificStreamInfo;
if (outputStreamInfo && (outputStreamInfo->flags & paWinWasapiExclusive))
shareMode = AUDCLNT_SHAREMODE_EXCLUSIVE;
hr = IMMDevice_Activate(paWasapi->devInfo[outputParameters->device].device,
&pa_IID_IAudioClient, CLSCTX_INPROC_SERVER, NULL, (void **)&tmpClient);
if (hr != S_OK)
{
LogHostError(hr);
return paInvalidDevice;
}
answer = GetClosestFormat(tmpClient, sampleRate, outputParameters, shareMode, &wavex, TRUE);
SAFE_RELEASE(tmpClient);
if (answer != paFormatIsSupported)
return answer;
}
return paFormatIsSupported;
}
// ------------------------------------------------------------------------------------------
static PaUint32 PaUtil_GetFramesPerHostBuffer(PaUint32 userFramesPerBuffer, PaTime suggestedLatency, double sampleRate, PaUint32 TimerJitterMs)
{
PaUint32 frames = userFramesPerBuffer + max( userFramesPerBuffer, (PaUint32)(suggestedLatency * sampleRate) );
frames += (PaUint32)((sampleRate * 0.001) * TimerJitterMs);
return frames;
}
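/* Worked example of the formula above: userFramesPerBuffer = 512,
suggestedLatency = 0.02 s, sampleRate = 48000, TimerJitterMs = 0 gives
frames = 512 + max(512, 960) + 0 = 1472, i.e. roughly the user buffer plus the
requested latency. */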
// ------------------------------------------------------------------------------------------
static void _RecalculateBuffersCount(PaWasapiSubStream *sub, UINT32 userFramesPerBuffer, UINT32 framesPerLatency, BOOL fullDuplex)
{
// Count buffers (must be at least 1)
sub->buffers = (userFramesPerBuffer ? framesPerLatency / userFramesPerBuffer : 0);
if (sub->buffers == 0)
sub->buffers = 1;
// Determine the number of buffers used:
// - full-duplex mode leads to a period difference, thus only 1;
// - input mode, only 1, as WASAPI allows extraction of only 1 packet;
// - for Shared mode we use double buffering.
if ((sub->shareMode == AUDCLNT_SHAREMODE_EXCLUSIVE) || fullDuplex)
{
// Exclusive mode does not allow more than 1 buffer to be used with the Event interface,
// i.e. the GetBuffer call must acquire the max buffer size and all of it must be processed.
if (sub->streamFlags & AUDCLNT_STREAMFLAGS_EVENTCALLBACK)
sub->userBufferAndHostMatch = 1;
// Use paUtilBoundedHostBufferSize because exclusive mode would otherwise starve
// and produce bad audio quality
sub->buffers = 1;
}
}
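/* Worked example: userFramesPerBuffer = 512 with framesPerLatency = 2048 in Shared
polling mode (not full-duplex) gives buffers = 2048 / 512 = 4; Exclusive or
full-duplex mode forces buffers = 1 as explained above. */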
// ------------------------------------------------------------------------------------------
static void _CalculateAlignedPeriod(PaWasapiSubStream *pSub, UINT32 *nFramesPerLatency,
ALIGN_FUNC pAlignFunc)
{
// Align frames to the HD Audio packet size of 128 bytes, for Exclusive mode only.
// Not aligning on Windows Vista causes an Event timeout, whereas Windows 7 returns
// the AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED error so the buffer can be realigned.
// Aligning is necessary for Exclusive mode only, where audio data is fed directly
// to the hardware.
if (pSub->shareMode == AUDCLNT_SHAREMODE_EXCLUSIVE)
{
(*nFramesPerLatency) = AlignFramesPerBuffer((*nFramesPerLatency),
pSub->wavex.Format.nSamplesPerSec, pSub->wavex.Format.nBlockAlign, pAlignFunc);
}
// Calculate period
pSub->period = MakeHnsPeriod((*nFramesPerLatency), pSub->wavex.Format.nSamplesPerSec);
}
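/* Worked example (assuming MakeHnsPeriod converts frames to 100-ns units roughly as
frames * 10^7 / rate): 480 frames at 48000 Hz make a period of 100000 hns, i.e. 10 ms;
in Exclusive mode the frame count is first aligned to the 128-byte HD Audio packet size. */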
// ------------------------------------------------------------------------------------------
static HRESULT CreateAudioClient(PaWasapiStream *pStream, PaWasapiSubStream *pSub,
PaWasapiDeviceInfo *pInfo, const PaStreamParameters *params, UINT32 framesPerLatency,
double sampleRate, BOOL blocking, BOOL output, BOOL fullDuplex, PaError *pa_error)
{
PaError error;
HRESULT hr;
const UINT32 userFramesPerBuffer = framesPerLatency;
IAudioClient *audioClient = NULL;
// Validate parameters
if (!pSub || !pInfo || !params)
return E_POINTER;
if ((UINT32)sampleRate == 0)
return E_INVALIDARG;
// Get the audio client
hr = IMMDevice_Activate(pInfo->device, &pa_IID_IAudioClient, CLSCTX_ALL, NULL, (void **)&audioClient);
if (hr != S_OK)
{
LogHostError(hr);
goto done;
}
// Get closest format
if ((error = GetClosestFormat(audioClient, sampleRate, params, pSub->shareMode, &pSub->wavex, output)) != paFormatIsSupported)
{
if (pa_error)
(*pa_error) = error;
LogHostError(hr = AUDCLNT_E_UNSUPPORTED_FORMAT);
goto done; // fail, format not supported
}
// Check for Mono <<>> Stereo workaround
if ((params->channelCount == 1) && (pSub->wavex.Format.nChannels == 2))
{
if (blocking)
{
LogHostError(hr = AUDCLNT_E_UNSUPPORTED_FORMAT);
goto done; // fail, blocking mode not supported
}
// select mixer
pSub->monoMixer = _GetMonoToStereoMixer(WaveToPaFormat(&pSub->wavex), (pInfo->flow == eRender ? MIX_DIR__1TO2 : MIX_DIR__2TO1));
if (pSub->monoMixer == NULL)
{
LogHostError(hr = AUDCLNT_E_UNSUPPORTED_FORMAT);
goto done; // fail, no mixer for format
}
}
#if 0
// Add suggested latency
framesPerLatency += MakeFramesFromHns(SecondsTonano100(params->suggestedLatency), pSub->wavex.Format.nSamplesPerSec);
#else
// Calculate host buffer size
if ((pSub->shareMode != AUDCLNT_SHAREMODE_EXCLUSIVE) &&
(!pSub->streamFlags || ((pSub->streamFlags & AUDCLNT_STREAMFLAGS_EVENTCALLBACK) == 0)))
{
framesPerLatency = PaUtil_GetFramesPerHostBuffer(userFramesPerBuffer,
params->suggestedLatency, pSub->wavex.Format.nSamplesPerSec, 0/*,
(pSub->streamFlags & AUDCLNT_STREAMFLAGS_EVENTCALLBACK ? 0 : 1)*/);
}
else
{
REFERENCE_TIME overall;
// Work 1:1 with the user buffer (only polling allows using >1)
framesPerLatency += MakeFramesFromHns(SecondsTonano100(params->suggestedLatency), pSub->wavex.Format.nSamplesPerSec);
// Use polling if the overall latency is >= 21.33ms (as polling allows using 100% CPU
// in a callback) or if the user specified a latency parameter
overall = MakeHnsPeriod(framesPerLatency, pSub->wavex.Format.nSamplesPerSec);
if ((overall >= (106667*2)/*21.33ms*/) || ((INT32)(params->suggestedLatency*100000.0) != 0/*0.01 msec granularity*/))
{
framesPerLatency = PaUtil_GetFramesPerHostBuffer(userFramesPerBuffer,
params->suggestedLatency, pSub->wavex.Format.nSamplesPerSec, 0/*,
(streamFlags & AUDCLNT_STREAMFLAGS_EVENTCALLBACK ? 0 : 1)*/);
// Use Polling interface
pSub->streamFlags &= ~AUDCLNT_STREAMFLAGS_EVENTCALLBACK;
PRINT(("WASAPI: CreateAudioClient: forcing POLL mode\n"));
}
}
#endif
// For full-duplex output resize buffer to be the same as for input
if (output && fullDuplex)
framesPerLatency = pStream->in.framesPerHostCallback;
// Avoid 0 frames
if (framesPerLatency == 0)
framesPerLatency = MakeFramesFromHns(pInfo->DefaultDevicePeriod, pSub->wavex.Format.nSamplesPerSec);
// Calculate aligned period
_CalculateAlignedPeriod(pSub, &framesPerLatency, ALIGN_BWD);
/*! Enforce the default device period as a minimum in Shared mode to avoid bad
audio quality. In Exclusive mode only the minimum device period is enforced,
as alignment would suffer otherwise.
*/
if (pSub->shareMode == AUDCLNT_SHAREMODE_SHARED)
{
if (pSub->period < pInfo->DefaultDevicePeriod)
{
pSub->period = pInfo->DefaultDevicePeriod;
// Recalculate aligned period
framesPerLatency = MakeFramesFromHns(pSub->period, pSub->wavex.Format.nSamplesPerSec);
_CalculateAlignedPeriod(pSub, &framesPerLatency, ALIGN_BWD);
}
}
else
{
if (pSub->period < pInfo->MinimumDevicePeriod)
{
pSub->period = pInfo->MinimumDevicePeriod;
// Recalculate aligned period
framesPerLatency = MakeFramesFromHns(pSub->period, pSub->wavex.Format.nSamplesPerSec);
_CalculateAlignedPeriod(pSub, &framesPerLatency, ALIGN_FWD);
}
}
/*! Windows 7 does not allow setting the latency lower than the minimal device period
and returns the error AUDCLNT_E_INVALID_DEVICE_PERIOD. Under Vista we enforce the
same behavior manually so that all platforms behave uniformly.
*/
{
/*! AUDCLNT_E_BUFFER_SIZE_ERROR: Applies to Windows 7 and later.
Indicates that the buffer duration value requested by an exclusive-mode client is
out of range. The requested duration value for pull mode must not be greater than
500 milliseconds; for push mode the duration value must not be greater than 2 seconds.
*/
if (pSub->shareMode == AUDCLNT_SHAREMODE_EXCLUSIVE)
{
static const REFERENCE_TIME MAX_BUFFER_EVENT_DURATION = 500 * 10000;
static const REFERENCE_TIME MAX_BUFFER_POLL_DURATION = 2000 * 10000;
if (pSub->streamFlags & AUDCLNT_STREAMFLAGS_EVENTCALLBACK) // pull mode, max 500ms
{
if (pSub->period > MAX_BUFFER_EVENT_DURATION)
{
pSub->period = MAX_BUFFER_EVENT_DURATION;
// Recalculate aligned period
framesPerLatency = MakeFramesFromHns(pSub->period, pSub->wavex.Format.nSamplesPerSec);
_CalculateAlignedPeriod(pSub, &framesPerLatency, ALIGN_BWD);
}
}
else // push mode, max 2000ms
{
if (pSub->period > MAX_BUFFER_POLL_DURATION)
{
pSub->period = MAX_BUFFER_POLL_DURATION;
// Recalculate aligned period
framesPerLatency = MakeFramesFromHns(pSub->period, pSub->wavex.Format.nSamplesPerSec);
_CalculateAlignedPeriod(pSub, &framesPerLatency, ALIGN_BWD);
}
}
}
}
// Open the stream and associate it with an audio session
hr = IAudioClient_Initialize(audioClient,
pSub->shareMode,
pSub->streamFlags,
pSub->period,
(pSub->shareMode == AUDCLNT_SHAREMODE_EXCLUSIVE ? pSub->period : 0),
&pSub->wavex.Format,
NULL);
/*! WASAPI is tricky with large device buffers: sometimes 2000ms can be allocated,
sometimes less. There is no known guaranteed level, so we retry, decreasing the
buffer by 100ms per try.
*/
while ((hr == E_OUTOFMEMORY) && (pSub->period > (100 * 10000)))
{
PRINT(("WASAPI: CreateAudioClient: decreasing buffer size to %d milliseconds\n", (pSub->period / 10000)));
// Decrease by 100ms and try again
pSub->period -= (100 * 10000);
// Recalculate aligned period
framesPerLatency = MakeFramesFromHns(pSub->period, pSub->wavex.Format.nSamplesPerSec);
_CalculateAlignedPeriod(pSub, &framesPerLatency, ALIGN_BWD);
// Release the previous allocations
SAFE_RELEASE(audioClient);
// Create a new audio client
hr = IMMDevice_Activate(pInfo->device, &pa_IID_IAudioClient, CLSCTX_ALL, NULL, (void**)&audioClient);
if (hr != S_OK)
{
LogHostError(hr);
goto done;
}
// Open the stream and associate it with an audio session
hr = IAudioClient_Initialize(audioClient,
pSub->shareMode,
pSub->streamFlags,
pSub->period,
(pSub->shareMode == AUDCLNT_SHAREMODE_EXCLUSIVE ? pSub->period : 0),
&pSub->wavex.Format,
NULL);
}
/*! WASAPI buffer size failure: fall back to the default size.
*/
if (hr == AUDCLNT_E_BUFFER_SIZE_ERROR)
{
// Use default
pSub->period = pInfo->DefaultDevicePeriod;
PRINT(("WASAPI: CreateAudioClient: correcting buffer size to device default\n"));
// Release the previous allocations
SAFE_RELEASE(audioClient);
// Create a new audio client
hr = IMMDevice_Activate(pInfo->device, &pa_IID_IAudioClient, CLSCTX_ALL, NULL, (void**)&audioClient);
if (hr != S_OK)
{
LogHostError(hr);
goto done;
}
// Open the stream and associate it with an audio session
hr = IAudioClient_Initialize(audioClient,
pSub->shareMode,
pSub->streamFlags,
pSub->period,
(pSub->shareMode == AUDCLNT_SHAREMODE_EXCLUSIVE ? pSub->period : 0),
&pSub->wavex.Format,
NULL);
}
/*! Handle the case where the requested buffer size is not aligned. Can be triggered
on Windows 7 and up. Should never actually be triggered, as we always align
buffers with _CalculateAlignedPeriod.
*/
if (hr == AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED)
{
UINT32 frames = 0;
// Get the next aligned frame
hr = IAudioClient_GetBufferSize(audioClient, &frames);
if (hr != S_OK)
{
LogHostError(hr);
goto done;
}
PRINT(("WASAPI: CreateAudioClient: aligning buffer size to % frames\n", frames));
// Release the previous allocations
SAFE_RELEASE(audioClient);
// Create a new audio client
hr = IMMDevice_Activate(pInfo->device, &pa_IID_IAudioClient, CLSCTX_ALL, NULL, (void**)&audioClient);
if (hr != S_OK)
{
LogHostError(hr);
goto done;
}
// Get closest format
if ((error = GetClosestFormat(audioClient, sampleRate, params, pSub->shareMode, &pSub->wavex, output)) != paFormatIsSupported)
{
if (pa_error)
(*pa_error) = error;
LogHostError(hr = AUDCLNT_E_UNSUPPORTED_FORMAT); // fail, format not supported
goto done;
}
// Check for Mono >> Stereo workaround
if ((params->channelCount == 1) && (pSub->wavex.Format.nChannels == 2))
{
if (blocking)
{
LogHostError(hr = AUDCLNT_E_UNSUPPORTED_FORMAT);
goto done; // fail, blocking mode not supported
}
// Select mixer
pSub->monoMixer = _GetMonoToStereoMixer(WaveToPaFormat(&pSub->wavex), (pInfo->flow == eRender ? MIX_DIR__1TO2 : MIX_DIR__2TO1));
if (pSub->monoMixer == NULL)
{
LogHostError(hr = AUDCLNT_E_UNSUPPORTED_FORMAT);
goto done; // fail, no mixer for format
}
}
// Calculate period
pSub->period = MakeHnsPeriod(frames, pSub->wavex.Format.nSamplesPerSec);
// Open the stream and associate it with an audio session
hr = IAudioClient_Initialize(audioClient,
pSub->shareMode,
pSub->streamFlags,
pSub->period,
(pSub->shareMode == AUDCLNT_SHAREMODE_EXCLUSIVE ? pSub->period : 0),
&pSub->wavex.Format,
NULL);
if (hr != S_OK)
{
LogHostError(hr);
goto done;
}
}
else
if (hr != S_OK)
{
LogHostError(hr);
goto done;
}
// Set client
pSub->client = audioClient;
IAudioClient_AddRef(pSub->client);
// Recalculate buffers count
_RecalculateBuffersCount(pSub,
userFramesPerBuffer,
MakeFramesFromHns(pSub->period, pSub->wavex.Format.nSamplesPerSec),
fullDuplex);
done:
// Clean up
SAFE_RELEASE(audioClient);
return hr;
}
// ------------------------------------------------------------------------------------------
static PaError OpenStream( struct PaUtilHostApiRepresentation *hostApi,
PaStream** s,
const PaStreamParameters *inputParameters,
const PaStreamParameters *outputParameters,
double sampleRate,
unsigned long framesPerBuffer,
PaStreamFlags streamFlags,
PaStreamCallback *streamCallback,
void *userData )
{
PaError result = paNoError;
HRESULT hr;
PaWasapiHostApiRepresentation *paWasapi = (PaWasapiHostApiRepresentation*)hostApi;
PaWasapiStream *stream = NULL;
int inputChannelCount, outputChannelCount;
PaSampleFormat inputSampleFormat, outputSampleFormat;
PaSampleFormat hostInputSampleFormat, hostOutputSampleFormat;
PaWasapiStreamInfo *inputStreamInfo = NULL, *outputStreamInfo = NULL;
PaWasapiDeviceInfo *info = NULL;
UINT32 maxBufferSize;
PaTime buffer_latency;
ULONG framesPerHostCallback;
PaUtilHostBufferSizeMode bufferMode;
const BOOL fullDuplex = ((inputParameters != NULL) && (outputParameters != NULL));
// validate PaStreamParameters
if ((result = IsStreamParamsValid(hostApi, inputParameters, outputParameters, sampleRate)) != paNoError)
return LogPaError(result);
// Validate platform specific flags
if ((streamFlags & paPlatformSpecificFlags) != 0)
{
LogPaError(result = paInvalidFlag); /* unexpected platform specific flag */
goto error;
}
// Allocate memory for PaWasapiStream
if ((stream = (PaWasapiStream *)PaUtil_AllocateMemory(sizeof(PaWasapiStream))) == NULL)
{
LogPaError(result = paInsufficientMemory);
goto error;
}
// Default thread priority is Audio: for exclusive mode we will use Pro Audio.
stream->nThreadPriority = eThreadPriorityAudio;
// Set default number of frames: paFramesPerBufferUnspecified
if (framesPerBuffer == paFramesPerBufferUnspecified)
{
UINT32 framesPerBufferIn = 0, framesPerBufferOut = 0;
if (inputParameters != NULL)
{
info = &paWasapi->devInfo[inputParameters->device];
framesPerBufferIn = MakeFramesFromHns(info->DefaultDevicePeriod, (UINT32)sampleRate);
}
if (outputParameters != NULL)
{
info = &paWasapi->devInfo[outputParameters->device];
framesPerBufferOut = MakeFramesFromHns(info->DefaultDevicePeriod, (UINT32)sampleRate);
}
// choosing maximum default size
framesPerBuffer = max(framesPerBufferIn, framesPerBufferOut);
}
if (framesPerBuffer == 0)
framesPerBuffer = ((UINT32)sampleRate / 100) * 2;
// Try create device: Input
if (inputParameters != NULL)
{
inputChannelCount = inputParameters->channelCount;
inputSampleFormat = inputParameters->sampleFormat;
inputStreamInfo = (PaWasapiStreamInfo *)inputParameters->hostApiSpecificStreamInfo;
info = &paWasapi->devInfo[inputParameters->device];
stream->in.flags = (inputStreamInfo ? inputStreamInfo->flags : 0);
// Select Exclusive/Shared mode
stream->in.shareMode = AUDCLNT_SHAREMODE_SHARED;
if ((inputStreamInfo != NULL) && (inputStreamInfo->flags & paWinWasapiExclusive))
{
// Boost thread priority
stream->nThreadPriority = eThreadPriorityProAudio;
// Make Exclusive
stream->in.shareMode = AUDCLNT_SHAREMODE_EXCLUSIVE;
}
// If user provided explicit thread priority level, use it
if ((inputStreamInfo != NULL) && (inputStreamInfo->flags & paWinWasapiThreadPriority))
{
if ((inputStreamInfo->threadPriority > eThreadPriorityNone) &&
(inputStreamInfo->threadPriority <= eThreadPriorityWindowManager))
stream->nThreadPriority = inputStreamInfo->threadPriority;
}
// Choose processing mode
stream->in.streamFlags = (stream->in.shareMode == AUDCLNT_SHAREMODE_EXCLUSIVE ? AUDCLNT_STREAMFLAGS_EVENTCALLBACK : 0);
if (paWasapi->useWOW64Workaround)
stream->in.streamFlags = 0; // polling interface
else
if (streamCallback == NULL)
stream->in.streamFlags = 0; // polling interface
else
if ((inputStreamInfo != NULL) && (inputStreamInfo->flags & paWinWasapiPolling))
stream->in.streamFlags = 0; // polling interface
else
if (fullDuplex)
stream->in.streamFlags = 0; // polling interface is implemented for full-duplex mode also
// Create Audio client
hr = CreateAudioClient(stream, &stream->in, info, inputParameters, framesPerBuffer/*framesPerLatency*/,
sampleRate, (streamCallback == NULL), FALSE, fullDuplex, &result);
if (hr != S_OK)
{
LogPaError(result = paInvalidDevice);
goto error;
}
LogWAVEFORMATEXTENSIBLE(&stream->in.wavex);
// Get closest format
hostInputSampleFormat = PaUtil_SelectClosestAvailableFormat( WaveToPaFormat(&stream->in.wavex), inputSampleFormat );
// Create volume mgr
stream->inVol = NULL;
/*hr = info->device->Activate(
__uuidof(IAudioEndpointVolume), CLSCTX_INPROC_SERVER, NULL,
(void**)&stream->inVol);
if (hr != S_OK)
return paInvalidDevice;*/
// Set user-side custom host processor
if ((inputStreamInfo != NULL) &&
(inputStreamInfo->flags & paWinWasapiRedirectHostProcessor))
{
stream->hostProcessOverrideInput.processor = inputStreamInfo->hostProcessorInput;
stream->hostProcessOverrideInput.userData = userData;
}
// Get the max possible buffer size, to check that it is not less than what we requested
hr = IAudioClient_GetBufferSize(stream->in.client, &maxBufferSize);
if (hr != S_OK)
{
LogHostError(hr);
LogPaError(result = paInvalidDevice);
goto error;
}
// Use the max size reported by GetBufferSize as the actual host buffer size
stream->in.bufferSize = maxBufferSize;
// Get interface latency (actually unneeded, as we calculate latency from the
// size of maxBufferSize).
hr = IAudioClient_GetStreamLatency(stream->in.client, &stream->in.device_latency);
if (hr != S_OK)
{
LogHostError(hr);
LogPaError(result = paInvalidDevice);
goto error;
}
//stream->in.latency_seconds = nano100ToSeconds(stream->in.device_latency);
// Number of frames that are required at each period
stream->in.framesPerHostCallback = maxBufferSize;
// Calculate frames per single buffer, if buffers > 1 then always framesPerBuffer
stream->in.framesPerBuffer =
(stream->in.userBufferAndHostMatch ? stream->in.framesPerHostCallback : framesPerBuffer);
// Calculate buffer latency
buffer_latency = (PaTime)maxBufferSize / stream->in.wavex.Format.nSamplesPerSec;
// Append buffer latency to interface latency in shared mode (see GetStreamLatency notes)
stream->in.latency_seconds += buffer_latency;
PRINT(("WASAPI::OpenStream(input): framesPerUser[ %d ] framesPerHost[ %d ] latency[ %.02fms ] exclusive[ %s ] wow64_fix[ %s ] mode[ %s ]\n", (UINT32)framesPerBuffer, (UINT32)stream->in.framesPerHostCallback, (float)(stream->in.latency_seconds*1000.0f), (stream->in.shareMode == AUDCLNT_SHAREMODE_EXCLUSIVE ? "YES" : "NO"), (paWasapi->useWOW64Workaround ? "YES" : "NO"), (stream->in.streamFlags & AUDCLNT_STREAMFLAGS_EVENTCALLBACK ? "EVENT" : "POLL")));
}
else
{
inputChannelCount = 0;
inputSampleFormat = hostInputSampleFormat = paInt16; /* Suppress 'uninitialized var' warnings. */
}
// Try create device: Output
if (outputParameters != NULL)
{
outputChannelCount = outputParameters->channelCount;
outputSampleFormat = outputParameters->sampleFormat;
outputStreamInfo = (PaWasapiStreamInfo *)outputParameters->hostApiSpecificStreamInfo;
info = &paWasapi->devInfo[outputParameters->device];
stream->out.flags = (outputStreamInfo ? outputStreamInfo->flags : 0);
// Select Exclusive/Shared mode
stream->out.shareMode = AUDCLNT_SHAREMODE_SHARED;
if ((outputStreamInfo != NULL) && (outputStreamInfo->flags & paWinWasapiExclusive))
{
// Boost thread priority
stream->nThreadPriority = eThreadPriorityProAudio;
// Make Exclusive
stream->out.shareMode = AUDCLNT_SHAREMODE_EXCLUSIVE;
}
// If user provided explicit thread priority level, use it
if ((outputStreamInfo != NULL) && (outputStreamInfo->flags & paWinWasapiThreadPriority))
{
if ((outputStreamInfo->threadPriority > eThreadPriorityNone) &&
(outputStreamInfo->threadPriority <= eThreadPriorityWindowManager))
stream->nThreadPriority = outputStreamInfo->threadPriority;
}
// Choose processing mode
stream->out.streamFlags = (stream->out.shareMode == AUDCLNT_SHAREMODE_EXCLUSIVE ? AUDCLNT_STREAMFLAGS_EVENTCALLBACK : 0);
if (paWasapi->useWOW64Workaround)
stream->out.streamFlags = 0; // polling interface
else
if (streamCallback == NULL)
stream->out.streamFlags = 0; // polling interface
else
if ((outputStreamInfo != NULL) && (outputStreamInfo->flags & paWinWasapiPolling))
stream->out.streamFlags = 0; // polling interface
else
if (fullDuplex)
stream->out.streamFlags = 0; // polling interface is implemented for full-duplex mode also
// Create Audio client
hr = CreateAudioClient(stream, &stream->out, info, outputParameters, framesPerBuffer/*framesPerLatency*/,
sampleRate, (streamCallback == NULL), TRUE, fullDuplex, &result);
if (hr != S_OK)
{
LogPaError(result = paInvalidDevice);
goto error;
}
LogWAVEFORMATEXTENSIBLE(&stream->out.wavex);
// Get closest format
hostOutputSampleFormat = PaUtil_SelectClosestAvailableFormat( WaveToPaFormat(&stream->out.wavex), outputSampleFormat );
// Activate volume
stream->outVol = NULL;
/*hr = info->device->Activate(
__uuidof(IAudioEndpointVolume), CLSCTX_INPROC_SERVER, NULL,
(void**)&stream->outVol);
if (hr != S_OK)
return paInvalidDevice;*/
// Set user-side custom host processor
if ((outputStreamInfo != NULL) &&
(outputStreamInfo->flags & paWinWasapiRedirectHostProcessor))
{
stream->hostProcessOverrideOutput.processor = outputStreamInfo->hostProcessorOutput;
stream->hostProcessOverrideOutput.userData = userData;
}
// Get the max possible buffer size, to check that it is not less than what we requested
hr = IAudioClient_GetBufferSize(stream->out.client, &maxBufferSize);
if (hr != S_OK)
{
LogHostError(hr);
LogPaError(result = paInvalidDevice);
goto error;
}
// Use the max size reported by GetBufferSize as the actual host buffer size
stream->out.bufferSize = maxBufferSize;
// Get interface latency (actually unneeded, as we calculate latency from the
// size of maxBufferSize).
hr = IAudioClient_GetStreamLatency(stream->out.client, &stream->out.device_latency);
if (hr != S_OK)
{
LogHostError(hr);
LogPaError(result = paInvalidDevice);
goto error;
}
//stream->out.latency_seconds = nano100ToSeconds(stream->out.device_latency);
// Number of frames that are required at each period
stream->out.framesPerHostCallback = maxBufferSize;
// Calculate frames per single buffer, if buffers > 1 then always framesPerBuffer
stream->out.framesPerBuffer =
(stream->out.userBufferAndHostMatch ? stream->out.framesPerHostCallback : framesPerBuffer);
// Calculate buffer latency
buffer_latency = (PaTime)maxBufferSize / stream->out.wavex.Format.nSamplesPerSec;
// Append buffer latency to interface latency in shared mode (see GetStreamLatency notes)
stream->out.latency_seconds += buffer_latency;
PRINT(("WASAPI::OpenStream(output): framesPerUser[ %d ] framesPerHost[ %d ] latency[ %.02fms ] exclusive[ %s ] wow64_fix[ %s ] mode[ %s ]\n", (UINT32)framesPerBuffer, (UINT32)stream->out.framesPerHostCallback, (float)(stream->out.latency_seconds*1000.0f), (stream->out.shareMode == AUDCLNT_SHAREMODE_EXCLUSIVE ? "YES" : "NO"), (paWasapi->useWOW64Workaround ? "YES" : "NO"), (stream->out.streamFlags & AUDCLNT_STREAMFLAGS_EVENTCALLBACK ? "EVENT" : "POLL")));
}
else
{
outputChannelCount = 0;
outputSampleFormat = hostOutputSampleFormat = paInt16; /* Suppress 'uninitialized var' warnings. */
}
// log full-duplex
if (fullDuplex)
PRINT(("WASAPI::OpenStream: full-duplex mode\n"));
// paWinWasapiPolling must be set on both streams, or on neither
if ((inputParameters != NULL) && (outputParameters != NULL))
{
if ((inputStreamInfo != NULL) && (outputStreamInfo != NULL))
{
if (((inputStreamInfo->flags & paWinWasapiPolling) &&
!(outputStreamInfo->flags & paWinWasapiPolling))
||
(!(inputStreamInfo->flags & paWinWasapiPolling) &&
(outputStreamInfo->flags & paWinWasapiPolling)))
{
LogPaError(result = paInvalidFlag);
goto error;
}
}
}
// Initialize stream representation
if (streamCallback)
{
stream->bBlocking = FALSE;
PaUtil_InitializeStreamRepresentation(&stream->streamRepresentation,
&paWasapi->callbackStreamInterface,
streamCallback, userData);
}
else
{
stream->bBlocking = TRUE;
PaUtil_InitializeStreamRepresentation(&stream->streamRepresentation,
&paWasapi->blockingStreamInterface,
streamCallback, userData);
}
// Initialize CPU measurer
PaUtil_InitializeCpuLoadMeasurer(&stream->cpuLoadMeasurer, sampleRate);
if (outputParameters && inputParameters)
{
// serious problem #1 - no, not a problem, especially concerning Exclusive mode.
// An input device in Exclusive mode somehow always gets a large buffer, so we
// adjust the output latency to reflect it; the periods will differ but playback
// will be normal.
/*if (stream->in.period != stream->out.period)
{
PRINT(("WASAPI: OpenStream: period discrepancy\n"));
LogPaError(result = paBadIODeviceCombination);
goto error;
}*/
// serious problem #2 - no, not a problem: framesPerHostCallback takes the sample
// size into account, and this is not a problem for PA full-duplex; we only need
// to care about the period!
/*if (stream->out.framesPerHostCallback != stream->in.framesPerHostCallback)
{
PRINT(("WASAPI: OpenStream: framesPerHostCallback discrepancy\n"));
goto error;
}*/
}
// Calculate frames per host for processor
framesPerHostCallback = (outputParameters ? stream->out.framesPerBuffer : stream->in.framesPerBuffer);
// Choose correct mode of buffer processing:
// Exclusive/Shared non paWinWasapiPolling mode: paUtilFixedHostBufferSize - always fixed
// Exclusive/Shared paWinWasapiPolling mode: paUtilBoundedHostBufferSize - may vary for Exclusive or Full-duplex
bufferMode = paUtilFixedHostBufferSize;
if (inputParameters) // !!! WASAPI IAudioCaptureClient::GetBuffer extracts 1 packet, not a requested number of frames, thus we must always adapt
bufferMode = paUtilBoundedHostBufferSize;
else
if (outputParameters)
{
if ((stream->out.buffers == 1) &&
(!stream->out.streamFlags || ((stream->out.streamFlags & AUDCLNT_STREAMFLAGS_EVENTCALLBACK) == 0)))
bufferMode = paUtilBoundedHostBufferSize;
}
stream->bufferMode = bufferMode;
// Initialize buffer processor
result = PaUtil_InitializeBufferProcessor(
&stream->bufferProcessor,
inputChannelCount,
inputSampleFormat,
hostInputSampleFormat,
outputChannelCount,
outputSampleFormat,
hostOutputSampleFormat,
sampleRate,
streamFlags,
framesPerBuffer,
framesPerHostCallback,
bufferMode,
streamCallback,
userData);
if (result != paNoError)
{
LogPaError(result);
goto error;
}
// Set Input latency
stream->streamRepresentation.streamInfo.inputLatency =
((double)PaUtil_GetBufferProcessorInputLatency(&stream->bufferProcessor) / sampleRate)
+ ((inputParameters)?stream->in.latency_seconds : 0);
// Set Output latency
stream->streamRepresentation.streamInfo.outputLatency =
((double)PaUtil_GetBufferProcessorOutputLatency(&stream->bufferProcessor) / sampleRate)
+ ((outputParameters)?stream->out.latency_seconds : 0);
// Set SR
stream->streamRepresentation.streamInfo.sampleRate = sampleRate;
(*s) = (PaStream *)stream;
return result;
error:
if (stream != NULL)
CloseStream(stream);
return result;
}
// ------------------------------------------------------------------------------------------
static PaError CloseStream( PaStream* s )
{
PaError result = paNoError;
PaWasapiStream *stream = (PaWasapiStream*)s;
// abort active stream
if (IsStreamActive(s))
{
if ((result = AbortStream(s)) != paNoError)
return result;
}
SAFE_RELEASE(stream->cclient);
SAFE_RELEASE(stream->rclient);
SAFE_RELEASE(stream->out.client);
SAFE_RELEASE(stream->in.client);
SAFE_RELEASE(stream->inVol);
SAFE_RELEASE(stream->outVol);
CloseHandle(stream->event[S_INPUT]);
CloseHandle(stream->event[S_OUTPUT]);
SAFE_CLOSE(stream->hThread);
SAFE_CLOSE(stream->hThreadStart);
SAFE_CLOSE(stream->hThreadExit);
SAFE_CLOSE(stream->hCloseRequest);
SAFE_CLOSE(stream->hBlockingOpStreamRD);
SAFE_CLOSE(stream->hBlockingOpStreamWR);
free(stream->in.monoBuffer);
free(stream->out.monoBuffer);
PaUtil_TerminateBufferProcessor(&stream->bufferProcessor);
PaUtil_TerminateStreamRepresentation(&stream->streamRepresentation);
PaUtil_FreeMemory(stream);
return result;
}
// ------------------------------------------------------------------------------------------
static PaError StartStream( PaStream *s )
{
HRESULT hr;
PaWasapiStream *stream = (PaWasapiStream*)s;
// check if stream is active already
if (IsStreamActive(s))
return paStreamIsNotStopped;
PaUtil_ResetBufferProcessor(&stream->bufferProcessor);
// Create close event
stream->hCloseRequest = CreateEvent(NULL, TRUE, FALSE, NULL);
// Create thread
if (!stream->bBlocking)
{
// Create thread events
stream->hThreadStart = CreateEvent(NULL, TRUE, FALSE, NULL);
stream->hThreadExit = CreateEvent(NULL, TRUE, FALSE, NULL);
if ((stream->in.client && (stream->in.streamFlags & AUDCLNT_STREAMFLAGS_EVENTCALLBACK)) ||
(stream->out.client && (stream->out.streamFlags & AUDCLNT_STREAMFLAGS_EVENTCALLBACK)))
{
if ((stream->hThread = CREATE_THREAD(ProcThreadEvent)) == NULL)
return paUnanticipatedHostError;
}
else
{
if ((stream->hThread = CREATE_THREAD(ProcThreadPoll)) == NULL)
return paUnanticipatedHostError;
}
// Wait for thread to start
if (WaitForSingleObject(stream->hThreadStart, 60*1000) == WAIT_TIMEOUT)
return paUnanticipatedHostError;
}
else
{
// Create blocking-operation events (a non-signaled event means a blocking operation is pending)
if (stream->out.client)
stream->hBlockingOpStreamWR = CreateEvent(NULL, TRUE, TRUE, NULL);
if (stream->in.client)
stream->hBlockingOpStreamRD = CreateEvent(NULL, TRUE, TRUE, NULL);
// Initialize event & start INPUT stream
if (stream->in.client)
{
if ((hr = IAudioClient_GetService(stream->in.client, &pa_IID_IAudioCaptureClient, (void **)&stream->cclient)) != S_OK)
{
LogHostError(hr);
return paUnanticipatedHostError;
}
if ((hr = IAudioClient_Start(stream->in.client)) != S_OK)
{
LogHostError(hr);
return paUnanticipatedHostError;
}
}
// Initialize event & start OUTPUT stream
if (stream->out.client)
{
if ((hr = IAudioClient_GetService(stream->out.client, &pa_IID_IAudioRenderClient, (void **)&stream->rclient)) != S_OK)
{
LogHostError(hr);
return paUnanticipatedHostError;
}
// Start
if ((hr = IAudioClient_Start(stream->out.client)) != S_OK)
{
LogHostError(hr);
return paUnanticipatedHostError;
}
}
// Signal: stream running
stream->running = TRUE;
// Set current time
stream->out.prevTime = timeGetTime();
stream->out.prevSleep = 0;
}
return paNoError;
}
// ------------------------------------------------------------------------------------------
static void _FinishStream(PaWasapiStream *stream)
{
// Issue command to thread to stop processing and wait for thread exit
if (!stream->bBlocking)
{
SignalObjectAndWait(stream->hCloseRequest, stream->hThreadExit, INFINITE, FALSE);
}
else
// Blocking mode does not own thread
{
// Signal close event and wait for each of 2 blocking operations to complete
if (stream->out.client)
SignalObjectAndWait(stream->hCloseRequest, stream->hBlockingOpStreamWR, INFINITE, TRUE);
if (stream->in.client) // the RD event is owned by the input (capture) stream
SignalObjectAndWait(stream->hCloseRequest, stream->hBlockingOpStreamRD, INFINITE, TRUE);
// Process stop
_OnStreamStop(stream);
}
// Close thread handles to allow restart
SAFE_CLOSE(stream->hThread);
SAFE_CLOSE(stream->hThreadStart);
SAFE_CLOSE(stream->hThreadExit);
SAFE_CLOSE(stream->hCloseRequest);
SAFE_CLOSE(stream->hBlockingOpStreamRD);
SAFE_CLOSE(stream->hBlockingOpStreamWR);
stream->running = FALSE;
}
// ------------------------------------------------------------------------------------------
static PaError StopStream( PaStream *s )
{
// Finish stream
_FinishStream((PaWasapiStream *)s);
return paNoError;
}
// ------------------------------------------------------------------------------------------
static PaError AbortStream( PaStream *s )
{
// Finish stream
_FinishStream((PaWasapiStream *)s);
return paNoError;
}
// ------------------------------------------------------------------------------------------
static PaError IsStreamStopped( PaStream *s )
{
return !((PaWasapiStream *)s)->running;
}
// ------------------------------------------------------------------------------------------
static PaError IsStreamActive( PaStream *s )
{
return ((PaWasapiStream *)s)->running;
}
// ------------------------------------------------------------------------------------------
static PaTime GetStreamTime( PaStream *s )
{
PaWasapiStream *stream = (PaWasapiStream*)s;
/* suppress unused variable warnings */
(void) stream;
/* IMPLEMENT ME, see portaudio.h for required behavior*/
// This is lame: DS and MME do the same thing; quite a useless method IMHO.
// Why don't we fetch the time in the PA callbacks?
// At least then it would be clocked to something.
return PaUtil_GetTime();
}
// ------------------------------------------------------------------------------------------
static double GetStreamCpuLoad( PaStream* s )
{
return PaUtil_GetCpuLoad(&((PaWasapiStream *)s)->cpuLoadMeasurer);
}
// ------------------------------------------------------------------------------------------
/* NOT TESTED */
static PaError ReadStream( PaStream* s, void *_buffer, unsigned long _frames )
{
PaWasapiStream *stream = (PaWasapiStream*)s;
HRESULT hr = S_OK;
UINT32 frames;
BYTE *user_buffer = (BYTE *)_buffer;
BYTE *wasapi_buffer = NULL;
DWORD flags = 0;
UINT32 i;
// validate
if (!stream->running)
return paStreamIsStopped;
if (stream->cclient == NULL)
return paBadStreamPtr;
// Notify blocking op has begun
ResetEvent(stream->hBlockingOpStreamRD);
// make a local copy of the user buffer pointer(s), this is necessary
// because PaUtil_CopyOutput() advances these pointers every time it is called
if (!stream->bufferProcessor.userInputIsInterleaved)
{
user_buffer = (BYTE *)alloca(sizeof(BYTE *) * stream->bufferProcessor.inputChannelCount);
if (user_buffer == NULL)
return paInsufficientMemory;
for (i = 0; i < stream->bufferProcessor.inputChannelCount; ++i)
((BYTE **)user_buffer)[i] = ((BYTE **)_buffer)[i];
}
while (_frames != 0)
{
UINT32 processed, processed_size;
// Get the available data in the shared buffer.
if ((hr = IAudioCaptureClient_GetBuffer(stream->cclient, &wasapi_buffer, &frames, &flags, NULL, NULL)) != S_OK)
{
if (hr == AUDCLNT_S_BUFFER_EMPTY)
{
hr = S_OK; // an empty buffer is not an error, just no data yet
// Check if blocking call must be interrupted
if (WaitForSingleObject(stream->hCloseRequest, 1) != WAIT_TIMEOUT)
break;
continue; // keep waiting for more input data
}
// Real failure: log it and leave through the common exit path so the
// blocking-op event is still signaled
LogHostError(hr);
goto stream_rd_end;
}
// Detect silence
// if (flags & AUDCLNT_BUFFERFLAGS_SILENT)
// data = NULL;
// Check if frames <= _frames
if (frames > _frames)
frames = _frames;
// Register available frames to processor
PaUtil_SetInputFrameCount(&stream->bufferProcessor, frames);
// Register host buffer pointer to processor
PaUtil_SetInterleavedInputChannels(&stream->bufferProcessor, 0, wasapi_buffer, stream->bufferProcessor.inputChannelCount);
// Copy user data to host buffer (with conversion if applicable)
processed = PaUtil_CopyInput(&stream->bufferProcessor, (void **)&user_buffer, frames);
// Advance user buffer to consumed portion
processed_size = processed * stream->in.wavex.Format.nBlockAlign;
if (stream->bufferProcessor.userInputIsInterleaved)
{
user_buffer += processed_size;
}
else
{
for (i = 0; i < stream->bufferProcessor.inputChannelCount; ++i)
((BYTE **)user_buffer)[i] = ((BYTE **)user_buffer)[i] + processed_size;
}
// Release host buffer
if ((hr = IAudioCaptureClient_ReleaseBuffer(stream->cclient, processed)) != S_OK)
{
LogHostError(hr);
goto stream_rd_end;
}
_frames -= processed;
}
stream_rd_end:
// Notify blocking op has ended
SetEvent(stream->hBlockingOpStreamRD);
return (hr != S_OK ? paUnanticipatedHostError : paNoError);
}
// ------------------------------------------------------------------------------------------
static PaError WriteStream( PaStream* s, const void *_buffer, unsigned long _frames )
{
PaWasapiStream *stream = (PaWasapiStream*)s;
UINT32 frames;
const BYTE *user_buffer = (const BYTE *)_buffer;
BYTE *wasapi_buffer;
HRESULT hr = S_OK;
UINT32 next_rev_sleep, blocks, block_sleep_ms;
UINT32 i;
// validate
if (!stream->running)
return paStreamIsStopped;
if (stream->rclient == NULL)
return paBadStreamPtr;
// Notify blocking op has begun
ResetEvent(stream->hBlockingOpStreamWR);
// Calculate sleep time for next call
{
UINT32 remainder = 0;
UINT32 sleep_ms = 0;
DWORD elapsed_ms;
blocks = _frames / stream->out.framesPerHostCallback;
block_sleep_ms = GetFramesSleepTime(stream->out.framesPerHostCallback, stream->out.wavex.Format.nSamplesPerSec);
if (blocks == 0)
{
blocks = 1;
sleep_ms = GetFramesSleepTime(_frames, stream->out.wavex.Format.nSamplesPerSec); // partial
}
else
{
remainder = _frames - blocks * stream->out.framesPerHostCallback;
sleep_ms = block_sleep_ms; // full
}
// Sleep for remainder
elapsed_ms = timeGetTime() - stream->out.prevTime;
if (sleep_ms >= elapsed_ms)
sleep_ms -= elapsed_ms;
next_rev_sleep = sleep_ms;
}
// Sleep diff from last call
if (stream->out.prevSleep)
Sleep(stream->out.prevSleep);
stream->out.prevSleep = next_rev_sleep;
// make a local copy of the user buffer pointer(s), this is necessary
// because PaUtil_CopyOutput() advances these pointers every time it is called
if (!stream->bufferProcessor.userOutputIsInterleaved)
{
user_buffer = (const BYTE *)alloca(sizeof(const BYTE *) * stream->bufferProcessor.outputChannelCount);
if (user_buffer == NULL)
return paInsufficientMemory;
for (i = 0; i < stream->bufferProcessor.outputChannelCount; ++i)
((const BYTE **)user_buffer)[i] = ((const BYTE **)_buffer)[i];
}
// Feed engine
for (i = 0; i < blocks; ++i)
{
UINT32 available, processed;
// Get block frames
frames = stream->out.framesPerHostCallback;
if (frames > _frames)
frames = _frames;
if (i)
Sleep(block_sleep_ms);
while (frames != 0)
{
UINT32 padding = 0;
UINT32 processed_size;
// Check if blocking call must be interrupted
if (WaitForSingleObject(stream->hCloseRequest, 0) != WAIT_TIMEOUT)
break;
// Get Read position
hr = IAudioClient_GetCurrentPadding(stream->out.client, &padding);
if (hr != S_OK)
{
LogHostError(hr);
goto stream_wr_end;
}
// Calculate frames available
if (frames >= padding)
available = frames - padding;
else
available = frames;
// Get pointer to host buffer
if ((hr = IAudioRenderClient_GetBuffer(stream->rclient, available, &wasapi_buffer)) != S_OK)
{
// Requested size exceeds the space currently available; retry
if (hr == AUDCLNT_E_BUFFER_TOO_LARGE)
continue;
LogHostError(hr);
goto stream_wr_end;
}
// Register available frames to processor
PaUtil_SetOutputFrameCount(&stream->bufferProcessor, available);
// Register host buffer pointer to processor
PaUtil_SetInterleavedOutputChannels(&stream->bufferProcessor, 0, wasapi_buffer, stream->bufferProcessor.outputChannelCount);
// Copy user data to host buffer (with conversion if applicable)
processed = PaUtil_CopyOutput(&stream->bufferProcessor, (const void **)&user_buffer, available);
// Advance user buffer to consumed portion
processed_size = processed * stream->out.wavex.Format.nBlockAlign;
if (stream->bufferProcessor.userOutputIsInterleaved)
{
user_buffer += processed_size;
}
else
{
UINT32 ch; // separate counter: 'i' indexes the outer block loop
for (ch = 0; ch < stream->bufferProcessor.outputChannelCount; ++ch)
((const BYTE **)user_buffer)[ch] = ((const BYTE **)user_buffer)[ch] + processed_size;
}
// Release host buffer
if ((hr = IAudioRenderClient_ReleaseBuffer(stream->rclient, processed, 0)) != S_OK)
{
LogHostError(hr);
goto stream_wr_end;
}
// Deduct consumed frames from both the block counter and the total
frames -= processed;
_frames -= processed;
}
}
stream_wr_end:
// Set prev time
stream->out.prevTime = timeGetTime();
// Notify blocking op has ended
SetEvent(stream->hBlockingOpStreamWR);
return (hr != S_OK ? paUnanticipatedHostError : paNoError);
}
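/* ReadStream/WriteStream above back the blocking interface registered during host API
initialization; an illustrative caller-side sketch (assumes a stereo stream opened
with a NULL callback; the buffer name is hypothetical):

    float buf[2 * 480]; // 480 interleaved stereo frames
    Pa_WriteStream(stream, buf, 480);

The sleep bookkeeping in WriteStream paces such calls against the host buffer period. */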
// ------------------------------------------------------------------------------------------
/* NOT TESTED */
static signed long GetStreamReadAvailable( PaStream* s )
{
PaWasapiStream *stream = (PaWasapiStream*)s;
HRESULT hr;
UINT32 pending = 0;
// validate
if (!stream->running)
return paStreamIsStopped;
if (stream->cclient == NULL)
return paBadStreamPtr;
hr = IAudioClient_GetCurrentPadding(stream->in.client, &pending);
if (hr != S_OK)
{
LogHostError(hr);
return paUnanticipatedHostError;
}
return (long)pending;
}
// ------------------------------------------------------------------------------------------
static signed long GetStreamWriteAvailable( PaStream* s )
{
PaWasapiStream *stream = (PaWasapiStream*)s;
UINT32 frames = stream->out.framesPerHostCallback;
HRESULT hr;
UINT32 padding = 0;
// validate
if (!stream->running)
return paStreamIsStopped;
if (stream->rclient == NULL)
return paBadStreamPtr;
hr = IAudioClient_GetCurrentPadding(stream->out.client, &padding);
if (hr != S_OK)
{
LogHostError(hr);
return paUnanticipatedHostError;
}
// Calculate
frames -= padding;
return frames;
}
// ------------------------------------------------------------------------------------------
static void WaspiHostProcessingLoop( void *inputBuffer, long inputFrames,
void *outputBuffer, long outputFrames,
void *userData )
{
PaWasapiStream *stream = (PaWasapiStream*)userData;
PaStreamCallbackTimeInfo timeInfo = {0,0,0};
PaStreamCallbackFlags flags = 0;
int callbackResult;
unsigned long framesProcessed;
HRESULT hr;
UINT32 pending;
PaUtil_BeginCpuLoadMeasurement( &stream->cpuLoadMeasurer );
/*
Pa_GetStreamTime:
- generate timing information
- handle buffer slips
*/
timeInfo.currentTime = PaUtil_GetTime();
// Query input latency
if (stream->in.client != NULL)
{
PaTime pending_time;
if ((hr = IAudioClient_GetCurrentPadding(stream->in.client, &pending)) == S_OK)
pending_time = (PaTime)pending / (PaTime)stream->in.wavex.Format.nSamplesPerSec;
else
pending_time = (PaTime)stream->in.latency_seconds;
timeInfo.inputBufferAdcTime = timeInfo.currentTime + pending_time;
}
// Query output current latency
if (stream->out.client != NULL)
{
PaTime pending_time;
if ((hr = IAudioClient_GetCurrentPadding(stream->out.client, &pending)) == S_OK)
pending_time = (PaTime)pending / (PaTime)stream->out.wavex.Format.nSamplesPerSec;
else
pending_time = (PaTime)stream->out.latency_seconds;
timeInfo.outputBufferDacTime = timeInfo.currentTime + pending_time;
}
/*
If you need to byte swap or shift inputBuffer to convert it into a
portaudio format, do it here.
*/
PaUtil_BeginBufferProcessing( &stream->bufferProcessor, &timeInfo, flags );
/*
depending on whether the host buffers are interleaved, non-interleaved
or a mixture, you will want to call PaUtil_SetInterleaved*Channels(),
PaUtil_SetNonInterleaved*Channel() or PaUtil_Set*Channel() here.
*/
if (stream->bufferProcessor.inputChannelCount > 0)
{
PaUtil_SetInputFrameCount( &stream->bufferProcessor, inputFrames );
PaUtil_SetInterleavedInputChannels( &stream->bufferProcessor,
0, /* first channel of inputBuffer is channel 0 */
inputBuffer,
0 ); /* 0 - use inputChannelCount passed to init buffer processor */
}
if (stream->bufferProcessor.outputChannelCount > 0)
{
PaUtil_SetOutputFrameCount( &stream->bufferProcessor, outputFrames);
PaUtil_SetInterleavedOutputChannels( &stream->bufferProcessor,
0, /* first channel of outputBuffer is channel 0 */
outputBuffer,
0 ); /* 0 - use outputChannelCount passed to init buffer processor */
}
/* you must pass a valid value of callback result to PaUtil_EndBufferProcessing()
in general you would pass paContinue for normal operation, and
paComplete to drain the buffer processor's internal output buffer.
You can check whether the buffer processor's output buffer is empty
using PaUtil_IsBufferProcessorOutputEmpty( bufferProcessor )
*/
callbackResult = paContinue;
framesProcessed = PaUtil_EndBufferProcessing( &stream->bufferProcessor, &callbackResult );
/*
If you need to byte swap or shift outputBuffer to convert it to
host format, do it here.
*/
PaUtil_EndCpuLoadMeasurement( &stream->cpuLoadMeasurer, framesProcessed );
if (callbackResult == paContinue)
{
/* nothing special to do */
}
else
if (callbackResult == paAbort)
{
// stop stream
SetEvent(stream->hCloseRequest);
}
else
{
// stop stream
SetEvent(stream->hCloseRequest);
}
}
// ------------------------------------------------------------------------------------------
HANDLE MMCSS_activate(const char *name)
{
DWORD task_idx = 0;
HANDLE hTask = pAvSetMmThreadCharacteristics(name, &task_idx);
if (hTask == NULL)
{
PRINT(("WASAPI: AvSetMmThreadCharacteristics failed!\n"));
}
/*BOOL priority_ok = pAvSetMmThreadPriority(hTask, AVRT_PRIORITY_NORMAL);
if (priority_ok == FALSE)
{
PRINT(("WASAPI: AvSetMmThreadPriority failed!\n"));
}*/
// debug
{
int cur_priority = GetThreadPriority(GetCurrentThread());
DWORD cur_priority_class = GetPriorityClass(GetCurrentProcess());
PRINT(("WASAPI: thread[ priority-0x%X class-0x%X ]\n", cur_priority, cur_priority_class));
}
return hTask;
}
// ------------------------------------------------------------------------------------------
void MMCSS_deactivate(HANDLE hTask)
{
if (!hTask)
return;
if (pAvRevertMmThreadCharacteristics(hTask) == FALSE)
{
PRINT(("WASAPI: AvRevertMmThreadCharacteristics failed!\n"));
}
}
// ------------------------------------------------------------------------------------------
PaError PaWasapi_ThreadPriorityBoost(void **hTask, PaWasapiThreadPriority nPriorityClass)
{
static const char *mmcs_name[] =
{
NULL,
"Audio",
"Capture",
"Distribution",
"Games",
"Playback",
"Pro Audio",
"Window Manager"
};
HANDLE task;
if (hTask == NULL)
return paUnanticipatedHostError;
if ((UINT32)nPriorityClass >= STATIC_ARRAY_SIZE(mmcs_name))
return paUnanticipatedHostError;
task = MMCSS_activate(mmcs_name[nPriorityClass]);
if (task == NULL)
return paUnanticipatedHostError;
(*hTask) = task;
return paNoError;
}
// ------------------------------------------------------------------------------------------
PaError PaWasapi_ThreadPriorityRevert(void *hTask)
{
if (hTask == NULL)
return paUnanticipatedHostError;
MMCSS_deactivate((HANDLE)hTask);
return paNoError;
}
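/*
Usage sketch (not from this file): temporarily boosting a user thread via
the public PaWasapi_* pair above; the enum value below is assumed to match
the PaWasapiThreadPriority declaration in pa_win_wasapi.h.
*/
#if 0
void *task = NULL;
if (PaWasapi_ThreadPriorityBoost(&task, eThreadPriorityProAudio) == paNoError)
{
// ... do time-critical audio work here ...
PaWasapi_ThreadPriorityRevert(task);
}
#endif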
// ------------------------------------------------------------------------------------------
// Described at:
// http://msdn.microsoft.com/en-us/library/dd371387(v=VS.85).aspx
PaError PaWasapi_GetJackCount(PaDeviceIndex nDevice, int *jcount)
{
PaError ret;
HRESULT hr = S_OK;
PaDeviceIndex index;
IDeviceTopology *pDeviceTopology = NULL;
IConnector *pConnFrom = NULL;
IConnector *pConnTo = NULL;
IPart *pPart = NULL;
IKsJackDescription *pJackDesc = NULL;
UINT jackCount = 0;
PaWasapiHostApiRepresentation *paWasapi = _GetHostApi(&ret);
if (paWasapi == NULL)
return paNotInitialized;
// Get device index.
ret = PaUtil_DeviceIndexToHostApiDeviceIndex(&index, nDevice, &paWasapi->inheritedHostApiRep);
if (ret != paNoError)
return ret;
// Validate index.
if ((UINT32)index >= paWasapi->deviceCount)
return paInvalidDevice;
// Get the endpoint device's IDeviceTopology interface.
hr = IMMDevice_Activate(paWasapi->devInfo[index].device, &pa_IID_IDeviceTopology,
CLSCTX_INPROC_SERVER, NULL, (void**)&pDeviceTopology);
IF_FAILED_JUMP(hr, error);
// The device topology for an endpoint device always contains just one connector (connector number 0).
hr = IDeviceTopology_GetConnector(pDeviceTopology, 0, &pConnFrom);
IF_FAILED_JUMP(hr, error);
// Step across the connection to the jack on the adapter.
hr = IConnector_GetConnectedTo(pConnFrom, &pConnTo);
if (HRESULT_FROM_WIN32(ERROR_PATH_NOT_FOUND) == hr)
{
// The adapter device is not currently active.
hr = E_NOINTERFACE;
}
IF_FAILED_JUMP(hr, error);
// Get the connector's IPart interface.
hr = IConnector_QueryInterface(pConnTo, &pa_IID_IPart, (void**)&pPart);
IF_FAILED_JUMP(hr, error);
// Activate the connector's IKsJackDescription interface.
hr = IPart_Activate(pPart, CLSCTX_INPROC_SERVER, &pa_IID_IKsJackDescription, (void**)&pJackDesc);
IF_FAILED_JUMP(hr, error);
// Return jack count for this device.
hr = IKsJackDescription_GetJackCount(pJackDesc, &jackCount);
IF_FAILED_JUMP(hr, error);
// Set.
(*jcount) = jackCount;
// Ok.
ret = paNoError;
error:
SAFE_RELEASE(pDeviceTopology);
SAFE_RELEASE(pConnFrom);
SAFE_RELEASE(pConnTo);
SAFE_RELEASE(pPart);
SAFE_RELEASE(pJackDesc);
LogHostError(hr);
return (hr != S_OK ? paUnanticipatedHostError : ret);
}
// ------------------------------------------------------------------------------------------
static PaWasapiJackConnectionType ConvertJackConnectionTypeWASAPIToPA(int connType)
{
switch (connType)
{
case eConnTypeUnknown: return eJackConnTypeUnknown;
#ifdef _KS_
case eConnType3Point5mm: return eJackConnType3Point5mm;
#else
case eConnTypeEighth: return eJackConnType3Point5mm;
#endif
case eConnTypeQuarter: return eJackConnTypeQuarter;
case eConnTypeAtapiInternal: return eJackConnTypeAtapiInternal;
case eConnTypeRCA: return eJackConnTypeRCA;
case eConnTypeOptical: return eJackConnTypeOptical;
case eConnTypeOtherDigital: return eJackConnTypeOtherDigital;
case eConnTypeOtherAnalog: return eJackConnTypeOtherAnalog;
case eConnTypeMultichannelAnalogDIN: return eJackConnTypeMultichannelAnalogDIN;
case eConnTypeXlrProfessional: return eJackConnTypeXlrProfessional;
case eConnTypeRJ11Modem: return eJackConnTypeRJ11Modem;
case eConnTypeCombination: return eJackConnTypeCombination;
}
return eJackConnTypeUnknown;
}
// ------------------------------------------------------------------------------------------
static PaWasapiJackGeoLocation ConvertJackGeoLocationWASAPIToPA(int geoLoc)
{
switch (geoLoc)
{
case eGeoLocRear: return eJackGeoLocRear;
case eGeoLocFront: return eJackGeoLocFront;
case eGeoLocLeft: return eJackGeoLocLeft;
case eGeoLocRight: return eJackGeoLocRight;
case eGeoLocTop: return eJackGeoLocTop;
case eGeoLocBottom: return eJackGeoLocBottom;
#ifdef _KS_
case eGeoLocRearPanel: return eJackGeoLocRearPanel;
#else
case eGeoLocRearOPanel: return eJackGeoLocRearPanel;
#endif
case eGeoLocRiser: return eJackGeoLocRiser;
case eGeoLocInsideMobileLid: return eJackGeoLocInsideMobileLid;
case eGeoLocDrivebay: return eJackGeoLocDrivebay;
case eGeoLocHDMI: return eJackGeoLocHDMI;
case eGeoLocOutsideMobileLid: return eJackGeoLocOutsideMobileLid;
case eGeoLocATAPI: return eJackGeoLocATAPI;
}
return eJackGeoLocUnk;
}
// ------------------------------------------------------------------------------------------
static PaWasapiJackGenLocation ConvertJackGenLocationWASAPIToPA(int genLoc)
{
switch (genLoc)
{
case eGenLocPrimaryBox: return eJackGenLocPrimaryBox;
case eGenLocInternal: return eJackGenLocInternal;
#ifdef _KS_
case eGenLocSeparate: return eJackGenLocSeparate;
#else
case eGenLocSeperate: return eJackGenLocSeparate;
#endif
case eGenLocOther: return eJackGenLocOther;
}
return eJackGenLocPrimaryBox;
}
// ------------------------------------------------------------------------------------------
static PaWasapiJackPortConnection ConvertJackPortConnectionWASAPIToPA(int portConn)
{
switch (portConn)
{
case ePortConnJack: return eJackPortConnJack;
case ePortConnIntegratedDevice: return eJackPortConnIntegratedDevice;
case ePortConnBothIntegratedAndJack:return eJackPortConnBothIntegratedAndJack;
case ePortConnUnknown: return eJackPortConnUnknown;
}
return eJackPortConnJack;
}
// ------------------------------------------------------------------------------------------
// Described at:
// http://msdn.microsoft.com/en-us/library/dd371387(v=VS.85).aspx
PaError PaWasapi_GetJackDescription(PaDeviceIndex nDevice, int jindex, PaWasapiJackDescription *pJackDescription)
{
PaError ret;
HRESULT hr = S_OK;
PaDeviceIndex index;
IDeviceTopology *pDeviceTopology = NULL;
IConnector *pConnFrom = NULL;
IConnector *pConnTo = NULL;
IPart *pPart = NULL;
IKsJackDescription *pJackDesc = NULL;
KSJACK_DESCRIPTION jack = { 0 };
PaWasapiHostApiRepresentation *paWasapi = _GetHostApi(&ret);
if (paWasapi == NULL)
return paNotInitialized;
// Get device index.
ret = PaUtil_DeviceIndexToHostApiDeviceIndex(&index, nDevice, &paWasapi->inheritedHostApiRep);
if (ret != paNoError)
return ret;
// Validate index.
if ((UINT32)index >= paWasapi->deviceCount)
return paInvalidDevice;
// Get the endpoint device's IDeviceTopology interface.
hr = IMMDevice_Activate(paWasapi->devInfo[index].device, &pa_IID_IDeviceTopology,
CLSCTX_INPROC_SERVER, NULL, (void**)&pDeviceTopology);
IF_FAILED_JUMP(hr, error);
// The device topology for an endpoint device always contains just one connector (connector number 0).
hr = IDeviceTopology_GetConnector(pDeviceTopology, 0, &pConnFrom);
IF_FAILED_JUMP(hr, error);
// Step across the connection to the jack on the adapter.
hr = IConnector_GetConnectedTo(pConnFrom, &pConnTo);
if (HRESULT_FROM_WIN32(ERROR_PATH_NOT_FOUND) == hr)
{
// The adapter device is not currently active.
hr = E_NOINTERFACE;
}
IF_FAILED_JUMP(hr, error);
// Get the connector's IPart interface.
hr = IConnector_QueryInterface(pConnTo, &pa_IID_IPart, (void**)&pPart);
IF_FAILED_JUMP(hr, error);
// Activate the connector's IKsJackDescription interface.
hr = IPart_Activate(pPart, CLSCTX_INPROC_SERVER, &pa_IID_IKsJackDescription, (void**)&pJackDesc);
IF_FAILED_JUMP(hr, error);
// Test to return jack description struct for index 0.
hr = IKsJackDescription_GetJackDescription(pJackDesc, jindex, &jack);
IF_FAILED_JUMP(hr, error);
// Convert WASAPI values to PA format.
pJackDescription->channelMapping = jack.ChannelMapping;
pJackDescription->color = jack.Color;
pJackDescription->connectionType = ConvertJackConnectionTypeWASAPIToPA(jack.ConnectionType);
pJackDescription->genLocation = ConvertJackGenLocationWASAPIToPA(jack.GenLocation);
pJackDescription->geoLocation = ConvertJackGeoLocationWASAPIToPA(jack.GeoLocation);
pJackDescription->isConnected = jack.IsConnected;
pJackDescription->portConnection = ConvertJackPortConnectionWASAPIToPA(jack.PortConnection);
// Ok.
ret = paNoError;
error:
SAFE_RELEASE(pDeviceTopology);
SAFE_RELEASE(pConnFrom);
SAFE_RELEASE(pConnTo);
SAFE_RELEASE(pPart);
SAFE_RELEASE(pJackDesc);
LogHostError(hr);
return (hr != S_OK ? paUnanticipatedHostError : ret);
}
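/*
Usage sketch (hypothetical caller): enumerating all jacks of a device with
the two functions above; 'device' is a PaDeviceIndex obtained elsewhere.
*/
#if 0
int jacks = 0, j;
if (PaWasapi_GetJackCount(device, &jacks) == paNoError)
{
for (j = 0; j < jacks; ++j)
{
PaWasapiJackDescription desc;
if (PaWasapi_GetJackDescription(device, j, &desc) == paNoError)
printf("jack %d connected: %d\n", j, (int)desc.isConnected);
}
}
#endif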
// ------------------------------------------------------------------------------------------
static HRESULT ProcessOutputBuffer(PaWasapiStream *stream, PaWasapiHostProcessor *processor, UINT32 frames)
{
HRESULT hr;
BYTE *data = NULL;
// Get buffer
if ((hr = IAudioRenderClient_GetBuffer(stream->rclient, frames, &data)) != S_OK)
{
if (stream->out.shareMode == AUDCLNT_SHAREMODE_SHARED)
{
// Using GetCurrentPadding to overcome AUDCLNT_E_BUFFER_TOO_LARGE in
// shared mode results in no sound in Event-driven mode (MSDN does not
// document this; possibly a WASAPI bug), so it is better to try to
// acquire the buffer next time, when GetBuffer allows it.
#if 0
// Get Read position
UINT32 padding = 0;
hr = stream->out.client->GetCurrentPadding(&padding);
if (hr != S_OK)
return LogHostError(hr);
// Get frames to write
frames -= padding;
if (frames == 0)
return S_OK;
if ((hr = stream->rclient->GetBuffer(frames, &data)) != S_OK)
return LogHostError(hr);
#else
if (hr == AUDCLNT_E_BUFFER_TOO_LARGE)
return S_OK; // be silent in shared mode, try again next time
#endif
}
else
return LogHostError(hr);
}
// Process data
if (stream->out.monoMixer != NULL)
{
#define __DIV_8(v) ((v) >> 3) //!< (v / 8)
// expand buffer (grow-only, for better performance due to fewer calls to realloc)
UINT32 mono_frames_size = frames * __DIV_8(stream->out.wavex.Format.wBitsPerSample);
if (mono_frames_size > stream->out.monoBufferSize)
stream->out.monoBuffer = realloc(stream->out.monoBuffer, (stream->out.monoBufferSize = mono_frames_size));
// process
processor[S_OUTPUT].processor(NULL, 0, (BYTE *)stream->out.monoBuffer, frames, processor[S_OUTPUT].userData);
// mix 1 to 2 channels
stream->out.monoMixer(data, stream->out.monoBuffer, frames);
#undef __DIV_8
}
else
{
processor[S_OUTPUT].processor(NULL, 0, data, frames, processor[S_OUTPUT].userData);
}
// Release buffer
if ((hr = IAudioRenderClient_ReleaseBuffer(stream->rclient, frames, 0)) != S_OK)
LogHostError(hr);
return hr;
}
// ------------------------------------------------------------------------------------------
static HRESULT ProcessInputBuffer(PaWasapiStream *stream, PaWasapiHostProcessor *processor)
{
HRESULT hr = S_OK;
UINT32 frames;
BYTE *data = NULL;
DWORD flags = 0;
for (;;)
{
// Check if blocking call must be interrupted
if (WaitForSingleObject(stream->hCloseRequest, 0) != WAIT_TIMEOUT)
break;
// Get the available data in the shared buffer.
if ((hr = IAudioCaptureClient_GetBuffer(stream->cclient, &data, &frames, &flags, NULL, NULL)) != S_OK)
{
if (hr == AUDCLNT_S_BUFFER_EMPTY)
{
hr = S_OK;
break; // capture buffer exhausted
}
return LogHostError(hr);
}
// Detect silence
// if (flags & AUDCLNT_BUFFERFLAGS_SILENT)
// data = NULL;
// Process data
if (stream->in.monoMixer != NULL)
{
#define __DIV_8(v) ((v) >> 3) //!< (v / 8)
// expand buffer (grow-only, for better performance due to fewer calls to realloc)
UINT32 mono_frames_size = frames * __DIV_8(stream->in.wavex.Format.wBitsPerSample);
if (mono_frames_size > stream->in.monoBufferSize)
stream->in.monoBuffer = realloc(stream->in.monoBuffer, (stream->in.monoBufferSize = mono_frames_size));
// mix 1 to 2 channels
stream->in.monoMixer(stream->in.monoBuffer, data, frames);
// process
processor[S_INPUT].processor((BYTE *)stream->in.monoBuffer, frames, NULL, 0, processor[S_INPUT].userData);
#undef __DIV_8
}
else
{
processor[S_INPUT].processor(data, frames, NULL, 0, processor[S_INPUT].userData);
}
// Release buffer
if ((hr = IAudioCaptureClient_ReleaseBuffer(stream->cclient, frames)) != S_OK)
return LogHostError(hr);
}
return hr;
}
// ------------------------------------------------------------------------------------------
void _OnStreamStop(PaWasapiStream *stream)
{
// Stop INPUT client
if (stream->in.client != NULL)
IAudioClient_Stop(stream->in.client);
// Stop OUTPUT client
if (stream->out.client != NULL)
IAudioClient_Stop(stream->out.client);
// Restore thread priority
if (stream->hAvTask != NULL)
{
PaWasapi_ThreadPriorityRevert(stream->hAvTask);
stream->hAvTask = NULL;
}
// Notify
if (stream->streamRepresentation.streamFinishedCallback != NULL)
stream->streamRepresentation.streamFinishedCallback(stream->streamRepresentation.userData);
}
// ------------------------------------------------------------------------------------------
PA_THREAD_FUNC ProcThreadEvent(void *param)
{
PaWasapiHostProcessor processor[S_COUNT];
HRESULT hr;
DWORD dwResult;
PaWasapiStream *stream = (PaWasapiStream *)param;
PaWasapiHostProcessor defaultProcessor;
BOOL set_event[S_COUNT] = { FALSE, FALSE };
// Waiting on all events in case of Full-Duplex/Exclusive mode.
BOOL bWaitAllEvents = FALSE;
if ((stream->in.client != NULL) && (stream->out.client != NULL))
{
bWaitAllEvents = (stream->in.shareMode == AUDCLNT_SHAREMODE_EXCLUSIVE) &&
(stream->out.shareMode == AUDCLNT_SHAREMODE_EXCLUSIVE);
}
// Setup data processors
defaultProcessor.processor = WaspiHostProcessingLoop;
defaultProcessor.userData = stream;
processor[S_INPUT] = (stream->hostProcessOverrideInput.processor != NULL ? stream->hostProcessOverrideInput : defaultProcessor);
processor[S_OUTPUT] = (stream->hostProcessOverrideOutput.processor != NULL ? stream->hostProcessOverrideOutput : defaultProcessor);
// Boost thread priority
PaWasapi_ThreadPriorityBoost((void **)&stream->hAvTask, stream->nThreadPriority);
// Create events
if (stream->event[S_OUTPUT] == NULL)
{
stream->event[S_OUTPUT] = CreateEvent(NULL, FALSE, FALSE, NULL);
set_event[S_OUTPUT] = TRUE;
}
if (stream->event[S_INPUT] == NULL)
{
stream->event[S_INPUT] = CreateEvent(NULL, FALSE, FALSE, NULL);
set_event[S_INPUT] = TRUE;
}
if ((stream->event[S_OUTPUT] == NULL) || (stream->event[S_INPUT] == NULL))
{
PRINT(("WASAPI Thread: failed creating Input/Output event handle\n"));
goto thread_error;
}
// Initialize event & start INPUT stream
if (stream->in.client)
{
// Create & set handle
if (set_event[S_INPUT])
{
if ((hr = IAudioClient_SetEventHandle(stream->in.client, stream->event[S_INPUT])) != S_OK)
{
LogHostError(hr);
goto thread_error;
}
}
// Create Capture client
if (stream->cclient == NULL)
{
if ((hr = IAudioClient_GetService(stream->in.client, &pa_IID_IAudioCaptureClient, (void **)&stream->cclient)) != S_OK)
{
LogHostError(hr);
goto thread_error;
}
}
// Start
if ((hr = IAudioClient_Start(stream->in.client)) != S_OK)
{
LogHostError(hr);
goto thread_error;
}
}
// Initialize event & start OUTPUT stream
if (stream->out.client)
{
// Create & set handle
if (set_event[S_OUTPUT])
{
if ((hr = IAudioClient_SetEventHandle(stream->out.client, stream->event[S_OUTPUT])) != S_OK)
{
LogHostError(hr);
goto thread_error;
}
}
// Create Render client
if (stream->rclient == NULL)
{
if ((hr = IAudioClient_GetService(stream->out.client, &pa_IID_IAudioRenderClient, (void **)&stream->rclient)) != S_OK)
{
LogHostError(hr);
goto thread_error;
}
}
// Preload buffer before start
if ((hr = ProcessOutputBuffer(stream, processor, stream->out.framesPerBuffer)) != S_OK)
{
LogHostError(hr);
goto thread_error;
}
// Start
if ((hr = IAudioClient_Start(stream->out.client)) != S_OK)
{
LogHostError(hr);
goto thread_error;
}
}
// Signal: stream running
stream->running = TRUE;
// Notify: thread started
SetEvent(stream->hThreadStart);
// Processing Loop
for (;;)
{
// 10 sec timeout
dwResult = WaitForMultipleObjects(S_COUNT, stream->event, bWaitAllEvents, 10000);
// Check for close event (after wait for buffers to avoid any calls to user
// callback when hCloseRequest was set)
if (WaitForSingleObject(stream->hCloseRequest, 0) != WAIT_TIMEOUT)
break;
// Process S_INPUT/S_OUTPUT
switch (dwResult)
{
case WAIT_TIMEOUT: {
PRINT(("WASAPI Thread: WAIT_TIMEOUT - probably bad audio driver or Vista x64 bug: use paWinWasapiPolling instead\n"));
goto thread_end;
break; }
// Input stream
case WAIT_OBJECT_0 + S_INPUT: {
if (stream->cclient == NULL)
break;
if ((hr = ProcessInputBuffer(stream, processor)) != S_OK)
{
LogHostError(hr);
goto thread_error;
}
break; }
// Output stream
case WAIT_OBJECT_0 + S_OUTPUT: {
if (stream->rclient == NULL)
break;
if ((hr = ProcessOutputBuffer(stream, processor, stream->out.framesPerBuffer)) != S_OK)
{
LogHostError(hr);
goto thread_error;
}
break; }
}
}
thread_end:
// Process stop
_OnStreamStop(stream);
// Notify: thread exited
SetEvent(stream->hThreadExit);
// Notify: not running
stream->running = FALSE;
return 0;
thread_error:
// Prevent deadlocking in Pa_StreamStart
SetEvent(stream->hThreadStart);
// Exit
goto thread_end;
}
// ------------------------------------------------------------------------------------------
static HRESULT PollGetOutputFramesAvailable(PaWasapiStream *stream, UINT32 *available)
{
HRESULT hr;
UINT32 frames = stream->out.framesPerHostCallback,
padding = 0;
(*available) = 0;
// get read position
if ((hr = IAudioClient_GetCurrentPadding(stream->out.client, &padding)) != S_OK)
return LogHostError(hr);
// get available
frames -= padding;
// set
(*available) = frames;
return hr;
}
// ------------------------------------------------------------------------------------------
/*! \class ThreadIdleScheduler
Allows emulating a thread sleep of less than 1 millisecond under Windows. The scheduler
calculates how many times the thread must run until the next sleep of 1 millisecond.
It does not make the thread sleep for a real number of microseconds but rather controls
after how many imaginary microseconds of work the thread is allowed to sleep.
*/
typedef struct ThreadIdleScheduler
{
UINT32 m_idle_microseconds; //!< number of microseconds to sleep
UINT32 m_next_sleep; //!< next sleep round
UINT32 m_i; //!< current round iterator position
UINT32 m_resolution; //!< resolution in number of milliseconds
}
ThreadIdleScheduler;
//! Setup scheduler.
static void ThreadIdleScheduler_Setup(ThreadIdleScheduler *sched, UINT32 resolution, UINT32 microseconds)
{
assert(microseconds != 0);
assert(resolution != 0);
assert((resolution * 1000) >= microseconds);
memset(sched, 0, sizeof(*sched));
sched->m_idle_microseconds = microseconds;
sched->m_resolution = resolution;
sched->m_next_sleep = (resolution * 1000) / microseconds;
}
//! Iterate and check if can sleep.
static UINT32 ThreadIdleScheduler_NextSleep(ThreadIdleScheduler *sched)
{
// advance and check if thread can sleep
if (++ sched->m_i == sched->m_next_sleep)
{
sched->m_i = 0;
return sched->m_resolution;
}
return 0;
}
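/*
Usage sketch: with resolution = 1 ms and an idle budget of 250 microseconds
per iteration, Setup() computes m_next_sleep = (1 * 1000) / 250 = 4, so
NextSleep() returns 1 on every 4th call and 0 otherwise, approximating an
average sleep of 250 us per iteration.
*/
#if 0
ThreadIdleScheduler sched;
UINT32 ms;
ThreadIdleScheduler_Setup(&sched, 1, 250);
for (;;)
{
if ((ms = ThreadIdleScheduler_NextSleep(&sched)) != 0)
Sleep(ms);
// ... poll the audio client here ...
}
#endif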
// ------------------------------------------------------------------------------------------
PA_THREAD_FUNC ProcThreadPoll(void *param)
{
PaWasapiHostProcessor processor[S_COUNT];
HRESULT hr;
PaWasapiStream *stream = (PaWasapiStream *)param;
PaWasapiHostProcessor defaultProcessor;
INT32 i;
ThreadIdleScheduler scheduler;
// Calculate the actual duration of the allocated buffer.
DWORD sleep_ms = 0;
DWORD sleep_ms_in = GetFramesSleepTime(stream->in.framesPerBuffer, stream->in.wavex.Format.nSamplesPerSec);
DWORD sleep_ms_out = GetFramesSleepTime(stream->out.framesPerBuffer, stream->out.wavex.Format.nSamplesPerSec);
// Adjust polling time
if (stream->bufferMode != paUtilFixedHostBufferSize)
{
sleep_ms_in = GetFramesSleepTime(stream->bufferProcessor.framesPerUserBuffer, stream->in.wavex.Format.nSamplesPerSec);
sleep_ms_out = GetFramesSleepTime(stream->bufferProcessor.framesPerUserBuffer, stream->out.wavex.Format.nSamplesPerSec);
}
// Choose smallest
if ((sleep_ms_in != 0) && (sleep_ms_out != 0))
sleep_ms = min(sleep_ms_in, sleep_ms_out);
else
{
sleep_ms = (sleep_ms_in ? sleep_ms_in : sleep_ms_out);
}
// Make sure it is not 0, otherwise use the ThreadIdleScheduler
if (sleep_ms == 0)
{
sleep_ms_in = GetFramesSleepTimeMicroseconds(stream->bufferProcessor.framesPerUserBuffer, stream->in.wavex.Format.nSamplesPerSec);
sleep_ms_out = GetFramesSleepTimeMicroseconds(stream->bufferProcessor.framesPerUserBuffer, stream->out.wavex.Format.nSamplesPerSec);
// Choose smallest
if ((sleep_ms_in != 0) && (sleep_ms_out != 0))
sleep_ms = min(sleep_ms_in, sleep_ms_out);
else
{
sleep_ms = (sleep_ms_in ? sleep_ms_in : sleep_ms_out);
}
// Setup thread sleep scheduler
ThreadIdleScheduler_Setup(&scheduler, 1, sleep_ms/* microseconds here actually */);
sleep_ms = 0;
}
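/*
Example (sketch, assuming GetFramesSleepTimeMicroseconds() returns roughly
1000000 * frames / nSamplesPerSec): 16 user frames at 48000 Hz give ~333 us,
which truncates to 0 ms above, so the scheduler is armed to sleep 1 ms
roughly every 3rd iteration (1000 / 333 = 3).
*/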
// Setup data processors
defaultProcessor.processor = WaspiHostProcessingLoop;
defaultProcessor.userData = stream;
processor[S_INPUT] = (stream->hostProcessOverrideInput.processor != NULL ? stream->hostProcessOverrideInput : defaultProcessor);
processor[S_OUTPUT] = (stream->hostProcessOverrideOutput.processor != NULL ? stream->hostProcessOverrideOutput : defaultProcessor);
// Boost thread priority
PaWasapi_ThreadPriorityBoost((void **)&stream->hAvTask, stream->nThreadPriority);
// Initialize event & start INPUT stream
if (stream->in.client)
{
if (stream->cclient == NULL)
{
if ((hr = IAudioClient_GetService(stream->in.client, &pa_IID_IAudioCaptureClient, (void **)&stream->cclient)) != S_OK)
{
LogHostError(hr);
goto thread_error;
}
}
if ((hr = IAudioClient_Start(stream->in.client)) != S_OK)
{
LogHostError(hr);
goto thread_error;
}
}
// Initialize event & start OUTPUT stream
if (stream->out.client)
{
if (stream->rclient == NULL)
{
if ((hr = IAudioClient_GetService(stream->out.client, &pa_IID_IAudioRenderClient, (void **)&stream->rclient)) != S_OK)
{
LogHostError(hr);
goto thread_error;
}
}
// Preload buffer (obligatory, otherwise ->Start() will fail), avoid processing
// when in full-duplex mode as it requires input processing as well
if (!PA_WASAPI__IS_FULLDUPLEX(stream))
{
UINT32 frames = 0;
if ((hr = PollGetOutputFramesAvailable(stream, &frames)) == S_OK)
{
if (stream->bufferMode == paUtilFixedHostBufferSize)
{
if (frames >= stream->out.framesPerBuffer)
frames = stream->out.framesPerBuffer;
}
if (frames != 0)
{
if ((hr = ProcessOutputBuffer(stream, processor, frames)) != S_OK)
{
LogHostError(hr); // not fatal, just log
}
}
}
else
{
LogHostError(hr); // not fatal, just log
}
}
// Start
if ((hr = IAudioClient_Start(stream->out.client)) != S_OK)
{
LogHostError(hr);
goto thread_error;
}
}
// Signal: stream running
stream->running = TRUE;
// Notify: thread started
SetEvent(stream->hThreadStart);
if (!PA_WASAPI__IS_FULLDUPLEX(stream))
{
// Processing Loop
UINT32 next_sleep = sleep_ms;
while (WaitForSingleObject(stream->hCloseRequest, next_sleep) == WAIT_TIMEOUT)
{
// Get next sleep time
if (sleep_ms == 0)
{
next_sleep = ThreadIdleScheduler_NextSleep(&scheduler);
}
for (i = 0; i < S_COUNT; ++i)
{
// Process S_INPUT/S_OUTPUT
switch (i)
{
// Input stream
case S_INPUT: {
if (stream->cclient == NULL)
break;
if ((hr = ProcessInputBuffer(stream, processor)) != S_OK)
{
LogHostError(hr);
goto thread_error;
}
break; }
// Output stream
case S_OUTPUT: {
UINT32 frames;
if (stream->rclient == NULL)
break;
// get available frames
if ((hr = PollGetOutputFramesAvailable(stream, &frames)) != S_OK)
{
LogHostError(hr);
goto thread_error;
}
// output
if (stream->bufferMode == paUtilFixedHostBufferSize)
{
if (frames >= stream->out.framesPerBuffer)
{
if ((hr = ProcessOutputBuffer(stream, processor, stream->out.framesPerBuffer)) != S_OK)
{
LogHostError(hr);
goto thread_error;
}
}
}
else
{
if (frames != 0)
{
if ((hr = ProcessOutputBuffer(stream, processor, frames)) != S_OK)
{
LogHostError(hr);
goto thread_error;
}
}
}
break; }
}
}
}
}
else
{
#if 0
// Processing Loop
while (WaitForSingleObject(stream->hCloseRequest, 1) == WAIT_TIMEOUT)
{
UINT32 i_frames = 0, i_processed = 0;
BYTE *i_data = NULL, *o_data = NULL, *o_data_host = NULL;
DWORD i_flags = 0;
UINT32 o_frames = 0;
// get host input buffer
if ((hr = IAudioCaptureClient_GetBuffer(stream->cclient, &i_data, &i_frames, &i_flags, NULL, NULL)) != S_OK)
{
if (hr == AUDCLNT_S_BUFFER_EMPTY)
continue; // no data in capture buffer
LogHostError(hr);
break;
}
// get available frames
if ((hr = PollGetOutputFramesAvailable(stream, &o_frames)) != S_OK)
{
LogHostError(hr);
break;
}
// process an equal amount of frames
if (o_frames >= i_frames)
{
// process the input amount of frames
UINT32 o_processed = i_frames;
// get host output buffer
if ((hr = IAudioRenderClient_GetBuffer(stream->rclient, o_processed, &o_data)) == S_OK)
{
// processed amount of i_frames
i_processed = i_frames;
o_data_host = o_data;
// convert output mono
if (stream->out.monoMixer)
{
#define __DIV_8(v) ((v) >> 3) //!< (v / 8)
UINT32 mono_frames_size = o_processed * __DIV_8(stream->out.wavex.Format.wBitsPerSample);
#undef __DIV_8
// expand buffer (one way only for better performance due to no calls to realloc)
if (mono_frames_size > stream->out.monoBufferSize)
{
stream->out.monoBuffer = realloc(stream->out.monoBuffer, (stream->out.monoBufferSize = mono_frames_size));
if (stream->out.monoBuffer == NULL)
{
LogPaError(paInsufficientMemory);
break;
}
}
// replace buffer pointer
o_data = (BYTE *)stream->out.monoBuffer;
}
// convert input mono
if (stream->in.monoMixer)
{
#define __DIV_8(v) ((v) >> 3) //!< (v / 8)
UINT32 mono_frames_size = i_processed * __DIV_8(stream->in.wavex.Format.wBitsPerSample);
#undef __DIV_8
// expand buffer (one way only for better performance due to no calls to realloc)
if (mono_frames_size > stream->in.monoBufferSize)
{
stream->in.monoBuffer = realloc(stream->in.monoBuffer, (stream->in.monoBufferSize = mono_frames_size));
if (stream->in.monoBuffer == NULL)
{
LogPaError(paInsufficientMemory);
break;
}
}
// mix 2 to 1 input channels
stream->in.monoMixer(stream->in.monoBuffer, i_data, i_processed);
// replace buffer pointer
i_data = (BYTE *)stream->in.monoBuffer;
}
// process
processor[S_FULLDUPLEX].processor(i_data, i_processed, o_data, o_processed, processor[S_FULLDUPLEX].userData);
// mix 1 to 2 output channels
if (stream->out.monoBuffer)
stream->out.monoMixer(o_data_host, stream->out.monoBuffer, o_processed);
// release host output buffer
if ((hr = IAudioRenderClient_ReleaseBuffer(stream->rclient, o_processed, 0)) != S_OK)
LogHostError(hr);
}
else
{
if (stream->out.shareMode != AUDCLNT_SHAREMODE_SHARED)
LogHostError(hr); // be silent in shared mode, try again next time
}
}
// release host input buffer
if ((hr = IAudioCaptureClient_ReleaseBuffer(stream->cclient, i_processed)) != S_OK)
{
LogHostError(hr);
break;
}
}
#else
// Processing Loop
UINT32 next_sleep = sleep_ms;
while (WaitForSingleObject(stream->hCloseRequest, next_sleep) == WAIT_TIMEOUT)
{
UINT32 i_frames = 0, i_processed = 0;
BYTE *i_data = NULL, *o_data = NULL, *o_data_host = NULL;
DWORD i_flags = 0;
UINT32 o_frames = 0;
//BOOL repeat = FALSE;
// going below 1 msec resolution, switching between 1 ms and no waiting
//if (stream->in.shareMode == AUDCLNT_SHAREMODE_EXCLUSIVE)
// sleep_ms = !sleep_ms;
// Get next sleep time
if (sleep_ms == 0)
{
next_sleep = ThreadIdleScheduler_NextSleep(&scheduler);
}
// get available frames
if ((hr = PollGetOutputFramesAvailable(stream, &o_frames)) != S_OK)
{
LogHostError(hr);
break;
}
while (o_frames != 0)
{
// get host input buffer
if ((hr = IAudioCaptureClient_GetBuffer(stream->cclient, &i_data, &i_frames, &i_flags, NULL, NULL)) != S_OK)
{
if (hr == AUDCLNT_S_BUFFER_EMPTY)
break; // no data in capture buffer
LogHostError(hr);
break;
}
//PA_DEBUG(("full-duplex: o_frames[%d] i_frames[%d] repeat[%d]\n", o_frames, i_frames, repeat));
//repeat = TRUE;
// process an equal amount of frames
if (o_frames >= i_frames)
{
// process the input amount of frames
UINT32 o_processed = i_frames;
// get host output buffer
if ((hr = IAudioRenderClient_GetBuffer(stream->rclient, o_processed, &o_data)) == S_OK)
{
// processed amount of i_frames
i_processed = i_frames;
o_data_host = o_data;
// convert output mono
if (stream->out.monoMixer)
{
#define __DIV_8(v) ((v) >> 3) //!< (v / 8)
UINT32 mono_frames_size = o_processed * __DIV_8(stream->out.wavex.Format.wBitsPerSample);
#undef __DIV_8
// expand buffer (one way only for better performance due to no calls to realloc)
if (mono_frames_size > stream->out.monoBufferSize)
{
stream->out.monoBuffer = realloc(stream->out.monoBuffer, (stream->out.monoBufferSize = mono_frames_size));
if (stream->out.monoBuffer == NULL)
{
LogPaError(paInsufficientMemory);
goto thread_error;
}
}
// replace buffer pointer
o_data = (BYTE *)stream->out.monoBuffer;
}
// convert input mono
if (stream->in.monoMixer)
{
#define __DIV_8(v) ((v) >> 3) //!< (v / 8)
UINT32 mono_frames_size = i_processed * __DIV_8(stream->in.wavex.Format.wBitsPerSample);
#undef __DIV_8
// expand buffer (one way only for better performance due to no calls to realloc)
if (mono_frames_size > stream->in.monoBufferSize)
{
stream->in.monoBuffer = realloc(stream->in.monoBuffer, (stream->in.monoBufferSize = mono_frames_size));
if (stream->in.monoBuffer == NULL)
{
LogPaError(paInsufficientMemory);
goto thread_error;
}
}
// mix 2 to 1 input channels
stream->in.monoMixer(stream->in.monoBuffer, i_data, i_processed);
// replace buffer pointer
i_data = (BYTE *)stream->in.monoBuffer;
}
// process
processor[S_FULLDUPLEX].processor(i_data, i_processed, o_data, o_processed, processor[S_FULLDUPLEX].userData);
// mix 1 to 2 output channels
if (stream->out.monoBuffer)
stream->out.monoMixer(o_data_host, stream->out.monoBuffer, o_processed);
// release host output buffer
if ((hr = IAudioRenderClient_ReleaseBuffer(stream->rclient, o_processed, 0)) != S_OK)
LogHostError(hr);
o_frames -= o_processed;
}
else
{
if (stream->out.shareMode != AUDCLNT_SHAREMODE_SHARED)
LogHostError(hr); // be silent in shared mode, try again next time
}
}
else
{
i_processed = 0;
goto fd_release_buffer_in;
}
fd_release_buffer_in:
// release host input buffer
if ((hr = IAudioCaptureClient_ReleaseBuffer(stream->cclient, i_processed)) != S_OK)
{
LogHostError(hr);
break;
}
// break processing, input hasn't been accumulated yet
if (i_processed == 0)
break;
}
}
#endif
}
thread_end:
// Process stop
_OnStreamStop(stream);
// Notify: thread exited
SetEvent(stream->hThreadExit);
// Notify: not running
stream->running = FALSE;
return 0;
thread_error:
// Prevent deadlocking in Pa_StreamStart
SetEvent(stream->hThreadStart);
// Exit
goto thread_end;
}
//#endif //VC 2005
#if 0
if(bFirst) {
float masteur;
hr = stream->outVol->GetMasterVolumeLevelScalar(&masteur);
if (hr != S_OK)
LogHostError(hr);
float chan1, chan2;
hr = stream->outVol->GetChannelVolumeLevelScalar(0, &chan1);
if (hr != S_OK)
LogHostError(hr);
hr = stream->outVol->GetChannelVolumeLevelScalar(1, &chan2);
if (hr != S_OK)
LogHostError(hr);
BOOL bMute;
hr = stream->outVol->GetMute(&bMute);
if (hr != S_OK)
LogHostError(hr);
stream->outVol->SetMasterVolumeLevelScalar(0.5, NULL);
stream->outVol->SetChannelVolumeLevelScalar(0, 0.5, NULL);
stream->outVol->SetChannelVolumeLevelScalar(1, 0.5, NULL);
stream->outVol->SetMute(FALSE, NULL);
bFirst = FALSE;
}
#endif
| gpl-2.0 |
wazari972/Grisbi | src/gsb_plugins.c | 1 | 6331 | /* ************************************************************************** */
/* */
/* Copyright (C) 2006-2006 Benjamin Drieu (bdrieu@april.org) */
/* http://www.grisbi.org */
/* */
/* This program is free software; you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation; either version 2 of the License, or */
/* (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU General Public License for more details. */
/* */
/* You should have received a copy of the GNU General Public License */
/* along with this program; if not, write to the Free Software */
/* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
/* */
/* ************************************************************************** */
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include "include.h"
#ifndef ENABLE_STATIC
#include <gmodule.h>
#endif
#include <glib/gi18n.h>
/*START_INCLUDE*/
#include "gsb_plugins.h"
#include "dialog.h"
#ifdef ENABLE_STATIC
#include "plugins/gnucash/gnucash.h"
#include "plugins/ofx/ofx.h"
#include "plugins/openssl/openssl.h"
#endif /* ENABLE_STATIC */
/*END_INCLUDE*/
/*START_EXTERN*/
/*END_EXTERN*/
/*START_STATIC*/
/*END_STATIC*/
/** List of registered plugins. It should contain gsb_plugin structures. */
static GSList * plugins = NULL;
/**
*
*
*/
void gsb_plugins_scan_dir ( const char *dirname )
{
#ifdef ENABLE_STATIC
gsb_plugin *plugin = NULL;
plugin = g_malloc0 ( sizeof ( gsb_plugin ) );
plugin -> name = "gnucash";
plugin -> plugin_register = &gnucash_plugin_register;
plugin -> plugin_run = &gnucash_plugin_run;
plugin -> plugin_register ();
plugins = g_slist_append ( plugins, plugin );
#ifdef HAVE_OFX
plugin = g_malloc0 ( sizeof ( gsb_plugin ) );
plugin -> name = "ofx";
plugin -> plugin_register = &ofx_plugin_register;
plugin -> plugin_run = &ofx_plugin_run;
plugin -> plugin_register ();
plugins = g_slist_append ( plugins, plugin );
#endif /* HAVE_OFX */
plugin = g_malloc0 ( sizeof ( gsb_plugin ) );
plugin -> name = "openssl";
plugin -> plugin_register = &openssl_plugin_register;
plugin -> plugin_run = &openssl_plugin_run;
plugin -> plugin_register ();
plugins = g_slist_append ( plugins, plugin );
#else /* ENABLE_STATIC */
GDir * plugin_dir;
const gchar * filename;
gchar * plugin_name;
plugin_dir = g_dir_open ( dirname, 0, NULL );
if ( ! plugin_dir )
return;
while ( ( filename = g_dir_read_name ( plugin_dir ) ) != NULL )
{
gchar * complete_filename, * tmp;
gchar ** split_filename;
gsb_plugin * plugin = g_malloc0 ( sizeof ( gsb_plugin ) );
split_filename = g_strsplit(filename, ".", 2);
if ( !split_filename[1]
|| strncmp ( split_filename[1], G_MODULE_SUFFIX, strlen(G_MODULE_SUFFIX) ) )
{
/* not a loadable module: free before skipping to avoid leaks */
g_strfreev ( split_filename );
g_free ( plugin );
continue;
}
g_strfreev ( split_filename );
complete_filename = g_build_filename ( dirname, filename, NULL );
if ( ! ( plugin -> handle =
g_module_open (complete_filename, 0 ) ) )
{
gchar* tmpstr = g_strdup_printf ( "Couldn't load module %s: %s", filename,
g_module_error() );
dialogue_error ( tmpstr );
g_free ( tmpstr );
g_free ( plugin );
g_free ( complete_filename);
continue;
}
g_free (complete_filename);
if ( ! g_module_symbol ( plugin -> handle, "plugin_name",
(gpointer) &plugin_name ) )
{
gchar* tmpstr = g_strdup_printf ( "Plugin %s has no register symbol",
filename );
dialogue_error ( tmpstr );
g_free ( tmpstr );
g_free ( plugin );
continue;
}
plugin -> name = plugin_name;
tmp = g_strconcat ( plugin_name, "_plugin_register", NULL );
if ( ! g_module_symbol ( plugin -> handle, tmp,
(gpointer) &( plugin -> plugin_register ) ) )
{
gchar* tmpstr = g_strdup_printf ( "Plugin %s has no register symbol",
filename );
dialogue_error ( tmpstr );
g_free ( tmpstr );
g_free ( plugin );
continue;
}
g_free ( tmp );
plugin -> plugin_register ();
tmp = g_strconcat ( plugin_name, "_plugin_run", NULL );
if ( ! g_module_symbol ( plugin -> handle, tmp,
(gpointer) &( plugin -> plugin_run ) ) )
{
gchar* tmpstr = g_strdup_printf ( "Plugin %s has no run symbol",
filename );
dialogue_error ( tmpstr );
g_free ( tmpstr );
g_free ( plugin );
continue;
}
g_free ( tmp );
plugins = g_slist_append ( plugins, plugin );
}
g_dir_close ( plugin_dir );
#endif /* ENABLE_STATIC */
}
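/**
* Sketch of the contract a dynamic plugin has to fulfil so that the
* g_module_symbol() lookups above succeed (hypothetical "foo" plugin;
* exact signatures are those declared in gsb_plugins.h):
*
* G_MODULE_EXPORT const gchar plugin_name[] = "foo";
* G_MODULE_EXPORT void foo_plugin_register ( void );
* G_MODULE_EXPORT gpointer foo_plugin_run ( ... );
*/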
/**
*
*
*
*/
gsb_plugin * gsb_plugin_find ( gchar * plugin_name )
{
GSList * tmp = plugins;
g_return_val_if_fail ( plugin_name, NULL );
while ( tmp )
{
gsb_plugin * plugin = (gsb_plugin *) tmp -> data;
if ( ! strcmp ( plugin_name, plugin -> name ) )
{
return plugin;
}
tmp = tmp -> next;
}
return NULL;
}
/**
* Get activated plugins.
*
* \return A newly-allocated string representing activated plugins.
*/
gchar * gsb_plugin_get_list ()
{
gchar * list = NULL;
gchar * old_list = NULL;
GSList * tmp = plugins;
while ( tmp )
{
gsb_plugin * plugin = (gsb_plugin *) tmp -> data;
if ( ! list )
{
list = g_strconcat ( _("with plugins"), " ", plugin -> name, NULL );
}
else
{
old_list = list;
list = g_strconcat ( list, ", ", plugin -> name, NULL );
g_free(old_list);
}
tmp = tmp -> next;
}
if (! list)
list = g_strdup("no plugin");
return list;
}
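/* Example: with the "ofx" and "openssl" plugins loaded, the returned string
reads "with plugins ofx, openssl"; with no plugin loaded it is "no plugin". */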
/* Local Variables: */
/* c-basic-offset: 4 */
/* End: */
| gpl-2.0 |
ndtrung81/lammps | lib/kokkos/core/src/SYCL/Kokkos_SYCL_Instance.cpp | 1 | 12685 | /*
//@HEADER
// ************************************************************************
//
// Kokkos v. 3.0
// Copyright (2020) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
//
// ************************************************************************
//@HEADER
*/
#include <Kokkos_Core.hpp> //kokkos_malloc
namespace Kokkos {
namespace Experimental {
namespace Impl {
std::vector<std::optional<sycl::queue>*> SYCLInternal::all_queues;
std::mutex SYCLInternal::mutex;
SYCLInternal::~SYCLInternal() {
if (!was_finalized || m_scratchSpace || m_scratchFlags ||
m_scratchConcurrentBitset) {
std::cerr << "Kokkos::Experimental::SYCL ERROR: Failed to call "
"Kokkos::Experimental::SYCL::finalize()"
<< std::endl;
std::cerr.flush();
}
}
int SYCLInternal::verify_is_initialized(const char* const label) const {
if (!is_initialized()) {
std::cerr << "Kokkos::Experimental::SYCL::" << label
<< " : ERROR device not initialized" << std::endl;
}
return is_initialized();
}
SYCLInternal& SYCLInternal::singleton() {
static SYCLInternal self;
return self;
}
void SYCLInternal::initialize(const sycl::device& d) {
auto exception_handler = [](sycl::exception_list exceptions) {
bool asynchronous_error = false;
for (std::exception_ptr const& e : exceptions) {
try {
std::rethrow_exception(e);
} catch (sycl::exception const& e) {
std::cerr << e.what() << '\n';
asynchronous_error = true;
}
}
if (asynchronous_error)
Kokkos::Impl::throw_runtime_exception(
"There was an asynchronous SYCL error!\n");
};
// FIXME_SYCL using an in-order queue here should not be necessary since we
// are using submit_barrier for managing kernel dependencies but this seems to
// be required as a hot fix for now.
initialize(
sycl::queue{d, exception_handler, sycl::property::queue::in_order()});
}
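// Minimal sketch (not part of this file) of the asynchronous-error pattern
// installed above: errors raised by enqueued work are delivered to the
// handler at the next wait_and_throw().
#if 0
sycl::queue q{sycl::default_selector{}, [](sycl::exception_list el) {
  for (std::exception_ptr const& e : el) std::rethrow_exception(e);
}};
q.submit([&](sycl::handler& cgh) { cgh.single_task([] { /* kernel */ }); });
q.wait_and_throw();  // the handler runs here if the kernel failed
#endif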
// FIXME_SYCL
void SYCLInternal::initialize(const sycl::queue& q) {
if (was_finalized)
Kokkos::abort("Calling SYCL::initialize after SYCL::finalize is illegal\n");
if (is_initialized()) return;
if (!HostSpace::execution_space::impl_is_initialized()) {
const std::string msg(
"SYCL::initialize ERROR : HostSpace::execution_space is not "
"initialized");
Kokkos::Impl::throw_runtime_exception(msg);
}
const bool ok_init = nullptr == m_scratchSpace || nullptr == m_scratchFlags;
const bool ok_dev = true;
if (ok_init && ok_dev) {
m_queue = q;
// guard pushing to all_queues
{
std::lock_guard<std::mutex> lock(mutex);
all_queues.push_back(&m_queue);
}
const sycl::device& d = m_queue->get_device();
m_maxWorkgroupSize =
d.template get_info<sycl::info::device::max_work_group_size>();
// FIXME_SYCL this should give the correct value for NVIDIA GPUs
m_maxConcurrency =
m_maxWorkgroupSize * 2 *
d.template get_info<sycl::info::device::max_compute_units>();
// Setup concurrent bitset for obtaining unique tokens from within an
// executing kernel.
{
const int32_t buffer_bound =
Kokkos::Impl::concurrent_bitset::buffer_bound(m_maxConcurrency);
using Record = Kokkos::Impl::SharedAllocationRecord<
Kokkos::Experimental::SYCLDeviceUSMSpace, void>;
Record* const r =
Record::allocate(Kokkos::Experimental::SYCLDeviceUSMSpace(*m_queue),
"Kokkos::Experimental::SYCL::InternalScratchBitset",
sizeof(uint32_t) * buffer_bound);
Record::increment(r);
m_scratchConcurrentBitset = reinterpret_cast<uint32_t*>(r->data());
auto event = m_queue->memset(m_scratchConcurrentBitset, 0,
sizeof(uint32_t) * buffer_bound);
fence(event,
"Kokkos::Experimental::SYCLInternal::initialize: fence after "
"initializing m_scratchConcurrentBitset",
m_instance_id);
}
m_maxShmemPerBlock =
d.template get_info<sycl::info::device::local_mem_size>();
m_indirectKernelMem.reset(*m_queue, m_instance_id);
m_indirectReducerMem.reset(*m_queue, m_instance_id);
} else {
std::ostringstream msg;
msg << "Kokkos::Experimental::SYCL::initialize(...) FAILED";
if (!ok_init) {
msg << " : Already initialized";
}
Kokkos::Impl::throw_runtime_exception(msg.str());
}
m_team_scratch_current_size = 0;
m_team_scratch_ptr = nullptr;
}
void* SYCLInternal::resize_team_scratch_space(std::int64_t bytes,
bool force_shrink) {
if (m_team_scratch_current_size == 0) {
m_team_scratch_current_size = bytes;
m_team_scratch_ptr =
Kokkos::kokkos_malloc<Experimental::SYCLDeviceUSMSpace>(
"Kokkos::Experimental::SYCLDeviceUSMSpace::TeamScratchMemory",
m_team_scratch_current_size);
}
if ((bytes > m_team_scratch_current_size) ||
((bytes < m_team_scratch_current_size) && (force_shrink))) {
m_team_scratch_current_size = bytes;
m_team_scratch_ptr =
Kokkos::kokkos_realloc<Experimental::SYCLDeviceUSMSpace>(
m_team_scratch_ptr, m_team_scratch_current_size);
}
return m_team_scratch_ptr;
}
uint32_t SYCLInternal::impl_get_instance_id() const { return m_instance_id; }
void SYCLInternal::finalize() {
SYCLInternal::fence(*m_queue,
"Kokkos::SYCLInternal::finalize: fence on finalization",
m_instance_id);
was_finalized = true;
using RecordSYCL = Kokkos::Impl::SharedAllocationRecord<SYCLDeviceUSMSpace>;
if (nullptr != m_scratchSpace)
RecordSYCL::decrement(RecordSYCL::get_record(m_scratchSpace));
if (nullptr != m_scratchFlags)
RecordSYCL::decrement(RecordSYCL::get_record(m_scratchFlags));
m_syclDev = -1;
m_scratchSpaceCount = 0;
m_scratchSpace = nullptr;
m_scratchFlagsCount = 0;
m_scratchFlags = nullptr;
RecordSYCL::decrement(RecordSYCL::get_record(m_scratchConcurrentBitset));
m_scratchConcurrentBitset = nullptr;
if (m_team_scratch_current_size > 0)
Kokkos::kokkos_free<Kokkos::Experimental::SYCLDeviceUSMSpace>(
m_team_scratch_ptr);
m_team_scratch_current_size = 0;
m_team_scratch_ptr = nullptr;
m_indirectKernelMem.reset();
m_indirectReducerMem.reset();
// guard erasing from all_queues
{
std::lock_guard<std::mutex> lock(mutex);
all_queues.erase(std::find(all_queues.begin(), all_queues.end(), &m_queue));
}
m_queue.reset();
}
void* SYCLInternal::scratch_space(
const Kokkos::Experimental::SYCL::size_type size) {
const size_type sizeScratchGrain =
sizeof(Kokkos::Experimental::SYCL::size_type);
if (verify_is_initialized("scratch_space") &&
m_scratchSpaceCount * sizeScratchGrain < size) {
m_scratchSpaceCount = (size + sizeScratchGrain - 1) / sizeScratchGrain;
using Record = Kokkos::Impl::SharedAllocationRecord<
Kokkos::Experimental::SYCLDeviceUSMSpace, void>;
if (nullptr != m_scratchSpace)
Record::decrement(Record::get_record(m_scratchSpace));
Record* const r =
Record::allocate(Kokkos::Experimental::SYCLDeviceUSMSpace(*m_queue),
"Kokkos::Experimental::SYCL::InternalScratchSpace",
(sizeScratchGrain * m_scratchSpaceCount));
Record::increment(r);
m_scratchSpace = reinterpret_cast<size_type*>(r->data());
}
return m_scratchSpace;
}
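// Example: a request of size = 10 bytes with a (typically 4-byte) grain is
// rounded up via (10 + 4 - 1) / 4 = 3 grains, i.e. 12 bytes get allocated;
// scratch_flags() below uses the same ceiling division.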
void* SYCLInternal::scratch_flags(
const Kokkos::Experimental::SYCL::size_type size) {
const size_type sizeScratchGrain =
sizeof(Kokkos::Experimental::SYCL::size_type);
if (verify_is_initialized("scratch_flags") &&
m_scratchFlagsCount * sizeScratchGrain < size) {
m_scratchFlagsCount = (size + sizeScratchGrain - 1) / sizeScratchGrain;
using Record = Kokkos::Impl::SharedAllocationRecord<
Kokkos::Experimental::SYCLDeviceUSMSpace, void>;
if (nullptr != m_scratchFlags)
Record::decrement(Record::get_record(m_scratchFlags));
Record* const r =
Record::allocate(Kokkos::Experimental::SYCLDeviceUSMSpace(*m_queue),
"Kokkos::Experimental::SYCL::InternalScratchFlags",
(sizeScratchGrain * m_scratchFlagsCount));
Record::increment(r);
m_scratchFlags = reinterpret_cast<size_type*>(r->data());
}
m_queue->memset(m_scratchFlags, 0, m_scratchFlagsCount * sizeScratchGrain);
fence(*m_queue,
"Kokkos::Experimental::SYCLInternal::scratch_flags fence after "
"initializing m_scratchFlags",
m_instance_id);
return m_scratchFlags;
}
template <typename WAT>
void SYCLInternal::fence_helper(WAT& wat, const std::string& name,
uint32_t instance_id) {
Kokkos::Tools::Experimental::Impl::profile_fence_event<
Kokkos::Experimental::SYCL>(
name, Kokkos::Tools::Experimental::Impl::DirectFenceIDHandle{instance_id},
[&]() {
try {
wat.wait_and_throw();
} catch (sycl::exception const& e) {
Kokkos::Impl::throw_runtime_exception(
std::string("There was a synchronous SYCL error:\n") += e.what());
}
});
}
template void SYCLInternal::fence_helper<sycl::queue>(sycl::queue&,
const std::string&,
uint32_t);
template void SYCLInternal::fence_helper<sycl::event>(sycl::event&,
const std::string&,
uint32_t);
template <sycl::usm::alloc Kind>
size_t SYCLInternal::USMObjectMem<Kind>::reserve(size_t n) {
assert(m_q);
if (m_capacity < n) {
using Record = Kokkos::Impl::SharedAllocationRecord<AllocationSpace, void>;
// First free what we have (in case malloc can reuse it)
if (m_data) Record::decrement(Record::get_record(m_data));
Record* const r = Record::allocate(
AllocationSpace(*m_q), "Kokkos::Experimental::SYCL::USMObjectMem", n);
Record::increment(r);
m_data = r->data();
m_capacity = n;
}
return m_capacity;
}
template <sycl::usm::alloc Kind>
void SYCLInternal::USMObjectMem<Kind>::reset() {
if (m_data) {
// This implies a fence since this class is not copyable
// and deallocating implies a fence across all registered queues.
using Record = Kokkos::Impl::SharedAllocationRecord<AllocationSpace, void>;
Record::decrement(Record::get_record(m_data));
m_capacity = 0;
m_data = nullptr;
}
m_q.reset();
}
template class SYCLInternal::USMObjectMem<sycl::usm::alloc::shared>;
template class SYCLInternal::USMObjectMem<sycl::usm::alloc::device>;
template class SYCLInternal::USMObjectMem<sycl::usm::alloc::host>;
} // namespace Impl
} // namespace Experimental
} // namespace Kokkos
| gpl-2.0 |
SrgShv/speex_stm32 | speex/STM32/libspeex/cb_search.c | 1 | 14152 | /* Copyright (C) 2002-2006 Jean-Marc Valin
File: cb_search.c
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of the Xiph.org Foundation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "cb_search.h"
#include "filters.h"
#include "stack_alloc.h"
#include "vq.h"
#include "arch.h"
#include "math_approx.h"
#include "os_support.h"
#ifdef _USE_SSE
#include "cb_search_sse.h"
#elif defined(ARM4_ASM) || defined(ARM5E_ASM)
#include "cb_search_arm4.h"
#elif defined(BFIN_ASM)
#include "cb_search_bfin.h"
#endif
//added by MCD Application Team June 2008
#include "cb_search_cortexM3.h"
#ifndef OVERRIDE_COMPUTE_WEIGHTED_CODEBOOK
static void compute_weighted_codebook(const signed char *shape_cb, const spx_word16_t *r, spx_word16_t *resp, spx_word16_t *resp2, spx_word32_t *E, int shape_cb_size, int subvect_size, char *stack)
{
int i, j, k;
VARDECL(spx_word16_t *shape);
ALLOC(shape, subvect_size, spx_word16_t);
for (i=0;i<shape_cb_size;i++)
{
spx_word16_t *res;
res = resp+i*subvect_size;
for (k=0;k<subvect_size;k++)
shape[k] = (spx_word16_t)shape_cb[i*subvect_size+k];
E[i]=0;
/* Compute codeword response using convolution with impulse response */
for(j=0;j<subvect_size;j++)
{
spx_word32_t resj=0;
spx_word16_t res16;
for (k=0;k<=j;k++)
resj = MAC16_16(resj,shape[k],r[j-k]);
#ifdef FIXED_POINT
res16 = EXTRACT16(SHR32(resj, 13));
#else
res16 = 0.03125f*resj;
#endif
/* Compute codeword energy */
E[i]=MAC16_16(E[i],res16,res16);
res[j] = res16;
/*printf ("%d\n", (int)res[j]);*/
}
}
}
#endif
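/*
In the fixed-point build the loop above computes, for codeword i,
resp_i[j] = (1/2^13) * sum_{k=0..j} shape_i[k] * r[j-k]
(the float build scales by 0.03125 = 1/32 instead) and accumulates the
codeword energy E_i = sum_j resp_i[j]^2; the modified search function
below unrolls exactly this convolution for subvect_size == 10.
*/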
#ifndef OVERRIDE_TARGET_UPDATE
static inline void target_update(spx_word16_t *t, spx_word16_t g, spx_word16_t *r, int len)
{
int n;
for (n=0;n<len;n++)
t[n] = SUB16(t[n],PSHR32(MULT16_16(g,r[n]),13));
}
#endif
/********************************************************************************************/
/* This function has been modified by STMicroelectronics, MCD Application team, June 2008. */
/********************************************************************************************/
extern const signed char exc_10_32_table[];
void split_cb_search_shape_sign(
spx_word16_t target[], /* target vector */
spx_coef_t ak[], /* LPCs for this subframe */
spx_coef_t awk1[], /* Weighted LPCs for this subframe */
spx_coef_t awk2[], /* Weighted LPCs for this subframe */
const void *par, /* Codebook/search parameters*/
int p, /* number of LPC coeffs */
int nsf, /* number of samples in subframe */
spx_sig_t *exc,
spx_word16_t *r,
SpeexBits *bits,
char *stack,
int complexity,
int update_target
)
{
int i,j,m,q;
const signed char *shape_cb;
int shape_cb_size = 32, subvect_size = 10;
int best_index;
spx_word32_t best_dist;
spx_word16_t resp[320];
spx_word16_t *resp2 = resp;
spx_word32_t E[32];
spx_word16_t t[40];
spx_sig_t e[40];
shape_cb=exc_10_32_table;
/* FIXME: Do we still need to copy the target? */
SPEEX_COPY(t, target, nsf);
/* compute_weighted_codebook, manually unrolled for subvect_size==10 */
{
int i, k;
spx_word16_t shape[10];
for (i=0;i<shape_cb_size;i++)
{
spx_word16_t *res;
res = resp+i*subvect_size;
for (k=0;k<subvect_size;k++)
shape[k] = (spx_word16_t)shape_cb[i*subvect_size+k];
E[i]=0;
/* Compute codeword response using convolution with impulse response */
{
spx_word32_t resj;
spx_word16_t res16;
// 0
resj = MULT16_16(shape[0],r[0]);
res16 = EXTRACT16(SHR32(resj, 13));
// Compute codeword energy
E[i]=MAC16_16(E[i],res16,res16);
res[0] = res16;
//++++++++++++++++++++++++++
// 1
resj = MULT16_16(shape[0],r[1]);
resj = MAC16_16(resj,shape[1],r[0]);
res16 = EXTRACT16(SHR32(resj, 13));
// Compute codeword energy
E[i]=MAC16_16(E[i],res16,res16);
res[1] = res16;
//++++++++++++++++++++++++++
// 2
resj = MULT16_16(shape[0],r[2]);
resj = MAC16_16(resj,shape[1],r[1]);
resj = MAC16_16(resj,shape[2],r[0]);
res16 = EXTRACT16(SHR32(resj, 13));
// Compute codeword energy
E[i]=MAC16_16(E[i],res16,res16);
res[2] = res16;
//++++++++++++++++++++++++++
// 3
resj = MULT16_16(shape[0],r[3]);
resj = MAC16_16(resj,shape[1],r[2]);
resj = MAC16_16(resj,shape[2],r[1]);
resj = MAC16_16(resj,shape[3],r[0]);
res16 = EXTRACT16(SHR32(resj, 13));
// Compute codeword energy
E[i]=MAC16_16(E[i],res16,res16);
res[3] = res16;
//++++++++++++++++++++++++++
// 4
resj = MULT16_16(shape[0],r[4]);
resj = MAC16_16(resj,shape[1],r[3]);
resj = MAC16_16(resj,shape[2],r[2]);
resj = MAC16_16(resj,shape[3],r[1]);
resj = MAC16_16(resj,shape[4],r[0]);
res16 = EXTRACT16(SHR32(resj, 13));
// Compute codeword energy
E[i]=MAC16_16(E[i],res16,res16);
res[4] = res16;
//++++++++++++++++++++++++++
// 5
resj = MULT16_16(shape[0],r[5]);
resj = MAC16_16(resj,shape[1],r[4]);
resj = MAC16_16(resj,shape[2],r[3]);
resj = MAC16_16(resj,shape[3],r[2]);
resj = MAC16_16(resj,shape[4],r[1]);
resj = MAC16_16(resj,shape[5],r[0]);
res16 = EXTRACT16(SHR32(resj, 13));
// Compute codeword energy
E[i]=MAC16_16(E[i],res16,res16);
res[5] = res16;
//++++++++++++++++++++++++++
// 6
resj = MULT16_16(shape[0],r[6]);
resj = MAC16_16(resj,shape[1],r[5]);
resj = MAC16_16(resj,shape[2],r[4]);
resj = MAC16_16(resj,shape[3],r[3]);
resj = MAC16_16(resj,shape[4],r[2]);
resj = MAC16_16(resj,shape[5],r[1]);
resj = MAC16_16(resj,shape[6],r[0]);
res16 = EXTRACT16(SHR32(resj, 13));
// Compute codeword energy
E[i]=MAC16_16(E[i],res16,res16);
res[6] = res16;
//++++++++++++++++++++++++++
// 7
resj = MULT16_16(shape[0],r[7]);
resj = MAC16_16(resj,shape[1],r[6]);
resj = MAC16_16(resj,shape[2],r[5]);
resj = MAC16_16(resj,shape[3],r[4]);
resj = MAC16_16(resj,shape[4],r[3]);
resj = MAC16_16(resj,shape[5],r[2]);
resj = MAC16_16(resj,shape[6],r[1]);
resj = MAC16_16(resj,shape[7],r[0]);
res16 = EXTRACT16(SHR32(resj, 13));
// Compute codeword energy
E[i]=MAC16_16(E[i],res16,res16);
res[7] = res16;
//++++++++++++++++++++++++++
// 8
resj = MULT16_16(shape[0],r[8]);
resj = MAC16_16(resj,shape[1],r[7]);
resj = MAC16_16(resj,shape[2],r[6]);
resj = MAC16_16(resj,shape[3],r[5]);
resj = MAC16_16(resj,shape[4],r[4]);
resj = MAC16_16(resj,shape[5],r[3]);
resj = MAC16_16(resj,shape[6],r[2]);
resj = MAC16_16(resj,shape[7],r[1]);
resj = MAC16_16(resj,shape[8],r[0]);
res16 = EXTRACT16(SHR32(resj, 13));
// Compute codeword energy
E[i]=MAC16_16(E[i],res16,res16);
res[8] = res16;
//++++++++++++++++++++++++++
// 9
resj = MULT16_16(shape[0],r[9]);
resj = MAC16_16(resj,shape[1],r[8]);
resj = MAC16_16(resj,shape[2],r[7]);
resj = MAC16_16(resj,shape[3],r[6]);
resj = MAC16_16(resj,shape[4],r[5]);
resj = MAC16_16(resj,shape[5],r[4]);
resj = MAC16_16(resj,shape[6],r[3]);
resj = MAC16_16(resj,shape[7],r[2]);
resj = MAC16_16(resj,shape[8],r[1]);
resj = MAC16_16(resj,shape[9],r[0]);
res16 = EXTRACT16(SHR32(resj, 13));
// Compute codeword energy
E[i]=MAC16_16(E[i],res16,res16);
res[9] = res16;
//++++++++++++++++++++++++++
}
}
}
for (i=0;i<4;i++)
{
spx_word16_t *x=t+subvect_size*i;
/*Find new n-best based on previous n-best j*/
vq_nbest(x, resp2, subvect_size, shape_cb_size, E, 1, &best_index, &best_dist, stack);
speex_bits_pack(bits,best_index,5);
{
int rind;
spx_word16_t *res;
spx_word16_t sign=1;
rind = best_index;
if (rind>=shape_cb_size)
{
sign=-1;
rind-=shape_cb_size;
}
res = resp+rind*subvect_size;
if (sign>0)
for (m=0;m<subvect_size;m++)
t[subvect_size*i+m] = SUB16(t[subvect_size*i+m], res[m]);
else
for (m=0;m<subvect_size;m++)
t[subvect_size*i+m] = ADD16(t[subvect_size*i+m], res[m]);
if (sign==1)
{
for (j=0;j<subvect_size;j++)
e[subvect_size*i+j]=SHL32(EXTEND32(shape_cb[rind*subvect_size+j]),SIG_SHIFT-5);
} else {
for (j=0;j<subvect_size;j++)
e[subvect_size*i+j]=NEG32(SHL32(EXTEND32(shape_cb[rind*subvect_size+j]),SIG_SHIFT-5));
}
}
for (m=0;m<subvect_size;m++)
{
spx_word16_t g;
int rind;
spx_word16_t sign=1;
rind = best_index;
if (rind>=shape_cb_size)
{
sign=-1;
rind-=shape_cb_size;
}
q=subvect_size-m;
g=sign*shape_cb[rind*subvect_size+m];
target_update(t+subvect_size*(i+1), g, r+q, nsf-subvect_size*(i+1));
}
}
/* Update excitation */
/* FIXME: We could update the excitation directly above */
for (j=0;j<nsf;j++)
exc[j]=ADD32(exc[j],e[j]);
}
void split_cb_shape_sign_unquant(
spx_sig_t *exc,
const void *par, /* non-overlapping codebook */
int nsf, /* number of samples in subframe */
SpeexBits *bits,
char *stack,
signed int *seed
)
{
int i,j;
VARDECL(int *ind);
VARDECL(int *signs);
const signed char *shape_cb;
int subvect_size, nb_subvect;
const split_cb_params *params;
int have_sign;
params = (const split_cb_params *) par;
subvect_size = params->subvect_size;
nb_subvect = params->nb_subvect;
shape_cb = params->shape_cb;
have_sign = params->have_sign;
ALLOC(ind, nb_subvect, int);
ALLOC(signs, nb_subvect, int);
/* Decode codewords and gains */
for (i=0;i<nb_subvect;i++)
{
if (have_sign)
signs[i] = speex_bits_unpack_unsigned(bits, 1);
else
signs[i] = 0;
ind[i] = speex_bits_unpack_unsigned(bits, params->shape_bits);
}
/* Compute decoded excitation */
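/* Note: SHL32(...,SIG_SHIFT-5) in the fixed-point branch below matches the
0.03125 (= 2^-5) scaling used in the float branch */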
for (i=0;i<nb_subvect;i++)
{
spx_word16_t s=1;
if (signs[i])
s=-1;
#ifdef FIXED_POINT
if (s==1)
{
for (j=0;j<subvect_size;j++)
exc[subvect_size*i+j]=SHL32(EXTEND32(shape_cb[ind[i]*subvect_size+j]),SIG_SHIFT-5);
} else {
for (j=0;j<subvect_size;j++)
exc[subvect_size*i+j]=NEG32(SHL32(EXTEND32(shape_cb[ind[i]*subvect_size+j]),SIG_SHIFT-5));
}
#else
for (j=0;j<subvect_size;j++)
exc[subvect_size*i+j]+=s*0.03125*shape_cb[ind[i]*subvect_size+j];
#endif
}
}
void noise_codebook_quant(
spx_word16_t target[], /* target vector */
spx_coef_t ak[], /* LPCs for this subframe */
spx_coef_t awk1[], /* Weighted LPCs for this subframe */
spx_coef_t awk2[], /* Weighted LPCs for this subframe */
const void *par, /* Codebook/search parameters*/
int p, /* number of LPC coeffs */
int nsf, /* number of samples in subframe */
spx_sig_t *exc,
spx_word16_t *r,
SpeexBits *bits,
char *stack,
int complexity,
int update_target
)
{
int i;
VARDECL(spx_word16_t *tmp);
ALLOC(tmp, nsf, spx_word16_t);
residue_percep_zero16(target, ak, awk1, awk2, tmp, nsf, p, stack);
for (i=0;i<nsf;i++)
exc[i]+=SHL32(EXTEND32(tmp[i]),8);
SPEEX_MEMSET(target, 0, nsf);
}
void noise_codebook_unquant(
spx_sig_t *exc,
const void *par, /* non-overlapping codebook */
int nsf, /* number of samples in subframe */
SpeexBits *bits,
char *stack,
signed int *seed
)
{
int i;
/* FIXME: This is bad, but I don't think the function ever gets called anyway */
for (i=0;i<nsf;i++)
exc[i]=SHL32(EXTEND32(speex_rand(1, seed)),SIG_SHIFT);
}
| gpl-2.0 |
KDE/calligra | plugins/textshape/TextShape.cpp | 1 | 15696 | /* This file is part of the KDE project
* SPDX-FileCopyrightText: 2006-2010 Thomas Zander <zander@kde.org>
* SPDX-FileCopyrightText: 2008-2010 Thorsten Zachmann <zachmann@kde.org>
* SPDX-FileCopyrightText: 2008 Pierre Stirnweiss <pierre.stirnweiss_calligra@gadz.org>
* SPDX-FileCopyrightText: 2010 KO GmbH <cbo@kogmbh.com>
*
* SPDX-License-Identifier: LGPL-2.0-or-later
*/
#include "TextShape.h"
#include "ShrinkToFitShapeContainer.h"
#include <KoTextSharedLoadingData.h>
#include "SimpleRootAreaProvider.h"
#include <KoTextLayoutRootArea.h>
#include <KoTextEditor.h>
#include <KoCanvasBase.h>
#include <KoCanvasResourceManager.h>
#include <KoChangeTracker.h>
#include <KoInlineTextObjectManager.h>
#include <KoTextRangeManager.h>
#include <KoOdfLoadingContext.h>
#include <KoOdfWorkaround.h>
#include <KoParagraphStyle.h>
#include <KoPostscriptPaintDevice.h>
#include <KoSelection.h>
#include <KoShapeLoadingContext.h>
#include <KoShapeBackground.h>
#include <KoShapePaintingContext.h>
#include <KoShapeSavingContext.h>
#include <KoText.h>
#include <KoTextDocument.h>
#include <KoTextDocumentLayout.h>
#include <KoTextPage.h>
#include <KoTextShapeContainerModel.h>
#include <KoPageProvider.h>
#include <KoViewConverter.h>
#include <KoXmlWriter.h>
#include <KoXmlReader.h>
#include <KoXmlNS.h>
#include <KoStyleStack.h>
#include <QAbstractTextDocumentLayout>
#include <QApplication>
#include <QFont>
#include <QPainter>
#include <QPainterPath>
#include <QPen>
#include <QTextLayout>
#include <QDebug>
TextShape::TextShape(KoInlineTextObjectManager *inlineTextObjectManager, KoTextRangeManager *textRangeManager)
: KoShapeContainer(new KoTextShapeContainerModel())
, KoFrameShape(KoXmlNS::draw, "text-box")
, m_pageProvider(0)
, m_imageCollection(0)
, m_clip(true)
{
setShapeId(TextShape_SHAPEID);
m_textShapeData = new KoTextShapeData();
setUserData(m_textShapeData);
SimpleRootAreaProvider *provider = new SimpleRootAreaProvider(m_textShapeData, this);
KoTextDocument(m_textShapeData->document()).setInlineTextObjectManager(inlineTextObjectManager);
KoTextDocument(m_textShapeData->document()).setTextRangeManager(textRangeManager);
m_layout = new KoTextDocumentLayout(m_textShapeData->document(), provider);
m_textShapeData->document()->setDocumentLayout(m_layout);
setCollisionDetection(true);
QObject::connect(m_layout, &KoTextDocumentLayout::layoutIsDirty, m_layout, &KoTextDocumentLayout::scheduleLayout);
}
TextShape::~TextShape()
{
}
void TextShape::paintComponent(QPainter &painter, const KoViewConverter &converter,
KoShapePaintingContext &paintContext)
{
painter.save();
applyConversion(painter, converter);
KoBorder *border = this->border();
if (border) {
paintBorder(painter, converter);
}
else if (paintContext.showTextShapeOutlines) {
// No need to paint the outlines if there is a real border.
if (qAbs(rotation()) > 1)
painter.setRenderHint(QPainter::Antialiasing);
QPen pen(QColor(210, 210, 210), 0); // use cosmetic pen
QPointF onePixel = converter.viewToDocument(QPointF(1.0, 1.0));
QRectF rect(QPointF(0.0, 0.0), size() - QSizeF(onePixel.x(), onePixel.y()));
painter.setPen(pen);
painter.drawRect(rect);
}
painter.restore();
if (m_textShapeData->isDirty()) { // not laid out yet.
return;
}
QTextDocument *doc = m_textShapeData->document();
Q_ASSERT(doc);
KoTextDocumentLayout *lay = qobject_cast<KoTextDocumentLayout*>(doc->documentLayout());
Q_ASSERT(lay);
lay->showInlineObjectVisualization(paintContext.showInlineObjectVisualization);
applyConversion(painter, converter);
if (background()) {
QPainterPath p;
p.addRect(QRectF(QPointF(), size()));
background()->paint(painter, converter, paintContext, p);
}
// this makes it possible to use the same shape on different pages while showing different page numbers
if (m_pageProvider) {
KoTextPage *page = m_pageProvider->page(this);
if (page) {
// this is used to avoid triggering repaints if layouting happens during painting
m_paintRegion = painter.clipRegion();
if (!m_textShapeData->rootArea()->page() || page->pageNumber() != m_textShapeData->rootArea()->page()->pageNumber()) {
m_textShapeData->rootArea()->setPage(page); // takes over ownership of the page
} else {
delete page;
}
}
}
KoTextDocumentLayout::PaintContext pc;
QAbstractTextDocumentLayout::Selection selection;
KoTextEditor *textEditor = KoTextDocument(m_textShapeData->document()).textEditor();
selection.cursor = *(textEditor->cursor());
QPalette palette = pc.textContext.palette;
selection.format.setBackground(palette.brush(QPalette::Highlight));
selection.format.setForeground(palette.brush(QPalette::HighlightedText));
pc.textContext.selections.append(selection);
pc.textContext.selections += KoTextDocument(doc).selections();
pc.viewConverter = &converter;
pc.imageCollection = m_imageCollection;
pc.showFormattingCharacters = paintContext.showFormattingCharacters;
pc.showTableBorders = paintContext.showTableBorders;
pc.showSectionBounds = paintContext.showSectionBounds;
pc.showSpellChecking = paintContext.showSpellChecking;
pc.showSelections = paintContext.showSelections;
// When clipping the painter we need to make sure not to cut off cosmetic pens which
// may be used to draw e.g. table borders for user convenience on screen (but not
// when e.g. printing). Such cosmetic pens are special because they always have the
// same pen width (1 pixel) independent of zoom factor or painter transformations and
// are not taken into account in any border calculations.
QRectF clipRect = outlineRect();
qreal cosmeticPenX = 1 * 72. / painter.device()->logicalDpiX();
qreal cosmeticPenY = 1 * 72. / painter.device()->logicalDpiY();
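// e.g. on a 96 DPI paint device this is 72/96 = 0.75pt, i.e. one device pixel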
painter.setClipRect(clipRect.adjusted(-cosmeticPenX, -cosmeticPenY, cosmeticPenX, cosmeticPenY), Qt::IntersectClip);
painter.save();
painter.translate(0, -m_textShapeData->documentOffset());
m_textShapeData->rootArea()->paint(&painter, pc); // only need to draw ourselves
painter.restore();
m_paintRegion = QRegion();
}
QPointF TextShape::convertScreenPos(const QPointF &point) const
{
QPointF p = absoluteTransformation(0).inverted().map(point);
return p + QPointF(0.0, m_textShapeData->documentOffset());
}
QPainterPath TextShape::outline() const
{
QPainterPath path;
path.addRect(QRectF(QPointF(0,0), size()));
return path;
}
QRectF TextShape::outlineRect() const
{
if (m_textShapeData->rootArea()) {
QRectF rect = m_textShapeData->rootArea()->boundingRect();
rect.moveTop(rect.top() - m_textShapeData->rootArea()->top());
if (m_clip) {
rect.setHeight(size().height());
}
return rect | QRectF(QPointF(0, 0), size());
}
return QRectF(QPointF(0,0), size());
}
void TextShape::shapeChanged(ChangeType type, KoShape *shape)
{
Q_UNUSED(shape);
KoShapeContainer::shapeChanged(type, shape);
if (type == PositionChanged || type == SizeChanged || type == CollisionDetected) {
m_textShapeData->setDirty();
}
}
void TextShape::saveOdf(KoShapeSavingContext &context) const
{
KoXmlWriter & writer = context.xmlWriter();
QString textHeight = additionalAttribute("fo:min-height");
const_cast<TextShape*>(this)->removeAdditionalAttribute("fo:min-height");
writer.startElement("draw:frame");
// if the TextShape is wrapped in a shrink to fit container we need to save the geometry of the container as
// the geometry of the shape might have been changed.
if (ShrinkToFitShapeContainer *stf = dynamic_cast<ShrinkToFitShapeContainer *>(this->parent())) {
stf->saveOdfAttributes(context, OdfSize | OdfPosition | OdfTransformation );
saveOdfAttributes(context, OdfAdditionalAttributes | OdfMandatories | OdfCommonChildElements);
}
else {
saveOdfAttributes(context, OdfAllAttributes);
}
writer.startElement("draw:text-box");
if (! textHeight.isEmpty())
writer.addAttribute("fo:min-height", textHeight);
KoTextDocumentLayout *lay = qobject_cast<KoTextDocumentLayout*>(m_textShapeData->document()->documentLayout());
int index = -1;
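// Find this shape's position among the layout's shapes; the shape that
// follows it (if any) is saved as draw:chain-next-name.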
if (lay) {
int i = 0;
foreach (KoShape *shape, lay->shapes()) {
if (shape == this) {
index = i;
} else if (index >= 0) {
writer.addAttribute("draw:chain-next-name", shape->name());
break;
}
++i;
}
}
const bool saveMyText = index == 0; // only save the text once.
m_textShapeData->saveOdf(context, 0, 0, saveMyText ? -1 : 0);
writer.endElement(); // draw:text-box
saveOdfCommonChildElements(context);
writer.endElement(); // draw:frame
}
QString TextShape::saveStyle(KoGenStyle &style, KoShapeSavingContext &context) const
{
Qt::Alignment vAlign(m_textShapeData->verticalAlignment());
QString verticalAlign = "top";
if (vAlign == Qt::AlignBottom) {
verticalAlign = "bottom";
}
else if ( vAlign == Qt::AlignVCenter ) {
verticalAlign = "middle";
}
style.addProperty("draw:textarea-vertical-align", verticalAlign);
KoTextShapeData::ResizeMethod resize = m_textShapeData->resizeMethod();
if (resize == KoTextShapeData::AutoGrowWidth || resize == KoTextShapeData::AutoGrowWidthAndHeight)
style.addProperty("draw:auto-grow-width", "true");
if (resize != KoTextShapeData::AutoGrowHeight && resize != KoTextShapeData::AutoGrowWidthAndHeight)
style.addProperty("draw:auto-grow-height", "false");
if (resize == KoTextShapeData::ShrinkToFitResize)
style.addProperty("draw:fit-to-size", "true");
m_textShapeData->saveStyle(style, context);
return KoShape::saveStyle(style, context);
}
void TextShape::loadStyle(const KoXmlElement &element, KoShapeLoadingContext &context)
{
KoShape::loadStyle(element, context);
KoStyleStack &styleStack = context.odfLoadingContext().styleStack();
styleStack.setTypeProperties("graphic");
QString verticalAlign(styleStack.property(KoXmlNS::draw, "textarea-vertical-align"));
Qt::Alignment alignment(Qt::AlignTop);
if (verticalAlign == "bottom") {
alignment = Qt::AlignBottom;
}
else if (verticalAlign == "justify") {
// not yet supported
alignment = Qt::AlignVCenter;
}
else if (verticalAlign == "middle") {
alignment = Qt::AlignVCenter;
}
m_textShapeData->setVerticalAlignment(alignment);
const QString fitToSize = styleStack.property(KoXmlNS::draw, "fit-to-size");
KoTextShapeData::ResizeMethod resize = KoTextShapeData::NoResize;
if (fitToSize == "true" || fitToSize == "shrink-to-fit") { // second is buggy value from impress
resize = KoTextShapeData::ShrinkToFitResize;
}
else {
// An explicitly defined svg:width or svg:height changes the default value (i.e. the value
// used if not explicitly defined otherwise) of auto-grow-width and auto-grow-height,
// so they are mutually exclusive.
// It is not clear (i.e. we did not test or handle it) what happens if both are
// defined and in conflict with each other, or how fit-to-size relates to this.
QString autoGrowWidth = styleStack.property(KoXmlNS::draw, "auto-grow-width");
if (autoGrowWidth.isEmpty()) {
autoGrowWidth = element.hasAttributeNS(KoXmlNS::svg, "width") ? "false" : "true";
}
QString autoGrowHeight = styleStack.property(KoXmlNS::draw, "auto-grow-height");
if (autoGrowHeight.isEmpty()) {
autoGrowHeight = element.hasAttributeNS(KoXmlNS::svg, "height") ? "false" : "true";
}
if (autoGrowWidth == "true") {
resize = autoGrowHeight == "true" ? KoTextShapeData::AutoGrowWidthAndHeight : KoTextShapeData::AutoGrowWidth;
}
else if (autoGrowHeight == "true") {
resize = KoTextShapeData::AutoGrowHeight;
}
}
m_textShapeData->setResizeMethod(resize);
}
bool TextShape::loadOdf(const KoXmlElement &element, KoShapeLoadingContext &context)
{
m_textShapeData->document()->setUndoRedoEnabled(false);
loadOdfAttributes(element, context, OdfAllAttributes);
// this cannot be done in loadStyle as that would fill the style stack incorrectly and
// therefore result in wrong data being loaded.
m_textShapeData->loadStyle(element, context);
#ifndef NWORKAROUND_ODF_BUGS
KoTextShapeData::ResizeMethod method = m_textShapeData->resizeMethod();
if (KoOdfWorkaround::fixAutoGrow(method, context)) {
KoTextDocumentLayout *lay = qobject_cast<KoTextDocumentLayout*>(m_textShapeData->document()->documentLayout());
Q_ASSERT(lay);
if (lay) {
SimpleRootAreaProvider *provider = dynamic_cast<SimpleRootAreaProvider*>(lay->provider());
if (provider) {
provider->m_fixAutogrow = true;
}
}
}
#endif
bool answer = loadOdfFrame(element, context);
m_textShapeData->document()->setUndoRedoEnabled(true);
return answer;
}
bool TextShape::loadOdfFrame(const KoXmlElement &element, KoShapeLoadingContext &context)
{
// If the loadOdfFrame from the base class for draw:text-box fails, check
// for table:table, because that is a legal child of draw:frame in ODF 1.2.
if (!KoFrameShape::loadOdfFrame(element, context)) {
const KoXmlElement &possibleTableElement(KoXml::namedItemNS(element, KoXmlNS::table, "table"));
if (possibleTableElement.isNull()) {
return false;
}
else {
return loadOdfFrameElement(possibleTableElement, context);
}
}
return true;
}
bool TextShape::loadOdfFrameElement(const KoXmlElement &element, KoShapeLoadingContext &context)
{
bool ok = m_textShapeData->loadOdf(element, context, 0, this);
if (ok)
ShrinkToFitShapeContainer::tryWrapShape(this, element, context);
return ok;
}
void TextShape::update() const
{
KoShapeContainer::update();
}
void TextShape::update(const QRectF &shape) const
{
// this is done to avoid updates that are triggered during the paint event but are not needed.
if (!m_paintRegion.contains(shape.toRect())) {
KoShape::update(shape);
}
}
void TextShape::waitUntilReady(const KoViewConverter &, bool asynchronous) const
{
Q_UNUSED(asynchronous);
KoTextDocumentLayout *lay = qobject_cast<KoTextDocumentLayout*>(m_textShapeData->document()->documentLayout());
Q_ASSERT(lay);
if (m_textShapeData->isDirty()) {
// Do a simple layout call that keeps relayouting until things are done. If more
// layouts are scheduled we don't need to wait for them here and can just continue.
lay->layout();
}
}
KoImageCollection *TextShape::imageCollection()
{
return m_imageCollection;
}
void TextShape::updateDocumentData()
{
if (m_layout) {
KoTextDocument document(m_textShapeData->document());
m_layout->setStyleManager(document.styleManager());
m_layout->setInlineTextObjectManager(document.inlineTextObjectManager());
m_layout->setTextRangeManager(document.textRangeManager());
m_layout->setChangeTracker(document.changeTracker());
}
}
| gpl-2.0 |
cxxr-devel/cxxr-svn-mirror | src/library/Recommended/survival/src/survConcordance.c | 1 | 4986 | /*
** $Id$
**
** For each observation, we want to know, for the subset of observations
** with longer survival (and only those),
** the number with smaller, bigger, and tied x values
**
** The input data is sorted, largest survival to smallest survival
**
** n number of time/status/x values
** time
** status needed to keep track of tied survival times
** x vector of scores
** n2 number of unique x values
** x2 sorted vector of unique x values, smallest to largest
**
** temp scratch vector of length 2* n2
**
** returned
** result number concordant, discordant, tied survival, tied x but
** not tied survival, and incomparable times
** (bigger survival + smaller risk score = concordant)
*/
#include "survS.h"
#include <stdio.h>
void survConcordance(Sint *np, double *time, Sint *status,
double *x, Sint *n2p, double *x2,
Sint *temp,Sint *result) {
int i, j, k=0;
int start, end;
int n, n2;
Sint *count1, *count2, *count;
int tdeath;
int nright, nsame;
n = *np;
n2= *n2p;
count1 = &(temp[0]);
count2 = &(temp[n2]);
for (i=0; i<5; i++) result[i] =0; /* redundant I think */
for (i=0; i<n2; i++) count1[i]=0;
/*
** The heart of the algorithm is to think of the ordered list of
** unique values as a balanced binary tree. (Credit to Brad Broom
** of Rice U for this idea).
** For any node k, below it to the left all values are < x2[k],
** and below to the right all values are > x2[k]. (Draw a picture).
** The root of the tree is element k= floor((n2-1)/2), with value x2[k].
** In general, for any subtree that "owns" elements i to j, the root
** of that subtree is element k= floor((i+j)/2), whose left subtree
** owns elements i to k-1 of the tree, and right subtree owns elements
** k+1 to j.
**
** As we update, count[i] will be the number of data values in this
** node and all nodes below.
**
** We walk through the data one survival time at a time, comparing each
** to all the survival times above it.
** If the time is censored, all those above are "incomparable".
** Otherwise, we need to find the position of x[i] among x[0:(i-1)].
** We do this by updating the counts in the binary tree. The count
** vector contains the number of x[0 to i] that are in or below any
** given node k of the binary tree.
**
** Tied death times are a nuisance; we have to refrain from updating
** the counts until the end of each set of them. Thus a vector
** count1 (up to date) and count2 (lagged).
** nright = sum(# values to the right, each time I take a left branch)
*/
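/*
** Worked example (illustrative, not from the original source): with n2=7
** unique values the root is k=(0+6)/2=3; its left subtree owns elements
** 0-2 (root k=1) and its right subtree owns elements 4-6 (root k=5).
** Looking up x2[0] walks 3 -> 1 -> 0, and every left branch taken adds
** the size of the right-hand subtree it skips to nright.
*/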
tdeath =0; /* current count of tied deaths */
for (i=0; i<n; i++) {
if (status[i] > 0) {
/*
** Walk the tree a first time, to count this observation's
** position
*/
nright = 0;
start = 0; end= n2-1; /*start to end of sublist being looked at */
if (tdeath==0) count=count1; /* use the appropriate count */
else count=count2;
while(start <= end) {
k = (start+end)/2;
if (x[i] == x2[k]) break;
if (x[i] < x2[k]) {
/* take the left branch (smaller numbers) */
end = k-1;
nright = nright + (count[k] - count[(start+end)/2]);
}
else start = k+1; /*right branch */
}
/*
** At this point x[i] = x2[k]; we've found the number in the
** x2 list
*/
nsame = count[k]; /*provisional */
if (k<end) { /* there is a right hand branch below this node*/
j = count[(k+1+end)/2]; /*number to the right */
nsame = nsame -j;
nright= nright+j;
}
if (k > start) /* there is a left hand branch below here */
nsame = nsame - count[(start+k-1)/2];
result[3] += nsame;
result[1] += nright; /* # values bigger than x[i] */
result[0] += i - (tdeath + nsame + nright); /* # smaller */
/* Is the next survival time tied with this one? */
if (i<(n-1) && status[i+1]>0 &&(time[i] == time[i+1])) {
tdeath += 1; /* Yes it is */
if (tdeath==1) {
for (j=0; j<n2; j++) count2[j] = count1[j];
}
}
else {
result[2] += (tdeath * (tdeath+1))/2;
tdeath =0;
}
}
else {
/*
** censored survival time
** All those above it on the list are "incomparable"
*/
tdeath =0;
result[4] += i;
}
/*
** Now, walk the list one more time, updating count1
*/
start = 0; end= n2-1; /*start to end of sublist being looked at */
while(start <= end) {
k = (start+end)/2;
count1[k]++;
if (x[i] == x2[k]) break;
if (x[i] < x2[k]) end = k-1; /* left branch */
else start = k+1; /*right branch */
}
}
}
| gpl-2.0 |
zehome/cpige | id3.c | 1 | 1961 | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "tool.h"
#include "id3.h"
char *GetId3v1(char *titre, char *artiste, char *radioName)
{
char *chunk;
char *relativePointer;
int padding = 0;
chunk = (char *)memory_allocation(128);
/* id3v1 tag */
strncat(chunk, "TAG", 4);
/* Title */
relativePointer = chunk + 3; /* 3 octets */
if (titre) {
padding = (30 - strlen(titre));
if (padding < 0)
{
snprintf(relativePointer, 31, "%s", titre);
} else {
snprintf(relativePointer, 128-3, "%s", titre);
memset(relativePointer + (30-padding), 0, padding);
}
} else {
memset(relativePointer, 0, 30);
}
/* Artist */
relativePointer = relativePointer + 30; /* 33 octets */
if (artiste) {
padding = (30 - strlen(artiste));
if (padding < 0) {
snprintf(relativePointer, 31, "%s", artiste);
} else {
snprintf(relativePointer, 128-33, "%s", artiste);
memset(relativePointer + (30 - padding), 0, padding);
}
} else {
memset(relativePointer, 0, 30);
}
/* Album (we store the radio URL here ...) */
relativePointer = relativePointer + 30; /* 63 octets */
if (radioName != NULL) {
padding = (30 - strlen(radioName));
if (padding < 0) {
snprintf(relativePointer, 31, "%s", radioName);
} else {
snprintf(relativePointer, 128-63, "%s", radioName);
memset(relativePointer + (30-padding), 0, padding);
}
} else {
memset(relativePointer, 0, 30);
}
/* Year */
relativePointer = relativePointer + 30; /* 93 octets */
memset(relativePointer, 0, 4);
/* Comment */
relativePointer = relativePointer + 4; /* 97 octets */
snprintf(relativePointer, 31, "by cPige http://ed.zehome.com/");
/* What luck, it fits! */
/* genre */
relativePointer = relativePointer + 30; /* 127 octets */
memset(relativePointer, 1, 1);
/* 128 bytes ! We won :) */
return chunk;
}
| gpl-2.0 |
geekammo/doxygen | src/xmldocvisitor.cpp | 1 | 23410 | /******************************************************************************
*
*
*
*
* Copyright (C) 1997-2014 by Dimitri van Heesch.
*
* Permission to use, copy, modify, and distribute this software and its
* documentation under the terms of the GNU General Public License is hereby
* granted. No representations are made about the suitability of this software
* for any purpose. It is provided "as is" without express or implied warranty.
* See the GNU General Public License for more details.
*
* Documents produced by Doxygen are derivative works derived from the
* input used in their production; they are not affected by this license.
*
*/
#include <qfileinfo.h>
#include "xmldocvisitor.h"
#include "docparser.h"
#include "language.h"
#include "doxygen.h"
#include "outputgen.h"
#include "xmlgen.h"
#include "dot.h"
#include "message.h"
#include "util.h"
#include "parserintf.h"
#include "filename.h"
#include "config.h"
#include "htmlentity.h"
XmlDocVisitor::XmlDocVisitor(FTextStream &t,CodeOutputInterface &ci)
: DocVisitor(DocVisitor_XML), m_t(t), m_ci(ci), m_insidePre(FALSE), m_hide(FALSE)
{
}
//--------------------------------------
// visitor functions for leaf nodes
//--------------------------------------
void XmlDocVisitor::visit(DocWord *w)
{
if (m_hide) return;
filter(w->word());
}
void XmlDocVisitor::visit(DocLinkedWord *w)
{
if (m_hide) return;
startLink(w->ref(),w->file(),w->anchor());
filter(w->word());
endLink();
}
void XmlDocVisitor::visit(DocWhiteSpace *w)
{
if (m_hide) return;
if (m_insidePre)
{
m_t << w->chars();
}
else
{
m_t << " ";
}
}
void XmlDocVisitor::visit(DocSymbol *s)
{
if (m_hide) return;
const char *res = HtmlEntityMapper::instance()->xml(s->symbol());
if (res)
{
m_t << res;
}
else
{
err("XML: non supported HTML-entity found: %s\n",HtmlEntityMapper::instance()->html(s->symbol(),TRUE));
}
}
void XmlDocVisitor::visit(DocURL *u)
{
if (m_hide) return;
m_t << "<ulink url=\"";
if (u->isEmail()) m_t << "mailto:";
filter(u->url());
m_t << "\">";
filter(u->url());
m_t << "</ulink>";
}
void XmlDocVisitor::visit(DocLineBreak *)
{
if (m_hide) return;
m_t << "<linebreak/>\n";
}
void XmlDocVisitor::visit(DocHorRuler *)
{
if (m_hide) return;
m_t << "<hruler/>\n";
}
void XmlDocVisitor::visit(DocStyleChange *s)
{
if (m_hide) return;
switch (s->style())
{
case DocStyleChange::Bold:
if (s->enable()) m_t << "<bold>"; else m_t << "</bold>";
break;
case DocStyleChange::Italic:
if (s->enable()) m_t << "<emphasis>"; else m_t << "</emphasis>";
break;
case DocStyleChange::Code:
if (s->enable()) m_t << "<computeroutput>"; else m_t << "</computeroutput>";
break;
case DocStyleChange::Subscript:
if (s->enable()) m_t << "<subscript>"; else m_t << "</subscript>";
break;
case DocStyleChange::Superscript:
if (s->enable()) m_t << "<superscript>"; else m_t << "</superscript>";
break;
case DocStyleChange::Center:
if (s->enable()) m_t << "<center>"; else m_t << "</center>";
break;
case DocStyleChange::Small:
if (s->enable()) m_t << "<small>"; else m_t << "</small>";
break;
case DocStyleChange::Preformatted:
if (s->enable())
{
m_t << "<preformatted>";
m_insidePre=TRUE;
}
else
{
m_t << "</preformatted>";
m_insidePre=FALSE;
}
break;
case DocStyleChange::Div: /* HTML only */ break;
case DocStyleChange::Span: /* HTML only */ break;
}
}
void XmlDocVisitor::visit(DocVerbatim *s)
{
if (m_hide) return;
QCString lang = m_langExt;
if (!s->language().isEmpty()) // explicit language setting
{
lang = s->language();
}
SrcLangExt langExt = getLanguageFromFileName(lang);
switch(s->type())
{
case DocVerbatim::Code: // fall through
m_t << "<programlisting>";
Doxygen::parserManager->getParser(lang)
->parseCode(m_ci,s->context(),s->text(),langExt,
s->isExample(),s->exampleFile());
m_t << "</programlisting>";
break;
case DocVerbatim::Verbatim:
m_t << "<verbatim>";
filter(s->text());
m_t << "</verbatim>";
break;
case DocVerbatim::HtmlOnly:
m_t << "<htmlonly>";
filter(s->text());
m_t << "</htmlonly>";
break;
case DocVerbatim::RtfOnly:
m_t << "<rtfonly>";
filter(s->text());
m_t << "</rtfonly>";
break;
case DocVerbatim::ManOnly:
m_t << "<manonly>";
filter(s->text());
m_t << "</manonly>";
break;
case DocVerbatim::LatexOnly:
m_t << "<latexonly>";
filter(s->text());
m_t << "</latexonly>";
break;
case DocVerbatim::XmlOnly:
m_t << s->text();
break;
case DocVerbatim::DocbookOnly:
m_t << "<docbookonly>";
filter(s->text());
m_t << "</docbookonly>";
break;
case DocVerbatim::Dot:
m_t << "<dot>";
filter(s->text());
m_t << "</dot>";
break;
case DocVerbatim::Msc:
m_t << "<msc>";
filter(s->text());
m_t << "</msc>";
break;
}
}
void XmlDocVisitor::visit(DocAnchor *anc)
{
if (m_hide) return;
m_t << "<anchor id=\"" << anc->file() << "_1" << anc->anchor() << "\"/>";
}
void XmlDocVisitor::visit(DocInclude *inc)
{
if (m_hide) return;
SrcLangExt langExt = getLanguageFromFileName(inc->extension());
switch(inc->type())
{
case DocInclude::IncWithLines:
{
m_t << "<programlisting>";
QFileInfo cfi( inc->file() );
FileDef fd( cfi.dirPath().utf8(), cfi.fileName().utf8() );
Doxygen::parserManager->getParser(inc->extension())
->parseCode(m_ci,inc->context(),
inc->text(),
langExt,
inc->isExample(),
inc->exampleFile(), &fd);
m_t << "</programlisting>";
}
break;
case DocInclude::Include:
m_t << "<programlisting>";
Doxygen::parserManager->getParser(inc->extension())
->parseCode(m_ci,inc->context(),
inc->text(),
langExt,
inc->isExample(),
inc->exampleFile());
m_t << "</programlisting>";
break;
case DocInclude::DontInclude:
break;
case DocInclude::HtmlInclude:
m_t << "<htmlonly>";
filter(inc->text());
m_t << "</htmlonly>";
break;
case DocInclude::LatexInclude:
m_t << "<latexonly>";
filter(inc->text());
m_t << "</latexonly>";
break;
case DocInclude::VerbInclude:
m_t << "<verbatim>";
filter(inc->text());
m_t << "</verbatim>";
break;
case DocInclude::Snippet:
m_t << "<programlisting>";
Doxygen::parserManager->getParser(inc->extension())
->parseCode(m_ci,
inc->context(),
extractBlock(inc->text(),inc->blockId()),
langExt,
inc->isExample(),
inc->exampleFile()
);
m_t << "</programlisting>";
break;
}
}
void XmlDocVisitor::visit(DocIncOperator *op)
{
//printf("DocIncOperator: type=%d first=%d, last=%d text=`%s'\n",
// op->type(),op->isFirst(),op->isLast(),op->text().data());
if (op->isFirst())
{
if (!m_hide)
{
m_t << "<programlisting>";
}
pushEnabled();
m_hide = TRUE;
}
SrcLangExt langExt = getLanguageFromFileName(m_langExt);
if (op->type()!=DocIncOperator::Skip)
{
popEnabled();
if (!m_hide)
{
Doxygen::parserManager->getParser(m_langExt)
->parseCode(m_ci,op->context(),
op->text(),langExt,op->isExample(),
op->exampleFile());
}
pushEnabled();
m_hide=TRUE;
}
if (op->isLast())
{
popEnabled();
if (!m_hide) m_t << "</programlisting>";
}
else
{
if (!m_hide) m_t << endl;
}
}
void XmlDocVisitor::visit(DocFormula *f)
{
if (m_hide) return;
m_t << "<formula id=\"" << f->id() << "\">";
filter(f->text());
m_t << "</formula>";
}
void XmlDocVisitor::visit(DocIndexEntry *ie)
{
if (m_hide) return;
m_t << "<indexentry>"
"<primaryie>";
filter(ie->entry());
m_t << "</primaryie>"
"<secondaryie></secondaryie>"
"</indexentry>";
}
void XmlDocVisitor::visit(DocSimpleSectSep *sep)
{
if (sep->parent() && sep->parent()->kind()==DocNode::Kind_SimpleSect)
{
visitPost((DocSimpleSect*)sep->parent()); // end current section
visitPre((DocSimpleSect*)sep->parent()); // start new section
}
}
void XmlDocVisitor::visit(DocCite *cite)
{
if (m_hide) return;
if (!cite->file().isEmpty()) startLink(cite->ref(),cite->file(),cite->anchor());
filter(cite->text());
if (!cite->file().isEmpty()) endLink();
}
//--------------------------------------
// visitor functions for compound nodes
//--------------------------------------
void XmlDocVisitor::visitPre(DocAutoList *l)
{
if (m_hide) return;
if (l->isEnumList())
{
m_t << "<orderedlist>\n";
}
else
{
m_t << "<itemizedlist>\n";
}
}
void XmlDocVisitor::visitPost(DocAutoList *l)
{
if (m_hide) return;
if (l->isEnumList())
{
m_t << "</orderedlist>\n";
}
else
{
m_t << "</itemizedlist>\n";
}
}
void XmlDocVisitor::visitPre(DocAutoListItem *)
{
if (m_hide) return;
m_t << "<listitem>";
}
void XmlDocVisitor::visitPost(DocAutoListItem *)
{
if (m_hide) return;
m_t << "</listitem>";
}
void XmlDocVisitor::visitPre(DocPara *)
{
if (m_hide) return;
m_t << "<para>";
}
void XmlDocVisitor::visitPost(DocPara *)
{
if (m_hide) return;
m_t << "</para>";
}
void XmlDocVisitor::visitPre(DocRoot *)
{
//m_t << "<hr><h4><font color=\"red\">New parser:</font></h4>\n";
}
void XmlDocVisitor::visitPost(DocRoot *)
{
//m_t << "<hr><h4><font color=\"red\">Old parser:</font></h4>\n";
}
void XmlDocVisitor::visitPre(DocSimpleSect *s)
{
if (m_hide) return;
m_t << "<simplesect kind=\"";
switch(s->type())
{
case DocSimpleSect::See:
m_t << "see"; break;
case DocSimpleSect::Return:
m_t << "return"; break;
case DocSimpleSect::Author:
m_t << "author"; break;
case DocSimpleSect::Authors:
m_t << "authors"; break;
case DocSimpleSect::Version:
m_t << "version"; break;
case DocSimpleSect::Since:
m_t << "since"; break;
case DocSimpleSect::Date:
m_t << "date"; break;
case DocSimpleSect::Note:
m_t << "note"; break;
case DocSimpleSect::Warning:
m_t << "warning"; break;
case DocSimpleSect::Pre:
m_t << "pre"; break;
case DocSimpleSect::Post:
m_t << "post"; break;
case DocSimpleSect::Copyright:
m_t << "copyright"; break;
case DocSimpleSect::Invar:
m_t << "invariant"; break;
case DocSimpleSect::Remark:
m_t << "remark"; break;
case DocSimpleSect::Attention:
m_t << "attention"; break;
case DocSimpleSect::User:
m_t << "par"; break;
case DocSimpleSect::Rcs:
m_t << "rcs"; break;
case DocSimpleSect::Unknown: break;
}
m_t << "\">";
}
void XmlDocVisitor::visitPost(DocSimpleSect *)
{
if (m_hide) return;
m_t << "</simplesect>\n";
}
void XmlDocVisitor::visitPre(DocTitle *)
{
if (m_hide) return;
m_t << "<title>";
}
void XmlDocVisitor::visitPost(DocTitle *)
{
if (m_hide) return;
m_t << "</title>";
}
void XmlDocVisitor::visitPre(DocSimpleList *)
{
if (m_hide) return;
m_t << "<itemizedlist>\n";
}
void XmlDocVisitor::visitPost(DocSimpleList *)
{
if (m_hide) return;
m_t << "</itemizedlist>\n";
}
void XmlDocVisitor::visitPre(DocSimpleListItem *)
{
if (m_hide) return;
m_t << "<listitem>";
}
void XmlDocVisitor::visitPost(DocSimpleListItem *)
{
if (m_hide) return;
m_t << "</listitem>\n";
}
void XmlDocVisitor::visitPre(DocSection *s)
{
if (m_hide) return;
m_t << "<sect" << s->level() << " id=\"" << s->file();
if (!s->anchor().isEmpty()) m_t << "_1" << s->anchor();
m_t << "\">" << endl;
m_t << "<title>";
filter(convertCharEntitiesToUTF8(s->title()));
m_t << "</title>" << endl;
}
void XmlDocVisitor::visitPost(DocSection *s)
{
m_t << "</sect" << s->level() << ">\n";
}
void XmlDocVisitor::visitPre(DocHtmlList *s)
{
if (m_hide) return;
if (s->type()==DocHtmlList::Ordered)
m_t << "<orderedlist>\n";
else
m_t << "<itemizedlist>\n";
}
void XmlDocVisitor::visitPost(DocHtmlList *s)
{
if (m_hide) return;
if (s->type()==DocHtmlList::Ordered)
m_t << "</orderedlist>\n";
else
m_t << "</itemizedlist>\n";
}
void XmlDocVisitor::visitPre(DocHtmlListItem *)
{
if (m_hide) return;
m_t << "<listitem>\n";
}
void XmlDocVisitor::visitPost(DocHtmlListItem *)
{
if (m_hide) return;
m_t << "</listitem>\n";
}
void XmlDocVisitor::visitPre(DocHtmlDescList *)
{
if (m_hide) return;
m_t << "<variablelist>\n";
}
void XmlDocVisitor::visitPost(DocHtmlDescList *)
{
if (m_hide) return;
m_t << "</variablelist>\n";
}
void XmlDocVisitor::visitPre(DocHtmlDescTitle *)
{
if (m_hide) return;
m_t << "<varlistentry><term>";
}
void XmlDocVisitor::visitPost(DocHtmlDescTitle *)
{
if (m_hide) return;
m_t << "</term></varlistentry>\n";
}
void XmlDocVisitor::visitPre(DocHtmlDescData *)
{
if (m_hide) return;
m_t << "<listitem>";
}
void XmlDocVisitor::visitPost(DocHtmlDescData *)
{
if (m_hide) return;
m_t << "</listitem>\n";
}
void XmlDocVisitor::visitPre(DocHtmlTable *t)
{
if (m_hide) return;
m_t << "<table rows=\"" << t->numRows()
<< "\" cols=\"" << t->numColumns() << "\">" ;
}
void XmlDocVisitor::visitPost(DocHtmlTable *)
{
if (m_hide) return;
m_t << "</table>\n";
}
void XmlDocVisitor::visitPre(DocHtmlRow *)
{
if (m_hide) return;
m_t << "<row>\n";
}
void XmlDocVisitor::visitPost(DocHtmlRow *)
{
if (m_hide) return;
m_t << "</row>\n";
}
void XmlDocVisitor::visitPre(DocHtmlCell *c)
{
if (m_hide) return;
if (c->isHeading()) m_t << "<entry thead=\"yes\">"; else m_t << "<entry thead=\"no\">";
}
void XmlDocVisitor::visitPost(DocHtmlCell *)
{
if (m_hide) return;
m_t << "</entry>";
}
void XmlDocVisitor::visitPre(DocHtmlCaption *)
{
if (m_hide) return;
m_t << "<caption>";
}
void XmlDocVisitor::visitPost(DocHtmlCaption *)
{
if (m_hide) return;
m_t << "</caption>\n";
}
void XmlDocVisitor::visitPre(DocInternal *)
{
if (m_hide) return;
m_t << "<internal>";
}
void XmlDocVisitor::visitPost(DocInternal *)
{
if (m_hide) return;
m_t << "</internal>" << endl;
}
void XmlDocVisitor::visitPre(DocHRef *href)
{
if (m_hide) return;
m_t << "<ulink url=\"" << href->url() << "\">";
}
void XmlDocVisitor::visitPost(DocHRef *)
{
if (m_hide) return;
m_t << "</ulink>";
}
void XmlDocVisitor::visitPre(DocHtmlHeader *header)
{
if (m_hide) return;
m_t << "<heading level=\"" << header->level() << "\">";
}
void XmlDocVisitor::visitPost(DocHtmlHeader *)
{
if (m_hide) return;
m_t << "</heading>\n";
}
void XmlDocVisitor::visitPre(DocImage *img)
{
if (m_hide) return;
m_t << "<image type=\"";
switch(img->type())
{
case DocImage::Html: m_t << "html"; break;
case DocImage::Latex: m_t << "latex"; break;
case DocImage::Rtf: m_t << "rtf"; break;
case DocImage::DocBook: m_t << "docbook"; break;
}
m_t << "\"";
QCString baseName=img->name();
int i;
if ((i=baseName.findRev('/'))!=-1 || (i=baseName.findRev('\\'))!=-1)
{
baseName=baseName.right(baseName.length()-i-1);
}
m_t << " name=\"" << baseName << "\"";
if (!img->width().isEmpty())
{
m_t << " width=\"";
filter(img->width());
m_t << "\"";
}
else if (!img->height().isEmpty())
{
m_t << " height=\"";
filter(img->height());
m_t << "\"";
}
m_t << ">";
// copy the image to the output dir
QFile inImage(img->name());
QFile outImage(Config_getString("XML_OUTPUT")+"/"+baseName.data());
if (inImage.open(IO_ReadOnly))
{
if (outImage.open(IO_WriteOnly))
{
char *buffer = new char[inImage.size()];
inImage.readBlock(buffer,inImage.size());
outImage.writeBlock(buffer,inImage.size());
outImage.flush();
delete[] buffer;
}
}
}
void XmlDocVisitor::visitPost(DocImage *)
{
if (m_hide) return;
m_t << "</image>" << endl;
}
void XmlDocVisitor::visitPre(DocDotFile *df)
{
if (m_hide) return;
m_t << "<dotfile name=\"" << df->file() << "\">";
}
void XmlDocVisitor::visitPost(DocDotFile *)
{
if (m_hide) return;
m_t << "</dotfile>" << endl;
}
void XmlDocVisitor::visitPre(DocMscFile *df)
{
if (m_hide) return;
m_t << "<mscfile name=\"" << df->file() << "\">";
}
void XmlDocVisitor::visitPost(DocMscFile *)
{
if (m_hide) return;
m_t << "</mscfile>" << endl;
}
void XmlDocVisitor::visitPre(DocDiaFile *df)
{
if (m_hide) return;
m_t << "<diafile name=\"" << df->file() << "\">";
}
void XmlDocVisitor::visitPost(DocDiaFile *)
{
if (m_hide) return;
m_t << "</diafile>" << endl;
}
void XmlDocVisitor::visitPre(DocLink *lnk)
{
if (m_hide) return;
startLink(lnk->ref(),lnk->file(),lnk->anchor());
}
void XmlDocVisitor::visitPost(DocLink *)
{
if (m_hide) return;
endLink();
}
void XmlDocVisitor::visitPre(DocRef *ref)
{
if (m_hide) return;
if (!ref->file().isEmpty())
{
startLink(ref->ref(),ref->file(),ref->isSubPage() ? QCString() : ref->anchor());
}
if (!ref->hasLinkText()) filter(ref->targetTitle());
}
void XmlDocVisitor::visitPost(DocRef *ref)
{
if (m_hide) return;
if (!ref->file().isEmpty()) endLink();
//m_t << " ";
}
void XmlDocVisitor::visitPre(DocSecRefItem *ref)
{
if (m_hide) return;
m_t << "<tocitem id=\"" << ref->file() << "_1" << ref->anchor() << "\">";
}
void XmlDocVisitor::visitPost(DocSecRefItem *)
{
if (m_hide) return;
m_t << "</tocitem>" << endl;
}
void XmlDocVisitor::visitPre(DocSecRefList *)
{
if (m_hide) return;
m_t << "<toclist>" << endl;
}
void XmlDocVisitor::visitPost(DocSecRefList *)
{
if (m_hide) return;
m_t << "</toclist>" << endl;
}
//void XmlDocVisitor::visitPre(DocLanguage *l)
//{
// if (m_hide) return;
// m_t << "<language langid=\"" << l->id() << "\">";
//}
//
//void XmlDocVisitor::visitPost(DocLanguage *)
//{
// if (m_hide) return;
// m_t << "</language>" << endl;
//}
void XmlDocVisitor::visitPre(DocParamSect *s)
{
if (m_hide) return;
m_t << "<parameterlist kind=\"";
switch(s->type())
{
case DocParamSect::Param:
m_t << "param"; break;
case DocParamSect::RetVal:
m_t << "retval"; break;
case DocParamSect::Exception:
m_t << "exception"; break;
case DocParamSect::TemplateParam:
m_t << "templateparam"; break;
default:
ASSERT(0);
}
m_t << "\">";
}
void XmlDocVisitor::visitPost(DocParamSect *)
{
if (m_hide) return;
m_t << "</parameterlist>" << endl;
}
void XmlDocVisitor::visitPre(DocParamList *pl)
{
if (m_hide) return;
m_t << "<parameteritem>" << endl;
m_t << "<parameternamelist>" << endl;
//QStrListIterator li(pl->parameters());
//const char *s;
QListIterator<DocNode> li(pl->parameters());
DocNode *param;
for (li.toFirst();(param=li.current());++li)
{
if (pl->paramTypes().count()>0)
{
QListIterator<DocNode> li(pl->paramTypes());
DocNode *type;
for (li.toFirst();(type=li.current());++li)
{
m_t << "<parametertype>";
if (type->kind()==DocNode::Kind_Word)
{
visit((DocWord*)type);
}
else if (type->kind()==DocNode::Kind_LinkedWord)
{
visit((DocLinkedWord*)type);
}
m_t << "</parametertype>" << endl;
}
}
m_t << "<parametername";
if (pl->direction()!=DocParamSect::Unspecified)
{
m_t << " direction=\"";
if (pl->direction()==DocParamSect::In)
{
m_t << "in";
}
else if (pl->direction()==DocParamSect::Out)
{
m_t << "out";
}
else if (pl->direction()==DocParamSect::InOut)
{
m_t << "inout";
}
m_t << "\"";
}
m_t << ">";
if (param->kind()==DocNode::Kind_Word)
{
visit((DocWord*)param);
}
else if (param->kind()==DocNode::Kind_LinkedWord)
{
visit((DocLinkedWord*)param);
}
m_t << "</parametername>" << endl;
}
m_t << "</parameternamelist>" << endl;
m_t << "<parameterdescription>" << endl;
}
void XmlDocVisitor::visitPost(DocParamList *)
{
if (m_hide) return;
m_t << "</parameterdescription>" << endl;
m_t << "</parameteritem>" << endl;
}
void XmlDocVisitor::visitPre(DocXRefItem *x)
{
if (m_hide) return;
if (x->title().isEmpty()) return;
m_t << "<xrefsect id=\"";
m_t << x->file() << "_1" << x->anchor();
m_t << "\">";
m_t << "<xreftitle>";
filter(x->title());
m_t << "</xreftitle>";
m_t << "<xrefdescription>";
}
void XmlDocVisitor::visitPost(DocXRefItem *x)
{
if (m_hide) return;
if (x->title().isEmpty()) return;
m_t << "</xrefdescription>";
m_t << "</xrefsect>";
}
void XmlDocVisitor::visitPre(DocInternalRef *ref)
{
if (m_hide) return;
startLink(0,ref->file(),ref->anchor());
}
void XmlDocVisitor::visitPost(DocInternalRef *)
{
if (m_hide) return;
endLink();
m_t << " ";
}
void XmlDocVisitor::visitPre(DocCopy *c)
{
if (m_hide) return;
m_t << "<copydoc link=\"" << convertToXML(c->link()) << "\">";
}
void XmlDocVisitor::visitPost(DocCopy *)
{
if (m_hide) return;
m_t << "</copydoc>" << endl;
}
void XmlDocVisitor::visitPre(DocText *)
{
}
void XmlDocVisitor::visitPost(DocText *)
{
}
void XmlDocVisitor::visitPre(DocHtmlBlockQuote *)
{
if (m_hide) return;
m_t << "<blockquote>";
}
void XmlDocVisitor::visitPost(DocHtmlBlockQuote *)
{
if (m_hide) return;
m_t << "</blockquote>";
}
void XmlDocVisitor::visitPre(DocVhdlFlow *)
{
}
void XmlDocVisitor::visitPost(DocVhdlFlow *)
{
}
void XmlDocVisitor::visitPre(DocParBlock *)
{
if (m_hide) return;
m_t << "<parblock>";
}
void XmlDocVisitor::visitPost(DocParBlock *)
{
if (m_hide) return;
m_t << "</parblock>";
}
void XmlDocVisitor::filter(const char *str)
{
m_t << convertToXML(str);
}
void XmlDocVisitor::startLink(const QCString &ref,const QCString &file,const QCString &anchor)
{
//printf("XmlDocVisitor: file=%s anchor=%s\n",file.data(),anchor.data());
m_t << "<ref refid=\"" << file;
if (!anchor.isEmpty()) m_t << "_1" << anchor;
m_t << "\" kindref=\"";
if (!anchor.isEmpty()) m_t << "member"; else m_t << "compound";
m_t << "\"";
if (!ref.isEmpty()) m_t << " external=\"" << ref << "\"";
m_t << ">";
}
void XmlDocVisitor::endLink()
{
m_t << "</ref>";
}
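/* Save/restore m_hide on a stack so nested constructs (e.g. the
DocIncOperator handling above) can temporarily suppress output. */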
void XmlDocVisitor::pushEnabled()
{
m_enabled.push(new bool(m_hide));
}
void XmlDocVisitor::popEnabled()
{
bool *v=m_enabled.pop();
ASSERT(v!=0);
m_hide = *v;
delete v;
}
| gpl-2.0 |
mcclung/glue | lib/plugins/stonith/suicide.c | 1 | 6968 | /* File: suicide.c
* Description: Stonith module for suicide
*
* Author: Sun Jiang Dong <sunjd@cn.ibm.com>
* Copyright (c) 2004 International Business Machines
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <lha_internal.h>
#include <config.h>
#include <sys/utsname.h>
#define DEVICE "Suicide STONITH device"
#include "stonith_plugin_common.h"
#define PIL_PLUGIN suicide
#define PIL_PLUGIN_S "suicide"
#define PIL_PLUGINLICENSE LICENSE_LGPL
#define PIL_PLUGINLICENSEURL URL_LGPL
#include <pils/plugin.h>
static StonithPlugin * suicide_new(const char *);
static void suicide_destroy(StonithPlugin *);
static const char * const * suicide_get_confignames(StonithPlugin *);
static int suicide_set_config(StonithPlugin *, StonithNVpair*);
static const char * suicide_get_info(StonithPlugin * s, int InfoType);
static int suicide_status(StonithPlugin * );
static int suicide_reset_req(StonithPlugin * s, int request
, const char * host);
static char ** suicide_hostlist(StonithPlugin *);
static struct stonith_ops suicideOps ={
suicide_new, /* Create new STONITH object */
suicide_destroy, /* Destroy STONITH object */
suicide_get_info, /* Return STONITH info string */
suicide_get_confignames, /* Return configuration parameters */
suicide_set_config, /* Set configuration */
suicide_status, /* Return STONITH device status */
suicide_reset_req, /* Request a reset */
suicide_hostlist, /* Return list of supported hosts */
};
PIL_PLUGIN_BOILERPLATE2("1.0", Debug)
static const PILPluginImports* PluginImports;
static PILPlugin* OurPlugin;
static PILInterface* OurInterface;
static StonithImports* OurImports;
static void* interfprivate;
PIL_rc
PIL_PLUGIN_INIT(PILPlugin*us, const PILPluginImports* imports);
PIL_rc
PIL_PLUGIN_INIT(PILPlugin*us, const PILPluginImports* imports)
{
/* Force the compiler to do a little type checking */
(void)(PILPluginInitFun)PIL_PLUGIN_INIT;
PluginImports = imports;
OurPlugin = us;
/* Register ourself as a plugin */
imports->register_plugin(us, &OurPIExports);
/* Register our interface implementation */
return imports->register_interface(us, PIL_PLUGINTYPE_S
, PIL_PLUGIN_S
, &suicideOps
, NULL /*close */
, &OurInterface
, (void*)&OurImports
, &interfprivate);
}
#define REBOOT_COMMAND "nohup sh -c 'sleep 2; " REBOOT " " REBOOT_OPTIONS " </dev/null >/dev/null 2>&1' &"
#define POWEROFF_COMMAND "nohup sh -c 'sleep 2; " POWEROFF_CMD " " POWEROFF_OPTIONS " </dev/null >/dev/null 2>&1' &"
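/* The nohup + 'sleep 2' wrapper detaches the reboot/poweroff from the
calling process, presumably so the plugin can still report success
before the node actually goes down. */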
/*
#define REBOOT_COMMAND "echo 'sleep 2; " REBOOT " " REBOOT_OPTIONS "' | SHELL=/bin/sh at now >/dev/null 2>&1"
#define POWEROFF_COMMAND "echo 'sleep 2; " POWEROFF_CMD " " POWEROFF_OPTIONS "' | SHELL=/bin/sh at now >/dev/null 2>&1"
*/
/*
* Suicide STONITH device
*/
struct pluginDevice {
StonithPlugin sp;
const char * pluginid;
const char * idinfo;
};
static const char * pluginid = "SuicideDevice-Stonith";
static const char * NOTpluginid = "Suicide device has been destroyed";
#include "stonith_config_xml.h"
static const char *suicideXML =
XML_PARAMETERS_BEGIN
XML_PARAMETERS_END;
static int
suicide_status(StonithPlugin *s)
{
ERRIFWRONGDEV(s, S_OOPS);
return S_OK;
}
/*
* Return the list of hosts configured for this Suicide device
*/
static char **
suicide_hostlist(StonithPlugin *s)
{
char** ret = NULL;
struct utsname name;
ERRIFWRONGDEV(s, NULL);
if (uname(&name) == -1) {
LOG(PIL_CRIT, "uname error %d", errno);
return ret;
}
ret = OurImports->StringToHostList(name.nodename);
if (ret == NULL) {
LOG(PIL_CRIT, "out of memory");
return ret;
}
g_strdown(ret[0]);
return ret;
}
/*
* Suicide - reset or poweroff itself.
*/
static int
suicide_reset_req(StonithPlugin * s, int request, const char * host)
{
int rc = -1;
struct utsname name;
ERRIFWRONGDEV(s, S_OOPS);
if (request == ST_POWERON) {
LOG(PIL_CRIT, "%s not capable of power-on operation", DEVICE);
return S_INVAL;
} else if (request != ST_POWEROFF && request != ST_GENERIC_RESET) {
LOG(PIL_CRIT, "As for suicide virtual stonith device, "
"reset request=%d is not supported", request);
return S_INVAL;
}
if (uname(&name) == -1) {
LOG(PIL_CRIT, "uname error %d", errno);
return S_RESETFAIL ;
}
if (strcmp(name.nodename, host)) {
LOG(PIL_CRIT, "%s doesn't control host [%s]"
, name.nodename, host);
return S_RESETFAIL ;
}
LOG(PIL_INFO, "Initiating suicide on host %s", host);
rc = system(
request == ST_GENERIC_RESET ? REBOOT_COMMAND : POWEROFF_COMMAND);
if (rc == 0) {
LOG(PIL_INFO, "Suicide stonith succeeded.");
return S_OK;
} else {
LOG(PIL_CRIT, "Suicide stonith failed.");
return S_RESETFAIL ;
}
}
static const char * const *
suicide_get_confignames(StonithPlugin* p)
{
/* Does not need to be initialized externally. */
static const char * SuicideParams[] = { NULL };
return SuicideParams;
}
/*
* Parse the config information in the given string, and stash it away...
*/
static int
suicide_set_config(StonithPlugin* s, StonithNVpair* list)
{
ERRIFWRONGDEV(s,S_OOPS);
return S_OK;
}
static const char *
suicide_get_info(StonithPlugin * s, int reqtype)
{
struct pluginDevice* sd = (struct pluginDevice *)s;
const char * ret;
ERRIFWRONGDEV(s, NULL);
sd = (struct pluginDevice *)s;
switch (reqtype) {
case ST_DEVICEID:
ret = sd->idinfo;
break;
case ST_DEVICENAME:
ret = "suicide STONITH device";
break;
case ST_DEVICEDESCR: /* Description of device type */
ret = "Virtual device to reboot/powerdown itself.\n";
break;
case ST_CONF_XML: /* XML metadata */
ret = suicideXML;
break;
default:
ret = NULL;
break;
}
return ret;
}
/*
* Suicide Stonith destructor...
*/
static void
suicide_destroy(StonithPlugin *s)
{
struct pluginDevice* sd;
VOIDERRIFWRONGDEV(s);
sd = (struct pluginDevice *)s;
sd->pluginid = NOTpluginid;
FREE(sd);
}
/* Create a new suicide Stonith device */
static StonithPlugin*
suicide_new(const char * subplugin)
{
struct pluginDevice* sd = ST_MALLOCT(struct pluginDevice);
if (sd == NULL) {
LOG(PIL_CRIT, "out of memory");
return(NULL);
}
memset(sd, 0, sizeof(*sd));
sd->pluginid = pluginid;
sd->idinfo = DEVICE;
sd->sp.s_ops = &suicideOps;
return &(sd->sp);
}
| gpl-2.0 |
Erikhht/TCPMP | ffmpeg/libavcodec/h264.c | 1 | 298580 | /*
* H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder
* Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
/**
* @file h264.c
* H.264 / AVC / MPEG4 part10 codec.
* @author Michael Niedermayer <michaelni@gmx.at>
*/
#include "common.h"
#include "dsputil.h"
#include "avcodec.h"
#include "mpegvideo.h"
#include "h264data.h"
#include "golomb.h"
#include "cabac.h"
//#undef NDEBUG
//#include <assert.h>
#define interlaced_dct interlaced_dct_is_a_bad_name
#define mb_intra mb_intra_isnt_initalized_see_mb_type
#define LUMA_DC_BLOCK_INDEX 25
#define CHROMA_DC_BLOCK_INDEX 26
#define CHROMA_DC_COEFF_TOKEN_VLC_BITS 8
#define COEFF_TOKEN_VLC_BITS 8
#define TOTAL_ZEROS_VLC_BITS 9
#define CHROMA_DC_TOTAL_ZEROS_VLC_BITS 3
#define RUN_VLC_BITS 3
#define RUN7_VLC_BITS 6
#define MAX_SPS_COUNT 32
#define MAX_PPS_COUNT 256
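/* These match the H.264 syntax limits: seq_parameter_set_id is 0..31 and
pic_parameter_set_id is 0..255. */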
#define MAX_MMCO_COUNT 66
/**
* Sequence parameter set
*/
typedef struct SPS{
int profile_idc;
int level_idc;
int transform_bypass; ///< qpprime_y_zero_transform_bypass_flag
int log2_max_frame_num; ///< log2_max_frame_num_minus4 + 4
int poc_type; ///< pic_order_cnt_type
int log2_max_poc_lsb; ///< log2_max_pic_order_cnt_lsb_minus4
int delta_pic_order_always_zero_flag;
int offset_for_non_ref_pic;
int offset_for_top_to_bottom_field;
int poc_cycle_length; ///< num_ref_frames_in_pic_order_cnt_cycle
int ref_frame_count; ///< num_ref_frames
int gaps_in_frame_num_allowed_flag;
int mb_width; ///< frame_width_in_mbs_minus1 + 1
int mb_height; ///< frame_height_in_mbs_minus1 + 1
int frame_mbs_only_flag;
int mb_aff; ///<mb_adaptive_frame_field_flag
int direct_8x8_inference_flag;
int crop; ///< frame_cropping_flag
int crop_left; ///< frame_cropping_rect_left_offset
int crop_right; ///< frame_cropping_rect_right_offset
int crop_top; ///< frame_cropping_rect_top_offset
int crop_bottom; ///< frame_cropping_rect_bottom_offset
int vui_parameters_present_flag;
AVRational sar;
int timing_info_present_flag;
uint32_t num_units_in_tick;
uint32_t time_scale;
int fixed_frame_rate_flag;
short offset_for_ref_frame[256]; //FIXME dyn aloc?
int bitstream_restriction_flag;
int num_reorder_frames;
}SPS;
/**
* Picture parameter set
*/
typedef struct PPS{
int sps_id;
int cabac; ///< entropy_coding_mode_flag
int pic_order_present; ///< pic_order_present_flag
int slice_group_count; ///< num_slice_groups_minus1 + 1
int mb_slice_group_map_type;
int ref_count[2]; ///< num_ref_idx_l0/1_active_minus1 + 1
int weighted_pred; ///< weighted_pred_flag
int weighted_bipred_idc;
int init_qp; ///< pic_init_qp_minus26 + 26
int init_qs; ///< pic_init_qs_minus26 + 26
int chroma_qp_index_offset;
int deblocking_filter_parameters_present; ///< deblocking_filter_parameters_present_flag
int constrained_intra_pred; ///< constrained_intra_pred_flag
int redundant_pic_cnt_present; ///< redundant_pic_cnt_present_flag
int transform_8x8_mode; ///< transform_8x8_mode_flag
}PPS;
/**
* Memory management control operation opcode.
*/
typedef enum MMCOOpcode{
MMCO_END=0,
MMCO_SHORT2UNUSED,
MMCO_LONG2UNUSED,
MMCO_SHORT2LONG,
MMCO_SET_MAX_LONG,
MMCO_RESET,
MMCO_LONG,
} MMCOOpcode;
/**
* Memory management control operation.
*/
typedef struct MMCO{
MMCOOpcode opcode;
int short_frame_num;
int long_index;
} MMCO;
/**
* H264Context
*/
typedef struct H264Context{
MpegEncContext s;
int nal_ref_idc;
int nal_unit_type;
#define NAL_SLICE 1
#define NAL_DPA 2
#define NAL_DPB 3
#define NAL_DPC 4
#define NAL_IDR_SLICE 5
#define NAL_SEI 6
#define NAL_SPS 7
#define NAL_PPS 8
#define NAL_PICTURE_DELIMITER 9
#define NAL_FILTER_DATA 10
uint8_t *rbsp_buffer;
int rbsp_buffer_size;
/**
* Used to parse the AVC variant of H.264.
*/
int is_avc; ///< this flag is != 0 if codec is avc1
int got_avcC; ///< flag used to parse avcC data only once
int nal_length_size; ///< Number of bytes used for nal length (1, 2 or 4)
int chroma_qp; //QPc
int prev_mb_skipped; //FIXME remove (IMHO not used)
//prediction stuff
int chroma_pred_mode;
int intra16x16_pred_mode;
int top_mb_xy;
int left_mb_xy[2];
int8_t intra4x4_pred_mode_cache[5*8];
int8_t (*intra4x4_pred_mode)[8];
void (*pred4x4 [9+3])(uint8_t *src, uint8_t *topright, int stride);//FIXME move to dsp?
void (*pred8x8l [9+3])(uint8_t *src, int topleft, int topright, int stride);
void (*pred8x8 [4+3])(uint8_t *src, int stride);
void (*pred16x16[4+3])(uint8_t *src, int stride);
unsigned int topleft_samples_available;
unsigned int top_samples_available;
unsigned int topright_samples_available;
unsigned int left_samples_available;
uint8_t (*top_borders[2])[16+2*8];
uint8_t left_border[2*(17+2*9)];
/**
* non zero coeff count cache.
* is 64 if not available.
*/
uint8_t non_zero_count_cache[6*8] __align8;
uint8_t (*non_zero_count)[16];
/**
* Motion vector cache.
*/
int16_t mv_cache[2][5*8][2] __align8;
int8_t ref_cache[2][5*8] __align8;
#define LIST_NOT_USED -1 //FIXME rename?
#define PART_NOT_AVAILABLE -2
/**
* is 1 if the specified list's MVs & references are set to 0,0,-2.
*/
int mv_cache_clean[2];
/**
* number of neighbors (top and/or left) that used 8x8 dct
*/
int neighbor_transform_size;
/**
* block_offset[ 0..23] for frame macroblocks
* block_offset[24..47] for field macroblocks
*/
int block_offset[2*(16+8)];
uint32_t *mb2b_xy; //FIXME are these 4 a good idea?
uint32_t *mb2b8_xy;
int b_stride; //FIXME use s->b4_stride
int b8_stride;
int halfpel_flag;
int thirdpel_flag;
int unknown_svq3_flag;
int next_slice_index;
SPS sps_buffer[MAX_SPS_COUNT];
SPS sps; ///< current sps
PPS pps_buffer[MAX_PPS_COUNT];
/**
* current pps
*/
PPS pps; //FIXME move to Picture perhaps? (->no) do we need that?
uint16_t (*dequant4_coeff)[16]; // FIXME quant matrices should be per SPS or PPS
uint16_t (*dequant8_coeff)[64];
int slice_num;
uint8_t *slice_table_base;
uint8_t *slice_table; ///< slice_table_base + mb_stride + 1
int slice_type;
int slice_type_fixed;
//interlacing specific flags
int mb_aff_frame;
int mb_field_decoding_flag;
int sub_mb_type[4];
//POC stuff
int poc_lsb;
int poc_msb;
int delta_poc_bottom;
int delta_poc[2];
int frame_num;
int prev_poc_msb; ///< poc_msb of the last reference pic for POC type 0
int prev_poc_lsb; ///< poc_lsb of the last reference pic for POC type 0
int frame_num_offset; ///< for POC type 2
int prev_frame_num_offset; ///< for POC type 2
int prev_frame_num; ///< frame_num of the last pic for POC type 1/2
/**
* frame_num for frames or 2*frame_num for field pics.
*/
int curr_pic_num;
/**
* max_frame_num or 2*max_frame_num for field pics.
*/
int max_pic_num;
//Weighted pred stuff
int use_weight;
int use_weight_chroma;
int luma_log2_weight_denom;
int chroma_log2_weight_denom;
int luma_weight[2][16];
int luma_offset[2][16];
int chroma_weight[2][16][2];
int chroma_offset[2][16][2];
int implicit_weight[16][16];
//deblock
int deblocking_filter; ///< disable_deblocking_filter_idc with 1<->0
int slice_alpha_c0_offset;
int slice_beta_offset;
int redundant_pic_count;
int direct_spatial_mv_pred;
int dist_scale_factor[16];
int map_col_to_list0[2][16];
/**
* num_ref_idx_l0/1_active_minus1 + 1
*/
int ref_count[2];// FIXME split for AFF
Picture *short_ref[32];
Picture *long_ref[32];
Picture default_ref_list[2][32];
Picture ref_list[2][32]; //FIXME size?
Picture field_ref_list[2][32]; //FIXME size?
Picture *delayed_pic[16]; //FIXME size?
Picture *delayed_output_pic;
/**
* memory management control operations buffer.
*/
MMCO mmco[MAX_MMCO_COUNT];
int mmco_index;
int long_ref_count; ///< number of actual long term references
int short_ref_count; ///< number of actual short term references
//data partitioning
GetBitContext intra_gb;
GetBitContext inter_gb;
GetBitContext *intra_gb_ptr;
GetBitContext *inter_gb_ptr;
DCTELEM mb[16*24] __align8;
/**
* Cabac
*/
CABACContext cabac;
uint8_t cabac_state[460];
int cabac_init_idc;
/* 0x100 -> non null luma_dc, 0x80/0x40 -> non null chroma_dc (cb/cr), 0x?0 -> chroma_cbp(0,1,2), 0x0? luma_cbp */
uint16_t *cbp_table;
int top_cbp;
int left_cbp;
/* chroma_pred_mode for i4x4 or i16x16, else 0 */
uint8_t *chroma_pred_mode_table;
int last_qscale_diff;
int16_t (*mvd_table[2])[2];
int16_t mvd_cache[2][5*8][2] __align8;
uint8_t *direct_table;
uint8_t direct_cache[5*8];
uint8_t zigzag_scan[16];
uint8_t field_scan[16];
const uint8_t *zigzag_scan_q0;
const uint8_t *field_scan_q0;
int x264_build;
}H264Context;
static VLC coeff_token_vlc[4];
static VLC chroma_dc_coeff_token_vlc;
static VLC total_zeros_vlc[15];
static VLC chroma_dc_total_zeros_vlc[3];
static VLC run_vlc[6];
static VLC run7_vlc;
static void svq3_luma_dc_dequant_idct_c(DCTELEM *block, int qp);
static void svq3_add_idct_c(uint8_t *dst, DCTELEM *block, int stride, int qp, int dc);
static void filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize);
static inline uint32_t pack16to32(int a, int b){
#ifdef WORDS_BIGENDIAN
return (b&0xFFFF) + (a<<16);
#else
return (a&0xFFFF) + (b<<16);
#endif
}
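/*
 * Illustrative note (not from the original source): pack16to32 lets a
 * motion-vector pair be moved with a single 32-bit load/store.  Assuming a
 * little-endian host:
 *
 *     int16_t mv[2];
 *     *(uint32_t*)mv = pack16to32(-3, 7);  // mv[0] == -3, mv[1] == 7
 *
 * On big-endian hosts the #ifdef above swaps the halves so the in-memory
 * component order stays the same.
 */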
/**
* fill a rectangle.
* @param h height of the rectangle, should be a constant
* @param w width of the rectangle, should be a constant
* @param size the size of val (1 or 4), should be a constant
*/
static inline void fill_rectangle(void *vp, int w, int h, int stride, uint32_t val, int size){ //FIXME ensure this IS inlined
uint8_t *p= (uint8_t*)vp;
assert(size==1 || size==4);
w *= size;
stride *= size;
assert((((int)vp)&(FFMIN(w, STRIDE_ALIGN)-1)) == 0);
assert((stride&(w-1))==0);
//FIXME check what gcc generates for 64 bit on x86 and possibly write a 32 bit ver of it
if(w==2 && h==2){
*(uint16_t*)(p + 0)=
*(uint16_t*)(p + stride)= size==4 ? val : val*0x0101;
}else if(w==2 && h==4){
*(uint16_t*)(p + 0*stride)=
*(uint16_t*)(p + 1*stride)=
*(uint16_t*)(p + 2*stride)=
*(uint16_t*)(p + 3*stride)= size==4 ? val : val*0x0101;
}else if(w==4 && h==1){
*(uint32_t*)(p + 0*stride)= size==4 ? val : val*0x01010101;
}else if(w==4 && h==2){
*(uint32_t*)(p + 0*stride)=
*(uint32_t*)(p + 1*stride)= size==4 ? val : val*0x01010101;
}else if(w==4 && h==4){
*(uint32_t*)(p + 0*stride)=
*(uint32_t*)(p + 1*stride)=
*(uint32_t*)(p + 2*stride)=
*(uint32_t*)(p + 3*stride)= size==4 ? val : val*0x01010101;
}else if(w==8 && h==1){
*(uint32_t*)(p + 0)=
*(uint32_t*)(p + 4)= size==4 ? val : val*0x01010101;
}else if(w==8 && h==2){
*(uint32_t*)(p + 0 + 0*stride)=
*(uint32_t*)(p + 4 + 0*stride)=
*(uint32_t*)(p + 0 + 1*stride)=
*(uint32_t*)(p + 4 + 1*stride)= size==4 ? val : val*0x01010101;
}else if(w==8 && h==4){
*(uint64_t*)(p + 0*stride)=
*(uint64_t*)(p + 1*stride)=
*(uint64_t*)(p + 2*stride)=
*(uint64_t*)(p + 3*stride)= size==4 ? (uint64_t)val*0x0100000001U : (uint64_t)val*0x0101010101010101U;
}else if(w==16 && h==2){
*(uint64_t*)(p + 0+0*stride)=
*(uint64_t*)(p + 8+0*stride)=
*(uint64_t*)(p + 0+1*stride)=
*(uint64_t*)(p + 8+1*stride)= size==4 ? (uint64_t)val*0x0100000001U : (uint64_t)val*0x0101010101010101U;
}else if(w==16 && h==4){
*(uint64_t*)(p + 0+0*stride)=
*(uint64_t*)(p + 8+0*stride)=
*(uint64_t*)(p + 0+1*stride)=
*(uint64_t*)(p + 8+1*stride)=
*(uint64_t*)(p + 0+2*stride)=
*(uint64_t*)(p + 8+2*stride)=
*(uint64_t*)(p + 0+3*stride)=
*(uint64_t*)(p + 8+3*stride)= size==4 ? (uint64_t)val*0x0100000001U : (uint64_t)val*0x0101010101010101U;
}else
assert(0);
}
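/*
 * Usage sketch (illustrative, mirroring calls made later in this file):
 *
 *     // 4x4 block of int8_t reference indices; the cache is 8 entries wide
 *     fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
 *     // matching 4x4 block of packed 16+16 bit motion vectors
 *     fill_rectangle(&h->mv_cache[0][scan8[0]], 4, 4, 8,
 *                    pack16to32(mx, my), 4);
 *
 * Because w, h and size are compile-time constants at every call site, the
 * big if/else chain folds down to a handful of stores.
 */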
static inline void fill_caches(H264Context *h, int mb_type, int for_deblock){
MpegEncContext * const s = &h->s;
const int mb_xy= s->mb_x + s->mb_y*s->mb_stride;
int topleft_xy, top_xy, topright_xy, left_xy[2];
int topleft_type, top_type, topright_type, left_type[2];
int left_block[8];
int i;
//FIXME deblocking can skip fill_caches much of the time with multiple slices too.
// the actual condition is whether we're on the edge of a slice,
// and even then the intra and nnz parts are unnecessary.
if(for_deblock && h->slice_num == 1)
return;
//wow, what a mess; why didn't they simplify the interlacing & intra stuff? I can't imagine these complex rules are worth it
top_xy = mb_xy - s->mb_stride;
topleft_xy = top_xy - 1;
topright_xy= top_xy + 1;
left_xy[1] = left_xy[0] = mb_xy-1;
left_block[0]= 0;
left_block[1]= 1;
left_block[2]= 2;
left_block[3]= 3;
left_block[4]= 7;
left_block[5]= 10;
left_block[6]= 8;
left_block[7]= 11;
if(h->mb_aff_frame){
const int pair_xy = s->mb_x + (s->mb_y & ~1)*s->mb_stride;
const int top_pair_xy = pair_xy - s->mb_stride;
const int topleft_pair_xy = top_pair_xy - 1;
const int topright_pair_xy = top_pair_xy + 1;
const int topleft_mb_frame_flag = !IS_INTERLACED(s->current_picture.mb_type[topleft_pair_xy]);
const int top_mb_frame_flag = !IS_INTERLACED(s->current_picture.mb_type[top_pair_xy]);
const int topright_mb_frame_flag = !IS_INTERLACED(s->current_picture.mb_type[topright_pair_xy]);
const int left_mb_frame_flag = !IS_INTERLACED(s->current_picture.mb_type[pair_xy-1]);
const int curr_mb_frame_flag = !IS_INTERLACED(mb_type);
const int bottom = (s->mb_y & 1);
tprintf("fill_caches: curr_mb_frame_flag:%d, left_mb_frame_flag:%d, topleft_mb_frame_flag:%d, top_mb_frame_flag:%d, topright_mb_frame_flag:%d\n", curr_mb_frame_flag, left_mb_frame_flag, topleft_mb_frame_flag, top_mb_frame_flag, topright_mb_frame_flag);
if (bottom
? !curr_mb_frame_flag // bottom macroblock
: (!curr_mb_frame_flag && !top_mb_frame_flag) // top macroblock
) {
top_xy -= s->mb_stride;
}
if (bottom
? !curr_mb_frame_flag // bottom macroblock
: (!curr_mb_frame_flag && !topleft_mb_frame_flag) // top macroblock
) {
topleft_xy -= s->mb_stride;
}
if (bottom
? !curr_mb_frame_flag // bottom macroblock
: (!curr_mb_frame_flag && !topright_mb_frame_flag) // top macroblock
) {
topright_xy -= s->mb_stride;
}
if (left_mb_frame_flag != curr_mb_frame_flag) {
left_xy[1] = left_xy[0] = pair_xy - 1;
if (curr_mb_frame_flag) {
if (bottom) {
left_block[0]= 2;
left_block[1]= 2;
left_block[2]= 3;
left_block[3]= 3;
left_block[4]= 8;
left_block[5]= 11;
left_block[6]= 8;
left_block[7]= 11;
} else {
left_block[0]= 0;
left_block[1]= 0;
left_block[2]= 1;
left_block[3]= 1;
left_block[4]= 7;
left_block[5]= 10;
left_block[6]= 7;
left_block[7]= 10;
}
} else {
left_xy[1] += s->mb_stride;
//left_block[0]= 0;
left_block[1]= 2;
left_block[2]= 0;
left_block[3]= 2;
//left_block[4]= 7;
left_block[5]= 10;
left_block[6]= 7;
left_block[7]= 10;
}
}
}
h->top_mb_xy = top_xy;
h->left_mb_xy[0] = left_xy[0];
h->left_mb_xy[1] = left_xy[1];
if(for_deblock){
topleft_type = h->slice_table[topleft_xy ] < 255 ? s->current_picture.mb_type[topleft_xy] : 0;
top_type = h->slice_table[top_xy ] < 255 ? s->current_picture.mb_type[top_xy] : 0;
topright_type= h->slice_table[topright_xy] < 255 ? s->current_picture.mb_type[topright_xy]: 0;
left_type[0] = h->slice_table[left_xy[0] ] < 255 ? s->current_picture.mb_type[left_xy[0]] : 0;
left_type[1] = h->slice_table[left_xy[1] ] < 255 ? s->current_picture.mb_type[left_xy[1]] : 0;
}else{
topleft_type = h->slice_table[topleft_xy ] == h->slice_num ? s->current_picture.mb_type[topleft_xy] : 0;
top_type = h->slice_table[top_xy ] == h->slice_num ? s->current_picture.mb_type[top_xy] : 0;
topright_type= h->slice_table[topright_xy] == h->slice_num ? s->current_picture.mb_type[topright_xy]: 0;
left_type[0] = h->slice_table[left_xy[0] ] == h->slice_num ? s->current_picture.mb_type[left_xy[0]] : 0;
left_type[1] = h->slice_table[left_xy[1] ] == h->slice_num ? s->current_picture.mb_type[left_xy[1]] : 0;
}
if(IS_INTRA(mb_type)){
h->topleft_samples_available=
h->top_samples_available=
h->left_samples_available= 0xFFFF;
h->topright_samples_available= 0xEEEA;
if(!IS_INTRA(top_type) && (top_type==0 || h->pps.constrained_intra_pred)){
h->topleft_samples_available= 0xB3FF;
h->top_samples_available= 0x33FF;
h->topright_samples_available= 0x26EA;
}
for(i=0; i<2; i++){
if(!IS_INTRA(left_type[i]) && (left_type[i]==0 || h->pps.constrained_intra_pred)){
h->topleft_samples_available&= 0xDF5F;
h->left_samples_available&= 0x5F5F;
}
}
if(!IS_INTRA(topleft_type) && (topleft_type==0 || h->pps.constrained_intra_pred))
h->topleft_samples_available&= 0x7FFF;
if(!IS_INTRA(topright_type) && (topright_type==0 || h->pps.constrained_intra_pred))
h->topright_samples_available&= 0xFBFF;
if(IS_INTRA4x4(mb_type)){
if(IS_INTRA4x4(top_type)){
h->intra4x4_pred_mode_cache[4+8*0]= h->intra4x4_pred_mode[top_xy][4];
h->intra4x4_pred_mode_cache[5+8*0]= h->intra4x4_pred_mode[top_xy][5];
h->intra4x4_pred_mode_cache[6+8*0]= h->intra4x4_pred_mode[top_xy][6];
h->intra4x4_pred_mode_cache[7+8*0]= h->intra4x4_pred_mode[top_xy][3];
}else{
int pred;
if(!top_type || (IS_INTER(top_type) && h->pps.constrained_intra_pred))
pred= -1;
else{
pred= 2;
}
h->intra4x4_pred_mode_cache[4+8*0]=
h->intra4x4_pred_mode_cache[5+8*0]=
h->intra4x4_pred_mode_cache[6+8*0]=
h->intra4x4_pred_mode_cache[7+8*0]= pred;
}
for(i=0; i<2; i++){
if(IS_INTRA4x4(left_type[i])){
h->intra4x4_pred_mode_cache[3+8*1 + 2*8*i]= h->intra4x4_pred_mode[left_xy[i]][left_block[0+2*i]];
h->intra4x4_pred_mode_cache[3+8*2 + 2*8*i]= h->intra4x4_pred_mode[left_xy[i]][left_block[1+2*i]];
}else{
int pred;
if(!left_type[i] || (IS_INTER(left_type[i]) && h->pps.constrained_intra_pred))
pred= -1;
else{
pred= 2;
}
h->intra4x4_pred_mode_cache[3+8*1 + 2*8*i]=
h->intra4x4_pred_mode_cache[3+8*2 + 2*8*i]= pred;
}
}
}
}
/*
0 . T T. T T T T
1 L . .L . . . .
2 L . .L . . . .
3 . T TL . . . .
4 L . .L . . . .
5 L . .. . . . .
*/
//FIXME constrained_intra_pred & partitioning & nnz (let's hope this is just a typo in the spec)
if(top_type){
h->non_zero_count_cache[4+8*0]= h->non_zero_count[top_xy][4];
h->non_zero_count_cache[5+8*0]= h->non_zero_count[top_xy][5];
h->non_zero_count_cache[6+8*0]= h->non_zero_count[top_xy][6];
h->non_zero_count_cache[7+8*0]= h->non_zero_count[top_xy][3];
h->non_zero_count_cache[1+8*0]= h->non_zero_count[top_xy][9];
h->non_zero_count_cache[2+8*0]= h->non_zero_count[top_xy][8];
h->non_zero_count_cache[1+8*3]= h->non_zero_count[top_xy][12];
h->non_zero_count_cache[2+8*3]= h->non_zero_count[top_xy][11];
}else{
h->non_zero_count_cache[4+8*0]=
h->non_zero_count_cache[5+8*0]=
h->non_zero_count_cache[6+8*0]=
h->non_zero_count_cache[7+8*0]=
h->non_zero_count_cache[1+8*0]=
h->non_zero_count_cache[2+8*0]=
h->non_zero_count_cache[1+8*3]=
h->non_zero_count_cache[2+8*3]= h->pps.cabac && !IS_INTRA(mb_type) ? 0 : 64;
}
for (i=0; i<2; i++) {
if(left_type[i]){
h->non_zero_count_cache[3+8*1 + 2*8*i]= h->non_zero_count[left_xy[i]][left_block[0+2*i]];
h->non_zero_count_cache[3+8*2 + 2*8*i]= h->non_zero_count[left_xy[i]][left_block[1+2*i]];
h->non_zero_count_cache[0+8*1 + 8*i]= h->non_zero_count[left_xy[i]][left_block[4+2*i]];
h->non_zero_count_cache[0+8*4 + 8*i]= h->non_zero_count[left_xy[i]][left_block[5+2*i]];
}else{
h->non_zero_count_cache[3+8*1 + 2*8*i]=
h->non_zero_count_cache[3+8*2 + 2*8*i]=
h->non_zero_count_cache[0+8*1 + 8*i]=
h->non_zero_count_cache[0+8*4 + 8*i]= h->pps.cabac && !IS_INTRA(mb_type) ? 0 : 64;
}
}
if( h->pps.cabac ) {
// top_cbp
if(top_type) {
h->top_cbp = h->cbp_table[top_xy];
} else if(IS_INTRA(mb_type)) {
h->top_cbp = 0x1C0;
} else {
h->top_cbp = 0;
}
// left_cbp
if (left_type[0]) {
h->left_cbp = h->cbp_table[left_xy[0]] & 0x1f0;
} else if(IS_INTRA(mb_type)) {
h->left_cbp = 0x1C0;
} else {
h->left_cbp = 0;
}
if (left_type[0]) {
h->left_cbp |= ((h->cbp_table[left_xy[0]]>>((left_block[0]&(~1))+1))&0x1) << 1;
}
if (left_type[1]) {
h->left_cbp |= ((h->cbp_table[left_xy[1]]>>((left_block[2]&(~1))+1))&0x1) << 3;
}
}
#if 1
//FIXME direct mb can skip much of this
if(IS_INTER(mb_type) || IS_DIRECT(mb_type)){
int list;
for(list=0; list<1+(h->slice_type==B_TYPE); list++){
if(!USES_LIST(mb_type, list) && !IS_DIRECT(mb_type) && !h->deblocking_filter){
/*if(!h->mv_cache_clean[list]){
memset(h->mv_cache [list], 0, 8*5*2*sizeof(int16_t)); //FIXME clean only input? clean at all?
memset(h->ref_cache[list], PART_NOT_AVAILABLE, 8*5*sizeof(int8_t));
h->mv_cache_clean[list]= 1;
}*/
continue;
}
h->mv_cache_clean[list]= 0;
if(IS_INTER(top_type)){
const int b_xy= h->mb2b_xy[top_xy] + 3*h->b_stride;
const int b8_xy= h->mb2b8_xy[top_xy] + h->b8_stride;
*(uint32_t*)h->mv_cache[list][scan8[0] + 0 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + 0];
*(uint32_t*)h->mv_cache[list][scan8[0] + 1 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + 1];
*(uint32_t*)h->mv_cache[list][scan8[0] + 2 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + 2];
*(uint32_t*)h->mv_cache[list][scan8[0] + 3 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + 3];
h->ref_cache[list][scan8[0] + 0 - 1*8]=
h->ref_cache[list][scan8[0] + 1 - 1*8]= s->current_picture.ref_index[list][b8_xy + 0];
h->ref_cache[list][scan8[0] + 2 - 1*8]=
h->ref_cache[list][scan8[0] + 3 - 1*8]= s->current_picture.ref_index[list][b8_xy + 1];
}else{
*(uint32_t*)h->mv_cache [list][scan8[0] + 0 - 1*8]=
*(uint32_t*)h->mv_cache [list][scan8[0] + 1 - 1*8]=
*(uint32_t*)h->mv_cache [list][scan8[0] + 2 - 1*8]=
*(uint32_t*)h->mv_cache [list][scan8[0] + 3 - 1*8]= 0;
*(uint32_t*)&h->ref_cache[list][scan8[0] + 0 - 1*8]= ((top_type ? LIST_NOT_USED : PART_NOT_AVAILABLE)&0xFF)*0x01010101;
}
//FIXME unify the cleanup code or something similar
if(IS_INTER(left_type[0])){
const int b_xy= h->mb2b_xy[left_xy[0]] + 3;
const int b8_xy= h->mb2b8_xy[left_xy[0]] + 1;
*(uint32_t*)h->mv_cache[list][scan8[0] - 1 + 0*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[0]];
*(uint32_t*)h->mv_cache[list][scan8[0] - 1 + 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[1]];
h->ref_cache[list][scan8[0] - 1 + 0*8]=
h->ref_cache[list][scan8[0] - 1 + 1*8]= s->current_picture.ref_index[list][b8_xy + h->b8_stride*(left_block[0]>>1)];
}else{
*(uint32_t*)h->mv_cache [list][scan8[0] - 1 + 0*8]=
*(uint32_t*)h->mv_cache [list][scan8[0] - 1 + 1*8]= 0;
h->ref_cache[list][scan8[0] - 1 + 0*8]=
h->ref_cache[list][scan8[0] - 1 + 1*8]= left_type[0] ? LIST_NOT_USED : PART_NOT_AVAILABLE;
}
if(IS_INTER(left_type[1])){
const int b_xy= h->mb2b_xy[left_xy[1]] + 3;
const int b8_xy= h->mb2b8_xy[left_xy[1]] + 1;
*(uint32_t*)h->mv_cache[list][scan8[0] - 1 + 2*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[2]];
*(uint32_t*)h->mv_cache[list][scan8[0] - 1 + 3*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[3]];
h->ref_cache[list][scan8[0] - 1 + 2*8]=
h->ref_cache[list][scan8[0] - 1 + 3*8]= s->current_picture.ref_index[list][b8_xy + h->b8_stride*(left_block[2]>>1)];
}else{
*(uint32_t*)h->mv_cache [list][scan8[0] - 1 + 2*8]=
*(uint32_t*)h->mv_cache [list][scan8[0] - 1 + 3*8]= 0;
h->ref_cache[list][scan8[0] - 1 + 2*8]=
h->ref_cache[list][scan8[0] - 1 + 3*8]= left_type[0] ? LIST_NOT_USED : PART_NOT_AVAILABLE;
assert((!left_type[0]) == (!left_type[1]));
}
if(for_deblock || (IS_DIRECT(mb_type) && !h->direct_spatial_mv_pred))
continue;
if(IS_INTER(topleft_type)){
const int b_xy = h->mb2b_xy[topleft_xy] + 3 + 3*h->b_stride;
const int b8_xy= h->mb2b8_xy[topleft_xy] + 1 + h->b8_stride;
*(uint32_t*)h->mv_cache[list][scan8[0] - 1 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy];
h->ref_cache[list][scan8[0] - 1 - 1*8]= s->current_picture.ref_index[list][b8_xy];
}else{
*(uint32_t*)h->mv_cache[list][scan8[0] - 1 - 1*8]= 0;
h->ref_cache[list][scan8[0] - 1 - 1*8]= topleft_type ? LIST_NOT_USED : PART_NOT_AVAILABLE;
}
if(IS_INTER(topright_type)){
const int b_xy= h->mb2b_xy[topright_xy] + 3*h->b_stride;
const int b8_xy= h->mb2b8_xy[topright_xy] + h->b8_stride;
*(uint32_t*)h->mv_cache[list][scan8[0] + 4 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy];
h->ref_cache[list][scan8[0] + 4 - 1*8]= s->current_picture.ref_index[list][b8_xy];
}else{
*(uint32_t*)h->mv_cache [list][scan8[0] + 4 - 1*8]= 0;
h->ref_cache[list][scan8[0] + 4 - 1*8]= topright_type ? LIST_NOT_USED : PART_NOT_AVAILABLE;
}
h->ref_cache[list][scan8[5 ]+1] =
h->ref_cache[list][scan8[7 ]+1] =
h->ref_cache[list][scan8[13]+1] = //FIXME remove past 3 (init somewhere else)
h->ref_cache[list][scan8[4 ]] =
h->ref_cache[list][scan8[12]] = PART_NOT_AVAILABLE;
*(uint32_t*)h->mv_cache [list][scan8[5 ]+1]=
*(uint32_t*)h->mv_cache [list][scan8[7 ]+1]=
*(uint32_t*)h->mv_cache [list][scan8[13]+1]= //FIXME remove past 3 (init somewhere else)
*(uint32_t*)h->mv_cache [list][scan8[4 ]]=
*(uint32_t*)h->mv_cache [list][scan8[12]]= 0;
if( h->pps.cabac ) {
/* XXX beurk, Load mvd */
if(IS_INTER(topleft_type)){
const int b_xy = h->mb2b_xy[topleft_xy] + 3 + 3*h->b_stride;
*(uint32_t*)h->mvd_cache[list][scan8[0] - 1 - 1*8]= *(uint32_t*)h->mvd_table[list][b_xy];
}else{
*(uint32_t*)h->mvd_cache[list][scan8[0] - 1 - 1*8]= 0;
}
if(IS_INTER(top_type)){
const int b_xy= h->mb2b_xy[top_xy] + 3*h->b_stride;
*(uint32_t*)h->mvd_cache[list][scan8[0] + 0 - 1*8]= *(uint32_t*)h->mvd_table[list][b_xy + 0];
*(uint32_t*)h->mvd_cache[list][scan8[0] + 1 - 1*8]= *(uint32_t*)h->mvd_table[list][b_xy + 1];
*(uint32_t*)h->mvd_cache[list][scan8[0] + 2 - 1*8]= *(uint32_t*)h->mvd_table[list][b_xy + 2];
*(uint32_t*)h->mvd_cache[list][scan8[0] + 3 - 1*8]= *(uint32_t*)h->mvd_table[list][b_xy + 3];
}else{
*(uint32_t*)h->mvd_cache [list][scan8[0] + 0 - 1*8]=
*(uint32_t*)h->mvd_cache [list][scan8[0] + 1 - 1*8]=
*(uint32_t*)h->mvd_cache [list][scan8[0] + 2 - 1*8]=
*(uint32_t*)h->mvd_cache [list][scan8[0] + 3 - 1*8]= 0;
}
if(IS_INTER(left_type[0])){
const int b_xy= h->mb2b_xy[left_xy[0]] + 3;
*(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 0*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[0]];
*(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 1*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[1]];
}else{
*(uint32_t*)h->mvd_cache [list][scan8[0] - 1 + 0*8]=
*(uint32_t*)h->mvd_cache [list][scan8[0] - 1 + 1*8]= 0;
}
if(IS_INTER(left_type[1])){
const int b_xy= h->mb2b_xy[left_xy[1]] + 3;
*(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 2*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[2]];
*(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 3*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[3]];
}else{
*(uint32_t*)h->mvd_cache [list][scan8[0] - 1 + 2*8]=
*(uint32_t*)h->mvd_cache [list][scan8[0] - 1 + 3*8]= 0;
}
*(uint32_t*)h->mvd_cache [list][scan8[5 ]+1]=
*(uint32_t*)h->mvd_cache [list][scan8[7 ]+1]=
*(uint32_t*)h->mvd_cache [list][scan8[13]+1]= //FIXME remove past 3 (init somewhere else)
*(uint32_t*)h->mvd_cache [list][scan8[4 ]]=
*(uint32_t*)h->mvd_cache [list][scan8[12]]= 0;
if(h->slice_type == B_TYPE){
fill_rectangle(&h->direct_cache[scan8[0]], 4, 4, 8, 0, 1);
if(IS_DIRECT(top_type)){
*(uint32_t*)&h->direct_cache[scan8[0] - 1*8]= 0x01010101;
}else if(IS_8X8(top_type)){
int b8_xy = h->mb2b8_xy[top_xy] + h->b8_stride;
h->direct_cache[scan8[0] + 0 - 1*8]= h->direct_table[b8_xy];
h->direct_cache[scan8[0] + 2 - 1*8]= h->direct_table[b8_xy + 1];
}else{
*(uint32_t*)&h->direct_cache[scan8[0] - 1*8]= 0;
}
//FIXME interlacing
if(IS_DIRECT(left_type[0])){
h->direct_cache[scan8[0] - 1 + 0*8]=
h->direct_cache[scan8[0] - 1 + 2*8]= 1;
}else if(IS_8X8(left_type[0])){
int b8_xy = h->mb2b8_xy[left_xy[0]] + 1;
h->direct_cache[scan8[0] - 1 + 0*8]= h->direct_table[b8_xy];
h->direct_cache[scan8[0] - 1 + 2*8]= h->direct_table[b8_xy + h->b8_stride];
}else{
h->direct_cache[scan8[0] - 1 + 0*8]=
h->direct_cache[scan8[0] - 1 + 2*8]= 0;
}
}
}
}
}
#endif
h->neighbor_transform_size= !!IS_8x8DCT(top_type) + !!IS_8x8DCT(left_type[0]);
}
static inline void write_back_intra_pred_mode(H264Context *h){
MpegEncContext * const s = &h->s;
const int mb_xy= s->mb_x + s->mb_y*s->mb_stride;
h->intra4x4_pred_mode[mb_xy][0]= h->intra4x4_pred_mode_cache[7+8*1];
h->intra4x4_pred_mode[mb_xy][1]= h->intra4x4_pred_mode_cache[7+8*2];
h->intra4x4_pred_mode[mb_xy][2]= h->intra4x4_pred_mode_cache[7+8*3];
h->intra4x4_pred_mode[mb_xy][3]= h->intra4x4_pred_mode_cache[7+8*4];
h->intra4x4_pred_mode[mb_xy][4]= h->intra4x4_pred_mode_cache[4+8*4];
h->intra4x4_pred_mode[mb_xy][5]= h->intra4x4_pred_mode_cache[5+8*4];
h->intra4x4_pred_mode[mb_xy][6]= h->intra4x4_pred_mode_cache[6+8*4];
}
/**
* checks if the top & left blocks are available if needed & changes the dc mode so it only uses the available blocks.
*/
static inline int check_intra4x4_pred_mode(H264Context *h){
MpegEncContext * const s = &h->s;
static const int8_t top [12]= {-1, 0,LEFT_DC_PRED,-1,-1,-1,-1,-1, 0};
static const int8_t left[12]= { 0,-1, TOP_DC_PRED, 0,-1,-1,-1, 0,-1,DC_128_PRED};
int i;
if(!(h->top_samples_available&0x8000)){
for(i=0; i<4; i++){
int status= top[ h->intra4x4_pred_mode_cache[scan8[0] + i] ];
if(status<0){
av_log(h->s.avctx, AV_LOG_ERROR, "top block unavailable for requested intra4x4 mode %d at %d %d\n", status, s->mb_x, s->mb_y);
return -1;
} else if(status){
h->intra4x4_pred_mode_cache[scan8[0] + i]= status;
}
}
}
if(!(h->left_samples_available&0x8000)){
for(i=0; i<4; i++){
int status= left[ h->intra4x4_pred_mode_cache[scan8[0] + 8*i] ];
if(status<0){
av_log(h->s.avctx, AV_LOG_ERROR, "left block unavailable for requested intra4x4 mode %d at %d %d\n", status, s->mb_x, s->mb_y);
return -1;
} else if(status){
h->intra4x4_pred_mode_cache[scan8[0] + 8*i]= status;
}
}
}
return 0;
} //FIXME cleanup like next
/**
* checks if the top & left blocks are available if needed & changes the dc mode so it only uses the available blocks.
*/
static inline int check_intra_pred_mode(H264Context *h, int mode){
MpegEncContext * const s = &h->s;
static const int8_t top [7]= {LEFT_DC_PRED8x8, 1,-1,-1};
static const int8_t left[7]= { TOP_DC_PRED8x8,-1, 2,-1,DC_128_PRED8x8};
if(mode < 0 || mode > 6) {
av_log(h->s.avctx, AV_LOG_ERROR, "out of range intra chroma pred mode at %d %d\n", s->mb_x, s->mb_y);
return -1;
}
if(!(h->top_samples_available&0x8000)){
mode= top[ mode ];
if(mode<0){
av_log(h->s.avctx, AV_LOG_ERROR, "top block unavailable for requested intra mode at %d %d\n", s->mb_x, s->mb_y);
return -1;
}
}
if(!(h->left_samples_available&0x8000)){
mode= left[ mode ];
if(mode<0){
av_log(h->s.avctx, AV_LOG_ERROR, "left block unavailable for requested intra mode at %d %d\n", s->mb_x, s->mb_y);
return -1;
}
}
return mode;
}
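/*
 * Example (reading the two tables above): when the top samples are
 * unavailable, DC (mode 0) degrades to LEFT_DC_PRED8x8, horizontal
 * (mode 1) passes through unchanged, and vertical (mode 2) maps to -1,
 * i.e. is rejected, since it needs exactly the samples that are missing.
 */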
/**
* gets the predicted intra4x4 prediction mode.
*/
static inline int pred_intra_mode(H264Context *h, int n){
const int index8= scan8[n];
const int left= h->intra4x4_pred_mode_cache[index8 - 1];
const int top = h->intra4x4_pred_mode_cache[index8 - 8];
const int min= FFMIN(left, top);
tprintf("mode:%d %d min:%d\n", left ,top, min);
if(min<0) return DC_PRED;
else return min;
}
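/* Worked example: left==1 (horizontal) and top==0 (vertical) predict
 * min(1,0)==0; if either neighbour is unavailable its cache entry is
 * negative, the minimum goes negative and DC_PRED is returned instead. */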
static inline void write_back_non_zero_count(H264Context *h){
MpegEncContext * const s = &h->s;
const int mb_xy= s->mb_x + s->mb_y*s->mb_stride;
h->non_zero_count[mb_xy][0]= h->non_zero_count_cache[7+8*1];
h->non_zero_count[mb_xy][1]= h->non_zero_count_cache[7+8*2];
h->non_zero_count[mb_xy][2]= h->non_zero_count_cache[7+8*3];
h->non_zero_count[mb_xy][3]= h->non_zero_count_cache[7+8*4];
h->non_zero_count[mb_xy][4]= h->non_zero_count_cache[4+8*4];
h->non_zero_count[mb_xy][5]= h->non_zero_count_cache[5+8*4];
h->non_zero_count[mb_xy][6]= h->non_zero_count_cache[6+8*4];
h->non_zero_count[mb_xy][9]= h->non_zero_count_cache[1+8*2];
h->non_zero_count[mb_xy][8]= h->non_zero_count_cache[2+8*2];
h->non_zero_count[mb_xy][7]= h->non_zero_count_cache[2+8*1];
h->non_zero_count[mb_xy][12]=h->non_zero_count_cache[1+8*5];
h->non_zero_count[mb_xy][11]=h->non_zero_count_cache[2+8*5];
h->non_zero_count[mb_xy][10]=h->non_zero_count_cache[2+8*4];
}
/**
* gets the predicted number of non zero coefficients.
* @param n block index
*/
static inline int pred_non_zero_count(H264Context *h, int n){
const int index8= scan8[n];
const int left= h->non_zero_count_cache[index8 - 1];
const int top = h->non_zero_count_cache[index8 - 8];
int i= left + top;
if(i<64) i= (i+1)>>1;
tprintf("pred_nnz L%X T%X n%d s%d P%X\n", left, top, n, scan8[n], i&31);
return i&31;
}
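/* Example: left==3 and top==5 coded coefficients predict (3+5+1)>>1 == 4.
 * An unavailable neighbour is cached as 64, which pushes i past the
 * averaging branch; the final i&31 then strips that 64 again, so a single
 * available neighbour predicts its own count. */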
static inline int fetch_diagonal_mv(H264Context *h, const int16_t **C, int i, int list, int part_width){
const int topright_ref= h->ref_cache[list][ i - 8 + part_width ];
if(topright_ref != PART_NOT_AVAILABLE){
*C= h->mv_cache[list][ i - 8 + part_width ];
return topright_ref;
}else{
tprintf("topright MV not available\n");
*C= h->mv_cache[list][ i - 8 - 1 ];
return h->ref_cache[list][ i - 8 - 1 ];
}
}
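/* When the top-right block is unavailable (outside the picture or in
 * another slice) the predictor falls back to the top-left block at
 * i - 8 - 1, matching the C-candidate substitution in the spec. */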
/**
* gets the predicted MV.
* @param n the block index
* @param part_width the width of the partition (4, 8,16) -> (1, 2, 4)
* @param mx the x component of the predicted motion vector
* @param my the y component of the predicted motion vector
*/
static inline void pred_motion(H264Context * const h, int n, int part_width, int list, int ref, int * const mx, int * const my){
const int index8= scan8[n];
const int top_ref= h->ref_cache[list][ index8 - 8 ];
const int left_ref= h->ref_cache[list][ index8 - 1 ];
const int16_t * const A= h->mv_cache[list][ index8 - 1 ];
const int16_t * const B= h->mv_cache[list][ index8 - 8 ];
const int16_t * C;
int diagonal_ref, match_count;
assert(part_width==1 || part_width==2 || part_width==4);
/* mv_cache
B . . A T T T T
U . . L . . , .
U . . L . . . .
U . . L . . , .
. . . L . . . .
*/
diagonal_ref= fetch_diagonal_mv(h, &C, index8, list, part_width);
match_count= (diagonal_ref==ref) + (top_ref==ref) + (left_ref==ref);
tprintf("pred_motion match_count=%d\n", match_count);
if(match_count > 1){ //most common
*mx= mid_pred(A[0], B[0], C[0]);
*my= mid_pred(A[1], B[1], C[1]);
}else if(match_count==1){
if(left_ref==ref){
*mx= A[0];
*my= A[1];
}else if(top_ref==ref){
*mx= B[0];
*my= B[1];
}else{
*mx= C[0];
*my= C[1];
}
}else{
if(top_ref == PART_NOT_AVAILABLE && diagonal_ref == PART_NOT_AVAILABLE && left_ref != PART_NOT_AVAILABLE){
*mx= A[0];
*my= A[1];
}else{
*mx= mid_pred(A[0], B[0], C[0]);
*my= mid_pred(A[1], B[1], C[1]);
}
}
tprintf("pred_motion (%2d %2d %2d) (%2d %2d %2d) (%2d %2d %2d) -> (%2d %2d %2d) at %2d %2d %d list %d\n", top_ref, B[0], B[1], diagonal_ref, C[0], C[1], left_ref, A[0], A[1], ref, *mx, *my, h->s.mb_x, h->s.mb_y, n, list);
}
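/* Median example: with A=(1,2), B=(4,0), C=(2,5) and all three neighbours
 * using the same reference, the prediction is the componentwise median:
 * mx = mid_pred(1,4,2) = 2, my = mid_pred(2,0,5) = 2.  The match_count==1
 * branch instead copies the one neighbour that shares the reference. */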
/**
* gets the directionally predicted 16x8 MV.
* @param n the block index
* @param mx the x component of the predicted motion vector
* @param my the y component of the predicted motion vector
*/
static inline void pred_16x8_motion(H264Context * const h, int n, int list, int ref, int * const mx, int * const my){
if(n==0){
const int top_ref= h->ref_cache[list][ scan8[0] - 8 ];
const int16_t * const B= h->mv_cache[list][ scan8[0] - 8 ];
tprintf("pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d\n", top_ref, B[0], B[1], h->s.mb_x, h->s.mb_y, n, list);
if(top_ref == ref){
*mx= B[0];
*my= B[1];
return;
}
}else{
const int left_ref= h->ref_cache[list][ scan8[8] - 1 ];
const int16_t * const A= h->mv_cache[list][ scan8[8] - 1 ];
tprintf("pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d\n", left_ref, A[0], A[1], h->s.mb_x, h->s.mb_y, n, list);
if(left_ref == ref){
*mx= A[0];
*my= A[1];
return;
}
}
//RARE
pred_motion(h, n, 4, list, ref, mx, my);
}
/**
* gets the directionally predicted 8x16 MV.
* @param n the block index
* @param mx the x component of the predicted motion vector
* @param my the y component of the predicted motion vector
*/
static inline void pred_8x16_motion(H264Context * const h, int n, int list, int ref, int * const mx, int * const my){
if(n==0){
const int left_ref= h->ref_cache[list][ scan8[0] - 1 ];
const int16_t * const A= h->mv_cache[list][ scan8[0] - 1 ];
tprintf("pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d\n", left_ref, A[0], A[1], h->s.mb_x, h->s.mb_y, n, list);
if(left_ref == ref){
*mx= A[0];
*my= A[1];
return;
}
}else{
const int16_t * C;
int diagonal_ref;
diagonal_ref= fetch_diagonal_mv(h, &C, scan8[4], list, 2);
tprintf("pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d\n", diagonal_ref, C[0], C[1], h->s.mb_x, h->s.mb_y, n, list);
if(diagonal_ref == ref){
*mx= C[0];
*my= C[1];
return;
}
}
//RARE
pred_motion(h, n, 2, list, ref, mx, my);
}
static inline void pred_pskip_motion(H264Context * const h, int * const mx, int * const my){
const int top_ref = h->ref_cache[0][ scan8[0] - 8 ];
const int left_ref= h->ref_cache[0][ scan8[0] - 1 ];
tprintf("pred_pskip: (%d) (%d) at %2d %2d\n", top_ref, left_ref, h->s.mb_x, h->s.mb_y);
if(top_ref == PART_NOT_AVAILABLE || left_ref == PART_NOT_AVAILABLE
|| (top_ref == 0 && *(uint32_t*)h->mv_cache[0][ scan8[0] - 8 ] == 0)
|| (left_ref == 0 && *(uint32_t*)h->mv_cache[0][ scan8[0] - 1 ] == 0)){
*mx = *my = 0;
return;
}
pred_motion(h, 0, 4, 0, 0, mx, my);
return;
}
static inline void direct_dist_scale_factor(H264Context * const h){
const int poc = h->s.current_picture_ptr->poc;
const int poc1 = h->ref_list[1][0].poc;
int i;
for(i=0; i<h->ref_count[0]; i++){
int poc0 = h->ref_list[0][i].poc;
int td = clip(poc1 - poc0, -128, 127);
if(td == 0 /* FIXME || pic0 is a long-term ref */){
h->dist_scale_factor[i] = 256;
}else{
int tb = clip(poc - poc0, -128, 127);
int tx = (16384 + (ABS(td) >> 1)) / td;
h->dist_scale_factor[i] = clip((tb*tx + 32) >> 6, -1024, 1023);
}
}
}
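/*
 * Worked numbers (illustrative) for the temporal-direct scale factor:
 * poc=4, poc0=0, poc1=8 gives
 *     td = clip(8-0, -128, 127) = 8
 *     tb = clip(4-0, -128, 127) = 4
 *     tx = (16384 + 4) / 8      = 2048
 *     dist_scale_factor = clip((4*2048 + 32) >> 6, -1024, 1023) = 128
 * so a colocated MV is scaled by 128/256, i.e. halved, as expected for a
 * B frame exactly midway between its two references.
 */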
static inline void direct_ref_list_init(H264Context * const h){
MpegEncContext * const s = &h->s;
Picture * const ref1 = &h->ref_list[1][0];
Picture * const cur = s->current_picture_ptr;
int list, i, j;
if(cur->pict_type == I_TYPE)
cur->ref_count[0] = 0;
if(cur->pict_type != B_TYPE)
cur->ref_count[1] = 0;
for(list=0; list<2; list++){
cur->ref_count[list] = h->ref_count[list];
for(j=0; j<h->ref_count[list]; j++)
cur->ref_poc[list][j] = h->ref_list[list][j].poc;
}
if(cur->pict_type != B_TYPE || h->direct_spatial_mv_pred)
return;
for(list=0; list<2; list++){
for(i=0; i<ref1->ref_count[list]; i++){
const int poc = ref1->ref_poc[list][i];
h->map_col_to_list0[list][i] = PART_NOT_AVAILABLE;
for(j=0; j<h->ref_count[list]; j++)
if(h->ref_list[list][j].poc == poc){
h->map_col_to_list0[list][i] = j;
break;
}
}
}
}
static inline void pred_direct_motion(H264Context * const h, int *mb_type){
MpegEncContext * const s = &h->s;
const int mb_xy = s->mb_x + s->mb_y*s->mb_stride;
const int b8_xy = 2*s->mb_x + 2*s->mb_y*h->b8_stride;
const int b4_xy = 4*s->mb_x + 4*s->mb_y*h->b_stride;
const int mb_type_col = h->ref_list[1][0].mb_type[mb_xy];
const int16_t (*l1mv0)[2] = (const int16_t (*)[2]) &h->ref_list[1][0].motion_val[0][b4_xy];
const int16_t (*l1mv1)[2] = (const int16_t (*)[2]) &h->ref_list[1][0].motion_val[1][b4_xy];
const int8_t *l1ref0 = &h->ref_list[1][0].ref_index[0][b8_xy];
const int8_t *l1ref1 = &h->ref_list[1][0].ref_index[1][b8_xy];
const int is_b8x8 = IS_8X8(*mb_type);
int sub_mb_type;
int i8, i4;
if(IS_8X8(mb_type_col) && !h->sps.direct_8x8_inference_flag){
/* FIXME save sub mb types from previous frames (or derive from MVs)
* so we know exactly what block size to use */
sub_mb_type = MB_TYPE_8x8|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_4x4 */
*mb_type = MB_TYPE_8x8|MB_TYPE_L0L1;
}else if(!is_b8x8 && (IS_16X16(mb_type_col) || IS_INTRA(mb_type_col))){
sub_mb_type = MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
*mb_type = MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_16x16 */
}else{
sub_mb_type = MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
*mb_type = MB_TYPE_8x8|MB_TYPE_L0L1;
}
if(!is_b8x8)
*mb_type |= MB_TYPE_DIRECT2;
tprintf("mb_type = %08x, sub_mb_type = %08x, is_b8x8 = %d, mb_type_col = %08x\n", *mb_type, sub_mb_type, is_b8x8, mb_type_col);
if(h->direct_spatial_mv_pred){
int ref[2];
int mv[2][2];
int list;
/* ref = min(neighbors) */
for(list=0; list<2; list++){
int refa = h->ref_cache[list][scan8[0] - 1];
int refb = h->ref_cache[list][scan8[0] - 8];
int refc = h->ref_cache[list][scan8[0] - 8 + 4];
if(refc == -2) /* PART_NOT_AVAILABLE */
refc = h->ref_cache[list][scan8[0] - 8 - 1];
ref[list] = refa;
if(ref[list] < 0 || (refb < ref[list] && refb >= 0))
ref[list] = refb;
if(ref[list] < 0 || (refc < ref[list] && refc >= 0))
ref[list] = refc;
if(ref[list] < 0)
ref[list] = -1;
}
if(ref[0] < 0 && ref[1] < 0){
ref[0] = ref[1] = 0;
mv[0][0] = mv[0][1] =
mv[1][0] = mv[1][1] = 0;
}else{
for(list=0; list<2; list++){
if(ref[list] >= 0)
pred_motion(h, 0, 4, list, ref[list], &mv[list][0], &mv[list][1]);
else
mv[list][0] = mv[list][1] = 0;
}
}
if(ref[1] < 0){
*mb_type &= ~MB_TYPE_P0L1;
sub_mb_type &= ~MB_TYPE_P0L1;
}else if(ref[0] < 0){
*mb_type &= ~MB_TYPE_P0L0;
sub_mb_type &= ~MB_TYPE_P0L0;
}
if(IS_16X16(*mb_type)){
fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref[0], 1);
fill_rectangle(&h->ref_cache[1][scan8[0]], 4, 4, 8, ref[1], 1);
if(!IS_INTRA(mb_type_col)
&& ( (l1ref0[0] == 0 && ABS(l1mv0[0][0]) <= 1 && ABS(l1mv0[0][1]) <= 1)
|| (l1ref0[0] < 0 && l1ref1[0] == 0 && ABS(l1mv1[0][0]) <= 1 && ABS(l1mv1[0][1]) <= 1
&& (h->x264_build>33 || !h->x264_build)))){
if(ref[0] > 0)
fill_rectangle(&h->mv_cache[0][scan8[0]], 4, 4, 8, pack16to32(mv[0][0],mv[0][1]), 4);
else
fill_rectangle(&h->mv_cache[0][scan8[0]], 4, 4, 8, 0, 4);
if(ref[1] > 0)
fill_rectangle(&h->mv_cache[1][scan8[0]], 4, 4, 8, pack16to32(mv[1][0],mv[1][1]), 4);
else
fill_rectangle(&h->mv_cache[1][scan8[0]], 4, 4, 8, 0, 4);
}else{
fill_rectangle(&h->mv_cache[0][scan8[0]], 4, 4, 8, pack16to32(mv[0][0],mv[0][1]), 4);
fill_rectangle(&h->mv_cache[1][scan8[0]], 4, 4, 8, pack16to32(mv[1][0],mv[1][1]), 4);
}
}else{
for(i8=0; i8<4; i8++){
const int x8 = i8&1;
const int y8 = i8>>1;
if(is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8]))
continue;
h->sub_mb_type[i8] = sub_mb_type;
fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, pack16to32(mv[0][0],mv[0][1]), 4);
fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, pack16to32(mv[1][0],mv[1][1]), 4);
fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, ref[0], 1);
fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, ref[1], 1);
/* col_zero_flag */
if(!IS_INTRA(mb_type_col) && ( l1ref0[x8 + y8*h->b8_stride] == 0
|| (l1ref0[x8 + y8*h->b8_stride] < 0 && l1ref1[x8 + y8*h->b8_stride] == 0
&& (h->x264_build>33 || !h->x264_build)))){
const int16_t (*l1mv)[2]= l1ref0[x8 + y8*h->b8_stride] == 0 ? l1mv0 : l1mv1;
for(i4=0; i4<4; i4++){
const int16_t *mv_col = l1mv[x8*2 + (i4&1) + (y8*2 + (i4>>1))*h->b_stride];
if(ABS(mv_col[0]) <= 1 && ABS(mv_col[1]) <= 1){
if(ref[0] == 0)
*(uint32_t*)h->mv_cache[0][scan8[i8*4+i4]] = 0;
if(ref[1] == 0)
*(uint32_t*)h->mv_cache[1][scan8[i8*4+i4]] = 0;
}
}
}
}
}
}else{ /* direct temporal mv pred */
if(IS_16X16(*mb_type)){
fill_rectangle(&h->ref_cache[1][scan8[0]], 4, 4, 8, 0, 1);
if(IS_INTRA(mb_type_col)){
fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, 0, 1);
fill_rectangle(&h-> mv_cache[0][scan8[0]], 4, 4, 8, 0, 4);
fill_rectangle(&h-> mv_cache[1][scan8[0]], 4, 4, 8, 0, 4);
}else{
const int ref0 = l1ref0[0] >= 0 ? h->map_col_to_list0[0][l1ref0[0]]
: h->map_col_to_list0[1][l1ref1[0]];
const int dist_scale_factor = h->dist_scale_factor[ref0];
const int16_t *mv_col = l1ref0[0] >= 0 ? l1mv0[0] : l1mv1[0];
int mv_l0[2];
mv_l0[0] = (dist_scale_factor * mv_col[0] + 128) >> 8;
mv_l0[1] = (dist_scale_factor * mv_col[1] + 128) >> 8;
fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref0, 1);
fill_rectangle(&h-> mv_cache[0][scan8[0]], 4, 4, 8, pack16to32(mv_l0[0],mv_l0[1]), 4);
fill_rectangle(&h-> mv_cache[1][scan8[0]], 4, 4, 8, pack16to32(mv_l0[0]-mv_col[0],mv_l0[1]-mv_col[1]), 4);
}
}else{
for(i8=0; i8<4; i8++){
const int x8 = i8&1;
const int y8 = i8>>1;
int ref0, dist_scale_factor;
const int16_t (*l1mv)[2]= l1mv0;
if(is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8]))
continue;
h->sub_mb_type[i8] = sub_mb_type;
if(IS_INTRA(mb_type_col)){
fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, 0, 1);
fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, 0, 1);
fill_rectangle(&h-> mv_cache[0][scan8[i8*4]], 2, 2, 8, 0, 4);
fill_rectangle(&h-> mv_cache[1][scan8[i8*4]], 2, 2, 8, 0, 4);
continue;
}
ref0 = l1ref0[x8 + y8*h->b8_stride];
if(ref0 >= 0)
ref0 = h->map_col_to_list0[0][ref0];
else{
ref0 = h->map_col_to_list0[1][l1ref1[x8 + y8*h->b8_stride]];
l1mv= l1mv1;
}
dist_scale_factor = h->dist_scale_factor[ref0];
fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, ref0, 1);
fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, 0, 1);
for(i4=0; i4<4; i4++){
const int16_t *mv_col = l1mv[x8*2 + (i4&1) + (y8*2 + (i4>>1))*h->b_stride];
int16_t *mv_l0 = h->mv_cache[0][scan8[i8*4+i4]];
mv_l0[0] = (dist_scale_factor * mv_col[0] + 128) >> 8;
mv_l0[1] = (dist_scale_factor * mv_col[1] + 128) >> 8;
*(uint32_t*)h->mv_cache[1][scan8[i8*4+i4]] =
pack16to32(mv_l0[0]-mv_col[0],mv_l0[1]-mv_col[1]);
}
}
}
}
}
static inline void write_back_motion(H264Context *h, int mb_type){
MpegEncContext * const s = &h->s;
const int b_xy = 4*s->mb_x + 4*s->mb_y*h->b_stride;
const int b8_xy= 2*s->mb_x + 2*s->mb_y*h->b8_stride;
int list;
for(list=0; list<2; list++){
int y;
if(!USES_LIST(mb_type, list)){
if(1){ //FIXME skip or never read if mb_type doesn't use it
for(y=0; y<4; y++){
*(uint64_t*)s->current_picture.motion_val[list][b_xy + 0 + y*h->b_stride]=
*(uint64_t*)s->current_picture.motion_val[list][b_xy + 2 + y*h->b_stride]= 0;
}
if( h->pps.cabac ) {
/* FIXME needed ? */
for(y=0; y<4; y++){
*(uint64_t*)h->mvd_table[list][b_xy + 0 + y*h->b_stride]=
*(uint64_t*)h->mvd_table[list][b_xy + 2 + y*h->b_stride]= 0;
}
}
for(y=0; y<2; y++){
s->current_picture.ref_index[list][b8_xy + 0 + y*h->b8_stride]=
s->current_picture.ref_index[list][b8_xy + 1 + y*h->b8_stride]= LIST_NOT_USED;
}
}
continue;
}
for(y=0; y<4; y++){
*(uint64_t*)s->current_picture.motion_val[list][b_xy + 0 + y*h->b_stride]= *(uint64_t*)h->mv_cache[list][scan8[0]+0 + 8*y];
*(uint64_t*)s->current_picture.motion_val[list][b_xy + 2 + y*h->b_stride]= *(uint64_t*)h->mv_cache[list][scan8[0]+2 + 8*y];
}
if( h->pps.cabac ) {
for(y=0; y<4; y++){
*(uint64_t*)h->mvd_table[list][b_xy + 0 + y*h->b_stride]= *(uint64_t*)h->mvd_cache[list][scan8[0]+0 + 8*y];
*(uint64_t*)h->mvd_table[list][b_xy + 2 + y*h->b_stride]= *(uint64_t*)h->mvd_cache[list][scan8[0]+2 + 8*y];
}
}
for(y=0; y<2; y++){
s->current_picture.ref_index[list][b8_xy + 0 + y*h->b8_stride]= h->ref_cache[list][scan8[0]+0 + 16*y];
s->current_picture.ref_index[list][b8_xy + 1 + y*h->b8_stride]= h->ref_cache[list][scan8[0]+2 + 16*y];
}
}
if(h->slice_type == B_TYPE && h->pps.cabac){
if(IS_8X8(mb_type)){
h->direct_table[b8_xy+1+0*h->b8_stride] = IS_DIRECT(h->sub_mb_type[1]) ? 1 : 0;
h->direct_table[b8_xy+0+1*h->b8_stride] = IS_DIRECT(h->sub_mb_type[2]) ? 1 : 0;
h->direct_table[b8_xy+1+1*h->b8_stride] = IS_DIRECT(h->sub_mb_type[3]) ? 1 : 0;
}
}
}
/**
* Decodes a network abstraction layer unit.
* @param consumed is the number of bytes used as input
* @param length is the length of the array
* @param dst_length is the number of decoded bytes FIXME here or in decode_rbsp_trailing?
* @returns decoded bytes, might be src+1 if no escapes
*/
static uint8_t *decode_nal(H264Context *h, uint8_t *src, int *dst_length, int *consumed, int length){
int i, si, di;
uint8_t *dst;
// src[0]&0x80; //forbidden bit
h->nal_ref_idc= src[0]>>5;
h->nal_unit_type= src[0]&0x1F;
src++; length--;
#if 0
for(i=0; i<length; i++)
printf("%2X ", src[i]);
#endif
for(i=0; i+1<length; i+=2){
if(src[i]) continue;
if(i>0 && src[i-1]==0) i--;
if(i+2<length && src[i+1]==0 && src[i+2]<=3){
if(src[i+2]!=3){
/* startcode, so we must be past the end */
length=i;
}
break;
}
}
if(i>=length-1){ //no escaped 0
*dst_length= length;
*consumed= length+1; //+1 for the header
return src;
}
h->rbsp_buffer= av_fast_realloc(h->rbsp_buffer, &h->rbsp_buffer_size, length);
dst= h->rbsp_buffer;
//printf("decoding esc\n");
si=di=0;
while(si<length){
//remove escapes (very rare 1:2^22)
if(si+2<length && src[si]==0 && src[si+1]==0 && src[si+2]<=3){
if(src[si+2]==3){ //escape
dst[di++]= 0;
dst[di++]= 0;
si+=3;
continue;
}else //next start code
break;
}
dst[di++]= src[si++];
}
*dst_length= di;
*consumed= si + 1;//+1 for the header
//FIXME store the exact number of bits in the getbitcontext (it's needed for decoding)
return dst;
}
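/*
 * Escape example (illustrative): the RBSP bytes 00 00 01 would mimic a
 * start code on the wire, so the encoder transmits 00 00 03 01.  The loop
 * above drops the 03 that follows two zero bytes, recovering 00 00 01,
 * while a 00 00 followed by 00/01/02 (no 03) is taken as the start of the
 * next NAL unit and terminates this one.
 */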
#if 0
/**
* @param src the data which should be escaped
* @param dst the target buffer, dst+1 == src is allowed as a special case
* @param length the length of the src data
* @param dst_length the length of the dst array
* @returns length of escaped data in bytes or -1 if an error occurred
*/
static int encode_nal(H264Context *h, uint8_t *dst, uint8_t *src, int length, int dst_length){
int i, escape_count, si, di;
uint8_t *temp;
assert(length>=0);
assert(dst_length>0);
dst[0]= (h->nal_ref_idc<<5) + h->nal_unit_type;
if(length==0) return 1;
escape_count= 0;
for(i=0; i<length; i+=2){
if(src[i]) continue;
if(i>0 && src[i-1]==0)
i--;
if(i+2<length && src[i+1]==0 && src[i+2]<=3){
escape_count++;
i+=2;
}
}
if(escape_count==0){
if(dst+1 != src)
memcpy(dst+1, src, length);
return length + 1;
}
if(length + escape_count + 1> dst_length)
return -1;
//this should be damn rare (hopefully)
h->rbsp_buffer= av_fast_realloc(h->rbsp_buffer, &h->rbsp_buffer_size, length + escape_count);
temp= h->rbsp_buffer;
//printf("encoding esc\n");
si= 0;
di= 0;
while(si < length){
if(si+2<length && src[si]==0 && src[si+1]==0 && src[si+2]<=3){
temp[di++]= 0; si++;
temp[di++]= 0; si++;
temp[di++]= 3;
temp[di++]= src[si++];
}
else
temp[di++]= src[si++];
}
memcpy(dst+1, temp, length+escape_count);
assert(di == length+escape_count);
return di + 1;
}
/**
* write 1,10,100,1000,... for alignment; yes, it's exactly the inverse of MPEG-4
*/
static void encode_rbsp_trailing(PutBitContext *pb){
int length;
put_bits(pb, 1, 1);
length= (-put_bits_count(pb))&7;
if(length) put_bits(pb, length, 0);
}
#endif
/**
* identifies the exact end of the bitstream
* @return the length of the trailing, or 0 if damaged
*/
static int decode_rbsp_trailing(uint8_t *src){
int v= *src;
int r;
tprintf("rbsp trailing %X\n", v);
for(r=1; r<9; r++){
if(v&1) return r;
v>>=1;
}
return 0;
}
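/* Example: a final byte of 0x80 (binary 1000 0000) returns 8, the stop
 * bit plus seven alignment zeros; 0x01 returns 1 (stop bit only); 0x00
 * has no stop bit at all and yields 0, flagging a damaged stream. */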
/**
* IDCT transforms the 16 DC values and dequantizes them.
* @param qp quantization parameter
*/
static void h264_luma_dc_dequant_idct_c(DCTELEM *block, int qp){
const int qmul= dequant_coeff[qp][0];
#define stride 16
int i;
int temp[16]; //FIXME check if this is a good idea
static const int x_offset[4]={0, 1*stride, 4* stride, 5*stride};
static const int y_offset[4]={0, 2*stride, 8* stride, 10*stride};
//memset(block, 64, 2*256);
//return;
for(i=0; i<4; i++){
const int offset= y_offset[i];
const int z0= block[offset+stride*0] + block[offset+stride*4];
const int z1= block[offset+stride*0] - block[offset+stride*4];
const int z2= block[offset+stride*1] - block[offset+stride*5];
const int z3= block[offset+stride*1] + block[offset+stride*5];
temp[4*i+0]= z0+z3;
temp[4*i+1]= z1+z2;
temp[4*i+2]= z1-z2;
temp[4*i+3]= z0-z3;
}
for(i=0; i<4; i++){
const int offset= x_offset[i];
const int z0= temp[4*0+i] + temp[4*2+i];
const int z1= temp[4*0+i] - temp[4*2+i];
const int z2= temp[4*1+i] - temp[4*3+i];
const int z3= temp[4*1+i] + temp[4*3+i];
block[stride*0 +offset]= ((z0 + z3)*qmul + 2)>>2; //FIXME think about merging this into decode_residual
block[stride*2 +offset]= ((z1 + z2)*qmul + 2)>>2;
block[stride*8 +offset]= ((z1 - z2)*qmul + 2)>>2;
block[stride*10+offset]= ((z0 - z3)*qmul + 2)>>2;
}
}
#if 0
/**
* DCT transforms the 16 DC values.
* @param qp quantization parameter ??? FIXME
*/
static void h264_luma_dc_dct_c(DCTELEM *block/*, int qp*/){
// const int qmul= dequant_coeff[qp][0];
int i;
int temp[16]; //FIXME check if this is a good idea
static const int x_offset[4]={0, 1*stride, 4* stride, 5*stride};
static const int y_offset[4]={0, 2*stride, 8* stride, 10*stride};
for(i=0; i<4; i++){
const int offset= y_offset[i];
const int z0= block[offset+stride*0] + block[offset+stride*4];
const int z1= block[offset+stride*0] - block[offset+stride*4];
const int z2= block[offset+stride*1] - block[offset+stride*5];
const int z3= block[offset+stride*1] + block[offset+stride*5];
temp[4*i+0]= z0+z3;
temp[4*i+1]= z1+z2;
temp[4*i+2]= z1-z2;
temp[4*i+3]= z0-z3;
}
for(i=0; i<4; i++){
const int offset= x_offset[i];
const int z0= temp[4*0+i] + temp[4*2+i];
const int z1= temp[4*0+i] - temp[4*2+i];
const int z2= temp[4*1+i] - temp[4*3+i];
const int z3= temp[4*1+i] + temp[4*3+i];
block[stride*0 +offset]= (z0 + z3)>>1;
block[stride*2 +offset]= (z1 + z2)>>1;
block[stride*8 +offset]= (z1 - z2)>>1;
block[stride*10+offset]= (z0 - z3)>>1;
}
}
#endif
#undef xStride
#undef stride
static void chroma_dc_dequant_idct_c(DCTELEM *block, int qp){
const int qmul= dequant_coeff[qp][0];
const int stride= 16*2;
const int xStride= 16;
int a,b,c,d,e;
a= block[stride*0 + xStride*0];
b= block[stride*0 + xStride*1];
c= block[stride*1 + xStride*0];
d= block[stride*1 + xStride*1];
e= a-b;
a= a+b;
b= c-d;
c= c+d;
block[stride*0 + xStride*0]= ((a+c)*qmul + 0)>>1;
block[stride*0 + xStride*1]= ((e+b)*qmul + 0)>>1;
block[stride*1 + xStride*0]= ((a-c)*qmul + 0)>>1;
block[stride*1 + xStride*1]= ((e-b)*qmul + 0)>>1;
}
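/* The butterflies above are a 2x2 Hadamard transform: DC inputs a,b,c,d
 * map to (a+b+c+d), (a-b+c-d), (a+b-c-d), (a-b-c+d), each scaled by qmul
 * and shifted right by 1, before the per-block 4x4 IDCT runs. */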
#if 0
static void chroma_dc_dct_c(DCTELEM *block){
const int stride= 16*2;
const int xStride= 16;
int a,b,c,d,e;
a= block[stride*0 + xStride*0];
b= block[stride*0 + xStride*1];
c= block[stride*1 + xStride*0];
d= block[stride*1 + xStride*1];
e= a-b;
a= a+b;
b= c-d;
c= c+d;
block[stride*0 + xStride*0]= (a+c);
block[stride*0 + xStride*1]= (e+b);
block[stride*1 + xStride*0]= (a-c);
block[stride*1 + xStride*1]= (e-b);
}
#endif
/**
* gets the chroma qp.
*/
static inline int get_chroma_qp(int chroma_qp_index_offset, int qscale){
return chroma_qp[clip(qscale + chroma_qp_index_offset, 0, 51)];
}
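/* Example: with chroma_qp_index_offset == 0 the chroma_qp[] table is the
 * identity up to QP 29 and then flattens, so a luma QP of 51 maps to a
 * chroma QP of 39 (the table's last entry). */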
#if 0
static void h264_diff_dct_c(DCTELEM *block, uint8_t *src1, uint8_t *src2, int stride){
int i;
//FIXME try int temp instead of block
for(i=0; i<4; i++){
const int d0= src1[0 + i*stride] - src2[0 + i*stride];
const int d1= src1[1 + i*stride] - src2[1 + i*stride];
const int d2= src1[2 + i*stride] - src2[2 + i*stride];
const int d3= src1[3 + i*stride] - src2[3 + i*stride];
const int z0= d0 + d3;
const int z3= d0 - d3;
const int z1= d1 + d2;
const int z2= d1 - d2;
block[0 + 4*i]= z0 + z1;
block[1 + 4*i]= 2*z3 + z2;
block[2 + 4*i]= z0 - z1;
block[3 + 4*i]= z3 - 2*z2;
}
for(i=0; i<4; i++){
const int z0= block[0*4 + i] + block[3*4 + i];
const int z3= block[0*4 + i] - block[3*4 + i];
const int z1= block[1*4 + i] + block[2*4 + i];
const int z2= block[1*4 + i] - block[2*4 + i];
block[0*4 + i]= z0 + z1;
block[1*4 + i]= 2*z3 + z2;
block[2*4 + i]= z0 - z1;
block[3*4 + i]= z3 - 2*z2;
}
}
#endif
//FIXME need to check that this doesn't overflow signed 32 bit for low qp; I am not sure, it's very close
//FIXME check that gcc inlines this (and optimizes intra & seperate_dc stuff away)
static inline int quantize_c(DCTELEM *block, uint8_t *scantable, int qscale, int intra, int seperate_dc){
int i;
const int * const quant_table= quant_coeff[qscale];
const int bias= intra ? (1<<QUANT_SHIFT)/3 : (1<<QUANT_SHIFT)/6;
const unsigned int threshold1= (1<<QUANT_SHIFT) - bias - 1;
const unsigned int threshold2= (threshold1<<1);
int last_non_zero;
if(seperate_dc){
if(qscale<=18){
//avoid overflows
const int dc_bias= intra ? (1<<(QUANT_SHIFT-2))/3 : (1<<(QUANT_SHIFT-2))/6;
const unsigned int dc_threshold1= (1<<(QUANT_SHIFT-2)) - dc_bias - 1;
const unsigned int dc_threshold2= (dc_threshold1<<1);
int level= block[0]*quant_coeff[qscale+18][0];
if(((unsigned)(level+dc_threshold1))>dc_threshold2){
if(level>0){
level= (dc_bias + level)>>(QUANT_SHIFT-2);
block[0]= level;
}else{
level= (dc_bias - level)>>(QUANT_SHIFT-2);
block[0]= -level;
}
// last_non_zero = i;
}else{
block[0]=0;
}
}else{
const int dc_bias= intra ? (1<<(QUANT_SHIFT+1))/3 : (1<<(QUANT_SHIFT+1))/6;
const unsigned int dc_threshold1= (1<<(QUANT_SHIFT+1)) - dc_bias - 1;
const unsigned int dc_threshold2= (dc_threshold1<<1);
int level= block[0]*quant_table[0];
if(((unsigned)(level+dc_threshold1))>dc_threshold2){
if(level>0){
level= (dc_bias + level)>>(QUANT_SHIFT+1);
block[0]= level;
}else{
level= (dc_bias - level)>>(QUANT_SHIFT+1);
block[0]= -level;
}
// last_non_zero = i;
}else{
block[0]=0;
}
}
last_non_zero= 0;
i=1;
}else{
last_non_zero= -1;
i=0;
}
for(; i<16; i++){
const int j= scantable[i];
int level= block[j]*quant_table[j];
// if( bias+level >= (1<<(QMAT_SHIFT - 3))
// || bias-level >= (1<<(QMAT_SHIFT - 3))){
if(((unsigned)(level+threshold1))>threshold2){
if(level>0){
level= (bias + level)>>QUANT_SHIFT;
block[j]= level;
}else{
level= (bias - level)>>QUANT_SHIFT;
block[j]= -level;
}
last_non_zero = i;
}else{
block[j]=0;
}
}
return last_non_zero;
}
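/* Rounding note: the intra bias (1<<QUANT_SHIFT)/3 and inter bias
 * (1<<QUANT_SHIFT)/6 are the conventional f=1/3 and f=1/6 dead-zone
 * offsets; the threshold1/threshold2 pair turns the |level| dead-zone
 * test into a single unsigned comparison, avoiding a branch on sign. */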
static void pred4x4_vertical_c(uint8_t *src, uint8_t *topright, int stride){
const uint32_t a= ((uint32_t*)(src-stride))[0];
((uint32_t*)(src+0*stride))[0]= a;
((uint32_t*)(src+1*stride))[0]= a;
((uint32_t*)(src+2*stride))[0]= a;
((uint32_t*)(src+3*stride))[0]= a;
}
static void pred4x4_horizontal_c(uint8_t *src, uint8_t *topright, int stride){
((uint32_t*)(src+0*stride))[0]= src[-1+0*stride]*0x01010101;
((uint32_t*)(src+1*stride))[0]= src[-1+1*stride]*0x01010101;
((uint32_t*)(src+2*stride))[0]= src[-1+2*stride]*0x01010101;
((uint32_t*)(src+3*stride))[0]= src[-1+3*stride]*0x01010101;
}
static void pred4x4_dc_c(uint8_t *src, uint8_t *topright, int stride){
const int dc= ( src[-stride] + src[1-stride] + src[2-stride] + src[3-stride]
+ src[-1+0*stride] + src[-1+1*stride] + src[-1+2*stride] + src[-1+3*stride] + 4) >>3;
((uint32_t*)(src+0*stride))[0]=
((uint32_t*)(src+1*stride))[0]=
((uint32_t*)(src+2*stride))[0]=
((uint32_t*)(src+3*stride))[0]= dc* 0x01010101;
}
static void pred4x4_left_dc_c(uint8_t *src, uint8_t *topright, int stride){
const int dc= ( src[-1+0*stride] + src[-1+1*stride] + src[-1+2*stride] + src[-1+3*stride] + 2) >>2;
((uint32_t*)(src+0*stride))[0]=
((uint32_t*)(src+1*stride))[0]=
((uint32_t*)(src+2*stride))[0]=
((uint32_t*)(src+3*stride))[0]= dc* 0x01010101;
}
static void pred4x4_top_dc_c(uint8_t *src, uint8_t *topright, int stride){
const int dc= ( src[-stride] + src[1-stride] + src[2-stride] + src[3-stride] + 2) >>2;
((uint32_t*)(src+0*stride))[0]=
((uint32_t*)(src+1*stride))[0]=
((uint32_t*)(src+2*stride))[0]=
((uint32_t*)(src+3*stride))[0]= dc* 0x01010101;
}
static void pred4x4_128_dc_c(uint8_t *src, uint8_t *topright, int stride){
((uint32_t*)(src+0*stride))[0]=
((uint32_t*)(src+1*stride))[0]=
((uint32_t*)(src+2*stride))[0]=
((uint32_t*)(src+3*stride))[0]= 128U*0x01010101U;
}
#define LOAD_TOP_RIGHT_EDGE\
const int t4= topright[0];\
const int t5= topright[1];\
const int t6= topright[2];\
const int t7= topright[3];
#define LOAD_LEFT_EDGE\
const int l0= src[-1+0*stride];\
const int l1= src[-1+1*stride];\
const int l2= src[-1+2*stride];\
const int l3= src[-1+3*stride];
#define LOAD_TOP_EDGE\
const int t0= src[ 0-1*stride];\
const int t1= src[ 1-1*stride];\
const int t2= src[ 2-1*stride];\
const int t3= src[ 3-1*stride];
static void pred4x4_down_right_c(uint8_t *src, uint8_t *topright, int stride){
const int lt= src[-1-1*stride];
LOAD_TOP_EDGE
LOAD_LEFT_EDGE
src[0+3*stride]=(l3 + 2*l2 + l1 + 2)>>2;
src[0+2*stride]=
src[1+3*stride]=(l2 + 2*l1 + l0 + 2)>>2;
src[0+1*stride]=
src[1+2*stride]=
src[2+3*stride]=(l1 + 2*l0 + lt + 2)>>2;
src[0+0*stride]=
src[1+1*stride]=
src[2+2*stride]=
src[3+3*stride]=(l0 + 2*lt + t0 + 2)>>2;
src[1+0*stride]=
src[2+1*stride]=
src[3+2*stride]=(lt + 2*t0 + t1 + 2)>>2;
src[2+0*stride]=
src[3+1*stride]=(t0 + 2*t1 + t2 + 2)>>2;
src[3+0*stride]=(t1 + 2*t2 + t3 + 2)>>2;
}
static void pred4x4_down_left_c(uint8_t *src, uint8_t *topright, int stride){
LOAD_TOP_EDGE
LOAD_TOP_RIGHT_EDGE
// LOAD_LEFT_EDGE
src[0+0*stride]=(t0 + t2 + 2*t1 + 2)>>2;
src[1+0*stride]=
src[0+1*stride]=(t1 + t3 + 2*t2 + 2)>>2;
src[2+0*stride]=
src[1+1*stride]=
src[0+2*stride]=(t2 + t4 + 2*t3 + 2)>>2;
src[3+0*stride]=
src[2+1*stride]=
src[1+2*stride]=
src[0+3*stride]=(t3 + t5 + 2*t4 + 2)>>2;
src[3+1*stride]=
src[2+2*stride]=
src[1+3*stride]=(t4 + t6 + 2*t5 + 2)>>2;
src[3+2*stride]=
src[2+3*stride]=(t5 + t7 + 2*t6 + 2)>>2;
src[3+3*stride]=(t6 + 3*t7 + 2)>>2;
}
static void pred4x4_vertical_right_c(uint8_t *src, uint8_t *topright, int stride){
const int lt= src[-1-1*stride];
LOAD_TOP_EDGE
LOAD_LEFT_EDGE
const __attribute__((unused)) int unu= l3;
src[0+0*stride]=
src[1+2*stride]=(lt + t0 + 1)>>1;
src[1+0*stride]=
src[2+2*stride]=(t0 + t1 + 1)>>1;
src[2+0*stride]=
src[3+2*stride]=(t1 + t2 + 1)>>1;
src[3+0*stride]=(t2 + t3 + 1)>>1;
src[0+1*stride]=
src[1+3*stride]=(l0 + 2*lt + t0 + 2)>>2;
src[1+1*stride]=
src[2+3*stride]=(lt + 2*t0 + t1 + 2)>>2;
src[2+1*stride]=
src[3+3*stride]=(t0 + 2*t1 + t2 + 2)>>2;
src[3+1*stride]=(t1 + 2*t2 + t3 + 2)>>2;
src[0+2*stride]=(lt + 2*l0 + l1 + 2)>>2;
src[0+3*stride]=(l0 + 2*l1 + l2 + 2)>>2;
}
static void pred4x4_vertical_left_c(uint8_t *src, uint8_t *topright, int stride){
LOAD_TOP_EDGE
LOAD_TOP_RIGHT_EDGE
const __attribute__((unused)) int unu= t7;
src[0+0*stride]=(t0 + t1 + 1)>>1;
src[1+0*stride]=
src[0+2*stride]=(t1 + t2 + 1)>>1;
src[2+0*stride]=
src[1+2*stride]=(t2 + t3 + 1)>>1;
src[3+0*stride]=
src[2+2*stride]=(t3 + t4+ 1)>>1;
src[3+2*stride]=(t4 + t5+ 1)>>1;
src[0+1*stride]=(t0 + 2*t1 + t2 + 2)>>2;
src[1+1*stride]=
src[0+3*stride]=(t1 + 2*t2 + t3 + 2)>>2;
src[2+1*stride]=
src[1+3*stride]=(t2 + 2*t3 + t4 + 2)>>2;
src[3+1*stride]=
src[2+3*stride]=(t3 + 2*t4 + t5 + 2)>>2;
src[3+3*stride]=(t4 + 2*t5 + t6 + 2)>>2;
}
static void pred4x4_horizontal_up_c(uint8_t *src, uint8_t *topright, int stride){
LOAD_LEFT_EDGE
src[0+0*stride]=(l0 + l1 + 1)>>1;
src[1+0*stride]=(l0 + 2*l1 + l2 + 2)>>2;
src[2+0*stride]=
src[0+1*stride]=(l1 + l2 + 1)>>1;
src[3+0*stride]=
src[1+1*stride]=(l1 + 2*l2 + l3 + 2)>>2;
src[2+1*stride]=
src[0+2*stride]=(l2 + l3 + 1)>>1;
src[3+1*stride]=
src[1+2*stride]=(l2 + 2*l3 + l3 + 2)>>2;
src[3+2*stride]=
src[1+3*stride]=
src[0+3*stride]=
src[2+2*stride]=
src[2+3*stride]=
src[3+3*stride]=l3;
}
static void pred4x4_horizontal_down_c(uint8_t *src, uint8_t *topright, int stride){
const int lt= src[-1-1*stride];
LOAD_TOP_EDGE
LOAD_LEFT_EDGE
const __attribute__((unused)) int unu= t3;
src[0+0*stride]=
src[2+1*stride]=(lt + l0 + 1)>>1;
src[1+0*stride]=
src[3+1*stride]=(l0 + 2*lt + t0 + 2)>>2;
src[2+0*stride]=(lt + 2*t0 + t1 + 2)>>2;
src[3+0*stride]=(t0 + 2*t1 + t2 + 2)>>2;
src[0+1*stride]=
src[2+2*stride]=(l0 + l1 + 1)>>1;
src[1+1*stride]=
src[3+2*stride]=(lt + 2*l0 + l1 + 2)>>2;
src[0+2*stride]=
src[2+3*stride]=(l1 + l2+ 1)>>1;
src[1+2*stride]=
src[3+3*stride]=(l0 + 2*l1 + l2 + 2)>>2;
src[0+3*stride]=(l2 + l3 + 1)>>1;
src[1+3*stride]=(l1 + 2*l2 + l3 + 2)>>2;
}
static void pred16x16_vertical_c(uint8_t *src, int stride){
int i;
const uint32_t a= ((uint32_t*)(src-stride))[0];
const uint32_t b= ((uint32_t*)(src-stride))[1];
const uint32_t c= ((uint32_t*)(src-stride))[2];
const uint32_t d= ((uint32_t*)(src-stride))[3];
for(i=0; i<16; i++){
((uint32_t*)(src+i*stride))[0]= a;
((uint32_t*)(src+i*stride))[1]= b;
((uint32_t*)(src+i*stride))[2]= c;
((uint32_t*)(src+i*stride))[3]= d;
}
}
static void pred16x16_horizontal_c(uint8_t *src, int stride){
int i;
for(i=0; i<16; i++){
((uint32_t*)(src+i*stride))[0]=
((uint32_t*)(src+i*stride))[1]=
((uint32_t*)(src+i*stride))[2]=
((uint32_t*)(src+i*stride))[3]= src[-1+i*stride]*0x01010101;
}
}
static void pred16x16_dc_c(uint8_t *src, int stride){
int i, dc=0;
for(i=0;i<16; i++){
dc+= src[-1+i*stride];
}
for(i=0;i<16; i++){
dc+= src[i-stride];
}
dc= 0x01010101*((dc + 16)>>5);
for(i=0; i<16; i++){
((uint32_t*)(src+i*stride))[0]=
((uint32_t*)(src+i*stride))[1]=
((uint32_t*)(src+i*stride))[2]=
((uint32_t*)(src+i*stride))[3]= dc;
}
}
static void pred16x16_left_dc_c(uint8_t *src, int stride){
int i, dc=0;
for(i=0;i<16; i++){
dc+= src[-1+i*stride];
}
dc= 0x01010101*((dc + 8)>>4);
for(i=0; i<16; i++){
((uint32_t*)(src+i*stride))[0]=
((uint32_t*)(src+i*stride))[1]=
((uint32_t*)(src+i*stride))[2]=
((uint32_t*)(src+i*stride))[3]= dc;
}
}
static void pred16x16_top_dc_c(uint8_t *src, int stride){
int i, dc=0;
for(i=0;i<16; i++){
dc+= src[i-stride];
}
dc= 0x01010101*((dc + 8)>>4);
for(i=0; i<16; i++){
((uint32_t*)(src+i*stride))[0]=
((uint32_t*)(src+i*stride))[1]=
((uint32_t*)(src+i*stride))[2]=
((uint32_t*)(src+i*stride))[3]= dc;
}
}
static void pred16x16_128_dc_c(uint8_t *src, int stride){
int i;
for(i=0; i<16; i++){
((uint32_t*)(src+i*stride))[0]=
((uint32_t*)(src+i*stride))[1]=
((uint32_t*)(src+i*stride))[2]=
((uint32_t*)(src+i*stride))[3]= 0x01010101U*128U;
}
}
static inline void pred16x16_plane_compat_c(uint8_t *src, int stride, const int svq3){
int i, j, k;
int a;
uint8_t *cm = cropTbl + MAX_NEG_CROP;
const uint8_t * const src0 = src+7-stride;
const uint8_t *src1 = src+8*stride-1;
const uint8_t *src2 = src1-2*stride; // == src+6*stride-1;
int H = src0[1] - src0[-1];
int V = src1[0] - src2[ 0];
for(k=2; k<=8; ++k) {
src1 += stride; src2 -= stride;
H += k*(src0[k] - src0[-k]);
V += k*(src1[0] - src2[ 0]);
}
if(svq3){
H = ( 5*(H/4) ) / 16;
V = ( 5*(V/4) ) / 16;
/* required for 100% accuracy */
i = H; H = V; V = i;
}else{
H = ( 5*H+32 ) >> 6;
V = ( 5*V+32 ) >> 6;
}
a = 16*(src1[0] + src2[16] + 1) - 7*(V+H);
for(j=16; j>0; --j) {
int b = a;
a += V;
for(i=-16; i<0; i+=4) {
src[16+i] = cm[ (b ) >> 5 ];
src[17+i] = cm[ (b+ H) >> 5 ];
src[18+i] = cm[ (b+2*H) >> 5 ];
src[19+i] = cm[ (b+3*H) >> 5 ];
b += 4*H;
}
src += stride;
}
}
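/* The plane predictor above is a closed form of the spec formula: H and V
 * approximate the horizontal/vertical gradients from the edge samples, and
 * each output pixel is cm[(a + x*H + y*V) >> 5]; the inner loops just
 * evaluate this incrementally (b += H per pixel, a += V per row). The svq3
 * path rescales and swaps H/V to match that codec's bit-exact behaviour. */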
static void pred16x16_plane_c(uint8_t *src, int stride){
pred16x16_plane_compat_c(src, stride, 0);
}
static void pred8x8_vertical_c(uint8_t *src, int stride){
int i;
const uint32_t a= ((uint32_t*)(src-stride))[0];
const uint32_t b= ((uint32_t*)(src-stride))[1];
for(i=0; i<8; i++){
((uint32_t*)(src+i*stride))[0]= a;
((uint32_t*)(src+i*stride))[1]= b;
}
}
static void pred8x8_horizontal_c(uint8_t *src, int stride){
int i;
for(i=0; i<8; i++){
((uint32_t*)(src+i*stride))[0]=
((uint32_t*)(src+i*stride))[1]= src[-1+i*stride]*0x01010101;
}
}
static void pred8x8_128_dc_c(uint8_t *src, int stride){
int i;
for(i=0; i<8; i++){
((uint32_t*)(src+i*stride))[0]=
((uint32_t*)(src+i*stride))[1]= 0x01010101U*128U;
}
}
static void pred8x8_left_dc_c(uint8_t *src, int stride){
int i;
int dc0, dc2;
dc0=dc2=0;
for(i=0;i<4; i++){
dc0+= src[-1+i*stride];
dc2+= src[-1+(i+4)*stride];
}
dc0= 0x01010101*((dc0 + 2)>>2);
dc2= 0x01010101*((dc2 + 2)>>2);
for(i=0; i<4; i++){
((uint32_t*)(src+i*stride))[0]=
((uint32_t*)(src+i*stride))[1]= dc0;
}
for(i=4; i<8; i++){
((uint32_t*)(src+i*stride))[0]=
((uint32_t*)(src+i*stride))[1]= dc2;
}
}
static void pred8x8_top_dc_c(uint8_t *src, int stride){
int i;
int dc0, dc1;
dc0=dc1=0;
for(i=0;i<4; i++){
dc0+= src[i-stride];
dc1+= src[4+i-stride];
}
dc0= 0x01010101*((dc0 + 2)>>2);
dc1= 0x01010101*((dc1 + 2)>>2);
for(i=0; i<4; i++){
((uint32_t*)(src+i*stride))[0]= dc0;
((uint32_t*)(src+i*stride))[1]= dc1;
}
for(i=4; i<8; i++){
((uint32_t*)(src+i*stride))[0]= dc0;
((uint32_t*)(src+i*stride))[1]= dc1;
}
}
static void pred8x8_dc_c(uint8_t *src, int stride){
int i;
int dc0, dc1, dc2, dc3;
dc0=dc1=dc2=0;
for(i=0;i<4; i++){
dc0+= src[-1+i*stride] + src[i-stride];
dc1+= src[4+i-stride];
dc2+= src[-1+(i+4)*stride];
}
dc3= 0x01010101*((dc1 + dc2 + 4)>>3);
dc0= 0x01010101*((dc0 + 4)>>3);
dc1= 0x01010101*((dc1 + 2)>>2);
dc2= 0x01010101*((dc2 + 2)>>2);
for(i=0; i<4; i++){
((uint32_t*)(src+i*stride))[0]= dc0;
((uint32_t*)(src+i*stride))[1]= dc1;
}
for(i=4; i<8; i++){
((uint32_t*)(src+i*stride))[0]= dc2;
((uint32_t*)(src+i*stride))[1]= dc3;
}
}
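/* The 8x8 DC predictor above splits the block into four 4x4 quadrants:
 * dc0 (top-left) averages 4 top + 4 left samples, dc1 (top-right) only its
 * 4 top samples, dc2 (bottom-left) only its 4 left samples, and dc3
 * (bottom-right) the 8 samples behind dc1 and dc2 combined. */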
static void pred8x8_plane_c(uint8_t *src, int stride){
int j, k;
int a;
uint8_t *cm = cropTbl + MAX_NEG_CROP;
const uint8_t * const src0 = src+3-stride;
const uint8_t *src1 = src+4*stride-1;
const uint8_t *src2 = src1-2*stride; // == src+2*stride-1;
int H = src0[1] - src0[-1];
int V = src1[0] - src2[ 0];
for(k=2; k<=4; ++k) {
src1 += stride; src2 -= stride;
H += k*(src0[k] - src0[-k]);
V += k*(src1[0] - src2[ 0]);
}
H = ( 17*H+16 ) >> 5;
V = ( 17*V+16 ) >> 5;
a = 16*(src1[0] + src2[8]+1) - 3*(V+H);
for(j=8; j>0; --j) {
int b = a;
a += V;
src[0] = cm[ (b ) >> 5 ];
src[1] = cm[ (b+ H) >> 5 ];
src[2] = cm[ (b+2*H) >> 5 ];
src[3] = cm[ (b+3*H) >> 5 ];
src[4] = cm[ (b+4*H) >> 5 ];
src[5] = cm[ (b+5*H) >> 5 ];
src[6] = cm[ (b+6*H) >> 5 ];
src[7] = cm[ (b+7*H) >> 5 ];
src += stride;
}
}
#define SRC(x,y) src[(x)+(y)*stride]
#define PL(y) \
const int l##y = (SRC(-1,y-1) + 2*SRC(-1,y) + SRC(-1,y+1) + 2) >> 2;
#define PREDICT_8x8_LOAD_LEFT \
const int l0 = ((has_topleft ? SRC(-1,-1) : SRC(-1,0)) \
+ 2*SRC(-1,0) + SRC(-1,1) + 2) >> 2; \
PL(1) PL(2) PL(3) PL(4) PL(5) PL(6) \
const int l7 attribute_unused = (SRC(-1,6) + 3*SRC(-1,7) + 2) >> 2
#define PT(x) \
const int t##x = (SRC(x-1,-1) + 2*SRC(x,-1) + SRC(x+1,-1) + 2) >> 2;
#define PREDICT_8x8_LOAD_TOP \
const int t0 = ((has_topleft ? SRC(-1,-1) : SRC(0,-1)) \
+ 2*SRC(0,-1) + SRC(1,-1) + 2) >> 2; \
PT(1) PT(2) PT(3) PT(4) PT(5) PT(6) \
const int t7 attribute_unused = ((has_topright ? SRC(8,-1) : SRC(7,-1)) \
+ 2*SRC(7,-1) + SRC(6,-1) + 2) >> 2
#define PTR(x) \
t##x = (SRC(x-1,-1) + 2*SRC(x,-1) + SRC(x+1,-1) + 2) >> 2;
#define PREDICT_8x8_LOAD_TOPRIGHT \
int t8, t9, t10, t11, t12, t13, t14, t15; \
if(has_topright) { \
PTR(8) PTR(9) PTR(10) PTR(11) PTR(12) PTR(13) PTR(14) \
t15 = (SRC(14,-1) + 3*SRC(15,-1) + 2) >> 2; \
} else t8=t9=t10=t11=t12=t13=t14=t15= SRC(7,-1);
#define PREDICT_8x8_LOAD_TOPLEFT \
const int lt = (SRC(-1,0) + 2*SRC(-1,-1) + SRC(0,-1) + 2) >> 2
#define PREDICT_8x8_DC(v) \
int y; \
for( y = 0; y < 8; y++ ) { \
((uint32_t*)src)[0] = \
((uint32_t*)src)[1] = v; \
src += stride; \
}
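/* The PREDICT_8x8_LOAD_* macros above differ from the 4x4 case in that each
 * reference sample is pre-smoothed with the same [1 2 1]/4 filter before
 * prediction, as the 8x8 intra modes require; missing corner/edge samples
 * fall back to the nearest available neighbour. */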
static void pred8x8l_128_dc_c(uint8_t *src, int has_topleft, int has_topright, int stride)
{
PREDICT_8x8_DC(0x80808080);
}
static void pred8x8l_left_dc_c(uint8_t *src, int has_topleft, int has_topright, int stride)
{
PREDICT_8x8_LOAD_LEFT;
const uint32_t dc = ((l0+l1+l2+l3+l4+l5+l6+l7+4) >> 3) * 0x01010101;
PREDICT_8x8_DC(dc);
}
static void pred8x8l_top_dc_c(uint8_t *src, int has_topleft, int has_topright, int stride)
{
PREDICT_8x8_LOAD_TOP;
const uint32_t dc = ((t0+t1+t2+t3+t4+t5+t6+t7+4) >> 3) * 0x01010101;
PREDICT_8x8_DC(dc);
}
static void pred8x8l_dc_c(uint8_t *src, int has_topleft, int has_topright, int stride)
{
PREDICT_8x8_LOAD_LEFT;
PREDICT_8x8_LOAD_TOP;
const uint32_t dc = ((l0+l1+l2+l3+l4+l5+l6+l7
+t0+t1+t2+t3+t4+t5+t6+t7+8) >> 4) * 0x01010101;
PREDICT_8x8_DC(dc);
}
static void pred8x8l_horizontal_c(uint8_t *src, int has_topleft, int has_topright, int stride)
{
PREDICT_8x8_LOAD_LEFT;
#define ROW(y) ((uint32_t*)(src+y*stride))[0] =\
((uint32_t*)(src+y*stride))[1] = 0x01010101 * l##y
ROW(0); ROW(1); ROW(2); ROW(3); ROW(4); ROW(5); ROW(6); ROW(7);
#undef ROW
}
static void pred8x8l_vertical_c(uint8_t *src, int has_topleft, int has_topright, int stride)
{
int y;
PREDICT_8x8_LOAD_TOP;
src[0] = t0;
src[1] = t1;
src[2] = t2;
src[3] = t3;
src[4] = t4;
src[5] = t5;
src[6] = t6;
src[7] = t7;
for( y = 1; y < 8; y++ )
*(uint64_t*)(src+y*stride) = *(uint64_t*)src;
}
static void pred8x8l_down_left_c(uint8_t *src, int has_topleft, int has_topright, int stride)
{
PREDICT_8x8_LOAD_TOP;
PREDICT_8x8_LOAD_TOPRIGHT;
SRC(0,0)= (t0 + 2*t1 + t2 + 2) >> 2;
SRC(0,1)=SRC(1,0)= (t1 + 2*t2 + t3 + 2) >> 2;
SRC(0,2)=SRC(1,1)=SRC(2,0)= (t2 + 2*t3 + t4 + 2) >> 2;
SRC(0,3)=SRC(1,2)=SRC(2,1)=SRC(3,0)= (t3 + 2*t4 + t5 + 2) >> 2;
SRC(0,4)=SRC(1,3)=SRC(2,2)=SRC(3,1)=SRC(4,0)= (t4 + 2*t5 + t6 + 2) >> 2;
SRC(0,5)=SRC(1,4)=SRC(2,3)=SRC(3,2)=SRC(4,1)=SRC(5,0)= (t5 + 2*t6 + t7 + 2) >> 2;
SRC(0,6)=SRC(1,5)=SRC(2,4)=SRC(3,3)=SRC(4,2)=SRC(5,1)=SRC(6,0)= (t6 + 2*t7 + t8 + 2) >> 2;
SRC(0,7)=SRC(1,6)=SRC(2,5)=SRC(3,4)=SRC(4,3)=SRC(5,2)=SRC(6,1)=SRC(7,0)= (t7 + 2*t8 + t9 + 2) >> 2;
SRC(1,7)=SRC(2,6)=SRC(3,5)=SRC(4,4)=SRC(5,3)=SRC(6,2)=SRC(7,1)= (t8 + 2*t9 + t10 + 2) >> 2;
SRC(2,7)=SRC(3,6)=SRC(4,5)=SRC(5,4)=SRC(6,3)=SRC(7,2)= (t9 + 2*t10 + t11 + 2) >> 2;
SRC(3,7)=SRC(4,6)=SRC(5,5)=SRC(6,4)=SRC(7,3)= (t10 + 2*t11 + t12 + 2) >> 2;
SRC(4,7)=SRC(5,6)=SRC(6,5)=SRC(7,4)= (t11 + 2*t12 + t13 + 2) >> 2;
SRC(5,7)=SRC(6,6)=SRC(7,5)= (t12 + 2*t13 + t14 + 2) >> 2;
SRC(6,7)=SRC(7,6)= (t13 + 2*t14 + t15 + 2) >> 2;
SRC(7,7)= (t14 + 3*t15 + 2) >> 2;
}
static void pred8x8l_down_right_c(uint8_t *src, int has_topleft, int has_topright, int stride)
{
PREDICT_8x8_LOAD_TOP;
PREDICT_8x8_LOAD_LEFT;
PREDICT_8x8_LOAD_TOPLEFT;
SRC(0,7)= (l7 + 2*l6 + l5 + 2) >> 2;
SRC(0,6)=SRC(1,7)= (l6 + 2*l5 + l4 + 2) >> 2;
SRC(0,5)=SRC(1,6)=SRC(2,7)= (l5 + 2*l4 + l3 + 2) >> 2;
SRC(0,4)=SRC(1,5)=SRC(2,6)=SRC(3,7)= (l4 + 2*l3 + l2 + 2) >> 2;
SRC(0,3)=SRC(1,4)=SRC(2,5)=SRC(3,6)=SRC(4,7)= (l3 + 2*l2 + l1 + 2) >> 2;
SRC(0,2)=SRC(1,3)=SRC(2,4)=SRC(3,5)=SRC(4,6)=SRC(5,7)= (l2 + 2*l1 + l0 + 2) >> 2;
SRC(0,1)=SRC(1,2)=SRC(2,3)=SRC(3,4)=SRC(4,5)=SRC(5,6)=SRC(6,7)= (l1 + 2*l0 + lt + 2) >> 2;
SRC(0,0)=SRC(1,1)=SRC(2,2)=SRC(3,3)=SRC(4,4)=SRC(5,5)=SRC(6,6)=SRC(7,7)= (l0 + 2*lt + t0 + 2) >> 2;
SRC(1,0)=SRC(2,1)=SRC(3,2)=SRC(4,3)=SRC(5,4)=SRC(6,5)=SRC(7,6)= (lt + 2*t0 + t1 + 2) >> 2;
SRC(2,0)=SRC(3,1)=SRC(4,2)=SRC(5,3)=SRC(6,4)=SRC(7,5)= (t0 + 2*t1 + t2 + 2) >> 2;
SRC(3,0)=SRC(4,1)=SRC(5,2)=SRC(6,3)=SRC(7,4)= (t1 + 2*t2 + t3 + 2) >> 2;
SRC(4,0)=SRC(5,1)=SRC(6,2)=SRC(7,3)= (t2 + 2*t3 + t4 + 2) >> 2;
SRC(5,0)=SRC(6,1)=SRC(7,2)= (t3 + 2*t4 + t5 + 2) >> 2;
SRC(6,0)=SRC(7,1)= (t4 + 2*t5 + t6 + 2) >> 2;
SRC(7,0)= (t5 + 2*t6 + t7 + 2) >> 2;
}
static void pred8x8l_vertical_right_c(uint8_t *src, int has_topleft, int has_topright, int stride)
{
PREDICT_8x8_LOAD_TOP;
PREDICT_8x8_LOAD_LEFT;
PREDICT_8x8_LOAD_TOPLEFT;
SRC(0,6)= (l5 + 2*l4 + l3 + 2) >> 2;
SRC(0,7)= (l6 + 2*l5 + l4 + 2) >> 2;
SRC(0,4)=SRC(1,6)= (l3 + 2*l2 + l1 + 2) >> 2;
SRC(0,5)=SRC(1,7)= (l4 + 2*l3 + l2 + 2) >> 2;
SRC(0,2)=SRC(1,4)=SRC(2,6)= (l1 + 2*l0 + lt + 2) >> 2;
SRC(0,3)=SRC(1,5)=SRC(2,7)= (l2 + 2*l1 + l0 + 2) >> 2;
SRC(0,1)=SRC(1,3)=SRC(2,5)=SRC(3,7)= (l0 + 2*lt + t0 + 2) >> 2;
SRC(0,0)=SRC(1,2)=SRC(2,4)=SRC(3,6)= (lt + t0 + 1) >> 1;
SRC(1,1)=SRC(2,3)=SRC(3,5)=SRC(4,7)= (lt + 2*t0 + t1 + 2) >> 2;
SRC(1,0)=SRC(2,2)=SRC(3,4)=SRC(4,6)= (t0 + t1 + 1) >> 1;
SRC(2,1)=SRC(3,3)=SRC(4,5)=SRC(5,7)= (t0 + 2*t1 + t2 + 2) >> 2;
SRC(2,0)=SRC(3,2)=SRC(4,4)=SRC(5,6)= (t1 + t2 + 1) >> 1;
SRC(3,1)=SRC(4,3)=SRC(5,5)=SRC(6,7)= (t1 + 2*t2 + t3 + 2) >> 2;
SRC(3,0)=SRC(4,2)=SRC(5,4)=SRC(6,6)= (t2 + t3 + 1) >> 1;
SRC(4,1)=SRC(5,3)=SRC(6,5)=SRC(7,7)= (t2 + 2*t3 + t4 + 2) >> 2;
SRC(4,0)=SRC(5,2)=SRC(6,4)=SRC(7,6)= (t3 + t4 + 1) >> 1;
SRC(5,1)=SRC(6,3)=SRC(7,5)= (t3 + 2*t4 + t5 + 2) >> 2;
SRC(5,0)=SRC(6,2)=SRC(7,4)= (t4 + t5 + 1) >> 1;
SRC(6,1)=SRC(7,3)= (t4 + 2*t5 + t6 + 2) >> 2;
SRC(6,0)=SRC(7,2)= (t5 + t6 + 1) >> 1;
SRC(7,1)= (t5 + 2*t6 + t7 + 2) >> 2;
SRC(7,0)= (t6 + t7 + 1) >> 1;
}
static void pred8x8l_horizontal_down_c(uint8_t *src, int has_topleft, int has_topright, int stride)
{
PREDICT_8x8_LOAD_TOP;
PREDICT_8x8_LOAD_LEFT;
PREDICT_8x8_LOAD_TOPLEFT;
SRC(0,7)= (l6 + l7 + 1) >> 1;
SRC(1,7)= (l5 + 2*l6 + l7 + 2) >> 2;
SRC(0,6)=SRC(2,7)= (l5 + l6 + 1) >> 1;
SRC(1,6)=SRC(3,7)= (l4 + 2*l5 + l6 + 2) >> 2;
SRC(0,5)=SRC(2,6)=SRC(4,7)= (l4 + l5 + 1) >> 1;
SRC(1,5)=SRC(3,6)=SRC(5,7)= (l3 + 2*l4 + l5 + 2) >> 2;
SRC(0,4)=SRC(2,5)=SRC(4,6)=SRC(6,7)= (l3 + l4 + 1) >> 1;
SRC(1,4)=SRC(3,5)=SRC(5,6)=SRC(7,7)= (l2 + 2*l3 + l4 + 2) >> 2;
SRC(0,3)=SRC(2,4)=SRC(4,5)=SRC(6,6)= (l2 + l3 + 1) >> 1;
SRC(1,3)=SRC(3,4)=SRC(5,5)=SRC(7,6)= (l1 + 2*l2 + l3 + 2) >> 2;
SRC(0,2)=SRC(2,3)=SRC(4,4)=SRC(6,5)= (l1 + l2 + 1) >> 1;
SRC(1,2)=SRC(3,3)=SRC(5,4)=SRC(7,5)= (l0 + 2*l1 + l2 + 2) >> 2;
SRC(0,1)=SRC(2,2)=SRC(4,3)=SRC(6,4)= (l0 + l1 + 1) >> 1;
SRC(1,1)=SRC(3,2)=SRC(5,3)=SRC(7,4)= (lt + 2*l0 + l1 + 2) >> 2;
SRC(0,0)=SRC(2,1)=SRC(4,2)=SRC(6,3)= (lt + l0 + 1) >> 1;
SRC(1,0)=SRC(3,1)=SRC(5,2)=SRC(7,3)= (l0 + 2*lt + t0 + 2) >> 2;
SRC(2,0)=SRC(4,1)=SRC(6,2)= (t1 + 2*t0 + lt + 2) >> 2;
SRC(3,0)=SRC(5,1)=SRC(7,2)= (t2 + 2*t1 + t0 + 2) >> 2;
SRC(4,0)=SRC(6,1)= (t3 + 2*t2 + t1 + 2) >> 2;
SRC(5,0)=SRC(7,1)= (t4 + 2*t3 + t2 + 2) >> 2;
SRC(6,0)= (t5 + 2*t4 + t3 + 2) >> 2;
SRC(7,0)= (t6 + 2*t5 + t4 + 2) >> 2;
}
static void pred8x8l_vertical_left_c(uint8_t *src, int has_topleft, int has_topright, int stride)
{
PREDICT_8x8_LOAD_TOP;
PREDICT_8x8_LOAD_TOPRIGHT;
SRC(0,0)= (t0 + t1 + 1) >> 1;
SRC(0,1)= (t0 + 2*t1 + t2 + 2) >> 2;
SRC(0,2)=SRC(1,0)= (t1 + t2 + 1) >> 1;
SRC(0,3)=SRC(1,1)= (t1 + 2*t2 + t3 + 2) >> 2;
SRC(0,4)=SRC(1,2)=SRC(2,0)= (t2 + t3 + 1) >> 1;
SRC(0,5)=SRC(1,3)=SRC(2,1)= (t2 + 2*t3 + t4 + 2) >> 2;
SRC(0,6)=SRC(1,4)=SRC(2,2)=SRC(3,0)= (t3 + t4 + 1) >> 1;
SRC(0,7)=SRC(1,5)=SRC(2,3)=SRC(3,1)= (t3 + 2*t4 + t5 + 2) >> 2;
SRC(1,6)=SRC(2,4)=SRC(3,2)=SRC(4,0)= (t4 + t5 + 1) >> 1;
SRC(1,7)=SRC(2,5)=SRC(3,3)=SRC(4,1)= (t4 + 2*t5 + t6 + 2) >> 2;
SRC(2,6)=SRC(3,4)=SRC(4,2)=SRC(5,0)= (t5 + t6 + 1) >> 1;
SRC(2,7)=SRC(3,5)=SRC(4,3)=SRC(5,1)= (t5 + 2*t6 + t7 + 2) >> 2;
SRC(3,6)=SRC(4,4)=SRC(5,2)=SRC(6,0)= (t6 + t7 + 1) >> 1;
SRC(3,7)=SRC(4,5)=SRC(5,3)=SRC(6,1)= (t6 + 2*t7 + t8 + 2) >> 2;
SRC(4,6)=SRC(5,4)=SRC(6,2)=SRC(7,0)= (t7 + t8 + 1) >> 1;
SRC(4,7)=SRC(5,5)=SRC(6,3)=SRC(7,1)= (t7 + 2*t8 + t9 + 2) >> 2;
SRC(5,6)=SRC(6,4)=SRC(7,2)= (t8 + t9 + 1) >> 1;
SRC(5,7)=SRC(6,5)=SRC(7,3)= (t8 + 2*t9 + t10 + 2) >> 2;
SRC(6,6)=SRC(7,4)= (t9 + t10 + 1) >> 1;
SRC(6,7)=SRC(7,5)= (t9 + 2*t10 + t11 + 2) >> 2;
SRC(7,6)= (t10 + t11 + 1) >> 1;
SRC(7,7)= (t10 + 2*t11 + t12 + 2) >> 2;
}
static void pred8x8l_horizontal_up_c(uint8_t *src, int has_topleft, int has_topright, int stride)
{
PREDICT_8x8_LOAD_LEFT;
SRC(0,0)= (l0 + l1 + 1) >> 1;
SRC(1,0)= (l0 + 2*l1 + l2 + 2) >> 2;
SRC(0,1)=SRC(2,0)= (l1 + l2 + 1) >> 1;
SRC(1,1)=SRC(3,0)= (l1 + 2*l2 + l3 + 2) >> 2;
SRC(0,2)=SRC(2,1)=SRC(4,0)= (l2 + l3 + 1) >> 1;
SRC(1,2)=SRC(3,1)=SRC(5,0)= (l2 + 2*l3 + l4 + 2) >> 2;
SRC(0,3)=SRC(2,2)=SRC(4,1)=SRC(6,0)= (l3 + l4 + 1) >> 1;
SRC(1,3)=SRC(3,2)=SRC(5,1)=SRC(7,0)= (l3 + 2*l4 + l5 + 2) >> 2;
SRC(0,4)=SRC(2,3)=SRC(4,2)=SRC(6,1)= (l4 + l5 + 1) >> 1;
SRC(1,4)=SRC(3,3)=SRC(5,2)=SRC(7,1)= (l4 + 2*l5 + l6 + 2) >> 2;
SRC(0,5)=SRC(2,4)=SRC(4,3)=SRC(6,2)= (l5 + l6 + 1) >> 1;
SRC(1,5)=SRC(3,4)=SRC(5,3)=SRC(7,2)= (l5 + 2*l6 + l7 + 2) >> 2;
SRC(0,6)=SRC(2,5)=SRC(4,4)=SRC(6,3)= (l6 + l7 + 1) >> 1;
SRC(1,6)=SRC(3,5)=SRC(5,4)=SRC(7,3)= (l6 + 3*l7 + 2) >> 2;
SRC(0,7)=SRC(1,7)=SRC(2,6)=SRC(2,7)=SRC(3,6)=
SRC(3,7)=SRC(4,5)=SRC(4,6)=SRC(4,7)=SRC(5,5)=
SRC(5,6)=SRC(5,7)=SRC(6,4)=SRC(6,5)=SRC(6,6)=
SRC(6,7)=SRC(7,4)=SRC(7,5)=SRC(7,6)=SRC(7,7)= l7;
}
#undef PREDICT_8x8_LOAD_LEFT
#undef PREDICT_8x8_LOAD_TOP
#undef PREDICT_8x8_LOAD_TOPLEFT
#undef PREDICT_8x8_LOAD_TOPRIGHT
#undef PREDICT_8x8_DC
#undef PTR
#undef PT
#undef PL
#undef SRC
static inline void mc_dir_part(H264Context *h, Picture *pic, int n, int square, int chroma_height, int delta, int list,
uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
int src_x_offset, int src_y_offset,
qpel_mc_func *qpix_op, h264_chroma_mc_func chroma_op){
MpegEncContext * const s = &h->s;
const int mx= h->mv_cache[list][ scan8[n] ][0] + src_x_offset*8;
const int my= h->mv_cache[list][ scan8[n] ][1] + src_y_offset*8;
const int luma_xy= (mx&3) + ((my&3)<<2);
uint8_t * src_y = pic->data[0] + (mx>>2) + (my>>2)*s->linesize;
uint8_t * src_cb= pic->data[1] + (mx>>3) + (my>>3)*s->uvlinesize;
uint8_t * src_cr= pic->data[2] + (mx>>3) + (my>>3)*s->uvlinesize;
int extra_width= (s->flags&CODEC_FLAG_EMU_EDGE) ? 0 : 16; //FIXME increase edge? IMHO not worth it
int extra_height= extra_width;
int emu=0;
const int full_mx= mx>>2;
const int full_my= my>>2;
const int pic_width = 16*s->mb_width;
const int pic_height = 16*s->mb_height;
assert(pic->data[0]);
if(mx&7) extra_width -= 3;
if(my&7) extra_height -= 3;
if( full_mx < 0-extra_width
|| full_my < 0-extra_height
|| full_mx + 16/*FIXME*/ > pic_width + extra_width
|| full_my + 16/*FIXME*/ > pic_height + extra_height){
ff_emulated_edge_mc(s->edge_emu_buffer, src_y - 2 - 2*s->linesize, s->linesize, 16+5, 16+5/*FIXME*/, full_mx-2, full_my-2, pic_width, pic_height);
src_y= s->edge_emu_buffer + 2 + 2*s->linesize;
emu=1;
}
qpix_op[luma_xy](dest_y, src_y, s->linesize); //FIXME try variable height perhaps?
if(!square){
qpix_op[luma_xy](dest_y + delta, src_y + delta, s->linesize);
}
if(s->flags&CODEC_FLAG_GRAY) return;
if(emu){
ff_emulated_edge_mc(s->edge_emu_buffer, src_cb, s->uvlinesize, 9, 9/*FIXME*/, (mx>>3), (my>>3), pic_width>>1, pic_height>>1);
src_cb= s->edge_emu_buffer;
}
chroma_op(dest_cb, src_cb, s->uvlinesize, chroma_height, mx&7, my&7);
if(emu){
ff_emulated_edge_mc(s->edge_emu_buffer, src_cr, s->uvlinesize, 9, 9/*FIXME*/, (mx>>3), (my>>3), pic_width>>1, pic_height>>1);
src_cr= s->edge_emu_buffer;
}
chroma_op(dest_cr, src_cr, s->uvlinesize, chroma_height, mx&7, my&7);
}
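/* mc_dir_part above does one-direction motion compensation: mv_cache holds
 * luma vectors in quarter-pel units, so mx>>2 is the integer position and
 * mx&3 the fractional phase (e.g. mx=13 -> pixel 3, phase 1); luma_xy packs
 * both phases to select one of the 16 qpel functions. Chroma is sampled at
 * half the resolution, giving eighth-pel precision via mx>>3 / mx&7. When
 * the reference block overlaps the picture border, the required area is
 * first replicated into edge_emu_buffer. */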
static inline void mc_part_std(H264Context *h, int n, int square, int chroma_height, int delta,
uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
int x_offset, int y_offset,
qpel_mc_func *qpix_put, h264_chroma_mc_func chroma_put,
qpel_mc_func *qpix_avg, h264_chroma_mc_func chroma_avg,
int list0, int list1){
MpegEncContext * const s = &h->s;
qpel_mc_func *qpix_op= qpix_put;
h264_chroma_mc_func chroma_op= chroma_put;
dest_y += 2*x_offset + 2*y_offset*s-> linesize;
dest_cb += x_offset + y_offset*s->uvlinesize;
dest_cr += x_offset + y_offset*s->uvlinesize;
x_offset += 8*s->mb_x;
y_offset += 8*s->mb_y;
if(list0){
Picture *ref= &h->ref_list[0][ h->ref_cache[0][ scan8[n] ] ];
mc_dir_part(h, ref, n, square, chroma_height, delta, 0,
dest_y, dest_cb, dest_cr, x_offset, y_offset,
qpix_op, chroma_op);
qpix_op= qpix_avg;
chroma_op= chroma_avg;
}
if(list1){
Picture *ref= &h->ref_list[1][ h->ref_cache[1][ scan8[n] ] ];
mc_dir_part(h, ref, n, square, chroma_height, delta, 1,
dest_y, dest_cb, dest_cr, x_offset, y_offset,
qpix_op, chroma_op);
}
}
static inline void mc_part_weighted(H264Context *h, int n, int square, int chroma_height, int delta,
uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
int x_offset, int y_offset,
qpel_mc_func *qpix_put, h264_chroma_mc_func chroma_put,
h264_weight_func luma_weight_op, h264_weight_func chroma_weight_op,
h264_biweight_func luma_weight_avg, h264_biweight_func chroma_weight_avg,
int list0, int list1){
MpegEncContext * const s = &h->s;
dest_y += 2*x_offset + 2*y_offset*s-> linesize;
dest_cb += x_offset + y_offset*s->uvlinesize;
dest_cr += x_offset + y_offset*s->uvlinesize;
x_offset += 8*s->mb_x;
y_offset += 8*s->mb_y;
if(list0 && list1){
/* don't optimize for luma-only case, since B-frames usually
* use implicit weights => chroma too. */
uint8_t *tmp_cb = s->obmc_scratchpad;
uint8_t *tmp_cr = tmp_cb + 8*s->uvlinesize;
uint8_t *tmp_y = tmp_cr + 8*s->uvlinesize;
int refn0 = h->ref_cache[0][ scan8[n] ];
int refn1 = h->ref_cache[1][ scan8[n] ];
mc_dir_part(h, &h->ref_list[0][refn0], n, square, chroma_height, delta, 0,
dest_y, dest_cb, dest_cr,
x_offset, y_offset, qpix_put, chroma_put);
mc_dir_part(h, &h->ref_list[1][refn1], n, square, chroma_height, delta, 1,
tmp_y, tmp_cb, tmp_cr,
x_offset, y_offset, qpix_put, chroma_put);
if(h->use_weight == 2){
int weight0 = h->implicit_weight[refn0][refn1];
int weight1 = 64 - weight0;
luma_weight_avg( dest_y, tmp_y, s-> linesize, 5, weight0, weight1, 0, 0);
chroma_weight_avg(dest_cb, tmp_cb, s->uvlinesize, 5, weight0, weight1, 0, 0);
chroma_weight_avg(dest_cr, tmp_cr, s->uvlinesize, 5, weight0, weight1, 0, 0);
}else{
luma_weight_avg(dest_y, tmp_y, s->linesize, h->luma_log2_weight_denom,
h->luma_weight[0][refn0], h->luma_weight[1][refn1],
h->luma_offset[0][refn0], h->luma_offset[1][refn1]);
chroma_weight_avg(dest_cb, tmp_cb, s->uvlinesize, h->chroma_log2_weight_denom,
h->chroma_weight[0][refn0][0], h->chroma_weight[1][refn1][0],
h->chroma_offset[0][refn0][0], h->chroma_offset[1][refn1][0]);
chroma_weight_avg(dest_cr, tmp_cr, s->uvlinesize, h->chroma_log2_weight_denom,
h->chroma_weight[0][refn0][1], h->chroma_weight[1][refn1][1],
h->chroma_offset[0][refn0][1], h->chroma_offset[1][refn1][1]);
}
}else{
int list = list1 ? 1 : 0;
int refn = h->ref_cache[list][ scan8[n] ];
Picture *ref= &h->ref_list[list][refn];
mc_dir_part(h, ref, n, square, chroma_height, delta, list,
dest_y, dest_cb, dest_cr, x_offset, y_offset,
qpix_put, chroma_put);
luma_weight_op(dest_y, s->linesize, h->luma_log2_weight_denom,
h->luma_weight[list][refn], h->luma_offset[list][refn]);
if(h->use_weight_chroma){
chroma_weight_op(dest_cb, s->uvlinesize, h->chroma_log2_weight_denom,
h->chroma_weight[list][refn][0], h->chroma_offset[list][refn][0]);
chroma_weight_op(dest_cr, s->uvlinesize, h->chroma_log2_weight_denom,
h->chroma_weight[list][refn][1], h->chroma_offset[list][refn][1]);
}
}
}
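/* mc_part_weighted above implements weighted prediction: for bi-prediction
 * with implicit weights (use_weight==2) the two references are blended as
 * roughly (w0*p0 + w1*p1 + rounding) >> 6, with w1 = 64-w0 and log2 denom 5;
 * otherwise the explicit per-reference weights and offsets from the slice
 * header are applied. The second reference is predicted into a scratchpad
 * first because the blend needs both full-precision blocks. */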
static inline void mc_part(H264Context *h, int n, int square, int chroma_height, int delta,
uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
int x_offset, int y_offset,
qpel_mc_func *qpix_put, h264_chroma_mc_func chroma_put,
qpel_mc_func *qpix_avg, h264_chroma_mc_func chroma_avg,
h264_weight_func *weight_op, h264_biweight_func *weight_avg,
int list0, int list1){
if((h->use_weight==2 && list0 && list1
&& (h->implicit_weight[ h->ref_cache[0][scan8[n]] ][ h->ref_cache[1][scan8[n]] ] != 32))
|| h->use_weight==1)
mc_part_weighted(h, n, square, chroma_height, delta, dest_y, dest_cb, dest_cr,
x_offset, y_offset, qpix_put, chroma_put,
weight_op[0], weight_op[3], weight_avg[0], weight_avg[3], list0, list1);
else
mc_part_std(h, n, square, chroma_height, delta, dest_y, dest_cb, dest_cr,
x_offset, y_offset, qpix_put, chroma_put, qpix_avg, chroma_avg, list0, list1);
}
static void hl_motion(H264Context *h, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
qpel_mc_func (*qpix_put)[16], h264_chroma_mc_func (*chroma_put),
qpel_mc_func (*qpix_avg)[16], h264_chroma_mc_func (*chroma_avg),
h264_weight_func *weight_op, h264_biweight_func *weight_avg){
MpegEncContext * const s = &h->s;
const int mb_xy= s->mb_x + s->mb_y*s->mb_stride;
const int mb_type= s->current_picture.mb_type[mb_xy];
assert(IS_INTER(mb_type));
if(IS_16X16(mb_type)){
mc_part(h, 0, 1, 8, 0, dest_y, dest_cb, dest_cr, 0, 0,
qpix_put[0], chroma_put[0], qpix_avg[0], chroma_avg[0],
&weight_op[0], &weight_avg[0],
IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1));
}else if(IS_16X8(mb_type)){
mc_part(h, 0, 0, 4, 8, dest_y, dest_cb, dest_cr, 0, 0,
qpix_put[1], chroma_put[0], qpix_avg[1], chroma_avg[0],
&weight_op[1], &weight_avg[1],
IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1));
mc_part(h, 8, 0, 4, 8, dest_y, dest_cb, dest_cr, 0, 4,
qpix_put[1], chroma_put[0], qpix_avg[1], chroma_avg[0],
&weight_op[1], &weight_avg[1],
IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1));
}else if(IS_8X16(mb_type)){
mc_part(h, 0, 0, 8, 8*s->linesize, dest_y, dest_cb, dest_cr, 0, 0,
qpix_put[1], chroma_put[1], qpix_avg[1], chroma_avg[1],
&weight_op[2], &weight_avg[2],
IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1));
mc_part(h, 4, 0, 8, 8*s->linesize, dest_y, dest_cb, dest_cr, 4, 0,
qpix_put[1], chroma_put[1], qpix_avg[1], chroma_avg[1],
&weight_op[2], &weight_avg[2],
IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1));
}else{
int i;
assert(IS_8X8(mb_type));
for(i=0; i<4; i++){
const int sub_mb_type= h->sub_mb_type[i];
const int n= 4*i;
int x_offset= (i&1)<<2;
int y_offset= (i&2)<<1;
if(IS_SUB_8X8(sub_mb_type)){
mc_part(h, n, 1, 4, 0, dest_y, dest_cb, dest_cr, x_offset, y_offset,
qpix_put[1], chroma_put[1], qpix_avg[1], chroma_avg[1],
&weight_op[3], &weight_avg[3],
IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
}else if(IS_SUB_8X4(sub_mb_type)){
mc_part(h, n , 0, 2, 4, dest_y, dest_cb, dest_cr, x_offset, y_offset,
qpix_put[2], chroma_put[1], qpix_avg[2], chroma_avg[1],
&weight_op[4], &weight_avg[4],
IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
mc_part(h, n+2, 0, 2, 4, dest_y, dest_cb, dest_cr, x_offset, y_offset+2,
qpix_put[2], chroma_put[1], qpix_avg[2], chroma_avg[1],
&weight_op[4], &weight_avg[4],
IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
}else if(IS_SUB_4X8(sub_mb_type)){
mc_part(h, n , 0, 4, 4*s->linesize, dest_y, dest_cb, dest_cr, x_offset, y_offset,
qpix_put[2], chroma_put[2], qpix_avg[2], chroma_avg[2],
&weight_op[5], &weight_avg[5],
IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
mc_part(h, n+1, 0, 4, 4*s->linesize, dest_y, dest_cb, dest_cr, x_offset+2, y_offset,
qpix_put[2], chroma_put[2], qpix_avg[2], chroma_avg[2],
&weight_op[5], &weight_avg[5],
IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
}else{
int j;
assert(IS_SUB_4X4(sub_mb_type));
for(j=0; j<4; j++){
int sub_x_offset= x_offset + 2*(j&1);
int sub_y_offset= y_offset + (j&2);
mc_part(h, n+j, 1, 2, 0, dest_y, dest_cb, dest_cr, sub_x_offset, sub_y_offset,
qpix_put[2], chroma_put[2], qpix_avg[2], chroma_avg[2],
&weight_op[6], &weight_avg[6],
IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
}
}
}
}
}
static void decode_init_vlc(H264Context *h){
static int done = 0;
if (!done) {
int i;
done = 1;
init_vlc(&chroma_dc_coeff_token_vlc, CHROMA_DC_COEFF_TOKEN_VLC_BITS, 4*5,
&chroma_dc_coeff_token_len [0], 1, 1,
&chroma_dc_coeff_token_bits[0], 1, 1, 1);
for(i=0; i<4; i++){
init_vlc(&coeff_token_vlc[i], COEFF_TOKEN_VLC_BITS, 4*17,
&coeff_token_len [i][0], 1, 1,
&coeff_token_bits[i][0], 1, 1, 1);
}
for(i=0; i<3; i++){
init_vlc(&chroma_dc_total_zeros_vlc[i], CHROMA_DC_TOTAL_ZEROS_VLC_BITS, 4,
&chroma_dc_total_zeros_len [i][0], 1, 1,
&chroma_dc_total_zeros_bits[i][0], 1, 1, 1);
}
for(i=0; i<15; i++){
init_vlc(&total_zeros_vlc[i], TOTAL_ZEROS_VLC_BITS, 16,
&total_zeros_len [i][0], 1, 1,
&total_zeros_bits[i][0], 1, 1, 1);
}
for(i=0; i<6; i++){
init_vlc(&run_vlc[i], RUN_VLC_BITS, 7,
&run_len [i][0], 1, 1,
&run_bits[i][0], 1, 1, 1);
}
init_vlc(&run7_vlc, RUN7_VLC_BITS, 16,
&run_len [6][0], 1, 1,
&run_bits[6][0], 1, 1, 1);
}
}
/**
* Sets the intra prediction function pointers.
*/
static void init_pred_ptrs(H264Context *h){
// MpegEncContext * const s = &h->s;
h->pred4x4[VERT_PRED ]= pred4x4_vertical_c;
h->pred4x4[HOR_PRED ]= pred4x4_horizontal_c;
h->pred4x4[DC_PRED ]= pred4x4_dc_c;
h->pred4x4[DIAG_DOWN_LEFT_PRED ]= pred4x4_down_left_c;
h->pred4x4[DIAG_DOWN_RIGHT_PRED]= pred4x4_down_right_c;
h->pred4x4[VERT_RIGHT_PRED ]= pred4x4_vertical_right_c;
h->pred4x4[HOR_DOWN_PRED ]= pred4x4_horizontal_down_c;
h->pred4x4[VERT_LEFT_PRED ]= pred4x4_vertical_left_c;
h->pred4x4[HOR_UP_PRED ]= pred4x4_horizontal_up_c;
h->pred4x4[LEFT_DC_PRED ]= pred4x4_left_dc_c;
h->pred4x4[TOP_DC_PRED ]= pred4x4_top_dc_c;
h->pred4x4[DC_128_PRED ]= pred4x4_128_dc_c;
h->pred8x8l[VERT_PRED ]= pred8x8l_vertical_c;
h->pred8x8l[HOR_PRED ]= pred8x8l_horizontal_c;
h->pred8x8l[DC_PRED ]= pred8x8l_dc_c;
h->pred8x8l[DIAG_DOWN_LEFT_PRED ]= pred8x8l_down_left_c;
h->pred8x8l[DIAG_DOWN_RIGHT_PRED]= pred8x8l_down_right_c;
h->pred8x8l[VERT_RIGHT_PRED ]= pred8x8l_vertical_right_c;
h->pred8x8l[HOR_DOWN_PRED ]= pred8x8l_horizontal_down_c;
h->pred8x8l[VERT_LEFT_PRED ]= pred8x8l_vertical_left_c;
h->pred8x8l[HOR_UP_PRED ]= pred8x8l_horizontal_up_c;
h->pred8x8l[LEFT_DC_PRED ]= pred8x8l_left_dc_c;
h->pred8x8l[TOP_DC_PRED ]= pred8x8l_top_dc_c;
h->pred8x8l[DC_128_PRED ]= pred8x8l_128_dc_c;
h->pred8x8[DC_PRED8x8 ]= pred8x8_dc_c;
h->pred8x8[VERT_PRED8x8 ]= pred8x8_vertical_c;
h->pred8x8[HOR_PRED8x8 ]= pred8x8_horizontal_c;
h->pred8x8[PLANE_PRED8x8 ]= pred8x8_plane_c;
h->pred8x8[LEFT_DC_PRED8x8]= pred8x8_left_dc_c;
h->pred8x8[TOP_DC_PRED8x8 ]= pred8x8_top_dc_c;
h->pred8x8[DC_128_PRED8x8 ]= pred8x8_128_dc_c;
h->pred16x16[DC_PRED8x8 ]= pred16x16_dc_c;
h->pred16x16[VERT_PRED8x8 ]= pred16x16_vertical_c;
h->pred16x16[HOR_PRED8x8 ]= pred16x16_horizontal_c;
h->pred16x16[PLANE_PRED8x8 ]= pred16x16_plane_c;
h->pred16x16[LEFT_DC_PRED8x8]= pred16x16_left_dc_c;
h->pred16x16[TOP_DC_PRED8x8 ]= pred16x16_top_dc_c;
h->pred16x16[DC_128_PRED8x8 ]= pred16x16_128_dc_c;
}
static void free_tables(H264Context *h){
av_freep(&h->intra4x4_pred_mode);
av_freep(&h->chroma_pred_mode_table);
av_freep(&h->cbp_table);
av_freep(&h->mvd_table[0]);
av_freep(&h->mvd_table[1]);
av_freep(&h->direct_table);
av_freep(&h->non_zero_count);
av_freep(&h->slice_table_base);
av_freep(&h->top_borders[1]);
av_freep(&h->top_borders[0]);
h->slice_table= NULL;
av_freep(&h->mb2b_xy);
av_freep(&h->mb2b8_xy);
av_freep(&h->dequant4_coeff);
av_freep(&h->dequant8_coeff);
av_freep(&h->s.obmc_scratchpad);
}
/**
 * Allocates tables.
 * Needs width/height.
 */
static int alloc_tables(H264Context *h){
MpegEncContext * const s = &h->s;
const int big_mb_num= s->mb_stride * (s->mb_height+1);
int x,y,q;
CHECKED_ALLOCZ(h->intra4x4_pred_mode, big_mb_num * 8 * sizeof(uint8_t))
CHECKED_ALLOCZ(h->non_zero_count , big_mb_num * 16 * sizeof(uint8_t))
CHECKED_ALLOCZ(h->slice_table_base , big_mb_num * sizeof(uint8_t))
CHECKED_ALLOCZ(h->top_borders[0] , s->mb_width * (16+8+8) * sizeof(uint8_t))
CHECKED_ALLOCZ(h->top_borders[1] , s->mb_width * (16+8+8) * sizeof(uint8_t))
CHECKED_ALLOCZ(h->cbp_table, big_mb_num * sizeof(uint16_t))
if( h->pps.cabac ) {
CHECKED_ALLOCZ(h->chroma_pred_mode_table, big_mb_num * sizeof(uint8_t))
CHECKED_ALLOCZ(h->mvd_table[0], 32*big_mb_num * sizeof(uint16_t));
CHECKED_ALLOCZ(h->mvd_table[1], 32*big_mb_num * sizeof(uint16_t));
CHECKED_ALLOCZ(h->direct_table, 32*big_mb_num * sizeof(uint8_t));
}
memset(h->slice_table_base, -1, big_mb_num * sizeof(uint8_t));
h->slice_table= h->slice_table_base + s->mb_stride + 1;
CHECKED_ALLOCZ(h->mb2b_xy , big_mb_num * sizeof(uint32_t));
CHECKED_ALLOCZ(h->mb2b8_xy , big_mb_num * sizeof(uint32_t));
for(y=0; y<s->mb_height; y++){
for(x=0; x<s->mb_width; x++){
const int mb_xy= x + y*s->mb_stride;
const int b_xy = 4*x + 4*y*h->b_stride;
const int b8_xy= 2*x + 2*y*h->b8_stride;
h->mb2b_xy [mb_xy]= b_xy;
h->mb2b8_xy[mb_xy]= b8_xy;
}
}
CHECKED_ALLOCZ(h->dequant4_coeff, 52*16 * sizeof(uint16_t));
CHECKED_ALLOCZ(h->dequant8_coeff, 52*64 * sizeof(uint16_t));
memcpy(h->dequant4_coeff, dequant_coeff, 52*16 * sizeof(uint16_t));
for(q=0; q<52; q++){
int shift = div6[q];
int idx = rem6[q];
if(shift >= 2) // qp<12 are shifted during dequant
shift -= 2;
for(x=0; x<64; x++)
h->dequant8_coeff[q][x] = dequant8_coeff_init[idx][
dequant8_coeff_init_scan[((x>>1)&12) | (x&3)] ] << shift;
}
if(h->sps.transform_bypass){
for(x=0; x<16; x++)
h->dequant4_coeff[0][x] = 1;
for(x=0; x<64; x++)
h->dequant8_coeff[0][x] = 1<<2;
}
s->obmc_scratchpad = NULL;
return 0;
fail:
free_tables(h);
return -1;
}
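/* Note on the dequant tables built above: the H.264 dequantizer scale
 * repeats with a doubling every 6 QP steps, i.e. scale(qp) = base[qp%6]
 * << (qp/6), which is why the loop indexes dequant8_coeff_init with
 * rem6[q] and shifts by div6[q]. */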
static void common_init(H264Context *h){
MpegEncContext * const s = &h->s;
s->width = s->avctx->width;
s->height = s->avctx->height;
s->codec_id= s->avctx->codec->id;
init_pred_ptrs(h);
s->unrestricted_mv=1;
s->decode=1; //FIXME
}
static int decode_init(AVCodecContext *avctx){
H264Context *h= avctx->priv_data;
MpegEncContext * const s = &h->s;
MPV_decode_defaults(s);
s->avctx = avctx;
common_init(h);
s->out_format = FMT_H264;
s->workaround_bugs= avctx->workaround_bugs;
// set defaults
// s->decode_mb= ff_h263_decode_mb;
s->low_delay= 1;
avctx->pix_fmt= PIX_FMT_YUV420P;
decode_init_vlc(h);
if(avctx->extradata_size > 0 && avctx->extradata &&
*(char *)avctx->extradata == 1){
h->is_avc = 1;
h->got_avcC = 0;
} else {
h->is_avc = 0;
}
return 0;
}
static void frame_start(H264Context *h){
MpegEncContext * const s = &h->s;
int i;
MPV_frame_start(s, s->avctx);
ff_er_frame_start(s);
assert(s->linesize && s->uvlinesize);
for(i=0; i<16; i++){
h->block_offset[i]= 4*((scan8[i] - scan8[0])&7) + 4*s->linesize*((scan8[i] - scan8[0])>>3);
h->block_offset[24+i]= 4*((scan8[i] - scan8[0])&7) + 8*s->linesize*((scan8[i] - scan8[0])>>3);
}
for(i=0; i<4; i++){
h->block_offset[16+i]=
h->block_offset[20+i]= 4*((scan8[i] - scan8[0])&7) + 4*s->uvlinesize*((scan8[i] - scan8[0])>>3);
h->block_offset[24+16+i]=
h->block_offset[24+20+i]= 4*((scan8[i] - scan8[0])&7) + 8*s->uvlinesize*((scan8[i] - scan8[0])>>3);
}
/* can't be in alloc_tables because linesize isn't known there.
* FIXME: redo bipred weight to not require extra buffer? */
if(!s->obmc_scratchpad)
s->obmc_scratchpad = av_malloc(16*s->linesize + 2*8*s->uvlinesize);
// s->decode= (s->flags&CODEC_FLAG_PSNR) || !s->encoding || s->current_picture.reference /*|| h->contains_intra*/ || 1;
}
static inline void backup_mb_border(H264Context *h, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize){
MpegEncContext * const s = &h->s;
int i;
src_y -= linesize;
src_cb -= uvlinesize;
src_cr -= uvlinesize;
// There are two lines saved: the line above the top macroblock of a pair,
// and the line above the bottom macroblock.
h->left_border[0]= h->top_borders[0][s->mb_x][15];
for(i=1; i<17; i++){
h->left_border[i]= src_y[15+i* linesize];
}
*(uint64_t*)(h->top_borders[0][s->mb_x]+0)= *(uint64_t*)(src_y + 16*linesize);
*(uint64_t*)(h->top_borders[0][s->mb_x]+8)= *(uint64_t*)(src_y +8+16*linesize);
if(!(s->flags&CODEC_FLAG_GRAY)){
h->left_border[17 ]= h->top_borders[0][s->mb_x][16+7];
h->left_border[17+9]= h->top_borders[0][s->mb_x][24+7];
for(i=1; i<9; i++){
h->left_border[i+17 ]= src_cb[7+i*uvlinesize];
h->left_border[i+17+9]= src_cr[7+i*uvlinesize];
}
*(uint64_t*)(h->top_borders[0][s->mb_x]+16)= *(uint64_t*)(src_cb+8*uvlinesize);
*(uint64_t*)(h->top_borders[0][s->mb_x]+24)= *(uint64_t*)(src_cr+8*uvlinesize);
}
}
static inline void xchg_mb_border(H264Context *h, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int xchg){
MpegEncContext * const s = &h->s;
int temp8, i;
uint64_t temp64;
int deblock_left = (s->mb_x > 0);
int deblock_top = (s->mb_y > 0);
src_y -= linesize + 1;
src_cb -= uvlinesize + 1;
src_cr -= uvlinesize + 1;
#define XCHG(a,b,t,xchg)\
t= a;\
if(xchg)\
a= b;\
b= t;
if(deblock_left){
for(i = !deblock_top; i<17; i++){
XCHG(h->left_border[i ], src_y [i* linesize], temp8, xchg);
}
}
if(deblock_top){
XCHG(*(uint64_t*)(h->top_borders[0][s->mb_x]+0), *(uint64_t*)(src_y +1), temp64, xchg);
XCHG(*(uint64_t*)(h->top_borders[0][s->mb_x]+8), *(uint64_t*)(src_y +9), temp64, 1);
if(s->mb_x+1 < s->mb_width){
XCHG(*(uint64_t*)(h->top_borders[0][s->mb_x+1]), *(uint64_t*)(src_y +17), temp64, 1);
}
}
if(!(s->flags&CODEC_FLAG_GRAY)){
if(deblock_left){
for(i = !deblock_top; i<9; i++){
XCHG(h->left_border[i+17 ], src_cb[i*uvlinesize], temp8, xchg);
XCHG(h->left_border[i+17+9], src_cr[i*uvlinesize], temp8, xchg);
}
}
if(deblock_top){
XCHG(*(uint64_t*)(h->top_borders[0][s->mb_x]+16), *(uint64_t*)(src_cb+1), temp64, 1);
XCHG(*(uint64_t*)(h->top_borders[0][s->mb_x]+24), *(uint64_t*)(src_cr+1), temp64, 1);
}
}
}
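/* Intra prediction must see the *unfiltered* pixels of already-deblocked
 * neighbours, so xchg_mb_border(..., 1) swaps the saved unfiltered borders
 * into the picture before prediction and xchg_mb_border(..., 0) writes the
 * deblocked pixels back afterwards (XCHG with xchg=0 copies a into b
 * without touching a). xchg_pair_border below does the same for MBAFF
 * macroblock pairs. */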
static inline void backup_pair_border(H264Context *h, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize){
MpegEncContext * const s = &h->s;
int i;
src_y -= 2 * linesize;
src_cb -= 2 * uvlinesize;
src_cr -= 2 * uvlinesize;
// There are two lines saved: the line above the top macroblock of a pair,
// and the line above the bottom macroblock.
h->left_border[0]= h->top_borders[0][s->mb_x][15];
h->left_border[1]= h->top_borders[1][s->mb_x][15];
for(i=2; i<34; i++){
h->left_border[i]= src_y[15+i* linesize];
}
*(uint64_t*)(h->top_borders[0][s->mb_x]+0)= *(uint64_t*)(src_y + 32*linesize);
*(uint64_t*)(h->top_borders[0][s->mb_x]+8)= *(uint64_t*)(src_y +8+32*linesize);
*(uint64_t*)(h->top_borders[1][s->mb_x]+0)= *(uint64_t*)(src_y + 33*linesize);
*(uint64_t*)(h->top_borders[1][s->mb_x]+8)= *(uint64_t*)(src_y +8+33*linesize);
if(!(s->flags&CODEC_FLAG_GRAY)){
h->left_border[34 ]= h->top_borders[0][s->mb_x][16+7];
h->left_border[34+ 1]= h->top_borders[1][s->mb_x][16+7];
h->left_border[34+18 ]= h->top_borders[0][s->mb_x][24+7];
h->left_border[34+18+1]= h->top_borders[1][s->mb_x][24+7];
for(i=2; i<18; i++){
h->left_border[i+34 ]= src_cb[7+i*uvlinesize];
h->left_border[i+34+18]= src_cr[7+i*uvlinesize];
}
*(uint64_t*)(h->top_borders[0][s->mb_x]+16)= *(uint64_t*)(src_cb+16*uvlinesize);
*(uint64_t*)(h->top_borders[0][s->mb_x]+24)= *(uint64_t*)(src_cr+16*uvlinesize);
*(uint64_t*)(h->top_borders[1][s->mb_x]+16)= *(uint64_t*)(src_cb+17*uvlinesize);
*(uint64_t*)(h->top_borders[1][s->mb_x]+24)= *(uint64_t*)(src_cr+17*uvlinesize);
}
}
static inline void xchg_pair_border(H264Context *h, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int xchg){
MpegEncContext * const s = &h->s;
int temp8, i;
uint64_t temp64;
int deblock_left = (s->mb_x > 0);
int deblock_top = (s->mb_y > 0);
tprintf("xchg_pair_border: src_y:%p src_cb:%p src_cr:%p ls:%d uvls:%d\n", src_y, src_cb, src_cr, linesize, uvlinesize);
src_y -= 2 * linesize + 1;
src_cb -= 2 * uvlinesize + 1;
src_cr -= 2 * uvlinesize + 1;
#define XCHG(a,b,t,xchg)\
t= a;\
if(xchg)\
a= b;\
b= t;
if(deblock_left){
for(i = (!deblock_top)<<1; i<34; i++){
XCHG(h->left_border[i ], src_y [i* linesize], temp8, xchg);
}
}
if(deblock_top){
XCHG(*(uint64_t*)(h->top_borders[0][s->mb_x]+0), *(uint64_t*)(src_y +1), temp64, xchg);
XCHG(*(uint64_t*)(h->top_borders[0][s->mb_x]+8), *(uint64_t*)(src_y +9), temp64, 1);
XCHG(*(uint64_t*)(h->top_borders[1][s->mb_x]+0), *(uint64_t*)(src_y +1 +linesize), temp64, xchg);
XCHG(*(uint64_t*)(h->top_borders[1][s->mb_x]+8), *(uint64_t*)(src_y +9 +linesize), temp64, 1);
}
if(!(s->flags&CODEC_FLAG_GRAY)){
if(deblock_left){
for(i = (!deblock_top) << 1; i<18; i++){
XCHG(h->left_border[i+34 ], src_cb[i*uvlinesize], temp8, xchg);
XCHG(h->left_border[i+34+18], src_cr[i*uvlinesize], temp8, xchg);
}
}
if(deblock_top){
XCHG(*(uint64_t*)(h->top_borders[0][s->mb_x]+16), *(uint64_t*)(src_cb+1), temp64, 1);
XCHG(*(uint64_t*)(h->top_borders[0][s->mb_x]+24), *(uint64_t*)(src_cr+1), temp64, 1);
XCHG(*(uint64_t*)(h->top_borders[1][s->mb_x]+16), *(uint64_t*)(src_cb+1 +uvlinesize), temp64, 1);
XCHG(*(uint64_t*)(h->top_borders[1][s->mb_x]+24), *(uint64_t*)(src_cr+1 +uvlinesize), temp64, 1);
}
}
}
static void hl_decode_mb(H264Context *h){
MpegEncContext * const s = &h->s;
const int mb_x= s->mb_x;
const int mb_y= s->mb_y;
const int mb_xy= mb_x + mb_y*s->mb_stride;
const int mb_type= s->current_picture.mb_type[mb_xy];
uint8_t *dest_y, *dest_cb, *dest_cr;
int linesize, uvlinesize /*dct_offset*/;
int i;
int *block_offset = &h->block_offset[0];
const unsigned int bottom = mb_y & 1;
const int transform_bypass = (s->qscale == 0 && h->sps.transform_bypass);
void (*idct_add)(uint8_t *dst, DCTELEM *block, int stride);
if(!s->decode)
return;
dest_y = s->current_picture.data[0] + (mb_y * 16* s->linesize ) + mb_x * 16;
dest_cb = s->current_picture.data[1] + (mb_y * 8 * s->uvlinesize) + mb_x * 8;
dest_cr = s->current_picture.data[2] + (mb_y * 8 * s->uvlinesize) + mb_x * 8;
if (h->mb_field_decoding_flag) {
linesize = s->linesize * 2;
uvlinesize = s->uvlinesize * 2;
block_offset = &h->block_offset[24];
if(mb_y&1){ //FIXME move out of this func?
dest_y -= s->linesize*15;
dest_cb-= s->uvlinesize*7;
dest_cr-= s->uvlinesize*7;
}
} else {
linesize = s->linesize;
uvlinesize = s->uvlinesize;
// dct_offset = s->linesize * 16;
}
idct_add = transform_bypass
? IS_8x8DCT(mb_type) ? s->dsp.add_pixels8 : s->dsp.add_pixels4
: IS_8x8DCT(mb_type) ? s->dsp.h264_idct8_add : s->dsp.h264_idct_add;
if (IS_INTRA_PCM(mb_type)) {
unsigned int x, y;
// The pixels are stored in the h->mb array in the same order as the levels;
// copy them to the output in the correct order.
for(i=0; i<16; i++) {
for (y=0; y<4; y++) {
for (x=0; x<4; x++) {
*(dest_y + block_offset[i] + y*linesize + x) = h->mb[i*16+y*4+x];
}
}
}
for(i=16; i<16+4; i++) {
for (y=0; y<4; y++) {
for (x=0; x<4; x++) {
*(dest_cb + block_offset[i] + y*uvlinesize + x) = h->mb[i*16+y*4+x];
}
}
}
for(i=20; i<20+4; i++) {
for (y=0; y<4; y++) {
for (x=0; x<4; x++) {
*(dest_cr + block_offset[i] + y*uvlinesize + x) = h->mb[i*16+y*4+x];
}
}
}
} else {
if(IS_INTRA(mb_type)){
if(h->deblocking_filter) {
if (h->mb_aff_frame) {
if (!bottom)
xchg_pair_border(h, dest_y, dest_cb, dest_cr, s->linesize, s->uvlinesize, 1);
} else {
xchg_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize, 1);
}
}
if(!(s->flags&CODEC_FLAG_GRAY)){
h->pred8x8[ h->chroma_pred_mode ](dest_cb, uvlinesize);
h->pred8x8[ h->chroma_pred_mode ](dest_cr, uvlinesize);
}
if(IS_INTRA4x4(mb_type)){
if(!s->encoding){
if(IS_8x8DCT(mb_type)){
for(i=0; i<16; i+=4){
uint8_t * const ptr= dest_y + block_offset[i];
const int dir= h->intra4x4_pred_mode_cache[ scan8[i] ];
h->pred8x8l[ dir ](ptr, (h->topleft_samples_available<<i)&0x8000,
(h->topright_samples_available<<(i+1))&0x8000, linesize);
if(h->non_zero_count_cache[ scan8[i] ])
idct_add(ptr, h->mb + i*16, linesize);
}
}else
for(i=0; i<16; i++){
uint8_t * const ptr= dest_y + block_offset[i];
uint8_t *topright;
const int dir= h->intra4x4_pred_mode_cache[ scan8[i] ];
int tr;
if(dir == DIAG_DOWN_LEFT_PRED || dir == VERT_LEFT_PRED){
const int topright_avail= (h->topright_samples_available<<i)&0x8000;
assert(mb_y || linesize <= block_offset[i]);
if(!topright_avail){
tr= ptr[3 - linesize]*0x01010101;
topright= (uint8_t*) &tr;
}else
topright= ptr + 4 - linesize;
}else
topright= NULL;
h->pred4x4[ dir ](ptr, topright, linesize);
if(h->non_zero_count_cache[ scan8[i] ]){
if(s->codec_id == CODEC_ID_H264)
idct_add(ptr, h->mb + i*16, linesize);
else
svq3_add_idct_c(ptr, h->mb + i*16, linesize, s->qscale, 0);
}
}
}
}else{
h->pred16x16[ h->intra16x16_pred_mode ](dest_y , linesize);
if(s->codec_id == CODEC_ID_H264){
if(!transform_bypass)
h264_luma_dc_dequant_idct_c(h->mb, s->qscale);
}else
svq3_luma_dc_dequant_idct_c(h->mb, s->qscale);
}
if(h->deblocking_filter) {
if (h->mb_aff_frame) {
if (bottom) {
uint8_t *pair_dest_y = s->current_picture.data[0] + ((mb_y-1) * 16* s->linesize ) + mb_x * 16;
uint8_t *pair_dest_cb = s->current_picture.data[1] + ((mb_y-1) * 8 * s->uvlinesize) + mb_x * 8;
uint8_t *pair_dest_cr = s->current_picture.data[2] + ((mb_y-1) * 8 * s->uvlinesize) + mb_x * 8;
s->mb_y--;
xchg_pair_border(h, pair_dest_y, pair_dest_cb, pair_dest_cr, s->linesize, s->uvlinesize, 0);
s->mb_y++;
}
} else {
xchg_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize, 0);
}
}
}else if(s->codec_id == CODEC_ID_H264){
hl_motion(h, dest_y, dest_cb, dest_cr,
s->dsp.put_h264_qpel_pixels_tab, s->dsp.put_h264_chroma_pixels_tab,
s->dsp.avg_h264_qpel_pixels_tab, s->dsp.avg_h264_chroma_pixels_tab,
s->dsp.weight_h264_pixels_tab, s->dsp.biweight_h264_pixels_tab);
}
if(!IS_INTRA4x4(mb_type)){
if(s->codec_id == CODEC_ID_H264){
const int di = IS_8x8DCT(mb_type) ? 4 : 1;
for(i=0; i<16; i+=di){
if(h->non_zero_count_cache[ scan8[i] ] || h->mb[i*16]){ //FIXME benchmark weird rule, & below
uint8_t * const ptr= dest_y + block_offset[i];
idct_add(ptr, h->mb + i*16, linesize);
}
}
}else{
for(i=0; i<16; i++){
if(h->non_zero_count_cache[ scan8[i] ] || h->mb[i*16]){ //FIXME benchmark weird rule, & below
uint8_t * const ptr= dest_y + block_offset[i];
svq3_add_idct_c(ptr, h->mb + i*16, linesize, s->qscale, IS_INTRA(mb_type) ? 1 : 0);
}
}
}
}
if(!(s->flags&CODEC_FLAG_GRAY)){
idct_add = transform_bypass ? s->dsp.add_pixels4 : s->dsp.h264_idct_add;
if(!transform_bypass){
chroma_dc_dequant_idct_c(h->mb + 16*16, h->chroma_qp);
chroma_dc_dequant_idct_c(h->mb + 16*16+4*16, h->chroma_qp);
}
if(s->codec_id == CODEC_ID_H264){
for(i=16; i<16+4; i++){
if(h->non_zero_count_cache[ scan8[i] ] || h->mb[i*16]){
uint8_t * const ptr= dest_cb + block_offset[i];
idct_add(ptr, h->mb + i*16, uvlinesize);
}
}
for(i=20; i<20+4; i++){
if(h->non_zero_count_cache[ scan8[i] ] || h->mb[i*16]){
uint8_t * const ptr= dest_cr + block_offset[i];
idct_add(ptr, h->mb + i*16, uvlinesize);
}
}
}else{
for(i=16; i<16+4; i++){
if(h->non_zero_count_cache[ scan8[i] ] || h->mb[i*16]){
uint8_t * const ptr= dest_cb + block_offset[i];
svq3_add_idct_c(ptr, h->mb + i*16, uvlinesize, chroma_qp[s->qscale + 12] - 12, 2);
}
}
for(i=20; i<20+4; i++){
if(h->non_zero_count_cache[ scan8[i] ] || h->mb[i*16]){
uint8_t * const ptr= dest_cr + block_offset[i];
svq3_add_idct_c(ptr, h->mb + i*16, uvlinesize, chroma_qp[s->qscale + 12] - 12, 2);
}
}
}
}
}
if(h->deblocking_filter) {
if (h->mb_aff_frame) {
const int mb_y = s->mb_y - 1;
uint8_t *pair_dest_y, *pair_dest_cb, *pair_dest_cr;
const int mb_xy= mb_x + mb_y*s->mb_stride;
const int mb_type_top = s->current_picture.mb_type[mb_xy];
const int mb_type_bottom= s->current_picture.mb_type[mb_xy+s->mb_stride];
uint8_t tmp = s->current_picture.data[1][384];
if (!bottom) return;
pair_dest_y = s->current_picture.data[0] + (mb_y * 16* s->linesize ) + mb_x * 16;
pair_dest_cb = s->current_picture.data[1] + (mb_y * 8 * s->uvlinesize) + mb_x * 8;
pair_dest_cr = s->current_picture.data[2] + (mb_y * 8 * s->uvlinesize) + mb_x * 8;
backup_pair_border(h, pair_dest_y, pair_dest_cb, pair_dest_cr, s->linesize, s->uvlinesize);
// TODO deblock a pair
// top
s->mb_y--;
tprintf("call mbaff filter_mb mb_x:%d mb_y:%d pair_dest_y = %p, dest_y = %p\n", mb_x, mb_y, pair_dest_y, dest_y);
fill_caches(h, mb_type_top, 1); //FIXME don't fill stuff which isn't used by filter_mb
filter_mb(h, mb_x, mb_y, pair_dest_y, pair_dest_cb, pair_dest_cr, linesize, uvlinesize);
if (tmp != s->current_picture.data[1][384]) {
tprintf("modified pixel 8,1 (1)\n");
}
// bottom
s->mb_y++;
tprintf("call mbaff filter_mb\n");
fill_caches(h, mb_type_bottom, 1); //FIXME don't fill stuff which isn't used by filter_mb
filter_mb(h, mb_x, mb_y+1, dest_y, dest_cb, dest_cr, linesize, uvlinesize);
if (tmp != s->current_picture.data[1][384]) {
tprintf("modified pixel 8,1 (2)\n");
}
} else {
tprintf("call filter_mb\n");
backup_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize);
fill_caches(h, mb_type, 1); //FIXME don't fill stuff which isn't used by filter_mb
filter_mb(h, mb_x, mb_y, dest_y, dest_cb, dest_cr, linesize, uvlinesize);
}
}
}
/**
 * Fills the default_ref_list.
 */
static int fill_default_ref_list(H264Context *h){
MpegEncContext * const s = &h->s;
int i;
int smallest_poc_greater_than_current = -1;
Picture sorted_short_ref[32];
if(h->slice_type==B_TYPE){
int out_i;
int limit= INT_MIN;
/* sort frame according to poc in B slice */
for(out_i=0; out_i<h->short_ref_count; out_i++){
int best_i=INT_MIN;
int best_poc=INT_MAX;
for(i=0; i<h->short_ref_count; i++){
const int poc= h->short_ref[i]->poc;
if(poc > limit && poc < best_poc){
best_poc= poc;
best_i= i;
}
}
assert(best_i != INT_MIN);
limit= best_poc;
sorted_short_ref[out_i]= *h->short_ref[best_i];
tprintf("sorted poc: %d->%d poc:%d fn:%d\n", best_i, out_i, sorted_short_ref[out_i].poc, sorted_short_ref[out_i].frame_num);
if (-1 == smallest_poc_greater_than_current) {
if (h->short_ref[best_i]->poc >= s->current_picture_ptr->poc) {
smallest_poc_greater_than_current = out_i;
}
}
}
}
if(s->picture_structure == PICT_FRAME){
if(h->slice_type==B_TYPE){
int list;
tprintf("current poc: %d, smallest_poc_greater_than_current: %d\n", s->current_picture_ptr->poc, smallest_poc_greater_than_current);
// find the largest poc
for(list=0; list<2; list++){
int index = 0;
int j= -99;
int step= list ? -1 : 1;
for(i=0; i<h->short_ref_count && index < h->ref_count[list]; i++, j+=step) {
while(j<0 || j>= h->short_ref_count){
if(j != -99 && step == (list ? -1 : 1))
return -1;
step = -step;
j= smallest_poc_greater_than_current + (step>>1);
}
if(sorted_short_ref[j].reference != 3) continue;
h->default_ref_list[list][index ]= sorted_short_ref[j];
h->default_ref_list[list][index++].pic_id= sorted_short_ref[j].frame_num;
}
for(i = 0; i < 16 && index < h->ref_count[ list ]; i++){
if(h->long_ref[i] == NULL) continue;
if(h->long_ref[i]->reference != 3) continue;
h->default_ref_list[ list ][index ]= *h->long_ref[i];
h->default_ref_list[ list ][index++].pic_id= i;
}
if(list && (smallest_poc_greater_than_current<=0 || smallest_poc_greater_than_current>=h->short_ref_count) && (1 < index)){
// swap the first two elements of L1 when
// L0 and L1 are identical
Picture temp= h->default_ref_list[1][0];
h->default_ref_list[1][0] = h->default_ref_list[1][1];
h->default_ref_list[1][1] = temp;
}
if(index < h->ref_count[ list ])
memset(&h->default_ref_list[list][index], 0, sizeof(Picture)*(h->ref_count[ list ] - index));
}
}else{
int index=0;
for(i=0; i<h->short_ref_count; i++){
if(h->short_ref[i]->reference != 3) continue; //FIXME reference field handling
h->default_ref_list[0][index ]= *h->short_ref[i];
h->default_ref_list[0][index++].pic_id= h->short_ref[i]->frame_num;
}
for(i = 0; i < 16; i++){
if(h->long_ref[i] == NULL) continue;
if(h->long_ref[i]->reference != 3) continue;
h->default_ref_list[0][index ]= *h->long_ref[i];
h->default_ref_list[0][index++].pic_id= i;
}
if(index < h->ref_count[0])
memset(&h->default_ref_list[0][index], 0, sizeof(Picture)*(h->ref_count[0] - index));
}
}else{ //FIELD
if(h->slice_type==B_TYPE){
}else{
//FIXME second field handling
}
}
#ifdef TRACE
for (i=0; i<h->ref_count[0]; i++) {
tprintf("List0: %s fn:%d 0x%p\n", (h->default_ref_list[0][i].long_ref ? "LT" : "ST"), h->default_ref_list[0][i].pic_id, h->default_ref_list[0][i].data[0]);
}
if(h->slice_type==B_TYPE){
for (i=0; i<h->ref_count[1]; i++) {
tprintf("List1: %s fn:%d 0x%p\n", (h->default_ref_list[1][i].long_ref ? "LT" : "ST"), h->default_ref_list[1][i].pic_id, h->default_ref_list[0][i].data[0]);
}
}
#endif
return 0;
}
static void print_short_term(H264Context *h);
static void print_long_term(H264Context *h);
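/**
 * Decodes the ref_pic_list_reordering() syntax below: reordering_of_pic_nums_idc
 * 0/1 move a short-term picture (selected via abs_diff_pic_num relative to a
 * running predictor, modulo max_pic_num), 2 moves a long-term picture by its
 * index, and 3 terminates the loop. Each selected picture is inserted at the
 * current index and the remainder of the list is shifted down.
 */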
static int decode_ref_pic_list_reordering(H264Context *h){
MpegEncContext * const s = &h->s;
int list, index;
print_short_term(h);
print_long_term(h);
if(h->slice_type==I_TYPE || h->slice_type==SI_TYPE) return 0; //FIXME move before func
for(list=0; list<2; list++){
memcpy(h->ref_list[list], h->default_ref_list[list], sizeof(Picture)*h->ref_count[list]);
if(get_bits1(&s->gb)){
int pred= h->curr_pic_num;
for(index=0; ; index++){
int reordering_of_pic_nums_idc= get_ue_golomb(&s->gb);
int pic_id;
int i;
Picture *ref = NULL;
if(reordering_of_pic_nums_idc==3)
break;
if(index >= h->ref_count[list]){
av_log(h->s.avctx, AV_LOG_ERROR, "reference count overflow\n");
return -1;
}
if(reordering_of_pic_nums_idc<3){
if(reordering_of_pic_nums_idc<2){
const int abs_diff_pic_num= get_ue_golomb(&s->gb) + 1;
if(abs_diff_pic_num >= h->max_pic_num){
av_log(h->s.avctx, AV_LOG_ERROR, "abs_diff_pic_num overflow\n");
return -1;
}
if(reordering_of_pic_nums_idc == 0) pred-= abs_diff_pic_num;
else pred+= abs_diff_pic_num;
pred &= h->max_pic_num - 1;
for(i= h->short_ref_count-1; i>=0; i--){
ref = h->short_ref[i];
assert(ref->reference == 3);
assert(!ref->long_ref);
if(ref->data[0] != NULL && ref->frame_num == pred && ref->long_ref == 0) // ignore non-existing pictures by testing the data[0] pointer
break;
}
if(i>=0)
ref->pic_id= ref->frame_num;
}else{
pic_id= get_ue_golomb(&s->gb); //long_term_pic_idx
ref = h->long_ref[pic_id];
ref->pic_id= pic_id;
assert(ref->reference == 3);
assert(ref->long_ref);
i=0;
}
if (i < 0) {
av_log(h->s.avctx, AV_LOG_ERROR, "reference picture missing during reorder\n");
memset(&h->ref_list[list][index], 0, sizeof(Picture)); //FIXME
} else {
for(i=index; i+1<h->ref_count[list]; i++){
if(ref->long_ref == h->ref_list[list][i].long_ref && ref->pic_id == h->ref_list[list][i].pic_id)
break;
}
for(; i > index; i--){
h->ref_list[list][i]= h->ref_list[list][i-1];
}
h->ref_list[list][index]= *ref;
}
}else{
av_log(h->s.avctx, AV_LOG_ERROR, "illegal reordering_of_pic_nums_idc\n");
return -1;
}
}
}
if(h->slice_type!=B_TYPE) break;
}
for(list=0; list<2; list++){
for(index= 0; index < h->ref_count[list]; index++){
if(!h->ref_list[list][index].data[0])
h->ref_list[list][index]= s->current_picture;
}
if(h->slice_type!=B_TYPE) break;
}
if(h->slice_type==B_TYPE && !h->direct_spatial_mv_pred)
direct_dist_scale_factor(h);
direct_ref_list_init(h);
return 0;
}
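/**
 * Parses the explicit weighted-prediction tables from the slice header
 * (pred_weight_table below). Per reference and component this yields a
 * (weight, offset) pair; samples are later predicted as roughly
 * ((p * w) >> log2_denom) + offset, with defaults w = 1<<log2_denom and
 * offset = 0 when the per-entry flag is unset.
 */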
static int pred_weight_table(H264Context *h){
MpegEncContext * const s = &h->s;
int list, i;
int luma_def, chroma_def;
h->use_weight= 0;
h->use_weight_chroma= 0;
h->luma_log2_weight_denom= get_ue_golomb(&s->gb);
h->chroma_log2_weight_denom= get_ue_golomb(&s->gb);
luma_def = 1<<h->luma_log2_weight_denom;
chroma_def = 1<<h->chroma_log2_weight_denom;
for(list=0; list<2; list++){
for(i=0; i<h->ref_count[list]; i++){
int luma_weight_flag, chroma_weight_flag;
luma_weight_flag= get_bits1(&s->gb);
if(luma_weight_flag){
h->luma_weight[list][i]= get_se_golomb(&s->gb);
h->luma_offset[list][i]= get_se_golomb(&s->gb);
if( h->luma_weight[list][i] != luma_def
|| h->luma_offset[list][i] != 0)
h->use_weight= 1;
}else{
h->luma_weight[list][i]= luma_def;
h->luma_offset[list][i]= 0;
}
chroma_weight_flag= get_bits1(&s->gb);
if(chroma_weight_flag){
int j;
for(j=0; j<2; j++){
h->chroma_weight[list][i][j]= get_se_golomb(&s->gb);
h->chroma_offset[list][i][j]= get_se_golomb(&s->gb);
if( h->chroma_weight[list][i][j] != chroma_def
|| h->chroma_offset[list][i][j] != 0)
h->use_weight_chroma= 1;
}
}else{
int j;
for(j=0; j<2; j++){
h->chroma_weight[list][i][j]= chroma_def;
h->chroma_offset[list][i][j]= 0;
}
}
}
if(h->slice_type != B_TYPE) break;
}
h->use_weight= h->use_weight || h->use_weight_chroma;
return 0;
}
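/**
 * Derives implicit bi-prediction weights from picture order counts: the
 * temporally closer reference gets the larger weight. E.g. poc0=0, poc1=4,
 * cur_poc=2 gives td=4, tb=2, tx=(16384+2)/4=4096, dist_scale_factor=32 and
 * thus implicit_weight = 64-32 = 32 -- an equal blend, as the current
 * picture is centred between the references. Out-of-range factors also
 * fall back to the equal 32/32 blend.
 */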
static void implicit_weight_table(H264Context *h){
MpegEncContext * const s = &h->s;
int ref0, ref1;
int cur_poc = s->current_picture_ptr->poc;
if( h->ref_count[0] == 1 && h->ref_count[1] == 1
&& h->ref_list[0][0].poc + h->ref_list[1][0].poc == 2*cur_poc){
h->use_weight= 0;
h->use_weight_chroma= 0;
return;
}
h->use_weight= 2;
h->use_weight_chroma= 2;
h->luma_log2_weight_denom= 5;
h->chroma_log2_weight_denom= 5;
/* FIXME: MBAFF */
for(ref0=0; ref0 < h->ref_count[0]; ref0++){
int poc0 = h->ref_list[0][ref0].poc;
for(ref1=0; ref1 < h->ref_count[1]; ref1++){
int poc1 = h->ref_list[1][ref1].poc;
int td = clip(poc1 - poc0, -128, 127);
if(td){
int tb = clip(cur_poc - poc0, -128, 127);
int tx = (16384 + (ABS(td) >> 1)) / td;
int dist_scale_factor = clip((tb*tx + 32) >> 6, -1024, 1023) >> 2;
if(dist_scale_factor < -64 || dist_scale_factor > 128)
h->implicit_weight[ref0][ref1] = 32;
else
h->implicit_weight[ref0][ref1] = 64 - dist_scale_factor;
}else
h->implicit_weight[ref0][ref1] = 32;
}
}
}
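/**
* marks a picture as unreferenced, but keeps reference=1 while the
* picture is still queued for output so its buffers are not recycled.
*/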
static inline void unreference_pic(H264Context *h, Picture *pic){
int i;
pic->reference=0;
if(pic == h->delayed_output_pic)
pic->reference=1;
else{
for(i = 0; h->delayed_pic[i]; i++)
if(pic == h->delayed_pic[i]){
pic->reference=1;
break;
}
}
}
/**
* instantaneous decoder refresh.
*/
static void idr(H264Context *h){
int i;
for(i=0; i<16; i++){
if (h->long_ref[i] != NULL) {
unreference_pic(h, h->long_ref[i]);
h->long_ref[i]= NULL;
}
}
h->long_ref_count=0;
for(i=0; i<h->short_ref_count; i++){
unreference_pic(h, h->short_ref[i]);
h->short_ref[i]= NULL;
}
h->short_ref_count=0;
}
/* forget old pics after a seek */
static void flush_dpb(AVCodecContext *avctx){
H264Context *h= avctx->priv_data;
int i;
for(i=0; i<16; i++)
h->delayed_pic[i]= NULL;
h->delayed_output_pic= NULL;
idr(h);
if(h->s.current_picture_ptr)
h->s.current_picture_ptr->reference= 0;
}
/**
*
* @return the removed picture or NULL if an error occurs
*/
static Picture * remove_short(H264Context *h, int frame_num){
MpegEncContext * const s = &h->s;
int i;
if(s->avctx->debug&FF_DEBUG_MMCO)
av_log(h->s.avctx, AV_LOG_DEBUG, "remove short %d count %d\n", frame_num, h->short_ref_count);
for(i=0; i<h->short_ref_count; i++){
Picture *pic= h->short_ref[i];
if(s->avctx->debug&FF_DEBUG_MMCO)
av_log(h->s.avctx, AV_LOG_DEBUG, "%d %d %p\n", i, pic->frame_num, pic);
if(pic->frame_num == frame_num){
h->short_ref[i]= NULL;
memmove(&h->short_ref[i], &h->short_ref[i+1], (h->short_ref_count - i - 1)*sizeof(Picture*));
h->short_ref_count--;
return pic;
}
}
return NULL;
}
/**
*
* @return the removed picture or NULL if an error occurs
*/
static Picture * remove_long(H264Context *h, int i){
Picture *pic;
pic= h->long_ref[i];
h->long_ref[i]= NULL;
if(pic) h->long_ref_count--;
return pic;
}
/**
* print short term list
*/
static void print_short_term(H264Context *h) {
uint32_t i;
if(h->s.avctx->debug&FF_DEBUG_MMCO) {
av_log(h->s.avctx, AV_LOG_DEBUG, "short term list:\n");
for(i=0; i<h->short_ref_count; i++){
Picture *pic= h->short_ref[i];
av_log(h->s.avctx, AV_LOG_DEBUG, "%d fn:%d poc:%d %p\n", i, pic->frame_num, pic->poc, pic->data[0]);
}
}
}
/**
* print long term list
*/
static void print_long_term(H264Context *h) {
uint32_t i;
if(h->s.avctx->debug&FF_DEBUG_MMCO) {
av_log(h->s.avctx, AV_LOG_DEBUG, "long term list:\n");
for(i = 0; i < 16; i++){
Picture *pic= h->long_ref[i];
if (pic) {
av_log(h->s.avctx, AV_LOG_DEBUG, "%d fn:%d poc:%d %p\n", i, pic->frame_num, pic->poc, pic->data[0]);
}
}
}
}
/**
* Executes the reference picture marking (memory management control operations).
*/
static int execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){
MpegEncContext * const s = &h->s;
int i, j;
int current_is_long=0;
Picture *pic;
if((s->avctx->debug&FF_DEBUG_MMCO) && mmco_count==0)
av_log(h->s.avctx, AV_LOG_DEBUG, "no mmco here\n");
for(i=0; i<mmco_count; i++){
if(s->avctx->debug&FF_DEBUG_MMCO)
av_log(h->s.avctx, AV_LOG_DEBUG, "mmco:%d %d %d\n", h->mmco[i].opcode, h->mmco[i].short_frame_num, h->mmco[i].long_index);
switch(mmco[i].opcode){
case MMCO_SHORT2UNUSED:
pic= remove_short(h, mmco[i].short_frame_num);
if(pic)
unreference_pic(h, pic);
else if(s->avctx->debug&FF_DEBUG_MMCO)
av_log(h->s.avctx, AV_LOG_DEBUG, "mmco: remove_short() failure\n");
break;
case MMCO_SHORT2LONG:
pic= remove_long(h, mmco[i].long_index);
if(pic) unreference_pic(h, pic);
h->long_ref[ mmco[i].long_index ]= remove_short(h, mmco[i].short_frame_num);
h->long_ref[ mmco[i].long_index ]->long_ref=1;
h->long_ref_count++;
break;
case MMCO_LONG2UNUSED:
pic= remove_long(h, mmco[i].long_index);
if(pic)
unreference_pic(h, pic);
else if(s->avctx->debug&FF_DEBUG_MMCO)
av_log(h->s.avctx, AV_LOG_DEBUG, "mmco: remove_long() failure\n");
break;
case MMCO_LONG:
pic= remove_long(h, mmco[i].long_index);
if(pic) unreference_pic(h, pic);
h->long_ref[ mmco[i].long_index ]= s->current_picture_ptr;
h->long_ref[ mmco[i].long_index ]->long_ref=1;
h->long_ref_count++;
current_is_long=1;
break;
case MMCO_SET_MAX_LONG:
assert(mmco[i].long_index <= 16);
// just remove the long term which index is greater than new max
for(j = mmco[i].long_index; j<16; j++){
pic = remove_long(h, j);
if (pic) unreference_pic(h, pic);
}
break;
case MMCO_RESET:
while(h->short_ref_count){
pic= remove_short(h, h->short_ref[0]->frame_num);
unreference_pic(h, pic);
}
for(j = 0; j < 16; j++) {
pic= remove_long(h, j);
if(pic) unreference_pic(h, pic);
}
break;
default: assert(0);
}
}
if(!current_is_long){
pic= remove_short(h, s->current_picture_ptr->frame_num);
if(pic){
unreference_pic(h, pic);
av_log(h->s.avctx, AV_LOG_ERROR, "illegal short term buffer state detected\n");
}
if(h->short_ref_count)
memmove(&h->short_ref[1], &h->short_ref[0], h->short_ref_count*sizeof(Picture*));
h->short_ref[0]= s->current_picture_ptr;
h->short_ref[0]->long_ref=0;
h->short_ref_count++;
}
print_short_term(h);
print_long_term(h);
return 0;
}
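/**
* parses dec_ref_pic_marking(): for IDR slices the two IDR flags,
* otherwise either an explicit list of MMCO operations (adaptive marking)
* or, implicitly, the sliding window that drops the oldest short-term
* reference once ref_frame_count references are in use.
*/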
static int decode_ref_pic_marking(H264Context *h){
MpegEncContext * const s = &h->s;
int i;
if(h->nal_unit_type == NAL_IDR_SLICE){ //FIXME fields
s->broken_link= get_bits1(&s->gb) -1;
h->mmco[0].long_index= get_bits1(&s->gb) - 1; // current_long_term_idx
if(h->mmco[0].long_index == -1)
h->mmco_index= 0;
else{
h->mmco[0].opcode= MMCO_LONG;
h->mmco_index= 1;
}
}else{
if(get_bits1(&s->gb)){ // adaptive_ref_pic_marking_mode_flag
for(i= 0; i<MAX_MMCO_COUNT; i++) {
MMCOOpcode opcode= get_ue_golomb(&s->gb);
h->mmco[i].opcode= opcode;
if(opcode==MMCO_SHORT2UNUSED || opcode==MMCO_SHORT2LONG){
h->mmco[i].short_frame_num= (h->frame_num - get_ue_golomb(&s->gb) - 1) & ((1<<h->sps.log2_max_frame_num)-1); //FIXME fields
/* if(h->mmco[i].short_frame_num >= h->short_ref_count || h->short_ref[ h->mmco[i].short_frame_num ] == NULL){
av_log(s->avctx, AV_LOG_ERROR, "illegal short ref in memory management control operation %d\n", mmco);
return -1;
}*/
}
if(opcode==MMCO_SHORT2LONG || opcode==MMCO_LONG2UNUSED || opcode==MMCO_LONG || opcode==MMCO_SET_MAX_LONG){
h->mmco[i].long_index= get_ue_golomb(&s->gb);
if(/*h->mmco[i].long_index >= h->long_ref_count || h->long_ref[ h->mmco[i].long_index ] == NULL*/ h->mmco[i].long_index >= 16){
av_log(h->s.avctx, AV_LOG_ERROR, "illegal long ref in memory management control operation %d\n", opcode);
return -1;
}
}
if(opcode > MMCO_LONG){
av_log(h->s.avctx, AV_LOG_ERROR, "illegal memory management control operation %d\n", opcode);
return -1;
}
if(opcode == MMCO_END)
break;
}
h->mmco_index= i;
}else{
assert(h->long_ref_count + h->short_ref_count <= h->sps.ref_frame_count);
if(h->long_ref_count + h->short_ref_count == h->sps.ref_frame_count){ //FIXME fields
h->mmco[0].opcode= MMCO_SHORT2UNUSED;
h->mmco[0].short_frame_num= h->short_ref[ h->short_ref_count - 1 ]->frame_num;
h->mmco_index= 1;
}else
h->mmco_index= 0;
}
}
return 0;
}
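/**
* computes the picture order count for the current picture.
* poc_type 0 reconstructs the POC from the transmitted LSBs with MSB
* wraparound detection, type 1 derives it from frame_num and the SPS
* offset cycle, type 2 uses poc = 2*frame_num (minus 1 for non-refs).
*/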
static int init_poc(H264Context *h){
MpegEncContext * const s = &h->s;
const int max_frame_num= 1<<h->sps.log2_max_frame_num;
int field_poc[2];
if(h->nal_unit_type == NAL_IDR_SLICE){
h->frame_num_offset= 0;
}else{
if(h->frame_num < h->prev_frame_num)
h->frame_num_offset= h->prev_frame_num_offset + max_frame_num;
else
h->frame_num_offset= h->prev_frame_num_offset;
}
if(h->sps.poc_type==0){
const int max_poc_lsb= 1<<h->sps.log2_max_poc_lsb;
if(h->nal_unit_type == NAL_IDR_SLICE){
h->prev_poc_msb=
h->prev_poc_lsb= 0;
}
if (h->poc_lsb < h->prev_poc_lsb && h->prev_poc_lsb - h->poc_lsb >= max_poc_lsb/2)
h->poc_msb = h->prev_poc_msb + max_poc_lsb;
else if(h->poc_lsb > h->prev_poc_lsb && h->prev_poc_lsb - h->poc_lsb < -max_poc_lsb/2)
h->poc_msb = h->prev_poc_msb - max_poc_lsb;
else
h->poc_msb = h->prev_poc_msb;
//printf("poc: %d %d\n", h->poc_msb, h->poc_lsb);
field_poc[0] =
field_poc[1] = h->poc_msb + h->poc_lsb;
if(s->picture_structure == PICT_FRAME)
field_poc[1] += h->delta_poc_bottom;
}else if(h->sps.poc_type==1){
int abs_frame_num, expected_delta_per_poc_cycle, expectedpoc;
int i;
if(h->sps.poc_cycle_length != 0)
abs_frame_num = h->frame_num_offset + h->frame_num;
else
abs_frame_num = 0;
if(h->nal_ref_idc==0 && abs_frame_num > 0)
abs_frame_num--;
expected_delta_per_poc_cycle = 0;
for(i=0; i < h->sps.poc_cycle_length; i++)
expected_delta_per_poc_cycle += h->sps.offset_for_ref_frame[ i ]; //FIXME integrate during sps parse
if(abs_frame_num > 0){
int poc_cycle_cnt = (abs_frame_num - 1) / h->sps.poc_cycle_length;
int frame_num_in_poc_cycle = (abs_frame_num - 1) % h->sps.poc_cycle_length;
expectedpoc = poc_cycle_cnt * expected_delta_per_poc_cycle;
for(i = 0; i <= frame_num_in_poc_cycle; i++)
expectedpoc = expectedpoc + h->sps.offset_for_ref_frame[ i ];
} else
expectedpoc = 0;
if(h->nal_ref_idc == 0)
expectedpoc = expectedpoc + h->sps.offset_for_non_ref_pic;
field_poc[0] = expectedpoc + h->delta_poc[0];
field_poc[1] = field_poc[0] + h->sps.offset_for_top_to_bottom_field;
if(s->picture_structure == PICT_FRAME)
field_poc[1] += h->delta_poc[1];
}else{
int poc;
if(h->nal_unit_type == NAL_IDR_SLICE){
poc= 0;
}else{
if(h->nal_ref_idc) poc= 2*(h->frame_num_offset + h->frame_num);
else poc= 2*(h->frame_num_offset + h->frame_num) - 1;
}
field_poc[0]= poc;
field_poc[1]= poc;
}
if(s->picture_structure != PICT_BOTTOM_FIELD)
s->current_picture_ptr->field_poc[0]= field_poc[0];
if(s->picture_structure != PICT_TOP_FIELD)
s->current_picture_ptr->field_poc[1]= field_poc[1];
if(s->picture_structure == PICT_FRAME) // FIXME field pix?
s->current_picture_ptr->poc= FFMIN(field_poc[0], field_poc[1]);
return 0;
}
/**
* decodes a slice header.
* this will also call MPV_common_init() and frame_start() as needed
*/
static int decode_slice_header(H264Context *h){
MpegEncContext * const s = &h->s;
int first_mb_in_slice, pps_id;
int num_ref_idx_active_override_flag;
static const uint8_t slice_type_map[5]= {P_TYPE, B_TYPE, I_TYPE, SP_TYPE, SI_TYPE};
int slice_type;
int default_ref_list_done = 0;
s->current_picture.reference= h->nal_ref_idc != 0;
s->dropable= h->nal_ref_idc == 0;
first_mb_in_slice= get_ue_golomb(&s->gb);
slice_type= get_ue_golomb(&s->gb);
if(slice_type > 9){
av_log(h->s.avctx, AV_LOG_ERROR, "slice type too large (%d) at %d %d\n", h->slice_type, s->mb_x, s->mb_y);
return -1;
}
if(slice_type > 4){
slice_type -= 5;
h->slice_type_fixed=1;
}else
h->slice_type_fixed=0;
slice_type= slice_type_map[ slice_type ];
if (slice_type == I_TYPE
|| (h->slice_num != 0 && slice_type == h->slice_type) ) {
default_ref_list_done = 1;
}
h->slice_type= slice_type;
s->pict_type= h->slice_type; // to make a few old func happy, it's wrong though
pps_id= get_ue_golomb(&s->gb);
if(pps_id>255){
av_log(h->s.avctx, AV_LOG_ERROR, "pps_id out of range\n");
return -1;
}
h->pps= h->pps_buffer[pps_id];
if(h->pps.slice_group_count == 0){
av_log(h->s.avctx, AV_LOG_ERROR, "non existing PPS referenced\n");
return -1;
}
h->sps= h->sps_buffer[ h->pps.sps_id ];
if(h->sps.log2_max_frame_num == 0){
av_log(h->s.avctx, AV_LOG_ERROR, "non existing SPS referenced\n");
return -1;
}
s->mb_width= h->sps.mb_width;
s->mb_height= h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag);
h->b_stride= s->mb_width*4 + 1;
h->b8_stride= s->mb_width*2 + 1;
s->width = 16*s->mb_width - 2*(h->sps.crop_left + h->sps.crop_right );
if(h->sps.frame_mbs_only_flag)
s->height= 16*s->mb_height - 2*(h->sps.crop_top + h->sps.crop_bottom);
else
s->height= 16*s->mb_height - 4*(h->sps.crop_top + h->sps.crop_bottom); //FIXME recheck
if (s->context_initialized
&& ( s->width != s->avctx->width || s->height != s->avctx->height)) {
free_tables(h);
MPV_common_end(s);
}
if (!s->context_initialized) {
if (MPV_common_init(s) < 0)
return -1;
if(s->dsp.h264_idct_add == ff_h264_idct_add_c){ //FIXME little ugly
memcpy(h->zigzag_scan, zigzag_scan, 16*sizeof(uint8_t));
memcpy(h-> field_scan, field_scan, 16*sizeof(uint8_t));
}else{
int i;
for(i=0; i<16; i++){
#define T(x) (x>>2) | ((x<<2) & 0xF)
h->zigzag_scan[i] = T(zigzag_scan[i]);
h-> field_scan[i] = T( field_scan[i]);
}
}
if(h->sps.transform_bypass){ //FIXME same ugly
h->zigzag_scan_q0 = zigzag_scan;
h->field_scan_q0 = field_scan;
}else{
h->zigzag_scan_q0 = h->zigzag_scan;
h->field_scan_q0 = h->field_scan;
}
alloc_tables(h);
s->avctx->width = s->width;
s->avctx->height = s->height;
s->avctx->sample_aspect_ratio= h->sps.sar;
if(!s->avctx->sample_aspect_ratio.den)
s->avctx->sample_aspect_ratio.den = 1;
if(h->sps.timing_info_present_flag){
s->avctx->time_base= _AVRational(h->sps.num_units_in_tick, h->sps.time_scale); //Picard
}
}
if(h->slice_num == 0){
frame_start(h);
}
s->current_picture_ptr->frame_num= //FIXME frame_num cleanup
h->frame_num= get_bits(&s->gb, h->sps.log2_max_frame_num);
h->mb_aff_frame = 0;
if(h->sps.frame_mbs_only_flag){
s->picture_structure= PICT_FRAME;
}else{
if(get_bits1(&s->gb)) { //field_pic_flag
s->picture_structure= PICT_TOP_FIELD + get_bits1(&s->gb); //bottom_field_flag
} else {
s->picture_structure= PICT_FRAME;
first_mb_in_slice <<= h->sps.mb_aff;
h->mb_aff_frame = h->sps.mb_aff;
}
}
s->resync_mb_x = s->mb_x = first_mb_in_slice % s->mb_width;
s->resync_mb_y = s->mb_y = first_mb_in_slice / s->mb_width;
if(s->mb_y >= s->mb_height){
return -1;
}
if(s->picture_structure==PICT_FRAME){
h->curr_pic_num= h->frame_num;
h->max_pic_num= 1<< h->sps.log2_max_frame_num;
}else{
h->curr_pic_num= 2*h->frame_num;
h->max_pic_num= 1<<(h->sps.log2_max_frame_num + 1);
}
if(h->nal_unit_type == NAL_IDR_SLICE){
get_ue_golomb(&s->gb); /* idr_pic_id */
}
if(h->sps.poc_type==0){
h->poc_lsb= get_bits(&s->gb, h->sps.log2_max_poc_lsb);
if(h->pps.pic_order_present==1 && s->picture_structure==PICT_FRAME){
h->delta_poc_bottom= get_se_golomb(&s->gb);
}
}
if(h->sps.poc_type==1 && !h->sps.delta_pic_order_always_zero_flag){
h->delta_poc[0]= get_se_golomb(&s->gb);
if(h->pps.pic_order_present==1 && s->picture_structure==PICT_FRAME)
h->delta_poc[1]= get_se_golomb(&s->gb);
}
init_poc(h);
if(h->pps.redundant_pic_cnt_present){
h->redundant_pic_count= get_ue_golomb(&s->gb);
}
//set defaults, might be overridden a few lines later
h->ref_count[0]= h->pps.ref_count[0];
h->ref_count[1]= h->pps.ref_count[1];
if(h->slice_type == P_TYPE || h->slice_type == SP_TYPE || h->slice_type == B_TYPE){
if(h->slice_type == B_TYPE){
h->direct_spatial_mv_pred= get_bits1(&s->gb);
}
num_ref_idx_active_override_flag= get_bits1(&s->gb);
if(num_ref_idx_active_override_flag){
h->ref_count[0]= get_ue_golomb(&s->gb) + 1;
if(h->slice_type==B_TYPE)
h->ref_count[1]= get_ue_golomb(&s->gb) + 1;
if(h->ref_count[0] > 32 || h->ref_count[1] > 32){
av_log(h->s.avctx, AV_LOG_ERROR, "reference overflow\n");
return -1;
}
}
}
if(!default_ref_list_done){
fill_default_ref_list(h);
}
if(decode_ref_pic_list_reordering(h) < 0)
return -1;
if( (h->pps.weighted_pred && (h->slice_type == P_TYPE || h->slice_type == SP_TYPE ))
|| (h->pps.weighted_bipred_idc==1 && h->slice_type==B_TYPE ) )
pred_weight_table(h);
else if(h->pps.weighted_bipred_idc==2 && h->slice_type==B_TYPE)
implicit_weight_table(h);
else
h->use_weight = 0;
if(s->current_picture.reference)
decode_ref_pic_marking(h);
if( h->slice_type != I_TYPE && h->slice_type != SI_TYPE && h->pps.cabac )
h->cabac_init_idc = get_ue_golomb(&s->gb);
h->last_qscale_diff = 0;
s->qscale = h->pps.init_qp + get_se_golomb(&s->gb);
if(s->qscale<0 || s->qscale>51){
av_log(s->avctx, AV_LOG_ERROR, "QP %d out of range\n", s->qscale);
return -1;
}
h->chroma_qp = get_chroma_qp(h->pps.chroma_qp_index_offset, s->qscale);
//FIXME qscale / qp ... stuff
if(h->slice_type == SP_TYPE){
get_bits1(&s->gb); /* sp_for_switch_flag */
}
if(h->slice_type==SP_TYPE || h->slice_type == SI_TYPE){
get_se_golomb(&s->gb); /* slice_qs_delta */
}
h->deblocking_filter = 1;
h->slice_alpha_c0_offset = 0;
h->slice_beta_offset = 0;
if( h->pps.deblocking_filter_parameters_present ) {
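/* disable_deblocking_filter_idc: 0 = filter on, 1 = filter off,
2 = filter on but not across slice boundaries; 0 and 1 are swapped
below so that deblocking_filter!=0 means the filter is enabled */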
h->deblocking_filter= get_ue_golomb(&s->gb);
if(h->deblocking_filter < 2)
h->deblocking_filter^= 1; // 1<->0
if( h->deblocking_filter ) {
h->slice_alpha_c0_offset = get_se_golomb(&s->gb) << 1;
h->slice_beta_offset = get_se_golomb(&s->gb) << 1;
}
}
if( s->avctx->skip_loop_filter >= AVDISCARD_ALL
||(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY && h->slice_type != I_TYPE)
||(s->avctx->skip_loop_filter >= AVDISCARD_BIDIR && h->slice_type == B_TYPE)
||(s->avctx->skip_loop_filter >= AVDISCARD_NONREF && h->nal_ref_idc == 0))
h->deblocking_filter= 0;
#if 0 //FMO
if( h->pps.num_slice_groups > 1 && h->pps.mb_slice_group_map_type >= 3 && h->pps.mb_slice_group_map_type <= 5)
slice_group_change_cycle= get_bits(&s->gb, ?);
#endif
h->slice_num++;
if(s->avctx->debug&FF_DEBUG_PICT_INFO){
av_log(h->s.avctx, AV_LOG_DEBUG, "slice:%d %s mb:%d %c pps:%d frame:%d poc:%d/%d ref:%d/%d qp:%d loop:%d:%d:%d weight:%d%s\n",
h->slice_num,
(s->picture_structure==PICT_FRAME ? "F" : s->picture_structure==PICT_TOP_FIELD ? "T" : "B"),
first_mb_in_slice,
av_get_pict_type_char(h->slice_type),
pps_id, h->frame_num,
s->current_picture_ptr->field_poc[0], s->current_picture_ptr->field_poc[1],
h->ref_count[0], h->ref_count[1],
s->qscale,
h->deblocking_filter, h->slice_alpha_c0_offset/2, h->slice_beta_offset/2,
h->use_weight,
h->use_weight==1 && h->use_weight_chroma ? "c" : ""
);
}
return 0;
}
/**
* reads the unary level_prefix: the number of zero bits before the next
* set bit; consumes the zeros and the terminating one.
*/
static inline int get_level_prefix(GetBitContext *gb){
unsigned int buf;
int log;
OPEN_READER(re, gb);
UPDATE_CACHE(re, gb);
buf=GET_CACHE(re, gb);
log= 32 - av_log2(buf);
#ifdef TRACE
print_bin(buf>>(32-log), log);
av_log(NULL, AV_LOG_DEBUG, "%5d %2d %3d lpr @%5d in %s get_level_prefix\n", buf>>(32-log), log, log-1, get_bits_count(gb), __FILE__);
#endif
LAST_SKIP_BITS(re, gb, log);
CLOSE_READER(re, gb);
return log-1;
}
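/**
* the 8x8 transform may only be signaled when every sub-partition is
* 8x8 (and, for direct sub-blocks, direct_8x8_inference_flag is set).
*/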
static inline int get_dct8x8_allowed(H264Context *h){
int i;
for(i=0; i<4; i++){
if(!IS_SUB_8X8(h->sub_mb_type[i])
|| (!h->sps.direct_8x8_inference_flag && IS_DIRECT(h->sub_mb_type[i])))
return 0;
}
return 1;
}
/**
* decodes a residual block.
* @param n block index
* @param scantable scantable
* @param max_coeff number of coefficients in the block
* @return <0 if an error occurred
*/
static int decode_residual(H264Context *h, GetBitContext *gb, DCTELEM *block, int n, const uint8_t *scantable, const uint16_t *qmul, int max_coeff){
MpegEncContext * const s = &h->s;
static const int coeff_token_table_index[17]= {0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3};
int level[16], run[16];
int suffix_length, zeros_left, coeff_num, coeff_token, total_coeff, i, trailing_ones;
//FIXME put trailing_ones into the context
if(n == CHROMA_DC_BLOCK_INDEX){
coeff_token= get_vlc2(gb, chroma_dc_coeff_token_vlc.table, CHROMA_DC_COEFF_TOKEN_VLC_BITS, 1);
total_coeff= coeff_token>>2;
}else{
if(n == LUMA_DC_BLOCK_INDEX){
total_coeff= pred_non_zero_count(h, 0);
coeff_token= get_vlc2(gb, coeff_token_vlc[ coeff_token_table_index[total_coeff] ].table, COEFF_TOKEN_VLC_BITS, 2);
total_coeff= coeff_token>>2;
}else{
total_coeff= pred_non_zero_count(h, n);
coeff_token= get_vlc2(gb, coeff_token_vlc[ coeff_token_table_index[total_coeff] ].table, COEFF_TOKEN_VLC_BITS, 2);
total_coeff= coeff_token>>2;
h->non_zero_count_cache[ scan8[n] ]= total_coeff;
}
}
//FIXME set last_non_zero?
if(total_coeff==0)
return 0;
trailing_ones= coeff_token&3;
tprintf("trailing:%d, total:%d\n", trailing_ones, total_coeff);
assert(total_coeff<=16);
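/* trailing ones have |level|==1, so only a sign bit is coded (0 -> +1,
1 -> -1); the remaining levels use a unary level_prefix plus a suffix
whose length adapts upward as larger levels are seen */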
for(i=0; i<trailing_ones; i++){
level[i]= 1 - 2*get_bits1(gb);
}
suffix_length= total_coeff > 10 && trailing_ones < 3;
for(; i<total_coeff; i++){
const int prefix= get_level_prefix(gb);
int level_code, mask;
if(prefix<14){ //FIXME try to build a large unified VLC table for all this
if(suffix_length)
level_code= (prefix<<suffix_length) + get_bits(gb, suffix_length); //part
else
level_code= (prefix<<suffix_length); //part
}else if(prefix==14){
if(suffix_length)
level_code= (prefix<<suffix_length) + get_bits(gb, suffix_length); //part
else
level_code= prefix + get_bits(gb, 4); //part
}else if(prefix==15){
level_code= (prefix<<suffix_length) + get_bits(gb, 12); //part
if(suffix_length==0) level_code+=15; //FIXME doesn't make (much) sense
}else{
av_log(h->s.avctx, AV_LOG_ERROR, "prefix too large at %d %d\n", s->mb_x, s->mb_y);
return -1;
}
if(i==trailing_ones && i<3) level_code+= 2; //FIXME split first iteration
mask= -(level_code&1);
level[i]= (((2+level_code)>>1) ^ mask) - mask;
if(suffix_length==0) suffix_length=1; //FIXME split first iteration
#if 1
if(ABS(level[i]) > (3<<(suffix_length-1)) && suffix_length<6) suffix_length++;
#else
if(((2+level_code)>>1) > (3<<(suffix_length-1)) && suffix_length<6) suffix_length++;
/* ? == prefix > 2 or sth */
#endif
tprintf("level: %d suffix_length:%d\n", level[i], suffix_length);
}
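/* total_zeros codes how many zeros precede the highest nonzero
coefficient in scan order; run_before then distributes them between
the coefficients, the remainder landing before the first one */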
if(total_coeff == max_coeff)
zeros_left=0;
else{
if(n == CHROMA_DC_BLOCK_INDEX)
zeros_left= get_vlc2(gb, chroma_dc_total_zeros_vlc[ total_coeff-1 ].table, CHROMA_DC_TOTAL_ZEROS_VLC_BITS, 1);
else
zeros_left= get_vlc2(gb, total_zeros_vlc[ total_coeff-1 ].table, TOTAL_ZEROS_VLC_BITS, 1);
}
for(i=0; i<total_coeff-1; i++){
if(zeros_left <=0)
break;
else if(zeros_left < 7){
run[i]= get_vlc2(gb, run_vlc[zeros_left-1].table, RUN_VLC_BITS, 1);
}else{
run[i]= get_vlc2(gb, run7_vlc.table, RUN7_VLC_BITS, 2);
}
zeros_left -= run[i];
}
if(zeros_left<0){
av_log(h->s.avctx, AV_LOG_ERROR, "negative number of zero coeffs at %d %d\n", s->mb_x, s->mb_y);
return -1;
}
for(; i<total_coeff-1; i++){
run[i]= 0;
}
run[i]= zeros_left;
coeff_num=-1;
if(n > 24){
for(i=total_coeff-1; i>=0; i--){ //FIXME merge into rundecode?
int j;
coeff_num += run[i] + 1; //FIXME add 1 earlier ?
j= scantable[ coeff_num ];
block[j]= level[i];
}
}else{
for(i=total_coeff-1; i>=0; i--){ //FIXME merge into rundecode?
int j;
coeff_num += run[i] + 1; //FIXME add 1 earlier ?
j= scantable[ coeff_num ];
block[j]= level[i] * qmul[j];
// printf("%d %d ", block[j], qmul[j]);
}
}
return 0;
}
/**
* decodes a P_SKIP or B_SKIP macroblock
*/
static void decode_mb_skip(H264Context *h){
MpegEncContext * const s = &h->s;
const int mb_xy= s->mb_x + s->mb_y*s->mb_stride;
int mb_type=0;
memset(h->non_zero_count[mb_xy], 0, 16);
memset(h->non_zero_count_cache + 8, 0, 8*5); //FIXME ugly, remove pfui
if(h->mb_aff_frame && s->mb_skip_run==0 && (s->mb_y&1)==0){
h->mb_field_decoding_flag= get_bits1(&s->gb);
}
if(h->mb_field_decoding_flag)
mb_type|= MB_TYPE_INTERLACED;
if( h->slice_type == B_TYPE )
{
// just for fill_caches. pred_direct_motion will set the real mb_type
mb_type|= MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2|MB_TYPE_SKIP;
fill_caches(h, mb_type, 0); //FIXME check what is needed and what not ...
pred_direct_motion(h, &mb_type);
if(h->pps.cabac){
fill_rectangle(h->mvd_cache[0][scan8[0]], 4, 4, 8, 0, 4);
fill_rectangle(h->mvd_cache[1][scan8[0]], 4, 4, 8, 0, 4);
}
}
else
{
int mx, my;
mb_type|= MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P1L0|MB_TYPE_SKIP;
fill_caches(h, mb_type, 0); //FIXME check what is needed and what not ...
pred_pskip_motion(h, &mx, &my);
fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, 0, 1);
fill_rectangle( h->mv_cache[0][scan8[0]], 4, 4, 8, pack16to32(mx,my), 4);
if(h->pps.cabac)
fill_rectangle(h->mvd_cache[0][scan8[0]], 4, 4, 8, 0, 4);
}
write_back_motion(h, mb_type);
s->current_picture.mb_type[mb_xy]= mb_type|MB_TYPE_SKIP;
s->current_picture.qscale_table[mb_xy]= s->qscale;
h->slice_table[ mb_xy ]= h->slice_num;
h->prev_mb_skipped= 1;
}
/**
* decodes a macroblock
* @returns 0 if ok, AC_ERROR / DC_ERROR / MV_ERROR if an error is noticed
*/
static int decode_mb_cavlc(H264Context *h){
MpegEncContext * const s = &h->s;
const int mb_xy= s->mb_x + s->mb_y*s->mb_stride;
int mb_type, partition_count, cbp;
int dct8x8_allowed= h->pps.transform_8x8_mode;
s->dsp.clear_blocks(h->mb); //FIXME avoid if already clear (move after skip handling?)
tprintf("pic:%d mb:%d/%d\n", h->frame_num, s->mb_x, s->mb_y);
cbp = 0; /* avoid warning. FIXME: find a solution without slowing
down the code */
if(h->slice_type != I_TYPE && h->slice_type != SI_TYPE){
if(s->mb_skip_run==-1)
s->mb_skip_run= get_ue_golomb(&s->gb);
if (s->mb_skip_run--) {
decode_mb_skip(h);
return 0;
}
}
if(h->mb_aff_frame){
if ( ((s->mb_y&1) == 0) || h->prev_mb_skipped)
h->mb_field_decoding_flag = get_bits1(&s->gb);
}else
h->mb_field_decoding_flag= (s->picture_structure!=PICT_FRAME);
h->prev_mb_skipped= 0;
mb_type= get_ue_golomb(&s->gb);
if(h->slice_type == B_TYPE){
if(mb_type < 23){
partition_count= b_mb_type_info[mb_type].partition_count;
mb_type= b_mb_type_info[mb_type].type;
}else{
mb_type -= 23;
goto decode_intra_mb;
}
}else if(h->slice_type == P_TYPE /*|| h->slice_type == SP_TYPE */){
if(mb_type < 5){
partition_count= p_mb_type_info[mb_type].partition_count;
mb_type= p_mb_type_info[mb_type].type;
}else{
mb_type -= 5;
goto decode_intra_mb;
}
}else{
assert(h->slice_type == I_TYPE);
decode_intra_mb:
if(mb_type > 25){
av_log(h->s.avctx, AV_LOG_ERROR, "mb_type %d in %c slice to large at %d %d\n", mb_type, av_get_pict_type_char(h->slice_type), s->mb_x, s->mb_y);
return -1;
}
partition_count=0;
cbp= i_mb_type_info[mb_type].cbp;
h->intra16x16_pred_mode= i_mb_type_info[mb_type].pred_mode;
mb_type= i_mb_type_info[mb_type].type;
}
if(h->mb_field_decoding_flag)
mb_type |= MB_TYPE_INTERLACED;
h->slice_table[ mb_xy ]= h->slice_num;
if(IS_INTRA_PCM(mb_type)){
unsigned int x, y;
// we assume these blocks are very rare so we don't optimize it
align_get_bits(&s->gb);
// The pixels are stored in the same order as levels in h->mb array.
for(y=0; y<16; y++){
const int index= 4*(y&3) + 32*((y>>2)&1) + 128*(y>>3);
for(x=0; x<16; x++){
tprintf("LUMA ICPM LEVEL (%3d)\n", show_bits(&s->gb, 8));
h->mb[index + (x&3) + 16*((x>>2)&1) + 64*(x>>3)]= get_bits(&s->gb, 8);
}
}
for(y=0; y<8; y++){
const int index= 256 + 4*(y&3) + 32*(y>>2);
for(x=0; x<8; x++){
tprintf("CHROMA U ICPM LEVEL (%3d)\n", show_bits(&s->gb, 8));
h->mb[index + (x&3) + 16*(x>>2)]= get_bits(&s->gb, 8);
}
}
for(y=0; y<8; y++){
const int index= 256 + 64 + 4*(y&3) + 32*(y>>2);
for(x=0; x<8; x++){
tprintf("CHROMA V ICPM LEVEL (%3d)\n", show_bits(&s->gb, 8));
h->mb[index + (x&3) + 16*(x>>2)]= get_bits(&s->gb, 8);
}
}
// In deblocking, the quantizer is 0
s->current_picture.qscale_table[mb_xy]= 0;
h->chroma_qp = get_chroma_qp(h->pps.chroma_qp_index_offset, 0);
// All coeffs are present
memset(h->non_zero_count[mb_xy], 16, 16);
s->current_picture.mb_type[mb_xy]= mb_type;
return 0;
}
fill_caches(h, mb_type, 0);
//mb_pred
if(IS_INTRA(mb_type)){
// init_top_left_availability(h);
if(IS_INTRA4x4(mb_type)){
int i;
int di = 1;
if(dct8x8_allowed && get_bits1(&s->gb)){
mb_type |= MB_TYPE_8x8DCT;
di = 4;
}
// fill_intra4x4_pred_table(h);
for(i=0; i<16; i+=di){
const int mode_coded= !get_bits1(&s->gb);
const int predicted_mode= pred_intra_mode(h, i);
int mode;
if(mode_coded){
const int rem_mode= get_bits(&s->gb, 3);
if(rem_mode<predicted_mode)
mode= rem_mode;
else
mode= rem_mode + 1;
}else{
mode= predicted_mode;
}
if(di==4)
fill_rectangle( &h->intra4x4_pred_mode_cache[ scan8[i] ], 2, 2, 8, mode, 1 );
else
h->intra4x4_pred_mode_cache[ scan8[i] ] = mode;
}
write_back_intra_pred_mode(h);
if( check_intra4x4_pred_mode(h) < 0)
return -1;
}else{
h->intra16x16_pred_mode= check_intra_pred_mode(h, h->intra16x16_pred_mode);
if(h->intra16x16_pred_mode < 0)
return -1;
}
h->chroma_pred_mode= get_ue_golomb(&s->gb);
h->chroma_pred_mode= check_intra_pred_mode(h, h->chroma_pred_mode);
if(h->chroma_pred_mode < 0)
return -1;
}else if(partition_count==4){
int i, j, sub_partition_count[4], list, ref[2][4];
if(h->slice_type == B_TYPE){
for(i=0; i<4; i++){
h->sub_mb_type[i]= get_ue_golomb(&s->gb);
if(h->sub_mb_type[i] >=13){
av_log(h->s.avctx, AV_LOG_ERROR, "B sub_mb_type %d out of range at %d %d\n", h->sub_mb_type[i], s->mb_x, s->mb_y);
return -1;
}
sub_partition_count[i]= b_sub_mb_type_info[ h->sub_mb_type[i] ].partition_count;
h->sub_mb_type[i]= b_sub_mb_type_info[ h->sub_mb_type[i] ].type;
}
if( IS_DIRECT(h->sub_mb_type[0]) || IS_DIRECT(h->sub_mb_type[1])
|| IS_DIRECT(h->sub_mb_type[2]) || IS_DIRECT(h->sub_mb_type[3]))
pred_direct_motion(h, &mb_type);
}else{
assert(h->slice_type == P_TYPE || h->slice_type == SP_TYPE); //FIXME SP correct ?
for(i=0; i<4; i++){
h->sub_mb_type[i]= get_ue_golomb(&s->gb);
if(h->sub_mb_type[i] >=4){
av_log(h->s.avctx, AV_LOG_ERROR, "P sub_mb_type %d out of range at %d %d\n", h->sub_mb_type[i], s->mb_x, s->mb_y);
return -1;
}
sub_partition_count[i]= p_sub_mb_type_info[ h->sub_mb_type[i] ].partition_count;
h->sub_mb_type[i]= p_sub_mb_type_info[ h->sub_mb_type[i] ].type;
}
}
for(list=0; list<2; list++){
int ref_count= IS_REF0(mb_type) ? 1 : h->ref_count[list];
if(ref_count == 0) continue;
if (h->mb_aff_frame && h->mb_field_decoding_flag) {
ref_count <<= 1;
}
for(i=0; i<4; i++){
if(IS_DIRECT(h->sub_mb_type[i])) continue;
if(IS_DIR(h->sub_mb_type[i], 0, list)){
ref[list][i] = get_te0_golomb(&s->gb, ref_count); //FIXME init to 0 before and skip?
}else{
//FIXME
ref[list][i] = -1;
}
}
}
if(dct8x8_allowed)
dct8x8_allowed = get_dct8x8_allowed(h);
for(list=0; list<2; list++){
const int ref_count= IS_REF0(mb_type) ? 1 : h->ref_count[list];
if(ref_count == 0) continue;
for(i=0; i<4; i++){
if(IS_DIRECT(h->sub_mb_type[i])) continue;
h->ref_cache[list][ scan8[4*i] ]=h->ref_cache[list][ scan8[4*i]+1 ]=
h->ref_cache[list][ scan8[4*i]+8 ]=h->ref_cache[list][ scan8[4*i]+9 ]= ref[list][i];
if(IS_DIR(h->sub_mb_type[i], 0, list)){
const int sub_mb_type= h->sub_mb_type[i];
const int block_width= (sub_mb_type & (MB_TYPE_16x16|MB_TYPE_16x8)) ? 2 : 1;
for(j=0; j<sub_partition_count[i]; j++){
int mx, my;
const int index= 4*i + block_width*j;
int16_t (* mv_cache)[2]= &h->mv_cache[list][ scan8[index] ];
pred_motion(h, index, block_width, list, h->ref_cache[list][ scan8[index] ], &mx, &my);
mx += get_se_golomb(&s->gb);
my += get_se_golomb(&s->gb);
tprintf("final mv:%d %d\n", mx, my);
if(IS_SUB_8X8(sub_mb_type)){
mv_cache[ 0 ][0]= mv_cache[ 1 ][0]=
mv_cache[ 8 ][0]= mv_cache[ 9 ][0]= mx;
mv_cache[ 0 ][1]= mv_cache[ 1 ][1]=
mv_cache[ 8 ][1]= mv_cache[ 9 ][1]= my;
}else if(IS_SUB_8X4(sub_mb_type)){
mv_cache[ 0 ][0]= mv_cache[ 1 ][0]= mx;
mv_cache[ 0 ][1]= mv_cache[ 1 ][1]= my;
}else if(IS_SUB_4X8(sub_mb_type)){
mv_cache[ 0 ][0]= mv_cache[ 8 ][0]= mx;
mv_cache[ 0 ][1]= mv_cache[ 8 ][1]= my;
}else{
assert(IS_SUB_4X4(sub_mb_type));
mv_cache[ 0 ][0]= mx;
mv_cache[ 0 ][1]= my;
}
}
}else{
uint32_t *p= (uint32_t *)&h->mv_cache[list][ scan8[4*i] ][0];
p[0] = p[1]=
p[8] = p[9]= 0;
}
}
}
}else if(IS_DIRECT(mb_type)){
pred_direct_motion(h, &mb_type);
dct8x8_allowed &= h->sps.direct_8x8_inference_flag;
}else{
int list, mx, my, i;
//FIXME we should set ref_idx_l? to 0 if we use that later ...
if(IS_16X16(mb_type)){
for(list=0; list<2; list++){
if(h->ref_count[list]>0){
if(IS_DIR(mb_type, 0, list)){
const int val= get_te0_golomb(&s->gb, h->ref_count[list]);
fill_rectangle(&h->ref_cache[list][ scan8[0] ], 4, 4, 8, val, 1);
}else
fill_rectangle(&h->ref_cache[list][ scan8[0] ], 4, 4, 8, (LIST_NOT_USED&0xFF), 1);
}
}
for(list=0; list<2; list++){
if(IS_DIR(mb_type, 0, list)){
pred_motion(h, 0, 4, list, h->ref_cache[list][ scan8[0] ], &mx, &my);
mx += get_se_golomb(&s->gb);
my += get_se_golomb(&s->gb);
tprintf("final mv:%d %d\n", mx, my);
fill_rectangle(h->mv_cache[list][ scan8[0] ], 4, 4, 8, pack16to32(mx,my), 4);
}else
fill_rectangle(h->mv_cache[list][ scan8[0] ], 4, 4, 8, 0, 4);
}
}
else if(IS_16X8(mb_type)){
for(list=0; list<2; list++){
if(h->ref_count[list]>0){
for(i=0; i<2; i++){
if(IS_DIR(mb_type, i, list)){
const int val= get_te0_golomb(&s->gb, h->ref_count[list]);
fill_rectangle(&h->ref_cache[list][ scan8[0] + 16*i ], 4, 2, 8, val, 1);
}else
fill_rectangle(&h->ref_cache[list][ scan8[0] + 16*i ], 4, 2, 8, (LIST_NOT_USED&0xFF), 1);
}
}
}
for(list=0; list<2; list++){
for(i=0; i<2; i++){
if(IS_DIR(mb_type, i, list)){
pred_16x8_motion(h, 8*i, list, h->ref_cache[list][scan8[0] + 16*i], &mx, &my);
mx += get_se_golomb(&s->gb);
my += get_se_golomb(&s->gb);
tprintf("final mv:%d %d\n", mx, my);
fill_rectangle(h->mv_cache[list][ scan8[0] + 16*i ], 4, 2, 8, pack16to32(mx,my), 4);
}else
fill_rectangle(h->mv_cache[list][ scan8[0] + 16*i ], 4, 2, 8, 0, 4);
}
}
}else{
assert(IS_8X16(mb_type));
for(list=0; list<2; list++){
if(h->ref_count[list]>0){
for(i=0; i<2; i++){
if(IS_DIR(mb_type, i, list)){ //FIXME optimize
const int val= get_te0_golomb(&s->gb, h->ref_count[list]);
fill_rectangle(&h->ref_cache[list][ scan8[0] + 2*i ], 2, 4, 8, val, 1);
}else
fill_rectangle(&h->ref_cache[list][ scan8[0] + 2*i ], 2, 4, 8, (LIST_NOT_USED&0xFF), 1);
}
}
}
for(list=0; list<2; list++){
for(i=0; i<2; i++){
if(IS_DIR(mb_type, i, list)){
pred_8x16_motion(h, i*4, list, h->ref_cache[list][ scan8[0] + 2*i ], &mx, &my);
mx += get_se_golomb(&s->gb);
my += get_se_golomb(&s->gb);
tprintf("final mv:%d %d\n", mx, my);
fill_rectangle(h->mv_cache[list][ scan8[0] + 2*i ], 2, 4, 8, pack16to32(mx,my), 4);
}else
fill_rectangle(h->mv_cache[list][ scan8[0] + 2*i ], 2, 4, 8, 0, 4);
}
}
}
}
if(IS_INTER(mb_type))
write_back_motion(h, mb_type);
if(!IS_INTRA16x16(mb_type)){
cbp= get_ue_golomb(&s->gb);
if(cbp > 47){
av_log(h->s.avctx, AV_LOG_ERROR, "cbp too large (%d) at %d %d\n", cbp, s->mb_x, s->mb_y);
return -1;
}
if(IS_INTRA4x4(mb_type))
cbp= golomb_to_intra4x4_cbp[cbp];
else
cbp= golomb_to_inter_cbp[cbp];
}
if(dct8x8_allowed && (cbp&15) && !IS_INTRA(mb_type)){
if(get_bits1(&s->gb))
mb_type |= MB_TYPE_8x8DCT;
}
s->current_picture.mb_type[mb_xy]= mb_type;
if(cbp || IS_INTRA16x16(mb_type)){
int i8x8, i4x4, chroma_idx;
int chroma_qp, dquant;
GetBitContext *gb= IS_INTRA(mb_type) ? h->intra_gb_ptr : h->inter_gb_ptr;
const uint8_t *scan, *dc_scan;
// fill_non_zero_count_cache(h);
if(IS_INTERLACED(mb_type)){
scan= s->qscale ? h->field_scan : h->field_scan_q0;
dc_scan= luma_dc_field_scan;
}else{
scan= s->qscale ? h->zigzag_scan : h->zigzag_scan_q0;
dc_scan= luma_dc_zigzag_scan;
}
dquant= get_se_golomb(&s->gb);
if( dquant > 25 || dquant < -26 ){
av_log(h->s.avctx, AV_LOG_ERROR, "dquant out of range (%d) at %d %d\n", dquant, s->mb_x, s->mb_y);
return -1;
}
s->qscale += dquant;
if(((unsigned)s->qscale) > 51){
if(s->qscale<0) s->qscale+= 52;
else s->qscale-= 52;
}
h->chroma_qp= chroma_qp= get_chroma_qp(h->pps.chroma_qp_index_offset, s->qscale);
if(IS_INTRA16x16(mb_type)){
if( decode_residual(h, h->intra_gb_ptr, h->mb, LUMA_DC_BLOCK_INDEX, dc_scan, h->dequant4_coeff[s->qscale], 16) < 0){
return -1; //FIXME continue if partitioned and other return -1 too
}
assert((cbp&15) == 0 || (cbp&15) == 15);
if(cbp&15){
for(i8x8=0; i8x8<4; i8x8++){
for(i4x4=0; i4x4<4; i4x4++){
const int index= i4x4 + 4*i8x8;
if( decode_residual(h, h->intra_gb_ptr, h->mb + 16*index, index, scan + 1, h->dequant4_coeff[s->qscale], 15) < 0 ){
return -1;
}
}
}
}else{
fill_rectangle(&h->non_zero_count_cache[scan8[0]], 4, 4, 8, 0, 1);
}
}else{
for(i8x8=0; i8x8<4; i8x8++){
if(cbp & (1<<i8x8)){
if(IS_8x8DCT(mb_type)){
DCTELEM *buf = &h->mb[64*i8x8];
uint8_t *nnz;
for(i4x4=0; i4x4<4; i4x4++){
if( decode_residual(h, gb, buf, i4x4+4*i8x8, zigzag_scan8x8_cavlc+16*i4x4,
h->dequant8_coeff[s->qscale], 16) <0 )
return -1;
}
if(s->qscale < 12){
int i;
for(i=0; i<64; i++)
buf[i] = (buf[i] + 2) >> 2;
}
nnz= &h->non_zero_count_cache[ scan8[4*i8x8] ];
nnz[0] |= nnz[1] | nnz[8] | nnz[9];
}else{
for(i4x4=0; i4x4<4; i4x4++){
const int index= i4x4 + 4*i8x8;
if( decode_residual(h, gb, h->mb + 16*index, index, scan, h->dequant4_coeff[s->qscale], 16) <0 ){
return -1;
}
}
}
}else{
uint8_t * const nnz= &h->non_zero_count_cache[ scan8[4*i8x8] ];
nnz[0] = nnz[1] = nnz[8] = nnz[9] = 0;
}
}
}
if(cbp&0x30){
for(chroma_idx=0; chroma_idx<2; chroma_idx++)
if( decode_residual(h, gb, h->mb + 256 + 16*4*chroma_idx, CHROMA_DC_BLOCK_INDEX, chroma_dc_scan, h->dequant4_coeff[chroma_qp], 4) < 0){
return -1;
}
}
if(cbp&0x20){
for(chroma_idx=0; chroma_idx<2; chroma_idx++){
for(i4x4=0; i4x4<4; i4x4++){
const int index= 16 + 4*chroma_idx + i4x4;
if( decode_residual(h, gb, h->mb + 16*index, index, scan + 1, h->dequant4_coeff[chroma_qp], 15) < 0){
return -1;
}
}
}
}else{
uint8_t * const nnz= &h->non_zero_count_cache[0];
nnz[ scan8[16]+0 ] = nnz[ scan8[16]+1 ] =nnz[ scan8[16]+8 ] =nnz[ scan8[16]+9 ] =
nnz[ scan8[20]+0 ] = nnz[ scan8[20]+1 ] =nnz[ scan8[20]+8 ] =nnz[ scan8[20]+9 ] = 0;
}
}else{
uint8_t * const nnz= &h->non_zero_count_cache[0];
fill_rectangle(&nnz[scan8[0]], 4, 4, 8, 0, 1);
nnz[ scan8[16]+0 ] = nnz[ scan8[16]+1 ] =nnz[ scan8[16]+8 ] =nnz[ scan8[16]+9 ] =
nnz[ scan8[20]+0 ] = nnz[ scan8[20]+1 ] =nnz[ scan8[20]+8 ] =nnz[ scan8[20]+9 ] = 0;
}
s->current_picture.qscale_table[mb_xy]= s->qscale;
write_back_non_zero_count(h);
return 0;
}
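/**
* decodes mb_field_decoding_flag; the CABAC context (70..72) counts how
* many of the left/top macroblock pairs are already field coded.
*/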
static int decode_cabac_field_decoding_flag(H264Context *h) {
MpegEncContext * const s = &h->s;
const int mb_x = s->mb_x;
const int mb_y = s->mb_y & ~1;
const int mba_xy = mb_x - 1 + mb_y *s->mb_stride;
const int mbb_xy = mb_x + (mb_y-2)*s->mb_stride;
unsigned int ctx = 0;
if( h->slice_table[mba_xy] == h->slice_num && IS_INTERLACED( s->current_picture.mb_type[mba_xy] ) ) {
ctx += 1;
}
if( h->slice_table[mbb_xy] == h->slice_num && IS_INTERLACED( s->current_picture.mb_type[mbb_xy] ) ) {
ctx += 1;
}
return get_cabac( &h->cabac, &h->cabac_state[70 + ctx] );
}
static int decode_cabac_intra_mb_type(H264Context *h, int ctx_base, int intra_slice) {
uint8_t *state= &h->cabac_state[ctx_base];
int mb_type;
if(intra_slice){
MpegEncContext * const s = &h->s;
const int mba_xy = h->left_mb_xy[0];
const int mbb_xy = h->top_mb_xy;
int ctx=0;
if( h->slice_table[mba_xy] == h->slice_num && !IS_INTRA4x4( s->current_picture.mb_type[mba_xy] ) )
ctx++;
if( h->slice_table[mbb_xy] == h->slice_num && !IS_INTRA4x4( s->current_picture.mb_type[mbb_xy] ) )
ctx++;
if( get_cabac( &h->cabac, &state[ctx] ) == 0 )
return 0; /* I4x4 */
state += 2;
}else{
if( get_cabac( &h->cabac, &state[0] ) == 0 )
return 0; /* I4x4 */
}
if( get_cabac_terminate( &h->cabac ) )
return 25; /* PCM */
mb_type = 1; /* I16x16 */
if( get_cabac( &h->cabac, &state[1] ) )
mb_type += 12; /* cbp_luma != 0 */
if( get_cabac( &h->cabac, &state[2] ) ) {
if( get_cabac( &h->cabac, &state[2+intra_slice] ) )
mb_type += 4 * 2; /* cbp_chroma == 2 */
else
mb_type += 4 * 1; /* cbp_chroma == 1 */
}
if( get_cabac( &h->cabac, &state[3+intra_slice] ) )
mb_type += 2;
if( get_cabac( &h->cabac, &state[3+2*intra_slice] ) )
mb_type += 1;
return mb_type;
}
static int decode_cabac_mb_type( H264Context *h ) {
MpegEncContext * const s = &h->s;
if( h->slice_type == I_TYPE ) {
return decode_cabac_intra_mb_type(h, 3, 1);
} else if( h->slice_type == P_TYPE ) {
if( get_cabac( &h->cabac, &h->cabac_state[14] ) == 0 ) {
/* P-type */
if( get_cabac( &h->cabac, &h->cabac_state[15] ) == 0 ) {
if( get_cabac( &h->cabac, &h->cabac_state[16] ) == 0 )
return 0; /* P_L0_D16x16; */
else
return 3; /* P_8x8; */
} else {
if( get_cabac( &h->cabac, &h->cabac_state[17] ) == 0 )
return 2; /* P_L0_D8x16; */
else
return 1; /* P_L0_D16x8; */
}
} else {
return decode_cabac_intra_mb_type(h, 17, 0) + 5;
}
} else if( h->slice_type == B_TYPE ) {
const int mba_xy = h->left_mb_xy[0];
const int mbb_xy = h->top_mb_xy;
int ctx = 0;
int bits;
if( h->slice_table[mba_xy] == h->slice_num && !IS_SKIP( s->current_picture.mb_type[mba_xy] )
&& !IS_DIRECT( s->current_picture.mb_type[mba_xy] ) )
ctx++;
if( h->slice_table[mbb_xy] == h->slice_num && !IS_SKIP( s->current_picture.mb_type[mbb_xy] )
&& !IS_DIRECT( s->current_picture.mb_type[mbb_xy] ) )
ctx++;
if( !get_cabac( &h->cabac, &h->cabac_state[27+ctx] ) )
return 0; /* B_Direct_16x16 */
if( !get_cabac( &h->cabac, &h->cabac_state[27+3] ) ) {
return 1 + get_cabac( &h->cabac, &h->cabac_state[27+5] ); /* B_L[01]_16x16 */
}
bits = get_cabac( &h->cabac, &h->cabac_state[27+4] ) << 3;
bits|= get_cabac( &h->cabac, &h->cabac_state[27+5] ) << 2;
bits|= get_cabac( &h->cabac, &h->cabac_state[27+5] ) << 1;
bits|= get_cabac( &h->cabac, &h->cabac_state[27+5] );
if( bits < 8 )
return bits + 3; /* B_Bi_16x16 through B_L1_L0_16x8 */
else if( bits == 13 ) {
return decode_cabac_intra_mb_type(h, 32, 0) + 23;
} else if( bits == 14 )
return 11; /* B_L1_L0_8x16 */
else if( bits == 15 )
return 22; /* B_8x8 */
bits= ( bits<<1 ) | get_cabac( &h->cabac, &h->cabac_state[27+5] );
return bits - 4; /* B_L0_Bi_* through B_Bi_Bi_* */
} else {
/* TODO SI/SP frames? */
return -1;
}
}
static int decode_cabac_mb_skip( H264Context *h) {
MpegEncContext * const s = &h->s;
const int mb_xy = s->mb_x + s->mb_y*s->mb_stride;
const int mba_xy = mb_xy - 1;
const int mbb_xy = mb_xy - s->mb_stride;
int ctx = 0;
if( h->slice_table[mba_xy] == h->slice_num && !IS_SKIP( s->current_picture.mb_type[mba_xy] ))
ctx++;
if( h->slice_table[mbb_xy] == h->slice_num && !IS_SKIP( s->current_picture.mb_type[mbb_xy] ))
ctx++;
if( h->slice_type == P_TYPE || h->slice_type == SP_TYPE)
return get_cabac( &h->cabac, &h->cabac_state[11+ctx] );
else /* B-frame */
return get_cabac( &h->cabac, &h->cabac_state[24+ctx] );
}
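/**
* decodes an intra 4x4 prediction mode: one bin selects the predicted
* mode, otherwise 3 fixed-length bins code rem_intra4x4_pred_mode.
*/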
static int decode_cabac_mb_intra4x4_pred_mode( H264Context *h, int pred_mode ) {
int mode = 0;
if( get_cabac( &h->cabac, &h->cabac_state[68] ) )
return pred_mode;
if( get_cabac( &h->cabac, &h->cabac_state[69] ) )
mode += 1;
if( get_cabac( &h->cabac, &h->cabac_state[69] ) )
mode += 2;
if( get_cabac( &h->cabac, &h->cabac_state[69] ) )
mode += 4;
if( mode >= pred_mode )
return mode + 1;
else
return mode;
}
static int decode_cabac_mb_chroma_pre_mode( H264Context *h) {
const int mba_xy = h->left_mb_xy[0];
const int mbb_xy = h->top_mb_xy;
int ctx = 0;
/* No need to test for IS_INTRA4x4 and IS_INTRA16x16, as we set chroma_pred_mode_table to 0 */
if( h->slice_table[mba_xy] == h->slice_num && h->chroma_pred_mode_table[mba_xy] != 0 )
ctx++;
if( h->slice_table[mbb_xy] == h->slice_num && h->chroma_pred_mode_table[mbb_xy] != 0 )
ctx++;
if( get_cabac( &h->cabac, &h->cabac_state[64+ctx] ) == 0 )
return 0;
if( get_cabac( &h->cabac, &h->cabac_state[64+3] ) == 0 )
return 1;
if( get_cabac( &h->cabac, &h->cabac_state[64+3] ) == 0 )
return 2;
else
return 3;
}
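/* x/y position (in 4x4 block units) of each luma 4x4 block in coding
order, and the inverse mapping block_idx_xy[x][y] -> block index */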
static const uint8_t block_idx_x[16] = {
0, 1, 0, 1, 2, 3, 2, 3, 0, 1, 0, 1, 2, 3, 2, 3
};
static const uint8_t block_idx_y[16] = {
0, 0, 1, 1, 0, 0, 1, 1, 2, 2, 3, 3, 2, 2, 3, 3
};
static const uint8_t block_idx_xy[4][4] = {
{ 0, 2, 8, 10},
{ 1, 3, 9, 11},
{ 4, 6, 12, 14},
{ 5, 7, 13, 15}
};
static int decode_cabac_mb_cbp_luma( H264Context *h) {
MpegEncContext * const s = &h->s;
int cbp = 0;
int i8x8;
for( i8x8 = 0; i8x8 < 4; i8x8++ ) {
int cbp_a = -1;
int cbp_b = -1;
int x, y;
int ctx = 0;
x = block_idx_x[4*i8x8];
y = block_idx_y[4*i8x8];
if( x > 0 )
cbp_a = cbp;
else if( s->mb_x > 0 && (h->slice_table[h->left_mb_xy[0]] == h->slice_num)) {
cbp_a = h->left_cbp;
tprintf("cbp_a = left_cbp = %x\n", cbp_a);
}
if( y > 0 )
cbp_b = cbp;
else if( s->mb_y > 0 && (h->slice_table[h->top_mb_xy] == h->slice_num)) {
cbp_b = h->top_cbp;
tprintf("cbp_b = top_cbp = %x\n", cbp_b);
}
/* No need to test for skip as we put 0 for skip block */
/* No need to test for IPCM as we put 1 for IPCM block */
if( cbp_a >= 0 ) {
int i8x8a = block_idx_xy[(x-1)&0x03][y]/4;
if( ((cbp_a >> i8x8a)&0x01) == 0 )
ctx++;
}
if( cbp_b >= 0 ) {
int i8x8b = block_idx_xy[x][(y-1)&0x03]/4;
if( ((cbp_b >> i8x8b)&0x01) == 0 )
ctx += 2;
}
if( get_cabac( &h->cabac, &h->cabac_state[73 + ctx] ) ) {
cbp |= 1 << i8x8;
}
}
return cbp;
}
static int decode_cabac_mb_cbp_chroma( H264Context *h) {
int ctx;
int cbp_a, cbp_b;
cbp_a = (h->left_cbp>>4)&0x03;
cbp_b = (h-> top_cbp>>4)&0x03;
ctx = 0;
if( cbp_a > 0 ) ctx++;
if( cbp_b > 0 ) ctx += 2;
if( get_cabac( &h->cabac, &h->cabac_state[77 + ctx] ) == 0 )
return 0;
ctx = 4;
if( cbp_a == 2 ) ctx++;
if( cbp_b == 2 ) ctx += 2;
return 1 + get_cabac( &h->cabac, &h->cabac_state[77 + ctx] );
}
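/**
* decodes mb_qp_delta: a unary binarization whose value is mapped back
* to a signed delta, alternating positive and negative magnitudes.
*/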
static int decode_cabac_mb_dqp( H264Context *h) {
MpegEncContext * const s = &h->s;
int mbn_xy;
int ctx = 0;
int val = 0;
if( s->mb_x > 0 )
mbn_xy = s->mb_x + s->mb_y*s->mb_stride - 1;
else
mbn_xy = s->mb_width - 1 + (s->mb_y-1)*s->mb_stride;
if( h->last_qscale_diff != 0 && ( IS_INTRA16x16(s->current_picture.mb_type[mbn_xy] ) || (h->cbp_table[mbn_xy]&0x3f) ) )
ctx++;
while( get_cabac( &h->cabac, &h->cabac_state[60 + ctx] ) ) {
if( ctx < 2 )
ctx = 2;
else
ctx = 3;
val++;
if(val > 52) //prevent infinite loop
return INT_MIN;
}
if( val&0x01 )
return (val + 1)/2;
else
return -(val + 1)/2;
}
static int decode_cabac_p_mb_sub_type( H264Context *h ) {
if( get_cabac( &h->cabac, &h->cabac_state[21] ) )
return 0; /* 8x8 */
if( !get_cabac( &h->cabac, &h->cabac_state[22] ) )
return 1; /* 8x4 */
if( get_cabac( &h->cabac, &h->cabac_state[23] ) )
return 2; /* 4x8 */
return 3; /* 4x4 */
}
static int decode_cabac_b_mb_sub_type( H264Context *h ) {
int type;
if( !get_cabac( &h->cabac, &h->cabac_state[36] ) )
return 0; /* B_Direct_8x8 */
if( !get_cabac( &h->cabac, &h->cabac_state[37] ) )
return 1 + get_cabac( &h->cabac, &h->cabac_state[39] ); /* B_L0_8x8, B_L1_8x8 */
type = 3;
if( get_cabac( &h->cabac, &h->cabac_state[38] ) ) {
if( get_cabac( &h->cabac, &h->cabac_state[39] ) )
return 11 + get_cabac( &h->cabac, &h->cabac_state[39] ); /* B_L1_4x4, B_Bi_4x4 */
type += 4;
}
type += 2*get_cabac( &h->cabac, &h->cabac_state[39] );
type += get_cabac( &h->cabac, &h->cabac_state[39] );
return type;
}
static inline int decode_cabac_mb_transform_size( H264Context *h ) {
return get_cabac( &h->cabac, &h->cabac_state[399 + h->neighbor_transform_size] );
}
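/**
* decodes a reference index with a unary binarization; the initial
* context depends on whether the left/top neighbours use a ref > 0.
*/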
static int decode_cabac_mb_ref( H264Context *h, int list, int n ) {
int refa = h->ref_cache[list][scan8[n] - 1];
int refb = h->ref_cache[list][scan8[n] - 8];
int ref = 0;
int ctx = 0;
if( h->slice_type == B_TYPE) {
if( refa > 0 && !h->direct_cache[scan8[n] - 1] )
ctx++;
if( refb > 0 && !h->direct_cache[scan8[n] - 8] )
ctx += 2;
} else {
if( refa > 0 )
ctx++;
if( refb > 0 )
ctx += 2;
}
while( get_cabac( &h->cabac, &h->cabac_state[54+ctx] ) ) {
ref++;
if( ctx < 4 )
ctx = 4;
else
ctx = 5;
}
return ref;
}
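/**
* decodes one motion vector difference component (UEG3 binarization):
* context from the sum of the neighbours' |mvd|, a unary prefix of up
* to 9, then a bypass-coded exp-Golomb suffix starting at order k=3,
* and a bypass-coded sign.
*/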
static int decode_cabac_mb_mvd( H264Context *h, int list, int n, int l ) {
int amvd = abs( h->mvd_cache[list][scan8[n] - 1][l] ) +
abs( h->mvd_cache[list][scan8[n] - 8][l] );
int ctxbase = (l == 0) ? 40 : 47;
int ctx, mvd;
if( amvd < 3 )
ctx = 0;
else if( amvd > 32 )
ctx = 2;
else
ctx = 1;
if(!get_cabac(&h->cabac, &h->cabac_state[ctxbase+ctx]))
return 0;
mvd= 1;
ctx= 3;
while( mvd < 9 && get_cabac( &h->cabac, &h->cabac_state[ctxbase+ctx] ) ) {
mvd++;
if( ctx < 6 )
ctx++;
}
if( mvd >= 9 ) {
int k = 3;
while( get_cabac_bypass( &h->cabac ) ) {
mvd += 1 << k;
k++;
}
while( k-- ) {
if( get_cabac_bypass( &h->cabac ) )
mvd += 1 << k;
}
}
if( get_cabac_bypass( &h->cabac ) ) return -mvd;
else return mvd;
}
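/**
* context for coded_block_flag: +1 if the left neighbour block has
* nonzero coefficients, +2 if the top one does, offset by block category.
*/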
static int inline get_cabac_cbf_ctx( H264Context *h, int cat, int idx ) {
int nza, nzb;
int ctx = 0;
if( cat == 0 ) {
nza = h->left_cbp&0x100;
nzb = h-> top_cbp&0x100;
} else if( cat == 1 || cat == 2 ) {
nza = h->non_zero_count_cache[scan8[idx] - 1];
nzb = h->non_zero_count_cache[scan8[idx] - 8];
} else if( cat == 3 ) {
nza = (h->left_cbp>>(6+idx))&0x01;
nzb = (h-> top_cbp>>(6+idx))&0x01;
} else {
assert(cat == 4);
nza = h->non_zero_count_cache[scan8[16+idx] - 1];
nzb = h->non_zero_count_cache[scan8[16+idx] - 8];
}
if( nza > 0 )
ctx++;
if( nzb > 0 )
ctx += 2;
return ctx + 4 * cat;
}
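/**
* decodes a residual block with CABAC: coded_block_flag (skipped for 8x8
* luma blocks), a significance map with interleaved "last" flags, then
* coeff_abs_level_minus1 per coefficient in reverse scan order with a
* bypass-coded exp-Golomb escape and a bypass-coded sign.
*/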
static int inline decode_cabac_residual( H264Context *h, DCTELEM *block, int cat, int n, const uint8_t *scantable, const uint16_t *qmul, int max_coeff) {
const int mb_xy = h->s.mb_x + h->s.mb_y*h->s.mb_stride;
static const int significant_coeff_flag_field_offset[2] = { 105, 277 };
static const int last_significant_coeff_flag_field_offset[2] = { 166, 338 };
static const int significant_coeff_flag_offset[6] = { 0, 15, 29, 44, 47, 297 };
static const int last_significant_coeff_flag_offset[6] = { 0, 15, 29, 44, 47, 251 };
static const int coeff_abs_level_m1_offset[6] = { 227+0, 227+10, 227+20, 227+30, 227+39, 426 };
static const int identity[15] = {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
};
static const int significant_coeff_flag_offset_8x8[63] = {
0, 1, 2, 3, 4, 5, 5, 4, 4, 3, 3, 4, 4, 4, 5, 5,
4, 4, 4, 4, 3, 3, 6, 7, 7, 7, 8, 9,10, 9, 8, 7,
7, 6,11,12,13,11, 6, 7, 8, 9,14,10, 9, 8, 6,11,
12,13,11, 6, 9,14,10, 9,11,12,13,11,14,10,12
};
static const int last_coeff_flag_offset_8x8[63] = {
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8
};
int index[64];
int i, last;
int coeff_count = 0;
int abslevel1 = 1;
int abslevelgt1 = 0;
const int* significant_coeff_ctx_offset;
const int* last_coeff_ctx_offset;
const int significant_coeff_ctx_base = significant_coeff_flag_offset[cat]
+ significant_coeff_flag_field_offset[h->mb_field_decoding_flag];
const int last_coeff_ctx_base = last_significant_coeff_flag_offset[cat]
+ last_significant_coeff_flag_field_offset[h->mb_field_decoding_flag];
/* cat: 0-> DC 16x16 n = 0
* 1-> AC 16x16 n = luma4x4idx
* 2-> Luma4x4 n = luma4x4idx
* 3-> DC Chroma n = iCbCr
* 4-> AC Chroma n = 4 * iCbCr + chroma4x4idx
* 5-> Luma8x8 n = 4 * luma8x8idx
*/
/* read coded block flag */
if( cat == 5 ) {
significant_coeff_ctx_offset = significant_coeff_flag_offset_8x8;
last_coeff_ctx_offset = last_coeff_flag_offset_8x8;
} else {
if( get_cabac( &h->cabac, &h->cabac_state[85 + get_cabac_cbf_ctx( h, cat, n ) ] ) == 0 ) {
if( cat == 1 || cat == 2 )
h->non_zero_count_cache[scan8[n]] = 0;
else if( cat == 4 )
h->non_zero_count_cache[scan8[16+n]] = 0;
return 0;
}
significant_coeff_ctx_offset =
last_coeff_ctx_offset = identity;
}
for(last= 0; last < max_coeff - 1; last++) {
int sig_ctx = significant_coeff_ctx_base + significant_coeff_ctx_offset[last];
if( get_cabac( &h->cabac, &h->cabac_state[sig_ctx] )) {
int last_ctx = last_coeff_ctx_base + last_coeff_ctx_offset[last];
index[coeff_count++] = last;
if( get_cabac( &h->cabac, &h->cabac_state[last_ctx] ) ) {
last= max_coeff;
break;
}
}
}
if( last == max_coeff -1 ) {
index[coeff_count++] = last;
}
assert(coeff_count > 0);
if( cat == 0 )
h->cbp_table[mb_xy] |= 0x100;
else if( cat == 1 || cat == 2 )
h->non_zero_count_cache[scan8[n]] = coeff_count;
else if( cat == 3 )
h->cbp_table[mb_xy] |= 0x40 << n;
else if( cat == 4 )
h->non_zero_count_cache[scan8[16+n]] = coeff_count;
else {
assert( cat == 5 );
fill_rectangle(&h->non_zero_count_cache[scan8[n]], 2, 2, 8, 1, 1);
}
for( i = coeff_count - 1; i >= 0; i-- ) {
int ctx = (abslevelgt1 != 0 ? 0 : FFMIN( 4, abslevel1 )) + coeff_abs_level_m1_offset[cat];
int j= scantable[index[i]];
if( get_cabac( &h->cabac, &h->cabac_state[ctx] ) == 0 ) {
if( cat == 0 || cat == 3 ) {
if( get_cabac_bypass( &h->cabac ) ) block[j] = -1;
else block[j] = 1;
}else{
if( get_cabac_bypass( &h->cabac ) ) block[j] = -qmul[j];
else block[j] = qmul[j];
}
abslevel1++;
} else {
int coeff_abs = 2;
ctx = 5 + FFMIN( 4, abslevelgt1 ) + coeff_abs_level_m1_offset[cat];
while( coeff_abs < 15 && get_cabac( &h->cabac, &h->cabac_state[ctx] ) ) {
coeff_abs++;
}
if( coeff_abs >= 15 ) {
int j = 0;
while( get_cabac_bypass( &h->cabac ) ) {
coeff_abs += 1 << j;
j++;
}
while( j-- ) {
if( get_cabac_bypass( &h->cabac ) )
coeff_abs += 1 << j ;
}
}
if( cat == 0 || cat == 3 ) {
if( get_cabac_bypass( &h->cabac ) ) block[j] = -coeff_abs;
else block[j] = coeff_abs;
}else{
if( get_cabac_bypass( &h->cabac ) ) block[j] = -coeff_abs * qmul[j];
else block[j] = coeff_abs * qmul[j];
}
abslevelgt1++;
}
}
return 0;
}
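/**
* computes the macroblock addresses of the left and top neighbours,
* adjusting for MBAFF frame coding where field/frame pairing changes
* which macroblock is the relevant neighbour.
*/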
static void inline compute_mb_neighboors(H264Context *h)
{
MpegEncContext * const s = &h->s;
const int mb_xy = s->mb_x + s->mb_y*s->mb_stride;
h->top_mb_xy = mb_xy - s->mb_stride;
h->left_mb_xy[0] = mb_xy - 1;
if(h->mb_aff_frame){
const int pair_xy = s->mb_x + (s->mb_y & ~1)*s->mb_stride;
const int top_pair_xy = pair_xy - s->mb_stride;
const int top_mb_frame_flag = !IS_INTERLACED(s->current_picture.mb_type[top_pair_xy]);
const int left_mb_frame_flag = !IS_INTERLACED(s->current_picture.mb_type[pair_xy-1]);
const int curr_mb_frame_flag = !h->mb_field_decoding_flag;
const int bottom = (s->mb_y & 1);
if (bottom
? !curr_mb_frame_flag // bottom macroblock
: (!curr_mb_frame_flag && !top_mb_frame_flag) // top macroblock
) {
h->top_mb_xy -= s->mb_stride;
}
if (left_mb_frame_flag != curr_mb_frame_flag) {
h->left_mb_xy[0] = pair_xy - 1;
}
}
return;
}
/**
* decodes a macroblock
* @returns 0 if ok, AC_ERROR / DC_ERROR / MV_ERROR if an error is noticed
*/
static int decode_mb_cabac(H264Context *h) {
MpegEncContext * const s = &h->s;
const int mb_xy= s->mb_x + s->mb_y*s->mb_stride;
int mb_type, partition_count, cbp = 0;
int dct8x8_allowed= h->pps.transform_8x8_mode;
s->dsp.clear_blocks(h->mb); //FIXME avoid if already clear (move after skip handling?)
tprintf("pic:%d mb:%d/%d\n", h->frame_num, s->mb_x, s->mb_y);
if( h->slice_type != I_TYPE && h->slice_type != SI_TYPE ) {
/* read skip flags */
if( decode_cabac_mb_skip( h ) ) {
decode_mb_skip(h);
h->cbp_table[mb_xy] = 0;
h->chroma_pred_mode_table[mb_xy] = 0;
h->last_qscale_diff = 0;
return 0;
}
}
if(h->mb_aff_frame){
if ( ((s->mb_y&1) == 0) || h->prev_mb_skipped)
h->mb_field_decoding_flag = decode_cabac_field_decoding_flag(h);
}else
h->mb_field_decoding_flag= (s->picture_structure!=PICT_FRAME);
h->prev_mb_skipped = 0;
compute_mb_neighboors(h);
if( ( mb_type = decode_cabac_mb_type( h ) ) < 0 ) {
av_log( h->s.avctx, AV_LOG_ERROR, "decode_cabac_mb_type failed\n" );
return -1;
}
if( h->slice_type == B_TYPE ) {
if( mb_type < 23 ){
partition_count= b_mb_type_info[mb_type].partition_count;
mb_type= b_mb_type_info[mb_type].type;
}else{
mb_type -= 23;
goto decode_intra_mb;
}
} else if( h->slice_type == P_TYPE ) {
if( mb_type < 5) {
partition_count= p_mb_type_info[mb_type].partition_count;
mb_type= p_mb_type_info[mb_type].type;
} else {
mb_type -= 5;
goto decode_intra_mb;
}
} else {
assert(h->slice_type == I_TYPE);
decode_intra_mb:
partition_count = 0;
cbp= i_mb_type_info[mb_type].cbp;
h->intra16x16_pred_mode= i_mb_type_info[mb_type].pred_mode;
mb_type= i_mb_type_info[mb_type].type;
}
if(h->mb_field_decoding_flag)
mb_type |= MB_TYPE_INTERLACED;
h->slice_table[ mb_xy ]= h->slice_num;
if(IS_INTRA_PCM(mb_type)) {
const uint8_t *ptr;
unsigned int x, y;
// We assume these blocks are very rare so we don't optimize it.
// FIXME The two following lines get the bitstream position in the cabac
// decode, I think it should be done by a function in cabac.h (or cabac.c).
ptr= h->cabac.bytestream;
if (h->cabac.low&0x1) ptr-=CABAC_BITS/8;
// The pixels are stored in the same order as levels in h->mb array.
for(y=0; y<16; y++){
const int index= 4*(y&3) + 32*((y>>2)&1) + 128*(y>>3);
for(x=0; x<16; x++){
tprintf("LUMA ICPM LEVEL (%3d)\n", *ptr);
h->mb[index + (x&3) + 16*((x>>2)&1) + 64*(x>>3)]= *ptr++;
}
}
for(y=0; y<8; y++){
const int index= 256 + 4*(y&3) + 32*(y>>2);
for(x=0; x<8; x++){
tprintf("CHROMA U ICPM LEVEL (%3d)\n", *ptr);
h->mb[index + (x&3) + 16*(x>>2)]= *ptr++;
}
}
for(y=0; y<8; y++){
const int index= 256 + 64 + 4*(y&3) + 32*(y>>2);
for(x=0; x<8; x++){
tprintf("CHROMA V ICPM LEVEL (%3d)\n", *ptr);
h->mb[index + (x&3) + 16*(x>>2)]= *ptr++;
}
}
ff_init_cabac_decoder(&h->cabac, ptr, h->cabac.bytestream_end - ptr);
// All blocks are present
h->cbp_table[mb_xy] = 0x1ef;
h->chroma_pred_mode_table[mb_xy] = 0;
// In deblocking, the quantizer is 0
s->current_picture.qscale_table[mb_xy]= 0;
h->chroma_qp = get_chroma_qp(h->pps.chroma_qp_index_offset, 0);
// All coeffs are present
memset(h->non_zero_count[mb_xy], 16, 16);
s->current_picture.mb_type[mb_xy]= mb_type;
return 0;
}
fill_caches(h, mb_type, 0);
if( IS_INTRA( mb_type ) ) {
int i;
if( IS_INTRA4x4( mb_type ) ) {
if( dct8x8_allowed && decode_cabac_mb_transform_size( h ) ) {
mb_type |= MB_TYPE_8x8DCT;
for( i = 0; i < 16; i+=4 ) {
int pred = pred_intra_mode( h, i );
int mode = decode_cabac_mb_intra4x4_pred_mode( h, pred );
fill_rectangle( &h->intra4x4_pred_mode_cache[ scan8[i] ], 2, 2, 8, mode, 1 );
}
} else {
for( i = 0; i < 16; i++ ) {
int pred = pred_intra_mode( h, i );
h->intra4x4_pred_mode_cache[ scan8[i] ] = decode_cabac_mb_intra4x4_pred_mode( h, pred );
//av_log( s->avctx, AV_LOG_ERROR, "i4x4 pred=%d mode=%d\n", pred, h->intra4x4_pred_mode_cache[ scan8[i] ] );
}
}
write_back_intra_pred_mode(h);
if( check_intra4x4_pred_mode(h) < 0 ) return -1;
} else {
h->intra16x16_pred_mode= check_intra_pred_mode( h, h->intra16x16_pred_mode );
if( h->intra16x16_pred_mode < 0 ) return -1;
}
h->chroma_pred_mode_table[mb_xy] =
h->chroma_pred_mode = decode_cabac_mb_chroma_pre_mode( h );
h->chroma_pred_mode= check_intra_pred_mode( h, h->chroma_pred_mode );
if( h->chroma_pred_mode < 0 ) return -1;
} else if( partition_count == 4 ) {
int i, j, sub_partition_count[4], list, ref[2][4];
if( h->slice_type == B_TYPE ) {
for( i = 0; i < 4; i++ ) {
h->sub_mb_type[i] = decode_cabac_b_mb_sub_type( h );
sub_partition_count[i]= b_sub_mb_type_info[ h->sub_mb_type[i] ].partition_count;
h->sub_mb_type[i]= b_sub_mb_type_info[ h->sub_mb_type[i] ].type;
}
if( IS_DIRECT(h->sub_mb_type[0]) || IS_DIRECT(h->sub_mb_type[1])
|| IS_DIRECT(h->sub_mb_type[2]) || IS_DIRECT(h->sub_mb_type[3])) {
pred_direct_motion(h, &mb_type);
if( h->ref_count[0] > 1 || h->ref_count[1] > 1 ) {
for( i = 0; i < 4; i++ )
if( IS_DIRECT(h->sub_mb_type[i]) )
fill_rectangle( &h->direct_cache[scan8[4*i]], 2, 2, 8, 1, 1 );
}
}
} else {
for( i = 0; i < 4; i++ ) {
h->sub_mb_type[i] = decode_cabac_p_mb_sub_type( h );
sub_partition_count[i]= p_sub_mb_type_info[ h->sub_mb_type[i] ].partition_count;
h->sub_mb_type[i]= p_sub_mb_type_info[ h->sub_mb_type[i] ].type;
}
}
for( list = 0; list < 2; list++ ) {
if( h->ref_count[list] > 0 ) {
for( i = 0; i < 4; i++ ) {
if(IS_DIRECT(h->sub_mb_type[i])) continue;
if(IS_DIR(h->sub_mb_type[i], 0, list)){
if( h->ref_count[list] > 1 )
ref[list][i] = decode_cabac_mb_ref( h, list, 4*i );
else
ref[list][i] = 0;
} else {
ref[list][i] = -1;
}
h->ref_cache[list][ scan8[4*i]+1 ]=
h->ref_cache[list][ scan8[4*i]+8 ]=h->ref_cache[list][ scan8[4*i]+9 ]= ref[list][i];
}
}
}
if(dct8x8_allowed)
dct8x8_allowed = get_dct8x8_allowed(h);
for(list=0; list<2; list++){
for(i=0; i<4; i++){
if(IS_DIRECT(h->sub_mb_type[i])){
fill_rectangle(h->mvd_cache[list][scan8[4*i]], 2, 2, 8, 0, 4);
continue;
}
h->ref_cache[list][ scan8[4*i] ]=h->ref_cache[list][ scan8[4*i]+1 ];
if(IS_DIR(h->sub_mb_type[i], 0, list) && !IS_DIRECT(h->sub_mb_type[i])){
const int sub_mb_type= h->sub_mb_type[i];
const int block_width= (sub_mb_type & (MB_TYPE_16x16|MB_TYPE_16x8)) ? 2 : 1;
for(j=0; j<sub_partition_count[i]; j++){
int mpx, mpy;
int mx, my;
const int index= 4*i + block_width*j;
int16_t (* mv_cache)[2]= &h->mv_cache[list][ scan8[index] ];
int16_t (* mvd_cache)[2]= &h->mvd_cache[list][ scan8[index] ];
pred_motion(h, index, block_width, list, h->ref_cache[list][ scan8[index] ], &mpx, &mpy);
mx = mpx + decode_cabac_mb_mvd( h, list, index, 0 );
my = mpy + decode_cabac_mb_mvd( h, list, index, 1 );
tprintf("final mv:%d %d\n", mx, my);
if(IS_SUB_8X8(sub_mb_type)){
mv_cache[ 0 ][0]= mv_cache[ 1 ][0]=
mv_cache[ 8 ][0]= mv_cache[ 9 ][0]= mx;
mv_cache[ 0 ][1]= mv_cache[ 1 ][1]=
mv_cache[ 8 ][1]= mv_cache[ 9 ][1]= my;
mvd_cache[ 0 ][0]= mvd_cache[ 1 ][0]=
mvd_cache[ 8 ][0]= mvd_cache[ 9 ][0]= mx - mpx;
mvd_cache[ 0 ][1]= mvd_cache[ 1 ][1]=
mvd_cache[ 8 ][1]= mvd_cache[ 9 ][1]= my - mpy;
}else if(IS_SUB_8X4(sub_mb_type)){
mv_cache[ 0 ][0]= mv_cache[ 1 ][0]= mx;
mv_cache[ 0 ][1]= mv_cache[ 1 ][1]= my;
mvd_cache[ 0 ][0]= mvd_cache[ 1 ][0]= mx- mpx;
mvd_cache[ 0 ][1]= mvd_cache[ 1 ][1]= my - mpy;
}else if(IS_SUB_4X8(sub_mb_type)){
mv_cache[ 0 ][0]= mv_cache[ 8 ][0]= mx;
mv_cache[ 0 ][1]= mv_cache[ 8 ][1]= my;
mvd_cache[ 0 ][0]= mvd_cache[ 8 ][0]= mx - mpx;
mvd_cache[ 0 ][1]= mvd_cache[ 8 ][1]= my - mpy;
}else{
assert(IS_SUB_4X4(sub_mb_type));
mv_cache[ 0 ][0]= mx;
mv_cache[ 0 ][1]= my;
mvd_cache[ 0 ][0]= mx - mpx;
mvd_cache[ 0 ][1]= my - mpy;
}
}
}else{
uint32_t *p= (uint32_t *)&h->mv_cache[list][ scan8[4*i] ][0];
uint32_t *pd= (uint32_t *)&h->mvd_cache[list][ scan8[4*i] ][0];
p[0] = p[1] = p[8] = p[9] = 0;
pd[0]= pd[1]= pd[8]= pd[9]= 0;
}
}
}
} else if( IS_DIRECT(mb_type) ) {
pred_direct_motion(h, &mb_type);
fill_rectangle(h->mvd_cache[0][scan8[0]], 4, 4, 8, 0, 4);
fill_rectangle(h->mvd_cache[1][scan8[0]], 4, 4, 8, 0, 4);
dct8x8_allowed &= h->sps.direct_8x8_inference_flag;
} else {
int list, mx, my, i, mpx, mpy;
if(IS_16X16(mb_type)){
for(list=0; list<2; list++){
if(IS_DIR(mb_type, 0, list)){
if(h->ref_count[list] > 0 ){
const int ref = h->ref_count[list] > 1 ? decode_cabac_mb_ref( h, list, 0 ) : 0;
fill_rectangle(&h->ref_cache[list][ scan8[0] ], 4, 4, 8, ref, 1);
}
}else
fill_rectangle(&h->ref_cache[list][ scan8[0] ], 4, 4, 8, (uint8_t)LIST_NOT_USED, 1);
}
for(list=0; list<2; list++){
if(IS_DIR(mb_type, 0, list)){
pred_motion(h, 0, 4, list, h->ref_cache[list][ scan8[0] ], &mpx, &mpy);
mx = mpx + decode_cabac_mb_mvd( h, list, 0, 0 );
my = mpy + decode_cabac_mb_mvd( h, list, 0, 1 );
tprintf("final mv:%d %d\n", mx, my);
fill_rectangle(h->mvd_cache[list][ scan8[0] ], 4, 4, 8, pack16to32(mx-mpx,my-mpy), 4);
fill_rectangle(h->mv_cache[list][ scan8[0] ], 4, 4, 8, pack16to32(mx,my), 4);
}else
fill_rectangle(h->mv_cache[list][ scan8[0] ], 4, 4, 8, 0, 4);
}
}
else if(IS_16X8(mb_type)){
for(list=0; list<2; list++){
if(h->ref_count[list]>0){
for(i=0; i<2; i++){
if(IS_DIR(mb_type, i, list)){
const int ref= h->ref_count[list] > 1 ? decode_cabac_mb_ref( h, list, 8*i ) : 0;
fill_rectangle(&h->ref_cache[list][ scan8[0] + 16*i ], 4, 2, 8, ref, 1);
}else
fill_rectangle(&h->ref_cache[list][ scan8[0] + 16*i ], 4, 2, 8, (LIST_NOT_USED&0xFF), 1);
}
}
}
for(list=0; list<2; list++){
for(i=0; i<2; i++){
if(IS_DIR(mb_type, i, list)){
pred_16x8_motion(h, 8*i, list, h->ref_cache[list][scan8[0] + 16*i], &mpx, &mpy);
mx = mpx + decode_cabac_mb_mvd( h, list, 8*i, 0 );
my = mpy + decode_cabac_mb_mvd( h, list, 8*i, 1 );
tprintf("final mv:%d %d\n", mx, my);
fill_rectangle(h->mvd_cache[list][ scan8[0] + 16*i ], 4, 2, 8, pack16to32(mx-mpx,my-mpy), 4);
fill_rectangle(h->mv_cache[list][ scan8[0] + 16*i ], 4, 2, 8, pack16to32(mx,my), 4);
}else{
fill_rectangle(h->mvd_cache[list][ scan8[0] + 16*i ], 4, 2, 8, 0, 4);
fill_rectangle(h-> mv_cache[list][ scan8[0] + 16*i ], 4, 2, 8, 0, 4);
}
}
}
}else{
assert(IS_8X16(mb_type));
for(list=0; list<2; list++){
if(h->ref_count[list]>0){
for(i=0; i<2; i++){
if(IS_DIR(mb_type, i, list)){ //FIXME optimize
const int ref= h->ref_count[list] > 1 ? decode_cabac_mb_ref( h, list, 4*i ) : 0;
fill_rectangle(&h->ref_cache[list][ scan8[0] + 2*i ], 2, 4, 8, ref, 1);
}else
fill_rectangle(&h->ref_cache[list][ scan8[0] + 2*i ], 2, 4, 8, (LIST_NOT_USED&0xFF), 1);
}
}
}
for(list=0; list<2; list++){
for(i=0; i<2; i++){
if(IS_DIR(mb_type, i, list)){
pred_8x16_motion(h, i*4, list, h->ref_cache[list][ scan8[0] + 2*i ], &mpx, &mpy);
mx = mpx + decode_cabac_mb_mvd( h, list, 4*i, 0 );
my = mpy + decode_cabac_mb_mvd( h, list, 4*i, 1 );
tprintf("final mv:%d %d\n", mx, my);
fill_rectangle(h->mvd_cache[list][ scan8[0] + 2*i ], 2, 4, 8, pack16to32(mx-mpx,my-mpy), 4);
fill_rectangle(h->mv_cache[list][ scan8[0] + 2*i ], 2, 4, 8, pack16to32(mx,my), 4);
}else{
fill_rectangle(h->mvd_cache[list][ scan8[0] + 2*i ], 2, 4, 8, 0, 4);
fill_rectangle(h-> mv_cache[list][ scan8[0] + 2*i ], 2, 4, 8, 0, 4);
}
}
}
}
}
if( IS_INTER( mb_type ) ) {
h->chroma_pred_mode_table[mb_xy] = 0;
write_back_motion( h, mb_type );
}
if( !IS_INTRA16x16( mb_type ) ) {
cbp = decode_cabac_mb_cbp_luma( h );
cbp |= decode_cabac_mb_cbp_chroma( h ) << 4;
}
h->cbp_table[mb_xy] = cbp;
if( dct8x8_allowed && (cbp&15) && !IS_INTRA( mb_type ) ) {
if( decode_cabac_mb_transform_size( h ) )
mb_type |= MB_TYPE_8x8DCT;
}
s->current_picture.mb_type[mb_xy]= mb_type;
if( cbp || IS_INTRA16x16( mb_type ) ) {
const uint8_t *scan, *dc_scan;
int dqp;
if(IS_INTERLACED(mb_type)){
scan= s->qscale ? h->field_scan : h->field_scan_q0;
dc_scan= luma_dc_field_scan;
}else{
scan= s->qscale ? h->zigzag_scan : h->zigzag_scan_q0;
dc_scan= luma_dc_zigzag_scan;
}
h->last_qscale_diff = dqp = decode_cabac_mb_dqp( h );
if( dqp == INT_MIN ){
av_log(h->s.avctx, AV_LOG_ERROR, "cabac decode of qscale diff failed at %d %d\n", s->mb_x, s->mb_y);
return -1;
}
s->qscale += dqp;
if(((unsigned)s->qscale) > 51){
if(s->qscale<0) s->qscale+= 52;
else s->qscale-= 52;
}
h->chroma_qp = get_chroma_qp(h->pps.chroma_qp_index_offset, s->qscale);
if( IS_INTRA16x16( mb_type ) ) {
int i;
//av_log( s->avctx, AV_LOG_ERROR, "INTRA16x16 DC\n" );
if( decode_cabac_residual( h, h->mb, 0, 0, dc_scan, h->dequant4_coeff[s->qscale], 16) < 0)
return -1;
if( cbp&15 ) {
for( i = 0; i < 16; i++ ) {
//av_log( s->avctx, AV_LOG_ERROR, "INTRA16x16 AC:%d\n", i );
if( decode_cabac_residual(h, h->mb + 16*i, 1, i, scan + 1, h->dequant4_coeff[s->qscale], 15) < 0 )
return -1;
}
} else {
fill_rectangle(&h->non_zero_count_cache[scan8[0]], 4, 4, 8, 0, 1);
}
} else {
int i8x8, i4x4;
for( i8x8 = 0; i8x8 < 4; i8x8++ ) {
if( cbp & (1<<i8x8) ) {
if( IS_8x8DCT(mb_type) ) {
if( decode_cabac_residual(h, h->mb + 64*i8x8, 5, 4*i8x8,
zigzag_scan8x8, h->dequant8_coeff[s->qscale], 64) < 0 )
return -1;
if(s->qscale < 12){
int i;
for(i=0; i<64; i++)
h->mb[64*i8x8+i] = (h->mb[64*i8x8+i] + 2) >> 2;
}
} else
for( i4x4 = 0; i4x4 < 4; i4x4++ ) {
const int index = 4*i8x8 + i4x4;
//av_log( s->avctx, AV_LOG_ERROR, "Luma4x4: %d\n", index );
if( decode_cabac_residual(h, h->mb + 16*index, 2, index, scan, h->dequant4_coeff[s->qscale], 16) < 0 )
return -1;
}
} else {
uint8_t * const nnz= &h->non_zero_count_cache[ scan8[4*i8x8] ];
nnz[0] = nnz[1] = nnz[8] = nnz[9] = 0;
}
}
}
if( cbp&0x30 ){
int c;
for( c = 0; c < 2; c++ ) {
//av_log( s->avctx, AV_LOG_ERROR, "INTRA C%d-DC\n",c );
if( decode_cabac_residual(h, h->mb + 256 + 16*4*c, 3, c, chroma_dc_scan, h->dequant4_coeff[h->chroma_qp], 4) < 0)
return -1;
}
}
if( cbp&0x20 ) {
int c, i;
for( c = 0; c < 2; c++ ) {
for( i = 0; i < 4; i++ ) {
const int index = 16 + 4 * c + i;
//av_log( s->avctx, AV_LOG_ERROR, "INTRA C%d-AC %d\n",c, index - 16 );
if( decode_cabac_residual(h, h->mb + 16*index, 4, index - 16, scan + 1, h->dequant4_coeff[h->chroma_qp], 15) < 0)
return -1;
}
}
} else {
uint8_t * const nnz= &h->non_zero_count_cache[0];
nnz[ scan8[16]+0 ] = nnz[ scan8[16]+1 ] =nnz[ scan8[16]+8 ] =nnz[ scan8[16]+9 ] =
nnz[ scan8[20]+0 ] = nnz[ scan8[20]+1 ] =nnz[ scan8[20]+8 ] =nnz[ scan8[20]+9 ] = 0;
}
} else {
uint8_t * const nnz= &h->non_zero_count_cache[0];
fill_rectangle(&nnz[scan8[0]], 4, 4, 8, 0, 1);
nnz[ scan8[16]+0 ] = nnz[ scan8[16]+1 ] =nnz[ scan8[16]+8 ] =nnz[ scan8[16]+9 ] =
nnz[ scan8[20]+0 ] = nnz[ scan8[20]+1 ] =nnz[ scan8[20]+8 ] =nnz[ scan8[20]+9 ] = 0;
}
s->current_picture.qscale_table[mb_xy]= s->qscale;
write_back_non_zero_count(h);
return 0;
}
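/**
 * filters one vertical luma edge (16 pixels tall).
 * For bS < 4 the normal filter is applied through the DSP context with tc0
 * clipping; for bS == 4 (intra macroblock edge) the strong filter is run
 * in C over all 16 rows.
 */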
static void filter_mb_edgev( H264Context *h, uint8_t *pix, int stride, int bS[4], int qp ) {
int i, d;
const int index_a = clip( qp + h->slice_alpha_c0_offset, 0, 51 );
const int alpha = alpha_table[index_a];
const int beta = beta_table[clip( qp + h->slice_beta_offset, 0, 51 )];
if( bS[0] < 4 ) {
int8_t tc[4];
for(i=0; i<4; i++)
tc[i] = bS[i] ? tc0_table[index_a][bS[i] - 1] : -1;
h->s.dsp.h264_h_loop_filter_luma(pix, stride, alpha, beta, tc);
} else {
/* 16px edge length, because bS=4 is triggered by being at
* the edge of an intra MB, so all 4 bS are the same */
for( d = 0; d < 16; d++ ) {
const int p0 = pix[-1];
const int p1 = pix[-2];
const int p2 = pix[-3];
const int q0 = pix[0];
const int q1 = pix[1];
const int q2 = pix[2];
if( ABS( p0 - q0 ) < alpha &&
ABS( p1 - p0 ) < beta &&
ABS( q1 - q0 ) < beta ) {
if(ABS( p0 - q0 ) < (( alpha >> 2 ) + 2 )){
if( ABS( p2 - p0 ) < beta)
{
const int p3 = pix[-4];
/* p0', p1', p2' */
pix[-1] = ( p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4 ) >> 3;
pix[-2] = ( p2 + p1 + p0 + q0 + 2 ) >> 2;
pix[-3] = ( 2*p3 + 3*p2 + p1 + p0 + q0 + 4 ) >> 3;
} else {
/* p0' */
pix[-1] = ( 2*p1 + p0 + q1 + 2 ) >> 2;
}
if( ABS( q2 - q0 ) < beta)
{
const int q3 = pix[3];
/* q0', q1', q2' */
pix[0] = ( p1 + 2*p0 + 2*q0 + 2*q1 + q2 + 4 ) >> 3;
pix[1] = ( p0 + q0 + q1 + q2 + 2 ) >> 2;
pix[2] = ( 2*q3 + 3*q2 + q1 + q0 + p0 + 4 ) >> 3;
} else {
/* q0' */
pix[0] = ( 2*q1 + q0 + p1 + 2 ) >> 2;
}
}else{
/* p0', q0' */
pix[-1] = ( 2*p1 + p0 + q1 + 2 ) >> 2;
pix[ 0] = ( 2*q1 + q0 + p1 + 2 ) >> 2;
}
//tprintf("filter_mb_edgev i:%d d:%d\n# bS:4 -> [%02x, %02x, %02x, %02x, %02x, %02x] =>[%02x, %02x, %02x, %02x]\n", i, d, p2, p1, p0, q0, q1, q2, pix[-2], pix[-1], pix[0], pix[1]);
}
pix += stride;
}
}
}
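/**
 * filters one vertical chroma edge; chroma uses tc0 + 1 as clipping value
 * and a dedicated DSP routine for the intra (bS == 4) case.
 */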
static void filter_mb_edgecv( H264Context *h, uint8_t *pix, int stride, int bS[4], int qp ) {
int i;
const int index_a = clip( qp + h->slice_alpha_c0_offset, 0, 51 );
const int alpha = alpha_table[index_a];
const int beta = beta_table[clip( qp + h->slice_beta_offset, 0, 51 )];
if( bS[0] < 4 ) {
int8_t tc[4];
for(i=0; i<4; i++)
tc[i] = bS[i] ? tc0_table[index_a][bS[i] - 1] + 1 : 0;
h->s.dsp.h264_h_loop_filter_chroma(pix, stride, alpha, beta, tc);
} else {
h->s.dsp.h264_h_loop_filter_chroma_intra(pix, stride, alpha, beta);
}
}
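/**
 * filters the left luma edge of a macroblock in an MBAFF frame where the
 * current and left macroblock pairs differ in field/frame coding, so each
 * of the 16 rows selects its own bS (out of 8) and QP (out of 2).
 */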
static void filter_mb_mbaff_edgev( H264Context *h, uint8_t *pix, int stride, int bS[8], int qp[2] ) {
int i;
for( i = 0; i < 16; i++, pix += stride) {
int index_a;
int alpha;
int beta;
int qp_index;
int bS_index = (i >> 1);
if (h->mb_field_decoding_flag) {
bS_index &= ~1;
bS_index |= (i & 1);
}
if( bS[bS_index] == 0 ) {
continue;
}
qp_index = h->mb_field_decoding_flag ? (i & 1) : (i >> 3);
index_a = clip( qp[qp_index] + h->slice_alpha_c0_offset, 0, 51 );
alpha = alpha_table[index_a];
beta = beta_table[clip( qp[qp_index] + h->slice_beta_offset, 0, 51 )];
if( bS[bS_index] < 4 ) {
const int tc0 = tc0_table[index_a][bS[bS_index] - 1];
/* 4px edge length */
const int p0 = pix[-1];
const int p1 = pix[-2];
const int p2 = pix[-3];
const int q0 = pix[0];
const int q1 = pix[1];
const int q2 = pix[2];
if( ABS( p0 - q0 ) < alpha &&
ABS( p1 - p0 ) < beta &&
ABS( q1 - q0 ) < beta ) {
int tc = tc0;
int i_delta;
if( ABS( p2 - p0 ) < beta ) {
pix[-2] = p1 + clip( ( p2 + ( ( p0 + q0 + 1 ) >> 1 ) - ( p1 << 1 ) ) >> 1, -tc0, tc0 );
tc++;
}
if( ABS( q2 - q0 ) < beta ) {
pix[1] = q1 + clip( ( q2 + ( ( p0 + q0 + 1 ) >> 1 ) - ( q1 << 1 ) ) >> 1, -tc0, tc0 );
tc++;
}
i_delta = clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );
pix[-1] = clip_uint8( p0 + i_delta ); /* p0' */
pix[0] = clip_uint8( q0 - i_delta ); /* q0' */
tprintf("filter_mb_mbaff_edgev i:%d, qp:%d, indexA:%d, alpha:%d, beta:%d, tc:%d\n# bS:%d -> [%02x, %02x, %02x, %02x, %02x, %02x] =>[%02x, %02x, %02x, %02x]\n", i, qp[qp_index], index_a, alpha, beta, tc, bS[bS_index], pix[-3], p1, p0, q0, q1, pix[2], p1, pix[-1], pix[0], q1);
}
}else{
/* 4px edge length */
const int p0 = pix[-1];
const int p1 = pix[-2];
const int p2 = pix[-3];
const int q0 = pix[0];
const int q1 = pix[1];
const int q2 = pix[2];
if( ABS( p0 - q0 ) < alpha &&
ABS( p1 - p0 ) < beta &&
ABS( q1 - q0 ) < beta ) {
if(ABS( p0 - q0 ) < (( alpha >> 2 ) + 2 )){
if( ABS( p2 - p0 ) < beta)
{
const int p3 = pix[-4];
/* p0', p1', p2' */
pix[-1] = ( p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4 ) >> 3;
pix[-2] = ( p2 + p1 + p0 + q0 + 2 ) >> 2;
pix[-3] = ( 2*p3 + 3*p2 + p1 + p0 + q0 + 4 ) >> 3;
} else {
/* p0' */
pix[-1] = ( 2*p1 + p0 + q1 + 2 ) >> 2;
}
if( ABS( q2 - q0 ) < beta)
{
const int q3 = pix[3];
/* q0', q1', q2' */
pix[0] = ( p1 + 2*p0 + 2*q0 + 2*q1 + q2 + 4 ) >> 3;
pix[1] = ( p0 + q0 + q1 + q2 + 2 ) >> 2;
pix[2] = ( 2*q3 + 3*q2 + q1 + q0 + p0 + 4 ) >> 3;
} else {
/* q0' */
pix[0] = ( 2*q1 + q0 + p1 + 2 ) >> 2;
}
}else{
/* p0', q0' */
pix[-1] = ( 2*p1 + p0 + q1 + 2 ) >> 2;
pix[ 0] = ( 2*q1 + q0 + p1 + 2 ) >> 2;
}
tprintf("filter_mb_mbaff_edgev i:%d, qp:%d, indexA:%d, alpha:%d, beta:%d\n# bS:4 -> [%02x, %02x, %02x, %02x, %02x, %02x] =>[%02x, %02x, %02x, %02x, %02x, %02x]\n", i, qp[qp_index], index_a, alpha, beta, p2, p1, p0, q0, q1, q2, pix[-3], pix[-2], pix[-1], pix[0], pix[1], pix[2]);
}
}
}
}
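/**
 * chroma counterpart of filter_mb_mbaff_edgev, covering 8 rows.
 */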
static void filter_mb_mbaff_edgecv( H264Context *h, uint8_t *pix, int stride, int bS[4], int qp[2] ) {
int i;
for( i = 0; i < 8; i++, pix += stride) {
int index_a;
int alpha;
int beta;
int qp_index;
int bS_index = i;
if( bS[bS_index] == 0 ) {
continue;
}
qp_index = h->mb_field_decoding_flag ? (i & 1) : (i >> 3);
index_a = clip( qp[qp_index] + h->slice_alpha_c0_offset, 0, 51 );
alpha = alpha_table[index_a];
beta = beta_table[clip( qp[qp_index] + h->slice_beta_offset, 0, 51 )];
if( bS[bS_index] < 4 ) {
const int tc = tc0_table[index_a][bS[bS_index] - 1] + 1;
/* 2px edge length (because we use the same bS as for luma) */
const int p0 = pix[-1];
const int p1 = pix[-2];
const int q0 = pix[0];
const int q1 = pix[1];
if( ABS( p0 - q0 ) < alpha &&
ABS( p1 - p0 ) < beta &&
ABS( q1 - q0 ) < beta ) {
const int i_delta = clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );
pix[-1] = clip_uint8( p0 + i_delta ); /* p0' */
pix[0] = clip_uint8( q0 - i_delta ); /* q0' */
tprintf("filter_mb_mbaff_edgecv i:%d, qp:%d, indexA:%d, alpha:%d, beta:%d, tc:%d\n# bS:%d -> [%02x, %02x, %02x, %02x, %02x, %02x] =>[%02x, %02x, %02x, %02x]\n", i, qp[qp_index], index_a, alpha, beta, tc, bS[bS_index], pix[-3], p1, p0, q0, q1, pix[2], p1, pix[-1], pix[0], q1);
}
}else{
const int p0 = pix[-1];
const int p1 = pix[-2];
const int q0 = pix[0];
const int q1 = pix[1];
if( ABS( p0 - q0 ) < alpha &&
ABS( p1 - p0 ) < beta &&
ABS( q1 - q0 ) < beta ) {
pix[-1] = ( 2*p1 + p0 + q1 + 2 ) >> 2; /* p0' */
pix[0] = ( 2*q1 + q0 + p1 + 2 ) >> 2; /* q0' */
tprintf("filter_mb_mbaff_edgecv i:%d\n# bS:4 -> [%02x, %02x, %02x, %02x, %02x, %02x] =>[%02x, %02x, %02x, %02x, %02x, %02x]\n", i, pix[-3], p1, p0, q0, q1, pix[2], pix[-3], pix[-2], pix[-1], pix[0], pix[1], pix[2]);
}
}
}
}
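/**
 * filters one horizontal luma edge; same logic as filter_mb_edgev, but the
 * samples are addressed across lines (multiples of the stride) instead of
 * within a line.
 */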
static void filter_mb_edgeh( H264Context *h, uint8_t *pix, int stride, int bS[4], int qp ) {
int i, d;
const int index_a = clip( qp + h->slice_alpha_c0_offset, 0, 51 );
const int alpha = alpha_table[index_a];
const int beta = beta_table[clip( qp + h->slice_beta_offset, 0, 51 )];
const int pix_next = stride;
if( bS[0] < 4 ) {
int8_t tc[4];
for(i=0; i<4; i++)
tc[i] = bS[i] ? tc0_table[index_a][bS[i] - 1] : -1;
h->s.dsp.h264_v_loop_filter_luma(pix, stride, alpha, beta, tc);
} else {
/* 16px edge length, see filter_mb_edgev */
for( d = 0; d < 16; d++ ) {
const int p0 = pix[-1*pix_next];
const int p1 = pix[-2*pix_next];
const int p2 = pix[-3*pix_next];
const int q0 = pix[0];
const int q1 = pix[1*pix_next];
const int q2 = pix[2*pix_next];
if( ABS( p0 - q0 ) < alpha &&
ABS( p1 - p0 ) < beta &&
ABS( q1 - q0 ) < beta ) {
const int p3 = pix[-4*pix_next];
const int q3 = pix[ 3*pix_next];
if(ABS( p0 - q0 ) < (( alpha >> 2 ) + 2 )){
if( ABS( p2 - p0 ) < beta) {
/* p0', p1', p2' */
pix[-1*pix_next] = ( p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4 ) >> 3;
pix[-2*pix_next] = ( p2 + p1 + p0 + q0 + 2 ) >> 2;
pix[-3*pix_next] = ( 2*p3 + 3*p2 + p1 + p0 + q0 + 4 ) >> 3;
} else {
/* p0' */
pix[-1*pix_next] = ( 2*p1 + p0 + q1 + 2 ) >> 2;
}
if( ABS( q2 - q0 ) < beta) {
/* q0', q1', q2' */
pix[0*pix_next] = ( p1 + 2*p0 + 2*q0 + 2*q1 + q2 + 4 ) >> 3;
pix[1*pix_next] = ( p0 + q0 + q1 + q2 + 2 ) >> 2;
pix[2*pix_next] = ( 2*q3 + 3*q2 + q1 + q0 + p0 + 4 ) >> 3;
} else {
/* q0' */
pix[0*pix_next] = ( 2*q1 + q0 + p1 + 2 ) >> 2;
}
}else{
/* p0', q0' */
pix[-1*pix_next] = ( 2*p1 + p0 + q1 + 2 ) >> 2;
pix[ 0*pix_next] = ( 2*q1 + q0 + p1 + 2 ) >> 2;
}
//tprintf("filter_mb_edgeh i:%d d:%d, qp:%d, indexA:%d, alpha:%d, beta:%d\n# bS:%d -> [%02x, %02x, %02x, %02x, %02x, %02x] =>[%02x, %02x, %02x, %02x]\n", i, d, qp, index_a, alpha, beta, bS[i], p2, p1, p0, q0, q1, q2, pix[-2*pix_next], pix[-pix_next], pix[0], pix[pix_next]);
}
pix++;
}
}
}
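/**
 * filters one horizontal chroma edge.
 */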
static void filter_mb_edgech( H264Context *h, uint8_t *pix, int stride, int bS[4], int qp ) {
int i;
const int index_a = clip( qp + h->slice_alpha_c0_offset, 0, 51 );
const int alpha = alpha_table[index_a];
const int beta = beta_table[clip( qp + h->slice_beta_offset, 0, 51 )];
if( bS[0] < 4 ) {
int8_t tc[4];
for(i=0; i<4; i++)
tc[i] = bS[i] ? tc0_table[index_a][bS[i] - 1] + 1 : 0;
h->s.dsp.h264_v_loop_filter_chroma(pix, stride, alpha, beta, tc);
} else {
h->s.dsp.h264_v_loop_filter_chroma_intra(pix, stride, alpha, beta);
}
}
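/**
 * applies the in-loop deblocking filter to one reconstructed macroblock:
 * derives the boundary strength bS for each vertical and horizontal edge
 * (including the MBAFF left-edge special case) and filters luma and chroma
 * accordingly.
 */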
static void filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize) {
MpegEncContext * const s = &h->s;
const int mb_xy= mb_x + mb_y*s->mb_stride;
int first_vertical_edge_done = 0;
int dir;
/* FIXME: A given frame may occupy more than one position in
* the reference list. So ref2frm should be populated with
* frame numbers, not indices. */
static const int ref2frm[18] = {-1,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
if (h->mb_aff_frame
// left mb is in picture
&& h->slice_table[mb_xy-1] != 255
// and current and left pair do not have the same interlaced type
&& (IS_INTERLACED(s->current_picture.mb_type[mb_xy]) != IS_INTERLACED(s->current_picture.mb_type[mb_xy-1]))
// and left mb is in the same slice if deblocking_filter == 2
&& (h->deblocking_filter!=2 || h->slice_table[mb_xy-1] == h->slice_table[mb_xy])) {
/* First vertical edge is different in MBAFF frames
* There are 8 different bS to compute and 2 different Qp
*/
int bS[8];
int qp[2];
int chroma_qp[2];
int i;
first_vertical_edge_done = 1;
for( i = 0; i < 8; i++ ) {
int y = i>>1;
int b_idx= 8 + 4 + 8*y;
int bn_idx= b_idx - 1;
int mbn_xy = h->mb_field_decoding_flag ? h->left_mb_xy[i>>2] : h->left_mb_xy[i&1];
if( IS_INTRA( s->current_picture.mb_type[mb_xy] ) ||
IS_INTRA( s->current_picture.mb_type[mbn_xy] ) ) {
bS[i] = 4;
} else if( h->non_zero_count_cache[b_idx] != 0 ||
/* FIXME: with 8x8dct + cavlc, should check cbp instead of nnz */
h->non_zero_count_cache[bn_idx] != 0 ) {
bS[i] = 2;
} else {
int l;
bS[i] = 0;
for( l = 0; l < 1 + (h->slice_type == B_TYPE); l++ ) {
if( ref2frm[h->ref_cache[l][b_idx]+2] != ref2frm[h->ref_cache[l][bn_idx]+2] ||
ABS( h->mv_cache[l][b_idx][0] - h->mv_cache[l][bn_idx][0] ) >= 4 ||
ABS( h->mv_cache[l][b_idx][1] - h->mv_cache[l][bn_idx][1] ) >= 4 ) {
bS[i] = 1;
break;
}
}
}
}
if(bS[0]+bS[1]+bS[2]+bS[3] != 0) {
// Do not use s->qscale as luma quantizer because it has not the same
// value in IPCM macroblocks.
qp[0] = ( s->current_picture.qscale_table[mb_xy] + s->current_picture.qscale_table[h->left_mb_xy[0]] + 1 ) >> 1;
chroma_qp[0] = ( get_chroma_qp( h->pps.chroma_qp_index_offset, s->current_picture.qscale_table[mb_xy] ) +
get_chroma_qp( h->pps.chroma_qp_index_offset, s->current_picture.qscale_table[h->left_mb_xy[0]] ) + 1 ) >> 1;
qp[1] = ( s->current_picture.qscale_table[mb_xy] + s->current_picture.qscale_table[h->left_mb_xy[1]] + 1 ) >> 1;
chroma_qp[1] = ( get_chroma_qp( h->pps.chroma_qp_index_offset, s->current_picture.qscale_table[mb_xy] ) +
get_chroma_qp( h->pps.chroma_qp_index_offset, s->current_picture.qscale_table[h->left_mb_xy[1]] ) + 1 ) >> 1;
/* Filter edge */
tprintf("filter mb:%d/%d MBAFF, QPy:%d/%d, QPc:%d/%d ls:%d uvls:%d", mb_x, mb_y, qp[0], qp[1], chroma_qp[0], chroma_qp[1], linesize, uvlinesize);
{ int i; for (i = 0; i < 8; i++) tprintf(" bS[%d]:%d", i, bS[i]); tprintf("\n"); }
filter_mb_mbaff_edgev ( h, &img_y [0], linesize, bS, qp );
filter_mb_mbaff_edgecv( h, &img_cb[0], uvlinesize, bS, chroma_qp );
filter_mb_mbaff_edgecv( h, &img_cr[0], uvlinesize, bS, chroma_qp );
}
}
/* dir : 0 -> vertical edge, 1 -> horizontal edge */
for( dir = 0; dir < 2; dir++ )
{
int edge;
const int mbm_xy = dir == 0 ? mb_xy -1 : h->top_mb_xy;
int start = h->slice_table[mbm_xy] == 255 ? 1 : 0;
if (first_vertical_edge_done) {
start = 1;
first_vertical_edge_done = 0;
}
if (h->deblocking_filter==2 && h->slice_table[mbm_xy] != h->slice_table[mb_xy])
start = 1;
/* Calculate bS */
for( edge = start; edge < 4; edge++ ) {
/* mbn_xy: neighbor macroblock */
int mbn_xy = edge > 0 ? mb_xy : mbm_xy;
int bS[4];
int qp;
if( (edge&1) && IS_8x8DCT(s->current_picture.mb_type[mb_xy]) )
continue;
if (h->mb_aff_frame && (dir == 1) && (edge == 0) && ((mb_y & 1) == 0)
&& !IS_INTERLACED(s->current_picture.mb_type[mb_xy])
&& IS_INTERLACED(s->current_picture.mb_type[mbn_xy])
) {
// This is a special case in the standard where the filtering must
// be done twice (once for each field) even if we are in a
// frame macroblock.
//
unsigned int tmp_linesize = 2 * linesize;
unsigned int tmp_uvlinesize = 2 * uvlinesize;
int mbn_xy = mb_xy - 2 * s->mb_stride;
int qp, chroma_qp;
// first filtering
if( IS_INTRA( s->current_picture.mb_type[mb_xy] ) ||
IS_INTRA( s->current_picture.mb_type[mbn_xy] ) ) {
bS[0] = bS[1] = bS[2] = bS[3] = 3;
} else {
// TODO
av_log(h->s.avctx, AV_LOG_ERROR, "both non intra (TODO)\n");
}
/* Filter edge */
// Do not use s->qscale as luma quantizer because it has not the same
// value in IPCM macroblocks.
qp = ( s->current_picture.qscale_table[mb_xy] + s->current_picture.qscale_table[mbn_xy] + 1 ) >> 1;
tprintf("filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, tmp_linesize, tmp_uvlinesize);
{ int i; for (i = 0; i < 4; i++) tprintf(" bS[%d]:%d", i, bS[i]); tprintf("\n"); }
filter_mb_edgeh( h, &img_y[0], tmp_linesize, bS, qp );
chroma_qp = ( h->chroma_qp +
get_chroma_qp( h->pps.chroma_qp_index_offset, s->current_picture.qscale_table[mbn_xy] ) + 1 ) >> 1;
filter_mb_edgech( h, &img_cb[0], tmp_uvlinesize, bS, chroma_qp );
filter_mb_edgech( h, &img_cr[0], tmp_uvlinesize, bS, chroma_qp );
// second filtering
mbn_xy += s->mb_stride;
if( IS_INTRA( s->current_picture.mb_type[mb_xy] ) ||
IS_INTRA( s->current_picture.mb_type[mbn_xy] ) ) {
bS[0] = bS[1] = bS[2] = bS[3] = 3;
} else {
// TODO
av_log(h->s.avctx, AV_LOG_ERROR, "both non intra (TODO)\n");
}
/* Filter edge */
// Do not use s->qscale as luma quantizer because it has not the same
// value in IPCM macroblocks.
qp = ( s->current_picture.qscale_table[mb_xy] + s->current_picture.qscale_table[mbn_xy] + 1 ) >> 1;
tprintf("filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, tmp_linesize, tmp_uvlinesize);
{ int i; for (i = 0; i < 4; i++) tprintf(" bS[%d]:%d", i, bS[i]); tprintf("\n"); }
filter_mb_edgeh( h, &img_y[linesize], tmp_linesize, bS, qp );
chroma_qp = ( h->chroma_qp +
get_chroma_qp( h->pps.chroma_qp_index_offset, s->current_picture.qscale_table[mbn_xy] ) + 1 ) >> 1;
filter_mb_edgech( h, &img_cb[uvlinesize], tmp_uvlinesize, bS, chroma_qp );
filter_mb_edgech( h, &img_cr[uvlinesize], tmp_uvlinesize, bS, chroma_qp );
continue;
}
if( IS_INTRA( s->current_picture.mb_type[mb_xy] ) ||
IS_INTRA( s->current_picture.mb_type[mbn_xy] ) ) {
int value;
if (edge == 0) {
if ( (!IS_INTERLACED(s->current_picture.mb_type[mb_xy]) && !IS_INTERLACED(s->current_picture.mb_type[mbm_xy]))
|| ((h->mb_aff_frame || (s->picture_structure != PICT_FRAME)) && (dir == 0))
) {
value = 4;
} else {
value = 3;
}
} else {
value = 3;
}
bS[0] = bS[1] = bS[2] = bS[3] = value;
} else {
int i;
for( i = 0; i < 4; i++ ) {
int x = dir == 0 ? edge : i;
int y = dir == 0 ? i : edge;
int b_idx= 8 + 4 + x + 8*y;
int bn_idx= b_idx - (dir ? 8:1);
if( h->non_zero_count_cache[b_idx] != 0 ||
h->non_zero_count_cache[bn_idx] != 0 ) {
bS[i] = 2;
}
else
{
int l;
bS[i] = 0;
for( l = 0; l < 1 + (h->slice_type == B_TYPE); l++ ) {
if( ref2frm[h->ref_cache[l][b_idx]+2] != ref2frm[h->ref_cache[l][bn_idx]+2] ||
ABS( h->mv_cache[l][b_idx][0] - h->mv_cache[l][bn_idx][0] ) >= 4 ||
ABS( h->mv_cache[l][b_idx][1] - h->mv_cache[l][bn_idx][1] ) >= 4 ) {
bS[i] = 1;
break;
}
}
}
}
if(bS[0]+bS[1]+bS[2]+bS[3] == 0)
continue;
}
/* Filter edge */
// Do not use s->qscale as luma quantizer because it has not the same
// value in IPCM macroblocks.
qp = ( s->current_picture.qscale_table[mb_xy] + s->current_picture.qscale_table[mbn_xy] + 1 ) >> 1;
//tprintf("filter mb:%d/%d dir:%d edge:%d, QPy:%d, QPc:%d, QPcn:%d\n", mb_x, mb_y, dir, edge, qp, h->chroma_qp, s->current_picture.qscale_table[mbn_xy]);
tprintf("filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize);
{ int i; for (i = 0; i < 4; i++) tprintf(" bS[%d]:%d", i, bS[i]); tprintf("\n"); }
if( dir == 0 ) {
filter_mb_edgev( h, &img_y[4*edge], linesize, bS, qp );
if( (edge&1) == 0 ) {
int chroma_qp = ( h->chroma_qp +
get_chroma_qp( h->pps.chroma_qp_index_offset, s->current_picture.qscale_table[mbn_xy] ) + 1 ) >> 1;
filter_mb_edgecv( h, &img_cb[2*edge], uvlinesize, bS, chroma_qp );
filter_mb_edgecv( h, &img_cr[2*edge], uvlinesize, bS, chroma_qp );
}
} else {
filter_mb_edgeh( h, &img_y[4*edge*linesize], linesize, bS, qp );
if( (edge&1) == 0 ) {
int chroma_qp = ( h->chroma_qp +
get_chroma_qp( h->pps.chroma_qp_index_offset, s->current_picture.qscale_table[mbn_xy] ) + 1 ) >> 1;
filter_mb_edgech( h, &img_cb[2*edge*uvlinesize], uvlinesize, bS, chroma_qp );
filter_mb_edgech( h, &img_cr[2*edge*uvlinesize], uvlinesize, bS, chroma_qp );
}
}
}
}
}
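/**
 * decodes all macroblocks of the current slice, using the CABAC or CAVLC
 * macroblock decoder depending on the PPS, and reports the decoded region
 * to the error resilience code.
 */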
static int decode_slice(H264Context *h){
MpegEncContext * const s = &h->s;
const int part_mask= s->partitioned_frame ? (AC_END|AC_ERROR) : 0x7F;
s->mb_skip_run= -1;
if( h->pps.cabac ) {
int i;
/* realign */
align_get_bits( &s->gb );
/* init cabac */
ff_init_cabac_states( &h->cabac, ff_h264_lps_range, ff_h264_mps_state, ff_h264_lps_state, 64 );
ff_init_cabac_decoder( &h->cabac,
s->gb.buffer + get_bits_count(&s->gb)/8,
( s->gb.size_in_bits - get_bits_count(&s->gb) + 7)/8);
/* calculate pre-state */
for( i= 0; i < 460; i++ ) {
int pre;
if( h->slice_type == I_TYPE )
pre = clip( ((cabac_context_init_I[i][0] * s->qscale) >>4 ) + cabac_context_init_I[i][1], 1, 126 );
else
pre = clip( ((cabac_context_init_PB[h->cabac_init_idc][i][0] * s->qscale) >>4 ) + cabac_context_init_PB[h->cabac_init_idc][i][1], 1, 126 );
if( pre <= 63 )
h->cabac_state[i] = 2 * ( 63 - pre ) + 0;
else
h->cabac_state[i] = 2 * ( pre - 64 ) + 1;
}
for(;;){
int ret = decode_mb_cabac(h);
int eos;
if(ret>=0) hl_decode_mb(h);
/* XXX: useless, as decode_mb_cabac doesn't support that ... */
if( ret >= 0 && h->mb_aff_frame ) { //FIXME optimal? or let mb_decode decode 16x32 ?
s->mb_y++;
if(ret>=0) ret = decode_mb_cabac(h);
if(ret>=0) hl_decode_mb(h);
s->mb_y--;
}
eos = get_cabac_terminate( &h->cabac );
if( ret < 0 || h->cabac.bytestream > h->cabac.bytestream_end + 1) {
av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding MB %d %d\n", s->mb_x, s->mb_y);
ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, (AC_ERROR|DC_ERROR|MV_ERROR)&part_mask);
return -1;
}
if( ++s->mb_x >= s->mb_width ) {
s->mb_x = 0;
ff_draw_horiz_band(s, 16*s->mb_y, 16);
++s->mb_y;
if(h->mb_aff_frame) {
++s->mb_y;
}
}
if( eos || s->mb_y >= s->mb_height ) {
tprintf("slice end %d %d\n", get_bits_count(&s->gb), s->gb.size_in_bits);
ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, (AC_END|DC_END|MV_END)&part_mask);
return 0;
}
}
} else {
for(;;){
int ret = decode_mb_cavlc(h);
if(ret>=0) hl_decode_mb(h);
if(ret>=0 && h->mb_aff_frame){ //FIXME optimal? or let mb_decode decode 16x32 ?
s->mb_y++;
ret = decode_mb_cavlc(h);
if(ret>=0) hl_decode_mb(h);
s->mb_y--;
}
if(ret<0){
av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding MB %d %d\n", s->mb_x, s->mb_y);
ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, (AC_ERROR|DC_ERROR|MV_ERROR)&part_mask);
return -1;
}
if(++s->mb_x >= s->mb_width){
s->mb_x=0;
ff_draw_horiz_band(s, 16*s->mb_y, 16);
++s->mb_y;
if(h->mb_aff_frame) {
++s->mb_y;
}
if(s->mb_y >= s->mb_height){
tprintf("slice end %d %d\n", get_bits_count(&s->gb), s->gb.size_in_bits);
if(get_bits_count(&s->gb) == s->gb.size_in_bits ) {
ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, (AC_END|DC_END|MV_END)&part_mask);
return 0;
}else{
ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END)&part_mask);
return -1;
}
}
}
if(get_bits_count(&s->gb) >= s->gb.size_in_bits && s->mb_skip_run<=0){
tprintf("slice end %d %d\n", get_bits_count(&s->gb), s->gb.size_in_bits);
if(get_bits_count(&s->gb) == s->gb.size_in_bits ){
ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, (AC_END|DC_END|MV_END)&part_mask);
return 0;
}else{
ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, (AC_ERROR|DC_ERROR|MV_ERROR)&part_mask);
return -1;
}
}
}
}
#if 0
for(;s->mb_y < s->mb_height; s->mb_y++){
for(;s->mb_x < s->mb_width; s->mb_x++){
int ret= decode_mb(h);
hl_decode_mb(h);
if(ret<0){
fprintf(stderr, "error while decoding MB %d %d\n", s->mb_x, s->mb_y);
ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, (AC_ERROR|DC_ERROR|MV_ERROR)&part_mask);
return -1;
}
if(++s->mb_x >= s->mb_width){
s->mb_x=0;
if(++s->mb_y >= s->mb_height){
if(get_bits_count(s->gb) == s->gb.size_in_bits){
ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, (AC_END|DC_END|MV_END)&part_mask);
return 0;
}else{
ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END)&part_mask);
return -1;
}
}
}
if(get_bits_count(s->?gb) >= s->gb?.size_in_bits){
if(get_bits_count(s->gb) == s->gb.size_in_bits){
ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, (AC_END|DC_END|MV_END)&part_mask);
return 0;
}else{
ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, (AC_ERROR|DC_ERROR|MV_ERROR)&part_mask);
return -1;
}
}
}
s->mb_x=0;
ff_draw_horiz_band(s, 16*s->mb_y, 16);
}
#endif
return -1; //not reached
}
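/**
 * parses an SEI "unregistered user data" payload; used here to extract the
 * x264 build number so that encoder-specific bugs can be worked around.
 */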
static int decode_unregistered_user_data(H264Context *h, int size){
MpegEncContext * const s = &h->s;
uint8_t user_data[16+256];
int e, build, i;
if(size<16)
return -1;
for(i=0; i<sizeof(user_data)-1 && i<size; i++){
user_data[i]= get_bits(&s->gb, 8);
}
user_data[i]= 0;
e= sscanf(user_data+16, "x264 - core %d"/*%s - H.264/MPEG-4 AVC codec - Copyleft 2005 - http://www.videolan.org/x264.html*/, &build);
if(e==1 && build>=0)
h->x264_build= build;
if(s->avctx->debug & FF_DEBUG_BUGS)
av_log(s->avctx, AV_LOG_DEBUG, "user data:\"%s\"\n", user_data+16);
for(; i<size; i++)
skip_bits(&s->gb, 8);
return 0;
}
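/**
 * parses SEI NAL units; payload type and size are coded as runs of 0xFF
 * bytes plus a final byte, hence the accumulation loops below.
 */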
static int decode_sei(H264Context *h){
MpegEncContext * const s = &h->s;
while(get_bits_count(&s->gb) + 16 < s->gb.size_in_bits){
int size, type;
type=0;
do{
type+= show_bits(&s->gb, 8);
}while(get_bits(&s->gb, 8) == 255);
size=0;
do{
size+= show_bits(&s->gb, 8);
}while(get_bits(&s->gb, 8) == 255);
switch(type){
case 5:
if(decode_unregistered_user_data(h, size) < 0)
return -1;
break;
default:
skip_bits(&s->gb, 8*size);
}
//FIXME check bits here
align_get_bits(&s->gb);
}
return 0;
}
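/**
 * parses (and currently discards) the HRD parameters of the VUI.
 */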
static inline void decode_hrd_parameters(H264Context *h, SPS *sps){
MpegEncContext * const s = &h->s;
int cpb_count, i;
cpb_count = get_ue_golomb(&s->gb) + 1;
get_bits(&s->gb, 4); /* bit_rate_scale */
get_bits(&s->gb, 4); /* cpb_size_scale */
for(i=0; i<cpb_count; i++){
get_ue_golomb(&s->gb); /* bit_rate_value_minus1 */
get_ue_golomb(&s->gb); /* cpb_size_value_minus1 */
get_bits1(&s->gb); /* cbr_flag */
}
get_bits(&s->gb, 5); /* initial_cpb_removal_delay_length_minus1 */
get_bits(&s->gb, 5); /* cpb_removal_delay_length_minus1 */
get_bits(&s->gb, 5); /* dpb_output_delay_length_minus1 */
get_bits(&s->gb, 5); /* time_offset_length */
}
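/**
 * parses the VUI part of the SPS; only the sample aspect ratio, the timing
 * information and the bitstream restrictions are stored, the other fields
 * are parsed and discarded.
 */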
static inline int decode_vui_parameters(H264Context *h, SPS *sps){
MpegEncContext * const s = &h->s;
int aspect_ratio_info_present_flag, aspect_ratio_idc;
int nal_hrd_parameters_present_flag, vcl_hrd_parameters_present_flag;
aspect_ratio_info_present_flag= get_bits1(&s->gb);
if( aspect_ratio_info_present_flag ) {
aspect_ratio_idc= get_bits(&s->gb, 8);
if( aspect_ratio_idc == EXTENDED_SAR ) {
sps->sar.num= get_bits(&s->gb, 16);
sps->sar.den= get_bits(&s->gb, 16);
}else if(aspect_ratio_idc < 16){
sps->sar= pixel_aspect[aspect_ratio_idc];
}else{
av_log(h->s.avctx, AV_LOG_ERROR, "illegal aspect ratio\n");
return -1;
}
}else{
sps->sar.num=
sps->sar.den= 0;
}
// s->avctx->aspect_ratio= sar_width*s->width / (float)(s->height*sar_height);
if(get_bits1(&s->gb)){ /* overscan_info_present_flag */
get_bits1(&s->gb); /* overscan_appropriate_flag */
}
if(get_bits1(&s->gb)){ /* video_signal_type_present_flag */
get_bits(&s->gb, 3); /* video_format */
get_bits1(&s->gb); /* video_full_range_flag */
if(get_bits1(&s->gb)){ /* colour_description_present_flag */
get_bits(&s->gb, 8); /* colour_primaries */
get_bits(&s->gb, 8); /* transfer_characteristics */
get_bits(&s->gb, 8); /* matrix_coefficients */
}
}
if(get_bits1(&s->gb)){ /* chroma_location_info_present_flag */
get_ue_golomb(&s->gb); /* chroma_sample_location_type_top_field */
get_ue_golomb(&s->gb); /* chroma_sample_location_type_bottom_field */
}
sps->timing_info_present_flag = get_bits1(&s->gb);
if(sps->timing_info_present_flag){
sps->num_units_in_tick = get_bits_long(&s->gb, 32);
sps->time_scale = get_bits_long(&s->gb, 32);
sps->fixed_frame_rate_flag = get_bits1(&s->gb);
}
nal_hrd_parameters_present_flag = get_bits1(&s->gb);
if(nal_hrd_parameters_present_flag)
decode_hrd_parameters(h, sps);
vcl_hrd_parameters_present_flag = get_bits1(&s->gb);
if(vcl_hrd_parameters_present_flag)
decode_hrd_parameters(h, sps);
if(nal_hrd_parameters_present_flag || vcl_hrd_parameters_present_flag)
get_bits1(&s->gb); /* low_delay_hrd_flag */
get_bits1(&s->gb); /* pic_struct_present_flag */
sps->bitstream_restriction_flag = get_bits1(&s->gb);
if(sps->bitstream_restriction_flag){
get_bits1(&s->gb); /* motion_vectors_over_pic_boundaries_flag */
get_ue_golomb(&s->gb); /* max_bytes_per_pic_denom */
get_ue_golomb(&s->gb); /* max_bits_per_mb_denom */
get_ue_golomb(&s->gb); /* log2_max_mv_length_horizontal */
get_ue_golomb(&s->gb); /* log2_max_mv_length_vertical */
sps->num_reorder_frames = get_ue_golomb(&s->gb);
get_ue_golomb(&s->gb); /* max_dec_frame_buffering */
}
return 0;
}
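/**
 * parses a sequence parameter set into h->sps_buffer[sps_id].
 */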
static inline int decode_seq_parameter_set(H264Context *h){
MpegEncContext * const s = &h->s;
int profile_idc, level_idc;
int sps_id, i;
SPS *sps;
profile_idc= get_bits(&s->gb, 8);
get_bits1(&s->gb); //constraint_set0_flag
get_bits1(&s->gb); //constraint_set1_flag
get_bits1(&s->gb); //constraint_set2_flag
get_bits1(&s->gb); //constraint_set3_flag
get_bits(&s->gb, 4); // reserved
level_idc= get_bits(&s->gb, 8);
sps_id= get_ue_golomb(&s->gb);
sps= &h->sps_buffer[ sps_id ];
sps->profile_idc= profile_idc;
sps->level_idc= level_idc;
if(sps->profile_idc >= 100){ //high profile
if(get_ue_golomb(&s->gb) == 3) //chroma_format_idc
get_bits1(&s->gb); //residual_color_transform_flag
get_ue_golomb(&s->gb); //bit_depth_luma_minus8
get_ue_golomb(&s->gb); //bit_depth_chroma_minus8
sps->transform_bypass = get_bits1(&s->gb);
if(get_bits1(&s->gb)){ //seq_scaling_matrix_present_flag
av_log(h->s.avctx, AV_LOG_ERROR, "custom scaling matrix not implemented\n");
return -1;
}
}
sps->log2_max_frame_num= get_ue_golomb(&s->gb) + 4;
sps->poc_type= get_ue_golomb(&s->gb);
if(sps->poc_type == 0){ //FIXME #define
sps->log2_max_poc_lsb= get_ue_golomb(&s->gb) + 4;
} else if(sps->poc_type == 1){//FIXME #define
sps->delta_pic_order_always_zero_flag= get_bits1(&s->gb);
sps->offset_for_non_ref_pic= get_se_golomb(&s->gb);
sps->offset_for_top_to_bottom_field= get_se_golomb(&s->gb);
sps->poc_cycle_length= get_ue_golomb(&s->gb);
for(i=0; i<sps->poc_cycle_length; i++)
sps->offset_for_ref_frame[i]= get_se_golomb(&s->gb);
}
if(sps->poc_type > 2){
av_log(h->s.avctx, AV_LOG_ERROR, "illegal POC type %d\n", sps->poc_type);
return -1;
}
sps->ref_frame_count= get_ue_golomb(&s->gb);
if(sps->ref_frame_count > MAX_PICTURE_COUNT-2){
av_log(h->s.avctx, AV_LOG_ERROR, "too many reference frames\n");
}
sps->gaps_in_frame_num_allowed_flag= get_bits1(&s->gb);
sps->mb_width= get_ue_golomb(&s->gb) + 1;
sps->mb_height= get_ue_golomb(&s->gb) + 1;
if((unsigned)sps->mb_width >= INT_MAX/16 || (unsigned)sps->mb_height >= INT_MAX/16 ||
avcodec_check_dimensions(NULL, 16*sps->mb_width, 16*sps->mb_height))
return -1;
sps->frame_mbs_only_flag= get_bits1(&s->gb);
if(!sps->frame_mbs_only_flag)
sps->mb_aff= get_bits1(&s->gb);
else
sps->mb_aff= 0;
sps->direct_8x8_inference_flag= get_bits1(&s->gb);
sps->crop= get_bits1(&s->gb);
if(sps->crop){
sps->crop_left = get_ue_golomb(&s->gb);
sps->crop_right = get_ue_golomb(&s->gb);
sps->crop_top = get_ue_golomb(&s->gb);
sps->crop_bottom= get_ue_golomb(&s->gb);
if(sps->crop_left || sps->crop_top){
av_log(h->s.avctx, AV_LOG_ERROR, "insane cropping not completely supported, this could look slightly wrong ...\n");
}
}else{
sps->crop_left =
sps->crop_right =
sps->crop_top =
sps->crop_bottom= 0;
}
sps->vui_parameters_present_flag= get_bits1(&s->gb);
if( sps->vui_parameters_present_flag )
decode_vui_parameters(h, sps);
if(s->avctx->debug&FF_DEBUG_PICT_INFO){
av_log(h->s.avctx, AV_LOG_DEBUG, "sps:%d profile:%d/%d poc:%d ref:%d %dx%d %s %s crop:%d/%d/%d/%d %s\n",
sps_id, sps->profile_idc, sps->level_idc,
sps->poc_type,
sps->ref_frame_count,
sps->mb_width, sps->mb_height,
sps->frame_mbs_only_flag ? "FRM" : (sps->mb_aff ? "MB-AFF" : "PIC-AFF"),
sps->direct_8x8_inference_flag ? "8B8" : "",
sps->crop_left, sps->crop_right,
sps->crop_top, sps->crop_bottom,
sps->vui_parameters_present_flag ? "VUI" : ""
);
}
return 0;
}
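/**
 * parses a picture parameter set into h->pps_buffer[pps_id]; FMO slice
 * group maps are not supported and their syntax is not parsed.
 */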
static inline int decode_picture_parameter_set(H264Context *h, int bit_length){
MpegEncContext * const s = &h->s;
int pps_id= get_ue_golomb(&s->gb);
PPS *pps= &h->pps_buffer[pps_id];
pps->sps_id= get_ue_golomb(&s->gb);
pps->cabac= get_bits1(&s->gb);
pps->pic_order_present= get_bits1(&s->gb);
pps->slice_group_count= get_ue_golomb(&s->gb) + 1;
if(pps->slice_group_count > 1 ){
pps->mb_slice_group_map_type= get_ue_golomb(&s->gb);
av_log(h->s.avctx, AV_LOG_ERROR, "FMO not supported\n");
switch(pps->mb_slice_group_map_type){
case 0:
#if 0
| for( i = 0; i <= num_slice_groups_minus1; i++ ) | | |
| run_length[ i ] |1 |ue(v) |
#endif
break;
case 2:
#if 0
| for( i = 0; i < num_slice_groups_minus1; i++ ) | | |
|{ | | |
| top_left_mb[ i ] |1 |ue(v) |
| bottom_right_mb[ i ] |1 |ue(v) |
| } | | |
#endif
break;
case 3:
case 4:
case 5:
#if 0
| slice_group_change_direction_flag |1 |u(1) |
| slice_group_change_rate_minus1 |1 |ue(v) |
#endif
break;
case 6:
#if 0
| slice_group_id_cnt_minus1 |1 |ue(v) |
| for( i = 0; i <= slice_group_id_cnt_minus1; i++ | | |
|) | | |
| slice_group_id[ i ] |1 |u(v) |
#endif
break;
}
}
pps->ref_count[0]= get_ue_golomb(&s->gb) + 1;
pps->ref_count[1]= get_ue_golomb(&s->gb) + 1;
if(pps->ref_count[0] > 32 || pps->ref_count[1] > 32){
av_log(h->s.avctx, AV_LOG_ERROR, "reference overflow (pps)\n");
return -1;
}
pps->weighted_pred= get_bits1(&s->gb);
pps->weighted_bipred_idc= get_bits(&s->gb, 2);
pps->init_qp= get_se_golomb(&s->gb) + 26;
pps->init_qs= get_se_golomb(&s->gb) + 26;
pps->chroma_qp_index_offset= get_se_golomb(&s->gb);
pps->deblocking_filter_parameters_present= get_bits1(&s->gb);
pps->constrained_intra_pred= get_bits1(&s->gb);
pps->redundant_pic_cnt_present = get_bits1(&s->gb);
if(get_bits_count(&s->gb) < bit_length){
pps->transform_8x8_mode= get_bits1(&s->gb);
if(get_bits1(&s->gb)){ //pic_scaling_matrix_present_flag
av_log(h->s.avctx, AV_LOG_ERROR, "custom scaling matrix not implemented\n");
return -1;
}
get_se_golomb(&s->gb); //second_chroma_qp_index_offset
}
if(s->avctx->debug&FF_DEBUG_PICT_INFO){
av_log(h->s.avctx, AV_LOG_DEBUG, "pps:%d sps:%d %s slice_groups:%d ref:%d/%d %s qp:%d/%d/%d %s %s %s %s\n",
pps_id, pps->sps_id,
pps->cabac ? "CABAC" : "CAVLC",
pps->slice_group_count,
pps->ref_count[0], pps->ref_count[1],
pps->weighted_pred ? "weighted" : "",
pps->init_qp, pps->init_qs, pps->chroma_qp_index_offset,
pps->deblocking_filter_parameters_present ? "LPAR" : "",
pps->constrained_intra_pred ? "CONSTR" : "",
pps->redundant_pic_cnt_present ? "REDU" : "",
pps->transform_8x8_mode ? "8x8DCT" : ""
);
}
return 0;
}
/**
* finds the end of the current frame in the bitstream.
* @return the position of the first byte of the next frame, or END_NOT_FOUND
*/
static int find_frame_end(H264Context *h, const uint8_t *buf, int buf_size){
int i;
uint32_t state;
ParseContext *pc = &(h->s.parse_context);
//printf("first %02X%02X%02X%02X\n", buf[0], buf[1],buf[2],buf[3]);
// mb_addr= pc->mb_addr - 1;
state= pc->state;
for(i=0; i<=buf_size; i++){
if((state&0xFFFFFF1F) == 0x101 || (state&0xFFFFFF1F) == 0x102 || (state&0xFFFFFF1F) == 0x105){
tprintf("find_frame_end new startcode = %08x, frame_start_found = %d, pos = %d\n", state, pc->frame_start_found, i);
if(pc->frame_start_found){
// If there isn't one more byte in the buffer,
// the test on first_mb_in_slice cannot be done yet,
// so do it on the next call.
if (i >= buf_size) break;
if (buf[i] & 0x80) {
// first_mb_in_slice is 0, probably the first nal of a new
// slice
tprintf("find_frame_end frame_end_found, state = %08x, pos = %d\n", state, i);
pc->state=-1;
pc->frame_start_found= 0;
return i-4;
}
}
pc->frame_start_found = 1;
}
if((state&0xFFFFFF1F) == 0x107 || (state&0xFFFFFF1F) == 0x108 || (state&0xFFFFFF1F) == 0x109){
if(pc->frame_start_found){
pc->state=-1;
pc->frame_start_found= 0;
return i-4;
}
}
if (i<buf_size)
state= (state<<8) | buf[i];
}
pc->state= state;
return END_NOT_FOUND;
}
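/**
 * parser callback: buffers input until find_frame_end() locates a complete
 * frame, which is then returned through poutbuf.
 */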
static int h264_parse(AVCodecParserContext *s,
AVCodecContext *avctx,
uint8_t **poutbuf, int *poutbuf_size,
const uint8_t *buf, int buf_size)
{
H264Context *h = s->priv_data;
ParseContext *pc = &h->s.parse_context;
int next;
next= find_frame_end(h, buf, buf_size);
if (ff_combine_frame(pc, next, (uint8_t **)&buf, &buf_size) < 0) {
*poutbuf = NULL;
*poutbuf_size = 0;
return buf_size;
}
*poutbuf = (uint8_t *)buf;
*poutbuf_size = buf_size;
return next;
}
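/**
 * returns the size of the leading SPS/PPS/AUD header block once an SPS has
 * been seen, 0 otherwise; used to split global headers from the stream.
 */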
static int h264_split(AVCodecContext *avctx,
const uint8_t *buf, int buf_size)
{
int i;
uint32_t state = -1;
int has_sps= 0;
for(i=0; i<=buf_size; i++){
if((state&0xFFFFFF1F) == 0x107)
has_sps=1;
/* if((state&0xFFFFFF1F) == 0x101 || (state&0xFFFFFF1F) == 0x102 || (state&0xFFFFFF1F) == 0x105){
}*/
if((state&0xFFFFFF00) == 0x100 && (state&0xFFFFFF1F) != 0x107 && (state&0xFFFFFF1F) != 0x108 && (state&0xFFFFFF1F) != 0x109){
if(has_sps){
while(i>4 && buf[i-5]==0) i--;
return i-4;
}
}
if (i<buf_size)
state= (state<<8) | buf[i];
}
return 0;
}
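/**
 * splits the buffer into NAL units (start-code delimited or, for AVC,
 * length prefixed), unescapes each one and dispatches it according to
 * nal_unit_type.
 */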
static int decode_nal_units(H264Context *h, uint8_t *buf, int buf_size){
MpegEncContext * const s = &h->s;
AVCodecContext * const avctx= s->avctx;
int buf_index=0;
#if 0
int i;
for(i=0; i<50; i++){
av_log(NULL, AV_LOG_ERROR,"%02X ", buf[i]);
}
#endif
h->slice_num = 0;
s->current_picture_ptr= NULL;
for(;;){
int consumed;
int dst_length;
int bit_length;
uint8_t *ptr;
int i, nalsize = 0;
if(h->is_avc) {
if(buf_index >= buf_size) break;
nalsize = 0;
for(i = 0; i < h->nal_length_size; i++)
nalsize = (nalsize << 8) | buf[buf_index++];
} else {
// start code prefix search
for(; buf_index + 3 < buf_size; buf_index++){
// this should always succeed in the first iteration
if(buf[buf_index] == 0 && buf[buf_index+1] == 0 && buf[buf_index+2] == 1)
break;
}
if(buf_index+3 >= buf_size) break;
buf_index+=3;
}
ptr= decode_nal(h, buf + buf_index, &dst_length, &consumed, h->is_avc ? nalsize : buf_size - buf_index);
if(ptr[dst_length - 1] == 0) dst_length--;
bit_length= 8*dst_length - decode_rbsp_trailing(ptr + dst_length - 1);
if(s->avctx->debug&FF_DEBUG_STARTCODE){
av_log(h->s.avctx, AV_LOG_DEBUG, "NAL %d at %d/%d length %d\n", h->nal_unit_type, buf_index, buf_size, dst_length);
}
if (h->is_avc && (nalsize != consumed))
av_log(h->s.avctx, AV_LOG_ERROR, "AVC: Consumed only %d bytes instead of %d\n", consumed, nalsize);
buf_index += consumed;
if( (s->hurry_up == 1 && h->nal_ref_idc == 0) //FIXME dont discard SEI id
||(avctx->skip_frame >= AVDISCARD_NONREF && h->nal_ref_idc == 0))
continue;
switch(h->nal_unit_type){
case NAL_IDR_SLICE:
idr(h); //FIXME ensure we don't lose frames if there is reordering
case NAL_SLICE:
init_get_bits(&s->gb, ptr, bit_length);
h->intra_gb_ptr=
h->inter_gb_ptr= &s->gb;
s->data_partitioning = 0;
if(decode_slice_header(h) < 0){
av_log(h->s.avctx, AV_LOG_ERROR, "decode_slice_header error\n");
break;
}
if(h->redundant_pic_count==0 && s->hurry_up < 5
&& (avctx->skip_frame < AVDISCARD_NONREF || h->nal_ref_idc)
&& (avctx->skip_frame < AVDISCARD_BIDIR || h->slice_type!=B_TYPE)
&& (avctx->skip_frame < AVDISCARD_NONKEY || h->slice_type==I_TYPE)
&& avctx->skip_frame < AVDISCARD_ALL)
decode_slice(h);
break;
case NAL_DPA:
init_get_bits(&s->gb, ptr, bit_length);
h->intra_gb_ptr=
h->inter_gb_ptr= NULL;
s->data_partitioning = 1;
if(decode_slice_header(h) < 0){
av_log(h->s.avctx, AV_LOG_ERROR, "decode_slice_header error\n");
}
break;
case NAL_DPB:
init_get_bits(&h->intra_gb, ptr, bit_length);
h->intra_gb_ptr= &h->intra_gb;
break;
case NAL_DPC:
init_get_bits(&h->inter_gb, ptr, bit_length);
h->inter_gb_ptr= &h->inter_gb;
if(h->redundant_pic_count==0 && h->intra_gb_ptr && s->data_partitioning
&& s->hurry_up < 5
&& (avctx->skip_frame < AVDISCARD_NONREF || h->nal_ref_idc)
&& (avctx->skip_frame < AVDISCARD_BIDIR || h->slice_type!=B_TYPE)
&& (avctx->skip_frame < AVDISCARD_NONKEY || h->slice_type==I_TYPE)
&& avctx->skip_frame < AVDISCARD_ALL)
decode_slice(h);
break;
case NAL_SEI:
init_get_bits(&s->gb, ptr, bit_length);
decode_sei(h);
break;
case NAL_SPS:
init_get_bits(&s->gb, ptr, bit_length);
decode_seq_parameter_set(h);
if(s->flags& CODEC_FLAG_LOW_DELAY)
s->low_delay=1;
if(avctx->has_b_frames < 2)
avctx->has_b_frames= !s->low_delay;
break;
case NAL_PPS:
init_get_bits(&s->gb, ptr, bit_length);
decode_picture_parameter_set(h, bit_length);
break;
case NAL_PICTURE_DELIMITER:
break;
case NAL_FILTER_DATA:
break;
default:
av_log(avctx, AV_LOG_ERROR, "Unknown NAL code: %d\n", h->nal_unit_type);
}
}
if(!s->current_picture_ptr) return buf_index; //no frame
s->current_picture_ptr->pict_type= s->pict_type;
s->current_picture_ptr->key_frame= s->pict_type == I_TYPE && h->nal_unit_type == NAL_IDR_SLICE;
h->prev_frame_num_offset= h->frame_num_offset;
h->prev_frame_num= h->frame_num;
if(s->current_picture_ptr->reference){
h->prev_poc_msb= h->poc_msb;
h->prev_poc_lsb= h->poc_lsb;
}
if(s->current_picture_ptr->reference)
execute_ref_pic_marking(h, h->mmco, h->mmco_index);
ff_er_frame_end(s);
MPV_frame_end(s);
return buf_index;
}
/**
* returns the number of bytes consumed for building the current frame
*/
static int get_consumed_bytes(MpegEncContext *s, int pos, int buf_size){
if(s->flags&CODEC_FLAG_TRUNCATED){
pos -= s->parse_context.last_index;
if(pos<0) pos=0; // FIXME remove (unneeded?)
return pos;
}else{
if(pos==0) pos=1; //avoid infinite loops (I doubt that's needed, but ...)
if(pos+10>buf_size) pos=buf_size; // oops ;)
return pos;
}
}
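/**
 * top-level decode callback: parses the avcC extradata on the first call,
 * decodes the NAL units of the packet and reorders decoded pictures into
 * display order before returning one through *data.
 */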
static int decode_frame(AVCodecContext *avctx,
void *data, int *data_size,
uint8_t *buf, int buf_size)
{
H264Context *h = avctx->priv_data;
MpegEncContext *s = &h->s;
AVFrame *pict = data;
int buf_index;
s->flags= avctx->flags;
s->flags2= avctx->flags2;
/* no supplementary picture */
if (buf_size == 0) {
return 0;
}
if(s->flags&CODEC_FLAG_TRUNCATED){
int next= find_frame_end(h, buf, buf_size);
if( ff_combine_frame(&s->parse_context, next, &buf, &buf_size) < 0 )
return buf_size;
//printf("next:%d buf_size:%d last_index:%d\n", next, buf_size, s->parse_context.last_index);
}
if(h->is_avc && !h->got_avcC) {
int i, cnt, nalsize;
unsigned char *p = avctx->extradata;
if(avctx->extradata_size < 7) {
av_log(avctx, AV_LOG_ERROR, "avcC too short\n");
return -1;
}
if(*p != 1) {
av_log(avctx, AV_LOG_ERROR, "Unknown avcC version %d\n", *p);
return -1;
}
/* sps and pps in the avcC always have length coded with 2 bytes,
so put a fake nal_length_size = 2 while parsing them */
h->nal_length_size = 2;
// Decode sps from avcC
cnt = *(p+5) & 0x1f; // Number of sps
p += 6;
for (i = 0; i < cnt; i++) {
nalsize = BE_16(p) + 2;
if(decode_nal_units(h, p, nalsize) < 0) {
av_log(avctx, AV_LOG_ERROR, "Decoding sps %d from avcC failed\n", i);
return -1;
}
p += nalsize;
}
// Decode pps from avcC
cnt = *(p++); // Number of pps
for (i = 0; i < cnt; i++) {
nalsize = BE_16(p) + 2;
if(decode_nal_units(h, p, nalsize) != nalsize) {
av_log(avctx, AV_LOG_ERROR, "Decoding pps %d from avcC failed\n", i);
return -1;
}
p += nalsize;
}
// Now store the right NAL length size, which will be used to parse all other NALs
h->nal_length_size = ((*(((char*)(avctx->extradata))+4))&0x03)+1;
// Do not reparse avcC
h->got_avcC = 1;
}
if(!h->is_avc && s->avctx->extradata_size && s->picture_number==0){
if(decode_nal_units(h, s->avctx->extradata, s->avctx->extradata_size) < 0)
return -1;
}
buf_index=decode_nal_units(h, buf, buf_size);
if(buf_index < 0)
return -1;
//FIXME do something with unavailable reference frames
// if(ret==FRAME_SKIPPED) return get_consumed_bytes(s, buf_index, buf_size);
if(!s->current_picture_ptr){
av_log(h->s.avctx, AV_LOG_DEBUG, "error, NO frame\n");
return -1;
}
{
Picture *out = s->current_picture_ptr;
#if 0 //decode order
*data_size = sizeof(AVFrame);
#else
/* Sort B-frames into display order */
Picture *cur = s->current_picture_ptr;
Picture *prev = h->delayed_output_pic;
int out_idx = 0;
int pics = 0;
int out_of_order;
int cross_idr = 0;
int dropped_frame = 0;
int i;
if(h->sps.bitstream_restriction_flag
&& s->avctx->has_b_frames < h->sps.num_reorder_frames){
s->avctx->has_b_frames = h->sps.num_reorder_frames;
s->low_delay = 0;
}
while(h->delayed_pic[pics]) pics++;
h->delayed_pic[pics++] = cur;
if(cur->reference == 0)
cur->reference = 1;
for(i=0; h->delayed_pic[i]; i++)
if(h->delayed_pic[i]->key_frame || h->delayed_pic[i]->poc==0)
cross_idr = 1;
out = h->delayed_pic[0];
for(i=1; h->delayed_pic[i] && !h->delayed_pic[i]->key_frame; i++)
if(h->delayed_pic[i]->poc < out->poc){
out = h->delayed_pic[i];
out_idx = i;
}
out_of_order = !cross_idr && prev && out->poc < prev->poc;
if(prev && pics <= s->avctx->has_b_frames)
out = prev;
else if((out_of_order && pics-1 == s->avctx->has_b_frames)
|| (s->low_delay &&
((!cross_idr && prev && out->poc > prev->poc + 2)
|| cur->pict_type == B_TYPE)))
{
s->low_delay = 0;
s->avctx->has_b_frames++;
out = prev;
}
else if(out_of_order)
out = prev;
if(out_of_order || pics > s->avctx->has_b_frames){
dropped_frame = (out != h->delayed_pic[out_idx]);
for(i=out_idx; h->delayed_pic[i]; i++)
h->delayed_pic[i] = h->delayed_pic[i+1];
}
if(prev == out && !dropped_frame)
*data_size = 0;
else
*data_size = sizeof(AVFrame);
if(prev && prev != out && prev->reference == 1)
prev->reference = 0;
h->delayed_output_pic = out;
#endif
*pict= *(AVFrame*)out;
}
assert(pict->data[0]);
ff_print_debug_info(s, pict);
//printf("out %d\n", (int)pict->data[0]);
#if 0 //?
/* Return the Picture timestamp as the frame number */
/* we subtract 1 because it is added in utils.c */
avctx->frame_number = s->picture_number - 1;
#endif
return get_consumed_bytes(s, buf_index, buf_size);
}
#if 0
static inline void fill_mb_avail(H264Context *h){
MpegEncContext * const s = &h->s;
const int mb_xy= s->mb_x + s->mb_y*s->mb_stride;
if(s->mb_y){
h->mb_avail[0]= s->mb_x && h->slice_table[mb_xy - s->mb_stride - 1] == h->slice_num;
h->mb_avail[1]= h->slice_table[mb_xy - s->mb_stride ] == h->slice_num;
h->mb_avail[2]= s->mb_x+1 < s->mb_width && h->slice_table[mb_xy - s->mb_stride + 1] == h->slice_num;
}else{
h->mb_avail[0]=
h->mb_avail[1]=
h->mb_avail[2]= 0;
}
h->mb_avail[3]= s->mb_x && h->slice_table[mb_xy - 1] == h->slice_num;
h->mb_avail[4]= 1; //FIXME move out
h->mb_avail[5]= 0; //FIXME move out
}
#endif
#if 0 //selftest
#define COUNT 8000
#define SIZE (COUNT*40)
int main(){
int i;
uint8_t temp[SIZE];
PutBitContext pb;
GetBitContext gb;
// int int_temp[10000];
DSPContext dsp;
AVCodecContext avctx;
dsputil_init(&dsp, &avctx);
init_put_bits(&pb, temp, SIZE);
printf("testing unsigned exp golomb\n");
for(i=0; i<COUNT; i++){
START_TIMER
set_ue_golomb(&pb, i);
STOP_TIMER("set_ue_golomb");
}
flush_put_bits(&pb);
init_get_bits(&gb, temp, 8*SIZE);
for(i=0; i<COUNT; i++){
int j, s;
s= show_bits(&gb, 24);
START_TIMER
j= get_ue_golomb(&gb);
if(j != i){
printf("missmatch! at %d (%d should be %d) bits:%6X\n", i, j, i, s);
// return -1;
}
STOP_TIMER("get_ue_golomb");
}
init_put_bits(&pb, temp, SIZE);
printf("testing signed exp golomb\n");
for(i=0; i<COUNT; i++){
START_TIMER
set_se_golomb(&pb, i - COUNT/2);
STOP_TIMER("set_se_golomb");
}
flush_put_bits(&pb);
init_get_bits(&gb, temp, 8*SIZE);
for(i=0; i<COUNT; i++){
int j, s;
s= show_bits(&gb, 24);
START_TIMER
j= get_se_golomb(&gb);
if(j != i - COUNT/2){
printf("missmatch! at %d (%d should be %d) bits:%6X\n", i, j, i, s);
// return -1;
}
STOP_TIMER("get_se_golomb");
}
printf("testing 4x4 (I)DCT\n");
DCTELEM block[16];
uint8_t src[16], ref[16];
uint64_t error= 0, max_error=0;
for(i=0; i<COUNT; i++){
int j;
// printf("%d %d %d\n", r1, r2, (r2-r1)*16);
for(j=0; j<16; j++){
ref[j]= random()%255;
src[j]= random()%255;
}
h264_diff_dct_c(block, src, ref, 4);
//normalize
for(j=0; j<16; j++){
// printf("%d ", block[j]);
block[j]= block[j]*4;
if(j&1) block[j]= (block[j]*4 + 2)/5;
if(j&4) block[j]= (block[j]*4 + 2)/5;
}
// printf("\n");
s->dsp.h264_idct_add(ref, block, 4);
/* for(j=0; j<16; j++){
printf("%d ", ref[j]);
}
printf("\n");*/
for(j=0; j<16; j++){
int diff= ABS(src[j] - ref[j]);
error+= diff*diff;
max_error= FFMAX(max_error, diff);
}
}
printf("error=%f max_error=%d\n", ((float)error)/COUNT/16, (int)max_error );
#if 0
printf("testing quantizer\n");
for(qp=0; qp<52; qp++){
for(i=0; i<16; i++)
src1_block[i]= src2_block[i]= random()%255;
}
#endif
printf("Testing NAL layer\n");
uint8_t bitstream[COUNT];
uint8_t nal[COUNT*2];
H264Context h;
memset(&h, 0, sizeof(H264Context));
for(i=0; i<COUNT; i++){
int zeros= i;
int nal_length;
int consumed;
int out_length;
uint8_t *out;
int j;
for(j=0; j<COUNT; j++){
bitstream[j]= (random() % 255) + 1;
}
for(j=0; j<zeros; j++){
int pos= random() % COUNT;
while(bitstream[pos] == 0){
pos++;
pos %= COUNT;
}
bitstream[pos]=0;
}
START_TIMER
nal_length= encode_nal(&h, nal, bitstream, COUNT, COUNT*2);
if(nal_length<0){
printf("encoding failed\n");
return -1;
}
out= decode_nal(&h, nal, &out_length, &consumed, nal_length);
STOP_TIMER("NAL")
if(out_length != COUNT){
printf("incorrect length %d %d\n", out_length, COUNT);
return -1;
}
if(consumed != nal_length){
printf("incorrect consumed length %d %d\n", nal_length, consumed);
return -1;
}
if(memcmp(bitstream, out, COUNT)){
printf("missmatch\n");
return -1;
}
}
printf("Testing RBSP\n");
return 0;
}
#endif
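/**
 * frees the per-decoder tables and closes the common MPEG context.
 */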
static int decode_end(AVCodecContext *avctx)
{
H264Context *h = avctx->priv_data;
MpegEncContext *s = &h->s;
av_freep(&h->rbsp_buffer);
free_tables(h); //FIXME cleanup init stuff perhaps
MPV_common_end(s);
// memset(h, 0, sizeof(H264Context));
return 0;
}
AVCodec h264_decoder = {
"h264",
CODEC_TYPE_VIDEO,
CODEC_ID_H264,
sizeof(H264Context),
decode_init,
NULL,
decode_end,
decode_frame,
/*CODEC_CAP_DRAW_HORIZ_BAND |*/ CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY,
NULL, //next
flush_dpb,
};
AVCodecParser h264_parser = {
{ CODEC_ID_H264 },
sizeof(H264Context),
NULL,
h264_parse,
ff_parse_close,
h264_split,
};
#include "svq3.c"
| gpl-2.0 |
dodocat/vlc | modules/stream_out/transcode/video.c | 1 | 31937 | /*****************************************************************************
* video.c: transcoding stream output module (video)
*****************************************************************************
* Copyright (C) 2003-2009 VLC authors and VideoLAN
* $Id$
*
* Authors: Laurent Aimar <fenrir@via.ecp.fr>
* Gildas Bazin <gbazin@videolan.org>
* Jean-Paul Saman <jpsaman #_at_# m2x dot nl>
* Antoine Cellerier <dionoea at videolan dot org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation; either version 2.1 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
*****************************************************************************/
/*****************************************************************************
* Preamble
*****************************************************************************/
#include "transcode.h"
#include <math.h>
#include <vlc_meta.h>
#include <vlc_spu.h>
#include <vlc_modules.h>
#define ENC_FRAMERATE (25 * 1000)
#define ENC_FRAMERATE_BASE 1000
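/* Default encoder frame rate as a rational: 25000/1000 = 25 fps; used below
 * when neither the encoder nor the decoded stream provides a frame rate. */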
struct decoder_owner_sys_t
{
sout_stream_sys_t *p_sys;
};
static int video_update_format_decoder( decoder_t *p_dec )
{
p_dec->fmt_out.video.i_chroma = p_dec->fmt_out.i_codec;
return 0;
}
static picture_t *video_new_buffer_decoder( decoder_t *p_dec )
{
return picture_NewFromFormat( &p_dec->fmt_out.video );
}
static picture_t *video_new_buffer_encoder( encoder_t *p_enc )
{
p_enc->fmt_in.video.i_chroma = p_enc->fmt_in.i_codec;
return picture_NewFromFormat( &p_enc->fmt_in.video );
}
static picture_t *transcode_video_filter_buffer_new( filter_t *p_filter )
{
p_filter->fmt_out.video.i_chroma = p_filter->fmt_out.i_codec;
return picture_NewFromFormat( &p_filter->fmt_out.video );
}
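/* Threading model (when i_threads >= 1): the decoder side pushes decoded
 * pictures into p_sys->pp_pics and signals p_sys->cond; EncoderThread pops
 * them, runs the encoder, and chains the resulting blocks onto
 * p_sys->p_buffers under p_sys->lock_out, where transcode_video_process()
 * later picks them up. */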
static void* EncoderThread( void *obj )
{
sout_stream_sys_t *p_sys = (sout_stream_sys_t*)obj;
sout_stream_id_sys_t *id = p_sys->id_video;
picture_t *p_pic = NULL;
int canc = vlc_savecancel ();
block_t *p_block = NULL;
for( ;; )
{
vlc_mutex_lock( &p_sys->lock_out );
while( !p_sys->b_abort &&
(p_pic = picture_fifo_Pop( p_sys->pp_pics )) == NULL )
vlc_cond_wait( &p_sys->cond, &p_sys->lock_out );
if( p_sys->b_abort && !p_pic )
{
vlc_mutex_unlock( &p_sys->lock_out );
break;
}
vlc_mutex_unlock( &p_sys->lock_out );
if( p_pic )
{
p_block = id->p_encoder->pf_encode_video( id->p_encoder, p_pic );
vlc_mutex_lock( &p_sys->lock_out );
block_ChainAppend( &p_sys->p_buffers, p_block );
vlc_mutex_unlock( &p_sys->lock_out );
picture_Release( p_pic );
}
vlc_mutex_lock( &p_sys->lock_out );
if( p_sys->b_abort )
{
vlc_mutex_unlock( &p_sys->lock_out );
break;
}
vlc_mutex_unlock( &p_sys->lock_out );
}
/* Encode what we have in the buffer on closing */
vlc_mutex_lock( &p_sys->lock_out );
while( (p_pic = picture_fifo_Pop( p_sys->pp_pics )) != NULL )
{
p_block = id->p_encoder->pf_encode_video( id->p_encoder, p_pic );
block_ChainAppend( &p_sys->p_buffers, p_block );
picture_Release( p_pic );
}
/* Now flush encoder */
do {
p_block = id->p_encoder->pf_encode_video(id->p_encoder, NULL );
block_ChainAppend( &p_sys->p_buffers, p_block );
} while( p_block );
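/* Note: encoders that buffer frames internally (e.g. for B-frame reordering)
 * return their delayed output when fed NULL, which is why the loop above
 * keeps calling pf_encode_video(NULL) until no block comes back. */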
vlc_mutex_unlock( &p_sys->lock_out );
vlc_restorecancel (canc);
return NULL;
}
int transcode_video_new( sout_stream_t *p_stream, sout_stream_id_sys_t *id )
{
sout_stream_sys_t *p_sys = p_stream->p_sys;
/* Open decoder
* Initialization of decoder structures
*/
id->p_decoder->fmt_out = id->p_decoder->fmt_in;
id->p_decoder->fmt_out.i_extra = 0;
id->p_decoder->fmt_out.p_extra = 0;
id->p_decoder->pf_decode_video = NULL;
id->p_decoder->pf_get_cc = NULL;
id->p_decoder->pf_vout_format_update = video_update_format_decoder;
id->p_decoder->pf_vout_buffer_new = video_new_buffer_decoder;
id->p_decoder->p_owner = malloc( sizeof(decoder_owner_sys_t) );
if( !id->p_decoder->p_owner )
return VLC_EGENERIC;
id->p_decoder->p_owner->p_sys = p_sys;
/* id->p_decoder->p_cfg = p_sys->p_video_cfg; */
id->p_decoder->p_module =
module_need( id->p_decoder, "decoder", "$codec", false );
if( !id->p_decoder->p_module )
{
msg_Err( p_stream, "cannot find video decoder" );
free( id->p_decoder->p_owner );
return VLC_EGENERIC;
}
/*
* Open encoder.
* Because some info about the decoded input will only be available
* once the first frame is decoded, we actually only test the availability
* of the encoder here.
*/
/* Initialization of encoder format structures */
es_format_Init( &id->p_encoder->fmt_in, id->p_decoder->fmt_in.i_cat,
id->p_decoder->fmt_out.i_codec );
id->p_encoder->fmt_in.video.i_chroma = id->p_decoder->fmt_out.i_codec;
/* The dimensions will be set properly later on.
* Just put sensible values so we can test an encoder is available. */
id->p_encoder->fmt_in.video.i_width =
id->p_encoder->fmt_out.video.i_width
? id->p_encoder->fmt_out.video.i_width
: id->p_decoder->fmt_in.video.i_width
? id->p_decoder->fmt_in.video.i_width : 16;
id->p_encoder->fmt_in.video.i_height =
id->p_encoder->fmt_out.video.i_height
? id->p_encoder->fmt_out.video.i_height
: id->p_decoder->fmt_in.video.i_height
? id->p_decoder->fmt_in.video.i_height : 16;
id->p_encoder->fmt_in.video.i_visible_width =
id->p_encoder->fmt_out.video.i_visible_width
? id->p_encoder->fmt_out.video.i_visible_width
: id->p_decoder->fmt_in.video.i_visible_width
? id->p_decoder->fmt_in.video.i_visible_width : id->p_encoder->fmt_in.video.i_width;
id->p_encoder->fmt_in.video.i_visible_height =
id->p_encoder->fmt_out.video.i_visible_height
? id->p_encoder->fmt_out.video.i_visible_height
: id->p_decoder->fmt_in.video.i_visible_height
? id->p_decoder->fmt_in.video.i_visible_height : id->p_encoder->fmt_in.video.i_height;
id->p_encoder->i_threads = p_sys->i_threads;
id->p_encoder->p_cfg = p_sys->p_video_cfg;
id->p_encoder->p_module =
module_need( id->p_encoder, "encoder", p_sys->psz_venc, true );
if( !id->p_encoder->p_module )
{
msg_Err( p_stream, "cannot find video encoder (module:%s fourcc:%4.4s). Take a look few lines earlier to see possible reason.",
p_sys->psz_venc ? p_sys->psz_venc : "any",
(char *)&p_sys->i_vcodec );
module_unneed( id->p_decoder, id->p_decoder->p_module );
id->p_decoder->p_module = 0;
free( id->p_decoder->p_owner );
return VLC_EGENERIC;
}
/* Close the encoder.
* We'll open it only when we have the first frame. */
module_unneed( id->p_encoder, id->p_encoder->p_module );
if( id->p_encoder->fmt_out.p_extra )
{
free( id->p_encoder->fmt_out.p_extra );
id->p_encoder->fmt_out.p_extra = NULL;
id->p_encoder->fmt_out.i_extra = 0;
}
id->p_encoder->p_module = NULL;
if( p_sys->i_threads >= 1 )
{
int i_priority = p_sys->b_high_priority ? VLC_THREAD_PRIORITY_OUTPUT :
VLC_THREAD_PRIORITY_VIDEO;
p_sys->id_video = id;
vlc_mutex_init( &p_sys->lock_out );
vlc_cond_init( &p_sys->cond );
p_sys->pp_pics = picture_fifo_New();
if( p_sys->pp_pics == NULL )
{
msg_Err( p_stream, "cannot create picture fifo" );
vlc_mutex_destroy( &p_sys->lock_out );
vlc_cond_destroy( &p_sys->cond );
module_unneed( id->p_decoder, id->p_decoder->p_module );
id->p_decoder->p_module = NULL;
free( id->p_decoder->p_owner );
return VLC_ENOMEM;
}
p_sys->p_buffers = NULL;
p_sys->b_abort = false;
if( vlc_clone( &p_sys->thread, EncoderThread, p_sys, i_priority ) )
{
msg_Err( p_stream, "cannot spawn encoder thread" );
vlc_mutex_destroy( &p_sys->lock_out );
vlc_cond_destroy( &p_sys->cond );
picture_fifo_Delete( p_sys->pp_pics );
module_unneed( id->p_decoder, id->p_decoder->p_module );
id->p_decoder->p_module = NULL;
free( id->p_decoder->p_owner );
return VLC_EGENERIC;
}
}
return VLC_SUCCESS;
}
static void transcode_video_filter_init( sout_stream_t *p_stream,
sout_stream_id_sys_t *id )
{
filter_owner_t owner = {
.sys = p_stream->p_sys,
.video = {
.buffer_new = transcode_video_filter_buffer_new,
},
};
es_format_t *p_fmt_out = &id->p_decoder->fmt_out;
id->p_encoder->fmt_in.video.i_chroma = id->p_encoder->fmt_in.i_codec;
id->p_f_chain = filter_chain_NewVideo( p_stream, false, &owner );
filter_chain_Reset( id->p_f_chain, p_fmt_out, p_fmt_out );
/* Deinterlace */
if( p_stream->p_sys->b_deinterlace )
{
filter_chain_AppendFilter( id->p_f_chain,
p_stream->p_sys->psz_deinterlace,
p_stream->p_sys->p_deinterlace_cfg,
&id->p_decoder->fmt_out,
&id->p_decoder->fmt_out );
p_fmt_out = filter_chain_GetFmtOut( id->p_f_chain );
}
if( p_stream->p_sys->b_master_sync )
{
filter_chain_AppendFilter( id->p_f_chain,
"fps",
NULL,
p_fmt_out,
&id->p_encoder->fmt_in );
p_fmt_out = filter_chain_GetFmtOut( id->p_f_chain );
}
/* Check that we have visible_width/height*/
if( !p_fmt_out->video.i_visible_height )
p_fmt_out->video.i_visible_height = p_fmt_out->video.i_height;
if( !p_fmt_out->video.i_visible_width )
p_fmt_out->video.i_visible_width = p_fmt_out->video.i_width;
if( p_stream->p_sys->psz_vf2 )
{
id->p_uf_chain = filter_chain_NewVideo( p_stream, true, &owner );
filter_chain_Reset( id->p_uf_chain, p_fmt_out,
&id->p_encoder->fmt_in );
if( p_fmt_out->video.i_chroma != id->p_encoder->fmt_in.video.i_chroma )
{
filter_chain_AppendFilter( id->p_uf_chain,
NULL, NULL,
p_fmt_out,
&id->p_encoder->fmt_in );
}
filter_chain_AppendFromString( id->p_uf_chain, p_stream->p_sys->psz_vf2 );
p_fmt_out = filter_chain_GetFmtOut( id->p_uf_chain );
es_format_Copy( &id->p_encoder->fmt_in, p_fmt_out );
id->p_encoder->fmt_out.video.i_width =
id->p_encoder->fmt_in.video.i_width;
id->p_encoder->fmt_out.video.i_height =
id->p_encoder->fmt_in.video.i_height;
id->p_encoder->fmt_out.video.i_sar_num =
id->p_encoder->fmt_in.video.i_sar_num;
id->p_encoder->fmt_out.video.i_sar_den =
id->p_encoder->fmt_in.video.i_sar_den;
}
}
/* Take care of the scaling and chroma conversions. */
static void conversion_video_filter_append( sout_stream_id_sys_t *id )
{
const es_format_t *p_fmt_out = &id->p_decoder->fmt_out;
if( id->p_f_chain )
p_fmt_out = filter_chain_GetFmtOut( id->p_f_chain );
if( id->p_uf_chain )
p_fmt_out = filter_chain_GetFmtOut( id->p_uf_chain );
if( ( p_fmt_out->video.i_chroma != id->p_encoder->fmt_in.video.i_chroma ) ||
( p_fmt_out->video.i_width != id->p_encoder->fmt_in.video.i_width ) ||
( p_fmt_out->video.i_height != id->p_encoder->fmt_in.video.i_height ) )
{
filter_chain_AppendFilter( id->p_uf_chain ? id->p_uf_chain : id->p_f_chain,
NULL, NULL,
p_fmt_out,
&id->p_encoder->fmt_in );
}
}
static void transcode_video_encoder_init( sout_stream_t *p_stream,
sout_stream_id_sys_t *id )
{
sout_stream_sys_t *p_sys = p_stream->p_sys;
const es_format_t *p_fmt_out = &id->p_decoder->fmt_out;
if( id->p_f_chain ) {
p_fmt_out = filter_chain_GetFmtOut( id->p_f_chain );
}
if( id->p_uf_chain ) {
p_fmt_out = filter_chain_GetFmtOut( id->p_uf_chain );
}
/* Calculate scaling
* width/height of source */
int i_src_visible_width = p_fmt_out->video.i_visible_width;
int i_src_visible_height = p_fmt_out->video.i_visible_height;
if (i_src_visible_width == 0)
i_src_visible_width = p_fmt_out->video.i_width;
if (i_src_visible_height == 0)
i_src_visible_height = p_fmt_out->video.i_height;
/* width/height scaling */
float f_scale_width = 1;
float f_scale_height = 1;
/* aspect ratio */
float f_aspect = (double)p_fmt_out->video.i_sar_num *
p_fmt_out->video.i_width /
p_fmt_out->video.i_sar_den /
p_fmt_out->video.i_height;
msg_Dbg( p_stream, "decoder aspect is %f:1", (double) f_aspect );
/* Change f_aspect from source frame to source pixel */
f_aspect = f_aspect * i_src_visible_height / i_src_visible_width;
msg_Dbg( p_stream, "source pixel aspect is %f:1", (double) f_aspect );
/* Calculate scaling factor for specified parameters */
if( id->p_encoder->fmt_out.video.i_visible_width <= 0 &&
id->p_encoder->fmt_out.video.i_visible_height <= 0 && p_sys->f_scale )
{
/* Global scaling. Make sure width will remain a factor of 16 */
float f_real_scale;
int i_new_height;
int i_new_width = i_src_visible_width * p_sys->f_scale;
if( i_new_width % 16 <= 7 && i_new_width >= 16 )
i_new_width -= i_new_width % 16;
else
i_new_width += 16 - i_new_width % 16;
f_real_scale = (float)( i_new_width ) / (float) i_src_visible_width;
i_new_height = __MAX( 16, i_src_visible_height * (float)f_real_scale );
f_scale_width = f_real_scale;
f_scale_height = (float) i_new_height / (float) i_src_visible_height;
}
else if( id->p_encoder->fmt_out.video.i_visible_width > 0 &&
id->p_encoder->fmt_out.video.i_visible_height <= 0 )
{
/* Only width specified */
f_scale_width = (float)id->p_encoder->fmt_out.video.i_visible_width/i_src_visible_width;
f_scale_height = f_scale_width;
}
else if( id->p_encoder->fmt_out.video.i_visible_width <= 0 &&
id->p_encoder->fmt_out.video.i_visible_height > 0 )
{
/* Only height specified */
f_scale_height = (float)id->p_encoder->fmt_out.video.i_visible_height/i_src_visible_height;
f_scale_width = f_scale_height;
}
else if( id->p_encoder->fmt_out.video.i_visible_width > 0 &&
id->p_encoder->fmt_out.video.i_visible_height > 0 )
{
/* Width and height specified */
f_scale_width = (float)id->p_encoder->fmt_out.video.i_visible_width/i_src_visible_width;
f_scale_height = (float)id->p_encoder->fmt_out.video.i_visible_height/i_src_visible_height;
}
/* check maxwidth and maxheight */
if( p_sys->i_maxwidth && f_scale_width > (float)p_sys->i_maxwidth /
i_src_visible_width )
{
f_scale_width = (float)p_sys->i_maxwidth / i_src_visible_width;
}
if( p_sys->i_maxheight && f_scale_height > (float)p_sys->i_maxheight /
i_src_visible_height )
{
f_scale_height = (float)p_sys->i_maxheight / i_src_visible_height;
}
/* Change aspect ratio from source pixel to scaled pixel */
f_aspect = f_aspect * f_scale_height / f_scale_width;
msg_Dbg( p_stream, "scaled pixel aspect is %f:1", (double) f_aspect );
/* f_scale_width and f_scale_height are now final */
/* Calculate width, height from scaling
* Make sure its multiple of 2
*/
/* width/height of output stream */
int i_dst_visible_width = 2 * lroundf(f_scale_width*i_src_visible_width/2);
int i_dst_visible_height = 2 * lroundf(f_scale_height*i_src_visible_height/2);
int i_dst_width = 2 * lroundf(f_scale_width*p_fmt_out->video.i_width/2);
int i_dst_height = 2 * lroundf(f_scale_height*p_fmt_out->video.i_height/2);
/* Change aspect ratio from scaled pixel to output frame */
f_aspect = f_aspect * i_dst_visible_width / i_dst_visible_height;
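/* Worked example (illustrative numbers, not from any particular stream):
 * a 704x576 source with SAR 12:11 gives a frame aspect of
 * 12*704/(11*576) = 4:3 and a pixel aspect of 4/3 * 576/704 = 12:11;
 * scaling uniformly by 0.5 leaves the pixel aspect untouched, and the
 * resulting 352x288 frame recovers 12/11 * 352/288 = 4:3 again. */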
/* Store calculated values */
id->p_encoder->fmt_out.video.i_width = i_dst_width;
id->p_encoder->fmt_out.video.i_visible_width = i_dst_visible_width;
id->p_encoder->fmt_out.video.i_height = i_dst_height;
id->p_encoder->fmt_out.video.i_visible_height = i_dst_visible_height;
id->p_encoder->fmt_in.video.i_width = i_dst_width;
id->p_encoder->fmt_in.video.i_visible_width = i_dst_visible_width;
id->p_encoder->fmt_in.video.i_height = i_dst_height;
id->p_encoder->fmt_in.video.i_visible_height = i_dst_visible_height;
msg_Dbg( p_stream, "source %ix%i, destination %ix%i",
i_src_visible_width, i_src_visible_height,
i_dst_visible_width, i_dst_visible_height
);
/* Handle frame rate conversion */
if( !id->p_encoder->fmt_out.video.i_frame_rate ||
!id->p_encoder->fmt_out.video.i_frame_rate_base )
{
if( p_fmt_out->video.i_frame_rate &&
p_fmt_out->video.i_frame_rate_base )
{
id->p_encoder->fmt_out.video.i_frame_rate =
p_fmt_out->video.i_frame_rate;
id->p_encoder->fmt_out.video.i_frame_rate_base =
p_fmt_out->video.i_frame_rate_base;
}
else
{
/* Pick a sensible default value */
id->p_encoder->fmt_out.video.i_frame_rate = ENC_FRAMERATE;
id->p_encoder->fmt_out.video.i_frame_rate_base = ENC_FRAMERATE_BASE;
}
}
id->p_encoder->fmt_in.video.orientation =
id->p_encoder->fmt_out.video.orientation =
id->p_decoder->fmt_in.video.orientation;
id->p_encoder->fmt_in.video.i_frame_rate =
id->p_encoder->fmt_out.video.i_frame_rate;
id->p_encoder->fmt_in.video.i_frame_rate_base =
id->p_encoder->fmt_out.video.i_frame_rate_base;
vlc_ureduce( &id->p_encoder->fmt_in.video.i_frame_rate,
&id->p_encoder->fmt_in.video.i_frame_rate_base,
id->p_encoder->fmt_in.video.i_frame_rate,
id->p_encoder->fmt_in.video.i_frame_rate_base,
0 );
msg_Dbg( p_stream, "source fps %d/%d, destination %d/%d",
id->p_decoder->fmt_out.video.i_frame_rate,
id->p_decoder->fmt_out.video.i_frame_rate_base,
id->p_encoder->fmt_in.video.i_frame_rate,
id->p_encoder->fmt_in.video.i_frame_rate_base );
/* Check whether a particular aspect ratio was requested */
if( id->p_encoder->fmt_out.video.i_sar_num <= 0 ||
id->p_encoder->fmt_out.video.i_sar_den <= 0 )
{
vlc_ureduce( &id->p_encoder->fmt_out.video.i_sar_num,
&id->p_encoder->fmt_out.video.i_sar_den,
(uint64_t)p_fmt_out->video.i_sar_num * i_src_visible_width * i_dst_visible_height,
(uint64_t)p_fmt_out->video.i_sar_den * i_src_visible_height * i_dst_visible_width,
0 );
}
else
{
vlc_ureduce( &id->p_encoder->fmt_out.video.i_sar_num,
&id->p_encoder->fmt_out.video.i_sar_den,
id->p_encoder->fmt_out.video.i_sar_num,
id->p_encoder->fmt_out.video.i_sar_den,
0 );
}
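/* Sanity check of the no-SAR-requested branch above (illustrative numbers):
 * scaling a 704x576 source with SAR 12:11 to 640x480 yields
 * 12*704*480 : 11*576*640 = 4055040 : 4055040 = 1:1 square pixels,
 * i.e. the display aspect ratio (4:3) is preserved. */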
id->p_encoder->fmt_in.video.i_sar_num =
id->p_encoder->fmt_out.video.i_sar_num;
id->p_encoder->fmt_in.video.i_sar_den =
id->p_encoder->fmt_out.video.i_sar_den;
msg_Dbg( p_stream, "encoder aspect is %i:%i",
id->p_encoder->fmt_out.video.i_sar_num * id->p_encoder->fmt_out.video.i_width,
id->p_encoder->fmt_out.video.i_sar_den * id->p_encoder->fmt_out.video.i_height );
}
static int transcode_video_encoder_open( sout_stream_t *p_stream,
sout_stream_id_sys_t *id )
{
sout_stream_sys_t *p_sys = p_stream->p_sys;
msg_Dbg( p_stream, "destination (after video filters) %ix%i",
id->p_encoder->fmt_in.video.i_width,
id->p_encoder->fmt_in.video.i_height );
id->p_encoder->p_module =
module_need( id->p_encoder, "encoder", p_sys->psz_venc, true );
if( !id->p_encoder->p_module )
{
msg_Err( p_stream, "cannot find video encoder (module:%s fourcc:%4.4s)",
p_sys->psz_venc ? p_sys->psz_venc : "any",
(char *)&p_sys->i_vcodec );
return VLC_EGENERIC;
}
id->p_encoder->fmt_in.video.i_chroma = id->p_encoder->fmt_in.i_codec;
/* */
id->p_encoder->fmt_out.i_codec =
vlc_fourcc_GetCodec( VIDEO_ES, id->p_encoder->fmt_out.i_codec );
id->id = sout_StreamIdAdd( p_stream->p_next, &id->p_encoder->fmt_out );
if( !id->id )
{
msg_Err( p_stream, "cannot add this stream" );
return VLC_EGENERIC;
}
return VLC_SUCCESS;
}
void transcode_video_close( sout_stream_t *p_stream,
sout_stream_id_sys_t *id )
{
if( p_stream->p_sys->i_threads >= 1 )
{
vlc_mutex_lock( &p_stream->p_sys->lock_out );
p_stream->p_sys->b_abort = true;
vlc_cond_signal( &p_stream->p_sys->cond );
vlc_mutex_unlock( &p_stream->p_sys->lock_out );
vlc_join( p_stream->p_sys->thread, NULL );
vlc_mutex_destroy( &p_stream->p_sys->lock_out );
vlc_cond_destroy( &p_stream->p_sys->cond );
picture_fifo_Delete( p_stream->p_sys->pp_pics );
block_ChainRelease( p_stream->p_sys->p_buffers );
p_stream->p_sys->pp_pics = NULL;
}
/* Close decoder */
if( id->p_decoder->p_module )
module_unneed( id->p_decoder, id->p_decoder->p_module );
if( id->p_decoder->p_description )
vlc_meta_Delete( id->p_decoder->p_description );
free( id->p_decoder->p_owner );
/* Close encoder */
if( id->p_encoder->p_module )
module_unneed( id->p_encoder, id->p_encoder->p_module );
/* Close filters */
if( id->p_f_chain )
filter_chain_Delete( id->p_f_chain );
if( id->p_uf_chain )
filter_chain_Delete( id->p_uf_chain );
}
static void OutputFrame( sout_stream_t *p_stream, picture_t *p_pic, sout_stream_id_sys_t *id, block_t **out )
{
sout_stream_sys_t *p_sys = p_stream->p_sys;
picture_t *p_pic2 = NULL;
/*
* Encoding
*/
/* Check if we have a subpicture to overlay */
if( p_sys->p_spu )
{
video_format_t fmt = id->p_encoder->fmt_in.video;
if( fmt.i_visible_width <= 0 || fmt.i_visible_height <= 0 )
{
fmt.i_visible_width = fmt.i_width;
fmt.i_visible_height = fmt.i_height;
fmt.i_x_offset = 0;
fmt.i_y_offset = 0;
}
subpicture_t *p_subpic = spu_Render( p_sys->p_spu, NULL, &fmt, &fmt,
p_pic->date, p_pic->date, false );
/* Overlay subpicture */
if( p_subpic )
{
if( picture_IsReferenced( p_pic ) && !filter_chain_GetLength( id->p_f_chain ) )
{
/* We can't modify the picture, so we need to duplicate it;
* at this point the picture is already in p_encoder->fmt_in format. */
picture_t *p_tmp = video_new_buffer_encoder( id->p_encoder );
if( likely( p_tmp ) )
{
picture_Copy( p_tmp, p_pic );
picture_Release( p_pic );
p_pic = p_tmp;
}
}
if( unlikely( !p_sys->p_spu_blend ) )
p_sys->p_spu_blend = filter_NewBlend( VLC_OBJECT( p_sys->p_spu ), &fmt );
if( likely( p_sys->p_spu_blend ) )
picture_BlendSubpicture( p_pic, p_sys->p_spu_blend, p_subpic );
subpicture_Delete( p_subpic );
}
}
if( p_sys->i_threads == 0 )
{
block_t *p_block;
p_block = id->p_encoder->pf_encode_video( id->p_encoder, p_pic );
block_ChainAppend( out, p_block );
}
if( p_sys->i_threads )
{
vlc_mutex_lock( &p_sys->lock_out );
picture_fifo_Push( p_sys->pp_pics, p_pic );
vlc_cond_signal( &p_sys->cond );
vlc_mutex_unlock( &p_sys->lock_out );
}
if( p_sys->i_threads && p_pic2 )
picture_Release( p_pic2 );
else if ( p_sys->i_threads == 0 )
picture_Release( p_pic );
}
int transcode_video_process( sout_stream_t *p_stream, sout_stream_id_sys_t *id,
block_t *in, block_t **out )
{
sout_stream_sys_t *p_sys = p_stream->p_sys;
picture_t *p_pic = NULL;
*out = NULL;
if( unlikely( in == NULL ) )
{
if( p_sys->i_threads == 0 )
{
block_t *p_block;
do {
p_block = id->p_encoder->pf_encode_video(id->p_encoder, NULL );
block_ChainAppend( out, p_block );
} while( p_block );
}
else
{
msg_Dbg( p_stream, "Flushing thread and waiting that");
vlc_mutex_lock( &p_stream->p_sys->lock_out );
p_stream->p_sys->b_abort = true;
vlc_cond_signal( &p_stream->p_sys->cond );
vlc_mutex_unlock( &p_stream->p_sys->lock_out );
vlc_join( p_stream->p_sys->thread, NULL );
vlc_mutex_lock( &p_sys->lock_out );
*out = p_sys->p_buffers;
p_sys->p_buffers = NULL;
vlc_mutex_unlock( &p_sys->lock_out );
msg_Dbg( p_stream, "Flushing done");
}
return VLC_SUCCESS;
}
while( (p_pic = id->p_decoder->pf_decode_video( id->p_decoder, &in )) )
{
if( unlikely (
id->p_encoder->p_module &&
!video_format_IsSimilar( &id->fmt_input_video, &id->p_decoder->fmt_out.video )
)
)
{
msg_Info( p_stream, "aspect-ratio changed, reiniting. %i -> %i : %i -> %i.",
id->fmt_input_video.i_sar_num, id->p_decoder->fmt_out.video.i_sar_num,
id->fmt_input_video.i_sar_den, id->p_decoder->fmt_out.video.i_sar_den
);
/* Close filters */
if( id->p_f_chain )
filter_chain_Delete( id->p_f_chain );
id->p_f_chain = NULL;
if( id->p_uf_chain )
filter_chain_Delete( id->p_uf_chain );
id->p_uf_chain = NULL;
/* Reinitialize filters */
id->p_encoder->fmt_out.video.i_visible_width = p_sys->i_width & ~1;
id->p_encoder->fmt_out.video.i_visible_height = p_sys->i_height & ~1;
id->p_encoder->fmt_out.video.i_sar_num = id->p_encoder->fmt_out.video.i_sar_den = 0;
transcode_video_filter_init( p_stream, id );
transcode_video_encoder_init( p_stream, id );
conversion_video_filter_append( id );
memcpy( &id->fmt_input_video, &id->p_decoder->fmt_out.video, sizeof(video_format_t));
}
if( unlikely( !id->p_encoder->p_module ) )
{
if( id->p_f_chain )
filter_chain_Delete( id->p_f_chain );
if( id->p_uf_chain )
filter_chain_Delete( id->p_uf_chain );
id->p_f_chain = id->p_uf_chain = NULL;
transcode_video_filter_init( p_stream, id );
transcode_video_encoder_init( p_stream, id );
conversion_video_filter_append( id );
memcpy( &id->fmt_input_video, &id->p_decoder->fmt_out.video, sizeof(video_format_t));
if( transcode_video_encoder_open( p_stream, id ) != VLC_SUCCESS )
{
picture_Release( p_pic );
transcode_video_close( p_stream, id );
id->b_transcode = false;
return VLC_EGENERIC;
}
}
/* Run the filter and output chains; first with the picture,
* and then with NULL as many times as we need until they
* stop outputting frames.
*/
for ( ;; ) {
picture_t *p_filtered_pic = p_pic;
/* Run filter chain */
if( id->p_f_chain )
p_filtered_pic = filter_chain_VideoFilter( id->p_f_chain, p_filtered_pic );
if( !p_filtered_pic )
break;
for ( ;; ) {
picture_t *p_user_filtered_pic = p_filtered_pic;
/* Run user specified filter chain */
if( id->p_uf_chain )
p_user_filtered_pic = filter_chain_VideoFilter( id->p_uf_chain, p_user_filtered_pic );
if( !p_user_filtered_pic )
break;
OutputFrame( p_stream, p_user_filtered_pic, id, out );
p_filtered_pic = NULL;
}
p_pic = NULL;
}
}
if( p_sys->i_threads >= 1 )
{
/* Pick up any return data the encoder thread wants to output. */
vlc_mutex_lock( &p_sys->lock_out );
*out = p_sys->p_buffers;
p_sys->p_buffers = NULL;
vlc_mutex_unlock( &p_sys->lock_out );
}
return VLC_SUCCESS;
}
bool transcode_video_add( sout_stream_t *p_stream, es_format_t *p_fmt,
sout_stream_id_sys_t *id )
{
sout_stream_sys_t *p_sys = p_stream->p_sys;
msg_Dbg( p_stream,
"creating video transcoding from fcc=`%4.4s' to fcc=`%4.4s'",
(char*)&p_fmt->i_codec, (char*)&p_sys->i_vcodec );
/* Complete destination format */
id->p_encoder->fmt_out.i_codec = p_sys->i_vcodec;
id->p_encoder->fmt_out.video.i_visible_width = p_sys->i_width & ~1;
id->p_encoder->fmt_out.video.i_visible_height = p_sys->i_height & ~1;
id->p_encoder->fmt_out.i_bitrate = p_sys->i_vbitrate;
/* Build decoder -> filter -> encoder chain */
if( transcode_video_new( p_stream, id ) )
{
msg_Err( p_stream, "cannot create video chain" );
return false;
}
/* Stream will be added later on because we don't know
* all the characteristics of the decoded stream yet */
id->b_transcode = true;
if( p_sys->fps_num )
{
id->p_encoder->fmt_in.video.i_frame_rate =
id->p_encoder->fmt_out.video.i_frame_rate = p_sys->fps_num;
id->p_encoder->fmt_in.video.i_frame_rate_base =
id->p_encoder->fmt_out.video.i_frame_rate_base = p_sys->fps_den ? p_sys->fps_den : 1;
}
return true;
}
| gpl-2.0 |
iegor/kdesdk | kdbg/kdbg/debugger.cpp | 1 | 58572 | /*
* Copyright Johannes Sixt
* This file is licensed under the GNU General Public License Version 2.
* See the file COPYING in the toplevel directory of the source directory.
*/
#include "debugger.h"
#include "dbgdriver.h"
#include "pgmargs.h"
#include "typetable.h"
#include "exprwnd.h"
#include "pgmsettings.h"
#include "programconfig.h"
#include <qregexp.h>
#include <qfileinfo.h>
#include <qlistbox.h>
#include <qstringlist.h>
#include <kapplication.h>
#include <kconfig.h>
#include <klocale.h> /* i18n */
#include <kmessagebox.h>
#include <ctype.h>
#include <stdlib.h> /* strtol, atoi */
#ifdef HAVE_UNISTD_H
#include <unistd.h> /* sleep(3) */
#endif
#include "mydebug.h"
KDebugger::KDebugger(QWidget* parent,
ExprWnd* localVars,
ExprWnd* watchVars,
QListBox* backtrace) :
QObject(parent, "debugger"),
m_ttyLevel(ttyFull),
m_memoryFormat(MDTword | MDThex),
m_haveExecutable(false),
m_programActive(false),
m_programRunning(false),
m_sharedLibsListed(false),
m_typeTable(0),
m_programConfig(0),
m_d(0),
m_localVariables(*localVars),
m_watchVariables(*watchVars),
m_btWindow(*backtrace)
{
m_envVars.setAutoDelete(true);
connect(&m_localVariables, SIGNAL(expanded(QListViewItem*)),
SLOT(slotExpanding(QListViewItem*)));
connect(&m_watchVariables, SIGNAL(expanded(QListViewItem*)),
SLOT(slotExpanding(QListViewItem*)));
connect(&m_localVariables, SIGNAL(editValueCommitted(VarTree*, const QString&)),
SLOT(slotValueEdited(VarTree*, const QString&)));
connect(&m_watchVariables, SIGNAL(editValueCommitted(VarTree*, const QString&)),
SLOT(slotValueEdited(VarTree*, const QString&)));
connect(&m_btWindow, SIGNAL(highlighted(int)), SLOT(gotoFrame(int)));
emit updateUI();
}
KDebugger::~KDebugger()
{
if (m_programConfig != 0) {
saveProgramSettings();
m_programConfig->sync();
delete m_programConfig;
}
delete m_typeTable;
}
void KDebugger::saveSettings(KConfig* /*config*/)
{
}
void KDebugger::restoreSettings(KConfig* /*config*/)
{
}
//////////////////////////////////////////////////////////////////////
// external interface
const char GeneralGroup[] = "General";
const char DebuggerCmdStr[] = "DebuggerCmdStr";
const char TTYLevelEntry[] = "TTYLevel";
const char KDebugger::DriverNameEntry[] = "DriverName";
bool KDebugger::debugProgram(const QString& name,
DebuggerDriver* driver)
{
if (m_d != 0 && m_d->isRunning())
{
QApplication::setOverrideCursor(waitCursor);
stopDriver();
QApplication::restoreOverrideCursor();
if (m_d->isRunning() || m_haveExecutable) {
/* timed out! We can't really do anything useful now */
TRACE("timed out while waiting for gdb to die!");
return false;
}
delete m_d;
m_d = 0;
}
// wire up the driver
connect(driver, SIGNAL(activateFileLine(const QString&,int,const DbgAddr&)),
this, SIGNAL(activateFileLine(const QString&,int,const DbgAddr&)));
connect(driver, SIGNAL(processExited(KProcess*)), SLOT(gdbExited(KProcess*)));
connect(driver, SIGNAL(commandReceived(CmdQueueItem*,const char*)),
SLOT(parse(CmdQueueItem*,const char*)));
connect(driver, SIGNAL(wroteStdin(KProcess*)), SIGNAL(updateUI()));
connect(driver, SIGNAL(inferiorRunning()), SLOT(slotInferiorRunning()));
connect(driver, SIGNAL(enterIdleState()), SLOT(backgroundUpdate()));
connect(driver, SIGNAL(enterIdleState()), SIGNAL(updateUI()));
connect(&m_localVariables, SIGNAL(removingItem(VarTree*)),
driver, SLOT(dequeueCmdByVar(VarTree*)));
connect(&m_watchVariables, SIGNAL(removingItem(VarTree*)),
driver, SLOT(dequeueCmdByVar(VarTree*)));
// create the program settings object
openProgramConfig(name);
// get debugger command from per-program settings
if (m_programConfig != 0) {
m_programConfig->setGroup(GeneralGroup);
m_debuggerCmd = readDebuggerCmd();
// get terminal emulation level
m_ttyLevel = TTYLevel(m_programConfig->readNumEntry(TTYLevelEntry, ttyFull));
}
// the rest is read in later in the handler of DCexecutable
m_d = driver;
if (!startDriver()) {
TRACE("startDriver failed");
m_d = 0;
return false;
}
TRACE("before file cmd");
m_d->executeCmd(DCexecutable, name);
m_executable = name;
// set remote target
if (!m_remoteDevice.isEmpty()) {
m_d->executeCmd(DCtargetremote, m_remoteDevice);
m_d->queueCmd(DCbt, DebuggerDriver::QMoverride);
m_d->queueCmd(DCframe, 0, DebuggerDriver::QMnormal);
m_programActive = true;
m_haveExecutable = true;
}
// create a type table
m_typeTable = new ProgramTypeTable;
m_sharedLibsListed = false;
emit updateUI();
return true;
}
void KDebugger::shutdown()
{
// shut down debugger driver
if (m_d != 0 && m_d->isRunning())
{
stopDriver();
}
}
void KDebugger::useCoreFile(QString corefile, bool batch)
{
m_corefile = corefile;
if (!batch) {
CmdQueueItem* cmd = loadCoreFile();
cmd->m_byUser = true;
}
}
void KDebugger::setAttachPid(const QString& pid)
{
m_attachedPid = pid;
}
void KDebugger::programRun()
{
if (!isReady())
return;
// when program is active, but not a core file, continue
// otherwise run the program
if (m_programActive && m_corefile.isEmpty()) {
// gdb command: continue
m_d->executeCmd(DCcont, true);
} else {
// gdb command: run
m_d->executeCmd(DCrun, true);
m_corefile = QString();
m_programActive = true;
}
m_programRunning = true;
}
void KDebugger::attachProgram(const QString& pid)
{
if (!isReady())
return;
m_attachedPid = pid;
TRACE("Attaching to " + m_attachedPid);
m_d->executeCmd(DCattach, m_attachedPid);
m_programActive = true;
m_programRunning = true;
}
void KDebugger::programRunAgain()
{
if (canSingleStep()) {
m_d->executeCmd(DCrun, true);
m_corefile = QString();
m_programRunning = true;
}
}
void KDebugger::programStep()
{
if (canSingleStep()) {
m_d->executeCmd(DCstep, true);
m_programRunning = true;
}
}
void KDebugger::programNext()
{
if (canSingleStep()) {
m_d->executeCmd(DCnext, true);
m_programRunning = true;
}
}
void KDebugger::programStepi()
{
if (canSingleStep()) {
m_d->executeCmd(DCstepi, true);
m_programRunning = true;
}
}
void KDebugger::programNexti()
{
if (canSingleStep()) {
m_d->executeCmd(DCnexti, true);
m_programRunning = true;
}
}
void KDebugger::programFinish()
{
if (canSingleStep()) {
m_d->executeCmd(DCfinish, true);
m_programRunning = true;
}
}
void KDebugger::programKill()
{
if (haveExecutable() && isProgramActive()) {
if (m_programRunning) {
m_d->interruptInferior();
}
// this is an emergency command; flush queues
m_d->flushCommands(true);
m_d->executeCmd(DCkill, true);
}
}
bool KDebugger::runUntil(const QString& fileName, int lineNo)
{
if (isReady() && m_programActive && !m_programRunning) {
// strip off directory part of file name
QString file = fileName;
int offset = file.findRev("/");
if (offset >= 0) {
file.remove(0, offset+1);
}
m_d->executeCmd(DCuntil, file, lineNo, true);
m_programRunning = true;
return true;
} else {
return false;
}
}
void KDebugger::programBreak()
{
if (m_haveExecutable && m_programRunning) {
m_d->interruptInferior();
}
}
void KDebugger::programArgs(QWidget* parent)
{
if (m_haveExecutable) {
QStringList allOptions = m_d->boolOptionList();
PgmArgs dlg(parent, m_executable, m_envVars, allOptions);
dlg.setArgs(m_programArgs);
dlg.setWd(m_programWD);
dlg.setOptions(m_boolOptions);
if (dlg.exec()) {
updateProgEnvironment(dlg.args(), dlg.wd(),
dlg.envVars(), dlg.options());
}
}
}
void KDebugger::programSettings(QWidget* parent)
{
if (!m_haveExecutable)
return;
ProgramSettings dlg(parent, m_executable);
dlg.m_chooseDriver.setDebuggerCmd(m_debuggerCmd);
dlg.m_output.setTTYLevel(m_ttyLevel);
if (dlg.exec() == QDialog::Accepted)
{
m_debuggerCmd = dlg.m_chooseDriver.debuggerCmd();
m_ttyLevel = TTYLevel(dlg.m_output.ttyLevel());
}
}
bool KDebugger::setBreakpoint(QString file, int lineNo,
const DbgAddr& address, bool temporary)
{
if (!isReady()) {
return false;
}
BrkptIterator bp = breakpointByFilePos(file, lineNo, address);
if (bp == m_brkpts.end())
{
/*
* No such breakpoint, so set a new one. If we have an address, we
* set the breakpoint exactly there. Otherwise we use the file name
* plus line no.
*/
Breakpoint* bp = new Breakpoint;
bp->temporary = temporary;
if (address.isEmpty())
{
bp->fileName = file;
bp->lineNo = lineNo;
}
else
{
bp->address = address;
}
setBreakpoint(bp, false);
}
else
{
/*
* If the breakpoint is disabled, enable it; if it's enabled,
* delete that breakpoint.
*/
if (bp->enabled) {
deleteBreakpoint(bp);
} else {
enableDisableBreakpoint(bp);
}
}
return true;
}
void KDebugger::setBreakpoint(Breakpoint* bp, bool queueOnly)
{
CmdQueueItem* cmd = executeBreakpoint(bp, queueOnly);
cmd->m_brkpt = bp; // used in newBreakpoint()
}
CmdQueueItem* KDebugger::executeBreakpoint(const Breakpoint* bp, bool queueOnly)
{
CmdQueueItem* cmd;
if (!bp->text.isEmpty())
{
/*
* The breakpoint was set using the text box in the breakpoint
* list. This is the only way in which watchpoints are set.
*/
if (bp->type == Breakpoint::watchpoint) {
cmd = m_d->executeCmd(DCwatchpoint, bp->text);
} else {
cmd = m_d->executeCmd(DCbreaktext, bp->text);
}
}
else if (bp->address.isEmpty())
{
// strip off directory part of file name
QString file = bp->fileName;
int offset = file.findRev("/");
if (offset >= 0) {
file.remove(0, offset+1);
}
if (queueOnly) {
cmd = m_d->queueCmd(bp->temporary ? DCtbreakline : DCbreakline,
file, bp->lineNo, DebuggerDriver::QMoverride);
} else {
cmd = m_d->executeCmd(bp->temporary ? DCtbreakline : DCbreakline,
file, bp->lineNo);
}
}
else
{
if (queueOnly) {
cmd = m_d->queueCmd(bp->temporary ? DCtbreakaddr : DCbreakaddr,
bp->address.asString(), DebuggerDriver::QMoverride);
} else {
cmd = m_d->executeCmd(bp->temporary ? DCtbreakaddr : DCbreakaddr,
bp->address.asString());
}
}
return cmd;
}
bool KDebugger::enableDisableBreakpoint(QString file, int lineNo,
const DbgAddr& address)
{
BrkptIterator bp = breakpointByFilePos(file, lineNo, address);
return enableDisableBreakpoint(bp);
}
bool KDebugger::enableDisableBreakpoint(BrkptIterator bp)
{
if (bp == m_brkpts.end())
return false;
/*
* Toggle enabled/disabled state.
*
* The driver is not bothered if we are modifying an orphaned
* breakpoint.
*/
if (!bp->isOrphaned()) {
if (!canChangeBreakpoints()) {
return false;
}
m_d->executeCmd(bp->enabled ? DCdisable : DCenable, bp->id);
} else {
bp->enabled = !bp->enabled;
emit breakpointsChanged();
}
return true;
}
bool KDebugger::conditionalBreakpoint(BrkptIterator bp,
const QString& condition,
int ignoreCount)
{
if (bp == m_brkpts.end())
return false;
/*
* Change the condition and ignore count.
*
* The driver is not bothered if we are removing an orphaned
* breakpoint.
*/
if (!bp->isOrphaned()) {
if (!canChangeBreakpoints()) {
return false;
}
bool changed = false;
if (bp->condition != condition) {
// change condition
m_d->executeCmd(DCcondition, condition, bp->id);
changed = true;
}
if (bp->ignoreCount != ignoreCount) {
// change ignore count
m_d->executeCmd(DCignore, bp->id, ignoreCount);
changed = true;
}
if (changed) {
// get the changes
m_d->queueCmd(DCinfobreak, DebuggerDriver::QMoverride);
}
} else {
bp->condition = condition;
bp->ignoreCount = ignoreCount;
emit breakpointsChanged();
}
return true;
}
bool KDebugger::deleteBreakpoint(BrkptIterator bp)
{
if (bp == m_brkpts.end())
return false;
/*
* Remove the breakpoint.
*
* The driver is not bothered if we are removing an orphaned
* breakpoint.
*/
if (!bp->isOrphaned()) {
if (!canChangeBreakpoints()) {
return false;
}
m_d->executeCmd(DCdelete, bp->id);
} else {
m_brkpts.erase(bp);
emit breakpointsChanged();
}
return true;
}
bool KDebugger::canSingleStep()
{
return isReady() && m_programActive && !m_programRunning;
}
bool KDebugger::canChangeBreakpoints()
{
return isReady() && !m_programRunning;
}
bool KDebugger::canStart()
{
return isReady() && !m_programActive;
}
bool KDebugger::isReady() const
{
return m_haveExecutable &&
m_d != 0 && m_d->canExecuteImmediately();
}
bool KDebugger::isIdle() const
{
return m_d == 0 || m_d->isIdle();
}
//////////////////////////////////////////////////////////
// debugger driver
bool KDebugger::startDriver()
{
emit debuggerStarting(); /* must set m_inferiorTerminal */
/*
* If the per-program command string is empty, use the global setting
* (which might also be empty, in which case the driver uses its
* default).
*/
m_explicitKill = false;
if (!m_d->startup(m_debuggerCmd)) {
return false;
}
/*
* If we have an output terminal, we use it. Otherwise we will run the
* program with input and output redirected to /dev/null. Other
* redirections are also necessary depending on the tty emulation
* level.
*/
int redirect = RDNstdin|RDNstdout|RDNstderr; /* redirect everything */
if (!m_inferiorTerminal.isEmpty()) {
switch (m_ttyLevel) {
default:
case ttyNone:
// redirect everything
break;
case ttySimpleOutputOnly:
redirect = RDNstdin;
break;
case ttyFull:
redirect = 0;
break;
}
}
m_d->executeCmd(DCtty, m_inferiorTerminal, redirect);
return true;
}
void KDebugger::stopDriver()
{
m_explicitKill = true;
if (m_attachedPid.isEmpty()) {
m_d->terminate();
} else {
m_d->detachAndTerminate();
}
/*
* We MUST wait until the slot gdbExited() has been called. But to
* avoid a deadlock, we wait only for some certain maximum time. Should
* this timeout be reached, the only reasonable thing one could do then
* is exiting kdbg.
*/
kapp->processEvents(1000); /* ideally, this will already shut it down */
int maxTime = 20; /* about 20 seconds */
while (m_haveExecutable && maxTime > 0) {
// give gdb time to die (and send a SIGCLD)
::sleep(1);
--maxTime;
kapp->processEvents(1000);
}
}
void KDebugger::gdbExited(KProcess*)
{
/*
* Save settings, but only if gdb has already processed "info line
* main", otherwise we would save an empty config file, because it
* isn't read in until then!
*/
if (m_programConfig != 0) {
if (m_haveExecutable) {
saveProgramSettings();
m_programConfig->sync();
}
delete m_programConfig;
m_programConfig = 0;
}
// erase types
delete m_typeTable;
m_typeTable = 0;
if (m_explicitKill) {
TRACE(m_d->driverName() + " exited normally");
} else {
QString msg = i18n("%1 exited unexpectedly.\n"
"Restart the session (e.g. with File|Executable).");
KMessageBox::error(parentWidget(), msg.arg(m_d->driverName()));
}
// reset state
m_haveExecutable = false;
m_executable = "";
m_programActive = false;
m_programRunning = false;
m_explicitKill = false;
m_debuggerCmd = QString(); /* use global setting at next start! */
m_attachedPid = QString(); /* we are no longer attached to a process */
m_ttyLevel = ttyFull;
m_brkpts.clear();
// erase PC
emit updatePC(QString(), -1, DbgAddr(), 0);
}
QString KDebugger::getConfigForExe(const QString& name)
{
QFileInfo fi(name);
QString pgmConfigFile = fi.dirPath(true);
if (!pgmConfigFile.isEmpty()) {
pgmConfigFile += '/';
}
pgmConfigFile += ".kdbgrc." + fi.fileName();
TRACE("program config file = " + pgmConfigFile);
return pgmConfigFile;
}
void KDebugger::openProgramConfig(const QString& name)
{
ASSERT(m_programConfig == 0);
QString pgmConfigFile = getConfigForExe(name);
m_programConfig = new ProgramConfig(pgmConfigFile);
}
const char EnvironmentGroup[] = "Environment";
const char WatchGroup[] = "Watches";
const char FileVersion[] = "FileVersion";
const char ProgramArgs[] = "ProgramArgs";
const char WorkingDirectory[] = "WorkingDirectory";
const char OptionsSelected[] = "OptionsSelected";
const char Variable[] = "Var%d";
const char Value[] = "Value%d";
const char ExprFmt[] = "Expr%d";
void KDebugger::saveProgramSettings()
{
ASSERT(m_programConfig != 0);
m_programConfig->setGroup(GeneralGroup);
m_programConfig->writeEntry(FileVersion, 1);
m_programConfig->writeEntry(ProgramArgs, m_programArgs);
m_programConfig->writeEntry(WorkingDirectory, m_programWD);
m_programConfig->writeEntry(OptionsSelected, m_boolOptions);
m_programConfig->writeEntry(DebuggerCmdStr, m_debuggerCmd);
m_programConfig->writeEntry(TTYLevelEntry, int(m_ttyLevel));
QString driverName;
if (m_d != 0)
driverName = m_d->driverName();
m_programConfig->writeEntry(DriverNameEntry, driverName);
// write environment variables
m_programConfig->deleteGroup(EnvironmentGroup);
m_programConfig->setGroup(EnvironmentGroup);
QDictIterator<EnvVar> it = m_envVars;
EnvVar* var;
QString varName;
QString varValue;
for (int i = 0; (var = it) != 0; ++it, ++i) {
varName.sprintf(Variable, i);
varValue.sprintf(Value, i);
m_programConfig->writeEntry(varName, it.currentKey());
m_programConfig->writeEntry(varValue, var->value);
}
saveBreakpoints(m_programConfig);
// watch expressions
// first get rid of whatever was in this group
m_programConfig->deleteGroup(WatchGroup);
// then start a new group
m_programConfig->setGroup(WatchGroup);
VarTree* item = m_watchVariables.firstChild();
int watchNum = 0;
for (; item != 0; item = item->nextSibling(), ++watchNum) {
varName.sprintf(ExprFmt, watchNum);
m_programConfig->writeEntry(varName, item->getText());
}
// give others a chance
emit saveProgramSpecific(m_programConfig);
}
void KDebugger::overrideProgramArguments(const QString& args)
{
ASSERT(m_programConfig != 0);
m_programConfig->setGroup(GeneralGroup);
m_programConfig->writeEntry(ProgramArgs, args);
}
void KDebugger::restoreProgramSettings()
{
ASSERT(m_programConfig != 0);
m_programConfig->setGroup(GeneralGroup);
/*
* We ignore file version for now we will use it in the future to
* distinguish different versions of this configuration file.
*/
// m_debuggerCmd has been read in already
// m_ttyLevel has been read in already
QString pgmArgs = m_programConfig->readEntry(ProgramArgs);
QString pgmWd = m_programConfig->readEntry(WorkingDirectory);
QStringList boolOptions = m_programConfig->readListEntry(OptionsSelected);
m_boolOptions = QStringList();
// read environment variables
m_programConfig->setGroup(EnvironmentGroup);
m_envVars.clear();
QDict<EnvVar> pgmVars;
EnvVar* var;
QString varName;
QString varValue;
for (int i = 0;; ++i) {
varName.sprintf(Variable, i);
varValue.sprintf(Value, i);
if (!m_programConfig->hasKey(varName)) {
/* entry not present, assume that we've hit them all */
break;
}
QString name = m_programConfig->readEntry(varName);
if (name.isEmpty()) {
// skip empty names
continue;
}
var = new EnvVar;
var->value = m_programConfig->readEntry(varValue);
var->status = EnvVar::EVnew;
pgmVars.insert(name, var);
}
updateProgEnvironment(pgmArgs, pgmWd, pgmVars, boolOptions);
restoreBreakpoints(m_programConfig);
// watch expressions
m_programConfig->setGroup(WatchGroup);
m_watchVariables.clear();
for (int i = 0;; ++i) {
varName.sprintf(ExprFmt, i);
if (!m_programConfig->hasKey(varName)) {
/* entry not present, assume that we've hit them all */
break;
}
QString expr = m_programConfig->readEntry(varName);
if (expr.isEmpty()) {
// skip empty expressions
continue;
}
addWatch(expr);
}
// give others a chance
emit restoreProgramSpecific(m_programConfig);
}
/**
* Reads the debugger command line from the program settings. The config
* group must have been set by the caller.
*/
QString KDebugger::readDebuggerCmd()
{
QString debuggerCmd = m_programConfig->readEntry(DebuggerCmdStr);
// always let the user confirm the debugger cmd if we are root
if (::geteuid() == 0)
{
if (!debuggerCmd.isEmpty()) {
QString msg = i18n(
"The settings for this program specify "
"the following debugger command:\n%1\n"
"Shall this command be used?");
if (KMessageBox::warningYesNo(parentWidget(), msg.arg(debuggerCmd))
!= KMessageBox::Yes)
{
// don't use it
debuggerCmd = QString();
}
}
}
return debuggerCmd;
}
/*
* Breakpoints are saved one per group.
*/
const char BPGroup[] = "Breakpoint %d";
const char File[] = "File";
const char Line[] = "Line";
const char Text[] = "Text";
const char Address[] = "Address";
const char Temporary[] = "Temporary";
const char Enabled[] = "Enabled";
const char Condition[] = "Condition";
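/* With the entry names above, a saved breakpoint group looks roughly like
 * this in the .kdbgrc.<program> file (illustrative values):
 *
 *   [Breakpoint 0]
 *   File=widget.cpp
 *   Line=42
 *   Temporary=false
 *   Enabled=true
 *   Condition=count > 10
 */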
void KDebugger::saveBreakpoints(ProgramConfig* config)
{
QString groupName;
int i = 0;
for (BrkptIterator bp = m_brkpts.begin(); bp != m_brkpts.end(); ++bp)
{
if (bp->type == Breakpoint::watchpoint)
continue; /* don't save watchpoints */
groupName.sprintf(BPGroup, i++);
/* remove remnants */
config->deleteGroup(groupName);
config->setGroup(groupName);
if (!bp->text.isEmpty()) {
/*
* The breakpoint was set using the text box in the breakpoint
* list. We do not save the location by filename+line number,
* but instead honor what the user typed (a function name, for
* example, which could move between sessions).
*/
config->writeEntry(Text, bp->text);
} else if (!bp->fileName.isEmpty()) {
config->writeEntry(File, bp->fileName);
config->writeEntry(Line, bp->lineNo);
/*
* Addresses are hardly correct across sessions, so we don't
* save it.
*/
} else {
config->writeEntry(Address, bp->address.asString());
}
config->writeEntry(Temporary, bp->temporary);
config->writeEntry(Enabled, bp->enabled);
if (!bp->condition.isEmpty())
config->writeEntry(Condition, bp->condition);
// we do not save the ignore count
}
// delete remaining groups
// we recognize that a group is present if there is an Enabled entry
for (;; i++) {
groupName.sprintf(BPGroup, i);
config->setGroup(groupName);
if (!config->hasKey(Enabled)) {
/* group not present, assume that we've hit them all */
break;
}
config->deleteGroup(groupName);
}
}
void KDebugger::restoreBreakpoints(ProgramConfig* config)
{
QString groupName;
/*
* We recognize the end of the list if there is no Enabled entry
* present.
*/
for (int i = 0;; i++) {
groupName.sprintf(BPGroup, i);
config->setGroup(groupName);
if (!config->hasKey(Enabled)) {
/* group not present, assume that we've hit them all */
break;
}
Breakpoint* bp = new Breakpoint;
bp->fileName = config->readEntry(File);
bp->lineNo = config->readNumEntry(Line, -1);
bp->text = config->readEntry(Text);
bp->address = config->readEntry(Address);
// check consistency
if ((bp->fileName.isEmpty() || bp->lineNo < 0) &&
bp->text.isEmpty() &&
bp->address.isEmpty())
{
delete bp;
continue;
}
bp->enabled = config->readBoolEntry(Enabled, true);
bp->temporary = config->readBoolEntry(Temporary, false);
bp->condition = config->readEntry(Condition);
/*
* Add the breakpoint.
*/
setBreakpoint(bp, false);
// the new breakpoint is disabled or conditionalized later
// in newBreakpoint()
}
m_d->queueCmd(DCinfobreak, DebuggerDriver::QMoverride);
}
// parse output of command cmd
void KDebugger::parse(CmdQueueItem* cmd, const char* output)
{
ASSERT(cmd != 0); /* queue mustn't be empty */
TRACE(QString(__PRETTY_FUNCTION__) + " parsing " + output);
switch (cmd->m_cmd) {
case DCtargetremote:
// the output (if any) is uninteresting
case DCsetargs:
case DCtty:
// there is no output
case DCsetenv:
case DCunsetenv:
case DCsetoption:
/* if value is empty, we see output, but we don't care */
break;
case DCcd:
/* display gdb's message in the status bar */
m_d->parseChangeWD(output, m_statusMessage);
emit updateStatusMessage();
break;
case DCinitialize:
break;
case DCexecutable:
if (m_d->parseChangeExecutable(output, m_statusMessage))
{
// success; restore breakpoints etc.
if (m_programConfig != 0) {
restoreProgramSettings();
}
// load file containing main() or core file
if (!m_corefile.isEmpty())
{
// load core file
loadCoreFile();
}
else if (!m_attachedPid.isEmpty())
{
m_d->queueCmd(DCattach, m_attachedPid, DebuggerDriver::QMoverride);
m_programActive = true;
m_programRunning = true;
}
else if (!m_remoteDevice.isEmpty())
{
// handled elsewhere
}
else
{
m_d->queueCmd(DCinfolinemain, DebuggerDriver::QMnormal);
}
if (!m_statusMessage.isEmpty())
emit updateStatusMessage();
} else {
QString msg = m_d->driverName() + ": " + m_statusMessage;
KMessageBox::sorry(parentWidget(), msg);
m_executable = "";
m_corefile = ""; /* don't process core file */
m_haveExecutable = false;
}
break;
case DCcorefile:
// in any event we have an executable at this point
m_haveExecutable = true;
if (m_d->parseCoreFile(output)) {
// loading a core is like stopping at a breakpoint
m_programActive = true;
handleRunCommands(output);
// do not reset m_corefile
} else {
// report error
QString msg = m_d->driverName() + ": " + QString(output);
KMessageBox::sorry(parentWidget(), msg);
// if core file was loaded from command line, revert to info line main
if (!cmd->m_byUser) {
m_d->queueCmd(DCinfolinemain, DebuggerDriver::QMnormal);
}
m_corefile = QString(); /* core file not available any more */
}
break;
case DCinfolinemain:
// ignore the output, marked file info follows
m_haveExecutable = true;
break;
case DCinfolocals:
// parse local variables
if (output[0] != '\0') {
handleLocals(output);
}
break;
case DCinforegisters:
handleRegisters(output);
break;
case DCexamine:
handleMemoryDump(output);
break;
case DCinfoline:
handleInfoLine(cmd, output);
break;
case DCdisassemble:
handleDisassemble(cmd, output);
break;
case DCframe:
handleFrameChange(output);
updateAllExprs();
break;
case DCbt:
handleBacktrace(output);
updateAllExprs();
break;
case DCprint:
handlePrint(cmd, output);
break;
case DCprintDeref:
handlePrintDeref(cmd, output);
break;
case DCattach:
m_haveExecutable = true;
// fall through
case DCrun:
case DCcont:
case DCstep:
case DCstepi:
case DCnext:
case DCnexti:
case DCfinish:
case DCuntil:
case DCthread:
handleRunCommands(output);
break;
case DCkill:
m_programRunning = m_programActive = false;
// erase PC
emit updatePC(QString(), -1, DbgAddr(), 0);
break;
case DCbreaktext:
case DCbreakline:
case DCtbreakline:
case DCbreakaddr:
case DCtbreakaddr:
case DCwatchpoint:
newBreakpoint(cmd, output);
// fall through
case DCdelete:
case DCenable:
case DCdisable:
// these commands need immediate response
m_d->queueCmd(DCinfobreak, DebuggerDriver::QMoverrideMoreEqual);
break;
case DCinfobreak:
// note: this handler must not enqueue a command, since
// DCinfobreak is used at various different places.
updateBreakList(output);
break;
case DCfindType:
handleFindType(cmd, output);
break;
case DCprintStruct:
case DCprintQStringStruct:
case DCprintWChar:
handlePrintStruct(cmd, output);
break;
case DCinfosharedlib:
handleSharedLibs(output);
break;
case DCcondition:
case DCignore:
// we are not interested in the output
break;
case DCinfothreads:
handleThreadList(output);
break;
case DCsetpc:
handleSetPC(output);
break;
case DCsetvariable:
handleSetVariable(cmd, output);
break;
}
}
void KDebugger::backgroundUpdate()
{
/*
* If there are still expressions that need to be updated, then do so.
*/
if (m_programActive)
evalExpressions();
}
void KDebugger::handleRunCommands(const char* output)
{
uint flags = m_d->parseProgramStopped(output, m_statusMessage);
emit updateStatusMessage();
m_programActive = flags & DebuggerDriver::SFprogramActive;
// refresh files if necessary
if (flags & DebuggerDriver::SFrefreshSource) {
TRACE("re-reading files");
emit executableUpdated();
}
/*
* Try to set any orphaned breakpoints now.
*/
for (BrkptIterator bp = m_brkpts.begin(); bp != m_brkpts.end(); ++bp)
{
if (bp->isOrphaned()) {
TRACE(QString("re-trying brkpt loc: %2 file: %3 line: %1")
.arg(bp->lineNo).arg(bp->location, bp->fileName));
CmdQueueItem* cmd = executeBreakpoint(&*bp, true);
cmd->m_existingBrkpt = bp->id; // used in newBreakpoint()
flags |= DebuggerDriver::SFrefreshBreak;
}
}
/*
* If we stopped at a breakpoint, we must update the breakpoint list
* because the hit count changes. Also, if the breakpoint was temporary
* it would go away now.
*/
if ((flags & (DebuggerDriver::SFrefreshBreak|DebuggerDriver::SFrefreshSource)) ||
stopMayChangeBreakList())
{
m_d->queueCmd(DCinfobreak, DebuggerDriver::QMoverride);
}
/*
* If we haven't listed the shared libraries yet, do so. We must do
* this before we emit any commands that list variables, since the type
* libraries depend on the shared libraries.
*/
if (!m_sharedLibsListed) {
// must be a high-priority command!
m_d->executeCmd(DCinfosharedlib);
}
// get the backtrace if the program is running
if (m_programActive) {
m_d->queueCmd(DCbt, DebuggerDriver::QMoverride);
} else {
// program finished: erase PC
emit updatePC(QString(), -1, DbgAddr(), 0);
// dequeue any commands in the queues
m_d->flushCommands();
}
/* Update threads list */
if (m_programActive && (flags & DebuggerDriver::SFrefreshThreads)) {
m_d->queueCmd(DCinfothreads, DebuggerDriver::QMoverride);
}
m_programRunning = false;
emit programStopped();
}
void KDebugger::slotInferiorRunning()
{
m_programRunning = true;
}
void KDebugger::updateAllExprs()
{
if (!m_programActive)
return;
// retrieve local variables
m_d->queueCmd(DCinfolocals, DebuggerDriver::QMoverride);
// retrieve registers
m_d->queueCmd(DCinforegisters, DebuggerDriver::QMoverride);
// get new memory dump
if (!m_memoryExpression.isEmpty()) {
queueMemoryDump(false);
}
// update watch expressions
VarTree* item = m_watchVariables.firstChild();
for (; item != 0; item = item->nextSibling()) {
m_watchEvalExpr.push_back(item->getText());
}
}
void KDebugger::updateProgEnvironment(const QString& args, const QString& wd,
const QDict<EnvVar>& newVars,
const QStringList& newOptions)
{
m_programArgs = args;
m_d->executeCmd(DCsetargs, m_programArgs);
TRACE("new pgm args: " + m_programArgs + "\n");
m_programWD = wd.stripWhiteSpace();
if (!m_programWD.isEmpty()) {
m_d->executeCmd(DCcd, m_programWD);
TRACE("new wd: " + m_programWD + "\n");
}
// update environment variables
QDictIterator<EnvVar> it = newVars;
EnvVar* val;
for (; (val = it) != 0; ++it) {
QString var = it.currentKey();
switch (val->status) {
case EnvVar::EVnew:
m_envVars.insert(var, val);
// fall thru
case EnvVar::EVdirty:
// the value must be in our list
ASSERT(m_envVars[var] == val);
// update value
m_d->executeCmd(DCsetenv, var, val->value);
break;
case EnvVar::EVdeleted:
// must be in our list
ASSERT(m_envVars[var] == val);
// delete value
m_d->executeCmd(DCunsetenv, var);
m_envVars.remove(var);
break;
default:
ASSERT(false);
case EnvVar::EVclean:
// variable not changed
break;
}
}
// update options
QStringList::ConstIterator oi;
for (oi = newOptions.begin(); oi != newOptions.end(); ++oi)
{
if (m_boolOptions.findIndex(*oi) < 0) {
// the options is currently not set, so set it
m_d->executeCmd(DCsetoption, *oi, 1);
} else {
// option is set, no action required, but move it to the end
m_boolOptions.remove(*oi);
}
m_boolOptions.append(*oi);
}
/*
* Now all options that should be set are at the end of m_boolOptions.
* If some options need to be unset, they are at the front of the list.
* Here we unset and remove them.
*/
while (m_boolOptions.count() > newOptions.count()) {
m_d->executeCmd(DCsetoption, m_boolOptions.first(), 0);
m_boolOptions.remove(m_boolOptions.begin());
}
}
void KDebugger::handleLocals(const char* output)
{
// retrieve old list of local variables
QStringList oldVars = m_localVariables.exprList();
/*
* Get local variables.
*/
std::list<ExprValue*> newVars;
parseLocals(output, newVars);
/*
* Clear any old VarTree item pointers, so that later we don't access
* dangling pointers.
*/
m_localVariables.clearPendingUpdates();
/*
* Match old variables against new ones.
*/
for (QStringList::ConstIterator n = oldVars.begin(); n != oldVars.end(); ++n) {
// lookup this variable in the list of new variables
std::list<ExprValue*>::iterator v = newVars.begin();
while (v != newVars.end() && (*v)->m_name != *n)
++v;
if (v == newVars.end()) {
// old variable not in the new variables
TRACE("old var deleted: " + *n);
VarTree* v = m_localVariables.topLevelExprByName(*n);
if (v != 0) {
m_localVariables.removeExpr(v);
}
} else {
// variable in both old and new lists: update
TRACE("update var: " + *n);
m_localVariables.updateExpr(*v, *m_typeTable);
// remove the new variable from the list
delete *v;
newVars.erase(v);
}
}
// insert all remaining new variables
while (!newVars.empty())
{
ExprValue* v = newVars.front();
TRACE("new var: " + v->m_name);
m_localVariables.insertExpr(v, *m_typeTable);
delete v;
newVars.pop_front();
}
}
void KDebugger::parseLocals(const char* output, std::list<ExprValue*>& newVars)
{
std::list<ExprValue*> vars;
m_d->parseLocals(output, vars);
QString origName; /* used in renaming variables */
while (!vars.empty())
{
ExprValue* variable = vars.front();
vars.pop_front();
/*
* When gdb prints local variables, those from the innermost block
* come first. We run through the list of already parsed variables
* to find duplicates (i.e. variables that hide local variables from
* a surrounding block). We keep the name of the inner variable, but
* rename those from the outer block so that, when the value is
* updated in the window, the value of the variable that is
* _visible_ changes the color!
*/
int block = 0;
origName = variable->m_name;
for (std::list<ExprValue*>::iterator v = newVars.begin(); v != newVars.end(); ++v) {
if (variable->m_name == (*v)->m_name) {
// we found a duplicate, change name
block++;
QString newName = origName + " (" + QString().setNum(block) + ")";
variable->m_name = newName;
}
}
newVars.push_back(variable);
}
}
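// Editor's note: a minimal stand-alone sketch of the shadow-renaming loop
// above (added for illustration; disambiguate() is hypothetical and not part
// of the original source). Three variables named "i" come out as
// "i", "i (1)", "i (2)".
static QString disambiguate(const QString& name, const std::list<QString>& alreadySeen)
{
    int block = 0;
    QString result = name;
    for (std::list<QString>::const_iterator it = alreadySeen.begin();
         it != alreadySeen.end(); ++it) {
        if (*it == result)      // duplicate found: append the next counter
            result = name + " (" + QString().setNum(++block) + ")";
    }
    return result;
}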
bool KDebugger::handlePrint(CmdQueueItem* cmd, const char* output)
{
ASSERT(cmd->m_expr != 0);
ExprValue* variable = m_d->parsePrintExpr(output, true);
if (variable == 0)
return false;
// set expression "name"
variable->m_name = cmd->m_expr->getText();
{
TRACE("update expr: " + cmd->m_expr->getText());
cmd->m_exprWnd->updateExpr(cmd->m_expr, variable, *m_typeTable);
delete variable;
}
evalExpressions(); /* enqueue dereferenced pointers */
return true;
}
bool KDebugger::handlePrintDeref(CmdQueueItem* cmd, const char* output)
{
ASSERT(cmd->m_expr != 0);
ExprValue* variable = m_d->parsePrintExpr(output, true);
if (variable == 0)
return false;
// set expression "name"
variable->m_name = cmd->m_expr->getText();
{
/*
* We must insert a dummy parent, because otherwise variable's value
* would overwrite cmd->m_expr's value.
*/
ExprValue* dummyParent = new ExprValue(variable->m_name, VarTree::NKplain);
dummyParent->m_varKind = VarTree::VKdummy;
// the name of the parsed variable is the address of the pointer
QString addr = "*" + cmd->m_expr->value();
variable->m_name = addr;
variable->m_nameKind = VarTree::NKaddress;
dummyParent->m_child = variable;
// expand the first level for convenience
variable->m_initiallyExpanded = true;
TRACE("update ptr: " + cmd->m_expr->getText());
cmd->m_exprWnd->updateExpr(cmd->m_expr, dummyParent, *m_typeTable);
delete dummyParent;
}
evalExpressions(); /* enqueue dereferenced pointers */
return true;
}
// parse the output of bt
void KDebugger::handleBacktrace(const char* output)
{
// reduce flicker
m_btWindow.setAutoUpdate(false);
m_btWindow.clear();
std::list<StackFrame> stack;
m_d->parseBackTrace(output, stack);
if (!stack.empty()) {
std::list<StackFrame>::iterator frm = stack.begin();
// first frame must set PC
// note: frm->lineNo is zero-based
emit updatePC(frm->fileName, frm->lineNo, frm->address, frm->frameNo);
for (; frm != stack.end(); ++frm) {
QString func;
if (frm->var != 0)
func = frm->var->m_name;
else
func = frm->fileName + ":" + QString().setNum(frm->lineNo+1);
m_btWindow.insertItem(func);
TRACE("frame " + func + " (" + frm->fileName + ":" +
QString().setNum(frm->lineNo+1) + ")");
}
}
m_btWindow.setAutoUpdate(true);
m_btWindow.repaint();
}
void KDebugger::gotoFrame(int frame)
{
m_d->executeCmd(DCframe, frame);
}
void KDebugger::handleFrameChange(const char* output)
{
QString fileName;
int frameNo;
int lineNo;
DbgAddr address;
if (m_d->parseFrameChange(output, frameNo, fileName, lineNo, address)) {
/* lineNo can be negative here if we can't find a file name */
emit updatePC(fileName, lineNo, address, frameNo);
} else {
emit updatePC(fileName, -1, address, frameNo);
}
}
void KDebugger::evalExpressions()
{
// evaluate expressions in the following order:
// watch expressions
// pointers in local variables
// pointers in watch expressions
// types in local variables
// types in watch expressions
// struct members in local variables
// struct members in watch expressions
VarTree* exprItem = 0;
if (!m_watchEvalExpr.empty())
{
QString expr = m_watchEvalExpr.front();
m_watchEvalExpr.pop_front();
exprItem = m_watchVariables.topLevelExprByName(expr);
}
if (exprItem != 0) {
CmdQueueItem* cmd = m_d->queueCmd(DCprint, exprItem->getText(), DebuggerDriver::QMoverride);
// remember which expr this was
cmd->m_expr = exprItem;
cmd->m_exprWnd = &m_watchVariables;
} else {
ExprWnd* wnd;
#define POINTER(widget) \
wnd = &widget; \
exprItem = widget.nextUpdatePtr(); \
if (exprItem != 0) goto pointer
#define STRUCT(widget) \
wnd = &widget; \
exprItem = widget.nextUpdateStruct(); \
if (exprItem != 0) goto ustruct
#define TYPE(widget) \
wnd = &widget; \
exprItem = widget.nextUpdateType(); \
if (exprItem != 0) goto type
repeat:
POINTER(m_localVariables);
POINTER(m_watchVariables);
STRUCT(m_localVariables);
STRUCT(m_watchVariables);
TYPE(m_localVariables);
TYPE(m_watchVariables);
#undef POINTER
#undef STRUCT
#undef TYPE
return;
pointer:
// we have an expression to send
dereferencePointer(wnd, exprItem, false);
return;
ustruct:
// paranoia
if (exprItem->m_type == 0 || exprItem->m_type == TypeInfo::unknownType())
goto repeat;
evalInitialStructExpression(exprItem, wnd, false);
return;
type:
/*
* Sometimes a VarTree gets registered twice for a type update. So
* it may happen that it has already been updated. Hence, we ignore
* it here and go on to the next task.
*/
if (exprItem->m_type != 0)
goto repeat;
determineType(wnd, exprItem);
}
}
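// Editor's note: the goto/macro machinery above implements a fixed-priority
// scan -- pointers, then structs, then types, each across both windows. A
// hypothetical goto-free sketch (illustration only; unlike the original, it
// does not restart the whole scan when a struct or type entry is skipped):
//
//   ExprWnd* windows[2] = { &m_localVariables, &m_watchVariables };
//   for (int i = 0; i < 2; ++i)
//       if (VarTree* e = windows[i]->nextUpdatePtr())
//           return dereferencePointer(windows[i], e, false);
//   for (int i = 0; i < 2; ++i)
//       if (VarTree* e = windows[i]->nextUpdateStruct())
//           if (e->m_type != 0 && e->m_type != TypeInfo::unknownType())
//               return evalInitialStructExpression(e, windows[i], false);
//   for (int i = 0; i < 2; ++i)
//       if (VarTree* e = windows[i]->nextUpdateType())
//           if (e->m_type == 0)
//               return determineType(windows[i], e);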
void KDebugger::dereferencePointer(ExprWnd* wnd, VarTree* exprItem,
bool immediate)
{
ASSERT(exprItem->m_varKind == VarTree::VKpointer);
QString expr = exprItem->computeExpr();
TRACE("dereferencing pointer: " + expr);
CmdQueueItem* cmd;
if (immediate) {
cmd = m_d->queueCmd(DCprintDeref, expr, DebuggerDriver::QMoverrideMoreEqual);
} else {
cmd = m_d->queueCmd(DCprintDeref, expr, DebuggerDriver::QMoverride);
}
// remember which expr this was
cmd->m_expr = exprItem;
cmd->m_exprWnd = wnd;
}
void KDebugger::determineType(ExprWnd* wnd, VarTree* exprItem)
{
ASSERT(exprItem->m_varKind == VarTree::VKstruct);
QString expr = exprItem->computeExpr();
TRACE("get type of: " + expr);
CmdQueueItem* cmd;
cmd = m_d->queueCmd(DCfindType, expr, DebuggerDriver::QMoverride);
// remember which expr this was
cmd->m_expr = exprItem;
cmd->m_exprWnd = wnd;
}
void KDebugger::handleFindType(CmdQueueItem* cmd, const char* output)
{
QString type;
if (m_d->parseFindType(output, type))
{
ASSERT(cmd != 0 && cmd->m_expr != 0);
TypeInfo* info = m_typeTable->lookup(type);
if (info == 0) {
/*
* We've asked gdb for the type of the expression in
* cmd->m_expr, but it returned a name we don't know. The base
* class (and member) types have been checked already (at the
* time when we parsed that particular expression). Now it's
* time to derive the type from the base classes as a last
* resort.
*/
info = cmd->m_expr->inferTypeFromBaseClass();
// if we found a type through this method, register an alias
if (info != 0) {
TRACE("infered alias: " + type);
m_typeTable->registerAlias(type, info);
}
}
if (info == 0) {
TRACE("unknown type "+type);
cmd->m_expr->m_type = TypeInfo::unknownType();
} else {
cmd->m_expr->m_type = info;
/* since this node has a new type, we get its value immediately */
evalInitialStructExpression(cmd->m_expr, cmd->m_exprWnd, false);
return;
}
}
evalExpressions(); /* queue more of them */
}
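// Editor's note (hypothetical example, not from the original source): suppose
// gdb reports the unknown type "MyDialog" for cmd->m_expr, but the
// expression's base-class chain contains a type the table does know (say, a
// hypothetical "KnownBase"). inferTypeFromBaseClass() then returns that
// TypeInfo, and registerAlias() records "MyDialog" as an alias for it, so any
// later lookup of "MyDialog" succeeds without another round trip to gdb.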
void KDebugger::handlePrintStruct(CmdQueueItem* cmd, const char* output)
{
VarTree* var = cmd->m_expr;
ASSERT(var != 0);
ASSERT(var->m_varKind == VarTree::VKstruct);
ExprValue* partExpr;
if (cmd->m_cmd == DCprintQStringStruct) {
partExpr = m_d->parseQCharArray(output, false, m_typeTable->qCharIsShort());
} else if (cmd->m_cmd == DCprintWChar) {
partExpr = m_d->parseQCharArray(output, false, true);
} else {
partExpr = m_d->parsePrintExpr(output, false);
}
bool errorValue =
partExpr == 0 ||
/* we only allow simple values at the moment */
partExpr->m_child != 0;
QString partValue;
if (errorValue)
{
partValue = "?""?""?"; // 2 question marks in a row would be a trigraph
} else {
partValue = partExpr->m_value;
}
delete partExpr;
partExpr = 0;
/*
* Updating a struct value works like this: var->m_partialValue holds
* the value that we have gathered so far (it's been initialized with
* var->m_type->m_displayString[0] earlier). Each time we arrive here,
* we append the printed result followed by the next
* var->m_type->m_displayString to var->m_partialValue.
*
* If the expression we just evaluated was a guard expression, and it
* resulted in an error, we must not evaluate the real expression, but
* go on to the next index. (We must still add the question marks to
* the value).
*
* Next, if this was the length expression, we still have not seen the
* real expression; so far we only have the length of the QString.
*/
ASSERT(var->m_exprIndex >= 0 && var->m_exprIndex <= typeInfoMaxExpr);
if (errorValue || !var->m_exprIndexUseGuard)
{
// add current partValue (which might be the question marks)
var->m_partialValue += partValue;
var->m_exprIndex++; /* next part */
var->m_exprIndexUseGuard = true;
var->m_partialValue += var->m_type->m_displayString[var->m_exprIndex];
}
else
{
// this was a guard expression that succeeded
// go for the real expression
var->m_exprIndexUseGuard = false;
}
/* go for more sub-expressions if needed */
if (var->m_exprIndex < var->m_type->m_numExprs) {
/* queue a new print command with quite high priority */
evalStructExpression(var, cmd->m_exprWnd, true);
return;
}
cmd->m_exprWnd->updateStructValue(var);
evalExpressions(); /* enqueue dereferenced pointers */
}
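// Editor's note: a worked example of the partial-value scheme above, using a
// hypothetical type (illustration only). Suppose the type table defines
//     m_displayString = { "Rect(", ", ", ")" }      // m_numExprs == 2
//     m_exprStrings   = { "%s.x", "%s.y" }
// Then m_partialValue grows like this:
//     "Rect("                      (set by evalInitialStructExpression)
//     "Rect(3"                     (printed result of "%s.x")
//     "Rect(3, "                   (+ m_displayString[1])
//     "Rect(3, 4"                  (printed result of "%s.y")
//     "Rect(3, 4)"                 (+ m_displayString[2], scan complete)
// A guard expression that fails contributes "???" in place of its printed
// result, and the corresponding real expression is skipped.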
/* queues the first printStruct command for a struct */
void KDebugger::evalInitialStructExpression(VarTree* var, ExprWnd* wnd, bool immediate)
{
var->m_exprIndex = 0;
if (var->m_type != TypeInfo::wchartType())
{
var->m_exprIndexUseGuard = true;
var->m_partialValue = var->m_type->m_displayString[0];
evalStructExpression(var, wnd, immediate);
}
else
{
var->m_exprIndexUseGuard = false;
QString expr = var->computeExpr();
CmdQueueItem* cmd = m_d->queueCmd(DCprintWChar, expr,
immediate ? DebuggerDriver::QMoverrideMoreEqual
: DebuggerDriver::QMoverride);
// remember which expression this was
cmd->m_expr = var;
cmd->m_exprWnd = wnd;
}
}
/** queues a printStruct command; var must have been initialized correctly */
void KDebugger::evalStructExpression(VarTree* var, ExprWnd* wnd, bool immediate)
{
QString base = var->computeExpr();
QString expr;
if (var->m_exprIndexUseGuard) {
expr = var->m_type->m_guardStrings[var->m_exprIndex];
if (expr.isEmpty()) {
// no guard, omit it and go to expression
var->m_exprIndexUseGuard = false;
}
}
if (!var->m_exprIndexUseGuard) {
expr = var->m_type->m_exprStrings[var->m_exprIndex];
}
expr.replace("%s", base);
DbgCommand dbgCmd = DCprintStruct;
// check if this is a QString::Data
if (expr.left(15) == "/QString::Data ")
{
if (m_typeTable->parseQt2QStrings())
{
expr = expr.mid(15, expr.length()); /* strip off /QString::Data */
dbgCmd = DCprintQStringStruct;
} else {
/*
* This should not happen: the type libraries should be set up
* so that this case is impossible. If it happens nevertheless,
* it means that, e.g., kdecore was loaded but qt2 was not
* (only qt2 enables the QString feature).
*/
// TODO: remove this "print"; queue the next printStruct instead
expr = "*0";
}
}
TRACE("evalStruct: " + expr + (var->m_exprIndexUseGuard ? " // guard" : " // real"));
CmdQueueItem* cmd = m_d->queueCmd(dbgCmd, expr,
immediate ? DebuggerDriver::QMoverrideMoreEqual
: DebuggerDriver::QMnormal);
// remember which expression this was
cmd->m_expr = var;
cmd->m_exprWnd = wnd;
}
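// Editor's note (hypothetical example, not from the original source): a
// type-table expression string such as "/QString::Data (%s).d" first has
// "%s" replaced by the base expression, giving e.g. "/QString::Data (str).d".
// The prefix test above then strips the 15-character "/QString::Data "
// marker, leaving "(str).d", and switches the command to
// DCprintQStringStruct so the driver prints the result as a QChar array
// instead of a plain value.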
void KDebugger::handleSharedLibs(const char* output)
{
// parse the table of shared libraries
m_sharedLibs = m_d->parseSharedLibs(output);
m_sharedLibsListed = true;
// get type libraries
m_typeTable->loadLibTypes(m_sharedLibs);
// hand over the QString data cmd
m_d->setPrintQStringDataCmd(m_typeTable->printQStringDataCmd());
}
CmdQueueItem* KDebugger::loadCoreFile()
{
return m_d->queueCmd(DCcorefile, m_corefile, DebuggerDriver::QMoverride);
}
void KDebugger::slotExpanding(QListViewItem* item)
{
VarTree* exprItem = static_cast<VarTree*>(item);
if (exprItem->m_varKind != VarTree::VKpointer) {
return;
}
ExprWnd* wnd = static_cast<ExprWnd*>(item->listView());
dereferencePointer(wnd, exprItem, true);
}
// add the expression in the edit field to the watch expressions
void KDebugger::addWatch(const QString& t)
{
QString expr = t.stripWhiteSpace();
// don't add a watched expression again
if (expr.isEmpty() || m_watchVariables.topLevelExprByName(expr) != 0)
return;
ExprValue e(expr, VarTree::NKplain);
m_watchVariables.insertExpr(&e, *m_typeTable);
// if the program is active, queue the expression (and evaluate right away if the debugger is idle)
if (m_programActive) {
m_watchEvalExpr.push_back(expr);
if (m_d->isIdle()) {
evalExpressions();
}
}
}
// delete a toplevel watch expression
void KDebugger::slotDeleteWatch()
{
// delete only allowed while debugger is idle; or else we might delete
// the very expression the debugger is currently working on...
if (m_d == 0 || !m_d->isIdle())
return;
VarTree* item = m_watchVariables.currentItem();
if (item == 0 || !item->isToplevelExpr())
return;
// remove the variable from the list to evaluate
QStringList::iterator i = m_watchEvalExpr.find(item->getText());
if (i != m_watchEvalExpr.end()) {
m_watchEvalExpr.erase(i);
}
m_watchVariables.removeExpr(item);
// item is invalid at this point!
}
void KDebugger::handleRegisters(const char* output)
{
emit registersChanged(m_d->parseRegisters(output));
}
/*
* The output of the DCbreak* commands has more accurate information about
* the file and the line number.
*
* All newly set breakpoints are inserted in m_brkpts, even those that
* were not set successfully. The unsuccessful breakpoints ("orphaned
* breakpoints") are assigned negative ids, and we retry setting them
* later, when the program next stops at a breakpoint.
*/
void KDebugger::newBreakpoint(CmdQueueItem* cmd, const char* output)
{
BrkptIterator bp;
if (cmd->m_brkpt != 0) {
// a new breakpoint, put it in the list
assert(cmd->m_brkpt->id == 0);
m_brkpts.push_back(*cmd->m_brkpt);
delete cmd->m_brkpt;
bp = m_brkpts.end();
--bp;
} else {
// an existing breakpoint was retried
assert(cmd->m_existingBrkpt != 0);
bp = breakpointById(cmd->m_existingBrkpt);
if (bp == m_brkpts.end())
return;
}
// parse the output to determine success or failure
int id;
QString file;
int lineNo;
QString address;
if (!m_d->parseBreakpoint(output, id, file, lineNo, address))
{
/*
* Failure, the breakpoint could not be set. If this is a new
* breakpoint, assign it a negative id. We look for the minimal id
* of all breakpoints (that are already in the list) to get the new
* id.
*/
if (bp->id == 0)
{
int minId = 0;
for (BrkptIterator i = m_brkpts.begin(); i != m_brkpts.end(); ++i) {
if (i->id < minId)
minId = i->id;
}
bp->id = minId-1;
}
return;
}
// The breakpoint was successfully set.
if (bp->id <= 0)
{
// this is a new or orphaned breakpoint:
// set the remaining properties
if (!bp->enabled) {
m_d->executeCmd(DCdisable, id);
}
if (!bp->condition.isEmpty()) {
m_d->executeCmd(DCcondition, bp->condition, id);
}
}
bp->id = id;
bp->fileName = file;
bp->lineNo = lineNo;
if (!address.isEmpty())
bp->address = address;
}
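// Editor's note: a minimal sketch of the orphan-id rule above (added for
// illustration; nextOrphanId() is hypothetical, not part of the original
// source). Failed breakpoints receive ids -1, -2, -3, ... so they can never
// collide with gdb's positive breakpoint numbers.
static int nextOrphanId(const std::list<Breakpoint>& brkpts)
{
    int minId = 0;
    for (std::list<Breakpoint>::const_iterator i = brkpts.begin();
         i != brkpts.end(); ++i) {
        if (i->id < minId)
            minId = i->id;
    }
    return minId - 1;   // first orphan gets -1, the next -2, ...
}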
void KDebugger::updateBreakList(const char* output)
{
// get the new list
std::list<Breakpoint> brks;
m_d->parseBreakList(output, brks);
// merge existing information into the new list
// then swap the old and new lists
for (BrkptIterator bp = brks.begin(); bp != brks.end(); ++bp)
{
BrkptIterator i = breakpointById(bp->id);
if (i != m_brkpts.end())
{
// preserve accurate location information
// note that xsldbg doesn't have a location in the
// listed breakpoint if it has just been set; therefore,
// we copy it as well if necessary
bp->text = i->text;
if (!i->fileName.isEmpty()) {
bp->fileName = i->fileName;
bp->lineNo = i->lineNo;
}
}
}
// orphaned breakpoints must be copied
for (BrkptIterator bp = m_brkpts.begin(); bp != m_brkpts.end(); ++bp)
{
if (bp->isOrphaned())
brks.push_back(*bp);
}
m_brkpts.swap(brks);
emit breakpointsChanged();
}
// look if there is at least one temporary breakpoint
// or a watchpoint
bool KDebugger::stopMayChangeBreakList() const
{
for (BrkptROIterator bp = m_brkpts.begin(); bp != m_brkpts.end(); ++bp)
{
if (bp->temporary || bp->type == Breakpoint::watchpoint)
return true;
}
return false;
}
KDebugger::BrkptIterator KDebugger::breakpointByFilePos(QString file, int lineNo,
const DbgAddr& address)
{
// look for exact file name match
for (BrkptIterator bp = m_brkpts.begin(); bp != m_brkpts.end(); ++bp)
{
if (bp->lineNo == lineNo &&
bp->fileName == file &&
(address.isEmpty() || bp->address == address))
{
return bp;
}
}
// not found, so try basename
// strip off directory part of file name
int offset = file.findRev("/");
file.remove(0, offset+1);
for (BrkptIterator bp = m_brkpts.begin(); bp != m_brkpts.end(); ++bp)
{
// get base name of breakpoint's file
QString basename = bp->fileName;
int offset = basename.findRev("/");
if (offset >= 0) {
basename.remove(0, offset+1);
}
if (bp->lineNo == lineNo &&
basename == file &&
(address.isEmpty() || bp->address == address))
{
return bp;
}
}
// not found
return m_brkpts.end();
}
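// Editor's note (illustration): if the caller passes file = "src/main.cpp"
// and a breakpoint is stored with fileName = "/home/user/proj/src/main.cpp",
// the exact-match pass above fails, but the fallback pass compares basenames
// ("main.cpp" == "main.cpp"), so the breakpoint is still found -- provided
// the line number (and the address, when one is given) also match.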
KDebugger::BrkptIterator KDebugger::breakpointById(int id)
{
for (BrkptIterator bp = m_brkpts.begin(); bp != m_brkpts.end(); ++bp)
{
if (bp->id == id) {
return bp;
}
}
// not found
return m_brkpts.end();
}
void KDebugger::slotValuePopup(const QString& expr)
{
// search the local variables for a match
VarTree* v = m_localVariables.topLevelExprByName(expr);
if (v == 0) {
// not found, check watch expressions
v = m_watchVariables.topLevelExprByName(expr);
if (v == 0) {
// try a member of 'this'
v = m_localVariables.topLevelExprByName("this");
if (v != 0)
v = ExprWnd::ptrMemberByName(v, expr);
if (v == 0) {
// nothing found; do nothing
return;
}
}
}
// construct the tip
QString tip = v->getText() + " = ";
if (!v->value().isEmpty())
{
tip += v->value();
}
else
{
// no value: we use some hint
switch (v->m_varKind) {
case VarTree::VKstruct:
tip += "{...}";
break;
case VarTree::VKarray:
tip += "[...]";
break;
default:
tip += "?""?""?"; // 2 question marks in a row would be a trigraph
break;
}
}
emit valuePopup(tip);
}
void KDebugger::slotDisassemble(const QString& fileName, int lineNo)
{
if (m_haveExecutable) {
CmdQueueItem* cmd = m_d->queueCmd(DCinfoline, fileName, lineNo,
DebuggerDriver::QMoverrideMoreEqual);
cmd->m_fileName = fileName;
cmd->m_lineNo = lineNo;
}
}
void KDebugger::handleInfoLine(CmdQueueItem* cmd, const char* output)
{
QString addrFrom, addrTo;
if (cmd->m_lineNo >= 0) {
// disassemble
if (m_d->parseInfoLine(output, addrFrom, addrTo)) {
// got the address range, now get the real code
CmdQueueItem* c = m_d->queueCmd(DCdisassemble, addrFrom, addrTo,
DebuggerDriver::QMoverrideMoreEqual);
c->m_fileName = cmd->m_fileName;
c->m_lineNo = cmd->m_lineNo;
} else {
// no code
emit disassembled(cmd->m_fileName, cmd->m_lineNo, std::list<DisassembledCode>());
}
} else {
// set program counter
if (m_d->parseInfoLine(output, addrFrom, addrTo)) {
// move the program counter to the start address
m_d->executeCmd(DCsetpc, addrFrom);
}
}
}
void KDebugger::handleDisassemble(CmdQueueItem* cmd, const char* output)
{
emit disassembled(cmd->m_fileName, cmd->m_lineNo,
m_d->parseDisassemble(output));
}
void KDebugger::handleThreadList(const char* output)
{
emit threadsChanged(m_d->parseThreadList(output));
}
void KDebugger::setThread(int id)
{
m_d->queueCmd(DCthread, id, DebuggerDriver::QMoverrideMoreEqual);
}
void KDebugger::setMemoryExpression(const QString& memexpr)
{
m_memoryExpression = memexpr;
// queue the new expression
if (!m_memoryExpression.isEmpty() &&
isProgramActive() &&
!isProgramRunning())
{
queueMemoryDump(true);
}
}
void KDebugger::queueMemoryDump(bool immediate)
{
m_d->queueCmd(DCexamine, m_memoryExpression, m_memoryFormat,
immediate ? DebuggerDriver::QMoverrideMoreEqual :
DebuggerDriver::QMoverride);
}
void KDebugger::handleMemoryDump(const char* output)
{
std::list<MemoryDump> memdump;
QString msg = m_d->parseMemoryDump(output, memdump);
emit memoryDumpChanged(msg, memdump);
}
void KDebugger::setProgramCounter(const QString& file, int line, const DbgAddr& addr)
{
if (addr.isEmpty()) {
// find address of the specified line
CmdQueueItem* cmd = m_d->executeCmd(DCinfoline, file, line);
cmd->m_lineNo = -1; /* indicates "Set PC" UI command */
} else {
// move the program counter to that address
m_d->executeCmd(DCsetpc, addr.asString());
}
}
void KDebugger::handleSetPC(const char* /*output*/)
{
// TODO: handle errors
// now go to the top-most frame
// this also modifies the program counter indicator in the UI
gotoFrame(0);
}
void KDebugger::slotValueEdited(VarTree* expr, const QString& text)
{
if (text.simplifyWhiteSpace().isEmpty())
return; /* no text entered: ignore request */
ExprWnd* wnd = static_cast<ExprWnd*>(expr->listView());
TRACE(QString().sprintf("Changing %s to ",
wnd->name()) + text);
// determine the lvalue to edit
QString lvalue = expr->computeExpr();
CmdQueueItem* cmd = m_d->executeCmd(DCsetvariable, lvalue, text);
cmd->m_expr = expr;
cmd->m_exprWnd = wnd;
}
void KDebugger::handleSetVariable(CmdQueueItem* cmd, const char* output)
{
QString msg = m_d->parseSetVariable(output);
if (!msg.isEmpty())
{
// there was an error; display it in the status bar
m_statusMessage = msg;
emit updateStatusMessage();
return;
}
// get the new value
QString expr = cmd->m_expr->computeExpr();
CmdQueueItem* printCmd =
m_d->queueCmd(DCprint, expr, DebuggerDriver::QMoverrideMoreEqual);
printCmd->m_expr = cmd->m_expr;
printCmd->m_exprWnd = cmd->m_exprWnd;
}
#include "debugger.moc"
| gpl-2.0 |
GeoCat/QGIS | src/plugins/geometry_checker/ui/qgsgeometrycheckerresulttab.cpp | 1 | 24471 | /***************************************************************************
* qgsgeometrycheckerresulttab.cpp *
* ------------------- *
* copyright : (C) 2014 by Sandro Mani / Sourcepole AG *
* email : smani@sourcepole.ch *
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
#include <QFileDialog>
#include <QGroupBox>
#include <QMessageBox>
#include <QDialogButtonBox>
#include <QPlainTextEdit>
#include "qgsgeometrycheckerresulttab.h"
#include "qgsgeometrycheckfixdialog.h"
#include "../qgsgeometrychecker.h"
#include "../checks/qgsgeometrycheck.h"
#include "../utils/qgsfeaturepool.h"
#include "qgsgeometry.h"
#include "qgisinterface.h"
#include "qgsmapcanvas.h"
#include "qgsproject.h"
#include "qgsproviderregistry.h"
#include "qgsrubberband.h"
#include "qgsvectorlayer.h"
#include "qgsvectordataprovider.h"
#include "qgsvectorfilewriter.h"
#include "qgssettings.h"
#include "qgsscrollarea.h"
QString QgsGeometryCheckerResultTab::sSettingsGroup = QStringLiteral( "/geometry_checker/default_fix_methods/" );
QgsGeometryCheckerResultTab::QgsGeometryCheckerResultTab( QgisInterface *iface, QgsGeometryChecker *checker, QgsFeaturePool *featurePool, QTabWidget *tabWidget, QWidget *parent )
: QWidget( parent )
, mTabWidget( tabWidget )
, mIface( iface )
, mChecker( checker )
, mFeaturePool( featurePool )
{
ui.setupUi( this );
mErrorCount = 0;
mFixedCount = 0;
mCloseable = true;
mAttribTableDialog = nullptr;
for ( int i = 0, n = mFeaturePool->getLayer()->fields().count(); i < n; ++i )
{
ui.comboBoxMergeAttribute->addItem( mFeaturePool->getLayer()->fields().at( i ).name() );
}
connect( checker, &QgsGeometryChecker::errorAdded, this, &QgsGeometryCheckerResultTab::addError );
connect( checker, &QgsGeometryChecker::errorUpdated, this, &QgsGeometryCheckerResultTab::updateError );
connect( ui.comboBoxMergeAttribute, static_cast<void ( QComboBox::* )( int )>( &QComboBox::currentIndexChanged ), checker, &QgsGeometryChecker::setMergeAttributeIndex );
connect( ui.tableWidgetErrors->selectionModel(), &QItemSelectionModel::selectionChanged, this, &QgsGeometryCheckerResultTab::onSelectionChanged );
connect( ui.buttonGroupSelectAction, static_cast<void ( QButtonGroup::* )( int )>( &QButtonGroup::buttonClicked ), this, &QgsGeometryCheckerResultTab::highlightErrors );
connect( ui.pushButtonOpenAttributeTable, &QAbstractButton::clicked, this, &QgsGeometryCheckerResultTab::openAttributeTable );
connect( ui.pushButtonFixWithDefault, &QAbstractButton::clicked, this, &QgsGeometryCheckerResultTab::fixErrorsWithDefault );
connect( ui.pushButtonFixWithPrompt, &QAbstractButton::clicked, this, &QgsGeometryCheckerResultTab::fixErrorsWithPrompt );
connect( ui.pushButtonErrorResolutionSettings, &QAbstractButton::clicked, this, &QgsGeometryCheckerResultTab::setDefaultResolutionMethods );
connect( ui.checkBoxHighlight, &QAbstractButton::clicked, this, &QgsGeometryCheckerResultTab::highlightErrors );
connect( QgsProject::instance(), static_cast<void ( QgsProject::* )( const QStringList & )>( &QgsProject::layersWillBeRemoved ), this, &QgsGeometryCheckerResultTab::checkRemovedLayer );
connect( ui.pushButtonExport, &QAbstractButton::clicked, this, &QgsGeometryCheckerResultTab::exportErrors );
if ( ( mFeaturePool->getLayer()->dataProvider()->capabilities() & QgsVectorDataProvider::ChangeGeometries ) == 0 )
{
ui.pushButtonFixWithDefault->setEnabled( false );
ui.pushButtonFixWithPrompt->setEnabled( false );
}
ui.progressBarFixErrors->setVisible( false );
ui.tableWidgetErrors->horizontalHeader()->setSortIndicator( 0, Qt::AscendingOrder );
// Not sure why, but this is needed...
ui.tableWidgetErrors->setSortingEnabled( true );
ui.tableWidgetErrors->setSortingEnabled( false );
}
QgsGeometryCheckerResultTab::~QgsGeometryCheckerResultTab()
{
if ( mFeaturePool->getLayer() )
mFeaturePool->getLayer()->setReadOnly( false );
delete mChecker;
delete mFeaturePool;
qDeleteAll( mCurrentRubberBands );
}
void QgsGeometryCheckerResultTab::finalize()
{
ui.tableWidgetErrors->setSortingEnabled( true );
if ( !mChecker->getMessages().isEmpty() )
{
QDialog dialog;
dialog.setLayout( new QVBoxLayout() );
dialog.layout()->addWidget( new QLabel( tr( "The following checks reported errors:" ) ) );
dialog.layout()->addWidget( new QPlainTextEdit( mChecker->getMessages().join( QStringLiteral( "\n" ) ) ) );
QDialogButtonBox *bbox = new QDialogButtonBox( QDialogButtonBox::Close, Qt::Horizontal );
dialog.layout()->addWidget( bbox );
connect( bbox, &QDialogButtonBox::accepted, &dialog, &QDialog::accept );
connect( bbox, &QDialogButtonBox::rejected, &dialog, &QDialog::reject );
dialog.setWindowTitle( tr( "Check Errors Occurred" ) );
dialog.exec();
}
}
void QgsGeometryCheckerResultTab::addError( QgsGeometryCheckError *error )
{
bool sortingWasEnabled = ui.tableWidgetErrors->isSortingEnabled();
if ( sortingWasEnabled )
ui.tableWidgetErrors->setSortingEnabled( false );
int row = ui.tableWidgetErrors->rowCount();
int prec = 7 - std::floor( std::max( 0., std::log10( std::max( error->location().x(), error->location().y() ) ) ) );
QString posStr = QStringLiteral( "%1, %2" ).arg( error->location().x(), 0, 'f', prec ).arg( error->location().y(), 0, 'f', prec );
double layerToMap = mIface->mapCanvas()->mapSettings().layerToMapUnits( mFeaturePool->getLayer() );
QVariant value;
if ( error->valueType() == QgsGeometryCheckError::ValueLength )
{
value = QVariant::fromValue( error->value().toDouble() * layerToMap );
}
else if ( error->valueType() == QgsGeometryCheckError::ValueArea )
{
value = QVariant::fromValue( error->value().toDouble() * layerToMap * layerToMap );
}
else
{
value = error->value();
}
ui.tableWidgetErrors->insertRow( row );
QTableWidgetItem *idItem = new QTableWidgetItem();
idItem->setData( Qt::EditRole, error->featureId() != FEATUREID_NULL ? QVariant( error->featureId() ) : QVariant() );
ui.tableWidgetErrors->setItem( row, 0, idItem );
ui.tableWidgetErrors->setItem( row, 1, new QTableWidgetItem( error->description() ) );
ui.tableWidgetErrors->setItem( row, 2, new QTableWidgetItem( posStr ) );
QTableWidgetItem *valueItem = new QTableWidgetItem();
valueItem->setData( Qt::EditRole, value );
ui.tableWidgetErrors->setItem( row, 3, valueItem );
ui.tableWidgetErrors->setItem( row, 4, new QTableWidgetItem( QLatin1String( "" ) ) );
ui.tableWidgetErrors->item( row, 0 )->setData( Qt::UserRole, QVariant::fromValue( error ) );
++mErrorCount;
ui.labelErrorCount->setText( tr( "Total errors: %1, fixed errors: %2" ).arg( mErrorCount ).arg( mFixedCount ) );
mStatistics.newErrors.insert( error );
mErrorMap.insert( error, QPersistentModelIndex( ui.tableWidgetErrors->model()->index( row, 0 ) ) );
if ( sortingWasEnabled )
ui.tableWidgetErrors->setSortingEnabled( true );
}
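// Editor's note (illustration, not part of the original source): the `prec`
// formula above keeps roughly seven significant digits in the position
// string. For a location like (431625.3, 4583012.9):
//     log10(max(x, y)) = log10(4583012.9) ~ 6.66, floor -> 6, prec = 7 - 6 = 1
// so the position is rendered as "431625.3, 4583012.9". For coordinates
// below 10, prec stays at the maximum of 7.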
void QgsGeometryCheckerResultTab::updateError( QgsGeometryCheckError *error, bool statusChanged )
{
if ( !mErrorMap.contains( error ) )
{
return;
}
// Disable sorting to prevent crashes: if, e.g., sorting by column 0, then as soon as
// item(row, 0) is set, the row is potentially moved due to sorting, and subsequent
// item(row, col) calls reference the wrong item
ui.tableWidgetErrors->setSortingEnabled( false );
int row = mErrorMap.value( error ).row();
int prec = 7 - std::floor( std::max( 0., std::log10( std::max( error->location().x(), error->location().y() ) ) ) );
QString posStr = QStringLiteral( "%1, %2" ).arg( error->location().x(), 0, 'f', prec ).arg( error->location().y(), 0, 'f', prec );
double layerToMap = mIface->mapCanvas()->mapSettings().layerToMapUnits( mFeaturePool->getLayer() );
QVariant value;
if ( error->valueType() == QgsGeometryCheckError::ValueLength )
{
value = QVariant::fromValue( error->value().toDouble() * layerToMap );
}
else if ( error->valueType() == QgsGeometryCheckError::ValueArea )
{
value = QVariant::fromValue( error->value().toDouble() * layerToMap * layerToMap );
}
else
{
value = error->value();
}
ui.tableWidgetErrors->item( row, 2 )->setText( posStr );
ui.tableWidgetErrors->item( row, 3 )->setData( Qt::EditRole, value );
if ( error->status() == QgsGeometryCheckError::StatusFixed )
{
setRowStatus( row, Qt::green, tr( "Fixed: %1" ).arg( error->resolutionMessage() ), true );
++mFixedCount;
if ( statusChanged )
{
mStatistics.fixedErrors.insert( error );
}
}
else if ( error->status() == QgsGeometryCheckError::StatusFixFailed )
{
setRowStatus( row, Qt::red, tr( "Fix failed: %1" ).arg( error->resolutionMessage() ), true );
if ( statusChanged )
{
mStatistics.failedErrors.insert( error );
}
}
else if ( error->status() == QgsGeometryCheckError::StatusObsolete )
{
ui.tableWidgetErrors->setRowHidden( row, true );
// setRowStatus( row, Qt::gray, tr( "Obsolete" ), false );
--mErrorCount;
// If the error was new, don't report it as obsolete, since the user never got to see it anyway
if ( statusChanged && !mStatistics.newErrors.remove( error ) )
{
mStatistics.obsoleteErrors.insert( error );
}
}
ui.labelErrorCount->setText( tr( "Total errors: %1, fixed errors: %2" ).arg( mErrorCount ).arg( mFixedCount ) );
ui.tableWidgetErrors->setSortingEnabled( true );
}
void QgsGeometryCheckerResultTab::exportErrors()
{
QString initialdir;
QDir dir = QFileInfo( mFeaturePool->getLayer()->dataProvider()->dataSourceUri() ).dir();
if ( dir.exists() )
{
initialdir = dir.absolutePath();
}
QString file = QFileDialog::getSaveFileName( this, tr( "Select Output File" ), initialdir, tr( "ESRI Shapefile (*.shp);;" ) );
if ( file.isEmpty() )
{
return;
}
if ( !exportErrorsDo( file ) )
{
QMessageBox::critical( this, tr( "Error" ), tr( "Failed to export errors to shapefile." ) );
}
}
bool QgsGeometryCheckerResultTab::exportErrorsDo( const QString &file )
{
QList< QPair<QString, QString> > attributes;
attributes.append( qMakePair( QStringLiteral( "FeatureID" ), QStringLiteral( "String;10;" ) ) );
attributes.append( qMakePair( QStringLiteral( "ErrorDesc" ), QStringLiteral( "String;80;" ) ) );
QLibrary ogrLib( QgsProviderRegistry::instance()->library( QStringLiteral( "ogr" ) ) );
if ( !ogrLib.load() )
{
return false;
}
typedef bool ( *createEmptyDataSourceProc )( const QString &, const QString &, const QString &, QgsWkbTypes::Type, const QList< QPair<QString, QString> > &, const QgsCoordinateReferenceSystem & );
createEmptyDataSourceProc createEmptyDataSource = ( createEmptyDataSourceProc ) cast_to_fptr( ogrLib.resolve( "createEmptyDataSource" ) );
if ( !createEmptyDataSource )
{
return false;
}
if ( !createEmptyDataSource( file, QStringLiteral( "ESRI Shapefile" ), mFeaturePool->getLayer()->dataProvider()->encoding(), QgsWkbTypes::Point, attributes, mFeaturePool->getLayer()->crs() ) )
{
return false;
}
QgsVectorLayer *layer = new QgsVectorLayer( file, QFileInfo( file ).baseName(), QStringLiteral( "ogr" ) );
if ( !layer->isValid() )
{
delete layer;
return false;
}
int fieldFeatureId = layer->fields().lookupField( QStringLiteral( "FeatureID" ) );
int fieldErrDesc = layer->fields().lookupField( QStringLiteral( "ErrorDesc" ) );
for ( int row = 0, nRows = ui.tableWidgetErrors->rowCount(); row < nRows; ++row )
{
QgsGeometryCheckError *error = ui.tableWidgetErrors->item( row, 0 )->data( Qt::UserRole ).value<QgsGeometryCheckError *>();
QgsFeature f( layer->fields() );
f.setAttribute( fieldFeatureId, error->featureId() );
f.setAttribute( fieldErrDesc, error->description() );
f.setGeometry( QgsGeometry( error->location().clone() ) );
layer->dataProvider()->addFeatures( QgsFeatureList() << f );
}
// Remove existing layer with same uri
QStringList toRemove;
Q_FOREACH ( QgsMapLayer *maplayer, QgsProject::instance()->mapLayers() )
{
if ( dynamic_cast<QgsVectorLayer *>( maplayer ) &&
static_cast<QgsVectorLayer *>( maplayer )->dataProvider()->dataSourceUri() == layer->dataProvider()->dataSourceUri() )
{
toRemove.append( maplayer->id() );
}
}
if ( !toRemove.isEmpty() )
{
QgsProject::instance()->removeMapLayers( toRemove );
}
QgsProject::instance()->addMapLayers( QList<QgsMapLayer *>() << layer );
return true;
}
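// Editor's note: the attribute pairs above appear to encode each field as
// "type;width;precision" -- e.g. "String;80;" is an 80-character string
// column with no precision part. A hypothetical numeric column would follow
// the same convention (illustration only, not in the original code):
//
//   attributes.append( qMakePair( QStringLiteral( "ErrValue" ),
//                                 QStringLiteral( "Real;20;5" ) ) );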
void QgsGeometryCheckerResultTab::highlightError( QgsGeometryCheckError *error )
{
if ( !mErrorMap.contains( error ) )
{
return;
}
int row = mErrorMap.value( error ).row();
ui.tableWidgetErrors->setCurrentIndex( ui.tableWidgetErrors->model()->index( row, 0 ) );
highlightErrors( true );
}
void QgsGeometryCheckerResultTab::highlightErrors( bool current )
{
qDeleteAll( mCurrentRubberBands );
mCurrentRubberBands.clear();
QList<QTableWidgetItem *> items;
QVector<QgsPointXY> errorPositions;
QgsRectangle totextent;
if ( current )
{
items.append( ui.tableWidgetErrors->currentItem() );
}
else
{
items.append( ui.tableWidgetErrors->selectedItems() );
}
Q_FOREACH ( QTableWidgetItem *item, items )
{
QgsGeometryCheckError *error = ui.tableWidgetErrors->item( item->row(), 0 )->data( Qt::UserRole ).value<QgsGeometryCheckError *>();
QgsAbstractGeometry *geometry = error->geometry();
if ( ui.checkBoxHighlight->isChecked() && geometry )
{
QgsRubberBand *featureRubberBand = new QgsRubberBand( mIface->mapCanvas() );
QgsGeometry geom( geometry->clone() );
featureRubberBand->addGeometry( geom, mFeaturePool->getLayer() );
featureRubberBand->setWidth( 5 );
featureRubberBand->setColor( Qt::yellow );
mCurrentRubberBands.append( featureRubberBand );
}
else
{
// QgsGeometry above takes ownership of geometry and deletes it when it goes out of scope
delete geometry;
geometry = nullptr;
}
if ( ui.radioButtonError->isChecked() || current || error->status() == QgsGeometryCheckError::StatusFixed )
{
QgsRubberBand *pointRubberBand = new QgsRubberBand( mIface->mapCanvas(), QgsWkbTypes::PointGeometry );
QgsPointXY pos = mIface->mapCanvas()->mapSettings().layerToMapCoordinates( mFeaturePool->getLayer(), QgsPointXY( error->location().x(), error->location().y() ) );
pointRubberBand->addPoint( pos );
pointRubberBand->setWidth( 20 );
pointRubberBand->setColor( Qt::red );
mCurrentRubberBands.append( pointRubberBand );
errorPositions.append( pos );
}
else if ( ui.radioButtonFeature->isChecked() && geometry )
{
QgsRectangle geomextent = mIface->mapCanvas()->mapSettings().layerExtentToOutputExtent( mFeaturePool->getLayer(), geometry->boundingBox() );
if ( totextent.isEmpty() )
{
totextent = geomextent;
}
else
{
totextent.combineExtentWith( geomextent );
}
}
}
// If error positions are marked, pan to the center of all positions,
// and zoom out if necessary to make all points fit.
if ( !errorPositions.isEmpty() )
{
double cx = 0., cy = 0.;
QgsRectangle pointExtent( errorPositions.first(), errorPositions.first() );
Q_FOREACH ( const QgsPointXY &p, errorPositions )
{
cx += p.x();
cy += p.y();
pointExtent.include( p );
}
QgsPointXY center = QgsPointXY( cx / errorPositions.size(), cy / errorPositions.size() );
if ( totextent.isEmpty() )
{
QgsRectangle extent = mIface->mapCanvas()->extent();
QgsVector diff = center - extent.center();
extent.setXMinimum( extent.xMinimum() + diff.x() );
extent.setXMaximum( extent.xMaximum() + diff.x() );
extent.setYMinimum( extent.yMinimum() + diff.y() );
extent.setYMaximum( extent.yMaximum() + diff.y() );
extent.combineExtentWith( pointExtent );
totextent = extent;
}
else
{
totextent.combineExtentWith( pointExtent );
}
}
mIface->mapCanvas()->setExtent( totextent );
mIface->mapCanvas()->refresh();
}
void QgsGeometryCheckerResultTab::onSelectionChanged( const QItemSelection &newSel, const QItemSelection &/*oldSel*/ )
{
QModelIndex idx = ui.tableWidgetErrors->currentIndex();
if ( idx.isValid() && !ui.tableWidgetErrors->isRowHidden( idx.row() ) && ui.tableWidgetErrors->selectionModel()->selectedIndexes().contains( idx ) )
{
highlightErrors();
}
else
{
qDeleteAll( mCurrentRubberBands );
mCurrentRubberBands.clear();
}
ui.pushButtonOpenAttributeTable->setEnabled( !newSel.isEmpty() );
}
void QgsGeometryCheckerResultTab::openAttributeTable()
{
QSet<int> ids;
Q_FOREACH ( QModelIndex idx, ui.tableWidgetErrors->selectionModel()->selectedRows() )
{
QgsFeatureId id = ui.tableWidgetErrors->item( idx.row(), 0 )->data( Qt::UserRole ).value<QgsGeometryCheckError *>()->featureId();
if ( id >= 0 )
{
ids.insert( id );
}
}
if ( ids.isEmpty() )
{
return;
}
QStringList expr;
Q_FOREACH ( int id, ids )
{
expr.append( QStringLiteral( "$id = %1 " ).arg( id ) );
}
if ( mAttribTableDialog )
{
disconnect( mAttribTableDialog, &QObject::destroyed, this, &QgsGeometryCheckerResultTab::clearAttribTableDialog );
mAttribTableDialog->close();
}
mAttribTableDialog = mIface->showAttributeTable( mFeaturePool->getLayer(), expr.join( QStringLiteral( " or " ) ) );
connect( mAttribTableDialog, &QObject::destroyed, this, &QgsGeometryCheckerResultTab::clearAttribTableDialog );
}
void QgsGeometryCheckerResultTab::fixErrors( bool prompt )
{
// Collect errors to fix
QModelIndexList rows = ui.tableWidgetErrors->selectionModel()->selectedRows();
if ( rows.isEmpty() )
{
ui.tableWidgetErrors->selectAll();
rows = ui.tableWidgetErrors->selectionModel()->selectedRows();
}
QList<QgsGeometryCheckError *> errors;
Q_FOREACH ( const QModelIndex &index, rows )
{
QgsGeometryCheckError *error = ui.tableWidgetErrors->item( index.row(), 0 )->data( Qt::UserRole ).value<QgsGeometryCheckError *>();
if ( error->status() < QgsGeometryCheckError::StatusFixed )
{
errors.append( error );
}
}
if ( errors.isEmpty() )
{
return;
}
if ( QMessageBox::Yes != QMessageBox::question( this, tr( "Fix errors?" ), tr( "Do you want to fix %1 errors?" ).arg( errors.size() ), QMessageBox::Yes, QMessageBox::No ) )
{
return;
}
// Reset statistics, clear rubberbands
mStatistics = QgsGeometryCheckerFixSummaryDialog::Statistics();
qDeleteAll( mCurrentRubberBands );
mCurrentRubberBands.clear();
// Fix errors
mCloseable = false;
if ( prompt )
{
QgsGeometryCheckerFixDialog fixdialog( mChecker, errors, mIface, mIface->mainWindow() );
QEventLoop loop;
connect( &fixdialog, &QgsGeometryCheckerFixDialog::currentErrorChanged, this, &QgsGeometryCheckerResultTab::highlightError );
connect( &fixdialog, &QDialog::finished, &loop, &QEventLoop::quit );
fixdialog.show();
parentWidget()->parentWidget()->parentWidget()->setEnabled( false );
loop.exec();
parentWidget()->parentWidget()->parentWidget()->setEnabled( true );
}
else
{
setCursor( Qt::WaitCursor );
ui.progressBarFixErrors->setVisible( true );
ui.progressBarFixErrors->setRange( 0, errors.size() );
Q_FOREACH ( QgsGeometryCheckError *error, errors )
{
int fixMethod = QgsSettings().value( sSettingsGroup + error->check()->errorName(), QVariant::fromValue<int>( 0 ) ).toInt();
mChecker->fixError( error, fixMethod );
ui.progressBarFixErrors->setValue( ui.progressBarFixErrors->value() + 1 );
QApplication::processEvents( QEventLoop::ExcludeUserInputEvents );
}
ui.progressBarFixErrors->setVisible( false );
unsetCursor();
}
mChecker->getLayer()->triggerRepaint();
if ( mStatistics.itemCount() > 0 )
{
QgsGeometryCheckerFixSummaryDialog summarydialog( mIface, mFeaturePool->getLayer(), mStatistics, mChecker->getMessages(), mIface->mainWindow() );
QEventLoop loop;
connect( &summarydialog, &QgsGeometryCheckerFixSummaryDialog::errorSelected, this, &QgsGeometryCheckerResultTab::highlightError );
connect( &summarydialog, &QDialog::finished, &loop, &QEventLoop::quit );
summarydialog.show();
parentWidget()->parentWidget()->parentWidget()->setEnabled( false );
loop.exec();
parentWidget()->parentWidget()->parentWidget()->setEnabled( true );
}
mCloseable = true;
}
void QgsGeometryCheckerResultTab::setRowStatus( int row, const QColor &color, const QString &message, bool selectable )
{
for ( int col = 0, nCols = ui.tableWidgetErrors->columnCount(); col < nCols; ++col )
{
QTableWidgetItem *item = ui.tableWidgetErrors->item( row, col );
item->setBackground( color );
if ( !selectable )
{
item->setFlags( item->flags() & ~Qt::ItemIsSelectable );
item->setForeground( Qt::lightGray );
}
}
ui.tableWidgetErrors->item( row, 4 )->setText( message );
}
void QgsGeometryCheckerResultTab::setDefaultResolutionMethods()
{
QDialog dialog( this );
dialog.setWindowTitle( tr( "Set Error Resolutions" ) );
QVBoxLayout *layout = new QVBoxLayout( &dialog );
QgsScrollArea *scrollArea = new QgsScrollArea( &dialog );
scrollArea->setFrameShape( QFrame::NoFrame );
layout->setContentsMargins( 0, 0, 0, 0 );
layout->addWidget( scrollArea );
QWidget *scrollAreaContents = new QWidget( scrollArea );
QVBoxLayout *scrollAreaLayout = new QVBoxLayout( scrollAreaContents );
Q_FOREACH ( const QgsGeometryCheck *check, mChecker->getChecks() )
{
QGroupBox *groupBox = new QGroupBox( scrollAreaContents );
groupBox->setTitle( check->errorDescription() );
groupBox->setFlat( true );
QVBoxLayout *groupBoxLayout = new QVBoxLayout( groupBox );
groupBoxLayout->setContentsMargins( 2, 0, 2, 2 );
QButtonGroup *radioGroup = new QButtonGroup( groupBox );
radioGroup->setProperty( "errorType", check->errorName() );
int id = 0;
int checkedId = QgsSettings().value( sSettingsGroup + check->errorName(), QVariant::fromValue<int>( 0 ) ).toInt();
Q_FOREACH ( const QString &method, check->getResolutionMethods() )
{
QRadioButton *radio = new QRadioButton( method, groupBox );
radio->setChecked( id == checkedId );
groupBoxLayout->addWidget( radio );
radioGroup->addButton( radio, id++ );
}
connect( radioGroup, static_cast<void ( QButtonGroup::* )( int )>( &QButtonGroup::buttonClicked ), this, &QgsGeometryCheckerResultTab::storeDefaultResolutionMethod );
scrollAreaLayout->addWidget( groupBox );
}
scrollArea->setWidget( scrollAreaContents );
QDialogButtonBox *buttonBox = new QDialogButtonBox( QDialogButtonBox::Ok, Qt::Horizontal, &dialog );
connect( buttonBox, &QDialogButtonBox::accepted, &dialog, &QDialog::accept );
layout->addWidget( buttonBox );
dialog.exec();
}
void QgsGeometryCheckerResultTab::storeDefaultResolutionMethod( int id ) const
{
QString errorType = qobject_cast<QButtonGroup *>( QObject::sender() )->property( "errorType" ).toString();
QgsSettings().setValue( sSettingsGroup + errorType, id );
}
void QgsGeometryCheckerResultTab::checkRemovedLayer( const QStringList &ids )
{
if ( mFeaturePool->getLayer() && ids.contains( mFeaturePool->getLayer()->id() ) && isEnabled() )
{
if ( mTabWidget->currentWidget() == this )
{
QMessageBox::critical( this, tr( "Layer removed" ), tr( "The layer has been removed." ) );
}
setEnabled( false );
mFeaturePool->clearLayer();
qDeleteAll( mCurrentRubberBands );
mCurrentRubberBands.clear();
}
}
| gpl-2.0 |
xtingray/tupi | src/plugins/tools/composed/tweener.cpp | 1 | 34791 | /***************************************************************************
* Project TUPI: Magia 2D *
* Project Contact: info@maefloresta.com *
* Project Website: http://www.maefloresta.com *
* Project Leader: Gustav Gonzalez <info@maefloresta.com> *
* *
* Developers: *
* 2010: *
* Gustavo Gonzalez / xtingray *
* *
* KTooN's versions: *
* *
* 2006: *
* David Cuadrado *
* Jorge Cuadrado *
* 2003: *
* Fernado Roldan *
* Simena Dinas *
* *
* Copyright (C) 2010 Gustav Gonzalez - http://www.maefloresta.com *
* License: *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program. If not, see <http://www.gnu.org/licenses/>. *
***************************************************************************/
#include "tweener.h"
#include "taction.h"
#include "tnodegroup.h"
#include "tosd.h"
#include "tupsvg2qt.h"
#include "tupinputdeviceinformation.h"
#include "tupbrushmanager.h"
#include "tupgraphicsscene.h"
#include "tupgraphicobject.h"
#include "tuppathitem.h"
#include "tupellipseitem.h"
#include "tuprectitem.h"
#include "tuplineitem.h"
#include "tupsvgitem.h"
#include "tupitemtweener.h"
#include "tuprequestbuilder.h"
#include "tupprojectrequest.h"
#include "tuplibraryobject.h"
#include "tupscene.h"
#include "tuplayer.h"
struct Tweener::Private
{
QMap<QString, TAction *> actions;
Configurator *configurator;
TupGraphicsScene *scene;
QGraphicsPathItem *path;
QList<QGraphicsItem *> objects;
TupItemTweener *currentTween;
TNodeGroup *group;
bool pathAdded;
int startPoint;
TweenerPanel::Mode mode;
TweenerPanel::TweenerType currentTweenType;
TweenerPanel::EditMode editMode;
QPointF itemObjectReference;
QPointF pathOffset;
QPointF firstNode;
int baseZValue;
};
Tweener::Tweener() : TupToolPlugin(), k(new Private)
{
setupActions();
k->configurator = 0;
k->path = 0;
k->group = 0;
k->startPoint = 0;
}
Tweener::~Tweener()
{
delete k;
}
/* This method initializes the plugin */
void Tweener::init(TupGraphicsScene *scene)
{
delete k->path;
k->path = 0;
k->pathAdded = false;
delete k->group;
k->group = 0;
k->baseZValue = (2*ZLAYER_LIMIT) + (scene->scene()->layersCount() * ZLAYER_LIMIT);
k->scene = scene;
k->objects.clear();
k->pathOffset = QPointF(0, 0);
k->firstNode = QPointF(0, 0);
k->itemObjectReference = QPointF(0, 0);
k->mode = TweenerPanel::View;
k->editMode = TweenerPanel::None;
k->configurator->resetUI();
QList<QString> tweenList = k->scene->scene()->getTweenNames(TupItemTweener::Composed);
if (tweenList.size() > 0) {
k->configurator->loadTweenList(tweenList);
setCurrentTween(tweenList.at(0));
}
int total = framesCount();
k->configurator->initStartCombo(total, k->scene->currentFrameIndex());
}
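// Editor's note (illustration, not part of the original source): the
// baseZValue formula above reserves one ZLAYER_LIMIT-sized band of z-values
// per layer, plus two bands underneath. With a hypothetical
// ZLAYER_LIMIT == 10000 and a 3-layer scene:
//     baseZValue = 2*10000 + 3*10000 = 50000
// which keeps the tween's control nodes above the items of every layer.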
void Tweener::updateStartPoint(int index)
{
if (k->startPoint != index && index >= 0) {
// tFatal() << "Tweener::updateStartPoint() - New Start Point: " << k->startPoint;
k->startPoint = index;
}
}
/* This method returns the plugin name */
QStringList Tweener::keys() const
{
return QStringList() << tr("Composed Tween");
}
/* This method performs an action when the mouse is pressed on the workspace,
 * depending on the active mode: selecting an object or creating a path
*/
void Tweener::press(const TupInputDeviceInformation *input, TupBrushManager *brushManager, TupGraphicsScene *scene)
{
#ifdef K_DEBUG
#ifdef Q_OS_WIN
qDebug() << "[Tweener::press()]";
#else
T_FUNCINFO;
#endif
#endif
Q_UNUSED(brushManager);
Q_UNUSED(scene);
if (k->editMode == TweenerPanel::TweenProperties && k->scene->currentFrameIndex() == k->startPoint) {
if (k->currentTweenType == TweenerPanel::Position) {
if (k->path) {
QPointF point = k->path->mapFromParent(input->pos());
QPainterPath path = k->path->path();
path.cubicTo(point, point, point);
k->path->setPath(path);
}
} else {
#ifdef K_DEBUG
QString msg = "Tweener::press() - Error: No position!";
#ifdef Q_OS_WIN
qDebug() << msg;
#else
tFatal() << msg;
#endif
#endif
}
}
}
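// Editor's note (illustrative sketch, not part of the original source):
// path.cubicTo(point, point, point) above appends a cubic Bezier segment
// whose two control points and end point all coincide with the click
// position. The segment is therefore a straight line to the click, with its
// control handles collapsed onto the node, ready for TNodeGroup to drag
// apart later.
static QPainterPath degenerateSegmentSketch(const QPointF& start, const QPointF& click)
{
    QPainterPath path(start);            // hypothetical helper
    path.cubicTo(click, click, click);   // control points == end point
    return path;
}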
/* This method is executed while the mouse is pressed and on movement */
void Tweener::move(const TupInputDeviceInformation *input, TupBrushManager *brushManager, TupGraphicsScene *scene)
{
Q_UNUSED(input);
Q_UNUSED(brushManager);
Q_UNUSED(scene);
}
/* This method finishes the action started in the press method, depending
 * on the active mode: selecting an object or creating a path
*/
void Tweener::release(const TupInputDeviceInformation *input, TupBrushManager *brushManager, TupGraphicsScene *scene)
{
#ifdef K_DEBUG
#ifdef Q_OS_WIN
qDebug() << "[Tweener::release()]";
#else
T_FUNCINFO;
#endif
#endif
Q_UNUSED(input);
Q_UNUSED(brushManager);
if (scene->currentFrameIndex() == k->startPoint) {
if (k->editMode == TweenerPanel::TweenProperties) {
if (k->currentTweenType == TweenerPanel::Position) {
if (k->group) {
k->group->createNodes(k->path);
k->group->expandAllNodes();
k->configurator->updateSteps(k->path, k->pathOffset);
QPainterPath::Element e = k->path->path().elementAt(0);
QPointF begin = QPointF(e.x, e.y);
if (begin != k->firstNode) {
QPointF oldPos = k->firstNode;
QPointF newPos = begin;
int distanceX = newPos.x() - oldPos.x();
int distanceY = newPos.y() - oldPos.y();
if (k->objects.size() > 0) {
foreach (QGraphicsItem *item, k->objects)
item->moveBy(distanceX, distanceY);
QGraphicsItem *item = k->objects.at(0);
QRectF rect = item->sceneBoundingRect();
k->itemObjectReference = rect.center();
}
k->firstNode = newPos;
}
}
} else {
#ifdef K_DEBUG
QString msg = "Tweener::release() - No position!";
#ifdef Q_OS_WIN
qDebug() << msg;
#else
tFatal() << msg;
#endif
#endif
}
} else {
// here k->editMode == TweenerPanel::Selection
if (scene->selectedItems().size() > 0) {
k->objects = scene->selectedItems();
k->configurator->notifySelection(true);
QGraphicsItem *item = k->objects.at(0);
QRectF rect = item->sceneBoundingRect();
QPointF newPos = rect.center();
QPointF oldPos = k->itemObjectReference;
k->itemObjectReference = newPos;
if (!k->path) {
k->path = new QGraphicsPathItem;
k->path->setZValue(maxZValue());
QColor color = Qt::lightGray;
color.setAlpha(200);
QPen pen(QBrush(color), 1, Qt::DotLine);
k->path->setPen(pen);
QPainterPath path;
path.moveTo(newPos);
k->firstNode = newPos;
k->path->setPath(path);
scene->addItem(k->path);
k->pathAdded = true;
k->pathOffset = QPointF(0, 0);
} else {
int distanceX = newPos.x() - oldPos.x();
int distanceY = newPos.y() - oldPos.y();
k->path->moveBy(distanceX, distanceY);
k->pathOffset = QPointF(distanceX, distanceY);
}
}
}
}
}
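// Editor's note (illustration): the moveBy() deltas computed in release()
// are plain component differences between the selection's new and old
// centers, e.g. oldPos = (100, 80), newPos = (130, 60) gives
// moveBy(30, -20). Note that distanceX/distanceY are declared int, so the
// qreal differences are truncated and sub-pixel drags lose precision.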
/* This method returns the list of actions defined in this plugin */
QMap<QString, TAction *> Tweener::actions() const
{
return k->actions;
}
/* This method returns the type of this tool */
int Tweener::toolType() const
{
return TupToolInterface::Tweener;
}
/* This method returns the tool panel associated with this plugin */
QWidget *Tweener::configurator()
{
if (!k->configurator) {
k->mode = TweenerPanel::View;
k->configurator = new Configurator;
connect(k->configurator, SIGNAL(tweenPropertiesActivated(TweenerPanel::TweenerType)),
this, SLOT(updateCurrentTweenerType(TweenerPanel::TweenerType)));
connect(k->configurator, SIGNAL(startingPointChanged(int)), this, SLOT(updateStartPoint(int)));
connect(k->configurator, SIGNAL(clickedSelect()), this, SLOT(setSelect()));
connect(k->configurator, SIGNAL(clickedTweenProperties()), this, SLOT(tweenListMode()));
connect(k->configurator, SIGNAL(clickedRemoveTween(const QString &)), this, SLOT(removeTween(const QString &)));
connect(k->configurator, SIGNAL(clickedResetInterface()), this, SLOT(applyReset()));
connect(k->configurator, SIGNAL(loadPath(bool, bool)), this, SLOT(setPath(bool, bool)));
connect(k->configurator, SIGNAL(setMode(TweenerPanel::Mode)), this, SLOT(updateMode(TweenerPanel::Mode)));
connect(k->configurator, SIGNAL(clickedApplyTween()), this, SLOT(applyTween()));
connect(k->configurator, SIGNAL(getTweenData(const QString &)), this, SLOT(setCurrentTween(const QString &)));
} else {
k->mode = k->configurator->mode();
}
return k->configurator;
}
/* This method is called when the scene is about to change */
void Tweener::aboutToChangeScene(TupGraphicsScene *)
{
}
/* This method is called when this plugin is off */
void Tweener::aboutToChangeTool()
{
if (k->editMode == TweenerPanel::Selection) {
clearSelection();
disableSelection();
} else if (k->editMode == TweenerPanel::TweenProperties) {
if (k->currentTweenType == TweenerPanel::Position) {
if (k->path) {
// tFatal() << "Tweener::aboutToChangeTool() - Removing path!";
k->scene->removeItem(k->path);
k->pathAdded = false;
delete k->group;
k->group = 0;
}
}
}
k->mode = TweenerPanel::View;
k->editMode = TweenerPanel::None;
k->currentTweenType = TweenerPanel::Undefined;
}
/* SQA: What is it?
bool Tweener::isComplete() const
{
return true;
}
*/
/* This method defines the actions contained in this plugin */
void Tweener::setupActions()
{
TAction *translater = new TAction(QPixmap(kAppProp->themeDir() + "icons/composed_tween.png"),
tr("Composed Tween"), this);
translater->setCursor(QCursor(kAppProp->themeDir() + "cursors/tweener.png"));
translater->setShortcut(QKeySequence(tr("Shift+X")));
k->actions.insert(tr("Composed Tween"), translater);
}
/* This method initializes the "Create path" mode */
void Tweener::setCreatePath()
{
if (k->path) {
k->pathOffset = QPointF(0, 0);
if (!k->pathAdded) {
k->scene->addItem(k->path);
k->pathAdded = true;
}
if (k->group) {
k->group->createNodes(k->path);
} else {
k->group = new TNodeGroup(k->path, k->scene, TNodeGroup::ComposedTween, k->baseZValue);
connect(k->group, SIGNAL(nodeReleased()), SLOT(updatePath()));
k->group->createNodes(k->path);
}
k->group->expandAllNodes();
}
disableSelection();
}
/* This method initializes the "Select object" mode */
void Tweener::setSelect()
{
if (k->mode == TweenerPanel::Edit) {
if (k->startPoint != k->scene->currentFrameIndex()) {
TupProjectRequest request = TupRequestBuilder::createFrameRequest(k->scene->currentSceneIndex(),
k->scene->currentLayerIndex(),
k->startPoint, TupProjectRequest::Select, "1");
emit requested(&request);
}
}
k->editMode = TweenerPanel::Selection;
foreach (QGraphicsView * view, k->scene->views()) {
view->setDragMode(QGraphicsView::RubberBandDrag);
foreach (QGraphicsItem *item, view->scene()->items()) {
if ((item->zValue() >= (2*ZLAYER_LIMIT)) && (item->toolTip().length()==0)) {
item->setFlags(QGraphicsItem::ItemIsSelectable | QGraphicsItem::ItemIsMovable);
}
}
}
// When Object selection is enabled, previous selection is set
if (k->objects.size() > 0) {
foreach (QGraphicsItem *item, k->objects) {
item->setFlags(QGraphicsItem::ItemIsSelectable | QGraphicsItem::ItemIsMovable);
item->setSelected(true);
}
}
}
/* This method resets this plugin */
void Tweener::applyReset()
{
// tFatal() << "Tweener::applyReset() - Fire in the hole!";
k->mode = TweenerPanel::View;
k->editMode = TweenerPanel::None;
k->currentTweenType = TweenerPanel::Undefined;
clearSelection();
disableSelection();
k->pathAdded = false;
k->path = 0;
k->startPoint = k->scene->currentFrameIndex();
k->configurator->cleanTweensForms();
}
/* This method applies the tween created with this plugin to the project */
void Tweener::applyTween()
{
QString name = k->configurator->currentTweenName();
if (name.length() == 0) {
TOsd::self()->display(tr("Error"), tr("Tween name is missing!"), TOsd::Error);
return;
}
// SQA: Remove this code
/*
if (k->startPoint != k->scene->currentFrameIndex()) {
TupProjectRequest request = TupRequestBuilder::createFrameRequest(k->scene->currentSceneIndex(),
k->scene->currentLayerIndex(),
k->startPoint, TupProjectRequest::Select, "1");
emit requested(&request);
}
*/
/*
if (!k->scene->scene()->tweenExists(name, TupItemTweener::Composed))
tFatal() << "Tweener::applyTween() - Tween " << name << " is NEW!!!";
else
tFatal() << "Tweener::applyTween() - Tween " << name << " is NOT NEW!!!";
*/
if (!k->scene->scene()->tweenExists(name, TupItemTweener::Composed)) {
foreach (QGraphicsItem *item, k->objects) {
TupLibraryObject::Type type = TupLibraryObject::Item;
int objectIndex = k->scene->currentFrame()->indexOf(item);
QRectF rect = item->sceneBoundingRect();
QPointF point = rect.topLeft();
if (TupSvgItem *svg = qgraphicsitem_cast<TupSvgItem *>(item)) {
type = TupLibraryObject::Svg;
objectIndex = k->scene->currentFrame()->indexOf(svg);
} else {
if (qgraphicsitem_cast<TupPathItem *>(item) || qgraphicsitem_cast<TupEllipseItem *>(item)
|| qgraphicsitem_cast<TupLineItem *>(item) || qgraphicsitem_cast<TupRectItem *>(item))
point = item->pos();
}
// tFatal() << "Tweener::applyTween() - Point 1: [" << point.x() << ", " << point.y() << "]";
QDomDocument dom;
dom.appendChild(dynamic_cast<TupAbstractSerializable *>(item)->toXml(dom));
// tFatal() << "";
// tFatal() << "Tweener::applyTween() - " << dom.toString();
TupProjectRequest request = TupRequestBuilder::createItemRequest(
k->scene->currentSceneIndex(),
k->scene->currentLayerIndex(),
k->startPoint,
objectIndex,
QPointF(), k->scene->spaceContext(),
type, TupProjectRequest::SetTween,
k->configurator->tweenToXml(k->startPoint, point));
emit requested(&request);
}
int framesNumber = framesCount();
int total = k->startPoint + k->configurator->totalSteps() - 1;
if (total > framesNumber) {
for (int i = framesNumber; i <= total; i++) {
TupProjectRequest requestFrame = TupRequestBuilder::createFrameRequest(k->scene->currentSceneIndex(),
k->scene->currentLayerIndex(),
i, TupProjectRequest::Add, tr("Frame"));
emit requested(&requestFrame);
}
}
TupProjectRequest request = TupRequestBuilder::createFrameRequest(k->scene->currentSceneIndex(),
k->scene->currentLayerIndex(),
k->startPoint, TupProjectRequest::Select, "1");
emit requested(&request);
} else {
removeTweenFromProject(name);
QList<QGraphicsItem *> newList;
foreach (QGraphicsItem *item, k->objects) {
TupLibraryObject::Type type = TupLibraryObject::Item;
TupScene *scene = k->scene->scene();
TupLayer *layer = scene->layerAt(k->scene->currentLayerIndex());
TupFrame *frame = layer->frameAt(k->currentTween->initFrame());
int objectIndex = frame->indexOf(item);
QRectF rect = item->sceneBoundingRect();
QPointF point = rect.topLeft();
if (TupSvgItem *svg = qgraphicsitem_cast<TupSvgItem *>(item)) {
type = TupLibraryObject::Svg;
objectIndex = frame->indexOf(svg);
} else {
if (qgraphicsitem_cast<TupPathItem *>(item) || qgraphicsitem_cast<TupEllipseItem *>(item)
|| qgraphicsitem_cast<TupLineItem *>(item) || qgraphicsitem_cast<TupRectItem *>(item))
point = item->pos();
}
if (k->startPoint != k->currentTween->initFrame()) {
QDomDocument dom;
dom.appendChild(dynamic_cast<TupAbstractSerializable *>(item)->toXml(dom));
/*
tFatal() << "Tweener::applyTween() - Point 2: [" << point.x() << ", " << point.y() << "]";
tFatal() << "";
tFatal() << "Tweener::applyTween() - " << dom.toString();
*/
TupProjectRequest request = TupRequestBuilder::createItemRequest(k->scene->currentSceneIndex(),
k->scene->currentLayerIndex(),
k->startPoint, -1,
QPointF(), k->scene->spaceContext(),
type, TupProjectRequest::Add,
dom.toString());
emit requested(&request);
request = TupRequestBuilder::createItemRequest(k->scene->currentSceneIndex(),
k->scene->currentLayerIndex(),
k->currentTween->initFrame(),
objectIndex, QPointF(),
k->scene->spaceContext(), type,
TupProjectRequest::Remove);
emit requested(&request);
frame = layer->frameAt(k->startPoint);
if (type == TupLibraryObject::Item)
objectIndex = frame->graphicItemsCount() - 1;
else
objectIndex = frame->svgItemsCount() - 1;
newList.append(frame->graphicAt(objectIndex)->item());
}
TupProjectRequest request = TupRequestBuilder::createItemRequest(
k->scene->currentSceneIndex(),
k->scene->currentLayerIndex(),
k->startPoint,
objectIndex,
QPointF(), k->scene->spaceContext(),
type, TupProjectRequest::SetTween,
k->configurator->tweenToXml(k->startPoint, point));
emit requested(&request);
int total = k->startPoint + k->configurator->totalSteps();
int framesNumber = framesCount();
if (framesNumber < total) {
for (int i = framesNumber; i < total; i++) {
TupProjectRequest requestFrame = TupRequestBuilder::createFrameRequest(k->scene->currentSceneIndex(),
k->scene->currentLayerIndex(),
i, TupProjectRequest::Add, tr("Frame"));
emit requested(&requestFrame);
}
}
request = TupRequestBuilder::createFrameRequest(k->scene->currentSceneIndex(), k->scene->currentLayerIndex(),
k->startPoint, TupProjectRequest::Select, "1");
emit requested(&request);
}
if (newList.size() > 0)
k->objects = newList;
}
setCurrentTween(name);
TOsd::self()->display(tr("Info"), tr("Tween %1 applied!").arg(name), TOsd::Info);
}
/* This method updates the path data in the tool panel
 * and disables edit mode for the nodes
*/
void Tweener::updatePath()
{
k->configurator->updateSteps(k->path, k->pathOffset);
}
/* This method saves the settings of this plugin */
void Tweener::saveConfig()
{
}
/* This method updates the workspace when the plugin changes the scene */
void Tweener::updateScene(TupGraphicsScene *scene)
{
k->mode = k->configurator->mode();
if (k->mode == TweenerPanel::Edit) {
// tFatal() << "Tweener::updateScene() - Mode: TweenerPanel::Edit";
int total = k->startPoint + k->configurator->totalSteps();
if (k->editMode == TweenerPanel::TweenProperties) {
if (k->currentTweenType == TweenerPanel::Position) {
if (scene->currentFrameIndex() >= k->startPoint && scene->currentFrameIndex() < total) {
if (k->path && k->group) {
k->scene->addItem(k->path);
k->group->createNodes(k->path);
k->group->expandAllNodes();
}
}
}
}
int framesNumber = framesCount();
if (k->configurator->startComboSize() < framesNumber)
k->configurator->initStartCombo(framesNumber, k->startPoint);
} else if (k->mode == TweenerPanel::Add) {
// tFatal() << "Tweener::updateScene() - Mode: TweenerPanel::Add";
int total = framesCount();
if (k->configurator->startComboSize() < total) {
k->configurator->initStartCombo(total, k->startPoint);
} else {
if (scene->currentFrameIndex() != k->startPoint)
k->configurator->setStartFrame(scene->currentFrameIndex());
}
if (k->editMode == TweenerPanel::TweenProperties) {
if (k->currentTweenType == TweenerPanel::Position)
k->path = 0;
k->configurator->cleanTweensForms();
clearSelection();
k->configurator->activateMode(TweenerPanel::Selection);
} else if (k->editMode == TweenerPanel::Selection) {
if (k->currentTweenType == TweenerPanel::Position)
k->path = 0;
if (scene->currentFrameIndex() != k->startPoint) {
clearSelection();
k->startPoint = scene->currentFrameIndex();
setSelect();
}
} else if (k->editMode == TweenerPanel::TweenList) {
if (scene->currentFrameIndex() != k->startPoint) {
k->startPoint = scene->currentFrameIndex();
clearSelection();
k->configurator->activateMode(TweenerPanel::Selection);
}
}
} else {
// tFatal() << "Tweener::updateScene() - Mode: TweenerPanel::View";
if (scene->currentFrameIndex() != k->startPoint) {
k->configurator->setStartFrame(scene->currentFrameIndex());
}
}
}
void Tweener::updateMode(TweenerPanel::Mode mode)
{
k->mode = mode;
if (k->mode == TweenerPanel::Edit)
setEditEnv();
}
int Tweener::maxZValue()
{
int max = -1;
foreach (QGraphicsView *view, k->scene->views()) {
foreach (QGraphicsItem *item, view->scene()->items()) {
if (item->zValue() > max)
max = item->zValue();
}
}
return max + 1;
}
void Tweener::removeTweenFromProject(const QString &name)
{
TupScene *scene = k->scene->scene();
bool removed = scene->removeTween(name, TupItemTweener::Composed);
if (removed) {
foreach (QGraphicsView * view, k->scene->views()) {
foreach (QGraphicsItem *item, view->scene()->items()) {
QString tip = item->toolTip();
if (tip.startsWith(tr("Composed Tween") + ": " + name))
item->setToolTip("");
}
}
} else {
#ifdef K_DEBUG
QString msg = "Tweener::removeTweenFromProject() - Position tween couldn't be removed -> " + name;
#ifdef Q_OS_WIN
qDebug() << msg;
#else
tError() << msg;
#endif
#endif
}
}
void Tweener::removeTween(const QString &name)
{
removeTweenFromProject(name);
applyReset();
}
void Tweener::setCurrentTween(const QString &name)
{
// tFatal() << "Tweener::setCurrentTween(Tweener::setCurrentTween() - Updating tweener: " << name;
TupScene *scene = k->scene->scene();
k->currentTween = scene->tween(name, TupItemTweener::Composed);
if (k->currentTween) {
k->configurator->setCurrentTween(k->currentTween);
}
}
void Tweener::setEditEnv()
{
// tFatal() << "void Tweener::setEditEnv() - Just tracing!!!";
k->startPoint = k->currentTween->initFrame();
if (k->startPoint != k->scene->currentFrameIndex()) {
TupProjectRequest request = TupRequestBuilder::createFrameRequest(k->scene->currentSceneIndex(),
k->scene->currentLayerIndex(),
k->startPoint, TupProjectRequest::Select, "1");
emit requested(&request);
}
k->mode = TweenerPanel::Edit;
TupScene *scene = k->scene->scene();
k->objects = scene->getItemsFromTween(k->currentTween->name(), TupItemTweener::Composed);
QGraphicsItem *item = k->objects.at(0);
QRectF rect = item->sceneBoundingRect();
k->itemObjectReference = rect.center();
if (k->currentTween->contains(TupItemTweener::Position)) {
// tFatal() << "void Tweener::setEditEnv() - Adding path!";
k->path = k->currentTween->graphicsPath();
k->path->setZValue(maxZValue());
QPainterPath::Element e = k->path->path().elementAt(0);
k->firstNode = QPointF(e.x, e.y);
QPointF oldPos = QPointF(e.x, e.y);
QPointF newPos = rect.center();
int distanceX = newPos.x() - oldPos.x();
int distanceY = newPos.y() - oldPos.y();
k->path->moveBy(distanceX, distanceY);
k->pathOffset = QPointF(distanceX, distanceY);
QColor color = Qt::lightGray;
color.setAlpha(200);
QPen pen(QBrush(color), 1, Qt::DotLine);
k->path->setPen(pen);
// setCreatePath();
}
}
int Tweener::framesCount()
{
int total = 1;
TupLayer *layer = k->scene->scene()->layerAt(k->scene->currentLayerIndex());
if (layer)
total = layer->framesCount();
return total;
}
/* This method clears the selection */
void Tweener::clearSelection()
{
if (k->objects.size() > 0) {
foreach (QGraphicsItem *item, k->objects) {
if (item->isSelected())
item->setSelected(false);
}
k->objects.clear();
k->configurator->notifySelection(false);
}
}
/* This method disables object selection */
void Tweener::disableSelection()
{
foreach (QGraphicsView *view, k->scene->views()) {
view->setDragMode (QGraphicsView::NoDrag);
foreach (QGraphicsItem *item, view->scene()->items()) {
item->setFlag(QGraphicsItem::ItemIsSelectable, false);
item->setFlag(QGraphicsItem::ItemIsMovable, false);
}
}
}
void Tweener::updateCurrentTweenerType(TweenerPanel::TweenerType type)
{
// tFatal() << "updateCurrentTweenerType() - Just following type: " << type;
k->currentTweenType = type;
k->editMode = TweenerPanel::TweenProperties;
if (k->currentTweenType == TweenerPanel::Position) {
// tFatal() << "Tweener::updateCurrentTweenerType() - Setting path!";
setCreatePath();
} else {
// tFatal() << "Tweener::updateCurrentTweenerType() - Type is not Position!";
}
}
void Tweener::setPath(bool isEnabled, bool reset)
{
if (isEnabled) {
k->editMode = TweenerPanel::TweenProperties;
setCreatePath();
} else {
if (k->group) {
k->group->clear();
k->group = 0;
}
if (k->path) {
if (k->startPoint == k->scene->currentFrameIndex())
k->scene->removeItem(k->path);
k->pathAdded = false;
if (reset) {
QPainterPath::Element e = k->path->path().elementAt(0);
QPointF newPos = QPointF(e.x, e.y);
k->path = new QGraphicsPathItem;
k->path->setZValue(maxZValue());
QColor color = Qt::lightGray;
color.setAlpha(200);
QPen pen(QBrush(color), 1, Qt::DotLine);
k->path->setPen(pen);
QPainterPath path;
path.moveTo(newPos);
k->firstNode = newPos;
k->path->setPath(path);
}
}
}
}
void Tweener::tweenListMode()
{
k->editMode = TweenerPanel::TweenList;
disableSelection();
}
void Tweener::sceneResponse(const TupSceneResponse *event)
{
if ((event->action() == TupProjectRequest::Remove || event->action() == TupProjectRequest::Reset)
&& (k->scene->currentSceneIndex() == event->sceneIndex())) {
init(k->scene);
}
if (event->action() == TupProjectRequest::Select)
init(k->scene);
}
void Tweener::layerResponse(const TupLayerResponse *event)
{
if (event->action() == TupProjectRequest::Remove)
init(k->scene);
}
void Tweener::frameResponse(const TupFrameResponse *event)
{
if (event->action() == TupProjectRequest::Remove && k->scene->currentLayerIndex() == event->layerIndex())
init(k->scene);
}
| gpl-2.0 |
ChaosWars/logitechdaemon | src/dbusobject.c | 1 | 8847 | /***************************************************************************
* Copyright (C) 2007 by Lawrence Lee *
* valheru.ashen.shugar@gmail.com *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 3 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program; if not, write to the *
* Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
***************************************************************************/
#include <daemon.h>
#include <libg15.h>
#include <libg15render.h>
#include <stdbool.h>
#include <stdlib.h>
#include <dbus/dbus.h>
#include <dbus/dbus-glib-bindings.h>
#include "dbusobject.h"
#include "dbusobjectglue.h"
#include "logo.h"
enum {
LCD_BRIGHTNESS_SET = 0,
LCD_CONTRAST_SET,
KB_BRIGHTNESS_SET,
NUMBER_OF_SIGNALS
};
extern int kb_brightness;
static guint dbus_object_signals[NUMBER_OF_SIGNALS];
static GObjectClass *parent_class;
struct _DBusObjectPrivate {
gboolean dispose_has_run;
g15canvas *canvas;
};
GType dbus_object_get_type()
{
static GType type = 0;
if ( type == 0 ) {
static const GTypeInfo info = {
sizeof ( DBusObjectClass ),
NULL, /* base_init */
NULL, /* base_finalize */
dbus_object_class_init, /* class_init */
NULL, /* class_finalize */
NULL, /* class_data */
sizeof ( DBusObject ),
0, /* n_preallocs */
dbus_object_init /* instance_init */
};
type = g_type_register_static ( G_TYPE_OBJECT, "DBusObjectType", &info, 0 );
}
return type;
}
static void dbus_object_class_init ( DBusObjectClass *klass )
{
GObjectClass *gobject_class = G_OBJECT_CLASS ( klass );
gobject_class->dispose = dbus_object_dispose;
gobject_class->finalize = dbus_object_finalize;
parent_class = g_type_class_peek_parent ( klass );
g_type_class_add_private ( klass, sizeof ( DBusObjectPrivate ) );
GError *error = NULL;
klass->connection = dbus_g_bus_get ( DBUS_BUS_SYSTEM, &error );
dbus_object_signals[LCD_BRIGHTNESS_SET] = g_signal_new ( "lcd_brightness_set",
G_OBJECT_CLASS_TYPE ( klass ),
G_SIGNAL_RUN_LAST,
G_STRUCT_OFFSET ( DBusObjectClass, dbus_object_lcd_brightness_set ) /* class closure */,
NULL /* accumulator */,
NULL /* accu_data */,
g_cclosure_marshal_VOID__INT,
G_TYPE_NONE /* return_type */,
1 /* n_params */,
G_TYPE_INT /* param_types */ );
dbus_object_signals[LCD_CONTRAST_SET] = g_signal_new ( "lcd_contrast_set",
G_OBJECT_CLASS_TYPE ( klass ),
G_SIGNAL_RUN_LAST,
G_STRUCT_OFFSET ( DBusObjectClass, dbus_object_lcd_contrast_set ) /* class closure */,
NULL /* accumulator */,
NULL /* accu_data */,
g_cclosure_marshal_VOID__INT,
G_TYPE_NONE /* return_type */,
1 /* n_params */,
G_TYPE_INT /* param_types */ );
dbus_object_signals[KB_BRIGHTNESS_SET] = g_signal_new ( "kb_brightness_set",
G_OBJECT_CLASS_TYPE ( klass ),
G_SIGNAL_RUN_LAST,
G_STRUCT_OFFSET ( DBusObjectClass, dbus_object_kb_brightness_set ) /* class closure */,
NULL /* accumulator */,
NULL /* accu_data */,
g_cclosure_marshal_VOID__INT,
G_TYPE_NONE /* return_type */,
1 /* n_params */,
G_TYPE_INT /* param_types */ );
if ( klass->connection == NULL ) {
daemon_log ( LOG_ERR, "Failed to open connection to system bus: %s\n", error->message );
g_error_free ( error );
return;
}
dbus_g_object_type_install_info ( DBUS_OBJECT_TYPE, &dbus_glib_dbus_object_object_info );
}
static void dbus_object_init ( GTypeInstance *instance, gpointer g_class )
{
DBusObject *self = DBUS_OBJECT ( instance );
	self->priv = G_TYPE_INSTANCE_GET_PRIVATE ( self, DBUS_OBJECT_TYPE, DBusObjectPrivate );
self->priv->dispose_has_run = FALSE;
self->priv->canvas = g_new0 ( g15canvas, 1 );
DBusObjectClass *klass = DBUS_OBJECT_GET_CLASS ( instance );
DBusGProxy *proxy = dbus_g_proxy_new_for_name ( klass->connection, DBUS_SERVICE_DBUS, DBUS_PATH_DBUS, DBUS_INTERFACE_DBUS );
	GError *error = NULL;
guint32 request_name_ret;
if ( !org_freedesktop_DBus_request_name ( proxy, "com.googlecode.logitechg15", 0, &request_name_ret, &error ) ) {
daemon_log ( LOG_ERR, "Failed to obtain address on bus: %s\n", error->message );
g_error_free ( error );
}
if ( request_name_ret != DBUS_REQUEST_NAME_REPLY_PRIMARY_OWNER ) {
daemon_log ( LOG_ERR, "Address is already registered on bus\n" );
}
dbus_g_connection_register_g_object ( klass->connection, "/com/googlecode/logitechg15", G_OBJECT ( instance ) );
g_object_unref ( proxy );
}
static void dbus_object_dispose ( GObject *object )
{
DBusObject *self = DBUS_OBJECT ( object );
if ( self->priv->dispose_has_run ) {
/* If dispose did already run, return. */
return;
}
/* Make sure dispose does not run twice. */
self->priv->dispose_has_run = TRUE;
/*
* In dispose, you are supposed to free all types referenced from this
* object which might themselves hold a reference to self. Generally,
	 * the simplest solution is to unref all members on which you own a
* reference.
*/
/* Chain up to the parent class */
G_OBJECT_CLASS ( parent_class )->dispose ( object );
}
static void dbus_object_finalize ( GObject *object )
{
DBusObject *self = DBUS_OBJECT ( object );
	/* Free our own members first, then chain up to the parent class */
	g_free ( self->priv->canvas );
	G_OBJECT_CLASS ( parent_class )->finalize ( object );
}
static gboolean dbus_object_set_lcd_brightness ( DBusObject *object, gint32 IN_brightness, GError **error )
{
int retval = setLCDBrightness ( IN_brightness );
if ( retval < 0 ) {
g_set_error ( error, 0, 0, "Failed to set LCD brightness\n" );
return false;
}
g_signal_emit ( object, dbus_object_signals[LCD_BRIGHTNESS_SET], 0, IN_brightness );
return true;
}
static gboolean dbus_object_set_lcd_contrast ( DBusObject *object, gint32 IN_contrast, GError **error )
{
int retval = setLCDContrast ( IN_contrast );
if ( retval < 0 ) {
g_set_error ( error, 0, 0, "Failed to set LCD contrast\n" );
return false;
}
g_signal_emit ( object, dbus_object_signals[LCD_CONTRAST_SET], 0, IN_contrast );
return true;
}
static gboolean dbus_object_set_kb_brightness ( DBusObject *object, gint32 IN_brightness, GError **error )
{
int retval = setKBBrightness ( IN_brightness );
if ( retval < 0 ) {
g_set_error ( error, 0, 0, "Failed to set keyboard brightness.\n" );
return false;
}
kb_brightness = IN_brightness;
g_signal_emit ( object, dbus_object_signals[KB_BRIGHTNESS_SET], 0, IN_brightness );
return true;
}
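#if 0
/* Illustrative client-side sketch (not part of this daemon): how the
 * method exported above could be invoked over D-Bus with dbus-glib. The
 * method name "SetKbBrightness" and the interface name are assumptions —
 * the real names come from the introspection XML that generated
 * dbusobjectglue.h. */
static gboolean example_set_kb_brightness ( DBusGConnection *bus, gint level )
{
	GError *error = NULL;
	DBusGProxy *proxy = dbus_g_proxy_new_for_name ( bus,
	                        "com.googlecode.logitechg15",
	                        "/com/googlecode/logitechg15",
	                        "com.googlecode.logitechg15" );
	/* in-args terminated by G_TYPE_INVALID, then out-args (none here) */
	gboolean ok = dbus_g_proxy_call ( proxy, "SetKbBrightness", &error,
	                                  G_TYPE_INT, level, G_TYPE_INVALID,
	                                  G_TYPE_INVALID );
	if ( !ok ) {
		daemon_log ( LOG_ERR, "SetKbBrightness failed: %s\n", error->message );
		g_error_free ( error );
	}
	g_object_unref ( proxy );
	return ok;
}
#endif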
static gboolean dbus_object_blank_screen ( DBusObject *object, GError **error )
{
g15r_clearScreen ( object->priv->canvas, 0 );
writePixmapToLCD ( object->priv->canvas->buffer );
return true;
}
static gboolean dbus_object_show_logo ( DBusObject *object, GError **error )
{
memcpy ( object->priv->canvas->buffer, logo_data, G15_BUFFER_LEN );
writePixmapToLCD ( object->priv->canvas->buffer );
return true;
}
| gpl-2.0 |
dvj/dfu-util | src/dfu_file.c | 1 | 15218 | /*
* Load or store DFU files including suffix and prefix
*
* Copyright 2014 Tormod Volden <debian.tormod@gmail.com>
* Copyright 2012 Stefan Schmidt <stefan@datenfreihafen.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <time.h>
#include <fcntl.h>
#include "portable.h"
#include "dfu_file.h"
#define DFU_SUFFIX_LENGTH 16
#define LMDFU_PREFIX_LENGTH 8
#define LPCDFU_PREFIX_LENGTH 16
#define PROGRESS_BAR_WIDTH 25
#define STDIN_CHUNK_SIZE 65536
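/*
 * For reference, the 16-byte DFU suffix parsed and written below is laid
 * out as follows (per the USB DFU 1.1 spec; offsets from the start of the
 * suffix, multi-byte fields little-endian):
 *
 *   [0..1]   bcdDevice
 *   [2..3]   idProduct
 *   [4..5]   idVendor
 *   [6..7]   bcdDFU
 *   [8..10]  ucDfuSignature: 'U', 'F', 'D' (i.e. "DFU" read backwards)
 *   [11]     bLength of the suffix (16)
 *   [12..15] dwCRC, computed over the whole file except these 4 bytes
 */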
static const unsigned long crc32_table[] = {
0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f,
0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, 0x1db71064, 0x6ab020f2,
0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, 0x35b5a8fa, 0x42b2986c,
0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423,
0xcfba9599, 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, 0x01db7106,
0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d,
0x91646c97, 0xe6635c01, 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7,
0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa,
0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81,
0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, 0xe3630b12, 0x94643b84,
0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, 0xd6d6a3e8, 0xa1d1937e,
0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55,
0x316e8eef, 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe, 0xb2bd0b28,
0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f,
0x72076785, 0x05005713, 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69,
0x616bffd3, 0x166ccf45, 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc,
0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693,
0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d};
static uint32_t crc32_byte(uint32_t accum, uint8_t delta)
{
return crc32_table[(accum ^ delta) & 0xff] ^ (accum >> 8);
}
static int probe_prefix(struct dfu_file *file)
{
uint8_t *prefix = file->firmware;
if (file->size.total < LMDFU_PREFIX_LENGTH)
return 1;
if ((prefix[0] == 0x01) && (prefix[1] == 0x00)) {
file->prefix_type = LMDFU_PREFIX;
file->size.prefix = LMDFU_PREFIX_LENGTH;
file->lmdfu_address = 1024 * ((prefix[3] << 8) | prefix[2]);
}
else if (((prefix[0] & 0x3f) == 0x1a) && ((prefix[1] & 0x3f)== 0x3f)) {
file->prefix_type = LPCDFU_UNENCRYPTED_PREFIX;
file->size.prefix = LPCDFU_PREFIX_LENGTH;
}
if (file->size.prefix + file->size.suffix > file->size.total)
return 1;
return 0;
}
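/*
 * Illustrative summary of the two prefix formats probed above, as
 * inferred from this file's own parsing and writing code (see
 * dfu_store_file() below), not from an external spec:
 *
 *   TI Stellaris (LMDFU, 8 bytes):
 *     [0]=0x01 [1]=0x00 [2..3]=start address / 1024 (LE)
 *     [4..7]=payload length (LE)
 *   NXP LPC (LPCDFU, unencrypted, 16 bytes):
 *     [0]=0x1a [1]=0x3f [2..3]=payload length in 512-byte blocks (LE)
 *     [4..11]=0x00 [12..15]=0xff
 */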
void dfu_progress_bar(const char *desc, unsigned long long curr,
unsigned long long max)
{
static char buf[PROGRESS_BAR_WIDTH + 1];
static unsigned long long last_progress = -1;
static time_t last_time;
time_t curr_time = time(NULL);
unsigned long long progress;
unsigned long long x;
	/* check for an unknown maximum */
if (max < curr)
max = curr + 1;
/* make none out of none give zero */
if (max == 0 && curr == 0)
max = 1;
/* compute completion */
progress = (PROGRESS_BAR_WIDTH * curr) / max;
if (progress > PROGRESS_BAR_WIDTH)
progress = PROGRESS_BAR_WIDTH;
if (progress == last_progress &&
curr_time == last_time)
return;
last_progress = progress;
last_time = curr_time;
for (x = 0; x != PROGRESS_BAR_WIDTH; x++) {
if (x < progress)
buf[x] = '=';
else
buf[x] = ' ';
}
buf[x] = 0;
printf("\r%s\t[%s] %3lld%% %12lld bytes", desc, buf,
(100ULL * curr) / max, curr);
if (progress == PROGRESS_BAR_WIDTH)
printf("\n%s done.\n", desc);
}
void *dfu_malloc(size_t size)
{
void *ptr = malloc(size);
if (ptr == NULL)
errx(EX_SOFTWARE, "Cannot allocate memory of size %d bytes", (int)size);
return (ptr);
}
uint32_t dfu_file_write_crc(int f, uint32_t crc, const void *buf, int size)
{
int x;
/* compute CRC */
for (x = 0; x != size; x++)
crc = crc32_byte(crc, ((uint8_t *)buf)[x]);
/* write data */
if (write(f, buf, size) != size)
err(EX_IOERR, "Could not write %d bytes to file %d", size, f);
return (crc);
}
void dfu_load_file(struct dfu_file *file, enum suffix_req check_suffix, enum prefix_req check_prefix)
{
long int offset;
int f;
int i;
int res;
file->size.prefix = 0;
file->size.suffix = 0;
/* default values, if no valid suffix is found */
file->bcdDFU = 0;
file->idVendor = 0xffff; /* wildcard value */
file->idProduct = 0xffff; /* wildcard value */
file->bcdDevice = 0xffff; /* wildcard value */
/* default values, if no valid prefix is found */
file->lmdfu_address = 0;
free(file->firmware);
if (!strcmp(file->name, "-")) {
int read_bytes;
#ifdef WIN32
_setmode( _fileno( stdin ), _O_BINARY );
#endif
file->firmware = (uint8_t*) dfu_malloc(STDIN_CHUNK_SIZE);
read_bytes = fread(file->firmware, 1, STDIN_CHUNK_SIZE, stdin);
file->size.total = read_bytes;
while (read_bytes == STDIN_CHUNK_SIZE) {
file->firmware = (uint8_t*) realloc(file->firmware, file->size.total + STDIN_CHUNK_SIZE);
if (!file->firmware)
err(EX_IOERR, "Could not allocate firmware buffer");
read_bytes = fread(file->firmware + file->size.total, 1, STDIN_CHUNK_SIZE, stdin);
file->size.total += read_bytes;
}
if (verbose)
printf("Read %i bytes from stdin\n", file->size.total);
/* Never require suffix when reading from stdin */
check_suffix = MAYBE_SUFFIX;
} else {
f = open(file->name, O_RDONLY | O_BINARY);
if (f < 0)
err(EX_IOERR, "Could not open file %s for reading", file->name);
offset = lseek(f, 0, SEEK_END);
if ((int)offset < 0 || (int)offset != offset)
err(EX_IOERR, "File size is too big");
if (lseek(f, 0, SEEK_SET) != 0)
err(EX_IOERR, "Could not seek to beginning");
file->size.total = offset;
file->firmware = dfu_malloc(file->size.total);
if (read(f, file->firmware, file->size.total) != file->size.total) {
err(EX_IOERR, "Could not read %d bytes from %s",
file->size.total, file->name);
}
close(f);
}
/* Check for possible DFU file suffix by trying to parse one */
{
uint32_t crc = 0xffffffff;
const uint8_t *dfusuffix;
int missing_suffix = 0;
const char *reason;
if (file->size.total < DFU_SUFFIX_LENGTH) {
reason = "File too short for DFU suffix";
missing_suffix = 1;
goto checked;
}
dfusuffix = file->firmware + file->size.total -
DFU_SUFFIX_LENGTH;
for (i = 0; i < file->size.total - 4; i++)
crc = crc32_byte(crc, file->firmware[i]);
if (dfusuffix[10] != 'D' ||
dfusuffix[9] != 'F' ||
dfusuffix[8] != 'U') {
reason = "Invalid DFU suffix signature";
missing_suffix = 1;
goto checked;
}
file->dwCRC = (dfusuffix[15] << 24) +
(dfusuffix[14] << 16) +
(dfusuffix[13] << 8) +
dfusuffix[12];
if (file->dwCRC != crc) {
reason = "DFU suffix CRC does not match";
missing_suffix = 1;
goto checked;
}
/* At this point we believe we have a DFU suffix
so we require further checks to succeed */
file->bcdDFU = (dfusuffix[7] << 8) + dfusuffix[6];
if (verbose)
printf("DFU suffix version %x\n", file->bcdDFU);
file->size.suffix = dfusuffix[11];
if (file->size.suffix < DFU_SUFFIX_LENGTH) {
errx(EX_IOERR, "Unsupported DFU suffix length %d",
file->size.suffix);
}
if (file->size.suffix > file->size.total) {
errx(EX_IOERR, "Invalid DFU suffix length %d",
file->size.suffix);
}
file->idVendor = (dfusuffix[5] << 8) + dfusuffix[4];
file->idProduct = (dfusuffix[3] << 8) + dfusuffix[2];
file->bcdDevice = (dfusuffix[1] << 8) + dfusuffix[0];
checked:
if (missing_suffix) {
if (check_suffix == NEEDS_SUFFIX) {
warnx("%s", reason);
errx(EX_IOERR, "Valid DFU suffix needed");
} else if (check_suffix == MAYBE_SUFFIX) {
warnx("%s", reason);
warnx("A valid DFU suffix will be required in "
"a future dfu-util release!!!");
}
} else {
if (check_suffix == NO_SUFFIX) {
errx(EX_SOFTWARE, "Please remove existing DFU suffix before adding a new one.\n");
}
}
}
res = probe_prefix(file);
if ((res || file->size.prefix == 0) && check_prefix == NEEDS_PREFIX)
errx(EX_IOERR, "Valid DFU prefix needed");
if (file->size.prefix && check_prefix == NO_PREFIX)
errx(EX_IOERR, "A prefix already exists, please delete it first");
if (file->size.prefix && verbose) {
uint8_t *data = file->firmware;
if (file->prefix_type == LMDFU_PREFIX)
printf("Possible TI Stellaris DFU prefix with "
"the following properties\n"
"Address: 0x%08x\n"
"Payload length: %d\n",
file->lmdfu_address,
data[4] | (data[5] << 8) |
			       (data[6] << 16) | (data[7] << 24));
else if (file->prefix_type == LPCDFU_UNENCRYPTED_PREFIX)
printf("Possible unencrypted NXP LPC DFU prefix with "
"the following properties\n"
"Payload length: %d kiByte\n",
data[2] >>1 | (data[3] << 7) );
else
errx(EX_IOERR, "Unknown DFU prefix type");
}
}
void dfu_store_file(struct dfu_file *file, int write_suffix, int write_prefix)
{
uint32_t crc = 0xffffffff;
int f;
f = open(file->name, O_WRONLY | O_BINARY | O_TRUNC | O_CREAT, 0666);
if (f < 0)
err(EX_IOERR, "Could not open file %s for writing", file->name);
/* write prefix, if any */
if (write_prefix) {
if (file->prefix_type == LMDFU_PREFIX) {
uint8_t lmdfu_prefix[LMDFU_PREFIX_LENGTH];
uint32_t addr = file->lmdfu_address / 1024;
/* lmdfu_dfu_prefix payload length excludes prefix and suffix */
uint32_t len = file->size.total -
file->size.prefix - file->size.suffix;
lmdfu_prefix[0] = 0x01; /* STELLARIS_DFU_PROG */
lmdfu_prefix[1] = 0x00; /* Reserved */
lmdfu_prefix[2] = (uint8_t)(addr & 0xff);
lmdfu_prefix[3] = (uint8_t)(addr >> 8);
lmdfu_prefix[4] = (uint8_t)(len & 0xff);
lmdfu_prefix[5] = (uint8_t)(len >> 8) & 0xff;
lmdfu_prefix[6] = (uint8_t)(len >> 16) & 0xff;
lmdfu_prefix[7] = (uint8_t)(len >> 24);
crc = dfu_file_write_crc(f, crc, lmdfu_prefix, LMDFU_PREFIX_LENGTH);
}
if (file->prefix_type == LPCDFU_UNENCRYPTED_PREFIX) {
uint8_t lpcdfu_prefix[LPCDFU_PREFIX_LENGTH] = {0};
int i;
/* Payload is firmware and prefix rounded to 512 bytes */
uint32_t len = (file->size.total - file->size.suffix + 511) /512;
			lpcdfu_prefix[0] = 0x1a; /* Unencrypted */
lpcdfu_prefix[1] = 0x3f; /* Reserved */
lpcdfu_prefix[2] = (uint8_t)(len & 0xff);
lpcdfu_prefix[3] = (uint8_t)((len >> 8) & 0xff);
for (i = 12; i < LPCDFU_PREFIX_LENGTH; i++)
lpcdfu_prefix[i] = 0xff;
crc = dfu_file_write_crc(f, crc, lpcdfu_prefix, LPCDFU_PREFIX_LENGTH);
}
}
/* write firmware binary */
crc = dfu_file_write_crc(f, crc, file->firmware + file->size.prefix,
file->size.total - file->size.prefix - file->size.suffix);
/* write suffix, if any */
if (write_suffix) {
uint8_t dfusuffix[DFU_SUFFIX_LENGTH];
dfusuffix[0] = file->bcdDevice & 0xff;
dfusuffix[1] = file->bcdDevice >> 8;
dfusuffix[2] = file->idProduct & 0xff;
dfusuffix[3] = file->idProduct >> 8;
dfusuffix[4] = file->idVendor & 0xff;
dfusuffix[5] = file->idVendor >> 8;
dfusuffix[6] = file->bcdDFU & 0xff;
dfusuffix[7] = file->bcdDFU >> 8;
dfusuffix[8] = 'U';
dfusuffix[9] = 'F';
dfusuffix[10] = 'D';
dfusuffix[11] = DFU_SUFFIX_LENGTH;
crc = dfu_file_write_crc(f, crc, dfusuffix,
DFU_SUFFIX_LENGTH - 4);
dfusuffix[12] = crc;
dfusuffix[13] = crc >> 8;
dfusuffix[14] = crc >> 16;
dfusuffix[15] = crc >> 24;
crc = dfu_file_write_crc(f, crc, dfusuffix + 12, 4);
}
close(f);
}
void show_suffix_and_prefix(struct dfu_file *file)
{
if (file->size.prefix == LMDFU_PREFIX_LENGTH) {
printf("The file %s contains a TI Stellaris DFU prefix with the following properties:\n", file->name);
printf("Address:\t0x%08x\n", file->lmdfu_address);
} else if (file->size.prefix == LPCDFU_PREFIX_LENGTH) {
uint8_t * prefix = file->firmware;
printf("The file %s contains a NXP unencrypted LPC DFU prefix with the following properties:\n", file->name);
printf("Size:\t%5d kiB\n", prefix[2]>>1|prefix[3]<<7);
} else if (file->size.prefix != 0) {
printf("The file %s contains an unknown prefix\n", file->name);
}
if (file->size.suffix > 0) {
printf("The file %s contains a DFU suffix with the following properties:\n", file->name);
printf("BCD device:\t0x%04X\n", file->bcdDevice);
printf("Product ID:\t0x%04X\n",file->idProduct);
printf("Vendor ID:\t0x%04X\n", file->idVendor);
printf("BCD DFU:\t0x%04X\n", file->bcdDFU);
printf("Length:\t\t%i\n", file->size.suffix);
printf("CRC:\t\t0x%08X\n", file->dwCRC);
}
}
| gpl-2.0 |
110440/fastsocket | kernel/arch/arm/mach-msm/dma.c | 513 | 8166 | /* linux/arch/arm/mach-msm/dma.c
*
* Copyright (C) 2007 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/io.h>
#include <linux/interrupt.h>
#include <mach/dma.h>
#define MSM_DMOV_CHANNEL_COUNT 16
enum {
MSM_DMOV_PRINT_ERRORS = 1,
MSM_DMOV_PRINT_IO = 2,
MSM_DMOV_PRINT_FLOW = 4
};
static DEFINE_SPINLOCK(msm_dmov_lock);
static unsigned int channel_active;
static struct list_head ready_commands[MSM_DMOV_CHANNEL_COUNT];
static struct list_head active_commands[MSM_DMOV_CHANNEL_COUNT];
unsigned int msm_dmov_print_mask = MSM_DMOV_PRINT_ERRORS;
#define MSM_DMOV_DPRINTF(mask, format, args...) \
do { \
if ((mask) & msm_dmov_print_mask) \
printk(KERN_ERR format, args); \
} while (0)
#define PRINT_ERROR(format, args...) \
MSM_DMOV_DPRINTF(MSM_DMOV_PRINT_ERRORS, format, args);
#define PRINT_IO(format, args...) \
MSM_DMOV_DPRINTF(MSM_DMOV_PRINT_IO, format, args);
#define PRINT_FLOW(format, args...) \
MSM_DMOV_DPRINTF(MSM_DMOV_PRINT_FLOW, format, args);
void msm_dmov_stop_cmd(unsigned id, struct msm_dmov_cmd *cmd, int graceful)
{
writel((graceful << 31), DMOV_FLUSH0(id));
}
void msm_dmov_enqueue_cmd(unsigned id, struct msm_dmov_cmd *cmd)
{
unsigned long irq_flags;
unsigned int status;
spin_lock_irqsave(&msm_dmov_lock, irq_flags);
status = readl(DMOV_STATUS(id));
if (list_empty(&ready_commands[id]) &&
(status & DMOV_STATUS_CMD_PTR_RDY)) {
#if 0
if (list_empty(&active_commands[id])) {
PRINT_FLOW("msm_dmov_enqueue_cmd(%d), enable interrupt\n", id);
writel(DMOV_CONFIG_IRQ_EN, DMOV_CONFIG(id));
}
#endif
PRINT_IO("msm_dmov_enqueue_cmd(%d), start command, status %x\n", id, status);
list_add_tail(&cmd->list, &active_commands[id]);
if (!channel_active)
enable_irq(INT_ADM_AARM);
channel_active |= 1U << id;
writel(cmd->cmdptr, DMOV_CMD_PTR(id));
} else {
if (list_empty(&active_commands[id]))
PRINT_ERROR("msm_dmov_enqueue_cmd(%d), error datamover stalled, status %x\n", id, status);
PRINT_IO("msm_dmov_enqueue_cmd(%d), enqueue command, status %x\n", id, status);
list_add_tail(&cmd->list, &ready_commands[id]);
}
spin_unlock_irqrestore(&msm_dmov_lock, irq_flags);
}
struct msm_dmov_exec_cmdptr_cmd {
struct msm_dmov_cmd dmov_cmd;
struct completion complete;
unsigned id;
unsigned int result;
struct msm_dmov_errdata err;
};
static void
dmov_exec_cmdptr_complete_func(struct msm_dmov_cmd *_cmd,
unsigned int result,
struct msm_dmov_errdata *err)
{
struct msm_dmov_exec_cmdptr_cmd *cmd = container_of(_cmd, struct msm_dmov_exec_cmdptr_cmd, dmov_cmd);
cmd->result = result;
if (result != 0x80000002 && err)
memcpy(&cmd->err, err, sizeof(struct msm_dmov_errdata));
complete(&cmd->complete);
}
int msm_dmov_exec_cmd(unsigned id, unsigned int cmdptr)
{
struct msm_dmov_exec_cmdptr_cmd cmd;
PRINT_FLOW("dmov_exec_cmdptr(%d, %x)\n", id, cmdptr);
cmd.dmov_cmd.cmdptr = cmdptr;
cmd.dmov_cmd.complete_func = dmov_exec_cmdptr_complete_func;
cmd.id = id;
init_completion(&cmd.complete);
msm_dmov_enqueue_cmd(id, &cmd.dmov_cmd);
wait_for_completion(&cmd.complete);
if (cmd.result != 0x80000002) {
PRINT_ERROR("dmov_exec_cmdptr(%d): ERROR, result: %x\n", id, cmd.result);
PRINT_ERROR("dmov_exec_cmdptr(%d): flush: %x %x %x %x\n",
id, cmd.err.flush[0], cmd.err.flush[1], cmd.err.flush[2], cmd.err.flush[3]);
return -EIO;
}
PRINT_FLOW("dmov_exec_cmdptr(%d, %x) done\n", id, cmdptr);
return 0;
}
static irqreturn_t msm_datamover_irq_handler(int irq, void *dev_id)
{
unsigned int int_status, mask, id;
unsigned long irq_flags;
unsigned int ch_status;
unsigned int ch_result;
struct msm_dmov_cmd *cmd;
spin_lock_irqsave(&msm_dmov_lock, irq_flags);
int_status = readl(DMOV_ISR); /* read and clear interrupt */
PRINT_FLOW("msm_datamover_irq_handler: DMOV_ISR %x\n", int_status);
while (int_status) {
mask = int_status & -int_status;
id = fls(mask) - 1;
PRINT_FLOW("msm_datamover_irq_handler %08x %08x id %d\n", int_status, mask, id);
int_status &= ~mask;
ch_status = readl(DMOV_STATUS(id));
if (!(ch_status & DMOV_STATUS_RSLT_VALID)) {
PRINT_FLOW("msm_datamover_irq_handler id %d, result not valid %x\n", id, ch_status);
continue;
}
do {
ch_result = readl(DMOV_RSLT(id));
if (list_empty(&active_commands[id])) {
PRINT_ERROR("msm_datamover_irq_handler id %d, got result "
"with no active command, status %x, result %x\n",
id, ch_status, ch_result);
cmd = NULL;
} else
cmd = list_entry(active_commands[id].next, typeof(*cmd), list);
PRINT_FLOW("msm_datamover_irq_handler id %d, status %x, result %x\n", id, ch_status, ch_result);
if (ch_result & DMOV_RSLT_DONE) {
PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n",
id, ch_status);
PRINT_IO("msm_datamover_irq_handler id %d, got result "
"for %p, result %x\n", id, cmd, ch_result);
if (cmd) {
list_del(&cmd->list);
cmd->complete_func(cmd, ch_result, NULL);
}
}
if (ch_result & DMOV_RSLT_FLUSH) {
struct msm_dmov_errdata errdata;
errdata.flush[0] = readl(DMOV_FLUSH0(id));
errdata.flush[1] = readl(DMOV_FLUSH1(id));
errdata.flush[2] = readl(DMOV_FLUSH2(id));
errdata.flush[3] = readl(DMOV_FLUSH3(id));
errdata.flush[4] = readl(DMOV_FLUSH4(id));
errdata.flush[5] = readl(DMOV_FLUSH5(id));
PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n", id, ch_status);
PRINT_FLOW("msm_datamover_irq_handler id %d, flush, result %x, flush0 %x\n", id, ch_result, errdata.flush[0]);
if (cmd) {
list_del(&cmd->list);
cmd->complete_func(cmd, ch_result, &errdata);
}
}
if (ch_result & DMOV_RSLT_ERROR) {
struct msm_dmov_errdata errdata;
errdata.flush[0] = readl(DMOV_FLUSH0(id));
errdata.flush[1] = readl(DMOV_FLUSH1(id));
errdata.flush[2] = readl(DMOV_FLUSH2(id));
errdata.flush[3] = readl(DMOV_FLUSH3(id));
errdata.flush[4] = readl(DMOV_FLUSH4(id));
errdata.flush[5] = readl(DMOV_FLUSH5(id));
PRINT_ERROR("msm_datamover_irq_handler id %d, status %x\n", id, ch_status);
PRINT_ERROR("msm_datamover_irq_handler id %d, error, result %x, flush0 %x\n", id, ch_result, errdata.flush[0]);
if (cmd) {
list_del(&cmd->list);
cmd->complete_func(cmd, ch_result, &errdata);
}
/* this does not seem to work, once we get an error */
/* the datamover will no longer accept commands */
writel(0, DMOV_FLUSH0(id));
}
ch_status = readl(DMOV_STATUS(id));
PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n", id, ch_status);
if ((ch_status & DMOV_STATUS_CMD_PTR_RDY) && !list_empty(&ready_commands[id])) {
cmd = list_entry(ready_commands[id].next, typeof(*cmd), list);
list_del(&cmd->list);
list_add_tail(&cmd->list, &active_commands[id]);
PRINT_FLOW("msm_datamover_irq_handler id %d, start command\n", id);
writel(cmd->cmdptr, DMOV_CMD_PTR(id));
}
} while (ch_status & DMOV_STATUS_RSLT_VALID);
if (list_empty(&active_commands[id]) && list_empty(&ready_commands[id]))
channel_active &= ~(1U << id);
PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n", id, ch_status);
}
if (!channel_active)
disable_irq(INT_ADM_AARM);
spin_unlock_irqrestore(&msm_dmov_lock, irq_flags);
return IRQ_HANDLED;
}
static int __init msm_init_datamover(void)
{
int i;
int ret;
for (i = 0; i < MSM_DMOV_CHANNEL_COUNT; i++) {
INIT_LIST_HEAD(&ready_commands[i]);
INIT_LIST_HEAD(&active_commands[i]);
writel(DMOV_CONFIG_IRQ_EN | DMOV_CONFIG_FORCE_TOP_PTR_RSLT | DMOV_CONFIG_FORCE_FLUSH_RSLT, DMOV_CONFIG(i));
}
ret = request_irq(INT_ADM_AARM, msm_datamover_irq_handler, 0, "msmdatamover", NULL);
if (ret)
return ret;
disable_irq(INT_ADM_AARM);
return 0;
}
arch_initcall(msm_init_datamover);
| gpl-2.0 |
rperier/linux-rockchip | drivers/staging/media/atomisp/pci/mmu/sh_mmu_mrfld.c | 513 | 1993 | // SPDX-License-Identifier: GPL-2.0
/*
* Support for Merrifield PNW Camera Imaging ISP subsystem.
*
* Copyright (c) 2012 Intel Corporation. All Rights Reserved.
*
* Copyright (c) 2012 Silicon Hive www.siliconhive.com.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
*/
#include "type_support.h"
#include "mmu/isp_mmu.h"
#include "mmu/sh_mmu_mrfld.h"
#include "atomisp_compat.h"
#define MERR_VALID_PTE_MASK 0x80000000
/*
* include SH header file here
*/
static unsigned int sh_phys_to_pte(struct isp_mmu *mmu,
phys_addr_t phys)
{
return phys >> ISP_PAGE_OFFSET;
}
static phys_addr_t sh_pte_to_phys(struct isp_mmu *mmu,
unsigned int pte)
{
unsigned int mask = mmu->driver->pte_valid_mask;
return (phys_addr_t)((pte & ~mask) << ISP_PAGE_OFFSET);
}
static unsigned int sh_get_pd_base(struct isp_mmu *mmu,
phys_addr_t phys)
{
unsigned int pte = sh_phys_to_pte(mmu, phys);
return HOST_ADDRESS(pte);
}
/*
* callback to flush tlb.
*
* tlb_flush_range will at least flush TLBs containing
* address mapping from addr to addr + size.
*
* tlb_flush_all will flush all TLBs.
*
 * tlb_flush_all must be provided. If tlb_flush_range is
 * not provided, it defaults to tlb_flush_all.
*/
static void sh_tlb_flush(struct isp_mmu *mmu)
{
ia_css_mmu_invalidate_cache();
}
struct isp_mmu_client sh_mmu_mrfld = {
.name = "Silicon Hive ISP3000 MMU",
.pte_valid_mask = MERR_VALID_PTE_MASK,
.null_pte = ~MERR_VALID_PTE_MASK,
.get_pd_base = sh_get_pd_base,
.tlb_flush_all = sh_tlb_flush,
.phys_to_pte = sh_phys_to_pte,
.pte_to_phys = sh_pte_to_phys,
};
| gpl-2.0 |
rilian-la-te/kernel | net/irda/discovery.c | 769 | 12757 | /*********************************************************************
*
* Filename: discovery.c
* Version: 0.1
* Description: Routines for handling discoveries at the IrLMP layer
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Tue Apr 6 15:33:50 1999
* Modified at: Sat Oct 9 17:11:31 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
* Modified at: Fri May 28 3:11 CST 1999
* Modified by: Horst von Brand <vonbrand@sleipnir.valparaiso.cl>
*
* Copyright (c) 1999 Dag Brattli, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/irda/irda.h>
#include <net/irda/irlmp.h>
#include <net/irda/discovery.h>
#include <asm/unaligned.h>
/*
* Function irlmp_add_discovery (cachelog, discovery)
*
* Add a new discovery to the cachelog, and remove any old discoveries
* from the same device
*
* Note : we try to preserve the time this device was *first* discovered
* (as opposed to the time of last discovery used for cleanup). This is
* used by clients waiting for discovery events to tell if the device
* discovered is "new" or just the same old one. They can't rely there
* on a binary flag (new/old), because not all discovery events are
* propagated to them, and they might not always listen, so they would
* miss some new devices popping up...
* Jean II
*/
void irlmp_add_discovery(hashbin_t *cachelog, discovery_t *new)
{
discovery_t *discovery, *node;
unsigned long flags;
/* Set time of first discovery if node is new (see below) */
new->firststamp = new->timestamp;
spin_lock_irqsave(&cachelog->hb_spinlock, flags);
/*
* Remove all discoveries of devices that has previously been
* discovered on the same link with the same name (info), or the
* same daddr. We do this since some devices (mostly PDAs) change
* their device address between every discovery.
*/
discovery = (discovery_t *) hashbin_get_first(cachelog);
while (discovery != NULL ) {
node = discovery;
/* Be sure to stay one item ahead */
discovery = (discovery_t *) hashbin_get_next(cachelog);
if ((node->data.saddr == new->data.saddr) &&
((node->data.daddr == new->data.daddr) ||
(strcmp(node->data.info, new->data.info) == 0)))
{
/* This discovery is a previous discovery
* from the same device, so just remove it
*/
hashbin_remove_this(cachelog, (irda_queue_t *) node);
/* Check if hints bits are unchanged */
if (get_unaligned((__u16 *)node->data.hints) == get_unaligned((__u16 *)new->data.hints))
/* Set time of first discovery for this node */
new->firststamp = node->firststamp;
kfree(node);
}
}
/* Insert the new and updated version */
hashbin_insert(cachelog, (irda_queue_t *) new, new->data.daddr, NULL);
spin_unlock_irqrestore(&cachelog->hb_spinlock, flags);
}
/*
* Function irlmp_add_discovery_log (cachelog, log)
*
 * Merge a discovery log into the cachelog.
*
*/
void irlmp_add_discovery_log(hashbin_t *cachelog, hashbin_t *log)
{
discovery_t *discovery;
IRDA_DEBUG(4, "%s()\n", __func__);
/*
 * If the log is missing, it means that IrLAP was unable to perform the
 * discovery, so restart discovery again with just half of the normal
 * timeout.
*/
/* Well... It means that there was nobody out there - Jean II */
if (log == NULL) {
/* irlmp_start_discovery_timer(irlmp, 150); */
return;
}
/*
* Locking : we are the only owner of this discovery log, so
* no need to lock it.
* We just need to lock the global log in irlmp_add_discovery().
*/
discovery = (discovery_t *) hashbin_remove_first(log);
while (discovery != NULL) {
irlmp_add_discovery(cachelog, discovery);
discovery = (discovery_t *) hashbin_remove_first(log);
}
/* Delete the now empty log */
hashbin_delete(log, (FREE_FUNC) kfree);
}
/*
* Function irlmp_expire_discoveries (log, saddr, force)
*
* Go through all discoveries and expire all that has stayed too long
*
 * Note : this assumes that IrLAP won't change its saddr, which
* currently is a valid assumption...
*/
void irlmp_expire_discoveries(hashbin_t *log, __u32 saddr, int force)
{
discovery_t * discovery;
discovery_t * curr;
unsigned long flags;
discinfo_t * buffer = NULL;
int n; /* Size of the full log */
int i = 0; /* How many we expired */
IRDA_ASSERT(log != NULL, return;);
IRDA_DEBUG(4, "%s()\n", __func__);
spin_lock_irqsave(&log->hb_spinlock, flags);
discovery = (discovery_t *) hashbin_get_first(log);
while (discovery != NULL) {
/* Be sure to be one item ahead */
curr = discovery;
discovery = (discovery_t *) hashbin_get_next(log);
/* Test if it's time to expire this discovery */
if ((curr->data.saddr == saddr) &&
(force ||
((jiffies - curr->timestamp) > DISCOVERY_EXPIRE_TIMEOUT)))
{
/* Create buffer as needed.
			 * As this function gets called a lot and most of the time
* we don't have anything to put in the log (we are
* quite picky), we can save a lot of overhead
* by not calling kmalloc. Jean II */
if(buffer == NULL) {
/* Create the client specific buffer */
n = HASHBIN_GET_SIZE(log);
buffer = kmalloc(n * sizeof(struct irda_device_info), GFP_ATOMIC);
if (buffer == NULL) {
spin_unlock_irqrestore(&log->hb_spinlock, flags);
return;
}
}
/* Copy discovery information */
memcpy(&(buffer[i]), &(curr->data),
sizeof(discinfo_t));
i++;
/* Remove it from the log */
curr = hashbin_remove_this(log, (irda_queue_t *) curr);
kfree(curr);
}
}
/* Drop the spinlock before calling the higher layers, as
* we can't guarantee they won't call us back and create a
* deadlock. We will work on our own private data, so we
* don't care to be interrupted. - Jean II */
spin_unlock_irqrestore(&log->hb_spinlock, flags);
if(buffer == NULL)
return;
/* Tell IrLMP and registered clients about it */
irlmp_discovery_expiry(buffer, i);
/* Free up our buffer */
kfree(buffer);
}
#if 0
/*
* Function irlmp_dump_discoveries (log)
*
* Print out all discoveries in log
*
*/
void irlmp_dump_discoveries(hashbin_t *log)
{
discovery_t *discovery;
IRDA_ASSERT(log != NULL, return;);
discovery = (discovery_t *) hashbin_get_first(log);
while (discovery != NULL) {
IRDA_DEBUG(0, "Discovery:\n");
IRDA_DEBUG(0, " daddr=%08x\n", discovery->data.daddr);
IRDA_DEBUG(0, " saddr=%08x\n", discovery->data.saddr);
IRDA_DEBUG(0, " nickname=%s\n", discovery->data.info);
discovery = (discovery_t *) hashbin_get_next(log);
}
}
#endif
/*
* Function irlmp_copy_discoveries (log, pn, mask)
*
* Copy all discoveries in a buffer
*
 * This function implements a safe way for lmp clients to access the
* discovery log. The basic problem is that we don't want the log
* to change (add/remove) while the client is reading it. If the
 * lmp client manipulates the hashbin directly, he is sure to get
 * into trouble...
* The idea is that we copy all the current discovery log in a buffer
* which is specific to the client and pass this copy to him. As we
* do this operation with the spinlock grabbed, we are safe...
* Note : we don't want those clients to grab the spinlock, because
* we have no control on how long they will hold it...
* Note : we choose to copy the log in "struct irda_device_info" to
* save space...
* Note : the client must kfree himself() the log...
* Jean II
*/
struct irda_device_info *irlmp_copy_discoveries(hashbin_t *log, int *pn,
__u16 mask, int old_entries)
{
discovery_t * discovery;
unsigned long flags;
discinfo_t * buffer = NULL;
int j_timeout = (sysctl_discovery_timeout * HZ);
int n; /* Size of the full log */
int i = 0; /* How many we picked */
IRDA_ASSERT(pn != NULL, return NULL;);
IRDA_ASSERT(log != NULL, return NULL;);
/* Save spin lock */
spin_lock_irqsave(&log->hb_spinlock, flags);
discovery = (discovery_t *) hashbin_get_first(log);
while (discovery != NULL) {
/* Mask out the ones we don't want :
* We want to match the discovery mask, and to get only
* the most recent one (unless we want old ones) */
if ((get_unaligned((__u16 *)discovery->data.hints) & mask) &&
((old_entries) ||
((jiffies - discovery->firststamp) < j_timeout))) {
/* Create buffer as needed.
			 * As this function gets called a lot and most of the time
* we don't have anything to put in the log (we are
* quite picky), we can save a lot of overhead
* by not calling kmalloc. Jean II */
if(buffer == NULL) {
/* Create the client specific buffer */
n = HASHBIN_GET_SIZE(log);
buffer = kmalloc(n * sizeof(struct irda_device_info), GFP_ATOMIC);
if (buffer == NULL) {
spin_unlock_irqrestore(&log->hb_spinlock, flags);
return NULL;
}
}
/* Copy discovery information */
memcpy(&(buffer[i]), &(discovery->data),
sizeof(discinfo_t));
i++;
}
discovery = (discovery_t *) hashbin_get_next(log);
}
spin_unlock_irqrestore(&log->hb_spinlock, flags);
/* Get the actual number of device in the buffer and return */
*pn = i;
return buffer;
}
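#if 0
/* Illustrative sketch (not shipped in this file): how an IrLMP client is
 * expected to consume the private copy returned above — the client owns
 * the buffer and must kfree() it. The function name is hypothetical. */
static void example_dump_discoveries(hashbin_t *log)
{
	struct irda_device_info *info;
	int i, n;

	/* Match any hint bits, include old entries */
	info = irlmp_copy_discoveries(log, &n, 0xffff, 1);
	if (info == NULL)
		return;
	for (i = 0; i < n; i++)
		IRDA_DEBUG(0, "nickname=%s, daddr=%08x\n",
			   info[i].info, info[i].daddr);
	kfree(info);
}
#endif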
#ifdef CONFIG_PROC_FS
static inline discovery_t *discovery_seq_idx(loff_t pos)
{
discovery_t *discovery;
for (discovery = (discovery_t *) hashbin_get_first(irlmp->cachelog);
discovery != NULL;
discovery = (discovery_t *) hashbin_get_next(irlmp->cachelog)) {
if (pos-- == 0)
break;
}
return discovery;
}
static void *discovery_seq_start(struct seq_file *seq, loff_t *pos)
{
spin_lock_irq(&irlmp->cachelog->hb_spinlock);
return *pos ? discovery_seq_idx(*pos - 1) : SEQ_START_TOKEN;
}
static void *discovery_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
return (v == SEQ_START_TOKEN)
? (void *) hashbin_get_first(irlmp->cachelog)
: (void *) hashbin_get_next(irlmp->cachelog);
}
static void discovery_seq_stop(struct seq_file *seq, void *v)
{
spin_unlock_irq(&irlmp->cachelog->hb_spinlock);
}
static int discovery_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
seq_puts(seq, "IrLMP: Discovery log:\n\n");
else {
const discovery_t *discovery = v;
seq_printf(seq, "nickname: %s, hint: 0x%02x%02x",
discovery->data.info,
discovery->data.hints[0],
discovery->data.hints[1]);
#if 0
if ( discovery->data.hints[0] & HINT_PNP)
seq_puts(seq, "PnP Compatible ");
if ( discovery->data.hints[0] & HINT_PDA)
seq_puts(seq, "PDA/Palmtop ");
if ( discovery->data.hints[0] & HINT_COMPUTER)
seq_puts(seq, "Computer ");
if ( discovery->data.hints[0] & HINT_PRINTER)
seq_puts(seq, "Printer ");
if ( discovery->data.hints[0] & HINT_MODEM)
seq_puts(seq, "Modem ");
if ( discovery->data.hints[0] & HINT_FAX)
seq_puts(seq, "Fax ");
if ( discovery->data.hints[0] & HINT_LAN)
seq_puts(seq, "LAN Access ");
if ( discovery->data.hints[1] & HINT_TELEPHONY)
seq_puts(seq, "Telephony ");
if ( discovery->data.hints[1] & HINT_FILE_SERVER)
seq_puts(seq, "File Server ");
if ( discovery->data.hints[1] & HINT_COMM)
seq_puts(seq, "IrCOMM ");
if ( discovery->data.hints[1] & HINT_OBEX)
seq_puts(seq, "IrOBEX ");
#endif
seq_printf(seq,", saddr: 0x%08x, daddr: 0x%08x\n\n",
discovery->data.saddr,
discovery->data.daddr);
seq_putc(seq, '\n');
}
return 0;
}
static const struct seq_operations discovery_seq_ops = {
.start = discovery_seq_start,
.next = discovery_seq_next,
.stop = discovery_seq_stop,
.show = discovery_seq_show,
};
static int discovery_seq_open(struct inode *inode, struct file *file)
{
IRDA_ASSERT(irlmp != NULL, return -EINVAL;);
return seq_open(file, &discovery_seq_ops);
}
const struct file_operations discovery_seq_fops = {
.owner = THIS_MODULE,
.open = discovery_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
#endif
| gpl-2.0 |
avareldalton85/rt-linux-rpi2 | kernel/kthread.c | 769 | 19043 | /* Kernel thread helper functions.
* Copyright (C) 2004 IBM Corporation, Rusty Russell.
*
* Creation is done via kthreadd, so that we get a clean environment
* even if we're invoked from userspace (think modprobe, hotplug cpu,
* etc.).
*/
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <trace/events/sched.h>
static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;
struct kthread_create_info
{
/* Information passed to kthread() from kthreadd. */
int (*threadfn)(void *data);
void *data;
int node;
/* Result passed back to kthread_create() from kthreadd. */
struct task_struct *result;
struct completion *done;
struct list_head list;
};
struct kthread {
unsigned long flags;
unsigned int cpu;
void *data;
struct completion parked;
struct completion exited;
};
enum KTHREAD_BITS {
KTHREAD_IS_PER_CPU = 0,
KTHREAD_SHOULD_STOP,
KTHREAD_SHOULD_PARK,
KTHREAD_IS_PARKED,
};
#define __to_kthread(vfork) \
container_of(vfork, struct kthread, exited)
static inline struct kthread *to_kthread(struct task_struct *k)
{
return __to_kthread(k->vfork_done);
}
static struct kthread *to_live_kthread(struct task_struct *k)
{
struct completion *vfork = ACCESS_ONCE(k->vfork_done);
if (likely(vfork))
return __to_kthread(vfork);
return NULL;
}
/**
* kthread_should_stop - should this kthread return now?
*
* When someone calls kthread_stop() on your kthread, it will be woken
* and this will return true. You should then return, and your return
* value will be passed through to kthread_stop().
*/
bool kthread_should_stop(void)
{
return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);
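/*
 * Illustrative sketch (not part of the original file): the canonical
 * main loop a thread function runs against kthread_should_stop().
 * The function and data names are invented for this example.
 */
static int example_threadfn(void *data)
{
	while (!kthread_should_stop()) {
		/* ... do one unit of work ... */
		schedule_timeout_interruptible(HZ);
	}
	/* this value is handed back to the kthread_stop() caller */
	return 0;
}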
/**
* kthread_should_park - should this kthread park now?
*
* When someone calls kthread_park() on your kthread, it will be woken
* and this will return true. You should then do the necessary
 * cleanup and call kthread_parkme().
*
* Similar to kthread_should_stop(), but this keeps the thread alive
* and in a park position. kthread_unpark() "restarts" the thread and
* calls the thread function again.
*/
bool kthread_should_park(void)
{
return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
}
/**
* kthread_freezable_should_stop - should this freezable kthread return now?
* @was_frozen: optional out parameter, indicates whether %current was frozen
*
* kthread_should_stop() for freezable kthreads, which will enter
* refrigerator if necessary. This function is safe from kthread_stop() /
* freezer deadlock and freezable kthreads should use this function instead
* of calling try_to_freeze() directly.
*/
bool kthread_freezable_should_stop(bool *was_frozen)
{
bool frozen = false;
might_sleep();
if (unlikely(freezing(current)))
frozen = __refrigerator(true);
if (was_frozen)
*was_frozen = frozen;
return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
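/*
 * Illustrative sketch (not part of the original file): a freezable
 * kthread main loop built on kthread_freezable_should_stop().  All
 * names are invented; set_freezable() must be called first or the
 * freezer will skip this thread entirely.
 */
static int example_freezable_threadfn(void *data)
{
	bool was_frozen;

	set_freezable();
	while (!kthread_freezable_should_stop(&was_frozen)) {
		if (was_frozen)
			pr_debug("thawed after suspend, resuming work\n");
		/* ... do one unit of work ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}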
/**
* kthread_data - return data value specified on kthread creation
* @task: kthread task in question
*
* Return the data value specified when kthread @task was created.
* The caller is responsible for ensuring the validity of @task when
* calling this function.
*/
void *kthread_data(struct task_struct *task)
{
return to_kthread(task)->data;
}
/**
* probe_kthread_data - speculative version of kthread_data()
* @task: possible kthread task in question
*
* @task could be a kthread task. Return the data value specified when it
* was created if accessible. If @task isn't a kthread task or its data is
* inaccessible for any reason, %NULL is returned. This function requires
* that @task itself is safe to dereference.
*/
void *probe_kthread_data(struct task_struct *task)
{
struct kthread *kthread = to_kthread(task);
void *data = NULL;
probe_kernel_read(&data, &kthread->data, sizeof(data));
return data;
}
static void __kthread_parkme(struct kthread *self)
{
__set_current_state(TASK_PARKED);
while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
complete(&self->parked);
schedule();
__set_current_state(TASK_PARKED);
}
clear_bit(KTHREAD_IS_PARKED, &self->flags);
__set_current_state(TASK_RUNNING);
}
void kthread_parkme(void)
{
__kthread_parkme(to_kthread(current));
}
static int kthread(void *_create)
{
/* Copy data: it's on kthread's stack */
struct kthread_create_info *create = _create;
int (*threadfn)(void *data) = create->threadfn;
void *data = create->data;
struct completion *done;
struct kthread self;
int ret;
self.flags = 0;
self.data = data;
init_completion(&self.exited);
init_completion(&self.parked);
current->vfork_done = &self.exited;
/* If user was SIGKILLed, I release the structure. */
done = xchg(&create->done, NULL);
if (!done) {
kfree(create);
do_exit(-EINTR);
}
/* OK, tell user we're spawned, wait for stop or wakeup */
__set_current_state(TASK_UNINTERRUPTIBLE);
create->result = current;
complete(done);
schedule();
ret = -EINTR;
if (!test_bit(KTHREAD_SHOULD_STOP, &self.flags)) {
__kthread_parkme(&self);
ret = threadfn(data);
}
/* we can't just return, we must preserve "self" on stack */
do_exit(ret);
}
/* called from do_fork() to get node information for the task about to be created */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
if (tsk == kthreadd_task)
return tsk->pref_node_fork;
#endif
return NUMA_NO_NODE;
}
static void create_kthread(struct kthread_create_info *create)
{
int pid;
#ifdef CONFIG_NUMA
current->pref_node_fork = create->node;
#endif
/* We want our own signal handler (we take no signals by default). */
pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
if (pid < 0) {
/* If user was SIGKILLed, I release the structure. */
struct completion *done = xchg(&create->done, NULL);
if (!done) {
kfree(create);
return;
}
create->result = ERR_PTR(pid);
complete(done);
}
}
/**
* kthread_create_on_node - create a kthread.
* @threadfn: the function to run until signal_pending(current).
* @data: data ptr for @threadfn.
* @node: memory node number.
* @namefmt: printf-style name for the thread.
*
* Description: This helper function creates and names a kernel
* thread. The thread will be stopped: use wake_up_process() to start
* it. See also kthread_run().
*
 * If the thread is going to be bound to a particular cpu, give its node
 * in @node, to get NUMA affinity for the kthread stack, or else give -1.
* When woken, the thread will run @threadfn() with @data as its
* argument. @threadfn() can either call do_exit() directly if it is a
* standalone thread for which no one will call kthread_stop(), or
* return when 'kthread_should_stop()' is true (which means
* kthread_stop() has been called). The return value should be zero
* or a negative error number; it will be passed to kthread_stop().
*
* Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
*/
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
void *data, int node,
const char namefmt[],
...)
{
DECLARE_COMPLETION_ONSTACK(done);
struct task_struct *task;
struct kthread_create_info *create = kmalloc(sizeof(*create),
GFP_KERNEL);
if (!create)
return ERR_PTR(-ENOMEM);
create->threadfn = threadfn;
create->data = data;
create->node = node;
create->done = &done;
spin_lock(&kthread_create_lock);
list_add_tail(&create->list, &kthread_create_list);
spin_unlock(&kthread_create_lock);
wake_up_process(kthreadd_task);
/*
* Wait for completion in killable state, for I might be chosen by
* the OOM killer while kthreadd is trying to allocate memory for
* new kernel thread.
*/
if (unlikely(wait_for_completion_killable(&done))) {
/*
* If I was SIGKILLed before kthreadd (or new kernel thread)
* calls complete(), leave the cleanup of this structure to
* that thread.
*/
if (xchg(&create->done, NULL))
return ERR_PTR(-EINTR);
/*
* kthreadd (or new kernel thread) will call complete()
* shortly.
*/
wait_for_completion(&done);
}
task = create->result;
if (!IS_ERR(task)) {
static const struct sched_param param = { .sched_priority = 0 };
va_list args;
va_start(args, namefmt);
vsnprintf(task->comm, sizeof(task->comm), namefmt, args);
va_end(args);
/*
* root may have changed our (kthreadd's) priority or CPU mask.
* The kernel thread should not inherit these properties.
*/
sched_setscheduler_nocheck(task, SCHED_NORMAL, ¶m);
set_cpus_allowed_ptr(task, cpu_all_mask);
}
kfree(create);
return task;
}
EXPORT_SYMBOL(kthread_create_on_node);
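/*
 * Illustrative sketch (not part of the original file): the caller's
 * side of the API documented above.  A thread created here starts
 * stopped and runs nothing until wake_up_process(); kthread_stop()
 * later collects the thread function's return value.  example_task is
 * an invented name, and example_threadfn is the stand-in thread
 * function sketched earlier.
 */
static struct task_struct *example_task;

static int example_start(void *data)
{
	example_task = kthread_create_on_node(example_threadfn, data,
					      NUMA_NO_NODE, "example/%d", 0);
	if (IS_ERR(example_task))
		return PTR_ERR(example_task);
	wake_up_process(example_task);		/* nothing runs until this */
	return 0;
}

static void example_stop(void)
{
	int ret = kthread_stop(example_task);	/* threadfn's return value */

	pr_info("example thread exited with %d\n", ret);
}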
static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
/* Must have done schedule() in kthread() before we set_task_cpu */
if (!wait_task_inactive(p, state)) {
WARN_ON(1);
return;
}
/* It's safe because the task is inactive. */
do_set_cpus_allowed(p, cpumask_of(cpu));
p->flags |= PF_NO_SETAFFINITY;
}
/**
* kthread_bind - bind a just-created kthread to a cpu.
* @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
*
* Description: This function is equivalent to set_cpus_allowed(),
* except that @cpu doesn't need to be online, and the thread must be
* stopped (i.e., just returned from kthread_create()).
*/
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);
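/*
 * Illustrative sketch (not part of the original file): binding a
 * just-created (still stopped) kthread to a CPU before its first
 * wakeup, per the rules in the comment above.  Names are invented.
 */
static int example_start_on_cpu(int (*fn)(void *), void *data,
				unsigned int cpu)
{
	struct task_struct *t;

	t = kthread_create(fn, data, "example/%u", cpu);
	if (IS_ERR(t))
		return PTR_ERR(t);
	kthread_bind(t, cpu);	/* must precede the first wake_up_process() */
	wake_up_process(t);
	return 0;
}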
/**
* kthread_create_on_cpu - Create a cpu bound kthread
* @threadfn: the function to run until signal_pending(current).
* @data: data ptr for @threadfn.
 * @cpu: The cpu to which the thread should be bound.
* @namefmt: printf-style name for the thread. Format is restricted
* to "name.*%u". Code fills in cpu number.
*
 * Description: This helper function creates and names a kernel thread.
* The thread will be woken and put into park mode.
*/
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
void *data, unsigned int cpu,
const char *namefmt)
{
struct task_struct *p;
p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
cpu);
if (IS_ERR(p))
return p;
set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
to_kthread(p)->cpu = cpu;
/* Park the thread to get it out of TASK_UNINTERRUPTIBLE state */
kthread_park(p);
return p;
}
static void __kthread_unpark(struct task_struct *k, struct kthread *kthread)
{
clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
/*
* We clear the IS_PARKED bit here as we don't wait
* until the task has left the park code. So if we'd
* park before that happens we'd see the IS_PARKED bit
* which might be about to be cleared.
*/
if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
__kthread_bind(k, kthread->cpu, TASK_PARKED);
wake_up_state(k, TASK_PARKED);
}
}
/**
* kthread_unpark - unpark a thread created by kthread_create().
* @k: thread created by kthread_create().
*
 * Sets kthread_should_park() for @k to return false and wakes it from
 * its parked state. If the thread is marked percpu then it is bound to
 * the cpu again.
*/
void kthread_unpark(struct task_struct *k)
{
struct kthread *kthread = to_live_kthread(k);
if (kthread)
__kthread_unpark(k, kthread);
}
/**
* kthread_park - park a thread created by kthread_create().
* @k: thread created by kthread_create().
*
* Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to park. This can also be called after kthread_create()
* instead of calling wake_up_process(): the thread will park without
* calling threadfn().
*
* Returns 0 if the thread is parked, -ENOSYS if the thread exited.
* If called by the kthread itself just the park bit is set.
*/
int kthread_park(struct task_struct *k)
{
struct kthread *kthread = to_live_kthread(k);
int ret = -ENOSYS;
if (kthread) {
if (!test_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
if (k != current) {
wake_up_process(k);
wait_for_completion(&kthread->parked);
}
}
ret = 0;
}
return ret;
}
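/*
 * Illustrative sketch (not part of the original file): quiescing a
 * live kthread across a critical section with the park/unpark pair
 * documented above.  The helper name is invented; @t is assumed to be
 * a thread whose threadfn calls kthread_parkme() when asked to park.
 */
static void example_quiesce(struct task_struct *t)
{
	if (kthread_park(t))	/* 0 on success, -ENOSYS if it exited */
		return;
	/* ... thread now idles in TASK_PARKED, safe to reconfigure ... */
	kthread_unpark(t);	/* threadfn resumes past kthread_parkme() */
}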
/**
* kthread_stop - stop a thread created by kthread_create().
* @k: thread created by kthread_create().
*
* Sets kthread_should_stop() for @k to return true, wakes it, and
* waits for it to exit. This can also be called after kthread_create()
* instead of calling wake_up_process(): the thread will exit without
* calling threadfn().
*
* If threadfn() may call do_exit() itself, the caller must ensure
* task_struct can't go away.
*
* Returns the result of threadfn(), or %-EINTR if wake_up_process()
* was never called.
*/
int kthread_stop(struct task_struct *k)
{
struct kthread *kthread;
int ret;
trace_sched_kthread_stop(k);
get_task_struct(k);
kthread = to_live_kthread(k);
if (kthread) {
set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
__kthread_unpark(k, kthread);
wake_up_process(k);
wait_for_completion(&kthread->exited);
}
ret = k->exit_code;
put_task_struct(k);
trace_sched_kthread_stop_ret(ret);
return ret;
}
EXPORT_SYMBOL(kthread_stop);
int kthreadd(void *unused)
{
struct task_struct *tsk = current;
/* Setup a clean context for our children to inherit. */
set_task_comm(tsk, "kthreadd");
ignore_signals(tsk);
set_cpus_allowed_ptr(tsk, cpu_all_mask);
set_mems_allowed(node_states[N_MEMORY]);
current->flags |= PF_NOFREEZE;
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
if (list_empty(&kthread_create_list))
schedule();
__set_current_state(TASK_RUNNING);
spin_lock(&kthread_create_lock);
while (!list_empty(&kthread_create_list)) {
struct kthread_create_info *create;
create = list_entry(kthread_create_list.next,
struct kthread_create_info, list);
list_del_init(&create->list);
spin_unlock(&kthread_create_lock);
create_kthread(create);
spin_lock(&kthread_create_lock);
}
spin_unlock(&kthread_create_lock);
}
return 0;
}
void __init_kthread_worker(struct kthread_worker *worker,
const char *name,
struct lock_class_key *key)
{
spin_lock_init(&worker->lock);
lockdep_set_class_and_name(&worker->lock, key, name);
INIT_LIST_HEAD(&worker->work_list);
worker->task = NULL;
}
EXPORT_SYMBOL_GPL(__init_kthread_worker);
/**
* kthread_worker_fn - kthread function to process kthread_worker
* @worker_ptr: pointer to initialized kthread_worker
*
* This function can be used as @threadfn to kthread_create() or
* kthread_run() with @worker_ptr argument pointing to an initialized
* kthread_worker. The started kthread will process work_list until
 * it is stopped with kthread_stop(). A kthread can also call
* this function directly after extra initialization.
*
* Different kthreads can be used for the same kthread_worker as long
* as there's only one kthread attached to it at any given time. A
* kthread_worker without an attached kthread simply collects queued
* kthread_works.
*/
int kthread_worker_fn(void *worker_ptr)
{
struct kthread_worker *worker = worker_ptr;
struct kthread_work *work;
WARN_ON(worker->task);
worker->task = current;
repeat:
set_current_state(TASK_INTERRUPTIBLE); /* mb paired w/ kthread_stop */
if (kthread_should_stop()) {
__set_current_state(TASK_RUNNING);
spin_lock_irq(&worker->lock);
worker->task = NULL;
spin_unlock_irq(&worker->lock);
return 0;
}
work = NULL;
spin_lock_irq(&worker->lock);
if (!list_empty(&worker->work_list)) {
work = list_first_entry(&worker->work_list,
struct kthread_work, node);
list_del_init(&work->node);
}
worker->current_work = work;
spin_unlock_irq(&worker->lock);
if (work) {
__set_current_state(TASK_RUNNING);
work->func(work);
} else if (!freezing(current))
schedule();
try_to_freeze();
goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);
/* insert @work before @pos in @worker */
static void insert_kthread_work(struct kthread_worker *worker,
struct kthread_work *work,
struct list_head *pos)
{
lockdep_assert_held(&worker->lock);
list_add_tail(&work->node, pos);
work->worker = worker;
if (!worker->current_work && likely(worker->task))
wake_up_process(worker->task);
}
/**
* queue_kthread_work - queue a kthread_work
* @worker: target kthread_worker
* @work: kthread_work to queue
*
 * Queue @work for async execution on @worker's kthread. @worker
 * must have been initialized with __init_kthread_worker(). Returns %true
* if @work was successfully queued, %false if it was already pending.
*/
bool queue_kthread_work(struct kthread_worker *worker,
struct kthread_work *work)
{
bool ret = false;
unsigned long flags;
spin_lock_irqsave(&worker->lock, flags);
if (list_empty(&work->node)) {
insert_kthread_work(worker, work, &worker->work_list);
ret = true;
}
spin_unlock_irqrestore(&worker->lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(queue_kthread_work);
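/*
 * Illustrative sketch (not part of the original file): wiring the
 * worker API together -- initialize a kthread_worker, run
 * kthread_worker_fn() in a kthread, then queue and flush a work item.
 * All names here are invented for the example.
 */
static struct kthread_worker example_worker;
static struct kthread_work example_work;

static void example_work_fn(struct kthread_work *work)
{
	pr_info("work item ran in worker kthread context\n");
}

static int example_worker_start(void)
{
	struct task_struct *t;

	init_kthread_worker(&example_worker);
	t = kthread_run(kthread_worker_fn, &example_worker, "example_worker");
	if (IS_ERR(t))
		return PTR_ERR(t);

	init_kthread_work(&example_work, example_work_fn);
	queue_kthread_work(&example_worker, &example_work);
	flush_kthread_work(&example_work);	/* wait for it to finish */
	return 0;
}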
struct kthread_flush_work {
struct kthread_work work;
struct completion done;
};
static void kthread_flush_work_fn(struct kthread_work *work)
{
struct kthread_flush_work *fwork =
container_of(work, struct kthread_flush_work, work);
complete(&fwork->done);
}
/**
* flush_kthread_work - flush a kthread_work
* @work: work to flush
*
* If @work is queued or executing, wait for it to finish execution.
*/
void flush_kthread_work(struct kthread_work *work)
{
struct kthread_flush_work fwork = {
KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
COMPLETION_INITIALIZER_ONSTACK(fwork.done),
};
struct kthread_worker *worker;
bool noop = false;
retry:
worker = work->worker;
if (!worker)
return;
spin_lock_irq(&worker->lock);
if (work->worker != worker) {
spin_unlock_irq(&worker->lock);
goto retry;
}
if (!list_empty(&work->node))
insert_kthread_work(worker, &fwork.work, work->node.next);
else if (worker->current_work == work)
insert_kthread_work(worker, &fwork.work, worker->work_list.next);
else
noop = true;
spin_unlock_irq(&worker->lock);
if (!noop)
wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(flush_kthread_work);
/**
* flush_kthread_worker - flush all current works on a kthread_worker
* @worker: worker to flush
*
* Wait until all currently executing or pending works on @worker are
* finished.
*/
void flush_kthread_worker(struct kthread_worker *worker)
{
struct kthread_flush_work fwork = {
KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
COMPLETION_INITIALIZER_ONSTACK(fwork.done),
};
queue_kthread_work(worker, &fwork.work);
wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(flush_kthread_worker);
| gpl-2.0 |
whyorean/android_kernel_xiaomi_msm8996 | drivers/tty/serial/sn_console.c | 1025 | 29946 | /*
* C-Brick Serial Port (and console) driver for SGI Altix machines.
*
* This driver is NOT suitable for talking to the l1-controller for
* anything other than 'console activities' --- please use the l1
* driver for that.
*
*
* Copyright (c) 2004-2006 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
* Further, this software is distributed without any warranty that it is
* free of the rightful claim of any third person regarding infringement
* or the like. Any license provided herein, whether implied or
* otherwise, applies only to this software file. Patent licenses, if
* any, provided herein do not apply to combinations of this program with
* other software, or any other product whatsoever.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* Contact information: Silicon Graphics, Inc., 1500 Crittenden Lane,
* Mountain View, CA 94043, or:
*
* http://www.sgi.com
*
* For further information regarding this notice, see:
*
* http://oss.sgi.com/projects/GenInfo/NoticeExplan
*/
#include <linux/interrupt.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/sysrq.h>
#include <linux/circ_buf.h>
#include <linux/serial_reg.h>
#include <linux/delay.h> /* for mdelay */
#include <linux/miscdevice.h>
#include <linux/serial_core.h>
#include <asm/io.h>
#include <asm/sn/simulator.h>
#include <asm/sn/sn_sal.h>
/* number of characters we can transmit to the SAL console at a time */
#define SN_SAL_MAX_CHARS 120
/* 64K, when we're asynch, it must be at least printk's LOG_BUF_LEN to
 * avoid losing chars (always has to be a power of 2) */
#define SN_SAL_BUFFER_SIZE (64 * (1 << 10))
#define SN_SAL_UART_FIFO_DEPTH 16
#define SN_SAL_UART_FIFO_SPEED_CPS (9600/10)
/* sn_transmit_chars() calling args */
#define TRANSMIT_BUFFERED 0
#define TRANSMIT_RAW 1
/* To use dynamic numbers only and not use the assigned major and minor,
* define the following.. */
/* #define USE_DYNAMIC_MINOR 1 *//* use dynamic minor number */
#define USE_DYNAMIC_MINOR 0 /* Don't rely on misc_register dynamic minor */
/* Device name we're using */
#define DEVICE_NAME "ttySG"
#define DEVICE_NAME_DYNAMIC "ttySG0" /* need full name for misc_register */
/* The major/minor we are using, ignored for USE_DYNAMIC_MINOR */
#define DEVICE_MAJOR 204
#define DEVICE_MINOR 40
#ifdef CONFIG_MAGIC_SYSRQ
static char sysrq_serial_str[] = "\eSYS";
static char *sysrq_serial_ptr = sysrq_serial_str;
static unsigned long sysrq_requested;
#endif /* CONFIG_MAGIC_SYSRQ */
/*
* Port definition - this kinda drives it all
*/
struct sn_cons_port {
struct timer_list sc_timer;
struct uart_port sc_port;
struct sn_sal_ops {
int (*sal_puts_raw) (const char *s, int len);
int (*sal_puts) (const char *s, int len);
int (*sal_getc) (void);
int (*sal_input_pending) (void);
void (*sal_wakeup_transmit) (struct sn_cons_port *, int);
} *sc_ops;
unsigned long sc_interrupt_timeout;
int sc_is_asynch;
};
static struct sn_cons_port sal_console_port;
static int sn_process_input;
/* Only used if USE_DYNAMIC_MINOR is set to 1 */
static struct miscdevice misc; /* used with misc_register for dynamic */
extern void early_sn_setup(void);
#undef DEBUG
#ifdef DEBUG
static int sn_debug_printf(const char *fmt, ...);
#define DPRINTF(x...) sn_debug_printf(x)
#else
#define DPRINTF(x...) do { } while (0)
#endif
/* Prototypes */
static int snt_hw_puts_raw(const char *, int);
static int snt_hw_puts_buffered(const char *, int);
static int snt_poll_getc(void);
static int snt_poll_input_pending(void);
static int snt_intr_getc(void);
static int snt_intr_input_pending(void);
static void sn_transmit_chars(struct sn_cons_port *, int);
/* A table for polling:
*/
static struct sn_sal_ops poll_ops = {
.sal_puts_raw = snt_hw_puts_raw,
.sal_puts = snt_hw_puts_raw,
.sal_getc = snt_poll_getc,
.sal_input_pending = snt_poll_input_pending
};
/* A table for interrupts enabled */
static struct sn_sal_ops intr_ops = {
.sal_puts_raw = snt_hw_puts_raw,
.sal_puts = snt_hw_puts_buffered,
.sal_getc = snt_intr_getc,
.sal_input_pending = snt_intr_input_pending,
.sal_wakeup_transmit = sn_transmit_chars
};
/* the console does output in two distinctly different ways:
* synchronous (raw) and asynchronous (buffered). initially, early_printk
* does synchronous output. any data written goes directly to the SAL
 * to be output (incidentally, it is internally buffered by the SAL).
* after interrupts and timers are initialized and available for use,
* the console init code switches to asynchronous output. this is
* also the earliest opportunity to begin polling for console input.
* after console initialization, console output and tty (serial port)
* output is buffered and sent to the SAL asynchronously (either by
* timer callback or by UART interrupt) */
/* routines for running the console in polling mode */
/**
* snt_poll_getc - Get a character from the console in polling mode
*
*/
static int snt_poll_getc(void)
{
int ch;
ia64_sn_console_getc(&ch);
return ch;
}
/**
* snt_poll_input_pending - Check if any input is waiting - polling mode.
*
*/
static int snt_poll_input_pending(void)
{
int status, input;
status = ia64_sn_console_check(&input);
return !status && input;
}
/* routines for an interrupt driven console (normal) */
/**
* snt_intr_getc - Get a character from the console, interrupt mode
*
*/
static int snt_intr_getc(void)
{
return ia64_sn_console_readc();
}
/**
* snt_intr_input_pending - Check if input is pending, interrupt mode
*
*/
static int snt_intr_input_pending(void)
{
return ia64_sn_console_intr_status() & SAL_CONSOLE_INTR_RECV;
}
/* these functions are polled and interrupt */
/**
* snt_hw_puts_raw - Send raw string to the console, polled or interrupt mode
* @s: String
* @len: Length
*
*/
static int snt_hw_puts_raw(const char *s, int len)
{
/* this will call the PROM and not return until this is done */
return ia64_sn_console_putb(s, len);
}
/**
* snt_hw_puts_buffered - Send string to console, polled or interrupt mode
* @s: String
* @len: Length
*
*/
static int snt_hw_puts_buffered(const char *s, int len)
{
/* queue data to the PROM */
return ia64_sn_console_xmit_chars((char *)s, len);
}
/* uart interface structs
* These functions are associated with the uart_port that the serial core
* infrastructure calls.
*
* Note: Due to how the console works, many routines are no-ops.
*/
/**
* snp_type - What type of console are we?
* @port: Port to operate with (we ignore since we only have one port)
*
*/
static const char *snp_type(struct uart_port *port)
{
return ("SGI SN L1");
}
/**
* snp_tx_empty - Is the transmitter empty? We pretend we're always empty
* @port: Port to operate on (we ignore since we only have one port)
*
*/
static unsigned int snp_tx_empty(struct uart_port *port)
{
return 1;
}
/**
* snp_stop_tx - stop the transmitter - no-op for us
 * @port: Port to operate on - we ignore - no-op function
*
*/
static void snp_stop_tx(struct uart_port *port)
{
}
/**
* snp_release_port - Free i/o and resources for port - no-op for us
* @port: Port to operate on - we ignore - no-op function
*
*/
static void snp_release_port(struct uart_port *port)
{
}
/**
* snp_shutdown - shut down the port - free irq and disable - no-op for us
* @port: Port to shut down - we ignore
*
*/
static void snp_shutdown(struct uart_port *port)
{
}
/**
* snp_set_mctrl - set control lines (dtr, rts, etc) - no-op for our console
* @port: Port to operate on - we ignore
* @mctrl: Lines to set/unset - we ignore
*
*/
static void snp_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
}
/**
 * snp_get_mctrl - get control line info, we just return a static value
* @port: port to operate on - we only have one port so we ignore this
*
*/
static unsigned int snp_get_mctrl(struct uart_port *port)
{
return TIOCM_CAR | TIOCM_RNG | TIOCM_DSR | TIOCM_CTS;
}
/**
 * snp_stop_rx - Stop the receiver - we ignore this
* @port: Port to operate on - we ignore
*
*/
static void snp_stop_rx(struct uart_port *port)
{
}
/**
* snp_start_tx - Start transmitter
* @port: Port to operate on
*
*/
static void snp_start_tx(struct uart_port *port)
{
if (sal_console_port.sc_ops->sal_wakeup_transmit)
sal_console_port.sc_ops->sal_wakeup_transmit(&sal_console_port,
TRANSMIT_BUFFERED);
}
/**
* snp_break_ctl - handle breaks - ignored by us
* @port: Port to operate on
* @break_state: Break state
*
*/
static void snp_break_ctl(struct uart_port *port, int break_state)
{
}
/**
* snp_startup - Start up the serial port - always return 0 (We're always on)
* @port: Port to operate on
*
*/
static int snp_startup(struct uart_port *port)
{
return 0;
}
/**
* snp_set_termios - set termios stuff - we ignore these
* @port: port to operate on
* @termios: New settings
 * @old: Old settings
*
*/
static void
snp_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
}
/**
* snp_request_port - allocate resources for port - ignored by us
* @port: port to operate on
*
*/
static int snp_request_port(struct uart_port *port)
{
return 0;
}
/**
* snp_config_port - allocate resources, set up - we ignore, we're always on
* @port: Port to operate on
* @flags: flags used for port setup
*
*/
static void snp_config_port(struct uart_port *port, int flags)
{
}
/* Associate the uart functions above - given to serial core */
static struct uart_ops sn_console_ops = {
.tx_empty = snp_tx_empty,
.set_mctrl = snp_set_mctrl,
.get_mctrl = snp_get_mctrl,
.stop_tx = snp_stop_tx,
.start_tx = snp_start_tx,
.stop_rx = snp_stop_rx,
.break_ctl = snp_break_ctl,
.startup = snp_startup,
.shutdown = snp_shutdown,
.set_termios = snp_set_termios,
.pm = NULL,
.type = snp_type,
.release_port = snp_release_port,
.request_port = snp_request_port,
.config_port = snp_config_port,
.verify_port = NULL,
};
/* End of uart struct functions and defines */
#ifdef DEBUG
/**
* sn_debug_printf - close to hardware debugging printf
* @fmt: printf format
*
* This is as "close to the metal" as we can get, used when the driver
* itself may be broken.
*
*/
static int sn_debug_printf(const char *fmt, ...)
{
static char printk_buf[1024];
int printed_len;
va_list args;
va_start(args, fmt);
printed_len = vsnprintf(printk_buf, sizeof(printk_buf), fmt, args);
if (!sal_console_port.sc_ops) {
sal_console_port.sc_ops = &poll_ops;
early_sn_setup();
}
sal_console_port.sc_ops->sal_puts_raw(printk_buf, printed_len);
va_end(args);
return printed_len;
}
#endif /* DEBUG */
/*
* Interrupt handling routines.
*/
/**
* sn_receive_chars - Grab characters, pass them to tty layer
* @port: Port to operate on
* @flags: irq flags
*
* Note: If we're not registered with the serial core infrastructure yet,
* we don't try to send characters to it...
*
*/
static void
sn_receive_chars(struct sn_cons_port *port, unsigned long flags)
{
struct tty_port *tport = NULL;
int ch;
if (!port) {
printk(KERN_ERR "sn_receive_chars - port NULL so can't receive\n");
return;
}
if (!port->sc_ops) {
printk(KERN_ERR "sn_receive_chars - port->sc_ops NULL so can't receive\n");
return;
}
if (port->sc_port.state) {
/* The serial core structures are initialized, use them */
tport = &port->sc_port.state->port;
}
while (port->sc_ops->sal_input_pending()) {
ch = port->sc_ops->sal_getc();
if (ch < 0) {
printk(KERN_ERR "sn_console: An error occurred while "
"obtaining data from the console (0x%0x)\n", ch);
break;
}
#ifdef CONFIG_MAGIC_SYSRQ
if (sysrq_requested) {
unsigned long sysrq_timeout = sysrq_requested + HZ*5;
sysrq_requested = 0;
if (ch && time_before(jiffies, sysrq_timeout)) {
spin_unlock_irqrestore(&port->sc_port.lock, flags);
handle_sysrq(ch);
spin_lock_irqsave(&port->sc_port.lock, flags);
/* ignore actual sysrq command char */
continue;
}
}
if (ch == *sysrq_serial_ptr) {
if (!(*++sysrq_serial_ptr)) {
sysrq_requested = jiffies;
sysrq_serial_ptr = sysrq_serial_str;
}
/*
* ignore the whole sysrq string except for the
* leading escape
*/
if (ch != '\e')
continue;
}
else
sysrq_serial_ptr = sysrq_serial_str;
#endif /* CONFIG_MAGIC_SYSRQ */
/* record the character to pass up to the tty layer */
if (tport) {
if (tty_insert_flip_char(tport, ch, TTY_NORMAL) == 0)
break;
}
port->sc_port.icount.rx++;
}
if (tport)
tty_flip_buffer_push(tport);
}
/**
* sn_transmit_chars - grab characters from serial core, send off
* @port: Port to operate on
* @raw: Transmit raw or buffered
*
* Note: If we're early, before we're registered with serial core, the
* writes are going through sn_sal_console_write because that's how
* register_console has been set up. We currently could have asynch
* polls calling this function due to sn_sal_switch_to_asynch but we can
 * ignore them until we register with the serial core.
*
*/
static void sn_transmit_chars(struct sn_cons_port *port, int raw)
{
int xmit_count, tail, head, loops, ii;
int result;
char *start;
struct circ_buf *xmit;
if (!port)
return;
BUG_ON(!port->sc_is_asynch);
if (port->sc_port.state) {
/* We're initialized, using serial core infrastructure */
xmit = &port->sc_port.state->xmit;
} else {
/* Probably sn_sal_switch_to_asynch has been run but serial core isn't
* initialized yet. Just return. Writes are going through
* sn_sal_console_write (due to register_console) at this time.
*/
return;
}
if (uart_circ_empty(xmit) || uart_tx_stopped(&port->sc_port)) {
/* Nothing to do. */
ia64_sn_console_intr_disable(SAL_CONSOLE_INTR_XMIT);
return;
}
head = xmit->head;
tail = xmit->tail;
start = &xmit->buf[tail];
/* twice around gets the tail to the end of the buffer and
* then to the head, if needed */
loops = (head < tail) ? 2 : 1;
for (ii = 0; ii < loops; ii++) {
xmit_count = (head < tail) ?
(UART_XMIT_SIZE - tail) : (head - tail);
if (xmit_count > 0) {
if (raw == TRANSMIT_RAW)
result =
port->sc_ops->sal_puts_raw(start,
xmit_count);
else
result =
port->sc_ops->sal_puts(start, xmit_count);
#ifdef DEBUG
if (!result)
DPRINTF("`");
#endif
if (result > 0) {
xmit_count -= result;
port->sc_port.icount.tx += result;
tail += result;
tail &= UART_XMIT_SIZE - 1;
xmit->tail = tail;
start = &xmit->buf[tail];
}
}
}
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&port->sc_port);
if (uart_circ_empty(xmit))
snp_stop_tx(&port->sc_port); /* no-op for us */
}
/**
* sn_sal_interrupt - Handle console interrupts
* @irq: irq #, useful for debug statements
* @dev_id: our pointer to our port (sn_cons_port which contains the uart port)
*
*/
static irqreturn_t sn_sal_interrupt(int irq, void *dev_id)
{
struct sn_cons_port *port = (struct sn_cons_port *)dev_id;
unsigned long flags;
int status = ia64_sn_console_intr_status();
if (!port)
return IRQ_NONE;
spin_lock_irqsave(&port->sc_port.lock, flags);
if (status & SAL_CONSOLE_INTR_RECV) {
sn_receive_chars(port, flags);
}
if (status & SAL_CONSOLE_INTR_XMIT) {
sn_transmit_chars(port, TRANSMIT_BUFFERED);
}
spin_unlock_irqrestore(&port->sc_port.lock, flags);
return IRQ_HANDLED;
}
/**
* sn_sal_timer_poll - this function handles polled console mode
* @data: A pointer to our sn_cons_port (which contains the uart port)
*
* data is the pointer that init_timer will store for us. This function is
* associated with init_timer to see if there is any console traffic.
* Obviously not used in interrupt mode
*
*/
static void sn_sal_timer_poll(unsigned long data)
{
struct sn_cons_port *port = (struct sn_cons_port *)data;
unsigned long flags;
if (!port)
return;
if (!port->sc_port.irq) {
spin_lock_irqsave(&port->sc_port.lock, flags);
if (sn_process_input)
sn_receive_chars(port, flags);
sn_transmit_chars(port, TRANSMIT_RAW);
spin_unlock_irqrestore(&port->sc_port.lock, flags);
mod_timer(&port->sc_timer,
jiffies + port->sc_interrupt_timeout);
}
}
/*
* Boot-time initialization code
*/
/**
* sn_sal_switch_to_asynch - Switch to async mode (as opposed to synch)
* @port: Our sn_cons_port (which contains the uart port)
*
* So this is used by sn_sal_serial_console_init (early on, before we're
* registered with serial core). It's also used by sn_sal_module_init
* right after we've registered with serial core. The later only happens
* if we didn't already come through here via sn_sal_serial_console_init.
*
*/
static void __init sn_sal_switch_to_asynch(struct sn_cons_port *port)
{
unsigned long flags;
if (!port)
return;
DPRINTF("sn_console: about to switch to asynchronous console\n");
/* without early_printk, we may be invoked late enough to race
* with other cpus doing console IO at this point, however
* console interrupts will never be enabled */
spin_lock_irqsave(&port->sc_port.lock, flags);
/* early_printk invocation may have done this for us */
if (!port->sc_ops)
port->sc_ops = &poll_ops;
/* we can't turn on the console interrupt (as request_irq
* calls kmalloc, which isn't set up yet), so we rely on a
* timer to poll for input and push data from the console
* buffer.
*/
init_timer(&port->sc_timer);
port->sc_timer.function = sn_sal_timer_poll;
port->sc_timer.data = (unsigned long)port;
if (IS_RUNNING_ON_SIMULATOR())
port->sc_interrupt_timeout = 6;
else {
/* 960cps / 16 char FIFO = 60HZ
 * HZ / (SN_SAL_UART_FIFO_SPEED_CPS / SN_SAL_UART_FIFO_DEPTH) */
port->sc_interrupt_timeout =
HZ * SN_SAL_UART_FIFO_DEPTH / SN_SAL_UART_FIFO_SPEED_CPS;
}
mod_timer(&port->sc_timer, jiffies + port->sc_interrupt_timeout);
port->sc_is_asynch = 1;
spin_unlock_irqrestore(&port->sc_port.lock, flags);
}
/**
* sn_sal_switch_to_interrupts - Switch to interrupt driven mode
* @port: Our sn_cons_port (which contains the uart port)
*
* In sn_sal_module_init, after we're registered with serial core and
* the port is added, this function is called to switch us to interrupt
* mode. We were previously in asynch/polling mode (using init_timer).
*
* We attempt to switch to interrupt mode here by calling
* request_irq. If that works out, we enable receive interrupts.
*/
static void __init sn_sal_switch_to_interrupts(struct sn_cons_port *port)
{
unsigned long flags;
if (port) {
DPRINTF("sn_console: switching to interrupt driven console\n");
if (request_irq(SGI_UART_VECTOR, sn_sal_interrupt,
IRQF_SHARED,
"SAL console driver", port) >= 0) {
spin_lock_irqsave(&port->sc_port.lock, flags);
port->sc_port.irq = SGI_UART_VECTOR;
port->sc_ops = &intr_ops;
irq_set_handler(port->sc_port.irq, handle_level_irq);
/* turn on receive interrupts */
ia64_sn_console_intr_enable(SAL_CONSOLE_INTR_RECV);
spin_unlock_irqrestore(&port->sc_port.lock, flags);
}
else {
printk(KERN_INFO
"sn_console: console proceeding in polled mode\n");
}
}
}
/*
* Kernel console definitions
*/
static void sn_sal_console_write(struct console *, const char *, unsigned);
static int sn_sal_console_setup(struct console *, char *);
static struct uart_driver sal_console_uart;
extern struct tty_driver *uart_console_device(struct console *, int *);
static struct console sal_console = {
.name = DEVICE_NAME,
.write = sn_sal_console_write,
.device = uart_console_device,
.setup = sn_sal_console_setup,
.index = -1, /* unspecified */
.data = &sal_console_uart,
};
#define SAL_CONSOLE &sal_console
static struct uart_driver sal_console_uart = {
.owner = THIS_MODULE,
.driver_name = "sn_console",
.dev_name = DEVICE_NAME,
.major = 0, /* major/minor set at registration time per USE_DYNAMIC_MINOR */
.minor = 0,
.nr = 1, /* one port */
.cons = SAL_CONSOLE,
};
/**
* sn_sal_module_init - When the kernel loads us, get us rolling w/ serial core
*
* Before this is called, we've been printing kernel messages in a special
* early mode not making use of the serial core infrastructure. When our
* driver is loaded for real, we register the driver and port with serial
* core and try to enable interrupt driven mode.
*
*/
static int __init sn_sal_module_init(void)
{
int retval;
if (!ia64_platform_is("sn2"))
return 0;
printk(KERN_INFO "sn_console: Console driver init\n");
if (USE_DYNAMIC_MINOR == 1) {
misc.minor = MISC_DYNAMIC_MINOR;
misc.name = DEVICE_NAME_DYNAMIC;
retval = misc_register(&misc);
if (retval != 0) {
printk(KERN_WARNING "Failed to register console "
"device using misc_register.\n");
return -ENODEV;
}
sal_console_uart.major = MISC_MAJOR;
sal_console_uart.minor = misc.minor;
} else {
sal_console_uart.major = DEVICE_MAJOR;
sal_console_uart.minor = DEVICE_MINOR;
}
/* We register the driver and the port before switching to interrupts
* or async above so the proper uart structures are populated */
if (uart_register_driver(&sal_console_uart) < 0) {
printk
("ERROR sn_sal_module_init failed uart_register_driver, line %d\n",
__LINE__);
return -ENODEV;
}
spin_lock_init(&sal_console_port.sc_port.lock);
/* Setup the port struct with the minimum needed */
sal_console_port.sc_port.membase = (char *)1; /* just needs to be non-zero */
sal_console_port.sc_port.type = PORT_16550A;
sal_console_port.sc_port.fifosize = SN_SAL_MAX_CHARS;
sal_console_port.sc_port.ops = &sn_console_ops;
sal_console_port.sc_port.line = 0;
if (uart_add_one_port(&sal_console_uart, &sal_console_port.sc_port) < 0) {
/* error - not sure what I'd do - so I'll do nothing */
printk(KERN_ERR "%s: unable to add port\n", __func__);
}
/* when this driver is compiled in, the console initialization
* will have already switched us into asynchronous operation
* before we get here through the module initcalls */
if (!sal_console_port.sc_is_asynch) {
sn_sal_switch_to_asynch(&sal_console_port);
}
/* at this point (module_init) we can try to turn on interrupts */
if (!IS_RUNNING_ON_SIMULATOR()) {
sn_sal_switch_to_interrupts(&sal_console_port);
}
sn_process_input = 1;
return 0;
}
/**
* sn_sal_module_exit - When we're unloaded, remove the driver/port
*
*/
static void __exit sn_sal_module_exit(void)
{
del_timer_sync(&sal_console_port.sc_timer);
uart_remove_one_port(&sal_console_uart, &sal_console_port.sc_port);
uart_unregister_driver(&sal_console_uart);
misc_deregister(&misc);
}
module_init(sn_sal_module_init);
module_exit(sn_sal_module_exit);
/**
* puts_raw_fixed - sn_sal_console_write helper for adding \r's as required
* @puts_raw : puts function to do the writing
* @s: input string
* @count: length
*
* We need a \r ahead of every \n for direct writes through
* ia64_sn_console_putb (what sal_puts_raw below actually does).
*
*/
static void puts_raw_fixed(int (*puts_raw) (const char *s, int len),
const char *s, int count)
{
const char *s1;
/* Output '\r' before each '\n' */
while ((s1 = memchr(s, '\n', count)) != NULL) {
puts_raw(s, s1 - s);
puts_raw("\r\n", 2);
count -= s1 + 1 - s;
s = s1 + 1;
}
puts_raw(s, count);
}
/**
* sn_sal_console_write - Print statements before serial core available
* @console: Console to operate on - we ignore since we have just one
* @s: String to send
* @count: length
*
* This is referenced in the console struct. It is used for early
* console printing before we register with serial core and for things
* such as kdb. The console_lock must be held when we get here.
*
* This function has some code for trying to print output even if the lock
* is held. We try to cover the case where a lock holder could have died.
* We don't use this special case code if we're not registered with serial
* core yet. After we're registered with serial core, the only time this
* function would be used is for high level kernel output like magic sys req,
* kdb, and printk's.
*/
static void
sn_sal_console_write(struct console *co, const char *s, unsigned count)
{
unsigned long flags = 0;
struct sn_cons_port *port = &sal_console_port;
static int stole_lock = 0;
BUG_ON(!port->sc_is_asynch);
/* We can't look at the xmit buffer if we're not registered with serial core
* yet. So only do the fancy recovery after registering
*/
if (!port->sc_port.state) {
/* Not yet registered with serial core - simple case */
puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count);
return;
}
/* somebody really wants this output, might be an
* oops, kdb, panic, etc. make sure they get it. */
if (spin_is_locked(&port->sc_port.lock)) {
int lhead = port->sc_port.state->xmit.head;
int ltail = port->sc_port.state->xmit.tail;
int counter, got_lock = 0;
/*
* We attempt to determine if someone has died with the
* lock. We wait ~20 secs after the head and tail ptrs
* stop moving and assume the lock holder is not functional
* and plow ahead. If the lock is freed within the time out
* period we re-get the lock and go ahead normally. We also
* remember if we have plowed ahead so that we don't have
 * to wait out the time out period again - the assumption
* is that we will time out again.
*/
for (counter = 0; counter < 150; mdelay(125), counter++) {
if (!spin_is_locked(&port->sc_port.lock)
|| stole_lock) {
if (!stole_lock) {
spin_lock_irqsave(&port->sc_port.lock,
flags);
got_lock = 1;
}
break;
} else {
/* still locked */
if ((lhead != port->sc_port.state->xmit.head)
|| (ltail !=
port->sc_port.state->xmit.tail)) {
lhead =
port->sc_port.state->xmit.head;
ltail =
port->sc_port.state->xmit.tail;
counter = 0;
}
}
}
/* flush anything in the serial core xmit buffer, raw */
sn_transmit_chars(port, TRANSMIT_RAW);
if (got_lock) {
spin_unlock_irqrestore(&port->sc_port.lock, flags);
stole_lock = 0;
} else {
/* fell thru */
stole_lock = 1;
}
puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count);
} else {
stole_lock = 0;
spin_lock_irqsave(&port->sc_port.lock, flags);
sn_transmit_chars(port, TRANSMIT_RAW);
spin_unlock_irqrestore(&port->sc_port.lock, flags);
puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count);
}
}
/**
* sn_sal_console_setup - Set up console for early printing
* @co: Console to work with
* @options: Options to set
*
* Altix console doesn't do anything with baud rates, etc, anyway.
*
* This isn't required since not providing the setup function in the
* console struct is ok. However, other patches like KDB plop something
* here so providing it is easier.
*
*/
static int sn_sal_console_setup(struct console *co, char *options)
{
return 0;
}
/**
* sn_sal_console_write_early - simple early output routine
 * @co: console struct
 * @s: string to print
 * @count: count
*
* Simple function to provide early output, before even
* sn_sal_serial_console_init is called. Referenced in the
 * console struct registered in sn_serial_console_early_setup.
*
*/
static void __init
sn_sal_console_write_early(struct console *co, const char *s, unsigned count)
{
puts_raw_fixed(sal_console_port.sc_ops->sal_puts_raw, s, count);
}
/* Used for very early console printing - again, before
* sn_sal_serial_console_init is run */
static struct console sal_console_early __initdata = {
.name = "sn_sal",
.write = sn_sal_console_write_early,
.flags = CON_PRINTBUFFER,
.index = -1,
};
/**
* sn_serial_console_early_setup - Sets up early console output support
*
* Register a console early on... This is for output before even
 * sn_sal_serial_console_init is called. This function is called from
* setup.c. This allows us to do really early polled writes. When
* sn_sal_serial_console_init is called, this console is unregistered
* and a new one registered.
*/
int __init sn_serial_console_early_setup(void)
{
if (!ia64_platform_is("sn2"))
return -1;
sal_console_port.sc_ops = &poll_ops;
spin_lock_init(&sal_console_port.sc_port.lock);
early_sn_setup(); /* Find SAL entry points */
register_console(&sal_console_early);
return 0;
}
/**
* sn_sal_serial_console_init - Early console output - set up for register
*
* This function is called when regular console init happens. Because we
* support even earlier console output with sn_serial_console_early_setup
* (called from setup.c directly), this function unregisters the really
* early console.
*
* Note: Even if setup.c doesn't register sal_console_early, unregistering
* it here doesn't hurt anything.
*
*/
static int __init sn_sal_serial_console_init(void)
{
if (ia64_platform_is("sn2")) {
sn_sal_switch_to_asynch(&sal_console_port);
DPRINTF("sn_sal_serial_console_init : register console\n");
register_console(&sal_console);
unregister_console(&sal_console_early);
}
return 0;
}
console_initcall(sn_sal_serial_console_init);
| gpl-2.0 |
DC07/android_kernel_lge_dory | drivers/net/ethernet/chelsio/cxgb4/sge.c | 1281 | 77302 | /*
* This file is part of the Chelsio T4 Ethernet driver for Linux.
*
* Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/dma-mapping.h>
#include <linux/jiffies.h>
#include <linux/prefetch.h>
#include <linux/export.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_msg.h"
#include "t4fw_api.h"
/*
* Rx buffer size. We use largish buffers if possible but settle for single
* pages under memory shortage.
*/
#if PAGE_SHIFT >= 16
# define FL_PG_ORDER 0
#else
# define FL_PG_ORDER (16 - PAGE_SHIFT)
#endif
/* RX_PULL_LEN should be <= RX_COPY_THRES */
#define RX_COPY_THRES 256
#define RX_PULL_LEN 128
/*
* Main body length for sk_buffs used for Rx Ethernet packets with fragments.
* Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room.
*/
#define RX_PKT_SKB_LEN 512
/*
* Max number of Tx descriptors we clean up at a time. Should be modest as
* freeing skbs isn't cheap and it happens while holding locks. We just need
* to free packets faster than they arrive, we eventually catch up and keep
* the amortized cost reasonable. Must be >= 2 * TXQ_STOP_THRES.
*/
#define MAX_TX_RECLAIM 16
/*
* Max number of Rx buffers we replenish at a time. Again keep this modest,
* allocating buffers isn't cheap either.
*/
#define MAX_RX_REFILL 16U
/*
* Period of the Rx queue check timer. This timer is infrequent as it has
* something to do only when the system experiences severe memory shortage.
*/
#define RX_QCHECK_PERIOD (HZ / 2)
/*
* Period of the Tx queue check timer.
*/
#define TX_QCHECK_PERIOD (HZ / 2)
/*
* Max number of Tx descriptors to be reclaimed by the Tx timer.
*/
#define MAX_TIMER_TX_RECLAIM 100
/*
* Timer index used when backing off due to memory shortage.
*/
#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
/*
* An FL with <= FL_STARVE_THRES buffers is starving and a periodic timer will
* attempt to refill it.
*/
#define FL_STARVE_THRES 4
/*
* Suspend an Ethernet Tx queue with fewer available descriptors than this.
* This is the same as calc_tx_descs() for a TSO packet with
* nr_frags == MAX_SKB_FRAGS.
*/
#define ETHTXQ_STOP_THRES \
(1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
/*
* Suspension threshold for non-Ethernet Tx queues. We require enough room
* for a full sized WR.
*/
#define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))
/*
* Max Tx descriptor space we allow for an Ethernet packet to be inlined
* into a WR.
*/
#define MAX_IMM_TX_PKT_LEN 128
/*
* Max size of a WR sent through a control Tx queue.
*/
#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
struct tx_sw_desc { /* SW state per Tx descriptor */
struct sk_buff *skb;
struct ulptx_sgl *sgl;
};
struct rx_sw_desc { /* SW state per Rx descriptor */
struct page *page;
dma_addr_t dma_addr;
};
/*
 * Rx buffer sizes for "useskbs" Free List buffers (one ingress packet per skb
* buffer). We currently only support two sizes for 1500- and 9000-byte MTUs.
* We could easily support more but there doesn't seem to be much need for
* that ...
*/
#define FL_MTU_SMALL 1500
#define FL_MTU_LARGE 9000
static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
unsigned int mtu)
{
struct sge *s = &adapter->sge;
return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align);
}
#define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
#define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)
/*
* Bits 0..3 of rx_sw_desc.dma_addr have special meaning. The hardware uses
* these to specify the buffer size as an index into the SGE Free List Buffer
* Size register array. We also use bit 4, when the buffer has been unmapped
* for DMA, but this is of course never sent to the hardware and is only used
* to prevent double unmappings. All of the above requires that the Free List
* Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
 * 32-byte or a power of 2 greater in alignment. Since the SGE's minimal
* Free List Buffer alignment is 32 bytes, this works out for us ...
*/
enum {
RX_BUF_FLAGS = 0x1f, /* bottom five bits are special */
RX_BUF_SIZE = 0x0f, /* bottom four bits are for buf sizes */
RX_UNMAPPED_BUF = 0x10, /* buffer is not mapped */
/*
* XXX We shouldn't depend on being able to use these indices.
* XXX Especially when some other Master PF has initialized the
* XXX adapter or we use the Firmware Configuration File. We
* XXX should really search through the Host Buffer Size register
* XXX array for the appropriately sized buffer indices.
*/
RX_SMALL_PG_BUF = 0x0, /* small (PAGE_SIZE) page buffer */
RX_LARGE_PG_BUF = 0x1, /* large (FL_PG_ORDER) page buffer */
RX_SMALL_MTU_BUF = 0x2, /* small MTU buffer */
RX_LARGE_MTU_BUF = 0x3, /* large MTU buffer */
};
static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
{
return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS;
}
static inline bool is_buf_mapped(const struct rx_sw_desc *d)
{
return !(d->dma_addr & RX_UNMAPPED_BUF);
}
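/*
 * Illustrative sketch (not part of the original file): the encode
 * direction of the bit-stuffing scheme described above -- folding the
 * SGE buffer-size index into the low, always-zero bits of the DMA
 * address.  The helper name is invented; set_rx_sw_desc() below
 * stores a combined value produced this way.
 */
static inline dma_addr_t example_enc_buf_addr(dma_addr_t addr,
					      unsigned int buf_size_idx)
{
	/* addr is at least 32-byte aligned, so bits 0..4 are free */
	return addr | buf_size_idx;
}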
/**
* txq_avail - return the number of available slots in a Tx queue
* @q: the Tx queue
*
* Returns the number of descriptors in a Tx queue available to write new
* packets.
*/
static inline unsigned int txq_avail(const struct sge_txq *q)
{
return q->size - 1 - q->in_use;
}
/**
* fl_cap - return the capacity of a free-buffer list
* @fl: the FL
*
* Returns the capacity of a free-buffer list. The capacity is less than
* the size because one descriptor needs to be left unpopulated, otherwise
* HW will think the FL is empty.
*/
static inline unsigned int fl_cap(const struct sge_fl *fl)
{
return fl->size - 8; /* 1 descriptor = 8 buffers */
}
static inline bool fl_starving(const struct sge_fl *fl)
{
return fl->avail - fl->pend_cred <= FL_STARVE_THRES;
}
static int map_skb(struct device *dev, const struct sk_buff *skb,
dma_addr_t *addr)
{
const skb_frag_t *fp, *end;
const struct skb_shared_info *si;
*addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
if (dma_mapping_error(dev, *addr))
goto out_err;
si = skb_shinfo(skb);
end = &si->frags[si->nr_frags];
for (fp = si->frags; fp < end; fp++) {
*++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
DMA_TO_DEVICE);
if (dma_mapping_error(dev, *addr))
goto unwind;
}
return 0;
unwind:
while (fp-- > si->frags)
dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
out_err:
return -ENOMEM;
}
#ifdef CONFIG_NEED_DMA_MAP_STATE
static void unmap_skb(struct device *dev, const struct sk_buff *skb,
const dma_addr_t *addr)
{
const skb_frag_t *fp, *end;
const struct skb_shared_info *si;
dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);
si = skb_shinfo(skb);
end = &si->frags[si->nr_frags];
for (fp = si->frags; fp < end; fp++)
dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE);
}
/**
* deferred_unmap_destructor - unmap a packet when it is freed
* @skb: the packet
*
* This is the packet destructor used for Tx packets that need to remain
* mapped until they are freed rather than until their Tx descriptors are
* freed.
*/
static void deferred_unmap_destructor(struct sk_buff *skb)
{
unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
}
#endif
static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
const struct ulptx_sgl *sgl, const struct sge_txq *q)
{
const struct ulptx_sge_pair *p;
unsigned int nfrags = skb_shinfo(skb)->nr_frags;
if (likely(skb_headlen(skb)))
dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
DMA_TO_DEVICE);
else {
dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
DMA_TO_DEVICE);
nfrags--;
}
/*
* the complexity below is because of the possibility of a wrap-around
* in the middle of an SGL
*/
for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) {
unmap: dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
ntohl(p->len[0]), DMA_TO_DEVICE);
dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
ntohl(p->len[1]), DMA_TO_DEVICE);
p++;
} else if ((u8 *)p == (u8 *)q->stat) {
p = (const struct ulptx_sge_pair *)q->desc;
goto unmap;
} else if ((u8 *)p + 8 == (u8 *)q->stat) {
const __be64 *addr = (const __be64 *)q->desc;
dma_unmap_page(dev, be64_to_cpu(addr[0]),
ntohl(p->len[0]), DMA_TO_DEVICE);
dma_unmap_page(dev, be64_to_cpu(addr[1]),
ntohl(p->len[1]), DMA_TO_DEVICE);
p = (const struct ulptx_sge_pair *)&addr[2];
} else {
const __be64 *addr = (const __be64 *)q->desc;
dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
ntohl(p->len[0]), DMA_TO_DEVICE);
dma_unmap_page(dev, be64_to_cpu(addr[0]),
ntohl(p->len[1]), DMA_TO_DEVICE);
p = (const struct ulptx_sge_pair *)&addr[1];
}
}
if (nfrags) {
__be64 addr;
if ((u8 *)p == (u8 *)q->stat)
p = (const struct ulptx_sge_pair *)q->desc;
addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] :
*(const __be64 *)q->desc;
dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]),
DMA_TO_DEVICE);
}
}
/**
* free_tx_desc - reclaims Tx descriptors and their buffers
* @adapter: the adapter
* @q: the Tx queue to reclaim descriptors from
* @n: the number of descriptors to reclaim
* @unmap: whether the buffers should be unmapped for DMA
*
* Reclaims Tx descriptors from an SGE Tx queue and frees the associated
* Tx buffers. Called with the Tx queue lock held.
*/
static void free_tx_desc(struct adapter *adap, struct sge_txq *q,
unsigned int n, bool unmap)
{
struct tx_sw_desc *d;
unsigned int cidx = q->cidx;
struct device *dev = adap->pdev_dev;
d = &q->sdesc[cidx];
while (n--) {
if (d->skb) { /* an SGL is present */
if (unmap)
unmap_sgl(dev, d->skb, d->sgl, q);
kfree_skb(d->skb);
d->skb = NULL;
}
++d;
if (++cidx == q->size) {
cidx = 0;
d = q->sdesc;
}
}
q->cidx = cidx;
}
/*
* Return the number of reclaimable descriptors in a Tx queue.
*/
static inline int reclaimable(const struct sge_txq *q)
{
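	/*
	 * The HW writes its consumer index into the queue's status entry;
	 * its distance from our SW cidx, modulo the ring size, is how many
	 * descriptors the HW is done with.
	 */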
int hw_cidx = ntohs(q->stat->cidx);
hw_cidx -= q->cidx;
return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
}
/**
* reclaim_completed_tx - reclaims completed Tx descriptors
* @adap: the adapter
* @q: the Tx queue to reclaim completed descriptors from
* @unmap: whether the buffers should be unmapped for DMA
*
* Reclaims Tx descriptors that the SGE has indicated it has processed,
* and frees the associated buffers if possible. Called with the Tx
* queue locked.
*/
static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
bool unmap)
{
int avail = reclaimable(q);
if (avail) {
/*
* Limit the amount of clean up work we do at a time to keep
* the Tx lock hold time O(1).
*/
if (avail > MAX_TX_RECLAIM)
avail = MAX_TX_RECLAIM;
free_tx_desc(adap, q, avail, unmap);
q->in_use -= avail;
}
}
static inline int get_buf_size(struct adapter *adapter,
const struct rx_sw_desc *d)
{
struct sge *s = &adapter->sge;
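	/* the buffer-size index is encoded in the low bits of the bus address */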
unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
int buf_size;
switch (rx_buf_size_idx) {
case RX_SMALL_PG_BUF:
buf_size = PAGE_SIZE;
break;
case RX_LARGE_PG_BUF:
buf_size = PAGE_SIZE << s->fl_pg_order;
break;
case RX_SMALL_MTU_BUF:
buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
break;
case RX_LARGE_MTU_BUF:
buf_size = FL_MTU_LARGE_BUFSIZE(adapter);
break;
default:
BUG_ON(1);
}
return buf_size;
}
/**
* free_rx_bufs - free the Rx buffers on an SGE free list
* @adap: the adapter
* @q: the SGE free list to free buffers from
* @n: how many buffers to free
*
* Release the next @n buffers on an SGE free-buffer Rx queue. The
* buffers must be made inaccessible to HW before calling this function.
*/
static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
{
while (n--) {
struct rx_sw_desc *d = &q->sdesc[q->cidx];
if (is_buf_mapped(d))
dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
get_buf_size(adap, d),
PCI_DMA_FROMDEVICE);
put_page(d->page);
d->page = NULL;
if (++q->cidx == q->size)
q->cidx = 0;
q->avail--;
}
}
/**
* unmap_rx_buf - unmap the current Rx buffer on an SGE free list
* @adap: the adapter
* @q: the SGE free list
*
* Unmap the current buffer on an SGE free-buffer Rx queue. The
* buffer must be made inaccessible to HW before calling this function.
*
 * This is similar to @free_rx_bufs above but does not free the buffer.
 * Note that the FL still loses any further access to the buffer.
*/
static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
{
struct rx_sw_desc *d = &q->sdesc[q->cidx];
if (is_buf_mapped(d))
dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
get_buf_size(adap, d), PCI_DMA_FROMDEVICE);
d->page = NULL;
if (++q->cidx == q->size)
q->cidx = 0;
q->avail--;
}
static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
u32 val;
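	/*
	 * The SGE takes free-list credit in units of 8 buffers, so ring the
	 * doorbell only for whole units and carry the remainder forward.
	 */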
if (q->pend_cred >= 8) {
val = PIDX(q->pend_cred / 8);
if (!is_t4(adap->chip))
val |= DBTYPE(1);
wmb();
t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO(1) |
QID(q->cntxt_id) | val);
q->pend_cred &= 7;
}
}
static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
dma_addr_t mapping)
{
sd->page = pg;
sd->dma_addr = mapping; /* includes size low bits */
}
/**
* refill_fl - refill an SGE Rx buffer ring
* @adap: the adapter
* @q: the ring to refill
* @n: the number of new buffers to allocate
* @gfp: the gfp flags for the allocations
*
* (Re)populate an SGE free-buffer queue with up to @n new packet buffers,
 * allocated with the supplied gfp flags. The caller must ensure that
 * @n does not exceed the queue's capacity. If afterwards the queue is
 * found critically low, mark it as starving in the bitmap of starving FLs.
*
* Returns the number of buffers allocated.
*/
static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
gfp_t gfp)
{
struct sge *s = &adap->sge;
struct page *pg;
dma_addr_t mapping;
unsigned int cred = q->avail;
__be64 *d = &q->desc[q->pidx];
struct rx_sw_desc *sd = &q->sdesc[q->pidx];
gfp |= __GFP_NOWARN | __GFP_COLD;
if (s->fl_pg_order == 0)
goto alloc_small_pages;
/*
* Prefer large buffers
*/
while (n) {
pg = alloc_pages(gfp | __GFP_COMP, s->fl_pg_order);
if (unlikely(!pg)) {
q->large_alloc_failed++;
break; /* fall back to single pages */
}
mapping = dma_map_page(adap->pdev_dev, pg, 0,
PAGE_SIZE << s->fl_pg_order,
PCI_DMA_FROMDEVICE);
if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
__free_pages(pg, s->fl_pg_order);
goto out; /* do not try small pages for this error */
}
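		/* pages are aligned, so stash the size index in the low address bits */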
mapping |= RX_LARGE_PG_BUF;
*d++ = cpu_to_be64(mapping);
set_rx_sw_desc(sd, pg, mapping);
sd++;
q->avail++;
if (++q->pidx == q->size) {
q->pidx = 0;
sd = q->sdesc;
d = q->desc;
}
n--;
}
alloc_small_pages:
while (n--) {
pg = __skb_alloc_page(gfp, NULL);
if (unlikely(!pg)) {
q->alloc_failed++;
break;
}
mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
PCI_DMA_FROMDEVICE);
if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
put_page(pg);
goto out;
}
*d++ = cpu_to_be64(mapping);
set_rx_sw_desc(sd, pg, mapping);
sd++;
q->avail++;
if (++q->pidx == q->size) {
q->pidx = 0;
sd = q->sdesc;
d = q->desc;
}
}
out: cred = q->avail - cred;
q->pend_cred += cred;
ring_fl_db(adap, q);
if (unlikely(fl_starving(q))) {
smp_wmb();
set_bit(q->cntxt_id - adap->sge.egr_start,
adap->sge.starving_fl);
}
return cred;
}
static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
{
refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
GFP_ATOMIC);
}
/**
* alloc_ring - allocate resources for an SGE descriptor ring
* @dev: the PCI device's core device
* @nelem: the number of descriptors
* @elem_size: the size of each descriptor
* @sw_size: the size of the SW state associated with each ring element
* @phys: the physical address of the allocated ring
* @metadata: address of the array holding the SW state for the ring
* @stat_size: extra space in HW ring for status information
* @node: preferred node for memory allocations
*
* Allocates resources for an SGE descriptor ring, such as Tx queues,
* free buffer lists, or response queues. Each SGE ring requires
* space for its HW descriptors plus, optionally, space for the SW state
* associated with each HW entry (the metadata). The function returns
* three values: the virtual address for the HW ring (the return value
* of the function), the bus address of the HW ring, and the address
* of the SW ring.
*/
static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
size_t sw_size, dma_addr_t *phys, void *metadata,
size_t stat_size, int node)
{
size_t len = nelem * elem_size + stat_size;
void *s = NULL;
void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);
if (!p)
return NULL;
if (sw_size) {
s = kzalloc_node(nelem * sw_size, GFP_KERNEL, node);
if (!s) {
dma_free_coherent(dev, len, p, *phys);
return NULL;
}
}
if (metadata)
*(void **)metadata = s;
memset(p, 0, len);
return p;
}
/**
* sgl_len - calculates the size of an SGL of the given capacity
* @n: the number of SGL entries
*
* Calculates the number of flits needed for a scatter/gather list that
* can hold the given number of entries.
*/
static inline unsigned int sgl_len(unsigned int n)
{
n--;
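	/*
	 * The first entry costs 2 flits (command/len0 plus addr0); each
	 * further pair of entries packs into 3 flits (two lengths share a
	 * flit), and a trailing odd entry costs 2 more.
	 */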
return (3 * n) / 2 + (n & 1) + 2;
}
/**
* flits_to_desc - returns the num of Tx descriptors for the given flits
* @n: the number of flits
*
* Returns the number of Tx descriptors needed for the supplied number
* of flits.
*/
static inline unsigned int flits_to_desc(unsigned int n)
{
BUG_ON(n > SGE_MAX_WR_LEN / 8);
return DIV_ROUND_UP(n, 8);
}
/**
* is_eth_imm - can an Ethernet packet be sent as immediate data?
* @skb: the packet
*
* Returns whether an Ethernet packet is small enough to fit as
* immediate data.
*/
static inline int is_eth_imm(const struct sk_buff *skb)
{
return skb->len <= MAX_IMM_TX_PKT_LEN - sizeof(struct cpl_tx_pkt);
}
/**
* calc_tx_flits - calculate the number of flits for a packet Tx WR
* @skb: the packet
*
* Returns the number of flits needed for a Tx WR for the given Ethernet
* packet, including the needed WR and CPL headers.
*/
static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
{
unsigned int flits;
if (is_eth_imm(skb))
return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), 8);
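	/*
	 * Beyond the SGL we need 2 flits each for the firmware WR header and
	 * the TX packet CPL; an LSO packet needs 2 more below for the LSO
	 * CPL fields.
	 */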
flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4;
if (skb_shinfo(skb)->gso_size)
flits += 2;
return flits;
}
/**
* calc_tx_descs - calculate the number of Tx descriptors for a packet
* @skb: the packet
*
* Returns the number of Tx descriptors needed for the given Ethernet
* packet, including the needed WR and CPL headers.
*/
static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
{
return flits_to_desc(calc_tx_flits(skb));
}
/**
* write_sgl - populate a scatter/gather list for a packet
* @skb: the packet
* @q: the Tx queue we are writing into
* @sgl: starting location for writing the SGL
* @end: points right after the end of the SGL
* @start: start offset into skb main-body data to include in the SGL
* @addr: the list of bus addresses for the SGL elements
*
* Generates a gather list for the buffers that make up a packet.
* The caller must provide adequate space for the SGL that will be written.
* The SGL includes all of the packet's page fragments and the data in its
* main body except for the first @start bytes. @sgl must be 16-byte
* aligned and within a Tx descriptor with available space. @end points
* right after the end of the SGL but does not account for any potential
* wrap around, i.e., @end > @sgl.
*/
static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
struct ulptx_sgl *sgl, u64 *end, unsigned int start,
const dma_addr_t *addr)
{
unsigned int i, len;
struct ulptx_sge_pair *to;
const struct skb_shared_info *si = skb_shinfo(skb);
unsigned int nfrags = si->nr_frags;
struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
len = skb_headlen(skb) - start;
if (likely(len)) {
sgl->len0 = htonl(len);
sgl->addr0 = cpu_to_be64(addr[0] + start);
nfrags++;
} else {
sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
sgl->addr0 = cpu_to_be64(addr[1]);
}
sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags));
if (likely(--nfrags == 0))
return;
/*
* Most of the complexity below deals with the possibility we hit the
* end of the queue in the middle of writing the SGL. For this case
* only we create the SGL in a temporary buffer and then copy it.
*/
to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
to->addr[0] = cpu_to_be64(addr[i]);
to->addr[1] = cpu_to_be64(addr[++i]);
}
if (nfrags) {
to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
to->len[1] = cpu_to_be32(0);
to->addr[0] = cpu_to_be64(addr[i + 1]);
}
if (unlikely((u8 *)end > (u8 *)q->stat)) {
unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;
if (likely(part0))
memcpy(sgl->sge, buf, part0);
part1 = (u8 *)end - (u8 *)q->stat;
memcpy(q->desc, (u8 *)buf + part0, part1);
end = (void *)q->desc + part1;
}
if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
*end = 0;
}
/* This function copies a 64-byte coalesced work request to memory-mapped
 * BAR2 space (user-space writes). For a coalesced WR, the SGE fetches the
 * data from its FIFO instead of from host memory.
 */
static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
{
	int count = 8;
	while (count--)
		writeq(*src++, dst++);
}
/**
* ring_tx_db - check and potentially ring a Tx queue's doorbell
* @adap: the adapter
* @q: the Tx queue
* @n: number of new descriptors to give to HW
*
 * Ring the doorbell for a Tx queue.
*/
static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
{
unsigned int *wr, index;
wmb(); /* write descriptors before telling HW */
spin_lock(&q->db_lock);
if (!q->db_disabled) {
if (is_t4(adap->chip)) {
t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
QID(q->cntxt_id) | PIDX(n));
} else {
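			/*
			 * T5 and later: when exactly one descriptor was
			 * written, push the WR itself through the BAR2 user
			 * doorbell so the SGE can fetch it from its FIFO;
			 * otherwise just tell it how many new descriptors to
			 * fetch from host memory.
			 */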
if (n == 1) {
index = q->pidx ? (q->pidx - 1) : (q->size - 1);
wr = (unsigned int *)&q->desc[index];
cxgb_pio_copy((u64 __iomem *)
(adap->bar2 + q->udb + 64),
(u64 *)wr);
} else
writel(n, adap->bar2 + q->udb + 8);
wmb();
}
}
q->db_pidx = q->pidx;
spin_unlock(&q->db_lock);
}
/**
* inline_tx_skb - inline a packet's data into Tx descriptors
* @skb: the packet
* @q: the Tx queue where the packet will be inlined
* @pos: starting position in the Tx queue where to inline the packet
*
* Inline a packet's contents directly into Tx descriptors, starting at
* the given position within the Tx DMA ring.
* Most of the complexity of this operation is dealing with wrap arounds
* in the middle of the packet we want to inline.
*/
static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
void *pos)
{
u64 *p;
int left = (void *)q->stat - pos;
if (likely(skb->len <= left)) {
if (likely(!skb->data_len))
skb_copy_from_linear_data(skb, pos, skb->len);
else
skb_copy_bits(skb, 0, pos, skb->len);
pos += skb->len;
} else {
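		/*
		 * Wrap-around: copy what fits before the status page, then
		 * continue from the start of the descriptor ring.
		 */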
skb_copy_bits(skb, 0, pos, left);
skb_copy_bits(skb, left, q->desc, skb->len - left);
pos = (void *)q->desc + (skb->len - left);
}
/* 0-pad to multiple of 16 */
p = PTR_ALIGN(pos, 8);
if ((uintptr_t)p & 8)
*p = 0;
}
/*
* Figure out what HW csum a packet wants and return the appropriate control
* bits.
*/
static u64 hwcsum(const struct sk_buff *skb)
{
int csum_type;
const struct iphdr *iph = ip_hdr(skb);
if (iph->version == 4) {
if (iph->protocol == IPPROTO_TCP)
csum_type = TX_CSUM_TCPIP;
else if (iph->protocol == IPPROTO_UDP)
csum_type = TX_CSUM_UDPIP;
else {
nocsum: /*
* unknown protocol, disable HW csum
* and hope a bad packet is detected
*/
return TXPKT_L4CSUM_DIS;
}
} else {
/*
* this doesn't work with extension headers
*/
const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;
if (ip6h->nexthdr == IPPROTO_TCP)
csum_type = TX_CSUM_TCPIP6;
else if (ip6h->nexthdr == IPPROTO_UDP)
csum_type = TX_CSUM_UDPIP6;
else
goto nocsum;
}
if (likely(csum_type >= TX_CSUM_TCPIP))
return TXPKT_CSUM_TYPE(csum_type) |
TXPKT_IPHDR_LEN(skb_network_header_len(skb)) |
TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN);
else {
int start = skb_transport_offset(skb);
return TXPKT_CSUM_TYPE(csum_type) | TXPKT_CSUM_START(start) |
TXPKT_CSUM_LOC(start + skb->csum_offset);
}
}
static void eth_txq_stop(struct sge_eth_txq *q)
{
netif_tx_stop_queue(q->txq);
q->q.stops++;
}
static inline void txq_advance(struct sge_txq *q, unsigned int n)
{
q->in_use += n;
q->pidx += n;
if (q->pidx >= q->size)
q->pidx -= q->size;
}
/**
* t4_eth_xmit - add a packet to an Ethernet Tx queue
* @skb: the packet
* @dev: the egress net device
*
* Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled.
*/
netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
u32 wr_mid;
u64 cntrl, *end;
int qidx, credits;
unsigned int flits, ndesc;
struct adapter *adap;
struct sge_eth_txq *q;
const struct port_info *pi;
struct fw_eth_tx_pkt_wr *wr;
struct cpl_tx_pkt_core *cpl;
const struct skb_shared_info *ssi;
dma_addr_t addr[MAX_SKB_FRAGS + 1];
/*
* The chip min packet length is 10 octets but play safe and reject
* anything shorter than an Ethernet header.
*/
if (unlikely(skb->len < ETH_HLEN)) {
out_free: dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
pi = netdev_priv(dev);
adap = pi->adapter;
qidx = skb_get_queue_mapping(skb);
q = &adap->sge.ethtxq[qidx + pi->first_qset];
reclaim_completed_tx(adap, &q->q, true);
flits = calc_tx_flits(skb);
ndesc = flits_to_desc(flits);
credits = txq_avail(&q->q) - ndesc;
if (unlikely(credits < 0)) {
eth_txq_stop(q);
dev_err(adap->pdev_dev,
"%s: Tx ring %u full while queue awake!\n",
dev->name, qidx);
return NETDEV_TX_BUSY;
}
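	/* only packets too large to be inlined need their buffers DMA-mapped */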
if (!is_eth_imm(skb) &&
unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
q->mapping_err++;
goto out_free;
}
wr_mid = FW_WR_LEN16(DIV_ROUND_UP(flits, 2));
if (unlikely(credits < ETHTXQ_STOP_THRES)) {
eth_txq_stop(q);
wr_mid |= FW_WR_EQUEQ | FW_WR_EQUIQ;
}
wr = (void *)&q->q.desc[q->q.pidx];
wr->equiq_to_len16 = htonl(wr_mid);
wr->r3 = cpu_to_be64(0);
end = (u64 *)wr + flits;
ssi = skb_shinfo(skb);
if (ssi->gso_size) {
struct cpl_tx_pkt_lso *lso = (void *)wr;
bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
int l3hdr_len = skb_network_header_len(skb);
int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
FW_WR_IMMDLEN(sizeof(*lso)));
lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) |
LSO_FIRST_SLICE | LSO_LAST_SLICE |
LSO_IPV6(v6) |
LSO_ETHHDR_LEN(eth_xtra_len / 4) |
LSO_IPHDR_LEN(l3hdr_len / 4) |
LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
lso->c.ipid_ofst = htons(0);
lso->c.mss = htons(ssi->gso_size);
lso->c.seqno_offset = htonl(0);
lso->c.len = htonl(skb->len);
cpl = (void *)(lso + 1);
cntrl = TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
TXPKT_IPHDR_LEN(l3hdr_len) |
TXPKT_ETHHDR_LEN(eth_xtra_len);
q->tso++;
q->tx_cso += ssi->gso_segs;
} else {
int len;
len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
FW_WR_IMMDLEN(len));
cpl = (void *)(wr + 1);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
q->tx_cso++;
} else
cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
}
if (vlan_tx_tag_present(skb)) {
q->vlan_ins++;
cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb));
}
cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) |
TXPKT_INTF(pi->tx_chan) | TXPKT_PF(adap->fn));
cpl->pack = htons(0);
cpl->len = htons(skb->len);
cpl->ctrl1 = cpu_to_be64(cntrl);
if (is_eth_imm(skb)) {
inline_tx_skb(skb, &q->q, cpl + 1);
dev_kfree_skb(skb);
} else {
int last_desc;
write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), end, 0,
addr);
skb_orphan(skb);
last_desc = q->q.pidx + ndesc - 1;
if (last_desc >= q->q.size)
last_desc -= q->q.size;
q->q.sdesc[last_desc].skb = skb;
q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1);
}
txq_advance(&q->q, ndesc);
ring_tx_db(adap, &q->q, ndesc);
return NETDEV_TX_OK;
}
/**
* reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
* @q: the SGE control Tx queue
*
* This is a variant of reclaim_completed_tx() that is used for Tx queues
* that send only immediate data (presently just the control queues) and
* thus do not have any sk_buffs to release.
*/
static inline void reclaim_completed_tx_imm(struct sge_txq *q)
{
int hw_cidx = ntohs(q->stat->cidx);
int reclaim = hw_cidx - q->cidx;
if (reclaim < 0)
reclaim += q->size;
q->in_use -= reclaim;
q->cidx = hw_cidx;
}
/**
* is_imm - check whether a packet can be sent as immediate data
* @skb: the packet
*
* Returns true if a packet can be sent as a WR with immediate data.
*/
static inline int is_imm(const struct sk_buff *skb)
{
return skb->len <= MAX_CTRL_WR_LEN;
}
/**
* ctrlq_check_stop - check if a control queue is full and should stop
* @q: the queue
* @wr: most recent WR written to the queue
*
* Check if a control queue has become full and should be stopped.
 * We clean up control queue descriptors very lazily, only when we are out
 * of room.
* If the queue is still full after reclaiming any completed descriptors
* we suspend it and have the last WR wake it up.
*/
static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
{
reclaim_completed_tx_imm(&q->q);
if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ);
q->q.stops++;
q->full = 1;
}
}
/**
* ctrl_xmit - send a packet through an SGE control Tx queue
* @q: the control queue
* @skb: the packet
*
* Send a packet through an SGE control Tx queue. Packets sent through
* a control queue must fit entirely as immediate data.
*/
static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
{
unsigned int ndesc;
struct fw_wr_hdr *wr;
if (unlikely(!is_imm(skb))) {
WARN_ON(1);
dev_kfree_skb(skb);
return NET_XMIT_DROP;
}
ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc));
spin_lock(&q->sendq.lock);
if (unlikely(q->full)) {
skb->priority = ndesc; /* save for restart */
__skb_queue_tail(&q->sendq, skb);
spin_unlock(&q->sendq.lock);
return NET_XMIT_CN;
}
wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
inline_tx_skb(skb, &q->q, wr);
txq_advance(&q->q, ndesc);
if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES))
ctrlq_check_stop(q, wr);
ring_tx_db(q->adap, &q->q, ndesc);
spin_unlock(&q->sendq.lock);
kfree_skb(skb);
return NET_XMIT_SUCCESS;
}
/**
* restart_ctrlq - restart a suspended control queue
* @data: the control queue to restart
*
* Resumes transmission on a suspended Tx control queue.
*/
static void restart_ctrlq(unsigned long data)
{
struct sk_buff *skb;
unsigned int written = 0;
struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data;
spin_lock(&q->sendq.lock);
reclaim_completed_tx_imm(&q->q);
BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES); /* q should be empty */
while ((skb = __skb_dequeue(&q->sendq)) != NULL) {
struct fw_wr_hdr *wr;
unsigned int ndesc = skb->priority; /* previously saved */
/*
* Write descriptors and free skbs outside the lock to limit
* wait times. q->full is still set so new skbs will be queued.
*/
spin_unlock(&q->sendq.lock);
wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
inline_tx_skb(skb, &q->q, wr);
kfree_skb(skb);
written += ndesc;
txq_advance(&q->q, ndesc);
if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
unsigned long old = q->q.stops;
ctrlq_check_stop(q, wr);
if (q->q.stops != old) { /* suspended anew */
spin_lock(&q->sendq.lock);
goto ringdb;
}
}
if (written > 16) {
ring_tx_db(q->adap, &q->q, written);
written = 0;
}
spin_lock(&q->sendq.lock);
}
q->full = 0;
ringdb: if (written)
ring_tx_db(q->adap, &q->q, written);
spin_unlock(&q->sendq.lock);
}
/**
* t4_mgmt_tx - send a management message
* @adap: the adapter
* @skb: the packet containing the management message
*
* Send a management message through control queue 0.
*/
int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
{
int ret;
local_bh_disable();
ret = ctrl_xmit(&adap->sge.ctrlq[0], skb);
local_bh_enable();
return ret;
}
/**
* is_ofld_imm - check whether a packet can be sent as immediate data
* @skb: the packet
*
* Returns true if a packet can be sent as an offload WR with immediate
* data. We currently use the same limit as for Ethernet packets.
*/
static inline int is_ofld_imm(const struct sk_buff *skb)
{
return skb->len <= MAX_IMM_TX_PKT_LEN;
}
/**
* calc_tx_flits_ofld - calculate # of flits for an offload packet
* @skb: the packet
*
* Returns the number of flits needed for the given offload packet.
* These packets are already fully constructed and no additional headers
* will be added.
*/
static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
{
unsigned int flits, cnt;
if (is_ofld_imm(skb))
return DIV_ROUND_UP(skb->len, 8);
flits = skb_transport_offset(skb) / 8U; /* headers */
cnt = skb_shinfo(skb)->nr_frags;
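	/* any linear data beyond the transport headers adds one SGL entry */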
if (skb->tail != skb->transport_header)
cnt++;
return flits + sgl_len(cnt);
}
/**
* txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
* @q: the queue to stop
*
* Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
* inability to map packets. A periodic timer attempts to restart
* queues so marked.
*/
static void txq_stop_maperr(struct sge_ofld_txq *q)
{
q->mapping_err++;
q->q.stops++;
set_bit(q->q.cntxt_id - q->adap->sge.egr_start,
q->adap->sge.txq_maperr);
}
/**
* ofldtxq_stop - stop an offload Tx queue that has become full
* @q: the queue to stop
* @skb: the packet causing the queue to become full
*
* Stops an offload Tx queue that has become full and modifies the packet
* being written to request a wakeup.
*/
static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb)
{
struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data;
wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ);
q->q.stops++;
q->full = 1;
}
/**
* service_ofldq - restart a suspended offload queue
* @q: the offload queue
*
* Services an offload Tx queue by moving packets from its packet queue
* to the HW Tx ring. The function starts and ends with the queue locked.
*/
static void service_ofldq(struct sge_ofld_txq *q)
{
u64 *pos;
int credits;
struct sk_buff *skb;
unsigned int written = 0;
unsigned int flits, ndesc;
while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) {
/*
* We drop the lock but leave skb on sendq, thus retaining
* exclusive access to the state of the queue.
*/
spin_unlock(&q->sendq.lock);
reclaim_completed_tx(q->adap, &q->q, false);
flits = skb->priority; /* previously saved */
ndesc = flits_to_desc(flits);
credits = txq_avail(&q->q) - ndesc;
BUG_ON(credits < 0);
if (unlikely(credits < TXQ_STOP_THRES))
ofldtxq_stop(q, skb);
pos = (u64 *)&q->q.desc[q->q.pidx];
if (is_ofld_imm(skb))
inline_tx_skb(skb, &q->q, pos);
else if (map_skb(q->adap->pdev_dev, skb,
(dma_addr_t *)skb->head)) {
txq_stop_maperr(q);
spin_lock(&q->sendq.lock);
break;
} else {
int last_desc, hdr_len = skb_transport_offset(skb);
memcpy(pos, skb->data, hdr_len);
write_sgl(skb, &q->q, (void *)pos + hdr_len,
pos + flits, hdr_len,
(dma_addr_t *)skb->head);
#ifdef CONFIG_NEED_DMA_MAP_STATE
skb->dev = q->adap->port[0];
skb->destructor = deferred_unmap_destructor;
#endif
last_desc = q->q.pidx + ndesc - 1;
if (last_desc >= q->q.size)
last_desc -= q->q.size;
q->q.sdesc[last_desc].skb = skb;
}
txq_advance(&q->q, ndesc);
written += ndesc;
if (unlikely(written > 32)) {
ring_tx_db(q->adap, &q->q, written);
written = 0;
}
spin_lock(&q->sendq.lock);
__skb_unlink(skb, &q->sendq);
if (is_ofld_imm(skb))
kfree_skb(skb);
}
if (likely(written))
ring_tx_db(q->adap, &q->q, written);
}
/**
* ofld_xmit - send a packet through an offload queue
* @q: the Tx offload queue
* @skb: the packet
*
* Send an offload packet through an SGE offload queue.
*/
static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb)
{
skb->priority = calc_tx_flits_ofld(skb); /* save for restart */
spin_lock(&q->sendq.lock);
__skb_queue_tail(&q->sendq, skb);
if (q->sendq.qlen == 1)
service_ofldq(q);
spin_unlock(&q->sendq.lock);
return NET_XMIT_SUCCESS;
}
/**
* restart_ofldq - restart a suspended offload queue
* @data: the offload queue to restart
*
* Resumes transmission on a suspended Tx offload queue.
*/
static void restart_ofldq(unsigned long data)
{
struct sge_ofld_txq *q = (struct sge_ofld_txq *)data;
spin_lock(&q->sendq.lock);
q->full = 0; /* the queue actually is completely empty now */
service_ofldq(q);
spin_unlock(&q->sendq.lock);
}
/**
* skb_txq - return the Tx queue an offload packet should use
* @skb: the packet
*
* Returns the Tx queue an offload packet should use as indicated by bits
* 1-15 in the packet's queue_mapping.
*/
static inline unsigned int skb_txq(const struct sk_buff *skb)
{
return skb->queue_mapping >> 1;
}
/**
* is_ctrl_pkt - return whether an offload packet is a control packet
* @skb: the packet
*
* Returns whether an offload packet should use an OFLD or a CTRL
* Tx queue as indicated by bit 0 in the packet's queue_mapping.
*/
static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb)
{
return skb->queue_mapping & 1;
}
static inline int ofld_send(struct adapter *adap, struct sk_buff *skb)
{
unsigned int idx = skb_txq(skb);
if (unlikely(is_ctrl_pkt(skb)))
return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
return ofld_xmit(&adap->sge.ofldtxq[idx], skb);
}
/**
* t4_ofld_send - send an offload packet
* @adap: the adapter
* @skb: the packet
*
* Sends an offload packet. We use the packet queue_mapping to select the
* appropriate Tx queue as follows: bit 0 indicates whether the packet
* should be sent as regular or control, bits 1-15 select the queue.
*/
int t4_ofld_send(struct adapter *adap, struct sk_buff *skb)
{
int ret;
local_bh_disable();
ret = ofld_send(adap, skb);
local_bh_enable();
return ret;
}
/**
* cxgb4_ofld_send - send an offload packet
* @dev: the net device
* @skb: the packet
*
* Sends an offload packet. This is an exported version of @t4_ofld_send,
* intended for ULDs.
*/
int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
{
return t4_ofld_send(netdev2adap(dev), skb);
}
EXPORT_SYMBOL(cxgb4_ofld_send);
static inline void copy_frags(struct sk_buff *skb,
const struct pkt_gl *gl, unsigned int offset)
{
int i;
/* usually there's just one frag */
__skb_fill_page_desc(skb, 0, gl->frags[0].page,
gl->frags[0].offset + offset,
gl->frags[0].size - offset);
skb_shinfo(skb)->nr_frags = gl->nfrags;
for (i = 1; i < gl->nfrags; i++)
__skb_fill_page_desc(skb, i, gl->frags[i].page,
gl->frags[i].offset,
gl->frags[i].size);
/* get a reference to the last page, we don't own it */
get_page(gl->frags[gl->nfrags - 1].page);
}
/**
* cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list
* @gl: the gather list
* @skb_len: size of sk_buff main body if it carries fragments
* @pull_len: amount of data to move to the sk_buff's main body
*
* Builds an sk_buff from the given packet gather list. Returns the
* sk_buff or %NULL if sk_buff allocation failed.
*/
struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
unsigned int skb_len, unsigned int pull_len)
{
struct sk_buff *skb;
/*
* Below we rely on RX_COPY_THRES being less than the smallest Rx buffer
* size, which is expected since buffers are at least PAGE_SIZEd.
* In this case packets up to RX_COPY_THRES have only one fragment.
*/
if (gl->tot_len <= RX_COPY_THRES) {
skb = dev_alloc_skb(gl->tot_len);
if (unlikely(!skb))
goto out;
__skb_put(skb, gl->tot_len);
skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
} else {
skb = dev_alloc_skb(skb_len);
if (unlikely(!skb))
goto out;
__skb_put(skb, pull_len);
skb_copy_to_linear_data(skb, gl->va, pull_len);
copy_frags(skb, gl, pull_len);
skb->len = gl->tot_len;
skb->data_len = skb->len - pull_len;
skb->truesize += skb->data_len;
}
out: return skb;
}
EXPORT_SYMBOL(cxgb4_pktgl_to_skb);
/**
* t4_pktgl_free - free a packet gather list
* @gl: the gather list
*
* Releases the pages of a packet gather list. We do not own the last
* page on the list and do not free it.
*/
static void t4_pktgl_free(const struct pkt_gl *gl)
{
int n;
const struct page_frag *p;
for (p = gl->frags, n = gl->nfrags - 1; n--; p++)
put_page(p->page);
}
/*
* Process an MPS trace packet. Give it an unused protocol number so it won't
* be delivered to anyone and send it to the stack for capture.
*/
static noinline int handle_trace_pkt(struct adapter *adap,
const struct pkt_gl *gl)
{
struct sk_buff *skb;
skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
if (unlikely(!skb)) {
t4_pktgl_free(gl);
return 0;
}
if (is_t4(adap->chip))
__skb_pull(skb, sizeof(struct cpl_trace_pkt));
else
__skb_pull(skb, sizeof(struct cpl_t5_trace_pkt));
skb_reset_mac_header(skb);
skb->protocol = htons(0xffff);
skb->dev = adap->port[0];
netif_receive_skb(skb);
return 0;
}
static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
const struct cpl_rx_pkt *pkt)
{
struct adapter *adapter = rxq->rspq.adap;
struct sge *s = &adapter->sge;
int ret;
struct sk_buff *skb;
skb = napi_get_frags(&rxq->rspq.napi);
if (unlikely(!skb)) {
t4_pktgl_free(gl);
rxq->stats.rx_drops++;
return;
}
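	/* strip the HW-inserted packet pad and build a frag-only skb for GRO */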
copy_frags(skb, gl, s->pktshift);
skb->len = gl->tot_len - s->pktshift;
skb->data_len = skb->len;
skb->truesize += skb->data_len;
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb_record_rx_queue(skb, rxq->rspq.idx);
if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
if (unlikely(pkt->vlan_ex)) {
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
rxq->stats.vlan_ex++;
}
ret = napi_gro_frags(&rxq->rspq.napi);
if (ret == GRO_HELD)
rxq->stats.lro_pkts++;
else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
rxq->stats.lro_merged++;
rxq->stats.pkts++;
rxq->stats.rx_cso++;
}
/**
* t4_ethrx_handler - process an ingress ethernet packet
* @q: the response queue that received the packet
* @rsp: the response queue descriptor holding the RX_PKT message
* @si: the gather list of packet fragments
*
* Process an ingress ethernet packet and deliver it to the stack.
*/
int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
const struct pkt_gl *si)
{
bool csum_ok;
struct sk_buff *skb;
const struct cpl_rx_pkt *pkt;
struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
struct sge *s = &q->adap->sge;
int cpl_trace_pkt = is_t4(q->adap->chip) ?
CPL_TRACE_PKT : CPL_TRACE_PKT_T5;
if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
return handle_trace_pkt(q->adap, si);
pkt = (const struct cpl_rx_pkt *)rsp;
csum_ok = pkt->csum_calc && !pkt->err_vec;
if ((pkt->l2info & htonl(RXF_TCP)) &&
(q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
do_gro(rxq, si, pkt);
return 0;
}
skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN);
if (unlikely(!skb)) {
t4_pktgl_free(si);
rxq->stats.rx_drops++;
return 0;
}
__skb_pull(skb, s->pktshift); /* remove ethernet header padding */
skb->protocol = eth_type_trans(skb, q->netdev);
skb_record_rx_queue(skb, q->idx);
if (skb->dev->features & NETIF_F_RXHASH)
skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
rxq->stats.pkts++;
if (csum_ok && (q->netdev->features & NETIF_F_RXCSUM) &&
(pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
if (!pkt->ip_frag) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
rxq->stats.rx_cso++;
} else if (pkt->l2info & htonl(RXF_IP)) {
__sum16 c = (__force __sum16)pkt->csum;
skb->csum = csum_unfold(c);
skb->ip_summed = CHECKSUM_COMPLETE;
rxq->stats.rx_cso++;
}
} else
skb_checksum_none_assert(skb);
if (unlikely(pkt->vlan_ex)) {
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
rxq->stats.vlan_ex++;
}
netif_receive_skb(skb);
return 0;
}
/**
* restore_rx_bufs - put back a packet's Rx buffers
* @si: the packet gather list
* @q: the SGE free list
* @frags: number of FL buffers to restore
*
* Puts back on an FL the Rx buffers associated with @si. The buffers
* have already been unmapped and are left unmapped, we mark them so to
* prevent further unmapping attempts.
*
* This function undoes a series of @unmap_rx_buf calls when we find out
 * that the current packet can't be processed right away after all and we
* need to come back to it later. This is a very rare event and there's
* no effort to make this particularly efficient.
*/
static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
int frags)
{
struct rx_sw_desc *d;
while (frags--) {
if (q->cidx == 0)
q->cidx = q->size - 1;
else
q->cidx--;
d = &q->sdesc[q->cidx];
d->page = si->frags[frags].page;
d->dma_addr |= RX_UNMAPPED_BUF;
q->avail++;
}
}
/**
* is_new_response - check if a response is newly written
* @r: the response descriptor
* @q: the response queue
*
* Returns true if a response descriptor contains a yet unprocessed
* response.
*/
static inline bool is_new_response(const struct rsp_ctrl *r,
const struct sge_rspq *q)
{
return RSPD_GEN(r->type_gen) == q->gen;
}
/**
* rspq_next - advance to the next entry in a response queue
* @q: the queue
*
* Updates the state of a response queue to advance it to the next entry.
*/
static inline void rspq_next(struct sge_rspq *q)
{
q->cur_desc = (void *)q->cur_desc + q->iqe_len;
if (unlikely(++q->cidx == q->size)) {
q->cidx = 0;
q->gen ^= 1;
q->cur_desc = q->desc;
}
}
/**
* process_responses - process responses from an SGE response queue
* @q: the ingress queue to process
* @budget: how many responses can be processed in this round
*
* Process responses from an SGE response queue up to the supplied budget.
* Responses include received packets as well as control messages from FW
* or HW.
*
* Additionally choose the interrupt holdoff time for the next interrupt
* on this queue. If the system is under memory shortage use a fairly
* long delay to help recovery.
*/
static int process_responses(struct sge_rspq *q, int budget)
{
int ret, rsp_type;
int budget_left = budget;
const struct rsp_ctrl *rc;
struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
struct adapter *adapter = q->adap;
struct sge *s = &adapter->sge;
while (likely(budget_left)) {
rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
if (!is_new_response(rc, q))
break;
rmb();
rsp_type = RSPD_TYPE(rc->type_gen);
if (likely(rsp_type == RSP_TYPE_FLBUF)) {
struct page_frag *fp;
struct pkt_gl si;
const struct rx_sw_desc *rsd;
u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;
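			/*
			 * RSPD_NEWBUF: the packet starts in a fresh FL buffer,
			 * so retire any partially consumed previous buffer.
			 */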
if (len & RSPD_NEWBUF) {
if (likely(q->offset > 0)) {
free_rx_bufs(q->adap, &rxq->fl, 1);
q->offset = 0;
}
len = RSPD_LEN(len);
}
si.tot_len = len;
/* gather packet fragments */
for (frags = 0, fp = si.frags; ; frags++, fp++) {
rsd = &rxq->fl.sdesc[rxq->fl.cidx];
bufsz = get_buf_size(adapter, rsd);
fp->page = rsd->page;
fp->offset = q->offset;
fp->size = min(bufsz, len);
len -= fp->size;
if (!len)
break;
unmap_rx_buf(q->adap, &rxq->fl);
}
/*
* Last buffer remains mapped so explicitly make it
* coherent for CPU access.
*/
dma_sync_single_for_cpu(q->adap->pdev_dev,
get_buf_addr(rsd),
fp->size, DMA_FROM_DEVICE);
si.va = page_address(si.frags[0].page) +
si.frags[0].offset;
prefetch(si.va);
si.nfrags = frags + 1;
ret = q->handler(q, q->cur_desc, &si);
if (likely(ret == 0))
q->offset += ALIGN(fp->size, s->fl_align);
else
restore_rx_bufs(&si, &rxq->fl, frags);
} else if (likely(rsp_type == RSP_TYPE_CPL)) {
ret = q->handler(q, q->cur_desc, NULL);
} else {
ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
}
if (unlikely(ret)) {
/* couldn't process descriptor, back off for recovery */
q->next_intr_params = QINTR_TIMER_IDX(NOMEM_TMR_IDX);
break;
}
rspq_next(q);
budget_left--;
}
if (q->offset >= 0 && rxq->fl.size - rxq->fl.avail >= 16)
__refill_fl(q->adap, &rxq->fl);
return budget - budget_left;
}
/**
* napi_rx_handler - the NAPI handler for Rx processing
* @napi: the napi instance
* @budget: how many packets we can process in this round
*
* Handler for new data events when using NAPI. This does not need any
* locking or protection from interrupts as data interrupts are off at
* this point and other adapter interrupts do not interfere (the latter
 * is not a concern at all with MSI-X as non-data interrupts then have
* a separate handler).
*/
static int napi_rx_handler(struct napi_struct *napi, int budget)
{
unsigned int params;
struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
int work_done = process_responses(q, budget);
if (likely(work_done < budget)) {
napi_complete(napi);
params = q->next_intr_params;
q->next_intr_params = q->intr_params;
} else
params = QINTR_TIMER_IDX(7);
t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS), CIDXINC(work_done) |
INGRESSQID((u32)q->cntxt_id) | SEINTARM(params));
return work_done;
}
/*
* The MSI-X interrupt handler for an SGE response queue.
*/
irqreturn_t t4_sge_intr_msix(int irq, void *cookie)
{
struct sge_rspq *q = cookie;
napi_schedule(&q->napi);
return IRQ_HANDLED;
}
/*
* Process the indirect interrupt entries in the interrupt queue and kick off
* NAPI for each queue that has generated an entry.
*/
static unsigned int process_intrq(struct adapter *adap)
{
unsigned int credits;
const struct rsp_ctrl *rc;
struct sge_rspq *q = &adap->sge.intrq;
spin_lock(&adap->sge.intrq_lock);
for (credits = 0; ; credits++) {
rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
if (!is_new_response(rc, q))
break;
rmb();
if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) {
unsigned int qid = ntohl(rc->pldbuflen_qid);
qid -= adap->sge.ingr_start;
napi_schedule(&adap->sge.ingr_map[qid]->napi);
}
rspq_next(q);
}
t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), CIDXINC(credits) |
INGRESSQID(q->cntxt_id) | SEINTARM(q->intr_params));
spin_unlock(&adap->sge.intrq_lock);
return credits;
}
/*
 * The MSI interrupt handler handles data events from SGE response queues as
 * well as error and other async events, as they all use the same MSI vector.
*/
static irqreturn_t t4_intr_msi(int irq, void *cookie)
{
struct adapter *adap = cookie;
t4_slow_intr_handler(adap);
process_intrq(adap);
return IRQ_HANDLED;
}
/*
* Interrupt handler for legacy INTx interrupts.
* Handles data events from SGE response queues as well as error and other
* async events as they all use the same interrupt line.
*/
static irqreturn_t t4_intr_intx(int irq, void *cookie)
{
struct adapter *adap = cookie;
t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI), 0);
if (t4_slow_intr_handler(adap) | process_intrq(adap))
return IRQ_HANDLED;
return IRQ_NONE; /* probably shared interrupt */
}
/**
* t4_intr_handler - select the top-level interrupt handler
* @adap: the adapter
*
* Selects the top-level interrupt handler based on the type of interrupts
* (MSI-X, MSI, or INTx).
*/
irq_handler_t t4_intr_handler(struct adapter *adap)
{
if (adap->flags & USING_MSIX)
return t4_sge_intr_msix;
if (adap->flags & USING_MSI)
return t4_intr_msi;
return t4_intr_intx;
}
static void sge_rx_timer_cb(unsigned long data)
{
unsigned long m;
unsigned int i, cnt[2];
struct adapter *adap = (struct adapter *)data;
struct sge *s = &adap->sge;
for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++)
for (m = s->starving_fl[i]; m; m &= m - 1) {
struct sge_eth_rxq *rxq;
unsigned int id = __ffs(m) + i * BITS_PER_LONG;
struct sge_fl *fl = s->egr_map[id];
clear_bit(id, s->starving_fl);
smp_mb__after_clear_bit();
if (fl_starving(fl)) {
rxq = container_of(fl, struct sge_eth_rxq, fl);
if (napi_reschedule(&rxq->rspq.napi))
fl->starving++;
else
set_bit(id, s->starving_fl);
}
}
t4_write_reg(adap, SGE_DEBUG_INDEX, 13);
cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH);
cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
for (i = 0; i < 2; i++)
if (cnt[i] >= s->starve_thres) {
if (s->idma_state[i] || cnt[i] == 0xffffffff)
continue;
s->idma_state[i] = 1;
t4_write_reg(adap, SGE_DEBUG_INDEX, 11);
m = t4_read_reg(adap, SGE_DEBUG_DATA_LOW) >> (i * 16);
dev_warn(adap->pdev_dev,
"SGE idma%u starvation detected for "
"queue %lu\n", i, m & 0xffff);
} else if (s->idma_state[i])
s->idma_state[i] = 0;
mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
}
static void sge_tx_timer_cb(unsigned long data)
{
unsigned long m;
unsigned int i, budget;
struct adapter *adap = (struct adapter *)data;
struct sge *s = &adap->sge;
for (i = 0; i < ARRAY_SIZE(s->txq_maperr); i++)
for (m = s->txq_maperr[i]; m; m &= m - 1) {
unsigned long id = __ffs(m) + i * BITS_PER_LONG;
struct sge_ofld_txq *txq = s->egr_map[id];
clear_bit(id, s->txq_maperr);
tasklet_schedule(&txq->qresume_tsk);
}
budget = MAX_TIMER_TX_RECLAIM;
i = s->ethtxq_rover;
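	/*
	 * Walk the Ethernet Tx queues round-robin, reclaiming completed
	 * descriptors from any queue whose last transmit is at least 10 ms
	 * old, until the reclaim budget is spent.
	 */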
do {
struct sge_eth_txq *q = &s->ethtxq[i];
if (q->q.in_use &&
time_after_eq(jiffies, q->txq->trans_start + HZ / 100) &&
__netif_tx_trylock(q->txq)) {
int avail = reclaimable(&q->q);
if (avail) {
if (avail > budget)
avail = budget;
free_tx_desc(adap, &q->q, avail, true);
q->q.in_use -= avail;
budget -= avail;
}
__netif_tx_unlock(q->txq);
}
if (++i >= s->ethqsets)
i = 0;
} while (budget && i != s->ethtxq_rover);
s->ethtxq_rover = i;
mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
}
int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
struct net_device *dev, int intr_idx,
struct sge_fl *fl, rspq_handler_t hnd)
{
int ret, flsz = 0;
struct fw_iq_cmd c;
struct sge *s = &adap->sge;
struct port_info *pi = netdev_priv(dev);
/* Size needs to be multiple of 16, including status entry. */
iq->size = roundup(iq->size, 16);
iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
&iq->phys_addr, NULL, 0, NUMA_NO_NODE);
if (!iq->desc)
return -ENOMEM;
memset(&c, 0, sizeof(c));
c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
FW_CMD_WRITE | FW_CMD_EXEC |
FW_IQ_CMD_PFN(adap->fn) | FW_IQ_CMD_VFN(0));
c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC | FW_IQ_CMD_IQSTART(1) |
FW_LEN16(c));
c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
FW_IQ_CMD_IQASYNCH(fwevtq) | FW_IQ_CMD_VIID(pi->viid) |
FW_IQ_CMD_IQANDST(intr_idx < 0) | FW_IQ_CMD_IQANUD(1) |
FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx :
-intr_idx - 1));
c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
FW_IQ_CMD_IQGTSMODE |
FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) |
FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4));
c.iqsize = htons(iq->size);
c.iqaddr = cpu_to_be64(iq->phys_addr);
if (fl) {
fl->size = roundup(fl->size, 8);
fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
sizeof(struct rx_sw_desc), &fl->addr,
&fl->sdesc, s->stat_len, NUMA_NO_NODE);
if (!fl->desc)
goto fl_nomem;
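		/* HW sizes the FL in units of 8 pointers (64 bytes), plus the status page */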
flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN(1) |
FW_IQ_CMD_FL0FETCHRO(1) |
FW_IQ_CMD_FL0DATARO(1) |
FW_IQ_CMD_FL0PADEN(1));
c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN(2) |
FW_IQ_CMD_FL0FBMAX(3));
c.fl0size = htons(flsz);
c.fl0addr = cpu_to_be64(fl->addr);
}
ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
if (ret)
goto err;
netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
iq->cur_desc = iq->desc;
iq->cidx = 0;
iq->gen = 1;
iq->next_intr_params = iq->intr_params;
iq->cntxt_id = ntohs(c.iqid);
iq->abs_id = ntohs(c.physiqid);
iq->size--; /* subtract status entry */
iq->adap = adap;
iq->netdev = dev;
iq->handler = hnd;
/* set offset to -1 to distinguish ingress queues without FL */
iq->offset = fl ? 0 : -1;
adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq;
if (fl) {
fl->cntxt_id = ntohs(c.fl0id);
fl->avail = fl->pend_cred = 0;
fl->pidx = fl->cidx = 0;
fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;
refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
}
return 0;
fl_nomem:
ret = -ENOMEM;
err:
if (iq->desc) {
dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len,
iq->desc, iq->phys_addr);
iq->desc = NULL;
}
if (fl && fl->desc) {
kfree(fl->sdesc);
fl->sdesc = NULL;
dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc),
fl->desc, fl->addr);
fl->desc = NULL;
}
return ret;
}
static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
{
q->cntxt_id = id;
if (!is_t4(adap->chip)) {
unsigned int s_qpp;
unsigned short udb_density;
unsigned long qpshift;
int page;
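		/*
		 * Compute this queue's user doorbell offset within BAR2:
		 * each page holds udb_density doorbells, 128 bytes apart.
		 */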
s_qpp = QUEUESPERPAGEPF1 * adap->fn;
udb_density = 1 << QUEUESPERPAGEPF0_GET((t4_read_reg(adap,
SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp));
qpshift = PAGE_SHIFT - ilog2(udb_density);
q->udb = q->cntxt_id << qpshift;
q->udb &= PAGE_MASK;
page = q->udb / PAGE_SIZE;
q->udb += (q->cntxt_id - (page * udb_density)) * 128;
}
q->in_use = 0;
q->cidx = q->pidx = 0;
q->stops = q->restarts = 0;
q->stat = (void *)&q->desc[q->size];
spin_lock_init(&q->db_lock);
adap->sge.egr_map[id - adap->sge.egr_start] = q;
}
int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
struct net_device *dev, struct netdev_queue *netdevq,
unsigned int iqid)
{
int ret, nentries;
struct fw_eq_eth_cmd c;
struct sge *s = &adap->sge;
struct port_info *pi = netdev_priv(dev);
/* Add status entries */
nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
&txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
netdev_queue_numa_node_read(netdevq));
if (!txq->q.desc)
return -ENOMEM;
memset(&c, 0, sizeof(c));
c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
FW_CMD_WRITE | FW_CMD_EXEC |
FW_EQ_ETH_CMD_PFN(adap->fn) | FW_EQ_ETH_CMD_VFN(0));
c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC |
FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
c.viid_pkd = htonl(FW_EQ_ETH_CMD_VIID(pi->viid));
c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE(2) |
FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) |
FW_EQ_ETH_CMD_FETCHRO(1) |
FW_EQ_ETH_CMD_IQID(iqid));
c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN(2) |
FW_EQ_ETH_CMD_FBMAX(3) |
FW_EQ_ETH_CMD_CIDXFTHRESH(5) |
FW_EQ_ETH_CMD_EQSIZE(nentries));
c.eqaddr = cpu_to_be64(txq->q.phys_addr);
ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
if (ret) {
kfree(txq->q.sdesc);
txq->q.sdesc = NULL;
dma_free_coherent(adap->pdev_dev,
nentries * sizeof(struct tx_desc),
txq->q.desc, txq->q.phys_addr);
txq->q.desc = NULL;
return ret;
}
init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_GET(ntohl(c.eqid_pkd)));
txq->txq = netdevq;
txq->tso = txq->tx_cso = txq->vlan_ins = 0;
txq->mapping_err = 0;
return 0;
}
int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
struct net_device *dev, unsigned int iqid,
unsigned int cmplqid)
{
int ret, nentries;
struct fw_eq_ctrl_cmd c;
struct sge *s = &adap->sge;
struct port_info *pi = netdev_priv(dev);
/* Add status entries */
nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
sizeof(struct tx_desc), 0, &txq->q.phys_addr,
NULL, 0, NUMA_NO_NODE);
if (!txq->q.desc)
return -ENOMEM;
c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
FW_CMD_WRITE | FW_CMD_EXEC |
FW_EQ_CTRL_CMD_PFN(adap->fn) |
FW_EQ_CTRL_CMD_VFN(0));
c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC |
FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c));
c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID(cmplqid));
c.physeqid_pkd = htonl(0);
c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE(2) |
FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) |
FW_EQ_CTRL_CMD_FETCHRO |
FW_EQ_CTRL_CMD_IQID(iqid));
c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN(2) |
FW_EQ_CTRL_CMD_FBMAX(3) |
FW_EQ_CTRL_CMD_CIDXFTHRESH(5) |
FW_EQ_CTRL_CMD_EQSIZE(nentries));
c.eqaddr = cpu_to_be64(txq->q.phys_addr);
ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
if (ret) {
dma_free_coherent(adap->pdev_dev,
nentries * sizeof(struct tx_desc),
txq->q.desc, txq->q.phys_addr);
txq->q.desc = NULL;
return ret;
}
init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_GET(ntohl(c.cmpliqid_eqid)));
txq->adap = adap;
skb_queue_head_init(&txq->sendq);
tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq);
txq->full = 0;
return 0;
}
int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
struct net_device *dev, unsigned int iqid)
{
int ret, nentries;
struct fw_eq_ofld_cmd c;
struct sge *s = &adap->sge;
struct port_info *pi = netdev_priv(dev);
/* Add status entries */
nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
&txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
NUMA_NO_NODE);
if (!txq->q.desc)
return -ENOMEM;
memset(&c, 0, sizeof(c));
c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
FW_CMD_WRITE | FW_CMD_EXEC |
FW_EQ_OFLD_CMD_PFN(adap->fn) |
FW_EQ_OFLD_CMD_VFN(0));
c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC |
FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c));
c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE(2) |
FW_EQ_OFLD_CMD_PCIECHN(pi->tx_chan) |
FW_EQ_OFLD_CMD_FETCHRO(1) |
FW_EQ_OFLD_CMD_IQID(iqid));
c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN(2) |
FW_EQ_OFLD_CMD_FBMAX(3) |
FW_EQ_OFLD_CMD_CIDXFTHRESH(5) |
FW_EQ_OFLD_CMD_EQSIZE(nentries));
c.eqaddr = cpu_to_be64(txq->q.phys_addr);
ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
if (ret) {
kfree(txq->q.sdesc);
txq->q.sdesc = NULL;
dma_free_coherent(adap->pdev_dev,
nentries * sizeof(struct tx_desc),
txq->q.desc, txq->q.phys_addr);
txq->q.desc = NULL;
return ret;
}
init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_GET(ntohl(c.eqid_pkd)));
txq->adap = adap;
skb_queue_head_init(&txq->sendq);
tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
txq->full = 0;
txq->mapping_err = 0;
return 0;
}
static void free_txq(struct adapter *adap, struct sge_txq *q)
{
struct sge *s = &adap->sge;
dma_free_coherent(adap->pdev_dev,
q->size * sizeof(struct tx_desc) + s->stat_len,
q->desc, q->phys_addr);
q->cntxt_id = 0;
q->sdesc = NULL;
q->desc = NULL;
}
static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
struct sge_fl *fl)
{
struct sge *s = &adap->sge;
unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
t4_iq_free(adap, adap->fn, adap->fn, 0, FW_IQ_TYPE_FL_INT_CAP,
rq->cntxt_id, fl_id, 0xffff);
dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
rq->desc, rq->phys_addr);
netif_napi_del(&rq->napi);
rq->netdev = NULL;
rq->cntxt_id = rq->abs_id = 0;
rq->desc = NULL;
if (fl) {
free_rx_bufs(adap, fl, fl->avail);
dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len,
fl->desc, fl->addr);
kfree(fl->sdesc);
fl->sdesc = NULL;
fl->cntxt_id = 0;
fl->desc = NULL;
}
}
/**
* t4_free_sge_resources - free SGE resources
* @adap: the adapter
*
* Frees resources used by the SGE queue sets.
*/
void t4_free_sge_resources(struct adapter *adap)
{
int i;
struct sge_eth_rxq *eq = adap->sge.ethrxq;
struct sge_eth_txq *etq = adap->sge.ethtxq;
struct sge_ofld_rxq *oq = adap->sge.ofldrxq;
/* clean up Ethernet Tx/Rx queues */
for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) {
if (eq->rspq.desc)
free_rspq_fl(adap, &eq->rspq, &eq->fl);
if (etq->q.desc) {
t4_eth_eq_free(adap, adap->fn, adap->fn, 0,
etq->q.cntxt_id);
free_tx_desc(adap, &etq->q, etq->q.in_use, true);
kfree(etq->q.sdesc);
free_txq(adap, &etq->q);
}
}
/* clean up RDMA and iSCSI Rx queues */
for (i = 0; i < adap->sge.ofldqsets; i++, oq++) {
if (oq->rspq.desc)
free_rspq_fl(adap, &oq->rspq, &oq->fl);
}
for (i = 0, oq = adap->sge.rdmarxq; i < adap->sge.rdmaqs; i++, oq++) {
if (oq->rspq.desc)
free_rspq_fl(adap, &oq->rspq, &oq->fl);
}
/* clean up offload Tx queues */
for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) {
struct sge_ofld_txq *q = &adap->sge.ofldtxq[i];
if (q->q.desc) {
tasklet_kill(&q->qresume_tsk);
t4_ofld_eq_free(adap, adap->fn, adap->fn, 0,
q->q.cntxt_id);
free_tx_desc(adap, &q->q, q->q.in_use, false);
kfree(q->q.sdesc);
__skb_queue_purge(&q->sendq);
free_txq(adap, &q->q);
}
}
/* clean up control Tx queues */
for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
if (cq->q.desc) {
tasklet_kill(&cq->qresume_tsk);
t4_ctrl_eq_free(adap, adap->fn, adap->fn, 0,
cq->q.cntxt_id);
__skb_queue_purge(&cq->sendq);
free_txq(adap, &cq->q);
}
}
if (adap->sge.fw_evtq.desc)
free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
if (adap->sge.intrq.desc)
free_rspq_fl(adap, &adap->sge.intrq, NULL);
/* clear the reverse egress queue map */
memset(adap->sge.egr_map, 0, sizeof(adap->sge.egr_map));
}
void t4_sge_start(struct adapter *adap)
{
adap->sge.ethtxq_rover = 0;
mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
}
/**
* t4_sge_stop - disable SGE operation
* @adap: the adapter
*
* Stop tasklets and timers associated with the DMA engine. Note that
* this is effective only if measures have been taken to disable any HW
* events that may restart them.
*/
void t4_sge_stop(struct adapter *adap)
{
int i;
struct sge *s = &adap->sge;
if (in_interrupt()) /* actions below require waiting */
return;
if (s->rx_timer.function)
del_timer_sync(&s->rx_timer);
if (s->tx_timer.function)
del_timer_sync(&s->tx_timer);
for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) {
struct sge_ofld_txq *q = &s->ofldtxq[i];
if (q->q.desc)
tasklet_kill(&q->qresume_tsk);
}
for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) {
struct sge_ctrl_txq *cq = &s->ctrlq[i];
if (cq->q.desc)
tasklet_kill(&cq->qresume_tsk);
}
}
/**
* t4_sge_init - initialize SGE
* @adap: the adapter
*
* Performs SGE initialization needed every time after a chip reset.
* We do not initialize any of the queues here, instead the driver
* top-level must request them individually.
*
* Called in two different modes:
*
* 1. Perform actual hardware initialization and record hard-coded
* parameters which were used. This gets used when we're the
* Master PF and the Firmware Configuration File support didn't
* work for some reason.
*
* 2. We're not the Master PF or initialization was performed with
* a Firmware Configuration File. In this case we need to grab
* any of the SGE operating parameters that we need to have in
* order to do our job and make sure we can live with them ...
*/
static int t4_sge_init_soft(struct adapter *adap)
{
struct sge *s = &adap->sge;
u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu;
u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
u32 ingress_rx_threshold;
/*
* Verify that CPL messages are going to the Ingress Queue for
* process_responses() and that only packet data is going to the
* Free Lists.
*/
if ((t4_read_reg(adap, SGE_CONTROL) & RXPKTCPLMODE_MASK) !=
RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) {
dev_err(adap->pdev_dev, "bad SGE CPL MODE\n");
return -EINVAL;
}
/*
* Validate the Host Buffer Register Array indices that we want to
* use ...
*
* XXX Note that we should really read through the Host Buffer Size
* XXX register array and find the indices of the Buffer Sizes which
* XXX meet our needs!
*/
#define READ_FL_BUF(x) \
t4_read_reg(adap, SGE_FL_BUFFER_SIZE0+(x)*sizeof(u32))
fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);
#undef READ_FL_BUF
if (fl_small_pg != PAGE_SIZE ||
(fl_large_pg != 0 && (fl_large_pg <= fl_small_pg ||
(fl_large_pg & (fl_large_pg-1)) != 0))) {
dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
fl_small_pg, fl_large_pg);
return -EINVAL;
}
if (fl_large_pg)
s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
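/*
* Worked example (illustrative, not from the original source): with 4KB
* pages (PAGE_SHIFT = 12), the check above rejects any large-page buffer
* size that is not a power of two, since (x & (x - 1)) == 0 holds only
* for powers of two. A hardware value of fl_large_pg = 65536 passes and
* gives fl_pg_order = ilog2(65536) - 12 = 16 - 12 = 4, i.e. each large
* free-list buffer is a 16-page compound allocation.
*/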
if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap) ||
fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) {
dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n",
fl_small_mtu, fl_large_mtu);
return -EINVAL;
}
/*
* Retrieve our RX interrupt holdoff timer values and counter
* threshold values from the SGE parameters.
*/
timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1);
timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3);
timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5);
s->timer_val[0] = core_ticks_to_us(adap,
TIMERVALUE0_GET(timer_value_0_and_1));
s->timer_val[1] = core_ticks_to_us(adap,
TIMERVALUE1_GET(timer_value_0_and_1));
s->timer_val[2] = core_ticks_to_us(adap,
TIMERVALUE2_GET(timer_value_2_and_3));
s->timer_val[3] = core_ticks_to_us(adap,
TIMERVALUE3_GET(timer_value_2_and_3));
s->timer_val[4] = core_ticks_to_us(adap,
TIMERVALUE4_GET(timer_value_4_and_5));
s->timer_val[5] = core_ticks_to_us(adap,
TIMERVALUE5_GET(timer_value_4_and_5));
ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD);
s->counter_val[0] = THRESHOLD_0_GET(ingress_rx_threshold);
s->counter_val[1] = THRESHOLD_1_GET(ingress_rx_threshold);
s->counter_val[2] = THRESHOLD_2_GET(ingress_rx_threshold);
s->counter_val[3] = THRESHOLD_3_GET(ingress_rx_threshold);
return 0;
}
static int t4_sge_init_hard(struct adapter *adap)
{
struct sge *s = &adap->sge;
/*
* Set up our basic SGE mode to deliver CPL messages to our Ingress
* Queue and Packet Data to the Free List.
*/
t4_set_reg_field(adap, SGE_CONTROL, RXPKTCPLMODE_MASK,
RXPKTCPLMODE_MASK);
/*
* Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
* and generate an interrupt when this occurs so we can recover.
*/
if (is_t4(adap->chip)) {
t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
V_HP_INT_THRESH(M_HP_INT_THRESH) |
V_LP_INT_THRESH(M_LP_INT_THRESH),
V_HP_INT_THRESH(dbfifo_int_thresh) |
V_LP_INT_THRESH(dbfifo_int_thresh));
} else {
t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
V_LP_INT_THRESH_T5(M_LP_INT_THRESH_T5),
V_LP_INT_THRESH_T5(dbfifo_int_thresh));
t4_set_reg_field(adap, SGE_DBFIFO_STATUS2,
V_HP_INT_THRESH_T5(M_HP_INT_THRESH_T5),
V_HP_INT_THRESH_T5(dbfifo_int_thresh));
}
t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_ENABLE_DROP,
F_ENABLE_DROP);
/*
* SGE_FL_BUFFER_SIZE0 (RX_SMALL_PG_BUF) is set up by
* t4_fixup_host_params().
*/
s->fl_pg_order = FL_PG_ORDER;
if (s->fl_pg_order)
t4_write_reg(adap,
SGE_FL_BUFFER_SIZE0+RX_LARGE_PG_BUF*sizeof(u32),
PAGE_SIZE << FL_PG_ORDER);
t4_write_reg(adap, SGE_FL_BUFFER_SIZE0+RX_SMALL_MTU_BUF*sizeof(u32),
FL_MTU_SMALL_BUFSIZE(adap));
t4_write_reg(adap, SGE_FL_BUFFER_SIZE0+RX_LARGE_MTU_BUF*sizeof(u32),
FL_MTU_LARGE_BUFSIZE(adap));
/*
* Note that the SGE Ingress Packet Count Interrupt Threshold and
* Timer Holdoff values must be supplied by our caller.
*/
t4_write_reg(adap, SGE_INGRESS_RX_THRESHOLD,
THRESHOLD_0(s->counter_val[0]) |
THRESHOLD_1(s->counter_val[1]) |
THRESHOLD_2(s->counter_val[2]) |
THRESHOLD_3(s->counter_val[3]));
t4_write_reg(adap, SGE_TIMER_VALUE_0_AND_1,
TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[0])) |
TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[1])));
t4_write_reg(adap, SGE_TIMER_VALUE_2_AND_3,
TIMERVALUE2(us_to_core_ticks(adap, s->timer_val[2])) |
TIMERVALUE3(us_to_core_ticks(adap, s->timer_val[3])));
t4_write_reg(adap, SGE_TIMER_VALUE_4_AND_5,
TIMERVALUE4(us_to_core_ticks(adap, s->timer_val[4])) |
TIMERVALUE5(us_to_core_ticks(adap, s->timer_val[5])));
return 0;
}
int t4_sge_init(struct adapter *adap)
{
struct sge *s = &adap->sge;
u32 sge_control;
int ret;
/*
* Ingress Padding Boundary and Egress Status Page Size are set up by
* t4_fixup_host_params().
*/
sge_control = t4_read_reg(adap, SGE_CONTROL);
s->pktshift = PKTSHIFT_GET(sge_control);
s->stat_len = (sge_control & EGRSTATUSPAGESIZE_MASK) ? 128 : 64;
s->fl_align = 1 << (INGPADBOUNDARY_GET(sge_control) +
X_INGPADBOUNDARY_SHIFT);
if (adap->flags & USING_SOFT_PARAMS)
ret = t4_sge_init_soft(adap);
else
ret = t4_sge_init_hard(adap);
if (ret < 0)
return ret;
/*
* A FL with <= fl_starve_thres buffers is starving and a periodic
* timer will attempt to refill it. This needs to be larger than the
* SGE's Egress Congestion Threshold. If it isn't, then we can get
* stuck waiting for new packets while the SGE is waiting for us to
* give it more Free List entries. (Note that the SGE's Egress
* Congestion Threshold is in units of 2 Free List pointers.)
*/
s->fl_starve_thres
= EGRTHRESHOLD_GET(t4_read_reg(adap, SGE_CONM_CTRL))*2 + 1;
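/*
* Illustrative arithmetic (not from the original source): if the SGE
* reports an Egress Congestion Threshold of 64 (in units of 2 Free List
* pointers, i.e. 128 buffers), then fl_starve_thres = 64 * 2 + 1 = 129,
* strictly greater than the congestion threshold as required above.
*/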
setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
s->starve_thres = core_ticks_per_usec(adap) * 1000000; /* 1 s */
s->idma_state[0] = s->idma_state[1] = 0;
spin_lock_init(&s->intrq_lock);
return 0;
}
| gpl-2.0 |
DJSteve/StreakKernel | arch/x86/mm/highmem_32.c | 1281 | 3283 | #include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h> /* for totalram_pages */
void *kmap(struct page *page)
{
might_sleep();
if (!PageHighMem(page))
return page_address(page);
return kmap_high(page);
}
void kunmap(struct page *page)
{
if (in_interrupt())
BUG();
if (!PageHighMem(page))
return;
kunmap_high(page);
}
/*
* kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
* no global lock is needed and because the kmap code must perform a global TLB
* invalidation when the kmap pool wraps.
*
* However when holding an atomic kmap it is not legal to sleep, so atomic
* kmaps are appropriate for short, tight code paths only.
*/
void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
{
enum fixed_addresses idx;
unsigned long vaddr;
/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
pagefault_disable();
if (!PageHighMem(page))
return page_address(page);
debug_kmap_atomic(type);
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
BUG_ON(!pte_none(*(kmap_pte-idx)));
set_pte(kmap_pte-idx, mk_pte(page, prot));
return (void *)vaddr;
}
void *kmap_atomic(struct page *page, enum km_type type)
{
return kmap_atomic_prot(page, type, kmap_prot);
}
void kunmap_atomic(void *kvaddr, enum km_type type)
{
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
/*
* Force other mappings to Oops if they try to access this pte
* without first remapping it. Keeping stale mappings around is also
* a bad idea, in case the page changes cacheability attributes or
* becomes a protected page in a hypervisor.
*/
if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
kpte_clear_flush(kmap_pte-idx, vaddr);
else {
#ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(vaddr < PAGE_OFFSET);
BUG_ON(vaddr >= (unsigned long)high_memory);
#endif
}
pagefault_enable();
}
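/*
* Minimal usage sketch (illustrative, not part of this file): copy the
* first len bytes of a possibly-highmem page through a short-lived atomic
* mapping. copy_page_prefix() is a hypothetical helper; the KM_USER0 slot
* and the (page, type) signatures match the API defined above. No
* sleeping is allowed between kmap_atomic() and kunmap_atomic().
*/
static inline void copy_page_prefix(struct page *page, void *dst, size_t len)
{
void *src = kmap_atomic(page, KM_USER0); /* short-lived, per-CPU mapping */
memcpy(dst, src, len);
kunmap_atomic(src, KM_USER0); /* releases the fixmap slot */
}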
/*
* This is the same as kmap_atomic() but can map memory that doesn't
* have a struct page associated with it.
*/
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
return kmap_atomic_prot_pfn(pfn, type, kmap_prot);
}
EXPORT_SYMBOL_GPL(kmap_atomic_pfn); /* temporarily in use by i915 GEM until vmap */
struct page *kmap_atomic_to_page(void *ptr)
{
unsigned long idx, vaddr = (unsigned long)ptr;
pte_t *pte;
if (vaddr < FIXADDR_START)
return virt_to_page(ptr);
idx = virt_to_fix(vaddr);
pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
return pte_page(*pte);
}
EXPORT_SYMBOL(kmap);
EXPORT_SYMBOL(kunmap);
EXPORT_SYMBOL(kmap_atomic);
EXPORT_SYMBOL(kunmap_atomic);
EXPORT_SYMBOL(kmap_atomic_prot);
EXPORT_SYMBOL(kmap_atomic_to_page);
void __init set_highmem_pages_init(void)
{
struct zone *zone;
int nid;
for_each_zone(zone) {
unsigned long zone_start_pfn, zone_end_pfn;
if (!is_highmem(zone))
continue;
zone_start_pfn = zone->zone_start_pfn;
zone_end_pfn = zone_start_pfn + zone->spanned_pages;
nid = zone_to_nid(zone);
printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
zone->name, nid, zone_start_pfn, zone_end_pfn);
add_highpages_with_active_regions(nid, zone_start_pfn,
zone_end_pfn);
}
totalram_pages += totalhigh_pages;
}
| gpl-2.0 |
CarbonROM/android_kernel_asus_fugu | net/phonet/pn_dev.c | 2049 | 10103 | /*
* File: pn_dev.c
*
* Phonet network device
*
* Copyright (C) 2008 Nokia Corporation.
*
* Authors: Sakari Ailus <sakari.ailus@nokia.com>
* Rémi Denis-Courmont
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*/
#include <linux/kernel.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/phonet.h>
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <net/sock.h>
#include <net/netns/generic.h>
#include <net/phonet/pn_dev.h>
struct phonet_routes {
struct mutex lock;
struct net_device *table[64];
};
struct phonet_net {
struct phonet_device_list pndevs;
struct phonet_routes routes;
};
static int phonet_net_id __read_mostly;
static struct phonet_net *phonet_pernet(struct net *net)
{
BUG_ON(!net);
return net_generic(net, phonet_net_id);
}
struct phonet_device_list *phonet_device_list(struct net *net)
{
struct phonet_net *pnn = phonet_pernet(net);
return &pnn->pndevs;
}
/* Allocate new Phonet device. */
static struct phonet_device *__phonet_device_alloc(struct net_device *dev)
{
struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev));
struct phonet_device *pnd = kmalloc(sizeof(*pnd), GFP_ATOMIC);
if (pnd == NULL)
return NULL;
pnd->netdev = dev;
bitmap_zero(pnd->addrs, 64);
BUG_ON(!mutex_is_locked(&pndevs->lock));
list_add_rcu(&pnd->list, &pndevs->list);
return pnd;
}
static struct phonet_device *__phonet_get(struct net_device *dev)
{
struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev));
struct phonet_device *pnd;
BUG_ON(!mutex_is_locked(&pndevs->lock));
list_for_each_entry(pnd, &pndevs->list, list) {
if (pnd->netdev == dev)
return pnd;
}
return NULL;
}
static struct phonet_device *__phonet_get_rcu(struct net_device *dev)
{
struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev));
struct phonet_device *pnd;
list_for_each_entry_rcu(pnd, &pndevs->list, list) {
if (pnd->netdev == dev)
return pnd;
}
return NULL;
}
static void phonet_device_destroy(struct net_device *dev)
{
struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev));
struct phonet_device *pnd;
ASSERT_RTNL();
mutex_lock(&pndevs->lock);
pnd = __phonet_get(dev);
if (pnd)
list_del_rcu(&pnd->list);
mutex_unlock(&pndevs->lock);
if (pnd) {
u8 addr;
for_each_set_bit(addr, pnd->addrs, 64)
phonet_address_notify(RTM_DELADDR, dev, addr);
kfree(pnd);
}
}
struct net_device *phonet_device_get(struct net *net)
{
struct phonet_device_list *pndevs = phonet_device_list(net);
struct phonet_device *pnd;
struct net_device *dev = NULL;
rcu_read_lock();
list_for_each_entry_rcu(pnd, &pndevs->list, list) {
dev = pnd->netdev;
BUG_ON(!dev);
if ((dev->reg_state == NETREG_REGISTERED) &&
((pnd->netdev->flags & IFF_UP)) == IFF_UP)
break;
dev = NULL;
}
if (dev)
dev_hold(dev);
rcu_read_unlock();
return dev;
}
int phonet_address_add(struct net_device *dev, u8 addr)
{
struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev));
struct phonet_device *pnd;
int err = 0;
mutex_lock(&pndevs->lock);
/* Find or create Phonet-specific device data */
pnd = __phonet_get(dev);
if (pnd == NULL)
pnd = __phonet_device_alloc(dev);
if (unlikely(pnd == NULL))
err = -ENOMEM;
else if (test_and_set_bit(addr >> 2, pnd->addrs))
err = -EEXIST;
mutex_unlock(&pndevs->lock);
return err;
}
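/*
* Illustrative note (not from the original source): Phonet device
* addresses are multiples of 4, so addr >> 2 above maps each address to
* one of the 64 bits in pnd->addrs. For example, adding address 0x60
* sets bit 0x60 >> 2 = 24, and a later phonet_address_del(dev, 0x60)
* clears that same bit.
*/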
int phonet_address_del(struct net_device *dev, u8 addr)
{
struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev));
struct phonet_device *pnd;
int err = 0;
mutex_lock(&pndevs->lock);
pnd = __phonet_get(dev);
if (!pnd || !test_and_clear_bit(addr >> 2, pnd->addrs)) {
err = -EADDRNOTAVAIL;
pnd = NULL;
} else if (bitmap_empty(pnd->addrs, 64))
list_del_rcu(&pnd->list);
else
pnd = NULL;
mutex_unlock(&pndevs->lock);
if (pnd)
kfree_rcu(pnd, rcu);
return err;
}
/* Gets a source address toward a destination, through an interface. */
u8 phonet_address_get(struct net_device *dev, u8 daddr)
{
struct phonet_device *pnd;
u8 saddr;
rcu_read_lock();
pnd = __phonet_get_rcu(dev);
if (pnd) {
BUG_ON(bitmap_empty(pnd->addrs, 64));
/* Use same source address as destination, if possible */
if (test_bit(daddr >> 2, pnd->addrs))
saddr = daddr;
else
saddr = find_first_bit(pnd->addrs, 64) << 2;
} else
saddr = PN_NO_ADDR;
rcu_read_unlock();
if (saddr == PN_NO_ADDR) {
/* Fallback to another device */
struct net_device *def_dev;
def_dev = phonet_device_get(dev_net(dev));
if (def_dev) {
if (def_dev != dev)
saddr = phonet_address_get(def_dev, daddr);
dev_put(def_dev);
}
}
return saddr;
}
int phonet_address_lookup(struct net *net, u8 addr)
{
struct phonet_device_list *pndevs = phonet_device_list(net);
struct phonet_device *pnd;
int err = -EADDRNOTAVAIL;
rcu_read_lock();
list_for_each_entry_rcu(pnd, &pndevs->list, list) {
/* Don't allow unregistering devices! */
if ((pnd->netdev->reg_state != NETREG_REGISTERED) ||
((pnd->netdev->flags & IFF_UP)) != IFF_UP)
continue;
if (test_bit(addr >> 2, pnd->addrs)) {
err = 0;
goto found;
}
}
found:
rcu_read_unlock();
return err;
}
/* automatically configure a Phonet device, if supported */
static int phonet_device_autoconf(struct net_device *dev)
{
struct if_phonet_req req;
int ret;
if (!dev->netdev_ops->ndo_do_ioctl)
return -EOPNOTSUPP;
ret = dev->netdev_ops->ndo_do_ioctl(dev, (struct ifreq *)&req,
SIOCPNGAUTOCONF);
if (ret < 0)
return ret;
ASSERT_RTNL();
ret = phonet_address_add(dev, req.ifr_phonet_autoconf.device);
if (ret)
return ret;
phonet_address_notify(RTM_NEWADDR, dev,
req.ifr_phonet_autoconf.device);
return 0;
}
static void phonet_route_autodel(struct net_device *dev)
{
struct phonet_net *pnn = phonet_pernet(dev_net(dev));
unsigned int i;
DECLARE_BITMAP(deleted, 64);
/* Remove left-over Phonet routes */
bitmap_zero(deleted, 64);
mutex_lock(&pnn->routes.lock);
for (i = 0; i < 64; i++)
if (dev == pnn->routes.table[i]) {
RCU_INIT_POINTER(pnn->routes.table[i], NULL);
set_bit(i, deleted);
}
mutex_unlock(&pnn->routes.lock);
if (bitmap_empty(deleted, 64))
return; /* short-circuit RCU */
synchronize_rcu();
for_each_set_bit(i, deleted, 64) {
rtm_phonet_notify(RTM_DELROUTE, dev, i);
dev_put(dev);
}
}
/* notify Phonet of device events */
static int phonet_device_notify(struct notifier_block *me, unsigned long what,
void *arg)
{
struct net_device *dev = arg;
switch (what) {
case NETDEV_REGISTER:
if (dev->type == ARPHRD_PHONET)
phonet_device_autoconf(dev);
break;
case NETDEV_UNREGISTER:
phonet_device_destroy(dev);
phonet_route_autodel(dev);
break;
}
return 0;
}
static struct notifier_block phonet_device_notifier = {
.notifier_call = phonet_device_notify,
.priority = 0,
};
/* Per-namespace Phonet devices handling */
static int __net_init phonet_init_net(struct net *net)
{
struct phonet_net *pnn = phonet_pernet(net);
if (!proc_create("phonet", 0, net->proc_net, &pn_sock_seq_fops))
return -ENOMEM;
INIT_LIST_HEAD(&pnn->pndevs.list);
mutex_init(&pnn->pndevs.lock);
mutex_init(&pnn->routes.lock);
return 0;
}
static void __net_exit phonet_exit_net(struct net *net)
{
remove_proc_entry("phonet", net->proc_net);
}
static struct pernet_operations phonet_net_ops = {
.init = phonet_init_net,
.exit = phonet_exit_net,
.id = &phonet_net_id,
.size = sizeof(struct phonet_net),
};
/* Initialize Phonet devices list */
int __init phonet_device_init(void)
{
int err = register_pernet_subsys(&phonet_net_ops);
if (err)
return err;
proc_create("pnresource", 0, init_net.proc_net, &pn_res_seq_fops);
register_netdevice_notifier(&phonet_device_notifier);
err = phonet_netlink_register();
if (err)
phonet_device_exit();
return err;
}
void phonet_device_exit(void)
{
rtnl_unregister_all(PF_PHONET);
unregister_netdevice_notifier(&phonet_device_notifier);
unregister_pernet_subsys(&phonet_net_ops);
remove_proc_entry("pnresource", init_net.proc_net);
}
int phonet_route_add(struct net_device *dev, u8 daddr)
{
struct phonet_net *pnn = phonet_pernet(dev_net(dev));
struct phonet_routes *routes = &pnn->routes;
int err = -EEXIST;
daddr = daddr >> 2;
mutex_lock(&routes->lock);
if (routes->table[daddr] == NULL) {
rcu_assign_pointer(routes->table[daddr], dev);
dev_hold(dev);
err = 0;
}
mutex_unlock(&routes->lock);
return err;
}
int phonet_route_del(struct net_device *dev, u8 daddr)
{
struct phonet_net *pnn = phonet_pernet(dev_net(dev));
struct phonet_routes *routes = &pnn->routes;
daddr = daddr >> 2;
mutex_lock(&routes->lock);
if (dev == routes->table[daddr])
RCU_INIT_POINTER(routes->table[daddr], NULL);
else
dev = NULL;
mutex_unlock(&routes->lock);
if (!dev)
return -ENOENT;
synchronize_rcu();
dev_put(dev);
return 0;
}
struct net_device *phonet_route_get_rcu(struct net *net, u8 daddr)
{
struct phonet_net *pnn = phonet_pernet(net);
struct phonet_routes *routes = &pnn->routes;
struct net_device *dev;
daddr >>= 2;
dev = rcu_dereference(routes->table[daddr]);
return dev;
}
struct net_device *phonet_route_output(struct net *net, u8 daddr)
{
struct phonet_net *pnn = phonet_pernet(net);
struct phonet_routes *routes = &pnn->routes;
struct net_device *dev;
daddr >>= 2;
rcu_read_lock();
dev = rcu_dereference(routes->table[daddr]);
if (dev)
dev_hold(dev);
rcu_read_unlock();
if (!dev)
dev = phonet_device_get(net); /* Default route */
return dev;
}
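/*
* Illustrative caller sketch (not part of this file): a transmit path
* would typically resolve the egress device with phonet_route_output()
* and drop the reference once the packet has been queued.
* pn_xmit_sketch() is hypothetical.
*/
static inline int pn_xmit_sketch(struct net *net, struct sk_buff *skb, u8 daddr)
{
struct net_device *dev = phonet_route_output(net, daddr);
int err;
if (!dev) {
kfree_skb(skb);
return -EHOSTUNREACH;
}
skb->dev = dev;
err = dev_queue_xmit(skb);
dev_put(dev); /* release the reference taken by phonet_route_output() */
return err;
}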
| gpl-2.0 |
wusijie/android_kernel_oneplus_msm8994 | drivers/tty/rocket.c | 2305 | 95781 | /*
* RocketPort device driver for Linux
*
* Written by Theodore Ts'o, 1995, 1996, 1997, 1998, 1999, 2000.
*
* Copyright (C) 1995, 1996, 1997, 1998, 1999, 2000, 2003 by Comtrol, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
* Kernel Synchronization:
*
* This driver has 2 kernel control paths - exception handlers (calls into the driver
* from user mode) and the timer bottom half (tasklet). This is a polled driver; interrupts
* are not used.
*
* Critical data:
* - rp_table[], accessed through passed "info" pointers, is a global (static) array of
* serial port state information and the xmit_buf circular buffer. Protected by
* a per port spinlock.
* - xmit_flags[], an array of ints indexed by board, one bit per port, indicating that
* there is data to be transmitted. Protected by atomic bit operations.
* - rp_num_ports_open, int indicating number of open ports, protected by atomic operations.
*
* rp_write() and rp_put_char() use a per-port mutex (write_mtx) to protect
* against simultaneous access to the same port by more than one process.
*/
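/*
* Illustrative pattern (not part of the original comment): both control
* paths update per-port transmit state under the same per-port spinlock,
* exactly as rp_do_transmit() and rp_put_char() below do:
*
* spin_lock_irqsave(&info->slock, flags);
* ... update info->xmit_head / xmit_tail / xmit_cnt ...
* spin_unlock_irqrestore(&info->slock, flags);
*/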
/****** Defines ******/
#define ROCKET_PARANOIA_CHECK
#define ROCKET_DISABLE_SIMUSAGE
#undef ROCKET_SOFT_FLOW
#undef ROCKET_DEBUG_OPEN
#undef ROCKET_DEBUG_INTR
#undef ROCKET_DEBUG_WRITE
#undef ROCKET_DEBUG_FLOW
#undef ROCKET_DEBUG_THROTTLE
#undef ROCKET_DEBUG_WAIT_UNTIL_SENT
#undef ROCKET_DEBUG_RECEIVE
#undef ROCKET_DEBUG_HANGUP
#undef REV_PCI_ORDER
#undef ROCKET_DEBUG_IO
#define POLL_PERIOD (HZ/100) /* Polling period .01 seconds (10ms) */
/****** Kernel includes ******/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/major.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/serial.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
#include <linux/mutex.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>
#include <asm/unaligned.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/init.h>
/****** RocketPort includes ******/
#include "rocket_int.h"
#include "rocket.h"
#define ROCKET_VERSION "2.09"
#define ROCKET_DATE "12-June-2003"
/****** RocketPort Local Variables ******/
static void rp_do_poll(unsigned long dummy);
static struct tty_driver *rocket_driver;
static struct rocket_version driver_version = {
ROCKET_VERSION, ROCKET_DATE
};
static struct r_port *rp_table[MAX_RP_PORTS]; /* The main repository of serial port state information. */
static unsigned int xmit_flags[NUM_BOARDS]; /* Bit significant, indicates port had data to transmit. */
/* eg. Bit 0 indicates port 0 has xmit data, ... */
static atomic_t rp_num_ports_open; /* Number of serial ports open */
static DEFINE_TIMER(rocket_timer, rp_do_poll, 0, 0);
static unsigned long board1; /* ISA addresses, retrieved from rocketport.conf */
static unsigned long board2;
static unsigned long board3;
static unsigned long board4;
static unsigned long controller;
static bool support_low_speed;
static unsigned long modem1;
static unsigned long modem2;
static unsigned long modem3;
static unsigned long modem4;
static unsigned long pc104_1[8];
static unsigned long pc104_2[8];
static unsigned long pc104_3[8];
static unsigned long pc104_4[8];
static unsigned long *pc104[4] = { pc104_1, pc104_2, pc104_3, pc104_4 };
static int rp_baud_base[NUM_BOARDS]; /* Board config info (Someday make a per-board structure) */
static unsigned long rcktpt_io_addr[NUM_BOARDS];
static int rcktpt_type[NUM_BOARDS];
static int is_PCI[NUM_BOARDS];
static rocketModel_t rocketModel[NUM_BOARDS];
static int max_board;
static const struct tty_port_operations rocket_port_ops;
/*
* The following arrays define the interrupt bits corresponding to each AIOP.
* These bits are different between the ISA and regular PCI boards and the
* Universal PCI boards.
*/
static Word_t aiop_intr_bits[AIOP_CTL_SIZE] = {
AIOP_INTR_BIT_0,
AIOP_INTR_BIT_1,
AIOP_INTR_BIT_2,
AIOP_INTR_BIT_3
};
#ifdef CONFIG_PCI
static Word_t upci_aiop_intr_bits[AIOP_CTL_SIZE] = {
UPCI_AIOP_INTR_BIT_0,
UPCI_AIOP_INTR_BIT_1,
UPCI_AIOP_INTR_BIT_2,
UPCI_AIOP_INTR_BIT_3
};
#endif
static Byte_t RData[RDATASIZE] = {
0x00, 0x09, 0xf6, 0x82,
0x02, 0x09, 0x86, 0xfb,
0x04, 0x09, 0x00, 0x0a,
0x06, 0x09, 0x01, 0x0a,
0x08, 0x09, 0x8a, 0x13,
0x0a, 0x09, 0xc5, 0x11,
0x0c, 0x09, 0x86, 0x85,
0x0e, 0x09, 0x20, 0x0a,
0x10, 0x09, 0x21, 0x0a,
0x12, 0x09, 0x41, 0xff,
0x14, 0x09, 0x82, 0x00,
0x16, 0x09, 0x82, 0x7b,
0x18, 0x09, 0x8a, 0x7d,
0x1a, 0x09, 0x88, 0x81,
0x1c, 0x09, 0x86, 0x7a,
0x1e, 0x09, 0x84, 0x81,
0x20, 0x09, 0x82, 0x7c,
0x22, 0x09, 0x0a, 0x0a
};
static Byte_t RRegData[RREGDATASIZE] = {
0x00, 0x09, 0xf6, 0x82, /* 00: Stop Rx processor */
0x08, 0x09, 0x8a, 0x13, /* 04: Tx software flow control */
0x0a, 0x09, 0xc5, 0x11, /* 08: XON char */
0x0c, 0x09, 0x86, 0x85, /* 0c: XANY */
0x12, 0x09, 0x41, 0xff, /* 10: Rx mask char */
0x14, 0x09, 0x82, 0x00, /* 14: Compare/Ignore #0 */
0x16, 0x09, 0x82, 0x7b, /* 18: Compare #1 */
0x18, 0x09, 0x8a, 0x7d, /* 1c: Compare #2 */
0x1a, 0x09, 0x88, 0x81, /* 20: Interrupt #1 */
0x1c, 0x09, 0x86, 0x7a, /* 24: Ignore/Replace #1 */
0x1e, 0x09, 0x84, 0x81, /* 28: Interrupt #2 */
0x20, 0x09, 0x82, 0x7c, /* 2c: Ignore/Replace #2 */
0x22, 0x09, 0x0a, 0x0a /* 30: Rx FIFO Enable */
};
static CONTROLLER_T sController[CTL_SIZE] = {
{-1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0},
{0, 0, 0, 0}, {-1, -1, -1, -1}, {0, 0, 0, 0}},
{-1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0},
{0, 0, 0, 0}, {-1, -1, -1, -1}, {0, 0, 0, 0}},
{-1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0},
{0, 0, 0, 0}, {-1, -1, -1, -1}, {0, 0, 0, 0}},
{-1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0},
{0, 0, 0, 0}, {-1, -1, -1, -1}, {0, 0, 0, 0}}
};
static Byte_t sBitMapClrTbl[8] = {
0xfe, 0xfd, 0xfb, 0xf7, 0xef, 0xdf, 0xbf, 0x7f
};
static Byte_t sBitMapSetTbl[8] = {
0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80
};
static int sClockPrescale = 0x14;
/*
* Line number is the ttySIx number (x), the Minor number. We
* assign them sequentially, starting at zero. The following
* array keeps track of the line number assigned to a given board/aiop/channel.
*/
static unsigned char lineNumbers[MAX_RP_PORTS];
static unsigned long nextLineNumber;
/***** RocketPort Static Prototypes *********/
static int __init init_ISA(int i);
static void rp_wait_until_sent(struct tty_struct *tty, int timeout);
static void rp_flush_buffer(struct tty_struct *tty);
static unsigned char GetLineNumber(int ctrl, int aiop, int ch);
static unsigned char SetLineNumber(int ctrl, int aiop, int ch);
static void rp_start(struct tty_struct *tty);
static int sInitChan(CONTROLLER_T * CtlP, CHANNEL_T * ChP, int AiopNum,
int ChanNum);
static void sSetInterfaceMode(CHANNEL_T * ChP, Byte_t mode);
static void sFlushRxFIFO(CHANNEL_T * ChP);
static void sFlushTxFIFO(CHANNEL_T * ChP);
static void sEnInterrupts(CHANNEL_T * ChP, Word_t Flags);
static void sDisInterrupts(CHANNEL_T * ChP, Word_t Flags);
static void sModemReset(CONTROLLER_T * CtlP, int chan, int on);
static void sPCIModemReset(CONTROLLER_T * CtlP, int chan, int on);
static int sWriteTxPrioByte(CHANNEL_T * ChP, Byte_t Data);
static int sInitController(CONTROLLER_T * CtlP, int CtlNum, ByteIO_t MudbacIO,
ByteIO_t * AiopIOList, int AiopIOListSize,
int IRQNum, Byte_t Frequency, int PeriodicOnly);
static int sReadAiopID(ByteIO_t io);
static int sReadAiopNumChan(WordIO_t io);
MODULE_AUTHOR("Theodore Ts'o");
MODULE_DESCRIPTION("Comtrol RocketPort driver");
module_param(board1, ulong, 0);
MODULE_PARM_DESC(board1, "I/O port for (ISA) board #1");
module_param(board2, ulong, 0);
MODULE_PARM_DESC(board2, "I/O port for (ISA) board #2");
module_param(board3, ulong, 0);
MODULE_PARM_DESC(board3, "I/O port for (ISA) board #3");
module_param(board4, ulong, 0);
MODULE_PARM_DESC(board4, "I/O port for (ISA) board #4");
module_param(controller, ulong, 0);
MODULE_PARM_DESC(controller, "I/O port for (ISA) rocketport controller");
module_param(support_low_speed, bool, 0);
MODULE_PARM_DESC(support_low_speed, "1 means support 50 baud, 0 means support 460800 baud");
module_param(modem1, ulong, 0);
MODULE_PARM_DESC(modem1, "1 means (ISA) board #1 is a RocketModem");
module_param(modem2, ulong, 0);
MODULE_PARM_DESC(modem2, "1 means (ISA) board #2 is a RocketModem");
module_param(modem3, ulong, 0);
MODULE_PARM_DESC(modem3, "1 means (ISA) board #3 is a RocketModem");
module_param(modem4, ulong, 0);
MODULE_PARM_DESC(modem4, "1 means (ISA) board #4 is a RocketModem");
module_param_array(pc104_1, ulong, NULL, 0);
MODULE_PARM_DESC(pc104_1, "set interface types for ISA(PC104) board #1 (e.g. pc104_1=232,232,485,485,...)");
module_param_array(pc104_2, ulong, NULL, 0);
MODULE_PARM_DESC(pc104_2, "set interface types for ISA(PC104) board #2 (e.g. pc104_2=232,232,485,485,...)");
module_param_array(pc104_3, ulong, NULL, 0);
MODULE_PARM_DESC(pc104_3, "set interface types for ISA(PC104) board #3 (e.g. pc104_3=232,232,485,485,...)");
module_param_array(pc104_4, ulong, NULL, 0);
MODULE_PARM_DESC(pc104_4, "set interface types for ISA(PC104) board #4 (e.g. pc104_4=232,232,485,485,...)");
static int rp_init(void);
static void rp_cleanup_module(void);
module_init(rp_init);
module_exit(rp_cleanup_module);
MODULE_LICENSE("Dual BSD/GPL");
/*************************************************************************/
/* Module code starts here */
static inline int rocket_paranoia_check(struct r_port *info,
const char *routine)
{
#ifdef ROCKET_PARANOIA_CHECK
if (!info)
return 1;
if (info->magic != RPORT_MAGIC) {
printk(KERN_WARNING "Warning: bad magic number for rocketport "
"struct in %s\n", routine);
return 1;
}
#endif
return 0;
}
/* Serial port receive data function. Called (from timer poll) when an AIOPIC signals
* that receive data is present on a serial port. Pulls data from FIFO, moves it into the
* tty layer.
*/
static void rp_do_receive(struct r_port *info, CHANNEL_t *cp,
unsigned int ChanStatus)
{
unsigned int CharNStat;
int ToRecv, wRecv, space;
unsigned char *cbuf;
ToRecv = sGetRxCnt(cp);
#ifdef ROCKET_DEBUG_INTR
printk(KERN_INFO "rp_do_receive(%d)...\n", ToRecv);
#endif
if (ToRecv == 0)
return;
/*
* if status indicates there are errored characters in the
* FIFO, then enter status mode (a word in the FIFO holds the
* character and its status).
*/
if (ChanStatus & (RXFOVERFL | RXBREAK | RXFRAME | RXPARITY)) {
if (!(ChanStatus & STATMODE)) {
#ifdef ROCKET_DEBUG_RECEIVE
printk(KERN_INFO "Entering STATMODE...\n");
#endif
ChanStatus |= STATMODE;
sEnRxStatusMode(cp);
}
}
/*
* if we previously entered status mode, then read down the
* FIFO one word at a time, pulling apart the character and
* the status. Update error counters depending on status
*/
if (ChanStatus & STATMODE) {
#ifdef ROCKET_DEBUG_RECEIVE
printk(KERN_INFO "Ignore %x, read %x...\n",
info->ignore_status_mask, info->read_status_mask);
#endif
while (ToRecv) {
char flag;
CharNStat = sInW(sGetTxRxDataIO(cp));
#ifdef ROCKET_DEBUG_RECEIVE
printk(KERN_INFO "%x...\n", CharNStat);
#endif
if (CharNStat & STMBREAKH)
CharNStat &= ~(STMFRAMEH | STMPARITYH);
if (CharNStat & info->ignore_status_mask) {
ToRecv--;
continue;
}
CharNStat &= info->read_status_mask;
if (CharNStat & STMBREAKH)
flag = TTY_BREAK;
else if (CharNStat & STMPARITYH)
flag = TTY_PARITY;
else if (CharNStat & STMFRAMEH)
flag = TTY_FRAME;
else if (CharNStat & STMRCVROVRH)
flag = TTY_OVERRUN;
else
flag = TTY_NORMAL;
tty_insert_flip_char(&info->port, CharNStat & 0xff,
flag);
ToRecv--;
}
/*
* after we've emptied the FIFO in status mode, turn
* status mode back off
*/
if (sGetRxCnt(cp) == 0) {
#ifdef ROCKET_DEBUG_RECEIVE
printk(KERN_INFO "Status mode off.\n");
#endif
sDisRxStatusMode(cp);
}
} else {
/*
* we aren't in status mode, so read down the FIFO two
* characters at a time by doing repeated word IO
* transfers.
*/
space = tty_prepare_flip_string(&info->port, &cbuf, ToRecv);
if (space < ToRecv) {
#ifdef ROCKET_DEBUG_RECEIVE
printk(KERN_INFO "rp_do_receive:insufficient space ToRecv=%d space=%d\n", ToRecv, space);
#endif
if (space <= 0)
return;
ToRecv = space;
}
wRecv = ToRecv >> 1;
if (wRecv)
sInStrW(sGetTxRxDataIO(cp), (unsigned short *) cbuf, wRecv);
if (ToRecv & 1)
cbuf[ToRecv - 1] = sInB(sGetTxRxDataIO(cp));
}
/* Push the data up to the tty layer */
tty_flip_buffer_push(&info->port);
}
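/*
* Illustrative note (not from the original source): in status mode each
* FIFO word read above carries the character in its low byte (hence the
* CharNStat & 0xff when pushing to the tty layer) and the per-character
* error status (STMBREAKH, STMPARITYH, STMFRAMEH, STMRCVROVRH) in the
* remaining bits.
*/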
/*
* Serial port transmit data function. Called from the timer polling loop as a
* result of a bit set in xmit_flags[], indicating data (from the tty layer) is ready
* to be sent out the serial port. Data is buffered in rp_table[line].xmit_buf, it is
* moved to the port's xmit FIFO. *info is critical data, protected by spinlocks.
*/
static void rp_do_transmit(struct r_port *info)
{
int c;
CHANNEL_t *cp = &info->channel;
struct tty_struct *tty;
unsigned long flags;
#ifdef ROCKET_DEBUG_INTR
printk(KERN_DEBUG "%s\n", __func__);
#endif
if (!info)
return;
tty = tty_port_tty_get(&info->port);
if (tty == NULL) {
printk(KERN_WARNING "rp: WARNING %s called with tty==NULL\n", __func__);
clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
return;
}
spin_lock_irqsave(&info->slock, flags);
info->xmit_fifo_room = TXFIFO_SIZE - sGetTxCnt(cp);
/* Loop sending data to FIFO until done or FIFO full */
while (1) {
if (tty->stopped)
break;
c = min(info->xmit_fifo_room, info->xmit_cnt);
c = min(c, XMIT_BUF_SIZE - info->xmit_tail);
if (c <= 0 || info->xmit_fifo_room <= 0)
break;
sOutStrW(sGetTxRxDataIO(cp), (unsigned short *) (info->xmit_buf + info->xmit_tail), c / 2);
if (c & 1)
sOutB(sGetTxRxDataIO(cp), info->xmit_buf[info->xmit_tail + c - 1]);
info->xmit_tail += c;
info->xmit_tail &= XMIT_BUF_SIZE - 1;
info->xmit_cnt -= c;
info->xmit_fifo_room -= c;
#ifdef ROCKET_DEBUG_INTR
printk(KERN_INFO "tx %d chars...\n", c);
#endif
}
if (info->xmit_cnt == 0)
clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
if (info->xmit_cnt < WAKEUP_CHARS) {
tty_wakeup(tty);
#ifdef ROCKETPORT_HAVE_POLL_WAIT
wake_up_interruptible(&tty->poll_wait);
#endif
}
spin_unlock_irqrestore(&info->slock, flags);
tty_kref_put(tty);
#ifdef ROCKET_DEBUG_INTR
printk(KERN_DEBUG "(%d,%d,%d,%d)...\n", info->xmit_cnt, info->xmit_head,
info->xmit_tail, info->xmit_fifo_room);
#endif
}
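/*
* Illustrative arithmetic (not from the original source): xmit_buf is a
* power-of-two circular buffer, so the masking above wraps indices
* without a modulo. Assuming XMIT_BUF_SIZE = 4096 (one page), a tail of
* 4094 advanced by c = 4 becomes (4094 + 4) & 4095 = 2.
*/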
/*
* Called when a serial port signals it has received data in its RX FIFO.
* It checks what interrupts are pending and services them, including
* receiving serial data.
*/
static void rp_handle_port(struct r_port *info)
{
CHANNEL_t *cp;
unsigned int IntMask, ChanStatus;
if (!info)
return;
if ((info->port.flags & ASYNC_INITIALIZED) == 0) {
printk(KERN_WARNING "rp: WARNING: rp_handle_port called with "
"info->flags & NOT_INIT\n");
return;
}
cp = &info->channel;
IntMask = sGetChanIntID(cp) & info->intmask;
#ifdef ROCKET_DEBUG_INTR
printk(KERN_INFO "rp_interrupt %02x...\n", IntMask);
#endif
ChanStatus = sGetChanStatus(cp);
if (IntMask & RXF_TRIG) { /* Rx FIFO trigger level */
rp_do_receive(info, cp, ChanStatus);
}
if (IntMask & DELTA_CD) { /* CD change */
#if (defined(ROCKET_DEBUG_OPEN) || defined(ROCKET_DEBUG_INTR) || defined(ROCKET_DEBUG_HANGUP))
printk(KERN_INFO "ttyR%d CD now %s...\n", info->line,
(ChanStatus & CD_ACT) ? "on" : "off");
#endif
if (!(ChanStatus & CD_ACT) && info->cd_status) {
#ifdef ROCKET_DEBUG_HANGUP
printk(KERN_INFO "CD drop, calling hangup.\n");
#endif
tty_port_tty_hangup(&info->port, false);
}
info->cd_status = (ChanStatus & CD_ACT) ? 1 : 0;
wake_up_interruptible(&info->port.open_wait);
}
#ifdef ROCKET_DEBUG_INTR
if (IntMask & DELTA_CTS) { /* CTS change */
printk(KERN_INFO "CTS change...\n");
}
if (IntMask & DELTA_DSR) { /* DSR change */
printk(KERN_INFO "DSR change...\n");
}
#endif
}
/*
* The top-level polling routine. Runs every POLL_PERIOD jiffies (HZ/100, i.e. 10ms).
*/
static void rp_do_poll(unsigned long dummy)
{
CONTROLLER_t *ctlp;
int ctrl, aiop, ch, line;
unsigned int xmitmask, i;
unsigned int CtlMask;
unsigned char AiopMask;
Word_t bit;
/* Walk through all the boards (ctrl's) */
for (ctrl = 0; ctrl < max_board; ctrl++) {
if (rcktpt_io_addr[ctrl] <= 0)
continue;
/* Get a ptr to the board's control struct */
ctlp = sCtlNumToCtlPtr(ctrl);
/* Get the interrupt status from the board */
#ifdef CONFIG_PCI
if (ctlp->BusType == isPCI)
CtlMask = sPCIGetControllerIntStatus(ctlp);
else
#endif
CtlMask = sGetControllerIntStatus(ctlp);
/* Check if any AIOP read bits are set */
for (aiop = 0; CtlMask; aiop++) {
bit = ctlp->AiopIntrBits[aiop];
if (CtlMask & bit) {
CtlMask &= ~bit;
AiopMask = sGetAiopIntStatus(ctlp, aiop);
/* Check if any port read bits are set */
for (ch = 0; AiopMask; AiopMask >>= 1, ch++) {
if (AiopMask & 1) {
/* Get the line number (/dev/ttyRx number). */
/* Read the data from the port. */
line = GetLineNumber(ctrl, aiop, ch);
rp_handle_port(rp_table[line]);
}
}
}
}
xmitmask = xmit_flags[ctrl];
/*
* xmit_flags contains bit-significant flags, indicating there is data
* to xmit on the port. Bit 0 is port 0 on this board, bit 1 is port
* 1, ... (32 total possible). The variable i has the aiop and ch
* numbers encoded in it (port 0-7 are aiop0, 8-15 are aiop1, etc).
*/
if (xmitmask) {
for (i = 0; i < rocketModel[ctrl].numPorts; i++) {
if (xmitmask & (1 << i)) {
aiop = (i & 0x18) >> 3;
ch = i & 0x07;
line = GetLineNumber(ctrl, aiop, ch);
rp_do_transmit(rp_table[line]);
}
}
}
}
/*
* Reset the timer so we get called at the next clock tick (10ms).
*/
if (atomic_read(&rp_num_ports_open))
mod_timer(&rocket_timer, jiffies + POLL_PERIOD);
}
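/*
* Worked example (illustrative, not from the original source): for
* xmit_flags bit i = 13 (binary 01101), aiop = (13 & 0x18) >> 3 = 1 and
* ch = 13 & 0x07 = 5, i.e. bit 13 is port 5 on AIOP 1 of this board.
*/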
/*
* Initializes the r_port structure for a port, as well as enabling the port on
* the board.
* Inputs: board, aiop, chan numbers
*/
static void init_r_port(int board, int aiop, int chan, struct pci_dev *pci_dev)
{
unsigned rocketMode;
struct r_port *info;
int line;
CONTROLLER_T *ctlp;
/* Get the next available line number */
line = SetLineNumber(board, aiop, chan);
ctlp = sCtlNumToCtlPtr(board);
/* Get a r_port struct for the port, fill it in and save it globally, indexed by line number */
info = kzalloc(sizeof (struct r_port), GFP_KERNEL);
if (!info) {
printk(KERN_ERR "Couldn't allocate info struct for line #%d\n",
line);
return;
}
info->magic = RPORT_MAGIC;
info->line = line;
info->ctlp = ctlp;
info->board = board;
info->aiop = aiop;
info->chan = chan;
tty_port_init(&info->port);
info->port.ops = &rocket_port_ops;
init_completion(&info->close_wait);
info->flags &= ~ROCKET_MODE_MASK;
switch (pc104[board][line]) {
case 422:
info->flags |= ROCKET_MODE_RS422;
break;
case 485:
info->flags |= ROCKET_MODE_RS485;
break;
case 232:
default:
info->flags |= ROCKET_MODE_RS232;
break;
}
info->intmask = RXF_TRIG | TXFIFO_MT | SRC_INT | DELTA_CD | DELTA_CTS | DELTA_DSR;
if (sInitChan(ctlp, &info->channel, aiop, chan) == 0) {
printk(KERN_ERR "RocketPort sInitChan(%d, %d, %d) failed!\n",
board, aiop, chan);
tty_port_destroy(&info->port);
kfree(info);
return;
}
rocketMode = info->flags & ROCKET_MODE_MASK;
if ((info->flags & ROCKET_RTS_TOGGLE) || (rocketMode == ROCKET_MODE_RS485))
sEnRTSToggle(&info->channel);
else
sDisRTSToggle(&info->channel);
if (ctlp->boardType == ROCKET_TYPE_PC104) {
switch (rocketMode) {
case ROCKET_MODE_RS485:
sSetInterfaceMode(&info->channel, InterfaceModeRS485);
break;
case ROCKET_MODE_RS422:
sSetInterfaceMode(&info->channel, InterfaceModeRS422);
break;
case ROCKET_MODE_RS232:
default:
if (info->flags & ROCKET_RTS_TOGGLE)
sSetInterfaceMode(&info->channel, InterfaceModeRS232T);
else
sSetInterfaceMode(&info->channel, InterfaceModeRS232);
break;
}
}
spin_lock_init(&info->slock);
mutex_init(&info->write_mtx);
rp_table[line] = info;
tty_port_register_device(&info->port, rocket_driver, line,
pci_dev ? &pci_dev->dev : NULL);
}
/*
* Configures a rocketport port according to its termio settings. Called from
* user mode into the driver (exception handler). *info CD manipulation is spinlock protected.
*/
static void configure_r_port(struct tty_struct *tty, struct r_port *info,
struct ktermios *old_termios)
{
unsigned cflag;
unsigned long flags;
unsigned rocketMode;
int bits, baud, divisor;
CHANNEL_t *cp;
struct ktermios *t = &tty->termios;
cp = &info->channel;
cflag = t->c_cflag;
/* Byte size and parity */
if ((cflag & CSIZE) == CS8) {
sSetData8(cp);
bits = 10;
} else {
sSetData7(cp);
bits = 9;
}
if (cflag & CSTOPB) {
sSetStop2(cp);
bits++;
} else {
sSetStop1(cp);
}
if (cflag & PARENB) {
sEnParity(cp);
bits++;
if (cflag & PARODD) {
sSetOddParity(cp);
} else {
sSetEvenParity(cp);
}
} else {
sDisParity(cp);
}
/* baud rate */
baud = tty_get_baud_rate(tty);
if (!baud)
baud = 9600;
divisor = ((rp_baud_base[info->board] + (baud >> 1)) / baud) - 1;
if ((divisor >= 8192 || divisor < 0) && old_termios) {
baud = tty_termios_baud_rate(old_termios);
if (!baud)
baud = 9600;
divisor = (rp_baud_base[info->board] / baud) - 1;
}
if (divisor >= 8192 || divisor < 0) {
baud = 9600;
divisor = (rp_baud_base[info->board] / baud) - 1;
}
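/*
* Worked example (illustrative, not from the original source): assuming a
* board base rate of rp_baud_base = 460800 and a requested baud of 9600,
* the rounded divisor is ((460800 + 4800) / 9600) - 1 = 48 - 1 = 47, well
* inside the 0..8191 range enforced above.
*/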
info->cps = baud / bits;
sSetBaud(cp, divisor);
/* FIXME: Should really back compute a baud rate from the divisor */
tty_encode_baud_rate(tty, baud, baud);
if (cflag & CRTSCTS) {
info->intmask |= DELTA_CTS;
sEnCTSFlowCtl(cp);
} else {
info->intmask &= ~DELTA_CTS;
sDisCTSFlowCtl(cp);
}
if (cflag & CLOCAL) {
info->intmask &= ~DELTA_CD;
} else {
spin_lock_irqsave(&info->slock, flags);
if (sGetChanStatus(cp) & CD_ACT)
info->cd_status = 1;
else
info->cd_status = 0;
info->intmask |= DELTA_CD;
spin_unlock_irqrestore(&info->slock, flags);
}
/*
* Handle software flow control in the board
*/
#ifdef ROCKET_SOFT_FLOW
if (I_IXON(tty)) {
sEnTxSoftFlowCtl(cp);
if (I_IXANY(tty)) {
sEnIXANY(cp);
} else {
sDisIXANY(cp);
}
sSetTxXONChar(cp, START_CHAR(tty));
sSetTxXOFFChar(cp, STOP_CHAR(tty));
} else {
sDisTxSoftFlowCtl(cp);
sDisIXANY(cp);
sClrTxXOFF(cp);
}
#endif
/*
* Set up ignore/read mask words
*/
info->read_status_mask = STMRCVROVRH | 0xFF;
if (I_INPCK(tty))
info->read_status_mask |= STMFRAMEH | STMPARITYH;
if (I_BRKINT(tty) || I_PARMRK(tty))
info->read_status_mask |= STMBREAKH;
/*
* Characters to ignore
*/
info->ignore_status_mask = 0;
if (I_IGNPAR(tty))
info->ignore_status_mask |= STMFRAMEH | STMPARITYH;
if (I_IGNBRK(tty)) {
info->ignore_status_mask |= STMBREAKH;
/*
* If we're ignoring parity and break indicators,
* ignore overruns too. (For real raw support).
*/
if (I_IGNPAR(tty))
info->ignore_status_mask |= STMRCVROVRH;
}
rocketMode = info->flags & ROCKET_MODE_MASK;
if ((info->flags & ROCKET_RTS_TOGGLE)
|| (rocketMode == ROCKET_MODE_RS485))
sEnRTSToggle(cp);
else
sDisRTSToggle(cp);
sSetRTS(&info->channel);
if (cp->CtlP->boardType == ROCKET_TYPE_PC104) {
switch (rocketMode) {
case ROCKET_MODE_RS485:
sSetInterfaceMode(cp, InterfaceModeRS485);
break;
case ROCKET_MODE_RS422:
sSetInterfaceMode(cp, InterfaceModeRS422);
break;
case ROCKET_MODE_RS232:
default:
if (info->flags & ROCKET_RTS_TOGGLE)
sSetInterfaceMode(cp, InterfaceModeRS232T);
else
sSetInterfaceMode(cp, InterfaceModeRS232);
break;
}
}
}
static int carrier_raised(struct tty_port *port)
{
struct r_port *info = container_of(port, struct r_port, port);
return (sGetChanStatusLo(&info->channel) & CD_ACT) ? 1 : 0;
}
static void dtr_rts(struct tty_port *port, int on)
{
struct r_port *info = container_of(port, struct r_port, port);
if (on) {
sSetDTR(&info->channel);
sSetRTS(&info->channel);
} else {
sClrDTR(&info->channel);
sClrRTS(&info->channel);
}
}
/*
* Exception handler that opens a serial port. Creates xmit_buf storage, fills in
* port's r_port struct. Initializes the port hardware.
*/
static int rp_open(struct tty_struct *tty, struct file *filp)
{
struct r_port *info;
struct tty_port *port;
int retval;
CHANNEL_t *cp;
unsigned long page;
info = rp_table[tty->index];
if (info == NULL)
return -ENXIO;
port = &info->port;
page = __get_free_page(GFP_KERNEL);
if (!page)
return -ENOMEM;
if (port->flags & ASYNC_CLOSING) {
retval = wait_for_completion_interruptible(&info->close_wait);
free_page(page);
if (retval)
return retval;
return ((port->flags & ASYNC_HUP_NOTIFY) ? -EAGAIN : -ERESTARTSYS);
}
/*
* We must not sleep from here until the port is marked fully in use.
*/
if (info->xmit_buf)
free_page(page);
else
info->xmit_buf = (unsigned char *) page;
tty->driver_data = info;
tty_port_tty_set(port, tty);
if (port->count++ == 0) {
atomic_inc(&rp_num_ports_open);
#ifdef ROCKET_DEBUG_OPEN
printk(KERN_INFO "rocket mod++ = %d...\n",
atomic_read(&rp_num_ports_open));
#endif
}
#ifdef ROCKET_DEBUG_OPEN
printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
#endif
/*
* Info->count is now 1, so it's safe to sleep now.
*/
if (!test_bit(ASYNCB_INITIALIZED, &port->flags)) {
cp = &info->channel;
sSetRxTrigger(cp, TRIG_1);
if (sGetChanStatus(cp) & CD_ACT)
info->cd_status = 1;
else
info->cd_status = 0;
sDisRxStatusMode(cp);
sFlushRxFIFO(cp);
sFlushTxFIFO(cp);
sEnInterrupts(cp, (TXINT_EN | MCINT_EN | RXINT_EN | SRCINT_EN | CHANINT_EN));
sSetRxTrigger(cp, TRIG_1);
sGetChanStatus(cp);
sDisRxStatusMode(cp);
sClrTxXOFF(cp);
sDisCTSFlowCtl(cp);
sDisTxSoftFlowCtl(cp);
sEnRxFIFO(cp);
sEnTransmit(cp);
set_bit(ASYNCB_INITIALIZED, &info->port.flags);
/*
* Set up the tty->alt_speed kludge
*/
if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_HI)
tty->alt_speed = 57600;
if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_VHI)
tty->alt_speed = 115200;
if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_SHI)
tty->alt_speed = 230400;
if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_WARP)
tty->alt_speed = 460800;
configure_r_port(tty, info, NULL);
if (tty->termios.c_cflag & CBAUD) {
sSetDTR(cp);
sSetRTS(cp);
}
}
/* Starts (or resets) the maint polling loop */
mod_timer(&rocket_timer, jiffies + POLL_PERIOD);
retval = tty_port_block_til_ready(port, tty, filp);
if (retval) {
#ifdef ROCKET_DEBUG_OPEN
printk(KERN_INFO "rp_open returning after block_til_ready with %d\n", retval);
#endif
return retval;
}
return 0;
}
/*
* Exception handler that closes a serial port. info->port.count is considered critical.
*/
static void rp_close(struct tty_struct *tty, struct file *filp)
{
struct r_port *info = tty->driver_data;
struct tty_port *port = &info->port;
int timeout;
CHANNEL_t *cp;
if (rocket_paranoia_check(info, "rp_close"))
return;
#ifdef ROCKET_DEBUG_OPEN
printk(KERN_INFO "rp_close ttyR%d, count = %d\n", info->line, info->port.count);
#endif
if (tty_port_close_start(port, tty, filp) == 0)
return;
mutex_lock(&port->mutex);
cp = &info->channel;
/*
* Before we drop DTR, make sure the UART transmitter
* has completely drained; this is especially
* important if there is a transmit FIFO!
*/
timeout = (sGetTxCnt(cp) + 1) * HZ / info->cps;
if (timeout == 0)
timeout = 1;
rp_wait_until_sent(tty, timeout);
clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
sDisTransmit(cp);
sDisInterrupts(cp, (TXINT_EN | MCINT_EN | RXINT_EN | SRCINT_EN | CHANINT_EN));
sDisCTSFlowCtl(cp);
sDisTxSoftFlowCtl(cp);
sClrTxXOFF(cp);
sFlushRxFIFO(cp);
sFlushTxFIFO(cp);
sClrRTS(cp);
if (C_HUPCL(tty))
sClrDTR(cp);
rp_flush_buffer(tty);
tty_ldisc_flush(tty);
clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
/* We can't yet use tty_port_close_end as the buffer handling in this
driver is a bit different to the usual */
if (port->blocked_open) {
if (port->close_delay) {
msleep_interruptible(jiffies_to_msecs(port->close_delay));
}
wake_up_interruptible(&port->open_wait);
} else {
if (info->xmit_buf) {
free_page((unsigned long) info->xmit_buf);
info->xmit_buf = NULL;
}
}
spin_lock_irq(&port->lock);
info->port.flags &= ~(ASYNC_INITIALIZED | ASYNC_CLOSING | ASYNC_NORMAL_ACTIVE);
tty->closing = 0;
spin_unlock_irq(&port->lock);
mutex_unlock(&port->mutex);
tty_port_tty_set(port, NULL);
wake_up_interruptible(&port->close_wait);
complete_all(&info->close_wait);
atomic_dec(&rp_num_ports_open);
#ifdef ROCKET_DEBUG_OPEN
printk(KERN_INFO "rocket mod-- = %d...\n",
atomic_read(&rp_num_ports_open));
printk(KERN_INFO "rp_close ttyR%d complete shutdown\n", info->line);
#endif
}
static void rp_set_termios(struct tty_struct *tty,
struct ktermios *old_termios)
{
struct r_port *info = tty->driver_data;
CHANNEL_t *cp;
unsigned cflag;
if (rocket_paranoia_check(info, "rp_set_termios"))
return;
cflag = tty->termios.c_cflag;
/*
* This driver doesn't support CS5 or CS6
*/
if (((cflag & CSIZE) == CS5) || ((cflag & CSIZE) == CS6))
tty->termios.c_cflag =
((cflag & ~CSIZE) | (old_termios->c_cflag & CSIZE));
/* Or CMSPAR */
tty->termios.c_cflag &= ~CMSPAR;
configure_r_port(tty, info, old_termios);
cp = &info->channel;
/* Handle transition to B0 status */
if ((old_termios->c_cflag & CBAUD) && !(tty->termios.c_cflag & CBAUD)) {
sClrDTR(cp);
sClrRTS(cp);
}
/* Handle transition away from B0 status */
if (!(old_termios->c_cflag & CBAUD) && (tty->termios.c_cflag & CBAUD)) {
sSetRTS(cp);
sSetDTR(cp);
}
if ((old_termios->c_cflag & CRTSCTS) && !(tty->termios.c_cflag & CRTSCTS))
rp_start(tty);
}
static int rp_break(struct tty_struct *tty, int break_state)
{
struct r_port *info = tty->driver_data;
unsigned long flags;
if (rocket_paranoia_check(info, "rp_break"))
return -EINVAL;
spin_lock_irqsave(&info->slock, flags);
if (break_state == -1)
sSendBreak(&info->channel);
else
sClrBreak(&info->channel);
spin_unlock_irqrestore(&info->slock, flags);
return 0;
}
/*
* sGetChanRI used to be a macro in rocket_int.h. When the functionality for
* the UPCI boards was added, it was decided to make this a function because
* the macro was getting too complicated. All cases except the first one
* (UPCIRingInd) are taken directly from the original macro.
*/
static int sGetChanRI(CHANNEL_T * ChP)
{
CONTROLLER_t *CtlP = ChP->CtlP;
int ChanNum = ChP->ChanNum;
int RingInd = 0;
if (CtlP->UPCIRingInd)
RingInd = !(sInB(CtlP->UPCIRingInd) & sBitMapSetTbl[ChanNum]);
else if (CtlP->AltChanRingIndicator)
RingInd = sInB((ByteIO_t) (ChP->ChanStat + 8)) & DSR_ACT;
else if (CtlP->boardType == ROCKET_TYPE_PC104)
RingInd = !(sInB(CtlP->AiopIO[3]) & sBitMapSetTbl[ChanNum]);
return RingInd;
}
/********************************************************************************************/
/* Here are the routines used by rp_ioctl. These are all called from exception handlers. */
/*
* Returns the state of the serial modem control lines. These next 2 functions
* are the way kernel versions > 2.5 handle modem control lines rather than IOCTLs.
*/
static int rp_tiocmget(struct tty_struct *tty)
{
struct r_port *info = tty->driver_data;
unsigned int control, result, ChanStatus;
ChanStatus = sGetChanStatusLo(&info->channel);
control = info->channel.TxControl[3];
result = ((control & SET_RTS) ? TIOCM_RTS : 0) |
((control & SET_DTR) ? TIOCM_DTR : 0) |
((ChanStatus & CD_ACT) ? TIOCM_CAR : 0) |
(sGetChanRI(&info->channel) ? TIOCM_RNG : 0) |
((ChanStatus & DSR_ACT) ? TIOCM_DSR : 0) |
((ChanStatus & CTS_ACT) ? TIOCM_CTS : 0);
return result;
}
/*
* Sets the modem control lines
*/
static int rp_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct r_port *info = tty->driver_data;
if (set & TIOCM_RTS)
info->channel.TxControl[3] |= SET_RTS;
if (set & TIOCM_DTR)
info->channel.TxControl[3] |= SET_DTR;
if (clear & TIOCM_RTS)
info->channel.TxControl[3] &= ~SET_RTS;
if (clear & TIOCM_DTR)
info->channel.TxControl[3] &= ~SET_DTR;
out32(info->channel.IndexAddr, info->channel.TxControl);
return 0;
}
static int get_config(struct r_port *info, struct rocket_config __user *retinfo)
{
struct rocket_config tmp;
if (!retinfo)
return -EFAULT;
memset(&tmp, 0, sizeof (tmp));
mutex_lock(&info->port.mutex);
tmp.line = info->line;
tmp.flags = info->flags;
tmp.close_delay = info->port.close_delay;
tmp.closing_wait = info->port.closing_wait;
tmp.port = rcktpt_io_addr[(info->line >> 5) & 3];
mutex_unlock(&info->port.mutex);
if (copy_to_user(retinfo, &tmp, sizeof (*retinfo)))
return -EFAULT;
return 0;
}
static int set_config(struct tty_struct *tty, struct r_port *info,
struct rocket_config __user *new_info)
{
struct rocket_config new_serial;
if (copy_from_user(&new_serial, new_info, sizeof (new_serial)))
return -EFAULT;
mutex_lock(&info->port.mutex);
if (!capable(CAP_SYS_ADMIN))
{
if ((new_serial.flags & ~ROCKET_USR_MASK) != (info->flags & ~ROCKET_USR_MASK)) {
mutex_unlock(&info->port.mutex);
return -EPERM;
}
info->flags = ((info->flags & ~ROCKET_USR_MASK) | (new_serial.flags & ROCKET_USR_MASK));
configure_r_port(tty, info, NULL);
mutex_unlock(&info->port.mutex);
return 0;
}
info->flags = ((info->flags & ~ROCKET_FLAGS) | (new_serial.flags & ROCKET_FLAGS));
info->port.close_delay = new_serial.close_delay;
info->port.closing_wait = new_serial.closing_wait;
if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_HI)
tty->alt_speed = 57600;
if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_VHI)
tty->alt_speed = 115200;
if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_SHI)
tty->alt_speed = 230400;
if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_WARP)
tty->alt_speed = 460800;
mutex_unlock(&info->port.mutex);
configure_r_port(tty, info, NULL);
return 0;
}
/*
* This function fills in a rocket_ports struct with information
* about what boards/ports are in the system. This info is passed
* to user space. See setrocket.c where the info is used to create
* the /dev/ttyRx ports.
*/
static int get_ports(struct r_port *info, struct rocket_ports __user *retports)
{
struct rocket_ports tmp;
int board;
if (!retports)
return -EFAULT;
memset(&tmp, 0, sizeof (tmp));
tmp.tty_major = rocket_driver->major;
for (board = 0; board < 4; board++) {
tmp.rocketModel[board].model = rocketModel[board].model;
strcpy(tmp.rocketModel[board].modelString, rocketModel[board].modelString);
tmp.rocketModel[board].numPorts = rocketModel[board].numPorts;
tmp.rocketModel[board].loadrm2 = rocketModel[board].loadrm2;
tmp.rocketModel[board].startingPortNumber = rocketModel[board].startingPortNumber;
}
if (copy_to_user(retports, &tmp, sizeof (*retports)))
return -EFAULT;
return 0;
}
static int reset_rm2(struct r_port *info, void __user *arg)
{
int reset;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (copy_from_user(&reset, arg, sizeof (int)))
return -EFAULT;
if (reset)
reset = 1;
if (rcktpt_type[info->board] != ROCKET_TYPE_MODEMII &&
rcktpt_type[info->board] != ROCKET_TYPE_MODEMIII)
return -EINVAL;
if (info->ctlp->BusType == isISA)
sModemReset(info->ctlp, info->chan, reset);
else
sPCIModemReset(info->ctlp, info->chan, reset);
return 0;
}
static int get_version(struct r_port *info, struct rocket_version __user *retvers)
{
if (copy_to_user(retvers, &driver_version, sizeof (*retvers)))
return -EFAULT;
return 0;
}
/* IOCTL call handler into the driver */
static int rp_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
struct r_port *info = tty->driver_data;
void __user *argp = (void __user *)arg;
int ret = 0;
if (cmd != RCKP_GET_PORTS && rocket_paranoia_check(info, "rp_ioctl"))
return -ENXIO;
switch (cmd) {
case RCKP_GET_STRUCT:
if (copy_to_user(argp, info, sizeof (struct r_port)))
ret = -EFAULT;
break;
case RCKP_GET_CONFIG:
ret = get_config(info, argp);
break;
case RCKP_SET_CONFIG:
ret = set_config(tty, info, argp);
break;
case RCKP_GET_PORTS:
ret = get_ports(info, argp);
break;
case RCKP_RESET_RM2:
ret = reset_rm2(info, argp);
break;
case RCKP_GET_VERSION:
ret = get_version(info, argp);
break;
default:
ret = -ENOIOCTLCMD;
}
return ret;
}
static void rp_send_xchar(struct tty_struct *tty, char ch)
{
struct r_port *info = tty->driver_data;
CHANNEL_t *cp;
if (rocket_paranoia_check(info, "rp_send_xchar"))
return;
cp = &info->channel;
if (sGetTxCnt(cp))
sWriteTxPrioByte(cp, ch);
else
sWriteTxByte(sGetTxRxDataIO(cp), ch);
}
static void rp_throttle(struct tty_struct *tty)
{
struct r_port *info = tty->driver_data;
#ifdef ROCKET_DEBUG_THROTTLE
printk(KERN_INFO "throttle %s: %d....\n", tty->name,
tty->ldisc.chars_in_buffer(tty));
#endif
if (rocket_paranoia_check(info, "rp_throttle"))
return;
if (I_IXOFF(tty))
rp_send_xchar(tty, STOP_CHAR(tty));
sClrRTS(&info->channel);
}
static void rp_unthrottle(struct tty_struct *tty)
{
struct r_port *info = tty->driver_data;
#ifdef ROCKET_DEBUG_THROTTLE
printk(KERN_INFO "unthrottle %s: %d....\n", tty->name,
tty->ldisc.chars_in_buffer(tty));
#endif
if (rocket_paranoia_check(info, "rp_unthrottle"))
return;
if (I_IXOFF(tty))
rp_send_xchar(tty, START_CHAR(tty));
sSetRTS(&info->channel);
}
/*
* ------------------------------------------------------------
* rp_stop() and rp_start()
*
* These routines are called before setting or resetting tty->stopped.
* They enable or disable transmitter interrupts, as necessary.
* ------------------------------------------------------------
*/
static void rp_stop(struct tty_struct *tty)
{
struct r_port *info = tty->driver_data;
#ifdef ROCKET_DEBUG_FLOW
printk(KERN_INFO "stop %s: %d %d....\n", tty->name,
info->xmit_cnt, info->xmit_fifo_room);
#endif
if (rocket_paranoia_check(info, "rp_stop"))
return;
if (sGetTxCnt(&info->channel))
sDisTransmit(&info->channel);
}
static void rp_start(struct tty_struct *tty)
{
struct r_port *info = tty->driver_data;
#ifdef ROCKET_DEBUG_FLOW
printk(KERN_INFO "start %s: %d %d....\n", tty->name,
info->xmit_cnt, info->xmit_fifo_room);
#endif
if (rocket_paranoia_check(info, "rp_start"))
return;
sEnTransmit(&info->channel);
set_bit((info->aiop * 8) + info->chan,
(void *) &xmit_flags[info->board]);
}
/*
* rp_wait_until_sent() --- wait until the transmitter is empty
*/
static void rp_wait_until_sent(struct tty_struct *tty, int timeout)
{
struct r_port *info = tty->driver_data;
CHANNEL_t *cp;
unsigned long orig_jiffies;
int check_time, exit_time;
int txcnt;
if (rocket_paranoia_check(info, "rp_wait_until_sent"))
return;
cp = &info->channel;
orig_jiffies = jiffies;
#ifdef ROCKET_DEBUG_WAIT_UNTIL_SENT
printk(KERN_INFO "In RP_wait_until_sent(%d) (jiff=%lu)...\n", timeout,
jiffies);
printk(KERN_INFO "cps=%d...\n", info->cps);
#endif
while (1) {
txcnt = sGetTxCnt(cp);
if (!txcnt) {
if (sGetChanStatusLo(cp) & TXSHRMT)
break;
check_time = (HZ / info->cps) / 5;
} else {
check_time = HZ * txcnt / info->cps;
}
if (timeout) {
exit_time = orig_jiffies + timeout - jiffies;
if (exit_time <= 0)
break;
if (exit_time < check_time)
check_time = exit_time;
}
if (check_time == 0)
check_time = 1;
#ifdef ROCKET_DEBUG_WAIT_UNTIL_SENT
printk(KERN_INFO "txcnt = %d (jiff=%lu,check=%d)...\n", txcnt,
jiffies, check_time);
#endif
msleep_interruptible(jiffies_to_msecs(check_time));
if (signal_pending(current))
break;
}
__set_current_state(TASK_RUNNING);
#ifdef ROCKET_DEBUG_WAIT_UNTIL_SENT
printk(KERN_INFO "txcnt = %d (jiff=%lu)...done\n", txcnt, jiffies);
#endif
}
/*
* rp_hangup() --- called by tty_hangup() when a hangup is signaled.
*/
static void rp_hangup(struct tty_struct *tty)
{
CHANNEL_t *cp;
struct r_port *info = tty->driver_data;
unsigned long flags;
if (rocket_paranoia_check(info, "rp_hangup"))
return;
#if (defined(ROCKET_DEBUG_OPEN) || defined(ROCKET_DEBUG_HANGUP))
printk(KERN_INFO "rp_hangup of ttyR%d...\n", info->line);
#endif
rp_flush_buffer(tty);
spin_lock_irqsave(&info->port.lock, flags);
if (info->port.flags & ASYNC_CLOSING) {
spin_unlock_irqrestore(&info->port.lock, flags);
return;
}
if (info->port.count)
atomic_dec(&rp_num_ports_open);
clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
spin_unlock_irqrestore(&info->port.lock, flags);
tty_port_hangup(&info->port);
cp = &info->channel;
sDisRxFIFO(cp);
sDisTransmit(cp);
sDisInterrupts(cp, (TXINT_EN | MCINT_EN | RXINT_EN | SRCINT_EN | CHANINT_EN));
sDisCTSFlowCtl(cp);
sDisTxSoftFlowCtl(cp);
sClrTxXOFF(cp);
clear_bit(ASYNCB_INITIALIZED, &info->port.flags);
wake_up_interruptible(&info->port.open_wait);
}
/*
* Exception handler - write char routine. The RocketPort driver uses a
* double-buffering strategy, with the twist that if the in-memory CPU
* buffer is empty, and there's space in the transmit FIFO, the
* writing routines will write directly to the transmit FIFO.
* The write buffer and counters are protected by spinlocks.
*/
static int rp_put_char(struct tty_struct *tty, unsigned char ch)
{
struct r_port *info = tty->driver_data;
CHANNEL_t *cp;
unsigned long flags;
if (rocket_paranoia_check(info, "rp_put_char"))
return 0;
/*
* Grab the port write mutex, locking out other processes that try to
* write to this port
*/
mutex_lock(&info->write_mtx);
#ifdef ROCKET_DEBUG_WRITE
printk(KERN_INFO "rp_put_char %c...\n", ch);
#endif
spin_lock_irqsave(&info->slock, flags);
cp = &info->channel;
if (!tty->stopped && info->xmit_fifo_room == 0)
info->xmit_fifo_room = TXFIFO_SIZE - sGetTxCnt(cp);
if (tty->stopped || info->xmit_fifo_room == 0 || info->xmit_cnt != 0) {
info->xmit_buf[info->xmit_head++] = ch;
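/* XMIT_BUF_SIZE is a power of two, so masking wraps the head index */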
info->xmit_head &= XMIT_BUF_SIZE - 1;
info->xmit_cnt++;
set_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
} else {
sOutB(sGetTxRxDataIO(cp), ch);
info->xmit_fifo_room--;
}
spin_unlock_irqrestore(&info->slock, flags);
mutex_unlock(&info->write_mtx);
return 1;
}
/*
* Exception handler - write routine, called when user app writes to the device.
* A per port write mutex is used to protect from another process writing to
* this port at the same time. This other process could be running on another CPU,
* or could gain control of this CPU if copy_from_user() blocks on a page fault (swapped-out page).
* Spinlocks protect the info xmit members.
*/
static int rp_write(struct tty_struct *tty,
const unsigned char *buf, int count)
{
struct r_port *info = tty->driver_data;
CHANNEL_t *cp;
const unsigned char *b;
int c, retval = 0;
unsigned long flags;
if (count <= 0 || rocket_paranoia_check(info, "rp_write"))
return 0;
if (mutex_lock_interruptible(&info->write_mtx))
return -ERESTARTSYS;
#ifdef ROCKET_DEBUG_WRITE
printk(KERN_INFO "rp_write %d chars...\n", count);
#endif
cp = &info->channel;
if (!tty->stopped && info->xmit_fifo_room < count)
info->xmit_fifo_room = TXFIFO_SIZE - sGetTxCnt(cp);
/*
* If the write queue for the port is empty, and there is FIFO space, stuff bytes
* into FIFO. Use the write queue for temp storage.
*/
if (!tty->stopped && info->xmit_cnt == 0 && info->xmit_fifo_room > 0) {
c = min(count, info->xmit_fifo_room);
b = buf;
/* Push data into FIFO, 2 bytes at a time */
sOutStrW(sGetTxRxDataIO(cp), (unsigned short *) b, c / 2);
/* If there is a byte remaining, write it */
if (c & 1)
sOutB(sGetTxRxDataIO(cp), b[c - 1]);
retval += c;
buf += c;
count -= c;
spin_lock_irqsave(&info->slock, flags);
info->xmit_fifo_room -= c;
spin_unlock_irqrestore(&info->slock, flags);
}
/* If count is zero, we wrote it all and are done */
if (!count)
goto end;
/* Write remaining data into the port's xmit_buf */
while (1) {
/* Hung up ? */
if (!test_bit(ASYNCB_NORMAL_ACTIVE, &info->port.flags))
goto end;
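/*
 * Two limits apply: free space in the ring buffer (keeping one byte
 * unused to distinguish full from empty) and the distance to the end
 * of the buffer, since memcpy() cannot wrap.
 */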
c = min(count, XMIT_BUF_SIZE - info->xmit_cnt - 1);
c = min(c, XMIT_BUF_SIZE - info->xmit_head);
if (c <= 0)
break;
b = buf;
memcpy(info->xmit_buf + info->xmit_head, b, c);
spin_lock_irqsave(&info->slock, flags);
info->xmit_head =
(info->xmit_head + c) & (XMIT_BUF_SIZE - 1);
info->xmit_cnt += c;
spin_unlock_irqrestore(&info->slock, flags);
buf += c;
count -= c;
retval += c;
}
if ((retval > 0) && !tty->stopped)
set_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
end:
if (info->xmit_cnt < WAKEUP_CHARS) {
tty_wakeup(tty);
#ifdef ROCKETPORT_HAVE_POLL_WAIT
wake_up_interruptible(&tty->poll_wait);
#endif
}
mutex_unlock(&info->write_mtx);
return retval;
}
/*
* Return the number of characters that can be sent. We estimate
* using only the in-memory transmit buffer, and ignore the
* potential space in the transmit FIFO.
*/
static int rp_write_room(struct tty_struct *tty)
{
struct r_port *info = tty->driver_data;
int ret;
if (rocket_paranoia_check(info, "rp_write_room"))
return 0;
ret = XMIT_BUF_SIZE - info->xmit_cnt - 1;
if (ret < 0)
ret = 0;
#ifdef ROCKET_DEBUG_WRITE
printk(KERN_INFO "rp_write_room returns %d...\n", ret);
#endif
return ret;
}
/*
* Return the number of characters in the buffer. Again, this only
* counts those characters in the in-memory transmit buffer.
*/
static int rp_chars_in_buffer(struct tty_struct *tty)
{
struct r_port *info = tty->driver_data;
if (rocket_paranoia_check(info, "rp_chars_in_buffer"))
return 0;
#ifdef ROCKET_DEBUG_WRITE
printk(KERN_INFO "rp_chars_in_buffer returns %d...\n", info->xmit_cnt);
#endif
return info->xmit_cnt;
}
/*
* Flushes the TX fifo for a port, deletes data in the xmit_buf stored in the
* r_port struct for the port. Note that spinlocks are used to protect info members;
* do not call this function if the spinlock is already held.
*/
static void rp_flush_buffer(struct tty_struct *tty)
{
struct r_port *info = tty->driver_data;
CHANNEL_t *cp;
unsigned long flags;
if (rocket_paranoia_check(info, "rp_flush_buffer"))
return;
spin_lock_irqsave(&info->slock, flags);
info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
spin_unlock_irqrestore(&info->slock, flags);
#ifdef ROCKETPORT_HAVE_POLL_WAIT
wake_up_interruptible(&tty->poll_wait);
#endif
tty_wakeup(tty);
cp = &info->channel;
sFlushTxFIFO(cp);
}
#ifdef CONFIG_PCI
static DEFINE_PCI_DEVICE_TABLE(rocket_pci_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RP4QUAD) },
{ PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RP8OCTA) },
{ PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_URP8OCTA) },
{ PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RP8INTF) },
{ PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_URP8INTF) },
{ PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RP8J) },
{ PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RP4J) },
{ PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RP8SNI) },
{ PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RP16SNI) },
{ PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RP16INTF) },
{ PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_URP16INTF) },
{ PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_CRP16INTF) },
{ PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RP32INTF) },
{ PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_URP32INTF) },
{ PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RPP4) },
{ PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RPP8) },
{ PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RP2_232) },
{ PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RP2_422) },
{ PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RP6M) },
{ PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RP4M) },
{ PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_UPCI_RM3_8PORT) },
{ PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_UPCI_RM3_4PORT) },
{ }
};
MODULE_DEVICE_TABLE(pci, rocket_pci_ids);
/* Resets the speaker controller on RocketModem II and III devices */
static void rmSpeakerReset(CONTROLLER_T * CtlP, unsigned long model)
{
ByteIO_t addr;
/* RocketModem II speaker control is at the 8th port location of offset 0x40 */
if ((model == MODEL_RP4M) || (model == MODEL_RP6M)) {
addr = CtlP->AiopIO[0] + 0x4F;
sOutB(addr, 0);
}
/* RocketModem III speaker control is at the 1st port location of offset 0x80 */
if ((model == MODEL_UPCI_RM3_8PORT)
|| (model == MODEL_UPCI_RM3_4PORT)) {
addr = CtlP->AiopIO[0] + 0x88;
sOutB(addr, 0);
}
}
/***************************************************************************
Function: sPCIInitController
Purpose: Initialization of controller global registers and controller
structure.
Call: sPCIInitController(CtlP,CtlNum,AiopIOList,AiopIOListSize,
ConfigIO,IRQNum,Frequency,PeriodicOnly,altChanRingIndicator,UPCIRingInd)
CONTROLLER_T *CtlP; Ptr to controller structure
int CtlNum; Controller number
ByteIO_t *AiopIOList; List of I/O addresses for each AIOP.
This list must be in the order the AIOPs will be found on the
controller. Once an AIOP in the list is not found, it is
assumed that there are no more AIOPs on the controller.
int AiopIOListSize; Number of addresses in AiopIOList
int IRQNum; Interrupt Request number. Can be any of the following:
0: Disable global interrupts
3: IRQ 3
4: IRQ 4
5: IRQ 5
9: IRQ 9
10: IRQ 10
11: IRQ 11
12: IRQ 12
15: IRQ 15
Byte_t Frequency: A flag identifying the frequency
of the periodic interrupt, can be any one of the following:
FREQ_DIS - periodic interrupt disabled
FREQ_137HZ - 137 Hertz
FREQ_69HZ - 69 Hertz
FREQ_34HZ - 34 Hertz
FREQ_17HZ - 17 Hertz
FREQ_9HZ - 9 Hertz
FREQ_4HZ - 4 Hertz
If IRQNum is set to 0 the Frequency parameter is
overridden; it is forced to a value of FREQ_DIS.
int PeriodicOnly: 1 if all interrupts except the periodic
interrupt are to be blocked.
0 if both the periodic interrupt and
other channel interrupts are allowed.
If IRQNum is set to 0 the PeriodicOnly parameter is
overridden; it is forced to a value of 0.
Return: int: Number of AIOPs on the controller, or -1 if controller
initialization failed.
Comments:
If periodic interrupts are to be disabled but AIOP interrupts
are allowed, set Frequency to FREQ_DIS and PeriodicOnly to 0.
If interrupts are to be completely disabled set IRQNum to 0.
Setting Frequency to FREQ_DIS and PeriodicOnly to 1 is an
invalid combination.
This function performs initialization of global interrupt modes,
but it does not actually enable global interrupts. To enable
and disable global interrupts use functions sEnGlobalInt() and
sDisGlobalInt(). Enabling of global interrupts is normally not
done until all other initializations are complete.
Even if interrupts are globally enabled, they must also be
individually enabled for each channel that is to generate
interrupts.
Warnings: No range checking on any of the parameters is done.
No context switches are allowed while executing this function.
After this function all AIOPs on the controller are disabled,
they can be enabled with sEnAiop().
*/
static int sPCIInitController(CONTROLLER_T * CtlP, int CtlNum,
ByteIO_t * AiopIOList, int AiopIOListSize,
WordIO_t ConfigIO, int IRQNum, Byte_t Frequency,
int PeriodicOnly, int altChanRingIndicator,
int UPCIRingInd)
{
int i;
ByteIO_t io;
CtlP->AltChanRingIndicator = altChanRingIndicator;
CtlP->UPCIRingInd = UPCIRingInd;
CtlP->CtlNum = CtlNum;
CtlP->CtlID = CTLID_0001; /* controller release 1 */
CtlP->BusType = isPCI; /* controller release 1 */
if (ConfigIO) {
CtlP->isUPCI = 1;
CtlP->PCIIO = ConfigIO + _PCI_9030_INT_CTRL;
CtlP->PCIIO2 = ConfigIO + _PCI_9030_GPIO_CTRL;
CtlP->AiopIntrBits = upci_aiop_intr_bits;
} else {
CtlP->isUPCI = 0;
CtlP->PCIIO =
(WordIO_t) ((ByteIO_t) AiopIOList[0] + _PCI_INT_FUNC);
CtlP->AiopIntrBits = aiop_intr_bits;
}
sPCIControllerEOI(CtlP); /* clear EOI if warm init */
/* Init AIOPs */
CtlP->NumAiop = 0;
for (i = 0; i < AiopIOListSize; i++) {
io = AiopIOList[i];
CtlP->AiopIO[i] = (WordIO_t) io;
CtlP->AiopIntChanIO[i] = io + _INT_CHAN;
CtlP->AiopID[i] = sReadAiopID(io); /* read AIOP ID */
if (CtlP->AiopID[i] == AIOPID_NULL) /* if AIOP does not exist */
break; /* done looking for AIOPs */
CtlP->AiopNumChan[i] = sReadAiopNumChan((WordIO_t) io); /* num channels in AIOP */
sOutW((WordIO_t) io + _INDX_ADDR, _CLK_PRE); /* clock prescaler */
sOutB(io + _INDX_DATA, sClockPrescale);
CtlP->NumAiop++; /* bump count of AIOPs */
}
if (CtlP->NumAiop == 0)
return (-1);
else
return (CtlP->NumAiop);
}
/*
* Called when a PCI card is found. Retrieves and stores model information,
* initializes the AIOPIC and serial port hardware.
* Inputs: i is the board number (0-n)
*/
static __init int register_PCI(int i, struct pci_dev *dev)
{
int num_aiops, aiop, max_num_aiops, num_chan, chan;
unsigned int aiopio[MAX_AIOPS_PER_BOARD];
CONTROLLER_t *ctlp;
int fast_clock = 0;
int altChanRingIndicator = 0;
int ports_per_aiop = 8;
WordIO_t ConfigIO = 0;
ByteIO_t UPCIRingInd = 0;
if (!dev || !pci_match_id(rocket_pci_ids, dev) ||
pci_enable_device(dev))
return 0;
rcktpt_io_addr[i] = pci_resource_start(dev, 0);
rcktpt_type[i] = ROCKET_TYPE_NORMAL;
rocketModel[i].loadrm2 = 0;
rocketModel[i].startingPortNumber = nextLineNumber;
/* Depending on the model, set up some config variables */
switch (dev->device) {
case PCI_DEVICE_ID_RP4QUAD:
max_num_aiops = 1;
ports_per_aiop = 4;
rocketModel[i].model = MODEL_RP4QUAD;
strcpy(rocketModel[i].modelString, "RocketPort 4 port w/quad cable");
rocketModel[i].numPorts = 4;
break;
case PCI_DEVICE_ID_RP8OCTA:
max_num_aiops = 1;
rocketModel[i].model = MODEL_RP8OCTA;
strcpy(rocketModel[i].modelString, "RocketPort 8 port w/octa cable");
rocketModel[i].numPorts = 8;
break;
case PCI_DEVICE_ID_URP8OCTA:
max_num_aiops = 1;
rocketModel[i].model = MODEL_UPCI_RP8OCTA;
strcpy(rocketModel[i].modelString, "RocketPort UPCI 8 port w/octa cable");
rocketModel[i].numPorts = 8;
break;
case PCI_DEVICE_ID_RP8INTF:
max_num_aiops = 1;
rocketModel[i].model = MODEL_RP8INTF;
strcpy(rocketModel[i].modelString, "RocketPort 8 port w/external I/F");
rocketModel[i].numPorts = 8;
break;
case PCI_DEVICE_ID_URP8INTF:
max_num_aiops = 1;
rocketModel[i].model = MODEL_UPCI_RP8INTF;
strcpy(rocketModel[i].modelString, "RocketPort UPCI 8 port w/external I/F");
rocketModel[i].numPorts = 8;
break;
case PCI_DEVICE_ID_RP8J:
max_num_aiops = 1;
rocketModel[i].model = MODEL_RP8J;
strcpy(rocketModel[i].modelString, "RocketPort 8 port w/RJ11 connectors");
rocketModel[i].numPorts = 8;
break;
case PCI_DEVICE_ID_RP4J:
max_num_aiops = 1;
ports_per_aiop = 4;
rocketModel[i].model = MODEL_RP4J;
strcpy(rocketModel[i].modelString, "RocketPort 4 port w/RJ45 connectors");
rocketModel[i].numPorts = 4;
break;
case PCI_DEVICE_ID_RP8SNI:
max_num_aiops = 1;
rocketModel[i].model = MODEL_RP8SNI;
strcpy(rocketModel[i].modelString, "RocketPort 8 port w/ custom DB78");
rocketModel[i].numPorts = 8;
break;
case PCI_DEVICE_ID_RP16SNI:
max_num_aiops = 2;
rocketModel[i].model = MODEL_RP16SNI;
strcpy(rocketModel[i].modelString, "RocketPort 16 port w/ custom DB78");
rocketModel[i].numPorts = 16;
break;
case PCI_DEVICE_ID_RP16INTF:
max_num_aiops = 2;
rocketModel[i].model = MODEL_RP16INTF;
strcpy(rocketModel[i].modelString, "RocketPort 16 port w/external I/F");
rocketModel[i].numPorts = 16;
break;
case PCI_DEVICE_ID_URP16INTF:
max_num_aiops = 2;
rocketModel[i].model = MODEL_UPCI_RP16INTF;
strcpy(rocketModel[i].modelString, "RocketPort UPCI 16 port w/external I/F");
rocketModel[i].numPorts = 16;
break;
case PCI_DEVICE_ID_CRP16INTF:
max_num_aiops = 2;
rocketModel[i].model = MODEL_CPCI_RP16INTF;
strcpy(rocketModel[i].modelString, "RocketPort Compact PCI 16 port w/external I/F");
rocketModel[i].numPorts = 16;
break;
case PCI_DEVICE_ID_RP32INTF:
max_num_aiops = 4;
rocketModel[i].model = MODEL_RP32INTF;
strcpy(rocketModel[i].modelString, "RocketPort 32 port w/external I/F");
rocketModel[i].numPorts = 32;
break;
case PCI_DEVICE_ID_URP32INTF:
max_num_aiops = 4;
rocketModel[i].model = MODEL_UPCI_RP32INTF;
strcpy(rocketModel[i].modelString, "RocketPort UPCI 32 port w/external I/F");
rocketModel[i].numPorts = 32;
break;
case PCI_DEVICE_ID_RPP4:
max_num_aiops = 1;
ports_per_aiop = 4;
altChanRingIndicator++;
fast_clock++;
rocketModel[i].model = MODEL_RPP4;
strcpy(rocketModel[i].modelString, "RocketPort Plus 4 port");
rocketModel[i].numPorts = 4;
break;
case PCI_DEVICE_ID_RPP8:
max_num_aiops = 2;
ports_per_aiop = 4;
altChanRingIndicator++;
fast_clock++;
rocketModel[i].model = MODEL_RPP8;
strcpy(rocketModel[i].modelString, "RocketPort Plus 8 port");
rocketModel[i].numPorts = 8;
break;
case PCI_DEVICE_ID_RP2_232:
max_num_aiops = 1;
ports_per_aiop = 2;
altChanRingIndicator++;
fast_clock++;
rocketModel[i].model = MODEL_RP2_232;
strcpy(rocketModel[i].modelString, "RocketPort Plus 2 port RS232");
rocketModel[i].numPorts = 2;
break;
case PCI_DEVICE_ID_RP2_422:
max_num_aiops = 1;
ports_per_aiop = 2;
altChanRingIndicator++;
fast_clock++;
rocketModel[i].model = MODEL_RP2_422;
strcpy(rocketModel[i].modelString, "RocketPort Plus 2 port RS422");
rocketModel[i].numPorts = 2;
break;
case PCI_DEVICE_ID_RP6M:
max_num_aiops = 1;
ports_per_aiop = 6;
/* If revision is 1, the rocketmodem flash must be loaded.
* If it is 2 it is a "socketed" version. */
if (dev->revision == 1) {
rcktpt_type[i] = ROCKET_TYPE_MODEMII;
rocketModel[i].loadrm2 = 1;
} else {
rcktpt_type[i] = ROCKET_TYPE_MODEM;
}
rocketModel[i].model = MODEL_RP6M;
strcpy(rocketModel[i].modelString, "RocketModem 6 port");
rocketModel[i].numPorts = 6;
break;
case PCI_DEVICE_ID_RP4M:
max_num_aiops = 1;
ports_per_aiop = 4;
if (dev->revision == 1) {
rcktpt_type[i] = ROCKET_TYPE_MODEMII;
rocketModel[i].loadrm2 = 1;
} else {
rcktpt_type[i] = ROCKET_TYPE_MODEM;
}
rocketModel[i].model = MODEL_RP4M;
strcpy(rocketModel[i].modelString, "RocketModem 4 port");
rocketModel[i].numPorts = 4;
break;
default:
max_num_aiops = 0;
break;
}
/*
* Check for UPCI boards.
*/
switch (dev->device) {
case PCI_DEVICE_ID_URP32INTF:
case PCI_DEVICE_ID_URP8INTF:
case PCI_DEVICE_ID_URP16INTF:
case PCI_DEVICE_ID_CRP16INTF:
case PCI_DEVICE_ID_URP8OCTA:
rcktpt_io_addr[i] = pci_resource_start(dev, 2);
ConfigIO = pci_resource_start(dev, 1);
if (dev->device == PCI_DEVICE_ID_URP8OCTA) {
UPCIRingInd = rcktpt_io_addr[i] + _PCI_9030_RING_IND;
/*
* Check for octa or quad cable.
*/
if (!(sInW(ConfigIO + _PCI_9030_GPIO_CTRL) &
PCI_GPIO_CTRL_8PORT)) {
ports_per_aiop = 4;
rocketModel[i].numPorts = 4;
}
}
break;
case PCI_DEVICE_ID_UPCI_RM3_8PORT:
max_num_aiops = 1;
rocketModel[i].model = MODEL_UPCI_RM3_8PORT;
strcpy(rocketModel[i].modelString, "RocketModem III 8 port");
rocketModel[i].numPorts = 8;
rcktpt_io_addr[i] = pci_resource_start(dev, 2);
UPCIRingInd = rcktpt_io_addr[i] + _PCI_9030_RING_IND;
ConfigIO = pci_resource_start(dev, 1);
rcktpt_type[i] = ROCKET_TYPE_MODEMIII;
break;
case PCI_DEVICE_ID_UPCI_RM3_4PORT:
max_num_aiops = 1;
rocketModel[i].model = MODEL_UPCI_RM3_4PORT;
strcpy(rocketModel[i].modelString, "RocketModem III 4 port");
rocketModel[i].numPorts = 4;
rcktpt_io_addr[i] = pci_resource_start(dev, 2);
UPCIRingInd = rcktpt_io_addr[i] + _PCI_9030_RING_IND;
ConfigIO = pci_resource_start(dev, 1);
rcktpt_type[i] = ROCKET_TYPE_MODEMIII;
break;
default:
break;
}
if (fast_clock) {
sClockPrescale = 0x12; /* mod 2 (divide by 3) */
rp_baud_base[i] = 921600;
} else {
/*
* If support_low_speed is set, use the slow clock
* prescale, which supports 50 bps
*/
if (support_low_speed) {
/* mod 9 (divide by 10) prescale */
sClockPrescale = 0x19;
rp_baud_base[i] = 230400;
} else {
/* mod 4 (divide by 5) prescale */
sClockPrescale = 0x14;
rp_baud_base[i] = 460800;
}
}
for (aiop = 0; aiop < max_num_aiops; aiop++)
aiopio[aiop] = rcktpt_io_addr[i] + (aiop * 0x40);
ctlp = sCtlNumToCtlPtr(i);
num_aiops = sPCIInitController(ctlp, i, aiopio, max_num_aiops, ConfigIO, 0, FREQ_DIS, 0, altChanRingIndicator, UPCIRingInd);
for (aiop = 0; aiop < max_num_aiops; aiop++)
ctlp->AiopNumChan[aiop] = ports_per_aiop;
dev_info(&dev->dev, "comtrol PCI controller #%d found at "
"address %04lx, %d AIOP(s) (%s), creating ttyR%d - %ld\n",
i, rcktpt_io_addr[i], num_aiops, rocketModel[i].modelString,
rocketModel[i].startingPortNumber,
rocketModel[i].startingPortNumber + rocketModel[i].numPorts-1);
if (num_aiops <= 0) {
rcktpt_io_addr[i] = 0;
return (0);
}
is_PCI[i] = 1;
/* Reset the AIOPIC, init the serial ports */
for (aiop = 0; aiop < num_aiops; aiop++) {
sResetAiopByNum(ctlp, aiop);
num_chan = ports_per_aiop;
for (chan = 0; chan < num_chan; chan++)
init_r_port(i, aiop, chan, dev);
}
/* Rocket modems must be reset */
if ((rcktpt_type[i] == ROCKET_TYPE_MODEM) ||
(rcktpt_type[i] == ROCKET_TYPE_MODEMII) ||
(rcktpt_type[i] == ROCKET_TYPE_MODEMIII)) {
num_chan = ports_per_aiop;
for (chan = 0; chan < num_chan; chan++)
sPCIModemReset(ctlp, chan, 1);
msleep(500);
for (chan = 0; chan < num_chan; chan++)
sPCIModemReset(ctlp, chan, 0);
msleep(500);
rmSpeakerReset(ctlp, rocketModel[i].model);
}
return (1);
}
/*
* Probes for PCI cards, inits them if found
* Input: boards_found = number of ISA boards already found, or the
* starting board number
* Returns: Number of PCI boards found
*/
static int __init init_PCI(int boards_found)
{
struct pci_dev *dev = NULL;
int count = 0;
/* Work through the PCI device list, pulling out ours */
while ((dev = pci_get_device(PCI_VENDOR_ID_RP, PCI_ANY_ID, dev))) {
if (register_PCI(count + boards_found, dev))
count++;
}
return (count);
}
#endif /* CONFIG_PCI */
/*
* Probes for ISA cards
* Input: i = the board number to look for
* Returns: 1 if board found, 0 else
*/
static int __init init_ISA(int i)
{
int num_aiops, num_chan = 0, total_num_chan = 0;
int aiop, chan;
unsigned int aiopio[MAX_AIOPS_PER_BOARD];
CONTROLLER_t *ctlp;
char *type_string;
/* If io_addr is zero, no board configured */
if (rcktpt_io_addr[i] == 0)
return (0);
/* Reserve the IO region */
if (!request_region(rcktpt_io_addr[i], 64, "Comtrol RocketPort")) {
printk(KERN_ERR "Unable to reserve IO region for configured "
"ISA RocketPort at address 0x%lx, board not "
"installed...\n", rcktpt_io_addr[i]);
rcktpt_io_addr[i] = 0;
return (0);
}
ctlp = sCtlNumToCtlPtr(i);
ctlp->boardType = rcktpt_type[i];
switch (rcktpt_type[i]) {
case ROCKET_TYPE_PC104:
type_string = "(PC104)";
break;
case ROCKET_TYPE_MODEM:
type_string = "(RocketModem)";
break;
case ROCKET_TYPE_MODEMII:
type_string = "(RocketModem II)";
break;
default:
type_string = "";
break;
}
/*
* If support_low_speed is set, use the slow clock prescale,
* which supports 50 bps
*/
if (support_low_speed) {
sClockPrescale = 0x19; /* mod 9 (divide by 10) prescale */
rp_baud_base[i] = 230400;
} else {
sClockPrescale = 0x14; /* mod 4 (divide by 5) prescale */
rp_baud_base[i] = 460800;
}
for (aiop = 0; aiop < MAX_AIOPS_PER_BOARD; aiop++)
aiopio[aiop] = rcktpt_io_addr[i] + (aiop * 0x400);
num_aiops = sInitController(ctlp, i, controller + (i * 0x400), aiopio, MAX_AIOPS_PER_BOARD, 0, FREQ_DIS, 0);
if (ctlp->boardType == ROCKET_TYPE_PC104) {
sEnAiop(ctlp, 2); /* only one AIOPIC, but these */
sEnAiop(ctlp, 3); /* CSels used for other stuff */
}
/* If something went wrong initializing the AIOPs, release the ISA IO region */
if (num_aiops <= 0) {
release_region(rcktpt_io_addr[i], 64);
rcktpt_io_addr[i] = 0;
return (0);
}
rocketModel[i].startingPortNumber = nextLineNumber;
for (aiop = 0; aiop < num_aiops; aiop++) {
sResetAiopByNum(ctlp, aiop);
sEnAiop(ctlp, aiop);
num_chan = sGetAiopNumChan(ctlp, aiop);
total_num_chan += num_chan;
for (chan = 0; chan < num_chan; chan++)
init_r_port(i, aiop, chan, NULL);
}
is_PCI[i] = 0;
if ((rcktpt_type[i] == ROCKET_TYPE_MODEM) || (rcktpt_type[i] == ROCKET_TYPE_MODEMII)) {
num_chan = sGetAiopNumChan(ctlp, 0);
total_num_chan = num_chan;
for (chan = 0; chan < num_chan; chan++)
sModemReset(ctlp, chan, 1);
msleep(500);
for (chan = 0; chan < num_chan; chan++)
sModemReset(ctlp, chan, 0);
msleep(500);
strcpy(rocketModel[i].modelString, "RocketModem ISA");
} else {
strcpy(rocketModel[i].modelString, "RocketPort ISA");
}
rocketModel[i].numPorts = total_num_chan;
rocketModel[i].model = MODEL_ISA;
printk(KERN_INFO "RocketPort ISA card #%d found at 0x%lx - %d AIOPs %s\n",
i, rcktpt_io_addr[i], num_aiops, type_string);
printk(KERN_INFO "Installing %s, creating /dev/ttyR%d - %ld\n",
rocketModel[i].modelString,
rocketModel[i].startingPortNumber,
rocketModel[i].startingPortNumber +
rocketModel[i].numPorts - 1);
return (1);
}
static const struct tty_operations rocket_ops = {
.open = rp_open,
.close = rp_close,
.write = rp_write,
.put_char = rp_put_char,
.write_room = rp_write_room,
.chars_in_buffer = rp_chars_in_buffer,
.flush_buffer = rp_flush_buffer,
.ioctl = rp_ioctl,
.throttle = rp_throttle,
.unthrottle = rp_unthrottle,
.set_termios = rp_set_termios,
.stop = rp_stop,
.start = rp_start,
.hangup = rp_hangup,
.break_ctl = rp_break,
.send_xchar = rp_send_xchar,
.wait_until_sent = rp_wait_until_sent,
.tiocmget = rp_tiocmget,
.tiocmset = rp_tiocmset,
};
static const struct tty_port_operations rocket_port_ops = {
.carrier_raised = carrier_raised,
.dtr_rts = dtr_rts,
};
/*
* The module "startup" routine; it's run when the module is loaded.
*/
static int __init rp_init(void)
{
int ret = -ENOMEM, pci_boards_found, isa_boards_found, i;
printk(KERN_INFO "RocketPort device driver module, version %s, %s\n",
ROCKET_VERSION, ROCKET_DATE);
rocket_driver = alloc_tty_driver(MAX_RP_PORTS);
if (!rocket_driver)
goto err;
/*
* If board1 is non-zero, at least one ISA board is configured. If controller is
* zero, use the default controller IO address of board1 + 0x40.
*/
if (board1) {
if (controller == 0)
controller = board1 + 0x40;
} else {
controller = 0; /* Used as a flag, meaning no ISA boards */
}
/* If an ISA card is configured, reserve the 4 byte IO space for the Mudbac controller */
if (controller && (!request_region(controller, 4, "Comtrol RocketPort"))) {
printk(KERN_ERR "Unable to reserve IO region for first "
"configured ISA RocketPort controller 0x%lx. "
"Driver exiting\n", controller);
ret = -EBUSY;
goto err_tty;
}
/* Store ISA variable retrieved from command line or .conf file. */
rcktpt_io_addr[0] = board1;
rcktpt_io_addr[1] = board2;
rcktpt_io_addr[2] = board3;
rcktpt_io_addr[3] = board4;
rcktpt_type[0] = modem1 ? ROCKET_TYPE_MODEM : ROCKET_TYPE_NORMAL;
rcktpt_type[0] = pc104_1[0] ? ROCKET_TYPE_PC104 : rcktpt_type[0];
rcktpt_type[1] = modem2 ? ROCKET_TYPE_MODEM : ROCKET_TYPE_NORMAL;
rcktpt_type[1] = pc104_2[0] ? ROCKET_TYPE_PC104 : rcktpt_type[1];
rcktpt_type[2] = modem3 ? ROCKET_TYPE_MODEM : ROCKET_TYPE_NORMAL;
rcktpt_type[2] = pc104_3[0] ? ROCKET_TYPE_PC104 : rcktpt_type[2];
rcktpt_type[3] = modem4 ? ROCKET_TYPE_MODEM : ROCKET_TYPE_NORMAL;
rcktpt_type[3] = pc104_4[0] ? ROCKET_TYPE_PC104 : rcktpt_type[3];
/*
* Set up the tty driver structure and then register this
* driver with the tty layer.
*/
rocket_driver->flags = TTY_DRIVER_DYNAMIC_DEV;
rocket_driver->name = "ttyR";
rocket_driver->driver_name = "Comtrol RocketPort";
rocket_driver->major = TTY_ROCKET_MAJOR;
rocket_driver->minor_start = 0;
rocket_driver->type = TTY_DRIVER_TYPE_SERIAL;
rocket_driver->subtype = SERIAL_TYPE_NORMAL;
rocket_driver->init_termios = tty_std_termios;
rocket_driver->init_termios.c_cflag =
B9600 | CS8 | CREAD | HUPCL | CLOCAL;
rocket_driver->init_termios.c_ispeed = 9600;
rocket_driver->init_termios.c_ospeed = 9600;
#ifdef ROCKET_SOFT_FLOW
rocket_driver->flags |= TTY_DRIVER_REAL_RAW;
#endif
tty_set_operations(rocket_driver, &rocket_ops);
ret = tty_register_driver(rocket_driver);
if (ret < 0) {
printk(KERN_ERR "Couldn't install tty RocketPort driver\n");
goto err_controller;
}
#ifdef ROCKET_DEBUG_OPEN
printk(KERN_INFO "RocketPort driver is major %d\n", rocket_driver.major);
#endif
/*
* OK, let's probe each of the controllers looking for boards. Any boards found
* will be initialized here.
*/
isa_boards_found = 0;
pci_boards_found = 0;
for (i = 0; i < NUM_BOARDS; i++) {
if (init_ISA(i))
isa_boards_found++;
}
#ifdef CONFIG_PCI
if (isa_boards_found < NUM_BOARDS)
pci_boards_found = init_PCI(isa_boards_found);
#endif
max_board = pci_boards_found + isa_boards_found;
if (max_board == 0) {
printk(KERN_ERR "No rocketport ports found; unloading driver\n");
ret = -ENXIO;
goto err_ttyu;
}
return 0;
err_ttyu:
tty_unregister_driver(rocket_driver);
err_controller:
if (controller)
release_region(controller, 4);
err_tty:
put_tty_driver(rocket_driver);
err:
return ret;
}
static void rp_cleanup_module(void)
{
int retval;
int i;
del_timer_sync(&rocket_timer);
retval = tty_unregister_driver(rocket_driver);
if (retval)
printk(KERN_ERR "Error %d while trying to unregister "
"rocketport driver\n", -retval);
for (i = 0; i < MAX_RP_PORTS; i++)
if (rp_table[i]) {
tty_unregister_device(rocket_driver, i);
tty_port_destroy(&rp_table[i]->port);
kfree(rp_table[i]);
}
put_tty_driver(rocket_driver);
for (i = 0; i < NUM_BOARDS; i++) {
if (rcktpt_io_addr[i] <= 0 || is_PCI[i])
continue;
release_region(rcktpt_io_addr[i], 64);
}
if (controller)
release_region(controller, 4);
}
/***************************************************************************
Function: sInitController
Purpose: Initialization of controller global registers and controller
structure.
Call: sInitController(CtlP,CtlNum,MudbacIO,AiopIOList,AiopIOListSize,
IRQNum,Frequency,PeriodicOnly)
CONTROLLER_T *CtlP; Ptr to controller structure
int CtlNum; Controller number
ByteIO_t MudbacIO; Mudbac base I/O address.
ByteIO_t *AiopIOList; List of I/O addresses for each AIOP.
This list must be in the order the AIOPs will be found on the
controller. Once an AIOP in the list is not found, it is
assumed that there are no more AIOPs on the controller.
int AiopIOListSize; Number of addresses in AiopIOList
int IRQNum; Interrupt Request number. Can be any of the following:
0: Disable global interrupts
3: IRQ 3
4: IRQ 4
5: IRQ 5
9: IRQ 9
10: IRQ 10
11: IRQ 11
12: IRQ 12
15: IRQ 15
Byte_t Frequency: A flag identifying the frequency
of the periodic interrupt, can be any one of the following:
FREQ_DIS - periodic interrupt disabled
FREQ_137HZ - 137 Hertz
FREQ_69HZ - 69 Hertz
FREQ_34HZ - 34 Hertz
FREQ_17HZ - 17 Hertz
FREQ_9HZ - 9 Hertz
FREQ_4HZ - 4 Hertz
If IRQNum is set to 0 the Frequency parameter is
overridden; it is forced to a value of FREQ_DIS.
int PeriodicOnly: 1 if all interrupts except the periodic
interrupt are to be blocked.
0 if both the periodic interrupt and
other channel interrupts are allowed.
If IRQNum is set to 0 the PeriodicOnly parameter is
overridden; it is forced to a value of 0.
Return: int: Number of AIOPs on the controller, or -1 if controller
initialization failed.
Comments:
If periodic interrupts are to be disabled but AIOP interrupts
are allowed, set Frequency to FREQ_DIS and PeriodicOnly to 0.
If interrupts are to be completely disabled set IRQNum to 0.
Setting Frequency to FREQ_DIS and PeriodicOnly to 1 is an
invalid combination.
This function performs initialization of global interrupt modes,
but it does not actually enable global interrupts. To enable
and disable global interrupts use functions sEnGlobalInt() and
sDisGlobalInt(). Enabling of global interrupts is normally not
done until all other initializations are complete.
Even if interrupts are globally enabled, they must also be
individually enabled for each channel that is to generate
interrupts.
Warnings: No range checking on any of the parameters is done.
No context switches are allowed while executing this function.
After this function all AIOPs on the controller are disabled,
they can be enabled with sEnAiop().
*/
static int sInitController(CONTROLLER_T * CtlP, int CtlNum, ByteIO_t MudbacIO,
ByteIO_t * AiopIOList, int AiopIOListSize,
int IRQNum, Byte_t Frequency, int PeriodicOnly)
{
int i;
ByteIO_t io;
int done;
CtlP->AiopIntrBits = aiop_intr_bits;
CtlP->AltChanRingIndicator = 0;
CtlP->CtlNum = CtlNum;
CtlP->CtlID = CTLID_0001; /* controller release 1 */
CtlP->BusType = isISA;
CtlP->MBaseIO = MudbacIO;
CtlP->MReg1IO = MudbacIO + 1;
CtlP->MReg2IO = MudbacIO + 2;
CtlP->MReg3IO = MudbacIO + 3;
#if 1
CtlP->MReg2 = 0; /* interrupt disable */
CtlP->MReg3 = 0; /* no periodic interrupts */
#else
if (sIRQMap[IRQNum] == 0) { /* interrupts globally disabled */
CtlP->MReg2 = 0; /* interrupt disable */
CtlP->MReg3 = 0; /* no periodic interrupts */
} else {
CtlP->MReg2 = sIRQMap[IRQNum]; /* set IRQ number */
CtlP->MReg3 = Frequency; /* set frequency */
if (PeriodicOnly) { /* periodic interrupt only */
CtlP->MReg3 |= PERIODIC_ONLY;
}
}
#endif
sOutB(CtlP->MReg2IO, CtlP->MReg2);
sOutB(CtlP->MReg3IO, CtlP->MReg3);
sControllerEOI(CtlP); /* clear EOI if warm init */
/* Init AIOPs */
CtlP->NumAiop = 0;
for (i = done = 0; i < AiopIOListSize; i++) {
io = AiopIOList[i];
CtlP->AiopIO[i] = (WordIO_t) io;
CtlP->AiopIntChanIO[i] = io + _INT_CHAN;
sOutB(CtlP->MReg2IO, CtlP->MReg2 | (i & 0x03)); /* AIOP index */
sOutB(MudbacIO, (Byte_t) (io >> 6)); /* set up AIOP I/O in MUDBAC */
if (done)
continue;
sEnAiop(CtlP, i); /* enable the AIOP */
CtlP->AiopID[i] = sReadAiopID(io); /* read AIOP ID */
if (CtlP->AiopID[i] == AIOPID_NULL) /* if AIOP does not exist */
done = 1; /* done looking for AIOPs */
else {
CtlP->AiopNumChan[i] = sReadAiopNumChan((WordIO_t) io); /* num channels in AIOP */
sOutW((WordIO_t) io + _INDX_ADDR, _CLK_PRE); /* clock prescaler */
sOutB(io + _INDX_DATA, sClockPrescale);
CtlP->NumAiop++; /* bump count of AIOPs */
}
sDisAiop(CtlP, i); /* disable AIOP */
}
if (CtlP->NumAiop == 0)
return (-1);
else
return (CtlP->NumAiop);
}
/***************************************************************************
Function: sReadAiopID
Purpose: Read the AIOP identification number directly from an AIOP.
Call: sReadAiopID(io)
ByteIO_t io: AIOP base I/O address
Return: int: Flag AIOPID_XXXX if a valid AIOP is found, where X
is replaced by an identifying number.
Flag AIOPID_NULL if no valid AIOP is found
Warnings: No context switches are allowed while executing this function.
*/
static int sReadAiopID(ByteIO_t io)
{
Byte_t AiopID; /* ID byte from AIOP */
sOutB(io + _CMD_REG, RESET_ALL); /* reset AIOP */
sOutB(io + _CMD_REG, 0x0);
AiopID = sInW(io + _CHN_STAT0) & 0x07;
if (AiopID == 0x06)
return (1);
else /* AIOP does not exist */
return (-1);
}
/***************************************************************************
Function: sReadAiopNumChan
Purpose: Read the number of channels available in an AIOP directly from
an AIOP.
Call: sReadAiopNumChan(io)
WordIO_t io: AIOP base I/O address
Return: int: The number of channels available
Comments: The number of channels is determined by write/reads from identical
offsets within the SRAM address spaces for channels 0 and 4.
If the channel 4 space is mirrored to channel 0 it is a 4 channel
AIOP, otherwise it is an 8 channel.
Warnings: No context switches are allowed while executing this function.
*/
static int sReadAiopNumChan(WordIO_t io)
{
Word_t x;
static Byte_t R[4] = { 0x00, 0x00, 0x34, 0x12 };
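/*
 * Index writes are 4 bytes: bytes 0-1 select the SRAM address
 * (little-endian) and bytes 2-3 carry the data word, here 0x1234.
 */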
/* write to chan 0 SRAM */
out32((DWordIO_t) io + _INDX_ADDR, R);
sOutW(io + _INDX_ADDR, 0); /* read from SRAM, chan 0 */
x = sInW(io + _INDX_DATA);
sOutW(io + _INDX_ADDR, 0x4000); /* read from SRAM, chan 4 */
if (x != sInW(io + _INDX_DATA)) /* if different must be 8 chan */
return (8);
else
return (4);
}
/***************************************************************************
Function: sInitChan
Purpose: Initialization of a channel and channel structure
Call: sInitChan(CtlP,ChP,AiopNum,ChanNum)
CONTROLLER_T *CtlP; Ptr to controller structure
CHANNEL_T *ChP; Ptr to channel structure
int AiopNum; AIOP number within controller
int ChanNum; Channel number within AIOP
Return: int: 1 if initialization succeeded, 0 if it fails because channel
number exceeds number of channels available in AIOP.
Comments: This function must be called before a channel can be used.
Warnings: No range checking on any of the parameters is done.
No context switches are allowed while executing this function.
*/
static int sInitChan(CONTROLLER_T * CtlP, CHANNEL_T * ChP, int AiopNum,
int ChanNum)
{
int i;
WordIO_t AiopIO;
WordIO_t ChIOOff;
Byte_t *ChR;
Word_t ChOff;
static Byte_t R[4];
int brd9600;
if (ChanNum >= CtlP->AiopNumChan[AiopNum])
return 0; /* exceeds num chans in AIOP */
/* Channel, AIOP, and controller identifiers */
ChP->CtlP = CtlP;
ChP->ChanID = CtlP->AiopID[AiopNum];
ChP->AiopNum = AiopNum;
ChP->ChanNum = ChanNum;
/* Global direct addresses */
AiopIO = CtlP->AiopIO[AiopNum];
ChP->Cmd = (ByteIO_t) AiopIO + _CMD_REG;
ChP->IntChan = (ByteIO_t) AiopIO + _INT_CHAN;
ChP->IntMask = (ByteIO_t) AiopIO + _INT_MASK;
ChP->IndexAddr = (DWordIO_t) AiopIO + _INDX_ADDR;
ChP->IndexData = AiopIO + _INDX_DATA;
/* Channel direct addresses */
ChIOOff = AiopIO + ChP->ChanNum * 2;
ChP->TxRxData = ChIOOff + _TD0;
ChP->ChanStat = ChIOOff + _CHN_STAT0;
ChP->TxRxCount = ChIOOff + _FIFO_CNT0;
ChP->IntID = (ByteIO_t) AiopIO + ChP->ChanNum + _INT_ID0;
/* Initialize the channel from the RData array */
for (i = 0; i < RDATASIZE; i += 4) {
R[0] = RData[i];
R[1] = RData[i + 1] + 0x10 * ChanNum;
R[2] = RData[i + 2];
R[3] = RData[i + 3];
out32(ChP->IndexAddr, R);
}
ChR = ChP->R;
for (i = 0; i < RREGDATASIZE; i += 4) {
ChR[i] = RRegData[i];
ChR[i + 1] = RRegData[i + 1] + 0x10 * ChanNum;
ChR[i + 2] = RRegData[i + 2];
ChR[i + 3] = RRegData[i + 3];
}
/* Indexed registers */
ChOff = (Word_t) ChanNum * 0x1000;
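/*
 * Baud divisor for 9600 bps is rp_baud_base / 9600 - 1:
 * 460800 / 9600 - 1 = 47 with the mod 4 prescale (0x14),
 * 230400 / 9600 - 1 = 23 with the mod 9 prescale (0x19).
 */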
if (sClockPrescale == 0x14)
brd9600 = 47;
else
brd9600 = 23;
ChP->BaudDiv[0] = (Byte_t) (ChOff + _BAUD);
ChP->BaudDiv[1] = (Byte_t) ((ChOff + _BAUD) >> 8);
ChP->BaudDiv[2] = (Byte_t) brd9600;
ChP->BaudDiv[3] = (Byte_t) (brd9600 >> 8);
out32(ChP->IndexAddr, ChP->BaudDiv);
ChP->TxControl[0] = (Byte_t) (ChOff + _TX_CTRL);
ChP->TxControl[1] = (Byte_t) ((ChOff + _TX_CTRL) >> 8);
ChP->TxControl[2] = 0;
ChP->TxControl[3] = 0;
out32(ChP->IndexAddr, ChP->TxControl);
ChP->RxControl[0] = (Byte_t) (ChOff + _RX_CTRL);
ChP->RxControl[1] = (Byte_t) ((ChOff + _RX_CTRL) >> 8);
ChP->RxControl[2] = 0;
ChP->RxControl[3] = 0;
out32(ChP->IndexAddr, ChP->RxControl);
ChP->TxEnables[0] = (Byte_t) (ChOff + _TX_ENBLS);
ChP->TxEnables[1] = (Byte_t) ((ChOff + _TX_ENBLS) >> 8);
ChP->TxEnables[2] = 0;
ChP->TxEnables[3] = 0;
out32(ChP->IndexAddr, ChP->TxEnables);
ChP->TxCompare[0] = (Byte_t) (ChOff + _TXCMP1);
ChP->TxCompare[1] = (Byte_t) ((ChOff + _TXCMP1) >> 8);
ChP->TxCompare[2] = 0;
ChP->TxCompare[3] = 0;
out32(ChP->IndexAddr, ChP->TxCompare);
ChP->TxReplace1[0] = (Byte_t) (ChOff + _TXREP1B1);
ChP->TxReplace1[1] = (Byte_t) ((ChOff + _TXREP1B1) >> 8);
ChP->TxReplace1[2] = 0;
ChP->TxReplace1[3] = 0;
out32(ChP->IndexAddr, ChP->TxReplace1);
ChP->TxReplace2[0] = (Byte_t) (ChOff + _TXREP2);
ChP->TxReplace2[1] = (Byte_t) ((ChOff + _TXREP2) >> 8);
ChP->TxReplace2[2] = 0;
ChP->TxReplace2[3] = 0;
out32(ChP->IndexAddr, ChP->TxReplace2);
ChP->TxFIFOPtrs = ChOff + _TXF_OUTP;
ChP->TxFIFO = ChOff + _TX_FIFO;
sOutB(ChP->Cmd, (Byte_t) ChanNum | RESTXFCNT); /* apply reset Tx FIFO count */
sOutB(ChP->Cmd, (Byte_t) ChanNum); /* remove reset Tx FIFO count */
sOutW((WordIO_t) ChP->IndexAddr, ChP->TxFIFOPtrs); /* clear Tx in/out ptrs */
sOutW(ChP->IndexData, 0);
ChP->RxFIFOPtrs = ChOff + _RXF_OUTP;
ChP->RxFIFO = ChOff + _RX_FIFO;
sOutB(ChP->Cmd, (Byte_t) ChanNum | RESRXFCNT); /* apply reset Rx FIFO count */
sOutB(ChP->Cmd, (Byte_t) ChanNum); /* remove reset Rx FIFO count */
sOutW((WordIO_t) ChP->IndexAddr, ChP->RxFIFOPtrs); /* clear Rx out ptr */
sOutW(ChP->IndexData, 0);
sOutW((WordIO_t) ChP->IndexAddr, ChP->RxFIFOPtrs + 2); /* clear Rx in ptr */
sOutW(ChP->IndexData, 0);
ChP->TxPrioCnt = ChOff + _TXP_CNT;
sOutW((WordIO_t) ChP->IndexAddr, ChP->TxPrioCnt);
sOutB(ChP->IndexData, 0);
ChP->TxPrioPtr = ChOff + _TXP_PNTR;
sOutW((WordIO_t) ChP->IndexAddr, ChP->TxPrioPtr);
sOutB(ChP->IndexData, 0);
ChP->TxPrioBuf = ChOff + _TXP_BUF;
sEnRxProcessor(ChP); /* start the Rx processor */
return 1;
}
/***************************************************************************
Function: sStopRxProcessor
Purpose: Stop the receive processor from processing a channel.
Call: sStopRxProcessor(ChP)
CHANNEL_T *ChP; Ptr to channel structure
Comments: The receive processor can be started again with sStartRxProcessor().
This function causes the receive processor to skip over the
stopped channel. It does not stop it from processing other channels.
Warnings: No context switches are allowed while executing this function.
Do not leave the receive processor stopped for more than one
character time.
After calling this function a delay of 4 uS is required to ensure
that the receive processor is no longer processing this channel.
*/
static void sStopRxProcessor(CHANNEL_T * ChP)
{
Byte_t R[4];
R[0] = ChP->R[0];
R[1] = ChP->R[1];
R[2] = 0x0a;
R[3] = ChP->R[3];
out32(ChP->IndexAddr, R);
}
/***************************************************************************
Function: sFlushRxFIFO
Purpose: Flush the Rx FIFO
Call: sFlushRxFIFO(ChP)
CHANNEL_T *ChP; Ptr to channel structure
Return: void
Comments: To prevent data from being enqueued or dequeued in the Rx FIFO
while it is being flushed the Rx FIFO is temporarily
disabled. After disabling it a 2 uS delay is done
to allow the receive processor to stop before the FIFO
counters and pointers are cleared. These items are handled
inside this function.
Warnings: No context switches are allowed while executing this function.
*/
static void sFlushRxFIFO(CHANNEL_T * ChP)
{
int i;
Byte_t Ch; /* channel number within AIOP */
int RxFIFOEnabled; /* 1 if Rx FIFO enabled */
if (sGetRxCnt(ChP) == 0) /* Rx FIFO empty */
return; /* don't need to flush */
RxFIFOEnabled = 0;
if (ChP->R[0x32] == 0x08) { /* Rx FIFO is enabled */
RxFIFOEnabled = 1;
sDisRxFIFO(ChP); /* disable it */
for (i = 0; i < 2000 / 200; i++) /* delay 2 uS to allow proc to disable FIFO */
sInB(ChP->IntChan); /* depends on bus i/o timing */
}
sGetChanStatus(ChP); /* clear any pending Rx errors in chan stat */
Ch = (Byte_t) sGetChanNum(ChP);
sOutB(ChP->Cmd, Ch | RESRXFCNT); /* apply reset Rx FIFO count */
sOutB(ChP->Cmd, Ch); /* remove reset Rx FIFO count */
sOutW((WordIO_t) ChP->IndexAddr, ChP->RxFIFOPtrs); /* clear Rx out ptr */
sOutW(ChP->IndexData, 0);
sOutW((WordIO_t) ChP->IndexAddr, ChP->RxFIFOPtrs + 2); /* clear Rx in ptr */
sOutW(ChP->IndexData, 0);
if (RxFIFOEnabled)
sEnRxFIFO(ChP); /* enable Rx FIFO */
}
/***************************************************************************
Function: sFlushTxFIFO
Purpose: Flush the Tx FIFO
Call: sFlushTxFIFO(ChP)
CHANNEL_T *ChP; Ptr to channel structure
Return: void
Comments: To prevent data from being enqueued or dequeued in the Tx FIFO
while it is being flushed the receive processor is stopped
and the transmitter is disabled. After these operations a
4 uS delay is done before clearing the pointers to allow
the receive processor to stop. These items are handled inside
this function.
Warnings: No context switches are allowed while executing this function.
*/
static void sFlushTxFIFO(CHANNEL_T * ChP)
{
int i;
Byte_t Ch; /* channel number within AIOP */
int TxEnabled; /* 1 if transmitter enabled */
if (sGetTxCnt(ChP) == 0) /* Tx FIFO empty */
return; /* don't need to flush */
TxEnabled = 0;
if (ChP->TxControl[3] & TX_ENABLE) {
TxEnabled = 1;
sDisTransmit(ChP); /* disable transmitter */
}
sStopRxProcessor(ChP); /* stop Rx processor */
for (i = 0; i < 4000 / 200; i++) /* delay 4 uS to allow proc to stop */
sInB(ChP->IntChan); /* depends on bus i/o timing */
Ch = (Byte_t) sGetChanNum(ChP);
sOutB(ChP->Cmd, Ch | RESTXFCNT); /* apply reset Tx FIFO count */
sOutB(ChP->Cmd, Ch); /* remove reset Tx FIFO count */
sOutW((WordIO_t) ChP->IndexAddr, ChP->TxFIFOPtrs); /* clear Tx in/out ptrs */
sOutW(ChP->IndexData, 0);
if (TxEnabled)
sEnTransmit(ChP); /* enable transmitter */
sStartRxProcessor(ChP); /* restart Rx processor */
}
/***************************************************************************
Function: sWriteTxPrioByte
Purpose: Write a byte of priority transmit data to a channel
Call: sWriteTxPrioByte(ChP,Data)
CHANNEL_T *ChP; Ptr to channel structure
Byte_t Data; The transmit data byte
Return: int: 1 if the byte is successfully written, otherwise 0.
Comments: The priority byte is transmitted before any data in the Tx FIFO.
Warnings: No context switches are allowed while executing this function.
*/
static int sWriteTxPrioByte(CHANNEL_T * ChP, Byte_t Data)
{
Byte_t DWBuf[4]; /* buffer for double word writes */
Word_t *WordPtr; /* must be far because Win SS != DS */
register DWordIO_t IndexAddr;
if (sGetTxCnt(ChP) > 1) { /* write it to Tx priority buffer */
IndexAddr = ChP->IndexAddr;
sOutW((WordIO_t) IndexAddr, ChP->TxPrioCnt); /* get priority buffer status */
if (sInB((ByteIO_t) ChP->IndexData) & PRI_PEND) /* priority buffer busy */
return (0); /* nothing sent */
WordPtr = (Word_t *) (&DWBuf[0]);
*WordPtr = ChP->TxPrioBuf; /* data byte address */
DWBuf[2] = Data; /* data byte value */
out32(IndexAddr, DWBuf); /* write it out */
*WordPtr = ChP->TxPrioCnt; /* Tx priority count address */
DWBuf[2] = PRI_PEND + 1; /* indicate 1 byte pending */
DWBuf[3] = 0; /* priority buffer pointer */
out32(IndexAddr, DWBuf); /* write it out */
} else { /* write it to Tx FIFO */
sWriteTxByte(sGetTxRxDataIO(ChP), Data);
}
return (1); /* 1 byte sent */
}
/***************************************************************************
Function: sEnInterrupts
Purpose: Enable one or more interrupts for a channel
Call: sEnInterrupts(ChP,Flags)
CHANNEL_T *ChP; Ptr to channel structure
Word_t Flags: Interrupt enable flags, can be any combination
of the following flags:
TXINT_EN: Interrupt on Tx FIFO empty
RXINT_EN: Interrupt on Rx FIFO at trigger level (see
sSetRxTrigger())
SRCINT_EN: Interrupt on SRC (Special Rx Condition)
MCINT_EN: Interrupt on modem input change
CHANINT_EN: Allow channel interrupt signal to the AIOP's
Interrupt Channel Register.
Return: void
Comments: If an interrupt enable flag is set in Flags, that interrupt will be
enabled. If an interrupt enable flag is not set in Flags, that
interrupt will not be changed. Interrupts can be disabled with
function sDisInterrupts().
This function sets the appropriate bit for the channel in the AIOP's
Interrupt Mask Register if the CHANINT_EN flag is set. This allows
this channel's bit to be set in the AIOP's Interrupt Channel Register.
Interrupts must also be globally enabled before channel interrupts
will be passed on to the host. This is done with function
sEnGlobalInt().
In some cases it may be desirable to disable interrupts globally but
enable channel interrupts. This would allow the global interrupt
status register to be used to determine which AIOPs need service.
*/
static void sEnInterrupts(CHANNEL_T * ChP, Word_t Flags)
{
Byte_t Mask; /* Interrupt Mask Register */
ChP->RxControl[2] |=
((Byte_t) Flags & (RXINT_EN | SRCINT_EN | MCINT_EN));
out32(ChP->IndexAddr, ChP->RxControl);
ChP->TxControl[2] |= ((Byte_t) Flags & TXINT_EN);
out32(ChP->IndexAddr, ChP->TxControl);
if (Flags & CHANINT_EN) {
Mask = sInB(ChP->IntMask) | sBitMapSetTbl[ChP->ChanNum];
sOutB(ChP->IntMask, Mask);
}
}
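/*
 * Minimal usage sketch (assuming CtlP/ChP were set up with
 * sPCIInitController()/sInitChan()): enable receive and channel
 * interrupts for a channel, then turn on global interrupts last.
 *
 *	sEnInterrupts(ChP, RXINT_EN | CHANINT_EN);
 *	sEnGlobalInt(CtlP);
 */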
/***************************************************************************
Function: sDisInterrupts
Purpose: Disable one or more interrupts for a channel
Call: sDisInterrupts(ChP,Flags)
CHANNEL_T *ChP; Ptr to channel structure
Word_t Flags: Interrupt flags, can be any combination
of the following flags:
TXINT_EN: Interrupt on Tx FIFO empty
RXINT_EN: Interrupt on Rx FIFO at trigger level (see
sSetRxTrigger())
SRCINT_EN: Interrupt on SRC (Special Rx Condition)
MCINT_EN: Interrupt on modem input change
CHANINT_EN: Disable channel interrupt signal to the
AIOP's Interrupt Channel Register.
Return: void
Comments: If an interrupt flag is set in Flags, that interrupt will be
disabled. If an interrupt flag is not set in Flags, that
interrupt will not be changed. Interrupts can be enabled with
function sEnInterrupts().
This function clears the appropriate bit for the channel in the AIOP's
Interrupt Mask Register if the CHANINT_EN flag is set. This blocks
this channel's bit from being set in the AIOP's Interrupt Channel
Register.
*/
static void sDisInterrupts(CHANNEL_T * ChP, Word_t Flags)
{
Byte_t Mask; /* Interrupt Mask Register */
ChP->RxControl[2] &=
~((Byte_t) Flags & (RXINT_EN | SRCINT_EN | MCINT_EN));
out32(ChP->IndexAddr, ChP->RxControl);
ChP->TxControl[2] &= ~((Byte_t) Flags & TXINT_EN);
out32(ChP->IndexAddr, ChP->TxControl);
if (Flags & CHANINT_EN) {
Mask = sInB(ChP->IntMask) & sBitMapClrTbl[ChP->ChanNum];
sOutB(ChP->IntMask, Mask);
}
}
static void sSetInterfaceMode(CHANNEL_T * ChP, Byte_t mode)
{
sOutB(ChP->CtlP->AiopIO[2], (mode & 0x18) | ChP->ChanNum);
}
/*
* Not an official SSCI function, but how to reset RocketModems.
* ISA bus version
*/
static void sModemReset(CONTROLLER_T * CtlP, int chan, int on)
{
ByteIO_t addr;
Byte_t val;
addr = CtlP->AiopIO[0] + 0x400;
val = sInB(CtlP->MReg3IO);
/* if AIOP[1] is not enabled, enable it */
if ((val & 2) == 0) {
val = sInB(CtlP->MReg2IO);
sOutB(CtlP->MReg2IO, (val & 0xfc) | (1 & 0x03));
sOutB(CtlP->MBaseIO, (unsigned char) (addr >> 6));
}
sEnAiop(CtlP, 1);
if (!on)
addr += 8;
sOutB(addr + chan, 0); /* apply or remove reset */
sDisAiop(CtlP, 1);
}
/*
* Not an official SSCI function, but how to reset RocketModems.
* PCI bus version
*/
static void sPCIModemReset(CONTROLLER_T * CtlP, int chan, int on)
{
ByteIO_t addr;
addr = CtlP->AiopIO[0] + 0x40; /* 2nd AIOP */
if (!on)
addr += 8;
sOutB(addr + chan, 0); /* apply or remove reset */
}
/* Returns the line number given the controller (board), aiop and channel number */
static unsigned char GetLineNumber(int ctrl, int aiop, int ch)
{
return lineNumbers[(ctrl << 5) | (aiop << 3) | ch];
}
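/*
 * Both line-number helpers pack board, AIOP and channel into a single
 * index: (ctrl << 5) | (aiop << 3) | ch leaves 3 bits for the channel
 * (8 per AIOP) and 2 bits for the AIOP (4 per board).
 */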
/*
* Stores the line number associated with a given controller (board), aiop
* and channel number.
* Returns: The line number assigned
*/
static unsigned char SetLineNumber(int ctrl, int aiop, int ch)
{
lineNumbers[(ctrl << 5) | (aiop << 3) | ch] = nextLineNumber++;
return (nextLineNumber - 1);
}
| gpl-2.0 |
sour12/iamroot | drivers/mtd/nand/bcm47xxnflash/main.c | 2305 | 2317 | /*
* BCM47XX NAND flash driver
*
* Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include "bcm47xxnflash.h"
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/bcma/bcma.h>
MODULE_DESCRIPTION("NAND flash driver for BCMA bus");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Rafał Miłecki");
static const char *probes[] = { "bcm47xxpart", NULL };
static int bcm47xxnflash_probe(struct platform_device *pdev)
{
struct bcma_nflash *nflash = dev_get_platdata(&pdev->dev);
struct bcm47xxnflash *b47n;
int err = 0;
b47n = kzalloc(sizeof(*b47n), GFP_KERNEL);
if (!b47n) {
err = -ENOMEM;
goto out;
}
b47n->nand_chip.priv = b47n;
b47n->mtd.owner = THIS_MODULE;
b47n->mtd.priv = &b47n->nand_chip; /* Required */
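/* nflash is embedded in struct bcma_drv_cc; recover the parent core */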
b47n->cc = container_of(nflash, struct bcma_drv_cc, nflash);
if (b47n->cc->core->bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) {
err = bcm47xxnflash_ops_bcm4706_init(b47n);
} else {
pr_err("Device not supported\n");
err = -ENOTSUPP;
}
if (err) {
pr_err("Initialization failed: %d\n", err);
goto err_init;
}
err = mtd_device_parse_register(&b47n->mtd, probes, NULL, NULL, 0);
if (err) {
pr_err("Failed to register MTD device: %d\n", err);
goto err_dev_reg;
}
return 0;
err_dev_reg:
err_init:
kfree(b47n);
out:
return err;
}
static int bcm47xxnflash_remove(struct platform_device *pdev)
{
struct bcma_nflash *nflash = dev_get_platdata(&pdev->dev);
if (nflash->mtd)
mtd_device_unregister(nflash->mtd);
return 0;
}
static struct platform_driver bcm47xxnflash_driver = {
.probe = bcm47xxnflash_probe,
.remove = bcm47xxnflash_remove,
.driver = {
.name = "bcma_nflash",
.owner = THIS_MODULE,
},
};
static int __init bcm47xxnflash_init(void)
{
int err;
err = platform_driver_register(&bcm47xxnflash_driver);
if (err)
pr_err("Failed to register bcm47xx nand flash driver: %d\n",
err);
return err;
}
static void __exit bcm47xxnflash_exit(void)
{
platform_driver_unregister(&bcm47xxnflash_driver);
}
module_init(bcm47xxnflash_init);
module_exit(bcm47xxnflash_exit);
| gpl-2.0 |
sudeepdutt/mic | drivers/hid/hid-wiimote-debug.c | 2561 | 5062 | /*
* Debug support for HID Nintendo Wii / Wii U peripherals
* Copyright (c) 2011-2013 David Herrmann <dh.herrmann@gmail.com>
*/
/*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include "hid-wiimote.h"
struct wiimote_debug {
struct wiimote_data *wdata;
struct dentry *eeprom;
struct dentry *drm;
};
static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
loff_t *off)
{
struct wiimote_debug *dbg = f->private_data;
struct wiimote_data *wdata = dbg->wdata;
unsigned long flags;
ssize_t ret;
char buf[16];
__u16 size = 0;
if (s == 0)
return -EINVAL;
if (*off > 0xffffff)
return 0;
if (s > 16)
s = 16;
ret = wiimote_cmd_acquire(wdata);
if (ret)
return ret;
spin_lock_irqsave(&wdata->state.lock, flags);
wdata->state.cmd_read_size = s;
wdata->state.cmd_read_buf = buf;
wiimote_cmd_set(wdata, WIIPROTO_REQ_RMEM, *off & 0xffff);
wiiproto_req_reeprom(wdata, *off, s);
spin_unlock_irqrestore(&wdata->state.lock, flags);
ret = wiimote_cmd_wait(wdata);
if (!ret)
size = wdata->state.cmd_read_size;
spin_lock_irqsave(&wdata->state.lock, flags);
wdata->state.cmd_read_buf = NULL;
spin_unlock_irqrestore(&wdata->state.lock, flags);
wiimote_cmd_release(wdata);
if (ret)
return ret;
else if (size == 0)
return -EIO;
if (copy_to_user(u, buf, size))
return -EFAULT;
*off += size;
ret = size;
return ret;
}
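/*
 * Reads go through the command interface at most 16 bytes at a time,
 * so a hypothetical dump from userspace (assuming debugfs is mounted
 * at /sys/kernel/debug) might look like:
 *
 *	dd if=/sys/kernel/debug/hid/<device>/eeprom bs=16 count=4
 */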
static const struct file_operations wiidebug_eeprom_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = wiidebug_eeprom_read,
.llseek = generic_file_llseek,
};
static const char *wiidebug_drmmap[] = {
[WIIPROTO_REQ_NULL] = "NULL",
[WIIPROTO_REQ_DRM_K] = "K",
[WIIPROTO_REQ_DRM_KA] = "KA",
[WIIPROTO_REQ_DRM_KE] = "KE",
[WIIPROTO_REQ_DRM_KAI] = "KAI",
[WIIPROTO_REQ_DRM_KEE] = "KEE",
[WIIPROTO_REQ_DRM_KAE] = "KAE",
[WIIPROTO_REQ_DRM_KIE] = "KIE",
[WIIPROTO_REQ_DRM_KAIE] = "KAIE",
[WIIPROTO_REQ_DRM_E] = "E",
[WIIPROTO_REQ_DRM_SKAI1] = "SKAI1",
[WIIPROTO_REQ_DRM_SKAI2] = "SKAI2",
[WIIPROTO_REQ_MAX] = NULL
};
static int wiidebug_drm_show(struct seq_file *f, void *p)
{
struct wiimote_debug *dbg = f->private;
const char *str = NULL;
unsigned long flags;
__u8 drm;
spin_lock_irqsave(&dbg->wdata->state.lock, flags);
drm = dbg->wdata->state.drm;
spin_unlock_irqrestore(&dbg->wdata->state.lock, flags);
if (drm < WIIPROTO_REQ_MAX)
str = wiidebug_drmmap[drm];
if (!str)
str = "unknown";
seq_printf(f, "%s\n", str);
return 0;
}
static int wiidebug_drm_open(struct inode *i, struct file *f)
{
return single_open(f, wiidebug_drm_show, i->i_private);
}
static ssize_t wiidebug_drm_write(struct file *f, const char __user *u,
size_t s, loff_t *off)
{
struct seq_file *sf = f->private_data;
struct wiimote_debug *dbg = sf->private;
unsigned long flags;
char buf[16];
ssize_t len;
int i;
if (s == 0)
return -EINVAL;
len = min((size_t) 15, s);
if (copy_from_user(buf, u, len))
return -EFAULT;
buf[len] = 0;
for (i = 0; i < WIIPROTO_REQ_MAX; ++i) {
if (!wiidebug_drmmap[i])
continue;
if (!strcasecmp(buf, wiidebug_drmmap[i]))
break;
}
if (i == WIIPROTO_REQ_MAX)
i = simple_strtoul(buf, NULL, 16);
spin_lock_irqsave(&dbg->wdata->state.lock, flags);
dbg->wdata->state.flags &= ~WIIPROTO_FLAG_DRM_LOCKED;
wiiproto_req_drm(dbg->wdata, (__u8) i);
if (i != WIIPROTO_REQ_NULL)
dbg->wdata->state.flags |= WIIPROTO_FLAG_DRM_LOCKED;
spin_unlock_irqrestore(&dbg->wdata->state.lock, flags);
return len;
}
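/*
 * The write accepts either a name from wiidebug_drmmap[] or a hex mode
 * number; for example (assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *	echo KAIE > /sys/kernel/debug/hid/<device>/drm
 */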
static const struct file_operations wiidebug_drm_fops = {
.owner = THIS_MODULE,
.open = wiidebug_drm_open,
.read = seq_read,
.llseek = seq_lseek,
.write = wiidebug_drm_write,
.release = single_release,
};
int wiidebug_init(struct wiimote_data *wdata)
{
struct wiimote_debug *dbg;
unsigned long flags;
int ret = -ENOMEM;
dbg = kzalloc(sizeof(*dbg), GFP_KERNEL);
if (!dbg)
return -ENOMEM;
dbg->wdata = wdata;
dbg->eeprom = debugfs_create_file("eeprom", S_IRUSR,
dbg->wdata->hdev->debug_dir, dbg, &wiidebug_eeprom_fops);
if (!dbg->eeprom)
goto err;
dbg->drm = debugfs_create_file("drm", S_IRUSR,
dbg->wdata->hdev->debug_dir, dbg, &wiidebug_drm_fops);
if (!dbg->drm)
goto err_drm;
spin_lock_irqsave(&wdata->state.lock, flags);
wdata->debug = dbg;
spin_unlock_irqrestore(&wdata->state.lock, flags);
return 0;
err_drm:
debugfs_remove(dbg->eeprom);
err:
kfree(dbg);
return ret;
}
void wiidebug_deinit(struct wiimote_data *wdata)
{
struct wiimote_debug *dbg = wdata->debug;
unsigned long flags;
if (!dbg)
return;
spin_lock_irqsave(&wdata->state.lock, flags);
wdata->debug = NULL;
spin_unlock_irqrestore(&wdata->state.lock, flags);
debugfs_remove(dbg->drm);
debugfs_remove(dbg->eeprom);
kfree(dbg);
}
| gpl-2.0 |
RealVNC/android-kernel-omap | arch/arm/mach-davinci/da830.c | 2817 | 39835 | /*
* TI DA830/OMAP L137 chip specific setup
*
* Author: Mark A. Greer <mgreer@mvista.com>
*
* 2009 (c) MontaVista Software, Inc. This file is licensed under
* the terms of the GNU General Public License version 2. This program
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
#include <linux/init.h>
#include <linux/clk.h>
#include <asm/mach/map.h>
#include <mach/psc.h>
#include <mach/irqs.h>
#include <mach/cputype.h>
#include <mach/common.h>
#include <mach/time.h>
#include <mach/da8xx.h>
#include <mach/gpio.h>
#include "clock.h"
#include "mux.h"
/* Offsets of the 8 compare registers on the da830 */
#define DA830_CMP12_0 0x60
#define DA830_CMP12_1 0x64
#define DA830_CMP12_2 0x68
#define DA830_CMP12_3 0x6c
#define DA830_CMP12_4 0x70
#define DA830_CMP12_5 0x74
#define DA830_CMP12_6 0x78
#define DA830_CMP12_7 0x7c
#define DA830_REF_FREQ 24000000
static struct pll_data pll0_data = {
.num = 1,
.phys_base = DA8XX_PLL0_BASE,
.flags = PLL_HAS_PREDIV | PLL_HAS_POSTDIV,
};
static struct clk ref_clk = {
.name = "ref_clk",
.rate = DA830_REF_FREQ,
};
static struct clk pll0_clk = {
.name = "pll0",
.parent = &ref_clk,
.pll_data = &pll0_data,
.flags = CLK_PLL,
};
static struct clk pll0_aux_clk = {
.name = "pll0_aux_clk",
.parent = &pll0_clk,
.flags = CLK_PLL | PRE_PLL,
};
static struct clk pll0_sysclk2 = {
.name = "pll0_sysclk2",
.parent = &pll0_clk,
.flags = CLK_PLL,
.div_reg = PLLDIV2,
};
static struct clk pll0_sysclk3 = {
.name = "pll0_sysclk3",
.parent = &pll0_clk,
.flags = CLK_PLL,
.div_reg = PLLDIV3,
};
static struct clk pll0_sysclk4 = {
.name = "pll0_sysclk4",
.parent = &pll0_clk,
.flags = CLK_PLL,
.div_reg = PLLDIV4,
};
static struct clk pll0_sysclk5 = {
.name = "pll0_sysclk5",
.parent = &pll0_clk,
.flags = CLK_PLL,
.div_reg = PLLDIV5,
};
static struct clk pll0_sysclk6 = {
.name = "pll0_sysclk6",
.parent = &pll0_clk,
.flags = CLK_PLL,
.div_reg = PLLDIV6,
};
static struct clk pll0_sysclk7 = {
.name = "pll0_sysclk7",
.parent = &pll0_clk,
.flags = CLK_PLL,
.div_reg = PLLDIV7,
};
static struct clk i2c0_clk = {
.name = "i2c0",
.parent = &pll0_aux_clk,
};
static struct clk timerp64_0_clk = {
.name = "timer0",
.parent = &pll0_aux_clk,
};
static struct clk timerp64_1_clk = {
.name = "timer1",
.parent = &pll0_aux_clk,
};
static struct clk arm_rom_clk = {
.name = "arm_rom",
.parent = &pll0_sysclk2,
.lpsc = DA8XX_LPSC0_ARM_RAM_ROM,
.flags = ALWAYS_ENABLED,
};
static struct clk scr0_ss_clk = {
.name = "scr0_ss",
.parent = &pll0_sysclk2,
.lpsc = DA8XX_LPSC0_SCR0_SS,
.flags = ALWAYS_ENABLED,
};
static struct clk scr1_ss_clk = {
.name = "scr1_ss",
.parent = &pll0_sysclk2,
.lpsc = DA8XX_LPSC0_SCR1_SS,
.flags = ALWAYS_ENABLED,
};
static struct clk scr2_ss_clk = {
.name = "scr2_ss",
.parent = &pll0_sysclk2,
.lpsc = DA8XX_LPSC0_SCR2_SS,
.flags = ALWAYS_ENABLED,
};
static struct clk dmax_clk = {
.name = "dmax",
.parent = &pll0_sysclk2,
.lpsc = DA8XX_LPSC0_PRUSS,
.flags = ALWAYS_ENABLED,
};
static struct clk tpcc_clk = {
.name = "tpcc",
.parent = &pll0_sysclk2,
.lpsc = DA8XX_LPSC0_TPCC,
.flags = ALWAYS_ENABLED | CLK_PSC,
};
static struct clk tptc0_clk = {
.name = "tptc0",
.parent = &pll0_sysclk2,
.lpsc = DA8XX_LPSC0_TPTC0,
.flags = ALWAYS_ENABLED,
};
static struct clk tptc1_clk = {
.name = "tptc1",
.parent = &pll0_sysclk2,
.lpsc = DA8XX_LPSC0_TPTC1,
.flags = ALWAYS_ENABLED,
};
static struct clk mmcsd_clk = {
.name = "mmcsd",
.parent = &pll0_sysclk2,
.lpsc = DA8XX_LPSC0_MMC_SD,
};
static struct clk uart0_clk = {
.name = "uart0",
.parent = &pll0_sysclk2,
.lpsc = DA8XX_LPSC0_UART0,
};
static struct clk uart1_clk = {
.name = "uart1",
.parent = &pll0_sysclk2,
.lpsc = DA8XX_LPSC1_UART1,
.gpsc = 1,
};
static struct clk uart2_clk = {
.name = "uart2",
.parent = &pll0_sysclk2,
.lpsc = DA8XX_LPSC1_UART2,
.gpsc = 1,
};
static struct clk spi0_clk = {
.name = "spi0",
.parent = &pll0_sysclk2,
.lpsc = DA8XX_LPSC0_SPI0,
};
static struct clk spi1_clk = {
.name = "spi1",
.parent = &pll0_sysclk2,
.lpsc = DA8XX_LPSC1_SPI1,
.gpsc = 1,
};
static struct clk ecap0_clk = {
.name = "ecap0",
.parent = &pll0_sysclk2,
.lpsc = DA8XX_LPSC1_ECAP,
.gpsc = 1,
};
static struct clk ecap1_clk = {
.name = "ecap1",
.parent = &pll0_sysclk2,
.lpsc = DA8XX_LPSC1_ECAP,
.gpsc = 1,
};
static struct clk ecap2_clk = {
.name = "ecap2",
.parent = &pll0_sysclk2,
.lpsc = DA8XX_LPSC1_ECAP,
.gpsc = 1,
};
static struct clk pwm0_clk = {
.name = "pwm0",
.parent = &pll0_sysclk2,
.lpsc = DA8XX_LPSC1_PWM,
.gpsc = 1,
};
static struct clk pwm1_clk = {
.name = "pwm1",
.parent = &pll0_sysclk2,
.lpsc = DA8XX_LPSC1_PWM,
.gpsc = 1,
};
static struct clk pwm2_clk = {
.name = "pwm2",
.parent = &pll0_sysclk2,
.lpsc = DA8XX_LPSC1_PWM,
.gpsc = 1,
};
static struct clk eqep0_clk = {
.name = "eqep0",
.parent = &pll0_sysclk2,
.lpsc = DA830_LPSC1_EQEP,
.gpsc = 1,
};
static struct clk eqep1_clk = {
.name = "eqep1",
.parent = &pll0_sysclk2,
.lpsc = DA830_LPSC1_EQEP,
.gpsc = 1,
};
static struct clk lcdc_clk = {
.name = "lcdc",
.parent = &pll0_sysclk2,
.lpsc = DA8XX_LPSC1_LCDC,
.gpsc = 1,
};
static struct clk mcasp0_clk = {
.name = "mcasp0",
.parent = &pll0_sysclk2,
.lpsc = DA8XX_LPSC1_McASP0,
.gpsc = 1,
};
static struct clk mcasp1_clk = {
.name = "mcasp1",
.parent = &pll0_sysclk2,
.lpsc = DA830_LPSC1_McASP1,
.gpsc = 1,
};
static struct clk mcasp2_clk = {
.name = "mcasp2",
.parent = &pll0_sysclk2,
.lpsc = DA830_LPSC1_McASP2,
.gpsc = 1,
};
static struct clk usb20_clk = {
.name = "usb20",
.parent = &pll0_sysclk2,
.lpsc = DA8XX_LPSC1_USB20,
.gpsc = 1,
};
static struct clk aemif_clk = {
.name = "aemif",
.parent = &pll0_sysclk3,
.lpsc = DA8XX_LPSC0_EMIF25,
.flags = ALWAYS_ENABLED,
};
static struct clk aintc_clk = {
.name = "aintc",
.parent = &pll0_sysclk4,
.lpsc = DA8XX_LPSC0_AINTC,
.flags = ALWAYS_ENABLED,
};
static struct clk secu_mgr_clk = {
.name = "secu_mgr",
.parent = &pll0_sysclk4,
.lpsc = DA8XX_LPSC0_SECU_MGR,
.flags = ALWAYS_ENABLED,
};
static struct clk emac_clk = {
.name = "emac",
.parent = &pll0_sysclk4,
.lpsc = DA8XX_LPSC1_CPGMAC,
.gpsc = 1,
};
static struct clk gpio_clk = {
.name = "gpio",
.parent = &pll0_sysclk4,
.lpsc = DA8XX_LPSC1_GPIO,
.gpsc = 1,
};
static struct clk i2c1_clk = {
.name = "i2c1",
.parent = &pll0_sysclk4,
.lpsc = DA8XX_LPSC1_I2C,
.gpsc = 1,
};
static struct clk usb11_clk = {
.name = "usb11",
.parent = &pll0_sysclk4,
.lpsc = DA8XX_LPSC1_USB11,
.gpsc = 1,
};
static struct clk emif3_clk = {
.name = "emif3",
.parent = &pll0_sysclk5,
.lpsc = DA8XX_LPSC1_EMIF3C,
.gpsc = 1,
.flags = ALWAYS_ENABLED,
};
static struct clk arm_clk = {
.name = "arm",
.parent = &pll0_sysclk6,
.lpsc = DA8XX_LPSC0_ARM,
.flags = ALWAYS_ENABLED,
};
static struct clk rmii_clk = {
.name = "rmii",
.parent = &pll0_sysclk7,
};
static struct clk_lookup da830_clks[] = {
CLK(NULL, "ref", &ref_clk),
CLK(NULL, "pll0", &pll0_clk),
CLK(NULL, "pll0_aux", &pll0_aux_clk),
CLK(NULL, "pll0_sysclk2", &pll0_sysclk2),
CLK(NULL, "pll0_sysclk3", &pll0_sysclk3),
CLK(NULL, "pll0_sysclk4", &pll0_sysclk4),
CLK(NULL, "pll0_sysclk5", &pll0_sysclk5),
CLK(NULL, "pll0_sysclk6", &pll0_sysclk6),
CLK(NULL, "pll0_sysclk7", &pll0_sysclk7),
CLK("i2c_davinci.1", NULL, &i2c0_clk),
CLK(NULL, "timer0", &timerp64_0_clk),
CLK("watchdog", NULL, &timerp64_1_clk),
CLK(NULL, "arm_rom", &arm_rom_clk),
CLK(NULL, "scr0_ss", &scr0_ss_clk),
CLK(NULL, "scr1_ss", &scr1_ss_clk),
CLK(NULL, "scr2_ss", &scr2_ss_clk),
CLK(NULL, "dmax", &dmax_clk),
CLK(NULL, "tpcc", &tpcc_clk),
CLK(NULL, "tptc0", &tptc0_clk),
CLK(NULL, "tptc1", &tptc1_clk),
CLK("davinci_mmc.0", NULL, &mmcsd_clk),
CLK(NULL, "uart0", &uart0_clk),
CLK(NULL, "uart1", &uart1_clk),
CLK(NULL, "uart2", &uart2_clk),
CLK("spi_davinci.0", NULL, &spi0_clk),
CLK("spi_davinci.1", NULL, &spi1_clk),
CLK(NULL, "ecap0", &ecap0_clk),
CLK(NULL, "ecap1", &ecap1_clk),
CLK(NULL, "ecap2", &ecap2_clk),
CLK(NULL, "pwm0", &pwm0_clk),
CLK(NULL, "pwm1", &pwm1_clk),
CLK(NULL, "pwm2", &pwm2_clk),
CLK("eqep.0", NULL, &eqep0_clk),
CLK("eqep.1", NULL, &eqep1_clk),
CLK("da8xx_lcdc.0", NULL, &lcdc_clk),
CLK("davinci-mcasp.0", NULL, &mcasp0_clk),
CLK("davinci-mcasp.1", NULL, &mcasp1_clk),
CLK("davinci-mcasp.2", NULL, &mcasp2_clk),
CLK(NULL, "usb20", &usb20_clk),
CLK(NULL, "aemif", &aemif_clk),
CLK(NULL, "aintc", &aintc_clk),
CLK(NULL, "secu_mgr", &secu_mgr_clk),
CLK("davinci_emac.1", NULL, &emac_clk),
CLK(NULL, "gpio", &gpio_clk),
CLK("i2c_davinci.2", NULL, &i2c1_clk),
CLK(NULL, "usb11", &usb11_clk),
CLK(NULL, "emif3", &emif3_clk),
CLK(NULL, "arm", &arm_clk),
CLK(NULL, "rmii", &rmii_clk),
CLK(NULL, NULL, NULL),
};
/*
* Device specific mux setup
*
* columns: soc, description, mux reg, mode offset, mode mask, mux mode, dbg
*/
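/*
 * Worked example, reading the columns above against the first two
 * entries below: MUX_CFG(DA830, GPIO7_14, 0, 0, 0xf, 1, false) selects
 * mode 1 in the 4-bit field at bit offset 0 of pinmux register 0 and so
 * routes the pin as GPIO7_14, while MUX_CFG(DA830, RTCK, 0, 0, 0xf, 8,
 * false) programs mode 8 into that same field to select RTCK instead.
 */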
static const struct mux_config da830_pins[] = {
#ifdef CONFIG_DAVINCI_MUX
MUX_CFG(DA830, GPIO7_14, 0, 0, 0xf, 1, false)
MUX_CFG(DA830, RTCK, 0, 0, 0xf, 8, false)
MUX_CFG(DA830, GPIO7_15, 0, 4, 0xf, 1, false)
MUX_CFG(DA830, EMU_0, 0, 4, 0xf, 8, false)
MUX_CFG(DA830, EMB_SDCKE, 0, 8, 0xf, 1, false)
MUX_CFG(DA830, EMB_CLK_GLUE, 0, 12, 0xf, 1, false)
MUX_CFG(DA830, EMB_CLK, 0, 12, 0xf, 2, false)
MUX_CFG(DA830, NEMB_CS_0, 0, 16, 0xf, 1, false)
MUX_CFG(DA830, NEMB_CAS, 0, 20, 0xf, 1, false)
MUX_CFG(DA830, NEMB_RAS, 0, 24, 0xf, 1, false)
MUX_CFG(DA830, NEMB_WE, 0, 28, 0xf, 1, false)
MUX_CFG(DA830, EMB_BA_1, 1, 0, 0xf, 1, false)
MUX_CFG(DA830, EMB_BA_0, 1, 4, 0xf, 1, false)
MUX_CFG(DA830, EMB_A_0, 1, 8, 0xf, 1, false)
MUX_CFG(DA830, EMB_A_1, 1, 12, 0xf, 1, false)
MUX_CFG(DA830, EMB_A_2, 1, 16, 0xf, 1, false)
MUX_CFG(DA830, EMB_A_3, 1, 20, 0xf, 1, false)
MUX_CFG(DA830, EMB_A_4, 1, 24, 0xf, 1, false)
MUX_CFG(DA830, EMB_A_5, 1, 28, 0xf, 1, false)
MUX_CFG(DA830, GPIO7_0, 1, 0, 0xf, 8, false)
MUX_CFG(DA830, GPIO7_1, 1, 4, 0xf, 8, false)
MUX_CFG(DA830, GPIO7_2, 1, 8, 0xf, 8, false)
MUX_CFG(DA830, GPIO7_3, 1, 12, 0xf, 8, false)
MUX_CFG(DA830, GPIO7_4, 1, 16, 0xf, 8, false)
MUX_CFG(DA830, GPIO7_5, 1, 20, 0xf, 8, false)
MUX_CFG(DA830, GPIO7_6, 1, 24, 0xf, 8, false)
MUX_CFG(DA830, GPIO7_7, 1, 28, 0xf, 8, false)
MUX_CFG(DA830, EMB_A_6, 2, 0, 0xf, 1, false)
MUX_CFG(DA830, EMB_A_7, 2, 4, 0xf, 1, false)
MUX_CFG(DA830, EMB_A_8, 2, 8, 0xf, 1, false)
MUX_CFG(DA830, EMB_A_9, 2, 12, 0xf, 1, false)
MUX_CFG(DA830, EMB_A_10, 2, 16, 0xf, 1, false)
MUX_CFG(DA830, EMB_A_11, 2, 20, 0xf, 1, false)
MUX_CFG(DA830, EMB_A_12, 2, 24, 0xf, 1, false)
MUX_CFG(DA830, EMB_D_31, 2, 28, 0xf, 1, false)
MUX_CFG(DA830, GPIO7_8, 2, 0, 0xf, 8, false)
MUX_CFG(DA830, GPIO7_9, 2, 4, 0xf, 8, false)
MUX_CFG(DA830, GPIO7_10, 2, 8, 0xf, 8, false)
MUX_CFG(DA830, GPIO7_11, 2, 12, 0xf, 8, false)
MUX_CFG(DA830, GPIO7_12, 2, 16, 0xf, 8, false)
MUX_CFG(DA830, GPIO7_13, 2, 20, 0xf, 8, false)
MUX_CFG(DA830, GPIO3_13, 2, 24, 0xf, 8, false)
MUX_CFG(DA830, EMB_D_30, 3, 0, 0xf, 1, false)
MUX_CFG(DA830, EMB_D_29, 3, 4, 0xf, 1, false)
MUX_CFG(DA830, EMB_D_28, 3, 8, 0xf, 1, false)
MUX_CFG(DA830, EMB_D_27, 3, 12, 0xf, 1, false)
MUX_CFG(DA830, EMB_D_26, 3, 16, 0xf, 1, false)
MUX_CFG(DA830, EMB_D_25, 3, 20, 0xf, 1, false)
MUX_CFG(DA830, EMB_D_24, 3, 24, 0xf, 1, false)
MUX_CFG(DA830, EMB_D_23, 3, 28, 0xf, 1, false)
MUX_CFG(DA830, EMB_D_22, 4, 0, 0xf, 1, false)
MUX_CFG(DA830, EMB_D_21, 4, 4, 0xf, 1, false)
MUX_CFG(DA830, EMB_D_20, 4, 8, 0xf, 1, false)
MUX_CFG(DA830, EMB_D_19, 4, 12, 0xf, 1, false)
MUX_CFG(DA830, EMB_D_18, 4, 16, 0xf, 1, false)
MUX_CFG(DA830, EMB_D_17, 4, 20, 0xf, 1, false)
MUX_CFG(DA830, EMB_D_16, 4, 24, 0xf, 1, false)
MUX_CFG(DA830, NEMB_WE_DQM_3, 4, 28, 0xf, 1, false)
MUX_CFG(DA830, NEMB_WE_DQM_2, 5, 0, 0xf, 1, false)
MUX_CFG(DA830, EMB_D_0, 5, 4, 0xf, 1, false)
MUX_CFG(DA830, EMB_D_1, 5, 8, 0xf, 1, false)
MUX_CFG(DA830, EMB_D_2, 5, 12, 0xf, 1, false)
MUX_CFG(DA830, EMB_D_3, 5, 16, 0xf, 1, false)
MUX_CFG(DA830, EMB_D_4, 5, 20, 0xf, 1, false)
MUX_CFG(DA830, EMB_D_5, 5, 24, 0xf, 1, false)
MUX_CFG(DA830, EMB_D_6, 5, 28, 0xf, 1, false)
MUX_CFG(DA830, GPIO6_0, 5, 4, 0xf, 8, false)
MUX_CFG(DA830, GPIO6_1, 5, 8, 0xf, 8, false)
MUX_CFG(DA830, GPIO6_2, 5, 12, 0xf, 8, false)
MUX_CFG(DA830, GPIO6_3, 5, 16, 0xf, 8, false)
MUX_CFG(DA830, GPIO6_4, 5, 20, 0xf, 8, false)
MUX_CFG(DA830, GPIO6_5, 5, 24, 0xf, 8, false)
MUX_CFG(DA830, GPIO6_6, 5, 28, 0xf, 8, false)
MUX_CFG(DA830, EMB_D_7, 6, 0, 0xf, 1, false)
MUX_CFG(DA830, EMB_D_8, 6, 4, 0xf, 1, false)
MUX_CFG(DA830, EMB_D_9, 6, 8, 0xf, 1, false)
MUX_CFG(DA830, EMB_D_10, 6, 12, 0xf, 1, false)
MUX_CFG(DA830, EMB_D_11, 6, 16, 0xf, 1, false)
MUX_CFG(DA830, EMB_D_12, 6, 20, 0xf, 1, false)
MUX_CFG(DA830, EMB_D_13, 6, 24, 0xf, 1, false)
MUX_CFG(DA830, EMB_D_14, 6, 28, 0xf, 1, false)
MUX_CFG(DA830, GPIO6_7, 6, 0, 0xf, 8, false)
MUX_CFG(DA830, GPIO6_8, 6, 4, 0xf, 8, false)
MUX_CFG(DA830, GPIO6_9, 6, 8, 0xf, 8, false)
MUX_CFG(DA830, GPIO6_10, 6, 12, 0xf, 8, false)
MUX_CFG(DA830, GPIO6_11, 6, 16, 0xf, 8, false)
MUX_CFG(DA830, GPIO6_12, 6, 20, 0xf, 8, false)
MUX_CFG(DA830, GPIO6_13, 6, 24, 0xf, 8, false)
MUX_CFG(DA830, GPIO6_14, 6, 28, 0xf, 8, false)
MUX_CFG(DA830, EMB_D_15, 7, 0, 0xf, 1, false)
MUX_CFG(DA830, NEMB_WE_DQM_1, 7, 4, 0xf, 1, false)
MUX_CFG(DA830, NEMB_WE_DQM_0, 7, 8, 0xf, 1, false)
MUX_CFG(DA830, SPI0_SOMI_0, 7, 12, 0xf, 1, false)
MUX_CFG(DA830, SPI0_SIMO_0, 7, 16, 0xf, 1, false)
MUX_CFG(DA830, SPI0_CLK, 7, 20, 0xf, 1, false)
MUX_CFG(DA830, NSPI0_ENA, 7, 24, 0xf, 1, false)
MUX_CFG(DA830, NSPI0_SCS_0, 7, 28, 0xf, 1, false)
MUX_CFG(DA830, EQEP0I, 7, 12, 0xf, 2, false)
MUX_CFG(DA830, EQEP0S, 7, 16, 0xf, 2, false)
MUX_CFG(DA830, EQEP1I, 7, 20, 0xf, 2, false)
MUX_CFG(DA830, NUART0_CTS, 7, 24, 0xf, 2, false)
MUX_CFG(DA830, NUART0_RTS, 7, 28, 0xf, 2, false)
MUX_CFG(DA830, EQEP0A, 7, 24, 0xf, 4, false)
MUX_CFG(DA830, EQEP0B, 7, 28, 0xf, 4, false)
MUX_CFG(DA830, GPIO6_15, 7, 0, 0xf, 8, false)
MUX_CFG(DA830, GPIO5_14, 7, 4, 0xf, 8, false)
MUX_CFG(DA830, GPIO5_15, 7, 8, 0xf, 8, false)
MUX_CFG(DA830, GPIO5_0, 7, 12, 0xf, 8, false)
MUX_CFG(DA830, GPIO5_1, 7, 16, 0xf, 8, false)
MUX_CFG(DA830, GPIO5_2, 7, 20, 0xf, 8, false)
MUX_CFG(DA830, GPIO5_3, 7, 24, 0xf, 8, false)
MUX_CFG(DA830, GPIO5_4, 7, 28, 0xf, 8, false)
MUX_CFG(DA830, SPI1_SOMI_0, 8, 0, 0xf, 1, false)
MUX_CFG(DA830, SPI1_SIMO_0, 8, 4, 0xf, 1, false)
MUX_CFG(DA830, SPI1_CLK, 8, 8, 0xf, 1, false)
MUX_CFG(DA830, UART0_RXD, 8, 12, 0xf, 1, false)
MUX_CFG(DA830, UART0_TXD, 8, 16, 0xf, 1, false)
MUX_CFG(DA830, AXR1_10, 8, 20, 0xf, 1, false)
MUX_CFG(DA830, AXR1_11, 8, 24, 0xf, 1, false)
MUX_CFG(DA830, NSPI1_ENA, 8, 28, 0xf, 1, false)
MUX_CFG(DA830, I2C1_SCL, 8, 0, 0xf, 2, false)
MUX_CFG(DA830, I2C1_SDA, 8, 4, 0xf, 2, false)
MUX_CFG(DA830, EQEP1S, 8, 8, 0xf, 2, false)
MUX_CFG(DA830, I2C0_SDA, 8, 12, 0xf, 2, false)
MUX_CFG(DA830, I2C0_SCL, 8, 16, 0xf, 2, false)
MUX_CFG(DA830, UART2_RXD, 8, 28, 0xf, 2, false)
MUX_CFG(DA830, TM64P0_IN12, 8, 12, 0xf, 4, false)
MUX_CFG(DA830, TM64P0_OUT12, 8, 16, 0xf, 4, false)
MUX_CFG(DA830, GPIO5_5, 8, 0, 0xf, 8, false)
MUX_CFG(DA830, GPIO5_6, 8, 4, 0xf, 8, false)
MUX_CFG(DA830, GPIO5_7, 8, 8, 0xf, 8, false)
MUX_CFG(DA830, GPIO5_8, 8, 12, 0xf, 8, false)
MUX_CFG(DA830, GPIO5_9, 8, 16, 0xf, 8, false)
MUX_CFG(DA830, GPIO5_10, 8, 20, 0xf, 8, false)
MUX_CFG(DA830, GPIO5_11, 8, 24, 0xf, 8, false)
MUX_CFG(DA830, GPIO5_12, 8, 28, 0xf, 8, false)
MUX_CFG(DA830, NSPI1_SCS_0, 9, 0, 0xf, 1, false)
MUX_CFG(DA830, USB0_DRVVBUS, 9, 4, 0xf, 1, false)
MUX_CFG(DA830, AHCLKX0, 9, 8, 0xf, 1, false)
MUX_CFG(DA830, ACLKX0, 9, 12, 0xf, 1, false)
MUX_CFG(DA830, AFSX0, 9, 16, 0xf, 1, false)
MUX_CFG(DA830, AHCLKR0, 9, 20, 0xf, 1, false)
MUX_CFG(DA830, ACLKR0, 9, 24, 0xf, 1, false)
MUX_CFG(DA830, AFSR0, 9, 28, 0xf, 1, false)
MUX_CFG(DA830, UART2_TXD, 9, 0, 0xf, 2, false)
MUX_CFG(DA830, AHCLKX2, 9, 8, 0xf, 2, false)
MUX_CFG(DA830, ECAP0_APWM0, 9, 12, 0xf, 2, false)
MUX_CFG(DA830, RMII_MHZ_50_CLK, 9, 20, 0xf, 2, false)
MUX_CFG(DA830, ECAP1_APWM1, 9, 24, 0xf, 2, false)
MUX_CFG(DA830, USB_REFCLKIN, 9, 8, 0xf, 4, false)
MUX_CFG(DA830, GPIO5_13, 9, 0, 0xf, 8, false)
MUX_CFG(DA830, GPIO4_15, 9, 4, 0xf, 8, false)
MUX_CFG(DA830, GPIO2_11, 9, 8, 0xf, 8, false)
MUX_CFG(DA830, GPIO2_12, 9, 12, 0xf, 8, false)
MUX_CFG(DA830, GPIO2_13, 9, 16, 0xf, 8, false)
MUX_CFG(DA830, GPIO2_14, 9, 20, 0xf, 8, false)
MUX_CFG(DA830, GPIO2_15, 9, 24, 0xf, 8, false)
MUX_CFG(DA830, GPIO3_12, 9, 28, 0xf, 8, false)
MUX_CFG(DA830, AMUTE0, 10, 0, 0xf, 1, false)
MUX_CFG(DA830, AXR0_0, 10, 4, 0xf, 1, false)
MUX_CFG(DA830, AXR0_1, 10, 8, 0xf, 1, false)
MUX_CFG(DA830, AXR0_2, 10, 12, 0xf, 1, false)
MUX_CFG(DA830, AXR0_3, 10, 16, 0xf, 1, false)
MUX_CFG(DA830, AXR0_4, 10, 20, 0xf, 1, false)
MUX_CFG(DA830, AXR0_5, 10, 24, 0xf, 1, false)
MUX_CFG(DA830, AXR0_6, 10, 28, 0xf, 1, false)
MUX_CFG(DA830, RMII_TXD_0, 10, 4, 0xf, 2, false)
MUX_CFG(DA830, RMII_TXD_1, 10, 8, 0xf, 2, false)
MUX_CFG(DA830, RMII_TXEN, 10, 12, 0xf, 2, false)
MUX_CFG(DA830, RMII_CRS_DV, 10, 16, 0xf, 2, false)
MUX_CFG(DA830, RMII_RXD_0, 10, 20, 0xf, 2, false)
MUX_CFG(DA830, RMII_RXD_1, 10, 24, 0xf, 2, false)
MUX_CFG(DA830, RMII_RXER, 10, 28, 0xf, 2, false)
MUX_CFG(DA830, AFSR2, 10, 4, 0xf, 4, false)
MUX_CFG(DA830, ACLKX2, 10, 8, 0xf, 4, false)
MUX_CFG(DA830, AXR2_3, 10, 12, 0xf, 4, false)
MUX_CFG(DA830, AXR2_2, 10, 16, 0xf, 4, false)
MUX_CFG(DA830, AXR2_1, 10, 20, 0xf, 4, false)
MUX_CFG(DA830, AFSX2, 10, 24, 0xf, 4, false)
MUX_CFG(DA830, ACLKR2, 10, 28, 0xf, 4, false)
MUX_CFG(DA830, NRESETOUT, 10, 0, 0xf, 8, false)
MUX_CFG(DA830, GPIO3_0, 10, 4, 0xf, 8, false)
MUX_CFG(DA830, GPIO3_1, 10, 8, 0xf, 8, false)
MUX_CFG(DA830, GPIO3_2, 10, 12, 0xf, 8, false)
MUX_CFG(DA830, GPIO3_3, 10, 16, 0xf, 8, false)
MUX_CFG(DA830, GPIO3_4, 10, 20, 0xf, 8, false)
MUX_CFG(DA830, GPIO3_5, 10, 24, 0xf, 8, false)
MUX_CFG(DA830, GPIO3_6, 10, 28, 0xf, 8, false)
MUX_CFG(DA830, AXR0_7, 11, 0, 0xf, 1, false)
MUX_CFG(DA830, AXR0_8, 11, 4, 0xf, 1, false)
MUX_CFG(DA830, UART1_RXD, 11, 8, 0xf, 1, false)
MUX_CFG(DA830, UART1_TXD, 11, 12, 0xf, 1, false)
MUX_CFG(DA830, AXR0_11, 11, 16, 0xf, 1, false)
MUX_CFG(DA830, AHCLKX1, 11, 20, 0xf, 1, false)
MUX_CFG(DA830, ACLKX1, 11, 24, 0xf, 1, false)
MUX_CFG(DA830, AFSX1, 11, 28, 0xf, 1, false)
MUX_CFG(DA830, MDIO_CLK, 11, 0, 0xf, 2, false)
MUX_CFG(DA830, MDIO_D, 11, 4, 0xf, 2, false)
MUX_CFG(DA830, AXR0_9, 11, 8, 0xf, 2, false)
MUX_CFG(DA830, AXR0_10, 11, 12, 0xf, 2, false)
MUX_CFG(DA830, EPWM0B, 11, 20, 0xf, 2, false)
MUX_CFG(DA830, EPWM0A, 11, 24, 0xf, 2, false)
MUX_CFG(DA830, EPWMSYNCI, 11, 28, 0xf, 2, false)
MUX_CFG(DA830, AXR2_0, 11, 16, 0xf, 4, false)
MUX_CFG(DA830, EPWMSYNC0, 11, 28, 0xf, 4, false)
MUX_CFG(DA830, GPIO3_7, 11, 0, 0xf, 8, false)
MUX_CFG(DA830, GPIO3_8, 11, 4, 0xf, 8, false)
MUX_CFG(DA830, GPIO3_9, 11, 8, 0xf, 8, false)
MUX_CFG(DA830, GPIO3_10, 11, 12, 0xf, 8, false)
MUX_CFG(DA830, GPIO3_11, 11, 16, 0xf, 8, false)
MUX_CFG(DA830, GPIO3_14, 11, 20, 0xf, 8, false)
MUX_CFG(DA830, GPIO3_15, 11, 24, 0xf, 8, false)
MUX_CFG(DA830, GPIO4_10, 11, 28, 0xf, 8, false)
MUX_CFG(DA830, AHCLKR1, 12, 0, 0xf, 1, false)
MUX_CFG(DA830, ACLKR1, 12, 4, 0xf, 1, false)
MUX_CFG(DA830, AFSR1, 12, 8, 0xf, 1, false)
MUX_CFG(DA830, AMUTE1, 12, 12, 0xf, 1, false)
MUX_CFG(DA830, AXR1_0, 12, 16, 0xf, 1, false)
MUX_CFG(DA830, AXR1_1, 12, 20, 0xf, 1, false)
MUX_CFG(DA830, AXR1_2, 12, 24, 0xf, 1, false)
MUX_CFG(DA830, AXR1_3, 12, 28, 0xf, 1, false)
MUX_CFG(DA830, ECAP2_APWM2, 12, 4, 0xf, 2, false)
MUX_CFG(DA830, EHRPWMGLUETZ, 12, 12, 0xf, 2, false)
MUX_CFG(DA830, EQEP1A, 12, 28, 0xf, 2, false)
MUX_CFG(DA830, GPIO4_11, 12, 0, 0xf, 8, false)
MUX_CFG(DA830, GPIO4_12, 12, 4, 0xf, 8, false)
MUX_CFG(DA830, GPIO4_13, 12, 8, 0xf, 8, false)
MUX_CFG(DA830, GPIO4_14, 12, 12, 0xf, 8, false)
MUX_CFG(DA830, GPIO4_0, 12, 16, 0xf, 8, false)
MUX_CFG(DA830, GPIO4_1, 12, 20, 0xf, 8, false)
MUX_CFG(DA830, GPIO4_2, 12, 24, 0xf, 8, false)
MUX_CFG(DA830, GPIO4_3, 12, 28, 0xf, 8, false)
MUX_CFG(DA830, AXR1_4, 13, 0, 0xf, 1, false)
MUX_CFG(DA830, AXR1_5, 13, 4, 0xf, 1, false)
MUX_CFG(DA830, AXR1_6, 13, 8, 0xf, 1, false)
MUX_CFG(DA830, AXR1_7, 13, 12, 0xf, 1, false)
MUX_CFG(DA830, AXR1_8, 13, 16, 0xf, 1, false)
MUX_CFG(DA830, AXR1_9, 13, 20, 0xf, 1, false)
MUX_CFG(DA830, EMA_D_0, 13, 24, 0xf, 1, false)
MUX_CFG(DA830, EMA_D_1, 13, 28, 0xf, 1, false)
MUX_CFG(DA830, EQEP1B, 13, 0, 0xf, 2, false)
MUX_CFG(DA830, EPWM2B, 13, 4, 0xf, 2, false)
MUX_CFG(DA830, EPWM2A, 13, 8, 0xf, 2, false)
MUX_CFG(DA830, EPWM1B, 13, 12, 0xf, 2, false)
MUX_CFG(DA830, EPWM1A, 13, 16, 0xf, 2, false)
MUX_CFG(DA830, MMCSD_DAT_0, 13, 24, 0xf, 2, false)
MUX_CFG(DA830, MMCSD_DAT_1, 13, 28, 0xf, 2, false)
MUX_CFG(DA830, UHPI_HD_0, 13, 24, 0xf, 4, false)
MUX_CFG(DA830, UHPI_HD_1, 13, 28, 0xf, 4, false)
MUX_CFG(DA830, GPIO4_4, 13, 0, 0xf, 8, false)
MUX_CFG(DA830, GPIO4_5, 13, 4, 0xf, 8, false)
MUX_CFG(DA830, GPIO4_6, 13, 8, 0xf, 8, false)
MUX_CFG(DA830, GPIO4_7, 13, 12, 0xf, 8, false)
MUX_CFG(DA830, GPIO4_8, 13, 16, 0xf, 8, false)
MUX_CFG(DA830, GPIO4_9, 13, 20, 0xf, 8, false)
MUX_CFG(DA830, GPIO0_0, 13, 24, 0xf, 8, false)
MUX_CFG(DA830, GPIO0_1, 13, 28, 0xf, 8, false)
MUX_CFG(DA830, EMA_D_2, 14, 0, 0xf, 1, false)
MUX_CFG(DA830, EMA_D_3, 14, 4, 0xf, 1, false)
MUX_CFG(DA830, EMA_D_4, 14, 8, 0xf, 1, false)
MUX_CFG(DA830, EMA_D_5, 14, 12, 0xf, 1, false)
MUX_CFG(DA830, EMA_D_6, 14, 16, 0xf, 1, false)
MUX_CFG(DA830, EMA_D_7, 14, 20, 0xf, 1, false)
MUX_CFG(DA830, EMA_D_8, 14, 24, 0xf, 1, false)
MUX_CFG(DA830, EMA_D_9, 14, 28, 0xf, 1, false)
MUX_CFG(DA830, MMCSD_DAT_2, 14, 0, 0xf, 2, false)
MUX_CFG(DA830, MMCSD_DAT_3, 14, 4, 0xf, 2, false)
MUX_CFG(DA830, MMCSD_DAT_4, 14, 8, 0xf, 2, false)
MUX_CFG(DA830, MMCSD_DAT_5, 14, 12, 0xf, 2, false)
MUX_CFG(DA830, MMCSD_DAT_6, 14, 16, 0xf, 2, false)
MUX_CFG(DA830, MMCSD_DAT_7, 14, 20, 0xf, 2, false)
MUX_CFG(DA830, UHPI_HD_8, 14, 24, 0xf, 2, false)
MUX_CFG(DA830, UHPI_HD_9, 14, 28, 0xf, 2, false)
MUX_CFG(DA830, UHPI_HD_2, 14, 0, 0xf, 4, false)
MUX_CFG(DA830, UHPI_HD_3, 14, 4, 0xf, 4, false)
MUX_CFG(DA830, UHPI_HD_4, 14, 8, 0xf, 4, false)
MUX_CFG(DA830, UHPI_HD_5, 14, 12, 0xf, 4, false)
MUX_CFG(DA830, UHPI_HD_6, 14, 16, 0xf, 4, false)
MUX_CFG(DA830, UHPI_HD_7, 14, 20, 0xf, 4, false)
MUX_CFG(DA830, LCD_D_8, 14, 24, 0xf, 4, false)
MUX_CFG(DA830, LCD_D_9, 14, 28, 0xf, 4, false)
MUX_CFG(DA830, GPIO0_2, 14, 0, 0xf, 8, false)
MUX_CFG(DA830, GPIO0_3, 14, 4, 0xf, 8, false)
MUX_CFG(DA830, GPIO0_4, 14, 8, 0xf, 8, false)
MUX_CFG(DA830, GPIO0_5, 14, 12, 0xf, 8, false)
MUX_CFG(DA830, GPIO0_6, 14, 16, 0xf, 8, false)
MUX_CFG(DA830, GPIO0_7, 14, 20, 0xf, 8, false)
MUX_CFG(DA830, GPIO0_8, 14, 24, 0xf, 8, false)
MUX_CFG(DA830, GPIO0_9, 14, 28, 0xf, 8, false)
MUX_CFG(DA830, EMA_D_10, 15, 0, 0xf, 1, false)
MUX_CFG(DA830, EMA_D_11, 15, 4, 0xf, 1, false)
MUX_CFG(DA830, EMA_D_12, 15, 8, 0xf, 1, false)
MUX_CFG(DA830, EMA_D_13, 15, 12, 0xf, 1, false)
MUX_CFG(DA830, EMA_D_14, 15, 16, 0xf, 1, false)
MUX_CFG(DA830, EMA_D_15, 15, 20, 0xf, 1, false)
MUX_CFG(DA830, EMA_A_0, 15, 24, 0xf, 1, false)
MUX_CFG(DA830, EMA_A_1, 15, 28, 0xf, 1, false)
MUX_CFG(DA830, UHPI_HD_10, 15, 0, 0xf, 2, false)
MUX_CFG(DA830, UHPI_HD_11, 15, 4, 0xf, 2, false)
MUX_CFG(DA830, UHPI_HD_12, 15, 8, 0xf, 2, false)
MUX_CFG(DA830, UHPI_HD_13, 15, 12, 0xf, 2, false)
MUX_CFG(DA830, UHPI_HD_14, 15, 16, 0xf, 2, false)
MUX_CFG(DA830, UHPI_HD_15, 15, 20, 0xf, 2, false)
MUX_CFG(DA830, LCD_D_7, 15, 24, 0xf, 2, false)
MUX_CFG(DA830, MMCSD_CLK, 15, 28, 0xf, 2, false)
MUX_CFG(DA830, LCD_D_10, 15, 0, 0xf, 4, false)
MUX_CFG(DA830, LCD_D_11, 15, 4, 0xf, 4, false)
MUX_CFG(DA830, LCD_D_12, 15, 8, 0xf, 4, false)
MUX_CFG(DA830, LCD_D_13, 15, 12, 0xf, 4, false)
MUX_CFG(DA830, LCD_D_14, 15, 16, 0xf, 4, false)
MUX_CFG(DA830, LCD_D_15, 15, 20, 0xf, 4, false)
MUX_CFG(DA830, UHPI_HCNTL0, 15, 28, 0xf, 4, false)
MUX_CFG(DA830, GPIO0_10, 15, 0, 0xf, 8, false)
MUX_CFG(DA830, GPIO0_11, 15, 4, 0xf, 8, false)
MUX_CFG(DA830, GPIO0_12, 15, 8, 0xf, 8, false)
MUX_CFG(DA830, GPIO0_13, 15, 12, 0xf, 8, false)
MUX_CFG(DA830, GPIO0_14, 15, 16, 0xf, 8, false)
MUX_CFG(DA830, GPIO0_15, 15, 20, 0xf, 8, false)
MUX_CFG(DA830, GPIO1_0, 15, 24, 0xf, 8, false)
MUX_CFG(DA830, GPIO1_1, 15, 28, 0xf, 8, false)
MUX_CFG(DA830, EMA_A_2, 16, 0, 0xf, 1, false)
MUX_CFG(DA830, EMA_A_3, 16, 4, 0xf, 1, false)
MUX_CFG(DA830, EMA_A_4, 16, 8, 0xf, 1, false)
MUX_CFG(DA830, EMA_A_5, 16, 12, 0xf, 1, false)
MUX_CFG(DA830, EMA_A_6, 16, 16, 0xf, 1, false)
MUX_CFG(DA830, EMA_A_7, 16, 20, 0xf, 1, false)
MUX_CFG(DA830, EMA_A_8, 16, 24, 0xf, 1, false)
MUX_CFG(DA830, EMA_A_9, 16, 28, 0xf, 1, false)
MUX_CFG(DA830, MMCSD_CMD, 16, 0, 0xf, 2, false)
MUX_CFG(DA830, LCD_D_6, 16, 4, 0xf, 2, false)
MUX_CFG(DA830, LCD_D_3, 16, 8, 0xf, 2, false)
MUX_CFG(DA830, LCD_D_2, 16, 12, 0xf, 2, false)
MUX_CFG(DA830, LCD_D_1, 16, 16, 0xf, 2, false)
MUX_CFG(DA830, LCD_D_0, 16, 20, 0xf, 2, false)
MUX_CFG(DA830, LCD_PCLK, 16, 24, 0xf, 2, false)
MUX_CFG(DA830, LCD_HSYNC, 16, 28, 0xf, 2, false)
MUX_CFG(DA830, UHPI_HCNTL1, 16, 0, 0xf, 4, false)
MUX_CFG(DA830, GPIO1_2, 16, 0, 0xf, 8, false)
MUX_CFG(DA830, GPIO1_3, 16, 4, 0xf, 8, false)
MUX_CFG(DA830, GPIO1_4, 16, 8, 0xf, 8, false)
MUX_CFG(DA830, GPIO1_5, 16, 12, 0xf, 8, false)
MUX_CFG(DA830, GPIO1_6, 16, 16, 0xf, 8, false)
MUX_CFG(DA830, GPIO1_7, 16, 20, 0xf, 8, false)
MUX_CFG(DA830, GPIO1_8, 16, 24, 0xf, 8, false)
MUX_CFG(DA830, GPIO1_9, 16, 28, 0xf, 8, false)
MUX_CFG(DA830, EMA_A_10, 17, 0, 0xf, 1, false)
MUX_CFG(DA830, EMA_A_11, 17, 4, 0xf, 1, false)
MUX_CFG(DA830, EMA_A_12, 17, 8, 0xf, 1, false)
MUX_CFG(DA830, EMA_BA_1, 17, 12, 0xf, 1, false)
MUX_CFG(DA830, EMA_BA_0, 17, 16, 0xf, 1, false)
MUX_CFG(DA830, EMA_CLK, 17, 20, 0xf, 1, false)
MUX_CFG(DA830, EMA_SDCKE, 17, 24, 0xf, 1, false)
MUX_CFG(DA830, NEMA_CAS, 17, 28, 0xf, 1, false)
MUX_CFG(DA830, LCD_VSYNC, 17, 0, 0xf, 2, false)
MUX_CFG(DA830, NLCD_AC_ENB_CS, 17, 4, 0xf, 2, false)
MUX_CFG(DA830, LCD_MCLK, 17, 8, 0xf, 2, false)
MUX_CFG(DA830, LCD_D_5, 17, 12, 0xf, 2, false)
MUX_CFG(DA830, LCD_D_4, 17, 16, 0xf, 2, false)
MUX_CFG(DA830, OBSCLK, 17, 20, 0xf, 2, false)
MUX_CFG(DA830, NEMA_CS_4, 17, 28, 0xf, 2, false)
MUX_CFG(DA830, UHPI_HHWIL, 17, 12, 0xf, 4, false)
MUX_CFG(DA830, AHCLKR2, 17, 20, 0xf, 4, false)
MUX_CFG(DA830, GPIO1_10, 17, 0, 0xf, 8, false)
MUX_CFG(DA830, GPIO1_11, 17, 4, 0xf, 8, false)
MUX_CFG(DA830, GPIO1_12, 17, 8, 0xf, 8, false)
MUX_CFG(DA830, GPIO1_13, 17, 12, 0xf, 8, false)
MUX_CFG(DA830, GPIO1_14, 17, 16, 0xf, 8, false)
MUX_CFG(DA830, GPIO1_15, 17, 20, 0xf, 8, false)
MUX_CFG(DA830, GPIO2_0, 17, 24, 0xf, 8, false)
MUX_CFG(DA830, GPIO2_1, 17, 28, 0xf, 8, false)
MUX_CFG(DA830, NEMA_RAS, 18, 0, 0xf, 1, false)
MUX_CFG(DA830, NEMA_WE, 18, 4, 0xf, 1, false)
MUX_CFG(DA830, NEMA_CS_0, 18, 8, 0xf, 1, false)
MUX_CFG(DA830, NEMA_CS_2, 18, 12, 0xf, 1, false)
MUX_CFG(DA830, NEMA_CS_3, 18, 16, 0xf, 1, false)
MUX_CFG(DA830, NEMA_OE, 18, 20, 0xf, 1, false)
MUX_CFG(DA830, NEMA_WE_DQM_1, 18, 24, 0xf, 1, false)
MUX_CFG(DA830, NEMA_WE_DQM_0, 18, 28, 0xf, 1, false)
MUX_CFG(DA830, NEMA_CS_5, 18, 0, 0xf, 2, false)
MUX_CFG(DA830, UHPI_HRNW, 18, 4, 0xf, 2, false)
MUX_CFG(DA830, NUHPI_HAS, 18, 8, 0xf, 2, false)
MUX_CFG(DA830, NUHPI_HCS, 18, 12, 0xf, 2, false)
MUX_CFG(DA830, NUHPI_HDS1, 18, 20, 0xf, 2, false)
MUX_CFG(DA830, NUHPI_HDS2, 18, 24, 0xf, 2, false)
MUX_CFG(DA830, NUHPI_HINT, 18, 28, 0xf, 2, false)
MUX_CFG(DA830, AXR0_12, 18, 4, 0xf, 4, false)
MUX_CFG(DA830, AMUTE2, 18, 16, 0xf, 4, false)
MUX_CFG(DA830, AXR0_13, 18, 20, 0xf, 4, false)
MUX_CFG(DA830, AXR0_14, 18, 24, 0xf, 4, false)
MUX_CFG(DA830, AXR0_15, 18, 28, 0xf, 4, false)
MUX_CFG(DA830, GPIO2_2, 18, 0, 0xf, 8, false)
MUX_CFG(DA830, GPIO2_3, 18, 4, 0xf, 8, false)
MUX_CFG(DA830, GPIO2_4, 18, 8, 0xf, 8, false)
MUX_CFG(DA830, GPIO2_5, 18, 12, 0xf, 8, false)
MUX_CFG(DA830, GPIO2_6, 18, 16, 0xf, 8, false)
MUX_CFG(DA830, GPIO2_7, 18, 20, 0xf, 8, false)
MUX_CFG(DA830, GPIO2_8, 18, 24, 0xf, 8, false)
MUX_CFG(DA830, GPIO2_9, 18, 28, 0xf, 8, false)
MUX_CFG(DA830, EMA_WAIT_0, 19, 0, 0xf, 1, false)
MUX_CFG(DA830, NUHPI_HRDY, 19, 0, 0xf, 2, false)
MUX_CFG(DA830, GPIO2_10, 19, 0, 0xf, 8, false)
#endif
};
const short da830_emif25_pins[] __initdata = {
DA830_EMA_D_0, DA830_EMA_D_1, DA830_EMA_D_2, DA830_EMA_D_3,
DA830_EMA_D_4, DA830_EMA_D_5, DA830_EMA_D_6, DA830_EMA_D_7,
DA830_EMA_D_8, DA830_EMA_D_9, DA830_EMA_D_10, DA830_EMA_D_11,
DA830_EMA_D_12, DA830_EMA_D_13, DA830_EMA_D_14, DA830_EMA_D_15,
DA830_EMA_A_0, DA830_EMA_A_1, DA830_EMA_A_2, DA830_EMA_A_3,
DA830_EMA_A_4, DA830_EMA_A_5, DA830_EMA_A_6, DA830_EMA_A_7,
DA830_EMA_A_8, DA830_EMA_A_9, DA830_EMA_A_10, DA830_EMA_A_11,
DA830_EMA_A_12, DA830_EMA_BA_0, DA830_EMA_BA_1, DA830_EMA_CLK,
DA830_EMA_SDCKE, DA830_NEMA_CS_4, DA830_NEMA_CS_5, DA830_NEMA_WE,
DA830_NEMA_CS_0, DA830_NEMA_CS_2, DA830_NEMA_CS_3, DA830_NEMA_OE,
DA830_NEMA_WE_DQM_1, DA830_NEMA_WE_DQM_0, DA830_EMA_WAIT_0,
-1
};
const short da830_spi0_pins[] __initdata = {
DA830_SPI0_SOMI_0, DA830_SPI0_SIMO_0, DA830_SPI0_CLK, DA830_NSPI0_ENA,
DA830_NSPI0_SCS_0,
-1
};
const short da830_spi1_pins[] __initdata = {
DA830_SPI1_SOMI_0, DA830_SPI1_SIMO_0, DA830_SPI1_CLK, DA830_NSPI1_ENA,
DA830_NSPI1_SCS_0,
-1
};
const short da830_mmc_sd_pins[] __initdata = {
DA830_MMCSD_DAT_0, DA830_MMCSD_DAT_1, DA830_MMCSD_DAT_2,
DA830_MMCSD_DAT_3, DA830_MMCSD_DAT_4, DA830_MMCSD_DAT_5,
DA830_MMCSD_DAT_6, DA830_MMCSD_DAT_7, DA830_MMCSD_CLK,
DA830_MMCSD_CMD,
-1
};
const short da830_uart0_pins[] __initdata = {
DA830_NUART0_CTS, DA830_NUART0_RTS, DA830_UART0_RXD, DA830_UART0_TXD,
-1
};
const short da830_uart1_pins[] __initdata = {
DA830_UART1_RXD, DA830_UART1_TXD,
-1
};
const short da830_uart2_pins[] __initdata = {
DA830_UART2_RXD, DA830_UART2_TXD,
-1
};
const short da830_usb20_pins[] __initdata = {
DA830_USB0_DRVVBUS, DA830_USB_REFCLKIN,
-1
};
const short da830_usb11_pins[] __initdata = {
DA830_USB_REFCLKIN,
-1
};
const short da830_uhpi_pins[] __initdata = {
DA830_UHPI_HD_0, DA830_UHPI_HD_1, DA830_UHPI_HD_2, DA830_UHPI_HD_3,
DA830_UHPI_HD_4, DA830_UHPI_HD_5, DA830_UHPI_HD_6, DA830_UHPI_HD_7,
DA830_UHPI_HD_8, DA830_UHPI_HD_9, DA830_UHPI_HD_10, DA830_UHPI_HD_11,
DA830_UHPI_HD_12, DA830_UHPI_HD_13, DA830_UHPI_HD_14, DA830_UHPI_HD_15,
DA830_UHPI_HCNTL0, DA830_UHPI_HCNTL1, DA830_UHPI_HHWIL, DA830_UHPI_HRNW,
DA830_NUHPI_HAS, DA830_NUHPI_HCS, DA830_NUHPI_HDS1, DA830_NUHPI_HDS2,
DA830_NUHPI_HINT, DA830_NUHPI_HRDY,
-1
};
const short da830_cpgmac_pins[] __initdata = {
DA830_RMII_TXD_0, DA830_RMII_TXD_1, DA830_RMII_TXEN, DA830_RMII_CRS_DV,
DA830_RMII_RXD_0, DA830_RMII_RXD_1, DA830_RMII_RXER, DA830_MDIO_CLK,
DA830_MDIO_D,
-1
};
const short da830_emif3c_pins[] __initdata = {
DA830_EMB_SDCKE, DA830_EMB_CLK_GLUE, DA830_EMB_CLK, DA830_NEMB_CS_0,
DA830_NEMB_CAS, DA830_NEMB_RAS, DA830_NEMB_WE, DA830_EMB_BA_1,
DA830_EMB_BA_0, DA830_EMB_A_0, DA830_EMB_A_1, DA830_EMB_A_2,
DA830_EMB_A_3, DA830_EMB_A_4, DA830_EMB_A_5, DA830_EMB_A_6,
DA830_EMB_A_7, DA830_EMB_A_8, DA830_EMB_A_9, DA830_EMB_A_10,
DA830_EMB_A_11, DA830_EMB_A_12, DA830_NEMB_WE_DQM_3,
DA830_NEMB_WE_DQM_2, DA830_EMB_D_0, DA830_EMB_D_1, DA830_EMB_D_2,
DA830_EMB_D_3, DA830_EMB_D_4, DA830_EMB_D_5, DA830_EMB_D_6,
DA830_EMB_D_7, DA830_EMB_D_8, DA830_EMB_D_9, DA830_EMB_D_10,
DA830_EMB_D_11, DA830_EMB_D_12, DA830_EMB_D_13, DA830_EMB_D_14,
DA830_EMB_D_15, DA830_EMB_D_16, DA830_EMB_D_17, DA830_EMB_D_18,
DA830_EMB_D_19, DA830_EMB_D_20, DA830_EMB_D_21, DA830_EMB_D_22,
DA830_EMB_D_23, DA830_EMB_D_24, DA830_EMB_D_25, DA830_EMB_D_26,
DA830_EMB_D_27, DA830_EMB_D_28, DA830_EMB_D_29, DA830_EMB_D_30,
DA830_EMB_D_31, DA830_NEMB_WE_DQM_1, DA830_NEMB_WE_DQM_0,
-1
};
const short da830_mcasp0_pins[] __initdata = {
DA830_AHCLKX0, DA830_ACLKX0, DA830_AFSX0,
DA830_AHCLKR0, DA830_ACLKR0, DA830_AFSR0, DA830_AMUTE0,
DA830_AXR0_0, DA830_AXR0_1, DA830_AXR0_2, DA830_AXR0_3,
DA830_AXR0_4, DA830_AXR0_5, DA830_AXR0_6, DA830_AXR0_7,
DA830_AXR0_8, DA830_AXR0_9, DA830_AXR0_10, DA830_AXR0_11,
DA830_AXR0_12, DA830_AXR0_13, DA830_AXR0_14, DA830_AXR0_15,
-1
};
const short da830_mcasp1_pins[] __initdata = {
DA830_AHCLKX1, DA830_ACLKX1, DA830_AFSX1,
DA830_AHCLKR1, DA830_ACLKR1, DA830_AFSR1, DA830_AMUTE1,
DA830_AXR1_0, DA830_AXR1_1, DA830_AXR1_2, DA830_AXR1_3,
DA830_AXR1_4, DA830_AXR1_5, DA830_AXR1_6, DA830_AXR1_7,
DA830_AXR1_8, DA830_AXR1_9, DA830_AXR1_10, DA830_AXR1_11,
-1
};
const short da830_mcasp2_pins[] __initdata = {
DA830_AHCLKX2, DA830_ACLKX2, DA830_AFSX2,
DA830_AHCLKR2, DA830_ACLKR2, DA830_AFSR2, DA830_AMUTE2,
DA830_AXR2_0, DA830_AXR2_1, DA830_AXR2_2, DA830_AXR2_3,
-1
};
const short da830_i2c0_pins[] __initdata = {
DA830_I2C0_SDA, DA830_I2C0_SCL,
-1
};
const short da830_i2c1_pins[] __initdata = {
DA830_I2C1_SCL, DA830_I2C1_SDA,
-1
};
const short da830_lcdcntl_pins[] __initdata = {
DA830_LCD_D_0, DA830_LCD_D_1, DA830_LCD_D_2, DA830_LCD_D_3,
DA830_LCD_D_4, DA830_LCD_D_5, DA830_LCD_D_6, DA830_LCD_D_7,
DA830_LCD_D_8, DA830_LCD_D_9, DA830_LCD_D_10, DA830_LCD_D_11,
DA830_LCD_D_12, DA830_LCD_D_13, DA830_LCD_D_14, DA830_LCD_D_15,
DA830_LCD_PCLK, DA830_LCD_HSYNC, DA830_LCD_VSYNC, DA830_NLCD_AC_ENB_CS,
DA830_LCD_MCLK,
-1
};
const short da830_pwm_pins[] __initdata = {
DA830_ECAP0_APWM0, DA830_ECAP1_APWM1, DA830_EPWM0B, DA830_EPWM0A,
DA830_EPWMSYNCI, DA830_EPWMSYNC0, DA830_ECAP2_APWM2, DA830_EHRPWMGLUETZ,
DA830_EPWM2B, DA830_EPWM2A, DA830_EPWM1B, DA830_EPWM1A,
-1
};
const short da830_ecap0_pins[] __initdata = {
DA830_ECAP0_APWM0,
-1
};
const short da830_ecap1_pins[] __initdata = {
DA830_ECAP1_APWM1,
-1
};
const short da830_ecap2_pins[] __initdata = {
DA830_ECAP2_APWM2,
-1
};
const short da830_eqep0_pins[] __initdata = {
DA830_EQEP0I, DA830_EQEP0S, DA830_EQEP0A, DA830_EQEP0B,
-1
};
const short da830_eqep1_pins[] __initdata = {
DA830_EQEP1I, DA830_EQEP1S, DA830_EQEP1A, DA830_EQEP1B,
-1
};
/* FIQs are priorities 0-1; all other IRQs are 2-7, with 7 the lowest priority */
static u8 da830_default_priorities[DA830_N_CP_INTC_IRQ] = {
[IRQ_DA8XX_COMMTX] = 7,
[IRQ_DA8XX_COMMRX] = 7,
[IRQ_DA8XX_NINT] = 7,
[IRQ_DA8XX_EVTOUT0] = 7,
[IRQ_DA8XX_EVTOUT1] = 7,
[IRQ_DA8XX_EVTOUT2] = 7,
[IRQ_DA8XX_EVTOUT3] = 7,
[IRQ_DA8XX_EVTOUT4] = 7,
[IRQ_DA8XX_EVTOUT5] = 7,
[IRQ_DA8XX_EVTOUT6] = 7,
[IRQ_DA8XX_EVTOUT7] = 7,
[IRQ_DA8XX_CCINT0] = 7,
[IRQ_DA8XX_CCERRINT] = 7,
[IRQ_DA8XX_TCERRINT0] = 7,
[IRQ_DA8XX_AEMIFINT] = 7,
[IRQ_DA8XX_I2CINT0] = 7,
[IRQ_DA8XX_MMCSDINT0] = 7,
[IRQ_DA8XX_MMCSDINT1] = 7,
[IRQ_DA8XX_ALLINT0] = 7,
[IRQ_DA8XX_RTC] = 7,
[IRQ_DA8XX_SPINT0] = 7,
[IRQ_DA8XX_TINT12_0] = 7,
[IRQ_DA8XX_TINT34_0] = 7,
[IRQ_DA8XX_TINT12_1] = 7,
[IRQ_DA8XX_TINT34_1] = 7,
[IRQ_DA8XX_UARTINT0] = 7,
[IRQ_DA8XX_KEYMGRINT] = 7,
[IRQ_DA830_MPUERR] = 7,
[IRQ_DA8XX_CHIPINT0] = 7,
[IRQ_DA8XX_CHIPINT1] = 7,
[IRQ_DA8XX_CHIPINT2] = 7,
[IRQ_DA8XX_CHIPINT3] = 7,
[IRQ_DA8XX_TCERRINT1] = 7,
[IRQ_DA8XX_C0_RX_THRESH_PULSE] = 7,
[IRQ_DA8XX_C0_RX_PULSE] = 7,
[IRQ_DA8XX_C0_TX_PULSE] = 7,
[IRQ_DA8XX_C0_MISC_PULSE] = 7,
[IRQ_DA8XX_C1_RX_THRESH_PULSE] = 7,
[IRQ_DA8XX_C1_RX_PULSE] = 7,
[IRQ_DA8XX_C1_TX_PULSE] = 7,
[IRQ_DA8XX_C1_MISC_PULSE] = 7,
[IRQ_DA8XX_MEMERR] = 7,
[IRQ_DA8XX_GPIO0] = 7,
[IRQ_DA8XX_GPIO1] = 7,
[IRQ_DA8XX_GPIO2] = 7,
[IRQ_DA8XX_GPIO3] = 7,
[IRQ_DA8XX_GPIO4] = 7,
[IRQ_DA8XX_GPIO5] = 7,
[IRQ_DA8XX_GPIO6] = 7,
[IRQ_DA8XX_GPIO7] = 7,
[IRQ_DA8XX_GPIO8] = 7,
[IRQ_DA8XX_I2CINT1] = 7,
[IRQ_DA8XX_LCDINT] = 7,
[IRQ_DA8XX_UARTINT1] = 7,
[IRQ_DA8XX_MCASPINT] = 7,
[IRQ_DA8XX_ALLINT1] = 7,
[IRQ_DA8XX_SPINT1] = 7,
[IRQ_DA8XX_UHPI_INT1] = 7,
[IRQ_DA8XX_USB_INT] = 7,
[IRQ_DA8XX_IRQN] = 7,
[IRQ_DA8XX_RWAKEUP] = 7,
[IRQ_DA8XX_UARTINT2] = 7,
[IRQ_DA8XX_DFTSSINT] = 7,
[IRQ_DA8XX_EHRPWM0] = 7,
[IRQ_DA8XX_EHRPWM0TZ] = 7,
[IRQ_DA8XX_EHRPWM1] = 7,
[IRQ_DA8XX_EHRPWM1TZ] = 7,
[IRQ_DA830_EHRPWM2] = 7,
[IRQ_DA830_EHRPWM2TZ] = 7,
[IRQ_DA8XX_ECAP0] = 7,
[IRQ_DA8XX_ECAP1] = 7,
[IRQ_DA8XX_ECAP2] = 7,
[IRQ_DA830_EQEP0] = 7,
[IRQ_DA830_EQEP1] = 7,
[IRQ_DA830_T12CMPINT0_0] = 7,
[IRQ_DA830_T12CMPINT1_0] = 7,
[IRQ_DA830_T12CMPINT2_0] = 7,
[IRQ_DA830_T12CMPINT3_0] = 7,
[IRQ_DA830_T12CMPINT4_0] = 7,
[IRQ_DA830_T12CMPINT5_0] = 7,
[IRQ_DA830_T12CMPINT6_0] = 7,
[IRQ_DA830_T12CMPINT7_0] = 7,
[IRQ_DA830_T12CMPINT0_1] = 7,
[IRQ_DA830_T12CMPINT1_1] = 7,
[IRQ_DA830_T12CMPINT2_1] = 7,
[IRQ_DA830_T12CMPINT3_1] = 7,
[IRQ_DA830_T12CMPINT4_1] = 7,
[IRQ_DA830_T12CMPINT5_1] = 7,
[IRQ_DA830_T12CMPINT6_1] = 7,
[IRQ_DA830_T12CMPINT7_1] = 7,
[IRQ_DA8XX_ARMCLKSTOPREQ] = 7,
};
static struct map_desc da830_io_desc[] = {
{
.virtual = IO_VIRT,
.pfn = __phys_to_pfn(IO_PHYS),
.length = IO_SIZE,
.type = MT_DEVICE
},
{
.virtual = DA8XX_CP_INTC_VIRT,
.pfn = __phys_to_pfn(DA8XX_CP_INTC_BASE),
.length = DA8XX_CP_INTC_SIZE,
.type = MT_DEVICE
},
};
static u32 da830_psc_bases[] = { DA8XX_PSC0_BASE, DA8XX_PSC1_BASE };
/* Contents of JTAG ID register used to identify exact cpu type */
static struct davinci_id da830_ids[] = {
{
.variant = 0x0,
.part_no = 0xb7df,
.manufacturer = 0x017, /* 0x02f >> 1 */
.cpu_id = DAVINCI_CPU_ID_DA830,
.name = "da830/omap-l137 rev1.0",
},
{
.variant = 0x8,
.part_no = 0xb7df,
.manufacturer = 0x017,
.cpu_id = DAVINCI_CPU_ID_DA830,
.name = "da830/omap-l137 rev1.1",
},
{
.variant = 0x9,
.part_no = 0xb7df,
.manufacturer = 0x017,
.cpu_id = DAVINCI_CPU_ID_DA830,
.name = "da830/omap-l137 rev2.0",
},
};
static struct davinci_timer_instance da830_timer_instance[2] = {
{
.base = DA8XX_TIMER64P0_BASE,
.bottom_irq = IRQ_DA8XX_TINT12_0,
.top_irq = IRQ_DA8XX_TINT34_0,
.cmp_off = DA830_CMP12_0,
.cmp_irq = IRQ_DA830_T12CMPINT0_0,
},
{
.base = DA8XX_TIMER64P1_BASE,
.bottom_irq = IRQ_DA8XX_TINT12_1,
.top_irq = IRQ_DA8XX_TINT34_1,
.cmp_off = DA830_CMP12_0,
.cmp_irq = IRQ_DA830_T12CMPINT0_1,
},
};
/*
* T0_BOT: Timer 0, bottom : Used for clock_event & clocksource
* T0_TOP: Timer 0, top : Used by DSP
* T1_BOT, T1_TOP: Timer 1, bottom & top: Used for watchdog timer
*/
static struct davinci_timer_info da830_timer_info = {
.timers = da830_timer_instance,
.clockevent_id = T0_BOT,
.clocksource_id = T0_BOT,
};
static struct davinci_soc_info davinci_soc_info_da830 = {
.io_desc = da830_io_desc,
.io_desc_num = ARRAY_SIZE(da830_io_desc),
.jtag_id_reg = DA8XX_SYSCFG0_BASE + DA8XX_JTAG_ID_REG,
.ids = da830_ids,
.ids_num = ARRAY_SIZE(da830_ids),
.cpu_clks = da830_clks,
.psc_bases = da830_psc_bases,
.psc_bases_num = ARRAY_SIZE(da830_psc_bases),
.pinmux_base = DA8XX_SYSCFG0_BASE + 0x120,
.pinmux_pins = da830_pins,
.pinmux_pins_num = ARRAY_SIZE(da830_pins),
.intc_base = DA8XX_CP_INTC_BASE,
.intc_type = DAVINCI_INTC_TYPE_CP_INTC,
.intc_irq_prios = da830_default_priorities,
.intc_irq_num = DA830_N_CP_INTC_IRQ,
.timer_info = &da830_timer_info,
.gpio_type = GPIO_TYPE_DAVINCI,
.gpio_base = DA8XX_GPIO_BASE,
.gpio_num = 128,
.gpio_irq = IRQ_DA8XX_GPIO0,
.serial_dev = &da8xx_serial_device,
.emac_pdata = &da8xx_emac_pdata,
.reset_device = &da8xx_wdt_device,
};
void __init da830_init(void)
{
davinci_common_init(&davinci_soc_info_da830);
da8xx_syscfg0_base = ioremap(DA8XX_SYSCFG0_BASE, SZ_4K);
WARN(!da8xx_syscfg0_base, "Unable to map syscfg0 module");
}
| gpl-2.0 |
DutchDanny/Holiday-ICE | drivers/cpufreq/speedstep-smi.c | 3073 | 11450 | /*
* Intel SpeedStep SMI driver.
*
* (C) 2003 Hiroshi Miura <miura@da-cha.org>
*
* Licensed under the terms of the GNU GPL License version 2.
*
*/
/*********************************************************************
* SPEEDSTEP - DEFINITIONS *
*********************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <asm/ist.h>
#include "speedstep-lib.h"
/* speedstep system management interface port/command.
*
* These parameters are obtained from the IST-SMI BIOS call.
* If the user supplies them as module parameters, those are used instead.
*
*/
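/* For example (a hypothetical invocation, not taken from this file),
 * the BIOS-reported values can be overridden at load time through the
 * module parameters declared at the end of this file:
 *   modprobe speedstep-smi smi_port=0xb2 smi_cmd=0x82
 * A value of 0 (the default) means "use what the IST BIOS call gives".
 */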
static int smi_port;
static int smi_cmd;
static unsigned int smi_sig;
/* info about the processor */
static enum speedstep_processor speedstep_processor;
/*
* There are only two frequency states for each processor. Values
* are in kHz for the time being.
*/
static struct cpufreq_frequency_table speedstep_freqs[] = {
{SPEEDSTEP_HIGH, 0},
{SPEEDSTEP_LOW, 0},
{0, CPUFREQ_TABLE_END},
};
#define GET_SPEEDSTEP_OWNER 0
#define GET_SPEEDSTEP_STATE 1
#define SET_SPEEDSTEP_STATE 2
#define GET_SPEEDSTEP_FREQS 4
/* how often shall the SMI call be tried if it failed, e.g. because
* of DMA activity going on? */
#define SMI_TRIES 5
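/* (With the mdelay(retry * 50) backoff used in speedstep_set_state()
 * below, a fully exhausted retry sequence adds roughly 750 ms of
 * delay in total.) */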
/**
* speedstep_smi_ownership - obtain ownership of the SMI interface
*/
static int speedstep_smi_ownership(void)
{
u32 command, result, magic, dummy;
u32 function = GET_SPEEDSTEP_OWNER;
unsigned char magic_data[] = "Copyright (c) 1999 Intel Corporation";
command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff);
magic = virt_to_phys(magic_data);
pr_debug("trying to obtain ownership with command %x at port %x\n",
command, smi_port);
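	/* The SMI is triggered by writing the command byte to the SMI I/O
	 * port; %ebp is saved and restored by hand because it cannot be
	 * named in the clobber list while it serves as the frame pointer
	 * (a common pattern for such BIOS calls, stated here as an
	 * assumption rather than documented behaviour). */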
__asm__ __volatile__(
"push %%ebp\n"
"out %%al, (%%dx)\n"
"pop %%ebp\n"
: "=D" (result),
"=a" (dummy), "=b" (dummy), "=c" (dummy), "=d" (dummy),
"=S" (dummy)
: "a" (command), "b" (function), "c" (0), "d" (smi_port),
"D" (0), "S" (magic)
: "memory"
);
pr_debug("result is %x\n", result);
return result;
}
/**
* speedstep_smi_get_freqs - get SpeedStep preferred & current freq.
* @low: the low frequency value is placed here
* @high: the high frequency value is placed here
*
* Only available on later SpeedStep-enabled systems, returns false results or
* even hangs [cf. bugme.osdl.org # 1422] on earlier systems. Empirical testing
* shows that the latter occurs if !(ist_info.event & 0xFFFF).
*/
static int speedstep_smi_get_freqs(unsigned int *low, unsigned int *high)
{
u32 command, result = 0, edi, high_mhz, low_mhz, dummy;
u32 state = 0;
u32 function = GET_SPEEDSTEP_FREQS;
if (!(ist_info.event & 0xFFFF)) {
pr_debug("bug #1422 -- can't read freqs from BIOS\n");
return -ENODEV;
}
command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff);
pr_debug("trying to determine frequencies with command %x at port %x\n",
command, smi_port);
__asm__ __volatile__(
"push %%ebp\n"
"out %%al, (%%dx)\n"
"pop %%ebp"
: "=a" (result),
"=b" (high_mhz),
"=c" (low_mhz),
"=d" (state), "=D" (edi), "=S" (dummy)
: "a" (command),
"b" (function),
"c" (state),
"d" (smi_port), "S" (0), "D" (0)
);
pr_debug("result %x, low_freq %u, high_freq %u\n",
result, low_mhz, high_mhz);
/* abort if results are obviously incorrect... */
if ((high_mhz + low_mhz) < 600)
return -EINVAL;
*high = high_mhz * 1000;
*low = low_mhz * 1000;
return result;
}
/**
* speedstep_get_state - read the current SpeedStep state
* Returns the processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH).
*
*/
static int speedstep_get_state(void)
{
u32 function = GET_SPEEDSTEP_STATE;
u32 result, state, edi, command, dummy;
command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff);
pr_debug("trying to determine current setting with command %x "
"at port %x\n", command, smi_port);
__asm__ __volatile__(
"push %%ebp\n"
"out %%al, (%%dx)\n"
"pop %%ebp\n"
: "=a" (result),
"=b" (state), "=D" (edi),
"=c" (dummy), "=d" (dummy), "=S" (dummy)
: "a" (command), "b" (function), "c" (0),
"d" (smi_port), "S" (0), "D" (0)
);
pr_debug("state is %x, result is %x\n", state, result);
return state & 1;
}
/**
* speedstep_set_state - set the SpeedStep state
* @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH)
*
*/
static void speedstep_set_state(unsigned int state)
{
unsigned int result = 0, command, new_state, dummy;
unsigned long flags;
unsigned int function = SET_SPEEDSTEP_STATE;
unsigned int retry = 0;
if (state > 0x1)
return;
/* Disable IRQs */
local_irq_save(flags);
command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff);
pr_debug("trying to set frequency to state %u "
"with command %x at port %x\n",
state, command, smi_port);
do {
if (retry) {
pr_debug("retry %u, previous result %u, waiting...\n",
retry, result);
mdelay(retry * 50);
}
retry++;
__asm__ __volatile__(
"push %%ebp\n"
"out %%al, (%%dx)\n"
"pop %%ebp"
: "=b" (new_state), "=D" (result),
"=c" (dummy), "=a" (dummy),
"=d" (dummy), "=S" (dummy)
: "a" (command), "b" (function), "c" (state),
"d" (smi_port), "S" (0), "D" (0)
);
} while ((new_state != state) && (retry <= SMI_TRIES));
/* enable IRQs */
local_irq_restore(flags);
if (new_state == state)
pr_debug("change to %u MHz succeeded after %u tries "
"with result %u\n",
(speedstep_freqs[new_state].frequency / 1000),
retry, result);
else
printk(KERN_ERR "cpufreq: change to state %u "
"failed with new_state %u and result %u\n",
state, new_state, result);
return;
}
/**
* speedstep_target - set a new CPUFreq policy
* @policy: new policy
* @target_freq: new freq
* @relation: CPUFREQ_RELATION_* hint on how target_freq is to be met
*
* Sets a new CPUFreq policy/freq.
*/
static int speedstep_target(struct cpufreq_policy *policy,
unsigned int target_freq, unsigned int relation)
{
unsigned int newstate = 0;
struct cpufreq_freqs freqs;
if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0],
target_freq, relation, &newstate))
return -EINVAL;
freqs.old = speedstep_freqs[speedstep_get_state()].frequency;
freqs.new = speedstep_freqs[newstate].frequency;
freqs.cpu = 0; /* speedstep.c is UP only driver */
if (freqs.old == freqs.new)
return 0;
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
speedstep_set_state(newstate);
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
return 0;
}
/**
* speedstep_verify - verifies a new CPUFreq policy
* @policy: new policy
*
* Limit must be within speedstep_low_freq and speedstep_high_freq, with
* at least one border included.
*/
static int speedstep_verify(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]);
}
static int speedstep_cpu_init(struct cpufreq_policy *policy)
{
int result;
unsigned int speed, state;
unsigned int *low, *high;
/* capability check */
if (policy->cpu != 0)
return -ENODEV;
result = speedstep_smi_ownership();
if (result) {
pr_debug("fails in acquiring ownership of a SMI interface.\n");
return -EINVAL;
}
/* detect low and high frequency */
low = &speedstep_freqs[SPEEDSTEP_LOW].frequency;
high = &speedstep_freqs[SPEEDSTEP_HIGH].frequency;
result = speedstep_smi_get_freqs(low, high);
if (result) {
/* fall back to the speedstep_lib.c detection mechanism:
* try both states out */
pr_debug("could not detect low and high frequencies "
"by SMI call.\n");
result = speedstep_get_freqs(speedstep_processor,
low, high,
NULL,
&speedstep_set_state);
if (result) {
pr_debug("could not detect two different speeds"
" -- aborting.\n");
return result;
} else
pr_debug("workaround worked.\n");
}
/* get current speed setting */
state = speedstep_get_state();
speed = speedstep_freqs[state].frequency;
pr_debug("currently at %s speed setting - %i MHz\n",
(speed == speedstep_freqs[SPEEDSTEP_LOW].frequency)
? "low" : "high",
(speed / 1000));
/* cpuinfo and default policy values */
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
policy->cur = speed;
result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs);
if (result)
return result;
cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu);
return 0;
}
static int speedstep_cpu_exit(struct cpufreq_policy *policy)
{
cpufreq_frequency_table_put_attr(policy->cpu);
return 0;
}
static unsigned int speedstep_get(unsigned int cpu)
{
if (cpu)
return -ENODEV;
return speedstep_get_frequency(speedstep_processor);
}
static int speedstep_resume(struct cpufreq_policy *policy)
{
int result = speedstep_smi_ownership();
if (result)
pr_debug("fails in re-acquiring ownership of a SMI interface.\n");
return result;
}
static struct freq_attr *speedstep_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
static struct cpufreq_driver speedstep_driver = {
.name = "speedstep-smi",
.verify = speedstep_verify,
.target = speedstep_target,
.init = speedstep_cpu_init,
.exit = speedstep_cpu_exit,
.get = speedstep_get,
.resume = speedstep_resume,
.owner = THIS_MODULE,
.attr = speedstep_attr,
};
/**
* speedstep_init - initializes the SpeedStep CPUFreq driver
*
* Initializes the SpeedStep support. Returns -ENODEV on unsupported
* BIOS, -EINVAL on problems during initialization, and zero on
* success.
*/
static int __init speedstep_init(void)
{
speedstep_processor = speedstep_detect_processor();
switch (speedstep_processor) {
case SPEEDSTEP_CPU_PIII_T:
case SPEEDSTEP_CPU_PIII_C:
case SPEEDSTEP_CPU_PIII_C_EARLY:
break;
default:
speedstep_processor = 0;
}
if (!speedstep_processor) {
pr_debug("No supported Intel CPU detected.\n");
return -ENODEV;
}
pr_debug("signature:0x%.8ulx, command:0x%.8ulx, "
"event:0x%.8ulx, perf_level:0x%.8ulx.\n",
ist_info.signature, ist_info.command,
ist_info.event, ist_info.perf_level);
/* Error out if there is no IST-SMI BIOS and no module parameters were
given; the signature 'ISGE' stands for 'Intel Speedstep Gate E' */
if ((ist_info.signature != 0x47534943) && (
(smi_port == 0) || (smi_cmd == 0)))
return -ENODEV;
if (smi_sig == 1)
smi_sig = 0x47534943;
else
smi_sig = ist_info.signature;
/* set up smi_port from the module parameter or the BIOS */
if ((smi_port > 0xff) || (smi_port < 0))
return -EINVAL;
else if (smi_port == 0)
smi_port = ist_info.command & 0xff;
if ((smi_cmd > 0xff) || (smi_cmd < 0))
return -EINVAL;
else if (smi_cmd == 0)
smi_cmd = (ist_info.command >> 16) & 0xff;
return cpufreq_register_driver(&speedstep_driver);
}
/**
* speedstep_exit - unregisters SpeedStep support
*
* Unregisters SpeedStep support.
*/
static void __exit speedstep_exit(void)
{
cpufreq_unregister_driver(&speedstep_driver);
}
module_param(smi_port, int, 0444);
module_param(smi_cmd, int, 0444);
module_param(smi_sig, uint, 0444);
MODULE_PARM_DESC(smi_port, "Override the BIOS-given IST port with this value "
"-- Intel's default setting is 0xb2");
MODULE_PARM_DESC(smi_cmd, "Override the BIOS-given IST command with this value "
"-- Intel's default setting is 0x82");
MODULE_PARM_DESC(smi_sig, "Set to 1 to fake the IST signature when using the "
"SMI interface.");
MODULE_AUTHOR("Hiroshi Miura");
MODULE_DESCRIPTION("Speedstep driver for IST applet SMI interface.");
MODULE_LICENSE("GPL");
module_init(speedstep_init);
module_exit(speedstep_exit);
| gpl-2.0 |
samm-git/alcatel_ot993D_kernel | drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_tkip.c | 3585 | 20238 | /*
* Host AP crypt: host-based TKIP encryption implementation for Host AP driver
*
* Copyright (c) 2003-2004, Jouni Malinen <jkmaline@cc.hut.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation. See README and COPYING for
* more details.
*/
//#include <linux/config.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/if_arp.h>
#include <asm/string.h>
#include "ieee80211.h"
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/crc32.h>
MODULE_AUTHOR("Jouni Malinen");
MODULE_DESCRIPTION("Host AP crypt: TKIP");
MODULE_LICENSE("GPL");
struct ieee80211_tkip_data {
#define TKIP_KEY_LEN 32
u8 key[TKIP_KEY_LEN];
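	/* Presumably the standard TKIP key layout: a 16-byte temporal key
	 * followed by the 8-byte TX and 8-byte RX Michael MIC keys, the
	 * latter consumed through the michael_mic transforms below. */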
int key_set;
u32 tx_iv32;
u16 tx_iv16;
u16 tx_ttak[5];
int tx_phase1_done;
u32 rx_iv32;
u16 rx_iv16;
u16 rx_ttak[5];
int rx_phase1_done;
u32 rx_iv32_new;
u16 rx_iv16_new;
u32 dot11RSNAStatsTKIPReplays;
u32 dot11RSNAStatsTKIPICVErrors;
u32 dot11RSNAStatsTKIPLocalMICFailures;
int key_idx;
struct crypto_blkcipher *rx_tfm_arc4;
struct crypto_hash *rx_tfm_michael;
struct crypto_blkcipher *tx_tfm_arc4;
struct crypto_hash *tx_tfm_michael;
struct crypto_tfm *tfm_arc4;
struct crypto_tfm *tfm_michael;
/* scratch buffers for virt_to_page() (crypto API) */
u8 rx_hdr[16], tx_hdr[16];
};
static void * ieee80211_tkip_init(int key_idx)
{
struct ieee80211_tkip_data *priv;
priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
if (priv == NULL)
goto fail;
priv->key_idx = key_idx;
priv->tx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->tx_tfm_arc4)) {
printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
"crypto API arc4\n");
priv->tx_tfm_arc4 = NULL;
goto fail;
}
priv->tx_tfm_michael = crypto_alloc_hash("michael_mic", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->tx_tfm_michael)) {
printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
"crypto API michael_mic\n");
priv->tx_tfm_michael = NULL;
goto fail;
}
priv->rx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->rx_tfm_arc4)) {
printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
"crypto API arc4\n");
priv->rx_tfm_arc4 = NULL;
goto fail;
}
priv->rx_tfm_michael = crypto_alloc_hash("michael_mic", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->rx_tfm_michael)) {
printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
"crypto API michael_mic\n");
priv->rx_tfm_michael = NULL;
goto fail;
}
return priv;
fail:
if (priv) {
if (priv->tx_tfm_michael)
crypto_free_hash(priv->tx_tfm_michael);
if (priv->tx_tfm_arc4)
crypto_free_blkcipher(priv->tx_tfm_arc4);
if (priv->rx_tfm_michael)
crypto_free_hash(priv->rx_tfm_michael);
if (priv->rx_tfm_arc4)
crypto_free_blkcipher(priv->rx_tfm_arc4);
kfree(priv);
}
return NULL;
}
static void ieee80211_tkip_deinit(void *priv)
{
struct ieee80211_tkip_data *_priv = priv;
if (_priv) {
if (_priv->tx_tfm_michael)
crypto_free_hash(_priv->tx_tfm_michael);
if (_priv->tx_tfm_arc4)
crypto_free_blkcipher(_priv->tx_tfm_arc4);
if (_priv->rx_tfm_michael)
crypto_free_hash(_priv->rx_tfm_michael);
if (_priv->rx_tfm_arc4)
crypto_free_blkcipher(_priv->rx_tfm_arc4);
}
kfree(priv);
}
static inline u16 RotR1(u16 val)
{
return (val >> 1) | (val << 15);
}
static inline u8 Lo8(u16 val)
{
return val & 0xff;
}
static inline u8 Hi8(u16 val)
{
return val >> 8;
}
static inline u16 Lo16(u32 val)
{
return val & 0xffff;
}
static inline u16 Hi16(u32 val)
{
return val >> 16;
}
static inline u16 Mk16(u8 hi, u8 lo)
{
return lo | (((u16) hi) << 8);
}
static inline u16 Mk16_le(u16 *v)
{
return le16_to_cpu(*v);
}
static const u16 Sbox[256] =
{
0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154,
0x6050, 0x0203, 0xCEA9, 0x567D, 0xE719, 0xB562, 0x4DE6, 0xEC9A,
0x8F45, 0x1F9D, 0x8940, 0xFA87, 0xEF15, 0xB2EB, 0x8EC9, 0xFB0B,
0x41EC, 0xB367, 0x5FFD, 0x45EA, 0x23BF, 0x53F7, 0xE496, 0x9B5B,
0x75C2, 0xE11C, 0x3DAE, 0x4C6A, 0x6C5A, 0x7E41, 0xF502, 0x834F,
0x685C, 0x51F4, 0xD134, 0xF908, 0xE293, 0xAB73, 0x6253, 0x2A3F,
0x080C, 0x9552, 0x4665, 0x9D5E, 0x3028, 0x37A1, 0x0A0F, 0x2FB5,
0x0E09, 0x2436, 0x1B9B, 0xDF3D, 0xCD26, 0x4E69, 0x7FCD, 0xEA9F,
0x121B, 0x1D9E, 0x5874, 0x342E, 0x362D, 0xDCB2, 0xB4EE, 0x5BFB,
0xA4F6, 0x764D, 0xB761, 0x7DCE, 0x527B, 0xDD3E, 0x5E71, 0x1397,
0xA6F5, 0xB968, 0x0000, 0xC12C, 0x4060, 0xE31F, 0x79C8, 0xB6ED,
0xD4BE, 0x8D46, 0x67D9, 0x724B, 0x94DE, 0x98D4, 0xB0E8, 0x854A,
0xBB6B, 0xC52A, 0x4FE5, 0xED16, 0x86C5, 0x9AD7, 0x6655, 0x1194,
0x8ACF, 0xE910, 0x0406, 0xFE81, 0xA0F0, 0x7844, 0x25BA, 0x4BE3,
0xA2F3, 0x5DFE, 0x80C0, 0x058A, 0x3FAD, 0x21BC, 0x7048, 0xF104,
0x63DF, 0x77C1, 0xAF75, 0x4263, 0x2030, 0xE51A, 0xFD0E, 0xBF6D,
0x814C, 0x1814, 0x2635, 0xC32F, 0xBEE1, 0x35A2, 0x88CC, 0x2E39,
0x9357, 0x55F2, 0xFC82, 0x7A47, 0xC8AC, 0xBAE7, 0x322B, 0xE695,
0xC0A0, 0x1998, 0x9ED1, 0xA37F, 0x4466, 0x547E, 0x3BAB, 0x0B83,
0x8CCA, 0xC729, 0x6BD3, 0x283C, 0xA779, 0xBCE2, 0x161D, 0xAD76,
0xDB3B, 0x6456, 0x744E, 0x141E, 0x92DB, 0x0C0A, 0x486C, 0xB8E4,
0x9F5D, 0xBD6E, 0x43EF, 0xC4A6, 0x39A8, 0x31A4, 0xD337, 0xF28B,
0xD532, 0x8B43, 0x6E59, 0xDAB7, 0x018C, 0xB164, 0x9CD2, 0x49E0,
0xD8B4, 0xACFA, 0xF307, 0xCF25, 0xCAAF, 0xF48E, 0x47E9, 0x1018,
0x6FD5, 0xF088, 0x4A6F, 0x5C72, 0x3824, 0x57F1, 0x73C7, 0x9751,
0xCB23, 0xA17C, 0xE89C, 0x3E21, 0x96DD, 0x61DC, 0x0D86, 0x0F85,
0xE090, 0x7C42, 0x71C4, 0xCCAA, 0x90D8, 0x0605, 0xF701, 0x1C12,
0xC2A3, 0x6A5F, 0xAEF9, 0x69D0, 0x1791, 0x9958, 0x3A27, 0x27B9,
0xD938, 0xEB13, 0x2BB3, 0x2233, 0xD2BB, 0xA970, 0x0789, 0x33A7,
0x2DB6, 0x3C22, 0x1592, 0xC920, 0x8749, 0xAAFF, 0x5078, 0xA57A,
0x038F, 0x59F8, 0x0980, 0x1A17, 0x65DA, 0xD731, 0x84C6, 0xD0B8,
0x82C3, 0x29B0, 0x5A77, 0x1E11, 0x7BCB, 0xA8FC, 0x6DD6, 0x2C3A,
};
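/* Each 16-bit entry above packs the two byte-wise S-box outputs that
 * the TKIP mixing functions need; _S_() below combines a lookup on the
 * low byte with a byte-swapped lookup on the high byte. The table
 * values themselves come from the TKIP specification and are only
 * used, not derived, here. */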
static inline u16 _S_(u16 v)
{
u16 t = Sbox[Hi8(v)];
return Sbox[Lo8(v)] ^ ((t << 8) | (t >> 8));
}
#define PHASE1_LOOP_COUNT 8
static void tkip_mixing_phase1(u16 *TTAK, const u8 *TK, const u8 *TA, u32 IV32)
{
int i, j;
/* Initialize the 80-bit TTAK from TSC (IV32) and TA[0..5] */
TTAK[0] = Lo16(IV32);
TTAK[1] = Hi16(IV32);
TTAK[2] = Mk16(TA[1], TA[0]);
TTAK[3] = Mk16(TA[3], TA[2]);
TTAK[4] = Mk16(TA[5], TA[4]);
for (i = 0; i < PHASE1_LOOP_COUNT; i++) {
j = 2 * (i & 1);
TTAK[0] += _S_(TTAK[4] ^ Mk16(TK[1 + j], TK[0 + j]));
TTAK[1] += _S_(TTAK[0] ^ Mk16(TK[5 + j], TK[4 + j]));
TTAK[2] += _S_(TTAK[1] ^ Mk16(TK[9 + j], TK[8 + j]));
TTAK[3] += _S_(TTAK[2] ^ Mk16(TK[13 + j], TK[12 + j]));
TTAK[4] += _S_(TTAK[3] ^ Mk16(TK[1 + j], TK[0 + j])) + i;
}
}
static void tkip_mixing_phase2(u8 *WEPSeed, const u8 *TK, const u16 *TTAK,
u16 IV16)
{
/* Make temporary area overlap WEP seed so that the final copy can be
* avoided on little endian hosts. */
u16 *PPK = (u16 *) &WEPSeed[4];
/* Step 1 - make copy of TTAK and bring in TSC */
PPK[0] = TTAK[0];
PPK[1] = TTAK[1];
PPK[2] = TTAK[2];
PPK[3] = TTAK[3];
PPK[4] = TTAK[4];
PPK[5] = TTAK[4] + IV16;
/* Step 2 - 96-bit bijective mixing using S-box */
PPK[0] += _S_(PPK[5] ^ Mk16_le((u16 *) &TK[0]));
PPK[1] += _S_(PPK[0] ^ Mk16_le((u16 *) &TK[2]));
PPK[2] += _S_(PPK[1] ^ Mk16_le((u16 *) &TK[4]));
PPK[3] += _S_(PPK[2] ^ Mk16_le((u16 *) &TK[6]));
PPK[4] += _S_(PPK[3] ^ Mk16_le((u16 *) &TK[8]));
PPK[5] += _S_(PPK[4] ^ Mk16_le((u16 *) &TK[10]));
PPK[0] += RotR1(PPK[5] ^ Mk16_le((u16 *) &TK[12]));
PPK[1] += RotR1(PPK[0] ^ Mk16_le((u16 *) &TK[14]));
PPK[2] += RotR1(PPK[1]);
PPK[3] += RotR1(PPK[2]);
PPK[4] += RotR1(PPK[3]);
PPK[5] += RotR1(PPK[4]);
/* Step 3 - bring in last of TK bits, assign 24-bit WEP IV value
* WEPSeed[0..2] is transmitted as WEP IV */
WEPSeed[0] = Hi8(IV16);
WEPSeed[1] = (Hi8(IV16) | 0x20) & 0x7F;
WEPSeed[2] = Lo8(IV16);
WEPSeed[3] = Lo8((PPK[5] ^ Mk16_le((u16 *) &TK[0])) >> 1);
#ifdef __BIG_ENDIAN
{
int i;
for (i = 0; i < 6; i++)
PPK[i] = (PPK[i] << 8) | (PPK[i] >> 8);
}
#endif
}
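/*
* Editor's sketch (not part of the original driver): how the two mixing
* phases above combine into the 16-byte per-packet RC4 seed. Phase 1
* depends only on the temporal key, the transmitter address and IV32,
* so the driver caches its result (tx_ttak/rx_ttak) across the 65536
* packets that share one IV32; phase 2 must run for every IV16. The
* helper name and parameters below are illustrative assumptions.
*/
static inline void tkip_derive_rc4_seed(u8 *wepseed /* 16 bytes */,
const u8 *tk, const u8 *ta,
u32 iv32, u16 iv16)
{
u16 ttak[5]; /* 80-bit phase-1 output */
tkip_mixing_phase1(ttak, tk, ta, iv32); /* cacheable per IV32 */
tkip_mixing_phase2(wepseed, tk, ttak, iv16); /* per packet */
}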
static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
struct ieee80211_tkip_data *tkey = priv;
struct blkcipher_desc desc = {.tfm = tkey->tx_tfm_arc4};
int len;
u8 *pos;
struct ieee80211_hdr_4addr *hdr;
u8 rc4key[16], *icv;
u32 crc;
struct scatterlist sg;
int ret;
ret = 0;
if (skb_headroom(skb) < 8 || skb_tailroom(skb) < 4 ||
skb->len < hdr_len)
return -1;
hdr = (struct ieee80211_hdr_4addr *)skb->data;
if (!tkey->tx_phase1_done) {
tkip_mixing_phase1(tkey->tx_ttak, tkey->key, hdr->addr2,
tkey->tx_iv32);
tkey->tx_phase1_done = 1;
}
tkip_mixing_phase2(rc4key, tkey->key, tkey->tx_ttak, tkey->tx_iv16);
len = skb->len - hdr_len;
pos = skb_push(skb, 8);
memmove(pos, pos + 8, hdr_len);
pos += hdr_len;
*pos++ = rc4key[0];
*pos++ = rc4key[1];
*pos++ = rc4key[2];
*pos++ = (tkey->key_idx << 6) | (1 << 5) /* Ext IV included */;
*pos++ = tkey->tx_iv32 & 0xff;
*pos++ = (tkey->tx_iv32 >> 8) & 0xff;
*pos++ = (tkey->tx_iv32 >> 16) & 0xff;
*pos++ = (tkey->tx_iv32 >> 24) & 0xff;
icv = skb_put(skb, 4);
crc = ~crc32_le(~0, pos, len);
icv[0] = crc;
icv[1] = crc >> 8;
icv[2] = crc >> 16;
icv[3] = crc >> 24;
crypto_blkcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
sg_init_one(&sg, pos, len + 4);
ret = crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
tkey->tx_iv16++;
if (tkey->tx_iv16 == 0) {
tkey->tx_phase1_done = 0;
tkey->tx_iv32++;
}
return ret;
}
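/*
* Editor's note (reconstructed from the code above, not normative): the
* 8 bytes inserted after the 802.11 header are laid out as
*
* [0] TSC1 = Hi8(IV16) (rc4key[0] from phase 2)
* [1] WEP seed byte = (TSC1 | 0x20) & 0x7f
* [2] TSC0 = Lo8(IV16)
* [3] key idx << 6, bit 5 = Extended IV present
* [4..7] IV32, least significant byte first
*
* which is why the decrypt path below recovers iv16 from bytes 0 and 2.
*/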
static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
struct ieee80211_tkip_data *tkey = priv;
struct blkcipher_desc desc = { .tfm = tkey->rx_tfm_arc4 };
u8 keyidx, *pos;
u32 iv32;
u16 iv16;
struct ieee80211_hdr_4addr *hdr;
u8 icv[4];
u32 crc;
struct scatterlist sg;
u8 rc4key[16];
int plen;
if (skb->len < hdr_len + 8 + 4)
return -1;
hdr = (struct ieee80211_hdr_4addr *)skb->data;
pos = skb->data + hdr_len;
keyidx = pos[3];
if (!(keyidx & (1 << 5))) {
if (net_ratelimit()) {
printk(KERN_DEBUG "TKIP: received packet without ExtIV"
" flag from %pM\n", hdr->addr2);
}
return -2;
}
keyidx >>= 6;
if (tkey->key_idx != keyidx) {
printk(KERN_DEBUG "TKIP: RX tkey->key_idx=%d frame "
"keyidx=%d priv=%p\n", tkey->key_idx, keyidx, priv);
return -6;
}
if (!tkey->key_set) {
if (net_ratelimit()) {
printk(KERN_DEBUG "TKIP: received packet from %pM"
" with keyid=%d that does not have a configured"
" key\n", hdr->addr2, keyidx);
}
return -3;
}
iv16 = (pos[0] << 8) | pos[2];
iv32 = pos[4] | (pos[5] << 8) | (pos[6] << 16) | (pos[7] << 24);
pos += 8;
if (iv32 < tkey->rx_iv32 ||
(iv32 == tkey->rx_iv32 && iv16 <= tkey->rx_iv16)) {
if (net_ratelimit()) {
printk(KERN_DEBUG "TKIP: replay detected: STA=%pM"
" previous TSC %08x%04x received TSC "
"%08x%04x\n", hdr->addr2,
tkey->rx_iv32, tkey->rx_iv16, iv32, iv16);
}
tkey->dot11RSNAStatsTKIPReplays++;
return -4;
}
if (iv32 != tkey->rx_iv32 || !tkey->rx_phase1_done) {
tkip_mixing_phase1(tkey->rx_ttak, tkey->key, hdr->addr2, iv32);
tkey->rx_phase1_done = 1;
}
tkip_mixing_phase2(rc4key, tkey->key, tkey->rx_ttak, iv16);
plen = skb->len - hdr_len - 12;
crypto_blkcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
sg_init_one(&sg, pos, plen + 4);
if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) {
if (net_ratelimit()) {
printk(KERN_DEBUG ": TKIP: failed to decrypt "
"received packet from %pM\n",
hdr->addr2);
}
return -7;
}
crc = ~crc32_le(~0, pos, plen);
icv[0] = crc;
icv[1] = crc >> 8;
icv[2] = crc >> 16;
icv[3] = crc >> 24;
if (memcmp(icv, pos + plen, 4) != 0) {
if (iv32 != tkey->rx_iv32) {
/* Previously cached Phase1 result was already lost, so
* it needs to be recalculated for the next packet. */
tkey->rx_phase1_done = 0;
}
if (net_ratelimit()) {
printk(KERN_DEBUG "TKIP: ICV error detected: STA="
"%pM\n", hdr->addr2);
}
tkey->dot11RSNAStatsTKIPICVErrors++;
return -5;
}
/* Update real counters only after Michael MIC verification has
* completed */
tkey->rx_iv32_new = iv32;
tkey->rx_iv16_new = iv16;
/* Remove IV and ICV */
memmove(skb->data + 8, skb->data, hdr_len);
skb_pull(skb, 8);
skb_trim(skb, skb->len - 4);
return keyidx;
}
static int michael_mic(struct crypto_hash *tfm_michael, u8 *key, u8 *hdr,
u8 *data, size_t data_len, u8 *mic)
{
struct hash_desc desc;
struct scatterlist sg[2];
if (tfm_michael == NULL) {
printk(KERN_WARNING "michael_mic: tfm_michael == NULL\n");
return -1;
}
sg_init_table(sg, 2);
sg_set_buf(&sg[0], hdr, 16);
sg_set_buf(&sg[1], data, data_len);
if (crypto_hash_setkey(tfm_michael, key, 8))
return -1;
desc.tfm = tfm_michael;
desc.flags = 0;
return crypto_hash_digest(&desc, sg, data_len + 16, mic);
}
static void michael_mic_hdr(struct sk_buff *skb, u8 *hdr)
{
struct ieee80211_hdr_4addr *hdr11;
hdr11 = (struct ieee80211_hdr_4addr *)skb->data;
switch (le16_to_cpu(hdr11->frame_ctl) &
(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) {
case IEEE80211_FCTL_TODS:
memcpy(hdr, hdr11->addr3, ETH_ALEN); /* DA */
memcpy(hdr + ETH_ALEN, hdr11->addr2, ETH_ALEN); /* SA */
break;
case IEEE80211_FCTL_FROMDS:
memcpy(hdr, hdr11->addr1, ETH_ALEN); /* DA */
memcpy(hdr + ETH_ALEN, hdr11->addr3, ETH_ALEN); /* SA */
break;
case IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS:
memcpy(hdr, hdr11->addr3, ETH_ALEN); /* DA */
memcpy(hdr + ETH_ALEN, hdr11->addr4, ETH_ALEN); /* SA */
break;
case 0:
memcpy(hdr, hdr11->addr1, ETH_ALEN); /* DA */
memcpy(hdr + ETH_ALEN, hdr11->addr2, ETH_ALEN); /* SA */
break;
}
hdr[12] = 0; /* priority */
hdr[13] = hdr[14] = hdr[15] = 0; /* reserved */
}
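/*
* Editor's note: the function above builds the 16-byte Michael MIC
* pseudo-header: DA (6 bytes), SA (6 bytes), a priority byte (patched
* with the QoS TID by the callers below when the frame carries a QoS
* control field), then 3 reserved zero bytes.
*/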
static int ieee80211_michael_mic_add(struct sk_buff *skb, int hdr_len, void *priv)
{
struct ieee80211_tkip_data *tkey = priv;
u8 *pos;
struct ieee80211_hdr_4addr *hdr;
hdr = (struct ieee80211_hdr_4addr *)skb->data;
if (skb_tailroom(skb) < 8 || skb->len < hdr_len) {
printk(KERN_DEBUG "Invalid packet for Michael MIC add "
"(tailroom=%d hdr_len=%d skb->len=%d)\n",
skb_tailroom(skb), hdr_len, skb->len);
return -1;
}
michael_mic_hdr(skb, tkey->tx_hdr);
// { david, 2006.9.1
// fix the wpa process with wmm enabled.
if (IEEE80211_QOS_HAS_SEQ(le16_to_cpu(hdr->frame_ctl))) {
tkey->tx_hdr[12] = *(skb->data + hdr_len - 2) & 0x07;
}
// }
pos = skb_put(skb, 8);
if (michael_mic(tkey->tx_tfm_michael, &tkey->key[16], tkey->tx_hdr,
skb->data + hdr_len, skb->len - 8 - hdr_len, pos))
return -1;
return 0;
}
static void ieee80211_michael_mic_failure(struct net_device *dev,
struct ieee80211_hdr_4addr *hdr,
int keyidx)
{
union iwreq_data wrqu;
struct iw_michaelmicfailure ev;
/* TODO: needed parameters: count, keyid, key type, TSC */
memset(&ev, 0, sizeof(ev));
ev.flags = keyidx & IW_MICFAILURE_KEY_ID;
if (hdr->addr1[0] & 0x01)
ev.flags |= IW_MICFAILURE_GROUP;
else
ev.flags |= IW_MICFAILURE_PAIRWISE;
ev.src_addr.sa_family = ARPHRD_ETHER;
memcpy(ev.src_addr.sa_data, hdr->addr2, ETH_ALEN);
memset(&wrqu, 0, sizeof(wrqu));
wrqu.data.length = sizeof(ev);
wireless_send_event(dev, IWEVMICHAELMICFAILURE, &wrqu, (char *) &ev);
}
static int ieee80211_michael_mic_verify(struct sk_buff *skb, int keyidx,
int hdr_len, void *priv)
{
struct ieee80211_tkip_data *tkey = priv;
u8 mic[8];
struct ieee80211_hdr_4addr *hdr;
hdr = (struct ieee80211_hdr_4addr *)skb->data;
if (!tkey->key_set)
return -1;
michael_mic_hdr(skb, tkey->rx_hdr);
// { david, 2006.9.1
// fix the wpa process with wmm enabled.
if (IEEE80211_QOS_HAS_SEQ(le16_to_cpu(hdr->frame_ctl))) {
tkey->rx_hdr[12] = *(skb->data + hdr_len - 2) & 0x07;
}
// }
if (michael_mic(tkey->rx_tfm_michael, &tkey->key[24], tkey->rx_hdr,
skb->data + hdr_len, skb->len - 8 - hdr_len, mic))
return -1;
if (memcmp(mic, skb->data + skb->len - 8, 8) != 0) {
printk(KERN_DEBUG "%s: Michael MIC verification failed for "
"MSDU from %pM keyidx=%d\n",
skb->dev ? skb->dev->name : "N/A", hdr->addr2,
keyidx);
if (skb->dev)
ieee80211_michael_mic_failure(skb->dev, hdr, keyidx);
tkey->dot11RSNAStatsTKIPLocalMICFailures++;
return -1;
}
/* Update TSC counters for RX now that the packet verification has
* completed. */
tkey->rx_iv32 = tkey->rx_iv32_new;
tkey->rx_iv16 = tkey->rx_iv16_new;
skb_trim(skb, skb->len - 8);
return 0;
}
static int ieee80211_tkip_set_key(void *key, int len, u8 *seq, void *priv)
{
struct ieee80211_tkip_data *tkey = priv;
int keyidx;
struct crypto_hash *tfm = tkey->tx_tfm_michael;
struct crypto_blkcipher *tfm2 = tkey->tx_tfm_arc4;
struct crypto_hash *tfm3 = tkey->rx_tfm_michael;
struct crypto_blkcipher *tfm4 = tkey->rx_tfm_arc4;
keyidx = tkey->key_idx;
memset(tkey, 0, sizeof(*tkey));
tkey->key_idx = keyidx;
tkey->tx_tfm_michael = tfm;
tkey->tx_tfm_arc4 = tfm2;
tkey->rx_tfm_michael = tfm3;
tkey->rx_tfm_arc4 = tfm4;
if (len == TKIP_KEY_LEN) {
memcpy(tkey->key, key, TKIP_KEY_LEN);
tkey->key_set = 1;
tkey->tx_iv16 = 1; /* TSC is initialized to 1 */
if (seq) {
tkey->rx_iv32 = (seq[5] << 24) | (seq[4] << 16) |
(seq[3] << 8) | seq[2];
tkey->rx_iv16 = (seq[1] << 8) | seq[0];
}
} else if (len == 0)
tkey->key_set = 0;
else
return -1;
return 0;
}
static int ieee80211_tkip_get_key(void *key, int len, u8 *seq, void *priv)
{
struct ieee80211_tkip_data *tkey = priv;
if (len < TKIP_KEY_LEN)
return -1;
if (!tkey->key_set)
return 0;
memcpy(key, tkey->key, TKIP_KEY_LEN);
if (seq) {
/* Return the sequence number of the last transmitted frame. */
u16 iv16 = tkey->tx_iv16;
u32 iv32 = tkey->tx_iv32;
if (iv16 == 0)
iv32--;
iv16--;
seq[0] = iv16;
seq[1] = iv16 >> 8;
seq[2] = iv32;
seq[3] = iv32 >> 8;
seq[4] = iv32 >> 16;
seq[5] = iv32 >> 24;
}
return TKIP_KEY_LEN;
}
static char *ieee80211_tkip_print_stats(char *p, void *priv)
{
struct ieee80211_tkip_data *tkip = priv;
p += sprintf(p, "key[%d] alg=TKIP key_set=%d "
"tx_pn=%02x%02x%02x%02x%02x%02x "
"rx_pn=%02x%02x%02x%02x%02x%02x "
"replays=%d icv_errors=%d local_mic_failures=%d\n",
tkip->key_idx, tkip->key_set,
(tkip->tx_iv32 >> 24) & 0xff,
(tkip->tx_iv32 >> 16) & 0xff,
(tkip->tx_iv32 >> 8) & 0xff,
tkip->tx_iv32 & 0xff,
(tkip->tx_iv16 >> 8) & 0xff,
tkip->tx_iv16 & 0xff,
(tkip->rx_iv32 >> 24) & 0xff,
(tkip->rx_iv32 >> 16) & 0xff,
(tkip->rx_iv32 >> 8) & 0xff,
tkip->rx_iv32 & 0xff,
(tkip->rx_iv16 >> 8) & 0xff,
tkip->rx_iv16 & 0xff,
tkip->dot11RSNAStatsTKIPReplays,
tkip->dot11RSNAStatsTKIPICVErrors,
tkip->dot11RSNAStatsTKIPLocalMICFailures);
return p;
}
static struct ieee80211_crypto_ops ieee80211_crypt_tkip = {
.name = "TKIP",
.init = ieee80211_tkip_init,
.deinit = ieee80211_tkip_deinit,
.encrypt_mpdu = ieee80211_tkip_encrypt,
.decrypt_mpdu = ieee80211_tkip_decrypt,
.encrypt_msdu = ieee80211_michael_mic_add,
.decrypt_msdu = ieee80211_michael_mic_verify,
.set_key = ieee80211_tkip_set_key,
.get_key = ieee80211_tkip_get_key,
.print_stats = ieee80211_tkip_print_stats,
.extra_prefix_len = 4 + 4, /* IV + ExtIV */
.extra_postfix_len = 8 + 4, /* MIC + ICV */
.owner = THIS_MODULE,
};
int ieee80211_crypto_tkip_init(void)
{
return ieee80211_register_crypto_ops(&ieee80211_crypt_tkip);
}
void ieee80211_crypto_tkip_exit(void)
{
ieee80211_unregister_crypto_ops(&ieee80211_crypt_tkip);
}
void ieee80211_tkip_null(void)
{
// printk("============>%s()\n", __func__);
return;
}
| gpl-2.0 |
Kuzma30/kernel3NookTablet | drivers/net/hp-plus.c | 3585 | 15316 | /* hp-plus.c: A HP PCLAN/plus ethernet driver for linux. */
/*
Written 1994 by Donald Becker.
This driver is for the Hewlett Packard PC LAN (27***) plus ethercards.
These cards are sold under several model numbers, usually 2724*.
This software may be used and distributed according to the terms
of the GNU General Public License, incorporated herein by reference.
The author may be reached as becker@scyld.com, or C/O
Scyld Computing Corporation
410 Severn Ave., Suite 210
Annapolis MD 21403
As is often the case, a great deal of credit is owed to Russ Nelson.
The Crynwr packet driver was my primary source of HP-specific
programming information.
*/
static const char version[] =
"hp-plus.c:v1.10 9/24/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
#include <linux/module.h>
#include <linux/string.h> /* Important -- this inlines word moves. */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <asm/system.h>
#include <asm/io.h>
#include "8390.h"
#define DRV_NAME "hp-plus"
/* A zero-terminated list of I/O addresses to be probed. */
static unsigned int hpplus_portlist[] __initdata =
{0x200, 0x240, 0x280, 0x2C0, 0x300, 0x320, 0x340, 0};
/*
The HP EtherTwist chip implementation is a fairly routine DP8390
implementation. It allows both shared memory and programmed-I/O buffer
access, using a custom interface for both. The programmed-I/O mode is
entirely implemented in the HP EtherTwist chip, bypassing the problem
ridden built-in 8390 facilities used on NE2000 designs. The shared
memory mode is likewise special, with an offset register used to make
packets appear at the shared memory base. Both modes use a base and bounds
page register to hide the Rx ring buffer wrap -- a packet that spans the
end of physical buffer memory appears contiguous to the driver. (cf. the
3c503 and Cabletron E2100)
A special note: the internal buffer of the board is only 8 bits wide.
This lays several nasty traps for the unaware:
- the 8390 must be programmed for byte-wide operations
- all I/O and memory operations must work on whole words (the access
latches are serially preloaded and have no byte-swapping ability).
This board is laid out in I/O space much like the earlier HP boards:
the first 16 locations are for the board registers, and the second 16 are
for the 8390. The board is easy to identify, with both a dedicated 16 bit
ID register and a constant 0x530* value in the upper bits of the paging
register.
*/
#define HP_ID 0x00 /* ID register, always 0x4850. */
#define HP_PAGING 0x02 /* Registers visible @ 8-f, see PageName. */
#define HPP_OPTION 0x04 /* Bitmapped options, see HP_Option. */
#define HPP_OUT_ADDR 0x08 /* I/O output location in Perf_Page. */
#define HPP_IN_ADDR 0x0A /* I/O input location in Perf_Page. */
#define HP_DATAPORT 0x0c /* I/O data transfer in Perf_Page. */
#define NIC_OFFSET 0x10 /* Offset to the 8390 registers. */
#define HP_IO_EXTENT 32
#define HP_START_PG 0x00 /* First page of TX buffer */
#define HP_STOP_PG 0x80 /* Last page +1 of RX ring */
/* The register set selected in HP_PAGING. */
enum PageName {
Perf_Page = 0, /* Normal operation. */
MAC_Page = 1, /* The ethernet address (+checksum). */
HW_Page = 2, /* EEPROM-loaded hardware parameters. */
LAN_Page = 4, /* Transceiver selection, testing, etc. */
ID_Page = 6 };
/* The bit definitions for the HPP_OPTION register. */
enum HP_Option {
NICReset = 1, ChipReset = 2, /* Active low, really UNreset. */
EnableIRQ = 4, FakeIntr = 8, BootROMEnb = 0x10, IOEnb = 0x20,
MemEnable = 0x40, ZeroWait = 0x80, MemDisable = 0x1000, };
static int hpp_probe1(struct net_device *dev, int ioaddr);
static void hpp_reset_8390(struct net_device *dev);
static int hpp_open(struct net_device *dev);
static int hpp_close(struct net_device *dev);
static void hpp_mem_block_input(struct net_device *dev, int count,
struct sk_buff *skb, int ring_offset);
static void hpp_mem_block_output(struct net_device *dev, int count,
const unsigned char *buf, int start_page);
static void hpp_mem_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
int ring_page);
static void hpp_io_block_input(struct net_device *dev, int count,
struct sk_buff *skb, int ring_offset);
static void hpp_io_block_output(struct net_device *dev, int count,
const unsigned char *buf, int start_page);
static void hpp_io_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
int ring_page);
/* Probe a list of addresses for an HP LAN+ adaptor.
This routine is almost boilerplate. */
static int __init do_hpp_probe(struct net_device *dev)
{
int i;
int base_addr = dev->base_addr;
int irq = dev->irq;
if (base_addr > 0x1ff) /* Check a single specified location. */
return hpp_probe1(dev, base_addr);
else if (base_addr != 0) /* Don't probe at all. */
return -ENXIO;
for (i = 0; hpplus_portlist[i]; i++) {
if (hpp_probe1(dev, hpplus_portlist[i]) == 0)
return 0;
dev->irq = irq;
}
return -ENODEV;
}
#ifndef MODULE
struct net_device * __init hp_plus_probe(int unit)
{
struct net_device *dev = alloc_eip_netdev();
int err;
if (!dev)
return ERR_PTR(-ENOMEM);
sprintf(dev->name, "eth%d", unit);
netdev_boot_setup_check(dev);
err = do_hpp_probe(dev);
if (err)
goto out;
return dev;
out:
free_netdev(dev);
return ERR_PTR(err);
}
#endif
static const struct net_device_ops hpp_netdev_ops = {
.ndo_open = hpp_open,
.ndo_stop = hpp_close,
.ndo_start_xmit = eip_start_xmit,
.ndo_tx_timeout = eip_tx_timeout,
.ndo_get_stats = eip_get_stats,
.ndo_set_multicast_list = eip_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = eip_poll,
#endif
};
/* Do the interesting part of the probe at a single address. */
static int __init hpp_probe1(struct net_device *dev, int ioaddr)
{
int i, retval;
unsigned char checksum = 0;
const char name[] = "HP-PC-LAN+";
int mem_start;
static unsigned version_printed;
if (!request_region(ioaddr, HP_IO_EXTENT, DRV_NAME))
return -EBUSY;
/* Check for the HP+ signature, 50 48 0x 53. */
if (inw(ioaddr + HP_ID) != 0x4850 ||
(inw(ioaddr + HP_PAGING) & 0xfff0) != 0x5300) {
retval = -ENODEV;
goto out;
}
if (ei_debug && version_printed++ == 0)
printk(version);
printk("%s: %s at %#3x, ", dev->name, name, ioaddr);
/* Retrieve and checksum the station address. */
outw(MAC_Page, ioaddr + HP_PAGING);
for(i = 0; i < ETHER_ADDR_LEN; i++) {
unsigned char inval = inb(ioaddr + 8 + i);
dev->dev_addr[i] = inval;
checksum += inval;
}
checksum += inb(ioaddr + 14);
printk("%pM", dev->dev_addr);
if (checksum != 0xff) {
printk(" bad checksum %2.2x.\n", checksum);
retval = -ENODEV;
goto out;
} else {
/* Point at the Software Configuration Flags. */
outw(ID_Page, ioaddr + HP_PAGING);
printk(" ID %4.4x", inw(ioaddr + 12));
}
/* Read the IRQ line. */
outw(HW_Page, ioaddr + HP_PAGING);
{
int irq = inb(ioaddr + 13) & 0x0f;
int option = inw(ioaddr + HPP_OPTION);
dev->irq = irq;
if (option & MemEnable) {
mem_start = inw(ioaddr + 9) << 8;
printk(", IRQ %d, memory address %#x.\n", irq, mem_start);
} else {
mem_start = 0;
printk(", IRQ %d, programmed-I/O mode.\n", irq);
}
}
/* Set the wrap registers for string I/O reads. */
outw((HP_START_PG + TX_PAGES/2) | ((HP_STOP_PG - 1) << 8), ioaddr + 14);
/* Set the base address to point to the NIC, not the "real" base! */
dev->base_addr = ioaddr + NIC_OFFSET;
dev->netdev_ops = &hpp_netdev_ops;
ei_status.name = name;
ei_status.word16 = 0; /* Agggghhhhh! Debug time: 2 days! */
ei_status.tx_start_page = HP_START_PG;
ei_status.rx_start_page = HP_START_PG + TX_PAGES/2;
ei_status.stop_page = HP_STOP_PG;
ei_status.reset_8390 = &hpp_reset_8390;
ei_status.block_input = &hpp_io_block_input;
ei_status.block_output = &hpp_io_block_output;
ei_status.get_8390_hdr = &hpp_io_get_8390_hdr;
/* Check if the memory_enable flag is set in the option register. */
if (mem_start) {
ei_status.block_input = &hpp_mem_block_input;
ei_status.block_output = &hpp_mem_block_output;
ei_status.get_8390_hdr = &hpp_mem_get_8390_hdr;
dev->mem_start = mem_start;
ei_status.mem = ioremap(mem_start,
(HP_STOP_PG - HP_START_PG)*256);
if (!ei_status.mem) {
retval = -ENOMEM;
goto out;
}
ei_status.rmem_start = dev->mem_start + TX_PAGES/2*256;
dev->mem_end = ei_status.rmem_end
= dev->mem_start + (HP_STOP_PG - HP_START_PG)*256;
}
outw(Perf_Page, ioaddr + HP_PAGING);
NS8390p_init(dev, 0);
/* Leave the 8390 and HP chip reset. */
outw(inw(ioaddr + HPP_OPTION) & ~EnableIRQ, ioaddr + HPP_OPTION);
retval = register_netdev(dev);
if (retval)
goto out1;
return 0;
out1:
iounmap(ei_status.mem);
out:
release_region(ioaddr, HP_IO_EXTENT);
return retval;
}
static int
hpp_open(struct net_device *dev)
{
int ioaddr = dev->base_addr - NIC_OFFSET;
int option_reg;
int retval;
if ((retval = request_irq(dev->irq, eip_interrupt, 0, dev->name, dev))) {
return retval;
}
/* Reset the 8390 and HP chip. */
option_reg = inw(ioaddr + HPP_OPTION);
outw(option_reg & ~(NICReset + ChipReset), ioaddr + HPP_OPTION);
udelay(5);
/* Unreset the board and enable interrupts. */
outw(option_reg | (EnableIRQ + NICReset + ChipReset), ioaddr + HPP_OPTION);
/* Set the wrap registers for programmed-I/O operation. */
outw(HW_Page, ioaddr + HP_PAGING);
outw((HP_START_PG + TX_PAGES/2) | ((HP_STOP_PG - 1) << 8), ioaddr + 14);
/* Select the operational page. */
outw(Perf_Page, ioaddr + HP_PAGING);
return eip_open(dev);
}
static int
hpp_close(struct net_device *dev)
{
int ioaddr = dev->base_addr - NIC_OFFSET;
int option_reg = inw(ioaddr + HPP_OPTION);
free_irq(dev->irq, dev);
eip_close(dev);
outw((option_reg & ~EnableIRQ) | MemDisable | NICReset | ChipReset,
ioaddr + HPP_OPTION);
return 0;
}
static void
hpp_reset_8390(struct net_device *dev)
{
int ioaddr = dev->base_addr - NIC_OFFSET;
int option_reg = inw(ioaddr + HPP_OPTION);
if (ei_debug > 1) printk("resetting the 8390 time=%ld...", jiffies);
outw(option_reg & ~(NICReset + ChipReset), ioaddr + HPP_OPTION);
/* Pause a few cycles for the hardware reset to take place. */
udelay(5);
ei_status.txing = 0;
outw(option_reg | (EnableIRQ + NICReset + ChipReset), ioaddr + HPP_OPTION);
udelay(5);
if ((inb_p(ioaddr+NIC_OFFSET+EN0_ISR) & ENISR_RESET) == 0)
printk("%s: hp_reset_8390() did not complete.\n", dev->name);
if (ei_debug > 1) printk("8390 reset done (%ld).", jiffies);
}
/* The programmed-I/O version of reading the 4 byte 8390 specific header.
Note that transfer with the EtherTwist+ must be on word boundaries. */
static void
hpp_io_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
{
int ioaddr = dev->base_addr - NIC_OFFSET;
outw((ring_page<<8), ioaddr + HPP_IN_ADDR);
insw(ioaddr + HP_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1);
}
/* Block input and output, similar to the Crynwr packet driver. */
static void
hpp_io_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
{
int ioaddr = dev->base_addr - NIC_OFFSET;
char *buf = skb->data;
outw(ring_offset, ioaddr + HPP_IN_ADDR);
insw(ioaddr + HP_DATAPORT, buf, count>>1);
if (count & 0x01)
buf[count-1] = inw(ioaddr + HP_DATAPORT);
}
/* The corresponding shared memory versions of the above 2 functions. */
static void
hpp_mem_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
{
int ioaddr = dev->base_addr - NIC_OFFSET;
int option_reg = inw(ioaddr + HPP_OPTION);
outw((ring_page<<8), ioaddr + HPP_IN_ADDR);
outw(option_reg & ~(MemDisable + BootROMEnb), ioaddr + HPP_OPTION);
memcpy_fromio(hdr, ei_status.mem, sizeof(struct e8390_pkt_hdr));
outw(option_reg, ioaddr + HPP_OPTION);
hdr->count = (le16_to_cpu(hdr->count) + 3) & ~3; /* Round up allocation. */
}
static void
hpp_mem_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
{
int ioaddr = dev->base_addr - NIC_OFFSET;
int option_reg = inw(ioaddr + HPP_OPTION);
outw(ring_offset, ioaddr + HPP_IN_ADDR);
outw(option_reg & ~(MemDisable + BootROMEnb), ioaddr + HPP_OPTION);
/* Caution: this relies on get_8390_hdr() rounding up count!
Also note that we *can't* use eth_io_copy_and_sum() because
it will not always copy "count" bytes (e.g. padded IP). */
memcpy_fromio(skb->data, ei_status.mem, count);
outw(option_reg, ioaddr + HPP_OPTION);
}
/* A special note: we *must* always transfer >=16 bit words.
It's always safe to round up, so we do. */
static void
hpp_io_block_output(struct net_device *dev, int count,
const unsigned char *buf, int start_page)
{
int ioaddr = dev->base_addr - NIC_OFFSET;
outw(start_page << 8, ioaddr + HPP_OUT_ADDR);
outsl(ioaddr + HP_DATAPORT, buf, (count+3)>>2);
}
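/* Editor's example (illustrative numbers): a 61-byte frame becomes
(61 + 3) >> 2 = 16 dword transfers above, i.e. 64 bytes pushed to the
card -- rounded up per the note preceding this function. */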
static void
hpp_mem_block_output(struct net_device *dev, int count,
const unsigned char *buf, int start_page)
{
int ioaddr = dev->base_addr - NIC_OFFSET;
int option_reg = inw(ioaddr + HPP_OPTION);
outw(start_page << 8, ioaddr + HPP_OUT_ADDR);
outw(option_reg & ~(MemDisable + BootROMEnb), ioaddr + HPP_OPTION);
memcpy_toio(ei_status.mem, buf, (count + 3) & ~3);
outw(option_reg, ioaddr + HPP_OPTION);
}
#ifdef MODULE
#define MAX_HPP_CARDS 4 /* Max number of HPP cards per module */
static struct net_device *dev_hpp[MAX_HPP_CARDS];
static int io[MAX_HPP_CARDS];
static int irq[MAX_HPP_CARDS];
module_param_array(io, int, NULL, 0);
module_param_array(irq, int, NULL, 0);
MODULE_PARM_DESC(io, "I/O port address(es)");
MODULE_PARM_DESC(irq, "IRQ number(s); ignored if properly detected");
MODULE_DESCRIPTION("HP PC-LAN+ ISA ethernet driver");
MODULE_LICENSE("GPL");
/* This is set up so that only a single autoprobe takes place per call.
ISA device autoprobes on a running machine are not recommended. */
int __init
init_module(void)
{
struct net_device *dev;
int this_dev, found = 0;
for (this_dev = 0; this_dev < MAX_HPP_CARDS; this_dev++) {
if (io[this_dev] == 0) {
if (this_dev != 0) break; /* only autoprobe 1st one */
printk(KERN_NOTICE "hp-plus.c: Presently autoprobing (not recommended) for a single card.\n");
}
dev = alloc_eip_netdev();
if (!dev)
break;
dev->irq = irq[this_dev];
dev->base_addr = io[this_dev];
if (do_hpp_probe(dev) == 0) {
dev_hpp[found++] = dev;
continue;
}
free_netdev(dev);
printk(KERN_WARNING "hp-plus.c: No HP-Plus card found (i/o = 0x%x).\n", io[this_dev]);
break;
}
if (found)
return 0;
return -ENXIO;
}
static void cleanup_card(struct net_device *dev)
{
/* NB: hpp_close() handles free_irq */
iounmap(ei_status.mem);
release_region(dev->base_addr - NIC_OFFSET, HP_IO_EXTENT);
}
void __exit
cleanup_module(void)
{
int this_dev;
for (this_dev = 0; this_dev < MAX_HPP_CARDS; this_dev++) {
struct net_device *dev = dev_hpp[this_dev];
if (dev) {
unregister_netdev(dev);
cleanup_card(dev);
free_netdev(dev);
}
}
}
#endif /* MODULE */
| gpl-2.0 |
hallovveen31/HELLRAZOR | arch/powerpc/platforms/82xx/pq2.c | 4097 | 2000 | /*
* Common PowerQUICC II code.
*
* Author: Scott Wood <scottwood@freescale.com>
* Copyright (c) 2007 Freescale Semiconductor
*
* Based on code by Vitaly Bordug <vbordug@ru.mvista.com>
* pq2_restart fix by Wade Farnsworth <wfarnsworth@mvista.com>
* Copyright (c) 2006 MontaVista Software, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <asm/cpm2.h>
#include <asm/io.h>
#include <asm/pci-bridge.h>
#include <asm/system.h>
#include <platforms/82xx/pq2.h>
#define RMR_CSRE 0x00000001
void pq2_restart(char *cmd)
{
local_irq_disable();
setbits32(&cpm2_immr->im_clkrst.car_rmr, RMR_CSRE);
/* Clear the ME,EE,IR & DR bits in MSR to cause checkstop */
mtmsr(mfmsr() & ~(MSR_ME | MSR_EE | MSR_IR | MSR_DR));
in_8(&cpm2_immr->im_clkrst.res[0]);
panic("Restart failed\n");
}
#ifdef CONFIG_PCI
static int pq2_pci_exclude_device(struct pci_controller *hose,
u_char bus, u8 devfn)
{
if (bus == 0 && PCI_SLOT(devfn) == 0)
return PCIBIOS_DEVICE_NOT_FOUND;
else
return PCIBIOS_SUCCESSFUL;
}
static void __init pq2_pci_add_bridge(struct device_node *np)
{
struct pci_controller *hose;
struct resource r;
if (of_address_to_resource(np, 0, &r) || r.end - r.start < 0x10b)
goto err;
ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS);
hose = pcibios_alloc_controller(np);
if (!hose)
return;
hose->dn = np;
setup_indirect_pci(hose, r.start + 0x100, r.start + 0x104, 0);
pci_process_bridge_OF_ranges(hose, np, 1);
return;
err:
printk(KERN_ERR "No valid PCI reg property in device tree\n");
}
void __init pq2_init_pci(void)
{
struct device_node *np = NULL;
ppc_md.pci_exclude_device = pq2_pci_exclude_device;
while ((np = of_find_compatible_node(np, NULL, "fsl,pq2-pci")))
pq2_pci_add_bridge(np);
}
#endif
| gpl-2.0 |
abbradar/kernel_I9001_samsung | arch/powerpc/platforms/82xx/pq2.c | 4097 | 2000 | /*
* Common PowerQUICC II code.
*
* Author: Scott Wood <scottwood@freescale.com>
* Copyright (c) 2007 Freescale Semiconductor
*
* Based on code by Vitaly Bordug <vbordug@ru.mvista.com>
* pq2_restart fix by Wade Farnsworth <wfarnsworth@mvista.com>
* Copyright (c) 2006 MontaVista Software, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <asm/cpm2.h>
#include <asm/io.h>
#include <asm/pci-bridge.h>
#include <asm/system.h>
#include <platforms/82xx/pq2.h>
#define RMR_CSRE 0x00000001
void pq2_restart(char *cmd)
{
local_irq_disable();
setbits32(&cpm2_immr->im_clkrst.car_rmr, RMR_CSRE);
/* Clear the ME,EE,IR & DR bits in MSR to cause checkstop */
mtmsr(mfmsr() & ~(MSR_ME | MSR_EE | MSR_IR | MSR_DR));
in_8(&cpm2_immr->im_clkrst.res[0]);
panic("Restart failed\n");
}
#ifdef CONFIG_PCI
static int pq2_pci_exclude_device(struct pci_controller *hose,
u_char bus, u8 devfn)
{
if (bus == 0 && PCI_SLOT(devfn) == 0)
return PCIBIOS_DEVICE_NOT_FOUND;
else
return PCIBIOS_SUCCESSFUL;
}
static void __init pq2_pci_add_bridge(struct device_node *np)
{
struct pci_controller *hose;
struct resource r;
if (of_address_to_resource(np, 0, &r) || r.end - r.start < 0x10b)
goto err;
ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS);
hose = pcibios_alloc_controller(np);
if (!hose)
return;
hose->dn = np;
setup_indirect_pci(hose, r.start + 0x100, r.start + 0x104, 0);
pci_process_bridge_OF_ranges(hose, np, 1);
return;
err:
printk(KERN_ERR "No valid PCI reg property in device tree\n");
}
void __init pq2_init_pci(void)
{
struct device_node *np = NULL;
ppc_md.pci_exclude_device = pq2_pci_exclude_device;
while ((np = of_find_compatible_node(np, NULL, "fsl,pq2-pci")))
pq2_pci_add_bridge(np);
}
#endif
| gpl-2.0 |
MasterAwesome/android_kernel_oneplus_msm8974 | drivers/net/wireless/ath/ath5k/pci.c | 4865 | 9766 | /*
* Copyright (c) 2008-2009 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/nl80211.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include "../ath.h"
#include "ath5k.h"
#include "debug.h"
#include "base.h"
#include "reg.h"
/* Known PCI ids */
static DEFINE_PCI_DEVICE_TABLE(ath5k_pci_id_table) = {
{ PCI_VDEVICE(ATHEROS, 0x0207) }, /* 5210 early */
{ PCI_VDEVICE(ATHEROS, 0x0007) }, /* 5210 */
{ PCI_VDEVICE(ATHEROS, 0x0011) }, /* 5311 - this is on AHB bus !*/
{ PCI_VDEVICE(ATHEROS, 0x0012) }, /* 5211 */
{ PCI_VDEVICE(ATHEROS, 0x0013) }, /* 5212 */
{ PCI_VDEVICE(3COM_2, 0x0013) }, /* 3com 5212 */
{ PCI_VDEVICE(3COM, 0x0013) }, /* 3com 3CRDAG675 5212 */
{ PCI_VDEVICE(ATHEROS, 0x1014) }, /* IBM minipci 5212 */
{ PCI_VDEVICE(ATHEROS, 0x0014) }, /* 5212 compatible */
{ PCI_VDEVICE(ATHEROS, 0x0015) }, /* 5212 compatible */
{ PCI_VDEVICE(ATHEROS, 0x0016) }, /* 5212 compatible */
{ PCI_VDEVICE(ATHEROS, 0x0017) }, /* 5212 compatible */
{ PCI_VDEVICE(ATHEROS, 0x0018) }, /* 5212 compatible */
{ PCI_VDEVICE(ATHEROS, 0x0019) }, /* 5212 compatible */
{ PCI_VDEVICE(ATHEROS, 0x001a) }, /* 2413 Griffin-lite */
{ PCI_VDEVICE(ATHEROS, 0x001b) }, /* 5413 Eagle */
{ PCI_VDEVICE(ATHEROS, 0x001c) }, /* PCI-E cards */
{ PCI_VDEVICE(ATHEROS, 0x001d) }, /* 2417 Nala */
{ 0 }
};
MODULE_DEVICE_TABLE(pci, ath5k_pci_id_table);
/* return bus cachesize in 4B word units */
static void ath5k_pci_read_cachesize(struct ath_common *common, int *csz)
{
struct ath5k_hw *ah = (struct ath5k_hw *) common->priv;
u8 u8tmp;
pci_read_config_byte(ah->pdev, PCI_CACHE_LINE_SIZE, &u8tmp);
*csz = (int)u8tmp;
/*
* This check was put in to avoid "unpleasant" consequences if
* the bootrom has not fully initialized all PCI devices.
* Sometimes the cache line size register is not set
*/
if (*csz == 0)
*csz = L1_CACHE_BYTES >> 2; /* Use the default size */
}
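/* Editor's note: PCI_CACHE_LINE_SIZE is itself specified in 4-byte
* units, so e.g. a 64-byte cache line reads back as 16 and is passed
* through unchanged; only a zero register (uninitialized by the boot
* ROM) falls back to L1_CACHE_BYTES >> 2. */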
/*
* Read from eeprom
*/
static bool
ath5k_pci_eeprom_read(struct ath_common *common, u32 offset, u16 *data)
{
struct ath5k_hw *ah = (struct ath5k_hw *) common->ah;
u32 status, timeout;
/*
* Initialize EEPROM access
*/
if (ah->ah_version == AR5K_AR5210) {
AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, AR5K_PCICFG_EEAE);
(void)ath5k_hw_reg_read(ah, AR5K_EEPROM_BASE + (4 * offset));
} else {
ath5k_hw_reg_write(ah, offset, AR5K_EEPROM_BASE);
AR5K_REG_ENABLE_BITS(ah, AR5K_EEPROM_CMD,
AR5K_EEPROM_CMD_READ);
}
for (timeout = AR5K_TUNE_REGISTER_TIMEOUT; timeout > 0; timeout--) {
status = ath5k_hw_reg_read(ah, AR5K_EEPROM_STATUS);
if (status & AR5K_EEPROM_STAT_RDDONE) {
if (status & AR5K_EEPROM_STAT_RDERR)
return false;
*data = (u16)(ath5k_hw_reg_read(ah, AR5K_EEPROM_DATA) &
0xffff);
return true;
}
usleep_range(15, 20);
}
return false;
}
int ath5k_hw_read_srev(struct ath5k_hw *ah)
{
ah->ah_mac_srev = ath5k_hw_reg_read(ah, AR5K_SREV);
return 0;
}
/*
* Read the MAC address from eeprom or platform_data
*/
static int ath5k_pci_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac)
{
u8 mac_d[ETH_ALEN] = {};
u32 total, offset;
u16 data;
int octet;
AR5K_EEPROM_READ(0x20, data);
for (offset = 0x1f, octet = 0, total = 0; offset >= 0x1d; offset--) {
AR5K_EEPROM_READ(offset, data);
total += data;
mac_d[octet + 1] = data & 0xff;
mac_d[octet] = data >> 8;
octet += 2;
}
if (!total || total == 3 * 0xffff)
return -EINVAL;
memcpy(mac, mac_d, ETH_ALEN);
return 0;
}
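/*
* Editor's note (reconstructed from the loop above): the MAC address
* lives in EEPROM words 0x1d..0x1f, read highest offset first; each
* 16-bit word supplies two octets, high byte first, and a running sum
* of 0 or 3 * 0xffff marks the EEPROM as blank/uninitialized.
*/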
/* Common ath_bus_opts structure */
static const struct ath_bus_ops ath_pci_bus_ops = {
.ath_bus_type = ATH_PCI,
.read_cachesize = ath5k_pci_read_cachesize,
.eeprom_read = ath5k_pci_eeprom_read,
.eeprom_read_mac = ath5k_pci_eeprom_read_mac,
};
/********************\
* PCI Initialization *
\********************/
static int __devinit
ath5k_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
void __iomem *mem;
struct ath5k_hw *ah;
struct ieee80211_hw *hw;
int ret;
u8 csz;
/*
* L0s needs to be disabled on all ath5k cards.
*
* For distributions shipping with CONFIG_PCIEASPM (this will be enabled
* by default in the future in 2.6.36) this will also mean both L1 and
* L0s will be disabled when a pre 1.1 PCIe device is detected. We do
* know L1 works correctly even for all ath5k pre 1.1 PCIe devices,
* though we cannot currently undo the effect of a blacklist; for
* details you can read pcie_aspm_sanity_check() and see how it adjusts
* the device link capability.
*
* It may be possible in the future to implement some PCI API to allow
* drivers to override blacklists for pre 1.1 PCIe but for now it is
* best to accept that both L0s and L1 will be disabled completely for
* distributions shipping with CONFIG_PCIEASPM rather than having this
* issue present. Motivation for adding this new API will be to help
* with power consumption for some of these devices.
*/
pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);
ret = pci_enable_device(pdev);
if (ret) {
dev_err(&pdev->dev, "can't enable device\n");
goto err;
}
/* XXX 32-bit addressing only */
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (ret) {
dev_err(&pdev->dev, "32-bit DMA not available\n");
goto err_dis;
}
/*
* Cache line size is used to size and align various
* structures used to communicate with the hardware.
*/
pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz);
if (csz == 0) {
/*
* Linux 2.4.18 (at least) writes the cache line size
* register as a 16-bit wide register which is wrong.
* We must have this setup properly for rx buffer
* DMA to work so force a reasonable value here if it
* comes up zero.
*/
csz = L1_CACHE_BYTES >> 2;
pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz);
}
/*
* The default setting of latency timer yields poor results,
* set it to the value used by other systems. It may be worth
* tweaking this setting more.
*/
pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8);
/* Enable bus mastering */
pci_set_master(pdev);
/*
* Disable the RETRY_TIMEOUT register (0x41) to keep
* PCI Tx retries from interfering with C3 CPU state.
*/
pci_write_config_byte(pdev, 0x41, 0);
ret = pci_request_region(pdev, 0, "ath5k");
if (ret) {
dev_err(&pdev->dev, "cannot reserve PCI memory region\n");
goto err_dis;
}
mem = pci_iomap(pdev, 0, 0);
if (!mem) {
dev_err(&pdev->dev, "cannot remap PCI memory region\n");
ret = -EIO;
goto err_reg;
}
/*
* Allocate hw (mac80211 main struct)
* and hw->priv (driver private data)
*/
hw = ieee80211_alloc_hw(sizeof(*ah), &ath5k_hw_ops);
if (hw == NULL) {
dev_err(&pdev->dev, "cannot allocate ieee80211_hw\n");
ret = -ENOMEM;
goto err_map;
}
dev_info(&pdev->dev, "registered as '%s'\n", wiphy_name(hw->wiphy));
ah = hw->priv;
ah->hw = hw;
ah->pdev = pdev;
ah->dev = &pdev->dev;
ah->irq = pdev->irq;
ah->devid = id->device;
ah->iobase = mem; /* So we can unmap it on detach */
/* Initialize */
ret = ath5k_init_ah(ah, &ath_pci_bus_ops);
if (ret)
goto err_free;
/* Set private data */
pci_set_drvdata(pdev, hw);
return 0;
err_free:
ieee80211_free_hw(hw);
err_map:
pci_iounmap(pdev, mem);
err_reg:
pci_release_region(pdev, 0);
err_dis:
pci_disable_device(pdev);
err:
return ret;
}
static void __devexit
ath5k_pci_remove(struct pci_dev *pdev)
{
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct ath5k_hw *ah = hw->priv;
ath5k_deinit_ah(ah);
pci_iounmap(pdev, ah->iobase);
pci_release_region(pdev, 0);
pci_disable_device(pdev);
ieee80211_free_hw(hw);
}
#ifdef CONFIG_PM_SLEEP
static int ath5k_pci_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct ath5k_hw *ah = hw->priv;
ath5k_led_off(ah);
return 0;
}
static int ath5k_pci_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct ath5k_hw *ah = hw->priv;
/*
* Suspend/Resume resets the PCI configuration space, so we have to
* re-disable the RETRY_TIMEOUT register (0x41) to keep
* PCI Tx retries from interfering with C3 CPU state
*/
pci_write_config_byte(pdev, 0x41, 0);
ath5k_led_enable(ah);
return 0;
}
static SIMPLE_DEV_PM_OPS(ath5k_pm_ops, ath5k_pci_suspend, ath5k_pci_resume);
#define ATH5K_PM_OPS (&ath5k_pm_ops)
#else
#define ATH5K_PM_OPS NULL
#endif /* CONFIG_PM_SLEEP */
static struct pci_driver ath5k_pci_driver = {
.name = KBUILD_MODNAME,
.id_table = ath5k_pci_id_table,
.probe = ath5k_pci_probe,
.remove = __devexit_p(ath5k_pci_remove),
.driver.pm = ATH5K_PM_OPS,
};
/*
* Module init/exit functions
*/
static int __init
init_ath5k_pci(void)
{
int ret;
ret = pci_register_driver(&ath5k_pci_driver);
if (ret) {
printk(KERN_ERR "ath5k_pci: can't register pci driver\n");
return ret;
}
return 0;
}
static void __exit
exit_ath5k_pci(void)
{
pci_unregister_driver(&ath5k_pci_driver);
}
module_init(init_ath5k_pci);
module_exit(exit_ath5k_pci);
| gpl-2.0 |
SOKP/kernel_lge_hammerhead | drivers/ata/pata_artop.c | 5121 | 12604 | /*
* pata_artop.c - ARTOP ATA controller driver
*
* (C) 2006 Red Hat
* (C) 2007,2011 Bartlomiej Zolnierkiewicz
*
* Based in part on drivers/ide/pci/aec62xx.c
* Copyright (C) 1999-2002 Andre Hedrick <andre@linux-ide.org>
* 865/865R fixes for Macintosh card version from a patch to the old
* driver by Thibaut VARENE <varenet@parisc-linux.org>
* When setting the PCI latency we must set 0x80 or higher for burst
* performance Alessandro Zummo <alessandro.zummo@towertech.it>
*
* TODO
* Investigate no_dsc on 850R
* Clock detect
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/ata.h>
#define DRV_NAME "pata_artop"
#define DRV_VERSION "0.4.6"
/*
* The ARTOP has 33 MHz and "over clocked" timing tables. Until we
* get PCI bus speed functionality we leave this as 0. It's a variable
* for when we get the functionality and also for folks wanting to
* test stuff.
*/
static int clock = 0;
/**
* artop62x0_pre_reset - probe begin
* @link: link
* @deadline: deadline jiffies for the operation
*
* Nothing complicated needed here.
*/
static int artop62x0_pre_reset(struct ata_link *link, unsigned long deadline)
{
static const struct pci_bits artop_enable_bits[] = {
{ 0x4AU, 1U, 0x02UL, 0x02UL }, /* port 0 */
{ 0x4AU, 1U, 0x04UL, 0x04UL }, /* port 1 */
};
struct ata_port *ap = link->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
/* Odd numbered device ids are the units with enable bits. */
if ((pdev->device & 1) &&
!pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no]))
return -ENOENT;
return ata_sff_prereset(link, deadline);
}
/**
* artop6260_cable_detect - identify cable type
* @ap: Port
*
* Identify the cable type for the ARTOP interface in question
*/
static int artop6260_cable_detect(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u8 tmp;
pci_read_config_byte(pdev, 0x49, &tmp);
if (tmp & (1 << ap->port_no))
return ATA_CBL_PATA40;
return ATA_CBL_PATA80;
}
/**
* artop6210_load_piomode - Load a set of PATA PIO timings
* @ap: Port whose timings we are configuring
* @adev: Device
* @pio: PIO mode
*
* Set PIO mode for device, in host controller PCI config space. This
* is used both to set PIO timings in PIO mode and also to set the
* matching PIO clocking for UDMA, as well as the MWDMA timings.
*
* LOCKING:
* None (inherited from caller).
*/
static void artop6210_load_piomode(struct ata_port *ap, struct ata_device *adev, unsigned int pio)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int dn = adev->devno + 2 * ap->port_no;
const u16 timing[2][5] = {
{ 0x0000, 0x000A, 0x0008, 0x0303, 0x0301 },
{ 0x0700, 0x070A, 0x0708, 0x0403, 0x0401 }
};
/* Load the PIO timing active/recovery bits */
pci_write_config_word(pdev, 0x40 + 2 * dn, timing[clock][pio]);
}
/**
* artop6210_set_piomode - Initialize host controller PATA PIO timings
* @ap: Port whose timings we are configuring
* @adev: Device we are configuring
*
* Set PIO mode for device, in host controller PCI config space. For
* ARTOP we must also clear the UDMA bits if we are not doing UDMA. In
* the event UDMA is used the later call to set_dmamode will set the
* bits as required.
*
* LOCKING:
* None (inherited from caller).
*/
static void artop6210_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int dn = adev->devno + 2 * ap->port_no;
u8 ultra;
artop6210_load_piomode(ap, adev, adev->pio_mode - XFER_PIO_0);
/* Clear the UDMA mode bits (set_dmamode will redo this if needed) */
pci_read_config_byte(pdev, 0x54, &ultra);
ultra &= ~(3 << (2 * dn));
pci_write_config_byte(pdev, 0x54, ultra);
}
/**
* artop6260_load_piomode - Initialize host controller PATA PIO timings
* @ap: Port whose timings we are configuring
* @adev: Device we are configuring
* @pio: PIO mode
*
* Set PIO mode for device, in host controller PCI config space. The
* ARTOP6260 and relatives store the timing data differently.
*
* LOCKING:
* None (inherited from caller).
*/
static void artop6260_load_piomode (struct ata_port *ap, struct ata_device *adev, unsigned int pio)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int dn = adev->devno + 2 * ap->port_no;
const u8 timing[2][5] = {
{ 0x00, 0x0A, 0x08, 0x33, 0x31 },
{ 0x70, 0x7A, 0x78, 0x43, 0x41 }
};
/* Load the PIO timing active/recovery bits */
pci_write_config_byte(pdev, 0x40 + dn, timing[clock][pio]);
}
/**
* artop6260_set_piomode - Initialize host controller PATA PIO timings
* @ap: Port whose timings we are configuring
* @adev: Device we are configuring
*
* Set PIO mode for device, in host controller PCI config space. For
* ARTOP we must also clear the UDMA bits if we are not doing UDMA. In
* the event UDMA is used the later call to set_dmamode will set the
* bits as required.
*
* LOCKING:
* None (inherited from caller).
*/
static void artop6260_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u8 ultra;
artop6260_load_piomode(ap, adev, adev->pio_mode - XFER_PIO_0);
/* Clear the UDMA mode bits (set_dmamode will redo this if needed) */
pci_read_config_byte(pdev, 0x44 + ap->port_no, &ultra);
ultra &= ~(7 << (4 * adev->devno)); /* One nibble per drive */
pci_write_config_byte(pdev, 0x44 + ap->port_no, ultra);
}
/**
* artop6210_set_dmamode - Initialize host controller PATA PIO timings
* @ap: Port whose timings we are configuring
* @adev: Device whose timings we are configuring
*
* Set DMA mode for device, in host controller PCI config space.
*
* LOCKING:
* None (inherited from caller).
*/
static void artop6210_set_dmamode (struct ata_port *ap, struct ata_device *adev)
{
unsigned int pio;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int dn = adev->devno + 2 * ap->port_no;
u8 ultra;
if (adev->dma_mode == XFER_MW_DMA_0)
pio = 1;
else
pio = 4;
/* Load the PIO timing active/recovery bits */
artop6210_load_piomode(ap, adev, pio);
pci_read_config_byte(pdev, 0x54, &ultra);
ultra &= ~(3 << (2 * dn));
/* Add ultra DMA bits if in UDMA mode */
if (adev->dma_mode >= XFER_UDMA_0) {
u8 mode = (adev->dma_mode - XFER_UDMA_0) + 1 - clock;
if (mode == 0)
mode = 1;
ultra |= (mode << (2 * dn));
}
pci_write_config_byte(pdev, 0x54, ultra);
}
/**
* artop6260_set_dmamode - Initialize host controller PATA PIO timings
* @ap: Port whose timings we are configuring
* @adev: Device we are configuring
*
* Set DMA mode for device, in host controller PCI config space. The
* ARTOP6260 and relatives store the timing data differently.
*
* LOCKING:
* None (inherited from caller).
*/
static void artop6260_set_dmamode (struct ata_port *ap, struct ata_device *adev)
{
unsigned int pio = adev->pio_mode - XFER_PIO_0;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u8 ultra;
if (adev->dma_mode == XFER_MW_DMA_0)
pio = 1;
else
pio = 4;
/* Load the PIO timing active/recovery bits */
artop6260_load_piomode(ap, adev, pio);
/* Add ultra DMA bits if in UDMA mode */
pci_read_config_byte(pdev, 0x44 + ap->port_no, &ultra);
ultra &= ~(7 << (4 * adev->devno)); /* One nibble per drive */
if (adev->dma_mode >= XFER_UDMA_0) {
u8 mode = adev->dma_mode - XFER_UDMA_0 + 1 - clock;
if (mode == 0)
mode = 1;
ultra |= (mode << (4 * adev->devno));
}
pci_write_config_byte(pdev, 0x44 + ap->port_no, ultra);
}
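/*
* Editor's note: the two set_dmamode() variants above pack the UDMA
* mode differently -- the 6210 keeps 2 bits per drive for all four
* drives in config byte 0x54, while the 6260 family keeps one nibble
* per drive in config byte 0x44 + port. In both cases the stored value
* is (mode + 1) adjusted for the clock table, with 0 meaning UDMA off.
*/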
/**
* artop_6210_qc_defer - implement serialization
* @qc: command
*
* Issue commands per host on this chip.
*/
static int artop6210_qc_defer(struct ata_queued_cmd *qc)
{
struct ata_host *host = qc->ap->host;
struct ata_port *alt = host->ports[1 ^ qc->ap->port_no];
int rc;
/* First apply the usual rules */
rc = ata_std_qc_defer(qc);
if (rc != 0)
return rc;
/* Now apply serialization rules. Only allow a command if the
other channel state machine is idle */
if (alt && alt->qc_active)
return ATA_DEFER_PORT;
return 0;
}
static struct scsi_host_template artop_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
static struct ata_port_operations artop6210_ops = {
.inherits = &ata_bmdma_port_ops,
.cable_detect = ata_cable_40wire,
.set_piomode = artop6210_set_piomode,
.set_dmamode = artop6210_set_dmamode,
.prereset = artop62x0_pre_reset,
.qc_defer = artop6210_qc_defer,
};
static struct ata_port_operations artop6260_ops = {
.inherits = &ata_bmdma_port_ops,
.cable_detect = artop6260_cable_detect,
.set_piomode = artop6260_set_piomode,
.set_dmamode = artop6260_set_dmamode,
.prereset = artop62x0_pre_reset,
};
static void atp8xx_fixup(struct pci_dev *pdev)
{
if (pdev->device == 0x0005)
/* BIOS may have left us in UDMA, clear it before libata probe */
pci_write_config_byte(pdev, 0x54, 0);
else if (pdev->device == 0x0008 || pdev->device == 0x0009) {
u8 reg;
/* Mac systems come up with some registers not set as we
will need them */
/* Clear reset & test bits */
pci_read_config_byte(pdev, 0x49, &reg);
pci_write_config_byte(pdev, 0x49, reg & ~0x30);
/* PCI latency must be > 0x80 for burst mode, tweak it
* if required.
*/
pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &reg);
if (reg <= 0x80)
pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x90);
/* Enable IRQ output and burst mode */
pci_read_config_byte(pdev, 0x4a, &reg);
pci_write_config_byte(pdev, 0x4a, (reg & ~0x01) | 0x80);
}
}
/**
* artop_init_one - Register ARTOP ATA PCI device with kernel services
* @pdev: PCI device to register
* @ent: Entry in artop_pci_tbl matching with @pdev
*
* Called from kernel PCI layer.
*
* LOCKING:
* Inherited from PCI layer (may sleep).
*
* RETURNS:
* Zero on success, or -ERRNO value.
*/
static int artop_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
{
static const struct ata_port_info info_6210 = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA2,
.port_ops = &artop6210_ops,
};
static const struct ata_port_info info_626x = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA4,
.port_ops = &artop6260_ops,
};
static const struct ata_port_info info_628x = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
.port_ops = &artop6260_ops,
};
static const struct ata_port_info info_628x_fast = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &artop6260_ops,
};
const struct ata_port_info *ppi[] = { NULL, NULL };
int rc;
ata_print_version_once(&pdev->dev, DRV_VERSION);
rc = pcim_enable_device(pdev);
if (rc)
return rc;
if (id->driver_data == 0) /* 6210 variant */
ppi[0] = &info_6210;
else if (id->driver_data == 1) /* 6260 */
ppi[0] = &info_626x;
else if (id->driver_data == 2) { /* 6280 or 6280 + fast */
unsigned long io = pci_resource_start(pdev, 4);
ppi[0] = &info_628x;
if (inb(io) & 0x10)
ppi[0] = &info_628x_fast;
}
BUG_ON(ppi[0] == NULL);
atp8xx_fixup(pdev);
return ata_pci_bmdma_init_one(pdev, ppi, &artop_sht, NULL, 0);
}
static const struct pci_device_id artop_pci_tbl[] = {
{ PCI_VDEVICE(ARTOP, 0x0005), 0 },
{ PCI_VDEVICE(ARTOP, 0x0006), 1 },
{ PCI_VDEVICE(ARTOP, 0x0007), 1 },
{ PCI_VDEVICE(ARTOP, 0x0008), 2 },
{ PCI_VDEVICE(ARTOP, 0x0009), 2 },
{ } /* terminate list */
};
#ifdef CONFIG_PM
static int atp8xx_reinit_one(struct pci_dev *pdev)
{
struct ata_host *host = dev_get_drvdata(&pdev->dev);
int rc;
rc = ata_pci_device_do_resume(pdev);
if (rc)
return rc;
atp8xx_fixup(pdev);
ata_host_resume(host);
return 0;
}
#endif
static struct pci_driver artop_pci_driver = {
.name = DRV_NAME,
.id_table = artop_pci_tbl,
.probe = artop_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM
.suspend = ata_pci_device_suspend,
.resume = atp8xx_reinit_one,
#endif
};
static int __init artop_init(void)
{
return pci_register_driver(&artop_pci_driver);
}
static void __exit artop_exit(void)
{
pci_unregister_driver(&artop_pci_driver);
}
module_init(artop_init);
module_exit(artop_exit);
MODULE_AUTHOR("Alan Cox, Bartlomiej Zolnierkiewicz");
MODULE_DESCRIPTION("SCSI low-level driver for ARTOP PATA");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, artop_pci_tbl);
MODULE_VERSION(DRV_VERSION);
| gpl-2.0 |
Split-Screen/android_kernel_google_msm | drivers/md/persistent-data/dm-block-manager.c | 5121 | 13481 | /*
* Copyright (C) 2011 Red Hat, Inc.
*
* This file is released under the GPL.
*/
#include "dm-block-manager.h"
#include "dm-persistent-data-internal.h"
#include "../dm-bufio.h"
#include <linux/crc32c.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rwsem.h>
#include <linux/device-mapper.h>
#include <linux/stacktrace.h>
#define DM_MSG_PREFIX "block manager"
/*----------------------------------------------------------------*/
/*
* This is a read/write semaphore with a couple of differences.
*
* i) There is a restriction on the number of concurrent read locks that
* may be held at once. This is just an implementation detail.
*
* ii) Recursive locking attempts are detected and return EINVAL. A stack
* trace is also emitted for the previous lock acquisition.
*
* iii) Priority is given to write locks.
*/
#define MAX_HOLDERS 4
#define MAX_STACK 10
typedef unsigned long stack_entries[MAX_STACK];
struct block_lock {
spinlock_t lock;
__s32 count;
struct list_head waiters;
struct task_struct *holders[MAX_HOLDERS];
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
struct stack_trace traces[MAX_HOLDERS];
stack_entries entries[MAX_HOLDERS];
#endif
};
struct waiter {
struct list_head list;
struct task_struct *task;
int wants_write;
};
static unsigned __find_holder(struct block_lock *lock,
struct task_struct *task)
{
unsigned i;
for (i = 0; i < MAX_HOLDERS; i++)
if (lock->holders[i] == task)
break;
BUG_ON(i == MAX_HOLDERS);
return i;
}
/* call this *after* you increment lock->count */
static void __add_holder(struct block_lock *lock, struct task_struct *task)
{
unsigned h = __find_holder(lock, NULL);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
struct stack_trace *t;
#endif
get_task_struct(task);
lock->holders[h] = task;
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
t = lock->traces + h;
t->nr_entries = 0;
t->max_entries = MAX_STACK;
t->entries = lock->entries[h];
t->skip = 2;
save_stack_trace(t);
#endif
}
/* call this *before* you decrement lock->count */
static void __del_holder(struct block_lock *lock, struct task_struct *task)
{
unsigned h = __find_holder(lock, task);
lock->holders[h] = NULL;
put_task_struct(task);
}
static int __check_holder(struct block_lock *lock)
{
unsigned i;
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
static struct stack_trace t;
static stack_entries entries;
#endif
for (i = 0; i < MAX_HOLDERS; i++) {
if (lock->holders[i] == current) {
DMERR("recursive lock detected in pool metadata");
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
DMERR("previously held here:");
print_stack_trace(lock->traces + i, 4);
DMERR("subsequent aquisition attempted here:");
t.nr_entries = 0;
t.max_entries = MAX_STACK;
t.entries = entries;
t.skip = 3;
save_stack_trace(&t);
print_stack_trace(&t, 4);
#endif
return -EINVAL;
}
}
return 0;
}
static void __wait(struct waiter *w)
{
for (;;) {
set_task_state(current, TASK_UNINTERRUPTIBLE);
if (!w->task)
break;
schedule();
}
set_task_state(current, TASK_RUNNING);
}
static void __wake_waiter(struct waiter *w)
{
struct task_struct *task;
list_del(&w->list);
task = w->task;
smp_mb();
w->task = NULL;
wake_up_process(task);
}
/*
* We either wake a few readers or a single writer.
*/
static void __wake_many(struct block_lock *lock)
{
struct waiter *w, *tmp;
BUG_ON(lock->count < 0);
list_for_each_entry_safe(w, tmp, &lock->waiters, list) {
if (lock->count >= MAX_HOLDERS)
return;
if (w->wants_write) {
if (lock->count > 0)
return; /* still read locked */
lock->count = -1;
__add_holder(lock, w->task);
__wake_waiter(w);
return;
}
lock->count++;
__add_holder(lock, w->task);
__wake_waiter(w);
}
}
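/*
* Editor's note: __wake_many() walks the FIFO of waiters -- it stops at
* the first writer unless the lock is completely free (count == 0), in
* which case that writer gets exclusive ownership (count = -1); readers
* are admitted until MAX_HOLDERS is reached.
*/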
static void bl_init(struct block_lock *lock)
{
int i;
spin_lock_init(&lock->lock);
lock->count = 0;
INIT_LIST_HEAD(&lock->waiters);
for (i = 0; i < MAX_HOLDERS; i++)
lock->holders[i] = NULL;
}
static int __available_for_read(struct block_lock *lock)
{
return lock->count >= 0 &&
lock->count < MAX_HOLDERS &&
list_empty(&lock->waiters);
}
static int bl_down_read(struct block_lock *lock)
{
int r;
struct waiter w;
spin_lock(&lock->lock);
r = __check_holder(lock);
if (r) {
spin_unlock(&lock->lock);
return r;
}
if (__available_for_read(lock)) {
lock->count++;
__add_holder(lock, current);
spin_unlock(&lock->lock);
return 0;
}
get_task_struct(current);
w.task = current;
w.wants_write = 0;
list_add_tail(&w.list, &lock->waiters);
spin_unlock(&lock->lock);
__wait(&w);
put_task_struct(current);
return 0;
}
static int bl_down_read_nonblock(struct block_lock *lock)
{
int r;
spin_lock(&lock->lock);
r = __check_holder(lock);
if (r)
goto out;
if (__available_for_read(lock)) {
lock->count++;
__add_holder(lock, current);
r = 0;
} else
r = -EWOULDBLOCK;
out:
spin_unlock(&lock->lock);
return r;
}
static void bl_up_read(struct block_lock *lock)
{
spin_lock(&lock->lock);
BUG_ON(lock->count <= 0);
__del_holder(lock, current);
--lock->count;
if (!list_empty(&lock->waiters))
__wake_many(lock);
spin_unlock(&lock->lock);
}
static int bl_down_write(struct block_lock *lock)
{
int r;
struct waiter w;
spin_lock(&lock->lock);
r = __check_holder(lock);
if (r) {
spin_unlock(&lock->lock);
return r;
}
if (lock->count == 0 && list_empty(&lock->waiters)) {
lock->count = -1;
__add_holder(lock, current);
spin_unlock(&lock->lock);
return 0;
}
get_task_struct(current);
w.task = current;
w.wants_write = 1;
/*
* Writers are given priority. We know there's only one mutator in the
* system, so we ignore the ordering reversal.
*/
list_add(&w.list, &lock->waiters);
spin_unlock(&lock->lock);
__wait(&w);
put_task_struct(current);
return 0;
}
static void bl_up_write(struct block_lock *lock)
{
spin_lock(&lock->lock);
__del_holder(lock, current);
lock->count = 0;
if (!list_empty(&lock->waiters))
__wake_many(lock);
spin_unlock(&lock->lock);
}
static void report_recursive_bug(dm_block_t b, int r)
{
if (r == -EINVAL)
DMERR("recursive acquisition of block %llu requested.",
(unsigned long long) b);
}
/*----------------------------------------------------------------*/
/*
* Block manager is currently implemented using dm-bufio. struct
* dm_block_manager and struct dm_block map directly onto a couple of
* structs in the bufio interface. I want to retain the freedom to move
* away from bufio in the future. So these structs are just cast within
* this .c file, rather than making it through to the public interface.
*/
static struct dm_buffer *to_buffer(struct dm_block *b)
{
return (struct dm_buffer *) b;
}
static struct dm_bufio_client *to_bufio(struct dm_block_manager *bm)
{
return (struct dm_bufio_client *) bm;
}
dm_block_t dm_block_location(struct dm_block *b)
{
return dm_bufio_get_block_number(to_buffer(b));
}
EXPORT_SYMBOL_GPL(dm_block_location);
void *dm_block_data(struct dm_block *b)
{
return dm_bufio_get_block_data(to_buffer(b));
}
EXPORT_SYMBOL_GPL(dm_block_data);
struct buffer_aux {
struct dm_block_validator *validator;
struct block_lock lock;
int write_locked;
};
static void dm_block_manager_alloc_callback(struct dm_buffer *buf)
{
struct buffer_aux *aux = dm_bufio_get_aux_data(buf);
aux->validator = NULL;
bl_init(&aux->lock);
}
static void dm_block_manager_write_callback(struct dm_buffer *buf)
{
struct buffer_aux *aux = dm_bufio_get_aux_data(buf);
if (aux->validator) {
aux->validator->prepare_for_write(aux->validator, (struct dm_block *) buf,
dm_bufio_get_block_size(dm_bufio_get_client(buf)));
}
}
/*----------------------------------------------------------------
* Public interface
*--------------------------------------------------------------*/
struct dm_block_manager *dm_block_manager_create(struct block_device *bdev,
unsigned block_size,
unsigned cache_size,
unsigned max_held_per_thread)
{
return (struct dm_block_manager *)
dm_bufio_client_create(bdev, block_size, max_held_per_thread,
sizeof(struct buffer_aux),
dm_block_manager_alloc_callback,
dm_block_manager_write_callback);
}
EXPORT_SYMBOL_GPL(dm_block_manager_create);
void dm_block_manager_destroy(struct dm_block_manager *bm)
{
return dm_bufio_client_destroy(to_bufio(bm));
}
EXPORT_SYMBOL_GPL(dm_block_manager_destroy);
unsigned dm_bm_block_size(struct dm_block_manager *bm)
{
return dm_bufio_get_block_size(to_bufio(bm));
}
EXPORT_SYMBOL_GPL(dm_bm_block_size);
dm_block_t dm_bm_nr_blocks(struct dm_block_manager *bm)
{
return dm_bufio_get_device_size(to_bufio(bm));
}
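/*
 * Note on the helper below: a buffer's validator is bound lazily. The
 * first locked access that passes a non-NULL validator runs its check()
 * and records it in the aux data; every later access must present the
 * same validator pointer (passing NULL once one is bound also counts as
 * a mismatch and fails with -EINVAL).
 */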
static int dm_bm_validate_buffer(struct dm_block_manager *bm,
struct dm_buffer *buf,
struct buffer_aux *aux,
struct dm_block_validator *v)
{
if (unlikely(!aux->validator)) {
int r;
if (!v)
return 0;
r = v->check(v, (struct dm_block *) buf, dm_bufio_get_block_size(to_bufio(bm)));
if (unlikely(r))
return r;
aux->validator = v;
} else {
if (unlikely(aux->validator != v)) {
DMERR("validator mismatch (old=%s vs new=%s) for block %llu",
aux->validator->name, v ? v->name : "NULL",
(unsigned long long)
dm_bufio_get_block_number(buf));
return -EINVAL;
}
}
return 0;
}
int dm_bm_read_lock(struct dm_block_manager *bm, dm_block_t b,
struct dm_block_validator *v,
struct dm_block **result)
{
struct buffer_aux *aux;
void *p;
int r;
p = dm_bufio_read(to_bufio(bm), b, (struct dm_buffer **) result);
if (unlikely(IS_ERR(p)))
return PTR_ERR(p);
aux = dm_bufio_get_aux_data(to_buffer(*result));
r = bl_down_read(&aux->lock);
if (unlikely(r)) {
dm_bufio_release(to_buffer(*result));
report_recursive_bug(b, r);
return r;
}
aux->write_locked = 0;
r = dm_bm_validate_buffer(bm, to_buffer(*result), aux, v);
if (unlikely(r)) {
bl_up_read(&aux->lock);
dm_bufio_release(to_buffer(*result));
return r;
}
return 0;
}
EXPORT_SYMBOL_GPL(dm_bm_read_lock);
int dm_bm_write_lock(struct dm_block_manager *bm,
dm_block_t b, struct dm_block_validator *v,
struct dm_block **result)
{
struct buffer_aux *aux;
void *p;
int r;
p = dm_bufio_read(to_bufio(bm), b, (struct dm_buffer **) result);
if (unlikely(IS_ERR(p)))
return PTR_ERR(p);
aux = dm_bufio_get_aux_data(to_buffer(*result));
r = bl_down_write(&aux->lock);
if (r) {
dm_bufio_release(to_buffer(*result));
report_recursive_bug(b, r);
return r;
}
aux->write_locked = 1;
r = dm_bm_validate_buffer(bm, to_buffer(*result), aux, v);
if (unlikely(r)) {
bl_up_write(&aux->lock);
dm_bufio_release(to_buffer(*result));
return r;
}
return 0;
}
EXPORT_SYMBOL_GPL(dm_bm_write_lock);
int dm_bm_read_try_lock(struct dm_block_manager *bm,
dm_block_t b, struct dm_block_validator *v,
struct dm_block **result)
{
struct buffer_aux *aux;
void *p;
int r;
p = dm_bufio_get(to_bufio(bm), b, (struct dm_buffer **) result);
if (unlikely(IS_ERR(p)))
return PTR_ERR(p);
if (unlikely(!p))
return -EWOULDBLOCK;
aux = dm_bufio_get_aux_data(to_buffer(*result));
r = bl_down_read_nonblock(&aux->lock);
if (r < 0) {
dm_bufio_release(to_buffer(*result));
report_recursive_bug(b, r);
return r;
}
aux->write_locked = 0;
r = dm_bm_validate_buffer(bm, to_buffer(*result), aux, v);
if (unlikely(r)) {
bl_up_read(&aux->lock);
dm_bufio_release(to_buffer(*result));
return r;
}
return 0;
}
int dm_bm_write_lock_zero(struct dm_block_manager *bm,
dm_block_t b, struct dm_block_validator *v,
struct dm_block **result)
{
int r;
struct buffer_aux *aux;
void *p;
p = dm_bufio_new(to_bufio(bm), b, (struct dm_buffer **) result);
if (unlikely(IS_ERR(p)))
return PTR_ERR(p);
memset(p, 0, dm_bm_block_size(bm));
aux = dm_bufio_get_aux_data(to_buffer(*result));
r = bl_down_write(&aux->lock);
if (r) {
dm_bufio_release(to_buffer(*result));
return r;
}
aux->write_locked = 1;
aux->validator = v;
return 0;
}
int dm_bm_unlock(struct dm_block *b)
{
struct buffer_aux *aux;
aux = dm_bufio_get_aux_data(to_buffer(b));
if (aux->write_locked) {
dm_bufio_mark_buffer_dirty(to_buffer(b));
bl_up_write(&aux->lock);
} else
bl_up_read(&aux->lock);
dm_bufio_release(to_buffer(b));
return 0;
}
EXPORT_SYMBOL_GPL(dm_bm_unlock);
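/*
 * Usage sketch (illustrative only; bm, block_nr and validator are
 * placeholders, not symbols defined in this file):
 *
 *	struct dm_block *blk;
 *	int r = dm_bm_read_lock(bm, block_nr, validator, &blk);
 *	if (r)
 *		return r;
 *	... inspect dm_block_data(blk) ...
 *	dm_bm_unlock(blk);
 *
 * dm_bm_unlock() both drops the read lock and releases the underlying
 * bufio reference, so no separate release call is needed.
 */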
int dm_bm_unlock_move(struct dm_block *b, dm_block_t n)
{
struct buffer_aux *aux;
aux = dm_bufio_get_aux_data(to_buffer(b));
if (aux->write_locked) {
dm_bufio_mark_buffer_dirty(to_buffer(b));
bl_up_write(&aux->lock);
} else
bl_up_read(&aux->lock);
dm_bufio_release_move(to_buffer(b), n);
return 0;
}
int dm_bm_flush_and_unlock(struct dm_block_manager *bm,
struct dm_block *superblock)
{
int r;
r = dm_bufio_write_dirty_buffers(to_bufio(bm));
if (unlikely(r))
return r;
r = dm_bufio_issue_flush(to_bufio(bm));
if (unlikely(r))
return r;
dm_bm_unlock(superblock);
r = dm_bufio_write_dirty_buffers(to_bufio(bm));
if (unlikely(r))
return r;
r = dm_bufio_issue_flush(to_bufio(bm));
if (unlikely(r))
return r;
return 0;
}
u32 dm_bm_checksum(const void *data, size_t len, u32 init_xor)
{
return crc32c(~(u32) 0, data, len) ^ init_xor;
}
EXPORT_SYMBOL_GPL(dm_bm_checksum);
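/*
 * Sketch of a typical caller (the header layout and EXAMPLE_CSUM_XOR are
 * illustrative, not defined in this file): a block validator checksums
 * the payload that follows the csum field and compares it against the
 * stored value.
 *
 *	struct example_header {
 *		__le32 csum;
 *		__le32 flags;
 *	} *hdr = dm_block_data(b);
 *
 *	u32 csum = dm_bm_checksum(&hdr->flags,
 *				  block_size - sizeof(__le32),
 *				  EXAMPLE_CSUM_XOR);
 *	if (csum != le32_to_cpu(hdr->csum))
 *		return -EILSEQ;
 */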
/*----------------------------------------------------------------*/
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_DESCRIPTION("Immutable metadata library for dm");
/*----------------------------------------------------------------*/
| gpl-2.0 |
DirtyDroidX/kernel_jactive | arch/blackfin/kernel/pwm.c | 5121 | 1906 | /*
* Blackfin Pulse Width Modulation (PWM) core
*
* Copyright (c) 2011 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
#include <linux/module.h>
#include <linux/pwm.h>
#include <linux/slab.h>
#include <asm/gptimers.h>
#include <asm/portmux.h>
struct pwm_device {
unsigned id;
unsigned short pin;
};
static const unsigned short pwm_to_gptimer_per[] = {
P_TMR0, P_TMR1, P_TMR2, P_TMR3, P_TMR4, P_TMR5,
P_TMR6, P_TMR7, P_TMR8, P_TMR9, P_TMR10, P_TMR11,
};
struct pwm_device *pwm_request(int pwm_id, const char *label)
{
struct pwm_device *pwm;
int ret;
/* XXX: pwm_id really should be unsigned */
if (pwm_id < 0)
return NULL;
pwm = kzalloc(sizeof(*pwm), GFP_KERNEL);
if (!pwm)
return pwm;
pwm->id = pwm_id;
if (pwm->id >= ARRAY_SIZE(pwm_to_gptimer_per))
goto err;
pwm->pin = pwm_to_gptimer_per[pwm->id];
ret = peripheral_request(pwm->pin, label);
if (ret)
goto err;
return pwm;
err:
kfree(pwm);
return NULL;
}
EXPORT_SYMBOL(pwm_request);
void pwm_free(struct pwm_device *pwm)
{
peripheral_free(pwm->pin);
kfree(pwm);
}
EXPORT_SYMBOL(pwm_free);
int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
{
unsigned long period, duty;
unsigned long long val;
if (duty_ns < 0 || duty_ns > period_ns)
return -EINVAL;
val = (unsigned long long)get_sclk() * period_ns;
do_div(val, NSEC_PER_SEC);
period = val;
val = (unsigned long long)period * duty_ns;
do_div(val, period_ns);
duty = period - val;
if (duty >= period)
duty = period - 1;
set_gptimer_config(pwm->id, TIMER_MODE_PWM | TIMER_PERIOD_CNT);
set_gptimer_pwidth(pwm->id, duty);
set_gptimer_period(pwm->id, period);
return 0;
}
EXPORT_SYMBOL(pwm_config);
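/*
 * Worked example (assuming, purely for illustration, get_sclk() returns
 * 133 MHz): period_ns = 1000000 (1 kHz) gives period = 133000000 *
 * 1000000 / 10^9 = 133000 sclk cycles. duty_ns = 250000 then gives
 * val = 133000 * 250000 / 1000000 = 33250, and the driver stores the
 * inverted width, duty = period - val = 99750, matching the computation
 * above.
 */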
int pwm_enable(struct pwm_device *pwm)
{
enable_gptimer(pwm->id);
return 0;
}
EXPORT_SYMBOL(pwm_enable);
void pwm_disable(struct pwm_device *pwm)
{
disable_gptimer(pwm->id);
}
EXPORT_SYMBOL(pwm_disable);
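/*
 * Lifecycle sketch (illustrative; error handling trimmed):
 *
 *	struct pwm_device *pwm = pwm_request(0, "backlight");
 *	if (!pwm)
 *		return -ENODEV;
 *	pwm_config(pwm, 250000, 1000000);	(25% duty at 1 kHz)
 *	pwm_enable(pwm);
 *	...
 *	pwm_disable(pwm);
 *	pwm_free(pwm);
 */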
| gpl-2.0 |
Mystic-Mirage/android_kernel_gigabyte_roma_r2_plus | drivers/infiniband/ulp/ipoib/ipoib_vlan.c | 9217 | 4914 | /*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <asm/uaccess.h>
#include "ipoib.h"
static ssize_t show_parent(struct device *d, struct device_attribute *attr,
char *buf)
{
struct net_device *dev = to_net_dev(d);
struct ipoib_dev_priv *priv = netdev_priv(dev);
return sprintf(buf, "%s\n", priv->parent->name);
}
static DEVICE_ATTR(parent, S_IRUGO, show_parent, NULL);
int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
{
struct ipoib_dev_priv *ppriv, *priv;
char intf_name[IFNAMSIZ];
int result;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
ppriv = netdev_priv(pdev);
if (!rtnl_trylock())
return restart_syscall();
mutex_lock(&ppriv->vlan_mutex);
/*
* First ensure this isn't a duplicate. We check the parent device and
* then all of the child interfaces to make sure the Pkey doesn't match.
*/
if (ppriv->pkey == pkey) {
result = -ENOTUNIQ;
priv = NULL;
goto err;
}
list_for_each_entry(priv, &ppriv->child_intfs, list) {
if (priv->pkey == pkey) {
result = -ENOTUNIQ;
priv = NULL;
goto err;
}
}
snprintf(intf_name, sizeof intf_name, "%s.%04x",
ppriv->dev->name, pkey);
priv = ipoib_intf_alloc(intf_name);
if (!priv) {
result = -ENOMEM;
goto err;
}
priv->max_ib_mtu = ppriv->max_ib_mtu;
/* MTU will be reset when mcast join happens */
priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
result = ipoib_set_dev_features(priv, ppriv->ca);
if (result)
goto err;
priv->pkey = pkey;
memcpy(priv->dev->dev_addr, ppriv->dev->dev_addr, INFINIBAND_ALEN);
priv->dev->broadcast[8] = pkey >> 8;
priv->dev->broadcast[9] = pkey & 0xff;
result = ipoib_dev_init(priv->dev, ppriv->ca, ppriv->port);
if (result < 0) {
ipoib_warn(ppriv, "failed to initialize subinterface: "
"device %s, port %d",
ppriv->ca->name, ppriv->port);
goto err;
}
result = register_netdevice(priv->dev);
if (result) {
ipoib_warn(priv, "failed to initialize; error %i", result);
goto register_failed;
}
priv->parent = ppriv->dev;
ipoib_create_debug_files(priv->dev);
if (ipoib_cm_add_mode_attr(priv->dev))
goto sysfs_failed;
if (ipoib_add_pkey_attr(priv->dev))
goto sysfs_failed;
if (ipoib_add_umcast_attr(priv->dev))
goto sysfs_failed;
if (device_create_file(&priv->dev->dev, &dev_attr_parent))
goto sysfs_failed;
list_add_tail(&priv->list, &ppriv->child_intfs);
mutex_unlock(&ppriv->vlan_mutex);
rtnl_unlock();
return 0;
sysfs_failed:
ipoib_delete_debug_files(priv->dev);
unregister_netdevice(priv->dev);
register_failed:
ipoib_dev_cleanup(priv->dev);
err:
mutex_unlock(&ppriv->vlan_mutex);
rtnl_unlock();
if (priv)
free_netdev(priv->dev);
return result;
}
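/*
 * Example (illustrative values): adding pkey 0x8001 on parent ib0
 * creates a child interface named "ib0.8001", with the pkey folded into
 * bytes 8 and 9 of the broadcast address as done above.
 */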
int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
{
struct ipoib_dev_priv *ppriv, *priv, *tpriv;
struct net_device *dev = NULL;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
ppriv = netdev_priv(pdev);
if (!rtnl_trylock())
return restart_syscall();
mutex_lock(&ppriv->vlan_mutex);
list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
if (priv->pkey == pkey) {
unregister_netdevice(priv->dev);
ipoib_dev_cleanup(priv->dev);
list_del(&priv->list);
dev = priv->dev;
break;
}
}
mutex_unlock(&ppriv->vlan_mutex);
rtnl_unlock();
if (dev) {
free_netdev(dev);
return 0;
}
return -ENODEV;
}
| gpl-2.0 |
wtbdaaaa/i8320kernel | drivers/ide/it8213.c | 9217 | 5669 | /*
* ITE 8213 IDE driver
*
* Copyright (C) 2006 Jack Lee
* Copyright (C) 2006 Alan Cox
* Copyright (C) 2007 Bartlomiej Zolnierkiewicz
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/ide.h>
#include <linux/init.h>
#define DRV_NAME "it8213"
/**
* it8213_set_pio_mode - set host controller for PIO mode
* @hwif: port
* @drive: drive
*
* Set the interface PIO mode.
*/
static void it8213_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
int is_slave = drive->dn & 1;
int master_port = 0x40;
int slave_port = 0x44;
unsigned long flags;
u16 master_data;
u8 slave_data;
static DEFINE_SPINLOCK(tune_lock);
int control = 0;
const u8 pio = drive->pio_mode - XFER_PIO_0;
static const u8 timings[][2] = {
{ 0, 0 },
{ 0, 0 },
{ 1, 0 },
{ 2, 1 },
{ 2, 3 }, };
spin_lock_irqsave(&tune_lock, flags);
pci_read_config_word(dev, master_port, &master_data);
if (pio > 1)
control |= 1; /* Programmable timing on */
if (drive->media != ide_disk)
control |= 4; /* ATAPI */
if (ide_pio_need_iordy(drive, pio))
control |= 2; /* IORDY */
if (is_slave) {
master_data |= 0x4000;
master_data &= ~0x0070;
if (pio > 1)
master_data = master_data | (control << 4);
pci_read_config_byte(dev, slave_port, &slave_data);
slave_data = slave_data & 0xf0;
slave_data = slave_data | (timings[pio][0] << 2) | timings[pio][1];
} else {
master_data &= ~0x3307;
if (pio > 1)
master_data = master_data | control;
master_data = master_data | (timings[pio][0] << 12) | (timings[pio][1] << 8);
}
pci_write_config_word(dev, master_port, master_data);
if (is_slave)
pci_write_config_byte(dev, slave_port, slave_data);
spin_unlock_irqrestore(&tune_lock, flags);
}
/**
* it8213_set_dma_mode - set host controller for DMA mode
* @hwif: port
* @drive: drive
*
* Tune the ITE chipset for the DMA mode.
*/
static void it8213_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
u8 maslave = 0x40;
int a_speed = 3 << (drive->dn * 4);
int u_flag = 1 << drive->dn;
int v_flag = 0x01 << drive->dn;
int w_flag = 0x10 << drive->dn;
int u_speed = 0;
u16 reg4042, reg4a;
u8 reg48, reg54, reg55;
const u8 speed = drive->dma_mode;
pci_read_config_word(dev, maslave, ®4042);
pci_read_config_byte(dev, 0x48, ®48);
pci_read_config_word(dev, 0x4a, ®4a);
pci_read_config_byte(dev, 0x54, ®54);
pci_read_config_byte(dev, 0x55, ®55);
if (speed >= XFER_UDMA_0) {
u8 udma = speed - XFER_UDMA_0;
u_speed = min_t(u8, 2 - (udma & 1), udma) << (drive->dn * 4);
if (!(reg48 & u_flag))
pci_write_config_byte(dev, 0x48, reg48 | u_flag);
if (speed >= XFER_UDMA_5)
pci_write_config_byte(dev, 0x55, (u8) reg55|w_flag);
else
pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
if ((reg4a & a_speed) != u_speed)
pci_write_config_word(dev, 0x4a, (reg4a & ~a_speed) | u_speed);
if (speed > XFER_UDMA_2) {
if (!(reg54 & v_flag))
pci_write_config_byte(dev, 0x54, reg54 | v_flag);
} else
pci_write_config_byte(dev, 0x54, reg54 & ~v_flag);
} else {
const u8 mwdma_to_pio[] = { 0, 3, 4 };
if (reg48 & u_flag)
pci_write_config_byte(dev, 0x48, reg48 & ~u_flag);
if (reg4a & a_speed)
pci_write_config_word(dev, 0x4a, reg4a & ~a_speed);
if (reg54 & v_flag)
pci_write_config_byte(dev, 0x54, reg54 & ~v_flag);
if (reg55 & w_flag)
pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
if (speed >= XFER_MW_DMA_0)
drive->pio_mode =
mwdma_to_pio[speed - XFER_MW_DMA_0] + XFER_PIO_0;
else
drive->pio_mode = XFER_PIO_2; /* for SWDMA2 */
it8213_set_pio_mode(hwif, drive);
}
}
static u8 it8213_cable_detect(ide_hwif_t *hwif)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
u8 reg42h = 0;
pci_read_config_byte(dev, 0x42, ®42h);
return (reg42h & 0x02) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
}
static const struct ide_port_ops it8213_port_ops = {
.set_pio_mode = it8213_set_pio_mode,
.set_dma_mode = it8213_set_dma_mode,
.cable_detect = it8213_cable_detect,
};
static const struct ide_port_info it8213_chipset __devinitdata = {
.name = DRV_NAME,
.enablebits = { {0x41, 0x80, 0x80} },
.port_ops = &it8213_port_ops,
.host_flags = IDE_HFLAG_SINGLE,
.pio_mask = ATA_PIO4,
.swdma_mask = ATA_SWDMA2_ONLY,
.mwdma_mask = ATA_MWDMA12_ONLY,
.udma_mask = ATA_UDMA6,
};
/**
* it8213_init_one - pci layer discovery entry
* @dev: PCI device
* @id: ident table entry
*
* Called by the PCI code when it finds an ITE8213 controller. As
* this device follows the standard interfaces we can use the
* standard helper functions to do almost all the work for us.
*/
static int __devinit it8213_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
return ide_pci_init_one(dev, &it8213_chipset, NULL);
}
static const struct pci_device_id it8213_pci_tbl[] = {
{ PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8213), 0 },
{ 0, },
};
MODULE_DEVICE_TABLE(pci, it8213_pci_tbl);
static struct pci_driver it8213_pci_driver = {
.name = "ITE8213_IDE",
.id_table = it8213_pci_tbl,
.probe = it8213_init_one,
.remove = ide_pci_remove,
.suspend = ide_pci_suspend,
.resume = ide_pci_resume,
};
static int __init it8213_ide_init(void)
{
return ide_pci_register_driver(&it8213_pci_driver);
}
static void __exit it8213_ide_exit(void)
{
pci_unregister_driver(&it8213_pci_driver);
}
module_init(it8213_ide_init);
module_exit(it8213_ide_exit);
MODULE_AUTHOR("Jack Lee, Alan Cox");
MODULE_DESCRIPTION("PCI driver module for the ITE 8213");
MODULE_LICENSE("GPL");
| gpl-2.0 |
voltagex/kernel-sprdb2g_gonk4.0_6821 | drivers/ide/aec62xx.c | 9217 | 9421 | /*
* Copyright (C) 1999-2002 Andre Hedrick <andre@linux-ide.org>
* Copyright (C) 2007 MontaVista Software, Inc. <source@mvista.com>
*
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/ide.h>
#include <linux/init.h>
#include <asm/io.h>
#define DRV_NAME "aec62xx"
struct chipset_bus_clock_list_entry {
u8 xfer_speed;
u8 chipset_settings;
u8 ultra_settings;
};
static const struct chipset_bus_clock_list_entry aec6xxx_33_base [] = {
{ XFER_UDMA_6, 0x31, 0x07 },
{ XFER_UDMA_5, 0x31, 0x06 },
{ XFER_UDMA_4, 0x31, 0x05 },
{ XFER_UDMA_3, 0x31, 0x04 },
{ XFER_UDMA_2, 0x31, 0x03 },
{ XFER_UDMA_1, 0x31, 0x02 },
{ XFER_UDMA_0, 0x31, 0x01 },
{ XFER_MW_DMA_2, 0x31, 0x00 },
{ XFER_MW_DMA_1, 0x31, 0x00 },
{ XFER_MW_DMA_0, 0x0a, 0x00 },
{ XFER_PIO_4, 0x31, 0x00 },
{ XFER_PIO_3, 0x33, 0x00 },
{ XFER_PIO_2, 0x08, 0x00 },
{ XFER_PIO_1, 0x0a, 0x00 },
{ XFER_PIO_0, 0x00, 0x00 },
{ 0, 0x00, 0x00 }
};
static const struct chipset_bus_clock_list_entry aec6xxx_34_base [] = {
{ XFER_UDMA_6, 0x41, 0x06 },
{ XFER_UDMA_5, 0x41, 0x05 },
{ XFER_UDMA_4, 0x41, 0x04 },
{ XFER_UDMA_3, 0x41, 0x03 },
{ XFER_UDMA_2, 0x41, 0x02 },
{ XFER_UDMA_1, 0x41, 0x01 },
{ XFER_UDMA_0, 0x41, 0x01 },
{ XFER_MW_DMA_2, 0x41, 0x00 },
{ XFER_MW_DMA_1, 0x42, 0x00 },
{ XFER_MW_DMA_0, 0x7a, 0x00 },
{ XFER_PIO_4, 0x41, 0x00 },
{ XFER_PIO_3, 0x43, 0x00 },
{ XFER_PIO_2, 0x78, 0x00 },
{ XFER_PIO_1, 0x7a, 0x00 },
{ XFER_PIO_0, 0x70, 0x00 },
{ 0, 0x00, 0x00 }
};
/*
* TODO: active tuning and correction of cards without a BIOS.
*/
static u8 pci_bus_clock_list (u8 speed, struct chipset_bus_clock_list_entry * chipset_table)
{
for ( ; chipset_table->xfer_speed ; chipset_table++)
if (chipset_table->xfer_speed == speed) {
return chipset_table->chipset_settings;
}
return chipset_table->chipset_settings;
}
static u8 pci_bus_clock_list_ultra (u8 speed, struct chipset_bus_clock_list_entry * chipset_table)
{
for ( ; chipset_table->xfer_speed ; chipset_table++)
if (chipset_table->xfer_speed == speed) {
return chipset_table->ultra_settings;
}
return chipset_table->ultra_settings;
}
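/*
 * Note: both lookups above fall back to the terminating
 * { 0, 0x00, 0x00 } entry when 'speed' is not matched, so an unknown
 * transfer mode ends up programming all-zero settings rather than
 * failing.
 */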
static void aec6210_set_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
struct ide_host *host = pci_get_drvdata(dev);
struct chipset_bus_clock_list_entry *bus_clock = host->host_priv;
u16 d_conf = 0;
u8 ultra = 0, ultra_conf = 0;
u8 tmp0 = 0, tmp1 = 0, tmp2 = 0;
const u8 speed = drive->dma_mode;
unsigned long flags;
local_irq_save(flags);
/* 0x40|(2*drive->dn): Active, 0x41|(2*drive->dn): Recovery */
pci_read_config_word(dev, 0x40|(2*drive->dn), &d_conf);
tmp0 = pci_bus_clock_list(speed, bus_clock);
d_conf = ((tmp0 & 0xf0) << 4) | (tmp0 & 0xf);
pci_write_config_word(dev, 0x40|(2*drive->dn), d_conf);
tmp1 = 0x00;
tmp2 = 0x00;
pci_read_config_byte(dev, 0x54, &ultra);
tmp1 = ((0x00 << (2*drive->dn)) | (ultra & ~(3 << (2*drive->dn))));
ultra_conf = pci_bus_clock_list_ultra(speed, bus_clock);
tmp2 = ((ultra_conf << (2*drive->dn)) | (tmp1 & ~(3 << (2*drive->dn))));
pci_write_config_byte(dev, 0x54, tmp2);
local_irq_restore(flags);
}
static void aec6260_set_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
struct ide_host *host = pci_get_drvdata(dev);
struct chipset_bus_clock_list_entry *bus_clock = host->host_priv;
u8 unit = drive->dn & 1;
u8 tmp1 = 0, tmp2 = 0;
u8 ultra = 0, drive_conf = 0, ultra_conf = 0;
const u8 speed = drive->dma_mode;
unsigned long flags;
local_irq_save(flags);
/* high 4-bits: Active, low 4-bits: Recovery */
pci_read_config_byte(dev, 0x40|drive->dn, &drive_conf);
drive_conf = pci_bus_clock_list(speed, bus_clock);
pci_write_config_byte(dev, 0x40|drive->dn, drive_conf);
pci_read_config_byte(dev, (0x44|hwif->channel), &ultra);
tmp1 = ((0x00 << (4*unit)) | (ultra & ~(7 << (4*unit))));
ultra_conf = pci_bus_clock_list_ultra(speed, bus_clock);
tmp2 = ((ultra_conf << (4*unit)) | (tmp1 & ~(7 << (4*unit))));
pci_write_config_byte(dev, (0x44|hwif->channel), tmp2);
local_irq_restore(flags);
}
static void aec_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
drive->dma_mode = drive->pio_mode;
hwif->port_ops->set_dma_mode(hwif, drive);
}
static int init_chipset_aec62xx(struct pci_dev *dev)
{
/* These are necessary to get AEC6280 Macintosh cards to work */
if ((dev->device == PCI_DEVICE_ID_ARTOP_ATP865) ||
(dev->device == PCI_DEVICE_ID_ARTOP_ATP865R)) {
u8 reg49h = 0, reg4ah = 0;
/* Clear reset and test bits. */
pci_read_config_byte(dev, 0x49, ®49h);
pci_write_config_byte(dev, 0x49, reg49h & ~0x30);
/* Enable chip interrupt output. */
pci_read_config_byte(dev, 0x4a, ®4ah);
pci_write_config_byte(dev, 0x4a, reg4ah & ~0x01);
/* Enable burst mode. */
pci_read_config_byte(dev, 0x4a, ®4ah);
pci_write_config_byte(dev, 0x4a, reg4ah | 0x80);
}
return 0;
}
static u8 atp86x_cable_detect(ide_hwif_t *hwif)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
u8 ata66 = 0, mask = hwif->channel ? 0x02 : 0x01;
pci_read_config_byte(dev, 0x49, &ata66);
return (ata66 & mask) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
}
static const struct ide_port_ops atp850_port_ops = {
.set_pio_mode = aec_set_pio_mode,
.set_dma_mode = aec6210_set_mode,
};
static const struct ide_port_ops atp86x_port_ops = {
.set_pio_mode = aec_set_pio_mode,
.set_dma_mode = aec6260_set_mode,
.cable_detect = atp86x_cable_detect,
};
static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
{ /* 0: AEC6210 */
.name = DRV_NAME,
.init_chipset = init_chipset_aec62xx,
.enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
.port_ops = &atp850_port_ops,
.host_flags = IDE_HFLAG_SERIALIZE |
IDE_HFLAG_NO_ATAPI_DMA |
IDE_HFLAG_NO_DSC |
IDE_HFLAG_OFF_BOARD,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA2,
},
{ /* 1: AEC6260 */
.name = DRV_NAME,
.init_chipset = init_chipset_aec62xx,
.port_ops = &atp86x_port_ops,
.host_flags = IDE_HFLAG_NO_ATAPI_DMA | IDE_HFLAG_NO_AUTODMA |
IDE_HFLAG_OFF_BOARD,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA4,
},
{ /* 2: AEC6260R */
.name = DRV_NAME,
.init_chipset = init_chipset_aec62xx,
.enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
.port_ops = &atp86x_port_ops,
.host_flags = IDE_HFLAG_NO_ATAPI_DMA |
IDE_HFLAG_NON_BOOTABLE,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA4,
},
{ /* 3: AEC6280 */
.name = DRV_NAME,
.init_chipset = init_chipset_aec62xx,
.port_ops = &atp86x_port_ops,
.host_flags = IDE_HFLAG_NO_ATAPI_DMA |
IDE_HFLAG_OFF_BOARD,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
},
{ /* 4: AEC6280R */
.name = DRV_NAME,
.init_chipset = init_chipset_aec62xx,
.enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
.port_ops = &atp86x_port_ops,
.host_flags = IDE_HFLAG_NO_ATAPI_DMA |
IDE_HFLAG_OFF_BOARD,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
}
};
/**
* aec62xx_init_one - called when a AEC is found
* @dev: the aec62xx device
* @id: the matching pci id
*
* Called when the PCI registration layer (or the IDE initialization)
* finds a device matching our IDE device tables.
*
* NOTE: since we're going to modify the 'name' field for AEC-6[26]80[R]
* chips, pass a local copy of 'struct ide_port_info' down the call chain.
*/
static int __devinit aec62xx_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
const struct chipset_bus_clock_list_entry *bus_clock;
struct ide_port_info d;
u8 idx = id->driver_data;
int bus_speed = ide_pci_clk ? ide_pci_clk : 33;
int err;
if (bus_speed <= 33)
bus_clock = aec6xxx_33_base;
else
bus_clock = aec6xxx_34_base;
err = pci_enable_device(dev);
if (err)
return err;
d = aec62xx_chipsets[idx];
if (idx == 3 || idx == 4) {
unsigned long dma_base = pci_resource_start(dev, 4);
if (inb(dma_base + 2) & 0x10) {
printk(KERN_INFO DRV_NAME " %s: AEC6880%s card detected"
"\n", pci_name(dev), (idx == 4) ? "R" : "");
d.udma_mask = ATA_UDMA6;
}
}
err = ide_pci_init_one(dev, &d, (void *)bus_clock);
if (err)
pci_disable_device(dev);
return err;
}
static void __devexit aec62xx_remove(struct pci_dev *dev)
{
ide_pci_remove(dev);
pci_disable_device(dev);
}
static const struct pci_device_id aec62xx_pci_tbl[] = {
{ PCI_VDEVICE(ARTOP, PCI_DEVICE_ID_ARTOP_ATP850UF), 0 },
{ PCI_VDEVICE(ARTOP, PCI_DEVICE_ID_ARTOP_ATP860), 1 },
{ PCI_VDEVICE(ARTOP, PCI_DEVICE_ID_ARTOP_ATP860R), 2 },
{ PCI_VDEVICE(ARTOP, PCI_DEVICE_ID_ARTOP_ATP865), 3 },
{ PCI_VDEVICE(ARTOP, PCI_DEVICE_ID_ARTOP_ATP865R), 4 },
{ 0, },
};
MODULE_DEVICE_TABLE(pci, aec62xx_pci_tbl);
static struct pci_driver aec62xx_pci_driver = {
.name = "AEC62xx_IDE",
.id_table = aec62xx_pci_tbl,
.probe = aec62xx_init_one,
.remove = __devexit_p(aec62xx_remove),
.suspend = ide_pci_suspend,
.resume = ide_pci_resume,
};
static int __init aec62xx_ide_init(void)
{
return ide_pci_register_driver(&aec62xx_pci_driver);
}
static void __exit aec62xx_ide_exit(void)
{
pci_unregister_driver(&aec62xx_pci_driver);
}
module_init(aec62xx_ide_init);
module_exit(aec62xx_ide_exit);
MODULE_AUTHOR("Andre Hedrick");
MODULE_DESCRIPTION("PCI driver module for ARTOP AEC62xx IDE");
MODULE_LICENSE("GPL");
| gpl-2.0 |
pershoot/android_kernel_asus_tf701t | arch/cris/arch-v32/mach-a3/cpufreq.c | 9473 | 3532 | #include <linux/init.h>
#include <linux/module.h>
#include <linux/cpufreq.h>
#include <hwregs/reg_map.h>
#include <hwregs/reg_rdwr.h>
#include <hwregs/clkgen_defs.h>
#include <hwregs/ddr2_defs.h>
static int
cris_sdram_freq_notifier(struct notifier_block *nb, unsigned long val,
void *data);
static struct notifier_block cris_sdram_freq_notifier_block = {
.notifier_call = cris_sdram_freq_notifier
};
static struct cpufreq_frequency_table cris_freq_table[] = {
{0x01, 6000},
{0x02, 200000},
{0, CPUFREQ_TABLE_END},
};
static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu)
{
reg_clkgen_rw_clk_ctrl clk_ctrl;
clk_ctrl = REG_RD(clkgen, regi_clkgen, rw_clk_ctrl);
return clk_ctrl.pll ? 200000 : 6000;
}
static void cris_freq_set_cpu_state(unsigned int state)
{
int i = 0;
struct cpufreq_freqs freqs;
reg_clkgen_rw_clk_ctrl clk_ctrl;
clk_ctrl = REG_RD(clkgen, regi_clkgen, rw_clk_ctrl);
#ifdef CONFIG_SMP
for_each_present_cpu(i)
#endif
{
freqs.old = cris_freq_get_cpu_frequency(i);
freqs.new = cris_freq_table[state].frequency;
freqs.cpu = i;
}
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
local_irq_disable();
/* Even though we may be SMP they will share the same clock
* so all settings are made on CPU0. */
if (cris_freq_table[state].frequency == 200000)
clk_ctrl.pll = 1;
else
clk_ctrl.pll = 0;
REG_WR(clkgen, regi_clkgen, rw_clk_ctrl, clk_ctrl);
local_irq_enable();
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
};
static int cris_freq_verify(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy, &cris_freq_table[0]);
}
static int cris_freq_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
unsigned int newstate = 0;
if (cpufreq_frequency_table_target(policy, cris_freq_table,
target_freq, relation, &newstate))
return -EINVAL;
cris_freq_set_cpu_state(newstate);
return 0;
}
static int cris_freq_cpu_init(struct cpufreq_policy *policy)
{
int result;
/* cpuinfo and default policy values */
policy->cpuinfo.transition_latency = 1000000; /* 1ms */
policy->cur = cris_freq_get_cpu_frequency(0);
result = cpufreq_frequency_table_cpuinfo(policy, cris_freq_table);
if (result)
return (result);
cpufreq_frequency_table_get_attr(cris_freq_table, policy->cpu);
return 0;
}
static int cris_freq_cpu_exit(struct cpufreq_policy *policy)
{
cpufreq_frequency_table_put_attr(policy->cpu);
return 0;
}
static struct freq_attr *cris_freq_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
static struct cpufreq_driver cris_freq_driver = {
.get = cris_freq_get_cpu_frequency,
.verify = cris_freq_verify,
.target = cris_freq_target,
.init = cris_freq_cpu_init,
.exit = cris_freq_cpu_exit,
.name = "cris_freq",
.owner = THIS_MODULE,
.attr = cris_freq_attr,
};
static int __init cris_freq_init(void)
{
int ret;
ret = cpufreq_register_driver(&cris_freq_driver);
cpufreq_register_notifier(&cris_sdram_freq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
return ret;
}
static int
cris_sdram_freq_notifier(struct notifier_block *nb, unsigned long val,
void *data)
{
int i;
struct cpufreq_freqs *freqs = data;
if (val == CPUFREQ_PRECHANGE) {
reg_ddr2_rw_cfg cfg =
REG_RD(ddr2, regi_ddr2_ctrl, rw_cfg);
cfg.ref_interval = (freqs->new == 200000 ? 1560 : 46);
if (freqs->new == 200000)
for (i = 0; i < 50000; i++);
/* Write the updated refresh interval back to the DDR2 controller. */
REG_WR(ddr2, regi_ddr2_ctrl, rw_cfg, cfg);
}
return 0;
}
module_init(cris_freq_init);
| gpl-2.0 |
djmatt604/android_kernel_T989D_JB | fs/nilfs2/btnode.c | 12289 | 7842 | /*
* btnode.c - NILFS B-tree node cache
*
* Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
* This file was originally written by Seiji Kihara <kihara@osrg.net>
* and fully revised by Ryusuke Konishi <ryusuke@osrg.net> for
* stabilization and simplification.
*
*/
#include <linux/types.h>
#include <linux/buffer_head.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include "nilfs.h"
#include "mdt.h"
#include "dat.h"
#include "page.h"
#include "btnode.h"
void nilfs_btnode_cache_clear(struct address_space *btnc)
{
invalidate_mapping_pages(btnc, 0, -1);
truncate_inode_pages(btnc, 0);
}
struct buffer_head *
nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
{
struct inode *inode = NILFS_BTNC_I(btnc);
struct buffer_head *bh;
bh = nilfs_grab_buffer(inode, btnc, blocknr, 1 << BH_NILFS_Node);
if (unlikely(!bh))
return NULL;
if (unlikely(buffer_mapped(bh) || buffer_uptodate(bh) ||
buffer_dirty(bh))) {
brelse(bh);
BUG();
}
memset(bh->b_data, 0, 1 << inode->i_blkbits);
bh->b_bdev = inode->i_sb->s_bdev;
bh->b_blocknr = blocknr;
set_buffer_mapped(bh);
set_buffer_uptodate(bh);
unlock_page(bh->b_page);
page_cache_release(bh->b_page);
return bh;
}
int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
sector_t pblocknr, int mode,
struct buffer_head **pbh, sector_t *submit_ptr)
{
struct buffer_head *bh;
struct inode *inode = NILFS_BTNC_I(btnc);
struct page *page;
int err;
bh = nilfs_grab_buffer(inode, btnc, blocknr, 1 << BH_NILFS_Node);
if (unlikely(!bh))
return -ENOMEM;
err = -EEXIST; /* internal code */
page = bh->b_page;
if (buffer_uptodate(bh) || buffer_dirty(bh))
goto found;
if (pblocknr == 0) {
pblocknr = blocknr;
if (inode->i_ino != NILFS_DAT_INO) {
struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
/* blocknr is a virtual block number */
err = nilfs_dat_translate(nilfs->ns_dat, blocknr,
&pblocknr);
if (unlikely(err)) {
brelse(bh);
goto out_locked;
}
}
}
if (mode == READA) {
if (pblocknr != *submit_ptr + 1 || !trylock_buffer(bh)) {
err = -EBUSY; /* internal code */
brelse(bh);
goto out_locked;
}
} else { /* mode == READ */
lock_buffer(bh);
}
if (buffer_uptodate(bh)) {
unlock_buffer(bh);
err = -EEXIST; /* internal code */
goto found;
}
set_buffer_mapped(bh);
bh->b_bdev = inode->i_sb->s_bdev;
bh->b_blocknr = pblocknr; /* set block address for read */
bh->b_end_io = end_buffer_read_sync;
get_bh(bh);
submit_bh(mode, bh);
bh->b_blocknr = blocknr; /* set back to the given block address */
*submit_ptr = pblocknr;
err = 0;
found:
*pbh = bh;
out_locked:
unlock_page(page);
page_cache_release(page);
return err;
}
/**
* nilfs_btnode_delete - delete B-tree node buffer
* @bh: buffer to be deleted
*
* nilfs_btnode_delete() invalidates the specified buffer and delete the page
* including the buffer if the page gets unbusy.
*/
void nilfs_btnode_delete(struct buffer_head *bh)
{
struct address_space *mapping;
struct page *page = bh->b_page;
pgoff_t index = page_index(page);
int still_dirty;
page_cache_get(page);
lock_page(page);
wait_on_page_writeback(page);
nilfs_forget_buffer(bh);
still_dirty = PageDirty(page);
mapping = page->mapping;
unlock_page(page);
page_cache_release(page);
if (!still_dirty && mapping)
invalidate_inode_pages2_range(mapping, index, index);
}
/**
* nilfs_btnode_prepare_change_key
* prepare to move contents of the block for old key to one of new key.
* the old buffer will not be removed, but might be reused for new buffer.
* it might return -ENOMEM because of memory allocation errors,
* and might return -EIO because of disk read errors.
*/
int nilfs_btnode_prepare_change_key(struct address_space *btnc,
struct nilfs_btnode_chkey_ctxt *ctxt)
{
struct buffer_head *obh, *nbh;
struct inode *inode = NILFS_BTNC_I(btnc);
__u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
int err;
if (oldkey == newkey)
return 0;
obh = ctxt->bh;
ctxt->newbh = NULL;
if (inode->i_blkbits == PAGE_CACHE_SHIFT) {
lock_page(obh->b_page);
/*
* We cannot call radix_tree_preload for the kernels older
* than 2.6.23, because it is not exported for modules.
*/
retry:
err = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
if (err)
goto failed_unlock;
/* BUG_ON(oldkey != obh->b_page->index); */
if (unlikely(oldkey != obh->b_page->index))
NILFS_PAGE_BUG(obh->b_page,
"invalid oldkey %lld (newkey=%lld)",
(unsigned long long)oldkey,
(unsigned long long)newkey);
spin_lock_irq(&btnc->tree_lock);
err = radix_tree_insert(&btnc->page_tree, newkey, obh->b_page);
spin_unlock_irq(&btnc->tree_lock);
/*
* Note: page->index will not change to newkey until
* nilfs_btnode_commit_change_key() will be called.
* To protect the page in intermediate state, the page lock
* is held.
*/
radix_tree_preload_end();
if (!err)
return 0;
else if (err != -EEXIST)
goto failed_unlock;
err = invalidate_inode_pages2_range(btnc, newkey, newkey);
if (!err)
goto retry;
/* fallback to copy mode */
unlock_page(obh->b_page);
}
nbh = nilfs_btnode_create_block(btnc, newkey);
if (!nbh)
return -ENOMEM;
BUG_ON(nbh == obh);
ctxt->newbh = nbh;
return 0;
failed_unlock:
unlock_page(obh->b_page);
return err;
}
/**
* nilfs_btnode_commit_change_key
* commit the change_key operation prepared by prepare_change_key().
*/
void nilfs_btnode_commit_change_key(struct address_space *btnc,
struct nilfs_btnode_chkey_ctxt *ctxt)
{
struct buffer_head *obh = ctxt->bh, *nbh = ctxt->newbh;
__u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
struct page *opage;
if (oldkey == newkey)
return;
if (nbh == NULL) { /* blocksize == pagesize */
opage = obh->b_page;
if (unlikely(oldkey != opage->index))
NILFS_PAGE_BUG(opage,
"invalid oldkey %lld (newkey=%lld)",
(unsigned long long)oldkey,
(unsigned long long)newkey);
mark_buffer_dirty(obh);
spin_lock_irq(&btnc->tree_lock);
radix_tree_delete(&btnc->page_tree, oldkey);
radix_tree_tag_set(&btnc->page_tree, newkey,
PAGECACHE_TAG_DIRTY);
spin_unlock_irq(&btnc->tree_lock);
opage->index = obh->b_blocknr = newkey;
unlock_page(opage);
} else {
nilfs_copy_buffer(nbh, obh);
mark_buffer_dirty(nbh);
nbh->b_blocknr = newkey;
ctxt->bh = nbh;
nilfs_btnode_delete(obh); /* will decrement bh->b_count */
}
}
/**
* nilfs_btnode_abort_change_key
* abort the change_key operation prepared by prepare_change_key().
*/
void nilfs_btnode_abort_change_key(struct address_space *btnc,
struct nilfs_btnode_chkey_ctxt *ctxt)
{
struct buffer_head *nbh = ctxt->newbh;
__u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
if (oldkey == newkey)
return;
if (nbh == NULL) { /* blocksize == pagesize */
spin_lock_irq(&btnc->tree_lock);
radix_tree_delete(&btnc->page_tree, newkey);
spin_unlock_irq(&btnc->tree_lock);
unlock_page(ctxt->bh->b_page);
} else
brelse(nbh);
}
| gpl-2.0 |
tgraf/net-next | drivers/video/fbdev/geode/display_gx.c | 14081 | 4949 | /*
* Geode GX display controller.
*
* Copyright (C) 2005 Arcom Control Systems Ltd.
*
* Portions from AMD's original 2.4 driver:
* Copyright (C) 2004 Advanced Micro Devices, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/spinlock.h>
#include <linux/fb.h>
#include <linux/delay.h>
#include <asm/io.h>
#include <asm/div64.h>
#include <asm/delay.h>
#include <linux/cs5535.h>
#include "gxfb.h"
unsigned int gx_frame_buffer_size(void)
{
unsigned int val;
if (!cs5535_has_vsa2()) {
uint32_t hi, lo;
/* The number of pages is (PMAX - PMIN)+1 */
rdmsr(MSR_GLIU_P2D_RO0, lo, hi);
/* PMAX */
val = ((hi & 0xff) << 12) | ((lo & 0xfff00000) >> 20);
/* PMIN */
val -= (lo & 0x000fffff);
val += 1;
/* The page size is 4k */
return (val << 12);
}
/* FB size can be obtained from the VSA II */
/* Virtual register class = 0x02 */
/* VG_MEM_SIZE(512Kb units) = 0x00 */
outw(VSA_VR_UNLOCK, VSA_VRC_INDEX);
outw(VSA_VR_MEM_SIZE, VSA_VRC_INDEX);
val = (unsigned int)(inw(VSA_VRC_DATA)) & 0xFFl;
return (val << 19);
}
int gx_line_delta(int xres, int bpp)
{
/* Must be a multiple of 8 bytes. */
return (xres * (bpp >> 3) + 7) & ~0x7;
}
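/*
 * For example: xres = 1024 at 16 bpp gives 2048 bytes, already aligned;
 * xres = 1025 at 8 bpp gives (1025 + 7) & ~7 = 1032 bytes.
 */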
void gx_set_mode(struct fb_info *info)
{
struct gxfb_par *par = info->par;
u32 gcfg, dcfg;
int hactive, hblankstart, hsyncstart, hsyncend, hblankend, htotal;
int vactive, vblankstart, vsyncstart, vsyncend, vblankend, vtotal;
/* Unlock the display controller registers. */
write_dc(par, DC_UNLOCK, DC_UNLOCK_UNLOCK);
gcfg = read_dc(par, DC_GENERAL_CFG);
dcfg = read_dc(par, DC_DISPLAY_CFG);
/* Disable the timing generator. */
dcfg &= ~DC_DISPLAY_CFG_TGEN;
write_dc(par, DC_DISPLAY_CFG, dcfg);
/* Wait for pending memory requests before disabling the FIFO load. */
udelay(100);
/* Disable FIFO load and compression. */
gcfg &= ~(DC_GENERAL_CFG_DFLE | DC_GENERAL_CFG_CMPE |
DC_GENERAL_CFG_DECE);
write_dc(par, DC_GENERAL_CFG, gcfg);
/* Setup DCLK and its divisor. */
gx_set_dclk_frequency(info);
/*
* Setup new mode.
*/
/* Clear all unused feature bits. */
gcfg &= DC_GENERAL_CFG_YUVM | DC_GENERAL_CFG_VDSE;
dcfg = 0;
/* Set FIFO priority (default 6/5) and enable. */
/* FIXME: increase fifo priority for 1280x1024 and higher modes? */
gcfg |= (6 << DC_GENERAL_CFG_DFHPEL_SHIFT) |
(5 << DC_GENERAL_CFG_DFHPSL_SHIFT) | DC_GENERAL_CFG_DFLE;
/* Framebuffer start offset. */
write_dc(par, DC_FB_ST_OFFSET, 0);
/* Line delta and line buffer length. */
write_dc(par, DC_GFX_PITCH, info->fix.line_length >> 3);
write_dc(par, DC_LINE_SIZE,
((info->var.xres * info->var.bits_per_pixel/8) >> 3) + 2);
/* Enable graphics and video data and unmask address lines. */
dcfg |= DC_DISPLAY_CFG_GDEN | DC_DISPLAY_CFG_VDEN |
DC_DISPLAY_CFG_A20M | DC_DISPLAY_CFG_A18M;
/* Set pixel format. */
switch (info->var.bits_per_pixel) {
case 8:
dcfg |= DC_DISPLAY_CFG_DISP_MODE_8BPP;
break;
case 16:
dcfg |= DC_DISPLAY_CFG_DISP_MODE_16BPP;
break;
case 32:
dcfg |= DC_DISPLAY_CFG_DISP_MODE_24BPP;
dcfg |= DC_DISPLAY_CFG_PALB;
break;
}
/* Enable timing generator. */
dcfg |= DC_DISPLAY_CFG_TGEN;
/* Horizontal and vertical timings. */
hactive = info->var.xres;
hblankstart = hactive;
hsyncstart = hblankstart + info->var.right_margin;
hsyncend = hsyncstart + info->var.hsync_len;
hblankend = hsyncend + info->var.left_margin;
htotal = hblankend;
vactive = info->var.yres;
vblankstart = vactive;
vsyncstart = vblankstart + info->var.lower_margin;
vsyncend = vsyncstart + info->var.vsync_len;
vblankend = vsyncend + info->var.upper_margin;
vtotal = vblankend;
write_dc(par, DC_H_ACTIVE_TIMING, (hactive - 1) |
((htotal - 1) << 16));
write_dc(par, DC_H_BLANK_TIMING, (hblankstart - 1) |
((hblankend - 1) << 16));
write_dc(par, DC_H_SYNC_TIMING, (hsyncstart - 1) |
((hsyncend - 1) << 16));
write_dc(par, DC_V_ACTIVE_TIMING, (vactive - 1) |
((vtotal - 1) << 16));
write_dc(par, DC_V_BLANK_TIMING, (vblankstart - 1) |
((vblankend - 1) << 16));
write_dc(par, DC_V_SYNC_TIMING, (vsyncstart - 1) |
((vsyncend - 1) << 16));
/* Write final register values. */
write_dc(par, DC_DISPLAY_CFG, dcfg);
write_dc(par, DC_GENERAL_CFG, gcfg);
gx_configure_display(info);
/* Relock display controller registers */
write_dc(par, DC_UNLOCK, DC_UNLOCK_LOCK);
}
void gx_set_hw_palette_reg(struct fb_info *info, unsigned regno,
unsigned red, unsigned green, unsigned blue)
{
struct gxfb_par *par = info->par;
int val;
/* Hardware palette is in RGB 8-8-8 format. */
val = (red << 8) & 0xff0000;
val |= (green) & 0x00ff00;
val |= (blue >> 8) & 0x0000ff;
write_dc(par, DC_PAL_ADDRESS, regno);
write_dc(par, DC_PAL_DATA, val);
}
| gpl-2.0 |
andrewyates/linux-stable | arch/arm/kernel/traps.c | 2 | 22019 | /*
* linux/arch/arm/kernel/traps.c
*
* Copyright (C) 1995-2009 Russell King
* Fragments that appear the same as linux/arch/i386/kernel/traps.c (C) Linus Torvalds
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* 'traps.c' handles hardware exceptions after we have saved some state in
* 'linux/arch/arm/lib/traps.S'. Mostly a debugging aid, but will probably
* kill the offending process.
*/
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/irq.h>
#include <linux/atomic.h>
#include <asm/cacheflush.h>
#include <asm/exception.h>
#include <asm/unistd.h>
#include <asm/traps.h>
#include <asm/ptrace.h>
#include <asm/unwind.h>
#include <asm/tls.h>
#include <asm/system_misc.h>
#include <asm/opcodes.h>
static const char *handler[]= {
"prefetch abort",
"data abort",
"address exception",
"interrupt",
"undefined instruction",
};
void *vectors_page;
#ifdef CONFIG_DEBUG_USER
unsigned int user_debug;
static int __init user_debug_setup(char *str)
{
get_option(&str, &user_debug);
return 1;
}
__setup("user_debug=", user_debug_setup);
#endif
static void dump_mem(const char *, const char *, unsigned long, unsigned long);
void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
{
#ifdef CONFIG_KALLSYMS
printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
#else
printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
#endif
if (in_exception_text(where))
dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs));
}
#ifndef CONFIG_ARM_UNWIND
/*
* Stack pointers should always be within the kernels view of
* physical memory. If it is not there, then we can't dump
* out any information relating to the stack.
*/
static int verify_stack(unsigned long sp)
{
if (sp < PAGE_OFFSET ||
(sp > (unsigned long)high_memory && high_memory != NULL))
return -EFAULT;
return 0;
}
#endif
/*
* Dump out the contents of some memory nicely...
*/
static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
unsigned long top)
{
unsigned long first;
mm_segment_t fs;
int i;
/*
* We need to switch to kernel mode so that we can use __get_user
* to safely read from kernel space. Note that we now dump the
* code first, just in case the backtrace kills us.
*/
fs = get_fs();
set_fs(KERNEL_DS);
printk("%s%s(0x%08lx to 0x%08lx)\n", lvl, str, bottom, top);
for (first = bottom & ~31; first < top; first += 32) {
unsigned long p;
char str[sizeof(" 12345678") * 8 + 1];
memset(str, ' ', sizeof(str));
str[sizeof(str) - 1] = '\0';
for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
if (p >= bottom && p < top) {
unsigned long val;
if (__get_user(val, (unsigned long *)p) == 0)
sprintf(str + i * 9, " %08lx", val);
else
sprintf(str + i * 9, " ????????");
}
}
printk("%s%04lx:%s\n", lvl, first & 0xffff, str);
}
set_fs(fs);
}
static void dump_instr(const char *lvl, struct pt_regs *regs)
{
unsigned long addr = instruction_pointer(regs);
const int thumb = thumb_mode(regs);
const int width = thumb ? 4 : 8;
mm_segment_t fs;
char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
int i;
/*
* We need to switch to kernel mode so that we can use __get_user
* to safely read from kernel space. Note that we now dump the
* code first, just in case the backtrace kills us.
*/
fs = get_fs();
set_fs(KERNEL_DS);
for (i = -4; i < 1 + !!thumb; i++) {
unsigned int val, bad;
if (thumb)
bad = __get_user(val, &((u16 *)addr)[i]);
else
bad = __get_user(val, &((u32 *)addr)[i]);
if (!bad)
p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ",
width, val);
else {
p += sprintf(p, "bad PC value");
break;
}
}
printk("%sCode: %s\n", lvl, str);
set_fs(fs);
}
#ifdef CONFIG_ARM_UNWIND
static inline void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
unwind_backtrace(regs, tsk);
}
#else
static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
unsigned int fp, mode;
int ok = 1;
printk("Backtrace: ");
if (!tsk)
tsk = current;
if (regs) {
fp = frame_pointer(regs);
mode = processor_mode(regs);
} else if (tsk != current) {
fp = thread_saved_fp(tsk);
mode = 0x10;
} else {
asm("mov %0, fp" : "=r" (fp) : : "cc");
mode = 0x10;
}
if (!fp) {
pr_cont("no frame pointer");
ok = 0;
} else if (verify_stack(fp)) {
pr_cont("invalid frame pointer 0x%08x", fp);
ok = 0;
} else if (fp < (unsigned long)end_of_stack(tsk))
pr_cont("frame pointer underflow");
pr_cont("\n");
if (ok)
c_backtrace(fp, mode);
}
#endif
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
dump_backtrace(NULL, tsk);
barrier();
}
#ifdef CONFIG_PREEMPT
#define S_PREEMPT " PREEMPT"
#else
#define S_PREEMPT ""
#endif
#ifdef CONFIG_SMP
#define S_SMP " SMP"
#else
#define S_SMP ""
#endif
#ifdef CONFIG_THUMB2_KERNEL
#define S_ISA " THUMB2"
#else
#define S_ISA " ARM"
#endif
static int __die(const char *str, int err, struct pt_regs *regs)
{
struct task_struct *tsk = current;
static int die_counter;
int ret;
pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP S_ISA "\n",
str, err, ++die_counter);
/* trap and error numbers are mostly meaningless on ARM */
ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV);
if (ret == NOTIFY_STOP)
return 1;
print_modules();
__show_regs(regs);
pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), end_of_stack(tsk));
if (!user_mode(regs) || in_interrupt()) {
dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp,
THREAD_SIZE + (unsigned long)task_stack_page(tsk));
dump_backtrace(regs, tsk);
dump_instr(KERN_EMERG, regs);
}
return 0;
}
static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;
extern void gr_handle_kernel_exploit(void);
static unsigned long oops_begin(void)
{
int cpu;
unsigned long flags;
oops_enter();
/* racy, but better than risking deadlock. */
raw_local_irq_save(flags);
cpu = smp_processor_id();
if (!arch_spin_trylock(&die_lock)) {
if (cpu == die_owner)
/* nested oops. should stop eventually */;
else
arch_spin_lock(&die_lock);
}
die_nest_count++;
die_owner = cpu;
console_verbose();
bust_spinlocks(1);
return flags;
}
static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
if (regs && kexec_should_crash(current))
crash_kexec(regs);
bust_spinlocks(0);
die_owner = -1;
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
die_nest_count--;
if (!die_nest_count)
/* Nest count reaches zero, release the lock. */
arch_spin_unlock(&die_lock);
raw_local_irq_restore(flags);
oops_exit();
if (in_interrupt())
panic("Fatal exception in interrupt");
if (panic_on_oops)
panic("Fatal exception");
gr_handle_kernel_exploit();
if (signr)
do_exit(signr);
}
/*
* This function is protected against re-entrancy.
*/
void die(const char *str, struct pt_regs *regs, int err)
{
enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE;
unsigned long flags = oops_begin();
int sig = SIGSEGV;
if (!user_mode(regs))
bug_type = report_bug(regs->ARM_pc, regs);
if (bug_type != BUG_TRAP_TYPE_NONE)
str = "Oops - BUG";
if (__die(str, err, regs))
sig = 0;
oops_end(flags, regs, sig);
}
void arm_notify_die(const char *str, struct pt_regs *regs,
struct siginfo *info, unsigned long err, unsigned long trap)
{
if (user_mode(regs)) {
current->thread.error_code = err;
current->thread.trap_no = trap;
force_sig_info(info->si_signo, info, current);
} else {
die(str, regs, err);
}
}
#ifdef CONFIG_GENERIC_BUG
int is_valid_bugaddr(unsigned long pc)
{
#ifdef CONFIG_THUMB2_KERNEL
u16 bkpt;
u16 insn = __opcode_to_mem_thumb16(BUG_INSTR_VALUE);
#else
u32 bkpt;
u32 insn = __opcode_to_mem_arm(BUG_INSTR_VALUE);
#endif
if (probe_kernel_address((unsigned *)pc, bkpt))
return 0;
return bkpt == insn;
}
#endif
static LIST_HEAD(undef_hook);
static DEFINE_RAW_SPINLOCK(undef_lock);
void register_undef_hook(struct undef_hook *hook)
{
unsigned long flags;
raw_spin_lock_irqsave(&undef_lock, flags);
list_add(&hook->node, &undef_hook);
raw_spin_unlock_irqrestore(&undef_lock, flags);
}
void unregister_undef_hook(struct undef_hook *hook)
{
unsigned long flags;
raw_spin_lock_irqsave(&undef_lock, flags);
list_del(&hook->node);
raw_spin_unlock_irqrestore(&undef_lock, flags);
}
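/*
 * Usage sketch (the mask/value pair and handler are illustrative, not
 * taken from this file):
 *
 *	static int example_trap_handler(struct pt_regs *regs,
 *					unsigned int instr)
 *	{
 *		... emulate or fix up the instruction ...
 *		regs->ARM_pc += 4;
 *		return 0;	(0 means handled)
 *	}
 *
 *	static struct undef_hook example_hook = {
 *		.instr_mask	= 0x0fffffff,
 *		.instr_val	= 0x07f001f8,
 *		.cpsr_mask	= MODE_MASK,
 *		.cpsr_val	= USR_MODE,
 *		.fn		= example_trap_handler,
 *	};
 *	register_undef_hook(&example_hook);
 *
 * A hook's fn returning 0 marks the instruction as handled; a non-zero
 * return lets do_undefinstr() fall through to SIGILL.
 */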
static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
{
struct undef_hook *hook;
unsigned long flags;
int (*fn)(struct pt_regs *regs, unsigned int instr) = NULL;
raw_spin_lock_irqsave(&undef_lock, flags);
list_for_each_entry(hook, &undef_hook, node)
if ((instr & hook->instr_mask) == hook->instr_val &&
(regs->ARM_cpsr & hook->cpsr_mask) == hook->cpsr_val)
fn = hook->fn;
raw_spin_unlock_irqrestore(&undef_lock, flags);
return fn ? fn(regs, instr) : 1;
}
asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
{
unsigned int instr;
siginfo_t info;
void __user *pc;
pc = (void __user *)instruction_pointer(regs);
if (processor_mode(regs) == SVC_MODE) {
#ifdef CONFIG_THUMB2_KERNEL
if (thumb_mode(regs)) {
instr = __mem_to_opcode_thumb16(((u16 *)pc)[0]);
if (is_wide_instruction(instr)) {
u16 inst2;
inst2 = __mem_to_opcode_thumb16(((u16 *)pc)[1]);
instr = __opcode_thumb32_compose(instr, inst2);
}
} else
#endif
instr = __mem_to_opcode_arm(*(u32 *) pc);
} else if (thumb_mode(regs)) {
if (get_user(instr, (u16 __user *)pc))
goto die_sig;
instr = __mem_to_opcode_thumb16(instr);
if (is_wide_instruction(instr)) {
unsigned int instr2;
if (get_user(instr2, (u16 __user *)pc+1))
goto die_sig;
instr2 = __mem_to_opcode_thumb16(instr2);
instr = __opcode_thumb32_compose(instr, instr2);
}
} else {
if (get_user(instr, (u32 __user *)pc))
goto die_sig;
instr = __mem_to_opcode_arm(instr);
}
if (call_undef_hook(regs, instr) == 0)
return;
die_sig:
#ifdef CONFIG_DEBUG_USER
if (user_debug & UDBG_UNDEFINED) {
pr_info("%s (%d): undefined instruction: pc=%p\n",
current->comm, task_pid_nr(current), pc);
__show_regs(regs);
dump_instr(KERN_INFO, regs);
}
#endif
info.si_signo = SIGILL;
info.si_errno = 0;
info.si_code = ILL_ILLOPC;
info.si_addr = pc;
arm_notify_die("Oops - undefined instruction", regs, &info, 0, 6);
}
/*
* Handle FIQ similarly to NMI on x86 systems.
*
* The runtime environment for NMIs is extremely restrictive
* (NMIs can pre-empt critical sections meaning almost all locking is
* forbidden) meaning this default FIQ handling must only be used in
* circumstances where non-maskability improves robustness, such as
* watchdog or debug logic.
*
* This handler is not appropriate for general purpose use in drivers
* platform code and can be overrideen using set_fiq_handler.
*/
asmlinkage void __exception_irq_entry handle_fiq_as_nmi(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
nmi_enter();
/* nop. FIQ handlers for special arch/arm features can be added here. */
nmi_exit();
set_irq_regs(old_regs);
}
/*
* bad_mode handles the impossible case in the vectors. If you see one of
* these, then it's extremely serious, and could mean you have buggy hardware.
* It never returns, and never tries to sync. We hope that we can at least
* dump out some state information...
*/
asmlinkage void bad_mode(struct pt_regs *regs, int reason)
{
console_verbose();
pr_crit("Bad mode in %s handler detected\n", handler[reason]);
die("Oops - bad mode", regs, 0);
local_irq_disable();
panic("bad mode");
}
static int bad_syscall(int n, struct pt_regs *regs)
{
siginfo_t info;
if ((current->personality & PER_MASK) != PER_LINUX) {
send_sig(SIGSEGV, current, 1);
return regs->ARM_r0;
}
#ifdef CONFIG_DEBUG_USER
if (user_debug & UDBG_SYSCALL) {
pr_err("[%d] %s: obsolete system call %08x.\n",
task_pid_nr(current), current->comm, n);
dump_instr(KERN_ERR, regs);
}
#endif
info.si_signo = SIGILL;
info.si_errno = 0;
info.si_code = ILL_ILLTRP;
info.si_addr = (void __user *)instruction_pointer(regs) -
(thumb_mode(regs) ? 2 : 4);
arm_notify_die("Oops - bad syscall", regs, &info, n, 0);
return regs->ARM_r0;
}
static inline int
__do_cache_op(unsigned long start, unsigned long end)
{
int ret;
do {
unsigned long chunk = min(PAGE_SIZE, end - start);
if (fatal_signal_pending(current))
return 0;
ret = flush_cache_user_range(start, start + chunk);
if (ret)
return ret;
cond_resched();
start += chunk;
} while (start < end);
return 0;
}
static inline int
do_cache_op(unsigned long start, unsigned long end, int flags)
{
if (end < start || flags)
return -EINVAL;
if (!access_ok(VERIFY_READ, start, end - start))
return -EFAULT;
return __do_cache_op(start, end);
}
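/*
 * Illustrative sketch, not part of the original file: user space reaches
 * do_cache_op() through the private ARM cacheflush syscall, e.g. after
 * generating code at runtime:
 *
 *	#include <unistd.h>
 *	#include <asm/unistd.h>	// assumed location of __ARM_NR_cacheflush
 *
 *	syscall(__ARM_NR_cacheflush, (unsigned long)start,
 *		(unsigned long)end, 0);	// flags must be zero
 *
 * The flags rule is documented at the NR(cacheflush) case in arm_syscall().
 */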
/*
* Handle all unrecognised system calls.
* 0x9f0000 - 0x9fffff are some more esoteric system calls
*/
#define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE)
asmlinkage int arm_syscall(int no, struct pt_regs *regs)
{
siginfo_t info;
if ((no >> 16) != (__ARM_NR_BASE >> 16))
return bad_syscall(no, regs);
switch (no & 0xffff) {
case 0: /* branch through 0 */
info.si_signo = SIGSEGV;
info.si_errno = 0;
info.si_code = SEGV_MAPERR;
info.si_addr = NULL;
arm_notify_die("branch through zero", regs, &info, 0, 0);
return 0;
case NR(breakpoint): /* SWI BREAK_POINT */
regs->ARM_pc -= thumb_mode(regs) ? 2 : 4;
ptrace_break(current, regs);
return regs->ARM_r0;
/*
* Flush a region from virtual address 'r0' to virtual address 'r1'
* _exclusive_. There is no alignment requirement on either address;
* user space does not need to know the hardware cache layout.
*
* r2 contains flags. It should ALWAYS be passed as ZERO until it
* is defined to be something else. For now we ignore it, but may
* the fires of hell burn in your belly if you break this rule. ;)
*
* (at a later date, we may want to allow this call to not flush
* various aspects of the cache. Passing '0' will guarantee that
* everything necessary gets flushed to maintain consistency in
* the specified region).
*/
case NR(cacheflush):
return do_cache_op(regs->ARM_r0, regs->ARM_r1, regs->ARM_r2);
case NR(usr26):
if (!(elf_hwcap & HWCAP_26BIT))
break;
regs->ARM_cpsr &= ~MODE32_BIT;
return regs->ARM_r0;
case NR(usr32):
if (!(elf_hwcap & HWCAP_26BIT))
break;
regs->ARM_cpsr |= MODE32_BIT;
return regs->ARM_r0;
case NR(set_tls):
set_tls(regs->ARM_r0);
return 0;
#ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG
/*
* Atomically store r1 in *r2 if *r2 is equal to r0 for user space.
* Return zero in r0 if *MEM was changed or non-zero if no exchange
* happened. Also set the user C flag accordingly.
* If access permissions have to be fixed up then non-zero is
* returned and the operation has to be re-attempted.
*
* *NOTE*: This is a ghost syscall private to the kernel. Only the
* __kuser_cmpxchg code in entry-armv.S should be aware of its
* existence. Don't ever use this from user code.
*/
case NR(cmpxchg):
for (;;) {
extern void do_DataAbort(unsigned long addr, unsigned int fsr,
struct pt_regs *regs);
unsigned long val;
unsigned long addr = regs->ARM_r2;
struct mm_struct *mm = current->mm;
pgd_t *pgd; pmd_t *pmd; pte_t *pte;
spinlock_t *ptl;
regs->ARM_cpsr &= ~PSR_C_BIT;
down_read(&mm->mmap_sem);
pgd = pgd_offset(mm, addr);
if (!pgd_present(*pgd))
goto bad_access;
pmd = pmd_offset(pgd, addr);
if (!pmd_present(*pmd))
goto bad_access;
pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
if (!pte_present(*pte) || !pte_write(*pte) || !pte_dirty(*pte)) {
pte_unmap_unlock(pte, ptl);
goto bad_access;
}
val = *(unsigned long *)addr;
val -= regs->ARM_r0;
if (val == 0) {
*(unsigned long *)addr = regs->ARM_r1;
regs->ARM_cpsr |= PSR_C_BIT;
}
pte_unmap_unlock(pte, ptl);
up_read(&mm->mmap_sem);
return val;
bad_access:
up_read(&mm->mmap_sem);
/* simulate a write access fault */
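/* FSR 15 encodes a page permission fault and bit 11 flags the access as
   a write, so the fault path fixes the mapping up as for a real user
   write (bit meanings assumed from the ARM fault status register layout) */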
do_DataAbort(addr, 15 + (1 << 11), regs);
}
#endif
default:
/* Calls 9f00xx..9f07ff are defined to return -ENOSYS
if not implemented, rather than raising SIGILL. This
way the calling program can gracefully determine whether
a feature is supported. */
if ((no & 0xffff) <= 0x7ff)
return -ENOSYS;
break;
}
#ifdef CONFIG_DEBUG_USER
/*
* experience shows that these seem to indicate that
* something catastrophic has happened
*/
if (user_debug & UDBG_SYSCALL) {
pr_err("[%d] %s: arm syscall %d\n",
task_pid_nr(current), current->comm, no);
dump_instr("", regs);
if (user_mode(regs)) {
__show_regs(regs);
c_backtrace(frame_pointer(regs), processor_mode(regs));
}
}
#endif
info.si_signo = SIGILL;
info.si_errno = 0;
info.si_code = ILL_ILLTRP;
info.si_addr = (void __user *)instruction_pointer(regs) -
(thumb_mode(regs) ? 2 : 4);
arm_notify_die("Oops - bad syscall(2)", regs, &info, no, 0);
return 0;
}
#ifdef CONFIG_TLS_REG_EMUL
/*
* We might be running on an ARMv6+ processor which should have the TLS
* register but for some reason we can't use it, or maybe an SMP system
* using a pre-ARMv6 processor (there are apparently a few prototypes like
* that in existence) and therefore access to that register must be
* emulated.
*/
static int get_tp_trap(struct pt_regs *regs, unsigned int instr)
{
int reg = (instr >> 12) & 15;
if (reg == 15)
return 1;
regs->uregs[reg] = current_thread_info()->tp_value[0];
regs->ARM_pc += 4;
return 0;
}
static struct undef_hook arm_mrc_hook = {
.instr_mask = 0x0fff0fff,
.instr_val = 0x0e1d0f70,
.cpsr_mask = PSR_T_BIT,
.cpsr_val = 0,
.fn = get_tp_trap,
};
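/*
 * The mask/value pair above matches "mrc p15, 0, Rd, c13, c0, 3" - the read
 * of the user read-only TLS register - under any condition code and with
 * any destination register Rd (bits 12-15, decoded in get_tp_trap() above).
 */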
static int __init arm_mrc_hook_init(void)
{
register_undef_hook(&arm_mrc_hook);
return 0;
}
late_initcall(arm_mrc_hook_init);
#endif
void __bad_xchg(volatile void *ptr, int size)
{
pr_err("xchg: bad data size: pc 0x%p, ptr 0x%p, size %d\n",
__builtin_return_address(0), ptr, size);
BUG();
}
EXPORT_SYMBOL(__bad_xchg);
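/*
 * __bad_xchg() is never meant to execute: the xchg() helper calls it only
 * when xchg() is used with an operand size this architecture cannot
 * exchange atomically, turning such misuse into a loud BUG().
 */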
/*
* A data abort trap was taken, but we did not handle the instruction.
* Try to abort the user program, or panic if it was the kernel.
*/
asmlinkage void
baddataabort(int code, unsigned long instr, struct pt_regs *regs)
{
unsigned long addr = instruction_pointer(regs);
siginfo_t info;
#ifdef CONFIG_DEBUG_USER
if (user_debug & UDBG_BADABORT) {
pr_err("[%d] %s: bad data abort: code %d instr 0x%08lx\n",
task_pid_nr(current), current->comm, code, instr);
dump_instr(KERN_ERR, regs);
show_pte(current->mm, addr);
}
#endif
info.si_signo = SIGILL;
info.si_errno = 0;
info.si_code = ILL_ILLOPC;
info.si_addr = (void __user *)addr;
arm_notify_die("unknown data abort code", regs, &info, instr, 0);
}
void __readwrite_bug(const char *fn)
{
pr_err("%s called, but not implemented\n", fn);
BUG();
}
EXPORT_SYMBOL(__readwrite_bug);
void __pte_error(const char *file, int line, pte_t pte)
{
pr_err("%s:%d: bad pte %08llx.\n", file, line, (long long)pte_val(pte));
}
void __pmd_error(const char *file, int line, pmd_t pmd)
{
pr_err("%s:%d: bad pmd %08llx.\n", file, line, (long long)pmd_val(pmd));
}
void __pgd_error(const char *file, int line, pgd_t pgd)
{
pr_err("%s:%d: bad pgd %08llx.\n", file, line, (long long)pgd_val(pgd));
}
asmlinkage void __div0(void)
{
pr_err("Division by zero in kernel.\n");
dump_stack();
}
EXPORT_SYMBOL(__div0);
void abort(void)
{
BUG();
/* if that doesn't kill us, halt */
panic("Oops failed to kill thread");
}
EXPORT_SYMBOL(abort);
void __init trap_init(void)
{
return;
}
#ifdef CONFIG_KUSER_HELPERS
static void __init kuser_init(void *vectors)
{
extern char __kuser_helper_start[], __kuser_helper_end[];
int kuser_sz = __kuser_helper_end - __kuser_helper_start;
memcpy(vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
/*
* vectors + 0xfe0 = __kuser_get_tls
* vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8
*/
if (tls_emu || has_tls_reg)
memcpy(vectors + 0xfe0, vectors + 0xfe8, 4);
}
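/*
 * Illustrative sketch, not part of the original file: user space invokes
 * the kuser helpers through their fixed addresses at the top of the vector
 * page, e.g. for the TLS helper placed above:
 *
 *	typedef unsigned int (*kuser_get_tls_t)(void);
 *	#define __kuser_get_tls ((kuser_get_tls_t)0xffff0fe0)
 *
 *	unsigned int tls = __kuser_get_tls();
 *
 * This mirrors the idiom from Documentation/arm/kernel_user_helpers.txt;
 * the typedef and macro names are illustrative.
 */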
#else
static inline void __init kuser_init(void *vectors)
{
}
#endif
void __init early_trap_init(void *vectors_base)
{
#ifndef CONFIG_CPU_V7M
unsigned long vectors = (unsigned long)vectors_base;
extern char __stubs_start[], __stubs_end[];
extern char __vectors_start[], __vectors_end[];
unsigned i;
vectors_page = vectors_base;
/*
* Poison the vectors page with an undefined instruction. This
* instruction is chosen to be undefined for both ARM and Thumb
* ISAs. The Thumb version is an undefined instruction with a
* branch back to the undefined instruction.
*/
for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
((u32 *)vectors_base)[i] = 0xe7fddef1;
/*
* Copy the vectors, stubs and kuser helpers (in entry-armv.S)
* into the vector page, mapped at 0xffff0000, and ensure these
* are visible to the instruction stream.
*/
memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start);
kuser_init(vectors_base);
flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
#ifndef CONFIG_PAX_MEMORY_UDEREF
modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
#endif
#else /* ifndef CONFIG_CPU_V7M */
/*
* on V7-M there is no need to copy the vector table to a dedicated
* memory area. The address is configurable and so a table in the kernel
* image can be used.
*/
#endif
}
| gpl-2.0 |
Traesh/TrinityCore | src/server/game/Battlegrounds/BattlegroundQueue.cpp | 2 | 51919 | /*
* This file is part of the TrinityCore Project. See AUTHORS file for Copyright information
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "BattlegroundQueue.h"
#include "ArenaTeam.h"
#include "ArenaTeamMgr.h"
#include "BattlegroundMgr.h"
#include "BattlegroundPackets.h"
#include "Chat.h"
#include "DB2Stores.h"
#include "GameTime.h"
#include "Group.h"
#include "Language.h"
#include "Log.h"
#include "ObjectAccessor.h"
#include "Player.h"
#include "World.h"
/*********************************************************/
/*** BATTLEGROUND QUEUE SYSTEM ***/
/*********************************************************/
BattlegroundQueue::BattlegroundQueue()
{
for (uint32 i = 0; i < BG_TEAMS_COUNT; ++i)
{
for (uint32 j = 0; j < MAX_BATTLEGROUND_BRACKETS; ++j)
{
m_SumOfWaitTimes[i][j] = 0;
m_WaitTimeLastPlayer[i][j] = 0;
for (uint32 k = 0; k < COUNT_OF_PLAYERS_TO_AVERAGE_WAIT_TIME; ++k)
m_WaitTimes[i][j][k] = 0;
}
}
}
BattlegroundQueue::~BattlegroundQueue()
{
m_events.KillAllEvents(false);
for (int i = 0; i < MAX_BATTLEGROUND_BRACKETS; ++i)
{
for (uint32 j = 0; j < BG_QUEUE_GROUP_TYPES_COUNT; ++j)
{
for (GroupsQueueType::iterator itr = m_QueuedGroups[i][j].begin(); itr != m_QueuedGroups[i][j].end(); ++itr)
delete (*itr);
}
}
}
/*********************************************************/
/*** BATTLEGROUND QUEUE SELECTION POOLS ***/
/*********************************************************/
// selection pool initialization, used to clean up from prev selection
void BattlegroundQueue::SelectionPool::Init()
{
SelectedGroups.clear();
PlayerCount = 0;
}
// remove group info from selection pool
// returns true when we need to try to add new group to selection pool
// returns false when selection pool is ok or when we kicked smaller group than we need to kick
// sometimes it can be called on empty selection pool
bool BattlegroundQueue::SelectionPool::KickGroup(uint32 size)
{
//find the largest group, or the LAST group whose size is within 1 of 'size', and kick it
bool found = false;
GroupsQueueType::iterator groupToKick = SelectedGroups.begin();
for (GroupsQueueType::iterator itr = groupToKick; itr != SelectedGroups.end(); ++itr)
{
if (abs((int32)((*itr)->Players.size() - size)) <= 1)
{
groupToKick = itr;
found = true;
}
else if (!found && (*itr)->Players.size() >= (*groupToKick)->Players.size())
groupToKick = itr;
}
//if pool is empty, do nothing
if (GetPlayerCount())
{
//update player count
GroupQueueInfo* ginfo = (*groupToKick);
SelectedGroups.erase(groupToKick);
PlayerCount -= ginfo->Players.size();
//return false if we kicked smaller group or there are enough players in selection pool
if (ginfo->Players.size() <= size + 1)
return false;
}
return true;
}
// add group to selection pool
// used when building selection pools
// returns true if we can invite more players, or when we added group to selection pool
// returns false when selection pool is full
bool BattlegroundQueue::SelectionPool::AddGroup(GroupQueueInfo* ginfo, uint32 desiredCount)
{
//if the group is larger than the desired count, don't allow adding it to the pool
if (!ginfo->IsInvitedToBGInstanceGUID && desiredCount >= PlayerCount + ginfo->Players.size())
{
SelectedGroups.push_back(ginfo);
// increase selected players count
PlayerCount += ginfo->Players.size();
return true;
}
if (PlayerCount < desiredCount)
return true;
return false;
}
/*********************************************************/
/*** BATTLEGROUND QUEUES ***/
/*********************************************************/
// add group or player (grp == NULL) to bg queue with the given leader and bg specifications
GroupQueueInfo* BattlegroundQueue::AddGroup(Player* leader, Group* grp, BattlegroundTypeId BgTypeId, PVPDifficultyEntry const* bracketEntry, uint8 ArenaType, bool isRated, bool isPremade, uint32 ArenaRating, uint32 MatchmakerRating, uint32 arenateamid)
{
BattlegroundBracketId bracketId = bracketEntry->GetBracketId();
// create new ginfo
GroupQueueInfo* ginfo = new GroupQueueInfo;
ginfo->BgTypeId = BgTypeId;
ginfo->ArenaType = ArenaType;
ginfo->ArenaTeamId = arenateamid;
ginfo->IsRated = isRated;
ginfo->IsInvitedToBGInstanceGUID = 0;
ginfo->JoinTime = GameTime::GetGameTimeMS();
ginfo->RemoveInviteTime = 0;
ginfo->Team = leader->GetTeam();
ginfo->ArenaTeamRating = ArenaRating;
ginfo->ArenaMatchmakerRating = MatchmakerRating;
ginfo->OpponentsTeamRating = 0;
ginfo->OpponentsMatchmakerRating = 0;
ginfo->Players.clear();
//compute index (if group is premade or joined a rated match) to queues
uint32 index = 0;
if (!isRated && !isPremade)
index += BG_TEAMS_COUNT;
if (ginfo->Team == HORDE)
index++;
TC_LOG_DEBUG("bg.battleground", "Adding Group to BattlegroundQueue bgTypeId : %u, bracket_id : %u, index : %u", BgTypeId, bracketId, index);
uint32 lastOnlineTime = GameTime::GetGameTimeMS();
//announce to world (this doesn't need a mutex)
if (isRated && sWorld->getBoolConfig(CONFIG_ARENA_QUEUE_ANNOUNCER_ENABLE))
{
ArenaTeam* team = sArenaTeamMgr->GetArenaTeamById(arenateamid);
if (team)
sWorld->SendWorldText(LANG_ARENA_QUEUE_ANNOUNCE_WORLD_JOIN, team->GetName().c_str(), ginfo->ArenaType, ginfo->ArenaType, ginfo->ArenaTeamRating);
}
//add players from group to ginfo
if (grp)
{
for (GroupReference* itr = grp->GetFirstMember(); itr != NULL; itr = itr->next())
{
Player* member = itr->GetSource();
if (!member)
continue; // this should never happen
PlayerQueueInfo& pl_info = m_QueuedPlayers[member->GetGUID()];
pl_info.LastOnlineTime = lastOnlineTime;
pl_info.GroupInfo = ginfo;
// add the pinfo to ginfo's list
ginfo->Players[member->GetGUID()] = &pl_info;
}
}
else
{
PlayerQueueInfo& pl_info = m_QueuedPlayers[leader->GetGUID()];
pl_info.LastOnlineTime = lastOnlineTime;
pl_info.GroupInfo = ginfo;
ginfo->Players[leader->GetGUID()] = &pl_info;
}
//add GroupInfo to m_QueuedGroups
{
m_QueuedGroups[bracketId][index].push_back(ginfo);
//announce to world, this code needs mutex
if (!isRated && !isPremade && sWorld->getBoolConfig(CONFIG_BATTLEGROUND_QUEUE_ANNOUNCER_ENABLE))
{
if (Battleground* bg = sBattlegroundMgr->GetBattlegroundTemplate(ginfo->BgTypeId))
{
uint32 MinPlayers = bg->GetMinPlayersPerTeam();
uint32 qHorde = 0;
uint32 qAlliance = 0;
uint32 q_min_level = bracketEntry->MinLevel;
uint32 q_max_level = bracketEntry->MaxLevel;
GroupsQueueType::const_iterator itr;
for (itr = m_QueuedGroups[bracketId][BG_QUEUE_NORMAL_ALLIANCE].begin(); itr != m_QueuedGroups[bracketId][BG_QUEUE_NORMAL_ALLIANCE].end(); ++itr)
if (!(*itr)->IsInvitedToBGInstanceGUID)
qAlliance += (*itr)->Players.size();
for (itr = m_QueuedGroups[bracketId][BG_QUEUE_NORMAL_HORDE].begin(); itr != m_QueuedGroups[bracketId][BG_QUEUE_NORMAL_HORDE].end(); ++itr)
if (!(*itr)->IsInvitedToBGInstanceGUID)
qHorde += (*itr)->Players.size();
// Show queue status to player only (when joining queue)
if (sWorld->getBoolConfig(CONFIG_BATTLEGROUND_QUEUE_ANNOUNCER_PLAYERONLY))
{
ChatHandler(leader->GetSession()).PSendSysMessage(LANG_BG_QUEUE_ANNOUNCE_SELF, bg->GetName().c_str(), q_min_level, q_max_level,
qAlliance, (MinPlayers > qAlliance) ? MinPlayers - qAlliance : (uint32)0, qHorde, (MinPlayers > qHorde) ? MinPlayers - qHorde : (uint32)0);
}
// System message
else
{
sWorld->SendWorldText(LANG_BG_QUEUE_ANNOUNCE_WORLD, bg->GetName().c_str(), q_min_level, q_max_level,
qAlliance, (MinPlayers > qAlliance) ? MinPlayers - qAlliance : (uint32)0, qHorde, (MinPlayers > qHorde) ? MinPlayers - qHorde : (uint32)0);
}
}
}
//release mutex
}
return ginfo;
}
void BattlegroundQueue::PlayerInvitedToBGUpdateAverageWaitTime(GroupQueueInfo* ginfo, BattlegroundBracketId bracket_id)
{
uint32 timeInQueue = getMSTimeDiff(ginfo->JoinTime, GameTime::GetGameTimeMS());
uint8 team_index = TEAM_ALLIANCE; //defaults to TEAM_ALLIANCE - also used for non-rated arenas!
if (!ginfo->ArenaType)
{
if (ginfo->Team == HORDE)
team_index = TEAM_HORDE;
}
else
{
if (ginfo->IsRated)
team_index = TEAM_HORDE; //for rated arenas use TEAM_HORDE
}
//store pointer to arrayindex of player that was added first
uint32* lastPlayerAddedPointer = &(m_WaitTimeLastPlayer[team_index][bracket_id]);
//remove his time from sum
m_SumOfWaitTimes[team_index][bracket_id] -= m_WaitTimes[team_index][bracket_id][(*lastPlayerAddedPointer)];
//set average time to new
m_WaitTimes[team_index][bracket_id][(*lastPlayerAddedPointer)] = timeInQueue;
//add new time to sum
m_SumOfWaitTimes[team_index][bracket_id] += timeInQueue;
//set index of last player added to next one
(*lastPlayerAddedPointer)++;
(*lastPlayerAddedPointer) %= COUNT_OF_PLAYERS_TO_AVERAGE_WAIT_TIME;
}
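// Worked example, not part of the original source: the code above keeps a
// ring buffer of the last COUNT_OF_PLAYERS_TO_AVERAGE_WAIT_TIME queue times
// per team index and bracket. If the slot being overwritten held 30000 ms
// and the newly invited player waited 50000 ms, the running sum grows by
// 20000 ms and the average returned by GetAverageQueueWaitTime() below
// shifts by 20000 / COUNT_OF_PLAYERS_TO_AVERAGE_WAIT_TIME ms. The constant's
// value is defined elsewhere (assumed in BattlegroundQueue.h).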
uint32 BattlegroundQueue::GetAverageQueueWaitTime(GroupQueueInfo* ginfo, BattlegroundBracketId bracket_id) const
{
uint8 team_index = TEAM_ALLIANCE; //defaults to TEAM_ALLIANCE - also used for non-rated arenas!
if (!ginfo->ArenaType)
{
if (ginfo->Team == HORDE)
team_index = TEAM_HORDE;
}
else
{
if (ginfo->IsRated)
team_index = TEAM_HORDE; //for rated arenas use TEAM_HORDE
}
//check if there are enough values (we always add values > 0)
if (m_WaitTimes[team_index][bracket_id][COUNT_OF_PLAYERS_TO_AVERAGE_WAIT_TIME - 1])
return (m_SumOfWaitTimes[team_index][bracket_id] / COUNT_OF_PLAYERS_TO_AVERAGE_WAIT_TIME);
else
//if there aren't enough values return 0 - not available
return 0;
}
//remove player from queue and from group info, if group info is empty then remove it too
void BattlegroundQueue::RemovePlayer(ObjectGuid guid, bool decreaseInvitedCount)
{
int32 bracket_id = -1; // signed for proper for-loop finish
QueuedPlayersMap::iterator itr;
//remove player from map, if he's there
itr = m_QueuedPlayers.find(guid);
if (itr == m_QueuedPlayers.end())
{
//This happens if a player logs out while in a bg because WorldSession::LogoutPlayer() notifies the bg twice
std::string playerName = "Unknown";
if (Player* player = ObjectAccessor::FindPlayer(guid))
playerName = player->GetName();
TC_LOG_DEBUG("bg.battleground", "BattlegroundQueue: couldn't find player %s (%s)", playerName.c_str(), guid.ToString().c_str());
return;
}
GroupQueueInfo* group = itr->second.GroupInfo;
GroupsQueueType::iterator group_itr;
// mostly people with the highest levels are in battlegrounds, that's why
// we count from MAX_BATTLEGROUND_BRACKETS - 1 down to 0
uint32 index = (group->Team == HORDE) ? BG_QUEUE_PREMADE_HORDE : BG_QUEUE_PREMADE_ALLIANCE;
for (int32 bracket_id_tmp = MAX_BATTLEGROUND_BRACKETS - 1; bracket_id_tmp >= 0 && bracket_id == -1; --bracket_id_tmp)
{
//we must check the premade and normal team queues - because when players from a premade are joining the bg,
//they leave the groupinfo, so we can't use its player count to find out the index
for (uint32 j = index; j < BG_QUEUE_GROUP_TYPES_COUNT; j += BG_TEAMS_COUNT)
{
GroupsQueueType::iterator k = m_QueuedGroups[bracket_id_tmp][j].begin();
for (; k != m_QueuedGroups[bracket_id_tmp][j].end(); ++k)
{
if ((*k) == group)
{
bracket_id = bracket_id_tmp;
group_itr = k;
//we must store index to be able to erase iterator
index = j;
break;
}
}
}
}
//player can't be in queue without group, but just in case
if (bracket_id == -1)
{
TC_LOG_ERROR("bg.battleground", "BattlegroundQueue: ERROR Cannot find groupinfo for %s", guid.ToString().c_str());
return;
}
TC_LOG_DEBUG("bg.battleground", "BattlegroundQueue: Removing %s, from bracket_id %u", guid.ToString().c_str(), (uint32)bracket_id);
// ALL variables are correctly set
// We can ignore leveling up in queue - it should not cause crash
// remove player from group
// if only one player there, remove group
// remove player queue info from group queue info
std::map<ObjectGuid, PlayerQueueInfo*>::iterator pitr = group->Players.find(guid);
if (pitr != group->Players.end())
group->Players.erase(pitr);
// if invited to bg, and should decrease invited count, then do it
if (decreaseInvitedCount && group->IsInvitedToBGInstanceGUID)
if (Battleground* bg = sBattlegroundMgr->GetBattleground(group->IsInvitedToBGInstanceGUID, group->BgTypeId))
bg->DecreaseInvitedCount(group->Team);
// remove player queue info
m_QueuedPlayers.erase(itr);
// announce to world if arena team left queue for rated match, show only once
if (group->ArenaType && group->IsRated && group->Players.empty() && sWorld->getBoolConfig(CONFIG_ARENA_QUEUE_ANNOUNCER_ENABLE))
if (ArenaTeam* team = sArenaTeamMgr->GetArenaTeamById(group->ArenaTeamId))
sWorld->SendWorldText(LANG_ARENA_QUEUE_ANNOUNCE_WORLD_EXIT, team->GetName().c_str(), group->ArenaType, group->ArenaType, group->ArenaTeamRating);
// if a player leaves the queue while invited to a rated arena match, then he has to lose
if (group->IsInvitedToBGInstanceGUID && group->IsRated && decreaseInvitedCount)
{
if (ArenaTeam* at = sArenaTeamMgr->GetArenaTeamById(group->ArenaTeamId))
{
TC_LOG_DEBUG("bg.battleground", "UPDATING memberLost's personal arena rating for %s by opponents rating: %u", guid.ToString().c_str(), group->OpponentsTeamRating);
if (Player* player = ObjectAccessor::FindConnectedPlayer(guid))
at->MemberLost(player, group->OpponentsMatchmakerRating);
else
at->OfflineMemberLost(guid, group->OpponentsMatchmakerRating);
at->SaveToDB();
}
}
// remove group queue info if needed
if (group->Players.empty())
{
m_QueuedGroups[bracket_id][index].erase(group_itr);
delete group;
return;
}
// if the group wasn't empty (so it wasn't deleted) and the player has left a rated
// queue -> everyone from the group should leave too
// don't remove recursively if already invited to a bg!
if (!group->IsInvitedToBGInstanceGUID && group->IsRated)
{
// remove next player, this is recursive
// first send removal information
if (Player* plr2 = ObjectAccessor::FindConnectedPlayer(group->Players.begin()->first))
{
BattlegroundQueueTypeId bgQueueTypeId = BattlegroundMgr::BGQueueTypeId(group->BgTypeId, group->ArenaType);
uint32 queueSlot = plr2->GetBattlegroundQueueIndex(bgQueueTypeId);
plr2->RemoveBattlegroundQueueId(bgQueueTypeId); // must be called this way, because if you move this call to
// queue->removeplayer, it causes bugs
WorldPackets::Battleground::BattlefieldStatusNone battlefieldStatus;
sBattlegroundMgr->BuildBattlegroundStatusNone(&battlefieldStatus, plr2, queueSlot, plr2->GetBattlegroundQueueJoinTime(bgQueueTypeId));
plr2->SendDirectMessage(battlefieldStatus.Write());
}
// then actually delete, this may delete the group as well!
RemovePlayer(group->Players.begin()->first, decreaseInvitedCount);
}
}
//returns true when player pl_guid is in queue and is invited to bgInstanceGuid
bool BattlegroundQueue::IsPlayerInvited(ObjectGuid pl_guid, const uint32 bgInstanceGuid, const uint32 removeTime)
{
QueuedPlayersMap::const_iterator qItr = m_QueuedPlayers.find(pl_guid);
return (qItr != m_QueuedPlayers.end()
&& qItr->second.GroupInfo->IsInvitedToBGInstanceGUID == bgInstanceGuid
&& qItr->second.GroupInfo->RemoveInviteTime == removeTime);
}
bool BattlegroundQueue::GetPlayerGroupInfoData(ObjectGuid guid, GroupQueueInfo* ginfo)
{
QueuedPlayersMap::const_iterator qItr = m_QueuedPlayers.find(guid);
if (qItr == m_QueuedPlayers.end())
return false;
*ginfo = *(qItr->second.GroupInfo);
return true;
}
uint32 BattlegroundQueue::GetPlayersInQueue(TeamId id)
{
return m_SelectionPools[id].GetPlayerCount();
}
bool BattlegroundQueue::InviteGroupToBG(GroupQueueInfo* ginfo, Battleground* bg, uint32 side)
{
// set side if needed
if (side)
ginfo->Team = side;
if (!ginfo->IsInvitedToBGInstanceGUID)
{
// not yet invited
// set invitation
ginfo->IsInvitedToBGInstanceGUID = bg->GetInstanceID();
BattlegroundTypeId bgTypeId = bg->GetTypeID();
BattlegroundQueueTypeId bgQueueTypeId = BattlegroundMgr::BGQueueTypeId(bgTypeId, bg->GetArenaType());
BattlegroundBracketId bracket_id = bg->GetBracketId();
// set ArenaTeamId for rated matches
if (bg->isArena() && bg->isRated())
bg->SetArenaTeamIdForTeam(ginfo->Team, ginfo->ArenaTeamId);
ginfo->RemoveInviteTime = GameTime::GetGameTimeMS() + INVITE_ACCEPT_WAIT_TIME;
// loop through the players
for (std::map<ObjectGuid, PlayerQueueInfo*>::iterator itr = ginfo->Players.begin(); itr != ginfo->Players.end(); ++itr)
{
// get the player
Player* player = ObjectAccessor::FindConnectedPlayer(itr->first);
// if offline, skip him, this should not happen - player is removed from queue when he logs out
if (!player)
continue;
// invite the player
PlayerInvitedToBGUpdateAverageWaitTime(ginfo, bracket_id);
//sBattlegroundMgr->InvitePlayer(player, bg, ginfo->Team);
// set invited player counters
bg->IncreaseInvitedCount(ginfo->Team);
player->SetInviteForBattlegroundQueueType(bgQueueTypeId, ginfo->IsInvitedToBGInstanceGUID);
// create remind invite events
BGQueueInviteEvent* inviteEvent = new BGQueueInviteEvent(player->GetGUID(), ginfo->IsInvitedToBGInstanceGUID, bgTypeId, ginfo->ArenaType, ginfo->RemoveInviteTime);
m_events.AddEvent(inviteEvent, m_events.CalculateTime(INVITATION_REMIND_TIME));
// create automatic remove events
BGQueueRemoveEvent* removeEvent = new BGQueueRemoveEvent(player->GetGUID(), ginfo->IsInvitedToBGInstanceGUID, bgTypeId, bgQueueTypeId, ginfo->RemoveInviteTime);
m_events.AddEvent(removeEvent, m_events.CalculateTime(INVITE_ACCEPT_WAIT_TIME));
uint32 queueSlot = player->GetBattlegroundQueueIndex(bgQueueTypeId);
TC_LOG_DEBUG("bg.battleground", "Battleground: invited player %s (%s) to BG instance %u queueindex %u bgtype %u",
player->GetName().c_str(), player->GetGUID().ToString().c_str(), bg->GetInstanceID(), queueSlot, bg->GetTypeID());
WorldPackets::Battleground::BattlefieldStatusNeedConfirmation battlefieldStatus;
sBattlegroundMgr->BuildBattlegroundStatusNeedConfirmation(&battlefieldStatus, bg, player, queueSlot, player->GetBattlegroundQueueJoinTime(bgQueueTypeId), INVITE_ACCEPT_WAIT_TIME, ginfo->ArenaType);
player->SendDirectMessage(battlefieldStatus.Write());
}
return true;
}
return false;
}
/*
This function is inviting players to already running battlegrounds
Invitation type is based on config file
large groups are disadvantageous, because they will be kicked first if invitation type = 1
*/
void BattlegroundQueue::FillPlayersToBG(Battleground* bg, BattlegroundBracketId bracket_id)
{
int32 hordeFree = bg->GetFreeSlotsForTeam(HORDE);
int32 aliFree = bg->GetFreeSlotsForTeam(ALLIANCE);
uint32 aliCount = m_QueuedGroups[bracket_id][BG_QUEUE_NORMAL_ALLIANCE].size();
uint32 hordeCount = m_QueuedGroups[bracket_id][BG_QUEUE_NORMAL_HORDE].size();
// try to get even teams
if (sWorld->getIntConfig(CONFIG_BATTLEGROUND_INVITATION_TYPE) == BG_QUEUE_INVITATION_TYPE_EVEN)
{
// check if the teams are even
if (hordeFree == 1 && aliFree == 1)
{
// if we are here, the teams have the same amount of players
// then we have to allow the same amount of players to join
int32 hordeExtra = hordeCount - aliCount;
int32 aliExtra = aliCount - hordeCount;
hordeExtra = std::max(hordeExtra, 0);
aliExtra = std::max(aliExtra, 0);
if (aliCount != hordeCount)
{
aliFree -= aliExtra;
hordeFree -= hordeExtra;
aliFree = std::max(aliFree, 0);
hordeFree = std::max(hordeFree, 0);
}
}
}
//iterator for iterating through bg queue
GroupsQueueType::const_iterator Ali_itr = m_QueuedGroups[bracket_id][BG_QUEUE_NORMAL_ALLIANCE].begin();
//count of groups in queue - used to stop cycles
//index to queue which group is current
uint32 aliIndex = 0;
for (; aliIndex < aliCount && m_SelectionPools[TEAM_ALLIANCE].AddGroup((*Ali_itr), aliFree); aliIndex++)
++Ali_itr;
//the same thing for horde
GroupsQueueType::const_iterator Horde_itr = m_QueuedGroups[bracket_id][BG_QUEUE_NORMAL_HORDE].begin();
uint32 hordeIndex = 0;
for (; hordeIndex < hordeCount && m_SelectionPools[TEAM_HORDE].AddGroup((*Horde_itr), hordeFree); hordeIndex++)
++Horde_itr;
//if a no-balance ("free for all") BG queue invitation type is set in config, then we are done
if (sWorld->getIntConfig(CONFIG_BATTLEGROUND_INVITATION_TYPE) == BG_QUEUE_INVITATION_TYPE_NO_BALANCE)
return;
/*
if we reached this code, then we have to solve an NP-complete problem called the subset sum problem
one solution is to check all possible invitation subgroups, or we can use these observations:
1. the last time BattlegroundQueue::Update was executed we invited all possible players - so there is only a small possibility
that we will now invite the whole queue, because only 1 change has been made to the queues since the last BattlegroundQueue::Update call
2. the other thing we should consider is group order in the queue
*/
// At first we need to compare free space in bg and our selection pool
int32 diffAli = aliFree - int32(m_SelectionPools[TEAM_ALLIANCE].GetPlayerCount());
int32 diffHorde = hordeFree - int32(m_SelectionPools[TEAM_HORDE].GetPlayerCount());
while (abs(diffAli - diffHorde) > 1 && (m_SelectionPools[TEAM_HORDE].GetPlayerCount() > 0 || m_SelectionPools[TEAM_ALLIANCE].GetPlayerCount() > 0))
{
//each cycle execution we need to kick at least 1 group
if (diffAli < diffHorde)
{
//kick alliance group, add to pool new group if needed
if (m_SelectionPools[TEAM_ALLIANCE].KickGroup(diffHorde - diffAli))
{
for (; aliIndex < aliCount && m_SelectionPools[TEAM_ALLIANCE].AddGroup((*Ali_itr), (aliFree >= diffHorde) ? aliFree - diffHorde : 0); aliIndex++)
++Ali_itr;
}
//if ali selection is already empty, then kick horde group, but if there are less horde than ali in bg - break;
if (!m_SelectionPools[TEAM_ALLIANCE].GetPlayerCount())
{
if (aliFree <= diffHorde + 1)
break;
m_SelectionPools[TEAM_HORDE].KickGroup(diffHorde - diffAli);
}
}
else
{
//kick horde group, add to pool new group if needed
if (m_SelectionPools[TEAM_HORDE].KickGroup(diffAli - diffHorde))
{
for (; hordeIndex < hordeCount && m_SelectionPools[TEAM_HORDE].AddGroup((*Horde_itr), (hordeFree >= diffAli) ? hordeFree - diffAli : 0); hordeIndex++)
++Horde_itr;
}
if (!m_SelectionPools[TEAM_HORDE].GetPlayerCount())
{
if (hordeFree <= diffAli + 1)
break;
m_SelectionPools[TEAM_ALLIANCE].KickGroup(diffAli - diffHorde);
}
}
//count diffs after small update
diffAli = aliFree - int32(m_SelectionPools[TEAM_ALLIANCE].GetPlayerCount());
diffHorde = hordeFree - int32(m_SelectionPools[TEAM_HORDE].GetPlayerCount());
}
}
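// Worked example, not part of the original source, for the balancing loop
// above: with aliFree = hordeFree = 10, a 10-player alliance pool and a
// 7-player horde pool, diffAli = 0 and diffHorde = 3, so the alliance side
// calls KickGroup(3) and then refills with groups up to a desired count of
// aliFree - diffHorde = 7; the loop repeats until the two pools differ by
// at most 1 free slot or one side runs out of groups to kick.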
// this method checks if premade versus premade battleground is possible
// then after 30 mins (default) in queue it moves premade group to normal queue
// it tries to invite as many players as it can - up to MaxPlayersPerTeam, because premade groups have more than MinPlayersPerTeam players
bool BattlegroundQueue::CheckPremadeMatch(BattlegroundBracketId bracket_id, uint32 MinPlayersPerTeam, uint32 MaxPlayersPerTeam)
{
//check match
if (!m_QueuedGroups[bracket_id][BG_QUEUE_PREMADE_ALLIANCE].empty() && !m_QueuedGroups[bracket_id][BG_QUEUE_PREMADE_HORDE].empty())
{
//start premade match
//if groups aren't invited
GroupsQueueType::const_iterator ali_group, horde_group;
for (ali_group = m_QueuedGroups[bracket_id][BG_QUEUE_PREMADE_ALLIANCE].begin(); ali_group != m_QueuedGroups[bracket_id][BG_QUEUE_PREMADE_ALLIANCE].end(); ++ali_group)
if (!(*ali_group)->IsInvitedToBGInstanceGUID)
break;
for (horde_group = m_QueuedGroups[bracket_id][BG_QUEUE_PREMADE_HORDE].begin(); horde_group != m_QueuedGroups[bracket_id][BG_QUEUE_PREMADE_HORDE].end(); ++horde_group)
if (!(*horde_group)->IsInvitedToBGInstanceGUID)
break;
if (ali_group != m_QueuedGroups[bracket_id][BG_QUEUE_PREMADE_ALLIANCE].end() && horde_group != m_QueuedGroups[bracket_id][BG_QUEUE_PREMADE_HORDE].end())
{
m_SelectionPools[TEAM_ALLIANCE].AddGroup((*ali_group), MaxPlayersPerTeam);
m_SelectionPools[TEAM_HORDE].AddGroup((*horde_group), MaxPlayersPerTeam);
//add groups/players from the normal queue, up to the size of the smaller premade side (std::min below)
uint32 maxPlayers = std::min(m_SelectionPools[TEAM_ALLIANCE].GetPlayerCount(), m_SelectionPools[TEAM_HORDE].GetPlayerCount());
GroupsQueueType::const_iterator itr;
for (uint32 i = 0; i < BG_TEAMS_COUNT; i++)
{
for (itr = m_QueuedGroups[bracket_id][BG_QUEUE_NORMAL_ALLIANCE + i].begin(); itr != m_QueuedGroups[bracket_id][BG_QUEUE_NORMAL_ALLIANCE + i].end(); ++itr)
{
//if itr can join the BG and the player count is less than maxPlayers, then add the group to the selection pool
if (!(*itr)->IsInvitedToBGInstanceGUID && !m_SelectionPools[i].AddGroup((*itr), maxPlayers))
break;
}
}
//premade selection pools are set
return true;
}
}
// now check if we can move group from Premade queue to normal queue (timer has expired) or group size lowered!!
// this could be 2 cycles, but I'm checking only the first team in the queue - it can cause a problem
// if the first is invited to a BG and the second's timer expired, but we can ignore it, because players have only 80 seconds to click to enter the bg,
// and when they click, or after 80 seconds, the queue info is removed from the queue
uint32 time_before = GameTime::GetGameTimeMS() - sWorld->getIntConfig(CONFIG_BATTLEGROUND_PREMADE_GROUP_WAIT_FOR_MATCH);
for (uint32 i = 0; i < BG_TEAMS_COUNT; i++)
{
if (!m_QueuedGroups[bracket_id][BG_QUEUE_PREMADE_ALLIANCE + i].empty())
{
GroupsQueueType::iterator itr = m_QueuedGroups[bracket_id][BG_QUEUE_PREMADE_ALLIANCE + i].begin();
if (!(*itr)->IsInvitedToBGInstanceGUID && ((*itr)->JoinTime < time_before || (*itr)->Players.size() < MinPlayersPerTeam))
{
//we must insert group to normal queue and erase pointer from premade queue
m_QueuedGroups[bracket_id][BG_QUEUE_NORMAL_ALLIANCE + i].push_front((*itr));
m_QueuedGroups[bracket_id][BG_QUEUE_PREMADE_ALLIANCE + i].erase(itr);
}
}
}
//selection pools are not set
return false;
}
// this method tries to create battleground or arena with MinPlayersPerTeam against MinPlayersPerTeam
bool BattlegroundQueue::CheckNormalMatch(Battleground* /*bg_template*/, BattlegroundBracketId bracket_id, uint32 minPlayers, uint32 maxPlayers)
{
GroupsQueueType::const_iterator itr_team[BG_TEAMS_COUNT];
for (uint32 i = 0; i < BG_TEAMS_COUNT; i++)
{
itr_team[i] = m_QueuedGroups[bracket_id][BG_QUEUE_NORMAL_ALLIANCE + i].begin();
for (; itr_team[i] != m_QueuedGroups[bracket_id][BG_QUEUE_NORMAL_ALLIANCE + i].end(); ++(itr_team[i]))
{
if (!(*(itr_team[i]))->IsInvitedToBGInstanceGUID)
{
m_SelectionPools[i].AddGroup(*(itr_team[i]), maxPlayers);
if (m_SelectionPools[i].GetPlayerCount() >= minPlayers)
break;
}
}
}
//try to invite the same number of players - this cycle may cause a longer wait time even if there are enough players in the queue, but we want a balanced bg
uint32 j = TEAM_ALLIANCE;
if (m_SelectionPools[TEAM_HORDE].GetPlayerCount() < m_SelectionPools[TEAM_ALLIANCE].GetPlayerCount())
j = TEAM_HORDE;
if (sWorld->getIntConfig(CONFIG_BATTLEGROUND_INVITATION_TYPE) != BG_QUEUE_INVITATION_TYPE_NO_BALANCE
&& m_SelectionPools[TEAM_HORDE].GetPlayerCount() >= minPlayers && m_SelectionPools[TEAM_ALLIANCE].GetPlayerCount() >= minPlayers)
{
//we will try to invite more groups to team with less players indexed by j
++(itr_team[j]); //this will not cause a crash, because the for loop above reached a break;
for (; itr_team[j] != m_QueuedGroups[bracket_id][BG_QUEUE_NORMAL_ALLIANCE + j].end(); ++(itr_team[j]))
{
if (!(*(itr_team[j]))->IsInvitedToBGInstanceGUID)
if (!m_SelectionPools[j].AddGroup(*(itr_team[j]), m_SelectionPools[(j + 1) % BG_TEAMS_COUNT].GetPlayerCount()))
break;
}
// do not allow starting a bg where one faction has more than 2 extra players
if (abs((int32)(m_SelectionPools[TEAM_HORDE].GetPlayerCount() - m_SelectionPools[TEAM_ALLIANCE].GetPlayerCount())) > 2)
return false;
}
//allow 1v0 if debug bg
if (sBattlegroundMgr->isTesting() && (m_SelectionPools[TEAM_ALLIANCE].GetPlayerCount() || m_SelectionPools[TEAM_HORDE].GetPlayerCount()))
return true;
//return true if there are enough players in the selection pools - this lets the .debug bg command work correctly
return m_SelectionPools[TEAM_ALLIANCE].GetPlayerCount() >= minPlayers && m_SelectionPools[TEAM_HORDE].GetPlayerCount() >= minPlayers;
}
// this method will check if we can invite players to same faction skirmish match
bool BattlegroundQueue::CheckSkirmishForSameFaction(BattlegroundBracketId bracket_id, uint32 minPlayersPerTeam)
{
if (m_SelectionPools[TEAM_ALLIANCE].GetPlayerCount() < minPlayersPerTeam && m_SelectionPools[TEAM_HORDE].GetPlayerCount() < minPlayersPerTeam)
return false;
uint32 teamIndex = TEAM_ALLIANCE;
uint32 otherTeam = TEAM_HORDE;
uint32 otherTeamId = HORDE;
if (m_SelectionPools[TEAM_HORDE].GetPlayerCount() == minPlayersPerTeam)
{
teamIndex = TEAM_HORDE;
otherTeam = TEAM_ALLIANCE;
otherTeamId = ALLIANCE;
}
//clear other team's selection
m_SelectionPools[otherTeam].Init();
//store last ginfo pointer
GroupQueueInfo* ginfo = m_SelectionPools[teamIndex].SelectedGroups.back();
//set itr_team to group that was added to selection pool latest
GroupsQueueType::iterator itr_team = m_QueuedGroups[bracket_id][BG_QUEUE_NORMAL_ALLIANCE + teamIndex].begin();
for (; itr_team != m_QueuedGroups[bracket_id][BG_QUEUE_NORMAL_ALLIANCE + teamIndex].end(); ++itr_team)
if (ginfo == *itr_team)
break;
if (itr_team == m_QueuedGroups[bracket_id][BG_QUEUE_NORMAL_ALLIANCE + teamIndex].end())
return false;
GroupsQueueType::iterator itr_team2 = itr_team;
++itr_team2;
//invite players to other selection pool
for (; itr_team2 != m_QueuedGroups[bracket_id][BG_QUEUE_NORMAL_ALLIANCE + teamIndex].end(); ++itr_team2)
{
//if selection pool is full then break;
if (!(*itr_team2)->IsInvitedToBGInstanceGUID && !m_SelectionPools[otherTeam].AddGroup(*itr_team2, minPlayersPerTeam))
break;
}
if (m_SelectionPools[otherTeam].GetPlayerCount() != minPlayersPerTeam)
return false;
//here we have 2 correct selections and we need to change one team's faction and move its selection pool groups to the other team's queue
for (GroupsQueueType::iterator itr = m_SelectionPools[otherTeam].SelectedGroups.begin(); itr != m_SelectionPools[otherTeam].SelectedGroups.end(); ++itr)
{
//set correct team
(*itr)->Team = otherTeamId;
//add team to other queue
m_QueuedGroups[bracket_id][BG_QUEUE_NORMAL_ALLIANCE + otherTeam].push_front(*itr);
//remove team from old queue
GroupsQueueType::iterator itr2 = itr_team;
++itr2;
for (; itr2 != m_QueuedGroups[bracket_id][BG_QUEUE_NORMAL_ALLIANCE + teamIndex].end(); ++itr2)
{
if (*itr2 == *itr)
{
m_QueuedGroups[bracket_id][BG_QUEUE_NORMAL_ALLIANCE + teamIndex].erase(itr2);
break;
}
}
}
return true;
}
void BattlegroundQueue::UpdateEvents(uint32 diff)
{
m_events.Update(diff);
}
/*
this method is called when a group is inserted, or a player / group is removed from the BG Queue - only one player's status changes, so we don't use while (true) cycles to invite the whole queue
it must be called after fully adding the members of a group to ensure group joining
it should be called from the Battleground::RemovePlayer function in some cases
*/
void BattlegroundQueue::BattlegroundQueueUpdate(uint32 /*diff*/, BattlegroundTypeId bgTypeId, BattlegroundBracketId bracket_id, uint8 arenaType, bool isRated, uint32 arenaRating)
{
//if no players in queue - do nothing
if (m_QueuedGroups[bracket_id][BG_QUEUE_PREMADE_ALLIANCE].empty() &&
m_QueuedGroups[bracket_id][BG_QUEUE_PREMADE_HORDE].empty() &&
m_QueuedGroups[bracket_id][BG_QUEUE_NORMAL_ALLIANCE].empty() &&
m_QueuedGroups[bracket_id][BG_QUEUE_NORMAL_HORDE].empty())
return;
// a battleground with a free slot for a player should always be at the beginning of the queue
// maybe it would be better to create bgfreeslotqueue for each bracket_id
BGFreeSlotQueueContainer& bgQueues = sBattlegroundMgr->GetBGFreeSlotQueueStore(bgTypeId);
for (BGFreeSlotQueueContainer::iterator itr = bgQueues.begin(); itr != bgQueues.end();)
{
Battleground* bg = *itr; ++itr;
// DO NOT allow queue manager to invite new player to rated games
if (!bg->isRated() && bg->GetTypeID() == bgTypeId && bg->GetBracketId() == bracket_id &&
bg->GetStatus() > STATUS_WAIT_QUEUE && bg->GetStatus() < STATUS_WAIT_LEAVE)
{
// clear selection pools
m_SelectionPools[TEAM_ALLIANCE].Init();
m_SelectionPools[TEAM_HORDE].Init();
// call a function that does the job for us
FillPlayersToBG(bg, bracket_id);
// now everything is set, invite players
for (GroupsQueueType::const_iterator citr = m_SelectionPools[TEAM_ALLIANCE].SelectedGroups.begin(); citr != m_SelectionPools[TEAM_ALLIANCE].SelectedGroups.end(); ++citr)
InviteGroupToBG((*citr), bg, (*citr)->Team);
for (GroupsQueueType::const_iterator citr = m_SelectionPools[TEAM_HORDE].SelectedGroups.begin(); citr != m_SelectionPools[TEAM_HORDE].SelectedGroups.end(); ++citr)
InviteGroupToBG((*citr), bg, (*citr)->Team);
if (!bg->HasFreeSlots())
bg->RemoveFromBGFreeSlotQueue();
}
}
// finished iterating through the bgs with free slots, maybe we need to create a new bg
Battleground* bg_template = sBattlegroundMgr->GetBattlegroundTemplate(bgTypeId);
if (!bg_template)
{
TC_LOG_ERROR("bg.battleground", "Battleground: Update: bg template not found for %u", bgTypeId);
return;
}
PVPDifficultyEntry const* bracketEntry = DB2Manager::GetBattlegroundBracketById(bg_template->GetMapId(), bracket_id);
if (!bracketEntry)
{
TC_LOG_ERROR("bg.battleground", "Battleground: Update: bg bracket entry not found for map %u bracket id %u", bg_template->GetMapId(), bracket_id);
return;
}
// get the min. players per team, properly for larger arenas as well. (must have full teams for arena matches!)
uint32 MinPlayersPerTeam = bg_template->GetMinPlayersPerTeam();
uint32 MaxPlayersPerTeam = bg_template->GetMaxPlayersPerTeam();
if (bg_template->isArena())
{
MaxPlayersPerTeam = arenaType;
MinPlayersPerTeam = sBattlegroundMgr->isArenaTesting() ? 1 : arenaType;
}
else if (sBattlegroundMgr->isTesting())
MinPlayersPerTeam = 1;
m_SelectionPools[TEAM_ALLIANCE].Init();
m_SelectionPools[TEAM_HORDE].Init();
if (bg_template->isBattleground())
{
if (CheckPremadeMatch(bracket_id, MinPlayersPerTeam, MaxPlayersPerTeam))
{
// create new battleground
Battleground* bg2 = sBattlegroundMgr->CreateNewBattleground(bgTypeId, bracketEntry, 0, false);
if (!bg2)
{
TC_LOG_ERROR("bg.battleground", "BattlegroundQueue::Update - Cannot create battleground: %u", bgTypeId);
return;
}
// invite those selection pools
for (uint32 i = 0; i < BG_TEAMS_COUNT; i++)
for (GroupsQueueType::const_iterator citr = m_SelectionPools[TEAM_ALLIANCE + i].SelectedGroups.begin(); citr != m_SelectionPools[TEAM_ALLIANCE + i].SelectedGroups.end(); ++citr)
InviteGroupToBG((*citr), bg2, (*citr)->Team);
bg2->StartBattleground();
//clear structures
m_SelectionPools[TEAM_ALLIANCE].Init();
m_SelectionPools[TEAM_HORDE].Init();
}
}
// now check if there are in queues enough players to start new game of (normal battleground, or non-rated arena)
if (!isRated)
{
// if there are enough players in pools, start new battleground or non rated arena
if (CheckNormalMatch(bg_template, bracket_id, MinPlayersPerTeam, MaxPlayersPerTeam)
|| (bg_template->isArena() && CheckSkirmishForSameFaction(bracket_id, MinPlayersPerTeam)))
{
// we successfully created a pool
Battleground* bg2 = sBattlegroundMgr->CreateNewBattleground(bgTypeId, bracketEntry, arenaType, false);
if (!bg2)
{
TC_LOG_ERROR("bg.battleground", "BattlegroundQueue::Update - Cannot create battleground: %u", bgTypeId);
return;
}
// invite those selection pools
for (uint32 i = 0; i < BG_TEAMS_COUNT; i++)
for (GroupsQueueType::const_iterator citr = m_SelectionPools[TEAM_ALLIANCE + i].SelectedGroups.begin(); citr != m_SelectionPools[TEAM_ALLIANCE + i].SelectedGroups.end(); ++citr)
InviteGroupToBG((*citr), bg2, (*citr)->Team);
// start bg
bg2->StartBattleground();
}
}
else if (bg_template->isArena())
{
// find out the minimum and maximum ratings the newly added team should battle against
// arenaRating is the rating of the team that joined last, or 0
// 0 means an automatic update call, and then we must use the rating of the team with the longest wait time
if (!arenaRating)
{
GroupQueueInfo* front1 = NULL;
GroupQueueInfo* front2 = NULL;
if (!m_QueuedGroups[bracket_id][BG_QUEUE_PREMADE_ALLIANCE].empty())
{
front1 = m_QueuedGroups[bracket_id][BG_QUEUE_PREMADE_ALLIANCE].front();
arenaRating = front1->ArenaMatchmakerRating;
}
if (!m_QueuedGroups[bracket_id][BG_QUEUE_PREMADE_HORDE].empty())
{
front2 = m_QueuedGroups[bracket_id][BG_QUEUE_PREMADE_HORDE].front();
arenaRating = front2->ArenaMatchmakerRating;
}
if (front1 && front2)
{
if (front1->JoinTime < front2->JoinTime)
arenaRating = front1->ArenaMatchmakerRating;
}
else if (!front1 && !front2)
return; //queues are empty
}
//set rating range
uint32 arenaMinRating = (arenaRating <= sBattlegroundMgr->GetMaxRatingDifference()) ? 0 : arenaRating - sBattlegroundMgr->GetMaxRatingDifference();
uint32 arenaMaxRating = arenaRating + sBattlegroundMgr->GetMaxRatingDifference();
// if max rating difference is set and the time passed since server startup is greater than the rating discard time
// (after what time the ratings aren't taken into account when making teams) then
// the discard time is current_time - time_to_discard, teams that joined after that, will have their ratings taken into account
// else leave the discard time on 0, this way all ratings will be discarded
// this has to be signed value - when the server starts, this value would be negative and thus overflow
int32 discardTime = GameTime::GetGameTimeMS() - sBattlegroundMgr->GetRatingDiscardTimer();
// we need to find 2 teams which will play next game
GroupsQueueType::iterator itr_teams[BG_TEAMS_COUNT];
uint8 found = 0;
uint8 team = 0;
for (uint8 i = BG_QUEUE_PREMADE_ALLIANCE; i < BG_QUEUE_NORMAL_ALLIANCE; i++)
{
// take the group that joined first
GroupsQueueType::iterator itr2 = m_QueuedGroups[bracket_id][i].begin();
for (; itr2 != m_QueuedGroups[bracket_id][i].end(); ++itr2)
{
// if group match conditions, then add it to pool
if (!(*itr2)->IsInvitedToBGInstanceGUID
&& (((*itr2)->ArenaMatchmakerRating >= arenaMinRating && (*itr2)->ArenaMatchmakerRating <= arenaMaxRating)
|| (int32)(*itr2)->JoinTime < discardTime))
{
itr_teams[found++] = itr2;
team = i;
break;
}
}
}
if (!found)
return;
if (found == 1)
{
for (GroupsQueueType::iterator itr3 = itr_teams[0]; itr3 != m_QueuedGroups[bracket_id][team].end(); ++itr3)
{
if (!(*itr3)->IsInvitedToBGInstanceGUID
&& (((*itr3)->ArenaMatchmakerRating >= arenaMinRating && (*itr3)->ArenaMatchmakerRating <= arenaMaxRating)
|| (int32)(*itr3)->JoinTime < discardTime)
&& (*itr_teams[0])->ArenaTeamId != (*itr3)->ArenaTeamId)
{
itr_teams[found++] = itr3;
break;
}
}
}
//if we have 2 teams, then start new arena and invite players!
if (found == 2)
{
GroupQueueInfo* aTeam = *itr_teams[TEAM_ALLIANCE];
GroupQueueInfo* hTeam = *itr_teams[TEAM_HORDE];
Battleground* arena = sBattlegroundMgr->CreateNewBattleground(bgTypeId, bracketEntry, arenaType, true);
if (!arena)
{
TC_LOG_ERROR("bg.battleground", "BattlegroundQueue::Update couldn't create arena instance for rated arena match!");
return;
}
aTeam->OpponentsTeamRating = hTeam->ArenaTeamRating;
hTeam->OpponentsTeamRating = aTeam->ArenaTeamRating;
aTeam->OpponentsMatchmakerRating = hTeam->ArenaMatchmakerRating;
hTeam->OpponentsMatchmakerRating = aTeam->ArenaMatchmakerRating;
TC_LOG_DEBUG("bg.battleground", "setting oposite teamrating for team %u to %u", aTeam->ArenaTeamId, aTeam->OpponentsTeamRating);
TC_LOG_DEBUG("bg.battleground", "setting oposite teamrating for team %u to %u", hTeam->ArenaTeamId, hTeam->OpponentsTeamRating);
// now we must move the team to the other faction's queue if we changed its faction, because otherwise we will spam the log with errors in Queue::RemovePlayer
if (aTeam->Team != ALLIANCE)
{
m_QueuedGroups[bracket_id][BG_QUEUE_PREMADE_ALLIANCE].push_front(aTeam);
m_QueuedGroups[bracket_id][BG_QUEUE_PREMADE_HORDE].erase(itr_teams[TEAM_ALLIANCE]);
}
if (hTeam->Team != HORDE)
{
m_QueuedGroups[bracket_id][BG_QUEUE_PREMADE_HORDE].push_front(hTeam);
m_QueuedGroups[bracket_id][BG_QUEUE_PREMADE_ALLIANCE].erase(itr_teams[TEAM_HORDE]);
}
arena->SetArenaMatchmakerRating(ALLIANCE, aTeam->ArenaMatchmakerRating);
arena->SetArenaMatchmakerRating( HORDE, hTeam->ArenaMatchmakerRating);
InviteGroupToBG(aTeam, arena, ALLIANCE);
InviteGroupToBG(hTeam, arena, HORDE);
TC_LOG_DEBUG("bg.battleground", "Starting rated arena match!");
arena->StartBattleground();
}
}
}
/*********************************************************/
/*** BATTLEGROUND QUEUE EVENTS ***/
/*********************************************************/
bool BGQueueInviteEvent::Execute(uint64 /*e_time*/, uint32 /*p_time*/)
{
Player* player = ObjectAccessor::FindConnectedPlayer(m_PlayerGuid);
// player logged off (we should do nothing, he is correctly removed from queue in another procedure)
if (!player)
return true;
Battleground* bg = sBattlegroundMgr->GetBattleground(m_BgInstanceGUID, m_BgTypeId);
//if battleground ended and its instance deleted - do nothing
if (!bg)
return true;
BattlegroundQueueTypeId bgQueueTypeId = BattlegroundMgr::BGQueueTypeId(bg->GetTypeID(), bg->GetArenaType());
uint32 queueSlot = player->GetBattlegroundQueueIndex(bgQueueTypeId);
if (queueSlot < PLAYER_MAX_BATTLEGROUND_QUEUES) // player is in queue or in battleground
{
// check if player is invited to this bg
BattlegroundQueue &bgQueue = sBattlegroundMgr->GetBattlegroundQueue(bgQueueTypeId);
if (bgQueue.IsPlayerInvited(m_PlayerGuid, m_BgInstanceGUID, m_RemoveTime))
{
WorldPackets::Battleground::BattlefieldStatusNeedConfirmation battlefieldStatus;
sBattlegroundMgr->BuildBattlegroundStatusNeedConfirmation(&battlefieldStatus, bg, player, queueSlot, player->GetBattlegroundQueueJoinTime(bgQueueTypeId), INVITE_ACCEPT_WAIT_TIME - INVITATION_REMIND_TIME, m_ArenaType);
player->SendDirectMessage(battlefieldStatus.Write());
}
}
return true; //event will be deleted
}
void BGQueueInviteEvent::Abort(uint64 /*e_time*/)
{
//do nothing
}
/*
this event has many possibilities when it is executed:
1. player is in battleground (he clicked enter on invitation window)
2. player left battleground queue and he isn't there any more
3. player left battleground queue and he joined it again and IsInvitedToBGInstanceGUID = 0
4. player left queue and he joined again and he has been invited to same battleground again -> we should not remove him from queue yet
5. player is invited to bg and he didn't choose what to do and timer expired - only in this condition we should call queue::RemovePlayer
we must remove player in the 5. case even if battleground object doesn't exist!
*/
bool BGQueueRemoveEvent::Execute(uint64 /*e_time*/, uint32 /*p_time*/)
{
Player* player = ObjectAccessor::FindConnectedPlayer(m_PlayerGuid);
if (!player)
// player logged off (we should do nothing, he is correctly removed from queue in another procedure)
return true;
Battleground* bg = sBattlegroundMgr->GetBattleground(m_BgInstanceGUID, m_BgTypeId);
//battleground can be deleted already when we are removing queue info
//bg pointer can be NULL! so use it carefully!
uint32 queueSlot = player->GetBattlegroundQueueIndex(m_BgQueueTypeId);
if (queueSlot < PLAYER_MAX_BATTLEGROUND_QUEUES) // player is in queue, or in Battleground
{
// check if player is in queue for this BG and if we are removing his invite event
BattlegroundQueue &bgQueue = sBattlegroundMgr->GetBattlegroundQueue(m_BgQueueTypeId);
if (bgQueue.IsPlayerInvited(m_PlayerGuid, m_BgInstanceGUID, m_RemoveTime))
{
TC_LOG_DEBUG("bg.battleground", "Battleground: removing %s from bg queue for instance %u because of not pressing enter battle in time.", player->GetGUID().ToString().c_str(), m_BgInstanceGUID);
player->RemoveBattlegroundQueueId(m_BgQueueTypeId);
bgQueue.RemovePlayer(m_PlayerGuid, true);
//update queues if battleground isn't ended
if (bg && bg->isBattleground() && bg->GetStatus() != STATUS_WAIT_LEAVE)
sBattlegroundMgr->ScheduleQueueUpdate(0, 0, m_BgQueueTypeId, m_BgTypeId, bg->GetBracketId());
WorldPackets::Battleground::BattlefieldStatusNone battlefieldStatus;
sBattlegroundMgr->BuildBattlegroundStatusNone(&battlefieldStatus, player, queueSlot, player->GetBattlegroundQueueJoinTime(m_BgQueueTypeId));
player->SendDirectMessage(battlefieldStatus.Write());
}
}
//event will be deleted
return true;
}
void BGQueueRemoveEvent::Abort(uint64 /*e_time*/)
{
//do nothing
}
| gpl-2.0 |
lovelorn/STM32F4xx_DSP_StdPeriph_Lib_V1.3.0 | Project/STM32F4xx_StdPeriph_Examples/SPI/SPI_FLASH/main.c | 2 | 5935 | /**
******************************************************************************
* @file SPI/SPI_FLASH/main.c
* @author MCD Application Team
* @version V1.3.0
* @date 13-November-2013
* @brief Main program body
******************************************************************************
* @attention
*
* <h2><center>&copy; COPYRIGHT 2013 STMicroelectronics</center></h2>
*
* Licensed under MCD-ST Liberty SW License Agreement V2, (the "License");
* You may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.st.com/software_license_agreement_liberty_v2
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
******************************************************************************
*/
/* Includes ------------------------------------------------------------------*/
#if defined (USE_STM324xG_EVAL)
#include "stm324xg_eval.h"
#elif defined (USE_STM324x7I_EVAL)
#include "stm324x7i_eval.h"
#else
#error "Please select first the Evaluation board used in your application (in Project Options)"
#endif
#include "spi_flash.h"
/** @addtogroup STM32F4xx_StdPeriph_Examples
* @{
*/
/** @addtogroup SPI_FLASH
* @{
*/
/* Private typedef -----------------------------------------------------------*/
typedef enum {FAILED = 0, PASSED = !FAILED} TestStatus;
/* Private define ------------------------------------------------------------*/
#define FLASH_WRITE_ADDRESS 0x700000
#define FLASH_READ_ADDRESS FLASH_WRITE_ADDRESS
#define FLASH_SECTOR_TO_ERASE FLASH_WRITE_ADDRESS
#define BufferSize (countof(Tx_Buffer)-1)
/* Private macro -------------------------------------------------------------*/
#define countof(a) (sizeof(a) / sizeof(*(a)))
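/* Note: BufferSize is countof(Tx_Buffer) - 1, so the string's terminating
   '\0' is neither written to the FLASH nor included in the comparison. */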
/* Private variables ---------------------------------------------------------*/
uint8_t Tx_Buffer[] = "STM32F4xx SPI Firmware Library Example: communication with an M25P SPI FLASH";
uint8_t Rx_Buffer[BufferSize];
__IO uint8_t Index = 0x0;
volatile TestStatus TransferStatus1 = FAILED, TransferStatus2 = PASSED;
__IO uint32_t FlashID = 0;
/* Private functions ---------------------------------------------------------*/
TestStatus Buffercmp(uint8_t* pBuffer1, uint8_t* pBuffer2, uint16_t BufferLength);
/**
* @brief Main program
* @param None
* @retval None
*/
int main(void)
{
/*!< At this stage the microcontroller clock setting is already configured,
this is done through SystemInit() function which is called from startup
files (startup_stm32f40_41xxx.s/startup_stm32f427_437xx.s/startup_stm32f429_439xx.s)
before to branch to application main.
*/
/* Initialize LEDs mounted on EVAL board */
STM_EVAL_LEDInit(LED1);
STM_EVAL_LEDInit(LED2);
/* Initialize the SPI FLASH driver */
sFLASH_Init();
/* Get SPI Flash ID */
FlashID = sFLASH_ReadID();
/* Check the SPI Flash ID */
if (FlashID == sFLASH_M25P64_ID)
{
/* OK: Turn on LD1 */
STM_EVAL_LEDOn(LED1);
/* Perform a write in the Flash followed by a read of the written data */
/* Erase SPI FLASH Sector to write on */
sFLASH_EraseSector(FLASH_SECTOR_TO_ERASE);
/* Write Tx_Buffer data to SPI FLASH memory */
sFLASH_WriteBuffer(Tx_Buffer, FLASH_WRITE_ADDRESS, BufferSize);
/* Read data from SPI FLASH memory */
sFLASH_ReadBuffer(Rx_Buffer, FLASH_READ_ADDRESS, BufferSize);
/* Check the correctness of the written data */
TransferStatus1 = Buffercmp(Tx_Buffer, Rx_Buffer, BufferSize);
/* TransferStatus1 = PASSED, if the transmitted and received data by SPI1
are the same */
/* TransferStatus1 = FAILED, if the transmitted and received data by SPI1
are different */
/* Perform an erase in the Flash followed by a read of the written data */
/* Erase SPI FLASH Sector to write on */
sFLASH_EraseSector(FLASH_SECTOR_TO_ERASE);
/* Read data from SPI FLASH memory */
sFLASH_ReadBuffer(Rx_Buffer, FLASH_READ_ADDRESS, BufferSize);
/* Check the correctness of the erase operation */
for (Index = 0; Index < BufferSize; Index++)
{
if (Rx_Buffer[Index] != 0xFF)
{
TransferStatus2 = FAILED;
}
}
/* TransferStatus2 = PASSED, if the specified sector part is erased */
/* TransferStatus2 = FAILED, if the specified sector part is not well erased */
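/* NOR Flash such as the M25P64 erases to the all-ones state, which is why
every byte of a freshly erased sector must read back as 0xFF. */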
}
else
{
/* Error: Turn on LD2 */
STM_EVAL_LEDOn(LED2);
}
while (1)
{}
}
/**
* @brief Compares two buffers.
* @param pBuffer1, pBuffer2: buffers to be compared.
* @param BufferLength: buffer's length
* @retval PASSED: pBuffer1 identical to pBuffer2
* FAILED: pBuffer1 differs from pBuffer2
*/
TestStatus Buffercmp(uint8_t* pBuffer1, uint8_t* pBuffer2, uint16_t BufferLength)
{
while (BufferLength--)
{
if (*pBuffer1 != *pBuffer2)
{
return FAILED;
}
pBuffer1++;
pBuffer2++;
}
return PASSED;
}
#ifdef USE_FULL_ASSERT
/**
* @brief Reports the name of the source file and the source line number
* where the assert_param error has occurred.
* @param file: pointer to the source file name
* @param line: assert_param error line source number
* @retval None
*/
void assert_failed(uint8_t* file, uint32_t line)
{
/* User can add his own implementation to report the file name and line number,
ex: printf("Wrong parameters value: file %s on line %d\r\n", file, line) */
/* Infinite loop */
while (1)
{}
}
#endif
/**
* @}
*/
/**
* @}
*/
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
| gpl-2.0 |
szezso/android_kernel_motorola_msm8916 | drivers/video/msm/mdss/mdss_fb.c | 2 | 94014 | /*
* Core MDSS framebuffer driver.
*
* Copyright (C) 2007 Google Incorporated
* Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-buf.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/leds.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/msm_mdp.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/proc_fs.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/version.h>
#include <linux/vmalloc.h>
#include <linux/sync.h>
#include <linux/sw_sync.h>
#include <linux/file.h>
#include <linux/kthread.h>
#include <linux/qcom_iommu.h>
#include <linux/msm_iommu_domains.h>
#include "mdss_fb.h"
#include "mdss_mdp_splash_logo.h"
#include "mdss_livedisplay.h"
#ifdef CONFIG_FB_MSM_TRIPLE_BUFFER
#define MDSS_FB_NUM 3
#else
#define MDSS_FB_NUM 2
#endif
#ifndef EXPORT_COMPAT
#define EXPORT_COMPAT(x)
#endif
#define MAX_FBI_LIST 32
static struct fb_info *fbi_list[MAX_FBI_LIST];
static int fbi_list_index;
struct sys_panelinfo panelinfo = {NULL, NULL, NULL};
static u32 mdss_fb_pseudo_palette[16] = {
0x00000000, 0xffffffff, 0xffffffff, 0xffffffff,
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff
};
static struct msm_mdp_interface *mdp_instance;
static int mdss_fb_register(struct msm_fb_data_type *mfd);
static int mdss_fb_open(struct fb_info *info, int user);
static int mdss_fb_release(struct fb_info *info, int user);
static int mdss_fb_release_all(struct fb_info *info, bool release_all);
static int mdss_fb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info);
static int mdss_fb_check_var(struct fb_var_screeninfo *var,
struct fb_info *info);
static int mdss_fb_set_par(struct fb_info *info);
static int mdss_fb_blank_sub(int blank_mode, struct fb_info *info,
int op_enable);
static int mdss_fb_suspend_sub(struct msm_fb_data_type *mfd);
static int mdss_fb_ioctl(struct fb_info *info, unsigned int cmd,
unsigned long arg);
static int mdss_fb_fbmem_ion_mmap(struct fb_info *info,
struct vm_area_struct *vma);
static int mdss_fb_alloc_fb_ion_memory(struct msm_fb_data_type *mfd,
size_t size);
static void mdss_fb_release_fences(struct msm_fb_data_type *mfd);
static int __mdss_fb_sync_buf_done_callback(struct notifier_block *p,
unsigned long val, void *data);
static int __mdss_fb_display_thread(void *data);
static int mdss_fb_pan_idle(struct msm_fb_data_type *mfd);
static int mdss_fb_send_panel_event(struct msm_fb_data_type *mfd,
int event, void *arg);
static void mdss_fb_set_mdp_sync_pt_threshold(struct msm_fb_data_type *mfd);
static void mdss_fb_scale_bl(struct msm_fb_data_type *mfd, u32 *bl_lvl);
void mdss_fb_no_update_notify_timer_cb(unsigned long data)
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)data;
if (!mfd) {
pr_err("%s mfd NULL\n", __func__);
return;
}
mfd->no_update.value = NOTIFY_TYPE_NO_UPDATE;
complete(&mfd->no_update.comp);
}
void mdss_fb_bl_update_notify(struct msm_fb_data_type *mfd)
{
if (!mfd) {
pr_err("%s mfd NULL\n", __func__);
return;
}
mutex_lock(&mfd->update.lock);
if (mfd->update.ref_count > 0) {
mutex_unlock(&mfd->update.lock);
mfd->update.value = NOTIFY_TYPE_BL_UPDATE;
complete(&mfd->update.comp);
mutex_lock(&mfd->update.lock);
}
mutex_unlock(&mfd->update.lock);
mutex_lock(&mfd->no_update.lock);
if (mfd->no_update.ref_count > 0) {
mutex_unlock(&mfd->no_update.lock);
mfd->no_update.value = NOTIFY_TYPE_BL_UPDATE;
complete(&mfd->no_update.comp);
mutex_lock(&mfd->no_update.lock);
}
mutex_unlock(&mfd->no_update.lock);
}
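/*
* mdss_fb_notify_update() - handler for the update-notification ioctl.
*
* Summary of the protocol implemented below: userspace passes one of the
* NOTIFY_UPDATE_* codes. INIT/DEINIT arm and tear down the completion
* structures, START blocks (up to 4s) until a frame update or suspend is
* signalled, STOP blocks (up to 4s) until the display goes idle, and
* POWER_OFF blocks (up to 1s) for panel power-off. The resulting
* NOTIFY_TYPE_* value is copied back to userspace on success.
*/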
static int mdss_fb_notify_update(struct msm_fb_data_type *mfd,
unsigned long *argp)
{
int ret;
unsigned int notify = 0x0, to_user = 0x0;
ret = copy_from_user(&notify, argp, sizeof(unsigned int));
if (ret) {
pr_err("%s:ioctl failed\n", __func__);
return ret;
}
if (notify > NOTIFY_UPDATE_POWER_OFF)
return -EINVAL;
if (notify == NOTIFY_UPDATE_INIT) {
mutex_lock(&mfd->update.lock);
mfd->update.init_done = true;
mutex_unlock(&mfd->update.lock);
ret = 1;
} else if (notify == NOTIFY_UPDATE_DEINIT) {
mutex_lock(&mfd->update.lock);
mfd->update.init_done = false;
mutex_unlock(&mfd->update.lock);
complete(&mfd->update.comp);
complete(&mfd->no_update.comp);
ret = 1;
} else if (mfd->update.is_suspend) {
to_user = NOTIFY_TYPE_SUSPEND;
mfd->update.is_suspend = 0;
ret = 1;
} else if (notify == NOTIFY_UPDATE_START) {
mutex_lock(&mfd->update.lock);
if (mfd->update.init_done)
INIT_COMPLETION(mfd->update.comp);
else {
mutex_unlock(&mfd->update.lock);
pr_err("notify update start called without init\n");
return -EINVAL;
}
mfd->update.ref_count++;
mutex_unlock(&mfd->update.lock);
ret = wait_for_completion_interruptible_timeout(
&mfd->update.comp, 4 * HZ);
mutex_lock(&mfd->update.lock);
mfd->update.ref_count--;
mutex_unlock(&mfd->update.lock);
to_user = (unsigned int)mfd->update.value;
if (mfd->update.type == NOTIFY_TYPE_SUSPEND) {
to_user = (unsigned int)mfd->update.type;
ret = 1;
}
} else if (notify == NOTIFY_UPDATE_STOP) {
mutex_lock(&mfd->update.lock);
if (mfd->update.init_done)
INIT_COMPLETION(mfd->no_update.comp);
else {
mutex_unlock(&mfd->update.lock);
pr_err("notify update stop called without init\n");
return -EINVAL;
}
mutex_unlock(&mfd->update.lock);
mutex_lock(&mfd->no_update.lock);
mfd->no_update.ref_count++;
mutex_unlock(&mfd->no_update.lock);
ret = wait_for_completion_interruptible_timeout(
&mfd->no_update.comp, 4 * HZ);
mutex_lock(&mfd->no_update.lock);
mfd->no_update.ref_count--;
mutex_unlock(&mfd->no_update.lock);
to_user = (unsigned int)mfd->no_update.value;
} else {
if (mdss_fb_is_power_on(mfd)) {
INIT_COMPLETION(mfd->power_off_comp);
ret = wait_for_completion_interruptible_timeout(
&mfd->power_off_comp, 1 * HZ);
}
}
if (ret == 0)
ret = -ETIMEDOUT;
else if (ret > 0)
ret = copy_to_user(argp, &to_user, sizeof(unsigned int));
return ret;
}
static int lcd_backlight_registered;
static void mdss_fb_set_bl_brightness(struct led_classdev *led_cdev,
enum led_brightness value)
{
struct msm_fb_data_type *mfd = dev_get_drvdata(led_cdev->dev->parent);
int bl_lvl;
if (value > mfd->panel_info->brightness_max)
value = mfd->panel_info->brightness_max;
/* This maps android backlight level 0 to 255 into
driver backlight level 0 to bl_max with rounding */
MDSS_BRIGHT_TO_BL(bl_lvl, value, mfd->panel_info->bl_max,
mfd->panel_info->brightness_max);
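/* A sketch of the mapping, assuming MDSS_BRIGHT_TO_BL keeps its usual
* rounded linear form (mdss_fb.h holds the authoritative macro):
* bl_lvl = (2 * value * bl_max + brightness_max) / (2 * brightness_max);
*/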
if (!bl_lvl && value)
bl_lvl = 1;
if (!IS_CALIB_MODE_BL(mfd) && (!mfd->ext_bl_ctrl || !value ||
!mfd->bl_level)) {
mutex_lock(&mfd->bl_lock);
mdss_fb_set_backlight(mfd, bl_lvl);
mutex_unlock(&mfd->bl_lock);
}
}
static struct led_classdev backlight_led = {
.name = "lcd-backlight",
.brightness = MDSS_MAX_BL_BRIGHTNESS,
.brightness_set = mdss_fb_set_bl_brightness,
.max_brightness = MDSS_MAX_BL_BRIGHTNESS,
};
static ssize_t mdss_fb_get_type(struct device *dev,
struct device_attribute *attr, char *buf)
{
ssize_t ret = 0;
struct fb_info *fbi = dev_get_drvdata(dev);
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
switch (mfd->panel.type) {
case NO_PANEL:
ret = snprintf(buf, PAGE_SIZE, "no panel\n");
break;
case HDMI_PANEL:
ret = snprintf(buf, PAGE_SIZE, "hdmi panel\n");
break;
case LVDS_PANEL:
ret = snprintf(buf, PAGE_SIZE, "lvds panel\n");
break;
case DTV_PANEL:
ret = snprintf(buf, PAGE_SIZE, "dtv panel\n");
break;
case MIPI_VIDEO_PANEL:
ret = snprintf(buf, PAGE_SIZE, "mipi dsi video panel\n");
break;
case MIPI_CMD_PANEL:
ret = snprintf(buf, PAGE_SIZE, "mipi dsi cmd panel\n");
break;
case WRITEBACK_PANEL:
ret = snprintf(buf, PAGE_SIZE, "writeback panel\n");
break;
case EDP_PANEL:
ret = snprintf(buf, PAGE_SIZE, "edp panel\n");
break;
default:
ret = snprintf(buf, PAGE_SIZE, "unknown panel\n");
break;
}
return ret;
}
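/* Example (path assumed; device attributes on fbi->dev normally appear
* under the standard fb sysfs location):
* cat /sys/class/graphics/fb0/msm_fb_type
* prints e.g. "mipi dsi cmd panel" for a DSI command-mode panel.
*/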
static int mdss_fb_get_panel_xres(struct mdss_panel_info *pinfo)
{
struct mdss_panel_data *pdata;
int xres;
pdata = container_of(pinfo, struct mdss_panel_data, panel_info);
xres = pinfo->xres;
if (pdata->next)
xres += mdss_fb_get_panel_xres(&pdata->next->panel_info);
return xres;
}
static inline int mdss_fb_validate_split(int left, int right,
struct msm_fb_data_type *mfd)
{
int rc = -EINVAL;
u32 panel_xres = mdss_fb_get_panel_xres(mfd->panel_info);
/* more validation conditions could be added if needed */
if (left && right) {
if (panel_xres == left + right) {
mfd->split_fb_left = left;
mfd->split_fb_right = right;
rc = 0;
}
} else {
if (is_split_lm(mfd)) {
mfd->split_fb_left = mfd->panel_info->xres;
mfd->split_fb_right = panel_xres - mfd->split_fb_left;
rc = 0;
} else {
mfd->split_fb_left = mfd->split_fb_right = 0;
}
}
return rc;
}
static void mdss_fb_parse_dt_split(struct msm_fb_data_type *mfd)
{
u32 data[2] = {0};
struct platform_device *pdev = mfd->pdev;
of_property_read_u32_array(pdev->dev.of_node,
"qcom,mdss-fb-split", data, 2);
if (!mdss_fb_validate_split(data[0], data[1], mfd))
pr_debug("dt split_left=%d split_right=%d\n", data[0], data[1]);
}
static ssize_t mdss_fb_store_split(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
u32 data[2] = {0};
struct fb_info *fbi = dev_get_drvdata(dev);
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
if (2 != sscanf(buf, "%d %d", &data[0], &data[1])) {
pr_debug("Not able to read split values\n");
} else if (!mdss_fb_validate_split(data[0], data[1], mfd)) {
mfd->mdss_fb_split_stored = 1;
pr_debug("sys split_left=%d split_right=%d\n",
data[0], data[1]);
}
return len;
}
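/* Example (path assumed, as for msm_fb_type above): writing two widths
* whose sum equals the total panel xres configures a split framebuffer:
* echo 540 540 > /sys/class/graphics/fb0/msm_fb_split
* Values that do not sum to the panel width are silently ignored.
*/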
static ssize_t mdss_fb_show_split(struct device *dev,
struct device_attribute *attr, char *buf)
{
ssize_t ret = 0;
struct fb_info *fbi = dev_get_drvdata(dev);
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
ret = snprintf(buf, PAGE_SIZE, "%d %d\n",
mfd->split_fb_left, mfd->split_fb_right);
return ret;
}
static void mdss_fb_get_split(struct msm_fb_data_type *mfd)
{
if (mfd->index != 0)
return;
if (!mfd->mdss_fb_split_stored)
mdss_fb_parse_dt_split(mfd);
if (mfd->split_fb_left || mfd->split_fb_right)
pr_debug("split framebuffer left=%d right=%d\n",
mfd->split_fb_left, mfd->split_fb_right);
}
static ssize_t mdss_fb_get_thermal_level(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fb_info *fbi = dev_get_drvdata(dev);
struct msm_fb_data_type *mfd = fbi->par;
int ret;
ret = scnprintf(buf, PAGE_SIZE, "thermal_level=%d\n",
mfd->thermal_level);
return ret;
}
static ssize_t mdss_fb_set_thermal_level(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct fb_info *fbi = dev_get_drvdata(dev);
struct msm_fb_data_type *mfd = fbi->par;
int rc = 0;
int thermal_level = 0;
rc = kstrtoint(buf, 10, &thermal_level);
if (rc) {
pr_err("kstrtoint failed. rc=%d\n", rc);
return rc;
}
pr_debug("Thermal level set to %d\n", thermal_level);
mfd->thermal_level = thermal_level;
sysfs_notify(&mfd->fbi->dev->kobj, NULL, "msm_fb_thermal_level");
return count;
}
static ssize_t mdss_mdp_show_blank_event(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fb_info *fbi = dev_get_drvdata(dev);
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
int ret;
pr_debug("fb%d panel_power_state = %d\n", mfd->index,
mfd->panel_power_state);
ret = scnprintf(buf, PAGE_SIZE, "panel_power_on = %d\n",
mfd->panel_power_state);
return ret;
}
static void __mdss_fb_idle_notify_work(struct work_struct *work)
{
struct delayed_work *dw = to_delayed_work(work);
struct msm_fb_data_type *mfd = container_of(dw, struct msm_fb_data_type,
idle_notify_work);
/* Notify idle-ness here */
pr_debug("Idle timeout %dms expired!\n", mfd->idle_time);
sysfs_notify(&mfd->fbi->dev->kobj, NULL, "idle_notify");
}
static ssize_t mdss_fb_get_idle_time(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fb_info *fbi = dev_get_drvdata(dev);
struct msm_fb_data_type *mfd = fbi->par;
int ret;
ret = scnprintf(buf, PAGE_SIZE, "%d", mfd->idle_time);
return ret;
}
static ssize_t mdss_fb_set_idle_time(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct fb_info *fbi = dev_get_drvdata(dev);
struct msm_fb_data_type *mfd = fbi->par;
int rc = 0;
int idle_time = 0;
rc = kstrtoint(buf, 10, &idle_time);
if (rc) {
pr_err("kstrtoint failed. rc=%d\n", rc);
return rc;
}
pr_debug("Idle time = %d\n", idle_time);
mfd->idle_time = idle_time;
return count;
}
static ssize_t mdss_fb_get_idle_notify(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fb_info *fbi = dev_get_drvdata(dev);
struct msm_fb_data_type *mfd = fbi->par;
int ret;
ret = scnprintf(buf, PAGE_SIZE, "%s",
work_busy(&mfd->idle_notify_work.work) ? "no" : "yes");
return ret;
}
static ssize_t mdss_fb_get_panel_info(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fb_info *fbi = dev_get_drvdata(dev);
struct msm_fb_data_type *mfd = fbi->par;
struct mdss_panel_info *pinfo = mfd->panel_info;
int ret;
ret = scnprintf(buf, PAGE_SIZE,
"pu_en=%d\nxstart=%d\nwalign=%d\nystart=%d\nhalign=%d\n"
"min_w=%d\nmin_h=%d\nroi_merge=%d\ndyn_fps_en=%d\n"
"min_fps=%d\nmax_fps=%d\npanel_name=%s\n"
"primary_panel=%d\n",
pinfo->partial_update_enabled, pinfo->xstart_pix_align,
pinfo->width_pix_align, pinfo->ystart_pix_align,
pinfo->height_pix_align, pinfo->min_width,
pinfo->min_height, pinfo->partial_update_roi_merge,
pinfo->dynamic_fps, pinfo->min_fps, pinfo->max_fps,
pinfo->panel_name, pinfo->is_prim_panel);
return ret;
}
/*
* mdss_fb_lpm_enable() - Function to control LowPowerMode
* @mfd: Framebuffer data structure for display
* @mode: Enable/disable LowPowerMode
* 1: Enter LowPowerMode
* 0: Exit LowPowerMode
*
* This function dynamically switches to and from LowPowerMode
* based on the argument @mode.
*/
static int mdss_fb_lpm_enable(struct msm_fb_data_type *mfd, int mode)
{
int ret = 0;
u32 bl_lvl = 0;
struct mdss_panel_info *pinfo = NULL;
struct mdss_panel_data *pdata;
if (!mfd || !mfd->panel_info)
return -EINVAL;
pinfo = mfd->panel_info;
if (!pinfo->mipi.dynamic_switch_enabled) {
pr_warn("Panel does not support dynamic switch!\n");
return 0;
}
if (mode == pinfo->mipi.mode) {
pr_debug("Already in requested mode!\n");
return 0;
}
pr_debug("Enter mode: %d\n", mode);
pdata = dev_get_platdata(&mfd->pdev->dev);
pdata->panel_info.dynamic_switch_pending = true;
mdss_fb_pan_idle(mfd);
mutex_lock(&mfd->bl_lock);
bl_lvl = mfd->bl_level;
mdss_fb_set_backlight(mfd, 0);
mutex_unlock(&mfd->bl_lock);
lock_fb_info(mfd->fbi);
ret = mdss_fb_blank_sub(FB_BLANK_POWERDOWN, mfd->fbi,
mfd->op_enable);
if (ret) {
pr_err("can't turn off display!\n");
unlock_fb_info(mfd->fbi);
return ret;
}
mfd->op_enable = false;
ret = mfd->mdp.configure_panel(mfd, mode);
mdss_fb_set_mdp_sync_pt_threshold(mfd);
mfd->op_enable = true;
ret = mdss_fb_blank_sub(FB_BLANK_UNBLANK, mfd->fbi,
mfd->op_enable);
if (ret) {
pr_err("can't turn on display!\n");
unlock_fb_info(mfd->fbi);
return ret;
}
unlock_fb_info(mfd->fbi);
mutex_lock(&mfd->bl_lock);
mfd->bl_updated = true;
mdss_fb_set_backlight(mfd, bl_lvl);
mutex_unlock(&mfd->bl_lock);
pdata->panel_info.dynamic_switch_pending = false;
pdata->panel_info.is_lpm_mode = mode ? 1 : 0;
if (ret) {
pr_err("can't turn on display!\n");
return ret;
}
pr_debug("Exit mode: %d\n", mode);
return 0;
}
static ssize_t mdss_fb_get_src_split_info(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fb_info *fbi = dev_get_drvdata(dev);
struct msm_fb_data_type *mfd = fbi->par;
int ret = 0;
if ((mfd->split_mode == MDP_SPLIT_MODE_LM) &&
(fbi->var.yres > 2048) && (fbi->var.yres > fbi->var.xres))
ret = scnprintf(buf, PAGE_SIZE,
"src_split_always\n");
return ret;
}
static ssize_t mdss_fb_set_doze_mode(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct fb_info *fbi = dev_get_drvdata(dev);
struct msm_fb_data_type *mfd = fbi->par;
int rc = 0;
int doze_mode = 0;
rc = kstrtoint(buf, 10, &doze_mode);
if (rc) {
pr_err("kstrtoint failed. rc=%d\n", rc);
return rc;
}
pr_debug("Always-on mode %s\n", doze_mode ? "enabled" : "disabled");
if (mfd->panel_info->type != MIPI_CMD_PANEL)
pr_err("Always on mode only supported for cmd mode panel\n");
else
mfd->doze_mode = doze_mode;
return count;
}
static ssize_t mdss_fb_get_doze_mode(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fb_info *fbi = dev_get_drvdata(dev);
struct msm_fb_data_type *mfd = fbi->par;
return scnprintf(buf, PAGE_SIZE, "%d\n", mfd->doze_mode);
}
static DEVICE_ATTR(msm_fb_type, S_IRUGO, mdss_fb_get_type, NULL);
static DEVICE_ATTR(msm_fb_split, S_IRUGO | S_IWUSR, mdss_fb_show_split,
mdss_fb_store_split);
static DEVICE_ATTR(show_blank_event, S_IRUGO, mdss_mdp_show_blank_event, NULL);
static DEVICE_ATTR(idle_time, S_IRUGO | S_IWUSR | S_IWGRP,
mdss_fb_get_idle_time, mdss_fb_set_idle_time);
static DEVICE_ATTR(idle_notify, S_IRUGO, mdss_fb_get_idle_notify, NULL);
static DEVICE_ATTR(msm_fb_panel_info, S_IRUGO, mdss_fb_get_panel_info, NULL);
static DEVICE_ATTR(msm_fb_src_split_info, S_IRUGO, mdss_fb_get_src_split_info,
NULL);
static DEVICE_ATTR(msm_fb_thermal_level, S_IRUGO | S_IWUSR,
mdss_fb_get_thermal_level, mdss_fb_set_thermal_level);
static DEVICE_ATTR(always_on, S_IRUGO | S_IWUSR | S_IWGRP,
mdss_fb_get_doze_mode, mdss_fb_set_doze_mode);
static struct attribute *mdss_fb_attrs[] = {
&dev_attr_msm_fb_type.attr,
&dev_attr_msm_fb_split.attr,
&dev_attr_show_blank_event.attr,
&dev_attr_idle_time.attr,
&dev_attr_idle_notify.attr,
&dev_attr_msm_fb_panel_info.attr,
&dev_attr_msm_fb_src_split_info.attr,
&dev_attr_msm_fb_thermal_level.attr,
&dev_attr_always_on.attr,
NULL,
};
static struct attribute_group mdss_fb_attr_group = {
.attrs = mdss_fb_attrs,
};
static ssize_t panel_name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%s\n", panelinfo.panel_name);
}
static ssize_t panel_ver_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return snprintf(buf, PAGE_SIZE, "0x%016llx\n", *panelinfo.panel_ver);
}
static ssize_t panel_supplier_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%s\n", panelinfo.panel_supplier);
}
static ssize_t panel_man_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
u32 panel_ver = (u32)(*panelinfo.panel_ver);
return snprintf(buf, PAGE_SIZE, "0x%02x\n", panel_ver & 0xff);
}
static ssize_t panel_controller_ver_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
u32 panel_ver = (u32)(*panelinfo.panel_ver);
return snprintf(buf, PAGE_SIZE, "0x%02x\n", (panel_ver & 0xff00) >> 8);
}
static ssize_t panel_controller_drv_ver_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
u32 panel_ver = (u32)(*panelinfo.panel_ver);
return snprintf(buf, PAGE_SIZE, "0x%02x\n",
(panel_ver & 0xff0000) >> 16);
}
static DEVICE_ATTR(panel_name, S_IRUGO,
panel_name_show, NULL);
static DEVICE_ATTR(panel_ver, S_IRUGO,
panel_ver_show, NULL);
static DEVICE_ATTR(panel_supplier, S_IRUGO,
panel_supplier_show, NULL);
static DEVICE_ATTR(man_id, S_IRUGO,
panel_man_id_show, NULL);
static DEVICE_ATTR(controller_ver, S_IRUGO,
panel_controller_ver_show, NULL);
static DEVICE_ATTR(controller_drv_ver, S_IRUGO,
panel_controller_drv_ver_show, NULL);
static struct attribute *panel_id_attrs[] = {
&dev_attr_panel_name.attr,
&dev_attr_panel_ver.attr,
&dev_attr_panel_supplier.attr,
&dev_attr_man_id.attr,
&dev_attr_controller_ver.attr,
&dev_attr_controller_drv_ver.attr,
NULL,
};
static struct attribute_group panel_id_attr_group = {
.attrs = panel_id_attrs,
};
static int mdss_fb_create_sysfs(struct msm_fb_data_type *mfd)
{
int rc;
rc = sysfs_create_group(&mfd->fbi->dev->kobj, &mdss_fb_attr_group);
if (rc) {
pr_err("sysfs group creation failed, rc=%d\n", rc);
return rc;
}
rc = sysfs_create_group(&mfd->fbi->dev->kobj, &panel_id_attr_group);
if (rc)
pr_err("panel id group creation failed, rc=%d\n", rc);
return mdss_livedisplay_create_sysfs(mfd);
}
static void mdss_fb_remove_sysfs(struct msm_fb_data_type *mfd)
{
sysfs_remove_group(&mfd->fbi->dev->kobj, &panel_id_attr_group);
sysfs_remove_group(&mfd->fbi->dev->kobj, &mdss_fb_attr_group);
}
static void mdss_fb_shutdown(struct platform_device *pdev)
{
struct msm_fb_data_type *mfd = platform_get_drvdata(pdev);
mfd->shutdown_pending = true;
lock_fb_info(mfd->fbi);
mdss_fb_release_all(mfd->fbi, true);
sysfs_notify(&mfd->fbi->dev->kobj, NULL, "show_blank_event");
unlock_fb_info(mfd->fbi);
}
static int mdss_fb_probe(struct platform_device *pdev)
{
struct msm_fb_data_type *mfd = NULL;
struct mdss_panel_data *pdata;
struct fb_info *fbi;
int rc;
u32 cell_index = 0;
if (fbi_list_index >= MAX_FBI_LIST)
return -ENOMEM;
pdata = dev_get_platdata(&pdev->dev);
if (!pdata)
return -EPROBE_DEFER;
of_property_read_u32(pdev->dev.of_node, "cell-index", &cell_index);
if (cell_index > fbi_list_index)
return -EPROBE_DEFER;
/*
* alloc framebuffer info + par data
*/
fbi = framebuffer_alloc(sizeof(struct msm_fb_data_type), NULL);
if (fbi == NULL) {
pr_err("can't allocate framebuffer info data!\n");
return -ENOMEM;
}
mfd = (struct msm_fb_data_type *)fbi->par;
mfd->key = MFD_KEY;
mfd->fbi = fbi;
mfd->panel_info = &pdata->panel_info;
mfd->panel.type = pdata->panel_info.type;
mfd->panel.id = mfd->index;
mfd->fb_page = MDSS_FB_NUM;
mfd->index = fbi_list_index;
mfd->mdp_fb_page_protection = MDP_FB_PAGE_PROTECTION_WRITECOMBINE;
mfd->ext_ad_ctrl = -1;
mfd->bl_level = 0;
mfd->bl_scale = 1024;
mfd->bl_min_lvl = 30;
mfd->ad_bl_level = 0;
mfd->fb_imgType = MDP_RGBA_8888;
if (mfd->panel_info->cont_splash_enabled) {
mfd->bl_updated = true;
MDSS_BRIGHT_TO_BL(mfd->bl_level,
mfd->panel_info->brightness_max,
mfd->panel_info->bl_max,
mfd->panel_info->brightness_max);
mfd->bl_level_scaled = mfd->bl_level;
if (!IS_CALIB_MODE_BL(mfd))
mdss_fb_scale_bl(mfd, &mfd->bl_level_scaled);
}
mfd->pdev = pdev;
mfd->split_mode = MDP_SPLIT_MODE_NONE;
if (pdata->next)
mfd->split_mode = MDP_SPLIT_MODE_LM;
mfd->mdp = *mdp_instance;
INIT_LIST_HEAD(&mfd->proc_list);
mutex_init(&mfd->bl_lock);
fbi_list[fbi_list_index++] = fbi;
platform_set_drvdata(pdev, mfd);
rc = mdss_fb_register(mfd);
if (rc)
return rc;
if (mfd->mdp.init_fnc) {
rc = mfd->mdp.init_fnc(mfd);
if (rc) {
pr_err("init_fnc failed\n");
return rc;
}
}
rc = pm_runtime_set_active(mfd->fbi->dev);
if (rc < 0)
pr_err("pm_runtime: fail to set active.\n");
pm_runtime_enable(mfd->fbi->dev);
/* android supports only one lcd-backlight/lcd for now */
if (!lcd_backlight_registered) {
backlight_led.brightness = mfd->panel_info->brightness_max;
backlight_led.max_brightness = mfd->panel_info->brightness_max;
if (led_classdev_register(&pdev->dev, &backlight_led))
pr_err("led_classdev_register failed\n");
else
lcd_backlight_registered = 1;
}
mdss_fb_create_sysfs(mfd);
mdss_fb_send_panel_event(mfd, MDSS_EVENT_FB_REGISTERED, fbi);
mfd->mdp_sync_pt_data.fence_name = "mdp-fence";
if (mfd->mdp_sync_pt_data.timeline == NULL) {
char timeline_name[16];
snprintf(timeline_name, sizeof(timeline_name),
"mdss_fb_%d", mfd->index);
mfd->mdp_sync_pt_data.timeline =
sw_sync_timeline_create(timeline_name);
if (mfd->mdp_sync_pt_data.timeline == NULL) {
pr_err("cannot create release fence time line\n");
return -ENOMEM;
}
mfd->mdp_sync_pt_data.notifier.notifier_call =
__mdss_fb_sync_buf_done_callback;
}
mdss_fb_set_mdp_sync_pt_threshold(mfd);
if (mfd->mdp.splash_init_fnc)
mfd->mdp.splash_init_fnc(mfd);
INIT_DELAYED_WORK(&mfd->idle_notify_work, __mdss_fb_idle_notify_work);
return rc;
}
static void mdss_fb_set_mdp_sync_pt_threshold(struct msm_fb_data_type *mfd)
{
if (!mfd)
return;
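/* Assumed semantics, based on how these fields are consumed by the
* sync-pt code: threshold is the number of outstanding commits before a
* buffer's release fence may signal, and retire_threshold delays the
* retire fence by that many extra commits (command-mode panels retire
* one frame late).
*/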
switch (mfd->panel.type) {
case WRITEBACK_PANEL:
mfd->mdp_sync_pt_data.threshold = 1;
mfd->mdp_sync_pt_data.retire_threshold = 0;
break;
case MIPI_CMD_PANEL:
mfd->mdp_sync_pt_data.threshold = 1;
mfd->mdp_sync_pt_data.retire_threshold = 1;
break;
default:
mfd->mdp_sync_pt_data.threshold = 2;
mfd->mdp_sync_pt_data.retire_threshold = 0;
break;
}
}
static int mdss_fb_remove(struct platform_device *pdev)
{
struct msm_fb_data_type *mfd;
mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
if (!mfd)
return -ENODEV;
mdss_fb_remove_sysfs(mfd);
pm_runtime_disable(mfd->fbi->dev);
if (mfd->key != MFD_KEY)
return -EINVAL;
if (mdss_fb_suspend_sub(mfd))
pr_err("msm_fb_remove: can't stop the device %d\n",
mfd->index);
/* remove /dev/fb* */
unregister_framebuffer(mfd->fbi);
if (lcd_backlight_registered) {
lcd_backlight_registered = 0;
led_classdev_unregister(&backlight_led);
}
return 0;
}
static int mdss_fb_send_panel_event(struct msm_fb_data_type *mfd,
int event, void *arg)
{
struct mdss_panel_data *pdata;
pdata = dev_get_platdata(&mfd->pdev->dev);
if (!pdata) {
pr_err("no panel connected\n");
return -ENODEV;
}
pr_debug("sending event=%d for fb%d\n", event, mfd->index);
if (pdata->event_handler)
return pdata->event_handler(pdata, event, arg);
return 0;
}
static int mdss_fb_suspend_sub(struct msm_fb_data_type *mfd)
{
int ret = 0;
if ((!mfd) || (mfd->key != MFD_KEY))
return 0;
pr_debug("mdss_fb suspend index=%d\n", mfd->index);
mdss_fb_pan_idle(mfd);
ret = mdss_fb_send_panel_event(mfd, MDSS_EVENT_SUSPEND, NULL);
if (ret) {
pr_warn("unable to suspend fb%d (%d)\n", mfd->index, ret);
return ret;
}
mfd->suspend.op_enable = mfd->op_enable;
mfd->suspend.panel_power_state = mfd->panel_power_state;
if (mfd->op_enable) {
/*
* Ideally, display should have been blanked by now.
* If not, then blank the display based on whether always-on
* feature is enabled or not
*/
int unblank_flag = mfd->doze_mode ? FB_BLANK_VSYNC_SUSPEND :
FB_BLANK_POWERDOWN;
ret = mdss_fb_blank_sub(unblank_flag, mfd->fbi,
mfd->suspend.op_enable);
if (ret) {
pr_warn("can't turn off display!\n");
return ret;
}
mfd->op_enable = false;
fb_set_suspend(mfd->fbi, FBINFO_STATE_SUSPENDED);
}
return 0;
}
static int mdss_fb_resume_sub(struct msm_fb_data_type *mfd)
{
int ret = 0;
if ((!mfd) || (mfd->key != MFD_KEY))
return 0;
INIT_COMPLETION(mfd->power_set_comp);
mfd->is_power_setting = true;
pr_debug("mdss_fb resume index=%d\n", mfd->index);
mdss_fb_pan_idle(mfd);
ret = mdss_fb_send_panel_event(mfd, MDSS_EVENT_RESUME, NULL);
if (ret) {
pr_warn("unable to resume fb%d (%d)\n", mfd->index, ret);
return ret;
}
/* resume state var recover */
mfd->op_enable = mfd->suspend.op_enable;
if (mdss_panel_is_power_on(mfd->suspend.panel_power_state)) {
ret = mdss_fb_blank_sub(FB_BLANK_UNBLANK, mfd->fbi,
mfd->op_enable);
if (ret)
pr_warn("can't turn on display!\n");
else
fb_set_suspend(mfd->fbi, FBINFO_STATE_RUNNING);
}
mfd->is_power_setting = false;
complete_all(&mfd->power_set_comp);
return ret;
}
#if defined(CONFIG_PM) && !defined(CONFIG_PM_SLEEP)
static int mdss_fb_suspend(struct platform_device *pdev, pm_message_t state)
{
struct msm_fb_data_type *mfd = platform_get_drvdata(pdev);
if (!mfd)
return -ENODEV;
dev_dbg(&pdev->dev, "display suspend\n");
return mdss_fb_suspend_sub(mfd);
}
static int mdss_fb_resume(struct platform_device *pdev)
{
struct msm_fb_data_type *mfd = platform_get_drvdata(pdev);
if (!mfd)
return -ENODEV;
dev_dbg(&pdev->dev, "display resume\n");
return mdss_fb_resume_sub(mfd);
}
#else
#define mdss_fb_suspend NULL
#define mdss_fb_resume NULL
#endif
#ifdef CONFIG_PM_SLEEP
static int mdss_fb_pm_suspend(struct device *dev)
{
struct msm_fb_data_type *mfd = dev_get_drvdata(dev);
if (!mfd)
return -ENODEV;
dev_dbg(dev, "display pm suspend\n");
return mdss_fb_suspend_sub(mfd);
}
static int mdss_fb_pm_resume(struct device *dev)
{
struct msm_fb_data_type *mfd = dev_get_drvdata(dev);
if (!mfd)
return -ENODEV;
dev_dbg(dev, "display pm resume\n");
/*
* It is possible that the runtime status of the fb device may
* have been active when the system was suspended. Reset the runtime
* status to suspended state after a complete system resume.
*/
pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
pm_runtime_enable(dev);
return mdss_fb_resume_sub(mfd);
}
#endif
static const struct dev_pm_ops mdss_fb_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(mdss_fb_pm_suspend, mdss_fb_pm_resume)
};
static const struct of_device_id mdss_fb_dt_match[] = {
{ .compatible = "qcom,mdss-fb",},
{}
};
EXPORT_COMPAT("qcom,mdss-fb");
static struct platform_driver mdss_fb_driver = {
.probe = mdss_fb_probe,
.remove = mdss_fb_remove,
.suspend = mdss_fb_suspend,
.resume = mdss_fb_resume,
.shutdown = mdss_fb_shutdown,
.driver = {
.name = "mdss_fb",
.of_match_table = mdss_fb_dt_match,
.pm = &mdss_fb_pm_ops,
},
};
static void mdss_fb_scale_bl(struct msm_fb_data_type *mfd, u32 *bl_lvl)
{
u32 temp = *bl_lvl;
pr_debug("input = %d, scale = %d\n", temp, mfd->bl_scale);
if (temp >= mfd->bl_min_lvl) {
if (temp > mfd->panel_info->bl_max) {
pr_warn("%s: invalid bl level\n",
__func__);
temp = mfd->panel_info->bl_max;
}
if (mfd->bl_scale > 1024) {
pr_warn("%s: invalid bl scale\n",
__func__);
mfd->bl_scale = 1024;
}
/*
* bl_scale is the numerator of
* scaling fraction (x/1024)
*/
temp = (temp * mfd->bl_scale) / 1024;
/*if less than minimum level, use min level*/
if (temp < mfd->bl_min_lvl)
temp = mfd->bl_min_lvl;
}
pr_debug("output = %d\n", temp);
(*bl_lvl) = temp;
}
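/* Worked example: with bl_scale = 512 (512/1024 = 50%) and bl_min_lvl = 30,
* an input of 200 scales to 100; an input of 40 scales to 20 and is then
* clamped back up to the 30 minimum. Inputs already below bl_min_lvl
* (e.g. 10) bypass scaling entirely.
*/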
/* must call this function from within mfd->bl_lock */
void mdss_fb_set_backlight(struct msm_fb_data_type *mfd, u32 bkl_lvl)
{
struct mdss_panel_data *pdata;
u32 temp = bkl_lvl;
bool bl_notify_needed = false;
/* todo: temporary workaround to support doze mode */
if ((bkl_lvl == 0) && (mfd->doze_mode)) {
pr_debug("keeping backlight on with always-on displays\n");
mfd->unset_bl_level = 0;
return;
}
if (((mdss_fb_is_power_off(mfd) && mfd->dcm_state != DCM_ENTER)
|| !mfd->bl_updated) && !IS_CALIB_MODE_BL(mfd) &&
mfd->panel_info->cont_splash_enabled) {
mfd->unset_bl_level = bkl_lvl;
return;
} else {
mfd->unset_bl_level = 0;
}
pdata = dev_get_platdata(&mfd->pdev->dev);
if ((pdata) && (pdata->set_backlight)) {
if (mfd->mdp.ad_calc_bl)
(*mfd->mdp.ad_calc_bl)(mfd, temp, &temp,
&bl_notify_needed);
if (!IS_CALIB_MODE_BL(mfd))
mdss_fb_scale_bl(mfd, &temp);
/*
* Even though backlight has been scaled, want to show that
* backlight has been set to bkl_lvl to those that read from
* sysfs node. Thus, need to set bl_level even if it appears
* the backlight has already been set to the level it is at,
* as well as setting bl_level to bkl_lvl even though the
* backlight has been set to the scaled value.
*/
if (mfd->bl_level_scaled == temp) {
mfd->bl_level = bkl_lvl;
} else {
pr_debug("backlight sent to panel :%d\n", temp);
pdata->set_backlight(pdata, temp);
mfd->bl_level = bkl_lvl;
mfd->bl_level_scaled = temp;
mfd->bl_updated = 1;
bl_notify_needed = true;
}
if (bl_notify_needed)
mdss_fb_bl_update_notify(mfd);
}
}
void mdss_fb_update_backlight(struct msm_fb_data_type *mfd)
{
struct mdss_panel_data *pdata;
u32 temp;
bool bl_notify = false;
if (!mfd->unset_bl_level)
return;
mutex_lock(&mfd->bl_lock);
if (!mfd->bl_updated) {
pdata = dev_get_platdata(&mfd->pdev->dev);
if ((pdata) && (pdata->set_backlight)) {
mfd->bl_level = mfd->unset_bl_level;
temp = mfd->bl_level;
if (mfd->mdp.ad_calc_bl)
(*mfd->mdp.ad_calc_bl)(mfd, temp, &temp,
&bl_notify);
pdata->set_backlight(pdata, temp);
mfd->bl_level_scaled = mfd->unset_bl_level;
mfd->bl_updated = 1;
mdss_fb_bl_update_notify(mfd);
}
}
mutex_unlock(&mfd->bl_lock);
}
static int mdss_fb_start_disp_thread(struct msm_fb_data_type *mfd)
{
int ret = 0;
pr_debug("%pS: start display thread fb%d\n",
__builtin_return_address(0), mfd->index);
mdss_fb_get_split(mfd);
atomic_set(&mfd->commits_pending, 0);
mfd->disp_thread = kthread_run(__mdss_fb_display_thread,
mfd, "mdss_fb%d", mfd->index);
if (IS_ERR(mfd->disp_thread)) {
pr_err("ERROR: unable to start display thread %d\n",
mfd->index);
ret = PTR_ERR(mfd->disp_thread);
mfd->disp_thread = NULL;
}
return ret;
}
static void mdss_fb_stop_disp_thread(struct msm_fb_data_type *mfd)
{
pr_debug("%pS: stop display thread fb%d\n",
__builtin_return_address(0), mfd->index);
kthread_stop(mfd->disp_thread);
mfd->disp_thread = NULL;
}
static int mdss_fb_unblank_sub(struct msm_fb_data_type *mfd)
{
int ret = 0;
int cur_power_state;
if (!mfd)
return -EINVAL;
/* Start Display thread */
if (mfd->disp_thread == NULL) {
ret = mdss_fb_start_disp_thread(mfd);
if (IS_ERR_VALUE(ret))
return ret;
}
cur_power_state = mfd->panel_power_state;
if (!mdss_panel_is_power_on_interactive(cur_power_state) &&
mfd->mdp.on_fnc) {
ret = mfd->mdp.on_fnc(mfd);
if (ret == 0) {
mfd->panel_power_state = MDSS_PANEL_POWER_ON;
mfd->panel_info->panel_dead = false;
} else if (mfd->disp_thread) {
mdss_fb_stop_disp_thread(mfd);
goto error;
}
mutex_lock(&mfd->update.lock);
mfd->update.type = NOTIFY_TYPE_UPDATE;
mfd->update.is_suspend = 0;
mutex_unlock(&mfd->update.lock);
/* Start the work thread to signal idle time */
if (mfd->idle_time)
schedule_delayed_work(&mfd->idle_notify_work,
msecs_to_jiffies(mfd->idle_time));
}
error:
return ret;
}
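/*
* mdss_fb_blank_sub() - map an fb blank mode onto a panel power state.
*
* FB_BLANK_UNBLANK powers the panel on, FB_BLANK_VSYNC_SUSPEND requests
* the low-power doze state (command-mode panels only), and every other
* blank mode powers the panel off. On a successful doze request the code
* deliberately falls through into the powerdown path with req_power_state
* set to MDSS_PANEL_POWER_DOZE rather than _OFF.
*/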
static int mdss_fb_blank_sub(int blank_mode, struct fb_info *info,
int op_enable)
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
int ret = 0;
int cur_power_state, req_power_state = MDSS_PANEL_POWER_OFF;
if (!mfd || !op_enable)
return -EPERM;
if (mfd->dcm_state == DCM_ENTER)
return -EPERM;
pr_debug("%pS mode:%d\n", __builtin_return_address(0),
blank_mode);
cur_power_state = mfd->panel_power_state;
/*
* If doze mode is requested for video mode panels, treat
* the request as full unblank as there are no low power mode
* settings for video mode panels.
*/
if ((FB_BLANK_VSYNC_SUSPEND == blank_mode) &&
(mfd->panel_info->type != MIPI_CMD_PANEL)) {
pr_debug("Doze mode only valid for cmd mode panels\n");
if (mdss_panel_is_power_on(cur_power_state))
return 0;
else
blank_mode = FB_BLANK_UNBLANK;
}
switch (blank_mode) {
case FB_BLANK_UNBLANK:
pr_debug("unblank called. cur pwr state=%d\n", cur_power_state);
ret = mdss_fb_unblank_sub(mfd);
break;
case FB_BLANK_VSYNC_SUSPEND:
req_power_state = MDSS_PANEL_POWER_DOZE;
pr_debug("Doze power mode requested\n");
/*
* If doze mode is requested when panel is already off,
* then first unblank the panel before entering doze mode
*/
if (mdss_fb_is_power_off(mfd) && mfd->mdp.on_fnc) {
pr_debug("off --> doze. switch to on first\n");
ret = mdss_fb_unblank_sub(mfd);
}
/* Enter doze mode only if unblank succeeded */
if (ret)
break;
case FB_BLANK_HSYNC_SUSPEND:
case FB_BLANK_NORMAL:
case FB_BLANK_POWERDOWN:
default:
pr_debug("blank powerdown called. cur mode=%d, req mode=%d\n",
cur_power_state, req_power_state);
if (mdss_fb_is_power_on(mfd) && mfd->mdp.off_fnc) {
int bl_level_old;
cur_power_state = mfd->panel_power_state;
mutex_lock(&mfd->update.lock);
mfd->update.type = NOTIFY_TYPE_SUSPEND;
mfd->update.is_suspend = 1;
mutex_unlock(&mfd->update.lock);
complete(&mfd->update.comp);
del_timer(&mfd->no_update.timer);
mfd->no_update.value = NOTIFY_TYPE_SUSPEND;
complete(&mfd->no_update.comp);
mfd->op_enable = false;
mutex_lock(&mfd->bl_lock);
if (mdss_panel_is_power_off(req_power_state)) {
/* Stop Display thread */
if (mfd->disp_thread)
mdss_fb_stop_disp_thread(mfd);
if (mfd->bl_updated)
bl_level_old = mfd->bl_level;
else
bl_level_old = mfd->unset_bl_level;
mdss_fb_set_backlight(mfd, 0);
mfd->unset_bl_level = bl_level_old;
mfd->bl_updated = 0;
if (mfd->shutdown_pending &&
mfd->panel_info->bl_shutdown_delay)
usleep_range(
mfd->panel_info->bl_shutdown_delay
* 1000,
mfd->panel_info->bl_shutdown_delay
* 1000);
}
mfd->panel_power_state = req_power_state;
mutex_unlock(&mfd->bl_lock);
ret = mfd->mdp.off_fnc(mfd);
if (ret)
mfd->panel_power_state = cur_power_state;
else if (mdss_panel_is_power_off(req_power_state))
mdss_fb_release_fences(mfd);
mfd->op_enable = true;
complete(&mfd->power_off_comp);
}
break;
}
/* Notify listeners */
sysfs_notify(&mfd->fbi->dev->kobj, NULL, "show_blank_event");
return ret;
}
static int mdss_fb_blank(int blank_mode, struct fb_info *info)
{
struct mdss_panel_data *pdata;
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
mdss_fb_pan_idle(mfd);
if (mfd->op_enable == 0) {
if (blank_mode == FB_BLANK_UNBLANK)
mfd->suspend.panel_power_state = MDSS_PANEL_POWER_ON;
else if (blank_mode == FB_BLANK_VSYNC_SUSPEND)
mfd->suspend.panel_power_state = MDSS_PANEL_POWER_DOZE;
else
mfd->suspend.panel_power_state = MDSS_PANEL_POWER_OFF;
return 0;
}
pr_debug("mode: %d\n", blank_mode);
pdata = dev_get_platdata(&mfd->pdev->dev);
if (pdata->panel_info.is_lpm_mode &&
blank_mode == FB_BLANK_UNBLANK) {
pr_debug("panel is in lpm mode\n");
mfd->mdp.configure_panel(mfd, 0);
mdss_fb_set_mdp_sync_pt_threshold(mfd);
pdata->panel_info.is_lpm_mode = false;
}
return mdss_fb_blank_sub(blank_mode, info, mfd->op_enable);
}
static inline int mdss_fb_create_ion_client(struct msm_fb_data_type *mfd)
{
mfd->fb_ion_client = msm_ion_client_create("mdss_fb_iclient");
if (IS_ERR_OR_NULL(mfd->fb_ion_client)) {
pr_err("Err:client not created, val %d\n",
PTR_RET(mfd->fb_ion_client));
mfd->fb_ion_client = NULL;
return PTR_RET(mfd->fb_ion_client);
}
return 0;
}
void mdss_fb_free_fb_ion_memory(struct msm_fb_data_type *mfd)
{
if (!mfd) {
pr_err("no mfd\n");
return;
}
if (!mfd->fbi->screen_base)
return;
if (!mfd->fb_ion_client || !mfd->fb_ion_handle) {
pr_err("invalid input parameters for fb%d\n", mfd->index);
return;
}
mfd->fbi->screen_base = NULL;
mfd->fbi->fix.smem_start = 0;
ion_unmap_kernel(mfd->fb_ion_client, mfd->fb_ion_handle);
if (mfd->mdp.fb_mem_get_iommu_domain) {
ion_unmap_iommu(mfd->fb_ion_client, mfd->fb_ion_handle,
mfd->mdp.fb_mem_get_iommu_domain(), 0);
}
dma_buf_put(mfd->fbmem_buf);
ion_free(mfd->fb_ion_client, mfd->fb_ion_handle);
mfd->fb_ion_handle = NULL;
}
int mdss_fb_alloc_fb_ion_memory(struct msm_fb_data_type *mfd, size_t fb_size)
{
unsigned long buf_size;
int rc;
void *vaddr;
if (!mfd) {
pr_err("Invalid input param - no mfd\n");
return -EINVAL;
}
if (!mfd->fb_ion_client) {
rc = mdss_fb_create_ion_client(mfd);
if (rc < 0) {
pr_err("fb ion client couldn't be created - %d\n", rc);
return rc;
}
}
pr_debug("size for mmap = %zu\n", fb_size);
mfd->fb_ion_handle = ion_alloc(mfd->fb_ion_client, fb_size, SZ_4K,
ION_HEAP(ION_SYSTEM_HEAP_ID), 0);
if (IS_ERR_OR_NULL(mfd->fb_ion_handle)) {
pr_err("unable to alloc fbmem from ion - %ld\n",
PTR_ERR(mfd->fb_ion_handle));
return PTR_ERR(mfd->fb_ion_handle);
}
if (mfd->mdp.fb_mem_get_iommu_domain) {
rc = ion_map_iommu(mfd->fb_ion_client, mfd->fb_ion_handle,
mfd->mdp.fb_mem_get_iommu_domain(), 0, SZ_4K, 0,
&mfd->iova, &buf_size, 0, 0);
if (rc) {
pr_err("Cannot map fb_mem to IOMMU. rc=%d\n", rc);
goto fb_mmap_failed;
}
} else {
pr_err("No IOMMU Domain\n");
goto fb_mmap_failed;
}
mfd->fbmem_buf = ion_share_dma_buf(mfd->fb_ion_client,
mfd->fb_ion_handle);
vaddr = ion_map_kernel(mfd->fb_ion_client, mfd->fb_ion_handle);
if (IS_ERR_OR_NULL(vaddr)) {
pr_err("ION memory mapping failed - %ld\n", PTR_ERR(vaddr));
rc = PTR_ERR(vaddr);
if (mfd->mdp.fb_mem_get_iommu_domain) {
ion_unmap_iommu(mfd->fb_ion_client, mfd->fb_ion_handle,
mfd->mdp.fb_mem_get_iommu_domain(), 0);
}
goto fb_mmap_failed;
}
pr_debug("alloc 0x%zuB vaddr = %p (%pa iova) for fb%d\n", fb_size,
vaddr, &mfd->iova, mfd->index);
mfd->fbi->screen_base = (char *) vaddr;
mfd->fbi->fix.smem_start = (unsigned int) mfd->iova;
mfd->fbi->fix.smem_len = fb_size;
return rc;
fb_mmap_failed:
ion_free(mfd->fb_ion_client, mfd->fb_ion_handle);
return rc;
}
/**
* mdss_fb_fbmem_ion_mmap() - Custom fb mmap() function for MSM driver.
*
* @info - Framebuffer info.
* @vma - VM area which is part of the process virtual memory.
*
* This framebuffer mmap function differs from the standard mmap() function by
* allowing customized page protection and by dynamically allocating framebuffer
* memory from the system heap and mapping it to an iommu virtual address.
*
* Return: 0 on success, negative errno on failure; the mapping is set up
* in @vma.
*/
static int mdss_fb_fbmem_ion_mmap(struct fb_info *info,
struct vm_area_struct *vma)
{
int rc = 0;
size_t req_size, fb_size;
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
struct sg_table *table;
unsigned long addr = vma->vm_start;
unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
struct scatterlist *sg;
unsigned int i;
struct page *page;
if (!mfd || !mfd->pdev || !mfd->pdev->dev.of_node) {
pr_err("Invalid device node\n");
return -ENODEV;
}
req_size = vma->vm_end - vma->vm_start;
fb_size = mfd->fbi->fix.smem_len;
if (req_size > fb_size) {
pr_warn("requested map is greater than framebuffer\n");
return -EOVERFLOW;
}
if (!mfd->fbi->screen_base) {
rc = mdss_fb_alloc_fb_ion_memory(mfd, fb_size);
if (rc < 0) {
pr_err("fb mmap failed!!!!\n");
return rc;
}
}
table = ion_sg_table(mfd->fb_ion_client, mfd->fb_ion_handle);
if (IS_ERR(table)) {
pr_err("Unable to get sg_table from ion:%ld\n", PTR_ERR(table));
mfd->fbi->screen_base = NULL;
return PTR_ERR(table);
} else if (!table) {
pr_err("sg_list is NULL\n");
mfd->fbi->screen_base = NULL;
return -EINVAL;
}
page = sg_page(table->sgl);
if (page) {
for_each_sg(table->sgl, sg, table->nents, i) {
unsigned long remainder = vma->vm_end - addr;
unsigned long len = sg->length;
page = sg_page(sg);
if (offset >= sg->length) {
offset -= sg->length;
continue;
} else if (offset) {
page += offset / PAGE_SIZE;
len = sg->length - offset;
offset = 0;
}
len = min(len, remainder);
if (mfd->mdp_fb_page_protection ==
MDP_FB_PAGE_PROTECTION_WRITECOMBINE)
vma->vm_page_prot =
pgprot_writecombine(vma->vm_page_prot);
pr_debug("vma=%p, addr=%x len=%ld\n",
vma, (unsigned int)addr, len);
pr_debug("vm_start=%x vm_end=%x vm_page_prot=%ld\n",
(unsigned int)vma->vm_start,
(unsigned int)vma->vm_end,
(unsigned long int)vma->vm_page_prot);
io_remap_pfn_range(vma, addr, page_to_pfn(page), len,
vma->vm_page_prot);
addr += len;
if (addr >= vma->vm_end)
break;
}
} else {
pr_err("PAGE is null\n");
mdss_fb_free_fb_ion_memory(mfd);
return -ENOMEM;
}
return rc;
}
/*
* mdss_fb_physical_mmap() - Custom fb mmap() function for MSM driver.
*
* @info - Framebuffer info.
* @vma - VM area which is part of the process virtual memory.
*
* This framebuffer mmap function differs from the standard mmap() function
* in that it maps the framebuffer memory from the CMA region allocated at
* boot.
*
* Return: 0 on success, negative errno on failure; the mapping is set up
* in @vma.
*/
static int mdss_fb_physical_mmap(struct fb_info *info,
struct vm_area_struct *vma)
{
/* Get frame buffer memory range. */
unsigned long start = info->fix.smem_start;
u32 len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.smem_len);
unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
if (!start) {
pr_warn("No framebuffer memory is allocated\n");
return -ENOMEM;
}
/* Set VM flags. */
start &= PAGE_MASK;
if ((vma->vm_end <= vma->vm_start) ||
(off >= len) ||
((vma->vm_end - vma->vm_start) > (len - off)))
return -EINVAL;
off += start;
if (off < start)
return -EINVAL;
vma->vm_pgoff = off >> PAGE_SHIFT;
/* This is an IO map - tell maydump to skip this VMA */
vma->vm_flags |= VM_IO;
if (mfd->mdp_fb_page_protection == MDP_FB_PAGE_PROTECTION_WRITECOMBINE)
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
/* Remap the frame buffer I/O range */
if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
vma->vm_end - vma->vm_start,
vma->vm_page_prot))
return -EAGAIN;
return 0;
}
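/*
* mdss_fb_mmap() - dispatch to the ION or physical mmap path. If no mmap
* type has been chosen yet, the first mmap picks ION allocation when no
* physical fbmem was reserved (smem_start == 0 and no ion handle), and the
* physical CMA path otherwise; the choice then sticks in mfd->fb_mmap_type.
*/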
static int mdss_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
int rc = -EINVAL;
if (mfd->fb_mmap_type == MDP_FB_MMAP_ION_ALLOC) {
rc = mdss_fb_fbmem_ion_mmap(info, vma);
} else if (mfd->fb_mmap_type == MDP_FB_MMAP_PHYSICAL_ALLOC) {
rc = mdss_fb_physical_mmap(info, vma);
} else {
if (!info->fix.smem_start && !mfd->fb_ion_handle) {
rc = mdss_fb_fbmem_ion_mmap(info, vma);
mfd->fb_mmap_type = MDP_FB_MMAP_ION_ALLOC;
} else {
rc = mdss_fb_physical_mmap(info, vma);
mfd->fb_mmap_type = MDP_FB_MMAP_PHYSICAL_ALLOC;
}
}
if (rc < 0)
pr_err("fb mmap failed with rc = %d\n", rc);
return rc;
}
static struct fb_ops mdss_fb_ops = {
.owner = THIS_MODULE,
.fb_open = mdss_fb_open,
.fb_release = mdss_fb_release,
.fb_check_var = mdss_fb_check_var, /* vinfo check */
.fb_set_par = mdss_fb_set_par, /* set the video mode */
.fb_blank = mdss_fb_blank, /* blank display */
.fb_pan_display = mdss_fb_pan_display, /* pan display */
.fb_ioctl = mdss_fb_ioctl, /* perform fb specific ioctl */
#ifdef CONFIG_COMPAT
.fb_compat_ioctl = mdss_fb_compat_ioctl,
#endif
.fb_mmap = mdss_fb_mmap,
};
static int mdss_fb_alloc_fbmem_iommu(struct msm_fb_data_type *mfd, int dom)
{
void *virt = NULL;
phys_addr_t phys = 0;
size_t size = 0;
struct platform_device *pdev = mfd->pdev;
int rc = 0;
struct device_node *fbmem_pnode = NULL;
if (!pdev || !pdev->dev.of_node) {
pr_err("Invalid device node\n");
return -ENODEV;
}
fbmem_pnode = of_parse_phandle(pdev->dev.of_node,
"linux,contiguous-region", 0);
if (!fbmem_pnode) {
pr_debug("fbmem is not reserved for %s\n", pdev->name);
mfd->fbi->screen_base = NULL;
mfd->fbi->fix.smem_start = 0;
return 0;
} else {
const u32 *addr;
u64 len;
addr = of_get_address(fbmem_pnode, 0, &len, NULL);
if (!addr) {
pr_err("fbmem size is not specified\n");
of_node_put(fbmem_pnode);
return -EINVAL;
}
size = (size_t)len;
of_node_put(fbmem_pnode);
}
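/* A minimal device-tree sketch (node names assumed; only the
* "linux,contiguous-region" phandle and the region size from its first
* address entry matter to the code above):
* fb_mem: fb_mem_region { reg = <0x0 0x01000000>; };
* &mdss_fb0 { linux,contiguous-region = <&fb_mem>; };
*/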
pr_debug("%s frame buffer reserve_size=0x%zx\n", __func__, size);
if (size < PAGE_ALIGN(mfd->fbi->fix.line_length *
mfd->fbi->var.yres_virtual))
pr_warn("reserve size is smaller than framebuffer size\n");
virt = dma_alloc_coherent(&pdev->dev, size, &phys, GFP_KERNEL);
if (!virt) {
pr_err("unable to alloc fbmem size=%zx\n", size);
return -ENOMEM;
}
if (MDSS_LPAE_CHECK(phys)) {
pr_warn("fb mem phys %pa > 4GB is not supported.\n", &phys);
dma_free_coherent(&pdev->dev, size, virt, phys);
return -ERANGE;
}
rc = msm_iommu_map_contig_buffer(phys, dom, 0, size, SZ_4K, 0,
&mfd->iova);
if (rc)
pr_warn("Cannot map fb_mem %pa to IOMMU. rc=%d\n", &phys, rc);
pr_debug("alloc 0x%zxB @ (%pa phys) (0x%p virt) (%pa iova) for fb%d\n",
size, &phys, virt, &mfd->iova, mfd->index);
mfd->fbi->screen_base = virt;
mfd->fbi->fix.smem_start = phys;
mfd->fbi->fix.smem_len = size;
return 0;
}
static int mdss_fb_alloc_fbmem(struct msm_fb_data_type *mfd)
{
if (mfd->mdp.fb_mem_alloc_fnc) {
return mfd->mdp.fb_mem_alloc_fnc(mfd);
} else if (mfd->mdp.fb_mem_get_iommu_domain) {
int dom = mfd->mdp.fb_mem_get_iommu_domain();
if (dom >= 0)
return mdss_fb_alloc_fbmem_iommu(mfd, dom);
else
return -ENOMEM;
} else {
pr_err("no fb memory allocator function defined\n");
return -ENOMEM;
}
}
static int mdss_fb_register(struct msm_fb_data_type *mfd)
{
int ret = -ENODEV;
int bpp;
struct mdss_panel_info *panel_info = mfd->panel_info;
struct fb_info *fbi = mfd->fbi;
struct fb_fix_screeninfo *fix;
struct fb_var_screeninfo *var;
int *id;
/*
* fb info initialization
*/
fix = &fbi->fix;
var = &fbi->var;
fix->type_aux = 0; /* if type == FB_TYPE_INTERLEAVED_PLANES */
fix->visual = FB_VISUAL_TRUECOLOR; /* True Color */
fix->ywrapstep = 0; /* No support */
fix->mmio_start = 0; /* No MMIO Address */
fix->mmio_len = 0; /* No MMIO Address */
fix->accel = FB_ACCEL_NONE;/* FB_ACCEL_MSM needs to be added in fb.h */
var->xoffset = 0; /* Offset from virtual to visible */
var->yoffset = 0; /* resolution */
var->grayscale = 0; /* No graylevels */
var->nonstd = 0; /* standard pixel format */
var->activate = FB_ACTIVATE_VBL; /* activate it at vsync */
var->height = -1; /* height of picture in mm */
var->width = -1; /* width of picture in mm */
var->accel_flags = 0; /* acceleration flags */
var->sync = 0; /* see FB_SYNC_* */
var->rotate = 0; /* angle we rotate counter clockwise */
mfd->op_enable = false;
switch (mfd->fb_imgType) {
case MDP_RGB_565:
fix->type = FB_TYPE_PACKED_PIXELS;
fix->xpanstep = 1;
fix->ypanstep = 1;
var->vmode = FB_VMODE_NONINTERLACED;
var->blue.offset = 0;
var->green.offset = 5;
var->red.offset = 11;
var->blue.length = 5;
var->green.length = 6;
var->red.length = 5;
var->blue.msb_right = 0;
var->green.msb_right = 0;
var->red.msb_right = 0;
var->transp.offset = 0;
var->transp.length = 0;
bpp = 2;
break;
case MDP_RGB_888:
fix->type = FB_TYPE_PACKED_PIXELS;
fix->xpanstep = 1;
fix->ypanstep = 1;
var->vmode = FB_VMODE_NONINTERLACED;
var->blue.offset = 0;
var->green.offset = 8;
var->red.offset = 16;
var->blue.length = 8;
var->green.length = 8;
var->red.length = 8;
var->blue.msb_right = 0;
var->green.msb_right = 0;
var->red.msb_right = 0;
var->transp.offset = 0;
var->transp.length = 0;
bpp = 3;
break;
case MDP_ARGB_8888:
fix->type = FB_TYPE_PACKED_PIXELS;
fix->xpanstep = 1;
fix->ypanstep = 1;
var->vmode = FB_VMODE_NONINTERLACED;
var->blue.offset = 24;
var->green.offset = 16;
var->red.offset = 8;
var->blue.length = 8;
var->green.length = 8;
var->red.length = 8;
var->blue.msb_right = 0;
var->green.msb_right = 0;
var->red.msb_right = 0;
var->transp.offset = 0;
var->transp.length = 8;
bpp = 4;
break;
case MDP_RGBA_8888:
fix->type = FB_TYPE_PACKED_PIXELS;
fix->xpanstep = 1;
fix->ypanstep = 1;
var->vmode = FB_VMODE_NONINTERLACED;
var->blue.offset = 16;
var->green.offset = 8;
var->red.offset = 0;
var->blue.length = 8;
var->green.length = 8;
var->red.length = 8;
var->blue.msb_right = 0;
var->green.msb_right = 0;
var->red.msb_right = 0;
var->transp.offset = 24;
var->transp.length = 8;
bpp = 4;
break;
case MDP_YCRYCB_H2V1:
fix->type = FB_TYPE_INTERLEAVED_PLANES;
fix->xpanstep = 2;
fix->ypanstep = 1;
var->vmode = FB_VMODE_NONINTERLACED;
/* how about R/G/B offset? */
var->blue.offset = 0;
var->green.offset = 5;
var->red.offset = 11;
var->blue.length = 5;
var->green.length = 6;
var->red.length = 5;
var->blue.msb_right = 0;
var->green.msb_right = 0;
var->red.msb_right = 0;
var->transp.offset = 0;
var->transp.length = 0;
bpp = 2;
break;
default:
pr_err("msm_fb_init: fb %d unkown image type!\n",
mfd->index);
return ret;
}
var->xres = mdss_fb_get_panel_xres(panel_info);
fix->type = panel_info->is_3d_panel;
if (mfd->mdp.fb_stride)
fix->line_length = mfd->mdp.fb_stride(mfd->index, var->xres,
bpp);
else
fix->line_length = var->xres * bpp;
var->yres = panel_info->yres;
if (panel_info->physical_width)
var->width = panel_info->physical_width;
if (panel_info->physical_height)
var->height = panel_info->physical_height;
var->xres_virtual = var->xres;
var->yres_virtual = panel_info->yres * mfd->fb_page;
var->bits_per_pixel = bpp * 8; /* FrameBuffer color depth */
var->upper_margin = panel_info->lcdc.v_back_porch;
var->lower_margin = panel_info->lcdc.v_front_porch;
var->vsync_len = panel_info->lcdc.v_pulse_width;
var->left_margin = panel_info->lcdc.h_back_porch;
var->right_margin = panel_info->lcdc.h_front_porch;
var->hsync_len = panel_info->lcdc.h_pulse_width;
var->pixclock = panel_info->clk_rate / 1000;
/*
* Populate smem length here for uspace to get the
* Framebuffer size when FBIO_FSCREENINFO ioctl is
* called.
*/
fix->smem_len = PAGE_ALIGN(fix->line_length * var->yres) * mfd->fb_page;
/* id field for fb app */
id = (int *)&mfd->panel;
snprintf(fix->id, sizeof(fix->id), "mdssfb_%x", (u32) *id);
fbi->fbops = &mdss_fb_ops;
fbi->flags = FBINFO_FLAG_DEFAULT;
fbi->pseudo_palette = mdss_fb_pseudo_palette;
mfd->ref_cnt = 0;
mfd->panel_power_state = MDSS_PANEL_POWER_OFF;
mfd->dcm_state = DCM_UNINIT;
if (mdss_fb_alloc_fbmem(mfd))
pr_warn("unable to allocate fb memory in fb register\n");
mfd->op_enable = true;
mutex_init(&mfd->update.lock);
mutex_init(&mfd->no_update.lock);
mutex_init(&mfd->mdp_sync_pt_data.sync_mutex);
atomic_set(&mfd->mdp_sync_pt_data.commit_cnt, 0);
atomic_set(&mfd->commits_pending, 0);
atomic_set(&mfd->ioctl_ref_cnt, 0);
atomic_set(&mfd->kickoff_pending, 0);
init_timer(&mfd->no_update.timer);
mfd->no_update.timer.function = mdss_fb_no_update_notify_timer_cb;
mfd->no_update.timer.data = (unsigned long)mfd;
mfd->update.ref_count = 0;
mfd->no_update.ref_count = 0;
mfd->update.init_done = false;
init_completion(&mfd->update.comp);
init_completion(&mfd->no_update.comp);
init_completion(&mfd->power_off_comp);
init_completion(&mfd->power_set_comp);
init_waitqueue_head(&mfd->commit_wait_q);
init_waitqueue_head(&mfd->idle_wait_q);
init_waitqueue_head(&mfd->ioctl_q);
init_waitqueue_head(&mfd->kickoff_wait_q);
ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
if (ret)
pr_err("fb_alloc_cmap() failed!\n");
if (register_framebuffer(fbi) < 0) {
fb_dealloc_cmap(&fbi->cmap);
mfd->op_enable = false;
return -EPERM;
}
pr_info("FrameBuffer[%d] %dx%d registered successfully!\n", mfd->index,
fbi->var.xres, fbi->var.yres);
return 0;
}
/**
* mdss_fb_release_file_entry() - Releases file node entry from list
* @info: Frame buffer info
* @pinfo: Process list node in which file node entry is going to
* be removed
* @release_all: Releases all file node entries from list if this parameter
* is true
*
* This function is called to remove the file node entry/entries from main
* list. It also helps to find the process id if fb_open and fb_close
* callers are different.
*/
static struct mdss_fb_proc_info *mdss_fb_release_file_entry(
struct fb_info *info,
struct mdss_fb_proc_info *pinfo, bool release_all)
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
struct mdss_fb_file_info *file_info = NULL, *temp_file_info = NULL;
struct mdss_fb_proc_info *proc_info = NULL, *temp_proc_info = NULL;
struct file *file = info->file;
bool node_found = false;
if (!pinfo && release_all) {
pr_err("process node not provided for release all case\n");
goto end;
}
if (pinfo) {
proc_info = pinfo;
list_for_each_entry_safe(file_info, temp_file_info,
&pinfo->file_list, list) {
if (!release_all && file_info->file != file)
continue;
list_del(&file_info->list);
kfree(file_info);
node_found = true;
if (!release_all)
break;
}
}
if (!node_found) {
list_for_each_entry_safe(proc_info, temp_proc_info,
&mfd->proc_list, list) {
list_for_each_entry_safe(file_info, temp_file_info,
&proc_info->file_list, list) {
if (file_info->file == file) {
list_del(&file_info->list);
kfree(file_info);
goto end;
}
}
}
}
end:
return proc_info;
}
static int mdss_fb_open(struct fb_info *info, int user)
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
struct mdss_fb_proc_info *pinfo = NULL;
struct mdss_fb_file_info *file_info = NULL;
int result;
int pid = current->tgid;
struct task_struct *task = current->group_leader;
if (mfd->shutdown_pending) {
pr_err("Shutdown pending. Aborting operation. Request from pid:%d name=%s\n",
pid, task->comm);
return -EPERM;
}
file_info = kmalloc(sizeof(*file_info), GFP_KERNEL);
if (!file_info) {
pr_err("unable to alloc file info\n");
return -ENOMEM;
}
list_for_each_entry(pinfo, &mfd->proc_list, list) {
if (pinfo->pid == pid)
break;
}
if ((pinfo == NULL) || (pinfo->pid != pid)) {
pinfo = kmalloc(sizeof(*pinfo), GFP_KERNEL);
if (!pinfo) {
pr_err("unable to alloc process info\n");
kfree(file_info);
return -ENOMEM;
}
pinfo->pid = pid;
pinfo->ref_cnt = 0;
list_add(&pinfo->list, &mfd->proc_list);
INIT_LIST_HEAD(&pinfo->file_list);
pr_debug("new process entry pid=%d\n", pinfo->pid);
}
file_info->file = info->file;
list_add(&file_info->list, &pinfo->file_list);
result = pm_runtime_get_sync(info->dev);
if (result < 0) {
pr_err("pm_runtime: fail to wake up\n");
goto pm_error;
}
if (!mfd->ref_cnt) {
result = mdss_fb_blank_sub(FB_BLANK_UNBLANK, info,
mfd->op_enable);
if (result) {
pr_err("can't turn on fb%d! rc=%d\n", mfd->index,
result);
goto blank_error;
}
}
pinfo->ref_cnt++;
mfd->ref_cnt++;
return 0;
blank_error:
pm_runtime_put(info->dev);
pm_error:
if (pinfo && !pinfo->ref_cnt) {
list_del(&pinfo->list);
kfree(pinfo);
}
list_del(&file_info->list);
kfree(file_info);
return result;
}
static int mdss_fb_release_all(struct fb_info *info, bool release_all)
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
struct mdss_fb_proc_info *pinfo = NULL, *temp_pinfo = NULL;
struct mdss_fb_proc_info *proc_info = NULL;
int ret = 0;
int pid = current->tgid;
bool unknown_pid = true, release_needed = false;
struct task_struct *task = current->group_leader;
if (!mfd->ref_cnt) {
pr_info("try to close unopened fb %d! from %s\n", mfd->index,
task->comm);
return -EINVAL;
}
if (!wait_event_timeout(mfd->ioctl_q,
!atomic_read(&mfd->ioctl_ref_cnt) || !release_all,
msecs_to_jiffies(1000)))
pr_warn("fb%d ioctl could not finish. waited 1 sec.\n",
mfd->index);
mdss_fb_pan_idle(mfd);
pr_debug("release_all = %s\n", release_all ? "true" : "false");
list_for_each_entry_safe(pinfo, temp_pinfo, &mfd->proc_list, list) {
if (!release_all && (pinfo->pid != pid))
continue;
unknown_pid = false;
pr_debug("found process %s pid=%d mfd->ref=%d pinfo->ref=%d\n",
task->comm, pinfo->pid, mfd->ref_cnt, pinfo->ref_cnt);
proc_info = mdss_fb_release_file_entry(info, pinfo,
release_all);
/*
* if fb_release is called from a different known process, then
* release the ref_count of the original proc_info instead of the
* current process.
*/
if (!release_all && proc_info && proc_info != pinfo) {
pr_info("fb_release called from different process for current file node\n");
pinfo = proc_info;
}
do {
if (mfd->ref_cnt < pinfo->ref_cnt)
pr_warn("WARN:mfd->ref=%d < pinfo->ref=%d\n",
mfd->ref_cnt, pinfo->ref_cnt);
else
mfd->ref_cnt--;
pinfo->ref_cnt--;
pm_runtime_put(info->dev);
} while (release_all && pinfo->ref_cnt);
/* we need to stop display thread before release */
if (release_all && mfd->disp_thread)
mdss_fb_stop_disp_thread(mfd);
if (pinfo->ref_cnt == 0) {
list_del(&pinfo->list);
kfree(pinfo);
release_needed = !release_all;
}
if (!release_all)
break;
}
if (unknown_pid) {
pinfo = mdss_fb_release_file_entry(info, NULL, false);
if (pinfo) {
pr_debug("found known pid=%d reference for unknown caller pid=%d\n",
pinfo->pid, pid);
pid = pinfo->pid;
mfd->ref_cnt--;
pinfo->ref_cnt--;
pm_runtime_put(info->dev);
if (!pinfo->ref_cnt) {
list_del(&pinfo->list);
kfree(pinfo);
release_needed = true;
}
} else {
WARN("unknown caller:: process %s mfd->ref=%d\n",
task->comm, mfd->ref_cnt);
}
}
if (release_needed) {
pr_debug("current process=%s pid=%d known pid=%d mfd->ref=%d\n",
task->comm, current->tgid, pid, mfd->ref_cnt);
if (mfd->mdp.release_fnc) {
ret = mfd->mdp.release_fnc(mfd, false, pid);
if (ret)
pr_err("error releasing fb%d for current pid=%d known pid=%d\n",
mfd->index, current->tgid, pid);
}
} else if (release_all && mfd->ref_cnt) {
pr_err("reference count mismatch with proc list entries\n");
}
if (!mfd->ref_cnt) {
if (mfd->mdp.release_fnc) {
ret = mfd->mdp.release_fnc(mfd, true, pid);
if (ret)
pr_err("error fb%d release current process=%s pid=%d known pid=%d\n",
mfd->index, task->comm, current->tgid, pid);
}
if (mfd->fb_ion_handle)
mdss_fb_free_fb_ion_memory(mfd);
ret = mdss_fb_blank_sub(FB_BLANK_POWERDOWN, info,
mfd->op_enable);
if (ret) {
pr_err("can't turn off fb%d! rc=%d current process=%s pid=%d known pid=%d\n",
mfd->index, ret, task->comm, current->tgid, pid);
return ret;
}
atomic_set(&mfd->ioctl_ref_cnt, 0);
}
return ret;
}
static int mdss_fb_release(struct fb_info *info, int user)
{
return mdss_fb_release_all(info, false);
}
static void mdss_fb_power_setting_idle(struct msm_fb_data_type *mfd)
{
int ret;
if (mfd->is_power_setting) {
ret = wait_for_completion_timeout(
&mfd->power_set_comp,
msecs_to_jiffies(WAIT_DISP_OP_TIMEOUT));
if (ret < 0)
ret = -ERESTARTSYS;
else if (!ret)
pr_err("%s wait for power_set_comp timeout %d %d",
__func__, ret, mfd->is_power_setting);
if (ret <= 0) {
mfd->is_power_setting = false;
complete_all(&mfd->power_set_comp);
}
}
}
static void __mdss_fb_copy_fence(struct msm_sync_pt_data *sync_pt_data,
struct sync_fence **fences, u32 *fence_cnt)
{
pr_debug("%s: wait for fences\n", sync_pt_data->fence_name);
mutex_lock(&sync_pt_data->sync_mutex);
/*
* Assuming that acq_fen_cnt is sanitized in bufsync ioctl
* to check for sync_pt_data->acq_fen_cnt <= MDP_MAX_FENCE_FD
*/
*fence_cnt = sync_pt_data->acq_fen_cnt;
sync_pt_data->acq_fen_cnt = 0;
if (*fence_cnt)
memcpy(fences, sync_pt_data->acq_fen,
*fence_cnt * sizeof(struct sync_fence *));
mutex_unlock(&sync_pt_data->sync_mutex);
}
static void __mdss_fb_wait_for_fence_sub(struct msm_sync_pt_data *sync_pt_data,
struct sync_fence **fences, int fence_cnt)
{
int i, ret = 0;
/* buf sync */
for (i = 0; i < fence_cnt && !ret; i++) {
ret = sync_fence_wait(fences[i],
WAIT_FENCE_FIRST_TIMEOUT);
if (ret == -ETIME) {
pr_warn("%s: sync_fence_wait timed out! ",
sync_pt_data->fence_name);
pr_cont("Waiting %ld more seconds\n",
WAIT_FENCE_FINAL_TIMEOUT/MSEC_PER_SEC);
ret = sync_fence_wait(fences[i],
WAIT_FENCE_FINAL_TIMEOUT);
}
sync_fence_put(fences[i]);
}
if (ret < 0) {
pr_err("%s: sync_fence_wait failed! ret = %x\n",
sync_pt_data->fence_name, ret);
for (; i < fence_cnt; i++)
sync_fence_put(fences[i]);
}
}
int mdss_fb_wait_for_fence(struct msm_sync_pt_data *sync_pt_data)
{
struct sync_fence *fences[MDP_MAX_FENCE_FD];
int fence_cnt = 0;
__mdss_fb_copy_fence(sync_pt_data, fences, &fence_cnt);
if (fence_cnt)
__mdss_fb_wait_for_fence_sub(sync_pt_data,
fences, fence_cnt);
return fence_cnt;
}
/**
* mdss_fb_signal_timeline() - signal a single release fence
* @sync_pt_data: Sync point data structure for the timeline which
* should be signaled.
*
* This is called after a frame has been pushed to display. This signals the
* timeline to release the fences associated with this frame.
*/
void mdss_fb_signal_timeline(struct msm_sync_pt_data *sync_pt_data)
{
mutex_lock(&sync_pt_data->sync_mutex);
if (atomic_add_unless(&sync_pt_data->commit_cnt, -1, 0) &&
sync_pt_data->timeline) {
sw_sync_timeline_inc(sync_pt_data->timeline, 1);
sync_pt_data->timeline_value++;
pr_debug("%s: buffer signaled! timeline val=%d remaining=%d\n",
sync_pt_data->fence_name, sync_pt_data->timeline_value,
atomic_read(&sync_pt_data->commit_cnt));
} else {
pr_debug("%s timeline signaled without commits val=%d\n",
sync_pt_data->fence_name, sync_pt_data->timeline_value);
}
mutex_unlock(&sync_pt_data->sync_mutex);
}
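/*
 * Illustrative sketch (not part of the driver): the commit path pairs each
 * queued frame with one future timeline step, and mdss_fb_signal_timeline()
 * above consumes that pairing one step at a time. The helper below is
 * hypothetical and only shows the producer side of the convention; it is
 * fenced off with #if 0 so it does not affect the build.
 */
#if 0
static void example_queue_frame(struct msm_sync_pt_data *sync_pt_data)
{
	mutex_lock(&sync_pt_data->sync_mutex);
	/* one pending commit == one future sw_sync_timeline_inc() step */
	atomic_inc(&sync_pt_data->commit_cnt);
	mutex_unlock(&sync_pt_data->sync_mutex);
	/* ...queue the frame; mdss_fb_signal_timeline() runs after display */
}
#endif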
/**
* mdss_fb_release_fences() - signal all pending release fences
* @mfd: Framebuffer data structure for display
*
* Release all currently pending release fences, including those that are in
the process of being committed.
*
* Note: this should only be called during close or suspend sequence.
*/
static void mdss_fb_release_fences(struct msm_fb_data_type *mfd)
{
struct msm_sync_pt_data *sync_pt_data = &mfd->mdp_sync_pt_data;
int val;
mutex_lock(&sync_pt_data->sync_mutex);
if (sync_pt_data->timeline) {
val = sync_pt_data->threshold +
atomic_read(&sync_pt_data->commit_cnt);
sw_sync_timeline_inc(sync_pt_data->timeline, val);
sync_pt_data->timeline_value += val;
atomic_set(&sync_pt_data->commit_cnt, 0);
}
mutex_unlock(&sync_pt_data->sync_mutex);
}
static void mdss_fb_release_kickoff(struct msm_fb_data_type *mfd)
{
if (mfd->wait_for_kickoff) {
atomic_set(&mfd->kickoff_pending, 0);
wake_up_all(&mfd->kickoff_wait_q);
}
}
/**
* __mdss_fb_sync_buf_done_callback() - process async display events
* @p: Notifier block registered for async events.
* @event: Event enum to identify the event.
* @data: Optional argument provided with the event.
*
* See enum mdp_notify_event for events handled.
*/
static int __mdss_fb_sync_buf_done_callback(struct notifier_block *p,
unsigned long event, void *data)
{
struct msm_sync_pt_data *sync_pt_data;
struct msm_fb_data_type *mfd;
int fence_cnt;
sync_pt_data = container_of(p, struct msm_sync_pt_data, notifier);
mfd = container_of(sync_pt_data, struct msm_fb_data_type,
mdp_sync_pt_data);
switch (event) {
case MDP_NOTIFY_FRAME_BEGIN:
if (mfd->idle_time)
mod_delayed_work(system_wq, &mfd->idle_notify_work,
msecs_to_jiffies(mfd->idle_time));
break;
case MDP_NOTIFY_FRAME_READY:
if (sync_pt_data->async_wait_fences &&
sync_pt_data->temp_fen_cnt) {
fence_cnt = sync_pt_data->temp_fen_cnt;
sync_pt_data->temp_fen_cnt = 0;
__mdss_fb_wait_for_fence_sub(sync_pt_data,
sync_pt_data->temp_fen, fence_cnt);
}
break;
case MDP_NOTIFY_FRAME_FLUSHED:
pr_debug("%s: frame flushed\n", sync_pt_data->fence_name);
sync_pt_data->flushed = true;
break;
case MDP_NOTIFY_FRAME_TIMEOUT:
pr_err("%s: frame timeout\n", sync_pt_data->fence_name);
mdss_fb_signal_timeline(sync_pt_data);
break;
case MDP_NOTIFY_FRAME_DONE:
pr_debug("%s: frame done\n", sync_pt_data->fence_name);
mdss_fb_signal_timeline(sync_pt_data);
break;
case MDP_NOTIFY_FRAME_CFG_DONE:
if (sync_pt_data->async_wait_fences)
__mdss_fb_copy_fence(sync_pt_data,
sync_pt_data->temp_fen,
&sync_pt_data->temp_fen_cnt);
break;
case MDP_NOTIFY_FRAME_CTX_DONE:
mdss_fb_release_kickoff(mfd);
break;
}
return NOTIFY_OK;
}
/**
* mdss_fb_pan_idle() - wait for panel programming to be idle
* @mfd: Framebuffer data structure for display
*
* Wait for any pending programming of the hardware configuration to finish.
* After this function returns it is safe to perform software updates for the
* next frame.
*/
static int mdss_fb_pan_idle(struct msm_fb_data_type *mfd)
{
int ret = 0;
ret = wait_event_timeout(mfd->idle_wait_q,
(!atomic_read(&mfd->commits_pending) ||
mfd->shutdown_pending),
msecs_to_jiffies(WAIT_DISP_OP_TIMEOUT));
if (!ret) {
pr_err("wait for idle timeout %d pending=%d\n",
ret, atomic_read(&mfd->commits_pending));
mdss_fb_signal_timeline(&mfd->mdp_sync_pt_data);
} else if (mfd->shutdown_pending) {
pr_debug("Shutdown signalled\n");
return -EPERM;
}
return 0;
}
static int mdss_fb_wait_for_kickoff(struct msm_fb_data_type *mfd)
{
int ret = 0;
if (!mfd->wait_for_kickoff)
return mdss_fb_pan_idle(mfd);
ret = wait_event_timeout(mfd->kickoff_wait_q,
(!atomic_read(&mfd->kickoff_pending) ||
mfd->shutdown_pending),
msecs_to_jiffies(WAIT_DISP_OP_TIMEOUT / 2));
if (!ret) {
pr_err("wait for kickoff timeout %d pending=%d\n",
ret, atomic_read(&mfd->kickoff_pending));
} else if (mfd->shutdown_pending) {
pr_debug("Shutdown signalled\n");
return -EPERM;
}
return 0;
}
static int mdss_fb_pan_display_ex(struct fb_info *info,
struct mdp_display_commit *disp_commit)
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
struct fb_var_screeninfo *var = &disp_commit->var;
u32 wait_for_finish = disp_commit->wait_for_finish;
int ret = 0;
if (!mfd || (!mfd->op_enable))
return -EPERM;
if ((mdss_fb_is_power_off(mfd)) &&
!((mfd->dcm_state == DCM_ENTER) &&
(mfd->panel.type == MIPI_CMD_PANEL)))
return -EPERM;
if (var->xoffset > (info->var.xres_virtual - info->var.xres))
return -EINVAL;
if (var->yoffset > (info->var.yres_virtual - info->var.yres))
return -EINVAL;
ret = mdss_fb_pan_idle(mfd);
if (ret) {
pr_err("Shutdown pending. Aborting operation\n");
return ret;
}
mutex_lock(&mfd->mdp_sync_pt_data.sync_mutex);
if (info->fix.xpanstep)
info->var.xoffset =
(var->xoffset / info->fix.xpanstep) * info->fix.xpanstep;
if (info->fix.ypanstep)
info->var.yoffset =
(var->yoffset / info->fix.ypanstep) * info->fix.ypanstep;
mfd->msm_fb_backup.info = *info;
mfd->msm_fb_backup.disp_commit = *disp_commit;
atomic_inc(&mfd->mdp_sync_pt_data.commit_cnt);
atomic_inc(&mfd->commits_pending);
atomic_inc(&mfd->kickoff_pending);
wake_up_all(&mfd->commit_wait_q);
mutex_unlock(&mfd->mdp_sync_pt_data.sync_mutex);
if (wait_for_finish)
mdss_fb_pan_idle(mfd);
return ret;
}
static int mdss_fb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct mdp_display_commit disp_commit;
memset(&disp_commit, 0, sizeof(disp_commit));
disp_commit.wait_for_finish = true;
memcpy(&disp_commit.var, var, sizeof(struct fb_var_screeninfo));
return mdss_fb_pan_display_ex(info, &disp_commit);
}
static int mdss_fb_pan_display_sub(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
if (!mfd->op_enable)
return -EPERM;
if ((mdss_fb_is_power_off(mfd)) &&
!((mfd->dcm_state == DCM_ENTER) &&
(mfd->panel.type == MIPI_CMD_PANEL)))
return -EPERM;
if (var->xoffset > (info->var.xres_virtual - info->var.xres))
return -EINVAL;
if (var->yoffset > (info->var.yres_virtual - info->var.yres))
return -EINVAL;
if (info->fix.xpanstep)
info->var.xoffset =
(var->xoffset / info->fix.xpanstep) * info->fix.xpanstep;
if (info->fix.ypanstep)
info->var.yoffset =
(var->yoffset / info->fix.ypanstep) * info->fix.ypanstep;
if (mfd->mdp.dma_fnc)
mfd->mdp.dma_fnc(mfd);
else
pr_warn("dma function not set for panel type=%d\n",
mfd->panel.type);
return 0;
}
static void mdss_fb_var_to_panelinfo(struct fb_var_screeninfo *var,
struct mdss_panel_info *pinfo)
{
pinfo->xres = var->xres;
pinfo->yres = var->yres;
pinfo->lcdc.v_front_porch = var->lower_margin;
pinfo->lcdc.v_back_porch = var->upper_margin;
pinfo->lcdc.v_pulse_width = var->vsync_len;
pinfo->lcdc.h_front_porch = var->right_margin;
pinfo->lcdc.h_back_porch = var->left_margin;
pinfo->lcdc.h_pulse_width = var->hsync_len;
pinfo->clk_rate = var->pixclock;
}
/**
* __mdss_fb_perform_commit() - process a frame to display
* @mfd: Framebuffer data structure for display
*
* Processes all programmed layers and buffers, and ensures all pending release
* fences are signaled once the buffer is transferred to the display.
*/
static int __mdss_fb_perform_commit(struct msm_fb_data_type *mfd)
{
struct msm_sync_pt_data *sync_pt_data = &mfd->mdp_sync_pt_data;
struct msm_fb_backup_type *fb_backup = &mfd->msm_fb_backup;
int ret = -ENOSYS;
if (!sync_pt_data->async_wait_fences)
mdss_fb_wait_for_fence(sync_pt_data);
sync_pt_data->flushed = false;
if (fb_backup->disp_commit.flags & MDP_DISPLAY_COMMIT_OVERLAY) {
if (mfd->mdp.kickoff_fnc)
ret = mfd->mdp.kickoff_fnc(mfd,
&fb_backup->disp_commit);
else
pr_warn("no kickoff function setup for fb%d\n",
mfd->index);
} else {
ret = mdss_fb_pan_display_sub(&fb_backup->disp_commit.var,
&fb_backup->info);
if (ret)
pr_err("pan display failed %x on fb%d\n", ret,
mfd->index);
}
if (!ret)
mdss_fb_update_backlight(mfd);
if (IS_ERR_VALUE(ret) || !sync_pt_data->flushed) {
mdss_fb_release_kickoff(mfd);
mdss_fb_signal_timeline(sync_pt_data);
}
return ret;
}
static int __mdss_fb_display_thread(void *data)
{
struct msm_fb_data_type *mfd = data;
int ret;
struct sched_param param;
/*
* this priority was found during empirical testing to provide appropriate
* realtime scheduling to process display updates and interact with
* other real time and normal priority tasks
*/
param.sched_priority = 16;
ret = sched_setscheduler(current, SCHED_FIFO, &param);
if (ret)
pr_warn("set priority failed for fb%d display thread\n",
mfd->index);
while (1) {
wait_event(mfd->commit_wait_q,
(atomic_read(&mfd->commits_pending) ||
kthread_should_stop()));
if (kthread_should_stop())
break;
ret = __mdss_fb_perform_commit(mfd);
atomic_dec(&mfd->commits_pending);
wake_up_all(&mfd->idle_wait_q);
}
mdss_fb_release_kickoff(mfd);
atomic_set(&mfd->commits_pending, 0);
wake_up_all(&mfd->idle_wait_q);
return ret;
}
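/*
 * Illustrative sketch (not part of the driver): how a display thread like
 * __mdss_fb_display_thread() is typically created and stored using the
 * kthread API (assumes <linux/kthread.h>). The helper name is hypothetical;
 * the real start/stop helpers live elsewhere in this file. Fenced off with
 * #if 0 so it does not affect the build.
 */
#if 0
static int example_start_disp_thread(struct msm_fb_data_type *mfd)
{
	struct task_struct *thread;

	/* run __mdss_fb_display_thread() with mfd as its private data */
	thread = kthread_run(__mdss_fb_display_thread, mfd,
			     "mdss_fb%d", mfd->index);
	if (IS_ERR(thread))
		return PTR_ERR(thread);
	mfd->disp_thread = thread;
	return 0;
}
#endif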
static int mdss_fb_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
if (var->rotate != FB_ROTATE_UR)
return -EINVAL;
if (var->grayscale != info->var.grayscale)
return -EINVAL;
switch (var->bits_per_pixel) {
case 16:
if ((var->green.offset != 5) ||
!((var->blue.offset == 11)
|| (var->blue.offset == 0)) ||
!((var->red.offset == 11)
|| (var->red.offset == 0)) ||
(var->blue.length != 5) ||
(var->green.length != 6) ||
(var->red.length != 5) ||
(var->blue.msb_right != 0) ||
(var->green.msb_right != 0) ||
(var->red.msb_right != 0) ||
(var->transp.offset != 0) ||
(var->transp.length != 0))
return -EINVAL;
break;
case 24:
if ((var->blue.offset != 0) ||
(var->green.offset != 8) ||
(var->red.offset != 16) ||
(var->blue.length != 8) ||
(var->green.length != 8) ||
(var->red.length != 8) ||
(var->blue.msb_right != 0) ||
(var->green.msb_right != 0) ||
(var->red.msb_right != 0) ||
!(((var->transp.offset == 0) &&
(var->transp.length == 0)) ||
((var->transp.offset == 24) &&
(var->transp.length == 8))))
return -EINVAL;
break;
case 32:
/* Check user specified color format BGRA/ARGB/RGBA
and verify the position of the RGB components */
if (!((var->transp.offset == 24) &&
(var->blue.offset == 0) &&
(var->green.offset == 8) &&
(var->red.offset == 16)) &&
!((var->transp.offset == 24) &&
(var->blue.offset == 16) &&
(var->green.offset == 8) &&
(var->red.offset == 0)))
return -EINVAL;
/* Check the common values for both RGBA and ARGB */
if ((var->blue.length != 8) ||
(var->green.length != 8) ||
(var->red.length != 8) ||
(var->transp.length != 8) ||
(var->blue.msb_right != 0) ||
(var->green.msb_right != 0) ||
(var->red.msb_right != 0))
return -EINVAL;
break;
default:
return -EINVAL;
}
if ((var->xres_virtual <= 0) || (var->yres_virtual <= 0))
return -EINVAL;
if (info->fix.smem_start) {
u32 len = var->xres_virtual * var->yres_virtual *
(var->bits_per_pixel / 8);
if (len > info->fix.smem_len)
return -EINVAL;
}
if ((var->xres == 0) || (var->yres == 0))
return -EINVAL;
if (var->xoffset > (var->xres_virtual - var->xres))
return -EINVAL;
if (var->yoffset > (var->yres_virtual - var->yres))
return -EINVAL;
if (mfd->panel_info) {
int rc;
memcpy(&mfd->reconfig_panel_info, mfd->panel_info,
sizeof(mfd->reconfig_panel_info));
mdss_fb_var_to_panelinfo(var, &mfd->reconfig_panel_info);
rc = mdss_fb_send_panel_event(mfd, MDSS_EVENT_CHECK_PARAMS,
&mfd->reconfig_panel_info);
if (IS_ERR_VALUE(rc))
return rc;
mfd->panel_reconfig = rc;
}
return 0;
}
static int mdss_fb_set_par(struct fb_info *info)
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
struct fb_var_screeninfo *var = &info->var;
int old_imgType;
int ret = 0;
ret = mdss_fb_pan_idle(mfd);
if (ret) {
pr_err("Shutdown pending. Aborting operation\n");
return ret;
}
old_imgType = mfd->fb_imgType;
switch (var->bits_per_pixel) {
case 16:
if (var->red.offset == 0)
mfd->fb_imgType = MDP_BGR_565;
else
mfd->fb_imgType = MDP_RGB_565;
break;
case 24:
if ((var->transp.offset == 0) && (var->transp.length == 0))
mfd->fb_imgType = MDP_RGB_888;
else if ((var->transp.offset == 24) &&
(var->transp.length == 8)) {
mfd->fb_imgType = MDP_ARGB_8888;
info->var.bits_per_pixel = 32;
}
break;
case 32:
if ((var->red.offset == 0) &&
(var->green.offset == 8) &&
(var->blue.offset == 16) &&
(var->transp.offset == 24))
mfd->fb_imgType = MDP_RGBA_8888;
else if ((var->red.offset == 16) &&
(var->green.offset == 8) &&
(var->blue.offset == 0) &&
(var->transp.offset == 24))
mfd->fb_imgType = MDP_BGRA_8888;
else if ((var->red.offset == 8) &&
(var->green.offset == 16) &&
(var->blue.offset == 24) &&
(var->transp.offset == 0))
mfd->fb_imgType = MDP_ARGB_8888;
else
mfd->fb_imgType = MDP_RGBA_8888;
break;
default:
return -EINVAL;
}
if (mfd->mdp.fb_stride)
mfd->fbi->fix.line_length = mfd->mdp.fb_stride(mfd->index,
var->xres,
var->bits_per_pixel / 8);
else
mfd->fbi->fix.line_length = var->xres * var->bits_per_pixel / 8;
mfd->fbi->fix.smem_len = PAGE_ALIGN(mfd->fbi->fix.line_length *
mfd->fbi->var.yres) * mfd->fb_page;
if (mfd->panel_reconfig || (mfd->fb_imgType != old_imgType)) {
mdss_fb_blank_sub(FB_BLANK_POWERDOWN, info, mfd->op_enable);
mdss_fb_var_to_panelinfo(var, mfd->panel_info);
mdss_fb_blank_sub(FB_BLANK_UNBLANK, info, mfd->op_enable);
mfd->panel_reconfig = false;
}
return ret;
}
int mdss_fb_dcm(struct msm_fb_data_type *mfd, int req_state)
{
int ret = 0;
if (req_state == mfd->dcm_state) {
pr_warn("Already in correct DCM/DTM state\n");
return ret;
}
switch (req_state) {
case DCM_UNBLANK:
if (mfd->dcm_state == DCM_UNINIT &&
mdss_fb_is_power_off(mfd) && mfd->mdp.on_fnc) {
if (mfd->disp_thread == NULL) {
ret = mdss_fb_start_disp_thread(mfd);
if (ret < 0)
return ret;
}
ret = mfd->mdp.on_fnc(mfd);
if (ret == 0) {
mfd->panel_power_state = MDSS_PANEL_POWER_ON;
mfd->dcm_state = DCM_UNBLANK;
}
}
break;
case DCM_ENTER:
if (mfd->dcm_state == DCM_UNBLANK) {
/*
* Keep unblank path available for only
* DCM operation
*/
mfd->panel_power_state = MDSS_PANEL_POWER_OFF;
mfd->dcm_state = DCM_ENTER;
}
break;
case DCM_EXIT:
if (mfd->dcm_state == DCM_ENTER) {
/* Release the unblank path for exit */
mfd->panel_power_state = MDSS_PANEL_POWER_ON;
mfd->dcm_state = DCM_EXIT;
}
break;
case DCM_BLANK:
if ((mfd->dcm_state == DCM_EXIT ||
mfd->dcm_state == DCM_UNBLANK) &&
mdss_fb_is_power_on(mfd) && mfd->mdp.off_fnc) {
mfd->panel_power_state = MDSS_PANEL_POWER_OFF;
ret = mfd->mdp.off_fnc(mfd);
if (ret == 0)
mfd->dcm_state = DCM_UNINIT;
else
pr_err("DCM_BLANK failed\n");
if (mfd->disp_thread)
mdss_fb_stop_disp_thread(mfd);
}
break;
case DTM_ENTER:
if (mfd->dcm_state == DCM_UNINIT)
mfd->dcm_state = DTM_ENTER;
break;
case DTM_EXIT:
if (mfd->dcm_state == DTM_ENTER)
mfd->dcm_state = DCM_UNINIT;
break;
}
return ret;
}
static int mdss_fb_cursor(struct fb_info *info, void __user *p)
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
struct fb_cursor cursor;
int ret;
if (!mfd->mdp.cursor_update)
return -ENODEV;
ret = copy_from_user(&cursor, p, sizeof(cursor));
if (ret)
return ret;
return mfd->mdp.cursor_update(mfd, &cursor);
}
static int mdss_fb_set_lut(struct fb_info *info, void __user *p)
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
struct fb_cmap cmap;
int ret;
if (!mfd->mdp.lut_update)
return -ENODEV;
ret = copy_from_user(&cmap, p, sizeof(cmap));
if (ret)
return ret;
mfd->mdp.lut_update(mfd, &cmap);
return 0;
}
/**
* mdss_fb_sync_get_fence() - get fence from timeline
* @timeline: Timeline to create the fence on
* @fence_name: Name of the fence that will be created for debugging
* @val: Timeline value at which the fence will be signaled
*
* Function returns a fence on the given timeline with the name provided.
* The fence created will be signaled when the timeline is advanced.
*/
struct sync_fence *mdss_fb_sync_get_fence(struct sw_sync_timeline *timeline,
const char *fence_name, int val)
{
struct sync_pt *sync_pt;
struct sync_fence *fence;
pr_debug("%s: buf sync fence timeline=%d\n", fence_name, val);
sync_pt = sw_sync_pt_create(timeline, val);
if (sync_pt == NULL) {
pr_err("%s: cannot create sync point\n", fence_name);
return NULL;
}
/* create fence */
fence = sync_fence_create(fence_name, sync_pt);
if (fence == NULL) {
sync_pt_free(sync_pt);
pr_err("%s: cannot create fence\n", fence_name);
return NULL;
}
return fence;
}
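/*
 * Illustrative sketch (not part of the driver): typical use of
 * mdss_fb_sync_get_fence() to export a release fence to userspace, following
 * the same get_unused_fd_flags()/sync_fence_install() pattern used in
 * mdss_fb_handle_buf_sync_ioctl() below. The helper name and fence name are
 * hypothetical; fenced off with #if 0 so it does not affect the build.
 */
#if 0
static int example_export_fence(struct sw_sync_timeline *timeline, int val)
{
	struct sync_fence *fence;
	int fd;

	/* fence signals once the timeline value reaches 'val' */
	fence = mdss_fb_sync_get_fence(timeline, "example-release", val);
	if (!fence)
		return -ENOMEM;
	fd = get_unused_fd_flags(0);
	if (fd < 0) {
		sync_fence_put(fence);
		return fd;
	}
	sync_fence_install(fence, fd);
	return fd; /* hand the fd back to userspace, e.g. via copy_to_user() */
}
#endif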
static int mdss_fb_handle_buf_sync_ioctl(struct msm_sync_pt_data *sync_pt_data,
struct mdp_buf_sync *buf_sync)
{
int i, ret = 0;
int acq_fen_fd[MDP_MAX_FENCE_FD];
struct sync_fence *fence, *rel_fence, *retire_fence;
int rel_fen_fd;
int retire_fen_fd;
int val;
if ((buf_sync->acq_fen_fd_cnt > MDP_MAX_FENCE_FD) ||
(sync_pt_data->timeline == NULL))
return -EINVAL;
if (buf_sync->acq_fen_fd_cnt)
ret = copy_from_user(acq_fen_fd, buf_sync->acq_fen_fd,
buf_sync->acq_fen_fd_cnt * sizeof(int));
if (ret) {
pr_err("%s: copy_from_user failed\n", sync_pt_data->fence_name);
return ret;
}
i = mdss_fb_wait_for_fence(sync_pt_data);
if (i > 0)
pr_warn("%s: waited on %d active fences\n",
sync_pt_data->fence_name, i);
mutex_lock(&sync_pt_data->sync_mutex);
for (i = 0; i < buf_sync->acq_fen_fd_cnt; i++) {
fence = sync_fence_fdget(acq_fen_fd[i]);
if (fence == NULL) {
pr_err("%s: null fence! i=%d fd=%d\n",
sync_pt_data->fence_name, i,
acq_fen_fd[i]);
ret = -EINVAL;
break;
}
sync_pt_data->acq_fen[i] = fence;
}
sync_pt_data->acq_fen_cnt = i;
if (ret)
goto buf_sync_err_1;
val = sync_pt_data->timeline_value + sync_pt_data->threshold +
atomic_read(&sync_pt_data->commit_cnt);
/* Set release fence */
rel_fence = mdss_fb_sync_get_fence(sync_pt_data->timeline,
sync_pt_data->fence_name, val);
if (IS_ERR_OR_NULL(rel_fence)) {
pr_err("%s: unable to retrieve release fence\n",
sync_pt_data->fence_name);
ret = rel_fence ? PTR_ERR(rel_fence) : -ENOMEM;
goto buf_sync_err_1;
}
/* create fd */
rel_fen_fd = get_unused_fd_flags(0);
if (rel_fen_fd < 0) {
pr_err("%s: get_unused_fd_flags failed error:0x%x\n",
sync_pt_data->fence_name, rel_fen_fd);
ret = rel_fen_fd;
goto buf_sync_err_2;
}
sync_fence_install(rel_fence, rel_fen_fd);
ret = copy_to_user(buf_sync->rel_fen_fd, &rel_fen_fd, sizeof(int));
if (ret) {
pr_err("%s: copy_to_user failed\n", sync_pt_data->fence_name);
goto buf_sync_err_3;
}
if (!(buf_sync->flags & MDP_BUF_SYNC_FLAG_RETIRE_FENCE))
goto skip_retire_fence;
if (sync_pt_data->get_retire_fence)
retire_fence = sync_pt_data->get_retire_fence(sync_pt_data);
else
retire_fence = NULL;
if (IS_ERR_OR_NULL(retire_fence)) {
val += sync_pt_data->retire_threshold;
retire_fence = mdss_fb_sync_get_fence(
sync_pt_data->timeline, "mdp-retire", val);
}
if (IS_ERR_OR_NULL(retire_fence)) {
pr_err("%s: unable to retrieve retire fence\n",
sync_pt_data->fence_name);
ret = retire_fence ? PTR_ERR(retire_fence) : -ENOMEM;
goto buf_sync_err_3;
}
retire_fen_fd = get_unused_fd_flags(0);
if (retire_fen_fd < 0) {
pr_err("%s: get_unused_fd_flags failed for retire fence error:0x%x\n",
sync_pt_data->fence_name, retire_fen_fd);
ret = retire_fen_fd;
sync_fence_put(retire_fence);
goto buf_sync_err_3;
}
sync_fence_install(retire_fence, retire_fen_fd);
ret = copy_to_user(buf_sync->retire_fen_fd, &retire_fen_fd,
sizeof(int));
if (ret) {
pr_err("%s: copy_to_user failed for retire fence\n",
sync_pt_data->fence_name);
put_unused_fd(retire_fen_fd);
sync_fence_put(retire_fence);
goto buf_sync_err_3;
}
skip_retire_fence:
mutex_unlock(&sync_pt_data->sync_mutex);
if (buf_sync->flags & MDP_BUF_SYNC_FLAG_WAIT)
mdss_fb_wait_for_fence(sync_pt_data);
return ret;
buf_sync_err_3:
put_unused_fd(rel_fen_fd);
buf_sync_err_2:
sync_fence_put(rel_fence);
buf_sync_err_1:
for (i = 0; i < sync_pt_data->acq_fen_cnt; i++)
sync_fence_put(sync_pt_data->acq_fen[i]);
sync_pt_data->acq_fen_cnt = 0;
mutex_unlock(&sync_pt_data->sync_mutex);
return ret;
}
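/*
 * Illustrative userspace-side sketch (not part of the driver): how a client
 * might fill struct mdp_buf_sync for MSMFB_BUFFER_SYNC, passing one acquire
 * fence and getting release/retire fence fds back. Names and error handling
 * are simplified and hypothetical; fenced off with #if 0 since userspace
 * code does not belong in a kernel build.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <linux/msm_mdp.h>

static int example_buffer_sync(int fb_fd, int acquire_fd)
{
	struct mdp_buf_sync buf_sync;
	int acq_fds[1] = { acquire_fd };
	int rel_fd = -1, retire_fd = -1;

	memset(&buf_sync, 0, sizeof(buf_sync));
	buf_sync.flags = MDP_BUF_SYNC_FLAG_RETIRE_FENCE;
	buf_sync.acq_fen_fd_cnt = 1;
	buf_sync.acq_fen_fd = acq_fds;
	buf_sync.rel_fen_fd = &rel_fd;
	buf_sync.retire_fen_fd = &retire_fd;
	if (ioctl(fb_fd, MSMFB_BUFFER_SYNC, &buf_sync) < 0)
		return -1;
	/* wait on rel_fd before reusing the buffer just submitted */
	return rel_fd;
}
#endif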
static int mdss_fb_display_commit(struct fb_info *info,
unsigned long *argp)
{
int ret;
struct mdp_display_commit disp_commit;
ret = copy_from_user(&disp_commit, argp,
sizeof(disp_commit));
if (ret) {
pr_err("%s:copy_from_user failed\n", __func__);
return ret;
}
ret = mdss_fb_pan_display_ex(info, &disp_commit);
return ret;
}
static int __ioctl_wait_idle(struct msm_fb_data_type *mfd, u32 cmd)
{
int ret = 0;
if (mfd->wait_for_kickoff &&
((cmd == MSMFB_OVERLAY_PREPARE) ||
(cmd == MSMFB_BUFFER_SYNC) ||
(cmd == MSMFB_OVERLAY_SET))) {
ret = mdss_fb_wait_for_kickoff(mfd);
} else if ((cmd != MSMFB_VSYNC_CTRL) &&
(cmd != MSMFB_OVERLAY_VSYNC_CTRL) &&
(cmd != MSMFB_ASYNC_BLIT) &&
(cmd != MSMFB_BLIT) &&
(cmd != MSMFB_NOTIFY_UPDATE) &&
(cmd != MSMFB_OVERLAY_PREPARE)) {
ret = mdss_fb_pan_idle(mfd);
}
if (ret)
pr_debug("Shutdown pending. Aborting operation %x\n", cmd);
return ret;
}
/*
* mdss_fb_do_ioctl() - MDSS Framebuffer ioctl function
* @info: pointer to framebuffer info
* @cmd: ioctl command
* @arg: argument to ioctl
*
* This function provides an architecture agnostic implementation
* of the mdss framebuffer ioctl. This function can be called
* by compat ioctl or regular ioctl to handle the supported commands.
*/
int mdss_fb_do_ioctl(struct fb_info *info, unsigned int cmd,
unsigned long arg)
{
struct msm_fb_data_type *mfd;
void __user *argp = (void __user *)arg;
struct mdp_page_protection fb_page_protection;
int ret = -ENOSYS;
struct mdp_buf_sync buf_sync;
struct msm_sync_pt_data *sync_pt_data = NULL;
unsigned int dsi_mode = 0;
struct mdss_panel_data *pdata = NULL;
if (!info || !info->par)
return -EINVAL;
mfd = (struct msm_fb_data_type *)info->par;
if (!mfd)
return -EINVAL;
if (mfd->shutdown_pending)
return -EPERM;
pdata = dev_get_platdata(&mfd->pdev->dev);
if (!pdata || pdata->panel_info.dynamic_switch_pending)
return -EPERM;
atomic_inc(&mfd->ioctl_ref_cnt);
mdss_fb_power_setting_idle(mfd);
ret = __ioctl_wait_idle(mfd, cmd);
if (ret)
goto exit;
switch (cmd) {
case MSMFB_CURSOR:
ret = mdss_fb_cursor(info, argp);
break;
case MSMFB_SET_LUT:
ret = mdss_fb_set_lut(info, argp);
break;
case MSMFB_GET_PAGE_PROTECTION:
fb_page_protection.page_protection =
mfd->mdp_fb_page_protection;
ret = copy_to_user(argp, &fb_page_protection,
sizeof(fb_page_protection));
if (ret)
goto exit;
break;
case MSMFB_BUFFER_SYNC:
ret = copy_from_user(&buf_sync, argp, sizeof(buf_sync));
if (ret)
goto exit;
if ((!mfd->op_enable) || (mdss_fb_is_power_off(mfd))) {
ret = -EPERM;
goto exit;
}
if (mfd->mdp.get_sync_fnc)
sync_pt_data = mfd->mdp.get_sync_fnc(mfd, &buf_sync);
if (!sync_pt_data)
sync_pt_data = &mfd->mdp_sync_pt_data;
ret = mdss_fb_handle_buf_sync_ioctl(sync_pt_data, &buf_sync);
if (!ret)
ret = copy_to_user(argp, &buf_sync, sizeof(buf_sync));
break;
case MSMFB_NOTIFY_UPDATE:
ret = mdss_fb_notify_update(mfd, argp);
break;
case MSMFB_DISPLAY_COMMIT:
ret = mdss_fb_display_commit(info, argp);
break;
case MSMFB_LPM_ENABLE:
ret = copy_from_user(&dsi_mode, argp, sizeof(dsi_mode));
if (ret) {
pr_err("%s: MSMFB_LPM_ENABLE ioctl failed\n", __func__);
goto exit;
}
ret = mdss_fb_lpm_enable(mfd, dsi_mode);
break;
default:
if (mfd->mdp.ioctl_handler)
ret = mfd->mdp.ioctl_handler(mfd, cmd, argp);
break;
}
if (ret == -ENOSYS)
pr_err("unsupported ioctl (%x)\n", cmd);
exit:
if (!atomic_dec_return(&mfd->ioctl_ref_cnt))
wake_up_all(&mfd->ioctl_q);
return ret;
}
static int mdss_fb_ioctl(struct fb_info *info, unsigned int cmd,
unsigned long arg)
{
if (!info || !info->par)
return -EINVAL;
return mdss_fb_do_ioctl(info, cmd, arg);
}
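/*
 * Illustrative userspace-side sketch (not part of the driver): issuing a
 * blocking MSMFB_DISPLAY_COMMIT, which lands in mdss_fb_do_ioctl() and then
 * mdss_fb_pan_display_ex() above. The helper name is hypothetical and error
 * handling is omitted; fenced off with #if 0 since userspace code does not
 * belong in a kernel build.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <linux/msm_mdp.h>

static int example_display_commit(int fb_fd)
{
	struct mdp_display_commit commit;

	memset(&commit, 0, sizeof(commit));
	commit.flags = MDP_DISPLAY_COMMIT_OVERLAY;
	commit.wait_for_finish = 1; /* block until the frame reaches the panel */
	return ioctl(fb_fd, MSMFB_DISPLAY_COMMIT, &commit);
}
#endif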
struct fb_info *msm_fb_get_writeback_fb(void)
{
int c = 0;
for (c = 0; c < fbi_list_index; ++c) {
struct msm_fb_data_type *mfd;
mfd = (struct msm_fb_data_type *)fbi_list[c]->par;
if (mfd->panel.type == WRITEBACK_PANEL)
return fbi_list[c];
}
return NULL;
}
EXPORT_SYMBOL(msm_fb_get_writeback_fb);
static int mdss_fb_register_extra_panel(struct platform_device *pdev,
struct mdss_panel_data *pdata)
{
struct mdss_panel_data *fb_pdata;
fb_pdata = dev_get_platdata(&pdev->dev);
if (!fb_pdata) {
pr_err("framebuffer device %s contains invalid panel data\n",
dev_name(&pdev->dev));
return -EINVAL;
}
if (fb_pdata->next) {
pr_err("split panel already setup for framebuffer device %s\n",
dev_name(&pdev->dev));
return -EEXIST;
}
fb_pdata->next = pdata;
return 0;
}
int mdss_register_panel(struct platform_device *pdev,
struct mdss_panel_data *pdata)
{
struct platform_device *fb_pdev, *mdss_pdev;
struct device_node *node;
int rc = 0;
bool master_panel = true;
if (!pdev || !pdev->dev.of_node) {
pr_err("Invalid device node\n");
return -ENODEV;
}
if (!mdp_instance) {
pr_err("mdss mdp resource not initialized yet\n");
return -EPROBE_DEFER;
}
node = of_parse_phandle(pdev->dev.of_node, "qcom,mdss-fb-map", 0);
if (!node) {
pr_err("Unable to find fb node for device: %s\n",
pdev->name);
return -ENODEV;
}
mdss_pdev = of_find_device_by_node(node->parent);
if (!mdss_pdev) {
pr_err("Unable to find mdss for node: %s\n", node->full_name);
rc = -ENODEV;
goto mdss_notfound;
}
fb_pdev = of_find_device_by_node(node);
if (fb_pdev) {
rc = mdss_fb_register_extra_panel(fb_pdev, pdata);
if (rc == 0)
master_panel = false;
} else {
pr_info("adding framebuffer device %s\n", dev_name(&pdev->dev));
fb_pdev = of_platform_device_create(node, NULL,
&mdss_pdev->dev);
if (fb_pdev)
fb_pdev->dev.platform_data = pdata;
}
if (master_panel && mdp_instance->panel_register_done)
mdp_instance->panel_register_done(pdata);
mdss_notfound:
of_node_put(node);
return rc;
}
EXPORT_SYMBOL(mdss_register_panel);
int mdss_fb_register_mdp_instance(struct msm_mdp_interface *mdp)
{
if (mdp_instance) {
pr_err("multiple MDP instance registration\n");
return -EINVAL;
}
mdp_instance = mdp;
return 0;
}
EXPORT_SYMBOL(mdss_fb_register_mdp_instance);
int mdss_fb_get_phys_info(dma_addr_t *start, unsigned long *len, int fb_num)
{
struct fb_info *info;
struct msm_fb_data_type *mfd;
if (fb_num >= MAX_FBI_LIST)
return -EINVAL;
info = fbi_list[fb_num];
if (!info)
return -ENOENT;
mfd = (struct msm_fb_data_type *)info->par;
if (!mfd)
return -ENODEV;
if (mfd->iova)
*start = mfd->iova;
else
*start = info->fix.smem_start;
*len = info->fix.smem_len;
return 0;
}
EXPORT_SYMBOL(mdss_fb_get_phys_info);
int __init mdss_fb_init(void)
{
int rc = -ENODEV;
if (platform_driver_register(&mdss_fb_driver))
return rc;
return 0;
}
module_init(mdss_fb_init);
int mdss_fb_suspres_panel(struct device *dev, void *data)
{
struct msm_fb_data_type *mfd;
int rc = 0;
u32 event;
if (!data) {
pr_err("Device state not defined\n");
return -EINVAL;
}
mfd = dev_get_drvdata(dev);
if (!mfd)
return 0;
event = *((bool *) data) ? MDSS_EVENT_RESUME : MDSS_EVENT_SUSPEND;
/* Do not send runtime suspend/resume for HDMI primary */
if (!mdss_fb_is_hdmi_primary(mfd)) {
rc = mdss_fb_send_panel_event(mfd, event, NULL);
if (rc)
pr_warn("unable to %s fb%d (%d)\n",
event == MDSS_EVENT_RESUME ?
"resume" : "suspend",
mfd->index, rc);
}
return rc;
}
| gpl-2.0 |
zapashcanon/lmms | plugins/MidiExport/MidiExport.cpp | 2 | 8747 | /*
* MidiExport.cpp - support for exporting MIDI files
*
* Copyright (c) 2015 Mohamed Abdel Maksoud <mohamed at amaksoud.com>
* Copyright (c) 2017 Hyunjin Song <tteu.ingog/at/gmail.com>
*
* This file is part of LMMS - https://lmms.io
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program (see COPYING); if not, write to the
* Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301 USA.
*
*/
#include <QDomDocument>
#include <QDir>
#include <QApplication>
#include <QMessageBox>
#include <QProgressDialog>
#include "MidiExport.h"
#include "lmms_math.h"
#include "TrackContainer.h"
#include "BBTrack.h"
#include "InstrumentTrack.h"
extern "C"
{
Plugin::Descriptor PLUGIN_EXPORT midiexport_plugin_descriptor =
{
STRINGIFY( PLUGIN_NAME ),
"MIDI Export",
QT_TRANSLATE_NOOP( "pluginBrowser",
"Filter for exporting MIDI-files from LMMS" ),
"Mohamed Abdel Maksoud <mohamed at amaksoud.com> and "
"Hyunjin Song <tteu.ingog/at/gmail.com>",
0x0100,
Plugin::ExportFilter,
NULL,
NULL,
NULL
} ;
}
MidiExport::MidiExport() : ExportFilter( &midiexport_plugin_descriptor)
{
}
MidiExport::~MidiExport()
{
}
bool MidiExport::tryExport(const TrackContainer::TrackList &tracks,
const TrackContainer::TrackList &tracks_BB,
int tempo, int masterPitch, const QString &filename)
{
QFile f(filename);
f.open(QIODevice::WriteOnly);
QDataStream midiout(&f);
InstrumentTrack* instTrack;
BBTrack* bbTrack;
QDomElement element;
int nTracks = 0;
uint8_t buffer[BUFFER_SIZE];
uint32_t size;
for (const Track* track : tracks) if (track->type() == Track::InstrumentTrack) nTracks++;
for (const Track* track : tracks_BB) if (track->type() == Track::InstrumentTrack) nTracks++;
// midi header
MidiFile::MIDIHeader header(nTracks);
size = header.writeToBuffer(buffer);
midiout.writeRawData((char *)buffer, size);
std::vector<std::vector<std::pair<int,int>>> plists;
// midi tracks
for (Track* track : tracks)
{
DataFile dataFile(DataFile::SongProject);
MTrack mtrack;
if (track->type() == Track::InstrumentTrack)
{
mtrack.addName(track->name().toStdString(), 0);
//mtrack.addProgramChange(0, 0);
mtrack.addTempo(tempo, 0);
instTrack = dynamic_cast<InstrumentTrack *>(track);
element = instTrack->saveState(dataFile, dataFile.content());
int base_pitch = 0;
double base_volume = 1.0;
int base_time = 0;
MidiNoteVector pat;
for (QDomNode n = element.firstChild(); !n.isNull(); n = n.nextSibling())
{
if (n.nodeName() == "instrumenttrack")
{
QDomElement it = n.toElement();
// transpose +12 semitones, workaround for #1857
base_pitch = (69 - it.attribute("basenote", "57").toInt());
if (it.attribute("usemasterpitch", "1").toInt())
{
base_pitch += masterPitch;
}
base_volume = it.attribute("volume", "100").toDouble()/100.0;
}
if (n.nodeName() == "pattern")
{
base_time = n.toElement().attribute("pos", "0").toInt();
writePattern(pat, n, base_pitch, base_volume, base_time);
}
}
ProcessBBNotes(pat, INT_MAX);
writePatternToTrack(mtrack, pat);
size = mtrack.writeToBuffer(buffer);
midiout.writeRawData((char *)buffer, size);
}
if (track->type() == Track::BBTrack)
{
bbTrack = dynamic_cast<BBTrack *>(track);
element = bbTrack->saveState(dataFile, dataFile.content());
std::vector<std::pair<int,int>> plist;
for (QDomNode n = element.firstChild(); !n.isNull(); n = n.nextSibling())
{
if (n.nodeName() == "bbtco")
{
QDomElement it = n.toElement();
int pos = it.attribute("pos", "0").toInt();
int len = it.attribute("len", "0").toInt();
plist.push_back(std::pair<int,int>(pos, pos+len));
}
}
std::sort(plist.begin(), plist.end());
plists.push_back(plist);
}
} // for each track
// midi tracks in BB tracks
for (Track* track : tracks_BB)
{
DataFile dataFile(DataFile::SongProject);
MTrack mtrack;
auto itr = plists.begin();
std::vector<std::pair<int,int>> st;
if (track->type() != Track::InstrumentTrack) continue;
mtrack.addName(track->name().toStdString(), 0);
//mtrack.addProgramChange(0, 0);
mtrack.addTempo(tempo, 0);
instTrack = dynamic_cast<InstrumentTrack *>(track);
element = instTrack->saveState(dataFile, dataFile.content());
int base_pitch = 0;
double base_volume = 1.0;
for (QDomNode n = element.firstChild(); !n.isNull(); n = n.nextSibling())
{
if (n.nodeName() == "instrumenttrack")
{
QDomElement it = n.toElement();
// transpose +12 semitones, workaround for #1857
base_pitch = (69 - it.attribute("basenote", "57").toInt());
if (it.attribute("usemasterpitch", "1").toInt())
{
base_pitch += masterPitch;
}
base_volume = it.attribute("volume", "100").toDouble() / 100.0;
}
if (n.nodeName() == "pattern")
{
std::vector<std::pair<int,int>> &plist = *itr;
MidiNoteVector nv, pat;
writePattern(pat, n, base_pitch, base_volume, 0);
// workaround for nested BBTCOs
int pos = 0;
int len = n.toElement().attribute("steps", "1").toInt() * 12;
for (auto it = plist.begin(); it != plist.end(); ++it)
{
while (!st.empty() && st.back().second <= it->first)
{
writeBBPattern(pat, nv, len, st.back().first, pos, st.back().second);
pos = st.back().second;
st.pop_back();
}
if (!st.empty() && st.back().second <= it->second)
{
writeBBPattern(pat, nv, len, st.back().first, pos, it->first);
pos = it->first;
while (!st.empty() && st.back().second <= it->second)
{
st.pop_back();
}
}
st.push_back(*it);
pos = it->first;
}
while (!st.empty())
{
writeBBPattern(pat, nv, len, st.back().first, pos, st.back().second);
pos = st.back().second;
st.pop_back();
}
ProcessBBNotes(nv, pos);
writePatternToTrack(mtrack, nv);
++itr;
}
}
size = mtrack.writeToBuffer(buffer);
midiout.writeRawData((char *)buffer, size);
}
return true;
}
void MidiExport::writePattern(MidiNoteVector &pat, QDomNode n,
int base_pitch, double base_volume, int base_time)
{
// TODO interpret steps="12" muted="0" type="1" name="Piano1" len="2592"
for (QDomNode nn = n.firstChild(); !nn.isNull(); nn = nn.nextSibling())
{
QDomElement note = nn.toElement();
if (note.attribute("len", "0") == "0") continue;
// TODO interpret pan="0" fxch="0" pitchrange="1"
MidiNote mnote;
mnote.pitch = qMax(0, qMin(127, note.attribute("key", "0").toInt() + base_pitch));
mnote.volume = qMin(qRound(base_volume * note.attribute("vol", "100").toDouble()), 127);
mnote.time = base_time + note.attribute("pos", "0").toInt();
mnote.duration = note.attribute("len", "0").toInt();
pat.push_back(mnote);
}
}
void MidiExport::writePatternToTrack(MTrack &mtrack, MidiNoteVector &nv)
{
for (auto it = nv.begin(); it != nv.end(); ++it)
{
mtrack.addNote(it->pitch, it->volume, it->time / 48.0, it->duration / 48.0);
}
}
void MidiExport::writeBBPattern(MidiNoteVector &src, MidiNoteVector &dst,
int len, int base, int start, int end)
{
if (start >= end) { return; }
start -= base;
end -= base;
std::sort(src.begin(), src.end());
for (auto it = src.begin(); it != src.end(); ++it)
{
for (int time = it->time + ceil((start - it->time) / len)
* len; time < end; time += len)
{
MidiNote note;
note.duration = it->duration;
note.pitch = it->pitch;
note.time = base + time;
note.volume = it->volume;
dst.push_back(note);
}
}
}
void MidiExport::ProcessBBNotes(MidiNoteVector &nv, int cutPos)
{
std::sort(nv.begin(), nv.end());
int cur = INT_MAX, next = INT_MAX;
for (auto it = nv.rbegin(); it != nv.rend(); ++it)
{
if (it->time < cur)
{
next = cur;
cur = it->time;
}
if (it->duration < 0)
{
it->duration = qMin(qMin(-it->duration, next - cur), cutPos - it->time);
}
}
}
void MidiExport::error()
{
//qDebug() << "MidiExport error: " << m_error ;
}
extern "C"
{
// necessary for getting instance out of shared lib
PLUGIN_EXPORT Plugin * lmms_plugin_main( Model *, void * _data )
{
return new MidiExport();
}
}
| gpl-2.0 |
ElvishArtisan/rivendell | rdadmin/edit_feed_perms.cpp | 2 | 4223 | // edit_feed_perms.cpp
//
// Edit Rivendell RSS Feed Permissions
//
// (C) Copyright 2002-2019 Fred Gleason <fredg@paravelsystems.com>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License version 2 as
// published by the Free Software Foundation.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public
// License along with this program; if not, write to the Free Software
// Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
//
#include <qstring.h>
#include <qpushbutton.h>
#include <q3listbox.h>
#include <q3textedit.h>
#include <qlabel.h>
#include <qpainter.h>
#include <qevent.h>
#include <qmessagebox.h>
#include <qcheckbox.h>
#include <q3buttongroup.h>
#include <rddb.h>
#include <rdescape_string.h>
#include <rdpasswd.h>
#include <rduser.h>
#include "edit_feed_perms.h"
EditFeedPerms::EditFeedPerms(RDUser *user,QWidget *parent)
: RDDialog(parent)
{
setModal(true);
QString sql;
RDSqlQuery *q;
feed_user=user;
//
// Fix the Window Size
//
setMinimumWidth(sizeHint().width());
setMaximumWidth(sizeHint().width());
setMinimumHeight(sizeHint().height());
setMaximumHeight(sizeHint().height());
setWindowTitle("RDAdmin - "+tr("User: ")+feed_user->name());
//
// Feeds Selector
//
feed_host_sel=new RDListSelector(this);
feed_host_sel->sourceSetLabel(tr("Available Feeds"));
feed_host_sel->destSetLabel(tr("Enabled Feeds"));
feed_host_sel->setGeometry(10,10,380,130);
//
// Ok Button
//
QPushButton *ok_button=new QPushButton(this);
ok_button->setGeometry(sizeHint().width()-180,sizeHint().height()-60,80,50);
ok_button->setDefault(true);
ok_button->setFont(buttonFont());
ok_button->setText(tr("&OK"));
connect(ok_button,SIGNAL(clicked()),this,SLOT(okData()));
//
// Cancel Button
//
QPushButton *cancel_button=new QPushButton(this);
cancel_button->setGeometry(sizeHint().width()-90,sizeHint().height()-60,
80,50);
cancel_button->setFont(buttonFont());
cancel_button->setText(tr("&Cancel"));
connect(cancel_button,SIGNAL(clicked()),this,SLOT(cancelData()));
//
// Populate Fields
//
sql=QString("select KEY_NAME from FEED_PERMS where ")+
"USER_NAME=\""+RDEscapeString(feed_user->name())+"\"";
q=new RDSqlQuery(sql);
while(q->next()) {
feed_host_sel->destInsertItem(q->value(0).toString());
}
delete q;
sql=QString("select KEY_NAME from FEEDS");
q=new RDSqlQuery(sql);
while(q->next()) {
if(feed_host_sel->destFindItem(q->value(0).toString(),Q3ListBox::ExactMatch)==0) {
feed_host_sel->sourceInsertItem(q->value(0).toString());
}
}
delete q;
}
EditFeedPerms::~EditFeedPerms()
{
}
QSize EditFeedPerms::sizeHint() const
{
return QSize(400,212);
}
QSizePolicy EditFeedPerms::sizePolicy() const
{
return QSizePolicy(QSizePolicy::Fixed,QSizePolicy::Fixed);
}
void EditFeedPerms::okData()
{
RDSqlQuery *q;
QString sql;
//
// Add New Groups
//
for(unsigned i=0;i<feed_host_sel->destCount();i++) {
sql=QString("select KEY_NAME from FEED_PERMS where ")+
"USER_NAME=\""+RDEscapeString(feed_user->name())+"\" && "+
"KEY_NAME=\""+RDEscapeString(feed_host_sel->destText(i))+"\"";
q=new RDSqlQuery(sql);
if(q->size()==0) {
delete q;
sql=QString("insert into FEED_PERMS (USER_NAME,KEY_NAME) ")+
"values (\""+RDEscapeString(feed_user->name())+"\","+
"\""+RDEscapeString(feed_host_sel->destText(i))+"\")";
q=new RDSqlQuery(sql);
}
delete q;
}
//
// Delete Old Groups
//
sql=QString("delete from FEED_PERMS where ")+
"USER_NAME=\""+RDEscapeString(feed_user->name())+"\"";
for(unsigned i=0;i<feed_host_sel->destCount();i++) {
sql+=QString(" && KEY_NAME<>\"")+
RDEscapeString(feed_host_sel->destText(i))+"\"";
}
q=new RDSqlQuery(sql);
delete q;
done(0);
}
void EditFeedPerms::cancelData()
{
done(1);
}
| gpl-2.0 |
ppiecuch/tiled | src/tiled/tileselectiontool.cpp | 2 | 3966 | /*
* tileselectiontool.cpp
* Copyright 2009-2010, Thorbjørn Lindeijer <thorbjorn@lindeijer.nl>
*
* This file is part of Tiled.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "tileselectiontool.h"
#include "brushitem.h"
#include "changetileselection.h"
#include "map.h"
#include "mapdocument.h"
#include "mapscene.h"
#include "tilelayer.h"
using namespace Tiled;
using namespace Tiled::Internal;
TileSelectionTool::TileSelectionTool(QObject *parent)
: AbstractTileTool(tr("Rectangular Select"),
QIcon(QLatin1String(
":images/22x22/stock-tool-rect-select.png")),
QKeySequence(tr("R")),
parent)
, mSelectionMode(Replace)
, mSelecting(false)
{
setTilePositionMethod(BetweenTiles);
}
void TileSelectionTool::tilePositionChanged(const QPoint &)
{
if (mSelecting)
brushItem()->setTileRegion(selectedArea());
}
void TileSelectionTool::updateStatusInfo()
{
if (!isBrushVisible() || !mSelecting) {
AbstractTileTool::updateStatusInfo();
return;
}
const QPoint pos = tilePosition();
const QRect area = selectedArea();
setStatusInfo(tr("%1, %2 - Rectangle: (%3 x %4)")
.arg(pos.x()).arg(pos.y())
.arg(area.width()).arg(area.height()));
}
void TileSelectionTool::mousePressed(QGraphicsSceneMouseEvent *event)
{
const Qt::MouseButton button = event->button();
const Qt::KeyboardModifiers modifiers = event->modifiers();
if (button == Qt::LeftButton) {
if (modifiers == Qt::ControlModifier) {
mSelectionMode = Subtract;
} else if (modifiers == Qt::ShiftModifier) {
mSelectionMode = Add;
} else if (modifiers == (Qt::ControlModifier | Qt::ShiftModifier)) {
mSelectionMode = Intersect;
} else {
mSelectionMode = Replace;
}
mSelecting = true;
mSelectionStart = tilePosition();
brushItem()->setTileRegion(QRegion());
}
}
void TileSelectionTool::mouseReleased(QGraphicsSceneMouseEvent *event)
{
if (event->button() == Qt::LeftButton) {
mSelecting = false;
MapDocument *document = mapDocument();
QRegion selection = document->tileSelection();
const QRect area = selectedArea();
switch (mSelectionMode) {
case Replace: selection = area; break;
case Add: selection += area; break;
case Subtract: selection -= area; break;
case Intersect: selection &= area; break;
}
if (selection != document->tileSelection()) {
QUndoCommand *cmd = new ChangeTileSelection(document, selection);
document->undoStack()->push(cmd);
}
brushItem()->setTileRegion(QRegion());
updateStatusInfo();
}
}
void TileSelectionTool::languageChanged()
{
setName(tr("Rectangular Select"));
setShortcut(QKeySequence(tr("R")));
}
QRect TileSelectionTool::selectedArea() const
{
const QPoint tilePos = tilePosition();
const QPoint pos(qMin(tilePos.x(), mSelectionStart.x()),
qMin(tilePos.y(), mSelectionStart.y()));
const QSize size(qAbs(tilePos.x() - mSelectionStart.x()),
qAbs(tilePos.y() - mSelectionStart.y()));
return QRect(pos, size);
}
| gpl-2.0 |
linux-wpan/linux-wpan | arch/arm/mach-shmobile/board-kzm9g.c | 2 | 22257 | /*
* KZM-A9-GT board support
*
* Copyright (C) 2012 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/gpio_keys.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/i2c.h>
#include <linux/i2c/pcf857x.h>
#include <linux/input.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sh_mmcif.h>
#include <linux/mmc/sh_mobile_sdhi.h>
#include <linux/mfd/as3711.h>
#include <linux/mfd/tmio.h>
#include <linux/pinctrl/machine.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>
#include <linux/smsc911x.h>
#include <linux/usb/r8a66597.h>
#include <linux/usb/renesas_usbhs.h>
#include <linux/videodev2.h>
#include <sound/sh_fsi.h>
#include <sound/simple_card.h>
#include <mach/irqs.h>
#include <mach/sh73a0.h>
#include <mach/common.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <video/sh_mobile_lcdc.h>
/*
* external GPIO
*/
#define GPIO_PCF8575_BASE (310)
#define GPIO_PCF8575_PORT10 (GPIO_PCF8575_BASE + 8)
#define GPIO_PCF8575_PORT11 (GPIO_PCF8575_BASE + 9)
#define GPIO_PCF8575_PORT12 (GPIO_PCF8575_BASE + 10)
#define GPIO_PCF8575_PORT13 (GPIO_PCF8575_BASE + 11)
#define GPIO_PCF8575_PORT14 (GPIO_PCF8575_BASE + 12)
#define GPIO_PCF8575_PORT15 (GPIO_PCF8575_BASE + 13)
#define GPIO_PCF8575_PORT16 (GPIO_PCF8575_BASE + 14)
/* Dummy supplies, where voltage doesn't matter */
static struct regulator_consumer_supply dummy_supplies[] = {
REGULATOR_SUPPLY("vddvario", "smsc911x.0"),
REGULATOR_SUPPLY("vdd33a", "smsc911x.0"),
};
/*
* FSI-AK4648
*
* this command is required for playback.
*
* # amixer set "LINEOUT Mixer DACL" on
*/
/* SMSC 9221 */
static struct resource smsc9221_resources[] = {
[0] = {
.start = 0x10000000, /* CS4 */
.end = 0x100000ff,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = irq_pin(3), /* IRQ3 */
.flags = IORESOURCE_IRQ,
},
};
static struct smsc911x_platform_config smsc9221_platdata = {
.flags = SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS,
.phy_interface = PHY_INTERFACE_MODE_MII,
.irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
.irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL,
};
static struct platform_device smsc_device = {
.name = "smsc911x",
.dev = {
.platform_data = &smsc9221_platdata,
},
.resource = smsc9221_resources,
.num_resources = ARRAY_SIZE(smsc9221_resources),
};
/* USB external chip */
static struct r8a66597_platdata usb_host_data = {
.on_chip = 0,
.xtal = R8A66597_PLATDATA_XTAL_48MHZ,
};
static struct resource usb_resources[] = {
[0] = {
.start = 0x10010000,
.end = 0x1001ffff - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = irq_pin(1), /* IRQ1 */
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device usb_host_device = {
.name = "r8a66597_hcd",
.dev = {
.platform_data = &usb_host_data,
.dma_mask = NULL,
.coherent_dma_mask = 0xffffffff,
},
.num_resources = ARRAY_SIZE(usb_resources),
.resource = usb_resources,
};
/* USB Func CN17 */
struct usbhs_private {
void __iomem *phy;
void __iomem *cr2;
struct renesas_usbhs_platform_info info;
};
#define IRQ15 irq_pin(15)
#define USB_PHY_MODE (1 << 4)
#define USB_PHY_INT_EN ((1 << 3) | (1 << 2))
#define USB_PHY_ON (1 << 1)
#define USB_PHY_OFF (1 << 0)
#define USB_PHY_INT_CLR (USB_PHY_ON | USB_PHY_OFF)
#define usbhs_get_priv(pdev) \
container_of(renesas_usbhs_get_info(pdev), struct usbhs_private, info)
static int usbhs_get_vbus(struct platform_device *pdev)
{
struct usbhs_private *priv = usbhs_get_priv(pdev);
return !((1 << 7) & __raw_readw(priv->cr2));
}
static int usbhs_phy_reset(struct platform_device *pdev)
{
struct usbhs_private *priv = usbhs_get_priv(pdev);
/* init phy */
__raw_writew(0x8a0a, priv->cr2);
return 0;
}
static int usbhs_get_id(struct platform_device *pdev)
{
return USBHS_GADGET;
}
static irqreturn_t usbhs_interrupt(int irq, void *data)
{
struct platform_device *pdev = data;
struct usbhs_private *priv = usbhs_get_priv(pdev);
renesas_usbhs_call_notify_hotplug(pdev);
/* clear status */
__raw_writew(__raw_readw(priv->phy) | USB_PHY_INT_CLR, priv->phy);
return IRQ_HANDLED;
}
static int usbhs_hardware_init(struct platform_device *pdev)
{
struct usbhs_private *priv = usbhs_get_priv(pdev);
int ret;
/* clear interrupt status */
__raw_writew(USB_PHY_MODE | USB_PHY_INT_CLR, priv->phy);
ret = request_irq(IRQ15, usbhs_interrupt, IRQF_TRIGGER_HIGH,
dev_name(&pdev->dev), pdev);
if (ret) {
dev_err(&pdev->dev, "request_irq err\n");
return ret;
}
/* enable USB phy interrupt */
__raw_writew(USB_PHY_MODE | USB_PHY_INT_EN, priv->phy);
return 0;
}
static int usbhs_hardware_exit(struct platform_device *pdev)
{
struct usbhs_private *priv = usbhs_get_priv(pdev);
/* clear interrupt status */
__raw_writew(USB_PHY_MODE | USB_PHY_INT_CLR, priv->phy);
free_irq(IRQ15, pdev);
return 0;
}
static u32 usbhs_pipe_cfg[] = {
USB_ENDPOINT_XFER_CONTROL,
USB_ENDPOINT_XFER_ISOC,
USB_ENDPOINT_XFER_ISOC,
USB_ENDPOINT_XFER_BULK,
USB_ENDPOINT_XFER_BULK,
USB_ENDPOINT_XFER_BULK,
USB_ENDPOINT_XFER_INT,
USB_ENDPOINT_XFER_INT,
USB_ENDPOINT_XFER_INT,
USB_ENDPOINT_XFER_BULK,
USB_ENDPOINT_XFER_BULK,
USB_ENDPOINT_XFER_BULK,
USB_ENDPOINT_XFER_BULK,
USB_ENDPOINT_XFER_BULK,
USB_ENDPOINT_XFER_BULK,
USB_ENDPOINT_XFER_BULK,
};
static struct usbhs_private usbhs_private = {
.phy = IOMEM(0xe60781e0), /* USBPHYINT */
.cr2 = IOMEM(0xe605810c), /* USBCR2 */
.info = {
.platform_callback = {
.hardware_init = usbhs_hardware_init,
.hardware_exit = usbhs_hardware_exit,
.get_id = usbhs_get_id,
.phy_reset = usbhs_phy_reset,
.get_vbus = usbhs_get_vbus,
},
.driver_param = {
.buswait_bwait = 4,
.has_otg = 1,
.pipe_type = usbhs_pipe_cfg,
.pipe_size = ARRAY_SIZE(usbhs_pipe_cfg),
},
},
};
static struct resource usbhs_resources[] = {
[0] = {
.start = 0xE6890000,
.end = 0xE68900e6 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = gic_spi(62),
.end = gic_spi(62),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device usbhs_device = {
.name = "renesas_usbhs",
.id = -1,
.dev = {
.dma_mask = NULL,
.coherent_dma_mask = 0xffffffff,
.platform_data = &usbhs_private.info,
},
.num_resources = ARRAY_SIZE(usbhs_resources),
.resource = usbhs_resources,
};
/* LCDC */
static struct fb_videomode kzm_lcdc_mode = {
.name = "WVGA Panel",
.xres = 800,
.yres = 480,
.left_margin = 220,
.right_margin = 110,
.hsync_len = 70,
.upper_margin = 20,
.lower_margin = 5,
.vsync_len = 5,
.sync = 0,
};
static struct sh_mobile_lcdc_info lcdc_info = {
.clock_source = LCDC_CLK_BUS,
.ch[0] = {
.chan = LCDC_CHAN_MAINLCD,
.fourcc = V4L2_PIX_FMT_RGB565,
.interface_type = RGB24,
.lcd_modes = &kzm_lcdc_mode,
.num_modes = 1,
.clock_divider = 5,
.flags = 0,
.panel_cfg = {
.width = 152,
.height = 91,
},
}
};
static struct resource lcdc_resources[] = {
[0] = {
.name = "LCDC",
.start = 0xfe940000,
.end = 0xfe943fff,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = intcs_evt2irq(0x580),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device lcdc_device = {
.name = "sh_mobile_lcdc_fb",
.num_resources = ARRAY_SIZE(lcdc_resources),
.resource = lcdc_resources,
.dev = {
.platform_data = &lcdc_info,
.coherent_dma_mask = DMA_BIT_MASK(32),
},
};
/* Fixed 1.8V regulator to be used by MMCIF */
static struct regulator_consumer_supply fixed1v8_power_consumers[] =
{
REGULATOR_SUPPLY("vmmc", "sh_mmcif.0"),
REGULATOR_SUPPLY("vqmmc", "sh_mmcif.0"),
};
/* MMCIF */
static struct resource sh_mmcif_resources[] = {
[0] = {
.name = "MMCIF",
.start = 0xe6bd0000,
.end = 0xe6bd00ff,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = gic_spi(140),
.flags = IORESOURCE_IRQ,
},
[2] = {
.start = gic_spi(141),
.flags = IORESOURCE_IRQ,
},
};
static struct sh_mmcif_plat_data sh_mmcif_platdata = {
.ocr = MMC_VDD_165_195,
.caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
.ccs_unsupported = true,
.slave_id_tx = SHDMA_SLAVE_MMCIF_TX,
.slave_id_rx = SHDMA_SLAVE_MMCIF_RX,
};
static struct platform_device mmc_device = {
.name = "sh_mmcif",
.dev = {
.dma_mask = NULL,
.coherent_dma_mask = 0xffffffff,
.platform_data = &sh_mmcif_platdata,
},
.num_resources = ARRAY_SIZE(sh_mmcif_resources),
.resource = sh_mmcif_resources,
};
/* Fixed 3.3V regulators to be used by SDHI0 */
static struct regulator_consumer_supply vcc_sdhi0_consumers[] =
{
REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"),
};
static struct regulator_init_data vcc_sdhi0_init_data = {
.constraints = {
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
.num_consumer_supplies = ARRAY_SIZE(vcc_sdhi0_consumers),
.consumer_supplies = vcc_sdhi0_consumers,
};
static struct fixed_voltage_config vcc_sdhi0_info = {
.supply_name = "SDHI0 Vcc",
.microvolts = 3300000,
.gpio = 15,
.enable_high = 1,
.init_data = &vcc_sdhi0_init_data,
};
static struct platform_device vcc_sdhi0 = {
.name = "reg-fixed-voltage",
.id = 0,
.dev = {
.platform_data = &vcc_sdhi0_info,
},
};
/* Fixed 3.3V regulators to be used by SDHI2 */
static struct regulator_consumer_supply vcc_sdhi2_consumers[] =
{
REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.2"),
};
static struct regulator_init_data vcc_sdhi2_init_data = {
.constraints = {
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
.num_consumer_supplies = ARRAY_SIZE(vcc_sdhi2_consumers),
.consumer_supplies = vcc_sdhi2_consumers,
};
static struct fixed_voltage_config vcc_sdhi2_info = {
.supply_name = "SDHI2 Vcc",
.microvolts = 3300000,
.gpio = 14,
.enable_high = 1,
.init_data = &vcc_sdhi2_init_data,
};
static struct platform_device vcc_sdhi2 = {
.name = "reg-fixed-voltage",
.id = 1,
.dev = {
.platform_data = &vcc_sdhi2_info,
},
};
/* SDHI */
static struct sh_mobile_sdhi_info sdhi0_info = {
.dma_slave_tx = SHDMA_SLAVE_SDHI0_TX,
.dma_slave_rx = SHDMA_SLAVE_SDHI0_RX,
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT,
.tmio_caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
MMC_CAP_POWER_OFF_CARD,
};
static struct resource sdhi0_resources[] = {
[0] = {
.name = "SDHI0",
.start = 0xee100000,
.end = 0xee1000ff,
.flags = IORESOURCE_MEM,
},
[1] = {
.name = SH_MOBILE_SDHI_IRQ_CARD_DETECT,
.start = gic_spi(83),
.flags = IORESOURCE_IRQ,
},
[2] = {
.name = SH_MOBILE_SDHI_IRQ_SDCARD,
.start = gic_spi(84),
.flags = IORESOURCE_IRQ,
},
[3] = {
.name = SH_MOBILE_SDHI_IRQ_SDIO,
.start = gic_spi(85),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device sdhi0_device = {
.name = "sh_mobile_sdhi",
.num_resources = ARRAY_SIZE(sdhi0_resources),
.resource = sdhi0_resources,
.dev = {
.platform_data = &sdhi0_info,
},
};
/* Micro SD */
static struct sh_mobile_sdhi_info sdhi2_info = {
.dma_slave_tx = SHDMA_SLAVE_SDHI2_TX,
.dma_slave_rx = SHDMA_SLAVE_SDHI2_RX,
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT |
TMIO_MMC_USE_GPIO_CD |
TMIO_MMC_WRPROTECT_DISABLE,
.tmio_caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_POWER_OFF_CARD,
.cd_gpio = 13,
};
static struct resource sdhi2_resources[] = {
[0] = {
.name = "SDHI2",
.start = 0xee140000,
.end = 0xee1400ff,
.flags = IORESOURCE_MEM,
},
[1] = {
.name = SH_MOBILE_SDHI_IRQ_CARD_DETECT,
.start = gic_spi(103),
.flags = IORESOURCE_IRQ,
},
[2] = {
.name = SH_MOBILE_SDHI_IRQ_SDCARD,
.start = gic_spi(104),
.flags = IORESOURCE_IRQ,
},
[3] = {
.name = SH_MOBILE_SDHI_IRQ_SDIO,
.start = gic_spi(105),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device sdhi2_device = {
.name = "sh_mobile_sdhi",
.id = 2,
.num_resources = ARRAY_SIZE(sdhi2_resources),
.resource = sdhi2_resources,
.dev = {
.platform_data = &sdhi2_info,
},
};
/* KEY */
#define GPIO_KEY(c, g, d) { .code = c, .gpio = g, .desc = d, .active_low = 1 }
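/*
 * Illustrative expansion (not part of the original board file): the first
 * entry below, GPIO_KEY(KEY_BACK, GPIO_PCF8575_PORT10, "SW3"), becomes
 * { .code = KEY_BACK, .gpio = GPIO_PCF8575_PORT10, .desc = "SW3",
 *   .active_low = 1 }, i.e. the key reports "pressed" when the expander
 * line is pulled low.
 */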
static struct gpio_keys_button gpio_buttons[] = {
GPIO_KEY(KEY_BACK, GPIO_PCF8575_PORT10, "SW3"),
GPIO_KEY(KEY_RIGHT, GPIO_PCF8575_PORT11, "SW2-R"),
GPIO_KEY(KEY_LEFT, GPIO_PCF8575_PORT12, "SW2-L"),
GPIO_KEY(KEY_ENTER, GPIO_PCF8575_PORT13, "SW2-P"),
GPIO_KEY(KEY_UP, GPIO_PCF8575_PORT14, "SW2-U"),
GPIO_KEY(KEY_DOWN, GPIO_PCF8575_PORT15, "SW2-D"),
GPIO_KEY(KEY_HOME, GPIO_PCF8575_PORT16, "SW1"),
};
static struct gpio_keys_platform_data gpio_key_info = {
.buttons = gpio_buttons,
.nbuttons = ARRAY_SIZE(gpio_buttons),
};
static struct platform_device gpio_keys_device = {
.name = "gpio-keys",
.dev = {
.platform_data = &gpio_key_info,
},
};
/* FSI-AK4648 */
static struct sh_fsi_platform_info fsi_info = {
.port_a = {
.tx_id = SHDMA_SLAVE_FSI2A_TX,
},
};
static struct resource fsi_resources[] = {
[0] = {
.name = "FSI",
.start = 0xEC230000,
.end = 0xEC230400 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = gic_spi(146),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device fsi_device = {
.name = "sh_fsi2",
.id = -1,
.num_resources = ARRAY_SIZE(fsi_resources),
.resource = fsi_resources,
.dev = {
.platform_data = &fsi_info,
},
};
static struct asoc_simple_card_info fsi2_ak4648_info = {
.name = "AK4648",
.card = "FSI2A-AK4648",
.codec = "ak4642-codec.0-0012",
.platform = "sh_fsi2",
.daifmt = SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_CBM_CFM,
.cpu_dai = {
.name = "fsia-dai",
},
.codec_dai = {
.name = "ak4642-hifi",
.sysclk = 11289600,
},
};
static struct platform_device fsi_ak4648_device = {
.name = "asoc-simple-card",
.dev = {
.platform_data = &fsi2_ak4648_info,
.coherent_dma_mask = DMA_BIT_MASK(32),
.dma_mask = &fsi_ak4648_device.dev.coherent_dma_mask,
},
};
/* I2C */
/* StepDown1 is used to supply 1.315V to the CPU */
static struct regulator_init_data as3711_sd1 = {
.constraints = {
.name = "1.315V CPU",
.boot_on = 1,
.always_on = 1,
.min_uV = 1315000,
.max_uV = 1335000,
},
};
/* StepDown2 is used to supply 1.8V to the CPU and to the board */
static struct regulator_init_data as3711_sd2 = {
.constraints = {
.name = "1.8V",
.boot_on = 1,
.always_on = 1,
.min_uV = 1800000,
.max_uV = 1800000,
},
};
/*
* StepDown3 is switched in parallel with StepDown2, seems to be off,
* according to read-back pre-set register values
*/
/* StepDown4 is used to supply 1.215V to the CPU and to the board */
static struct regulator_init_data as3711_sd4 = {
.constraints = {
.name = "1.215V",
.boot_on = 1,
.always_on = 1,
.min_uV = 1215000,
.max_uV = 1235000,
},
};
/* LDO1 is unused and unconnected */
/* LDO2 is used to supply 2.8V to the CPU */
static struct regulator_init_data as3711_ldo2 = {
.constraints = {
.name = "2.8V CPU",
.boot_on = 1,
.always_on = 1,
.min_uV = 2800000,
.max_uV = 2800000,
},
};
/* LDO3 is used to supply 3.0V to the CPU */
static struct regulator_init_data as3711_ldo3 = {
.constraints = {
.name = "3.0V CPU",
.boot_on = 1,
.always_on = 1,
.min_uV = 3000000,
.max_uV = 3000000,
},
};
/* LDO4 is used to supply 2.8V to the board */
static struct regulator_init_data as3711_ldo4 = {
.constraints = {
.name = "2.8V",
.boot_on = 1,
.always_on = 1,
.min_uV = 2800000,
.max_uV = 2800000,
},
};
/* LDO5 is switched parallel to LDO4, also set to 2.8V */
static struct regulator_init_data as3711_ldo5 = {
.constraints = {
.name = "2.8V #2",
.boot_on = 1,
.always_on = 1,
.min_uV = 2800000,
.max_uV = 2800000,
},
};
/* LDO6 is unused and unconnected */
/* LDO7 is used to supply 1.15V to the CPU */
static struct regulator_init_data as3711_ldo7 = {
.constraints = {
.name = "1.15V CPU",
.boot_on = 1,
.always_on = 1,
.min_uV = 1150000,
.max_uV = 1150000,
},
};
/* LDO8 is switched parallel to LDO7, also set to 1.15V */
static struct regulator_init_data as3711_ldo8 = {
.constraints = {
.name = "1.15V CPU #2",
.boot_on = 1,
.always_on = 1,
.min_uV = 1150000,
.max_uV = 1150000,
},
};
static struct as3711_platform_data as3711_pdata = {
.regulator = {
.init_data = {
[AS3711_REGULATOR_SD_1] = &as3711_sd1,
[AS3711_REGULATOR_SD_2] = &as3711_sd2,
[AS3711_REGULATOR_SD_4] = &as3711_sd4,
[AS3711_REGULATOR_LDO_2] = &as3711_ldo2,
[AS3711_REGULATOR_LDO_3] = &as3711_ldo3,
[AS3711_REGULATOR_LDO_4] = &as3711_ldo4,
[AS3711_REGULATOR_LDO_5] = &as3711_ldo5,
[AS3711_REGULATOR_LDO_7] = &as3711_ldo7,
[AS3711_REGULATOR_LDO_8] = &as3711_ldo8,
},
},
.backlight = {
.su2_fb = "sh_mobile_lcdc_fb.0",
.su2_max_uA = 36000,
.su2_feedback = AS3711_SU2_CURR_AUTO,
.su2_fbprot = AS3711_SU2_GPIO4,
.su2_auto_curr1 = true,
.su2_auto_curr2 = true,
.su2_auto_curr3 = true,
},
};
static struct pcf857x_platform_data pcf8575_pdata = {
.gpio_base = GPIO_PCF8575_BASE,
};
static struct i2c_board_info i2c0_devices[] = {
{
I2C_BOARD_INFO("ak4648", 0x12),
},
{
I2C_BOARD_INFO("r2025sd", 0x32),
},
{
I2C_BOARD_INFO("ak8975", 0x0c),
.irq = irq_pin(28), /* IRQ28 */
},
{
I2C_BOARD_INFO("adxl34x", 0x1d),
.irq = irq_pin(26), /* IRQ26 */
},
{
I2C_BOARD_INFO("as3711", 0x40),
.irq = intcs_evt2irq(0x3300), /* IRQ24 */
.platform_data = &as3711_pdata,
},
};
static struct i2c_board_info i2c1_devices[] = {
{
I2C_BOARD_INFO("st1232-ts", 0x55),
.irq = irq_pin(8), /* IRQ8 */
},
};
static struct i2c_board_info i2c3_devices[] = {
{
I2C_BOARD_INFO("pcf8575", 0x20),
.irq = irq_pin(19), /* IRQ19 */
.platform_data = &pcf8575_pdata,
},
};
static struct platform_device *kzm_devices[] __initdata = {
&smsc_device,
&usb_host_device,
&usbhs_device,
&lcdc_device,
&mmc_device,
&vcc_sdhi0,
&vcc_sdhi2,
&sdhi0_device,
&sdhi2_device,
&gpio_keys_device,
&fsi_device,
&fsi_ak4648_device,
};
static unsigned long pin_pullup_conf[] = {
PIN_CONF_PACKED(PIN_CONFIG_BIAS_PULL_UP, 0),
};
static const struct pinctrl_map kzm_pinctrl_map[] = {
/* FSIA (AK4648) */
PIN_MAP_MUX_GROUP_DEFAULT("sh_fsi2", "pfc-sh73a0",
"fsia_mclk_in", "fsia"),
PIN_MAP_MUX_GROUP_DEFAULT("sh_fsi2", "pfc-sh73a0",
"fsia_sclk_in", "fsia"),
PIN_MAP_MUX_GROUP_DEFAULT("sh_fsi2", "pfc-sh73a0",
"fsia_data_in", "fsia"),
PIN_MAP_MUX_GROUP_DEFAULT("sh_fsi2", "pfc-sh73a0",
"fsia_data_out", "fsia"),
/* I2C3 */
PIN_MAP_MUX_GROUP_DEFAULT("i2c-sh_mobile.3", "pfc-sh73a0",
"i2c3_1", "i2c3"),
/* LCD */
PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_lcdc_fb.0", "pfc-sh73a0",
"lcd_data24", "lcd"),
PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_lcdc_fb.0", "pfc-sh73a0",
"lcd_sync", "lcd"),
/* MMCIF */
PIN_MAP_MUX_GROUP_DEFAULT("sh_mmcif.0", "pfc-sh73a0",
"mmc0_data8_0", "mmc0"),
PIN_MAP_MUX_GROUP_DEFAULT("sh_mmcif.0", "pfc-sh73a0",
"mmc0_ctrl_0", "mmc0"),
PIN_MAP_CONFIGS_PIN_DEFAULT("sh_mmcif.0", "pfc-sh73a0",
"PORT279", pin_pullup_conf),
PIN_MAP_CONFIGS_GROUP_DEFAULT("sh_mmcif.0", "pfc-sh73a0",
"mmc0_data8_0", pin_pullup_conf),
/* SCIFA4 */
PIN_MAP_MUX_GROUP_DEFAULT("sh-sci.4", "pfc-sh73a0",
"scifa4_data", "scifa4"),
PIN_MAP_MUX_GROUP_DEFAULT("sh-sci.4", "pfc-sh73a0",
"scifa4_ctrl", "scifa4"),
/* SDHI0 */
PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-sh73a0",
"sdhi0_data4", "sdhi0"),
PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-sh73a0",
"sdhi0_ctrl", "sdhi0"),
PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-sh73a0",
"sdhi0_cd", "sdhi0"),
PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-sh73a0",
"sdhi0_wp", "sdhi0"),
/* SDHI2 */
PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.2", "pfc-sh73a0",
"sdhi2_data4", "sdhi2"),
PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.2", "pfc-sh73a0",
"sdhi2_ctrl", "sdhi2"),
/* SMSC */
PIN_MAP_MUX_GROUP_DEFAULT("smsc911x.0", "pfc-sh73a0",
"bsc_cs4", "bsc"),
/* USB */
PIN_MAP_MUX_GROUP_DEFAULT("renesas_usbhs", "pfc-sh73a0",
"usb_vbus", "usb"),
};
static void __init kzm_init(void)
{
regulator_register_always_on(2, "fixed-1.8V", fixed1v8_power_consumers,
ARRAY_SIZE(fixed1v8_power_consumers), 1800000);
regulator_register_fixed(3, dummy_supplies, ARRAY_SIZE(dummy_supplies));
pinctrl_register_mappings(kzm_pinctrl_map, ARRAY_SIZE(kzm_pinctrl_map));
sh73a0_pinmux_init();
/* SMSC */
gpio_request_one(224, GPIOF_IN, NULL); /* IRQ3 */
/* LCDC */
gpio_request_one(222, GPIOF_OUT_INIT_HIGH, NULL); /* LCDCDON */
gpio_request_one(226, GPIOF_OUT_INIT_HIGH, NULL); /* SC */
/* Touchscreen */
gpio_request_one(223, GPIOF_IN, NULL); /* IRQ8 */
#ifdef CONFIG_CACHE_L2X0
/* Shared attribute override enable, 64K*8way */
l2x0_init(IOMEM(0xf0100000), 0x00400000, 0xc20f0fff);
#endif
i2c_register_board_info(0, i2c0_devices, ARRAY_SIZE(i2c0_devices));
i2c_register_board_info(1, i2c1_devices, ARRAY_SIZE(i2c1_devices));
i2c_register_board_info(3, i2c3_devices, ARRAY_SIZE(i2c3_devices));
sh73a0_add_standard_devices();
platform_add_devices(kzm_devices, ARRAY_SIZE(kzm_devices));
sh73a0_pm_init();
}
static void kzm9g_restart(enum reboot_mode mode, const char *cmd)
{
#define RESCNT2 IOMEM(0xe6188020)
/* Do soft power on reset */
writel((1 << 31), RESCNT2);
}
static const char *kzm9g_boards_compat_dt[] __initdata = {
"renesas,kzm9g",
NULL,
};
DT_MACHINE_START(KZM9G_DT, "kzm9g")
.smp = smp_ops(sh73a0_smp_ops),
.map_io = sh73a0_map_io,
.init_early = sh73a0_add_early_devices,
.nr_irqs = NR_IRQS_LEGACY,
.init_irq = sh73a0_init_irq,
.init_machine = kzm_init,
.init_late = shmobile_init_late,
.init_time = sh73a0_earlytimer_init,
.restart = kzm9g_restart,
.dt_compat = kzm9g_boards_compat_dt,
MACHINE_END
| gpl-2.0 |
igraph/igraph | tests/regression/igraph_layout_reingold_tilford_bug_879.c | 2 | 1722 | /* -*- mode: C -*- */
/*
IGraph library.
Copyright (C) 2006-2012 Gabor Csardi <csardi.gabor@gmail.com>
334 Harvard st, Cambridge MA, 02139 USA
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
*/
#include <igraph.h>
#include <math.h>
#include "../unit/test_utilities.h"
int main(void) {
igraph_t g;
FILE *f;
igraph_matrix_t coords;
igraph_vector_int_t roots;
igraph_integer_t i, n;
f = fopen("igraph_layout_reingold_tilford_bug_879.in", "r");
IGRAPH_ASSERT(f != NULL);
igraph_read_graph_edgelist(&g, f, 0, IGRAPH_UNDIRECTED);
igraph_matrix_init(&coords, 0, 0);
igraph_vector_int_init(&roots, 0);
igraph_vector_int_push_back(&roots, 0);
igraph_layout_reingold_tilford(&g, &coords, IGRAPH_OUT, &roots, 0);
n = igraph_vcount(&g);
for (i = 0; i < n; i++) {
printf("%6.3f %6.3f\n", MATRIX(coords, i, 0), MATRIX(coords, i, 1));
}
igraph_matrix_destroy(&coords);
igraph_vector_int_destroy(&roots);
igraph_destroy(&g);
VERIFY_FINALLY_STACK();
return 0;
}
| gpl-2.0 |
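The regression test above drives the layout from an edge-list file; as a minimal self-contained sketch of the same API calls, a hand-built tree works too. igraph_small and the exact igraph_layout_reingold_tilford signature are assumed from this test and may differ between igraph versions:

#include <igraph.h>

int main(void) {
    igraph_t g;
    igraph_matrix_t coords;
    igraph_vector_int_t roots;

    /* Small undirected tree 0-1, 0-2, 1-3 (igraph_small is assumed to be
       available; older releases spell graph construction differently). */
    igraph_small(&g, 0, IGRAPH_UNDIRECTED, 0, 1, 0, 2, 1, 3, -1);
    igraph_matrix_init(&coords, 0, 0);
    igraph_vector_int_init(&roots, 0);
    igraph_vector_int_push_back(&roots, 0); /* root the layout at vertex 0 */
    igraph_layout_reingold_tilford(&g, &coords, IGRAPH_OUT, &roots, 0);
    /* coords now holds one (x, y) row per vertex, as printed above. */
    igraph_matrix_destroy(&coords);
    igraph_vector_int_destroy(&roots);
    igraph_destroy(&g);
    return 0;
}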
ioan-chera/prboom-plus-24 | src/p_genlin.c | 2 | 33870 | /* Emacs style mode select -*- C++ -*-
*-----------------------------------------------------------------------------
*
*
* PrBoom: a Doom port merged with LxDoom and LSDLDoom
* based on BOOM, a modified and improved DOOM engine
* Copyright (C) 1999 by
* id Software, Chi Hoang, Lee Killough, Jim Flynn, Rand Phares, Ty Halderman
* Copyright (C) 1999-2000 by
* Jess Haas, Nicolas Kalkhof, Colin Phipps, Florian Schulze
* Copyright 2005, 2006 by
* Florian Schulze, Colin Phipps, Neil Stevens, Andrey Budko
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
* 02111-1307, USA.
*
* DESCRIPTION:
* Generalized linedef type handlers
* Floors, Ceilings, Doors, Locked Doors, Lifts, Stairs, Crushers
*
*-----------------------------------------------------------------------------*/
#include "doomstat.h" //jff 6/19/98 for demo_compatibility
#include "r_main.h"
#include "p_spec.h"
#include "p_tick.h"
#include "m_random.h"
#include "s_sound.h"
#include "sounds.h"
#include "e6y.h"
//////////////////////////////////////////////////////////
//
// Generalized Linedef Type handlers
//
//////////////////////////////////////////////////////////
//
// EV_DoGenFloor()
//
// Handle generalized floor types
//
// Passed the line activating the generalized floor function
// Returns true if a thinker is created
//
// jff 02/04/98 Added this routine (and file) to handle generalized
// floor movers using bit fields in the line special type.
//
int EV_DoGenFloor
( line_t* line )
{
int secnum;
int rtn;
dboolean manual;
sector_t* sec;
floormove_t* floor;
unsigned value = (unsigned)line->special - GenFloorBase;
// parse the bit fields in the line's special type
int Crsh = (value & FloorCrush) >> FloorCrushShift;
int ChgT = (value & FloorChange) >> FloorChangeShift;
int Targ = (value & FloorTarget) >> FloorTargetShift;
int Dirn = (value & FloorDirection) >> FloorDirectionShift;
int ChgM = (value & FloorModel) >> FloorModelShift;
int Sped = (value & FloorSpeed) >> FloorSpeedShift;
int Trig = (value & TriggerType) >> TriggerTypeShift;
rtn = 0;
if (ProcessNoTagLines(line, &sec, &secnum)) {if (zerotag_manual) {manual = true; goto manual_floor;} else {return rtn;}};//e6y
// check if a manual trigger, if so do just the sector on the backside
manual = false;
if (Trig==PushOnce || Trig==PushMany)
{
if (!(sec = line->backsector))
return rtn;
secnum = sec->iSectorID;
manual = true;
goto manual_floor;
}
secnum = -1;
// if not manual do all sectors tagged the same as the line
while ((secnum = P_FindSectorFromLineTag(line,secnum)) >= 0)
{
sec = &sectors[secnum];
manual_floor:
// Do not start another function if floor already moving
if (P_SectorActive(floor_special,sec))
{
if (!manual)
continue;
else
return rtn;
}
// new floor thinker
rtn = 1;
floor = Z_Malloc (sizeof(*floor), PU_LEVSPEC, 0);
memset(floor, 0, sizeof(*floor));
P_AddThinker (&floor->thinker);
sec->floordata = floor;
floor->thinker.function = T_MoveFloor;
floor->crush = Crsh;
floor->direction = Dirn? 1 : -1;
floor->sector = sec;
floor->texture = sec->floorpic;
floor->newspecial = sec->special;
//jff 3/14/98 transfer old special field too
floor->oldspecial = sec->oldspecial;
floor->type = genFloor;
// set the speed of motion
switch (Sped)
{
case SpeedSlow:
floor->speed = FLOORSPEED;
break;
case SpeedNormal:
floor->speed = FLOORSPEED*2;
break;
case SpeedFast:
floor->speed = FLOORSPEED*4;
break;
case SpeedTurbo:
floor->speed = FLOORSPEED*8;
break;
default:
break;
}
// set the destination height
switch(Targ)
{
case FtoHnF:
floor->floordestheight = P_FindHighestFloorSurrounding(sec);
break;
case FtoLnF:
floor->floordestheight = P_FindLowestFloorSurrounding(sec);
break;
case FtoNnF:
floor->floordestheight = Dirn?
P_FindNextHighestFloor(sec,sec->floorheight) :
P_FindNextLowestFloor(sec,sec->floorheight);
break;
case FtoLnC:
floor->floordestheight = P_FindLowestCeilingSurrounding(sec);
break;
case FtoC:
floor->floordestheight = sec->ceilingheight;
break;
case FbyST:
floor->floordestheight = (floor->sector->floorheight>>FRACBITS) +
floor->direction * (P_FindShortestTextureAround(secnum)>>FRACBITS);
if (floor->floordestheight>32000) //jff 3/13/98 prevent overflow
floor->floordestheight=32000; // wraparound in floor height
if (floor->floordestheight<-32000)
floor->floordestheight=-32000;
floor->floordestheight<<=FRACBITS;
break;
case Fby24:
floor->floordestheight = floor->sector->floorheight +
floor->direction * 24*FRACUNIT;
break;
case Fby32:
floor->floordestheight = floor->sector->floorheight +
floor->direction * 32*FRACUNIT;
break;
default:
break;
}
// set texture/type change properties
if (ChgT) // if a texture change is indicated
{
if (ChgM) // if a numeric model change
{
sector_t *sec;
//jff 5/23/98 find model with ceiling at target height if target
//is a ceiling type
sec = (Targ==FtoLnC || Targ==FtoC)?
P_FindModelCeilingSector(floor->floordestheight,secnum) :
P_FindModelFloorSector(floor->floordestheight,secnum);
if (sec)
{
floor->texture = sec->floorpic;
switch(ChgT)
{
case FChgZero: // zero type
floor->newspecial = 0;
//jff 3/14/98 change old field too
floor->oldspecial = 0;
floor->type = genFloorChg0;
break;
case FChgTyp: // copy type
floor->newspecial = sec->special;
//jff 3/14/98 change old field too
floor->oldspecial = sec->oldspecial;
floor->type = genFloorChgT;
break;
case FChgTxt: // leave type be
floor->type = genFloorChg;
break;
default:
break;
}
}
}
else // else if a trigger model change
{
floor->texture = line->frontsector->floorpic;
switch (ChgT)
{
case FChgZero: // zero type
floor->newspecial = 0;
//jff 3/14/98 change old field too
floor->oldspecial = 0;
floor->type = genFloorChg0;
break;
case FChgTyp: // copy type
floor->newspecial = line->frontsector->special;
//jff 3/14/98 change old field too
floor->oldspecial = line->frontsector->oldspecial;
floor->type = genFloorChgT;
break;
case FChgTxt: // leave type be
floor->type = genFloorChg;
break;
default:
break;
}
}
}
if (manual) return rtn;
}
return rtn;
}
//
// EV_DoGenCeiling()
//
// Handle generalized ceiling types
//
// Passed the linedef activating the ceiling function
// Returns true if a thinker created
//
// jff 02/04/98 Added this routine (and file) to handle generalized
// floor movers using bit fields in the line special type.
//
int EV_DoGenCeiling
( line_t* line )
{
int secnum;
int rtn;
dboolean manual;
fixed_t targheight;
sector_t* sec;
ceiling_t* ceiling;
unsigned value = (unsigned)line->special - GenCeilingBase;
// parse the bit fields in the line's special type
int Crsh = (value & CeilingCrush) >> CeilingCrushShift;
int ChgT = (value & CeilingChange) >> CeilingChangeShift;
int Targ = (value & CeilingTarget) >> CeilingTargetShift;
int Dirn = (value & CeilingDirection) >> CeilingDirectionShift;
int ChgM = (value & CeilingModel) >> CeilingModelShift;
int Sped = (value & CeilingSpeed) >> CeilingSpeedShift;
int Trig = (value & TriggerType) >> TriggerTypeShift;
rtn = 0;
if (ProcessNoTagLines(line, &sec, &secnum)) {if (zerotag_manual) {manual = true; goto manual_ceiling;} else {return rtn;}};//e6y
// check if a manual trigger, if so do just the sector on the backside
manual = false;
if (Trig==PushOnce || Trig==PushMany)
{
if (!(sec = line->backsector))
return rtn;
secnum = sec->iSectorID;
manual = true;
goto manual_ceiling;
}
secnum = -1;
// if not manual do all sectors tagged the same as the line
while ((secnum = P_FindSectorFromLineTag(line,secnum)) >= 0)
{
sec = &sectors[secnum];
manual_ceiling:
// Do not start another function if ceiling already moving
if (P_SectorActive(ceiling_special,sec)) //jff 2/22/98
{
if (!manual)
continue;
else
return rtn;
}
// new ceiling thinker
rtn = 1;
ceiling = Z_Malloc (sizeof(*ceiling), PU_LEVSPEC, 0);
memset(ceiling, 0, sizeof(*ceiling));
P_AddThinker (&ceiling->thinker);
sec->ceilingdata = ceiling; //jff 2/22/98
ceiling->thinker.function = T_MoveCeiling;
ceiling->crush = Crsh;
ceiling->direction = Dirn? 1 : -1;
ceiling->sector = sec;
ceiling->texture = sec->ceilingpic;
ceiling->newspecial = sec->special;
//jff 3/14/98 change old field too
ceiling->oldspecial = sec->oldspecial;
ceiling->tag = sec->tag;
ceiling->type = genCeiling;
// set speed of motion
switch (Sped)
{
case SpeedSlow:
ceiling->speed = CEILSPEED;
break;
case SpeedNormal:
ceiling->speed = CEILSPEED*2;
break;
case SpeedFast:
ceiling->speed = CEILSPEED*4;
break;
case SpeedTurbo:
ceiling->speed = CEILSPEED*8;
break;
default:
break;
}
// set destination target height
targheight = sec->ceilingheight;
switch(Targ)
{
case CtoHnC:
targheight = P_FindHighestCeilingSurrounding(sec);
break;
case CtoLnC:
targheight = P_FindLowestCeilingSurrounding(sec);
break;
case CtoNnC:
targheight = Dirn?
P_FindNextHighestCeiling(sec,sec->ceilingheight) :
P_FindNextLowestCeiling(sec,sec->ceilingheight);
break;
case CtoHnF:
targheight = P_FindHighestFloorSurrounding(sec);
break;
case CtoF:
targheight = sec->floorheight;
break;
case CbyST:
targheight = (ceiling->sector->ceilingheight>>FRACBITS) +
ceiling->direction * (P_FindShortestUpperAround(secnum)>>FRACBITS);
if (targheight>32000) //jff 3/13/98 prevent overflow
targheight=32000; // wraparound in ceiling height
if (targheight<-32000)
targheight=-32000;
targheight<<=FRACBITS;
break;
case Cby24:
targheight = ceiling->sector->ceilingheight +
ceiling->direction * 24*FRACUNIT;
break;
case Cby32:
targheight = ceiling->sector->ceilingheight +
ceiling->direction * 32*FRACUNIT;
break;
default:
break;
}
if (Dirn) ceiling->topheight = targheight;
else ceiling->bottomheight = targheight;
// set texture/type change properties
if (ChgT) // if a texture change is indicated
{
if (ChgM) // if a numeric model change
{
sector_t *sec;
//jff 5/23/98 find model with floor at target height if target
//is a floor type
sec = (Targ==CtoHnF || Targ==CtoF)?
P_FindModelFloorSector(targheight,secnum) :
P_FindModelCeilingSector(targheight,secnum);
if (sec)
{
ceiling->texture = sec->ceilingpic;
switch (ChgT)
{
case CChgZero: // type is zeroed
ceiling->newspecial = 0;
//jff 3/14/98 change old field too
ceiling->oldspecial = 0;
ceiling->type = genCeilingChg0;
break;
case CChgTyp: // type is copied
ceiling->newspecial = sec->special;
//jff 3/14/98 change old field too
ceiling->oldspecial = sec->oldspecial;
ceiling->type = genCeilingChgT;
break;
case CChgTxt: // type is left alone
ceiling->type = genCeilingChg;
break;
default:
break;
}
}
}
else // else if a trigger model change
{
ceiling->texture = line->frontsector->ceilingpic;
switch (ChgT)
{
case CChgZero: // type is zeroed
ceiling->newspecial = 0;
//jff 3/14/98 change old field too
ceiling->oldspecial = 0;
ceiling->type = genCeilingChg0;
break;
case CChgTyp: // type is copied
ceiling->newspecial = line->frontsector->special;
//jff 3/14/98 change old field too
ceiling->oldspecial = line->frontsector->oldspecial;
ceiling->type = genCeilingChgT;
break;
case CChgTxt: // type is left alone
ceiling->type = genCeilingChg;
break;
default:
break;
}
}
}
P_AddActiveCeiling(ceiling); // add this ceiling to the active list
if (manual) return rtn;
}
return rtn;
}
//
// EV_DoGenLift()
//
// Handle generalized lift types
//
// Passed the linedef activating the lift
// Returns true if a thinker is created
//
int EV_DoGenLift
( line_t* line )
{
plat_t* plat;
int secnum;
int rtn;
dboolean manual;
sector_t* sec;
unsigned value = (unsigned)line->special - GenLiftBase;
// parse the bit fields in the line's special type
int Targ = (value & LiftTarget) >> LiftTargetShift;
int Dely = (value & LiftDelay) >> LiftDelayShift;
int Sped = (value & LiftSpeed) >> LiftSpeedShift;
int Trig = (value & TriggerType) >> TriggerTypeShift;
secnum = -1;
rtn = 0;
// Activate all <type> plats that are in_stasis
if (Targ==LnF2HnF)
P_ActivateInStasis(line->tag);
if (ProcessNoTagLines(line, &sec, &secnum)) {if (zerotag_manual) {manual = true; goto manual_lift;} else {return rtn;}};//e6y
// check if a manual trigger, if so do just the sector on the backside
manual = false;
if (Trig==PushOnce || Trig==PushMany)
{
if (!(sec = line->backsector))
return rtn;
secnum = sec->iSectorID;
manual = true;
goto manual_lift;
}
// if not manual do all sectors tagged the same as the line
while ((secnum = P_FindSectorFromLineTag(line,secnum)) >= 0)
{
sec = &sectors[secnum];
manual_lift:
// Do not start another function if floor already moving
if (P_SectorActive(floor_special,sec))
{
if (!manual)
continue;
else
return rtn;
}
// Setup the plat thinker
rtn = 1;
plat = Z_Malloc( sizeof(*plat), PU_LEVSPEC, 0);
memset(plat, 0, sizeof(*plat));
P_AddThinker(&plat->thinker);
plat->sector = sec;
plat->sector->floordata = plat;
plat->thinker.function = T_PlatRaise;
plat->crush = false;
plat->tag = line->tag;
plat->type = genLift;
plat->high = sec->floorheight;
plat->status = down;
// setup the target destination height
switch(Targ)
{
case F2LnF:
plat->low = P_FindLowestFloorSurrounding(sec);
if (plat->low > sec->floorheight)
plat->low = sec->floorheight;
break;
case F2NnF:
plat->low = P_FindNextLowestFloor(sec,sec->floorheight);
break;
case F2LnC:
plat->low = P_FindLowestCeilingSurrounding(sec);
if (plat->low > sec->floorheight)
plat->low = sec->floorheight;
break;
case LnF2HnF:
plat->type = genPerpetual;
plat->low = P_FindLowestFloorSurrounding(sec);
if (plat->low > sec->floorheight)
plat->low = sec->floorheight;
plat->high = P_FindHighestFloorSurrounding(sec);
if (plat->high < sec->floorheight)
plat->high = sec->floorheight;
plat->status = P_Random(pr_genlift)&1;
break;
default:
break;
}
// setup the speed of motion
switch(Sped)
{
case SpeedSlow:
plat->speed = PLATSPEED * 2;
break;
case SpeedNormal:
plat->speed = PLATSPEED * 4;
break;
case SpeedFast:
plat->speed = PLATSPEED * 8;
break;
case SpeedTurbo:
plat->speed = PLATSPEED * 16;
break;
default:
break;
}
// setup the delay time before the floor returns
switch(Dely)
{
case 0:
plat->wait = 1*35;
break;
case 1:
plat->wait = PLATWAIT*35;
break;
case 2:
plat->wait = 5*35;
break;
case 3:
plat->wait = 10*35;
break;
}
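/*
 * Illustrative note (not part of the original source): the factor 35 is
 * the Doom tic rate (TICRATE), so the four delay settings above come to
 * 1 second, PLATWAIT (3) seconds, 5 seconds and 10 seconds respectively.
 */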
S_StartSound((mobj_t *)&sec->soundorg,sfx_pstart);
P_AddActivePlat(plat); // add this plat to the list of active plats
if (manual)
return rtn;
}
return rtn;
}
//
// EV_DoGenStairs()
//
// Handle generalized stair building
//
// Passed the linedef activating the stairs
// Returns true if a thinker is created
//
int EV_DoGenStairs
( line_t* line )
{
int secnum;
int osecnum; //jff 3/4/98 preserve loop index
int height;
int i;
int newsecnum;
int texture;
int ok;
int rtn;
dboolean manual;
sector_t* sec;
sector_t* tsec;
floormove_t* floor;
fixed_t stairsize;
fixed_t speed;
unsigned value = (unsigned)line->special - GenStairsBase;
// parse the bit fields in the line's special type
int Igno = (value & StairIgnore) >> StairIgnoreShift;
int Dirn = (value & StairDirection) >> StairDirectionShift;
int Step = (value & StairStep) >> StairStepShift;
int Sped = (value & StairSpeed) >> StairSpeedShift;
int Trig = (value & TriggerType) >> TriggerTypeShift;
rtn = 0;
if (ProcessNoTagLines(line, &sec, &secnum)) {if (zerotag_manual) {manual = true; goto manual_stair;} else {return rtn;}};//e6y
// check if a manual trigger, if so do just the sector on the backside
manual = false;
if (Trig==PushOnce || Trig==PushMany)
{
if (!(sec = line->backsector))
return rtn;
secnum = sec->iSectorID;
manual = true;
goto manual_stair;
}
secnum = -1;
// if not manual do all sectors tagged the same as the line
while ((secnum = P_FindSectorFromLineTag(line,secnum)) >= 0)
{
sec = &sectors[secnum];
manual_stair:
//Do not start another function if floor already moving
//jff 2/26/98 add special lockout condition to wait for entire
//staircase to build before retriggering
if (P_SectorActive(floor_special,sec) || sec->stairlock)
{
if (!manual)
continue;
else
return rtn;
}
// new floor thinker
rtn = 1;
floor = Z_Malloc (sizeof(*floor), PU_LEVSPEC, 0);
memset(floor, 0, sizeof(*floor));
P_AddThinker (&floor->thinker);
sec->floordata = floor;
floor->thinker.function = T_MoveFloor;
floor->direction = Dirn? 1 : -1;
floor->sector = sec;
// setup speed of stair building
switch(Sped)
{
default:
case SpeedSlow:
floor->speed = FLOORSPEED/4;
break;
case SpeedNormal:
floor->speed = FLOORSPEED/2;
break;
case SpeedFast:
floor->speed = FLOORSPEED*2;
break;
case SpeedTurbo:
floor->speed = FLOORSPEED*4;
break;
}
// setup stepsize for stairs
switch(Step)
{
default:
case 0:
stairsize = 4*FRACUNIT;
break;
case 1:
stairsize = 8*FRACUNIT;
break;
case 2:
stairsize = 16*FRACUNIT;
break;
case 3:
stairsize = 24*FRACUNIT;
break;
}
speed = floor->speed;
height = sec->floorheight + floor->direction * stairsize;
floor->floordestheight = height;
texture = sec->floorpic;
floor->crush = false;
floor->type = genBuildStair; // jff 3/31/98 do not leave uninited
sec->stairlock = -2; // jff 2/26/98 set up lock on current sector
sec->nextsec = -1;
sec->prevsec = -1;
osecnum = secnum; //jff 3/4/98 preserve loop index
// Find next sector to raise
// 1. Find 2-sided line with same sector side[0]
// 2. Other side is the next sector to raise
do
{
ok = 0;
for (i = 0;i < sec->linecount;i++)
{
if ( !((sec->lines[i])->backsector) )
continue;
tsec = (sec->lines[i])->frontsector;
newsecnum = tsec->iSectorID;
if (secnum != newsecnum)
continue;
tsec = (sec->lines[i])->backsector;
newsecnum = tsec->iSectorID;
if (!Igno && tsec->floorpic != texture)
continue;
/* jff 6/19/98 prevent double stepsize */
if (compatibility_level < boom_202_compatibility)
height += floor->direction * stairsize;
//jff 2/26/98 special lockout condition for retriggering
if (P_SectorActive(floor_special,tsec) || tsec->stairlock)
continue;
/* jff 6/19/98 increase height AFTER continue */
if (compatibility_level >= boom_202_compatibility)
height += floor->direction * stairsize;
// jff 2/26/98
// link the stair chain in both directions
// lock the stair sector until building complete
sec->nextsec = newsecnum; // link step to next
tsec->prevsec = secnum; // link next back
tsec->nextsec = -1; // set next forward link as end
tsec->stairlock = -2; // lock the step
sec = tsec;
secnum = newsecnum;
floor = Z_Malloc (sizeof(*floor), PU_LEVSPEC, 0);
memset(floor, 0, sizeof(*floor));
P_AddThinker (&floor->thinker);
sec->floordata = floor;
floor->thinker.function = T_MoveFloor;
floor->direction = Dirn? 1 : -1;
floor->sector = sec;
floor->speed = speed;
floor->floordestheight = height;
floor->crush = false;
floor->type = genBuildStair; // jff 3/31/98 do not leave uninited
ok = 1;
break;
}
} while(ok);
if (manual)
return rtn;
secnum = osecnum; //jff 3/4/98 restore old loop index
}
// retriggerable generalized stairs build up or down alternately
if (rtn)
line->special ^= StairDirection; // alternate dir on succ activations
return rtn;
}
//
// EV_DoGenCrusher()
//
// Handle generalized crusher types
//
// Passed the linedef activating the crusher
// Returns true if a thinker created
//
int EV_DoGenCrusher
( line_t* line )
{
int secnum;
int rtn;
dboolean manual;
sector_t* sec;
ceiling_t* ceiling;
unsigned value = (unsigned)line->special - GenCrusherBase;
// parse the bit fields in the line's special type
int Slnt = (value & CrusherSilent) >> CrusherSilentShift;
int Sped = (value & CrusherSpeed) >> CrusherSpeedShift;
int Trig = (value & TriggerType) >> TriggerTypeShift;
//jff 2/22/98 Reactivate in-stasis ceilings...for certain types.
//jff 4/5/98 return if activated
rtn = P_ActivateInStasisCeiling(line);
if (ProcessNoTagLines(line, &sec, &secnum)) {if (zerotag_manual) {manual = true; goto manual_crusher;} else {return rtn;}};//e6y
// check if a manual trigger, if so do just the sector on the backside
manual = false;
if (Trig==PushOnce || Trig==PushMany)
{
if (!(sec = line->backsector))
return rtn;
secnum = sec->iSectorID;
manual = true;
goto manual_crusher;
}
secnum = -1;
// if not manual do all sectors tagged the same as the line
while ((secnum = P_FindSectorFromLineTag(line,secnum)) >= 0)
{
sec = &sectors[secnum];
manual_crusher:
// Do not start another function if ceiling already moving
if (P_SectorActive(ceiling_special,sec)) //jff 2/22/98
{
if (!manual)
continue;
else
return rtn;
}
// new ceiling thinker
rtn = 1;
ceiling = Z_Malloc (sizeof(*ceiling), PU_LEVSPEC, 0);
memset(ceiling, 0, sizeof(*ceiling));
P_AddThinker (&ceiling->thinker);
sec->ceilingdata = ceiling; //jff 2/22/98
ceiling->thinker.function = T_MoveCeiling;
ceiling->crush = true;
ceiling->direction = -1;
ceiling->sector = sec;
ceiling->texture = sec->ceilingpic;
ceiling->newspecial = sec->special;
ceiling->tag = sec->tag;
ceiling->type = Slnt? genSilentCrusher : genCrusher;
ceiling->topheight = sec->ceilingheight;
ceiling->bottomheight = sec->floorheight + (8*FRACUNIT);
// setup ceiling motion speed
switch (Sped)
{
case SpeedSlow:
ceiling->speed = CEILSPEED;
break;
case SpeedNormal:
ceiling->speed = CEILSPEED*2;
break;
case SpeedFast:
ceiling->speed = CEILSPEED*4;
break;
case SpeedTurbo:
ceiling->speed = CEILSPEED*8;
break;
default:
break;
}
ceiling->oldspeed=ceiling->speed;
P_AddActiveCeiling(ceiling); // add to list of active ceilings
if (manual) return rtn;
}
return rtn;
}
//
// EV_DoGenLockedDoor()
//
// Handle generalized locked door types
//
// Passed the linedef activating the generalized locked door
// Returns true if a thinker created
//
int EV_DoGenLockedDoor
( line_t* line )
{
int secnum,rtn;
sector_t* sec;
vldoor_t* door;
dboolean manual;
unsigned value = (unsigned)line->special - GenLockedBase;
// parse the bit fields in the line's special type
int Kind = (value & LockedKind) >> LockedKindShift;
int Sped = (value & LockedSpeed) >> LockedSpeedShift;
int Trig = (value & TriggerType) >> TriggerTypeShift;
rtn = 0;
if (ProcessNoTagLines(line, &sec, &secnum)) {if (zerotag_manual) {manual = true; goto manual_locked;} else {return rtn;}};//e6y
// check if a manual trigger, if so do just the sector on the backside
manual = false;
if (Trig==PushOnce || Trig==PushMany)
{
if (!(sec = line->backsector))
return rtn;
secnum = sec->iSectorID;
manual = true;
goto manual_locked;
}
secnum = -1;
rtn = 0;
// if not manual do all sectors tagged the same as the line
while ((secnum = P_FindSectorFromLineTag(line,secnum)) >= 0)
{
sec = &sectors[secnum];
manual_locked:
// Do not start another function if ceiling already moving
if (P_SectorActive(ceiling_special,sec)) //jff 2/22/98
{
if (!manual)
continue;
else
return rtn;
}
// new door thinker
rtn = 1;
door = Z_Malloc (sizeof(*door), PU_LEVSPEC, 0);
memset(door, 0, sizeof(*door));
P_AddThinker (&door->thinker);
sec->ceilingdata = door; //jff 2/22/98
door->thinker.function = T_VerticalDoor;
door->sector = sec;
door->topwait = VDOORWAIT;
door->line = line;
door->topheight = P_FindLowestCeilingSurrounding(sec);
door->topheight -= 4*FRACUNIT;
door->direction = 1;
/* killough 10/98: implement gradual lighting */
door->lighttag = !comp[comp_doorlight] &&
(line->special&6) == 6 &&
line->special > GenLockedBase ? line->tag : 0;
// setup speed of door motion
switch(Sped)
{
default:
case SpeedSlow:
door->type = Kind? genOpen : genRaise;
door->speed = VDOORSPEED;
break;
case SpeedNormal:
door->type = Kind? genOpen : genRaise;
door->speed = VDOORSPEED*2;
break;
case SpeedFast:
door->type = Kind? genBlazeOpen : genBlazeRaise;
door->speed = VDOORSPEED*4;
break;
case SpeedTurbo:
door->type = Kind? genBlazeOpen : genBlazeRaise;
door->speed = VDOORSPEED*8;
break;
}
// killough 4/15/98: fix generalized door opening sounds
// (previously they always had the blazing door close sound)
S_StartSound((mobj_t *)&door->sector->soundorg, // killough 4/15/98
door->speed >= VDOORSPEED*4 ? sfx_bdopn : sfx_doropn);
if (manual)
return rtn;
}
return rtn;
}
//
// EV_DoGenDoor()
//
// Handle generalized door types
//
// Passed the linedef activating the generalized door
// Returns true if a thinker created
//
int EV_DoGenDoor
( line_t* line )
{
int secnum,rtn;
sector_t* sec;
dboolean manual;
vldoor_t* door;
unsigned value = (unsigned)line->special - GenDoorBase;
// parse the bit fields in the line's special type
int Dely = (value & DoorDelay) >> DoorDelayShift;
int Kind = (value & DoorKind) >> DoorKindShift;
int Sped = (value & DoorSpeed) >> DoorSpeedShift;
int Trig = (value & TriggerType) >> TriggerTypeShift;
rtn = 0;
if (ProcessNoTagLines(line, &sec, &secnum)) {if (zerotag_manual) {manual = true; goto manual_door;} else {return rtn;}};//e6y
// check if a manual trigger, if so do just the sector on the backside
manual = false;
if (Trig==PushOnce || Trig==PushMany)
{
if (!(sec = line->backsector))
return rtn;
secnum = sec->iSectorID;
manual = true;
goto manual_door;
}
secnum = -1;
rtn = 0;
// if not manual do all sectors tagged the same as the line
while ((secnum = P_FindSectorFromLineTag(line,secnum)) >= 0)
{
sec = &sectors[secnum];
manual_door:
// Do not start another function if ceiling already moving
if (P_SectorActive(ceiling_special,sec)) //jff 2/22/98
{
if (!manual)
continue;
else
return rtn;
}
// new door thinker
rtn = 1;
door = Z_Malloc (sizeof(*door), PU_LEVSPEC, 0);
memset(door, 0, sizeof(*door));
P_AddThinker (&door->thinker);
sec->ceilingdata = door; //jff 2/22/98
door->thinker.function = T_VerticalDoor;
door->sector = sec;
// setup delay for door remaining open/closed
switch(Dely)
{
default:
case 0:
door->topwait = 35;
break;
case 1:
door->topwait = VDOORWAIT;
break;
case 2:
door->topwait = 2*VDOORWAIT;
break;
case 3:
door->topwait = 7*VDOORWAIT;
break;
}
// setup speed of door motion
switch(Sped)
{
default:
case SpeedSlow:
door->speed = VDOORSPEED;
break;
case SpeedNormal:
door->speed = VDOORSPEED*2;
break;
case SpeedFast:
door->speed = VDOORSPEED*4;
break;
case SpeedTurbo:
door->speed = VDOORSPEED*8;
break;
}
door->line = line; // jff 1/31/98 remember line that triggered us
/* killough 10/98: implement gradual lighting */
door->lighttag = !comp[comp_doorlight] &&
(line->special&6) == 6 &&
line->special > GenLockedBase ? line->tag : 0;
// set kind of door, whether it opens then close, opens, closes etc.
// assign target heights accordingly
switch(Kind)
{
case OdCDoor:
door->direction = 1;
door->topheight = P_FindLowestCeilingSurrounding(sec);
door->topheight -= 4*FRACUNIT;
if (door->topheight != sec->ceilingheight)
S_StartSound((mobj_t *)&door->sector->soundorg,Sped>=SpeedFast || comp[comp_sound] ? sfx_bdopn : sfx_doropn);
door->type = Sped>=SpeedFast? genBlazeRaise : genRaise;
break;
case ODoor:
door->direction = 1;
door->topheight = P_FindLowestCeilingSurrounding(sec);
door->topheight -= 4*FRACUNIT;
if (door->topheight != sec->ceilingheight)
S_StartSound((mobj_t *)&door->sector->soundorg,Sped>=SpeedFast || comp[comp_sound] ? sfx_bdopn : sfx_doropn);
door->type = Sped>=SpeedFast? genBlazeOpen : genOpen;
break;
case CdODoor:
door->topheight = sec->ceilingheight;
door->direction = -1;
S_StartSound((mobj_t *)&door->sector->soundorg,Sped>=SpeedFast && !comp[comp_sound] ? sfx_bdcls : sfx_dorcls);
door->type = Sped>=SpeedFast? genBlazeCdO : genCdO;
break;
case CDoor:
door->topheight = P_FindLowestCeilingSurrounding(sec);
door->topheight -= 4*FRACUNIT;
door->direction = -1;
S_StartSound((mobj_t *)&door->sector->soundorg,Sped>=SpeedFast && !comp[comp_sound] ? sfx_bdcls : sfx_dorcls);
door->type = Sped>=SpeedFast? genBlazeClose : genClose;
break;
default:
break;
}
if (manual)
return rtn;
}
return rtn;
}
| gpl-2.0 |
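Every handler in p_genlin.c opens with the same pattern: subtract the class base from line->special, then mask and shift out the parameter fields. A compact restatement of that shared decode step using the door handler's fields (the constants come from p_spec.h; the wrapper function below is a sketch, not part of the source):

/* Sketch of the shared bit-field decode, as done by EV_DoGenDoor. */
static void decode_gen_door(const line_t *line)
{
  unsigned value = (unsigned)line->special - GenDoorBase;

  int Dely = (value & DoorDelay)   >> DoorDelayShift;   /* wait time        */
  int Kind = (value & DoorKind)    >> DoorKindShift;    /* OdC/O/CdO/C      */
  int Sped = (value & DoorSpeed)   >> DoorSpeedShift;   /* slow..turbo      */
  int Trig = (value & TriggerType) >> TriggerTypeShift; /* walk/switch/gun/
                                                           push, once/many  */
  (void)Dely; (void)Kind; (void)Sped; (void)Trig;
}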
VisualIdeation/3DVisualizer | Templatized/SliceCaseTableSimplex.cpp | 2 | 2052 | /***********************************************************************
SliceCaseTableSimplex - Specialized versions of SliceCaseTable for two-
and three-dimensional simplices.
Copyright (c) 2005-2007 Oliver Kreylos
This file is part of the 3D Data Visualizer (Visualizer).
The 3D Data Visualizer is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License, or (at
your option) any later version.
The 3D Data Visualizer is distributed in the hope that it will be
useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License along
with the 3D Data Visualizer; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
***********************************************************************/
#include <Templatized/SliceCaseTableSimplex.h>
namespace Visualization {
namespace Templatized {
/****************************************************
Static elements of class SliceCaseTable<Simplex<3> >:
****************************************************/
const int SliceCaseTable<Simplex<3> >::edgeIndices[16][5]=
{
{-1, -1, -1, -1, -1}, // 0
{ 0, 1, 2, -1, -1}, // 1
{ 0, 4, 3, -1, -1}, // 2
{ 1, 2, 4, 3, -1}, // 3
{ 1, 3, 5, -1, -1}, // 4
{ 0, 3, 5, 2, -1}, // 5
{ 0, 4, 5, 1, -1}, // 6
{ 2, 4, 5, -1, -1}, // 7
{ 2, 5, 4, -1, -1}, // 8
{ 0, 1, 5, 4, -1}, // 9
{ 0, 2, 5, 3, -1}, // 10
{ 1, 5, 3, -1, -1}, // 11
{ 1, 3, 4, 2, -1}, // 12
{ 0, 3, 4, -1, -1}, // 13
{ 0, 2, 1, -1, -1}, // 14
{-1, -1, -1, -1, -1} // 15
};
const int SliceCaseTable<Simplex<3> >::neighbourMasks[16]=
{
0x0000, 0x000e, 0x000d, 0x000f,
0x000b, 0x000f, 0x000f, 0x0007,
0x0007, 0x000f, 0x000f, 0x000b,
0x000f, 0x000d, 0x000e, 0x0000
};
}
}
| gpl-2.0 |
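The two 16-entry tables above are indexed by a 4-bit inside/outside classification of the tetrahedron's vertices. A typical construction of that case index (an assumption about the table's callers, which are not part of this file):

/* Sketch: build the 4-bit slice case index for one tetrahedron.
   vertexValue[] and iso are hypothetical inputs. */
int caseIndex = 0;
for (int i = 0; i < 4; ++i)
    if (vertexValue[i] > iso)
        caseIndex |= 1 << i;
/* Cases 0 and 15 (all vertices on one side) intersect no edges, matching
   the all -1 rows; edgeIndices[caseIndex] lists the cut edges in order. */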
iPodLinux/linux-2.6.7-ipod | arch/sparc64/lib/debuglocks.c | 2 | 8094 | /* $Id: debuglocks.c,v 1.9 2001/11/17 00:10:48 davem Exp $
* debuglocks.c: Debugging versions of SMP locking primitives.
*
* Copyright (C) 1998 David S. Miller (davem@redhat.com)
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/system.h>
#ifdef CONFIG_SMP
#define GET_CALLER(PC) __asm__ __volatile__("mov %%i7, %0" : "=r" (PC))
static inline void show (char *str, spinlock_t *lock, unsigned long caller)
{
int cpu = smp_processor_id();
printk("%s(%p) CPU#%d stuck at %08x, owner PC(%08x):CPU(%x)\n",
str, lock, cpu, (unsigned int) caller,
lock->owner_pc, lock->owner_cpu);
}
static inline void show_read (char *str, rwlock_t *lock, unsigned long caller)
{
int cpu = smp_processor_id();
printk("%s(%p) CPU#%d stuck at %08x, writer PC(%08x):CPU(%x)\n",
str, lock, cpu, (unsigned int) caller,
lock->writer_pc, lock->writer_cpu);
}
static inline void show_write (char *str, rwlock_t *lock, unsigned long caller)
{
int cpu = smp_processor_id();
int i;
printk("%s(%p) CPU#%d stuck at %08x\n",
str, lock, cpu, (unsigned int) caller);
printk("Writer: PC(%08x):CPU(%x)\n",
lock->writer_pc, lock->writer_cpu);
printk("Readers:");
for (i = 0; i < NR_CPUS; i++)
if (lock->reader_pc[i])
printk(" %d[%08x]", i, lock->reader_pc[i]);
printk("\n");
}
#undef INIT_STUCK
#define INIT_STUCK 100000000
void _do_spin_lock(spinlock_t *lock, char *str)
{
unsigned long caller, val;
int stuck = INIT_STUCK;
int cpu = get_cpu();
int shown = 0;
GET_CALLER(caller);
again:
__asm__ __volatile__("ldstub [%1], %0"
: "=r" (val)
: "r" (&(lock->lock))
: "memory");
membar("#StoreLoad | #StoreStore");
if (val) {
while (lock->lock) {
if (!--stuck) {
if (shown++ <= 2)
show(str, lock, caller);
stuck = INIT_STUCK;
}
membar("#LoadLoad");
}
goto again;
}
lock->owner_pc = ((unsigned int)caller);
lock->owner_cpu = cpu;
current->thread.smp_lock_count++;
current->thread.smp_lock_pc = ((unsigned int)caller);
put_cpu();
}
int _spin_trylock(spinlock_t *lock)
{
unsigned long val, caller;
int cpu = get_cpu();
GET_CALLER(caller);
__asm__ __volatile__("ldstub [%1], %0"
: "=r" (val)
: "r" (&(lock->lock))
: "memory");
membar("#StoreLoad | #StoreStore");
if (!val) {
lock->owner_pc = ((unsigned int)caller);
lock->owner_cpu = cpu;
current->thread.smp_lock_count++;
current->thread.smp_lock_pc = ((unsigned int)caller);
}
put_cpu();
return val == 0;
}
void _do_spin_unlock(spinlock_t *lock)
{
lock->owner_pc = 0;
lock->owner_cpu = NO_PROC_ID;
membar("#StoreStore | #LoadStore");
lock->lock = 0;
current->thread.smp_lock_count--;
}
/* Keep INIT_STUCK the same... */
void _do_read_lock (rwlock_t *rw, char *str)
{
unsigned long caller, val;
int stuck = INIT_STUCK;
int cpu = get_cpu();
int shown = 0;
GET_CALLER(caller);
wlock_again:
/* Wait for any writer to go away. */
while (((long)(rw->lock)) < 0) {
if (!--stuck) {
if (shown++ <= 2)
show_read(str, rw, caller);
stuck = INIT_STUCK;
}
membar("#LoadLoad");
}
/* Try once to increment the counter. */
__asm__ __volatile__(
" ldx [%0], %%g5\n"
" brlz,a,pn %%g5, 2f\n"
" mov 1, %0\n"
" add %%g5, 1, %%g7\n"
" casx [%0], %%g5, %%g7\n"
" sub %%g5, %%g7, %0\n"
"2:" : "=r" (val)
: "0" (&(rw->lock))
: "g5", "g7", "memory");
membar("#StoreLoad | #StoreStore");
if (val)
goto wlock_again;
rw->reader_pc[cpu] = ((unsigned int)caller);
current->thread.smp_lock_count++;
current->thread.smp_lock_pc = ((unsigned int)caller);
put_cpu();
}
void _do_read_unlock (rwlock_t *rw, char *str)
{
unsigned long caller, val;
int stuck = INIT_STUCK;
int cpu = get_cpu();
int shown = 0;
GET_CALLER(caller);
/* Drop our identity _first_. */
rw->reader_pc[cpu] = 0;
current->thread.smp_lock_count--;
runlock_again:
/* Spin trying to decrement the counter using casx. */
__asm__ __volatile__(
" ldx [%0], %%g5\n"
" sub %%g5, 1, %%g7\n"
" casx [%0], %%g5, %%g7\n"
" membar #StoreLoad | #StoreStore\n"
" sub %%g5, %%g7, %0\n"
: "=r" (val)
: "0" (&(rw->lock))
: "g5", "g7", "memory");
if (val) {
if (!--stuck) {
if (shown++ <= 2)
show_read(str, rw, caller);
stuck = INIT_STUCK;
}
goto runlock_again;
}
put_cpu();
}
void _do_write_lock (rwlock_t *rw, char *str)
{
unsigned long caller, val;
int stuck = INIT_STUCK;
int cpu = get_cpu();
int shown = 0;
GET_CALLER(caller);
wlock_again:
/* Spin while there is another writer. */
while (((long)rw->lock) < 0) {
if (!--stuck) {
if (shown++ <= 2)
show_write(str, rw, caller);
stuck = INIT_STUCK;
}
membar("#LoadLoad");
}
/* Try to acquire the write bit. */
__asm__ __volatile__(
" mov 1, %%g3\n"
" sllx %%g3, 63, %%g3\n"
" ldx [%0], %%g5\n"
" brlz,pn %%g5, 1f\n"
" or %%g5, %%g3, %%g7\n"
" casx [%0], %%g5, %%g7\n"
" membar #StoreLoad | #StoreStore\n"
" ba,pt %%xcc, 2f\n"
" sub %%g5, %%g7, %0\n"
"1: mov 1, %0\n"
"2:" : "=r" (val)
: "0" (&(rw->lock))
: "g3", "g5", "g7", "memory");
if (val) {
/* We couldn't get the write bit. */
if (!--stuck) {
if (shown++ <= 2)
show_write(str, rw, caller);
stuck = INIT_STUCK;
}
goto wlock_again;
}
if ((rw->lock & ((1UL<<63)-1UL)) != 0UL) {
/* Readers still around, drop the write
* lock, spin, and try again.
*/
if (!--stuck) {
if (shown++ <= 2)
show_write(str, rw, caller);
stuck = INIT_STUCK;
}
__asm__ __volatile__(
" mov 1, %%g3\n"
" sllx %%g3, 63, %%g3\n"
"1: ldx [%0], %%g5\n"
" andn %%g5, %%g3, %%g7\n"
" casx [%0], %%g5, %%g7\n"
" cmp %%g5, %%g7\n"
" bne,pn %%xcc, 1b\n"
" membar #StoreLoad | #StoreStore"
: /* no outputs */
: "r" (&(rw->lock))
: "g3", "g5", "g7", "cc", "memory");
while(rw->lock != 0) {
if (!--stuck) {
if (shown++ <= 2)
show_write(str, rw, caller);
stuck = INIT_STUCK;
}
membar("#LoadLoad");
}
goto wlock_again;
}
/* We have it, say who we are. */
rw->writer_pc = ((unsigned int)caller);
rw->writer_cpu = cpu;
current->thread.smp_lock_count++;
current->thread.smp_lock_pc = ((unsigned int)caller);
put_cpu();
}
void _do_write_unlock(rwlock_t *rw)
{
unsigned long caller, val;
int stuck = INIT_STUCK;
int shown = 0;
GET_CALLER(caller);
/* Drop our identity _first_ */
rw->writer_pc = 0;
rw->writer_cpu = NO_PROC_ID;
current->thread.smp_lock_count--;
wlock_again:
__asm__ __volatile__(
" mov 1, %%g3\n"
" sllx %%g3, 63, %%g3\n"
" ldx [%0], %%g5\n"
" andn %%g5, %%g3, %%g7\n"
" casx [%0], %%g5, %%g7\n"
" membar #StoreLoad | #StoreStore\n"
" sub %%g5, %%g7, %0\n"
: "=r" (val)
: "0" (&(rw->lock))
: "g3", "g5", "g7", "memory");
if (val) {
if (!--stuck) {
if (shown++ <= 2)
show_write("write_unlock", rw, caller);
stuck = INIT_STUCK;
}
goto wlock_again;
}
}
int _do_write_trylock (rwlock_t *rw, char *str)
{
unsigned long caller, val;
int cpu = get_cpu();
GET_CALLER(caller);
/* Try to acquire the write bit. */
__asm__ __volatile__(
" mov 1, %%g3\n"
" sllx %%g3, 63, %%g3\n"
" ldx [%0], %%g5\n"
" brlz,pn %%g5, 1f\n"
" or %%g5, %%g3, %%g7\n"
" casx [%0], %%g5, %%g7\n"
" membar #StoreLoad | #StoreStore\n"
" ba,pt %%xcc, 2f\n"
" sub %%g5, %%g7, %0\n"
"1: mov 1, %0\n"
"2:" : "=r" (val)
: "0" (&(rw->lock))
: "g3", "g5", "g7", "memory");
if (val) {
put_cpu();
return 0;
}
if ((rw->lock & ((1UL<<63)-1UL)) != 0UL) {
/* Readers still around, drop the write
* lock, return failure.
*/
__asm__ __volatile__(
" mov 1, %%g3\n"
" sllx %%g3, 63, %%g3\n"
"1: ldx [%0], %%g5\n"
" andn %%g5, %%g3, %%g7\n"
" casx [%0], %%g5, %%g7\n"
" cmp %%g5, %%g7\n"
" bne,pn %%xcc, 1b\n"
" membar #StoreLoad | #StoreStore"
: /* no outputs */
: "r" (&(rw->lock))
: "g3", "g5", "g7", "cc", "memory");
put_cpu();
return 0;
}
/* We have it, say who we are. */
rw->writer_pc = ((unsigned int)caller);
rw->writer_cpu = cpu;
current->thread.smp_lock_count++;
current->thread.smp_lock_pc = ((unsigned int)caller);
put_cpu();
return 1;
}
#endif /* CONFIG_SMP */
| gpl-2.0 |
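The sparc64 fast path above relies on ldstub, which atomically stores 0xff into a byte and returns its previous value: the lock is taken exactly when the old value was zero. The same shape in portable C, using a GCC builtin as an analogy for the kernel primitive (a sketch, not the kernel's implementation):

/* Sketch of the ldstub-style acquire loop over a one-byte flag. */
static void spin_acquire(volatile unsigned char *lock)
{
    /* __atomic_test_and_set returns the old "was set" state. */
    while (__atomic_test_and_set(lock, __ATOMIC_ACQUIRE))
        while (*lock)
            ; /* spin on plain reads, like the membar'd re-check above */
}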
iains/darwin-gcc-4-9 | gcc/fortran/trans-expr.c | 2 | 250427 | /* Expression translation
Copyright (C) 2002-2014 Free Software Foundation, Inc.
Contributed by Paul Brook <paul@nowt.org>
and Steven Bosscher <s.bosscher@student.tudelft.nl>
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
/* trans-expr.c-- generate GENERIC trees for gfc_expr. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tree.h"
#include "stringpool.h"
#include "diagnostic-core.h" /* For fatal_error. */
#include "langhooks.h"
#include "flags.h"
#include "gfortran.h"
#include "arith.h"
#include "constructor.h"
#include "trans.h"
#include "trans-const.h"
#include "trans-types.h"
#include "trans-array.h"
/* Only for gfc_trans_assign and gfc_trans_pointer_assign. */
#include "trans-stmt.h"
#include "dependency.h"
#include "gimplify.h"
/* Convert a scalar to an array descriptor. To be used for assumed-rank
arrays. */
static tree
get_scalar_to_descriptor_type (tree scalar, symbol_attribute attr)
{
enum gfc_array_kind akind;
if (attr.pointer)
akind = GFC_ARRAY_POINTER_CONT;
else if (attr.allocatable)
akind = GFC_ARRAY_ALLOCATABLE;
else
akind = GFC_ARRAY_ASSUMED_SHAPE_CONT;
return gfc_get_array_type_bounds (TREE_TYPE (scalar), 0, 0, NULL, NULL, 1,
akind, !(attr.pointer || attr.target));
}
tree
gfc_conv_scalar_to_descriptor (gfc_se *se, tree scalar, symbol_attribute attr)
{
tree desc, type;
type = get_scalar_to_descriptor_type (scalar, attr);
desc = gfc_create_var (type, "desc");
DECL_ARTIFICIAL (desc) = 1;
if (!POINTER_TYPE_P (TREE_TYPE (scalar)))
scalar = gfc_build_addr_expr (NULL_TREE, scalar);
gfc_add_modify (&se->pre, gfc_conv_descriptor_dtype (desc),
gfc_get_dtype (type));
gfc_conv_descriptor_data_set (&se->pre, desc, scalar);
/* Copy pointer address back - but only if it could have changed and
if the actual argument is a pointer and not, e.g., NULL(). */
if ((attr.pointer || attr.allocatable) && attr.intent != INTENT_IN)
gfc_add_modify (&se->post, scalar,
fold_convert (TREE_TYPE (scalar),
gfc_conv_descriptor_data_get (desc)));
return desc;
}
/* This is the seed for an eventual trans-class.c
The following parameters should not be used directly since they might change
in future implementations. Use the corresponding APIs. */
#define CLASS_DATA_FIELD 0
#define CLASS_VPTR_FIELD 1
#define CLASS_LEN_FIELD 2
#define VTABLE_HASH_FIELD 0
#define VTABLE_SIZE_FIELD 1
#define VTABLE_EXTENDS_FIELD 2
#define VTABLE_DEF_INIT_FIELD 3
#define VTABLE_COPY_FIELD 4
#define VTABLE_FINAL_FIELD 5
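/* Illustrative sketch (not part of the original source) of the layout the
   field indices above assume; the real types are built elsewhere in the
   front end:

     class container: struct { T *_data; vtype *_vptr; len _len; };
     vtable: struct vtype { hash _hash; size _size; vtype *_extends;
                            void *_def_init; void *_copy; void *_final; }; */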
tree
gfc_class_set_static_fields (tree decl, tree vptr, tree data)
{
tree tmp;
tree field;
vec<constructor_elt, va_gc> *init = NULL;
field = TYPE_FIELDS (TREE_TYPE (decl));
tmp = gfc_advance_chain (field, CLASS_DATA_FIELD);
CONSTRUCTOR_APPEND_ELT (init, tmp, data);
tmp = gfc_advance_chain (field, CLASS_VPTR_FIELD);
CONSTRUCTOR_APPEND_ELT (init, tmp, vptr);
return build_constructor (TREE_TYPE (decl), init);
}
tree
gfc_class_data_get (tree decl)
{
tree data;
if (POINTER_TYPE_P (TREE_TYPE (decl)))
decl = build_fold_indirect_ref_loc (input_location, decl);
data = gfc_advance_chain (TYPE_FIELDS (TREE_TYPE (decl)),
CLASS_DATA_FIELD);
return fold_build3_loc (input_location, COMPONENT_REF,
TREE_TYPE (data), decl, data,
NULL_TREE);
}
tree
gfc_class_vptr_get (tree decl)
{
tree vptr;
if (POINTER_TYPE_P (TREE_TYPE (decl)))
decl = build_fold_indirect_ref_loc (input_location, decl);
vptr = gfc_advance_chain (TYPE_FIELDS (TREE_TYPE (decl)),
CLASS_VPTR_FIELD);
return fold_build3_loc (input_location, COMPONENT_REF,
TREE_TYPE (vptr), decl, vptr,
NULL_TREE);
}
tree
gfc_class_len_get (tree decl)
{
tree len;
if (POINTER_TYPE_P (TREE_TYPE (decl)))
decl = build_fold_indirect_ref_loc (input_location, decl);
len = gfc_advance_chain (TYPE_FIELDS (TREE_TYPE (decl)),
CLASS_LEN_FIELD);
return fold_build3_loc (input_location, COMPONENT_REF,
TREE_TYPE (len), decl, len,
NULL_TREE);
}
static tree
gfc_vtable_field_get (tree decl, int field)
{
tree size;
tree vptr;
vptr = gfc_class_vptr_get (decl);
vptr = build_fold_indirect_ref_loc (input_location, vptr);
size = gfc_advance_chain (TYPE_FIELDS (TREE_TYPE (vptr)),
field);
size = fold_build3_loc (input_location, COMPONENT_REF,
TREE_TYPE (size), vptr, size,
NULL_TREE);
/* Always return size as an array index type. */
if (field == VTABLE_SIZE_FIELD)
size = fold_convert (gfc_array_index_type, size);
gcc_assert (size);
return size;
}
tree
gfc_vtable_hash_get (tree decl)
{
return gfc_vtable_field_get (decl, VTABLE_HASH_FIELD);
}
tree
gfc_vtable_size_get (tree decl)
{
return gfc_vtable_field_get (decl, VTABLE_SIZE_FIELD);
}
tree
gfc_vtable_extends_get (tree decl)
{
return gfc_vtable_field_get (decl, VTABLE_EXTENDS_FIELD);
}
tree
gfc_vtable_def_init_get (tree decl)
{
return gfc_vtable_field_get (decl, VTABLE_DEF_INIT_FIELD);
}
tree
gfc_vtable_copy_get (tree decl)
{
return gfc_vtable_field_get (decl, VTABLE_COPY_FIELD);
}
tree
gfc_vtable_final_get (tree decl)
{
return gfc_vtable_field_get (decl, VTABLE_FINAL_FIELD);
}
#undef CLASS_DATA_FIELD
#undef CLASS_VPTR_FIELD
#undef CLASS_LEN_FIELD
#undef VTABLE_HASH_FIELD
#undef VTABLE_SIZE_FIELD
#undef VTABLE_EXTENDS_FIELD
#undef VTABLE_DEF_INIT_FIELD
#undef VTABLE_COPY_FIELD
#undef VTABLE_FINAL_FIELD
/* Reset the vptr to the declared type, e.g. after deallocation. */
void
gfc_reset_vptr (stmtblock_t *block, gfc_expr *e)
{
gfc_expr *rhs, *lhs = gfc_copy_expr (e);
gfc_symbol *vtab;
tree tmp;
gfc_ref *ref;
/* If we have a class array, we need go back to the class
container. */
if (lhs->ref && lhs->ref->next && !lhs->ref->next->next
&& lhs->ref->next->type == REF_ARRAY
&& lhs->ref->next->u.ar.type == AR_FULL
&& lhs->ref->type == REF_COMPONENT
&& strcmp (lhs->ref->u.c.component->name, "_data") == 0)
{
gfc_free_ref_list (lhs->ref);
lhs->ref = NULL;
}
else
for (ref = lhs->ref; ref; ref = ref->next)
if (ref->next && ref->next->next && !ref->next->next->next
&& ref->next->next->type == REF_ARRAY
&& ref->next->next->u.ar.type == AR_FULL
&& ref->next->type == REF_COMPONENT
&& strcmp (ref->next->u.c.component->name, "_data") == 0)
{
gfc_free_ref_list (ref->next);
ref->next = NULL;
}
gfc_add_vptr_component (lhs);
if (UNLIMITED_POLY (e))
rhs = gfc_get_null_expr (NULL);
else
{
vtab = gfc_find_derived_vtab (e->ts.u.derived);
rhs = gfc_lval_expr_from_sym (vtab);
}
tmp = gfc_trans_pointer_assignment (lhs, rhs);
gfc_add_expr_to_block (block, tmp);
gfc_free_expr (lhs);
gfc_free_expr (rhs);
}
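/* For example (Fortran, illustrative): after DEALLOCATE (x) with
CLASS(t), ALLOCATABLE :: x, the _vptr is reset to the vtab of the
declared type t; for CLASS(*) there is no declared type, so a null
pointer is assigned instead, as coded above. */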
/* Obtain the vptr of the last class reference in an expression.
Return NULL_TREE if no class reference is found. */
tree
gfc_get_vptr_from_expr (tree expr)
{
tree tmp;
tree type;
for (tmp = expr; tmp; tmp = TREE_OPERAND (tmp, 0))
{
type = TREE_TYPE (tmp);
while (type)
{
if (GFC_CLASS_TYPE_P (type))
return gfc_class_vptr_get (tmp);
if (type != TYPE_CANONICAL (type))
type = TYPE_CANONICAL (type);
else
type = NULL_TREE;
}
if (TREE_CODE (tmp) == VAR_DECL)
break;
}
return NULL_TREE;
}
static void
class_array_data_assign (stmtblock_t *block, tree lhs_desc, tree rhs_desc,
bool lhs_type)
{
tree tmp, tmp2, type;
gfc_conv_descriptor_data_set (block, lhs_desc,
gfc_conv_descriptor_data_get (rhs_desc));
gfc_conv_descriptor_offset_set (block, lhs_desc,
gfc_conv_descriptor_offset_get (rhs_desc));
gfc_add_modify (block, gfc_conv_descriptor_dtype (lhs_desc),
gfc_conv_descriptor_dtype (rhs_desc));
/* Assign the dimension as range-ref. */
tmp = gfc_get_descriptor_dimension (lhs_desc);
tmp2 = gfc_get_descriptor_dimension (rhs_desc);
type = lhs_type ? TREE_TYPE (tmp) : TREE_TYPE (tmp2);
tmp = build4_loc (input_location, ARRAY_RANGE_REF, type, tmp,
gfc_index_zero_node, NULL_TREE, NULL_TREE);
tmp2 = build4_loc (input_location, ARRAY_RANGE_REF, type, tmp2,
gfc_index_zero_node, NULL_TREE, NULL_TREE);
gfc_add_modify (block, tmp, tmp2);
}
/* Takes a derived type expression and returns the address of a temporary
class object of the 'declared' type. If vptr is not NULL, this is
used for the temporary class object.
optional_alloc_ptr is false when the dummy is neither allocatable
nor a pointer; that's only relevant for the optional handling. */
void
gfc_conv_derived_to_class (gfc_se *parmse, gfc_expr *e,
gfc_typespec class_ts, tree vptr, bool optional,
bool optional_alloc_ptr)
{
gfc_symbol *vtab;
tree cond_optional = NULL_TREE;
gfc_ss *ss;
tree ctree;
tree var;
tree tmp;
/* The derived type needs to be converted to a temporary
CLASS object. */
tmp = gfc_typenode_for_spec (&class_ts);
var = gfc_create_var (tmp, "class");
/* Set the vptr. */
ctree = gfc_class_vptr_get (var);
if (vptr != NULL_TREE)
{
/* Use the dynamic vptr. */
tmp = vptr;
}
else
{
/* In this case the vtab corresponds to the derived type and the
vptr must point to it. */
vtab = gfc_find_derived_vtab (e->ts.u.derived);
gcc_assert (vtab);
tmp = gfc_build_addr_expr (NULL_TREE, gfc_get_symbol_decl (vtab));
}
gfc_add_modify (&parmse->pre, ctree,
fold_convert (TREE_TYPE (ctree), tmp));
/* Now set the data field. */
ctree = gfc_class_data_get (var);
if (optional)
cond_optional = gfc_conv_expr_present (e->symtree->n.sym);
if (parmse->ss && parmse->ss->info->useflags)
{
/* For an array reference in an elemental procedure call we need
to retain the ss to provide the scalarized array reference. */
gfc_conv_expr_reference (parmse, e);
tmp = fold_convert (TREE_TYPE (ctree), parmse->expr);
if (optional)
tmp = build3_loc (input_location, COND_EXPR, TREE_TYPE (tmp),
cond_optional, tmp,
fold_convert (TREE_TYPE (tmp), null_pointer_node));
gfc_add_modify (&parmse->pre, ctree, tmp);
}
else
{
ss = gfc_walk_expr (e);
if (ss == gfc_ss_terminator)
{
parmse->ss = NULL;
gfc_conv_expr_reference (parmse, e);
/* Scalar to an assumed-rank array. */
if (class_ts.u.derived->components->as)
{
tree type;
type = get_scalar_to_descriptor_type (parmse->expr,
gfc_expr_attr (e));
gfc_add_modify (&parmse->pre, gfc_conv_descriptor_dtype (ctree),
gfc_get_dtype (type));
if (optional)
parmse->expr = build3_loc (input_location, COND_EXPR,
TREE_TYPE (parmse->expr),
cond_optional, parmse->expr,
fold_convert (TREE_TYPE (parmse->expr),
null_pointer_node));
gfc_conv_descriptor_data_set (&parmse->pre, ctree, parmse->expr);
}
else
{
tmp = fold_convert (TREE_TYPE (ctree), parmse->expr);
if (optional)
tmp = build3_loc (input_location, COND_EXPR, TREE_TYPE (tmp),
cond_optional, tmp,
fold_convert (TREE_TYPE (tmp),
null_pointer_node));
gfc_add_modify (&parmse->pre, ctree, tmp);
}
}
else
{
stmtblock_t block;
gfc_init_block (&block);
parmse->ss = ss;
gfc_conv_expr_descriptor (parmse, e);
if (e->rank != class_ts.u.derived->components->as->rank)
{
gcc_assert (class_ts.u.derived->components->as->type
== AS_ASSUMED_RANK);
class_array_data_assign (&block, ctree, parmse->expr, false);
}
else
{
if (gfc_expr_attr (e).codimension)
parmse->expr = fold_build1_loc (input_location,
VIEW_CONVERT_EXPR,
TREE_TYPE (ctree),
parmse->expr);
gfc_add_modify (&block, ctree, parmse->expr);
}
if (optional)
{
tmp = gfc_finish_block (&block);
gfc_init_block (&block);
gfc_conv_descriptor_data_set (&block, ctree, null_pointer_node);
tmp = build3_v (COND_EXPR, cond_optional, tmp,
gfc_finish_block (&block));
gfc_add_expr_to_block (&parmse->pre, tmp);
}
else
gfc_add_block_to_block (&parmse->pre, &block);
}
}
/* Pass the address of the class object. */
parmse->expr = gfc_build_addr_expr (NULL_TREE, var);
if (optional && optional_alloc_ptr)
parmse->expr = build3_loc (input_location, COND_EXPR,
TREE_TYPE (parmse->expr),
cond_optional, parmse->expr,
fold_convert (TREE_TYPE (parmse->expr),
null_pointer_node));
}
/* Create a new class container, which is required as scalar coarrays
have an array descriptor while normal scalars do not. Optionally,
NULL pointer checks are added if the argument is OPTIONAL. */
static void
class_scalar_coarray_to_class (gfc_se *parmse, gfc_expr *e,
gfc_typespec class_ts, bool optional)
{
tree var, ctree, tmp;
stmtblock_t block;
gfc_ref *ref;
gfc_ref *class_ref;
gfc_init_block (&block);
class_ref = NULL;
for (ref = e->ref; ref; ref = ref->next)
{
if (ref->type == REF_COMPONENT
&& ref->u.c.component->ts.type == BT_CLASS)
class_ref = ref;
}
if (class_ref == NULL
&& e->symtree && e->symtree->n.sym->ts.type == BT_CLASS)
tmp = e->symtree->n.sym->backend_decl;
else
{
/* Remove everything after the last class reference, convert the
expression and then recover its tail end once more. */
gfc_se tmpse;
ref = class_ref->next;
class_ref->next = NULL;
gfc_init_se (&tmpse, NULL);
gfc_conv_expr (&tmpse, e);
class_ref->next = ref;
tmp = tmpse.expr;
}
var = gfc_typenode_for_spec (&class_ts);
var = gfc_create_var (var, "class");
ctree = gfc_class_vptr_get (var);
gfc_add_modify (&block, ctree,
fold_convert (TREE_TYPE (ctree), gfc_class_vptr_get (tmp)));
ctree = gfc_class_data_get (var);
tmp = gfc_conv_descriptor_data_get (gfc_class_data_get (tmp));
gfc_add_modify (&block, ctree, fold_convert (TREE_TYPE (ctree), tmp));
/* Pass the address of the class object. */
parmse->expr = gfc_build_addr_expr (NULL_TREE, var);
if (optional)
{
tree cond = gfc_conv_expr_present (e->symtree->n.sym);
tree tmp2;
tmp = gfc_finish_block (&block);
gfc_init_block (&block);
tmp2 = gfc_class_data_get (var);
gfc_add_modify (&block, tmp2, fold_convert (TREE_TYPE (tmp2),
null_pointer_node));
tmp2 = gfc_finish_block (&block);
tmp = build3_loc (input_location, COND_EXPR, void_type_node,
cond, tmp, tmp2);
gfc_add_expr_to_block (&parmse->pre, tmp);
}
else
gfc_add_block_to_block (&parmse->pre, &block);
}
/* Takes an intrinsic type expression and returns the address of a temporary
class object of the 'declared' type. */
void
gfc_conv_intrinsic_to_class (gfc_se *parmse, gfc_expr *e,
gfc_typespec class_ts)
{
gfc_symbol *vtab;
gfc_ss *ss;
tree ctree;
tree var;
tree tmp;
/* The intrinsic type needs to be converted to a temporary
CLASS object. */
tmp = gfc_typenode_for_spec (&class_ts);
var = gfc_create_var (tmp, "class");
/* Set the vptr. */
ctree = gfc_class_vptr_get (var);
vtab = gfc_find_vtab (&e->ts);
gcc_assert (vtab);
tmp = gfc_build_addr_expr (NULL_TREE, gfc_get_symbol_decl (vtab));
gfc_add_modify (&parmse->pre, ctree,
fold_convert (TREE_TYPE (ctree), tmp));
/* Now set the data field. */
ctree = gfc_class_data_get (var);
if (parmse->ss && parmse->ss->info->useflags)
{
/* For an array reference in an elemental procedure call we need
to retain the ss to provide the scalarized array reference. */
gfc_conv_expr_reference (parmse, e);
tmp = fold_convert (TREE_TYPE (ctree), parmse->expr);
gfc_add_modify (&parmse->pre, ctree, tmp);
}
else
{
ss = gfc_walk_expr (e);
if (ss == gfc_ss_terminator)
{
parmse->ss = NULL;
gfc_conv_expr_reference (parmse, e);
tmp = fold_convert (TREE_TYPE (ctree), parmse->expr);
gfc_add_modify (&parmse->pre, ctree, tmp);
}
else
{
parmse->ss = ss;
parmse->use_offset = 1;
gfc_conv_expr_descriptor (parmse, e);
gfc_add_modify (&parmse->pre, ctree, parmse->expr);
}
}
/* When the actual arg is a char array, also set the _len component of
the unlimited polymorphic entity. */
if (e->ts.type == BT_CHARACTER)
{
ctree = gfc_class_len_get (var);
/* Start with parmse->string_length because this seems to be set to a
correct value more often. */
if (parmse->string_length)
gfc_add_modify (&parmse->pre, ctree, parmse->string_length);
/* When the string_length is not yet set, then try the backend_decl of
the cl. */
else if (e->ts.u.cl->backend_decl)
gfc_add_modify (&parmse->pre, ctree, e->ts.u.cl->backend_decl);
/* If both of the above approaches fail, then try to generate an
expression from the input, which is currently only feasible when the
expression can be evaluated to a constant. */
else
{
/* Try to simplify the expression. */
gfc_simplify_expr (e, 0);
if (e->expr_type == EXPR_CONSTANT && !e->ts.u.cl->resolved)
{
/* Amazingly all data is present to compute the length of a
constant string, but the expression is not yet there. */
e->ts.u.cl->length = gfc_get_constant_expr (BT_INTEGER, 4,
&e->where);
mpz_set_ui (e->ts.u.cl->length->value.integer,
e->value.character.length);
gfc_conv_const_charlen (e->ts.u.cl);
e->ts.u.cl->resolved = 1;
gfc_add_modify (&parmse->pre, ctree, e->ts.u.cl->backend_decl);
}
else
{
gfc_error ("Can't compute the length of the char array at %L.",
&e->where);
}
}
}
/* Pass the address of the class object. */
parmse->expr = gfc_build_addr_expr (NULL_TREE, var);
}
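/* For example (Fortran, illustrative): calling
subroutine s (x)
class(*), intent(in) :: x
end subroutine
as CALL S ('abc') wraps the character actual as above, with the _len
component receiving the string length 3. */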
/* Takes a scalarized class array expression and returns the
address of a temporary scalar class object of the 'declared'
type.
OOP-TODO: This could be improved by adding code that branched on
the dynamic type being the same as the declared type. In this case
the original class expression can be passed directly.
optional_alloc_ptr is false when the dummy is neither allocatable
nor a pointer; that's relevant for the optional handling.
Set copyback to true if class container's _data and _vtab pointers
might get modified. */
void
gfc_conv_class_to_class (gfc_se *parmse, gfc_expr *e, gfc_typespec class_ts,
bool elemental, bool copyback, bool optional,
bool optional_alloc_ptr)
{
tree ctree;
tree var;
tree tmp;
tree vptr;
tree cond = NULL_TREE;
gfc_ref *ref;
gfc_ref *class_ref;
stmtblock_t block;
bool full_array = false;
gfc_init_block (&block);
class_ref = NULL;
for (ref = e->ref; ref; ref = ref->next)
{
if (ref->type == REF_COMPONENT
&& ref->u.c.component->ts.type == BT_CLASS)
class_ref = ref;
if (ref->next == NULL)
break;
}
if ((ref == NULL || class_ref == ref)
&& (!class_ts.u.derived->components->as
|| class_ts.u.derived->components->as->rank != -1))
return;
/* Test for FULL_ARRAY. */
if (e->rank == 0 && gfc_expr_attr (e).codimension
&& gfc_expr_attr (e).dimension)
full_array = true;
else
gfc_is_class_array_ref (e, &full_array);
/* The derived type needs to be converted to a temporary
CLASS object. */
tmp = gfc_typenode_for_spec (&class_ts);
var = gfc_create_var (tmp, "class");
/* Set the data. */
ctree = gfc_class_data_get (var);
if (class_ts.u.derived->components->as
&& e->rank != class_ts.u.derived->components->as->rank)
{
if (e->rank == 0)
{
tree type = get_scalar_to_descriptor_type (parmse->expr,
gfc_expr_attr (e));
gfc_add_modify (&block, gfc_conv_descriptor_dtype (ctree),
gfc_get_dtype (type));
tmp = gfc_class_data_get (parmse->expr);
if (!POINTER_TYPE_P (TREE_TYPE (tmp)))
tmp = gfc_build_addr_expr (NULL_TREE, tmp);
gfc_conv_descriptor_data_set (&block, ctree, tmp);
}
else
class_array_data_assign (&block, ctree, parmse->expr, false);
}
else
{
if (TREE_TYPE (parmse->expr) != TREE_TYPE (ctree))
parmse->expr = fold_build1_loc (input_location, VIEW_CONVERT_EXPR,
TREE_TYPE (ctree), parmse->expr);
gfc_add_modify (&block, ctree, parmse->expr);
}
/* Return the data component, except in the case of scalarized array
references, where nullification of the data component cannot occur and
so there is no need. */
if (!elemental && full_array && copyback)
{
if (class_ts.u.derived->components->as
&& e->rank != class_ts.u.derived->components->as->rank)
{
if (e->rank == 0)
gfc_add_modify (&parmse->post, gfc_class_data_get (parmse->expr),
gfc_conv_descriptor_data_get (ctree));
else
class_array_data_assign (&parmse->post, parmse->expr, ctree, true);
}
else
gfc_add_modify (&parmse->post, parmse->expr, ctree);
}
/* Set the vptr. */
ctree = gfc_class_vptr_get (var);
/* The vptr is the second field of the actual argument.
First we have to find the corresponding class reference. */
tmp = NULL_TREE;
if (class_ref == NULL
&& e->symtree && e->symtree->n.sym->ts.type == BT_CLASS)
tmp = e->symtree->n.sym->backend_decl;
else
{
/* Remove everything after the last class reference, convert the
expression and then recover its tail end once more. */
gfc_se tmpse;
ref = class_ref->next;
class_ref->next = NULL;
gfc_init_se (&tmpse, NULL);
gfc_conv_expr (&tmpse, e);
class_ref->next = ref;
tmp = tmpse.expr;
}
gcc_assert (tmp != NULL_TREE);
/* Dereference if needs be. */
if (TREE_CODE (TREE_TYPE (tmp)) == REFERENCE_TYPE)
tmp = build_fold_indirect_ref_loc (input_location, tmp);
vptr = gfc_class_vptr_get (tmp);
gfc_add_modify (&block, ctree,
fold_convert (TREE_TYPE (ctree), vptr));
/* Return the vptr component, except in the case of scalarized array
references, where the dynamic type cannot change. */
if (!elemental && full_array && copyback)
gfc_add_modify (&parmse->post, vptr,
fold_convert (TREE_TYPE (vptr), ctree));
if (optional)
{
tree tmp2;
cond = gfc_conv_expr_present (e->symtree->n.sym);
tmp = gfc_finish_block (&block);
if (optional_alloc_ptr)
tmp2 = build_empty_stmt (input_location);
else
{
gfc_init_block (&block);
tmp2 = gfc_conv_descriptor_data_get (gfc_class_data_get (var));
gfc_add_modify (&block, tmp2, fold_convert (TREE_TYPE (tmp2),
null_pointer_node));
tmp2 = gfc_finish_block (&block);
}
tmp = build3_loc (input_location, COND_EXPR, void_type_node,
cond, tmp, tmp2);
gfc_add_expr_to_block (&parmse->pre, tmp);
}
else
gfc_add_block_to_block (&parmse->pre, &block);
/* Pass the address of the class object. */
parmse->expr = gfc_build_addr_expr (NULL_TREE, var);
if (optional && optional_alloc_ptr)
parmse->expr = build3_loc (input_location, COND_EXPR,
TREE_TYPE (parmse->expr),
cond, parmse->expr,
fold_convert (TREE_TYPE (parmse->expr),
null_pointer_node));
}
/* Given a class array declaration and an index, returns the address
of the referenced element. */
tree
gfc_get_class_array_ref (tree index, tree class_decl)
{
tree data = gfc_class_data_get (class_decl);
tree size = gfc_vtable_size_get (class_decl);
tree offset = fold_build2_loc (input_location, MULT_EXPR,
gfc_array_index_type,
index, size);
tree ptr;
data = gfc_conv_descriptor_data_get (data);
ptr = fold_convert (pvoid_type_node, data);
ptr = fold_build_pointer_plus_loc (input_location, ptr, offset);
return fold_convert (TREE_TYPE (data), ptr);
}
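/* That is, the element address is computed as _data.data
+ index * _vptr->_size, with the byte offset formed in
gfc_array_index_type (a sketch of the computation above). */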
/* Copies one class expression to another, assuming that if either
'to' or 'from' are arrays they are packed. Should 'from' be
NULL_TREE, the initialization expression for 'to' is used, assuming
that the _vptr is set. */
tree
gfc_copy_class_to_class (tree from, tree to, tree nelems)
{
tree fcn;
tree fcn_type;
tree from_data;
tree to_data;
tree to_ref;
tree from_ref;
vec<tree, va_gc> *args;
tree tmp;
tree index;
stmtblock_t loopbody;
stmtblock_t body;
gfc_loopinfo loop;
args = NULL;
if (from != NULL_TREE)
fcn = gfc_vtable_copy_get (from);
else
fcn = gfc_vtable_copy_get (to);
fcn_type = TREE_TYPE (TREE_TYPE (fcn));
if (from != NULL_TREE)
from_data = gfc_class_data_get (from);
else
from_data = gfc_vtable_def_init_get (to);
to_data = gfc_class_data_get (to);
if (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (to_data)))
{
gfc_init_block (&body);
tmp = fold_build2_loc (input_location, MINUS_EXPR,
gfc_array_index_type, nelems,
gfc_index_one_node);
nelems = gfc_evaluate_now (tmp, &body);
index = gfc_create_var (gfc_array_index_type, "S");
if (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (from_data)))
{
from_ref = gfc_get_class_array_ref (index, from);
vec_safe_push (args, from_ref);
}
else
vec_safe_push (args, from_data);
to_ref = gfc_get_class_array_ref (index, to);
vec_safe_push (args, to_ref);
tmp = build_call_vec (fcn_type, fcn, args);
/* Build the body of the loop. */
gfc_init_block (&loopbody);
gfc_add_expr_to_block (&loopbody, tmp);
/* Build the loop and return. */
gfc_init_loopinfo (&loop);
loop.dimen = 1;
loop.from[0] = gfc_index_zero_node;
loop.loopvar[0] = index;
loop.to[0] = nelems;
gfc_trans_scalarizing_loops (&loop, &loopbody);
gfc_add_block_to_block (&body, &loop.pre);
tmp = gfc_finish_block (&body);
gfc_cleanup_loop (&loop);
}
else
{
gcc_assert (!GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (from_data)));
vec_safe_push (args, from_data);
vec_safe_push (args, to_data);
tmp = build_call_vec (fcn_type, fcn, args);
}
return tmp;
}
static tree
gfc_trans_class_array_init_assign (gfc_expr *rhs, gfc_expr *lhs, gfc_expr *obj)
{
gfc_actual_arglist *actual;
gfc_expr *ppc;
gfc_code *ppc_code;
tree res;
actual = gfc_get_actual_arglist ();
actual->expr = gfc_copy_expr (rhs);
actual->next = gfc_get_actual_arglist ();
actual->next->expr = gfc_copy_expr (lhs);
ppc = gfc_copy_expr (obj);
gfc_add_vptr_component (ppc);
gfc_add_component_ref (ppc, "_copy");
ppc_code = gfc_get_code (EXEC_CALL);
ppc_code->resolved_sym = ppc->symtree->n.sym;
/* Although '_copy' is set to be elemental in class.c, it is
not staying that way. Find out why, sometime.... */
ppc_code->resolved_sym->attr.elemental = 1;
ppc_code->ext.actual = actual;
ppc_code->expr1 = ppc;
/* Since '_copy' is elemental, the scalarizer will take care
of arrays in gfc_trans_call. */
res = gfc_trans_call (ppc_code, false, NULL, NULL, false);
gfc_free_statements (ppc_code);
return res;
}
/* Special case for initializing a polymorphic dummy with INTENT(OUT).
A MEMCPY is needed to copy the full data from the default initializer
of the dynamic type. */
tree
gfc_trans_class_init_assign (gfc_code *code)
{
stmtblock_t block;
tree tmp;
gfc_se dst,src,memsz;
gfc_expr *lhs, *rhs, *sz;
gfc_start_block (&block);
lhs = gfc_copy_expr (code->expr1);
gfc_add_data_component (lhs);
rhs = gfc_copy_expr (code->expr1);
gfc_add_vptr_component (rhs);
/* Make sure that the component backend_decls have been built, which
will not have happened if the derived types concerned have not
been referenced. */
gfc_get_derived_type (rhs->ts.u.derived);
gfc_add_def_init_component (rhs);
if (code->expr1->ts.type == BT_CLASS
&& CLASS_DATA (code->expr1)->attr.dimension)
tmp = gfc_trans_class_array_init_assign (rhs, lhs, code->expr1);
else
{
sz = gfc_copy_expr (code->expr1);
gfc_add_vptr_component (sz);
gfc_add_size_component (sz);
gfc_init_se (&dst, NULL);
gfc_init_se (&src, NULL);
gfc_init_se (&memsz, NULL);
gfc_conv_expr (&dst, lhs);
gfc_conv_expr (&src, rhs);
gfc_conv_expr (&memsz, sz);
gfc_add_block_to_block (&block, &src.pre);
src.expr = gfc_build_addr_expr (NULL_TREE, src.expr);
tmp = gfc_build_memcpy_call (dst.expr, src.expr, memsz.expr);
}
if (code->expr1->symtree->n.sym->attr.optional
|| code->expr1->symtree->n.sym->ns->proc_name->attr.entry_master)
{
tree present = gfc_conv_expr_present (code->expr1->symtree->n.sym);
tmp = build3_loc (input_location, COND_EXPR, TREE_TYPE (tmp),
present, tmp,
build_empty_stmt (input_location));
}
gfc_add_expr_to_block (&block, tmp);
return gfc_finish_block (&block);
}
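/* For example (Fortran, illustrative): on entry to
subroutine s (x)
class(t), intent(out) :: x
end subroutine
x is re-initialized from the default initializer of its dynamic type:
a memcpy of _vptr->_size bytes from _vptr->_def_init for scalars, or
the elemental _copy call built by gfc_trans_class_array_init_assign
for arrays. */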
/* Translate an assignment to a CLASS object
(pointer or ordinary assignment). */
tree
gfc_trans_class_assign (gfc_expr *expr1, gfc_expr *expr2, gfc_exec_op op)
{
stmtblock_t block;
tree tmp;
gfc_expr *lhs;
gfc_expr *rhs;
gfc_ref *ref;
gfc_start_block (&block);
ref = expr1->ref;
while (ref && ref->next)
ref = ref->next;
/* Class valued proc_pointer assignments do not need any further
preparation. */
if (ref && ref->type == REF_COMPONENT
&& ref->u.c.component->attr.proc_pointer
&& expr2->expr_type == EXPR_VARIABLE
&& expr2->symtree->n.sym->attr.flavor == FL_PROCEDURE
&& op == EXEC_POINTER_ASSIGN)
goto assign;
if (expr2->ts.type != BT_CLASS)
{
/* Insert an additional assignment which sets the '_vptr' field. */
gfc_symbol *vtab = NULL;
gfc_symtree *st;
lhs = gfc_copy_expr (expr1);
gfc_add_vptr_component (lhs);
if (UNLIMITED_POLY (expr1)
&& expr2->expr_type == EXPR_NULL && expr2->ts.type == BT_UNKNOWN)
{
rhs = gfc_get_null_expr (&expr2->where);
goto assign_vptr;
}
if (expr2->expr_type == EXPR_NULL)
vtab = gfc_find_vtab (&expr1->ts);
else
vtab = gfc_find_vtab (&expr2->ts);
gcc_assert (vtab);
rhs = gfc_get_expr ();
rhs->expr_type = EXPR_VARIABLE;
gfc_find_sym_tree (vtab->name, vtab->ns, 1, &st);
rhs->symtree = st;
rhs->ts = vtab->ts;
assign_vptr:
tmp = gfc_trans_pointer_assignment (lhs, rhs);
gfc_add_expr_to_block (&block, tmp);
gfc_free_expr (lhs);
gfc_free_expr (rhs);
}
else if (expr1->ts.type == BT_DERIVED && UNLIMITED_POLY (expr2))
{
/* F2003:C717: only sequence and bind-C types can come here. */
gcc_assert (expr1->ts.u.derived->attr.sequence
|| expr1->ts.u.derived->attr.is_bind_c);
gfc_add_data_component (expr2);
goto assign;
}
else if (CLASS_DATA (expr2)->attr.dimension && expr2->expr_type != EXPR_FUNCTION)
{
/* Insert an additional assignment which sets the '_vptr' field. */
lhs = gfc_copy_expr (expr1);
gfc_add_vptr_component (lhs);
rhs = gfc_copy_expr (expr2);
gfc_add_vptr_component (rhs);
tmp = gfc_trans_pointer_assignment (lhs, rhs);
gfc_add_expr_to_block (&block, tmp);
gfc_free_expr (lhs);
gfc_free_expr (rhs);
}
/* Do the actual CLASS assignment. */
if (expr2->ts.type == BT_CLASS
&& !CLASS_DATA (expr2)->attr.dimension)
op = EXEC_ASSIGN;
else if (expr2->expr_type != EXPR_FUNCTION || expr2->ts.type != BT_CLASS
|| !CLASS_DATA (expr2)->attr.dimension)
gfc_add_data_component (expr1);
assign:
if (op == EXEC_ASSIGN)
tmp = gfc_trans_assignment (expr1, expr2, false, true);
else if (op == EXEC_POINTER_ASSIGN)
tmp = gfc_trans_pointer_assignment (expr1, expr2);
else
gcc_unreachable();
gfc_add_expr_to_block (&block, tmp);
return gfc_finish_block (&block);
}
/* End of prototype trans-class.c */
static void
realloc_lhs_warning (bt type, bool array, locus *where)
{
if (array && type != BT_CLASS && type != BT_DERIVED
&& gfc_option.warn_realloc_lhs)
gfc_warning ("Code for reallocating the allocatable array at %L will "
"be added", where);
else if (gfc_option.warn_realloc_lhs_all)
gfc_warning ("Code for reallocating the allocatable variable at %L "
"will be added", where);
}
static tree gfc_trans_structure_assign (tree dest, gfc_expr * expr);
static void gfc_apply_interface_mapping_to_expr (gfc_interface_mapping *,
gfc_expr *);
/* Copy the scalarization loop variables. */
static void
gfc_copy_se_loopvars (gfc_se * dest, gfc_se * src)
{
dest->ss = src->ss;
dest->loop = src->loop;
}
/* Initialize a simple expression holder.
Care must be taken when multiple se are created with the same parent.
The child se must be kept in sync. The easiest way is to delay creation
of a child se until after the previous se has been translated. */
void
gfc_init_se (gfc_se * se, gfc_se * parent)
{
memset (se, 0, sizeof (gfc_se));
gfc_init_block (&se->pre);
gfc_init_block (&se->post);
se->parent = parent;
if (parent)
gfc_copy_se_loopvars (se, parent);
}
/* Advances to the next SS in the chain. Use this rather than setting
se->ss = se->ss->next because all the parents need to be kept in sync.
See gfc_init_se. */
void
gfc_advance_se_ss_chain (gfc_se * se)
{
gfc_se *p;
gfc_ss *ss;
gcc_assert (se != NULL && se->ss != NULL && se->ss != gfc_ss_terminator);
p = se;
/* Walk down the parent chain. */
while (p != NULL)
{
/* Simple consistency check. */
gcc_assert (p->parent == NULL || p->parent->ss == p->ss
|| p->parent->ss->nested_ss == p->ss);
/* If we were in a nested loop, the next scalarized expression can be
on the parent ss' next pointer. Thus we should not take the next
pointer blindly, but rather go up one nest level as long as next
is the end of chain. */
ss = p->ss;
while (ss->next == gfc_ss_terminator && ss->parent != NULL)
ss = ss->parent;
p->ss = ss->next;
p = p->parent;
}
}
/* Ensure the result of the expression is either a temporary variable
or a constant so that it can be used repeatedly. */
void
gfc_make_safe_expr (gfc_se * se)
{
tree var;
if (CONSTANT_CLASS_P (se->expr))
return;
/* We need a temporary for this result. */
var = gfc_create_var (TREE_TYPE (se->expr), NULL);
gfc_add_modify (&se->pre, var, se->expr);
se->expr = var;
}
/* Return an expression which determines if a dummy parameter is present.
Also used for arguments to procedures with multiple entry points. */
tree
gfc_conv_expr_present (gfc_symbol * sym)
{
tree decl, cond;
gcc_assert (sym->attr.dummy);
decl = gfc_get_symbol_decl (sym);
/* Intrinsic scalars with VALUE attribute which are passed by value
use a hidden argument to denote the present status. */
if (sym->attr.value && sym->ts.type != BT_CHARACTER
&& sym->ts.type != BT_CLASS && sym->ts.type != BT_DERIVED
&& !sym->attr.dimension)
{
char name[GFC_MAX_SYMBOL_LEN + 2];
tree tree_name;
gcc_assert (TREE_CODE (decl) == PARM_DECL);
name[0] = '_';
strcpy (&name[1], sym->name);
tree_name = get_identifier (name);
/* Walk function argument list to find hidden arg. */
cond = DECL_ARGUMENTS (DECL_CONTEXT (decl));
for ( ; cond != NULL_TREE; cond = TREE_CHAIN (cond))
if (DECL_NAME (cond) == tree_name)
break;
gcc_assert (cond);
return cond;
}
if (TREE_CODE (decl) != PARM_DECL)
{
/* Array parameters use a temporary descriptor, we want the real
parameter. */
gcc_assert (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (decl))
|| GFC_ARRAY_TYPE_P (TREE_TYPE (decl)));
decl = GFC_DECL_SAVED_DESCRIPTOR (decl);
}
cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node, decl,
fold_convert (TREE_TYPE (decl), null_pointer_node));
/* Fortran 2008 allows passing null pointers and non-associated pointers
as actual arguments to denote absent dummies. For array descriptors,
we thus also need to check the array descriptor. For BT_CLASS, this
can also occur for scalars and under F2003, due to type->class wrapping
and class->class wrapping. Note further that BT_CLASS always uses an
array descriptor for arrays, also for explicit-shape/assumed-size
arrays. */
if (!sym->attr.allocatable
&& ((sym->ts.type != BT_CLASS && !sym->attr.pointer)
|| (sym->ts.type == BT_CLASS
&& !CLASS_DATA (sym)->attr.allocatable
&& !CLASS_DATA (sym)->attr.class_pointer))
&& ((gfc_option.allow_std & GFC_STD_F2008) != 0
|| sym->ts.type == BT_CLASS))
{
tree tmp;
if ((sym->as && (sym->as->type == AS_ASSUMED_SHAPE
|| sym->as->type == AS_ASSUMED_RANK
|| sym->attr.codimension))
|| (sym->ts.type == BT_CLASS && CLASS_DATA (sym)->as))
{
tmp = build_fold_indirect_ref_loc (input_location, decl);
if (sym->ts.type == BT_CLASS)
tmp = gfc_class_data_get (tmp);
tmp = gfc_conv_array_data (tmp);
}
else if (sym->ts.type == BT_CLASS)
tmp = gfc_class_data_get (decl);
else
tmp = NULL_TREE;
if (tmp != NULL_TREE)
{
tmp = fold_build2_loc (input_location, NE_EXPR, boolean_type_node, tmp,
fold_convert (TREE_TYPE (tmp), null_pointer_node));
cond = fold_build2_loc (input_location, TRUTH_ANDIF_EXPR,
boolean_type_node, cond, tmp);
}
}
return cond;
}
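/* For example (Fortran 2008, illustrative): CALL S (NULL()) denotes an
absent dummy, which is why, besides the argument address, the
descriptor's data pointer is checked above for the cases listed. */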
/* Converts a missing dummy argument into a null or zero. */
void
gfc_conv_missing_dummy (gfc_se * se, gfc_expr * arg, gfc_typespec ts, int kind)
{
tree present;
tree tmp;
present = gfc_conv_expr_present (arg->symtree->n.sym);
if (kind > 0)
{
/* Create a temporary and convert it to the correct type. */
tmp = gfc_get_int_type (kind);
tmp = fold_convert (tmp, build_fold_indirect_ref_loc (input_location,
se->expr));
/* Test for a NULL value. */
tmp = build3_loc (input_location, COND_EXPR, TREE_TYPE (tmp), present,
tmp, fold_convert (TREE_TYPE (tmp), integer_one_node));
tmp = gfc_evaluate_now (tmp, &se->pre);
se->expr = gfc_build_addr_expr (NULL_TREE, tmp);
}
else
{
tmp = build3_loc (input_location, COND_EXPR, TREE_TYPE (se->expr),
present, se->expr,
build_zero_cst (TREE_TYPE (se->expr)));
tmp = gfc_evaluate_now (tmp, &se->pre);
se->expr = tmp;
}
if (ts.type == BT_CHARACTER)
{
tmp = build_int_cst (gfc_charlen_type_node, 0);
tmp = fold_build3_loc (input_location, COND_EXPR, gfc_charlen_type_node,
present, se->string_length, tmp);
tmp = gfc_evaluate_now (tmp, &se->pre);
se->string_length = tmp;
}
return;
}
/* Get the character length of an expression, looking through gfc_refs
if necessary. */
tree
gfc_get_expr_charlen (gfc_expr *e)
{
gfc_ref *r;
tree length;
gcc_assert (e->expr_type == EXPR_VARIABLE
&& e->ts.type == BT_CHARACTER);
length = NULL; /* To silence compiler warning. */
if (is_subref_array (e) && e->ts.u.cl->length)
{
gfc_se tmpse;
gfc_init_se (&tmpse, NULL);
gfc_conv_expr_type (&tmpse, e->ts.u.cl->length, gfc_charlen_type_node);
e->ts.u.cl->backend_decl = tmpse.expr;
return tmpse.expr;
}
/* First candidate: if the variable is of type CHARACTER, the
expression's length could be the length of the character
variable. */
if (e->symtree->n.sym->ts.type == BT_CHARACTER)
length = e->symtree->n.sym->ts.u.cl->backend_decl;
/* Look through the reference chain for component references. */
for (r = e->ref; r; r = r->next)
{
switch (r->type)
{
case REF_COMPONENT:
if (r->u.c.component->ts.type == BT_CHARACTER)
length = r->u.c.component->ts.u.cl->backend_decl;
break;
case REF_ARRAY:
/* Do nothing. */
break;
default:
/* We should never get substring references here. These will be
broken down by the scalarizer. */
gcc_unreachable ();
break;
}
}
gcc_assert (length != NULL);
return length;
}
/* Return for an expression the backend decl of the coarray. */
static tree
get_tree_for_caf_expr (gfc_expr *expr)
{
tree caf_decl = NULL_TREE;
gfc_ref *ref;
gcc_assert (expr && expr->expr_type == EXPR_VARIABLE);
if (expr->symtree->n.sym->attr.codimension)
caf_decl = expr->symtree->n.sym->backend_decl;
for (ref = expr->ref; ref; ref = ref->next)
if (ref->type == REF_COMPONENT)
{
gfc_component *comp = ref->u.c.component;
if (comp->attr.pointer || comp->attr.allocatable)
caf_decl = NULL_TREE;
if (comp->attr.codimension)
caf_decl = comp->backend_decl;
}
gcc_assert (caf_decl != NULL_TREE);
return caf_decl;
}
/* For each character array constructor subexpression without a ts.u.cl->length,
replace it by its first element (if there aren't any elements, the length
should already be set to zero). */
static void
flatten_array_ctors_without_strlen (gfc_expr* e)
{
gfc_actual_arglist* arg;
gfc_constructor* c;
if (!e)
return;
switch (e->expr_type)
{
case EXPR_OP:
flatten_array_ctors_without_strlen (e->value.op.op1);
flatten_array_ctors_without_strlen (e->value.op.op2);
break;
case EXPR_COMPCALL:
/* TODO: Implement as with EXPR_FUNCTION when needed. */
gcc_unreachable ();
case EXPR_FUNCTION:
for (arg = e->value.function.actual; arg; arg = arg->next)
flatten_array_ctors_without_strlen (arg->expr);
break;
case EXPR_ARRAY:
/* We've found what we're looking for. */
if (e->ts.type == BT_CHARACTER && !e->ts.u.cl->length)
{
gfc_constructor *c;
gfc_expr* new_expr;
gcc_assert (e->value.constructor);
c = gfc_constructor_first (e->value.constructor);
new_expr = c->expr;
c->expr = NULL;
flatten_array_ctors_without_strlen (new_expr);
gfc_replace_expr (e, new_expr);
break;
}
/* Otherwise, fall through to handle constructor elements. */
case EXPR_STRUCTURE:
for (c = gfc_constructor_first (e->value.constructor);
c; c = gfc_constructor_next (c))
flatten_array_ctors_without_strlen (c->expr);
break;
default:
break;
}
}
/* Generate code to initialize a string length variable, storing the
result in cl->backend_decl. For array constructors, cl->length might be
NULL and in this case, the first element of the constructor is needed.
expr is the original expression so we can access it but can be NULL if
this is not needed. */
void
gfc_conv_string_length (gfc_charlen * cl, gfc_expr * expr, stmtblock_t * pblock)
{
gfc_se se;
gfc_init_se (&se, NULL);
if (!cl->length
&& cl->backend_decl
&& TREE_CODE (cl->backend_decl) == VAR_DECL)
return;
/* If cl->length is NULL, use gfc_conv_expr to obtain the string length but
"flatten" array constructors by taking their first element; all elements
should be the same length or a cl->length should be present. */
if (!cl->length)
{
gfc_expr* expr_flat;
gcc_assert (expr);
expr_flat = gfc_copy_expr (expr);
flatten_array_ctors_without_strlen (expr_flat);
gfc_resolve_expr (expr_flat);
gfc_conv_expr (&se, expr_flat);
gfc_add_block_to_block (pblock, &se.pre);
cl->backend_decl = convert (gfc_charlen_type_node, se.string_length);
gfc_free_expr (expr_flat);
return;
}
/* Convert cl->length. */
gcc_assert (cl->length);
gfc_conv_expr_type (&se, cl->length, gfc_charlen_type_node);
se.expr = fold_build2_loc (input_location, MAX_EXPR, gfc_charlen_type_node,
se.expr, build_int_cst (gfc_charlen_type_node, 0));
gfc_add_block_to_block (pblock, &se.pre);
if (cl->backend_decl)
gfc_add_modify (pblock, cl->backend_decl, se.expr);
else
cl->backend_decl = gfc_evaluate_now (se.expr, pblock);
}
static void
gfc_conv_substring (gfc_se * se, gfc_ref * ref, int kind,
const char *name, locus *where)
{
tree tmp;
tree type;
tree fault;
gfc_se start;
gfc_se end;
char *msg;
mpz_t length;
type = gfc_get_character_type (kind, ref->u.ss.length);
type = build_pointer_type (type);
gfc_init_se (&start, se);
gfc_conv_expr_type (&start, ref->u.ss.start, gfc_charlen_type_node);
gfc_add_block_to_block (&se->pre, &start.pre);
if (integer_onep (start.expr))
gfc_conv_string_parameter (se);
else
{
tmp = start.expr;
STRIP_NOPS (tmp);
/* Avoid multiple evaluation of substring start. */
if (!CONSTANT_CLASS_P (tmp) && !DECL_P (tmp))
start.expr = gfc_evaluate_now (start.expr, &se->pre);
/* Change the start of the string. */
if (TYPE_STRING_FLAG (TREE_TYPE (se->expr)))
tmp = se->expr;
else
tmp = build_fold_indirect_ref_loc (input_location,
se->expr);
tmp = gfc_build_array_ref (tmp, start.expr, NULL);
se->expr = gfc_build_addr_expr (type, tmp);
}
/* Length = end + 1 - start. */
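/* E.g. the substring s(2:5) has length 5 + 1 - 2 = 4 (illustrative); a
negative difference is clamped to zero further below. */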
gfc_init_se (&end, se);
if (ref->u.ss.end == NULL)
end.expr = se->string_length;
else
{
gfc_conv_expr_type (&end, ref->u.ss.end, gfc_charlen_type_node);
gfc_add_block_to_block (&se->pre, &end.pre);
}
tmp = end.expr;
STRIP_NOPS (tmp);
if (!CONSTANT_CLASS_P (tmp) && !DECL_P (tmp))
end.expr = gfc_evaluate_now (end.expr, &se->pre);
if (gfc_option.rtcheck & GFC_RTCHECK_BOUNDS)
{
tree nonempty = fold_build2_loc (input_location, LE_EXPR,
boolean_type_node, start.expr,
end.expr);
/* Check lower bound. */
fault = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
start.expr,
build_int_cst (gfc_charlen_type_node, 1));
fault = fold_build2_loc (input_location, TRUTH_ANDIF_EXPR,
boolean_type_node, nonempty, fault);
if (name)
asprintf (&msg, "Substring out of bounds: lower bound (%%ld) of '%s' "
"is less than one", name);
else
asprintf (&msg, "Substring out of bounds: lower bound (%%ld)"
"is less than one");
gfc_trans_runtime_check (true, false, fault, &se->pre, where, msg,
fold_convert (long_integer_type_node,
start.expr));
free (msg);
/* Check upper bound. */
fault = fold_build2_loc (input_location, GT_EXPR, boolean_type_node,
end.expr, se->string_length);
fault = fold_build2_loc (input_location, TRUTH_ANDIF_EXPR,
boolean_type_node, nonempty, fault);
if (name)
asprintf (&msg, "Substring out of bounds: upper bound (%%ld) of '%s' "
"exceeds string length (%%ld)", name);
else
asprintf (&msg, "Substring out of bounds: upper bound (%%ld) "
"exceeds string length (%%ld)");
gfc_trans_runtime_check (true, false, fault, &se->pre, where, msg,
fold_convert (long_integer_type_node, end.expr),
fold_convert (long_integer_type_node,
se->string_length));
free (msg);
}
/* Try to calculate the length from the start and end expressions. */
if (ref->u.ss.end
&& gfc_dep_difference (ref->u.ss.end, ref->u.ss.start, &length))
{
int i_len;
i_len = mpz_get_si (length) + 1;
if (i_len < 0)
i_len = 0;
tmp = build_int_cst (gfc_charlen_type_node, i_len);
mpz_clear (length); /* Was initialized by gfc_dep_difference. */
}
else
{
tmp = fold_build2_loc (input_location, MINUS_EXPR, gfc_charlen_type_node,
end.expr, start.expr);
tmp = fold_build2_loc (input_location, PLUS_EXPR, gfc_charlen_type_node,
build_int_cst (gfc_charlen_type_node, 1), tmp);
tmp = fold_build2_loc (input_location, MAX_EXPR, gfc_charlen_type_node,
tmp, build_int_cst (gfc_charlen_type_node, 0));
}
se->string_length = tmp;
}
/* Convert a derived type component reference. */
static void
gfc_conv_component_ref (gfc_se * se, gfc_ref * ref)
{
gfc_component *c;
tree tmp;
tree decl;
tree field;
c = ref->u.c.component;
if (c->backend_decl == NULL_TREE
&& ref->u.c.sym != NULL)
gfc_get_derived_type (ref->u.c.sym);
field = c->backend_decl;
gcc_assert (field && TREE_CODE (field) == FIELD_DECL);
decl = se->expr;
/* Components can correspond to fields of different containing
types, as components are created without context, whereas
a concrete use of a component has the type of decl as context.
So, if the type doesn't match, we search the corresponding
FIELD_DECL in the parent type. To not waste too much time
we cache this result in norestrict_decl. */
if (DECL_FIELD_CONTEXT (field) != TREE_TYPE (decl))
{
tree f2 = c->norestrict_decl;
if (!f2 || DECL_FIELD_CONTEXT (f2) != TREE_TYPE (decl))
for (f2 = TYPE_FIELDS (TREE_TYPE (decl)); f2; f2 = DECL_CHAIN (f2))
if (TREE_CODE (f2) == FIELD_DECL
&& DECL_NAME (f2) == DECL_NAME (field))
break;
gcc_assert (f2);
c->norestrict_decl = f2;
field = f2;
}
tmp = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
decl, field, NULL_TREE);
se->expr = tmp;
if (c->ts.type == BT_CHARACTER && !c->attr.proc_pointer)
{
tmp = c->ts.u.cl->backend_decl;
/* Components must always be constant length. */
gcc_assert (tmp && INTEGER_CST_P (tmp));
se->string_length = tmp;
}
if (gfc_deferred_strlen (c, &field))
{
tmp = fold_build3_loc (input_location, COMPONENT_REF,
TREE_TYPE (field),
decl, field, NULL_TREE);
se->string_length = tmp;
}
if (((c->attr.pointer || c->attr.allocatable)
&& (!c->attr.dimension && !c->attr.codimension)
&& c->ts.type != BT_CHARACTER)
|| c->attr.proc_pointer)
se->expr = build_fold_indirect_ref_loc (input_location,
se->expr);
}
/* This function deals with component references to components of the
parent type for derived type extensions. */
static void
conv_parent_component_references (gfc_se * se, gfc_ref * ref)
{
gfc_component *c;
gfc_component *cmp;
gfc_symbol *dt;
gfc_ref parent;
dt = ref->u.c.sym;
c = ref->u.c.component;
/* Return if the component is in the parent type. */
for (cmp = dt->components; cmp; cmp = cmp->next)
if (strcmp (c->name, cmp->name) == 0)
return;
/* Build a gfc_ref to recursively call gfc_conv_component_ref. */
parent.type = REF_COMPONENT;
parent.next = NULL;
parent.u.c.sym = dt;
parent.u.c.component = dt->components;
if (dt->backend_decl == NULL)
gfc_get_derived_type (dt);
/* Build the reference and call self. */
gfc_conv_component_ref (se, &parent);
parent.u.c.sym = dt->components->ts.u.derived;
parent.u.c.component = c;
conv_parent_component_references (se, &parent);
}
/* Return the contents of a variable. Also handles reference/pointer
variables (all Fortran pointer references are implicit). */
static void
gfc_conv_variable (gfc_se * se, gfc_expr * expr)
{
gfc_ss *ss;
gfc_ref *ref;
gfc_symbol *sym;
tree parent_decl = NULL_TREE;
int parent_flag;
bool return_value;
bool alternate_entry;
bool entry_master;
sym = expr->symtree->n.sym;
ss = se->ss;
if (ss != NULL)
{
gfc_ss_info *ss_info = ss->info;
/* Check that something hasn't gone horribly wrong. */
gcc_assert (ss != gfc_ss_terminator);
gcc_assert (ss_info->expr == expr);
/* A scalarized term. We already know the descriptor. */
se->expr = ss_info->data.array.descriptor;
se->string_length = ss_info->string_length;
ref = ss_info->data.array.ref;
if (ref)
gcc_assert (ref->type == REF_ARRAY
&& ref->u.ar.type != AR_ELEMENT);
else
gfc_conv_tmp_array_ref (se);
}
else
{
tree se_expr = NULL_TREE;
se->expr = gfc_get_symbol_decl (sym);
/* Deal with references to a parent's results or entries by storing
the current_function_decl and moving to the parent_decl. */
return_value = sym->attr.function && sym->result == sym;
alternate_entry = sym->attr.function && sym->attr.entry
&& sym->result == sym;
entry_master = sym->attr.result
&& sym->ns->proc_name->attr.entry_master
&& !gfc_return_by_reference (sym->ns->proc_name);
if (current_function_decl)
parent_decl = DECL_CONTEXT (current_function_decl);
if ((se->expr == parent_decl && return_value)
|| (sym->ns && sym->ns->proc_name
&& parent_decl
&& sym->ns->proc_name->backend_decl == parent_decl
&& (alternate_entry || entry_master)))
parent_flag = 1;
else
parent_flag = 0;
/* Special case for assigning the return value of a function.
Self-recursive functions must have an explicit return value. */
if (return_value && (se->expr == current_function_decl || parent_flag))
se_expr = gfc_get_fake_result_decl (sym, parent_flag);
/* Similarly for alternate entry points. */
else if (alternate_entry
&& (sym->ns->proc_name->backend_decl == current_function_decl
|| parent_flag))
{
gfc_entry_list *el = NULL;
for (el = sym->ns->entries; el; el = el->next)
if (sym == el->sym)
{
se_expr = gfc_get_fake_result_decl (sym, parent_flag);
break;
}
}
else if (entry_master
&& (sym->ns->proc_name->backend_decl == current_function_decl
|| parent_flag))
se_expr = gfc_get_fake_result_decl (sym, parent_flag);
if (se_expr)
se->expr = se_expr;
/* Procedure actual arguments. */
else if (sym->attr.flavor == FL_PROCEDURE
&& se->expr != current_function_decl)
{
if (!sym->attr.dummy && !sym->attr.proc_pointer)
{
gcc_assert (TREE_CODE (se->expr) == FUNCTION_DECL);
se->expr = gfc_build_addr_expr (NULL_TREE, se->expr);
}
return;
}
/* Dereference the expression, where needed. Since characters
are entirely different from other types, they are treated
separately. */
if (sym->ts.type == BT_CHARACTER)
{
/* Dereference character pointer dummy arguments
or results. */
if ((sym->attr.pointer || sym->attr.allocatable)
&& (sym->attr.dummy
|| sym->attr.function
|| sym->attr.result))
se->expr = build_fold_indirect_ref_loc (input_location,
se->expr);
}
else if (!sym->attr.value)
{
/* Dereference non-character scalar dummy arguments. */
if (sym->attr.dummy && !sym->attr.dimension
&& !(sym->attr.codimension && sym->attr.allocatable))
se->expr = build_fold_indirect_ref_loc (input_location,
se->expr);
/* Dereference scalar hidden result. */
if (gfc_option.flag_f2c && sym->ts.type == BT_COMPLEX
&& (sym->attr.function || sym->attr.result)
&& !sym->attr.dimension && !sym->attr.pointer
&& !sym->attr.always_explicit)
se->expr = build_fold_indirect_ref_loc (input_location,
se->expr);
/* Dereference non-character pointer variables.
These must be dummies, results, or scalars. */
if ((sym->attr.pointer || sym->attr.allocatable
|| gfc_is_associate_pointer (sym)
|| (sym->as && sym->as->type == AS_ASSUMED_RANK))
&& (sym->attr.dummy
|| sym->attr.function
|| sym->attr.result
|| (!sym->attr.dimension
&& (!sym->attr.codimension || !sym->attr.allocatable))))
se->expr = build_fold_indirect_ref_loc (input_location,
se->expr);
}
ref = expr->ref;
}
/* For character variables, also get the length. */
if (sym->ts.type == BT_CHARACTER)
{
/* If the character length of an entry isn't set, get the length from
the master function instead. */
if (sym->attr.entry && !sym->ts.u.cl->backend_decl)
se->string_length = sym->ns->proc_name->ts.u.cl->backend_decl;
else
se->string_length = sym->ts.u.cl->backend_decl;
gcc_assert (se->string_length);
}
while (ref)
{
switch (ref->type)
{
case REF_ARRAY:
/* Return the descriptor if that's what we want and this is an array
section reference. */
if (se->descriptor_only && ref->u.ar.type != AR_ELEMENT)
return;
/* TODO: Pointers to single elements of array sections, e.g. elemental subs. */
/* Return the descriptor for array pointers and allocations. */
if (se->want_pointer
&& ref->next == NULL && (se->descriptor_only))
return;
gfc_conv_array_ref (se, &ref->u.ar, expr, &expr->where);
/* Return a pointer to an element. */
break;
case REF_COMPONENT:
if (ref->u.c.sym->attr.extension)
conv_parent_component_references (se, ref);
gfc_conv_component_ref (se, ref);
if (!ref->next && ref->u.c.sym->attr.codimension
&& se->want_pointer && se->descriptor_only)
return;
break;
case REF_SUBSTRING:
gfc_conv_substring (se, ref, expr->ts.kind,
expr->symtree->name, &expr->where);
break;
default:
gcc_unreachable ();
break;
}
ref = ref->next;
}
/* Pointer assignment, allocation or pass by reference. Arrays are handled
separately. */
if (se->want_pointer)
{
if (expr->ts.type == BT_CHARACTER && !gfc_is_proc_ptr_comp (expr))
gfc_conv_string_parameter (se);
else
se->expr = gfc_build_addr_expr (NULL_TREE, se->expr);
}
}
/* Unary ops are easy... Or they would be if ! was a valid op. */
static void
gfc_conv_unary_op (enum tree_code code, gfc_se * se, gfc_expr * expr)
{
gfc_se operand;
tree type;
gcc_assert (expr->ts.type != BT_CHARACTER);
/* Initialize the operand. */
gfc_init_se (&operand, se);
gfc_conv_expr_val (&operand, expr->value.op.op1);
gfc_add_block_to_block (&se->pre, &operand.pre);
type = gfc_typenode_for_spec (&expr->ts);
/* TRUTH_NOT_EXPR is not a "true" unary operator in GCC.
We must convert it to a compare to 0 (e.g. EQ_EXPR (op1, 0)).
All other unary operators have an equivalent GIMPLE unary operator. */
if (code == TRUTH_NOT_EXPR)
se->expr = fold_build2_loc (input_location, EQ_EXPR, type, operand.expr,
build_int_cst (type, 0));
else
se->expr = fold_build1_loc (input_location, code, type, operand.expr);
}
/* Expand power operator to optimal multiplications when a value is raised
to a constant integer n. See section 4.6.3, "Evaluation of Powers" of
Donald E. Knuth, "Seminumerical Algorithms", Vol. 2, "The Art of Computer
Programming", 3rd Edition, 1998. */
/* This code is mostly duplicated from expand_powi in the backend.
We establish the "optimal power tree" lookup table with the defined size.
The items in the table are the exponents used to calculate the index
exponents. Any integer n less than POWI_TABLE_SIZE can get an "addition
chain", with the first node being one. */
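/* A worked example (illustrative): for n = 23 the table entries give the
decompositions 23 -> (10, 13), 13 -> (3, 10), 10 -> (5, 5), 5 -> (2, 3),
3 -> (1, 2), 2 -> (1, 1), so x**23 costs six multiplications:
t2 = x*x; t3 = x*t2; t5 = t2*t3; t10 = t5*t5; t13 = t3*t10; t23 = t10*t13;
one fewer than the seven of plain binary square-and-multiply. */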
#define POWI_TABLE_SIZE 256
/* The table is from builtins.c. */
static const unsigned char powi_table[POWI_TABLE_SIZE] =
{
0, 1, 1, 2, 2, 3, 3, 4, /* 0 - 7 */
4, 6, 5, 6, 6, 10, 7, 9, /* 8 - 15 */
8, 16, 9, 16, 10, 12, 11, 13, /* 16 - 23 */
12, 17, 13, 18, 14, 24, 15, 26, /* 24 - 31 */
16, 17, 17, 19, 18, 33, 19, 26, /* 32 - 39 */
20, 25, 21, 40, 22, 27, 23, 44, /* 40 - 47 */
24, 32, 25, 34, 26, 29, 27, 44, /* 48 - 55 */
28, 31, 29, 34, 30, 60, 31, 36, /* 56 - 63 */
32, 64, 33, 34, 34, 46, 35, 37, /* 64 - 71 */
36, 65, 37, 50, 38, 48, 39, 69, /* 72 - 79 */
40, 49, 41, 43, 42, 51, 43, 58, /* 80 - 87 */
44, 64, 45, 47, 46, 59, 47, 76, /* 88 - 95 */
48, 65, 49, 66, 50, 67, 51, 66, /* 96 - 103 */
52, 70, 53, 74, 54, 104, 55, 74, /* 104 - 111 */
56, 64, 57, 69, 58, 78, 59, 68, /* 112 - 119 */
60, 61, 61, 80, 62, 75, 63, 68, /* 120 - 127 */
64, 65, 65, 128, 66, 129, 67, 90, /* 128 - 135 */
68, 73, 69, 131, 70, 94, 71, 88, /* 136 - 143 */
72, 128, 73, 98, 74, 132, 75, 121, /* 144 - 151 */
76, 102, 77, 124, 78, 132, 79, 106, /* 152 - 159 */
80, 97, 81, 160, 82, 99, 83, 134, /* 160 - 167 */
84, 86, 85, 95, 86, 160, 87, 100, /* 168 - 175 */
88, 113, 89, 98, 90, 107, 91, 122, /* 176 - 183 */
92, 111, 93, 102, 94, 126, 95, 150, /* 184 - 191 */
96, 128, 97, 130, 98, 133, 99, 195, /* 192 - 199 */
100, 128, 101, 123, 102, 164, 103, 138, /* 200 - 207 */
104, 145, 105, 146, 106, 109, 107, 149, /* 208 - 215 */
108, 200, 109, 146, 110, 170, 111, 157, /* 216 - 223 */
112, 128, 113, 130, 114, 182, 115, 132, /* 224 - 231 */
116, 200, 117, 132, 118, 158, 119, 206, /* 232 - 239 */
120, 240, 121, 162, 122, 147, 123, 152, /* 240 - 247 */
124, 166, 125, 214, 126, 138, 127, 153, /* 248 - 255 */
};
/* If n is larger than the lookup table's max index, we use the "window
method". */
#define POWI_WINDOW_SIZE 3
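/* E.g. (illustrative) for odd n = 261, digit = 261 & 7 = 5, giving
x**261 = x**256 * x**5; the even remainder 256 is then reduced by
repeated squaring until the lookup table applies. */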
/* Recursive function to expand the power operator. The temporary
values are put in tmpvar. The function returns tmpvar[1] ** n. */
static tree
gfc_conv_powi (gfc_se * se, unsigned HOST_WIDE_INT n, tree * tmpvar)
{
tree op0;
tree op1;
tree tmp;
int digit;
if (n < POWI_TABLE_SIZE)
{
if (tmpvar[n])
return tmpvar[n];
op0 = gfc_conv_powi (se, n - powi_table[n], tmpvar);
op1 = gfc_conv_powi (se, powi_table[n], tmpvar);
}
else if (n & 1)
{
digit = n & ((1 << POWI_WINDOW_SIZE) - 1);
op0 = gfc_conv_powi (se, n - digit, tmpvar);
op1 = gfc_conv_powi (se, digit, tmpvar);
}
else
{
op0 = gfc_conv_powi (se, n >> 1, tmpvar);
op1 = op0;
}
tmp = fold_build2_loc (input_location, MULT_EXPR, TREE_TYPE (op0), op0, op1);
tmp = gfc_evaluate_now (tmp, &se->pre);
if (n < POWI_TABLE_SIZE)
tmpvar[n] = tmp;
return tmp;
}
/* Expand lhs ** rhs. rhs is a constant integer. If it expands successfully,
return 1. Otherwise return 0, and a call to the runtime library
function will have to be built. */
static int
gfc_conv_cst_int_power (gfc_se * se, tree lhs, tree rhs)
{
tree cond;
tree tmp;
tree type;
tree vartmp[POWI_TABLE_SIZE];
HOST_WIDE_INT m;
unsigned HOST_WIDE_INT n;
int sgn;
/* If exponent is too large, we won't expand it anyway, so don't bother
with large integer values. */
if (!TREE_INT_CST (rhs).fits_shwi ())
return 0;
m = TREE_INT_CST (rhs).to_shwi ();
/* There's no ABS for HOST_WIDE_INT, so here we go. It also takes care
of the asymmetric range of the integer type. */
n = (unsigned HOST_WIDE_INT) (m < 0 ? -m : m);
type = TREE_TYPE (lhs);
sgn = tree_int_cst_sgn (rhs);
if (((FLOAT_TYPE_P (type) && !flag_unsafe_math_optimizations)
|| optimize_size) && (m > 2 || m < -1))
return 0;
/* rhs == 0 */
if (sgn == 0)
{
se->expr = gfc_build_const (type, integer_one_node);
return 1;
}
/* If rhs < 0 and lhs is an integer, the result is -1, 0 or 1. */
if ((sgn == -1) && (TREE_CODE (type) == INTEGER_TYPE))
{
tmp = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
lhs, build_int_cst (TREE_TYPE (lhs), -1));
cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
lhs, build_int_cst (TREE_TYPE (lhs), 1));
/* If rhs is even,
result = (lhs == 1 || lhs == -1) ? 1 : 0. */
if ((n & 1) == 0)
{
tmp = fold_build2_loc (input_location, TRUTH_OR_EXPR,
boolean_type_node, tmp, cond);
se->expr = fold_build3_loc (input_location, COND_EXPR, type,
tmp, build_int_cst (type, 1),
build_int_cst (type, 0));
return 1;
}
/* If rhs is odd,
result = (lhs == 1) ? 1 : (lhs == -1) ? -1 : 0. */
tmp = fold_build3_loc (input_location, COND_EXPR, type, tmp,
build_int_cst (type, -1),
build_int_cst (type, 0));
se->expr = fold_build3_loc (input_location, COND_EXPR, type,
cond, build_int_cst (type, 1), tmp);
return 1;
}
memset (vartmp, 0, sizeof (vartmp));
vartmp[1] = lhs;
if (sgn == -1)
{
tmp = gfc_build_const (type, integer_one_node);
vartmp[1] = fold_build2_loc (input_location, RDIV_EXPR, type, tmp,
vartmp[1]);
}
se->expr = gfc_conv_powi (se, n, vartmp);
return 1;
}
/* Power op (**). Constant integer exponent has special handling. */
static void
gfc_conv_power_op (gfc_se * se, gfc_expr * expr)
{
tree gfc_int4_type_node;
int kind;
int ikind;
int res_ikind_1, res_ikind_2;
gfc_se lse;
gfc_se rse;
tree fndecl = NULL;
gfc_init_se (&lse, se);
gfc_conv_expr_val (&lse, expr->value.op.op1);
lse.expr = gfc_evaluate_now (lse.expr, &lse.pre);
gfc_add_block_to_block (&se->pre, &lse.pre);
gfc_init_se (&rse, se);
gfc_conv_expr_val (&rse, expr->value.op.op2);
gfc_add_block_to_block (&se->pre, &rse.pre);
if (expr->value.op.op2->ts.type == BT_INTEGER
&& expr->value.op.op2->expr_type == EXPR_CONSTANT)
if (gfc_conv_cst_int_power (se, lse.expr, rse.expr))
return;
gfc_int4_type_node = gfc_get_int_type (4);
/* In case of integer operands with kinds 1 or 2, we call the integer kind 4
library routine. But in the end, we have to convert the result back
if this case applies -- with res_ikind_K, we keep track of whether operand K
falls into this case. */
res_ikind_1 = -1;
res_ikind_2 = -1;
kind = expr->value.op.op1->ts.kind;
switch (expr->value.op.op2->ts.type)
{
case BT_INTEGER:
ikind = expr->value.op.op2->ts.kind;
switch (ikind)
{
case 1:
case 2:
rse.expr = convert (gfc_int4_type_node, rse.expr);
res_ikind_2 = ikind;
/* Fall through. */
case 4:
ikind = 0;
break;
case 8:
ikind = 1;
break;
case 16:
ikind = 2;
break;
default:
gcc_unreachable ();
}
switch (kind)
{
case 1:
case 2:
if (expr->value.op.op1->ts.type == BT_INTEGER)
{
lse.expr = convert (gfc_int4_type_node, lse.expr);
res_ikind_1 = kind;
}
else
gcc_unreachable ();
/* Fall through. */
case 4:
kind = 0;
break;
case 8:
kind = 1;
break;
case 10:
kind = 2;
break;
case 16:
kind = 3;
break;
default:
gcc_unreachable ();
}
switch (expr->value.op.op1->ts.type)
{
case BT_INTEGER:
if (kind == 3) /* Case 16 was not handled properly above. */
kind = 2;
fndecl = gfor_fndecl_math_powi[kind][ikind].integer;
break;
case BT_REAL:
/* Use builtins for real ** int4. */
if (ikind == 0)
{
switch (kind)
{
case 0:
fndecl = builtin_decl_explicit (BUILT_IN_POWIF);
break;
case 1:
fndecl = builtin_decl_explicit (BUILT_IN_POWI);
break;
case 2:
fndecl = builtin_decl_explicit (BUILT_IN_POWIL);
break;
case 3:
/* Use the __builtin_powil() only if real(kind=16) is
actually the C long double type. */
if (!gfc_real16_is_float128)
fndecl = builtin_decl_explicit (BUILT_IN_POWIL);
break;
default:
gcc_unreachable ();
}
}
/* If we don't have a good builtin for this, go for the
library function. */
if (!fndecl)
fndecl = gfor_fndecl_math_powi[kind][ikind].real;
break;
case BT_COMPLEX:
fndecl = gfor_fndecl_math_powi[kind][ikind].cmplx;
break;
default:
gcc_unreachable ();
}
break;
case BT_REAL:
fndecl = gfc_builtin_decl_for_float_kind (BUILT_IN_POW, kind);
break;
case BT_COMPLEX:
fndecl = gfc_builtin_decl_for_float_kind (BUILT_IN_CPOW, kind);
break;
default:
gcc_unreachable ();
break;
}
se->expr = build_call_expr_loc (input_location,
fndecl, 2, lse.expr, rse.expr);
/* Convert the result back if it is of the wrong integer kind. */
if (res_ikind_1 != -1 && res_ikind_2 != -1)
{
/* We want the maximum of both operand kinds as result. */
if (res_ikind_1 < res_ikind_2)
res_ikind_1 = res_ikind_2;
se->expr = convert (gfc_get_int_type (res_ikind_1), se->expr);
}
}
/* Generate code to allocate a string temporary. */
tree
gfc_conv_string_tmp (gfc_se * se, tree type, tree len)
{
tree var;
tree tmp;
if (gfc_can_put_var_on_stack (len))
{
/* Create a temporary variable to hold the result. */
tmp = fold_build2_loc (input_location, MINUS_EXPR,
gfc_charlen_type_node, len,
build_int_cst (gfc_charlen_type_node, 1));
tmp = build_range_type (gfc_array_index_type, gfc_index_zero_node, tmp);
if (TREE_CODE (TREE_TYPE (type)) == ARRAY_TYPE)
tmp = build_array_type (TREE_TYPE (TREE_TYPE (type)), tmp);
else
tmp = build_array_type (TREE_TYPE (type), tmp);
var = gfc_create_var (tmp, "str");
var = gfc_build_addr_expr (type, var);
}
else
{
/* Allocate a temporary to hold the result. */
var = gfc_create_var (type, "pstr");
gcc_assert (POINTER_TYPE_P (type));
tmp = TREE_TYPE (type);
if (TREE_CODE (tmp) == ARRAY_TYPE)
tmp = TREE_TYPE (tmp);
tmp = TYPE_SIZE_UNIT (tmp);
tmp = fold_build2_loc (input_location, MULT_EXPR, size_type_node,
fold_convert (size_type_node, len),
fold_convert (size_type_node, tmp));
tmp = gfc_call_malloc (&se->pre, type, tmp);
gfc_add_modify (&se->pre, var, tmp);
/* Free the temporary afterwards. */
tmp = gfc_call_free (convert (pvoid_type_node, var));
gfc_add_expr_to_block (&se->post, tmp);
}
return var;
}
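/* A sketch of the two cases above. With a compile-time length such as
CHARACTER(LEN=8), the temporary is a stack array:
char str[8]; -- the "str" variable, address taken
With a length only known at run time it is heap-allocated:
char *pstr = malloc (len * elem_size); -- freed via se->post
so callers must emit se->post in the heap case to avoid a leak. */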
/* Handle a string concatenation operation. A temporary will be allocated to
hold the result. */
static void
gfc_conv_concat_op (gfc_se * se, gfc_expr * expr)
{
gfc_se lse, rse;
tree len, type, var, tmp, fndecl;
gcc_assert (expr->value.op.op1->ts.type == BT_CHARACTER
&& expr->value.op.op2->ts.type == BT_CHARACTER);
gcc_assert (expr->value.op.op1->ts.kind == expr->value.op.op2->ts.kind);
gfc_init_se (&lse, se);
gfc_conv_expr (&lse, expr->value.op.op1);
gfc_conv_string_parameter (&lse);
gfc_init_se (&rse, se);
gfc_conv_expr (&rse, expr->value.op.op2);
gfc_conv_string_parameter (&rse);
gfc_add_block_to_block (&se->pre, &lse.pre);
gfc_add_block_to_block (&se->pre, &rse.pre);
type = gfc_get_character_type (expr->ts.kind, expr->ts.u.cl);
len = TYPE_MAX_VALUE (TYPE_DOMAIN (type));
if (len == NULL_TREE)
{
len = fold_build2_loc (input_location, PLUS_EXPR,
TREE_TYPE (lse.string_length),
lse.string_length, rse.string_length);
}
type = build_pointer_type (type);
var = gfc_conv_string_tmp (se, type, len);
/* Do the actual concatenation. */
if (expr->ts.kind == 1)
fndecl = gfor_fndecl_concat_string;
else if (expr->ts.kind == 4)
fndecl = gfor_fndecl_concat_string_char4;
else
gcc_unreachable ();
tmp = build_call_expr_loc (input_location,
fndecl, 6, len, var, lse.string_length, lse.expr,
rse.string_length, rse.expr);
gfc_add_expr_to_block (&se->pre, tmp);
/* Add the cleanup for the operands. */
gfc_add_block_to_block (&se->pre, &rse.post);
gfc_add_block_to_block (&se->pre, &lse.post);
se->expr = var;
se->string_length = len;
}
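/* For illustration (assuming gfor_fndecl_concat_string binds to the
libgfortran routine _gfortran_concat_string): the expression a // b is
lowered approximately to
_gfortran_concat_string (len_a + len_b, tmp, len_a, a, len_b, b);
with tmp obtained from gfc_conv_string_tmp above. */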
/* Translates an op expression. Common (binary) cases are handled by this
function, others are passed on. Recursion is used in either case.
We use the fact that (op1.ts == op2.ts) (except for the power
operator **).
Operators need no special handling for scalarized expressions as long as
they call gfc_conv_simple_val to get their operands.
Character strings get special handling. */
static void
gfc_conv_expr_op (gfc_se * se, gfc_expr * expr)
{
enum tree_code code;
gfc_se lse;
gfc_se rse;
tree tmp, type;
int lop;
int checkstring;
checkstring = 0;
lop = 0;
switch (expr->value.op.op)
{
case INTRINSIC_PARENTHESES:
if ((expr->ts.type == BT_REAL
|| expr->ts.type == BT_COMPLEX)
&& gfc_option.flag_protect_parens)
{
gfc_conv_unary_op (PAREN_EXPR, se, expr);
gcc_assert (FLOAT_TYPE_P (TREE_TYPE (se->expr)));
return;
}
/* Fallthrough. */
case INTRINSIC_UPLUS:
gfc_conv_expr (se, expr->value.op.op1);
return;
case INTRINSIC_UMINUS:
gfc_conv_unary_op (NEGATE_EXPR, se, expr);
return;
case INTRINSIC_NOT:
gfc_conv_unary_op (TRUTH_NOT_EXPR, se, expr);
return;
case INTRINSIC_PLUS:
code = PLUS_EXPR;
break;
case INTRINSIC_MINUS:
code = MINUS_EXPR;
break;
case INTRINSIC_TIMES:
code = MULT_EXPR;
break;
case INTRINSIC_DIVIDE:
/* If expr is a real or complex expr, use an RDIV_EXPR. If it is
an integer expr, we must round towards zero, so we use a
TRUNC_DIV_EXPR. */
if (expr->ts.type == BT_INTEGER)
code = TRUNC_DIV_EXPR;
else
code = RDIV_EXPR;
break;
case INTRINSIC_POWER:
gfc_conv_power_op (se, expr);
return;
case INTRINSIC_CONCAT:
gfc_conv_concat_op (se, expr);
return;
case INTRINSIC_AND:
code = TRUTH_ANDIF_EXPR;
lop = 1;
break;
case INTRINSIC_OR:
code = TRUTH_ORIF_EXPR;
lop = 1;
break;
/* EQV and NEQV only work on logicals, but since we represent them
as integers, we can use EQ_EXPR and NE_EXPR for them in GIMPLE. */
case INTRINSIC_EQ:
case INTRINSIC_EQ_OS:
case INTRINSIC_EQV:
code = EQ_EXPR;
checkstring = 1;
lop = 1;
break;
case INTRINSIC_NE:
case INTRINSIC_NE_OS:
case INTRINSIC_NEQV:
code = NE_EXPR;
checkstring = 1;
lop = 1;
break;
case INTRINSIC_GT:
case INTRINSIC_GT_OS:
code = GT_EXPR;
checkstring = 1;
lop = 1;
break;
case INTRINSIC_GE:
case INTRINSIC_GE_OS:
code = GE_EXPR;
checkstring = 1;
lop = 1;
break;
case INTRINSIC_LT:
case INTRINSIC_LT_OS:
code = LT_EXPR;
checkstring = 1;
lop = 1;
break;
case INTRINSIC_LE:
case INTRINSIC_LE_OS:
code = LE_EXPR;
checkstring = 1;
lop = 1;
break;
case INTRINSIC_USER:
case INTRINSIC_ASSIGN:
/* These should be converted into function calls by the frontend. */
gcc_unreachable ();
default:
fatal_error ("Unknown intrinsic op");
return;
}
/* The only exception to this is **, which is handled separately anyway. */
gcc_assert (expr->value.op.op1->ts.type == expr->value.op.op2->ts.type);
if (checkstring && expr->value.op.op1->ts.type != BT_CHARACTER)
checkstring = 0;
/* lhs */
gfc_init_se (&lse, se);
gfc_conv_expr (&lse, expr->value.op.op1);
gfc_add_block_to_block (&se->pre, &lse.pre);
/* rhs */
gfc_init_se (&rse, se);
gfc_conv_expr (&rse, expr->value.op.op2);
gfc_add_block_to_block (&se->pre, &rse.pre);
if (checkstring)
{
gfc_conv_string_parameter (&lse);
gfc_conv_string_parameter (&rse);
lse.expr = gfc_build_compare_string (lse.string_length, lse.expr,
rse.string_length, rse.expr,
expr->value.op.op1->ts.kind,
code);
rse.expr = build_int_cst (TREE_TYPE (lse.expr), 0);
gfc_add_block_to_block (&lse.post, &rse.post);
}
type = gfc_typenode_for_spec (&expr->ts);
if (lop)
{
/* The result of logical ops is always boolean_type_node. */
tmp = fold_build2_loc (input_location, code, boolean_type_node,
lse.expr, rse.expr);
se->expr = convert (type, tmp);
}
else
se->expr = fold_build2_loc (input_location, code, type, lse.expr, rse.expr);
/* Add the post blocks. */
gfc_add_block_to_block (&se->post, &rse.post);
gfc_add_block_to_block (&se->post, &lse.post);
}
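/* A sketch of the lowering above: for LOGICAL :: a, b the expression
a .AND. b maps to the short-circuiting TRUTH_ANDIF_EXPR, while for
CHARACTER operands s1 == s2 becomes, in effect,
gfc_build_compare_string (len1, s1, len2, s2, kind, EQ_EXPR) == 0
i.e. the library comparison result is tested against zero. */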
/* If a string's length is one, we convert it to a single character. */
tree
gfc_string_to_single_character (tree len, tree str, int kind)
{
if (len == NULL
|| !INTEGER_CST_P (len) || TREE_INT_CST_HIGH (len) != 0
|| !POINTER_TYPE_P (TREE_TYPE (str)))
return NULL_TREE;
if (TREE_INT_CST_LOW (len) == 1)
{
str = fold_convert (gfc_get_pchar_type (kind), str);
return build_fold_indirect_ref_loc (input_location, str);
}
if (kind == 1
&& TREE_CODE (str) == ADDR_EXPR
&& TREE_CODE (TREE_OPERAND (str, 0)) == ARRAY_REF
&& TREE_CODE (TREE_OPERAND (TREE_OPERAND (str, 0), 0)) == STRING_CST
&& array_ref_low_bound (TREE_OPERAND (str, 0))
== TREE_OPERAND (TREE_OPERAND (str, 0), 1)
&& TREE_INT_CST_LOW (len) > 1
&& TREE_INT_CST_LOW (len)
== (unsigned HOST_WIDE_INT)
TREE_STRING_LENGTH (TREE_OPERAND (TREE_OPERAND (str, 0), 0)))
{
tree ret = fold_convert (gfc_get_pchar_type (kind), str);
ret = build_fold_indirect_ref_loc (input_location, ret);
if (TREE_CODE (ret) == INTEGER_CST)
{
tree string_cst = TREE_OPERAND (TREE_OPERAND (str, 0), 0);
int i, length = TREE_STRING_LENGTH (string_cst);
const char *ptr = TREE_STRING_POINTER (string_cst);
for (i = 1; i < length; i++)
if (ptr[i] != ' ')
return NULL_TREE;
return ret;
}
}
return NULL_TREE;
}
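/* Example of the effect (a sketch): for CHARACTER(LEN=1) :: c, the
comparison c == 'a' needs no runtime call; both operands reduce to
single characters, and gfc_build_compare_string below emits a plain
integer subtraction. The second case above also accepts a longer
literal such as 'a   ' whose characters after the first are all
blanks. */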
void
gfc_conv_scalar_char_value (gfc_symbol *sym, gfc_se *se, gfc_expr **expr)
{
if (sym->backend_decl)
{
/* This becomes the nominal_type in
function.c:assign_parm_find_data_types. */
TREE_TYPE (sym->backend_decl) = unsigned_char_type_node;
/* This becomes the passed_type in
function.c:assign_parm_find_data_types. C promotes char to
integer for argument passing. */
DECL_ARG_TYPE (sym->backend_decl) = unsigned_type_node;
DECL_BY_REFERENCE (sym->backend_decl) = 0;
}
if (expr != NULL)
{
/* If we have a constant character expression, make it into an
integer. */
if ((*expr)->expr_type == EXPR_CONSTANT)
{
gfc_typespec ts;
gfc_clear_ts (&ts);
*expr = gfc_get_int_expr (gfc_default_integer_kind, NULL,
(int)(*expr)->value.character.string[0]);
if ((*expr)->ts.kind != gfc_c_int_kind)
{
/* The expr needs to be compatible with a C int. If the
conversion fails, the eflag value 2 turns the failure into an ICE. */
ts.type = BT_INTEGER;
ts.kind = gfc_c_int_kind;
gfc_convert_type (*expr, &ts, 2);
}
}
else if (se != NULL && (*expr)->expr_type == EXPR_VARIABLE)
{
if ((*expr)->ref == NULL)
{
se->expr = gfc_string_to_single_character
(build_int_cst (integer_type_node, 1),
gfc_build_addr_expr (gfc_get_pchar_type ((*expr)->ts.kind),
gfc_get_symbol_decl
((*expr)->symtree->n.sym)),
(*expr)->ts.kind);
}
else
{
gfc_conv_variable (se, *expr);
se->expr = gfc_string_to_single_character
(build_int_cst (integer_type_node, 1),
gfc_build_addr_expr (gfc_get_pchar_type ((*expr)->ts.kind),
se->expr),
(*expr)->ts.kind);
}
}
}
}
/* Helper function for gfc_build_compare_string. Return LEN_TRIM value
if STR is a string literal, otherwise return -1. */
static int
gfc_optimize_len_trim (tree len, tree str, int kind)
{
if (kind == 1
&& TREE_CODE (str) == ADDR_EXPR
&& TREE_CODE (TREE_OPERAND (str, 0)) == ARRAY_REF
&& TREE_CODE (TREE_OPERAND (TREE_OPERAND (str, 0), 0)) == STRING_CST
&& array_ref_low_bound (TREE_OPERAND (str, 0))
== TREE_OPERAND (TREE_OPERAND (str, 0), 1)
&& TREE_INT_CST_LOW (len) >= 1
&& TREE_INT_CST_LOW (len)
== (unsigned HOST_WIDE_INT)
TREE_STRING_LENGTH (TREE_OPERAND (TREE_OPERAND (str, 0), 0)))
{
tree folded = fold_convert (gfc_get_pchar_type (kind), str);
folded = build_fold_indirect_ref_loc (input_location, folded);
if (TREE_CODE (folded) == INTEGER_CST)
{
tree string_cst = TREE_OPERAND (TREE_OPERAND (str, 0), 0);
int length = TREE_STRING_LENGTH (string_cst);
const char *ptr = TREE_STRING_POINTER (string_cst);
for (; length > 0; length--)
if (ptr[length - 1] != ' ')
break;
return length;
}
}
return -1;
}
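/* Example (a sketch): for the literal 'abc ' this helper returns 3,
its LEN_TRIM value, so a comparison such as s == 'abc ' with
LEN(s) == 2 can be folded to "unequal" at compile time by
gfc_build_compare_string below. */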
/* Helper to build a call to memcmp. */
static tree
build_memcmp_call (tree s1, tree s2, tree n)
{
tree tmp;
if (!POINTER_TYPE_P (TREE_TYPE (s1)))
s1 = gfc_build_addr_expr (pvoid_type_node, s1);
else
s1 = fold_convert (pvoid_type_node, s1);
if (!POINTER_TYPE_P (TREE_TYPE (s2)))
s2 = gfc_build_addr_expr (pvoid_type_node, s2);
else
s2 = fold_convert (pvoid_type_node, s2);
n = fold_convert (size_type_node, n);
tmp = build_call_expr_loc (input_location,
builtin_decl_explicit (BUILT_IN_MEMCMP),
3, s1, s2, n);
return fold_convert (integer_type_node, tmp);
}
/* Compare two strings. If both are single characters, the result is
their difference. Otherwise, we build a library call. */
tree
gfc_build_compare_string (tree len1, tree str1, tree len2, tree str2, int kind,
enum tree_code code)
{
tree sc1;
tree sc2;
tree fndecl;
gcc_assert (POINTER_TYPE_P (TREE_TYPE (str1)));
gcc_assert (POINTER_TYPE_P (TREE_TYPE (str2)));
sc1 = gfc_string_to_single_character (len1, str1, kind);
sc2 = gfc_string_to_single_character (len2, str2, kind);
if (sc1 != NULL_TREE && sc2 != NULL_TREE)
{
/* Deal with single character specially. */
sc1 = fold_convert (integer_type_node, sc1);
sc2 = fold_convert (integer_type_node, sc2);
return fold_build2_loc (input_location, MINUS_EXPR, integer_type_node,
sc1, sc2);
}
if ((code == EQ_EXPR || code == NE_EXPR)
&& optimize
&& INTEGER_CST_P (len1) && INTEGER_CST_P (len2))
{
/* If one string is a string literal with LEN_TRIM longer
than the length of the second string, the strings
compare unequal. */
int len = gfc_optimize_len_trim (len1, str1, kind);
if (len > 0 && compare_tree_int (len2, len) < 0)
return integer_one_node;
len = gfc_optimize_len_trim (len2, str2, kind);
if (len > 0 && compare_tree_int (len1, len) < 0)
return integer_one_node;
}
/* We can compare via memcmp if the strings are known to be equal
in length and they are
- kind=1
- kind=4 and the comparison is for (in)equality. */
if (INTEGER_CST_P (len1) && INTEGER_CST_P (len2)
&& tree_int_cst_equal (len1, len2)
&& (kind == 1 || code == EQ_EXPR || code == NE_EXPR))
{
tree tmp;
tree chartype;
chartype = gfc_get_char_type (kind);
tmp = fold_build2_loc (input_location, MULT_EXPR, TREE_TYPE(len1),
fold_convert (TREE_TYPE(len1),
TYPE_SIZE_UNIT(chartype)),
len1);
return build_memcmp_call (str1, str2, tmp);
}
/* Build a call for the comparison. */
if (kind == 1)
fndecl = gfor_fndecl_compare_string;
else if (kind == 4)
fndecl = gfor_fndecl_compare_string_char4;
else
gcc_unreachable ();
return build_call_expr_loc (input_location, fndecl, 4,
len1, str1, len2, str2);
}
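/* To make the memcmp path above concrete (a sketch, assuming kind=4,
i.e. four-byte characters): for two CHARACTER(KIND=4, LEN=5) operands
the byte count passed to memcmp is 4 * 5 = 20. This also shows why
kind=4 is restricted to (in)equality there: memcmp compares bytes,
so on little-endian targets its ordering would not match the
character ordering. */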
/* Return the backend_decl for a procedure pointer component. */
static tree
get_proc_ptr_comp (gfc_expr *e)
{
gfc_se comp_se;
gfc_expr *e2;
expr_t old_type;
gfc_init_se (&comp_se, NULL);
e2 = gfc_copy_expr (e);
/* We have to restore the expr type later so that gfc_free_expr frees
the exact same thing that was allocated.
TODO: This is ugly. */
old_type = e2->expr_type;
e2->expr_type = EXPR_VARIABLE;
gfc_conv_expr (&comp_se, e2);
e2->expr_type = old_type;
gfc_free_expr (e2);
return build_fold_addr_expr_loc (input_location, comp_se.expr);
}
/* Convert a typebound function reference from a class object. */
static void
conv_base_obj_fcn_val (gfc_se * se, tree base_object, gfc_expr * expr)
{
gfc_ref *ref;
tree var;
if (TREE_CODE (base_object) != VAR_DECL)
{
var = gfc_create_var (TREE_TYPE (base_object), NULL);
gfc_add_modify (&se->pre, var, base_object);
}
se->expr = gfc_class_vptr_get (base_object);
se->expr = build_fold_indirect_ref_loc (input_location, se->expr);
ref = expr->ref;
while (ref && ref->next)
ref = ref->next;
gcc_assert (ref && ref->type == REF_COMPONENT);
if (ref->u.c.sym->attr.extension)
conv_parent_component_references (se, ref);
gfc_conv_component_ref (se, ref);
se->expr = build_fold_addr_expr_loc (input_location, se->expr);
}
static void
conv_function_val (gfc_se * se, gfc_symbol * sym, gfc_expr * expr)
{
tree tmp;
if (gfc_is_proc_ptr_comp (expr))
tmp = get_proc_ptr_comp (expr);
else if (sym->attr.dummy)
{
tmp = gfc_get_symbol_decl (sym);
if (sym->attr.proc_pointer)
tmp = build_fold_indirect_ref_loc (input_location,
tmp);
gcc_assert (TREE_CODE (TREE_TYPE (tmp)) == POINTER_TYPE
&& TREE_CODE (TREE_TYPE (TREE_TYPE (tmp))) == FUNCTION_TYPE);
}
else
{
if (!sym->backend_decl)
sym->backend_decl = gfc_get_extern_function_decl (sym);
TREE_USED (sym->backend_decl) = 1;
tmp = sym->backend_decl;
if (sym->attr.cray_pointee)
{
/* TODO - make the cray pointee a pointer to a procedure,
assign the pointer to it and use it for the call. This
will do for now! */
tmp = convert (build_pointer_type (TREE_TYPE (tmp)),
gfc_get_symbol_decl (sym->cp_pointer));
tmp = gfc_evaluate_now (tmp, &se->pre);
}
if (!POINTER_TYPE_P (TREE_TYPE (tmp)))
{
gcc_assert (TREE_CODE (tmp) == FUNCTION_DECL);
tmp = gfc_build_addr_expr (NULL_TREE, tmp);
}
}
se->expr = tmp;
}
/* Initialize MAPPING. */
void
gfc_init_interface_mapping (gfc_interface_mapping * mapping)
{
mapping->syms = NULL;
mapping->charlens = NULL;
}
/* Free all memory held by MAPPING (but not MAPPING itself). */
void
gfc_free_interface_mapping (gfc_interface_mapping * mapping)
{
gfc_interface_sym_mapping *sym;
gfc_interface_sym_mapping *nextsym;
gfc_charlen *cl;
gfc_charlen *nextcl;
for (sym = mapping->syms; sym; sym = nextsym)
{
nextsym = sym->next;
sym->new_sym->n.sym->formal = NULL;
gfc_free_symbol (sym->new_sym->n.sym);
gfc_free_expr (sym->expr);
free (sym->new_sym);
free (sym);
}
for (cl = mapping->charlens; cl; cl = nextcl)
{
nextcl = cl->next;
gfc_free_expr (cl->length);
free (cl);
}
}
/* Return a copy of gfc_charlen CL. Add the returned structure to
MAPPING so that it will be freed by gfc_free_interface_mapping. */
static gfc_charlen *
gfc_get_interface_mapping_charlen (gfc_interface_mapping * mapping,
gfc_charlen * cl)
{
gfc_charlen *new_charlen;
new_charlen = gfc_get_charlen ();
new_charlen->next = mapping->charlens;
new_charlen->length = gfc_copy_expr (cl->length);
mapping->charlens = new_charlen;
return new_charlen;
}
/* A subroutine of gfc_add_interface_mapping. Return a descriptorless
array variable that can be used as the actual argument for dummy
argument SYM. Add any initialization code to BLOCK. PACKED is as
for gfc_get_nodesc_array_type and DATA points to the first element
in the passed array. */
static tree
gfc_get_interface_mapping_array (stmtblock_t * block, gfc_symbol * sym,
gfc_packed packed, tree data)
{
tree type;
tree var;
type = gfc_typenode_for_spec (&sym->ts);
type = gfc_get_nodesc_array_type (type, sym->as, packed,
!sym->attr.target && !sym->attr.pointer
&& !sym->attr.proc_pointer);
var = gfc_create_var (type, "ifm");
gfc_add_modify (block, var, fold_convert (type, data));
return var;
}
/* A subroutine of gfc_add_interface_mapping. Set the stride, upper bounds
and offset of descriptorless array type TYPE given that it has the same
size as DESC. Add any set-up code to BLOCK. */
static void
gfc_set_interface_mapping_bounds (stmtblock_t * block, tree type, tree desc)
{
int n;
tree dim;
tree offset;
tree tmp;
offset = gfc_index_zero_node;
for (n = 0; n < GFC_TYPE_ARRAY_RANK (type); n++)
{
dim = gfc_rank_cst[n];
GFC_TYPE_ARRAY_STRIDE (type, n) = gfc_conv_array_stride (desc, n);
if (GFC_TYPE_ARRAY_LBOUND (type, n) == NULL_TREE)
{
GFC_TYPE_ARRAY_LBOUND (type, n)
= gfc_conv_descriptor_lbound_get (desc, dim);
GFC_TYPE_ARRAY_UBOUND (type, n)
= gfc_conv_descriptor_ubound_get (desc, dim);
}
else if (GFC_TYPE_ARRAY_UBOUND (type, n) == NULL_TREE)
{
tmp = fold_build2_loc (input_location, MINUS_EXPR,
gfc_array_index_type,
gfc_conv_descriptor_ubound_get (desc, dim),
gfc_conv_descriptor_lbound_get (desc, dim));
tmp = fold_build2_loc (input_location, PLUS_EXPR,
gfc_array_index_type,
GFC_TYPE_ARRAY_LBOUND (type, n), tmp);
tmp = gfc_evaluate_now (tmp, block);
GFC_TYPE_ARRAY_UBOUND (type, n) = tmp;
}
tmp = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
GFC_TYPE_ARRAY_LBOUND (type, n),
GFC_TYPE_ARRAY_STRIDE (type, n));
offset = fold_build2_loc (input_location, MINUS_EXPR,
gfc_array_index_type, offset, tmp);
}
offset = gfc_evaluate_now (offset, block);
GFC_TYPE_ARRAY_OFFSET (type) = offset;
}
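/* The loop above implements the usual descriptorless-array offset
formula. An element a(i_1, ..., i_n) is addressed as
data[offset + sum_k (i_k * stride_k)]
so the offset must be -sum_k (lbound_k * stride_k), which is exactly
what the per-dimension MINUS_EXPR accumulates. */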
/* Extend MAPPING so that it maps dummy argument SYM to the value stored
in SE. The caller may still use se->expr and se->string_length after
calling this function. */
void
gfc_add_interface_mapping (gfc_interface_mapping * mapping,
gfc_symbol * sym, gfc_se * se,
gfc_expr *expr)
{
gfc_interface_sym_mapping *sm;
tree desc;
tree tmp;
tree value;
gfc_symbol *new_sym;
gfc_symtree *root;
gfc_symtree *new_symtree;
/* Create a new symbol to represent the actual argument. */
new_sym = gfc_new_symbol (sym->name, NULL);
new_sym->ts = sym->ts;
new_sym->as = gfc_copy_array_spec (sym->as);
new_sym->attr.referenced = 1;
new_sym->attr.dimension = sym->attr.dimension;
new_sym->attr.contiguous = sym->attr.contiguous;
new_sym->attr.codimension = sym->attr.codimension;
new_sym->attr.pointer = sym->attr.pointer;
new_sym->attr.allocatable = sym->attr.allocatable;
new_sym->attr.flavor = sym->attr.flavor;
new_sym->attr.function = sym->attr.function;
/* Ensure that the interface is available and that
descriptors are passed for array actual arguments. */
if (sym->attr.flavor == FL_PROCEDURE)
{
new_sym->formal = expr->symtree->n.sym->formal;
new_sym->attr.always_explicit
= expr->symtree->n.sym->attr.always_explicit;
}
/* Create a fake symtree for it. */
root = NULL;
new_symtree = gfc_new_symtree (&root, sym->name);
new_symtree->n.sym = new_sym;
gcc_assert (new_symtree == root);
/* Create a dummy->actual mapping. */
sm = XCNEW (gfc_interface_sym_mapping);
sm->next = mapping->syms;
sm->old = sym;
sm->new_sym = new_symtree;
sm->expr = gfc_copy_expr (expr);
mapping->syms = sm;
/* Stabilize the argument's value. */
if (!sym->attr.function && se)
se->expr = gfc_evaluate_now (se->expr, &se->pre);
if (sym->ts.type == BT_CHARACTER)
{
/* Create a copy of the dummy argument's length. */
new_sym->ts.u.cl = gfc_get_interface_mapping_charlen (mapping, sym->ts.u.cl);
sm->expr->ts.u.cl = new_sym->ts.u.cl;
/* If the length is specified as "*", record the length that
the caller is passing. We should use the callee's length
in all other cases. */
if (!new_sym->ts.u.cl->length && se)
{
se->string_length = gfc_evaluate_now (se->string_length, &se->pre);
new_sym->ts.u.cl->backend_decl = se->string_length;
}
}
if (!se)
return;
/* Use the passed value as-is if the argument is a function. */
if (sym->attr.flavor == FL_PROCEDURE)
value = se->expr;
/* If the argument is either a string or a pointer to a string,
convert it to a boundless character type. */
else if (!sym->attr.dimension && sym->ts.type == BT_CHARACTER)
{
tmp = gfc_get_character_type_len (sym->ts.kind, NULL);
tmp = build_pointer_type (tmp);
if (sym->attr.pointer)
value = build_fold_indirect_ref_loc (input_location,
se->expr);
else
value = se->expr;
value = fold_convert (tmp, value);
}
/* If the argument is a scalar, a pointer to an array or an allocatable,
dereference it. */
else if (!sym->attr.dimension || sym->attr.pointer || sym->attr.allocatable)
value = build_fold_indirect_ref_loc (input_location,
se->expr);
/* For character(*), use the actual argument's descriptor. */
else if (sym->ts.type == BT_CHARACTER && !new_sym->ts.u.cl->length)
value = build_fold_indirect_ref_loc (input_location,
se->expr);
/* If the argument is an array descriptor, use it to determine
information about the actual argument's shape. */
else if (POINTER_TYPE_P (TREE_TYPE (se->expr))
&& GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (TREE_TYPE (se->expr))))
{
/* Get the actual argument's descriptor. */
desc = build_fold_indirect_ref_loc (input_location,
se->expr);
/* Create the replacement variable. */
tmp = gfc_conv_descriptor_data_get (desc);
value = gfc_get_interface_mapping_array (&se->pre, sym,
PACKED_NO, tmp);
/* Use DESC to work out the upper bounds, strides and offset. */
gfc_set_interface_mapping_bounds (&se->pre, TREE_TYPE (value), desc);
}
else
/* Otherwise we have a packed array. */
value = gfc_get_interface_mapping_array (&se->pre, sym,
PACKED_FULL, se->expr);
new_sym->backend_decl = value;
}
/* Called once all dummy argument mappings have been added to MAPPING,
but before the mapping is used to evaluate expressions. Pre-evaluate
the length of each argument, adding any initialization code to PRE and
any finalization code to POST. */
void
gfc_finish_interface_mapping (gfc_interface_mapping * mapping,
stmtblock_t * pre, stmtblock_t * post)
{
gfc_interface_sym_mapping *sym;
gfc_expr *expr;
gfc_se se;
for (sym = mapping->syms; sym; sym = sym->next)
if (sym->new_sym->n.sym->ts.type == BT_CHARACTER
&& !sym->new_sym->n.sym->ts.u.cl->backend_decl)
{
expr = sym->new_sym->n.sym->ts.u.cl->length;
gfc_apply_interface_mapping_to_expr (mapping, expr);
gfc_init_se (&se, NULL);
gfc_conv_expr (&se, expr);
se.expr = fold_convert (gfc_charlen_type_node, se.expr);
se.expr = gfc_evaluate_now (se.expr, &se.pre);
gfc_add_block_to_block (pre, &se.pre);
gfc_add_block_to_block (post, &se.post);
sym->new_sym->n.sym->ts.u.cl->backend_decl = se.expr;
}
}
/* Like gfc_apply_interface_mapping_to_expr, but applied to
constructor C. */
static void
gfc_apply_interface_mapping_to_cons (gfc_interface_mapping * mapping,
gfc_constructor_base base)
{
gfc_constructor *c;
for (c = gfc_constructor_first (base); c; c = gfc_constructor_next (c))
{
gfc_apply_interface_mapping_to_expr (mapping, c->expr);
if (c->iterator)
{
gfc_apply_interface_mapping_to_expr (mapping, c->iterator->start);
gfc_apply_interface_mapping_to_expr (mapping, c->iterator->end);
gfc_apply_interface_mapping_to_expr (mapping, c->iterator->step);
}
}
}
/* Like gfc_apply_interface_mapping_to_expr, but applied to
reference REF. */
static void
gfc_apply_interface_mapping_to_ref (gfc_interface_mapping * mapping,
gfc_ref * ref)
{
int n;
for (; ref; ref = ref->next)
switch (ref->type)
{
case REF_ARRAY:
for (n = 0; n < ref->u.ar.dimen; n++)
{
gfc_apply_interface_mapping_to_expr (mapping, ref->u.ar.start[n]);
gfc_apply_interface_mapping_to_expr (mapping, ref->u.ar.end[n]);
gfc_apply_interface_mapping_to_expr (mapping, ref->u.ar.stride[n]);
}
break;
case REF_COMPONENT:
break;
case REF_SUBSTRING:
gfc_apply_interface_mapping_to_expr (mapping, ref->u.ss.start);
gfc_apply_interface_mapping_to_expr (mapping, ref->u.ss.end);
break;
}
}
/* Convert intrinsic function calls into result expressions. */
static bool
gfc_map_intrinsic_function (gfc_expr *expr, gfc_interface_mapping *mapping)
{
gfc_symbol *sym;
gfc_expr *new_expr;
gfc_expr *arg1;
gfc_expr *arg2;
int d, dup;
arg1 = expr->value.function.actual->expr;
if (expr->value.function.actual->next)
arg2 = expr->value.function.actual->next->expr;
else
arg2 = NULL;
sym = arg1->symtree->n.sym;
if (sym->attr.dummy)
return false;
new_expr = NULL;
switch (expr->value.function.isym->id)
{
case GFC_ISYM_LEN:
/* TODO figure out why this condition is necessary. */
if (sym->attr.function
&& (arg1->ts.u.cl->length == NULL
|| (arg1->ts.u.cl->length->expr_type != EXPR_CONSTANT
&& arg1->ts.u.cl->length->expr_type != EXPR_VARIABLE)))
return false;
new_expr = gfc_copy_expr (arg1->ts.u.cl->length);
break;
case GFC_ISYM_SIZE:
if (!sym->as || sym->as->rank == 0)
return false;
if (arg2 && arg2->expr_type == EXPR_CONSTANT)
{
dup = mpz_get_si (arg2->value.integer);
d = dup - 1;
}
else
{
dup = sym->as->rank;
d = 0;
}
for (; d < dup; d++)
{
gfc_expr *tmp;
if (!sym->as->upper[d] || !sym->as->lower[d])
{
gfc_free_expr (new_expr);
return false;
}
tmp = gfc_add (gfc_copy_expr (sym->as->upper[d]),
gfc_get_int_expr (gfc_default_integer_kind,
NULL, 1));
tmp = gfc_subtract (tmp, gfc_copy_expr (sym->as->lower[d]));
if (new_expr)
new_expr = gfc_multiply (new_expr, tmp);
else
new_expr = tmp;
}
break;
case GFC_ISYM_LBOUND:
case GFC_ISYM_UBOUND:
/* TODO These implementations of lbound and ubound do not limit if
the size < 0, according to F95's 13.14.53 and 13.14.113. */
if (!sym->as || sym->as->rank == 0)
return false;
if (arg2 && arg2->expr_type == EXPR_CONSTANT)
d = mpz_get_si (arg2->value.integer) - 1;
else
/* TODO: If the need arises, this could produce an array of
ubound/lbounds. */
gcc_unreachable ();
if (expr->value.function.isym->id == GFC_ISYM_LBOUND)
{
if (sym->as->lower[d])
new_expr = gfc_copy_expr (sym->as->lower[d]);
}
else
{
if (sym->as->upper[d])
new_expr = gfc_copy_expr (sym->as->upper[d]);
}
break;
default:
break;
}
gfc_apply_interface_mapping_to_expr (mapping, new_expr);
if (!new_expr)
return false;
gfc_replace_expr (expr, new_expr);
return true;
}
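/* A worked example for the GFC_ISYM_SIZE branch above: for a dummy
declared INTEGER :: a(n, 0:m) with no DIM argument, the mapped
expression is
size = ((n + 1) - 1) * ((m + 1) - 0) = n * (m + 1)
i.e. the product over all dimensions of (upper + 1) - lower, with each
bound first rewritten in terms of the actual arguments. */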
static void
gfc_map_fcn_formal_to_actual (gfc_expr *expr, gfc_expr *map_expr,
gfc_interface_mapping * mapping)
{
gfc_formal_arglist *f;
gfc_actual_arglist *actual;
actual = expr->value.function.actual;
f = gfc_sym_get_dummy_args (map_expr->symtree->n.sym);
for (; f && actual; f = f->next, actual = actual->next)
{
if (!actual->expr)
continue;
gfc_add_interface_mapping (mapping, f->sym, NULL, actual->expr);
}
if (map_expr->symtree->n.sym->attr.dimension)
{
int d;
gfc_array_spec *as;
as = gfc_copy_array_spec (map_expr->symtree->n.sym->as);
for (d = 0; d < as->rank; d++)
{
gfc_apply_interface_mapping_to_expr (mapping, as->lower[d]);
gfc_apply_interface_mapping_to_expr (mapping, as->upper[d]);
}
expr->value.function.esym->as = as;
}
if (map_expr->symtree->n.sym->ts.type == BT_CHARACTER)
{
expr->value.function.esym->ts.u.cl->length
= gfc_copy_expr (map_expr->symtree->n.sym->ts.u.cl->length);
gfc_apply_interface_mapping_to_expr (mapping,
expr->value.function.esym->ts.u.cl->length);
}
}
/* EXPR is a copy of an expression that appeared in the interface
associated with MAPPING. Walk it recursively looking for references to
dummy arguments that MAPPING maps to actual arguments. Replace each such
reference with a reference to the associated actual argument. */
static void
gfc_apply_interface_mapping_to_expr (gfc_interface_mapping * mapping,
gfc_expr * expr)
{
gfc_interface_sym_mapping *sym;
gfc_actual_arglist *actual;
if (!expr)
return;
/* Copying an expression does not copy its length, so do that here. */
if (expr->ts.type == BT_CHARACTER && expr->ts.u.cl)
{
expr->ts.u.cl = gfc_get_interface_mapping_charlen (mapping, expr->ts.u.cl);
gfc_apply_interface_mapping_to_expr (mapping, expr->ts.u.cl->length);
}
/* Apply the mapping to any references. */
gfc_apply_interface_mapping_to_ref (mapping, expr->ref);
/* ...and to the expression's symbol, if it has one. */
/* TODO Find out why the condition on expr->symtree had to be moved into
the loop rather than being outside it, as originally. */
for (sym = mapping->syms; sym; sym = sym->next)
if (expr->symtree && sym->old == expr->symtree->n.sym)
{
if (sym->new_sym->n.sym->backend_decl)
expr->symtree = sym->new_sym;
else if (sym->expr)
gfc_replace_expr (expr, gfc_copy_expr (sym->expr));
/* Replace base type for polymorphic arguments. */
if (expr->ref && expr->ref->type == REF_COMPONENT
&& sym->expr && sym->expr->ts.type == BT_CLASS)
expr->ref->u.c.sym = sym->expr->ts.u.derived;
}
/* ...and to subexpressions in expr->value. */
switch (expr->expr_type)
{
case EXPR_VARIABLE:
case EXPR_CONSTANT:
case EXPR_NULL:
case EXPR_SUBSTRING:
break;
case EXPR_OP:
gfc_apply_interface_mapping_to_expr (mapping, expr->value.op.op1);
gfc_apply_interface_mapping_to_expr (mapping, expr->value.op.op2);
break;
case EXPR_FUNCTION:
for (actual = expr->value.function.actual; actual; actual = actual->next)
gfc_apply_interface_mapping_to_expr (mapping, actual->expr);
if (expr->value.function.esym == NULL
&& expr->value.function.isym != NULL
&& expr->value.function.actual->expr->symtree
&& gfc_map_intrinsic_function (expr, mapping))
break;
for (sym = mapping->syms; sym; sym = sym->next)
if (sym->old == expr->value.function.esym)
{
expr->value.function.esym = sym->new_sym->n.sym;
gfc_map_fcn_formal_to_actual (expr, sym->expr, mapping);
expr->value.function.esym->result = sym->new_sym->n.sym;
}
break;
case EXPR_ARRAY:
case EXPR_STRUCTURE:
gfc_apply_interface_mapping_to_cons (mapping, expr->value.constructor);
break;
case EXPR_COMPCALL:
case EXPR_PPC:
gcc_unreachable ();
break;
}
return;
}
/* Evaluate interface expression EXPR using MAPPING. Store the result
in SE. */
void
gfc_apply_interface_mapping (gfc_interface_mapping * mapping,
gfc_se * se, gfc_expr * expr)
{
expr = gfc_copy_expr (expr);
gfc_apply_interface_mapping_to_expr (mapping, expr);
gfc_conv_expr (se, expr);
se->expr = gfc_evaluate_now (se->expr, &se->pre);
gfc_free_expr (expr);
}
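/* Usage sketch: given an interface such as
FUNCTION f (n)
INTEGER :: n
CHARACTER(LEN=2*n) :: f
END FUNCTION
a reference f(5) maps the dummy n to the actual argument, so
evaluating the result-length expression 2*n through this routine
yields 10 at the call site, which is how the caller sizes the
temporary for the function result. */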
/* Returns a reference to a temporary array into which a component of
an actual argument derived type array is copied and then returned
after the function call. */
void
gfc_conv_subref_array_arg (gfc_se * parmse, gfc_expr * expr, int g77,
sym_intent intent, bool formal_ptr)
{
gfc_se lse;
gfc_se rse;
gfc_ss *lss;
gfc_ss *rss;
gfc_loopinfo loop;
gfc_loopinfo loop2;
gfc_array_info *info;
tree offset;
tree tmp_index;
tree tmp;
tree base_type;
tree size;
stmtblock_t body;
int n;
int dimen;
gcc_assert (expr->expr_type == EXPR_VARIABLE);
gfc_init_se (&lse, NULL);
gfc_init_se (&rse, NULL);
/* Walk the argument expression. */
rss = gfc_walk_expr (expr);
gcc_assert (rss != gfc_ss_terminator);
/* Initialize the scalarizer. */
gfc_init_loopinfo (&loop);
gfc_add_ss_to_loop (&loop, rss);
/* Calculate the bounds of the scalarization. */
gfc_conv_ss_startstride (&loop);
/* Build an ss for the temporary. */
if (expr->ts.type == BT_CHARACTER && !expr->ts.u.cl->backend_decl)
gfc_conv_string_length (expr->ts.u.cl, expr, &parmse->pre);
base_type = gfc_typenode_for_spec (&expr->ts);
if (GFC_ARRAY_TYPE_P (base_type)
|| GFC_DESCRIPTOR_TYPE_P (base_type))
base_type = gfc_get_element_type (base_type);
if (expr->ts.type == BT_CLASS)
base_type = gfc_typenode_for_spec (&CLASS_DATA (expr)->ts);
loop.temp_ss = gfc_get_temp_ss (base_type, ((expr->ts.type == BT_CHARACTER)
? expr->ts.u.cl->backend_decl
: NULL),
loop.dimen);
parmse->string_length = loop.temp_ss->info->string_length;
/* Associate the SS with the loop. */
gfc_add_ss_to_loop (&loop, loop.temp_ss);
/* Setup the scalarizing loops. */
gfc_conv_loop_setup (&loop, &expr->where);
/* Pass the temporary descriptor back to the caller. */
info = &loop.temp_ss->info->data.array;
parmse->expr = info->descriptor;
/* Setup the gfc_se structures. */
gfc_copy_loopinfo_to_se (&lse, &loop);
gfc_copy_loopinfo_to_se (&rse, &loop);
rse.ss = rss;
lse.ss = loop.temp_ss;
gfc_mark_ss_chain_used (rss, 1);
gfc_mark_ss_chain_used (loop.temp_ss, 1);
/* Start the scalarized loop body. */
gfc_start_scalarized_body (&loop, &body);
/* Translate the expression. */
gfc_conv_expr (&rse, expr);
gfc_conv_tmp_array_ref (&lse);
if (intent != INTENT_OUT)
{
tmp = gfc_trans_scalar_assign (&lse, &rse, expr->ts, true, false, true);
gfc_add_expr_to_block (&body, tmp);
gcc_assert (rse.ss == gfc_ss_terminator);
gfc_trans_scalarizing_loops (&loop, &body);
}
else
{
/* Make sure that the temporary declaration survives by merging
all the loop declarations into the current context. */
for (n = 0; n < loop.dimen; n++)
{
gfc_merge_block_scope (&body);
body = loop.code[loop.order[n]];
}
gfc_merge_block_scope (&body);
}
/* Add the post block after the second loop, so that any
freeing of allocated memory is done at the right time. */
gfc_add_block_to_block (&parmse->pre, &loop.pre);
/* Copy the temporary back again. */
gfc_init_se (&lse, NULL);
gfc_init_se (&rse, NULL);
/* Walk the argument expression. */
lss = gfc_walk_expr (expr);
rse.ss = loop.temp_ss;
lse.ss = lss;
/* Initialize the scalarizer. */
gfc_init_loopinfo (&loop2);
gfc_add_ss_to_loop (&loop2, lss);
/* Calculate the bounds of the scalarization. */
gfc_conv_ss_startstride (&loop2);
/* Setup the scalarizing loops. */
gfc_conv_loop_setup (&loop2, &expr->where);
gfc_copy_loopinfo_to_se (&lse, &loop2);
gfc_copy_loopinfo_to_se (&rse, &loop2);
gfc_mark_ss_chain_used (lss, 1);
gfc_mark_ss_chain_used (loop.temp_ss, 1);
/* Declare the variable to hold the temporary offset and start the
scalarized loop body. */
offset = gfc_create_var (gfc_array_index_type, NULL);
gfc_start_scalarized_body (&loop2, &body);
/* Build the offsets for the temporary from the loop variables. The
temporary array has lbounds of zero and strides of one in all
dimensions, so this is very simple. The offset is only computed
outside the innermost loop, so the overall transfer could be
optimized further. */
info = &rse.ss->info->data.array;
dimen = rse.ss->dimen;
tmp_index = gfc_index_zero_node;
for (n = dimen - 1; n > 0; n--)
{
tree tmp_str;
tmp = rse.loop->loopvar[n];
tmp = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type,
tmp, rse.loop->from[n]);
tmp = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
tmp, tmp_index);
tmp_str = fold_build2_loc (input_location, MINUS_EXPR,
gfc_array_index_type,
rse.loop->to[n-1], rse.loop->from[n-1]);
tmp_str = fold_build2_loc (input_location, PLUS_EXPR,
gfc_array_index_type,
tmp_str, gfc_index_one_node);
tmp_index = fold_build2_loc (input_location, MULT_EXPR,
gfc_array_index_type, tmp, tmp_str);
}
tmp_index = fold_build2_loc (input_location, MINUS_EXPR,
gfc_array_index_type,
tmp_index, rse.loop->from[0]);
gfc_add_modify (&rse.loop->code[0], offset, tmp_index);
tmp_index = fold_build2_loc (input_location, PLUS_EXPR,
gfc_array_index_type,
rse.loop->loopvar[0], offset);
/* Now use the offset for the reference. */
tmp = build_fold_indirect_ref_loc (input_location,
info->data);
rse.expr = gfc_build_array_ref (tmp, tmp_index, NULL);
if (expr->ts.type == BT_CHARACTER)
rse.string_length = expr->ts.u.cl->backend_decl;
gfc_conv_expr (&lse, expr);
gcc_assert (lse.ss == gfc_ss_terminator);
tmp = gfc_trans_scalar_assign (&lse, &rse, expr->ts, false, false, true);
gfc_add_expr_to_block (&body, tmp);
/* Generate the copying loops. */
gfc_trans_scalarizing_loops (&loop2, &body);
/* Wrap the whole thing up by adding the second loop to the post-block
and following it by the post-block of the first loop. In this way,
if the temporary needs freeing, it is done after use! */
if (intent != INTENT_IN)
{
gfc_add_block_to_block (&parmse->post, &loop2.pre);
gfc_add_block_to_block (&parmse->post, &loop2.post);
}
gfc_add_block_to_block (&parmse->post, &loop.post);
gfc_cleanup_loop (&loop);
gfc_cleanup_loop (&loop2);
/* Pass the string length to the argument expression. */
if (expr->ts.type == BT_CHARACTER)
parmse->string_length = expr->ts.u.cl->backend_decl;
/* Determine the offset for pointer formal arguments and set the
lbounds to one. */
if (formal_ptr)
{
size = gfc_index_one_node;
offset = gfc_index_zero_node;
for (n = 0; n < dimen; n++)
{
tmp = gfc_conv_descriptor_ubound_get (parmse->expr,
gfc_rank_cst[n]);
tmp = fold_build2_loc (input_location, PLUS_EXPR,
gfc_array_index_type, tmp,
gfc_index_one_node);
gfc_conv_descriptor_ubound_set (&parmse->pre,
parmse->expr,
gfc_rank_cst[n],
tmp);
gfc_conv_descriptor_lbound_set (&parmse->pre,
parmse->expr,
gfc_rank_cst[n],
gfc_index_one_node);
size = gfc_evaluate_now (size, &parmse->pre);
offset = fold_build2_loc (input_location, MINUS_EXPR,
gfc_array_index_type,
offset, size);
offset = gfc_evaluate_now (offset, &parmse->pre);
tmp = fold_build2_loc (input_location, MINUS_EXPR,
gfc_array_index_type,
rse.loop->to[n], rse.loop->from[n]);
tmp = fold_build2_loc (input_location, PLUS_EXPR,
gfc_array_index_type,
tmp, gfc_index_one_node);
size = fold_build2_loc (input_location, MULT_EXPR,
gfc_array_index_type, size, tmp);
}
gfc_conv_descriptor_offset_set (&parmse->pre, parmse->expr,
offset);
}
/* We want either the address of the data or the address of the descriptor,
depending on the mode of passing array arguments. */
if (g77)
parmse->expr = gfc_conv_descriptor_data_get (parmse->expr);
else
parmse->expr = gfc_build_addr_expr (NULL_TREE, parmse->expr);
return;
}
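/* Copy-in/copy-out sketch for the routine above: for
TYPE(t) :: a(10)
CALL sub (a%x)
the component a%x is not contiguous, so the first scalarized loop
gathers its elements into a packed temporary, the temporary (or its
data pointer, in g77 mode) is passed, and unless the dummy is
INTENT(IN) the second loop scatters the values back after the
call. */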
/* Generate the code for argument list functions. */
static void
conv_arglist_function (gfc_se *se, gfc_expr *expr, const char *name)
{
/* Pass by value for g77 %VAL(arg), pass the address
indirectly for %LOC, else by reference. Thus %REF
is a "do-nothing" and %LOC is the same as an F95
pointer. */
if (strncmp (name, "%VAL", 4) == 0)
gfc_conv_expr (se, expr);
else if (strncmp (name, "%LOC", 4) == 0)
{
gfc_conv_expr_reference (se, expr);
se->expr = gfc_build_addr_expr (NULL, se->expr);
}
else if (strncmp (name, "%REF", 4) == 0)
gfc_conv_expr_reference (se, expr);
else
gfc_error ("Unknown argument list function at %L", &expr->where);
}
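/* Example of the legacy argument-list functions handled above (a
VMS/g77 extension):
CALL csub (%VAL(i), %REF(x), %LOC(y))
passes i by value, x by reference (the Fortran default, making %REF a
no-op), and the address of y in the manner of an F95 pointer. */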
/* Generate code for a procedure call. Note that this can return with
se->post != NULL. If se->direct_byref is set then se->expr contains the
return parameter. Returns nonzero if the call has alternate specifiers.
'expr' is only needed for procedure pointer components. */
int
gfc_conv_procedure_call (gfc_se * se, gfc_symbol * sym,
gfc_actual_arglist * args, gfc_expr * expr,
vec<tree, va_gc> *append_args)
{
gfc_interface_mapping mapping;
vec<tree, va_gc> *arglist;
vec<tree, va_gc> *retargs;
tree tmp;
tree fntype;
gfc_se parmse;
gfc_array_info *info;
int byref;
int parm_kind;
tree type;
tree var;
tree len;
tree base_object;
vec<tree, va_gc> *stringargs;
vec<tree, va_gc> *optionalargs;
tree result = NULL;
gfc_formal_arglist *formal;
gfc_actual_arglist *arg;
int has_alternate_specifier = 0;
bool need_interface_mapping;
bool callee_alloc;
gfc_typespec ts;
gfc_charlen cl;
gfc_expr *e;
gfc_symbol *fsym;
stmtblock_t post;
enum {MISSING = 0, ELEMENTAL, SCALAR, SCALAR_POINTER, ARRAY};
gfc_component *comp = NULL;
int arglen;
arglist = NULL;
retargs = NULL;
stringargs = NULL;
optionalargs = NULL;
var = NULL_TREE;
len = NULL_TREE;
gfc_clear_ts (&ts);
comp = gfc_get_proc_ptr_comp (expr);
if (se->ss != NULL)
{
if (!sym->attr.elemental && !(comp && comp->attr.elemental))
{
gcc_assert (se->ss->info->type == GFC_SS_FUNCTION);
if (se->ss->info->useflags)
{
gcc_assert ((!comp && gfc_return_by_reference (sym)
&& sym->result->attr.dimension)
|| (comp && comp->attr.dimension));
gcc_assert (se->loop != NULL);
/* Access the previously obtained result. */
gfc_conv_tmp_array_ref (se);
return 0;
}
}
info = &se->ss->info->data.array;
}
else
info = NULL;
gfc_init_block (&post);
gfc_init_interface_mapping (&mapping);
if (!comp)
{
formal = gfc_sym_get_dummy_args (sym);
need_interface_mapping = sym->attr.dimension ||
(sym->ts.type == BT_CHARACTER
&& sym->ts.u.cl->length
&& sym->ts.u.cl->length->expr_type
!= EXPR_CONSTANT);
}
else
{
formal = comp->ts.interface ? comp->ts.interface->formal : NULL;
need_interface_mapping = comp->attr.dimension ||
(comp->ts.type == BT_CHARACTER
&& comp->ts.u.cl->length
&& comp->ts.u.cl->length->expr_type
!= EXPR_CONSTANT);
}
base_object = NULL_TREE;
/* Evaluate the arguments. */
for (arg = args; arg != NULL;
arg = arg->next, formal = formal ? formal->next : NULL)
{
e = arg->expr;
fsym = formal ? formal->sym : NULL;
parm_kind = MISSING;
/* Class array expressions sometimes arrive completely unadorned, with
neither an arrayspec nor a _data component. Correct that here.
OOP-TODO: Move this to the frontend. */
if (e && e->expr_type == EXPR_VARIABLE
&& !e->ref
&& e->ts.type == BT_CLASS
&& (CLASS_DATA (e)->attr.codimension
|| CLASS_DATA (e)->attr.dimension))
{
gfc_typespec temp_ts = e->ts;
gfc_add_class_array_ref (e);
e->ts = temp_ts;
}
if (e == NULL)
{
if (se->ignore_optional)
{
/* Some intrinsics have already been resolved to the correct
parameters. */
continue;
}
else if (arg->label)
{
has_alternate_specifier = 1;
continue;
}
else
{
gfc_init_se (&parmse, NULL);
/* For scalar arguments with the VALUE attribute which are passed by
value, pass "0" and let a hidden argument convey the optional
status. */
if (fsym && fsym->attr.optional && fsym->attr.value
&& !fsym->attr.dimension && fsym->ts.type != BT_CHARACTER
&& fsym->ts.type != BT_CLASS && fsym->ts.type != BT_DERIVED)
{
parmse.expr = fold_convert (gfc_sym_type (fsym),
integer_zero_node);
vec_safe_push (optionalargs, boolean_false_node);
}
else
{
/* Pass a NULL pointer for an absent arg. */
parmse.expr = null_pointer_node;
if (arg->missing_arg_type == BT_CHARACTER)
parmse.string_length = build_int_cst (gfc_charlen_type_node,
0);
}
}
}
else if (arg->expr->expr_type == EXPR_NULL
&& fsym && !fsym->attr.pointer
&& (fsym->ts.type != BT_CLASS
|| !CLASS_DATA (fsym)->attr.class_pointer))
{
/* Pass a NULL pointer to denote an absent arg. */
gcc_assert (fsym->attr.optional && !fsym->attr.allocatable
&& (fsym->ts.type != BT_CLASS
|| !CLASS_DATA (fsym)->attr.allocatable));
gfc_init_se (&parmse, NULL);
parmse.expr = null_pointer_node;
if (arg->missing_arg_type == BT_CHARACTER)
parmse.string_length = build_int_cst (gfc_charlen_type_node, 0);
}
else if (fsym && fsym->ts.type == BT_CLASS
&& e->ts.type == BT_DERIVED)
{
/* The derived type needs to be converted to a temporary
CLASS object. */
gfc_init_se (&parmse, se);
gfc_conv_derived_to_class (&parmse, e, fsym->ts, NULL,
fsym->attr.optional
&& e->expr_type == EXPR_VARIABLE
&& e->symtree->n.sym->attr.optional,
CLASS_DATA (fsym)->attr.class_pointer
|| CLASS_DATA (fsym)->attr.allocatable);
}
else if (UNLIMITED_POLY (fsym) && e->ts.type != BT_CLASS)
{
/* The intrinsic type needs to be converted to a temporary
CLASS object for the unlimited polymorphic formal. */
gfc_init_se (&parmse, se);
gfc_conv_intrinsic_to_class (&parmse, e, fsym->ts);
}
else if (se->ss && se->ss->info->useflags)
{
gfc_ss *ss;
ss = se->ss;
/* An elemental function inside a scalarized loop. */
gfc_init_se (&parmse, se);
parm_kind = ELEMENTAL;
if (fsym && fsym->attr.value)
gfc_conv_expr (&parmse, e);
else
gfc_conv_expr_reference (&parmse, e);
if (e->ts.type == BT_CHARACTER && !e->rank
&& e->expr_type == EXPR_FUNCTION)
parmse.expr = build_fold_indirect_ref_loc (input_location,
parmse.expr);
if (fsym && fsym->ts.type == BT_DERIVED
&& gfc_is_class_container_ref (e))
{
parmse.expr = gfc_class_data_get (parmse.expr);
if (fsym->attr.optional && e->expr_type == EXPR_VARIABLE
&& e->symtree->n.sym->attr.optional)
{
tree cond = gfc_conv_expr_present (e->symtree->n.sym);
parmse.expr = build3_loc (input_location, COND_EXPR,
TREE_TYPE (parmse.expr),
cond, parmse.expr,
fold_convert (TREE_TYPE (parmse.expr),
null_pointer_node));
}
}
/* If we are passing an absent array as an optional dummy to an
elemental procedure, make sure that we pass NULL when the data
pointer is NULL. We need this extra conditional because of
scalarization, which passes array elements to the procedure,
ignoring the fact that the array can be absent/unallocated/... */
if (ss->info->can_be_null_ref && ss->info->type != GFC_SS_REFERENCE)
{
tree descriptor_data;
descriptor_data = ss->info->data.array.data;
tmp = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
descriptor_data,
fold_convert (TREE_TYPE (descriptor_data),
null_pointer_node));
parmse.expr
= fold_build3_loc (input_location, COND_EXPR,
TREE_TYPE (parmse.expr),
gfc_unlikely (tmp, PRED_FORTRAN_ABSENT_DUMMY),
fold_convert (TREE_TYPE (parmse.expr),
null_pointer_node),
parmse.expr);
}
/* The scalarizer does not repackage the reference to a class
array - instead it returns a pointer to the data element. */
if (fsym && fsym->ts.type == BT_CLASS && e->ts.type == BT_CLASS)
gfc_conv_class_to_class (&parmse, e, fsym->ts, true,
fsym->attr.intent != INTENT_IN
&& (CLASS_DATA (fsym)->attr.class_pointer
|| CLASS_DATA (fsym)->attr.allocatable),
fsym->attr.optional
&& e->expr_type == EXPR_VARIABLE
&& e->symtree->n.sym->attr.optional,
CLASS_DATA (fsym)->attr.class_pointer
|| CLASS_DATA (fsym)->attr.allocatable);
}
else
{
bool scalar;
gfc_ss *argss;
gfc_init_se (&parmse, NULL);
/* Check whether the expression is a scalar or not; we cannot use
e->rank as it can be nonzero for function arguments. */
argss = gfc_walk_expr (e);
scalar = argss == gfc_ss_terminator;
if (!scalar)
gfc_free_ss_chain (argss);
/* Special handling for passing scalar polymorphic coarrays;
otherwise one passes "class->_data.data" instead of "&class". */
if (e->rank == 0 && e->ts.type == BT_CLASS
&& fsym && fsym->ts.type == BT_CLASS
&& CLASS_DATA (fsym)->attr.codimension
&& !CLASS_DATA (fsym)->attr.dimension)
{
gfc_add_class_array_ref (e);
parmse.want_coarray = 1;
scalar = false;
}
/* A scalar or transformational function. */
if (scalar)
{
if (e->expr_type == EXPR_VARIABLE
&& e->symtree->n.sym->attr.cray_pointee
&& fsym && fsym->attr.flavor == FL_PROCEDURE)
{
/* The Cray pointer needs to be converted to a pointer to
a type given by the expression. */
gfc_conv_expr (&parmse, e);
type = build_pointer_type (TREE_TYPE (parmse.expr));
tmp = gfc_get_symbol_decl (e->symtree->n.sym->cp_pointer);
parmse.expr = convert (type, tmp);
}
else if (fsym && fsym->attr.value)
{
if (fsym->ts.type == BT_CHARACTER
&& fsym->ts.is_c_interop
&& fsym->ns->proc_name != NULL
&& fsym->ns->proc_name->attr.is_bind_c)
{
parmse.expr = NULL;
gfc_conv_scalar_char_value (fsym, &parmse, &e);
if (parmse.expr == NULL)
gfc_conv_expr (&parmse, e);
}
else
{
gfc_conv_expr (&parmse, e);
if (fsym->attr.optional
&& fsym->ts.type != BT_CLASS
&& fsym->ts.type != BT_DERIVED)
{
if (e->expr_type != EXPR_VARIABLE
|| !e->symtree->n.sym->attr.optional
|| e->ref != NULL)
vec_safe_push (optionalargs, boolean_true_node);
else
{
tmp = gfc_conv_expr_present (e->symtree->n.sym);
if (!e->symtree->n.sym->attr.value)
parmse.expr
= fold_build3_loc (input_location, COND_EXPR,
TREE_TYPE (parmse.expr),
tmp, parmse.expr,
fold_convert (TREE_TYPE (parmse.expr),
integer_zero_node));
vec_safe_push (optionalargs, tmp);
}
}
}
}
else if (arg->name && arg->name[0] == '%')
/* Argument list functions %VAL, %LOC and %REF are signalled
through arg->name. */
conv_arglist_function (&parmse, arg->expr, arg->name);
else if ((e->expr_type == EXPR_FUNCTION)
&& ((e->value.function.esym
&& e->value.function.esym->result->attr.pointer)
|| (!e->value.function.esym
&& e->symtree->n.sym->attr.pointer))
&& fsym && fsym->attr.target)
{
gfc_conv_expr (&parmse, e);
parmse.expr = gfc_build_addr_expr (NULL_TREE, parmse.expr);
}
else if (e->expr_type == EXPR_FUNCTION
&& e->symtree->n.sym->result
&& e->symtree->n.sym->result != e->symtree->n.sym
&& e->symtree->n.sym->result->attr.proc_pointer)
{
/* Functions returning procedure pointers. */
gfc_conv_expr (&parmse, e);
if (fsym && fsym->attr.proc_pointer)
parmse.expr = gfc_build_addr_expr (NULL_TREE, parmse.expr);
}
else
{
if (e->ts.type == BT_CLASS && fsym
&& fsym->ts.type == BT_CLASS
&& (!CLASS_DATA (fsym)->as
|| CLASS_DATA (fsym)->as->type != AS_ASSUMED_RANK)
&& CLASS_DATA (e)->attr.codimension)
{
gcc_assert (!CLASS_DATA (fsym)->attr.codimension);
gcc_assert (!CLASS_DATA (fsym)->as);
gfc_add_class_array_ref (e);
parmse.want_coarray = 1;
gfc_conv_expr_reference (&parmse, e);
class_scalar_coarray_to_class (&parmse, e, fsym->ts,
fsym->attr.optional
&& e->expr_type == EXPR_VARIABLE);
}
else
gfc_conv_expr_reference (&parmse, e);
/* Catch base objects that are not variables. */
if (e->ts.type == BT_CLASS
&& e->expr_type != EXPR_VARIABLE
&& expr && e == expr->base_expr)
base_object = build_fold_indirect_ref_loc (input_location,
parmse.expr);
/* A class array element needs to be converted back into a
class object if the formal argument is a class object. */
if (fsym && fsym->ts.type == BT_CLASS
&& e->ts.type == BT_CLASS
&& ((CLASS_DATA (fsym)->as
&& CLASS_DATA (fsym)->as->type == AS_ASSUMED_RANK)
|| CLASS_DATA (e)->attr.dimension))
gfc_conv_class_to_class (&parmse, e, fsym->ts, false,
fsym->attr.intent != INTENT_IN
&& (CLASS_DATA (fsym)->attr.class_pointer
|| CLASS_DATA (fsym)->attr.allocatable),
fsym->attr.optional
&& e->expr_type == EXPR_VARIABLE
&& e->symtree->n.sym->attr.optional,
CLASS_DATA (fsym)->attr.class_pointer
|| CLASS_DATA (fsym)->attr.allocatable);
/* If an ALLOCATABLE dummy argument has INTENT(OUT) and is
allocated on entry, it must be deallocated. */
if (fsym && fsym->attr.intent == INTENT_OUT
&& (fsym->attr.allocatable
|| (fsym->ts.type == BT_CLASS
&& CLASS_DATA (fsym)->attr.allocatable)))
{
stmtblock_t block;
tree ptr;
gfc_init_block (&block);
ptr = parmse.expr;
if (e->ts.type == BT_CLASS)
ptr = gfc_class_data_get (ptr);
tmp = gfc_deallocate_scalar_with_status (ptr, NULL_TREE,
true, e, e->ts);
gfc_add_expr_to_block (&block, tmp);
tmp = fold_build2_loc (input_location, MODIFY_EXPR,
void_type_node, ptr,
null_pointer_node);
gfc_add_expr_to_block (&block, tmp);
if (fsym->ts.type == BT_CLASS && UNLIMITED_POLY (fsym))
{
gfc_add_modify (&block, ptr,
fold_convert (TREE_TYPE (ptr),
null_pointer_node));
gfc_add_expr_to_block (&block, tmp);
}
else if (fsym->ts.type == BT_CLASS)
{
gfc_symbol *vtab;
vtab = gfc_find_derived_vtab (fsym->ts.u.derived);
tmp = gfc_get_symbol_decl (vtab);
tmp = gfc_build_addr_expr (NULL_TREE, tmp);
ptr = gfc_class_vptr_get (parmse.expr);
gfc_add_modify (&block, ptr,
fold_convert (TREE_TYPE (ptr), tmp));
gfc_add_expr_to_block (&block, tmp);
}
if (fsym->attr.optional
&& e->expr_type == EXPR_VARIABLE
&& e->symtree->n.sym->attr.optional)
{
tmp = fold_build3_loc (input_location, COND_EXPR,
void_type_node,
gfc_conv_expr_present (e->symtree->n.sym),
gfc_finish_block (&block),
build_empty_stmt (input_location));
}
else
tmp = gfc_finish_block (&block);
gfc_add_expr_to_block (&se->pre, tmp);
}
if (fsym && (fsym->ts.type == BT_DERIVED
|| fsym->ts.type == BT_ASSUMED)
&& e->ts.type == BT_CLASS
&& !CLASS_DATA (e)->attr.dimension
&& !CLASS_DATA (e)->attr.codimension)
parmse.expr = gfc_class_data_get (parmse.expr);
/* Wrap a scalar variable in a descriptor. We need to convert
the address of a pointer back to the pointer itself before
we can assign it to the data field. */
if (fsym && fsym->as && fsym->as->type == AS_ASSUMED_RANK
&& fsym->ts.type != BT_CLASS && e->expr_type != EXPR_NULL)
{
tmp = parmse.expr;
if (TREE_CODE (tmp) == ADDR_EXPR
&& POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (tmp, 0))))
tmp = TREE_OPERAND (tmp, 0);
parmse.expr = gfc_conv_scalar_to_descriptor (&parmse, tmp,
fsym->attr);
parmse.expr = gfc_build_addr_expr (NULL_TREE,
parmse.expr);
}
else if (fsym && e->expr_type != EXPR_NULL
&& ((fsym->attr.pointer
&& fsym->attr.flavor != FL_PROCEDURE)
|| (fsym->attr.proc_pointer
&& !(e->expr_type == EXPR_VARIABLE
&& e->symtree->n.sym->attr.dummy))
|| (fsym->attr.proc_pointer
&& e->expr_type == EXPR_VARIABLE
&& gfc_is_proc_ptr_comp (e))
|| (fsym->attr.allocatable
&& fsym->attr.flavor != FL_PROCEDURE)))
{
/* Scalar pointer dummy args require an extra level of
indirection. The null pointer already contains
this level of indirection. */
parm_kind = SCALAR_POINTER;
parmse.expr = gfc_build_addr_expr (NULL_TREE, parmse.expr);
}
}
}
else if (e->ts.type == BT_CLASS
&& fsym && fsym->ts.type == BT_CLASS
&& (CLASS_DATA (fsym)->attr.dimension
|| CLASS_DATA (fsym)->attr.codimension))
{
/* Pass a class array. */
parmse.use_offset = 1;
gfc_conv_expr_descriptor (&parmse, e);
/* If an ALLOCATABLE dummy argument has INTENT(OUT) and is
allocated on entry, it must be deallocated. */
if (fsym->attr.intent == INTENT_OUT
&& CLASS_DATA (fsym)->attr.allocatable)
{
stmtblock_t block;
tree ptr;
gfc_init_block (&block);
ptr = parmse.expr;
ptr = gfc_class_data_get (ptr);
tmp = gfc_deallocate_with_status (ptr, NULL_TREE,
NULL_TREE, NULL_TREE,
NULL_TREE, true, e,
false);
gfc_add_expr_to_block (&block, tmp);
tmp = fold_build2_loc (input_location, MODIFY_EXPR,
void_type_node, ptr,
null_pointer_node);
gfc_add_expr_to_block (&block, tmp);
gfc_reset_vptr (&block, e);
if (fsym->attr.optional
&& e->expr_type == EXPR_VARIABLE
&& (!e->ref
|| (e->ref->type == REF_ARRAY
&& e->ref->u.ar.type != AR_FULL))
&& e->symtree->n.sym->attr.optional)
{
tmp = fold_build3_loc (input_location, COND_EXPR,
void_type_node,
gfc_conv_expr_present (e->symtree->n.sym),
gfc_finish_block (&block),
build_empty_stmt (input_location));
}
else
tmp = gfc_finish_block (&block);
gfc_add_expr_to_block (&se->pre, tmp);
}
/* The conversion does not repackage the reference to a class
array - _data descriptor. */
gfc_conv_class_to_class (&parmse, e, fsym->ts, false,
fsym->attr.intent != INTENT_IN
&& (CLASS_DATA (fsym)->attr.class_pointer
|| CLASS_DATA (fsym)->attr.allocatable),
fsym->attr.optional
&& e->expr_type == EXPR_VARIABLE
&& e->symtree->n.sym->attr.optional,
CLASS_DATA (fsym)->attr.class_pointer
|| CLASS_DATA (fsym)->attr.allocatable);
}
else
{
/* If the procedure requires an explicit interface, the actual
argument is passed according to the corresponding formal
argument. If the corresponding formal argument is a POINTER,
ALLOCATABLE or assumed shape, we do not use g77's calling
convention, and pass the address of the array descriptor
instead. Otherwise we use g77's calling convention. */
bool f;
f = (fsym != NULL)
&& !(fsym->attr.pointer || fsym->attr.allocatable)
&& fsym->as && fsym->as->type != AS_ASSUMED_SHAPE
&& fsym->as->type != AS_ASSUMED_RANK;
if (comp)
f = f || !comp->attr.always_explicit;
else
f = f || !sym->attr.always_explicit;
/* If the argument is a function call that may not create
a temporary for the result, we have to check that we
can do it, i.e. that there is no alias between this
argument and another one. */
if (gfc_get_noncopying_intrinsic_argument (e) != NULL)
{
gfc_expr *iarg;
sym_intent intent;
if (fsym != NULL)
intent = fsym->attr.intent;
else
intent = INTENT_UNKNOWN;
if (gfc_check_fncall_dependency (e, intent, sym, args,
NOT_ELEMENTAL))
parmse.force_tmp = 1;
iarg = e->value.function.actual->expr;
/* Temporary needed if aliasing due to host association. */
if (sym->attr.contained
&& !sym->attr.pure
&& !sym->attr.implicit_pure
&& !sym->attr.use_assoc
&& iarg->expr_type == EXPR_VARIABLE
&& sym->ns == iarg->symtree->n.sym->ns)
parmse.force_tmp = 1;
/* Ditto within module. */
if (sym->attr.use_assoc
&& !sym->attr.pure
&& !sym->attr.implicit_pure
&& iarg->expr_type == EXPR_VARIABLE
&& sym->module == iarg->symtree->n.sym->module)
parmse.force_tmp = 1;
}
if (e->expr_type == EXPR_VARIABLE
&& is_subref_array (e))
/* The actual argument is a component reference to an
array of derived types. In this case, the argument
is converted to a temporary, which is passed and then
written back after the procedure call. */
gfc_conv_subref_array_arg (&parmse, e, f,
fsym ? fsym->attr.intent : INTENT_INOUT,
fsym && fsym->attr.pointer);
else if (gfc_is_class_array_ref (e, NULL)
&& fsym && fsym->ts.type == BT_DERIVED)
/* The actual argument is a component reference to an
array of derived types. In this case, the argument
is converted to a temporary, which is passed and then
written back after the procedure call.
OOP-TODO: Insert code so that if the dynamic type is
the same as the declared type, copy-in/copy-out does
not occur. */
gfc_conv_subref_array_arg (&parmse, e, f,
fsym ? fsym->attr.intent : INTENT_INOUT,
fsym && fsym->attr.pointer);
else
gfc_conv_array_parameter (&parmse, e, f, fsym, sym->name, NULL);
/* If an ALLOCATABLE dummy argument has INTENT(OUT) and is
allocated on entry, it must be deallocated. */
if (fsym && fsym->attr.allocatable
&& fsym->attr.intent == INTENT_OUT)
{
tmp = build_fold_indirect_ref_loc (input_location,
parmse.expr);
tmp = gfc_trans_dealloc_allocated (tmp, false, e);
if (fsym->attr.optional
&& e->expr_type == EXPR_VARIABLE
&& e->symtree->n.sym->attr.optional)
tmp = fold_build3_loc (input_location, COND_EXPR,
void_type_node,
gfc_conv_expr_present (e->symtree->n.sym),
tmp, build_empty_stmt (input_location));
gfc_add_expr_to_block (&se->pre, tmp);
}
}
}
/* The case with fsym->attr.optional is that of a user subroutine
with an interface indicating an optional argument. When we call
an intrinsic subroutine, however, fsym is NULL, but we might still
have an optional argument, so we proceed to the substitution
just in case. */
if (e && (fsym == NULL || fsym->attr.optional))
{
/* If an optional argument is itself an optional dummy argument,
check its presence and substitute a null if absent. This is
only needed when passing an array to an elemental procedure,
as array elements are then accessed - or when no NULL pointer
is allowed and a "1" or "0" should be passed if not present.
When passing a non-array-descriptor full array to a
non-array-descriptor dummy, no check is needed. For
array-descriptor actual to array-descriptor dummy, see
PR 41911 for why a check has to be inserted.
fsym == NULL is checked as intrinsics require the descriptor
but do not always set fsym. */
if (e->expr_type == EXPR_VARIABLE
&& e->symtree->n.sym->attr.optional
&& ((e->rank != 0 && sym->attr.elemental)
|| e->representation.length || e->ts.type == BT_CHARACTER
|| (e->rank != 0
&& (fsym == NULL
|| (fsym->as
&& (fsym->as->type == AS_ASSUMED_SHAPE
|| fsym->as->type == AS_ASSUMED_RANK
|| fsym->as->type == AS_DEFERRED))))))
gfc_conv_missing_dummy (&parmse, e, fsym ? fsym->ts : e->ts,
e->representation.length);
}
if (fsym && e)
{
/* Obtain the character length of an assumed character length
procedure from the typespec. */
if (fsym->ts.type == BT_CHARACTER
&& parmse.string_length == NULL_TREE
&& e->ts.type == BT_PROCEDURE
&& e->symtree->n.sym->ts.type == BT_CHARACTER
&& e->symtree->n.sym->ts.u.cl->length != NULL
&& e->symtree->n.sym->ts.u.cl->length->expr_type == EXPR_CONSTANT)
{
gfc_conv_const_charlen (e->symtree->n.sym->ts.u.cl);
parmse.string_length = e->symtree->n.sym->ts.u.cl->backend_decl;
}
}
if (fsym && need_interface_mapping && e)
gfc_add_interface_mapping (&mapping, fsym, &parmse, e);
gfc_add_block_to_block (&se->pre, &parmse.pre);
gfc_add_block_to_block (&post, &parmse.post);
/* Allocated allocatable components of derived types must be
deallocated for non-variable scalars. Non-variable arrays are
dealt with in trans-array.c (gfc_conv_array_parameter). */
if (e && (e->ts.type == BT_DERIVED || e->ts.type == BT_CLASS)
&& e->ts.u.derived->attr.alloc_comp
&& !(e->symtree && e->symtree->n.sym->attr.pointer)
&& (e->expr_type != EXPR_VARIABLE && !e->rank))
{
int parm_rank;
tmp = build_fold_indirect_ref_loc (input_location,
parmse.expr);
parm_rank = e->rank;
switch (parm_kind)
{
case (ELEMENTAL):
case (SCALAR):
parm_rank = 0;
break;
case (SCALAR_POINTER):
tmp = build_fold_indirect_ref_loc (input_location,
tmp);
break;
}
if (e->expr_type == EXPR_OP
&& e->value.op.op == INTRINSIC_PARENTHESES
&& e->value.op.op1->expr_type == EXPR_VARIABLE)
{
tree local_tmp;
local_tmp = gfc_evaluate_now (tmp, &se->pre);
local_tmp = gfc_copy_alloc_comp (e->ts.u.derived, local_tmp, tmp, parm_rank);
gfc_add_expr_to_block (&se->post, local_tmp);
}
if (e->ts.type == BT_DERIVED && fsym && fsym->ts.type == BT_CLASS)
{
/* The derived type is passed to gfc_deallocate_alloc_comp.
Therefore, class actuals can be handled correctly but derived
types passed to class formals need the _data component. */
tmp = gfc_class_data_get (tmp);
if (!CLASS_DATA (fsym)->attr.dimension)
tmp = build_fold_indirect_ref_loc (input_location, tmp);
}
tmp = gfc_deallocate_alloc_comp (e->ts.u.derived, tmp, parm_rank);
gfc_add_expr_to_block (&se->post, tmp);
}
/* Add argument checking of passing an unallocated/NULL actual to
a nonallocatable/nonpointer dummy. */
if (gfc_option.rtcheck & GFC_RTCHECK_POINTER && e != NULL)
{
symbol_attribute attr;
char *msg;
tree cond;
if (e->expr_type == EXPR_VARIABLE || e->expr_type == EXPR_FUNCTION)
attr = gfc_expr_attr (e);
else
goto end_pointer_check;
/* In Fortran 2008 it's allowed to pass a NULL pointer/nonallocated
allocatable to an optional dummy, cf. 12.5.2.12. */
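/* For example, "call sub (null ())" on a nonpointer optional dummy is
valid Fortran 2008; the dummy is then simply treated as absent. */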
if (fsym != NULL && fsym->attr.optional && !attr.proc_pointer
&& (gfc_option.allow_std & GFC_STD_F2008) != 0)
goto end_pointer_check;
if (attr.optional)
{
/* If the actual argument is an optional pointer/allocatable and
the formal argument takes a nonpointer optional value,
it is invalid to pass a non-present argument on, even
though there is no technical reason for this in gfortran.
See Fortran 2003, Section 12.4.1.6 item (7)+(8). */
tree present, null_ptr, type;
if (attr.allocatable
&& (fsym == NULL || !fsym->attr.allocatable))
asprintf (&msg, "Allocatable actual argument '%s' is not "
"allocated or not present", e->symtree->n.sym->name);
else if (attr.pointer
&& (fsym == NULL || !fsym->attr.pointer))
asprintf (&msg, "Pointer actual argument '%s' is not "
"associated or not present",
e->symtree->n.sym->name);
else if (attr.proc_pointer
&& (fsym == NULL || !fsym->attr.proc_pointer))
asprintf (&msg, "Proc-pointer actual argument '%s' is not "
"associated or not present",
e->symtree->n.sym->name);
else
goto end_pointer_check;
present = gfc_conv_expr_present (e->symtree->n.sym);
type = TREE_TYPE (present);
present = fold_build2_loc (input_location, EQ_EXPR,
boolean_type_node, present,
fold_convert (type,
null_pointer_node));
type = TREE_TYPE (parmse.expr);
null_ptr = fold_build2_loc (input_location, EQ_EXPR,
boolean_type_node, parmse.expr,
fold_convert (type,
null_pointer_node));
cond = fold_build2_loc (input_location, TRUTH_ORIF_EXPR,
boolean_type_node, present, null_ptr);
}
else
{
if (attr.allocatable
&& (fsym == NULL || !fsym->attr.allocatable))
asprintf (&msg, "Allocatable actual argument '%s' is not "
"allocated", e->symtree->n.sym->name);
else if (attr.pointer
&& (fsym == NULL || !fsym->attr.pointer))
asprintf (&msg, "Pointer actual argument '%s' is not "
"associated", e->symtree->n.sym->name);
else if (attr.proc_pointer
&& (fsym == NULL || !fsym->attr.proc_pointer))
asprintf (&msg, "Proc-pointer actual argument '%s' is not "
"associated", e->symtree->n.sym->name);
else
goto end_pointer_check;
tmp = parmse.expr;
/* If the argument is passed by value, we need to strip the
INDIRECT_REF. */
if (!POINTER_TYPE_P (TREE_TYPE (parmse.expr)))
tmp = gfc_build_addr_expr (NULL_TREE, tmp);
cond = fold_build2_loc (input_location, EQ_EXPR,
boolean_type_node, tmp,
fold_convert (TREE_TYPE (tmp),
null_pointer_node));
}
gfc_trans_runtime_check (true, false, cond, &se->pre, &e->where,
msg);
free (msg);
}
end_pointer_check:
/* Deferred length dummies pass the character length by reference
so that the value can be returned. */
if (parmse.string_length && fsym && fsym->ts.deferred)
{
tmp = parmse.string_length;
if (TREE_CODE (tmp) != VAR_DECL)
tmp = gfc_evaluate_now (parmse.string_length, &se->pre);
parmse.string_length = gfc_build_addr_expr (NULL_TREE, tmp);
}
/* Character strings are passed as two parameters, a length and a
pointer - except for Bind(c) which only passes the pointer.
An unlimited polymorphic formal argument likewise does not
need the length. */
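/* E.g. a call "call sub (str)" with "character(len=n) :: str" is lowered
roughly to "sub (str, n)": the hidden length collected here is spliced
in after the regular arguments further below. */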
if (parmse.string_length != NULL_TREE
&& !sym->attr.is_bind_c
&& !(fsym && UNLIMITED_POLY (fsym)))
vec_safe_push (stringargs, parmse.string_length);
/* When calling __copy for character expressions to unlimited
polymorphic entities, the dst argument needs a string length. */
if (sym->name[0] == '_' && e && e->ts.type == BT_CHARACTER
&& strncmp (sym->name, "__vtab_CHARACTER", 16) == 0
&& arg->next && arg->next->expr
&& arg->next->expr->ts.type == BT_DERIVED
&& arg->next->expr->ts.u.derived->attr.unlimited_polymorphic)
vec_safe_push (stringargs, parmse.string_length);
/* For descriptorless coarrays and assumed-shape coarray dummies, we
pass the token and the offset as additional arguments. */
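/* Illustrative lowering under -fcoarray=lib: "call sub (ca)" for a
coarray dummy passes the library token and the offset of the actual
within its segment as extra trailing arguments (collected here in
STRINGARGS alongside the hidden string lengths). */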
if (fsym && fsym->attr.codimension
&& gfc_option.coarray == GFC_FCOARRAY_LIB
&& !fsym->attr.allocatable
&& e == NULL)
{
/* Token and offset. */
vec_safe_push (stringargs, null_pointer_node);
vec_safe_push (stringargs, build_int_cst (gfc_array_index_type, 0));
gcc_assert (fsym->attr.optional);
}
else if (fsym && fsym->attr.codimension
&& !fsym->attr.allocatable
&& gfc_option.coarray == GFC_FCOARRAY_LIB)
{
tree caf_decl, caf_type;
tree offset, tmp2;
caf_decl = get_tree_for_caf_expr (e);
caf_type = TREE_TYPE (caf_decl);
if (GFC_DESCRIPTOR_TYPE_P (caf_type)
&& GFC_TYPE_ARRAY_AKIND (caf_type) == GFC_ARRAY_ALLOCATABLE)
tmp = gfc_conv_descriptor_token (caf_decl);
else if (DECL_LANG_SPECIFIC (caf_decl)
&& GFC_DECL_TOKEN (caf_decl) != NULL_TREE)
tmp = GFC_DECL_TOKEN (caf_decl);
else
{
gcc_assert (GFC_ARRAY_TYPE_P (caf_type)
&& GFC_TYPE_ARRAY_CAF_TOKEN (caf_type) != NULL_TREE);
tmp = GFC_TYPE_ARRAY_CAF_TOKEN (caf_type);
}
vec_safe_push (stringargs, tmp);
if (GFC_DESCRIPTOR_TYPE_P (caf_type)
&& GFC_TYPE_ARRAY_AKIND (caf_type) == GFC_ARRAY_ALLOCATABLE)
offset = build_int_cst (gfc_array_index_type, 0);
else if (DECL_LANG_SPECIFIC (caf_decl)
&& GFC_DECL_CAF_OFFSET (caf_decl) != NULL_TREE)
offset = GFC_DECL_CAF_OFFSET (caf_decl);
else if (GFC_TYPE_ARRAY_CAF_OFFSET (caf_type) != NULL_TREE)
offset = GFC_TYPE_ARRAY_CAF_OFFSET (caf_type);
else
offset = build_int_cst (gfc_array_index_type, 0);
if (GFC_DESCRIPTOR_TYPE_P (caf_type))
tmp = gfc_conv_descriptor_data_get (caf_decl);
else
{
gcc_assert (POINTER_TYPE_P (caf_type));
tmp = caf_decl;
}
if (fsym->as->type == AS_ASSUMED_SHAPE
|| (fsym->as->type == AS_ASSUMED_RANK && !fsym->attr.pointer
&& !fsym->attr.allocatable))
{
gcc_assert (POINTER_TYPE_P (TREE_TYPE (parmse.expr)));
gcc_assert (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE
(TREE_TYPE (parmse.expr))));
tmp2 = build_fold_indirect_ref_loc (input_location, parmse.expr);
tmp2 = gfc_conv_descriptor_data_get (tmp2);
}
else if (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (parmse.expr)))
tmp2 = gfc_conv_descriptor_data_get (parmse.expr);
else
{
gcc_assert (POINTER_TYPE_P (TREE_TYPE (parmse.expr)));
tmp2 = parmse.expr;
}
tmp = fold_build2_loc (input_location, MINUS_EXPR,
gfc_array_index_type,
fold_convert (gfc_array_index_type, tmp2),
fold_convert (gfc_array_index_type, tmp));
offset = fold_build2_loc (input_location, PLUS_EXPR,
gfc_array_index_type, offset, tmp);
vec_safe_push (stringargs, offset);
}
vec_safe_push (arglist, parmse.expr);
}
gfc_finish_interface_mapping (&mapping, &se->pre, &se->post);
if (comp)
ts = comp->ts;
else
ts = sym->ts;
if (ts.type == BT_CHARACTER && sym->attr.is_bind_c)
se->string_length = build_int_cst (gfc_charlen_type_node, 1);
else if (ts.type == BT_CHARACTER)
{
if (ts.u.cl->length == NULL)
{
/* Assumed character length results are not allowed by 5.1.1.5 of the
standard and are trapped in resolve.c; except in the case of SPREAD
(and other intrinsics?) and dummy functions. In the case of SPREAD,
we take the character length of the first argument for the result.
For dummies, we have to look through the formal argument list for
this function and use the character length found there. */
if (ts.deferred)
cl.backend_decl = gfc_create_var (gfc_charlen_type_node, "slen");
else if (!sym->attr.dummy)
cl.backend_decl = (*stringargs)[0];
else
{
formal = gfc_sym_get_dummy_args (sym->ns->proc_name);
for (; formal; formal = formal->next)
if (strcmp (formal->sym->name, sym->name) == 0)
cl.backend_decl = formal->sym->ts.u.cl->backend_decl;
}
len = cl.backend_decl;
}
else
{
tree tmp;
/* Calculate the length of the returned string. */
gfc_init_se (&parmse, NULL);
if (need_interface_mapping)
gfc_apply_interface_mapping (&mapping, &parmse, ts.u.cl->length);
else
gfc_conv_expr (&parmse, ts.u.cl->length);
gfc_add_block_to_block (&se->pre, &parmse.pre);
gfc_add_block_to_block (&se->post, &parmse.post);
tmp = fold_convert (gfc_charlen_type_node, parmse.expr);
tmp = fold_build2_loc (input_location, MAX_EXPR,
gfc_charlen_type_node, tmp,
build_int_cst (gfc_charlen_type_node, 0));
cl.backend_decl = tmp;
}
/* Set up a charlen structure for it. */
cl.next = NULL;
cl.length = NULL;
ts.u.cl = &cl;
len = cl.backend_decl;
}
byref = (comp && (comp->attr.dimension || comp->ts.type == BT_CHARACTER))
|| (!comp && gfc_return_by_reference (sym));
if (byref)
{
if (se->direct_byref)
{
/* Sometimes, too much indirection can be applied; e.g. for
function_result = array_valued_recursive_function. */
if (TREE_TYPE (TREE_TYPE (se->expr))
&& TREE_TYPE (TREE_TYPE (TREE_TYPE (se->expr)))
&& GFC_DESCRIPTOR_TYPE_P
(TREE_TYPE (TREE_TYPE (TREE_TYPE (se->expr)))))
se->expr = build_fold_indirect_ref_loc (input_location,
se->expr);
/* If the lhs of an assignment x = f(..) is allocatable and
f2003 is allowed, we must do the automatic reallocation.
TODO - deal with intrinsics, without using a temporary. */
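/* E.g. "a = f (n)" with allocatable A under -frealloc-lhs (gfortran's
default for Fortran 2003 and later) reallocates A to the shape of the
result before its descriptor is passed by reference. */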
if (gfc_option.flag_realloc_lhs
&& se->ss && se->ss->loop_chain
&& se->ss->loop_chain->is_alloc_lhs
&& !expr->value.function.isym
&& sym->result->as != NULL)
{
/* Evaluate the bounds of the result, if known. */
gfc_set_loop_bounds_from_array_spec (&mapping, se,
sym->result->as);
/* Perform the automatic reallocation. */
tmp = gfc_alloc_allocatable_for_assignment (se->loop,
expr, NULL);
gfc_add_expr_to_block (&se->pre, tmp);
/* Pass the temporary as the first argument. */
result = info->descriptor;
}
else
result = build_fold_indirect_ref_loc (input_location,
se->expr);
vec_safe_push (retargs, se->expr);
}
else if (comp && comp->attr.dimension)
{
gcc_assert (se->loop && info);
/* Set the type of the array. */
tmp = gfc_typenode_for_spec (&comp->ts);
gcc_assert (se->ss->dimen == se->loop->dimen);
/* Evaluate the bounds of the result, if known. */
gfc_set_loop_bounds_from_array_spec (&mapping, se, comp->as);
/* If the lhs of an assignment x = f(..) is allocatable and
f2003 is allowed, we must not generate the function call
here but should just send back the results of the mapping.
This is signalled by the function ss being flagged. */
if (gfc_option.flag_realloc_lhs
&& se->ss && se->ss->is_alloc_lhs)
{
gfc_free_interface_mapping (&mapping);
return has_alternate_specifier;
}
/* Create a temporary to store the result. In case the function
returns a pointer, the temporary will be a shallow copy and
mustn't be deallocated. */
callee_alloc = comp->attr.allocatable || comp->attr.pointer;
gfc_trans_create_temp_array (&se->pre, &se->post, se->ss,
tmp, NULL_TREE, false,
!comp->attr.pointer, callee_alloc,
&se->ss->info->expr->where);
/* Pass the temporary as the first argument. */
result = info->descriptor;
tmp = gfc_build_addr_expr (NULL_TREE, result);
vec_safe_push (retargs, tmp);
}
else if (!comp && sym->result->attr.dimension)
{
gcc_assert (se->loop && info);
/* Set the type of the array. */
tmp = gfc_typenode_for_spec (&ts);
gcc_assert (se->ss->dimen == se->loop->dimen);
/* Evaluate the bounds of the result, if known. */
gfc_set_loop_bounds_from_array_spec (&mapping, se, sym->result->as);
/* If the lhs of an assignment x = f(..) is allocatable and
f2003 is allowed, we must not generate the function call
here but should just send back the results of the mapping.
This is signalled by the function ss being flagged. */
if (gfc_option.flag_realloc_lhs
&& se->ss && se->ss->is_alloc_lhs)
{
gfc_free_interface_mapping (&mapping);
return has_alternate_specifier;
}
/* Create a temporary to store the result. In case the function
returns a pointer, the temporary will be a shallow copy and
mustn't be deallocated. */
callee_alloc = sym->attr.allocatable || sym->attr.pointer;
gfc_trans_create_temp_array (&se->pre, &se->post, se->ss,
tmp, NULL_TREE, false,
!sym->attr.pointer, callee_alloc,
&se->ss->info->expr->where);
/* Pass the temporary as the first argument. */
result = info->descriptor;
tmp = gfc_build_addr_expr (NULL_TREE, result);
vec_safe_push (retargs, tmp);
}
else if (ts.type == BT_CHARACTER)
{
/* Pass the string length. */
type = gfc_get_character_type (ts.kind, ts.u.cl);
type = build_pointer_type (type);
/* Return an address to a char[0:len-1]* temporary for
character pointers. */
if ((!comp && (sym->attr.pointer || sym->attr.allocatable))
|| (comp && (comp->attr.pointer || comp->attr.allocatable)))
{
var = gfc_create_var (type, "pstr");
if ((!comp && sym->attr.allocatable)
|| (comp && comp->attr.allocatable))
{
gfc_add_modify (&se->pre, var,
fold_convert (TREE_TYPE (var),
null_pointer_node));
tmp = gfc_call_free (convert (pvoid_type_node, var));
gfc_add_expr_to_block (&se->post, tmp);
}
/* Provide an address expression for the function arguments. */
var = gfc_build_addr_expr (NULL_TREE, var);
}
else
var = gfc_conv_string_tmp (se, type, len);
vec_safe_push (retargs, var);
}
else
{
gcc_assert (gfc_option.flag_f2c && ts.type == BT_COMPLEX);
type = gfc_get_complex_type (ts.kind);
var = gfc_build_addr_expr (NULL_TREE, gfc_create_var (type, "cmplx"));
vec_safe_push (retargs, var);
}
/* Add the string length to the argument list. */
if (ts.type == BT_CHARACTER && ts.deferred)
{
tmp = len;
if (TREE_CODE (tmp) != VAR_DECL)
tmp = gfc_evaluate_now (len, &se->pre);
tmp = gfc_build_addr_expr (NULL_TREE, tmp);
vec_safe_push (retargs, tmp);
}
else if (ts.type == BT_CHARACTER)
vec_safe_push (retargs, len);
}
gfc_free_interface_mapping (&mapping);
/* We need to glom RETARGS + ARGLIST + STRINGARGS + APPEND_ARGS. */
arglen = (vec_safe_length (arglist) + vec_safe_length (optionalargs)
+ vec_safe_length (stringargs) + vec_safe_length (append_args));
vec_safe_reserve (retargs, arglen);
/* Add the return arguments. */
retargs->splice (arglist);
/* Add the hidden present status for optional+value to the arguments. */
retargs->splice (optionalargs);
/* Add the hidden string length parameters to the arguments. */
retargs->splice (stringargs);
/* We may want to append extra arguments here. This is used e.g. for
calls to libgfortran_matmul_??, which need extra information. */
if (!vec_safe_is_empty (append_args))
retargs->splice (append_args);
arglist = retargs;
/* Generate the actual call. */
if (base_object == NULL_TREE)
conv_function_val (se, sym, expr);
else
conv_base_obj_fcn_val (se, base_object, expr);
/* If there are alternate return labels, function type should be
integer. Can't modify the type in place though, since it can be shared
with other functions. For dummy arguments, the typing is done to
this result, even if it has to be repeated for each call. */
if (has_alternate_specifier
&& TREE_TYPE (TREE_TYPE (TREE_TYPE (se->expr))) != integer_type_node)
{
if (!sym->attr.dummy)
{
TREE_TYPE (sym->backend_decl)
= build_function_type (integer_type_node,
TYPE_ARG_TYPES (TREE_TYPE (sym->backend_decl)));
se->expr = gfc_build_addr_expr (NULL_TREE, sym->backend_decl);
}
else
TREE_TYPE (TREE_TYPE (TREE_TYPE (se->expr))) = integer_type_node;
}
fntype = TREE_TYPE (TREE_TYPE (se->expr));
se->expr = build_call_vec (TREE_TYPE (fntype), se->expr, arglist);
/* If we have a pointer function, but we don't want a pointer, e.g.
something like
x = f()
where f is pointer valued, we have to dereference the result. */
if (!se->want_pointer && !byref
&& ((!comp && (sym->attr.pointer || sym->attr.allocatable))
|| (comp && (comp->attr.pointer || comp->attr.allocatable))))
se->expr = build_fold_indirect_ref_loc (input_location, se->expr);
/* f2c calling conventions require a scalar default real function to
return a double precision result. Convert this back to default
real. We only care about the cases that can happen in Fortran 77.
*/
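/* E.g. under -ff2c a "real function f ()" returns a C double; the call
result is narrowed back to the default real kind here. */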
if (gfc_option.flag_f2c && sym->ts.type == BT_REAL
&& sym->ts.kind == gfc_default_real_kind
&& !sym->attr.always_explicit)
se->expr = fold_convert (gfc_get_real_type (sym->ts.kind), se->expr);
/* A pure function may still have side-effects - it may modify its
parameters. */
TREE_SIDE_EFFECTS (se->expr) = 1;
#if 0
if (!sym->attr.pure)
TREE_SIDE_EFFECTS (se->expr) = 1;
#endif
if (byref)
{
/* Add the function call to the pre chain. There is no expression. */
gfc_add_expr_to_block (&se->pre, se->expr);
se->expr = NULL_TREE;
if (!se->direct_byref)
{
if ((sym->attr.dimension && !comp) || (comp && comp->attr.dimension))
{
if (gfc_option.rtcheck & GFC_RTCHECK_BOUNDS)
{
/* Check the data pointer hasn't been modified. This would
happen in a function returning a pointer. */
tmp = gfc_conv_descriptor_data_get (info->descriptor);
tmp = fold_build2_loc (input_location, NE_EXPR,
boolean_type_node,
tmp, info->data);
gfc_trans_runtime_check (true, false, tmp, &se->pre, NULL,
gfc_msg_fault);
}
se->expr = info->descriptor;
/* Bundle in the string length. */
se->string_length = len;
}
else if (ts.type == BT_CHARACTER)
{
/* Dereference for character pointer results. */
if ((!comp && (sym->attr.pointer || sym->attr.allocatable))
|| (comp && (comp->attr.pointer || comp->attr.allocatable)))
se->expr = build_fold_indirect_ref_loc (input_location, var);
else
se->expr = var;
se->string_length = len;
}
else
{
gcc_assert (ts.type == BT_COMPLEX && gfc_option.flag_f2c);
se->expr = build_fold_indirect_ref_loc (input_location, var);
}
}
}
/* Follow the function call with the argument post block. */
if (byref)
{
gfc_add_block_to_block (&se->pre, &post);
/* Transformational functions of derived types with allocatable
components must have the result allocatable components copied. */
arg = expr->value.function.actual;
if (result && arg && expr->rank
&& expr->value.function.isym
&& expr->value.function.isym->transformational
&& arg->expr->ts.type == BT_DERIVED
&& arg->expr->ts.u.derived->attr.alloc_comp)
{
tree tmp2;
/* Copy the allocatable components. We have to use a
temporary here to prevent source allocatable components
from being corrupted. */
tmp2 = gfc_evaluate_now (result, &se->pre);
tmp = gfc_copy_alloc_comp (arg->expr->ts.u.derived,
result, tmp2, expr->rank);
gfc_add_expr_to_block (&se->pre, tmp);
tmp = gfc_copy_allocatable_data (result, tmp2, TREE_TYPE (tmp2),
expr->rank);
gfc_add_expr_to_block (&se->pre, tmp);
/* Finally free the temporary's data field. */
tmp = gfc_conv_descriptor_data_get (tmp2);
tmp = gfc_deallocate_with_status (tmp, NULL_TREE, NULL_TREE,
NULL_TREE, NULL_TREE, true,
NULL, false);
gfc_add_expr_to_block (&se->pre, tmp);
}
}
else
gfc_add_block_to_block (&se->post, &post);
return has_alternate_specifier;
}
/* Fill a character string with spaces. */
static tree
fill_with_spaces (tree start, tree type, tree size)
{
stmtblock_t block, loop;
tree i, el, exit_label, cond, tmp;
/* For a simple char type, we can call memset(). */
if (compare_tree_int (TYPE_SIZE_UNIT (type), 1) == 0)
return build_call_expr_loc (input_location,
builtin_decl_explicit (BUILT_IN_MEMSET),
3, start,
build_int_cst (gfc_get_int_type (gfc_c_int_kind),
lang_hooks.to_target_charset (' ')),
size);
/* Otherwise, we use a loop:
for (el = start, i = size; i > 0; el++, i -= TYPE_SIZE_UNIT (type))
*el = (type) ' ';
*/
/* Initialize variables. */
gfc_init_block (&block);
i = gfc_create_var (sizetype, "i");
gfc_add_modify (&block, i, fold_convert (sizetype, size));
el = gfc_create_var (build_pointer_type (type), "el");
gfc_add_modify (&block, el, fold_convert (TREE_TYPE (el), start));
exit_label = gfc_build_label_decl (NULL_TREE);
TREE_USED (exit_label) = 1;
/* Loop body. */
gfc_init_block (&loop);
/* Exit condition. */
cond = fold_build2_loc (input_location, LE_EXPR, boolean_type_node, i,
build_zero_cst (sizetype));
tmp = build1_v (GOTO_EXPR, exit_label);
tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node, cond, tmp,
build_empty_stmt (input_location));
gfc_add_expr_to_block (&loop, tmp);
/* Assignment. */
gfc_add_modify (&loop,
fold_build1_loc (input_location, INDIRECT_REF, type, el),
build_int_cst (type, lang_hooks.to_target_charset (' ')));
/* Increment loop variables. */
gfc_add_modify (&loop, i,
fold_build2_loc (input_location, MINUS_EXPR, sizetype, i,
TYPE_SIZE_UNIT (type)));
gfc_add_modify (&loop, el,
fold_build_pointer_plus_loc (input_location,
el, TYPE_SIZE_UNIT (type)));
/* Making the loop... actually loop! */
tmp = gfc_finish_block (&loop);
tmp = build1_v (LOOP_EXPR, tmp);
gfc_add_expr_to_block (&block, tmp);
/* The exit label. */
tmp = build1_v (LABEL_EXPR, exit_label);
gfc_add_expr_to_block (&block, tmp);
return gfc_finish_block (&block);
}
/* Generate code to copy a string. */
void
gfc_trans_string_copy (stmtblock_t * block, tree dlength, tree dest,
int dkind, tree slength, tree src, int skind)
{
tree tmp, dlen, slen;
tree dsc;
tree ssc;
tree cond;
tree cond2;
tree tmp2;
tree tmp3;
tree tmp4;
tree chartype;
stmtblock_t tempblock;
gcc_assert (dkind == skind);
if (slength != NULL_TREE)
{
slen = fold_convert (size_type_node, gfc_evaluate_now (slength, block));
ssc = gfc_string_to_single_character (slen, src, skind);
}
else
{
slen = build_int_cst (size_type_node, 1);
ssc = src;
}
if (dlength != NULL_TREE)
{
dlen = fold_convert (size_type_node, gfc_evaluate_now (dlength, block));
dsc = gfc_string_to_single_character (dlen, dest, dkind);
}
else
{
dlen = build_int_cst (size_type_node, 1);
dsc = dest;
}
/* Assign directly if the types are compatible. */
if (dsc != NULL_TREE && ssc != NULL_TREE
&& TREE_TYPE (dsc) == TREE_TYPE (ssc))
{
gfc_add_modify (block, dsc, ssc);
return;
}
/* Do nothing if the destination length is zero. */
cond = fold_build2_loc (input_location, GT_EXPR, boolean_type_node, dlen,
build_int_cst (size_type_node, 0));
/* The following code was previously in _gfortran_copy_string:
// The two strings may overlap so we use memmove.
void
copy_string (GFC_INTEGER_4 destlen, char * dest,
GFC_INTEGER_4 srclen, const char * src)
{
if (srclen >= destlen)
{
// This will truncate if too long.
memmove (dest, src, destlen);
}
else
{
memmove (dest, src, srclen);
// Pad with spaces.
memset (&dest[srclen], ' ', destlen - srclen);
}
}
We're now doing it here for better optimization, but the logic
is the same. */
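/* Net effect, e.g. for "character(4) :: d":  d = "abcdef" stores "abcd"
(truncated), while d = "ab" stores "ab  " (blank padded). */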
/* For non-default character kinds, we have to multiply the string
length by the base type size. */
chartype = gfc_get_char_type (dkind);
slen = fold_build2_loc (input_location, MULT_EXPR, size_type_node,
fold_convert (size_type_node, slen),
fold_convert (size_type_node,
TYPE_SIZE_UNIT (chartype)));
dlen = fold_build2_loc (input_location, MULT_EXPR, size_type_node,
fold_convert (size_type_node, dlen),
fold_convert (size_type_node,
TYPE_SIZE_UNIT (chartype)));
if (dlength && POINTER_TYPE_P (TREE_TYPE (dest)))
dest = fold_convert (pvoid_type_node, dest);
else
dest = gfc_build_addr_expr (pvoid_type_node, dest);
if (slength && POINTER_TYPE_P (TREE_TYPE (src)))
src = fold_convert (pvoid_type_node, src);
else
src = gfc_build_addr_expr (pvoid_type_node, src);
/* Truncate string if source is too long. */
cond2 = fold_build2_loc (input_location, GE_EXPR, boolean_type_node, slen,
dlen);
tmp2 = build_call_expr_loc (input_location,
builtin_decl_explicit (BUILT_IN_MEMMOVE),
3, dest, src, dlen);
/* Else copy and pad with spaces. */
tmp3 = build_call_expr_loc (input_location,
builtin_decl_explicit (BUILT_IN_MEMMOVE),
3, dest, src, slen);
tmp4 = fold_build_pointer_plus_loc (input_location, dest, slen);
tmp4 = fill_with_spaces (tmp4, chartype,
fold_build2_loc (input_location, MINUS_EXPR,
TREE_TYPE (dlen), dlen, slen));
gfc_init_block (&tempblock);
gfc_add_expr_to_block (&tempblock, tmp3);
gfc_add_expr_to_block (&tempblock, tmp4);
tmp3 = gfc_finish_block (&tempblock);
/* The two branches together give the whole copy_string logic above. */
tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node, cond2,
tmp2, tmp3);
tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node, cond, tmp,
build_empty_stmt (input_location));
gfc_add_expr_to_block (block, tmp);
}
/* Translate a statement function.
The value of a statement function reference is obtained by evaluating the
expression using the values of the actual arguments for the values of the
corresponding dummy arguments. */
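/* Illustrative example:
     f (x) = x * x + 1.0
     y = f (3.0)
   Each dummy (here X) is shadowed below by a temporary holding the value
   of the corresponding actual argument while SYM->VALUE is translated. */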
static void
gfc_conv_statement_function (gfc_se * se, gfc_expr * expr)
{
gfc_symbol *sym;
gfc_symbol *fsym;
gfc_formal_arglist *fargs;
gfc_actual_arglist *args;
gfc_se lse;
gfc_se rse;
gfc_saved_var *saved_vars;
tree *temp_vars;
tree type;
tree tmp;
int n;
sym = expr->symtree->n.sym;
args = expr->value.function.actual;
gfc_init_se (&lse, NULL);
gfc_init_se (&rse, NULL);
n = 0;
for (fargs = gfc_sym_get_dummy_args (sym); fargs; fargs = fargs->next)
n++;
saved_vars = XCNEWVEC (gfc_saved_var, n);
temp_vars = XCNEWVEC (tree, n);
for (fargs = gfc_sym_get_dummy_args (sym), n = 0; fargs;
fargs = fargs->next, n++)
{
/* Each dummy shall be specified, explicitly or implicitly, to be
scalar. */
gcc_assert (fargs->sym->attr.dimension == 0);
fsym = fargs->sym;
if (fsym->ts.type == BT_CHARACTER)
{
/* Copy string arguments. */
tree arglen;
gcc_assert (fsym->ts.u.cl && fsym->ts.u.cl->length
&& fsym->ts.u.cl->length->expr_type == EXPR_CONSTANT);
/* Create a temporary to hold the value. */
if (fsym->ts.u.cl->backend_decl == NULL_TREE)
fsym->ts.u.cl->backend_decl
= gfc_conv_constant_to_tree (fsym->ts.u.cl->length);
type = gfc_get_character_type (fsym->ts.kind, fsym->ts.u.cl);
temp_vars[n] = gfc_create_var (type, fsym->name);
arglen = TYPE_MAX_VALUE (TYPE_DOMAIN (type));
gfc_conv_expr (&rse, args->expr);
gfc_conv_string_parameter (&rse);
gfc_add_block_to_block (&se->pre, &lse.pre);
gfc_add_block_to_block (&se->pre, &rse.pre);
gfc_trans_string_copy (&se->pre, arglen, temp_vars[n], fsym->ts.kind,
rse.string_length, rse.expr, fsym->ts.kind);
gfc_add_block_to_block (&se->pre, &lse.post);
gfc_add_block_to_block (&se->pre, &rse.post);
}
else
{
/* For everything else, just evaluate the expression. */
/* Create a temporary to hold the value. */
type = gfc_typenode_for_spec (&fsym->ts);
temp_vars[n] = gfc_create_var (type, fsym->name);
gfc_conv_expr (&lse, args->expr);
gfc_add_block_to_block (&se->pre, &lse.pre);
gfc_add_modify (&se->pre, temp_vars[n], lse.expr);
gfc_add_block_to_block (&se->pre, &lse.post);
}
args = args->next;
}
/* Use the temporary variables in place of the real ones. */
for (fargs = gfc_sym_get_dummy_args (sym), n = 0; fargs;
fargs = fargs->next, n++)
gfc_shadow_sym (fargs->sym, temp_vars[n], &saved_vars[n]);
gfc_conv_expr (se, sym->value);
if (sym->ts.type == BT_CHARACTER)
{
gfc_conv_const_charlen (sym->ts.u.cl);
/* Force the expression to the correct length. */
if (!INTEGER_CST_P (se->string_length)
|| tree_int_cst_lt (se->string_length,
sym->ts.u.cl->backend_decl))
{
type = gfc_get_character_type (sym->ts.kind, sym->ts.u.cl);
tmp = gfc_create_var (type, sym->name);
tmp = gfc_build_addr_expr (build_pointer_type (type), tmp);
gfc_trans_string_copy (&se->pre, sym->ts.u.cl->backend_decl, tmp,
sym->ts.kind, se->string_length, se->expr,
sym->ts.kind);
se->expr = tmp;
}
se->string_length = sym->ts.u.cl->backend_decl;
}
/* Restore the original variables. */
for (fargs = gfc_sym_get_dummy_args (sym), n = 0; fargs;
fargs = fargs->next, n++)
gfc_restore_sym (fargs->sym, &saved_vars[n]);
free (temp_vars);
free (saved_vars);
}
/* Translate a function expression. */
static void
gfc_conv_function_expr (gfc_se * se, gfc_expr * expr)
{
gfc_symbol *sym;
if (expr->value.function.isym)
{
gfc_conv_intrinsic_function (se, expr);
return;
}
/* expr.value.function.esym is the resolved (specific) function symbol for
most functions. However this isn't set for dummy procedures. */
sym = expr->value.function.esym;
if (!sym)
sym = expr->symtree->n.sym;
/* We distinguish statement functions from general functions to improve
runtime performance. */
if (sym->attr.proc == PROC_ST_FUNCTION)
{
gfc_conv_statement_function (se, expr);
return;
}
gfc_conv_procedure_call (se, sym, expr->value.function.actual, expr,
NULL);
}
/* Determine whether the given EXPR_CONSTANT is a zero initializer. */
static bool
is_zero_initializer_p (gfc_expr * expr)
{
if (expr->expr_type != EXPR_CONSTANT)
return false;
/* We ignore constants with prescribed memory representations for now. */
if (expr->representation.string)
return false;
switch (expr->ts.type)
{
case BT_INTEGER:
return mpz_cmp_si (expr->value.integer, 0) == 0;
case BT_REAL:
return mpfr_zero_p (expr->value.real)
&& MPFR_SIGN (expr->value.real) >= 0;
case BT_LOGICAL:
return expr->value.logical == 0;
case BT_COMPLEX:
return mpfr_zero_p (mpc_realref (expr->value.complex))
&& MPFR_SIGN (mpc_realref (expr->value.complex)) >= 0
&& mpfr_zero_p (mpc_imagref (expr->value.complex))
&& MPFR_SIGN (mpc_imagref (expr->value.complex)) >= 0;
default:
break;
}
return false;
}
static void
gfc_conv_array_constructor_expr (gfc_se * se, gfc_expr * expr)
{
gfc_ss *ss;
ss = se->ss;
gcc_assert (ss != NULL && ss != gfc_ss_terminator);
gcc_assert (ss->info->expr == expr && ss->info->type == GFC_SS_CONSTRUCTOR);
gfc_conv_tmp_array_ref (se);
}
/* Build a static initializer. EXPR is the expression for the initial value.
The other parameters describe the variable or the component being
initialized. EXPR may be null. */
tree
gfc_conv_initializer (gfc_expr * expr, gfc_typespec * ts, tree type,
bool array, bool pointer, bool procptr)
{
gfc_se se;
if (!(expr || pointer || procptr))
return NULL_TREE;
/* Check if we have ISOCBINDING_NULL_PTR or ISOCBINDING_NULL_FUNPTR
(these are the only two iso_c_binding derived types that can be
used as initialization expressions). If so, we need to modify
the 'expr' to be that for a (void *). */
if (expr != NULL && expr->ts.type == BT_DERIVED
&& expr->ts.is_iso_c && expr->ts.u.derived)
{
gfc_symbol *derived = expr->ts.u.derived;
/* The derived symbol has already been converted to a (void *). Use
its kind. */
expr = gfc_get_int_expr (derived->ts.kind, NULL, 0);
expr->ts.f90_type = derived->ts.f90_type;
gfc_init_se (&se, NULL);
gfc_conv_constant (&se, expr);
gcc_assert (TREE_CODE (se.expr) != CONSTRUCTOR);
return se.expr;
}
if (array && !procptr)
{
tree ctor;
/* Arrays need special handling. */
if (pointer)
ctor = gfc_build_null_descriptor (type);
/* Special case assigning an array to zero. */
else if (is_zero_initializer_p (expr))
ctor = build_constructor (type, NULL);
else
ctor = gfc_conv_array_initializer (type, expr);
TREE_STATIC (ctor) = 1;
return ctor;
}
else if (pointer || procptr)
{
if (ts->type == BT_CLASS && !procptr)
{
gfc_init_se (&se, NULL);
gfc_conv_structure (&se, gfc_class_initializer (ts, expr), 1);
gcc_assert (TREE_CODE (se.expr) == CONSTRUCTOR);
TREE_STATIC (se.expr) = 1;
return se.expr;
}
else if (!expr || expr->expr_type == EXPR_NULL)
return fold_convert (type, null_pointer_node);
else
{
gfc_init_se (&se, NULL);
se.want_pointer = 1;
gfc_conv_expr (&se, expr);
gcc_assert (TREE_CODE (se.expr) != CONSTRUCTOR);
return se.expr;
}
}
else
{
switch (ts->type)
{
case BT_DERIVED:
case BT_CLASS:
gfc_init_se (&se, NULL);
if (ts->type == BT_CLASS && expr->expr_type == EXPR_NULL)
gfc_conv_structure (&se, gfc_class_initializer (ts, expr), 1);
else
gfc_conv_structure (&se, expr, 1);
gcc_assert (TREE_CODE (se.expr) == CONSTRUCTOR);
TREE_STATIC (se.expr) = 1;
return se.expr;
case BT_CHARACTER:
{
tree ctor = gfc_conv_string_init (ts->u.cl->backend_decl,expr);
TREE_STATIC (ctor) = 1;
return ctor;
}
default:
gfc_init_se (&se, NULL);
gfc_conv_constant (&se, expr);
gcc_assert (TREE_CODE (se.expr) != CONSTRUCTOR);
return se.expr;
}
}
}
static tree
gfc_trans_subarray_assign (tree dest, gfc_component * cm, gfc_expr * expr)
{
gfc_se rse;
gfc_se lse;
gfc_ss *rss;
gfc_ss *lss;
gfc_array_info *lss_array;
stmtblock_t body;
stmtblock_t block;
gfc_loopinfo loop;
int n;
tree tmp;
gfc_start_block (&block);
/* Initialize the scalarizer. */
gfc_init_loopinfo (&loop);
gfc_init_se (&lse, NULL);
gfc_init_se (&rse, NULL);
/* Walk the rhs. */
rss = gfc_walk_expr (expr);
if (rss == gfc_ss_terminator)
/* The rhs is scalar. Add a ss for the expression. */
rss = gfc_get_scalar_ss (gfc_ss_terminator, expr);
/* Create a SS for the destination. */
lss = gfc_get_array_ss (gfc_ss_terminator, NULL, cm->as->rank,
GFC_SS_COMPONENT);
lss_array = &lss->info->data.array;
lss_array->shape = gfc_get_shape (cm->as->rank);
lss_array->descriptor = dest;
lss_array->data = gfc_conv_array_data (dest);
lss_array->offset = gfc_conv_array_offset (dest);
for (n = 0; n < cm->as->rank; n++)
{
lss_array->start[n] = gfc_conv_array_lbound (dest, n);
lss_array->stride[n] = gfc_index_one_node;
mpz_init (lss_array->shape[n]);
mpz_sub (lss_array->shape[n], cm->as->upper[n]->value.integer,
cm->as->lower[n]->value.integer);
mpz_add_ui (lss_array->shape[n], lss_array->shape[n], 1);
}
/* Associate the SS with the loop. */
gfc_add_ss_to_loop (&loop, lss);
gfc_add_ss_to_loop (&loop, rss);
/* Calculate the bounds of the scalarization. */
gfc_conv_ss_startstride (&loop);
/* Setup the scalarizing loops. */
gfc_conv_loop_setup (&loop, &expr->where);
/* Setup the gfc_se structures. */
gfc_copy_loopinfo_to_se (&lse, &loop);
gfc_copy_loopinfo_to_se (&rse, &loop);
rse.ss = rss;
gfc_mark_ss_chain_used (rss, 1);
lse.ss = lss;
gfc_mark_ss_chain_used (lss, 1);
/* Start the scalarized loop body. */
gfc_start_scalarized_body (&loop, &body);
gfc_conv_tmp_array_ref (&lse);
if (cm->ts.type == BT_CHARACTER)
lse.string_length = cm->ts.u.cl->backend_decl;
gfc_conv_expr (&rse, expr);
tmp = gfc_trans_scalar_assign (&lse, &rse, cm->ts, true, false, true);
gfc_add_expr_to_block (&body, tmp);
gcc_assert (rse.ss == gfc_ss_terminator);
/* Generate the copying loops. */
gfc_trans_scalarizing_loops (&loop, &body);
/* Wrap the whole thing up. */
gfc_add_block_to_block (&block, &loop.pre);
gfc_add_block_to_block (&block, &loop.post);
gcc_assert (lss_array->shape != NULL);
gfc_free_shape (&lss_array->shape, cm->as->rank);
gfc_cleanup_loop (&loop);
return gfc_finish_block (&block);
}
static tree
gfc_trans_alloc_subarray_assign (tree dest, gfc_component * cm,
gfc_expr * expr)
{
gfc_se se;
stmtblock_t block;
tree offset;
int n;
tree tmp;
tree tmp2;
gfc_array_spec *as;
gfc_expr *arg = NULL;
gfc_start_block (&block);
gfc_init_se (&se, NULL);
/* Get the descriptor for the expressions. */
se.want_pointer = 0;
gfc_conv_expr_descriptor (&se, expr);
gfc_add_block_to_block (&block, &se.pre);
gfc_add_modify (&block, dest, se.expr);
/* Deal with arrays of derived types with allocatable components. */
if (cm->ts.type == BT_DERIVED
&& cm->ts.u.derived->attr.alloc_comp)
tmp = gfc_copy_alloc_comp (cm->ts.u.derived,
se.expr, dest,
cm->as->rank);
else
tmp = gfc_duplicate_allocatable (dest, se.expr,
TREE_TYPE (cm->backend_decl),
cm->as->rank);
gfc_add_expr_to_block (&block, tmp);
gfc_add_block_to_block (&block, &se.post);
if (expr->expr_type != EXPR_VARIABLE)
gfc_conv_descriptor_data_set (&block, se.expr,
null_pointer_node);
/* We need to know if the argument of a conversion function is a
variable, so that the correct lower bound can be used. */
if (expr->expr_type == EXPR_FUNCTION
&& expr->value.function.isym
&& expr->value.function.isym->conversion
&& expr->value.function.actual->expr
&& expr->value.function.actual->expr->expr_type == EXPR_VARIABLE)
arg = expr->value.function.actual->expr;
/* Obtain the array spec of full array references. */
if (arg)
as = gfc_get_full_arrayspec_from_expr (arg);
else
as = gfc_get_full_arrayspec_from_expr (expr);
/* Shift the lbound and ubound of temporaries to being unity,
rather than zero, based. Always calculate the offset. */
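/* E.g. a temporary whose descriptor has bounds 0:n-1 is rewritten to
bounds 1:n, with OFFSET recomputed so that element addressing is
unchanged. */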
offset = gfc_conv_descriptor_offset_get (dest);
gfc_add_modify (&block, offset, gfc_index_zero_node);
tmp2 = gfc_create_var (gfc_array_index_type, NULL);
for (n = 0; n < expr->rank; n++)
{
tree span;
tree lbound;
/* Obtain the correct lbound - ISO/IEC TR 15581:2001 page 9.
TODO It looks as if gfc_conv_expr_descriptor should return
the correct bounds and that the following should not be
necessary. This would simplify gfc_conv_intrinsic_bound
as well. */
if (as && as->lower[n])
{
gfc_se lbse;
gfc_init_se (&lbse, NULL);
gfc_conv_expr (&lbse, as->lower[n]);
gfc_add_block_to_block (&block, &lbse.pre);
lbound = gfc_evaluate_now (lbse.expr, &block);
}
else if (as && arg)
{
tmp = gfc_get_symbol_decl (arg->symtree->n.sym);
lbound = gfc_conv_descriptor_lbound_get (tmp,
gfc_rank_cst[n]);
}
else if (as)
lbound = gfc_conv_descriptor_lbound_get (dest,
gfc_rank_cst[n]);
else
lbound = gfc_index_one_node;
lbound = fold_convert (gfc_array_index_type, lbound);
/* Shift the bounds and set the offset accordingly. */
tmp = gfc_conv_descriptor_ubound_get (dest, gfc_rank_cst[n]);
span = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type,
tmp, gfc_conv_descriptor_lbound_get (dest, gfc_rank_cst[n]));
tmp = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
span, lbound);
gfc_conv_descriptor_ubound_set (&block, dest,
gfc_rank_cst[n], tmp);
gfc_conv_descriptor_lbound_set (&block, dest,
gfc_rank_cst[n], lbound);
tmp = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
gfc_conv_descriptor_lbound_get (dest,
gfc_rank_cst[n]),
gfc_conv_descriptor_stride_get (dest,
gfc_rank_cst[n]));
gfc_add_modify (&block, tmp2, tmp);
tmp = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type,
offset, tmp2);
gfc_conv_descriptor_offset_set (&block, dest, tmp);
}
if (arg)
{
/* If a conversion expression has a null data pointer
argument, nullify the allocatable component. */
tree non_null_expr;
tree null_expr;
if (arg->symtree->n.sym->attr.allocatable
|| arg->symtree->n.sym->attr.pointer)
{
non_null_expr = gfc_finish_block (&block);
gfc_start_block (&block);
gfc_conv_descriptor_data_set (&block, dest,
null_pointer_node);
null_expr = gfc_finish_block (&block);
tmp = gfc_conv_descriptor_data_get (arg->symtree->n.sym->backend_decl);
tmp = build2_loc (input_location, EQ_EXPR, boolean_type_node, tmp,
fold_convert (TREE_TYPE (tmp), null_pointer_node));
return build3_v (COND_EXPR, tmp,
null_expr, non_null_expr);
}
}
return gfc_finish_block (&block);
}
/* Assign a single component of a derived type constructor. */
static tree
gfc_trans_subcomponent_assign (tree dest, gfc_component * cm, gfc_expr * expr)
{
gfc_se se;
gfc_se lse;
stmtblock_t block;
tree tmp;
gfc_start_block (&block);
if (cm->attr.pointer || cm->attr.proc_pointer)
{
gfc_init_se (&se, NULL);
/* Pointer component. */
if (cm->attr.dimension && !cm->attr.proc_pointer)
{
/* Array pointer. */
if (expr->expr_type == EXPR_NULL)
gfc_conv_descriptor_data_set (&block, dest, null_pointer_node);
else
{
se.direct_byref = 1;
se.expr = dest;
gfc_conv_expr_descriptor (&se, expr);
gfc_add_block_to_block (&block, &se.pre);
gfc_add_block_to_block (&block, &se.post);
}
}
else
{
/* Scalar pointers. */
se.want_pointer = 1;
gfc_conv_expr (&se, expr);
gfc_add_block_to_block (&block, &se.pre);
if (expr->symtree && expr->symtree->n.sym->attr.proc_pointer
&& expr->symtree->n.sym->attr.dummy)
se.expr = build_fold_indirect_ref_loc (input_location, se.expr);
gfc_add_modify (&block, dest,
fold_convert (TREE_TYPE (dest), se.expr));
gfc_add_block_to_block (&block, &se.post);
}
}
else if (cm->ts.type == BT_CLASS && expr->expr_type == EXPR_NULL)
{
/* NULL initialization for CLASS components. */
tmp = gfc_trans_structure_assign (dest,
gfc_class_initializer (&cm->ts, expr));
gfc_add_expr_to_block (&block, tmp);
}
else if (cm->attr.dimension && !cm->attr.proc_pointer)
{
if (cm->attr.allocatable && expr->expr_type == EXPR_NULL)
gfc_conv_descriptor_data_set (&block, dest, null_pointer_node);
else if (cm->attr.allocatable)
{
tmp = gfc_trans_alloc_subarray_assign (dest, cm, expr);
gfc_add_expr_to_block (&block, tmp);
}
else
{
tmp = gfc_trans_subarray_assign (dest, cm, expr);
gfc_add_expr_to_block (&block, tmp);
}
}
else if (expr->ts.type == BT_DERIVED && expr->ts.f90_type != BT_VOID)
{
if (expr->expr_type != EXPR_STRUCTURE)
{
gfc_init_se (&se, NULL);
gfc_conv_expr (&se, expr);
gfc_add_block_to_block (&block, &se.pre);
gfc_add_modify (&block, dest,
fold_convert (TREE_TYPE (dest), se.expr));
gfc_add_block_to_block (&block, &se.post);
}
else
{
/* Nested constructors. */
tmp = gfc_trans_structure_assign (dest, expr);
gfc_add_expr_to_block (&block, tmp);
}
}
else if (gfc_deferred_strlen (cm, &tmp))
{
tree strlen;
strlen = tmp;
gcc_assert (strlen);
strlen = fold_build3_loc (input_location, COMPONENT_REF,
TREE_TYPE (strlen),
TREE_OPERAND (dest, 0),
strlen, NULL_TREE);
if (expr->expr_type == EXPR_NULL)
{
tmp = build_int_cst (TREE_TYPE (cm->backend_decl), 0);
gfc_add_modify (&block, dest, tmp);
tmp = build_int_cst (TREE_TYPE (strlen), 0);
gfc_add_modify (&block, strlen, tmp);
}
else
{
tree size;
gfc_init_se (&se, NULL);
gfc_conv_expr (&se, expr);
size = size_of_string_in_bytes (cm->ts.kind, se.string_length);
tmp = build_call_expr_loc (input_location,
builtin_decl_explicit (BUILT_IN_MALLOC),
1, size);
gfc_add_modify (&block, dest,
fold_convert (TREE_TYPE (dest), tmp));
gfc_add_modify (&block, strlen, se.string_length);
tmp = gfc_build_memcpy_call (dest, se.expr, size);
gfc_add_expr_to_block (&block, tmp);
}
}
else if (!cm->attr.deferred_parameter)
{
/* Scalar component (excluding deferred parameters). */
gfc_init_se (&se, NULL);
gfc_init_se (&lse, NULL);
gfc_conv_expr (&se, expr);
if (cm->ts.type == BT_CHARACTER)
lse.string_length = cm->ts.u.cl->backend_decl;
lse.expr = dest;
tmp = gfc_trans_scalar_assign (&lse, &se, cm->ts, true, false, true);
gfc_add_expr_to_block (&block, tmp);
}
return gfc_finish_block (&block);
}
/* Assign a derived type constructor to a variable. */
static tree
gfc_trans_structure_assign (tree dest, gfc_expr * expr)
{
gfc_constructor *c;
gfc_component *cm;
stmtblock_t block;
tree field;
tree tmp;
gfc_start_block (&block);
cm = expr->ts.u.derived->components;
if (expr->ts.u.derived->from_intmod == INTMOD_ISO_C_BINDING
&& (expr->ts.u.derived->intmod_sym_id == ISOCBINDING_PTR
|| expr->ts.u.derived->intmod_sym_id == ISOCBINDING_FUNPTR))
{
gfc_se se, lse;
gcc_assert (cm->backend_decl == NULL);
gfc_init_se (&se, NULL);
gfc_init_se (&lse, NULL);
gfc_conv_expr (&se, gfc_constructor_first (expr->value.constructor)->expr);
lse.expr = dest;
gfc_add_modify (&block, lse.expr,
fold_convert (TREE_TYPE (lse.expr), se.expr));
return gfc_finish_block (&block);
}
for (c = gfc_constructor_first (expr->value.constructor);
c; c = gfc_constructor_next (c), cm = cm->next)
{
/* Skip absent members in default initializers. */
if (!c->expr)
continue;
field = cm->backend_decl;
tmp = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
dest, field, NULL_TREE);
tmp = gfc_trans_subcomponent_assign (tmp, cm, c->expr);
gfc_add_expr_to_block (&block, tmp);
}
return gfc_finish_block (&block);
}
/* Build an expression for a constructor. If init is nonzero then
this is part of a static variable initializer. */
void
gfc_conv_structure (gfc_se * se, gfc_expr * expr, int init)
{
gfc_constructor *c;
gfc_component *cm;
tree val;
tree type;
tree tmp;
vec<constructor_elt, va_gc> *v = NULL;
gcc_assert (se->ss == NULL);
gcc_assert (expr->expr_type == EXPR_STRUCTURE);
type = gfc_typenode_for_spec (&expr->ts);
if (!init)
{
/* Create a temporary variable and fill it in. */
se->expr = gfc_create_var (type, expr->ts.u.derived->name);
tmp = gfc_trans_structure_assign (se->expr, expr);
gfc_add_expr_to_block (&se->pre, tmp);
return;
}
cm = expr->ts.u.derived->components;
for (c = gfc_constructor_first (expr->value.constructor);
c; c = gfc_constructor_next (c), cm = cm->next)
{
/* Skip absent members in default initializers and allocatable
components. Although the latter have a default initializer
of EXPR_NULL, the static nullify is not needed since it is
done every time we come into scope. */
if (!c->expr || (cm->attr.allocatable && cm->attr.flavor != FL_PROCEDURE))
continue;
if (cm->initializer && cm->initializer->expr_type != EXPR_NULL
&& strcmp (cm->name, "_extends") == 0
&& cm->initializer->symtree)
{
tree vtab;
gfc_symbol *vtabs;
vtabs = cm->initializer->symtree->n.sym;
vtab = gfc_build_addr_expr (NULL_TREE, gfc_get_symbol_decl (vtabs));
vtab = unshare_expr_without_location (vtab);
CONSTRUCTOR_APPEND_ELT (v, cm->backend_decl, vtab);
}
else if (cm->ts.u.derived && strcmp (cm->name, "_size") == 0)
{
val = TYPE_SIZE_UNIT (gfc_get_derived_type (cm->ts.u.derived));
CONSTRUCTOR_APPEND_ELT (v, cm->backend_decl, val);
}
else if (cm->ts.type == BT_INTEGER && strcmp (cm->name, "_len") == 0)
CONSTRUCTOR_APPEND_ELT (v, cm->backend_decl,
fold_convert (TREE_TYPE (cm->backend_decl),
integer_zero_node));
else
{
val = gfc_conv_initializer (c->expr, &cm->ts,
TREE_TYPE (cm->backend_decl),
cm->attr.dimension, cm->attr.pointer,
cm->attr.proc_pointer);
val = unshare_expr_without_location (val);
/* Append it to the constructor list. */
CONSTRUCTOR_APPEND_ELT (v, cm->backend_decl, val);
}
}
se->expr = build_constructor (type, v);
if (init)
TREE_CONSTANT (se->expr) = 1;
}
/* Translate a substring expression. */
static void
gfc_conv_substring_expr (gfc_se * se, gfc_expr * expr)
{
gfc_ref *ref;
ref = expr->ref;
gcc_assert (ref == NULL || ref->type == REF_SUBSTRING);
se->expr = gfc_build_wide_string_const (expr->ts.kind,
expr->value.character.length,
expr->value.character.string);
se->string_length = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (se->expr)));
TYPE_STRING_FLAG (TREE_TYPE (se->expr)) = 1;
if (ref)
gfc_conv_substring (se, ref, expr->ts.kind, NULL, &expr->where);
}
/* Entry point for expression translation. Evaluates a scalar quantity.
EXPR is the expression to be translated, and SE is the state structure if
called from within the scalarizer. */
void
gfc_conv_expr (gfc_se * se, gfc_expr * expr)
{
gfc_ss *ss;
ss = se->ss;
if (ss && ss->info->expr == expr
&& (ss->info->type == GFC_SS_SCALAR
|| ss->info->type == GFC_SS_REFERENCE))
{
gfc_ss_info *ss_info;
ss_info = ss->info;
/* Substitute a scalar expression evaluated outside the scalarization
loop. */
se->expr = ss_info->data.scalar.value;
/* If the reference can be NULL, the value field contains the reference,
not the value the reference points to (see gfc_add_loop_ss_code). */
if (ss_info->can_be_null_ref)
se->expr = build_fold_indirect_ref_loc (input_location, se->expr);
se->string_length = ss_info->string_length;
gfc_advance_se_ss_chain (se);
return;
}
/* We need to convert the expressions for the iso_c_binding derived types.
C_NULL_PTR and C_NULL_FUNPTR will be made EXPR_NULL, which evaluates to
null_pointer_node. C_PTR and C_FUNPTR are converted to match the
typespec for the C_PTR and C_FUNPTR symbols, which has already been
updated to be an integer with a kind equal to the size of a (void *). */
if (expr->ts.type == BT_DERIVED && expr->ts.u.derived->ts.f90_type == BT_VOID
&& expr->ts.u.derived->attr.is_bind_c)
{
if (expr->expr_type == EXPR_VARIABLE
&& (expr->symtree->n.sym->intmod_sym_id == ISOCBINDING_NULL_PTR
|| expr->symtree->n.sym->intmod_sym_id
== ISOCBINDING_NULL_FUNPTR))
{
/* Set expr_type to EXPR_NULL, which will result in
null_pointer_node being used below. */
expr->expr_type = EXPR_NULL;
}
else
{
/* Update the type/kind of the expression to be what the new
type/kind are for the updated symbols of C_PTR/C_FUNPTR. */
expr->ts.type = BT_INTEGER;
expr->ts.f90_type = BT_VOID;
expr->ts.kind = gfc_index_integer_kind;
}
}
gfc_fix_class_refs (expr);
switch (expr->expr_type)
{
case EXPR_OP:
gfc_conv_expr_op (se, expr);
break;
case EXPR_FUNCTION:
gfc_conv_function_expr (se, expr);
break;
case EXPR_CONSTANT:
gfc_conv_constant (se, expr);
break;
case EXPR_VARIABLE:
gfc_conv_variable (se, expr);
break;
case EXPR_NULL:
se->expr = null_pointer_node;
break;
case EXPR_SUBSTRING:
gfc_conv_substring_expr (se, expr);
break;
case EXPR_STRUCTURE:
gfc_conv_structure (se, expr, 0);
break;
case EXPR_ARRAY:
gfc_conv_array_constructor_expr (se, expr);
break;
default:
gcc_unreachable ();
break;
}
}
/* Like gfc_conv_expr_val, but the value is also suitable for use in the lhs
of an assignment. */
void
gfc_conv_expr_lhs (gfc_se * se, gfc_expr * expr)
{
gfc_conv_expr (se, expr);
/* All numeric lvalues should have empty post chains. If not we need to
figure out a way of rewriting an lvalue so that it has no post chain. */
gcc_assert (expr->ts.type == BT_CHARACTER || !se->post.head);
}
/* Like gfc_conv_expr, but the POST block is guaranteed to be empty for
numeric expressions. Used for scalar values where inserting cleanup code
is inconvenient. */
void
gfc_conv_expr_val (gfc_se * se, gfc_expr * expr)
{
tree val;
gcc_assert (expr->ts.type != BT_CHARACTER);
gfc_conv_expr (se, expr);
if (se->post.head)
{
val = gfc_create_var (TREE_TYPE (se->expr), NULL);
gfc_add_modify (&se->pre, val, se->expr);
se->expr = val;
gfc_add_block_to_block (&se->pre, &se->post);
}
}
/* Helper to translate an expression and convert it to a particular type. */
void
gfc_conv_expr_type (gfc_se * se, gfc_expr * expr, tree type)
{
gfc_conv_expr_val (se, expr);
se->expr = convert (type, se->expr);
}
/* Converts an expression so that it can be passed by reference. Scalar
values only. */
void
gfc_conv_expr_reference (gfc_se * se, gfc_expr * expr)
{
gfc_ss *ss;
tree var;
ss = se->ss;
if (ss && ss->info->expr == expr
&& ss->info->type == GFC_SS_REFERENCE)
{
/* Returns a reference to the scalar evaluated outside the loop
for this case. */
gfc_conv_expr (se, expr);
if (expr->ts.type == BT_CHARACTER
&& expr->expr_type != EXPR_FUNCTION)
gfc_conv_string_parameter (se);
else
se->expr = gfc_build_addr_expr (NULL_TREE, se->expr);
return;
}
if (expr->ts.type == BT_CHARACTER)
{
gfc_conv_expr (se, expr);
gfc_conv_string_parameter (se);
return;
}
if (expr->expr_type == EXPR_VARIABLE)
{
se->want_pointer = 1;
gfc_conv_expr (se, expr);
if (se->post.head)
{
var = gfc_create_var (TREE_TYPE (se->expr), NULL);
gfc_add_modify (&se->pre, var, se->expr);
gfc_add_block_to_block (&se->pre, &se->post);
se->expr = var;
}
return;
}
if (expr->expr_type == EXPR_FUNCTION
&& ((expr->value.function.esym
&& expr->value.function.esym->result->attr.pointer
&& !expr->value.function.esym->result->attr.dimension)
|| (!expr->value.function.esym && !expr->ref
&& expr->symtree->n.sym->attr.pointer
&& !expr->symtree->n.sym->attr.dimension)))
{
se->want_pointer = 1;
gfc_conv_expr (se, expr);
var = gfc_create_var (TREE_TYPE (se->expr), NULL);
gfc_add_modify (&se->pre, var, se->expr);
se->expr = var;
return;
}
gfc_conv_expr (se, expr);
/* Create a temporary var to hold the value. */
if (TREE_CONSTANT (se->expr))
{
tree tmp = se->expr;
STRIP_TYPE_NOPS (tmp);
var = build_decl (input_location,
CONST_DECL, NULL, TREE_TYPE (tmp));
DECL_INITIAL (var) = tmp;
TREE_STATIC (var) = 1;
pushdecl (var);
}
else
{
var = gfc_create_var (TREE_TYPE (se->expr), NULL);
gfc_add_modify (&se->pre, var, se->expr);
}
gfc_add_block_to_block (&se->pre, &se->post);
/* Take the address of that value. */
se->expr = gfc_build_addr_expr (NULL_TREE, var);
if (expr->ts.type == BT_DERIVED && expr->rank
&& !gfc_is_finalizable (expr->ts.u.derived, NULL)
&& expr->ts.u.derived->attr.alloc_comp
&& expr->expr_type != EXPR_VARIABLE)
{
tree tmp;
tmp = build_fold_indirect_ref_loc (input_location, se->expr);
tmp = gfc_deallocate_alloc_comp (expr->ts.u.derived, tmp, expr->rank);
/* The components shall be deallocated before
their containing entity. */
gfc_prepend_expr_to_block (&se->post, tmp);
}
}
tree
gfc_trans_pointer_assign (gfc_code * code)
{
return gfc_trans_pointer_assignment (code->expr1, code->expr2);
}
/* Generate code for a pointer assignment. */
tree
gfc_trans_pointer_assignment (gfc_expr * expr1, gfc_expr * expr2)
{
gfc_expr *expr1_vptr = NULL;
gfc_se lse;
gfc_se rse;
stmtblock_t block;
tree desc;
tree tmp;
tree decl;
bool scalar;
gfc_ss *ss;
gfc_start_block (&block);
gfc_init_se (&lse, NULL);
/* Check whether the expression is a scalar or not; we cannot use
expr1->rank as it can be nonzero for proc pointers. */
ss = gfc_walk_expr (expr1);
scalar = ss == gfc_ss_terminator;
if (!scalar)
gfc_free_ss_chain (ss);
if (expr1->ts.type == BT_DERIVED && expr2->ts.type == BT_CLASS
&& expr2->expr_type != EXPR_FUNCTION)
{
gfc_add_data_component (expr2);
/* The following is required as gfc_add_data_component doesn't
update ts.type if there is a trailing REF_ARRAY. */
expr2->ts.type = BT_DERIVED;
}
if (scalar)
{
/* Scalar pointers. */
lse.want_pointer = 1;
gfc_conv_expr (&lse, expr1);
gfc_init_se (&rse, NULL);
rse.want_pointer = 1;
gfc_conv_expr (&rse, expr2);
if (expr1->symtree->n.sym->attr.proc_pointer
&& expr1->symtree->n.sym->attr.dummy)
lse.expr = build_fold_indirect_ref_loc (input_location,
lse.expr);
if (expr2->symtree && expr2->symtree->n.sym->attr.proc_pointer
&& expr2->symtree->n.sym->attr.dummy)
rse.expr = build_fold_indirect_ref_loc (input_location,
rse.expr);
/* For string assignments to unlimited polymorphic pointers add an
assignment of the string_length to the _len component of the
pointer. */
if ((expr1->ts.type == BT_CLASS || expr1->ts.type == BT_DERIVED)
&& expr1->ts.u.derived->attr.unlimited_polymorphic
&& (expr2->ts.type == BT_CHARACTER ||
((expr2->ts.type == BT_DERIVED || expr2->ts.type == BT_CLASS)
&& expr2->ts.u.derived->attr.unlimited_polymorphic)))
{
gfc_expr *len_comp;
gfc_se se;
len_comp = gfc_get_len_component (expr1);
gfc_init_se (&se, NULL);
gfc_conv_expr (&se, len_comp);
/* ptr % _len = len (str) */
gfc_add_modify (&block, se.expr, rse.string_length);
lse.string_length = se.expr;
gfc_free_expr (len_comp);
}
gfc_add_block_to_block (&block, &lse.pre);
gfc_add_block_to_block (&block, &rse.pre);
/* Check character lengths if character expression. The test is only
really added if -fbounds-check is enabled. Exclude deferred
character length lefthand sides. */
if (expr1->ts.type == BT_CHARACTER && expr2->expr_type != EXPR_NULL
&& !expr1->ts.deferred
&& !expr1->symtree->n.sym->attr.proc_pointer
&& !gfc_is_proc_ptr_comp (expr1))
{
gcc_assert (expr2->ts.type == BT_CHARACTER);
gcc_assert (lse.string_length && rse.string_length);
gfc_trans_same_strlen_check ("pointer assignment", &expr1->where,
lse.string_length, rse.string_length,
&block);
}
/* The assignment to a deferred character length lhs sets the string
length to that of the rhs. */
if (expr1->ts.deferred)
{
if (expr2->expr_type != EXPR_NULL && lse.string_length != NULL)
gfc_add_modify (&block, lse.string_length, rse.string_length);
else if (lse.string_length != NULL)
gfc_add_modify (&block, lse.string_length,
build_int_cst (gfc_charlen_type_node, 0));
}
if (expr1->ts.type == BT_DERIVED && expr2->ts.type == BT_CLASS)
rse.expr = gfc_class_data_get (rse.expr);
gfc_add_modify (&block, lse.expr,
fold_convert (TREE_TYPE (lse.expr), rse.expr));
gfc_add_block_to_block (&block, &rse.post);
gfc_add_block_to_block (&block, &lse.post);
}
else
{
gfc_ref* remap;
bool rank_remap;
tree strlen_lhs;
tree strlen_rhs = NULL_TREE;
/* Array pointer. Find the last reference on the LHS and if it is an
array section ref, we're dealing with bounds remapping. In this case,
set it to AR_FULL so that gfc_conv_expr_descriptor does
not see it and process the bounds remapping afterwards explicitly. */
for (remap = expr1->ref; remap; remap = remap->next)
if (!remap->next && remap->type == REF_ARRAY
&& remap->u.ar.type == AR_SECTION)
break;
rank_remap = (remap && remap->u.ar.end[0]);
gfc_init_se (&lse, NULL);
if (remap)
lse.descriptor_only = 1;
if (expr2->expr_type == EXPR_FUNCTION && expr2->ts.type == BT_CLASS
&& expr1->ts.type == BT_CLASS)
expr1_vptr = gfc_copy_expr (expr1);
gfc_conv_expr_descriptor (&lse, expr1);
strlen_lhs = lse.string_length;
desc = lse.expr;
if (expr2->expr_type == EXPR_NULL)
{
/* Just set the data pointer to null. */
gfc_conv_descriptor_data_set (&lse.pre, lse.expr, null_pointer_node);
}
else if (rank_remap)
{
/* If we are rank-remapping, just get the RHS's descriptor and
process this later on. */
gfc_init_se (&rse, NULL);
rse.direct_byref = 1;
rse.byref_noassign = 1;
if (expr2->expr_type == EXPR_FUNCTION && expr2->ts.type == BT_CLASS)
{
gfc_conv_function_expr (&rse, expr2);
if (expr1->ts.type != BT_CLASS)
rse.expr = gfc_class_data_get (rse.expr);
else
{
tmp = gfc_create_var (TREE_TYPE (rse.expr), "ptrtemp");
gfc_add_modify (&lse.pre, tmp, rse.expr);
gfc_add_vptr_component (expr1_vptr);
gfc_init_se (&rse, NULL);
rse.want_pointer = 1;
gfc_conv_expr (&rse, expr1_vptr);
gfc_add_modify (&lse.pre, rse.expr,
fold_convert (TREE_TYPE (rse.expr),
gfc_class_vptr_get (tmp)));
rse.expr = gfc_class_data_get (tmp);
}
}
else if (expr2->expr_type == EXPR_FUNCTION)
{
tree bound[GFC_MAX_DIMENSIONS];
int i;
for (i = 0; i < expr2->rank; i++)
bound[i] = NULL_TREE;
tmp = gfc_typenode_for_spec (&expr2->ts);
tmp = gfc_get_array_type_bounds (tmp, expr2->rank, 0,
bound, bound, 0,
GFC_ARRAY_POINTER_CONT, false);
tmp = gfc_create_var (tmp, "ptrtemp");
lse.descriptor_only = 0;
lse.expr = tmp;
lse.direct_byref = 1;
gfc_conv_expr_descriptor (&lse, expr2);
strlen_rhs = lse.string_length;
rse.expr = tmp;
}
else
{
gfc_conv_expr_descriptor (&rse, expr2);
strlen_rhs = rse.string_length;
}
}
else if (expr2->expr_type == EXPR_VARIABLE)
{
/* Assign directly to the LHS's descriptor. */
lse.descriptor_only = 0;
lse.direct_byref = 1;
gfc_conv_expr_descriptor (&lse, expr2);
strlen_rhs = lse.string_length;
/* If this is a subreference array pointer assignment, use the rhs
descriptor element size for the lhs span. */
if (expr1->symtree->n.sym->attr.subref_array_pointer)
{
decl = expr1->symtree->n.sym->backend_decl;
gfc_init_se (&rse, NULL);
rse.descriptor_only = 1;
gfc_conv_expr (&rse, expr2);
tmp = gfc_get_element_type (TREE_TYPE (rse.expr));
tmp = fold_convert (gfc_array_index_type, size_in_bytes (tmp));
if (!INTEGER_CST_P (tmp))
gfc_add_block_to_block (&lse.post, &rse.pre);
gfc_add_modify (&lse.post, GFC_DECL_SPAN(decl), tmp);
}
}
else if (expr2->expr_type == EXPR_FUNCTION && expr2->ts.type == BT_CLASS)
{
gfc_init_se (&rse, NULL);
rse.want_pointer = 1;
gfc_conv_function_expr (&rse, expr2);
if (expr1->ts.type != BT_CLASS)
{
rse.expr = gfc_class_data_get (rse.expr);
gfc_add_modify (&lse.pre, desc, rse.expr);
}
else
{
tmp = gfc_create_var (TREE_TYPE (rse.expr), "ptrtemp");
gfc_add_modify (&lse.pre, tmp, rse.expr);
gfc_add_vptr_component (expr1_vptr);
gfc_init_se (&rse, NULL);
rse.want_pointer = 1;
gfc_conv_expr (&rse, expr1_vptr);
gfc_add_modify (&lse.pre, rse.expr,
fold_convert (TREE_TYPE (rse.expr),
gfc_class_vptr_get (tmp)));
rse.expr = gfc_class_data_get (tmp);
gfc_add_modify (&lse.pre, desc, rse.expr);
}
}
else
{
/* Assign to a temporary descriptor and then copy that
temporary to the pointer. */
tmp = gfc_create_var (TREE_TYPE (desc), "ptrtemp");
lse.descriptor_only = 0;
lse.expr = tmp;
lse.direct_byref = 1;
gfc_conv_expr_descriptor (&lse, expr2);
strlen_rhs = lse.string_length;
gfc_add_modify (&lse.pre, desc, tmp);
}
if (expr1_vptr)
gfc_free_expr (expr1_vptr);
gfc_add_block_to_block (&block, &lse.pre);
if (rank_remap)
gfc_add_block_to_block (&block, &rse.pre);
/* If we do bounds remapping, update LHS descriptor accordingly. */
if (remap)
{
int dim;
gcc_assert (remap->u.ar.dimen == expr1->rank);
if (rank_remap)
{
/* Do rank remapping. We already have the RHS's descriptor
converted in rse and now have to build the correct LHS
descriptor for it. */
tree dtype, data;
tree offs, stride;
tree lbound, ubound;
/* Set dtype. */
dtype = gfc_conv_descriptor_dtype (desc);
tmp = gfc_get_dtype (TREE_TYPE (desc));
gfc_add_modify (&block, dtype, tmp);
/* Copy data pointer. */
data = gfc_conv_descriptor_data_get (rse.expr);
gfc_conv_descriptor_data_set (&block, desc, data);
/* Copy offset but adjust it such that it would correspond
to a lbound of zero. */
offs = gfc_conv_descriptor_offset_get (rse.expr);
for (dim = 0; dim < expr2->rank; ++dim)
{
stride = gfc_conv_descriptor_stride_get (rse.expr,
gfc_rank_cst[dim]);
lbound = gfc_conv_descriptor_lbound_get (rse.expr,
gfc_rank_cst[dim]);
tmp = fold_build2_loc (input_location, MULT_EXPR,
gfc_array_index_type, stride, lbound);
offs = fold_build2_loc (input_location, PLUS_EXPR,
gfc_array_index_type, offs, tmp);
}
gfc_conv_descriptor_offset_set (&block, desc, offs);
/* Set the bounds as declared for the LHS and calculate the strides as
well as a further offset update accordingly. */
stride = gfc_conv_descriptor_stride_get (rse.expr,
gfc_rank_cst[0]);
for (dim = 0; dim < expr1->rank; ++dim)
{
gfc_se lower_se;
gfc_se upper_se;
gcc_assert (remap->u.ar.start[dim] && remap->u.ar.end[dim]);
/* Convert declared bounds. */
gfc_init_se (&lower_se, NULL);
gfc_init_se (&upper_se, NULL);
gfc_conv_expr (&lower_se, remap->u.ar.start[dim]);
gfc_conv_expr (&upper_se, remap->u.ar.end[dim]);
gfc_add_block_to_block (&block, &lower_se.pre);
gfc_add_block_to_block (&block, &upper_se.pre);
lbound = fold_convert (gfc_array_index_type, lower_se.expr);
ubound = fold_convert (gfc_array_index_type, upper_se.expr);
lbound = gfc_evaluate_now (lbound, &block);
ubound = gfc_evaluate_now (ubound, &block);
gfc_add_block_to_block (&block, &lower_se.post);
gfc_add_block_to_block (&block, &upper_se.post);
/* Set bounds in descriptor. */
gfc_conv_descriptor_lbound_set (&block, desc,
gfc_rank_cst[dim], lbound);
gfc_conv_descriptor_ubound_set (&block, desc,
gfc_rank_cst[dim], ubound);
/* Set stride. */
stride = gfc_evaluate_now (stride, &block);
gfc_conv_descriptor_stride_set (&block, desc,
gfc_rank_cst[dim], stride);
/* Update offset. */
offs = gfc_conv_descriptor_offset_get (desc);
tmp = fold_build2_loc (input_location, MULT_EXPR,
gfc_array_index_type, lbound, stride);
offs = fold_build2_loc (input_location, MINUS_EXPR,
gfc_array_index_type, offs, tmp);
offs = gfc_evaluate_now (offs, &block);
gfc_conv_descriptor_offset_set (&block, desc, offs);
/* Update stride. */
tmp = gfc_conv_array_extent_dim (lbound, ubound, NULL);
stride = fold_build2_loc (input_location, MULT_EXPR,
gfc_array_index_type, stride, tmp);
}
}
else
{
/* Bounds remapping. Just shift the lower bounds. */
gcc_assert (expr1->rank == expr2->rank);
for (dim = 0; dim < remap->u.ar.dimen; ++dim)
{
gfc_se lbound_se;
gcc_assert (remap->u.ar.start[dim]);
gcc_assert (!remap->u.ar.end[dim]);
gfc_init_se (&lbound_se, NULL);
gfc_conv_expr (&lbound_se, remap->u.ar.start[dim]);
gfc_add_block_to_block (&block, &lbound_se.pre);
gfc_conv_shift_descriptor_lbound (&block, desc,
dim, lbound_se.expr);
gfc_add_block_to_block (&block, &lbound_se.post);
}
}
}
/* Check string lengths if applicable. The check is only really added
to the output code if -fbounds-check is enabled. */
if (expr1->ts.type == BT_CHARACTER && expr2->expr_type != EXPR_NULL)
{
gcc_assert (expr2->ts.type == BT_CHARACTER);
gcc_assert (strlen_lhs && strlen_rhs);
gfc_trans_same_strlen_check ("pointer assignment", &expr1->where,
strlen_lhs, strlen_rhs, &block);
}
/* If rank remapping was done, check with -fcheck=bounds that
the target is at least as large as the pointer. */
if (rank_remap && (gfc_option.rtcheck & GFC_RTCHECK_BOUNDS))
{
tree lsize, rsize;
tree fault;
const char* msg;
lsize = gfc_conv_descriptor_size (lse.expr, expr1->rank);
rsize = gfc_conv_descriptor_size (rse.expr, expr2->rank);
lsize = gfc_evaluate_now (lsize, &block);
rsize = gfc_evaluate_now (rsize, &block);
fault = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
rsize, lsize);
msg = _("Target of rank remapping is too small (%ld < %ld)");
gfc_trans_runtime_check (true, false, fault, &block, &expr2->where,
msg, rsize, lsize);
}
gfc_add_block_to_block (&block, &lse.post);
if (rank_remap)
gfc_add_block_to_block (&block, &rse.post);
}
return gfc_finish_block (&block);
}
/* Makes sure se is suitable for passing as a function string parameter. */
/* TODO: Need to check all callers of this function. It may be abused. */
void
gfc_conv_string_parameter (gfc_se * se)
{
tree type;
if (TREE_CODE (se->expr) == STRING_CST)
{
type = TREE_TYPE (TREE_TYPE (se->expr));
se->expr = gfc_build_addr_expr (build_pointer_type (type), se->expr);
return;
}
if (TYPE_STRING_FLAG (TREE_TYPE (se->expr)))
{
if (TREE_CODE (se->expr) != INDIRECT_REF)
{
type = TREE_TYPE (se->expr);
se->expr = gfc_build_addr_expr (build_pointer_type (type), se->expr);
}
else
{
type = gfc_get_character_type_len (gfc_default_character_kind,
se->string_length);
type = build_pointer_type (type);
se->expr = gfc_build_addr_expr (type, se->expr);
}
}
gcc_assert (POINTER_TYPE_P (TREE_TYPE (se->expr)));
}
/* Generate code for assignment of scalar variables. Includes character
strings and derived types with allocatable components.
If you know that the LHS has no allocations, set dealloc to false.
DEEP_COPY has no effect if the typespec TS is not a derived type with
allocatable components. Otherwise, if it is set, an explicit copy of each
allocatable component is made. This is necessary as a simple copy of the
whole object would copy array descriptors as is, so that the lhs's
allocatable components would point to the rhs's after the assignment.
Typically, setting DEEP_COPY is necessary if the rhs is a variable, and not
necessary if the rhs is a non-pointer function, as the allocatable components
are not accessible by other means than the function's result after the
function has returned. It is even more subtle when temporaries are involved,
as the two following examples show:
1. When we evaluate an array constructor, a temporary is created. Thus
there is theoretically no alias possible. However, no deep copy is
made for this temporary, so that if the constructor is made of one or
more variable with allocatable components, those components still point
to the variable's: DEEP_COPY should be set for the assignment from the
temporary to the lhs in that case.
2. When assigning a scalar to an array, we evaluate the scalar value out
of the loop, store it into a temporary variable, and assign from that.
In that case, deep copying when assigning to the temporary would be a
waste of resources; however deep copies should happen when assigning from
the temporary to each array element: again DEEP_COPY should be set for
the assignment from the temporary to the lhs. */
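/* Illustration (hedged example, not from the original comment): given

     type :: t
       integer, allocatable :: a(:)
     end type
     type(t) :: x, y
     x = y

   copying only the descriptors would leave x%a and y%a sharing storage,
   so DEEP_COPY must be set; whereas for x = f () with a non-pointer
   function f, a shallow copy of the result is sufficient. */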
tree
gfc_trans_scalar_assign (gfc_se * lse, gfc_se * rse, gfc_typespec ts,
bool l_is_temp, bool deep_copy, bool dealloc)
{
stmtblock_t block;
tree tmp;
tree cond;
gfc_init_block (&block);
if (ts.type == BT_CHARACTER)
{
tree rlen = NULL;
tree llen = NULL;
if (lse->string_length != NULL_TREE)
{
gfc_conv_string_parameter (lse);
gfc_add_block_to_block (&block, &lse->pre);
llen = lse->string_length;
}
if (rse->string_length != NULL_TREE)
{
gcc_assert (rse->string_length != NULL_TREE);
gfc_conv_string_parameter (rse);
gfc_add_block_to_block (&block, &rse->pre);
rlen = rse->string_length;
}
gfc_trans_string_copy (&block, llen, lse->expr, ts.kind, rlen,
rse->expr, ts.kind);
}
else if (ts.type == BT_DERIVED && ts.u.derived->attr.alloc_comp)
{
tree tmp_var = NULL_TREE;
cond = NULL_TREE;
/* Are the rhs and the lhs the same? */
if (deep_copy)
{
cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
gfc_build_addr_expr (NULL_TREE, lse->expr),
gfc_build_addr_expr (NULL_TREE, rse->expr));
cond = gfc_evaluate_now (cond, &lse->pre);
}
/* Deallocate the lhs allocated components as long as it is not
the same as the rhs. This must be done following the assignment
to prevent deallocating data that could be used in the rhs
expression. */
if (!l_is_temp && dealloc)
{
tmp_var = gfc_evaluate_now (lse->expr, &lse->pre);
tmp = gfc_deallocate_alloc_comp_no_caf (ts.u.derived, tmp_var, 0);
if (deep_copy)
tmp = build3_v (COND_EXPR, cond, build_empty_stmt (input_location),
tmp);
gfc_add_expr_to_block (&lse->post, tmp);
}
gfc_add_block_to_block (&block, &rse->pre);
gfc_add_block_to_block (&block, &lse->pre);
gfc_add_modify (&block, lse->expr,
fold_convert (TREE_TYPE (lse->expr), rse->expr));
/* Restore pointer address of coarray components. */
if (ts.u.derived->attr.coarray_comp && deep_copy && tmp_var != NULL_TREE)
{
tmp = gfc_reassign_alloc_comp_caf (ts.u.derived, tmp_var, lse->expr);
tmp = build3_v (COND_EXPR, cond, build_empty_stmt (input_location),
tmp);
gfc_add_expr_to_block (&block, tmp);
}
/* Do a deep copy if the rhs is a variable and it is not the
same as the lhs. */
if (deep_copy)
{
tmp = gfc_copy_alloc_comp (ts.u.derived, rse->expr, lse->expr, 0);
tmp = build3_v (COND_EXPR, cond, build_empty_stmt (input_location),
tmp);
gfc_add_expr_to_block (&block, tmp);
}
}
else if (ts.type == BT_DERIVED || ts.type == BT_CLASS)
{
gfc_add_block_to_block (&block, &lse->pre);
gfc_add_block_to_block (&block, &rse->pre);
tmp = fold_build1_loc (input_location, VIEW_CONVERT_EXPR,
TREE_TYPE (lse->expr), rse->expr);
gfc_add_modify (&block, lse->expr, tmp);
}
else
{
gfc_add_block_to_block (&block, &lse->pre);
gfc_add_block_to_block (&block, &rse->pre);
gfc_add_modify (&block, lse->expr,
fold_convert (TREE_TYPE (lse->expr), rse->expr));
}
gfc_add_block_to_block (&block, &lse->post);
gfc_add_block_to_block (&block, &rse->post);
return gfc_finish_block (&block);
}
/* There are quite a lot of restrictions on the optimisation of assigning
an array function result without a temporary. */
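/* For instance (illustrative example, not from the original source):
   a strided-section assignment such as

     a(1:n:2) = f (x)

   needs a temporary, whereas a full-array assignment a = f (x) to a
   local, non-pointer, non-target variable usually does not. */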
static bool
arrayfunc_assign_needs_temporary (gfc_expr * expr1, gfc_expr * expr2)
{
gfc_ref * ref;
bool seen_array_ref;
bool c = false;
gfc_symbol *sym = expr1->symtree->n.sym;
/* The caller has already checked rank>0 and expr_type == EXPR_FUNCTION. */
if (expr2->value.function.isym && !gfc_is_intrinsic_libcall (expr2))
return true;
/* Elemental functions are scalarized so that they don't need a
temporary in gfc_trans_assignment_1, so return true. Otherwise,
they would need special treatment in gfc_trans_arrayfunc_assign. */
if (expr2->value.function.esym != NULL
&& expr2->value.function.esym->attr.elemental)
return true;
/* Need a temporary if the lhs is not FULL or a contiguous section. */
if (expr1->ref && !(gfc_full_array_ref_p (expr1->ref, &c) || c))
return true;
/* Need a temporary if EXPR1 can't be expressed as a descriptor. */
if (gfc_ref_needs_temporary_p (expr1->ref))
return true;
/* Functions returning pointers or allocatables need temporaries. */
c = expr2->value.function.esym
? (expr2->value.function.esym->attr.pointer
|| expr2->value.function.esym->attr.allocatable)
: (expr2->symtree->n.sym->attr.pointer
|| expr2->symtree->n.sym->attr.allocatable);
if (c)
return true;
/* Character array functions need temporaries unless the
character lengths are the same. */
if (expr2->ts.type == BT_CHARACTER && expr2->rank > 0)
{
if (expr1->ts.u.cl->length == NULL
|| expr1->ts.u.cl->length->expr_type != EXPR_CONSTANT)
return true;
if (expr2->ts.u.cl->length == NULL
|| expr2->ts.u.cl->length->expr_type != EXPR_CONSTANT)
return true;
if (mpz_cmp (expr1->ts.u.cl->length->value.integer,
expr2->ts.u.cl->length->value.integer) != 0)
return true;
}
/* Check that no LHS component references appear after an array
reference. This is needed because we do not have the means to
span any arbitrary stride with an array descriptor. This check
is not needed for the rhs because the function result has to be
a complete type. */
seen_array_ref = false;
for (ref = expr1->ref; ref; ref = ref->next)
{
if (ref->type == REF_ARRAY)
seen_array_ref = true;
else if (ref->type == REF_COMPONENT && seen_array_ref)
return true;
}
/* Check for a dependency. */
if (gfc_check_fncall_dependency (expr1, INTENT_OUT,
expr2->value.function.esym,
expr2->value.function.actual,
NOT_ELEMENTAL))
return true;
/* If we have reached here with an intrinsic function, we do not
need a temporary except in the particular case that reallocation
on assignment is active and the lhs is allocatable and a target. */
if (expr2->value.function.isym)
return (gfc_option.flag_realloc_lhs
&& sym->attr.allocatable
&& sym->attr.target);
/* If the LHS is a dummy, we need a temporary if it is not
INTENT(OUT). */
if (sym->attr.dummy && sym->attr.intent != INTENT_OUT)
return true;
/* If the lhs has been host_associated, is in common, a pointer or is
a target and the function is not using a RESULT variable, aliasing
can occur and a temporary is needed. */
if ((sym->attr.host_assoc
|| sym->attr.in_common
|| sym->attr.pointer
|| sym->attr.cray_pointee
|| sym->attr.target)
&& expr2->symtree != NULL
&& expr2->symtree->n.sym == expr2->symtree->n.sym->result)
return true;
/* A PURE function can unconditionally be called without a temporary. */
if (expr2->value.function.esym != NULL
&& expr2->value.function.esym->attr.pure)
return false;
/* Implicit_pure functions are those which could legally be declared
to be PURE. */
if (expr2->value.function.esym != NULL
&& expr2->value.function.esym->attr.implicit_pure)
return false;
if (!sym->attr.use_assoc
&& !sym->attr.in_common
&& !sym->attr.pointer
&& !sym->attr.target
&& !sym->attr.cray_pointee
&& expr2->value.function.esym)
{
/* A temporary is not needed if the function is not contained and
the variable is local or host associated and not a pointer or
a target. */
if (!expr2->value.function.esym->attr.contained)
return false;
/* A temporary is not needed if the lhs has never been host
associated and the procedure is contained. */
else if (!sym->attr.host_assoc)
return false;
/* A temporary is not needed if the variable is local and not
a pointer, a target or a result. */
if (sym->ns->parent
&& expr2->value.function.esym->ns == sym->ns->parent)
return false;
}
/* Default to temporary use. */
return true;
}
/* Provide the loop info so that the lhs descriptor can be built for
reallocatable assignments from extrinsic function calls. */
static void
realloc_lhs_loop_for_fcn_call (gfc_se *se, locus *where, gfc_ss **ss,
gfc_loopinfo *loop)
{
/* Signal that the function call should not be made by
gfc_conv_loop_setup. */
se->ss->is_alloc_lhs = 1;
gfc_init_loopinfo (loop);
gfc_add_ss_to_loop (loop, *ss);
gfc_add_ss_to_loop (loop, se->ss);
gfc_conv_ss_startstride (loop);
gfc_conv_loop_setup (loop, where);
gfc_copy_loopinfo_to_se (se, loop);
gfc_add_block_to_block (&se->pre, &loop->pre);
gfc_add_block_to_block (&se->pre, &loop->post);
se->ss->is_alloc_lhs = 0;
}
/* For assignment to a reallocatable lhs from intrinsic functions,
replace the se.expr (i.e. the result) with a temporary descriptor.
Null the data field so that the library allocates space for the
result. Free the data of the original descriptor after the function,
in case it appears in an argument expression, and transfer the
result to the original descriptor. */
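/* Illustrative example (hedged): with reallocation on assignment and

     integer, allocatable :: a(:)
     a = pack (b, mask)

   the PACK library routine allocates the result into the temporary
   descriptor; the code below then frees a's old data, adopts the newly
   allocated data, and rebases the bounds onto a's lbounds. */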
static void
fcncall_realloc_result (gfc_se *se, int rank)
{
tree desc;
tree res_desc;
tree tmp;
tree offset;
tree zero_cond;
int n;
/* Use the allocation done by the library. Substitute the lhs
descriptor with a copy whose data field is nulled. */
desc = build_fold_indirect_ref_loc (input_location, se->expr);
if (POINTER_TYPE_P (TREE_TYPE (desc)))
desc = build_fold_indirect_ref_loc (input_location, desc);
/* Unallocated, the descriptor does not have a dtype. */
tmp = gfc_conv_descriptor_dtype (desc);
gfc_add_modify (&se->pre, tmp, gfc_get_dtype (TREE_TYPE (desc)));
res_desc = gfc_evaluate_now (desc, &se->pre);
gfc_conv_descriptor_data_set (&se->pre, res_desc, null_pointer_node);
se->expr = gfc_build_addr_expr (NULL_TREE, res_desc);
/* Free the lhs after the function call and copy the result data to
the lhs descriptor. */
tmp = gfc_conv_descriptor_data_get (desc);
zero_cond = fold_build2_loc (input_location, EQ_EXPR,
boolean_type_node, tmp,
build_int_cst (TREE_TYPE (tmp), 0));
zero_cond = gfc_evaluate_now (zero_cond, &se->post);
tmp = gfc_call_free (fold_convert (pvoid_type_node, tmp));
gfc_add_expr_to_block (&se->post, tmp);
tmp = gfc_conv_descriptor_data_get (res_desc);
gfc_conv_descriptor_data_set (&se->post, desc, tmp);
/* Check that the shapes are the same between lhs and expression. */
for (n = 0 ; n < rank; n++)
{
tree tmp1;
tmp = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[n]);
tmp1 = gfc_conv_descriptor_lbound_get (res_desc, gfc_rank_cst[n]);
tmp = fold_build2_loc (input_location, MINUS_EXPR,
gfc_array_index_type, tmp, tmp1);
tmp1 = gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[n]);
tmp = fold_build2_loc (input_location, MINUS_EXPR,
gfc_array_index_type, tmp, tmp1);
tmp1 = gfc_conv_descriptor_ubound_get (res_desc, gfc_rank_cst[n]);
tmp = fold_build2_loc (input_location, PLUS_EXPR,
gfc_array_index_type, tmp, tmp1);
tmp = fold_build2_loc (input_location, NE_EXPR,
boolean_type_node, tmp,
gfc_index_zero_node);
tmp = gfc_evaluate_now (tmp, &se->post);
zero_cond = fold_build2_loc (input_location, TRUTH_OR_EXPR,
boolean_type_node, tmp,
zero_cond);
}
/* 'zero_cond' being true is equivalent to the lhs not being allocated
or the shapes being different. */
zero_cond = gfc_evaluate_now (zero_cond, &se->post);
/* Now reset the bounds returned from the function call to bounds based
on the lhs lbounds, except where the lhs is not allocated or the shapes
of 'variable' and 'expr' are different. Set the offset accordingly. */
offset = gfc_index_zero_node;
for (n = 0 ; n < rank; n++)
{
tree lbound;
lbound = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[n]);
lbound = fold_build3_loc (input_location, COND_EXPR,
gfc_array_index_type, zero_cond,
gfc_index_one_node, lbound);
lbound = gfc_evaluate_now (lbound, &se->post);
tmp = gfc_conv_descriptor_ubound_get (res_desc, gfc_rank_cst[n]);
tmp = fold_build2_loc (input_location, PLUS_EXPR,
gfc_array_index_type, tmp, lbound);
gfc_conv_descriptor_lbound_set (&se->post, desc,
gfc_rank_cst[n], lbound);
gfc_conv_descriptor_ubound_set (&se->post, desc,
gfc_rank_cst[n], tmp);
/* Set stride and accumulate the offset. */
tmp = gfc_conv_descriptor_stride_get (res_desc, gfc_rank_cst[n]);
gfc_conv_descriptor_stride_set (&se->post, desc,
gfc_rank_cst[n], tmp);
tmp = fold_build2_loc (input_location, MULT_EXPR,
gfc_array_index_type, lbound, tmp);
offset = fold_build2_loc (input_location, MINUS_EXPR,
gfc_array_index_type, offset, tmp);
offset = gfc_evaluate_now (offset, &se->post);
}
gfc_conv_descriptor_offset_set (&se->post, desc, offset);
}
/* Try to translate array(:) = func (...), where func is a transformational
array function, without using a temporary. Returns NULL if this isn't the
case. */
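/* A typical candidate (illustrative example) is

     c = matmul (a, b)

   with c a full-array reference, where the result can be written
   directly through c's descriptor. */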
static tree
gfc_trans_arrayfunc_assign (gfc_expr * expr1, gfc_expr * expr2)
{
gfc_se se;
gfc_ss *ss = NULL;
gfc_component *comp = NULL;
gfc_loopinfo loop;
if (arrayfunc_assign_needs_temporary (expr1, expr2))
return NULL;
/* The frontend doesn't seem to bother filling in expr->symtree for intrinsic
functions. */
comp = gfc_get_proc_ptr_comp (expr2);
gcc_assert (expr2->value.function.isym
|| (comp && comp->attr.dimension)
|| (!comp && gfc_return_by_reference (expr2->value.function.esym)
&& expr2->value.function.esym->result->attr.dimension));
gfc_init_se (&se, NULL);
gfc_start_block (&se.pre);
se.want_pointer = 1;
gfc_conv_array_parameter (&se, expr1, false, NULL, NULL, NULL);
if (expr1->ts.type == BT_DERIVED
&& expr1->ts.u.derived->attr.alloc_comp)
{
tree tmp;
tmp = gfc_deallocate_alloc_comp_no_caf (expr1->ts.u.derived, se.expr,
expr1->rank);
gfc_add_expr_to_block (&se.pre, tmp);
}
se.direct_byref = 1;
se.ss = gfc_walk_expr (expr2);
gcc_assert (se.ss != gfc_ss_terminator);
/* Reallocate on assignment needs the loopinfo for extrinsic functions.
This is signalled to gfc_conv_procedure_call by setting is_alloc_lhs.
Clearly, this cannot be done for an allocatable function result, since
the shape of the result is unknown and, in any case, the function must
correctly take care of the reallocation internally. For intrinsic
calls, the array data is freed and the library takes care of allocation.
TODO: Add logic of trans-array.c: gfc_alloc_allocatable_for_assignment
to the library. */
if (gfc_option.flag_realloc_lhs
&& gfc_is_reallocatable_lhs (expr1)
&& !gfc_expr_attr (expr1).codimension
&& !gfc_is_coindexed (expr1)
&& !(expr2->value.function.esym
&& expr2->value.function.esym->result->attr.allocatable))
{
realloc_lhs_warning (expr1->ts.type, true, &expr1->where);
if (!expr2->value.function.isym)
{
ss = gfc_walk_expr (expr1);
gcc_assert (ss != gfc_ss_terminator);
realloc_lhs_loop_for_fcn_call (&se, &expr1->where, &ss, &loop);
ss->is_alloc_lhs = 1;
}
else
fcncall_realloc_result (&se, expr1->rank);
}
gfc_conv_function_expr (&se, expr2);
gfc_add_block_to_block (&se.pre, &se.post);
if (ss)
gfc_cleanup_loop (&loop);
else
gfc_free_ss_chain (se.ss);
return gfc_finish_block (&se.pre);
}
/* Try to efficiently translate array(:) = 0. Return NULL if this
can't be done. */
static tree
gfc_trans_zero_assign (gfc_expr * expr)
{
tree dest, len, type;
tree tmp;
gfc_symbol *sym;
sym = expr->symtree->n.sym;
dest = gfc_get_symbol_decl (sym);
type = TREE_TYPE (dest);
if (POINTER_TYPE_P (type))
type = TREE_TYPE (type);
if (!GFC_ARRAY_TYPE_P (type))
return NULL_TREE;
/* Determine the length of the array. */
len = GFC_TYPE_ARRAY_SIZE (type);
if (!len || TREE_CODE (len) != INTEGER_CST)
return NULL_TREE;
tmp = TYPE_SIZE_UNIT (gfc_get_element_type (type));
len = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type, len,
fold_convert (gfc_array_index_type, tmp));
/* If we are zeroing a local array, avoid taking its address by emitting
a = {} instead. */
if (!POINTER_TYPE_P (TREE_TYPE (dest)))
return build2_loc (input_location, MODIFY_EXPR, void_type_node,
dest, build_constructor (TREE_TYPE (dest),
NULL));
/* Convert arguments to the correct types. */
dest = fold_convert (pvoid_type_node, dest);
len = fold_convert (size_type_node, len);
/* Construct call to __builtin_memset. */
tmp = build_call_expr_loc (input_location,
builtin_decl_explicit (BUILT_IN_MEMSET),
3, dest, integer_zero_node, len);
return fold_convert (void_type_node, tmp);
}
/* Helper for gfc_trans_array_copy and gfc_trans_array_constructor_copy
that constructs the call to __builtin_memcpy. */
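/* The generated tree is roughly (sketch)
     (void) __builtin_memcpy (dst, src, len)
   with the operands converted to pvoid_type_node/size_type_node and
   their addresses taken where they are not already pointers. */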
tree
gfc_build_memcpy_call (tree dst, tree src, tree len)
{
tree tmp;
/* Convert arguments to the correct types. */
if (!POINTER_TYPE_P (TREE_TYPE (dst)))
dst = gfc_build_addr_expr (pvoid_type_node, dst);
else
dst = fold_convert (pvoid_type_node, dst);
if (!POINTER_TYPE_P (TREE_TYPE (src)))
src = gfc_build_addr_expr (pvoid_type_node, src);
else
src = fold_convert (pvoid_type_node, src);
len = fold_convert (size_type_node, len);
/* Construct call to __builtin_memcpy. */
tmp = build_call_expr_loc (input_location,
builtin_decl_explicit (BUILT_IN_MEMCPY),
3, dst, src, len);
return fold_convert (void_type_node, tmp);
}
/* Try to efficiently translate dst(:) = src(:). Return NULL if this
can't be done. EXPR1 is the destination/lhs and EXPR2 is the
source/rhs; both satisfy gfc_full_array_ref_p and have been checked
for dependencies. */
static tree
gfc_trans_array_copy (gfc_expr * expr1, gfc_expr * expr2)
{
tree dst, dlen, dtype;
tree src, slen, stype;
tree tmp;
dst = gfc_get_symbol_decl (expr1->symtree->n.sym);
src = gfc_get_symbol_decl (expr2->symtree->n.sym);
dtype = TREE_TYPE (dst);
if (POINTER_TYPE_P (dtype))
dtype = TREE_TYPE (dtype);
stype = TREE_TYPE (src);
if (POINTER_TYPE_P (stype))
stype = TREE_TYPE (stype);
if (!GFC_ARRAY_TYPE_P (dtype) || !GFC_ARRAY_TYPE_P (stype))
return NULL_TREE;
/* Determine the lengths of the arrays. */
dlen = GFC_TYPE_ARRAY_SIZE (dtype);
if (!dlen || TREE_CODE (dlen) != INTEGER_CST)
return NULL_TREE;
tmp = TYPE_SIZE_UNIT (gfc_get_element_type (dtype));
dlen = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
dlen, fold_convert (gfc_array_index_type, tmp));
slen = GFC_TYPE_ARRAY_SIZE (stype);
if (!slen || TREE_CODE (slen) != INTEGER_CST)
return NULL_TREE;
tmp = TYPE_SIZE_UNIT (gfc_get_element_type (stype));
slen = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
slen, fold_convert (gfc_array_index_type, tmp));
/* Sanity check that they are the same. This should always be
the case, as we should already have checked for conformance. */
if (!tree_int_cst_equal (slen, dlen))
return NULL_TREE;
return gfc_build_memcpy_call (dst, src, dlen);
}
/* Try to efficiently translate array(:) = (/ ... /). Return NULL if
this can't be done. EXPR1 is the destination/lhs for which
gfc_full_array_ref_p is true, and EXPR2 is the source/rhs. */
static tree
gfc_trans_array_constructor_copy (gfc_expr * expr1, gfc_expr * expr2)
{
unsigned HOST_WIDE_INT nelem;
tree dst, dtype;
tree src, stype;
tree len;
tree tmp;
nelem = gfc_constant_array_constructor_p (expr2->value.constructor);
if (nelem == 0)
return NULL_TREE;
dst = gfc_get_symbol_decl (expr1->symtree->n.sym);
dtype = TREE_TYPE (dst);
if (POINTER_TYPE_P (dtype))
dtype = TREE_TYPE (dtype);
if (!GFC_ARRAY_TYPE_P (dtype))
return NULL_TREE;
/* Determine the length of the array. */
len = GFC_TYPE_ARRAY_SIZE (dtype);
if (!len || TREE_CODE (len) != INTEGER_CST)
return NULL_TREE;
/* Confirm that the constructor is the same size. */
if (compare_tree_int (len, nelem) != 0)
return NULL_TREE;
tmp = TYPE_SIZE_UNIT (gfc_get_element_type (dtype));
len = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type, len,
fold_convert (gfc_array_index_type, tmp));
stype = gfc_typenode_for_spec (&expr2->ts);
src = gfc_build_constant_array_constructor (expr2, stype);
stype = TREE_TYPE (src);
if (POINTER_TYPE_P (stype))
stype = TREE_TYPE (stype);
return gfc_build_memcpy_call (dst, src, len);
}
/* Tells whether the expression is to be treated as a variable reference. */
static bool
expr_is_variable (gfc_expr *expr)
{
gfc_expr *arg;
gfc_component *comp;
gfc_symbol *func_ifc;
if (expr->expr_type == EXPR_VARIABLE)
return true;
arg = gfc_get_noncopying_intrinsic_argument (expr);
if (arg)
{
gcc_assert (expr->value.function.isym->id == GFC_ISYM_TRANSPOSE);
return expr_is_variable (arg);
}
/* A data-pointer-returning function should be considered as a variable
too. */
if (expr->expr_type == EXPR_FUNCTION
&& expr->ref == NULL)
{
if (expr->value.function.isym != NULL)
return false;
if (expr->value.function.esym != NULL)
{
func_ifc = expr->value.function.esym;
goto found_ifc;
}
else
{
gcc_assert (expr->symtree);
func_ifc = expr->symtree->n.sym;
goto found_ifc;
}
gcc_unreachable ();
}
comp = gfc_get_proc_ptr_comp (expr);
if ((expr->expr_type == EXPR_PPC || expr->expr_type == EXPR_FUNCTION)
&& comp)
{
func_ifc = comp->ts.interface;
goto found_ifc;
}
if (expr->expr_type == EXPR_COMPCALL)
{
gcc_assert (!expr->value.compcall.tbp->is_generic);
func_ifc = expr->value.compcall.tbp->u.specific->n.sym;
goto found_ifc;
}
return false;
found_ifc:
gcc_assert (func_ifc->attr.function
&& func_ifc->result != NULL);
return func_ifc->result->attr.pointer;
}
/* Is the lhs OK for automatic reallocation? */
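/* Both of the following qualify (illustrative examples):

     character(:), allocatable :: str   - allocatable, no reference
     x%s, where s is an allocatable component and the last reference */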
static bool
is_scalar_reallocatable_lhs (gfc_expr *expr)
{
gfc_ref * ref;
/* An allocatable variable with no reference. */
if (expr->symtree->n.sym->attr.allocatable
&& !expr->ref)
return true;
/* All that can be left are allocatable components. */
if ((expr->symtree->n.sym->ts.type != BT_DERIVED
&& expr->symtree->n.sym->ts.type != BT_CLASS)
|| !expr->symtree->n.sym->ts.u.derived->attr.alloc_comp)
return false;
/* Find an allocatable component ref last. */
for (ref = expr->ref; ref; ref = ref->next)
if (ref->type == REF_COMPONENT
&& !ref->next
&& ref->u.c.component->attr.allocatable)
return true;
return false;
}
/* Allocate or reallocate scalar lhs, as necessary. */
static void
alloc_scalar_allocatable_for_assignment (stmtblock_t *block,
tree string_length,
gfc_expr *expr1,
gfc_expr *expr2)
{
tree cond;
tree tmp;
tree size;
tree size_in_bytes;
tree jump_label1;
tree jump_label2;
gfc_se lse;
if (!expr1 || expr1->rank)
return;
if (!expr2 || expr2->rank)
return;
realloc_lhs_warning (expr2->ts.type, false, &expr2->where);
/* Since this is a scalar lhs, we can afford to do this. That is,
there is no risk of side effects being repeated. */
gfc_init_se (&lse, NULL);
lse.want_pointer = 1;
gfc_conv_expr (&lse, expr1);
jump_label1 = gfc_build_label_decl (NULL_TREE);
jump_label2 = gfc_build_label_decl (NULL_TREE);
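/* Sketch of the code built below (illustrative, not the literal output):

     if (lhs != NULL) goto label1;
     lhs = malloc (size_in_bytes);    (calloc for allocatable components)
     goto label2;                     (deferred-length characters only)
   label1:
     if (lhs_len != rhs_len)          (deferred-length characters only)
       lhs = realloc (lhs, size_in_bytes);
   label2:
     lhs_len = rhs_len;               (deferred-length characters only)  */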
/* Do the allocation if the lhs is NULL. Otherwise go to label 1. */
tmp = build_int_cst (TREE_TYPE (lse.expr), 0);
cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
lse.expr, tmp);
tmp = build3_v (COND_EXPR, cond,
build1_v (GOTO_EXPR, jump_label1),
build_empty_stmt (input_location));
gfc_add_expr_to_block (block, tmp);
if (expr1->ts.type == BT_CHARACTER && expr1->ts.deferred)
{
/* Use the rhs string length and the lhs element size. */
size = string_length;
tmp = TREE_TYPE (gfc_typenode_for_spec (&expr1->ts));
tmp = TYPE_SIZE_UNIT (tmp);
size_in_bytes = fold_build2_loc (input_location, MULT_EXPR,
TREE_TYPE (tmp), tmp,
fold_convert (TREE_TYPE (tmp), size));
}
else
{
/* Otherwise use the length in bytes of the rhs. */
size = TYPE_SIZE_UNIT (gfc_typenode_for_spec (&expr1->ts));
size_in_bytes = size;
}
size_in_bytes = fold_build2_loc (input_location, MAX_EXPR, size_type_node,
size_in_bytes, size_one_node);
if (expr1->ts.type == BT_DERIVED && expr1->ts.u.derived->attr.alloc_comp)
{
tmp = build_call_expr_loc (input_location,
builtin_decl_explicit (BUILT_IN_CALLOC),
2, build_one_cst (size_type_node),
size_in_bytes);
tmp = fold_convert (TREE_TYPE (lse.expr), tmp);
gfc_add_modify (block, lse.expr, tmp);
}
else
{
tmp = build_call_expr_loc (input_location,
builtin_decl_explicit (BUILT_IN_MALLOC),
1, size_in_bytes);
tmp = fold_convert (TREE_TYPE (lse.expr), tmp);
gfc_add_modify (block, lse.expr, tmp);
}
if (expr1->ts.type == BT_CHARACTER && expr1->ts.deferred)
{
/* Deferred characters need checking for lhs and rhs string
length. Other deferred parameter variables will have to
come here too. */
tmp = build1_v (GOTO_EXPR, jump_label2);
gfc_add_expr_to_block (block, tmp);
}
tmp = build1_v (LABEL_EXPR, jump_label1);
gfc_add_expr_to_block (block, tmp);
/* For a deferred length character, reallocate if lengths of lhs and
rhs are different. */
if (expr1->ts.type == BT_CHARACTER && expr1->ts.deferred)
{
cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
expr1->ts.u.cl->backend_decl, size);
/* Jump past the realloc if the lengths are the same. */
tmp = build3_v (COND_EXPR, cond,
build1_v (GOTO_EXPR, jump_label2),
build_empty_stmt (input_location));
gfc_add_expr_to_block (block, tmp);
tmp = build_call_expr_loc (input_location,
builtin_decl_explicit (BUILT_IN_REALLOC),
2, fold_convert (pvoid_type_node, lse.expr),
size_in_bytes);
tmp = fold_convert (TREE_TYPE (lse.expr), tmp);
gfc_add_modify (block, lse.expr, tmp);
tmp = build1_v (LABEL_EXPR, jump_label2);
gfc_add_expr_to_block (block, tmp);
/* Update the lhs character length. */
size = string_length;
if (TREE_CODE (expr1->ts.u.cl->backend_decl) == VAR_DECL)
gfc_add_modify (block, expr1->ts.u.cl->backend_decl, size);
else
gfc_add_modify (block, lse.string_length, size);
}
}
/* Check for assignments of the type
a = a + 4
to make sure we do not check for reallocation unnecessarily. */
static bool
is_runtime_conformable (gfc_expr *expr1, gfc_expr *expr2)
{
gfc_actual_arglist *a;
gfc_expr *e1, *e2;
switch (expr2->expr_type)
{
case EXPR_VARIABLE:
return gfc_dep_compare_expr (expr1, expr2) == 0;
case EXPR_FUNCTION:
if (expr2->value.function.esym
&& expr2->value.function.esym->attr.elemental)
{
for (a = expr2->value.function.actual; a != NULL; a = a->next)
{
e1 = a->expr;
if (e1 && e1->rank > 0 && !is_runtime_conformable (expr1, e1))
return false;
}
return true;
}
else if (expr2->value.function.isym
&& expr2->value.function.isym->elemental)
{
for (a = expr2->value.function.actual; a != NULL; a = a->next)
{
e1 = a->expr;
if (e1 && e1->rank > 0 && !is_runtime_conformable (expr1, e1))
return false;
}
return true;
}
break;
case EXPR_OP:
switch (expr2->value.op.op)
{
case INTRINSIC_NOT:
case INTRINSIC_UPLUS:
case INTRINSIC_UMINUS:
case INTRINSIC_PARENTHESES:
return is_runtime_conformable (expr1, expr2->value.op.op1);
case INTRINSIC_PLUS:
case INTRINSIC_MINUS:
case INTRINSIC_TIMES:
case INTRINSIC_DIVIDE:
case INTRINSIC_POWER:
case INTRINSIC_AND:
case INTRINSIC_OR:
case INTRINSIC_EQV:
case INTRINSIC_NEQV:
case INTRINSIC_EQ:
case INTRINSIC_NE:
case INTRINSIC_GT:
case INTRINSIC_GE:
case INTRINSIC_LT:
case INTRINSIC_LE:
case INTRINSIC_EQ_OS:
case INTRINSIC_NE_OS:
case INTRINSIC_GT_OS:
case INTRINSIC_GE_OS:
case INTRINSIC_LT_OS:
case INTRINSIC_LE_OS:
e1 = expr2->value.op.op1;
e2 = expr2->value.op.op2;
if (e1->rank == 0 && e2->rank > 0)
return is_runtime_conformable (expr1, e2);
else if (e1->rank > 0 && e2->rank == 0)
return is_runtime_conformable (expr1, e1);
else if (e1->rank > 0 && e2->rank > 0)
return is_runtime_conformable (expr1, e1)
&& is_runtime_conformable (expr1, e2);
break;
default:
break;
}
break;
default:
break;
}
return false;
}
/* Subroutine of gfc_trans_assignment that actually scalarizes the
assignment. EXPR1 is the destination/LHS and EXPR2 is the source/RHS.
init_flag indicates initialization expressions and dealloc whether the
lhs needs deallocating prior to the assignment (if in doubt, set true). */
static tree
gfc_trans_assignment_1 (gfc_expr * expr1, gfc_expr * expr2, bool init_flag,
bool dealloc)
{
gfc_se lse;
gfc_se rse;
gfc_ss *lss;
gfc_ss *lss_section;
gfc_ss *rss;
gfc_loopinfo loop;
tree tmp;
stmtblock_t block;
stmtblock_t body;
bool l_is_temp;
bool scalar_to_array;
tree string_length;
int n;
/* Assignment of the form lhs = rhs. */
gfc_start_block (&block);
gfc_init_se (&lse, NULL);
gfc_init_se (&rse, NULL);
/* Walk the lhs. */
lss = gfc_walk_expr (expr1);
if (gfc_is_reallocatable_lhs (expr1)
&& !(expr2->expr_type == EXPR_FUNCTION
&& expr2->value.function.isym != NULL))
lss->is_alloc_lhs = 1;
rss = NULL;
if (lss != gfc_ss_terminator)
{
/* The assignment needs scalarization. */
lss_section = lss;
/* Find a non-scalar SS from the lhs. */
while (lss_section != gfc_ss_terminator
&& lss_section->info->type != GFC_SS_SECTION)
lss_section = lss_section->next;
gcc_assert (lss_section != gfc_ss_terminator);
/* Initialize the scalarizer. */
gfc_init_loopinfo (&loop);
/* Walk the rhs. */
rss = gfc_walk_expr (expr2);
if (rss == gfc_ss_terminator)
/* The rhs is scalar. Add an ss for the expression. */
rss = gfc_get_scalar_ss (gfc_ss_terminator, expr2);
/* Associate the SS with the loop. */
gfc_add_ss_to_loop (&loop, lss);
gfc_add_ss_to_loop (&loop, rss);
/* Calculate the bounds of the scalarization. */
gfc_conv_ss_startstride (&loop);
/* Enable loop reversal. */
for (n = 0; n < GFC_MAX_DIMENSIONS; n++)
loop.reverse[n] = GFC_ENABLE_REVERSE;
/* Resolve any data dependencies in the statement. */
gfc_conv_resolve_dependencies (&loop, lss, rss);
/* Setup the scalarizing loops. */
gfc_conv_loop_setup (&loop, &expr2->where);
/* Setup the gfc_se structures. */
gfc_copy_loopinfo_to_se (&lse, &loop);
gfc_copy_loopinfo_to_se (&rse, &loop);
rse.ss = rss;
gfc_mark_ss_chain_used (rss, 1);
if (loop.temp_ss == NULL)
{
lse.ss = lss;
gfc_mark_ss_chain_used (lss, 1);
}
else
{
lse.ss = loop.temp_ss;
gfc_mark_ss_chain_used (lss, 3);
gfc_mark_ss_chain_used (loop.temp_ss, 3);
}
/* Allow the scalarizer to workshare array assignments. */
if ((ompws_flags & OMPWS_WORKSHARE_FLAG) && loop.temp_ss == NULL)
ompws_flags |= OMPWS_SCALARIZER_WS;
/* Start the scalarized loop body. */
gfc_start_scalarized_body (&loop, &body);
}
else
gfc_init_block (&body);
l_is_temp = (lss != gfc_ss_terminator && loop.temp_ss != NULL);
/* Translate the expression. */
gfc_conv_expr (&rse, expr2);
/* Stabilize a string length for temporaries. */
if (expr2->ts.type == BT_CHARACTER)
string_length = gfc_evaluate_now (rse.string_length, &rse.pre);
else
string_length = NULL_TREE;
if (l_is_temp)
{
gfc_conv_tmp_array_ref (&lse);
if (expr2->ts.type == BT_CHARACTER)
lse.string_length = string_length;
}
else
gfc_conv_expr (&lse, expr1);
/* Assignments of scalar derived types with allocatable components
to arrays must be done with a deep copy and the rhs temporary
must have its components deallocated afterwards. */
scalar_to_array = (expr2->ts.type == BT_DERIVED
&& expr2->ts.u.derived->attr.alloc_comp
&& !expr_is_variable (expr2)
&& !gfc_is_constant_expr (expr2)
&& expr1->rank && !expr2->rank);
if (scalar_to_array && dealloc)
{
tmp = gfc_deallocate_alloc_comp_no_caf (expr2->ts.u.derived, rse.expr, 0);
gfc_add_expr_to_block (&loop.post, tmp);
}
/* When assigning a character function result to a deferred-length variable,
the function call must happen before the (re)allocation of the lhs -
otherwise the character length of the result is not known.
NOTE: This relies on having the exact dependence of the length type
parameter available to the caller; gfortran saves it in the .mod files. */
if (gfc_option.flag_realloc_lhs && expr2->ts.type == BT_CHARACTER
&& expr1->ts.deferred)
gfc_add_block_to_block (&block, &rse.pre);
tmp = gfc_trans_scalar_assign (&lse, &rse, expr1->ts,
l_is_temp || init_flag,
expr_is_variable (expr2) || scalar_to_array
|| expr2->expr_type == EXPR_ARRAY, dealloc);
gfc_add_expr_to_block (&body, tmp);
if (lss == gfc_ss_terminator)
{
/* F2003: Add the code for reallocation on assignment. */
if (gfc_option.flag_realloc_lhs
&& is_scalar_reallocatable_lhs (expr1))
alloc_scalar_allocatable_for_assignment (&block, rse.string_length,
expr1, expr2);
/* Use the scalar assignment as is. */
gfc_add_block_to_block (&block, &body);
}
else
{
gcc_assert (lse.ss == gfc_ss_terminator
&& rse.ss == gfc_ss_terminator);
if (l_is_temp)
{
gfc_trans_scalarized_loop_boundary (&loop, &body);
/* We need to copy the temporary to the actual lhs. */
gfc_init_se (&lse, NULL);
gfc_init_se (&rse, NULL);
gfc_copy_loopinfo_to_se (&lse, &loop);
gfc_copy_loopinfo_to_se (&rse, &loop);
rse.ss = loop.temp_ss;
lse.ss = lss;
gfc_conv_tmp_array_ref (&rse);
gfc_conv_expr (&lse, expr1);
gcc_assert (lse.ss == gfc_ss_terminator
&& rse.ss == gfc_ss_terminator);
if (expr2->ts.type == BT_CHARACTER)
rse.string_length = string_length;
tmp = gfc_trans_scalar_assign (&lse, &rse, expr1->ts,
false, false, dealloc);
gfc_add_expr_to_block (&body, tmp);
}
/* F2003: Allocate or reallocate lhs of allocatable array. */
if (gfc_option.flag_realloc_lhs
&& gfc_is_reallocatable_lhs (expr1)
&& !gfc_expr_attr (expr1).codimension
&& !gfc_is_coindexed (expr1)
&& expr2->rank
&& !is_runtime_conformable (expr1, expr2))
{
realloc_lhs_warning (expr1->ts.type, true, &expr1->where);
ompws_flags &= ~OMPWS_SCALARIZER_WS;
tmp = gfc_alloc_allocatable_for_assignment (&loop, expr1, expr2);
if (tmp != NULL_TREE)
gfc_add_expr_to_block (&loop.code[expr1->rank - 1], tmp);
}
/* Generate the copying loops. */
gfc_trans_scalarizing_loops (&loop, &body);
/* Wrap the whole thing up. */
gfc_add_block_to_block (&block, &loop.pre);
gfc_add_block_to_block (&block, &loop.post);
gfc_cleanup_loop (&loop);
}
return gfc_finish_block (&block);
}
/* Check whether EXPR is a copyable array. */
static bool
copyable_array_p (gfc_expr * expr)
{
if (expr->expr_type != EXPR_VARIABLE)
return false;
/* First check it's an array. */
if (expr->rank < 1 || !expr->ref || expr->ref->next)
return false;
if (!gfc_full_array_ref_p (expr->ref, NULL))
return false;
/* Next check that it's of a simple enough type. */
switch (expr->ts.type)
{
case BT_INTEGER:
case BT_REAL:
case BT_COMPLEX:
case BT_LOGICAL:
return true;
case BT_CHARACTER:
return false;
case BT_DERIVED:
return !expr->ts.u.derived->attr.alloc_comp;
default:
break;
}
return false;
}
/* Translate an assignment. */
tree
gfc_trans_assignment (gfc_expr * expr1, gfc_expr * expr2, bool init_flag,
bool dealloc)
{
tree tmp;
/* Special case a single function returning an array. */
if (expr2->expr_type == EXPR_FUNCTION && expr2->rank > 0)
{
tmp = gfc_trans_arrayfunc_assign (expr1, expr2);
if (tmp)
return tmp;
}
/* Special case assigning an array to zero. */
if (copyable_array_p (expr1)
&& is_zero_initializer_p (expr2))
{
tmp = gfc_trans_zero_assign (expr1);
if (tmp)
return tmp;
}
/* Special case copying one array to another. */
if (copyable_array_p (expr1)
&& copyable_array_p (expr2)
&& gfc_compare_types (&expr1->ts, &expr2->ts)
&& !gfc_check_dependency (expr1, expr2, 0))
{
tmp = gfc_trans_array_copy (expr1, expr2);
if (tmp)
return tmp;
}
/* Special case initializing an array from a constant array constructor. */
if (copyable_array_p (expr1)
&& expr2->expr_type == EXPR_ARRAY
&& gfc_compare_types (&expr1->ts, &expr2->ts))
{
tmp = gfc_trans_array_constructor_copy (expr1, expr2);
if (tmp)
return tmp;
}
/* Fall back to the scalarizer to generate explicit loops. */
return gfc_trans_assignment_1 (expr1, expr2, init_flag, dealloc);
}
tree
gfc_trans_init_assign (gfc_code * code)
{
return gfc_trans_assignment (code->expr1, code->expr2, true, false);
}
tree
gfc_trans_assign (gfc_code * code)
{
return gfc_trans_assignment (code->expr1, code->expr2, false, true);
}
| gpl-2.0 |
heluxie/LTP | lib/tst_device.c | 2 | 4897 | /*
* Copyright (C) 2014 Cyril Hrubis chrubis@suse.cz
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
* Further, this software is distributed without any warranty that it is
* free of the rightful claim of any third person regarding infringement
* or the like. Any license provided herein, whether implied or
* otherwise, applies only to this software file. Patent licenses, if
* any, provided herein do not apply to combinations of this program with
* other software, or any other product whatsoever.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <errno.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <fcntl.h>
#include <linux/loop.h>
#include "test.h"
#include "safe_macros.h"
#ifndef LOOP_CTL_GET_FREE
# define LOOP_CTL_GET_FREE 0x4C82
#endif
#define LOOP_CONTROL_FILE "/dev/loop-control"
#define DEV_FILE "test_dev.img"
static char dev_path[1024];
static int device_acquired;
static const char *dev_variants[] = {
"/dev/loop%i",
"/dev/loop/%i"
};
static int set_dev_path(int dev)
{
unsigned int i;
struct stat st;
for (i = 0; i < ARRAY_SIZE(dev_variants); i++) {
snprintf(dev_path, sizeof(dev_path), dev_variants[i], dev);
if (stat(dev_path, &st) == 0 && S_ISBLK(st.st_mode))
return 1;
}
return 0;
}
static int find_free_loopdev(void)
{
int ctl_fd, dev_fd, rc, i;
struct loop_info loopinfo;
/* since Linux 3.1 */
ctl_fd = open(LOOP_CONTROL_FILE, O_RDWR);
/* open() may legitimately return fd 0 */
if (ctl_fd >= 0) {
rc = ioctl(ctl_fd, LOOP_CTL_GET_FREE);
close(ctl_fd);
if (rc >= 0) {
set_dev_path(rc);
tst_resm(TINFO, "Found free device '%s'", dev_path);
return 0;
}
tst_resm(TINFO, "Couldn't find free loop device");
return 1;
}
switch (errno) {
case ENOENT:
break;
default:
tst_resm(TBROK | TERRNO, "Failed to open " LOOP_CONTROL_FILE);
}
/*
* The older way is to iterate over /dev/loop%i and /dev/loop/%i and try
* LOOP_GET_STATUS ioctl() which fails for free loop devices.
*/
for (i = 0; i < 256; i++) {
if (!set_dev_path(i))
continue;
dev_fd = open(dev_path, O_RDONLY);
if (dev_fd < 0)
continue;
if (ioctl(dev_fd, LOOP_GET_STATUS, &loopinfo) == 0) {
tst_resm(TINFO, "Device '%s' in use", dev_path);
} else {
if (errno != ENXIO) {
close(dev_fd);
continue;
}
tst_resm(TINFO, "Found free device '%s'", dev_path);
close(dev_fd);
return 0;
}
close(dev_fd);
}
tst_resm(TINFO, "No free devices found");
return 1;
}
static void attach_device(void (*cleanup_fn)(void),
const char *dev, const char *file)
{
int dev_fd, file_fd, err;
dev_fd = SAFE_OPEN(cleanup_fn, dev, O_RDWR);
file_fd = SAFE_OPEN(cleanup_fn, file, O_RDWR);
if (ioctl(dev_fd, LOOP_SET_FD, file_fd) < 0) {
err = errno;
close(dev_fd);
close(file_fd);
tst_brkm(TBROK, cleanup_fn,
"ioctl(%s, LOOP_SET_FD, %s) failed: %s",
dev, file, tst_strerrno(err));
}
close(dev_fd);
close(file_fd);
}
static void detach_device(void (*cleanup_fn)(void), const char *dev)
{
int dev_fd, err;
dev_fd = SAFE_OPEN(cleanup_fn, dev, O_RDONLY);
if (ioctl(dev_fd, LOOP_CLR_FD, 0) < 0) {
err = errno;
close(dev_fd);
tst_brkm(TBROK, cleanup_fn,
"ioctl(%s, LOOP_CLR_FD, 0) failed: %s",
dev, tst_strerrno(err));
}
close(dev_fd);
}
const char *tst_acquire_device(void (cleanup_fn)(void))
{
char *dev;
struct stat st;
if (device_acquired)
tst_brkm(TBROK, cleanup_fn, "Device already acquired");
if (!tst_tmpdir_created()) {
tst_brkm(TBROK, cleanup_fn,
"Cannot acquire device without tmpdir() created");
}
dev = getenv("LTP_DEV");
if (dev) {
tst_resm(TINFO, "Using test device LTP_DEV='%s'", dev);
SAFE_STAT(cleanup_fn, dev, &st);
if (!S_ISBLK(st.st_mode)) {
tst_brkm(TBROK, cleanup_fn,
"%s is not a block device", dev);
}
return dev;
}
if (tst_fill_file(DEV_FILE, 0, 1024, 20480)) {
tst_brkm(TBROK | TERRNO, cleanup_fn,
"Failed to create " DEV_FILE);
}
if (find_free_loopdev())
return NULL;
attach_device(cleanup_fn, dev_path, DEV_FILE);
device_acquired = 1;
return dev_path;
}
void tst_release_device(void (cleanup_fn)(void), const char *dev)
{
if (getenv("LTP_DEV"))
return;
/*
* Loop device was created -> we need to detach it.
*
* The file image is deleted in tst_rmdir();
*/
detach_device(cleanup_fn, dev);
device_acquired = 0;
}
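/*
 * Typical usage in a testcase (illustrative sketch; assumes tst_tmpdir()
 * was called during test setup):
 *
 *	const char *dev = tst_acquire_device(cleanup);
 *
 *	if (!dev)
 *		tst_brkm(TCONF, cleanup, "Failed to acquire test device");
 *	...
 *	tst_release_device(cleanup, dev);
 */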
| gpl-2.0 |
shinkumara/royss_shinkumara_kernel | mm/page_alloc.c | 2 | 170541 | /*
* linux/mm/page_alloc.c
*
* Manages the free list, the system allocates free pages here.
* Note that kmalloc() lives in slab.c
*
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
* Swap reorganised 29.12.95, Stephen Tweedie
* Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
* Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
* Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
* Zone balancing, Kanoj Sarcar, SGI, Jan 2000
* Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
* (lots of bits borrowed from Ingo Molnar & Andrew Morton)
*/
#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/page_cgroup.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/memory.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <linux/ftrace_event.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>
#include <linux/migrate.h>
#include <linux/page-debug-flags.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"
#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif
#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
* N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
* It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
* Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
* defined in <linux/topology.h>.
*/
DEFINE_PER_CPU(int, _numa_mem_); /* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
#endif
/*
* Array of node states.
*/
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
[N_POSSIBLE] = NODE_MASK_ALL,
[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
[N_CPU] = { { [0] = 1UL } },
#endif /* NUMA */
};
EXPORT_SYMBOL(node_states);
unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
/*
* When calculating the number of globally allowed dirty pages, there
* is a certain number of per-zone reserves that should not be
* considered dirtyable memory. This is the sum of those reserves
* over all existing zones that contribute dirtyable memory.
*/
unsigned long dirty_balance_reserve __read_mostly;
#ifdef CONFIG_FIX_MOVABLE_ZONE
unsigned long total_unmovable_pages __read_mostly;
#endif
int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
#ifdef CONFIG_PM_SLEEP
/*
* The following functions are used by the suspend/hibernate code to temporarily
* change gfp_allowed_mask in order to avoid using I/O during memory allocations
* while devices are suspended. To avoid races with the suspend/hibernate code,
* they should always be called with pm_mutex held (gfp_allowed_mask also should
* only be modified with pm_mutex held, unless the suspend/hibernate code is
* guaranteed not to run in parallel with that modification).
*/
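/*
 * Sketch of the intended call pattern (illustrative only; the exact
 * callers live in the suspend/hibernate core):
 *
 *	lock_system_sleep();		// acquires pm_mutex
 *	pm_restrict_gfp_mask();
 *	... devices suspended, no GFP_IOFS allocations can occur ...
 *	pm_restore_gfp_mask();
 *	unlock_system_sleep();
 */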
static gfp_t saved_gfp_mask;
void pm_restore_gfp_mask(void)
{
WARN_ON(!mutex_is_locked(&pm_mutex));
if (saved_gfp_mask) {
gfp_allowed_mask = saved_gfp_mask;
saved_gfp_mask = 0;
}
}
void pm_restrict_gfp_mask(void)
{
WARN_ON(!mutex_is_locked(&pm_mutex));
WARN_ON(saved_gfp_mask);
saved_gfp_mask = gfp_allowed_mask;
gfp_allowed_mask &= ~GFP_IOFS;
}
bool pm_suspended_storage(void)
{
if ((gfp_allowed_mask & GFP_IOFS) == GFP_IOFS)
return false;
return true;
}
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
int pageblock_order __read_mostly;
#endif
static void __free_pages_ok(struct page *page, unsigned int order);
/*
* results with 256, 32 in the lowmem_reserve sysctl:
* 1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
* 1G machine -> (16M dma, 784M normal, 224M high)
* NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
* HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
* HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
*
* TBD: should special case ZONE_DMA32 machines here - in those we normally
* don't need any ZONE_NORMAL reservation
*/
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
256,
#endif
#ifdef CONFIG_ZONE_DMA32
256,
#endif
#ifdef CONFIG_HIGHMEM
32,
#endif
32,
};
EXPORT_SYMBOL(totalram_pages);
#ifdef CONFIG_FIX_MOVABLE_ZONE
EXPORT_SYMBOL(total_unmovable_pages);
#endif
static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
"DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
"DMA32",
#endif
"Normal",
#ifdef CONFIG_HIGHMEM
"HighMem",
#endif
"Movable",
};
int min_free_kbytes = 1024;
int min_free_order_shift = 1;
static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
static unsigned long __initdata required_kernelcore;
static unsigned long __initdata required_movablecore;
static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif
int page_group_by_mobility_disabled __read_mostly;
static void set_pageblock_migratetype(struct page *page, int migratetype)
{
if (unlikely(page_group_by_mobility_disabled))
migratetype = MIGRATE_UNMOVABLE;
set_pageblock_flags_group(page, (unsigned long)migratetype,
PB_migrate, PB_migrate_end);
}
bool oom_killer_disabled __read_mostly;
#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
int ret = 0;
unsigned seq;
unsigned long pfn = page_to_pfn(page);
do {
seq = zone_span_seqbegin(zone);
if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
ret = 1;
else if (pfn < zone->zone_start_pfn)
ret = 1;
} while (zone_span_seqretry(zone, seq));
return ret;
}
static int page_is_consistent(struct zone *zone, struct page *page)
{
if (!pfn_valid_within(page_to_pfn(page)))
return 0;
if (zone != page_zone(page))
return 0;
return 1;
}
/*
* Temporary debugging check for pages not lying within a given zone.
*/
static int bad_range(struct zone *zone, struct page *page)
{
if (page_outside_zone_boundaries(zone, page))
return 1;
if (!page_is_consistent(zone, page))
return 1;
return 0;
}
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
return 0;
}
#endif
static void bad_page(struct page *page)
{
static unsigned long resume;
static unsigned long nr_shown;
static unsigned long nr_unshown;
/* Don't complain about poisoned pages */
if (PageHWPoison(page)) {
reset_page_mapcount(page); /* remove PageBuddy */
return;
}
/*
* Allow a burst of 60 reports, then keep quiet for that minute;
* or allow a steady drip of one report per second.
*/
if (nr_shown == 60) {
if (time_before(jiffies, resume)) {
nr_unshown++;
goto out;
}
if (nr_unshown) {
printk(KERN_ALERT
"BUG: Bad page state: %lu messages suppressed\n",
nr_unshown);
nr_unshown = 0;
}
nr_shown = 0;
}
if (nr_shown++ == 0)
resume = jiffies + 60 * HZ;
printk(KERN_ALERT "BUG: Bad page state in process %s pfn:%05lx\n",
current->comm, page_to_pfn(page));
dump_page(page);
print_modules();
dump_stack();
out:
/* Leave bad fields for debug, except PageBuddy could make trouble */
reset_page_mapcount(page); /* remove PageBuddy */
add_taint(TAINT_BAD_PAGE);
}
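/*
* Rate-limit walk-through (tracing the logic above): the first report of
* each window arms resume = jiffies + 60 * HZ; once 60 reports have been
* shown, further bad pages within that minute only bump nr_unshown, and
* when the minute expires a single "messages suppressed" summary is
* printed and both counters restart.
*/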
/*
* Higher-order pages are called "compound pages". They are structured thusly:
*
* The first PAGE_SIZE page is called the "head page".
*
* The remaining PAGE_SIZE pages are called "tail pages".
*
* All pages have PG_compound set. All tail pages have their ->first_page
* pointing at the head page.
*
* The first tail page's ->lru.next holds the address of the compound page's
* put_page() function. Its ->lru.prev holds the order of allocation.
* This usage means that zero-order pages may not be compound.
*/
static void free_compound_page(struct page *page)
{
__free_pages_ok(page, compound_order(page));
}
void prep_compound_page(struct page *page, unsigned long order)
{
int i;
int nr_pages = 1 << order;
set_compound_page_dtor(page, free_compound_page);
set_compound_order(page, order);
__SetPageHead(page);
for (i = 1; i < nr_pages; i++) {
struct page *p = page + i;
__SetPageTail(p);
set_page_count(p, 0);
p->first_page = page;
}
}
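/*
* Layout sketch (illustrative, using order 2): prep_compound_page()
* leaves page[0] as the head (PG_head set, compound order 2, destructor
* free_compound_page) and page[1..3] as tails, each with _count == 0 and
* ->first_page pointing back at page[0].
*/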
/* update __split_huge_page_refcount if you change this function */
static int destroy_compound_page(struct page *page, unsigned long order)
{
int i;
int nr_pages = 1 << order;
int bad = 0;
if (unlikely(compound_order(page) != order) ||
unlikely(!PageHead(page))) {
bad_page(page);
bad++;
}
__ClearPageHead(page);
for (i = 1; i < nr_pages; i++) {
struct page *p = page + i;
if (unlikely(!PageTail(p) || (p->first_page != page))) {
bad_page(page);
bad++;
}
__ClearPageTail(p);
}
return bad;
}
static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
{
int i;
/*
* clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
* and __GFP_HIGHMEM from hard or soft interrupt context.
*/
VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
for (i = 0; i < (1 << order); i++)
clear_highpage(page + i);
}
#ifdef CONFIG_DEBUG_PAGEALLOC
unsigned int _debug_guardpage_minorder;
static int __init debug_guardpage_minorder_setup(char *buf)
{
unsigned long res;
if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
printk(KERN_ERR "Bad debug_guardpage_minorder value\n");
return 0;
}
_debug_guardpage_minorder = res;
printk(KERN_INFO "Setting debug_guardpage_minorder to %lu\n", res);
return 0;
}
__setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);
static inline void set_page_guard_flag(struct page *page)
{
__set_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
}
static inline void clear_page_guard_flag(struct page *page)
{
__clear_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
}
#else
static inline void set_page_guard_flag(struct page *page) { }
static inline void clear_page_guard_flag(struct page *page) { }
#endif
static inline void set_page_order(struct page *page, int order)
{
set_page_private(page, order);
__SetPageBuddy(page);
}
static inline void rmv_page_order(struct page *page)
{
__ClearPageBuddy(page);
set_page_private(page, 0);
}
/*
* Locate the struct page for both the matching buddy in our
* pair (buddy1) and the combined order O+1 page they form (page).
*
* 1) Any buddy B1 will have an order O twin B2 which satisfies
* the following equation:
* B2 = B1 ^ (1 << O)
* For example, if the starting buddy (buddy2) is #8 its order
* 1 buddy is #10:
* B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
*
* 2) Any buddy B will have an order O+1 parent P which
* satisfies the following equation:
* P = B & ~(1 << O)
*
* Assumption: *_mem_map is contiguous at least up to MAX_ORDER
*/
static inline unsigned long
__find_buddy_index(unsigned long page_idx, unsigned int order)
{
return page_idx ^ (1 << order);
}
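/*
* Worked example (illustrative numbers, matching the comment above): for
* page_idx == 8, __find_buddy_index() yields 8 ^ (1 << 1) == 10 at order
* 1 and 8 ^ (1 << 3) == 0 at order 3; the order-2 parent of 8 and 10
* starts at 8 & 10 == 8.
*/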
/*
* This function checks whether a page is free && is the buddy
* we can coalesce with. A page and its buddy can be coalesced if
* (a) the buddy is not in a hole &&
* (b) the buddy is in the buddy system &&
* (c) a page and its buddy have the same order &&
* (d) a page and its buddy are in the same zone.
*
* For recording whether a page is in the buddy system, we set ->_mapcount to -2.
* Setting, clearing, and testing _mapcount == -2 is serialized by zone->lock.
*
* For recording page's order, we use page_private(page).
*/
static inline int page_is_buddy(struct page *page, struct page *buddy,
int order)
{
if (!pfn_valid_within(page_to_pfn(buddy)))
return 0;
if (page_zone_id(page) != page_zone_id(buddy))
return 0;
if (page_is_guard(buddy) && page_order(buddy) == order) {
VM_BUG_ON(page_count(buddy) != 0);
return 1;
}
if (PageBuddy(buddy) && page_order(buddy) == order) {
VM_BUG_ON(page_count(buddy) != 0);
return 1;
}
return 0;
}
/*
* Freeing function for a buddy system allocator.
*
* The concept of a buddy system is to maintain a direct-mapped table
* (containing bit values) for memory blocks of various "orders".
* The bottom level table contains the map for the smallest allocatable
* units of memory (here, pages), and each level above it describes
* pairs of units from the levels below, hence, "buddies".
* At a high level, all that happens here is marking the table entry
* at the bottom level available, and propagating the changes upward
* as necessary, plus some accounting needed to play nicely with other
* parts of the VM system.
* At each level, we keep a list of pages, which are heads of contiguous
* free page runs of length (1 << order) and marked with _mapcount -2. The
* page's order is recorded in the page_private(page) field.
* So when we are allocating or freeing one, we can derive the state of the
* other. That is, if we allocate a small block, and both were
* free, the remainder of the region must be split into blocks.
* If a block is freed, and its buddy is also free, then this
* triggers coalescing into a block of larger size.
*
* -- wli
*/
static inline void __free_one_page(struct page *page,
struct zone *zone, unsigned int order,
int migratetype)
{
unsigned long page_idx;
unsigned long combined_idx;
unsigned long uninitialized_var(buddy_idx);
struct page *buddy = NULL;
if (unlikely(PageCompound(page)))
if (unlikely(destroy_compound_page(page, order)))
return;
VM_BUG_ON(migratetype == -1);
page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
VM_BUG_ON(page_idx & ((1 << order) - 1));
VM_BUG_ON(bad_range(zone, page));
while (order < MAX_ORDER-1) {
buddy_idx = __find_buddy_index(page_idx, order);
buddy = page + (buddy_idx - page_idx);
if (!page_is_buddy(page, buddy, order))
break;
/*
* Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
* merge with it and move up one order.
*/
if (page_is_guard(buddy)) {
clear_page_guard_flag(buddy);
set_page_private(page, 0);
__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
} else {
list_del(&buddy->lru);
zone->free_area[order].nr_free--;
rmv_page_order(buddy);
}
combined_idx = buddy_idx & page_idx;
page = page + (combined_idx - page_idx);
page_idx = combined_idx;
order++;
}
set_page_order(page, order);
/*
* If this is not the largest possible page, check if the buddy
* of the next-highest order is free. If it is, it's possible
* that pages are being freed that will coalesce soon. In case
* that is happening, add the free page to the tail of the list
* so it's less likely to be used soon and more likely to be merged
* as a higher order page
*/
if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
struct page *higher_page, *higher_buddy;
combined_idx = buddy_idx & page_idx;
higher_page = page + (combined_idx - page_idx);
buddy_idx = __find_buddy_index(combined_idx, order + 1);
higher_buddy = higher_page + (buddy_idx - combined_idx);
if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
list_add_tail(&page->lru,
&zone->free_area[order].free_list[migratetype]);
goto out;
}
}
list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
out:
zone->free_area[order].nr_free++;
}
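/*
* Coalescing walk-through (illustrative numbers): freeing order-0
* page_idx 10 while idx 11 is free merges at combined_idx 11 & 10 == 10
* (order 1); if idxs 8-9 already form a free order-1 buddy, the loop
* merges once more at combined_idx 8 & 10 == 8 (order 2) before the page
* lands on free_area[2].
*/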
/*
* free_page_mlock() -- clean up attempts to free an mlocked() page.
* Page should not be on lru, so no need to fix that up.
* free_pages_check() will verify...
*/
static inline void free_page_mlock(struct page *page)
{
__dec_zone_page_state(page, NR_MLOCK);
__count_vm_event(UNEVICTABLE_MLOCKFREED);
}
static inline int free_pages_check(struct page *page)
{
if (unlikely(page_mapcount(page) |
(page->mapping != NULL) |
(atomic_read(&page->_count) != 0) |
(page->flags & PAGE_FLAGS_CHECK_AT_FREE) |
(mem_cgroup_bad_page_check(page)))) {
bad_page(page);
return 1;
}
if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
return 0;
}
/*
* Frees a number of pages from the PCP lists
* Assumes all pages on list are in same zone, and of same order.
* count is the number of pages to free.
*
* If the zone was previously in an "all pages pinned" state then look to
* see if this freeing clears that state.
*
* And clear the zone's pages_scanned counter, to hold off the "all pages are
* pinned" detection logic.
*/
static void free_pcppages_bulk(struct zone *zone, int count,
struct per_cpu_pages *pcp)
{
int migratetype = 0;
int batch_free = 0;
int to_free = count;
spin_lock(&zone->lock);
zone->all_unreclaimable = 0;
zone->pages_scanned = 0;
while (to_free) {
struct page *page;
struct list_head *list;
/*
* Remove pages from lists in a round-robin fashion. A
* batch_free count is maintained that is incremented when an
* empty list is encountered. This is so more pages are freed
* off fuller lists instead of spinning excessively around empty
* lists
*/
do {
batch_free++;
if (++migratetype == MIGRATE_PCPTYPES)
migratetype = 0;
list = &pcp->lists[migratetype];
} while (list_empty(list));
/* This is the only non-empty list. Free them all. */
if (batch_free == MIGRATE_PCPTYPES)
batch_free = to_free;
do {
page = list_entry(list->prev, struct page, lru);
/* must delete as __free_one_page list manipulates */
list_del(&page->lru);
/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
__free_one_page(page, zone, 0, page_private(page));
trace_mm_page_pcpu_drain(page, 0, page_private(page));
} while (--to_free && --batch_free && !list_empty(list));
}
__mod_zone_page_state(zone, NR_FREE_PAGES, count);
spin_unlock(&zone->lock);
}
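/*
* Round-robin sketch (hypothetical pcp state): with count == 6 and only
* the MIGRATE_MOVABLE list populated, rounds that probe the two empty
* lists first reach batch_free == MIGRATE_PCPTYPES, so batch_free is
* bumped to to_free and all remaining pages drain from the single
* non-empty list in one pass.
*/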
static void free_one_page(struct zone *zone, struct page *page, int order,
int migratetype)
{
spin_lock(&zone->lock);
zone->all_unreclaimable = 0;
zone->pages_scanned = 0;
__free_one_page(page, zone, order, migratetype);
__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
spin_unlock(&zone->lock);
}
static bool free_pages_prepare(struct page *page, unsigned int order)
{
int i;
int bad = 0;
trace_mm_page_free(page, order);
kmemcheck_free_shadow(page, order);
if (PageAnon(page))
page->mapping = NULL;
for (i = 0; i < (1 << order); i++)
bad += free_pages_check(page + i);
if (bad)
return false;
if (!PageHighMem(page)) {
debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
debug_check_no_obj_freed(page_address(page),
PAGE_SIZE << order);
}
arch_free_page(page, order);
kernel_map_pages(page, 1 << order, 0);
return true;
}
static void __free_pages_ok(struct page *page, unsigned int order)
{
unsigned long flags;
int wasMlocked = __TestClearPageMlocked(page);
if (!free_pages_prepare(page, order))
return;
local_irq_save(flags);
if (unlikely(wasMlocked))
free_page_mlock(page);
__count_vm_events(PGFREE, 1 << order);
free_one_page(page_zone(page), page, order,
get_pageblock_migratetype(page));
local_irq_restore(flags);
}
void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
{
unsigned int nr_pages = 1 << order;
unsigned int loop;
prefetchw(page);
for (loop = 0; loop < nr_pages; loop++) {
struct page *p = &page[loop];
if (loop + 1 < nr_pages)
prefetchw(p + 1);
__ClearPageReserved(p);
set_page_count(p, 0);
}
set_page_refcounted(page);
__free_pages(page, order);
}
#ifdef CONFIG_CMA
/* Free the whole pageblock and set its migration type to MIGRATE_CMA. */
void __init init_cma_reserved_pageblock(struct page *page)
{
unsigned i = pageblock_nr_pages;
struct page *p = page;
do {
__ClearPageReserved(p);
set_page_count(p, 0);
} while (++p, --i);
set_page_refcounted(page);
set_pageblock_migratetype(page, MIGRATE_CMA);
__free_pages(page, pageblock_order);
totalram_pages += pageblock_nr_pages;
}
#endif
/*
* The order of subdivision here is critical for the IO subsystem.
* Please do not alter this order without good reasons and regression
* testing. Specifically, as large blocks of memory are subdivided,
* the order in which smaller blocks are delivered depends on the order
* they're subdivided in this function. This is the primary factor
* influencing the order in which pages are delivered to the IO
* subsystem according to empirical testing, and this is also justified
* by considering the behavior of a buddy system containing a single
* large block of memory acted on by a series of small allocations.
* This behavior is a critical factor in sglist merging's success.
*
* -- wli
*/
static inline void expand(struct zone *zone, struct page *page,
int low, int high, struct free_area *area,
int migratetype)
{
unsigned long size = 1 << high;
while (high > low) {
area--;
high--;
size >>= 1;
VM_BUG_ON(bad_range(zone, &page[size]));
#ifdef CONFIG_DEBUG_PAGEALLOC
if (high < debug_guardpage_minorder()) {
/*
* Mark as guard pages (or page); this allows them to be
* merged back into the allocator when the buddy is freed.
* Corresponding page table entries are not touched, so the
* pages stay not present in the virtual address space.
*/
INIT_LIST_HEAD(&page[size].lru);
set_page_guard_flag(&page[size]);
set_page_private(&page[size], high);
/* Guard pages are not available for any usage */
__mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << high));
continue;
}
#endif
list_add(&page[size].lru, &area->free_list[migratetype]);
area->nr_free++;
set_page_order(&page[size], high);
}
}
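/*
* Split example (illustrative): expand(zone, page, 0, 3, area, mt) on an
* order-3 block queues &page[4] at order 2, &page[2] at order 1 and
* &page[1] at order 0, leaving page itself as the order-0 allocation for
* the caller.
*/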
/*
* This page is about to be returned from the page allocator
*/
static inline int check_new_page(struct page *page)
{
if (unlikely(page_mapcount(page) |
(page->mapping != NULL) |
(atomic_read(&page->_count) != 0) |
(page->flags & PAGE_FLAGS_CHECK_AT_PREP) |
(mem_cgroup_bad_page_check(page)))) {
bad_page(page);
return 1;
}
return 0;
}
static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
{
int i;
for (i = 0; i < (1 << order); i++) {
struct page *p = page + i;
if (unlikely(check_new_page(p)))
return 1;
}
set_page_private(page, 0);
set_page_refcounted(page);
arch_alloc_page(page, order);
kernel_map_pages(page, 1 << order, 1);
if (gfp_flags & __GFP_ZERO)
prep_zero_page(page, order, gfp_flags);
if (order && (gfp_flags & __GFP_COMP))
prep_compound_page(page, order);
return 0;
}
/*
* Go through the free lists for the given migratetype and remove
* the smallest available page from the freelists
*/
static inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
int migratetype)
{
unsigned int current_order;
struct free_area * area;
struct page *page;
/* Find a page of the appropriate size in the preferred list */
for (current_order = order; current_order < MAX_ORDER; ++current_order) {
area = &(zone->free_area[current_order]);
if (list_empty(&area->free_list[migratetype]))
continue;
page = list_entry(area->free_list[migratetype].next,
struct page, lru);
list_del(&page->lru);
rmv_page_order(page);
area->nr_free--;
expand(zone, page, order, current_order, area, migratetype);
return page;
}
return NULL;
}
/*
* This array describes the order in which free lists are fallen back to
* when the free lists for the desired migratetype are depleted
*/
static int fallbacks[MIGRATE_TYPES][4] = {
[MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
#ifdef CONFIG_CMA
[MIGRATE_MOVABLE] = { MIGRATE_CMA, MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
[MIGRATE_CMA] = { MIGRATE_RESERVE }, /* Never used */
#else
[MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
#endif
[MIGRATE_RESERVE] = { MIGRATE_RESERVE }, /* Never used */
[MIGRATE_ISOLATE] = { MIGRATE_RESERVE }, /* Never used */
};
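/*
* Example walk (assuming CONFIG_CMA is not set): a MIGRATE_UNMOVABLE
* request with an empty unmovable free list tries MIGRATE_RECLAIMABLE,
* then MIGRATE_MOVABLE, and stops at MIGRATE_RESERVE, which
* __rmqueue_fallback() below leaves for the MIGRATE_RESERVE retry in
* __rmqueue().
*/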
/*
* Move the free pages in a range to the free lists of the requested type.
* Note that start_page and end_page are not aligned on a pageblock
* boundary. If alignment is required, use move_freepages_block()
*/
static int move_freepages(struct zone *zone,
struct page *start_page, struct page *end_page,
int migratetype)
{
struct page *page;
unsigned long order;
int pages_moved = 0;
#ifndef CONFIG_HOLES_IN_ZONE
/*
* page_zone is not safe to call in this context when
* CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
* anyway as we check zone boundaries in move_freepages_block().
* Remove at a later date when no bug reports exist related to
* grouping pages by mobility
*/
BUG_ON(page_zone(start_page) != page_zone(end_page));
#endif
for (page = start_page; page <= end_page;) {
/* Make sure we are not inadvertently changing nodes */
VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));
if (!pfn_valid_within(page_to_pfn(page))) {
page++;
continue;
}
if (!PageBuddy(page)) {
page++;
continue;
}
order = page_order(page);
list_move(&page->lru,
&zone->free_area[order].free_list[migratetype]);
page += 1 << order;
pages_moved += 1 << order;
}
return pages_moved;
}
static int move_freepages_block(struct zone *zone, struct page *page,
int migratetype)
{
unsigned long start_pfn, end_pfn;
struct page *start_page, *end_page;
start_pfn = page_to_pfn(page);
start_pfn = start_pfn & ~(pageblock_nr_pages-1);
start_page = pfn_to_page(start_pfn);
end_page = start_page + pageblock_nr_pages - 1;
end_pfn = start_pfn + pageblock_nr_pages - 1;
/* Do not cross zone boundaries */
if (start_pfn < zone->zone_start_pfn)
start_page = page;
if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
return 0;
return move_freepages(zone, start_page, end_page, migratetype);
}
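/*
* Alignment sketch (assuming pageblock_nr_pages == 1024): a page at pfn
* 5000 rounds down to start_pfn 4096, so the candidate block spans pfns
* 4096-5119; if that end crosses the zone span, the function returns 0
* without moving anything.
*/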
static void change_pageblock_range(struct page *pageblock_page,
int start_order, int migratetype)
{
int nr_pageblocks = 1 << (start_order - pageblock_order);
while (nr_pageblocks--) {
set_pageblock_migratetype(pageblock_page, migratetype);
pageblock_page += pageblock_nr_pages;
}
}
/* Remove an element from the buddy allocator from the fallback list */
static inline struct page *
__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
{
struct free_area * area;
int current_order;
struct page *page;
int migratetype, i;
/* Find the largest possible block of pages in the other list */
for (current_order = MAX_ORDER-1; current_order >= order;
--current_order) {
for (i = 0;; i++) {
migratetype = fallbacks[start_migratetype][i];
/* MIGRATE_RESERVE handled later if necessary */
if (migratetype == MIGRATE_RESERVE)
break;
area = &(zone->free_area[current_order]);
if (list_empty(&area->free_list[migratetype]))
continue;
page = list_entry(area->free_list[migratetype].next,
struct page, lru);
area->nr_free--;
/*
* If breaking a large block of pages, move all free
* pages to the preferred allocation list. If falling
* back for a reclaimable kernel allocation, be more
* aggressive about taking ownership of free pages
*
* On the other hand, never change migration
* type of MIGRATE_CMA pageblocks nor move CMA
* pages on different free lists. We don't
* want unmovable pages to be allocated from
* MIGRATE_CMA areas.
*/
if (!is_migrate_cma(migratetype) &&
(unlikely(current_order >= pageblock_order / 2) ||
start_migratetype == MIGRATE_RECLAIMABLE ||
page_group_by_mobility_disabled)) {
int pages;
pages = move_freepages_block(zone, page,
start_migratetype);
/* Claim the whole block if over half of it is free */
if (pages >= (1 << (pageblock_order-1)) ||
page_group_by_mobility_disabled)
set_pageblock_migratetype(page,
start_migratetype);
migratetype = start_migratetype;
}
/* Remove the page from the freelists */
list_del(&page->lru);
rmv_page_order(page);
/* Take ownership for orders >= pageblock_order */
if (current_order >= pageblock_order &&
!is_migrate_cma(migratetype))
change_pageblock_range(page, current_order,
start_migratetype);
expand(zone, page, order, current_order, area,
is_migrate_cma(migratetype)
? migratetype : start_migratetype);
trace_mm_page_alloc_extfrag(page, order, current_order,
start_migratetype, migratetype);
return page;
}
}
return NULL;
}
/*
* Do the hard work of removing an element from the buddy allocator.
* Call me with the zone->lock already held.
*/
static struct page *__rmqueue(struct zone *zone, unsigned int order,
int migratetype)
{
struct page *page;
retry_reserve:
page = __rmqueue_smallest(zone, order, migratetype);
if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
page = __rmqueue_fallback(zone, order, migratetype);
/*
* Use MIGRATE_RESERVE rather than fail an allocation. goto
* is used because __rmqueue_smallest is an inline function
* and we want just one call site
*/
if (!page) {
migratetype = MIGRATE_RESERVE;
goto retry_reserve;
}
}
trace_mm_page_alloc_zone_locked(page, order, migratetype);
return page;
}
/*
* Obtain a specified number of elements from the buddy allocator, all under
* a single hold of the lock, for efficiency. Add them to the supplied list.
* Returns the number of new pages which were placed at *list.
*/
static int rmqueue_bulk(struct zone *zone, unsigned int order,
unsigned long count, struct list_head *list,
int migratetype, int cold)
{
int mt = migratetype, i;
spin_lock(&zone->lock);
for (i = 0; i < count; ++i) {
struct page *page = __rmqueue(zone, order, migratetype);
if (unlikely(page == NULL))
break;
/*
* Split buddy pages returned by expand() are received here
* in physical page order. The page is added to the caller's
* list and the list head then moves forward. From the caller's
* perspective, the linked list is ordered by page number in
* some conditions. This is useful for IO devices that can
* merge IO requests if the physical pages are ordered
* properly.
*/
if (likely(cold == 0))
list_add(&page->lru, list);
else
list_add_tail(&page->lru, list);
if (IS_ENABLED(CONFIG_CMA)) {
mt = get_pageblock_migratetype(page);
if (!is_migrate_cma(mt) && mt != MIGRATE_ISOLATE)
mt = migratetype;
}
set_page_private(page, mt);
list = &page->lru;
}
__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
spin_unlock(&zone->lock);
return i;
}
#ifdef CONFIG_NUMA
/*
* Called from the vmstat counter updater to drain pagesets of this
* currently executing processor on remote nodes after they have
* expired.
*
* Note that this function must be called with the thread pinned to
* a single processor.
*/
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
unsigned long flags;
int to_drain;
local_irq_save(flags);
if (pcp->count >= pcp->batch)
to_drain = pcp->batch;
else
to_drain = pcp->count;
free_pcppages_bulk(zone, to_drain, pcp);
pcp->count -= to_drain;
local_irq_restore(flags);
}
#endif
/*
* Drain pages of the indicated processor.
*
* The processor must either be the current processor and the
* thread pinned to the current processor or a processor that
* is not online.
*/
static void drain_pages(unsigned int cpu)
{
unsigned long flags;
struct zone *zone;
for_each_populated_zone(zone) {
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
local_irq_save(flags);
pset = per_cpu_ptr(zone->pageset, cpu);
pcp = &pset->pcp;
if (pcp->count) {
free_pcppages_bulk(zone, pcp->count, pcp);
pcp->count = 0;
}
local_irq_restore(flags);
}
}
/*
* Spill all of this CPU's per-cpu pages back into the buddy allocator.
*/
void drain_local_pages(void *arg)
{
drain_pages(smp_processor_id());
}
/*
* Spill all the per-cpu pages from all CPUs back into the buddy allocator.
*
* Note that this code is protected against sending an IPI to an offline
* CPU but does not guarantee sending an IPI to newly hotplugged CPUs:
* on_each_cpu_mask() blocks hotplug and won't talk to offlined CPUs but
* nothing keeps CPUs from showing up after we populated the cpumask and
* before the call to on_each_cpu_mask().
*/
void drain_all_pages(void)
{
int cpu;
struct per_cpu_pageset *pcp;
struct zone *zone;
/*
* Allocate in the BSS so we won't require allocation in
* direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
*/
static cpumask_t cpus_with_pcps;
/*
* We don't care about racing with CPU hotplug events,
* as the offline notification will cause the notified
* cpu to drain that CPU's pcps, and on_each_cpu_mask
* disables preemption as part of its processing.
*/
for_each_online_cpu(cpu) {
bool has_pcps = false;
for_each_populated_zone(zone) {
pcp = per_cpu_ptr(zone->pageset, cpu);
if (pcp->pcp.count) {
has_pcps = true;
break;
}
}
if (has_pcps)
cpumask_set_cpu(cpu, &cpus_with_pcps);
else
cpumask_clear_cpu(cpu, &cpus_with_pcps);
}
on_each_cpu_mask(&cpus_with_pcps, drain_local_pages, NULL, 1);
}
#ifdef CONFIG_HIBERNATION
void mark_free_pages(struct zone *zone)
{
unsigned long pfn, max_zone_pfn;
unsigned long flags;
int order, t;
struct list_head *curr;
if (!zone->spanned_pages)
return;
spin_lock_irqsave(&zone->lock, flags);
max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
if (pfn_valid(pfn)) {
struct page *page = pfn_to_page(pfn);
if (!swsusp_page_is_forbidden(page))
swsusp_unset_page_free(page);
}
for_each_migratetype_order(order, t) {
list_for_each(curr, &zone->free_area[order].free_list[t]) {
unsigned long i;
pfn = page_to_pfn(list_entry(curr, struct page, lru));
for (i = 0; i < (1UL << order); i++)
swsusp_set_page_free(pfn_to_page(pfn + i));
}
}
spin_unlock_irqrestore(&zone->lock, flags);
}
#endif /* CONFIG_HIBERNATION */
/*
* Free a 0-order page
* cold == 1 ? free a cold page : free a hot page
*/
void free_hot_cold_page(struct page *page, int cold)
{
struct zone *zone = page_zone(page);
struct per_cpu_pages *pcp;
unsigned long flags;
int migratetype;
int wasMlocked = __TestClearPageMlocked(page);
if (!free_pages_prepare(page, 0))
return;
migratetype = get_pageblock_migratetype(page);
set_page_private(page, migratetype);
local_irq_save(flags);
if (unlikely(wasMlocked))
free_page_mlock(page);
__count_vm_event(PGFREE);
/*
* We only track unmovable, reclaimable and movable on pcp lists.
* Free ISOLATE pages back to the allocator because they are being
* offlined, but treat RESERVE as movable pages so we can get those
* areas back if necessary. Otherwise, we may have to free
* excessively into the page allocator
*/
if (migratetype >= MIGRATE_PCPTYPES) {
if (unlikely(migratetype == MIGRATE_ISOLATE)) {
free_one_page(zone, page, 0, migratetype);
goto out;
}
migratetype = MIGRATE_MOVABLE;
}
pcp = &this_cpu_ptr(zone->pageset)->pcp;
if (cold)
list_add_tail(&page->lru, &pcp->lists[migratetype]);
else
list_add(&page->lru, &pcp->lists[migratetype]);
pcp->count++;
if (pcp->count >= pcp->high) {
free_pcppages_bulk(zone, pcp->batch, pcp);
pcp->count -= pcp->batch;
}
out:
local_irq_restore(flags);
}
/*
* Free a list of 0-order pages
*/
void free_hot_cold_page_list(struct list_head *list, int cold)
{
struct page *page, *next;
list_for_each_entry_safe(page, next, list, lru) {
trace_mm_page_free_batched(page, cold);
free_hot_cold_page(page, cold);
}
}
/*
* split_page takes a non-compound higher-order page, and splits it into
* n (1<<order) sub-pages: page[0..n-1].
* Each sub-page must be freed individually.
*
* Note: this is probably too low level an operation for use in drivers.
* Please consult with lkml before using this in your driver.
*/
void split_page(struct page *page, unsigned int order)
{
int i;
VM_BUG_ON(PageCompound(page));
VM_BUG_ON(!page_count(page));
#ifdef CONFIG_KMEMCHECK
/*
* Split shadow pages too, because free(page[0]) would
* otherwise free the whole shadow.
*/
if (kmemcheck_page_is_tracked(page))
split_page(virt_to_page(page[0].shadow), order);
#endif
for (i = 1; i < (1 << order); i++)
set_page_refcounted(page + i);
}
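/*
* Hedged usage sketch (illustrative only, not part of this file's
* callers): a driver holding an order-2 allocation could split it and
* then free the sub-pages independently.
*/
#if 0
struct page *p = alloc_pages(GFP_KERNEL, 2); /* four contiguous pages */
split_page(p, 2); /* now four independent order-0 pages */
__free_page(p + 3); /* each sub-page freed individually */
#endif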
/*
* Similar to split_page except the page is already free. As this is only
* being used for migration, the migratetype of the block also changes.
* As this is called with interrupts disabled, the caller is responsible
* for calling arch_alloc_page() and kernel_map_pages() after interrupts
* are enabled.
*
* Note: this is probably too low level an operation for use in drivers.
* Please consult with lkml before using this in your driver.
*/
int split_free_page(struct page *page)
{
unsigned int order;
unsigned long watermark;
struct zone *zone;
BUG_ON(!PageBuddy(page));
zone = page_zone(page);
order = page_order(page);
/* Obey watermarks as if the page was being allocated */
watermark = low_wmark_pages(zone) + (1 << order);
if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
return 0;
/* Remove page from free list */
list_del(&page->lru);
zone->free_area[order].nr_free--;
rmv_page_order(page);
__mod_zone_page_state(zone, NR_FREE_PAGES, -(1UL << order));
/* Split into individual pages */
set_page_refcounted(page);
split_page(page, order);
if (order >= pageblock_order - 1) {
struct page *endpage = page + (1 << order) - 1;
for (; page < endpage; page += pageblock_nr_pages) {
int mt = get_pageblock_migratetype(page);
if (mt != MIGRATE_ISOLATE && !is_migrate_cma(mt))
set_pageblock_migratetype(page,
MIGRATE_MOVABLE);
}
}
return 1 << order;
}
/*
* Really, prep_compound_page() should be called from __rmqueue_bulk(). But
* we cheat by calling it from here, in the order > 0 path. Saves a branch
* or two.
*/
static inline
struct page *buffered_rmqueue(struct zone *preferred_zone,
struct zone *zone, int order, gfp_t gfp_flags,
int migratetype)
{
unsigned long flags;
struct page *page;
int cold = !!(gfp_flags & __GFP_COLD);
again:
if (likely(order == 0)) {
struct per_cpu_pages *pcp;
struct list_head *list;
local_irq_save(flags);
pcp = &this_cpu_ptr(zone->pageset)->pcp;
list = &pcp->lists[migratetype];
if (list_empty(list)) {
pcp->count += rmqueue_bulk(zone, 0,
pcp->batch, list,
migratetype, cold);
if (unlikely(list_empty(list)))
goto failed;
}
if (cold)
page = list_entry(list->prev, struct page, lru);
else
page = list_entry(list->next, struct page, lru);
list_del(&page->lru);
pcp->count--;
} else {
if (unlikely(gfp_flags & __GFP_NOFAIL)) {
/*
* __GFP_NOFAIL is not to be used in new code.
*
* All __GFP_NOFAIL callers should be fixed so that they
* properly detect and handle allocation failures.
*
* We most definitely don't want callers attempting to
* allocate greater than order-1 page units with
* __GFP_NOFAIL.
*/
WARN_ON_ONCE(order > 1);
}
spin_lock_irqsave(&zone->lock, flags);
page = __rmqueue(zone, order, migratetype);
spin_unlock(&zone->lock);
if (!page)
goto failed;
__mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
}
__count_zone_vm_events(PGALLOC, zone, 1 << order);
zone_statistics(preferred_zone, zone, gfp_flags);
local_irq_restore(flags);
VM_BUG_ON(bad_range(zone, page));
if (prep_new_page(page, order, gfp_flags))
goto again;
return page;
failed:
local_irq_restore(flags);
return NULL;
}
/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN WMARK_MIN
#define ALLOC_WMARK_LOW WMARK_LOW
#define ALLOC_WMARK_HIGH WMARK_HIGH
#define ALLOC_NO_WATERMARKS 0x04 /* don't check watermarks at all */
/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK (ALLOC_NO_WATERMARKS-1)
#define ALLOC_HARDER 0x10 /* try to alloc harder */
#define ALLOC_HIGH 0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET 0x40 /* check for correct cpuset */
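/*
* Bit-layout note (values as defined above): the watermark indices live
* in bits 0-1, so ALLOC_WMARK_MASK == ALLOC_NO_WATERMARKS - 1 == 0x03
* and "alloc_flags & ALLOC_WMARK_MASK" recovers WMARK_MIN/LOW/HIGH
* untouched by the 0x10/0x20/0x40 modifier bits.
*/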
#ifdef CONFIG_FAIL_PAGE_ALLOC
static struct {
struct fault_attr attr;
u32 ignore_gfp_highmem;
u32 ignore_gfp_wait;
u32 min_order;
} fail_page_alloc = {
.attr = FAULT_ATTR_INITIALIZER,
.ignore_gfp_wait = 1,
.ignore_gfp_highmem = 1,
.min_order = 1,
};
static int __init setup_fail_page_alloc(char *str)
{
return setup_fault_attr(&fail_page_alloc.attr, str);
}
__setup("fail_page_alloc=", setup_fail_page_alloc);
static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
if (order < fail_page_alloc.min_order)
return 0;
if (gfp_mask & __GFP_NOFAIL)
return 0;
if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
return 0;
if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
return 0;
return should_fail(&fail_page_alloc.attr, 1 << order);
}
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
static int __init fail_page_alloc_debugfs(void)
{
umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
struct dentry *dir;
dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
&fail_page_alloc.attr);
if (IS_ERR(dir))
return PTR_ERR(dir);
if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
&fail_page_alloc.ignore_gfp_wait))
goto fail;
if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
&fail_page_alloc.ignore_gfp_highmem))
goto fail;
if (!debugfs_create_u32("min-order", mode, dir,
&fail_page_alloc.min_order))
goto fail;
return 0;
fail:
debugfs_remove_recursive(dir);
return -ENOMEM;
}
late_initcall(fail_page_alloc_debugfs);
#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
#else /* CONFIG_FAIL_PAGE_ALLOC */
static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
return 0;
}
#endif /* CONFIG_FAIL_PAGE_ALLOC */
/*
* Return true if free pages are above 'mark'. This takes into account the order
* of the allocation.
*/
static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
int classzone_idx, int alloc_flags, long free_pages)
{
/* free_pages may go negative - that's OK */
long min = mark;
long lowmem_reserve = z->lowmem_reserve[classzone_idx];
int o;
free_pages -= (1 << order) - 1;
if (alloc_flags & ALLOC_HIGH)
min -= min / 2;
if (alloc_flags & ALLOC_HARDER)
min -= min / 4;
if (free_pages <= min + lowmem_reserve)
return false;
for (o = 0; o < order; o++) {
/* At the next order, this order's pages become unavailable */
free_pages -= z->free_area[o].nr_free << o;
/* Require fewer higher order pages to be free */
min >>= min_free_order_shift;
if (free_pages <= min)
return false;
}
return true;
}
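/*
* Numeric sketch (made-up values): with mark == 1024, order == 2, no
* ALLOC_HIGH/ALLOC_HARDER and lowmem_reserve == 0, free_pages is first
* charged (1 << 2) - 1 == 3 pages and must then exceed 1024; the loop
* afterwards subtracts the order-0 and order-1 free_area pages and
* halves min at each step (min_free_order_shift defaults to 1 above).
*/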
bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
int classzone_idx, int alloc_flags)
{
return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
zone_page_state(z, NR_FREE_PAGES));
}
bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
int classzone_idx, int alloc_flags)
{
long free_pages = zone_page_state(z, NR_FREE_PAGES);
if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
free_pages);
}
#ifdef CONFIG_NUMA
/*
* zlc_setup - Setup for "zonelist cache". Uses cached zone data to
* skip over zones that are not allowed by the cpuset, or that have
* been recently (in last second) found to be nearly full. See further
* comments in mmzone.h. Reduces cache footprint of zonelist scans
* that have to skip over a lot of full or unallowed zones.
*
* If the zonelist cache is present in the passed in zonelist, then
* returns a pointer to the allowed node mask (either the current
* task's mems_allowed, or node_states[N_HIGH_MEMORY].)
*
* If the zonelist cache is not available for this zonelist, does
* nothing and returns NULL.
*
* If the fullzones BITMAP in the zonelist cache is stale (more than
* a second since last zap'd) then we zap it out (clear its bits.)
*
* We hold off even calling zlc_setup, until after we've checked the
* first zone in the zonelist, on the theory that most allocations will
* be satisfied from that first zone, so best to examine that zone as
* quickly as we can.
*/
static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
{
struct zonelist_cache *zlc; /* cached zonelist speedup info */
nodemask_t *allowednodes; /* zonelist_cache approximation */
zlc = zonelist->zlcache_ptr;
if (!zlc)
return NULL;
if (time_after(jiffies, zlc->last_full_zap + HZ)) {
bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
zlc->last_full_zap = jiffies;
}
allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
&cpuset_current_mems_allowed :
&node_states[N_HIGH_MEMORY];
return allowednodes;
}
/*
* Given 'z' scanning a zonelist, run a couple of quick checks to see
* if it is worth looking at further for free memory:
* 1) Check that the zone isn't thought to be full (doesn't have its
* bit set in the zonelist_cache fullzones BITMAP).
* 2) Check that the zones node (obtained from the zonelist_cache
* z_to_n[] mapping) is allowed in the passed in allowednodes mask.
* Return true (non-zero) if zone is worth looking at further, or
* else return false (zero) if it is not.
*
* This check -ignores- the distinction between various watermarks,
* such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ... If a zone is
* found to be full for any variation of these watermarks, it will
* be considered full for up to one second by all requests, unless
* we are so low on memory on all allowed nodes that we are forced
* into the second scan of the zonelist.
*
* In the second scan we ignore this zonelist cache and exactly
* apply the watermarks to all zones, even if it is slower to do so.
* We are low on memory in the second scan, and should leave no stone
* unturned looking for a free page.
*/
static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
nodemask_t *allowednodes)
{
struct zonelist_cache *zlc; /* cached zonelist speedup info */
int i; /* index of *z in zonelist zones */
int n; /* node that zone *z is on */
zlc = zonelist->zlcache_ptr;
if (!zlc)
return 1;
i = z - zonelist->_zonerefs;
n = zlc->z_to_n[i];
/* This zone is worth trying if it is allowed but not full */
return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
}
/*
* Given 'z' scanning a zonelist, set the corresponding bit in
* zlc->fullzones, so that subsequent attempts to allocate a page
* from that zone don't waste time re-examining it.
*/
static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
{
struct zonelist_cache *zlc; /* cached zonelist speedup info */
int i; /* index of *z in zonelist zones */
zlc = zonelist->zlcache_ptr;
if (!zlc)
return;
i = z - zonelist->_zonerefs;
set_bit(i, zlc->fullzones);
}
/*
* clear all zones full, called after direct reclaim makes progress so that
* a zone that was recently full is not skipped over for up to a second
*/
static void zlc_clear_zones_full(struct zonelist *zonelist)
{
struct zonelist_cache *zlc; /* cached zonelist speedup info */
zlc = zonelist->zlcache_ptr;
if (!zlc)
return;
bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
}
#else /* CONFIG_NUMA */
static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
{
return NULL;
}
static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
nodemask_t *allowednodes)
{
return 1;
}
static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
{
}
static void zlc_clear_zones_full(struct zonelist *zonelist)
{
}
#endif /* CONFIG_NUMA */
/*
* get_page_from_freelist goes through the zonelist trying to allocate
* a page.
*/
static struct page *
get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
struct zone *preferred_zone, int migratetype)
{
struct zoneref *z;
struct page *page = NULL;
int classzone_idx;
struct zone *zone;
nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
int zlc_active = 0; /* set if using zonelist_cache */
int did_zlc_setup = 0; /* just call zlc_setup() one time */
classzone_idx = zone_idx(preferred_zone);
zonelist_scan:
/*
* Scan zonelist, looking for a zone with enough free.
* See also cpuset_zone_allowed() comment in kernel/cpuset.c.
*/
for_each_zone_zonelist_nodemask(zone, z, zonelist,
high_zoneidx, nodemask) {
if (NUMA_BUILD && zlc_active &&
!zlc_zone_worth_trying(zonelist, z, allowednodes))
continue;
if ((alloc_flags & ALLOC_CPUSET) &&
!cpuset_zone_allowed_softwall(zone, gfp_mask))
continue;
/*
* When allocating a page cache page for writing, we
* want to get it from a zone that is within its dirty
* limit, such that no single zone holds more than its
* proportional share of globally allowed dirty pages.
* The dirty limits take into account the zone's
* lowmem reserves and high watermark so that kswapd
* should be able to balance it without having to
* write pages from its LRU list.
*
* This may look like it could increase pressure on
* lower zones by failing allocations in higher zones
* before they are full. But the pages that do spill
* over are limited as the lower zones are protected
* by this very same mechanism. It should not become
* a practical burden to them.
*
* XXX: For now, allow allocations to potentially
* exceed the per-zone dirty limit in the slowpath
* (ALLOC_WMARK_LOW unset) before going into reclaim,
* which is important when on a NUMA setup the allowed
* zones are together not big enough to reach the
* global limit. The proper fix for these situations
* will require awareness of zones in the
* dirty-throttling and the flusher threads.
*/
if ((alloc_flags & ALLOC_WMARK_LOW) &&
(gfp_mask & __GFP_WRITE) && !zone_dirty_ok(zone))
goto this_zone_full;
BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
unsigned long mark;
int ret;
mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
if (zone_watermark_ok(zone, order, mark,
classzone_idx, alloc_flags))
goto try_this_zone;
if (NUMA_BUILD && !did_zlc_setup && nr_online_nodes > 1) {
/*
* we do zlc_setup if there are multiple nodes, but
* only after the first zone allowed by the cpuset
* has been considered.
*/
allowednodes = zlc_setup(zonelist, alloc_flags);
zlc_active = 1;
did_zlc_setup = 1;
}
if (zone_reclaim_mode == 0)
goto this_zone_full;
/*
* As we may have just activated ZLC, check if the first
* eligible zone has failed zone_reclaim recently.
*/
if (NUMA_BUILD && zlc_active &&
!zlc_zone_worth_trying(zonelist, z, allowednodes))
continue;
ret = zone_reclaim(zone, gfp_mask, order);
switch (ret) {
case ZONE_RECLAIM_NOSCAN:
/* did not scan */
continue;
case ZONE_RECLAIM_FULL:
/* scanned but unreclaimable */
continue;
default:
/* did we reclaim enough */
if (!zone_watermark_ok(zone, order, mark,
classzone_idx, alloc_flags))
goto this_zone_full;
}
}
try_this_zone:
page = buffered_rmqueue(preferred_zone, zone, order,
gfp_mask, migratetype);
if (page)
break;
this_zone_full:
if (NUMA_BUILD)
zlc_mark_zone_full(zonelist, z);
}
if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
/* Disable zlc cache for second zonelist scan */
zlc_active = 0;
goto zonelist_scan;
}
return page;
}
/*
* Large machines with many possible nodes should not always dump per-node
* meminfo in irq context.
*/
static inline bool should_suppress_show_mem(void)
{
bool ret = false;
#if NODES_SHIFT > 8
ret = in_interrupt();
#endif
return ret;
}
static DEFINE_RATELIMIT_STATE(nopage_rs,
DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
{
unsigned int filter = SHOW_MEM_FILTER_NODES;
if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) ||
debug_guardpage_minorder() > 0)
return;
/*
* Walking all memory to count page types is very expensive and should
* be inhibited in non-blockable contexts.
*/
if (!(gfp_mask & __GFP_WAIT))
filter |= SHOW_MEM_FILTER_PAGE_COUNT;
/*
* This documents exceptions given to allocations in certain
* contexts that are allowed to allocate outside current's set
* of allowed nodes.
*/
if (!(gfp_mask & __GFP_NOMEMALLOC))
if (test_thread_flag(TIF_MEMDIE) ||
(current->flags & (PF_MEMALLOC | PF_EXITING)))
filter &= ~SHOW_MEM_FILTER_NODES;
if (in_interrupt() || !(gfp_mask & __GFP_WAIT))
filter &= ~SHOW_MEM_FILTER_NODES;
if (fmt) {
struct va_format vaf;
va_list args;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
pr_warn("%pV", &vaf);
va_end(args);
}
pr_warn("%s: page allocation failure: order:%d, mode:0x%x\n",
current->comm, order, gfp_mask);
dump_stack();
if (!should_suppress_show_mem())
show_mem(filter);
}
static inline int
should_alloc_retry(gfp_t gfp_mask, unsigned int order,
unsigned long did_some_progress,
unsigned long pages_reclaimed)
{
/* Do not loop if specifically requested */
if (gfp_mask & __GFP_NORETRY)
return 0;
/* Always retry if specifically requested */
if (gfp_mask & __GFP_NOFAIL)
return 1;
/*
* Suspend converts GFP_KERNEL to __GFP_WAIT which can prevent reclaim
* making forward progress without invoking OOM. Suspend also disables
* storage devices so kswapd will not help. Bail if we are suspending.
*/
if (!did_some_progress && pm_suspended_storage())
return 0;
/*
* In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
* means __GFP_NOFAIL, but that may not be true in other
* implementations.
*/
if (order <= PAGE_ALLOC_COSTLY_ORDER)
return 1;
/*
* For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
* specified, then we retry until we no longer reclaim any pages
* (above), or we've reclaimed an order of pages at least as
* large as the allocation's order. In both cases, if the
* allocation still fails, we stop retrying.
*/
if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
return 1;
return 0;
}
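/*
* Decision sketch: an order-4 GFP_KERNEL request (above
* PAGE_ALLOC_COSTLY_ORDER, typically 3) without __GFP_REPEAT reaches the
* final "return 0" and gives up, while an order-1 request keeps
* returning 1 (barring __GFP_NORETRY or suspend) and so keeps the
* slowpath retrying.
*/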
static inline struct page *
__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist, enum zone_type high_zoneidx,
nodemask_t *nodemask, struct zone *preferred_zone,
int migratetype)
{
struct page *page;
/* Acquire the OOM killer lock for the zones in zonelist */
if (!try_set_zonelist_oom(zonelist, gfp_mask)) {
schedule_timeout_uninterruptible(1);
return NULL;
}
/*
* PM-freezer should be notified that there might be an OOM killer on
* its way to kill and wake somebody up. This is too early and we might
* end up not killing anything but false positives are acceptable.
* See freeze_processes.
*/
note_oom_kill();
/*
* Go through the zonelist yet one more time, keep very high watermark
* here, this is only to catch a parallel oom killing, we must fail if
* we're still under heavy pressure.
*/
page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
order, zonelist, high_zoneidx,
ALLOC_WMARK_HIGH|ALLOC_CPUSET,
preferred_zone, migratetype);
if (page)
goto out;
if (!(gfp_mask & __GFP_NOFAIL)) {
/* The OOM killer will not help higher order allocs */
if (order > PAGE_ALLOC_COSTLY_ORDER)
goto out;
/* The OOM killer does not needlessly kill tasks for lowmem */
if (high_zoneidx < ZONE_NORMAL)
goto out;
/*
* GFP_THISNODE contains __GFP_NORETRY and we never hit this.
* Sanity check for bare calls of __GFP_THISNODE, not real OOM.
* The caller should handle page allocation failure by itself if
* it specifies __GFP_THISNODE.
* Note: Hugepage uses it but will hit PAGE_ALLOC_COSTLY_ORDER.
*/
if (gfp_mask & __GFP_THISNODE)
goto out;
}
/* Exhausted what can be done so it's blamo time */
out_of_memory(zonelist, gfp_mask, order, nodemask, false);
out:
clear_zonelist_oom(zonelist, gfp_mask);
return page;
}
#ifdef CONFIG_COMPACTION
/* Try memory compaction for high-order allocations before reclaim */
static struct page *
__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist, enum zone_type high_zoneidx,
nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
int migratetype, bool sync_migration,
bool *deferred_compaction,
unsigned long *did_some_progress)
{
struct page *page;
if (!order)
return NULL;
if (compaction_deferred(preferred_zone, order)) {
*deferred_compaction = true;
return NULL;
}
current->flags |= PF_MEMALLOC;
*did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
nodemask, sync_migration);
current->flags &= ~PF_MEMALLOC;
if (*did_some_progress != COMPACT_SKIPPED) {
/* Page migration frees to the PCP lists but we want merging */
drain_pages(get_cpu());
put_cpu();
page = get_page_from_freelist(gfp_mask, nodemask,
order, zonelist, high_zoneidx,
alloc_flags, preferred_zone,
migratetype);
if (page) {
preferred_zone->compact_considered = 0;
preferred_zone->compact_defer_shift = 0;
if (order >= preferred_zone->compact_order_failed)
preferred_zone->compact_order_failed = order + 1;
count_vm_event(COMPACTSUCCESS);
return page;
}
/*
* It's bad if a compaction run occurs and fails.
* The most likely reason is that pages exist,
* but not enough to satisfy watermarks.
*/
count_vm_event(COMPACTFAIL);
/*
* As async compaction considers a subset of pageblocks, only
* defer if the failure was a sync compaction failure.
*/
if (sync_migration)
defer_compaction(preferred_zone, order);
cond_resched();
}
return NULL;
}
#else
static inline struct page *
__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist, enum zone_type high_zoneidx,
nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
int migratetype, bool sync_migration,
bool *deferred_compaction,
unsigned long *did_some_progress)
{
return NULL;
}
#endif /* CONFIG_COMPACTION */
/* Perform direct synchronous page reclaim */
static int
__perform_reclaim(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist,
nodemask_t *nodemask)
{
struct reclaim_state reclaim_state;
int progress;
cond_resched();
/* We now go into synchronous reclaim */
cpuset_memory_pressure_bump();
current->flags |= PF_MEMALLOC;
lockdep_set_current_reclaim_state(gfp_mask);
reclaim_state.reclaimed_slab = 0;
current->reclaim_state = &reclaim_state;
progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
current->reclaim_state = NULL;
lockdep_clear_current_reclaim_state();
current->flags &= ~PF_MEMALLOC;
cond_resched();
return progress;
}
/* The really slow allocator path where we enter direct reclaim */
static inline struct page *
__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist, enum zone_type high_zoneidx,
nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
int migratetype, unsigned long *did_some_progress)
{
struct page *page = NULL;
bool drained = false;
*did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
nodemask);
if (unlikely(!(*did_some_progress)))
return NULL;
/* After successful reclaim, reconsider all zones for allocation */
if (NUMA_BUILD)
zlc_clear_zones_full(zonelist);
retry:
page = get_page_from_freelist(gfp_mask, nodemask, order,
zonelist, high_zoneidx,
alloc_flags, preferred_zone,
migratetype);
/*
* If an allocation failed after direct reclaim, it could be because
* pages are pinned on the per-cpu lists. Drain them and try again
*/
if (!page && !drained) {
drain_all_pages();
drained = true;
goto retry;
}
return page;
}
/*
* This is called in the allocator slow-path if the allocation request is of
* sufficient urgency to ignore watermarks and take other desperate measures
*/
static inline struct page *
__alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist, enum zone_type high_zoneidx,
nodemask_t *nodemask, struct zone *preferred_zone,
int migratetype)
{
struct page *page;
do {
page = get_page_from_freelist(gfp_mask, nodemask, order,
zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
preferred_zone, migratetype);
if (!page && gfp_mask & __GFP_NOFAIL)
wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
} while (!page && (gfp_mask & __GFP_NOFAIL));
return page;
}
static inline
void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
enum zone_type high_zoneidx,
enum zone_type classzone_idx)
{
struct zoneref *z;
struct zone *zone;
for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
wakeup_kswapd(zone, order, classzone_idx);
}
static inline int
gfp_to_alloc_flags(gfp_t gfp_mask)
{
int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
const bool atomic = !(gfp_mask & (__GFP_WAIT | __GFP_NO_KSWAPD));
/* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
/*
* The caller may dip into page reserves a bit more if the caller
* cannot run direct reclaim, or if the caller has realtime scheduling
* policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
* set both ALLOC_HARDER (atomic == true) and ALLOC_HIGH (__GFP_HIGH).
*/
alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
if (atomic) {
/*
* Not worth trying to allocate harder for __GFP_NOMEMALLOC even
* if it can't schedule.
*/
if (!(gfp_mask & __GFP_NOMEMALLOC))
alloc_flags |= ALLOC_HARDER;
/*
* Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
* comment for __cpuset_node_allowed_softwall().
*/
alloc_flags &= ~ALLOC_CPUSET;
} else if (unlikely(rt_task(current)) && !in_interrupt())
alloc_flags |= ALLOC_HARDER;
if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
if (!in_interrupt() &&
((current->flags & PF_MEMALLOC) ||
unlikely(test_thread_flag(TIF_MEMDIE))))
alloc_flags |= ALLOC_NO_WATERMARKS;
}
return alloc_flags;
}
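/*
* Example mapping (tracing the code above): GFP_ATOMIC lacks __GFP_WAIT,
* so atomic == true and the result is ALLOC_WMARK_MIN | ALLOC_HIGH |
* ALLOC_HARDER with ALLOC_CPUSET cleared; plain GFP_KERNEL keeps the
* default ALLOC_WMARK_MIN | ALLOC_CPUSET.
*/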
static inline struct page *
__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist, enum zone_type high_zoneidx,
nodemask_t *nodemask, struct zone *preferred_zone,
int migratetype)
{
const gfp_t wait = gfp_mask & __GFP_WAIT;
struct page *page = NULL;
int alloc_flags;
unsigned long pages_reclaimed = 0;
unsigned long did_some_progress;
bool sync_migration = false;
bool deferred_compaction = false;
/*
* In the slowpath, we sanity check order to avoid ever trying to
* reclaim >= MAX_ORDER areas which will never succeed. Callers may
* be using allocators in order of preference for an area that is
* too large.
*/
if (order >= MAX_ORDER) {
WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
return NULL;
}
/*
* GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
* __GFP_NOWARN set) should not cause reclaim since the subsystem
* (e.g. slab) using GFP_THISNODE may choose to trigger reclaim
* using a larger set of nodes after it has established that the
* allowed per node queues are empty and that nodes are
* over allocated.
*/
if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
goto nopage;
restart:
if (!(gfp_mask & __GFP_NO_KSWAPD))
wake_all_kswapd(order, zonelist, high_zoneidx,
zone_idx(preferred_zone));
/*
* OK, we're below the kswapd watermark and have kicked background
* reclaim. Now things get more complex, so set up alloc_flags according
* to how we want to proceed.
*/
alloc_flags = gfp_to_alloc_flags(gfp_mask);
/*
* Find the true preferred zone if the allocation is unconstrained by
* cpusets.
*/
if (!(alloc_flags & ALLOC_CPUSET) && !nodemask)
first_zones_zonelist(zonelist, high_zoneidx, NULL,
&preferred_zone);
rebalance:
/* This is the last chance, in general, before the goto nopage. */
page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
preferred_zone, migratetype);
if (page)
goto got_pg;
/* Allocate without watermarks if the context allows */
if (alloc_flags & ALLOC_NO_WATERMARKS) {
page = __alloc_pages_high_priority(gfp_mask, order,
zonelist, high_zoneidx, nodemask,
preferred_zone, migratetype);
if (page)
goto got_pg;
}
/* Atomic allocations - we can't balance anything */
if (!wait)
goto nopage;
/* Avoid recursion of direct reclaim */
if (current->flags & PF_MEMALLOC)
goto nopage;
/* Avoid allocations with no watermarks from looping endlessly */
if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
goto nopage;
/*
* Try direct compaction. The first pass is asynchronous. Subsequent
* attempts after direct reclaim are synchronous
*/
page = __alloc_pages_direct_compact(gfp_mask, order,
zonelist, high_zoneidx,
nodemask,
alloc_flags, preferred_zone,
migratetype, sync_migration,
&deferred_compaction,
&did_some_progress);
if (page)
goto got_pg;
sync_migration = true;
/*
* If compaction is deferred for high-order allocations, it is because
* sync compaction recently failed. If this is the case and the caller
* has requested the system not be heavily disrupted, fail the
* allocation now instead of entering direct reclaim
*/
if (deferred_compaction && (gfp_mask & __GFP_NO_KSWAPD))
goto nopage;
/* Try direct reclaim and then allocating */
page = __alloc_pages_direct_reclaim(gfp_mask, order,
zonelist, high_zoneidx,
nodemask,
alloc_flags, preferred_zone,
migratetype, &did_some_progress);
if (page)
goto got_pg;
/*
* If we failed to make any progress reclaiming, then we are
* running out of options and have to consider going OOM
*/
if (!did_some_progress) {
if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
if (oom_killer_disabled)
goto nopage;
/* Coredumps can quickly deplete all memory reserves */
if ((current->flags & PF_DUMPCORE) &&
!(gfp_mask & __GFP_NOFAIL))
goto nopage;
page = __alloc_pages_may_oom(gfp_mask, order,
zonelist, high_zoneidx,
nodemask, preferred_zone,
migratetype);
if (page)
goto got_pg;
if (!(gfp_mask & __GFP_NOFAIL)) {
/*
* The oom killer is not called for high-order
* allocations that may fail, so if no progress
* is being made, there are no other options and
* retrying is unlikely to help.
*/
if (order > PAGE_ALLOC_COSTLY_ORDER)
goto nopage;
/*
* The oom killer is not called for lowmem
* allocations to prevent needlessly killing
* innocent tasks.
*/
if (high_zoneidx < ZONE_NORMAL)
goto nopage;
}
goto restart;
}
}
/* Check if we should retry the allocation */
pages_reclaimed += did_some_progress;
if (should_alloc_retry(gfp_mask, order, did_some_progress,
pages_reclaimed)) {
/* Wait for some write requests to complete then retry */
wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
goto rebalance;
} else {
/*
* High-order allocations do not necessarily loop after
* direct reclaim and reclaim/compaction depends on compaction
* being called after reclaim so call directly if necessary
*/
page = __alloc_pages_direct_compact(gfp_mask, order,
zonelist, high_zoneidx,
nodemask,
alloc_flags, preferred_zone,
migratetype, sync_migration,
&deferred_compaction,
&did_some_progress);
if (page)
goto got_pg;
}
nopage:
warn_alloc_failed(gfp_mask, order, NULL);
return page;
got_pg:
if (kmemcheck_enabled)
kmemcheck_pagealloc_alloc(page, order, gfp_mask);
return page;
}
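/*
* Roughly, the slowpath above proceeds as: wake kswapd, retry the
* freelists at the min watermark, allocate ignoring watermarks where the
* context allows, then try async compaction, direct reclaim and, when no
* progress at all was made, the OOM killer, looping via
* should_alloc_retry() until a page is found or the gfp flags say to
* give up.
*/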
/*
* This is the 'heart' of the zoned buddy allocator.
*/
struct page *
__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist, nodemask_t *nodemask)
{
enum zone_type high_zoneidx = gfp_zone(gfp_mask);
struct zone *preferred_zone;
struct page *page = NULL;
int migratetype = allocflags_to_migratetype(gfp_mask);
unsigned int cpuset_mems_cookie;
gfp_mask &= gfp_allowed_mask;
lockdep_trace_alloc(gfp_mask);
might_sleep_if(gfp_mask & __GFP_WAIT);
if (should_fail_alloc_page(gfp_mask, order))
return NULL;
/*
* Check the zones suitable for the gfp_mask contain at least one
* valid zone. It's possible to have an empty zonelist as a result
* of GFP_THISNODE and a memoryless node
*/
if (unlikely(!zonelist->_zonerefs->zone))
return NULL;
retry_cpuset:
cpuset_mems_cookie = get_mems_allowed();
/* The preferred zone is used for statistics later */
first_zones_zonelist(zonelist, high_zoneidx,
nodemask ? : &cpuset_current_mems_allowed,
&preferred_zone);
if (!preferred_zone)
goto out;
/* First allocation attempt */
page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET,
preferred_zone, migratetype);
if (unlikely(!page))
page = __alloc_pages_slowpath(gfp_mask, order,
zonelist, high_zoneidx, nodemask,
preferred_zone, migratetype);
trace_mm_page_alloc(page, order, gfp_mask, migratetype);
out:
/*
* When updating a task's mems_allowed, it is possible to race with
* parallel threads in such a way that an allocation can fail while
* the mask is being updated. If a page allocation is about to fail,
* check if the cpuset changed during allocation and if so, retry.
*/
if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
goto retry_cpuset;
return page;
}
EXPORT_SYMBOL(__alloc_pages_nodemask);
/*
* Common helper functions.
*/
unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
{
struct page *page;
/*
* __get_free_pages() returns a 32-bit address, which cannot represent
* a highmem page
*/
VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
page = alloc_pages(gfp_mask, order);
if (!page)
return 0;
return (unsigned long) page_address(page);
}
EXPORT_SYMBOL(__get_free_pages);
unsigned long get_zeroed_page(gfp_t gfp_mask)
{
return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
}
EXPORT_SYMBOL(get_zeroed_page);
void __free_pages(struct page *page, unsigned int order)
{
if (put_page_testzero(page)) {
if (order == 0)
free_hot_cold_page(page, 0);
else
__free_pages_ok(page, order);
}
}
EXPORT_SYMBOL(__free_pages);
void free_pages(unsigned long addr, unsigned int order)
{
if (addr != 0) {
VM_BUG_ON(!virt_addr_valid((void *)addr));
__free_pages(virt_to_page((void *)addr), order);
}
}
EXPORT_SYMBOL(free_pages);
static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size)
{
if (addr) {
unsigned long alloc_end = addr + (PAGE_SIZE << order);
unsigned long used = addr + PAGE_ALIGN(size);
split_page(virt_to_page((void *)addr), order);
while (used < alloc_end) {
free_page(used);
used += PAGE_SIZE;
}
}
return (void *)addr;
}
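/*
* Example of the trimming above, assuming 4K pages: for size ==
* 5 * PAGE_SIZE the caller allocates order 3 (eight pages), so alloc_end
* is addr + 32K while used starts at addr + 20K; split_page() breaks the
* high-order block apart and the loop frees the three trailing pages.
*/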
/**
* alloc_pages_exact - allocate an exact number of physically-contiguous pages.
* @size: the number of bytes to allocate
* @gfp_mask: GFP flags for the allocation
*
* This function is similar to alloc_pages(), except that it allocates the
* minimum number of pages to satisfy the request. alloc_pages() can only
* allocate memory in power-of-two pages.
*
* This function is also limited by MAX_ORDER.
*
* Memory allocated by this function must be released by free_pages_exact().
*/
void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
{
unsigned int order = get_order(size);
unsigned long addr;
addr = __get_free_pages(gfp_mask, order);
return make_alloc_exact(addr, order, size);
}
EXPORT_SYMBOL(alloc_pages_exact);
/**
* alloc_pages_exact_nid - allocate an exact number of physically-contiguous
* pages on a node.
* @nid: the preferred node ID where memory should be allocated
* @size: the number of bytes to allocate
* @gfp_mask: GFP flags for the allocation
*
* Like alloc_pages_exact(), but try to allocate on node nid first before falling
* back.
* Note this is not alloc_pages_exact_node() which allocates on a specific node,
* but is not exact.
*/
void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
{
unsigned order = get_order(size);
struct page *p = alloc_pages_node(nid, gfp_mask, order);
if (!p)
return NULL;
return make_alloc_exact((unsigned long)page_address(p), order, size);
}
EXPORT_SYMBOL(alloc_pages_exact_nid);
/**
* free_pages_exact - release memory allocated via alloc_pages_exact()
* @virt: the value returned by alloc_pages_exact.
* @size: size of allocation, same value as passed to alloc_pages_exact().
*
* Release the memory allocated by a previous call to alloc_pages_exact.
*/
void free_pages_exact(void *virt, size_t size)
{
unsigned long addr = (unsigned long)virt;
unsigned long end = addr + PAGE_ALIGN(size);
while (addr < end) {
free_page(addr);
addr += PAGE_SIZE;
}
}
EXPORT_SYMBOL(free_pages_exact);
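/*
* Illustrative pairing of the two helpers above, assuming 4K pages:
*
*	void *buf = alloc_pages_exact(8300, GFP_KERNEL);
*	if (buf)
*		free_pages_exact(buf, 8300);
*
* get_order(8300) is 2 (four pages) while PAGE_ALIGN(8300) is three pages,
* so the trailing fourth page is returned immediately by make_alloc_exact()
* and the remaining three are freed later by free_pages_exact().
*/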
static unsigned int nr_free_zone_pages(int offset)
{
struct zoneref *z;
struct zone *zone;
/* Just pick one node, since fallback list is circular */
unsigned int sum = 0;
struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
for_each_zone_zonelist(zone, z, zonelist, offset) {
unsigned long size = zone->present_pages;
unsigned long high = high_wmark_pages(zone);
if (size > high)
sum += size - high;
}
return sum;
}
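/*
* E.g. a zone with 1000 present pages and a high watermark of 100 pages
* contributes 900 pages to the sum above; zones at or below their high
* watermark contribute nothing.
*/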
/*
* Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
*/
unsigned int nr_free_buffer_pages(void)
{
return nr_free_zone_pages(gfp_zone(GFP_USER));
}
EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
/*
* Amount of free RAM allocatable within all zones
*/
unsigned int nr_free_pagecache_pages(void)
{
return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
}
static inline void show_node(struct zone *zone)
{
if (NUMA_BUILD)
printk("Node %d ", zone_to_nid(zone));
}
void si_meminfo(struct sysinfo *val)
{
val->totalram = totalram_pages;
val->sharedram = 0;
val->freeram = global_page_state(NR_FREE_PAGES);
val->bufferram = nr_blockdev_pages();
val->totalhigh = totalhigh_pages;
val->freehigh = nr_free_highpages();
val->mem_unit = PAGE_SIZE;
}
EXPORT_SYMBOL(si_meminfo);
#ifdef CONFIG_NUMA
void si_meminfo_node(struct sysinfo *val, int nid)
{
pg_data_t *pgdat = NODE_DATA(nid);
val->totalram = pgdat->node_present_pages;
val->freeram = node_page_state(nid, NR_FREE_PAGES);
#ifdef CONFIG_HIGHMEM
val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
NR_FREE_PAGES);
#else
val->totalhigh = 0;
val->freehigh = 0;
#endif
val->mem_unit = PAGE_SIZE;
}
#endif
/*
* Determine whether the node should be displayed or not, depending on whether
* SHOW_MEM_FILTER_NODES was passed to show_free_areas().
*/
bool skip_free_areas_node(unsigned int flags, int nid)
{
bool ret = false;
unsigned int cpuset_mems_cookie;
if (!(flags & SHOW_MEM_FILTER_NODES))
goto out;
do {
cpuset_mems_cookie = get_mems_allowed();
ret = !node_isset(nid, cpuset_current_mems_allowed);
} while (!put_mems_allowed(cpuset_mems_cookie));
out:
return ret;
}
#define K(x) ((x) << (PAGE_SHIFT-10))
/*
* Show free area list (used inside shift_scroll-lock stuff)
* We also calculate the percentage fragmentation. We do this by counting the
* memory on each free list with the exception of the first item on the list.
* Suppresses nodes that are not allowed by current's cpuset if
* SHOW_MEM_FILTER_NODES is passed.
*/
void show_free_areas(unsigned int filter)
{
int cpu;
struct zone *zone;
for_each_populated_zone(zone) {
if (skip_free_areas_node(filter, zone_to_nid(zone)))
continue;
show_node(zone);
printk("%s per-cpu:\n", zone->name);
for_each_online_cpu(cpu) {
struct per_cpu_pageset *pageset;
pageset = per_cpu_ptr(zone->pageset, cpu);
printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
cpu, pageset->pcp.high,
pageset->pcp.batch, pageset->pcp.count);
}
}
printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
" unevictable:%lu"
" dirty:%lu writeback:%lu unstable:%lu\n"
" free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
" mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n",
global_page_state(NR_ACTIVE_ANON),
global_page_state(NR_INACTIVE_ANON),
global_page_state(NR_ISOLATED_ANON),
global_page_state(NR_ACTIVE_FILE),
global_page_state(NR_INACTIVE_FILE),
global_page_state(NR_ISOLATED_FILE),
global_page_state(NR_UNEVICTABLE),
global_page_state(NR_FILE_DIRTY),
global_page_state(NR_WRITEBACK),
global_page_state(NR_UNSTABLE_NFS),
global_page_state(NR_FREE_PAGES),
global_page_state(NR_SLAB_RECLAIMABLE),
global_page_state(NR_SLAB_UNRECLAIMABLE),
global_page_state(NR_FILE_MAPPED),
global_page_state(NR_SHMEM),
global_page_state(NR_PAGETABLE),
global_page_state(NR_BOUNCE));
for_each_populated_zone(zone) {
int i;
if (skip_free_areas_node(filter, zone_to_nid(zone)))
continue;
show_node(zone);
printk("%s"
" free:%lukB"
" min:%lukB"
" low:%lukB"
" high:%lukB"
" active_anon:%lukB"
" inactive_anon:%lukB"
" active_file:%lukB"
" inactive_file:%lukB"
" unevictable:%lukB"
" isolated(anon):%lukB"
" isolated(file):%lukB"
" present:%lukB"
" mlocked:%lukB"
" dirty:%lukB"
" writeback:%lukB"
" mapped:%lukB"
" shmem:%lukB"
" slab_reclaimable:%lukB"
" slab_unreclaimable:%lukB"
" kernel_stack:%lukB"
" pagetables:%lukB"
" unstable:%lukB"
" bounce:%lukB"
" writeback_tmp:%lukB"
" pages_scanned:%lu"
" all_unreclaimable? %s"
"\n",
zone->name,
K(zone_page_state(zone, NR_FREE_PAGES)),
K(min_wmark_pages(zone)),
K(low_wmark_pages(zone)),
K(high_wmark_pages(zone)),
K(zone_page_state(zone, NR_ACTIVE_ANON)),
K(zone_page_state(zone, NR_INACTIVE_ANON)),
K(zone_page_state(zone, NR_ACTIVE_FILE)),
K(zone_page_state(zone, NR_INACTIVE_FILE)),
K(zone_page_state(zone, NR_UNEVICTABLE)),
K(zone_page_state(zone, NR_ISOLATED_ANON)),
K(zone_page_state(zone, NR_ISOLATED_FILE)),
K(zone->present_pages),
K(zone_page_state(zone, NR_MLOCK)),
K(zone_page_state(zone, NR_FILE_DIRTY)),
K(zone_page_state(zone, NR_WRITEBACK)),
K(zone_page_state(zone, NR_FILE_MAPPED)),
K(zone_page_state(zone, NR_SHMEM)),
K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
zone_page_state(zone, NR_KERNEL_STACK) *
THREAD_SIZE / 1024,
K(zone_page_state(zone, NR_PAGETABLE)),
K(zone_page_state(zone, NR_UNSTABLE_NFS)),
K(zone_page_state(zone, NR_BOUNCE)),
K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
zone->pages_scanned,
(zone->all_unreclaimable ? "yes" : "no")
);
printk("lowmem_reserve[]:");
for (i = 0; i < MAX_NR_ZONES; i++)
printk(" %lu", zone->lowmem_reserve[i]);
printk("\n");
}
for_each_populated_zone(zone) {
unsigned long nr[MAX_ORDER], flags, order, total = 0;
if (skip_free_areas_node(filter, zone_to_nid(zone)))
continue;
show_node(zone);
printk("%s: ", zone->name);
spin_lock_irqsave(&zone->lock, flags);
for (order = 0; order < MAX_ORDER; order++) {
nr[order] = zone->free_area[order].nr_free;
total += nr[order] << order;
}
spin_unlock_irqrestore(&zone->lock, flags);
for (order = 0; order < MAX_ORDER; order++)
printk("%lu*%lukB ", nr[order], K(1UL) << order);
printk("= %lukB\n", K(total));
}
printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
show_swap_cache_info();
}
static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
{
zoneref->zone = zone;
zoneref->zone_idx = zone_idx(zone);
}
/*
* Builds allocation fallback zone lists.
*
* Add all populated zones of a node to the zonelist.
*/
static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
int nr_zones, enum zone_type zone_type)
{
struct zone *zone;
BUG_ON(zone_type >= MAX_NR_ZONES);
zone_type++;
do {
zone_type--;
zone = pgdat->node_zones + zone_type;
if (populated_zone(zone)) {
zoneref_set_zone(zone,
&zonelist->_zonerefs[nr_zones++]);
check_highest_zone(zone_type);
}
} while (zone_type);
return nr_zones;
}
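/*
* E.g. called with zone_type == MAX_NR_ZONES - 1 for a node with
* populated Normal and DMA zones, the downward walk above appends
* Normal before DMA, so fallback always prefers the highest zone.
*/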
/*
* zonelist_order:
* 0 = automatic detection of better ordering.
* 1 = order by ([node] distance, -zonetype)
* 2 = order by (-zonetype, [node] distance)
*
* If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
* the same zonelist. So only NUMA can configure this param.
*/
#define ZONELIST_ORDER_DEFAULT 0
#define ZONELIST_ORDER_NODE 1
#define ZONELIST_ORDER_ZONE 2
/* zonelist order in the kernel.
* set_zonelist_order() will set this to NODE or ZONE.
*/
static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
#ifdef CONFIG_NUMA
/* The value the user specified, settable via early param or sysctl */
static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
/* string for sysctl */
#define NUMA_ZONELIST_ORDER_LEN 16
char numa_zonelist_order[16] = "default";
/*
* interface for configuring zonelist ordering.
* command line option "numa_zonelist_order"
* = "[dD]efault" - default, automatic configuration.
* = "[nN]ode" - order by node locality, then by zone within node
* = "[zZ]one" - order by zone, then by locality within zone
*/
static int __parse_numa_zonelist_order(char *s)
{
if (*s == 'd' || *s == 'D') {
user_zonelist_order = ZONELIST_ORDER_DEFAULT;
} else if (*s == 'n' || *s == 'N') {
user_zonelist_order = ZONELIST_ORDER_NODE;
} else if (*s == 'z' || *s == 'Z') {
user_zonelist_order = ZONELIST_ORDER_ZONE;
} else {
printk(KERN_WARNING
"Ignoring invalid numa_zonelist_order value: "
"%s\n", s);
return -EINVAL;
}
return 0;
}
static __init int setup_numa_zonelist_order(char *s)
{
int ret;
if (!s)
return 0;
ret = __parse_numa_zonelist_order(s);
if (ret == 0)
strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);
return ret;
}
early_param("numa_zonelist_order", setup_numa_zonelist_order);
/*
* sysctl handler for numa_zonelist_order
*/
int numa_zonelist_order_handler(ctl_table *table, int write,
void __user *buffer, size_t *length,
loff_t *ppos)
{
char saved_string[NUMA_ZONELIST_ORDER_LEN];
int ret;
static DEFINE_MUTEX(zl_order_mutex);
mutex_lock(&zl_order_mutex);
if (write)
strcpy(saved_string, (char*)table->data);
ret = proc_dostring(table, write, buffer, length, ppos);
if (ret)
goto out;
if (write) {
int oldval = user_zonelist_order;
if (__parse_numa_zonelist_order((char*)table->data)) {
/*
* Bogus value; restore the saved string.
*/
strncpy((char*)table->data, saved_string,
NUMA_ZONELIST_ORDER_LEN);
user_zonelist_order = oldval;
} else if (oldval != user_zonelist_order) {
mutex_lock(&zonelists_mutex);
build_all_zonelists(NULL);
mutex_unlock(&zonelists_mutex);
}
}
out:
mutex_unlock(&zl_order_mutex);
return ret;
}
#define MAX_NODE_LOAD (nr_online_nodes)
static int node_load[MAX_NUMNODES];
/**
* find_next_best_node - find the next node that should appear in a given node's fallback list
* @node: node whose fallback list we're appending
* @used_node_mask: nodemask_t of already used nodes
*
* We use a number of factors to determine which is the next node that should
* appear on a given node's fallback list. The node should not have appeared
* already in @node's fallback list, and it should be the next closest node
* according to the distance array (which contains arbitrary distance values
* from each node to each node in the system), and should also prefer nodes
* with no CPUs, since presumably they'll have very little allocation pressure
* on them otherwise.
* It returns -1 if no node is found.
*/
static int find_next_best_node(int node, nodemask_t *used_node_mask)
{
int n, val;
int min_val = INT_MAX;
int best_node = -1;
const struct cpumask *tmp = cpumask_of_node(0);
/* Use the local node if we haven't already */
if (!node_isset(node, *used_node_mask)) {
node_set(node, *used_node_mask);
return node;
}
for_each_node_state(n, N_HIGH_MEMORY) {
/* Don't want a node to appear more than once */
if (node_isset(n, *used_node_mask))
continue;
/* Use the distance array to find the distance */
val = node_distance(node, n);
/* Penalize nodes under us ("prefer the next node") */
val += (n < node);
/* Give preference to headless and unused nodes */
tmp = cpumask_of_node(n);
if (!cpumask_empty(tmp))
val += PENALTY_FOR_NODE_WITH_CPUS;
/* Slight preference for less loaded node */
val *= (MAX_NODE_LOAD*MAX_NUMNODES);
val += node_load[n];
if (val < min_val) {
min_val = val;
best_node = n;
}
}
if (best_node >= 0)
node_set(best_node, *used_node_mask);
return best_node;
}
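/*
* Illustration of the weighting above: because val is scaled by
* MAX_NODE_LOAD * MAX_NUMNODES before node_load[] is added, a candidate
* at node_distance 10 always beats one at distance 20 even after the
* "next node" and CPU penalties; the load term is small relative to the
* scale factor, so it only breaks ties between otherwise equal nodes.
*/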
/*
* Build zonelists ordered by node and zones within node.
* This results in maximum locality--normal zone overflows into local
* DMA zone, if any--but risks exhausting DMA zone.
*/
static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
{
int j;
struct zonelist *zonelist;
zonelist = &pgdat->node_zonelists[0];
for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
;
j = build_zonelists_node(NODE_DATA(node), zonelist, j,
MAX_NR_ZONES - 1);
zonelist->_zonerefs[j].zone = NULL;
zonelist->_zonerefs[j].zone_idx = 0;
}
/*
* Build gfp_thisnode zonelists
*/
static void build_thisnode_zonelists(pg_data_t *pgdat)
{
int j;
struct zonelist *zonelist;
zonelist = &pgdat->node_zonelists[1];
j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
zonelist->_zonerefs[j].zone = NULL;
zonelist->_zonerefs[j].zone_idx = 0;
}
/*
* Build zonelists ordered by zone and nodes within zones.
* This results in conserving DMA zone[s] until all Normal memory is
* exhausted, but results in overflowing to remote node while memory
* may still exist in local DMA zone.
*/
static int node_order[MAX_NUMNODES];
static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
{
int pos, j, node;
int zone_type; /* needs to be signed */
struct zone *z;
struct zonelist *zonelist;
zonelist = &pgdat->node_zonelists[0];
pos = 0;
for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
for (j = 0; j < nr_nodes; j++) {
node = node_order[j];
z = &NODE_DATA(node)->node_zones[zone_type];
if (populated_zone(z)) {
zoneref_set_zone(z,
&zonelist->_zonerefs[pos++]);
check_highest_zone(zone_type);
}
}
}
zonelist->_zonerefs[pos].zone = NULL;
zonelist->_zonerefs[pos].zone_idx = 0;
}
static int default_zonelist_order(void)
{
int nid, zone_type;
unsigned long low_kmem_size, total_size;
struct zone *z;
int average_size;
/*
* ZONE_DMA and ZONE_DMA32 can be very small areas in the system.
* If they are really small and used heavily, the system can fall
* into OOM very easily.
* This function detects the ZONE_DMA/DMA32 size and configures the zone order.
*/
/* Is there ZONE_NORMAL? (e.g. ppc has only a DMA zone..) */
low_kmem_size = 0;
total_size = 0;
for_each_online_node(nid) {
for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
z = &NODE_DATA(nid)->node_zones[zone_type];
if (populated_zone(z)) {
if (zone_type < ZONE_NORMAL)
low_kmem_size += z->present_pages;
total_size += z->present_pages;
} else if (zone_type == ZONE_NORMAL) {
/*
* If any node has only lowmem, then node order
* is preferred to allow kernel allocations
* locally; otherwise, they can easily infringe
* on other nodes when there is an abundance of
* lowmem available to allocate from.
*/
return ZONELIST_ORDER_NODE;
}
}
}
if (!low_kmem_size || /* there is no DMA area. */
low_kmem_size > total_size/2) /* DMA/DMA32 is big. */
return ZONELIST_ORDER_NODE;
/*
* look into each node's config.
* If there is a node whose DMA/DMA32 memory occupies a very large share
* of its local memory, NODE_ORDER may be suitable.
*/
average_size = total_size /
(nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
for_each_online_node(nid) {
low_kmem_size = 0;
total_size = 0;
for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
z = &NODE_DATA(nid)->node_zones[zone_type];
if (populated_zone(z)) {
if (zone_type < ZONE_NORMAL)
low_kmem_size += z->present_pages;
total_size += z->present_pages;
}
}
if (low_kmem_size &&
total_size > average_size && /* ignore small node */
low_kmem_size > total_size * 70/100)
return ZONELIST_ORDER_NODE;
}
return ZONELIST_ORDER_ZONE;
}
static void set_zonelist_order(void)
{
if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
current_zonelist_order = default_zonelist_order();
else
current_zonelist_order = user_zonelist_order;
}
static void build_zonelists(pg_data_t *pgdat)
{
int j, node, load;
enum zone_type i;
nodemask_t used_mask;
int local_node, prev_node;
struct zonelist *zonelist;
int order = current_zonelist_order;
/* initialize zonelists */
for (i = 0; i < MAX_ZONELISTS; i++) {
zonelist = pgdat->node_zonelists + i;
zonelist->_zonerefs[0].zone = NULL;
zonelist->_zonerefs[0].zone_idx = 0;
}
/* NUMA-aware ordering of nodes */
local_node = pgdat->node_id;
load = nr_online_nodes;
prev_node = local_node;
nodes_clear(used_mask);
memset(node_order, 0, sizeof(node_order));
j = 0;
while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
int distance = node_distance(local_node, node);
/*
* If another node is sufficiently far away then it is better
* to reclaim pages in a zone before going off node.
*/
if (distance > RECLAIM_DISTANCE)
zone_reclaim_mode = 1;
/*
* We don't want to pressure a particular node.
* So adding penalty to the first node in same
* distance group to make it round-robin.
*/
if (distance != node_distance(local_node, prev_node))
node_load[node] = load;
prev_node = node;
load--;
if (order == ZONELIST_ORDER_NODE)
build_zonelists_in_node_order(pgdat, node);
else
node_order[j++] = node; /* remember order */
}
if (order == ZONELIST_ORDER_ZONE) {
/* calculate node order -- i.e., DMA last! */
build_zonelists_in_zone_order(pgdat, j);
}
build_thisnode_zonelists(pgdat);
}
/* Construct the zonelist performance cache - see further mmzone.h */
static void build_zonelist_cache(pg_data_t *pgdat)
{
struct zonelist *zonelist;
struct zonelist_cache *zlc;
struct zoneref *z;
zonelist = &pgdat->node_zonelists[0];
zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
for (z = zonelist->_zonerefs; z->zone; z++)
zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
}
#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
* Return node id of node used for "local" allocations.
* I.e., first node id of first zone in arg node's generic zonelist.
* Used for initializing percpu 'numa_mem', which is used primarily
* for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
*/
int local_memory_node(int node)
{
struct zone *zone;
(void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
gfp_zone(GFP_KERNEL),
NULL,
&zone);
return zone->node;
}
#endif
#else /* CONFIG_NUMA */
static void set_zonelist_order(void)
{
current_zonelist_order = ZONELIST_ORDER_ZONE;
}
static void build_zonelists(pg_data_t *pgdat)
{
int node, local_node;
enum zone_type j;
struct zonelist *zonelist;
local_node = pgdat->node_id;
zonelist = &pgdat->node_zonelists[0];
j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
/*
* Now we build the zonelist so that it contains the zones
* of all the other nodes.
* We don't want to pressure a particular node, so when
* building the zones for node N, we make sure that the
* zones coming right after the local ones are those from
* node N+1 (modulo N)
*/
for (node = local_node + 1; node < MAX_NUMNODES; node++) {
if (!node_online(node))
continue;
j = build_zonelists_node(NODE_DATA(node), zonelist, j,
MAX_NR_ZONES - 1);
}
for (node = 0; node < local_node; node++) {
if (!node_online(node))
continue;
j = build_zonelists_node(NODE_DATA(node), zonelist, j,
MAX_NR_ZONES - 1);
}
zonelist->_zonerefs[j].zone = NULL;
zonelist->_zonerefs[j].zone_idx = 0;
}
/* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
static void build_zonelist_cache(pg_data_t *pgdat)
{
pgdat->node_zonelists[0].zlcache_ptr = NULL;
}
#endif /* CONFIG_NUMA */
/*
* Boot pageset table. One per cpu which is going to be used for all
* zones and all nodes. The parameters will be set in such a way
* that an item put on a list will immediately be handed over to
* the buddy list. This is safe since pageset manipulation is done
* with interrupts disabled.
*
* The boot_pagesets must be kept even after bootup is complete for
* unused processors and/or zones. They do play a role for bootstrapping
* hotplugged processors.
*
* zoneinfo_show() and maybe other functions do
* not check if the processor is online before following the pageset pointer.
* Other parts of the kernel may not check if the zone is available.
*/
static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
static void setup_zone_pageset(struct zone *zone);
/*
* Global mutex to protect against size modification of zonelists
* as well as to serialize pageset setup for the new populated zone.
*/
DEFINE_MUTEX(zonelists_mutex);
/* the return type is int just to match stop_machine() */
static __init_refok int __build_all_zonelists(void *data)
{
int nid;
int cpu;
#ifdef CONFIG_NUMA
memset(node_load, 0, sizeof(node_load));
#endif
for_each_online_node(nid) {
pg_data_t *pgdat = NODE_DATA(nid);
build_zonelists(pgdat);
build_zonelist_cache(pgdat);
}
/*
* Initialize the boot_pagesets that are going to be used
* for bootstrapping processors. The real pagesets for
* each zone will be allocated later when the per cpu
* allocator is available.
*
* boot_pagesets are used also for bootstrapping offline
* cpus if the system is already booted because the pagesets
* are needed to initialize allocators on a specific cpu too.
* F.e. the percpu allocator needs the page allocator which
* needs the percpu allocator in order to allocate its pagesets
* (a chicken-egg dilemma).
*/
for_each_possible_cpu(cpu) {
setup_pageset(&per_cpu(boot_pageset, cpu), 0);
#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
* We now know the "local memory node" for each node--
* i.e., the node of the first zone in the generic zonelist.
* Set up numa_mem percpu variable for on-line cpus. During
* boot, only the boot cpu should be on-line; we'll init the
* secondary cpus' numa_mem as they come on-line. During
* node/memory hotplug, we'll fixup all on-line cpus.
*/
if (cpu_online(cpu))
set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
#endif
}
return 0;
}
/*
* Called with zonelists_mutex held always
* unless system_state == SYSTEM_BOOTING.
*/
void __ref build_all_zonelists(void *data)
{
set_zonelist_order();
if (system_state == SYSTEM_BOOTING) {
__build_all_zonelists(NULL);
mminit_verify_zonelist();
cpuset_init_current_mems_allowed();
} else {
/* we have to stop all cpus to guarantee there is no user
of zonelist */
#ifdef CONFIG_MEMORY_HOTPLUG
if (data)
setup_zone_pageset((struct zone *)data);
#endif
stop_machine(__build_all_zonelists, NULL, NULL);
/* cpuset refresh routine should be here */
}
vm_total_pages = nr_free_pagecache_pages();
/*
* Disable grouping by mobility if the number of pages in the
* system is too low to allow the mechanism to work. It would be
* more accurate, but expensive to check per-zone. This check is
* made on memory-hotadd so a system can start with mobility
* disabled and enable it later
*/
if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
page_group_by_mobility_disabled = 1;
else
page_group_by_mobility_disabled = 0;
printk("Built %i zonelists in %s order, mobility grouping %s. "
"Total pages: %ld\n",
nr_online_nodes,
zonelist_order_name[current_zonelist_order],
page_group_by_mobility_disabled ? "off" : "on",
vm_total_pages);
#ifdef CONFIG_NUMA
printk("Policy zone: %s\n", zone_names[policy_zone]);
#endif
}
/*
* Helper functions to size the waitqueue hash table.
* Essentially these want to choose hash table sizes sufficiently
* large so that collisions trying to wait on pages are rare.
* But in fact, the number of active page waitqueues on typical
* systems is ridiculously low, less than 200. So this is even
* conservative, even though it seems large.
*
* The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
* waitqueues, i.e. the size of the waitq table given the number of pages.
*/
#define PAGES_PER_WAITQUEUE 256
#ifndef CONFIG_MEMORY_HOTPLUG
static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
{
unsigned long size = 1;
pages /= PAGES_PER_WAITQUEUE;
while (size < pages)
size <<= 1;
/*
* Once we have dozens or even hundreds of threads sleeping
* on IO we've got bigger problems than wait queue collision.
* Limit the size of the wait table to a reasonable size.
*/
size = min(size, 4096UL);
return max(size, 4UL);
}
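/*
* Worked example: a 4GiB zone with 4K pages spans ~1M pages, so
* pages / PAGES_PER_WAITQUEUE is 4096 and the doubling loop lands
* exactly on the 4096-entry ceiling; a 16MiB zone (4096 pages) gets
* 16 entries.
*/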
#else
/*
* A zone's size might be changed by hot-add, so it is not possible to determine
* a suitable size for its wait_table. So we use the maximum size now.
*
* The max wait table size = 4096 x sizeof(wait_queue_head_t). I.e.:
*
* i386 (preemption config) : 4096 x 16 = 64Kbyte.
* ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
* ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte.
*
* The maximum entries are prepared when a zone's memory is (512K + 256) pages
* or more by the traditional way. (See above). It equals:
*
* i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte.
* ia64(16K page size) : = ( 8G + 4M)byte.
* powerpc (64K page size) : = (32G +16M)byte.
*/
static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
{
return 4096UL;
}
#endif
/*
* This is an integer logarithm so that shifts can be used later
* to extract the more random high bits from the multiplicative
* hash function before the remainder is taken.
*/
static inline unsigned long wait_table_bits(unsigned long size)
{
return ffz(~size);
}
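/*
* E.g. ffz(~4096) is 12, so the 4096-entry tables sized above hash with
* 12 bits; this relies on size being a power of two, as produced above.
*/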
#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
/*
* Check if a pageblock contains reserved pages
*/
static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
{
unsigned long pfn;
for (pfn = start_pfn; pfn < end_pfn; pfn++) {
if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
return 1;
}
return 0;
}
/*
* Mark a number of pageblocks as MIGRATE_RESERVE. The number
* of blocks reserved is based on min_wmark_pages(zone). The memory within
* the reserve will tend to store contiguous free pages. Setting min_free_kbytes
* higher will lead to a bigger reserve which will get freed as contiguous
* blocks as reclaim kicks in.
*/
static void setup_zone_migrate_reserve(struct zone *zone)
{
unsigned long start_pfn, pfn, end_pfn, block_end_pfn;
struct page *page;
unsigned long block_migratetype;
int reserve;
/*
* Get the start pfn, end pfn and the number of blocks to reserve
* We have to be careful to be aligned to pageblock_nr_pages to
* make sure that we always check pfn_valid for the first page in
* the block.
*/
start_pfn = zone->zone_start_pfn;
end_pfn = start_pfn + zone->spanned_pages;
start_pfn = roundup(start_pfn, pageblock_nr_pages);
reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
pageblock_order;
/*
* Reserve blocks are generally in place to help high-order atomic
* allocations that are short-lived. A min_free_kbytes value that
* would result in more than 2 reserve blocks for atomic allocations
* is assumed to be in place to help anti-fragmentation for the
* future allocation of hugepages at runtime.
*/
reserve = min(2, reserve);
for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
if (!pfn_valid(pfn))
continue;
page = pfn_to_page(pfn);
/* Watch out for overlapping nodes */
if (page_to_nid(page) != zone_to_nid(zone))
continue;
block_migratetype = get_pageblock_migratetype(page);
/* Only test what is necessary when the reserves are not met */
if (reserve > 0) {
/*
* Blocks with reserved pages will never free, skip
* them.
*/
block_end_pfn = min(pfn + pageblock_nr_pages, end_pfn);
if (pageblock_is_reserved(pfn, block_end_pfn))
continue;
/* If this block is reserved, account for it */
if (block_migratetype == MIGRATE_RESERVE) {
reserve--;
continue;
}
/* Suitable for reserving if this block is movable */
if (block_migratetype == MIGRATE_MOVABLE) {
set_pageblock_migratetype(page,
MIGRATE_RESERVE);
move_freepages_block(zone, page,
MIGRATE_RESERVE);
reserve--;
continue;
}
}
/*
* If the reserve is met and this is a previous reserved block,
* take it back
*/
if (block_migratetype == MIGRATE_RESERVE) {
set_pageblock_migratetype(page, MIGRATE_MOVABLE);
move_freepages_block(zone, page, MIGRATE_MOVABLE);
}
}
}
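/*
* E.g. with pageblock_nr_pages == 1024, a min watermark of 1500 pages
* rounds up to 2048, i.e. two reserve blocks, which is also the cap
* that min(2, reserve) enforces above.
*/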
/*
* Initially all pages are reserved - free ones are freed
* up by free_all_bootmem() once the early boot process is
* done. Non-atomic initialization, single-pass.
*/
void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
unsigned long start_pfn, enum memmap_context context)
{
struct page *page;
unsigned long end_pfn = start_pfn + size;
unsigned long pfn;
struct zone *z;
if (highest_memmap_pfn < end_pfn - 1)
highest_memmap_pfn = end_pfn - 1;
z = &NODE_DATA(nid)->node_zones[zone];
for (pfn = start_pfn; pfn < end_pfn; pfn++) {
/*
* There can be holes in boot-time mem_map[]s
* handed to this function. They do not
* exist on hotplugged memory.
*/
if (context == MEMMAP_EARLY) {
if (!early_pfn_valid(pfn))
continue;
if (!early_pfn_in_nid(pfn, nid))
continue;
}
page = pfn_to_page(pfn);
set_page_links(page, zone, nid, pfn);
mminit_verify_page_links(page, zone, nid, pfn);
init_page_count(page);
reset_page_mapcount(page);
SetPageReserved(page);
/*
* Mark the block movable so that blocks are reserved for
* movable at startup. This will force kernel allocations
* to reserve their blocks rather than leaking throughout
* the address space during boot when many long-lived
* kernel allocations are made. Later some blocks near
* the start are marked MIGRATE_RESERVE by
* setup_zone_migrate_reserve()
*
* bitmap is created for zone's valid pfn range. but memmap
* can be created for invalid pages (for alignment)
* check here not to call set_pageblock_migratetype() against
* pfn out of zone.
*/
if ((z->zone_start_pfn <= pfn)
&& (pfn < z->zone_start_pfn + z->spanned_pages)
&& !(pfn & (pageblock_nr_pages - 1)))
set_pageblock_migratetype(page, MIGRATE_MOVABLE);
INIT_LIST_HEAD(&page->lru);
#ifdef WANT_PAGE_VIRTUAL
/* The shift won't overflow because ZONE_NORMAL is below 4G. */
if (!is_highmem_idx(zone))
set_page_address(page, __va(pfn << PAGE_SHIFT));
#endif
}
}
static void __meminit zone_init_free_lists(struct zone *zone)
{
int order, t;
for_each_migratetype_order(order, t) {
INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
zone->free_area[order].nr_free = 0;
}
}
#ifndef __HAVE_ARCH_MEMMAP_INIT
#define memmap_init(size, nid, zone, start_pfn) \
memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
#endif
static int zone_batchsize(struct zone *zone)
{
#ifdef CONFIG_MMU
int batch;
/*
* The per-cpu-pages pools are set to around 1000th of the
* size of the zone. But no more than 1/2 of a meg.
*
* OK, so we don't know how big the cache is. So guess.
*/
batch = zone->present_pages / 1024;
if (batch * PAGE_SIZE > 512 * 1024)
batch = (512 * 1024) / PAGE_SIZE;
batch /= 4; /* We effectively *= 4 below */
if (batch < 1)
batch = 1;
/*
* Clamp the batch to a 2^n - 1 value. Having a power
* of 2 value was found to be more likely to have
* suboptimal cache aliasing properties in some cases.
*
* For example if 2 tasks are alternately allocating
* batches of pages, one task can end up with a lot
* of pages of one half of the possible page colors
* and the other with pages of the other colors.
*/
batch = rounddown_pow_of_two(batch + batch/2) - 1;
return batch;
#else
/* The deferral and batching of frees should be suppressed under NOMMU
* conditions.
*
* The problem is that NOMMU needs to be able to allocate large chunks
* of contiguous memory as there's no hardware page translation to
* assemble apparent contiguous memory from discontiguous pages.
*
* Queueing large contiguous runs of pages for batching, however,
* causes the pages to actually be freed in smaller chunks. As there
* can be a significant delay between the individual batches being
* recycled, this leads to the once large chunks of space being
* fragmented and becoming unavailable for high-order allocations.
*/
return 0;
#endif
}
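/*
* Worked example for the MMU case, assuming 4K pages: a 256MiB zone has
* 65536 present pages, so batch starts at 64 (256KiB, under the 512KiB
* cap), drops to 16 after the /= 4, and rounddown_pow_of_two(16 + 8) - 1
* gives a final batch of 15 (so pcp->high below becomes 90).
*/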
static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
{
struct per_cpu_pages *pcp;
int migratetype;
memset(p, 0, sizeof(*p));
pcp = &p->pcp;
pcp->count = 0;
pcp->high = 6 * batch;
pcp->batch = max(1UL, 1 * batch);
for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
INIT_LIST_HEAD(&pcp->lists[migratetype]);
}
/*
* setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist
* to the value high for the pageset p.
*/
static void setup_pagelist_highmark(struct per_cpu_pageset *p,
unsigned long high)
{
struct per_cpu_pages *pcp;
pcp = &p->pcp;
pcp->high = high;
pcp->batch = max(1UL, high/4);
if ((high/4) > (PAGE_SHIFT * 8))
pcp->batch = PAGE_SHIFT * 8;
}
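/*
* E.g. if the percpu_pagelist_fraction sysctl works out to high == 1000
* pages, batch becomes high/4 == 250 and is then clamped to
* PAGE_SHIFT * 8 (96 with 4K pages) by the test above.
*/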
static void setup_zone_pageset(struct zone *zone)
{
int cpu;
zone->pageset = alloc_percpu(struct per_cpu_pageset);
for_each_possible_cpu(cpu) {
struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
setup_pageset(pcp, zone_batchsize(zone));
if (percpu_pagelist_fraction)
setup_pagelist_highmark(pcp,
(zone->present_pages /
percpu_pagelist_fraction));
}
}
/*
* Allocate per cpu pagesets and initialize them.
* Before this call only boot pagesets were available.
*/
void __init setup_per_cpu_pageset(void)
{
struct zone *zone;
for_each_populated_zone(zone)
setup_zone_pageset(zone);
}
static noinline __init_refok
int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
{
int i;
struct pglist_data *pgdat = zone->zone_pgdat;
size_t alloc_size;
/*
* The per-page waitqueue mechanism uses hashed waitqueues
* per zone.
*/
zone->wait_table_hash_nr_entries =
wait_table_hash_nr_entries(zone_size_pages);
zone->wait_table_bits =
wait_table_bits(zone->wait_table_hash_nr_entries);
alloc_size = zone->wait_table_hash_nr_entries
* sizeof(wait_queue_head_t);
if (!slab_is_available()) {
zone->wait_table = (wait_queue_head_t *)
alloc_bootmem_node_nopanic(pgdat, alloc_size);
} else {
/*
* This case means that a zone whose size was 0 gets new memory
* via memory hot-add.
* But it may be the case that a new node was hot-added. In
* this case vmalloc() will not be able to use this new node's
* memory - this wait_table must be initialized to use this new
* node itself as well.
* To use this new node's memory, further consideration will be
* necessary.
*/
zone->wait_table = vmalloc(alloc_size);
}
if (!zone->wait_table)
return -ENOMEM;
for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)
init_waitqueue_head(zone->wait_table + i);
return 0;
}
static int __zone_pcp_update(void *data)
{
struct zone *zone = data;
int cpu;
unsigned long batch = zone_batchsize(zone), flags;
for_each_possible_cpu(cpu) {
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
pset = per_cpu_ptr(zone->pageset, cpu);
pcp = &pset->pcp;
local_irq_save(flags);
free_pcppages_bulk(zone, pcp->count, pcp);
setup_pageset(pset, batch);
local_irq_restore(flags);
}
return 0;
}
void zone_pcp_update(struct zone *zone)
{
stop_machine(__zone_pcp_update, zone, NULL);
}
static __meminit void zone_pcp_init(struct zone *zone)
{
/*
* per cpu subsystem is not up at this point. The following code
* relies on the ability of the linker to provide the
* offset of a (static) per cpu variable into the per cpu area.
*/
zone->pageset = &boot_pageset;
if (zone->present_pages)
printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n",
zone->name, zone->present_pages,
zone_batchsize(zone));
}
__meminit int init_currently_empty_zone(struct zone *zone,
unsigned long zone_start_pfn,
unsigned long size,
enum memmap_context context)
{
struct pglist_data *pgdat = zone->zone_pgdat;
int ret;
ret = zone_wait_table_init(zone, size);
if (ret)
return ret;
pgdat->nr_zones = zone_idx(zone) + 1;
zone->zone_start_pfn = zone_start_pfn;
mminit_dprintk(MMINIT_TRACE, "memmap_init",
"Initialising map node %d zone %lu pfns %lu -> %lu\n",
pgdat->node_id,
(unsigned long)zone_idx(zone),
zone_start_pfn, (zone_start_pfn + size));
zone_init_free_lists(zone);
return 0;
}
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
/*
* Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
* Architectures may implement their own version but if add_active_range()
* was used and there are no special requirements, this is a convenient
* alternative
*/
int __meminit __early_pfn_to_nid(unsigned long pfn)
{
unsigned long start_pfn, end_pfn;
int i, nid;
for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
if (start_pfn <= pfn && pfn < end_pfn)
return nid;
/* This is a memory hole */
return -1;
}
#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
int __meminit early_pfn_to_nid(unsigned long pfn)
{
int nid;
nid = __early_pfn_to_nid(pfn);
if (nid >= 0)
return nid;
/* just returns 0 */
return 0;
}
#ifdef CONFIG_NODES_SPAN_OTHER_NODES
bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
{
int nid;
nid = __early_pfn_to_nid(pfn);
if (nid >= 0 && nid != node)
return false;
return true;
}
#endif
/**
* free_bootmem_with_active_regions - Call free_bootmem_node for each active range
* @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
* @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
*
* If an architecture guarantees that all ranges registered with
* add_active_ranges() contain no holes and may be freed, this
* function may be used instead of calling free_bootmem() manually.
*/
void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
{
unsigned long start_pfn, end_pfn;
int i, this_nid;
for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
start_pfn = min(start_pfn, max_low_pfn);
end_pfn = min(end_pfn, max_low_pfn);
if (start_pfn < end_pfn)
free_bootmem_node(NODE_DATA(this_nid),
PFN_PHYS(start_pfn),
(end_pfn - start_pfn) << PAGE_SHIFT);
}
}
/**
* sparse_memory_present_with_active_regions - Call memory_present for each active range
* @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
*
* If an architecture guarantees that all ranges registered with
* add_active_ranges() contain no holes and may be freed, this
* function may be used instead of calling memory_present() manually.
*/
void __init sparse_memory_present_with_active_regions(int nid)
{
unsigned long start_pfn, end_pfn;
int i, this_nid;
for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
memory_present(this_nid, start_pfn, end_pfn);
}
/**
* get_pfn_range_for_nid - Return the start and end page frames for a node
* @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
* @start_pfn: Passed by reference. On return, it will have the node start_pfn.
* @end_pfn: Passed by reference. On return, it will have the node end_pfn.
*
* It returns the start and end page frame of a node based on information
* provided by an arch calling add_active_range(). If called for a node
* with no available memory, a warning is printed and the start and end
* PFNs will be 0.
*/
void __meminit get_pfn_range_for_nid(unsigned int nid,
unsigned long *start_pfn, unsigned long *end_pfn)
{
unsigned long this_start_pfn, this_end_pfn;
int i;
*start_pfn = -1UL;
*end_pfn = 0;
for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
*start_pfn = min(*start_pfn, this_start_pfn);
*end_pfn = max(*end_pfn, this_end_pfn);
}
if (*start_pfn == -1UL)
*start_pfn = 0;
}
/*
* This finds a zone that can be used for ZONE_MOVABLE pages. The
* assumption is made that zones within a node are ordered in monotonic
* increasing memory addresses so that the "highest" populated zone is used
*/
static void __init find_usable_zone_for_movable(void)
{
int zone_index;
for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
if (zone_index == ZONE_MOVABLE)
continue;
if (arch_zone_highest_possible_pfn[zone_index] >
arch_zone_lowest_possible_pfn[zone_index])
break;
}
VM_BUG_ON(zone_index == -1);
movable_zone = zone_index;
}
/*
* The zone ranges provided by the architecture do not include ZONE_MOVABLE
* because it is sized independently of the architecture. Unlike the other zones,
* the starting point for ZONE_MOVABLE is not fixed. It may be different
* in each node depending on the size of each node and how evenly kernelcore
* is distributed. This helper function adjusts the zone ranges
* provided by the architecture for a given node by using the end of the
* highest usable zone for ZONE_MOVABLE. This preserves the assumption that
* zones within a node are in order of monotonically increasing memory addresses.
*/
static void __meminit adjust_zone_range_for_zone_movable(int nid,
unsigned long zone_type,
unsigned long node_start_pfn,
unsigned long node_end_pfn,
unsigned long *zone_start_pfn,
unsigned long *zone_end_pfn)
{
/* Only adjust if ZONE_MOVABLE is on this node */
if (zone_movable_pfn[nid]) {
/* Size ZONE_MOVABLE */
if (zone_type == ZONE_MOVABLE) {
*zone_start_pfn = zone_movable_pfn[nid];
*zone_end_pfn = min(node_end_pfn,
arch_zone_highest_possible_pfn[movable_zone]);
/* Adjust for ZONE_MOVABLE starting within this range */
} else if (*zone_start_pfn < zone_movable_pfn[nid] &&
*zone_end_pfn > zone_movable_pfn[nid]) {
*zone_end_pfn = zone_movable_pfn[nid];
/* Check if this whole range is within ZONE_MOVABLE */
} else if (*zone_start_pfn >= zone_movable_pfn[nid])
*zone_start_pfn = *zone_end_pfn;
}
}
/*
* Return the number of pages a zone spans in a node, including holes
* present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
*/
static unsigned long __meminit zone_spanned_pages_in_node(int nid,
unsigned long zone_type,
unsigned long *ignored)
{
unsigned long node_start_pfn, node_end_pfn;
unsigned long zone_start_pfn, zone_end_pfn;
/* Get the start and end of the node and zone */
get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
adjust_zone_range_for_zone_movable(nid, zone_type,
node_start_pfn, node_end_pfn,
&zone_start_pfn, &zone_end_pfn);
/* Check that this node has pages within the zone's required range */
if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
return 0;
/* Move the zone boundaries inside the node if necessary */
zone_end_pfn = min(zone_end_pfn, node_end_pfn);
zone_start_pfn = max(zone_start_pfn, node_start_pfn);
/* Return the spanned pages */
return zone_end_pfn - zone_start_pfn;
}
/*
* Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
* then all holes in the requested range will be accounted for.
*/
unsigned long __meminit __absent_pages_in_range(int nid,
unsigned long range_start_pfn,
unsigned long range_end_pfn)
{
unsigned long nr_absent = range_end_pfn - range_start_pfn;
unsigned long start_pfn, end_pfn;
int i;
for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
nr_absent -= end_pfn - start_pfn;
}
return nr_absent;
}
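/*
* Example: querying pfns [0, 1000) when the only registered ranges
* inside it are [100, 200) and [500, 900) starts nr_absent at 1000 and
* subtracts 100 and 400 pages, reporting a 500-page hole.
*/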
/**
* absent_pages_in_range - Return number of page frames in holes within a range
* @start_pfn: The start PFN to start searching for holes
* @end_pfn: The end PFN to stop searching for holes
*
* It returns the number of page frames in memory holes within a range.
*/
unsigned long __init absent_pages_in_range(unsigned long start_pfn,
unsigned long end_pfn)
{
return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
}
/* Return the number of page frames in holes in a zone on a node */
static unsigned long __meminit zone_absent_pages_in_node(int nid,
unsigned long zone_type,
unsigned long *ignored)
{
unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
unsigned long node_start_pfn, node_end_pfn;
unsigned long zone_start_pfn, zone_end_pfn;
get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
adjust_zone_range_for_zone_movable(nid, zone_type,
node_start_pfn, node_end_pfn,
&zone_start_pfn, &zone_end_pfn);
return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
}
#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
unsigned long zone_type,
unsigned long *zones_size)
{
return zones_size[zone_type];
}
static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
unsigned long zone_type,
unsigned long *zholes_size)
{
if (!zholes_size)
return 0;
return zholes_size[zone_type];
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
unsigned long *zones_size, unsigned long *zholes_size)
{
unsigned long realtotalpages, totalpages = 0;
enum zone_type i;
for (i = 0; i < MAX_NR_ZONES; i++)
totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
zones_size);
pgdat->node_spanned_pages = totalpages;
realtotalpages = totalpages;
for (i = 0; i < MAX_NR_ZONES; i++)
realtotalpages -=
zone_absent_pages_in_node(pgdat->node_id, i,
zholes_size);
pgdat->node_present_pages = realtotalpages;
printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
realtotalpages);
}
#ifndef CONFIG_SPARSEMEM
/*
* Calculate the size of the zone->blockflags rounded to an unsigned long
* Start by making sure zonesize is a multiple of pageblock_order by rounding
* up. Then use NR_PAGEBLOCK_BITS worth of bits per pageblock, round
* what is now in bits up to the nearest long in bits, then return it in
* bytes.
*/
static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
{
unsigned long usemapsize;
zonesize += zone_start_pfn & (pageblock_nr_pages-1);
usemapsize = roundup(zonesize, pageblock_nr_pages);
usemapsize = usemapsize >> pageblock_order;
usemapsize *= NR_PAGEBLOCK_BITS;
usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
return usemapsize / 8;
}
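/*
* Worked example, taking pageblock_order == 10 and NR_PAGEBLOCK_BITS == 4
* for illustration: an aligned 262144-page zone spans 256 pageblocks,
* needing 1024 bits, which round up to unsigned longs as 128 bytes.
*/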
static void __init setup_usemap(struct pglist_data *pgdat,
struct zone *zone,
unsigned long zone_start_pfn,
unsigned long zonesize)
{
unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
zone->pageblock_flags = NULL;
if (usemapsize)
zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat,
usemapsize);
}
#else
static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
unsigned long zone_start_pfn, unsigned long zonesize) {}
#endif /* CONFIG_SPARSEMEM */
#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
void __init set_pageblock_order(void)
{
unsigned int order;
/* Check that pageblock_nr_pages has not already been setup */
if (pageblock_order)
return;
if (HPAGE_SHIFT > PAGE_SHIFT)
order = HUGETLB_PAGE_ORDER;
else
order = MAX_ORDER - 1;
/*
* Assume the largest contiguous order of interest is a huge page.
* This value may be variable depending on boot parameters on IA64 and
* powerpc.
*/
pageblock_order = order;
}
#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
/*
* When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
* is unused as pageblock_order is set at compile-time. See
* include/linux/pageblock-flags.h for the values of pageblock_order based on
* the kernel config
*/
void __init set_pageblock_order(void)
{
}
#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
/*
* Set up the zone data structures:
* - mark all pages reserved
* - mark all memory queues empty
* - clear the memory bitmaps
*/
static void __paginginit free_area_init_core(struct pglist_data *pgdat,
unsigned long *zones_size, unsigned long *zholes_size)
{
enum zone_type j;
int nid = pgdat->node_id;
unsigned long zone_start_pfn = pgdat->node_start_pfn;
int ret;
pgdat_resize_init(pgdat);
pgdat->nr_zones = 0;
init_waitqueue_head(&pgdat->kswapd_wait);
pgdat->kswapd_max_order = 0;
pgdat_page_cgroup_init(pgdat);
for (j = 0; j < MAX_NR_ZONES; j++) {
struct zone *zone = pgdat->node_zones + j;
unsigned long size, realsize, memmap_pages;
enum lru_list lru;
size = zone_spanned_pages_in_node(nid, j, zones_size);
realsize = size - zone_absent_pages_in_node(nid, j,
zholes_size);
/*
* Adjust realsize so that it accounts for how much memory
* is used by this zone for memmap. This affects the watermark
* and per-cpu initialisations
*/
memmap_pages =
PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
if (realsize >= memmap_pages) {
realsize -= memmap_pages;
if (memmap_pages)
printk(KERN_DEBUG
" %s zone: %lu pages used for memmap\n",
zone_names[j], memmap_pages);
} else
printk(KERN_WARNING
" %s zone: %lu pages exceeds realsize %lu\n",
zone_names[j], memmap_pages, realsize);
/* Account for reserved pages */
if (j == 0 && realsize > dma_reserve) {
realsize -= dma_reserve;
printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
zone_names[0], dma_reserve);
}
if (!is_highmem_idx(j))
nr_kernel_pages += realsize;
nr_all_pages += realsize;
zone->spanned_pages = size;
zone->present_pages = realsize;
#ifdef CONFIG_NUMA
zone->node = nid;
zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
/ 100;
zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
#endif
zone->name = zone_names[j];
spin_lock_init(&zone->lock);
spin_lock_init(&zone->lru_lock);
zone_seqlock_init(zone);
zone->zone_pgdat = pgdat;
zone_pcp_init(zone);
for_each_lru(lru)
INIT_LIST_HEAD(&zone->lruvec.lists[lru]);
zone->reclaim_stat.recent_rotated[0] = 0;
zone->reclaim_stat.recent_rotated[1] = 0;
zone->reclaim_stat.recent_scanned[0] = 0;
zone->reclaim_stat.recent_scanned[1] = 0;
zap_zone_vm_stats(zone);
zone->flags = 0;
if (!size)
continue;
set_pageblock_order();
setup_usemap(pgdat, zone, zone_start_pfn, size);
ret = init_currently_empty_zone(zone, zone_start_pfn,
size, MEMMAP_EARLY);
BUG_ON(ret);
memmap_init(size, nid, j, zone_start_pfn);
zone_start_pfn += size;
}
}
static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
{
/* Skip empty nodes */
if (!pgdat->node_spanned_pages)
return;
#ifdef CONFIG_FLAT_NODE_MEM_MAP
/* ia64 gets its own node_mem_map, before this, without bootmem */
if (!pgdat->node_mem_map) {
unsigned long size, start, end;
struct page *map;
/*
* The zone's endpoints aren't required to be MAX_ORDER
* aligned but the node_mem_map endpoints must be in order
* for the buddy allocator to function correctly.
*/
start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
end = ALIGN(end, MAX_ORDER_NR_PAGES);
size = (end - start) * sizeof(struct page);
map = alloc_remap(pgdat->node_id, size);
if (!map)
map = alloc_bootmem_node_nopanic(pgdat, size);
pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
}
#ifndef CONFIG_NEED_MULTIPLE_NODES
/*
* With no DISCONTIG, the global mem_map is just set as node 0's
*/
if (pgdat == NODE_DATA(0)) {
mem_map = NODE_DATA(0)->node_mem_map;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
}
#endif
#endif /* CONFIG_FLAT_NODE_MEM_MAP */
}
void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
unsigned long node_start_pfn, unsigned long *zholes_size)
{
pg_data_t *pgdat = NODE_DATA(nid);
pgdat->node_id = nid;
pgdat->node_start_pfn = node_start_pfn;
calculate_node_totalpages(pgdat, zones_size, zholes_size);
alloc_node_mem_map(pgdat);
#ifdef CONFIG_FLAT_NODE_MEM_MAP
printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
nid, (unsigned long)pgdat,
(unsigned long)pgdat->node_mem_map);
#endif
free_area_init_core(pgdat, zones_size, zholes_size);
}
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
#if MAX_NUMNODES > 1
/*
* Figure out the number of possible node ids.
*/
static void __init setup_nr_node_ids(void)
{
unsigned int node;
unsigned int highest = 0;
for_each_node_mask(node, node_possible_map)
highest = node;
nr_node_ids = highest + 1;
}
#else
static inline void setup_nr_node_ids(void)
{
}
#endif
/**
* node_map_pfn_alignment - determine the maximum internode alignment
*
* This function should be called after node map is populated and sorted.
* It calculates the maximum power of two alignment which can distinguish
* all the nodes.
*
* For example, if all nodes are 1GiB and aligned to 1GiB, the return value
* would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the
* nodes are shifted by 256MiB, the returned alignment is 256MiB. Note
* that if only the last node is shifted, 1GiB is still enough and this
* function will indicate so.
*
* This is used to test whether pfn -> nid mapping of the chosen memory
* model has fine enough granularity to avoid incorrect mapping for the
* populated node map.
*
* Returns the determined alignment in PFNs, or 0 if there is no
* alignment requirement (single node).
*/
unsigned long __init node_map_pfn_alignment(void)
{
unsigned long accl_mask = 0, last_end = 0;
unsigned long start, end, mask;
int last_nid = -1;
int i, nid;
for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
if (!start || last_nid < 0 || last_nid == nid) {
last_nid = nid;
last_end = end;
continue;
}
/*
* Start with a mask granular enough to pin-point to the
* start pfn and tick off bits one-by-one until it becomes
* too coarse to separate the current node from the last.
*/
mask = ~((1 << __ffs(start)) - 1);
while (mask && last_end <= (start & (mask << 1)))
mask <<= 1;
/* accumulate all internode masks */
accl_mask |= mask;
}
/* convert mask to number of pages */
return ~accl_mask + 1;
}
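/*
 * Illustrative walk-through (editorial sketch, not part of the original
 * file): with node0 = [0, 0x40000) and node1 = [0x40000, 0x80000), the
 * second range gives start = 0x40000, so __ffs(start) = 18 and the
 * initial mask is ~0x3ffff. last_end (0x40000) is not <= (start &
 * (mask << 1)) = 0, so the mask is not widened, and the function
 * returns ~accl_mask + 1 = 0x40000 pfns.
 */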
/* Find the lowest pfn for a node */
static unsigned long __init find_min_pfn_for_node(int nid)
{
unsigned long min_pfn = ULONG_MAX;
unsigned long start_pfn;
int i;
for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
min_pfn = min(min_pfn, start_pfn);
if (min_pfn == ULONG_MAX) {
printk(KERN_WARNING
"Could not find start_pfn for node %d\n", nid);
return 0;
}
return min_pfn;
}
/**
* find_min_pfn_with_active_regions - Find the minimum PFN registered
*
* It returns the minimum PFN based on information provided via
* add_active_range().
*/
unsigned long __init find_min_pfn_with_active_regions(void)
{
return find_min_pfn_for_node(MAX_NUMNODES);
}
/*
* early_calculate_totalpages()
* Sum pages in active regions for movable zone.
* Populate N_HIGH_MEMORY for calculating usable_nodes.
*/
static unsigned long __init early_calculate_totalpages(void)
{
unsigned long totalpages = 0;
unsigned long start_pfn, end_pfn;
int i, nid;
for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
unsigned long pages = end_pfn - start_pfn;
totalpages += pages;
if (pages)
node_set_state(nid, N_HIGH_MEMORY);
}
return totalpages;
}
/*
* Find the PFN the Movable zone begins in each node. Kernel memory
* is spread evenly between nodes as long as the nodes have enough
* memory. When they don't, some nodes will have more kernelcore than
* others
*/
static void __init find_zone_movable_pfns_for_nodes(void)
{
int i, nid;
unsigned long usable_startpfn;
unsigned long kernelcore_node, kernelcore_remaining;
/* save the state before borrow the nodemask */
nodemask_t saved_node_state = node_states[N_HIGH_MEMORY];
unsigned long totalpages = early_calculate_totalpages();
int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
#ifdef CONFIG_FIX_MOVABLE_ZONE
required_movablecore = movable_reserved_size >> PAGE_SHIFT;
#endif
/*
* If movablecore was specified, calculate what size of
* kernelcore that corresponds so that memory usable for
* any allocation type is evenly spread. If both kernelcore
* and movablecore are specified, then the value of kernelcore
* will be used for required_kernelcore if it's greater than
* what movablecore would have allowed.
*/
if (required_movablecore) {
unsigned long corepages;
/*
* Round-up so that ZONE_MOVABLE is at least as large as what
* was requested by the user
*/
required_movablecore =
roundup(required_movablecore, MAX_ORDER_NR_PAGES);
corepages = totalpages - required_movablecore;
required_kernelcore = max(required_kernelcore, corepages);
}
/* If kernelcore was not specified, there is no ZONE_MOVABLE */
if (!required_kernelcore)
goto out;
/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
find_usable_zone_for_movable();
usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
restart:
/* Spread kernelcore memory as evenly as possible throughout nodes */
kernelcore_node = required_kernelcore / usable_nodes;
for_each_node_state(nid, N_HIGH_MEMORY) {
unsigned long start_pfn, end_pfn;
/*
* Recalculate kernelcore_node if the division per node
* now exceeds what is necessary to satisfy the requested
* amount of memory for the kernel
*/
if (required_kernelcore < kernelcore_node)
kernelcore_node = required_kernelcore / usable_nodes;
/*
* As the map is walked, we track how much memory is usable
* by the kernel using kernelcore_remaining. When it is
* 0, the rest of the node is usable by ZONE_MOVABLE
*/
kernelcore_remaining = kernelcore_node;
/* Go through each range of PFNs within this node */
for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
unsigned long size_pages;
start_pfn = max(start_pfn, zone_movable_pfn[nid]);
if (start_pfn >= end_pfn)
continue;
/* Account for what is only usable for kernelcore */
if (start_pfn < usable_startpfn) {
unsigned long kernel_pages;
kernel_pages = min(end_pfn, usable_startpfn)
- start_pfn;
kernelcore_remaining -= min(kernel_pages,
kernelcore_remaining);
required_kernelcore -= min(kernel_pages,
required_kernelcore);
/* Continue if range is now fully accounted */
if (end_pfn <= usable_startpfn) {
/*
* Push zone_movable_pfn to the end so
* that if we have to rebalance
* kernelcore across nodes, we will
* not double account here
*/
zone_movable_pfn[nid] = end_pfn;
continue;
}
start_pfn = usable_startpfn;
}
/*
* The usable PFN range for ZONE_MOVABLE is from
* start_pfn->end_pfn. Calculate size_pages as the
* number of pages used as kernelcore
*/
size_pages = end_pfn - start_pfn;
if (size_pages > kernelcore_remaining)
size_pages = kernelcore_remaining;
zone_movable_pfn[nid] = start_pfn + size_pages;
/*
* Some kernelcore has been met; update counts and
* break if the kernelcore for this node has been
* satisfied
*/
required_kernelcore -= min(required_kernelcore,
size_pages);
kernelcore_remaining -= size_pages;
if (!kernelcore_remaining)
break;
}
}
/*
* If there is still required_kernelcore, we do another pass with one
* less node in the count. This will push zone_movable_pfn[nid] further
* along on the nodes that still have memory until kernelcore is
* satisfied
*/
usable_nodes--;
if (usable_nodes && required_kernelcore > usable_nodes)
goto restart;
/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
for (nid = 0; nid < MAX_NUMNODES; nid++)
zone_movable_pfn[nid] =
roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
out:
/* restore the node_state */
node_states[N_HIGH_MEMORY] = saved_node_state;
}
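/*
 * Worked example (editorial sketch): booting with kernelcore=512M on a
 * two-node machine with 4K pages gives required_kernelcore = 131072
 * pages and kernelcore_node = 65536 pages per node. Assuming both nodes
 * have enough memory below usable_startpfn, each node contributes
 * roughly 65536 pages of kernelcore and the remainder of each node
 * becomes ZONE_MOVABLE.
 */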
/* Any regular memory on that node ? */
static void check_for_regular_memory(pg_data_t *pgdat)
{
#ifdef CONFIG_HIGHMEM
enum zone_type zone_type;
for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
struct zone *zone = &pgdat->node_zones[zone_type];
if (zone->present_pages) {
node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
break;
}
}
#endif
}
/**
* free_area_init_nodes - Initialise all pg_data_t and zone data
* @max_zone_pfn: an array of max PFNs for each zone
*
* This will call free_area_init_node() for each active node in the system.
* Using the page ranges provided by add_active_range(), the size of each
* zone in each node and their holes is calculated. If the maximum PFN
* between two adjacent zones match, it is assumed that the zone is empty.
* For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
* that arch_max_dma32_pfn has no pages. It is also assumed that a zone
* starts where the previous one ended. For example, ZONE_DMA32 starts
* at arch_max_dma_pfn.
*/
void __init free_area_init_nodes(unsigned long *max_zone_pfn)
{
unsigned long start_pfn, end_pfn;
int i, nid;
/* Record where the zone boundaries are */
memset(arch_zone_lowest_possible_pfn, 0,
sizeof(arch_zone_lowest_possible_pfn));
memset(arch_zone_highest_possible_pfn, 0,
sizeof(arch_zone_highest_possible_pfn));
arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
for (i = 1; i < MAX_NR_ZONES; i++) {
if (i == ZONE_MOVABLE)
continue;
arch_zone_lowest_possible_pfn[i] =
arch_zone_highest_possible_pfn[i-1];
arch_zone_highest_possible_pfn[i] =
max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
}
arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
/* Find the PFNs that ZONE_MOVABLE begins at in each node */
memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
find_zone_movable_pfns_for_nodes();
/* Print out the zone ranges */
printk("Zone PFN ranges:\n");
for (i = 0; i < MAX_NR_ZONES; i++) {
if (i == ZONE_MOVABLE)
continue;
printk(" %-8s ", zone_names[i]);
if (arch_zone_lowest_possible_pfn[i] ==
arch_zone_highest_possible_pfn[i])
printk("empty\n");
else
printk("%0#10lx -> %0#10lx\n",
arch_zone_lowest_possible_pfn[i],
arch_zone_highest_possible_pfn[i]);
}
/* Print out the PFNs ZONE_MOVABLE begins at in each node */
printk("Movable zone start PFN for each node\n");
for (i = 0; i < MAX_NUMNODES; i++) {
if (zone_movable_pfn[i])
printk(" Node %d: %lu\n", i, zone_movable_pfn[i]);
}
/* Print out the early_node_map[] */
printk("Early memory PFN ranges\n");
for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
printk(" %3d: %0#10lx -> %0#10lx\n", nid, start_pfn, end_pfn);
/* Initialise every node */
mminit_verify_pageflags_layout();
setup_nr_node_ids();
for_each_online_node(nid) {
pg_data_t *pgdat = NODE_DATA(nid);
free_area_init_node(nid, NULL,
find_min_pfn_for_node(nid), NULL);
/* Any memory on that node */
if (pgdat->node_present_pages)
node_set_state(nid, N_HIGH_MEMORY);
check_for_regular_memory(pgdat);
}
}
static int __init cmdline_parse_core(char *p, unsigned long *core)
{
unsigned long long coremem;
if (!p)
return -EINVAL;
coremem = memparse(p, &p);
*core = coremem >> PAGE_SHIFT;
/* Paranoid check that UL is enough for the coremem value */
WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
return 0;
}
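/*
 * Example (editorial sketch): booting with "kernelcore=512M" makes
 * memparse() return 512 << 20 = 0x20000000, so with 4K pages
 * required_kernelcore becomes 0x20000000 >> 12 = 131072 pages.
 */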
/*
* kernelcore=size sets the amount of memory for use for allocations that
* cannot be reclaimed or migrated.
*/
static int __init cmdline_parse_kernelcore(char *p)
{
return cmdline_parse_core(p, &required_kernelcore);
}
/*
* movablecore=size sets the amount of memory for use for allocations that
* can be reclaimed or migrated.
*/
static int __init cmdline_parse_movablecore(char *p)
{
return cmdline_parse_core(p, &required_movablecore);
}
early_param("kernelcore", cmdline_parse_kernelcore);
early_param("movablecore", cmdline_parse_movablecore);
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
/**
* set_dma_reserve - set the specified number of pages reserved in the first zone
* @new_dma_reserve: The number of pages to mark reserved
*
* The per-cpu batchsize and zone watermarks are determined by present_pages.
* In the DMA zone, a significant percentage may be consumed by kernel image
* and other unfreeable allocations which can skew the watermarks badly. This
* function may optionally be used to account for unfreeable pages in the
* first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
* smaller per-cpu batchsize.
*/
void __init set_dma_reserve(unsigned long new_dma_reserve)
{
dma_reserve = new_dma_reserve;
}
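/*
 * Illustrative call sequence (editorial sketch; the 8MB footprint is a
 * made-up number): arch setup code accounts for unfreeable low memory
 * before sizing the zones, e.g.:
 */
#if 0	/* sketch only, not built */
unsigned long kernel_pages = (8UL << 20) >> PAGE_SHIFT; /* 8MB image */

set_dma_reserve(kernel_pages);
free_area_init_nodes(max_zone_pfns); /* max_zone_pfns set up by the arch */
#endif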
void __init free_area_init(unsigned long *zones_size)
{
free_area_init_node(0, zones_size,
__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
}
static int page_alloc_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
int cpu = (unsigned long)hcpu;
if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
lru_add_drain_cpu(cpu);
drain_pages(cpu);
/*
* Spill the event counters of the dead processor
* into the current processor's event counters.
* This artificially elevates the count of the current
* processor.
*/
vm_events_fold_cpu(cpu);
/*
* Zero the differential counters of the dead processor
* so that the vm statistics are consistent.
*
* This is only okay since the processor is dead and cannot
* race with what we are doing.
*/
refresh_cpu_vm_stats(cpu);
}
return NOTIFY_OK;
}
void __init page_alloc_init(void)
{
hotcpu_notifier(page_alloc_cpu_notify, 0);
}
/*
* calculate_totalreserve_pages - called when sysctl_lower_zone_reserve_ratio
* or min_free_kbytes changes.
*/
static void calculate_totalreserve_pages(void)
{
struct pglist_data *pgdat;
unsigned long reserve_pages = 0;
enum zone_type i, j;
for_each_online_pgdat(pgdat) {
for (i = 0; i < MAX_NR_ZONES; i++) {
struct zone *zone = pgdat->node_zones + i;
unsigned long max = 0;
/* Find valid and maximum lowmem_reserve in the zone */
for (j = i; j < MAX_NR_ZONES; j++) {
if (zone->lowmem_reserve[j] > max)
max = zone->lowmem_reserve[j];
}
/* we treat the high watermark as reserved pages. */
max += high_wmark_pages(zone);
if (max > zone->present_pages)
max = zone->present_pages;
reserve_pages += max;
/*
* Lowmem reserves are not available to
* GFP_HIGHUSER page cache allocations and
* kswapd tries to balance zones to their high
* watermark. As a result, neither should be
* regarded as dirtyable memory, to prevent a
* situation where reclaim has to clean pages
* in order to balance the zones.
*/
zone->dirty_balance_reserve = max;
}
}
dirty_balance_reserve = reserve_pages;
totalreserve_pages = reserve_pages;
}
/*
* setup_per_zone_lowmem_reserve - called whenever
* sysctl_lower_zone_reserve_ratio changes. Ensures that each zone
* has a correct pages reserved value, so an adequate number of
* pages are left in the zone after a successful __alloc_pages().
*/
static void setup_per_zone_lowmem_reserve(void)
{
struct pglist_data *pgdat;
enum zone_type j, idx;
for_each_online_pgdat(pgdat) {
for (j = 0; j < MAX_NR_ZONES; j++) {
struct zone *zone = pgdat->node_zones + j;
unsigned long present_pages = zone->present_pages;
zone->lowmem_reserve[j] = 0;
idx = j;
while (idx) {
struct zone *lower_zone;
idx--;
if (sysctl_lowmem_reserve_ratio[idx] < 1)
sysctl_lowmem_reserve_ratio[idx] = 1;
lower_zone = pgdat->node_zones + idx;
lower_zone->lowmem_reserve[j] = present_pages /
sysctl_lowmem_reserve_ratio[idx];
present_pages += lower_zone->present_pages;
}
}
}
/* update totalreserve_pages */
calculate_totalreserve_pages();
}
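/*
 * Worked example (editorial sketch): with the default
 * sysctl_lowmem_reserve_ratio for ZONE_DMA of 256 and a ZONE_NORMAL of
 * 225280 pages, the loop above leaves the DMA zone with
 * lowmem_reserve[ZONE_NORMAL] = 225280 / 256 = 880 pages that
 * GFP_KERNEL allocations falling back to DMA may not consume.
 */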
static void __setup_per_zone_wmarks(void)
{
unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
unsigned long lowmem_pages = 0;
struct zone *zone;
unsigned long flags;
/* Calculate total number of !ZONE_HIGHMEM pages */
for_each_zone(zone) {
if (!is_highmem(zone))
lowmem_pages += zone->present_pages;
}
for_each_zone(zone) {
u64 tmp;
spin_lock_irqsave(&zone->lock, flags);
tmp = (u64)pages_min * zone->present_pages;
do_div(tmp, lowmem_pages);
if (is_highmem(zone)) {
/*
* __GFP_HIGH and PF_MEMALLOC allocations usually don't
* need highmem pages, so cap pages_min to a small
* value here.
*
* The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
* deltas control async page reclaim, and so should
* not be capped for highmem.
*/
int min_pages;
min_pages = zone->present_pages / 1024;
if (min_pages < SWAP_CLUSTER_MAX)
min_pages = SWAP_CLUSTER_MAX;
if (min_pages > 128)
min_pages = 128;
zone->watermark[WMARK_MIN] = min_pages;
} else {
/*
* If it's a lowmem zone, reserve a number of pages
* proportionate to the zone's size.
*/
zone->watermark[WMARK_MIN] = tmp;
}
zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + (tmp >> 2);
zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
zone->watermark[WMARK_MIN] += cma_wmark_pages(zone);
zone->watermark[WMARK_LOW] += cma_wmark_pages(zone);
zone->watermark[WMARK_HIGH] += cma_wmark_pages(zone);
setup_zone_migrate_reserve(zone);
spin_unlock_irqrestore(&zone->lock, flags);
}
/* update totalreserve_pages */
calculate_totalreserve_pages();
}
/**
* setup_per_zone_wmarks - called when min_free_kbytes changes
* or when memory is hot-{added|removed}
*
* Ensures that the watermark[min,low,high] values for each zone are set
* correctly with respect to min_free_kbytes.
*/
void setup_per_zone_wmarks(void)
{
mutex_lock(&zonelists_mutex);
__setup_per_zone_wmarks();
mutex_unlock(&zonelists_mutex);
}
/*
* The inactive anon list should be small enough that the VM never has to
* do too much work, but large enough that each inactive page has a chance
* to be referenced again before it is swapped out.
*
* The inactive_anon ratio is the target ratio of ACTIVE_ANON to
* INACTIVE_ANON pages on this zone's LRU, maintained by the
* pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
* the anonymous pages are kept on the inactive list.
*
* total target max
* memory ratio inactive anon
* -------------------------------------
* 10MB 1 5MB
* 100MB 1 50MB
* 1GB 3 250MB
* 10GB 10 0.9GB
* 100GB 31 3GB
* 1TB 101 10GB
* 10TB 320 32GB
*/
static void __meminit calculate_zone_inactive_ratio(struct zone *zone)
{
unsigned int gb, ratio;
/* Zone size in gigabytes */
gb = zone->present_pages >> (30 - PAGE_SHIFT);
if (gb)
ratio = int_sqrt(10 * gb);
else
ratio = 1;
zone->inactive_ratio = ratio;
}
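/*
 * Sanity check of the table above (editorial sketch): a 1GB zone gives
 * gb = 1 and ratio = int_sqrt(10) = 3; a 10GB zone gives
 * int_sqrt(100) = 10; a 100GB zone gives int_sqrt(1000) = 31 --
 * matching the target-ratio column.
 */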
static void __meminit setup_per_zone_inactive_ratio(void)
{
struct zone *zone;
for_each_zone(zone)
calculate_zone_inactive_ratio(zone);
}
/*
* Initialise min_free_kbytes.
*
* For small machines we want it small (128k min). For large machines
* we want it large (64MB max). But it is not linear, because network
* bandwidth does not increase linearly with machine size. We use
*
* min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
* min_free_kbytes = sqrt(lowmem_kbytes * 16)
*
* which yields
*
* 16MB: 512k
* 32MB: 724k
* 64MB: 1024k
* 128MB: 1448k
* 256MB: 2048k
* 512MB: 2896k
* 1024MB: 4096k
* 2048MB: 5792k
* 4096MB: 8192k
* 8192MB: 11584k
* 16384MB: 16384k
*/
int __meminit init_per_zone_wmark_min(void)
{
unsigned long lowmem_kbytes;
lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
if (min_free_kbytes < 128)
min_free_kbytes = 128;
if (min_free_kbytes > 65536)
min_free_kbytes = 65536;
setup_per_zone_wmarks();
refresh_zone_stat_thresholds();
setup_per_zone_lowmem_reserve();
setup_per_zone_inactive_ratio();
return 0;
}
module_init(init_per_zone_wmark_min)
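/*
 * Worked example (editorial sketch): with 1024MB of lowmem,
 * lowmem_kbytes = 1048576 and min_free_kbytes =
 * int_sqrt(16 * 1048576) = int_sqrt(16777216) = 4096k, matching the
 * table above. The 128k floor bites below 1MB of lowmem and the
 * 65536k ceiling above 256GB.
 */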
/*
* min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
* that we can call two helper functions whenever min_free_kbytes
* changes.
*/
int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
void __user *buffer, size_t *length, loff_t *ppos)
{
proc_dointvec(table, write, buffer, length, ppos);
if (write)
setup_per_zone_wmarks();
return 0;
}
#ifdef CONFIG_NUMA
int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
void __user *buffer, size_t *length, loff_t *ppos)
{
struct zone *zone;
int rc;
rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
if (rc)
return rc;
for_each_zone(zone)
zone->min_unmapped_pages = (zone->present_pages *
sysctl_min_unmapped_ratio) / 100;
return 0;
}
int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
void __user *buffer, size_t *length, loff_t *ppos)
{
struct zone *zone;
int rc;
rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
if (rc)
return rc;
for_each_zone(zone)
zone->min_slab_pages = (zone->present_pages *
sysctl_min_slab_ratio) / 100;
return 0;
}
#endif
/*
* lowmem_reserve_ratio_sysctl_handler - just a wrapper around
* proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
* whenever sysctl_lowmem_reserve_ratio changes.
*
* The reserve ratio has no relation to the minimum watermarks.
* The lowmem reserve ratio only makes sense as a function of the
* boot-time zone sizes.
*/
int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
void __user *buffer, size_t *length, loff_t *ppos)
{
proc_dointvec_minmax(table, write, buffer, length, ppos);
setup_per_zone_lowmem_reserve();
return 0;
}
/*
* percpu_pagelist_fraction - changes pcp->high for each zone on each
* cpu. It is the fraction of the zone's total pages that a hot per-cpu
* pagelist may hold before it gets flushed back to the buddy allocator.
*/
int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
void __user *buffer, size_t *length, loff_t *ppos)
{
struct zone *zone;
unsigned int cpu;
int ret;
ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
if (!write || (ret < 0))
return ret;
for_each_populated_zone(zone) {
for_each_possible_cpu(cpu) {
unsigned long high;
high = zone->present_pages / percpu_pagelist_fraction;
setup_pagelist_highmark(
per_cpu_ptr(zone->pageset, cpu), high);
}
}
return 0;
}
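/*
 * Worked example (editorial sketch): writing 8 to
 * /proc/sys/vm/percpu_pagelist_fraction on a zone of 262144 pages sets
 * each CPU's pcp->high to 262144 / 8 = 32768 pages;
 * setup_pagelist_highmark() then derives the batch size from that.
 */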
int hashdist = HASHDIST_DEFAULT;
#ifdef CONFIG_NUMA
static int __init set_hashdist(char *str)
{
if (!str)
return 0;
hashdist = simple_strtoul(str, &str, 0);
return 1;
}
__setup("hashdist=", set_hashdist);
#endif
/*
* allocate a large system hash table from bootmem
* - it is assumed that the hash table must contain an exact power-of-2
* quantity of entries
* - limit is the number of hash buckets, not the total allocation size
*/
void *__init alloc_large_system_hash(const char *tablename,
unsigned long bucketsize,
unsigned long numentries,
int scale,
int flags,
unsigned int *_hash_shift,
unsigned int *_hash_mask,
unsigned long limit)
{
unsigned long long max = limit;
unsigned long log2qty, size;
void *table = NULL;
/* allow the kernel cmdline to have a say */
if (!numentries) {
/* round applicable memory size up to nearest megabyte */
numentries = nr_kernel_pages;
numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
numentries >>= 20 - PAGE_SHIFT;
numentries <<= 20 - PAGE_SHIFT;
/* limit to 1 bucket per 2^scale bytes of low memory */
if (scale > PAGE_SHIFT)
numentries >>= (scale - PAGE_SHIFT);
else
numentries <<= (PAGE_SHIFT - scale);
/* Make sure we've got at least a 0-order allocation.. */
if (unlikely(flags & HASH_SMALL)) {
/* Makes no sense without HASH_EARLY */
WARN_ON(!(flags & HASH_EARLY));
if (!(numentries >> *_hash_shift)) {
numentries = 1UL << *_hash_shift;
BUG_ON(!numentries);
}
} else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
numentries = PAGE_SIZE / bucketsize;
}
numentries = roundup_pow_of_two(numentries);
/* limit allocation size to 1/16 total memory by default */
if (max == 0) {
max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
do_div(max, bucketsize);
}
max = min(max, 0x80000000ULL);
if (numentries > max)
numentries = max;
log2qty = ilog2(numentries);
do {
size = bucketsize << log2qty;
if (flags & HASH_EARLY)
table = alloc_bootmem_nopanic(size);
else if (hashdist)
table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
else {
/*
* If bucketsize is not a power of two, we may free
* some pages at the end of the hash table, which
* alloc_pages_exact() automatically does
*/
if (get_order(size) < MAX_ORDER) {
table = alloc_pages_exact(size, GFP_ATOMIC);
kmemleak_alloc(table, size, 1, GFP_ATOMIC);
}
}
} while (!table && size > PAGE_SIZE && --log2qty);
if (!table)
panic("Failed to allocate %s hash table\n", tablename);
printk(KERN_INFO "%s hash table entries: %ld (order: %d, %lu bytes)\n",
tablename,
(1UL << log2qty),
ilog2(size) - PAGE_SHIFT,
size);
if (_hash_shift)
*_hash_shift = log2qty;
if (_hash_mask)
*_hash_mask = (1 << log2qty) - 1;
return table;
}
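/*
 * Illustrative caller (editorial sketch; all "example_*" names are
 * hypothetical): a subsystem wanting one bucket per 2^14 bytes of low
 * memory, allocated early from bootmem, could do:
 */
#if 0	/* sketch only, not built */
static struct hlist_head *example_hash;
static unsigned int example_hash_shift;
static unsigned int example_hash_mask;

void __init example_hash_init(void)
{
	example_hash = alloc_large_system_hash("Example-cache",
				sizeof(struct hlist_head),
				0,		/* size from memory */
				14,		/* 1 bucket per 16KB */
				HASH_EARLY,
				&example_hash_shift,
				&example_hash_mask,
				0);		/* no explicit limit */
}
#endif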
/* Return a pointer to the bitmap storing bits affecting a block of pages */
static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
return __pfn_to_section(pfn)->pageblock_flags;
#else
return zone->pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
}
static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
pfn &= (PAGES_PER_SECTION-1);
return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#else
pfn = pfn - round_down(zone->zone_start_pfn, pageblock_nr_pages);
return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#endif /* CONFIG_SPARSEMEM */
}
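/*
 * Worked example (editorial sketch, SPARSEMEM case): with
 * PAGES_PER_SECTION = 0x8000 and pageblock_order = 10, pfn 0x12345
 * masks down to 0x2345 within its section; 0x2345 >> 10 = 8, so with
 * NR_PAGEBLOCK_BITS = 4 this pageblock's flags start at bit 32 of the
 * section's pageblock_flags bitmap.
 */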
/**
* get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
* @page: The page within the block of interest
* @start_bitidx: The first bit of interest to retrieve
* @end_bitidx: The last bit of interest
* returns pageblock_bits flags
*/
unsigned long get_pageblock_flags_group(struct page *page,
int start_bitidx, int end_bitidx)
{
struct zone *zone;
unsigned long *bitmap;
unsigned long pfn, bitidx;
unsigned long flags = 0;
unsigned long value = 1;
zone = page_zone(page);
pfn = page_to_pfn(page);
bitmap = get_pageblock_bitmap(zone, pfn);
bitidx = pfn_to_bitidx(zone, pfn);
for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
if (test_bit(bitidx + start_bitidx, bitmap))
flags |= value;
return flags;
}
/**
* set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages
* @page: The page within the block of interest
* @start_bitidx: The first bit of interest
* @end_bitidx: The last bit of interest
* @flags: The flags to set
*/
void set_pageblock_flags_group(struct page *page, unsigned long flags,
int start_bitidx, int end_bitidx)
{
struct zone *zone;
unsigned long *bitmap;
unsigned long pfn, bitidx;
unsigned long value = 1;
zone = page_zone(page);
pfn = page_to_pfn(page);
bitmap = get_pageblock_bitmap(zone, pfn);
bitidx = pfn_to_bitidx(zone, pfn);
VM_BUG_ON(pfn < zone->zone_start_pfn);
VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);
for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
if (flags & value)
__set_bit(bitidx + start_bitidx, bitmap);
else
__clear_bit(bitidx + start_bitidx, bitmap);
}
/*
* This is designed as a helper function; please see page_isolation.c as well.
* It sets/clears a page block's type to ISOLATE; the page allocator
* never allocates memory from an ISOLATE block.
*/
static int
__count_immobile_pages(struct zone *zone, struct page *page, int count)
{
unsigned long pfn, iter, found;
int mt;
/*
* To avoid noisy data, lru_add_drain_all() should be called first.
* If the zone is ZONE_MOVABLE, it never contains immobile pages.
*/
if (zone_idx(zone) == ZONE_MOVABLE)
return true;
mt = get_pageblock_migratetype(page);
if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
return true;
pfn = page_to_pfn(page);
for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
unsigned long check = pfn + iter;
if (!pfn_valid_within(check))
continue;
page = pfn_to_page(check);
if (!page_count(page)) {
if (PageBuddy(page))
iter += (1 << page_order(page)) - 1;
continue;
}
if (!PageLRU(page))
found++;
/*
* If there are RECLAIMABLE pages, we need to check them.
* But as of now memory offline itself doesn't call shrink_slab(),
* and this still needs to be fixed.
*/
/*
* If the page is not RAM, page_count() should be 0 and no
* further checks are needed. This is a _used_, not-movable page.
*
* The problematic thing here is PG_reserved pages. PG_reserved
* is set on both memory hole pages and _used_ kernel
* pages at boot.
*/
if (found > count)
return false;
}
return true;
}
bool is_pageblock_removable_nolock(struct page *page)
{
struct zone *zone;
unsigned long pfn;
/*
* We have to be careful here because we are iterating over memory
* sections which are not zone aware so we might end up outside of
* the zone but still within the section.
* We have to take care about the node as well. If the node is offline
* its NODE_DATA will be NULL - see page_zone.
*/
if (!node_online(page_to_nid(page)))
return false;
zone = page_zone(page);
pfn = page_to_pfn(page);
if (zone->zone_start_pfn > pfn ||
zone->zone_start_pfn + zone->spanned_pages <= pfn)
return false;
return __count_immobile_pages(zone, page, 0);
}
int set_migratetype_isolate(struct page *page)
{
struct zone *zone;
unsigned long flags, pfn;
struct memory_isolate_notify arg;
int notifier_ret;
int ret = -EBUSY;
zone = page_zone(page);
spin_lock_irqsave(&zone->lock, flags);
pfn = page_to_pfn(page);
arg.start_pfn = pfn;
arg.nr_pages = pageblock_nr_pages;
arg.pages_found = 0;
/*
* It may be possible to isolate a pageblock even if the
* migratetype is not MIGRATE_MOVABLE. The memory isolation
* notifier chain is used by balloon drivers to return the
* number of pages in a range that are held by the balloon
* driver to shrink memory. If all the pages are accounted for
* by balloons, are free, or on the LRU, isolation can continue.
* Later, for example, when the memory hotplug notifier runs, the
* pages reported as "can be isolated" should be isolated (freed)
* by the balloon driver through the memory notifier chain.
*/
notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
notifier_ret = notifier_to_errno(notifier_ret);
if (notifier_ret)
goto out;
/*
* FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
* We just check MOVABLE pages.
*/
if (__count_immobile_pages(zone, page, arg.pages_found))
ret = 0;
/*
* Immobile means "not-on-LRU" pages. If there are more immobile pages
* than removable-by-driver pages reported by the notifier, we'll fail.
*/
out:
if (!ret) {
set_pageblock_migratetype(page, MIGRATE_ISOLATE);
move_freepages_block(zone, page, MIGRATE_ISOLATE);
}
spin_unlock_irqrestore(&zone->lock, flags);
if (!ret)
drain_all_pages();
return ret;
}
void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
struct zone *zone;
unsigned long flags;
zone = page_zone(page);
spin_lock_irqsave(&zone->lock, flags);
if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
goto out;
set_pageblock_migratetype(page, migratetype);
move_freepages_block(zone, page, migratetype);
out:
spin_unlock_irqrestore(&zone->lock, flags);
}
#ifdef CONFIG_CMA
static unsigned long pfn_max_align_down(unsigned long pfn)
{
return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
pageblock_nr_pages) - 1);
}
static unsigned long pfn_max_align_up(unsigned long pfn)
{
return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
pageblock_nr_pages));
}
static struct page *
__alloc_contig_migrate_alloc(struct page *page, unsigned long private,
int **resultp)
{
return alloc_page(GFP_HIGHUSER_MOVABLE);
}
/* [start, end) must belong to a single zone. */
static int __alloc_contig_migrate_range(unsigned long start, unsigned long end)
{
/* This function is based on compact_zone() from compaction.c. */
unsigned long pfn = start;
unsigned int tries = 0;
int ret = 0;
struct compact_control cc = {
.nr_migratepages = 0,
.order = -1,
.zone = page_zone(pfn_to_page(start)),
.sync = true,
};
INIT_LIST_HEAD(&cc.migratepages);
migrate_prep_local();
while (pfn < end || !list_empty(&cc.migratepages)) {
if (fatal_signal_pending(current)) {
ret = -EINTR;
break;
}
if (list_empty(&cc.migratepages)) {
cc.nr_migratepages = 0;
pfn = isolate_migratepages_range(cc.zone, &cc,
pfn, end);
if (!pfn) {
ret = -EINTR;
break;
}
tries = 0;
} else if (++tries == 5) {
ret = ret < 0 ? ret : -EBUSY;
break;
}
ret = migrate_pages(&cc.migratepages,
__alloc_contig_migrate_alloc,
0, false, true);
}
putback_lru_pages(&cc.migratepages);
return ret > 0 ? 0 : ret;
}
/*
* Update zone's cma pages counter used for watermark level calculation.
*/
static inline void __update_cma_watermarks(struct zone *zone, int count)
{
unsigned long flags;
spin_lock_irqsave(&zone->lock, flags);
zone->min_cma_pages += count;
spin_unlock_irqrestore(&zone->lock, flags);
setup_per_zone_wmarks();
}
/*
* Trigger memory pressure bump to reclaim some pages in order to be able to
* allocate 'count' pages in single page units. Does similar work as
*__alloc_pages_slowpath() function.
*/
static int __reclaim_pages(struct zone *zone, gfp_t gfp_mask, int count)
{
enum zone_type high_zoneidx = gfp_zone(gfp_mask);
struct zonelist *zonelist = node_zonelist(0, gfp_mask);
int did_some_progress = 0;
int order = 1;
/*
* Increase level of watermarks to force kswapd do his job
* to stabilise at new watermark level.
*/
__update_cma_watermarks(zone, count);
/* Obey watermarks as if the page was being allocated */
while (!zone_watermark_ok(zone, 0, low_wmark_pages(zone), 0, 0)) {
wake_all_kswapd(order, zonelist, high_zoneidx, zone_idx(zone));
did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
NULL);
if (!did_some_progress) {
/* Exhausted what can be done so it's blamo time */
out_of_memory(zonelist, gfp_mask, order, NULL, false);
}
}
/* Restore original watermark levels. */
__update_cma_watermarks(zone, -count);
return count;
}
/**
* alloc_contig_range() -- tries to allocate given range of pages
* @start: start PFN to allocate
* @end: one-past-the-last PFN to allocate
* @migratetype: migratetype of the underlying pageblocks (either
* #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks
* in range must have the same migratetype and it must
* be either of the two.
*
* The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
* aligned, however it's the caller's responsibility to guarantee that
* we are the only thread that changes migrate type of pageblocks the
* pages fall in.
*
* The PFN range must belong to a single zone.
*
* Returns zero on success or a negative error code. On success all
* pages whose PFN is in [start, end) are allocated for the caller and
* need to be freed with free_contig_range().
*/
int alloc_contig_range(unsigned long start, unsigned long end,
unsigned migratetype)
{
struct zone *zone = page_zone(pfn_to_page(start));
unsigned long outer_start, outer_end;
int ret = 0, order;
/*
* What we do here is we mark all pageblocks in range as
* MIGRATE_ISOLATE. Because pageblocks and max-order pages may
* have different sizes, and due to the way the page allocator
* works, we align the range to the bigger of the two so
* that the page allocator won't try to merge buddies from
* different pageblocks and change MIGRATE_ISOLATE to some
* other migration type.
*
* Once the pageblocks are marked as MIGRATE_ISOLATE, we
* migrate the pages from an unaligned range (ie. pages that
* we are interested in). This will put all the pages in
* range back to page allocator as MIGRATE_ISOLATE.
*
* When this is done, we take the pages in range from page
* allocator removing them from the buddy system. This way
* page allocator will never consider using them.
*
* This lets us mark the pageblocks back as
* MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
* aligned range but not in the unaligned, original range are
* put back to page allocator so that buddy can use them.
*/
ret = start_isolate_page_range(pfn_max_align_down(start),
pfn_max_align_up(end), migratetype);
if (ret)
goto done;
ret = __alloc_contig_migrate_range(start, end);
if (ret)
goto done;
/*
* Pages from [start, end) are within a MAX_ORDER_NR_PAGES
* aligned blocks that are marked as MIGRATE_ISOLATE. What's
* more, all pages in [start, end) are free in page allocator.
* What we are going to do is to allocate all pages from
* [start, end) (that is remove them from page allocator).
*
* The only problem is that pages at the beginning and at the
* end of the interesting range may not be aligned with pages that
* page allocator holds, ie. they can be part of higher order
* pages. Because of this, we reserve the bigger range and
* once this is done free the pages we are not interested in.
*
* We don't have to hold zone->lock here because the pages are
* isolated thus they won't get removed from buddy.
*/
lru_add_drain_all();
drain_all_pages();
order = 0;
outer_start = start;
while (!PageBuddy(pfn_to_page(outer_start))) {
if (++order >= MAX_ORDER) {
ret = -EBUSY;
goto done;
}
outer_start &= ~0UL << order;
}
/* Make sure the range is really isolated. */
if (test_pages_isolated(outer_start, end)) {
pr_warn("alloc_contig_range test_pages_isolated(%lx, %lx) failed\n",
outer_start, end);
ret = -EBUSY;
goto done;
}
/*
* Reclaim enough pages to make sure that contiguous allocation
* will not starve the system.
*/
__reclaim_pages(zone, GFP_HIGHUSER_MOVABLE, end-start);
/* Grab isolated pages from freelists. */
outer_end = isolate_freepages_range(outer_start, end);
if (!outer_end) {
ret = -EBUSY;
goto done;
}
/* Free head and tail (if any) */
if (start != outer_start)
free_contig_range(outer_start, start - outer_start);
if (end != outer_end)
free_contig_range(end, outer_end - end);
done:
undo_isolate_page_range(pfn_max_align_down(start),
pfn_max_align_up(end), migratetype);
return ret;
}
void free_contig_range(unsigned long pfn, unsigned nr_pages)
{
for (; nr_pages--; ++pfn)
__free_page(pfn_to_page(pfn));
}
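/*
 * Illustrative caller (editorial sketch; the pfn range is made up): a
 * CMA-style user grabs a physically contiguous range and later returns
 * it:
 */
#if 0	/* sketch only, not built */
unsigned long pfn = 0x10000; /* must lie in one zone, pageblocks MIGRATE_CMA */
unsigned long nr = 256; /* 1MB with 4K pages */
int err;

err = alloc_contig_range(pfn, pfn + nr, MIGRATE_CMA);
if (!err)
	free_contig_range(pfn, nr);
#endif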
#endif
#ifdef CONFIG_MEMORY_HOTREMOVE
/*
* All pages in the range must be isolated before calling this.
*/
void
__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
struct page *page;
struct zone *zone;
int order, i;
unsigned long pfn;
unsigned long flags;
/* find the first valid pfn */
for (pfn = start_pfn; pfn < end_pfn; pfn++)
if (pfn_valid(pfn))
break;
if (pfn == end_pfn)
return;
zone = page_zone(pfn_to_page(pfn));
spin_lock_irqsave(&zone->lock, flags);
pfn = start_pfn;
while (pfn < end_pfn) {
if (!pfn_valid(pfn)) {
pfn++;
continue;
}
page = pfn_to_page(pfn);
BUG_ON(page_count(page));
BUG_ON(!PageBuddy(page));
order = page_order(page);
#ifdef CONFIG_DEBUG_VM
printk(KERN_INFO "remove from free list %lx %d %lx\n",
pfn, 1 << order, end_pfn);
#endif
list_del(&page->lru);
rmv_page_order(page);
zone->free_area[order].nr_free--;
__mod_zone_page_state(zone, NR_FREE_PAGES,
- (1UL << order));
#ifdef CONFIG_HIGHMEM
if (PageHighMem(page))
totalhigh_pages -= 1 << order;
#endif
for (i = 0; i < (1 << order); i++)
SetPageReserved((page+i));
pfn += (1 << order);
}
spin_unlock_irqrestore(&zone->lock, flags);
}
#endif
#ifdef CONFIG_MEMORY_FAILURE
bool is_free_buddy_page(struct page *page)
{
struct zone *zone = page_zone(page);
unsigned long pfn = page_to_pfn(page);
unsigned long flags;
int order;
spin_lock_irqsave(&zone->lock, flags);
for (order = 0; order < MAX_ORDER; order++) {
struct page *page_head = page - (pfn & ((1 << order) - 1));
if (PageBuddy(page_head) && page_order(page_head) >= order)
break;
}
spin_unlock_irqrestore(&zone->lock, flags);
return order < MAX_ORDER;
}
#endif
static struct trace_print_flags pageflag_names[] = {
{1UL << PG_locked, "locked" },
{1UL << PG_error, "error" },
{1UL << PG_referenced, "referenced" },
{1UL << PG_uptodate, "uptodate" },
{1UL << PG_dirty, "dirty" },
{1UL << PG_lru, "lru" },
{1UL << PG_active, "active" },
{1UL << PG_slab, "slab" },
{1UL << PG_owner_priv_1, "owner_priv_1" },
{1UL << PG_arch_1, "arch_1" },
{1UL << PG_reserved, "reserved" },
{1UL << PG_private, "private" },
{1UL << PG_private_2, "private_2" },
{1UL << PG_writeback, "writeback" },
#ifdef CONFIG_PAGEFLAGS_EXTENDED
{1UL << PG_head, "head" },
{1UL << PG_tail, "tail" },
#else
{1UL << PG_compound, "compound" },
#endif
{1UL << PG_swapcache, "swapcache" },
{1UL << PG_mappedtodisk, "mappedtodisk" },
{1UL << PG_reclaim, "reclaim" },
{1UL << PG_swapbacked, "swapbacked" },
{1UL << PG_unevictable, "unevictable" },
#ifdef CONFIG_MMU
{1UL << PG_mlocked, "mlocked" },
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
{1UL << PG_uncached, "uncached" },
#endif
#ifdef CONFIG_MEMORY_FAILURE
{1UL << PG_hwpoison, "hwpoison" },
#endif
{-1UL, NULL },
};
static void dump_page_flags(unsigned long flags)
{
const char *delim = "";
unsigned long mask;
int i;
printk(KERN_ALERT "page flags: %#lx(", flags);
/* remove zone id */
flags &= (1UL << NR_PAGEFLAGS) - 1;
for (i = 0; pageflag_names[i].name && flags; i++) {
mask = pageflag_names[i].mask;
if ((flags & mask) != mask)
continue;
flags &= ~mask;
printk("%s%s", delim, pageflag_names[i].name);
delim = "|";
}
/* check for left over flags */
if (flags)
printk("%s%#lx", delim, flags);
printk(")\n");
}
void dump_page(struct page *page)
{
printk(KERN_ALERT
"page:%p count:%d mapcount:%d mapping:%p index:%#lx\n",
page, atomic_read(&page->_count), page_mapcount(page),
page->mapping, page->index);
dump_page_flags(page->flags);
mem_cgroup_print_bad_page(page);
}
| gpl-2.0 |
7k2/test-repo | src/ocoltbl.cpp | 2 | 18569 | /*
* Seven Kingdoms 2: The Fryhtan War
*
* Copyright 1999 Enlight Software Ltd.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
// Filename : OCOLTBL.CPP
// Description : generated color remap table
#include <ocoltbl.h>
#include <all.h>
#include <math.h>
#include <ovga.h>
// ---------- define const -----------//
// value of full intensity, 255 for 24-bit color, 64 for 18-bit color
#define MAX_COLOUR 255
#define PI 3.14159265359L
#define NEAREST_COLOR 8
#define BRIGHTNESS_WEIGHTING 4.0
BYTE ColorTable::identity_table[MAX_COLOUR_TABLE_SIZE] =
{
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff
};
// --------- define inline function ---------//
inline int sq(int a)
{
return a*a;
}
// ---------- begin of function ColorTable::ColorTable ----------//
ColorTable::ColorTable()
{
remap_table = NULL;
remap_table_array = NULL;
}
ColorTable::ColorTable(int absScale, int tableSize, WORD *customTable)
{
remap_table = NULL;
remap_table_array = NULL;
init(absScale, tableSize, customTable);
}
// ---------- end of function ColorTable::ColorTable ----------//
// ---------- begin of function ColorTable::ColorTable ----------//
ColorTable::ColorTable(const ColorTable& ct) : abs_scale(ct.abs_scale),
table_size(ct.table_size)
{
if( ct.remap_table )
{
remap_table = (WORD *)mem_add(table_size * (2*abs_scale+1) * sizeof(WORD) );
memcpy(remap_table, ct.remap_table, table_size * (2*abs_scale+1) * sizeof(WORD) );
remap_table_array = (WORD **)mem_add(sizeof(WORD *) * (2*abs_scale+1) );
create_table_array();
}
else
{
remap_table = NULL;
remap_table_array = NULL;
}
}
// ---------- end of function ColorTable::ColorTable ----------//
// ---------- begin of function ColorTable::~ColorTable ----------//
ColorTable::~ColorTable()
{
deinit();
}
// ---------- end of function ColorTable::~ColorTable ----------//
// ---------- begin of function ColorTable::init ----------//
void ColorTable::init()
{
deinit();
abs_scale = 0;
}
// initialize a custom table, given the absolute scale and the table size
// the customTable array has (2*absScale+1) groups
// and each group has (tableSize) WORD remapping entries
void ColorTable::init(int absScale, int tableSize, WORD *customTable)
{
deinit();
abs_scale = absScale;
table_size = tableSize;
remap_table = (WORD *)mem_add(table_size * (2*absScale+1) * sizeof(WORD) );
memcpy(remap_table, customTable, tableSize * (2*absScale+1) * sizeof(WORD) );
remap_table_array = (WORD **)mem_add(sizeof(WORD *) * (2*absScale+1) );
create_table_array();
}
// ---------- end of function ColorTable::init ----------//
// ---------- begin of function ColorTable::deinit ----------//
void ColorTable::deinit()
{
if( remap_table )
{
mem_del( remap_table );
remap_table = NULL;
}
if( remap_table_array)
{
mem_del( remap_table_array);
remap_table_array = NULL;
}
}
// ---------- end of function ColorTable::deinit ----------//
// ---------- begin of function ColorTable::operator= ----------//
ColorTable& ColorTable::operator=(const ColorTable& ct)
{
deinit();
abs_scale = ct.abs_scale;
table_size= ct.table_size;
if( ct.remap_table )
{
remap_table = (WORD *)mem_add(table_size * (2*abs_scale+1) * sizeof(WORD) );
memcpy(remap_table, ct.remap_table, table_size * (2*abs_scale+1) * sizeof(WORD) );
remap_table_array = (WORD **)mem_add(sizeof(WORD *) * (2*abs_scale+1) );
create_table_array();
}
else
{
remap_table = NULL;
remap_table_array = NULL;
}
return *this;
}
// ---------- end of function ColorTable::operator= ----------//
// ---------- begin of function ColorTable::generate_table ----------//
//
// generate +absScale to -absScale (total 2*absScale +1 remap table )
// from palette pal (768 byte)
// any color in the reservedColor is unmodified
// note reservedColor array must be in ascending order
//
// <int> absScale number of scale to full white/full black
// <BYTE *>pal input palette, size must be 3*table_size
// <int> palSize number of palette entries
// <BYTE *>reservedColor array of reserved color,
// <int> reservedCount size of reservedColor
// reserved color will map to itself and will not be mapped except by itself
//
void ColorTable::generate_table(int absScale, PalDesc & palD, RGBColor (*fp)(RGBColor, int, int))
{
int palSize = palD.pal_size;
err_when(absScale < 0 || palD.reserved_count < 0 || palD.pal == NULL);
err_when(palSize > MAX_COLOUR_TABLE_SIZE);
deinit();
abs_scale = absScale;
table_size = palSize;
WORD *remapEntry = remap_table = (WORD *)mem_add(table_size * (2*absScale+1) * sizeof(WORD) );
remap_table_array = (WORD **)mem_add(sizeof(WORD *) * (2*absScale+1) );
int scale, c;
// ------- generate negative scale ----------//
for( scale = -absScale; scale <= absScale; ++scale)
{
int reservedIndex = 0;
for( c=0; c < palSize; ++c, ++remapEntry)
{
RGBColor rgb;
if( palD.is_reserved(c, reservedIndex) )
rgb = palD.get_rgb(c); // don't change reserved color
else
rgb = (*fp)(palD.get_rgb(c), scale, absScale);
*remapEntry = vga.make_pixel(&rgb);
}
}
create_table_array();
}
// ---------- end of function ColorTable::generate_table ----------//
// ---------- begin of function ColorTable::generate_table_fast ----------//
// simplified version, it ignores reserved colors
void ColorTable::generate_table_fast (int absScale, PalDesc &palD, RGBColor (*fp)(RGBColor, int, int))
{
int palSize = palD.pal_size;
err_when(absScale < 0 || palD.reserved_count < 0 || palD.pal == NULL);
err_when(palSize > MAX_COLOUR_TABLE_SIZE);
deinit();
abs_scale = absScale;
table_size = palSize;
WORD *remapEntry = remap_table = (WORD *)mem_add(table_size * (2*absScale+1) * sizeof(WORD) );
remap_table_array = (WORD **)mem_add(sizeof(WORD *) * (2*absScale+1) );
int scale, c;
// ------- generate negative scale ----------//
for( scale = -absScale; scale <= absScale; ++scale)
{
for( c=0; c < palSize; ++c, ++remapEntry)
{
RGBColor rgb = (*fp)(palD.get_rgb(c), scale, absScale);
*remapEntry = vga.make_pixel(&rgb);
}
}
create_table_array();
}
// ---------- end of function ColorTable::generate_table_fast ----------//
// ---------- begin of function ColorTable::generate_table ----------//
//
// match one set of palette with a universal palette
// the set of palette is pointed by sPal, size is sPalSize,
// with some reserved color pointed by sReservedColor and size is sReservedCount
// the universal palette is pointed by Pal, size is PalSize,
// with some reserved color pointed by reservedColor and size is reservedCount
// note : numbers in reservedColor must be in ascending order
//
// generated map size must be palSize and it has only scale
//
void ColorTable::generate_table(PalDesc &sPalD, PalDesc &palD)
{
int sPalSize = sPalD.pal_size, palSize = palD.pal_size;
err_when(sPalD.pal == NULL || sPalSize <= 0 || sPalD.reserved_count < 0);
err_when(palD.pal == NULL || palSize <= 0 || palD.reserved_count < 0);
err_when(palSize > MAX_COLOUR_TABLE_SIZE || sPalSize > MAX_COLOUR_TABLE_SIZE);
deinit();
abs_scale = 0;
table_size = sPalSize;
WORD *remapEntry = remap_table = (WORD *)mem_add(sPalSize * sizeof(WORD));
remap_table_array = (WORD **)mem_add(sizeof(WORD *));
int sReservedIndex = 0;
for(int c=0; c < sPalSize; ++c, ++remapEntry)
{
*remapEntry = c; // put a default value (as if it is a reserved color)
// ------ see if it is a reserved color --------//
if( sPalD.is_reserved(c, sReservedIndex))
continue;
RGBColor rgb = sPalD.get_rgb(c);
// ------- scan for the closest color, except the reserved colors
int cc, dist[NEAREST_COLOR], thisDiff;
BYTE closeColor[NEAREST_COLOR]; // [0] is the closest
for( cc = 0; cc < NEAREST_COLOR; ++cc )
{
closeColor[cc] = c;
dist[cc] = 3*0xff*0xff+1;
}
int dReservedIndex = 0;
int d;
for( d=0; d < palSize; ++d)
{
// ------- skip scanning reserved color ------//
if( palD.is_reserved(d, dReservedIndex) )
continue;
// ------- compare the square distance ----------//
thisDiff = color_dist(rgb, palD.get_rgb(d));
if( thisDiff < dist[NEAREST_COLOR-1])
{
BYTE d1 = (BYTE) d;
for( cc = 0; cc < NEAREST_COLOR; ++cc )
{
if( thisDiff < dist[cc] )
{
// swap thisDiff and dist[cc]
// so that the replaced result will be shifted to next
int tempd;
BYTE tempc;
tempd = dist[cc];
dist[cc] = thisDiff;
thisDiff = tempd;
tempc = closeColor[cc];
closeColor[cc] = d1;
d1 = tempc;
}
}
}
}
// closeColor[] are the closest 8 colours, use hsv comparison to find the nearest
d = closeColor[0];
*remapEntry = d;
int minDiff = color_dist_hsv(rgb, palD.get_rgb(d));
for( cc = 1; cc < NEAREST_COLOR; ++cc)
{
d = closeColor[cc];
thisDiff = color_dist_hsv(rgb, palD.get_rgb(d));
if( thisDiff < minDiff )
{
minDiff = thisDiff;
*remapEntry = d;
}
}
}
create_table_array();
}
// ---------- end of function ColorTable::generate_table ----------//
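// Editorial note: the matching above is two-stage -- a cheap squared-RGB
// distance narrows each source colour to the NEAREST_COLOR (8) closest
// palette entries, then the HSV metric picks the final one. An
// illustrative call (sketch; the buffers and the PalDesc signature are
// assumed from PalDescFile below):
//
// PalDesc spritePal(spritePalBuf, 3, 256, 8, NULL, 0);
// PalDesc gamePal(gamePalBuf, 3, 256, 8, reservedColors, 16);
// ColorTable remap;
// remap.generate_table(spritePal, gamePal);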
// ---------- begin of function ColorTable::get_table ----------//
WORD *ColorTable::get_table(int scale)
{
err_when( !remap_table );
err_when( scale < -abs_scale || scale > abs_scale);
return remap_table + table_size * (scale + abs_scale);
}
// ---------- end of function ColorTable::get_table ----------//
// ---------- begin of function ColorTable::create_table_array ----------//
void ColorTable::create_table_array()
{
err_when( !remap_table );
for( int j = 0; j < 2*abs_scale+1; ++j)
{
remap_table_array[j] = remap_table + table_size * j;
}
}
// ---------- end of function ColorTable::create_table_array ----------//
// ---------- begin of function ColorTable::bright_func ---------//
RGBColor ColorTable::bright_func(RGBColor c, int scale, int absScale)
{
RGBColor ans;
if( scale < 0)
{
double factor = sqrt(double(absScale + scale) / absScale);
ans.red = BYTE(c.red * factor);
ans.green = BYTE(c.green * factor);
ans.blue = BYTE(c.blue * factor);
}
else if( scale > 0 )
{
ans.red = c.red + (MAX_COLOUR - c.red) * scale / absScale;
ans.green = c.green + (MAX_COLOUR - c.green) * scale / absScale;
ans.blue = c.blue + (MAX_COLOUR - c.blue) * scale / absScale;
}
else // scale == 0
{
ans = c; // if absScale == 0, scale can only be 0, so this avoids a divide by zero
}
return ans;
}
// ---------- end of function ColorTable::bright_func ---------//
// ---------- begin of function ColorTable::patch_table --------//
void ColorTable::patch_table(BYTE from, WORD to)
{
err_when(from >= table_size);
for(int s = -abs_scale; s <= abs_scale; ++s)
{
get_table(s)[from] = to;
}
}
// ---------- end of function ColorTable::patch_table --------//
// ---------- begin of function ColorTable::color_dist --------//
int ColorTable::color_dist(RGBColor c1, RGBColor c2)
{
return sq((int)c2.red-c1.red) + sq((int)c2.green-c1.green) + sq((int)c2.blue-c1.blue);
}
// ---------- end of function ColorTable::color_dist --------//
// ---------- begin of function ColorTable::color_dist_hsv --------//
int ColorTable::color_dist_hsv(RGBColor c1, RGBColor c2)
{
// calculate a distance for the colour
// h between 0 and 6
// s between 0 and 1
// v between 0 and 1
HSVColor hsv1(rgb2hsv(c1));
HSVColor hsv2(rgb2hsv(c2));
double dx = hsv2.saturation * cos(hsv2.hue * PI / 3.0) - hsv1.saturation * cos(hsv1.hue * PI / 3.0);
double dy = hsv2.saturation * sin(hsv2.hue * PI / 3.0) - hsv1.saturation * sin(hsv1.hue * PI / 3.0);
double dv = hsv2.brightness - hsv1.brightness;
return int(10000 * ( dx*dx + dy*dy + dv*dv*BRIGHTNESS_WEIGHTING ));
}
// ---------- end of function ColorTable::color_dist_hsv --------//
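// Worked example (editorial sketch): pure red maps to HSV hue 6 (== 0)
// and pure green to hue 2, both with s = v = 1. On the hue/saturation
// disc they sit at (cos 0, sin 0) = (1, 0) and
// (cos 120deg, sin 120deg) = (-0.5, 0.866), so dx*dx + dy*dy = 3.0 and,
// with dv = 0, the returned distance is int(10000 * 3.0) = 30000.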
// -------- begin of function ColorTable::rgb2hsv ---------//
HSVColor ColorTable::rgb2hsv(RGBColor &rgb)
{
if( rgb.red == rgb.green && rgb.red == rgb.blue)
{
return HSVColor(1.0, 0.0, rgb.red / 255.0);
}
// find the smallest colour
if( rgb.red <= rgb.green && rgb.red <= rgb.blue)
{
if( rgb.green >= rgb.blue )
{
// g is the primary, b is secondary
return HSVColor( 2.0 + (double) rgb.blue/ rgb.green,
rgb.blue != 0 ? 1.0 - (double) rgb.red / rgb.blue : 1.0,
rgb.green / 255.0);
}
else
{
// b is the primary, g is secondary
return HSVColor( 4.0 - (double) rgb.green / rgb.blue,
rgb.green != 0 ? 1.0 - (double) rgb.red/ rgb.green : 1.0,
rgb.blue / 255.0);
}
}
else if( rgb.green <= rgb.red && rgb.green <= rgb.blue)
{
if( rgb.red >= rgb.blue)
{
// r is the primary, b is secondary
return HSVColor( 6.0 - (double)rgb.blue/rgb.red,
rgb.blue!=0 ? 1.0 - (double)rgb.green/rgb.blue: 1.0,
rgb.red / 255.0);
}
else
{
// b is the primary, r is secondary
return HSVColor( 4.0 + (double)rgb.red/rgb.blue,
rgb.red!=0 ? 1.0 - (double)rgb.green/rgb.red: 1.0,
rgb.blue / 255.0);
}
}
else if( rgb.blue <= rgb.red && rgb.blue <= rgb.green)
{
if( rgb.red >= rgb.green)
{
// r is the primary, g is secondary
return HSVColor( (double)rgb.green/rgb.red,
rgb.green!=0 ? 1.0 - (double)rgb.blue/rgb.green: 1.0,
rgb.red / 255.0);
}
else
{
// g is the primary, r is secondary
return HSVColor( 2.0 - (double)rgb.red/rgb.green,
rgb.red!=0 ? 1.0 - (double)rgb.blue/rgb.red: 1.0,
rgb.green / 255.0);
}
}
else
{
err_when(1);
return HSVColor( 1.0, 0.0, rgb.red / 255.0);
}
}
// -------- end of function ColorTable::rgb2hsv ---------//
// -------- begin of function ColorTable::hsv2rgb ---------//
RGBColor ColorTable::hsv2rgb(HSVColor &hsv)
{
while( hsv.hue < 0.0)
hsv.hue += 6.0;
while(hsv.hue >= 6.0)
hsv.hue -= 6.0;
double p = hsv.brightness * 255.0;
err_when( p >= 256.0);
RGBColor ans;
if( hsv.hue < 1.0)
{
// r is primary, g is secondary
ans.red = BYTE(p);
p *= hsv.hue;
ans.green = BYTE(p); // *r * h;
p *= 1.0 - hsv.saturation;
ans.blue = BYTE(p);
}
else if( hsv.hue < 2.0)
{
// g is primary, r is secondary
ans.green = BYTE(p);
p *= 2.0 - hsv.hue;
ans.red = BYTE(p);
p *= 1.0 - hsv.saturation;
ans.blue = BYTE(p);
}
else if( hsv.hue < 3.0)
{
// g is primary, b is secondary
ans.green = BYTE(p);
p *= hsv.hue - 2.0;
ans.blue = BYTE(p);
p *= 1.0 - hsv.saturation;
ans.red = BYTE(p);
}
else if( hsv.hue < 4.0)
{
// b is primary g is secondary
ans.blue = BYTE(p);
p *= 4.0 - hsv.hue;
ans.green = BYTE(p);
p *= 1.0 - hsv.saturation;
ans.red = BYTE(p);
}
else if( hsv.hue < 5.0)
{
// b is primary, r is secondary
ans.blue = BYTE(p);
p *= hsv.hue - 4.0;
ans.red = BYTE(p);
p *= 1.0 - hsv.saturation;
ans.green = BYTE(p);
}
else if( hsv.hue < 6.0)
{
// r is primary, b is secondary
ans.red = BYTE(p);
p *= 6.0 - hsv.hue;
ans.blue = BYTE(p);
p *= 1.0 - hsv.saturation;
ans.green = BYTE(p);
}
return ans;
}
// -------- end of function ColorTable::hsv2rgb ---------//
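// ---------- editorial sketch: rgb2hsv / hsv2rgb round trip ----------//
// A sanity-check sketch, not part of the original file. The two conversions
// above use a non-standard saturation definition, but they are mutually
// consistent, so a round trip should reproduce the input up to BYTE()
// truncation. Assumes both helpers are publicly callable.
static int hsv_round_trip_delta(ColorTable &tbl, RGBColor in)
{
HSVColor hsv(tbl.rgb2hsv(in));
RGBColor out(tbl.hsv2rgb(hsv));
int dr = (int)out.red - (int)in.red;
int dg = (int)out.green - (int)in.green;
int db = (int)out.blue - (int)in.blue;
if(dr < 0) dr = -dr;
if(dg < 0) dg = -dg;
if(db < 0) db = -db;
int d = dr > dg ? dr : dg;
return d > db ? d : db; // small (0..2) when the conversions agree
}
// ---------- end of editorial sketch ----------//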
// -------- begin of function ColorTable::write_file ---------//
int ColorTable::write_file(File *f)
{
return( f->file_put_long(abs_scale) && f->file_put_long(table_size)
&& f->file_write(remap_table, table_size * (2*abs_scale+1) * sizeof(remap_table[0]) ) );
}
// -------- end of function ColorTable::write_file ---------//
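// On-disk layout produced above and consumed by read_file() below:
// [abs_scale:long][table_size:long] followed by the raw remap tables --
// (2*abs_scale+1) tables of table_size WORD entries, stored back to back.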
// -------- begin of function ColorTable::read_file ---------//
int ColorTable::read_file(File *f)
{
deinit();
abs_scale = f->file_get_long();
table_size = f->file_get_long();
remap_table = (WORD *)mem_add(table_size * (2*abs_scale+1) * sizeof(remap_table[0]) );
if(! f->file_read(remap_table, table_size * (2*abs_scale+1) * sizeof(remap_table[0])) )
{
mem_del(remap_table);
remap_table = 0;
return 0;
}
remap_table_array = (WORD **)mem_add(sizeof(remap_table_array[0]) * (2*abs_scale+1) );
create_table_array();
return 1;
}
// -------- end of function ColorTable::read_file ---------//
// -------- begin of function PalDescFile::PalDescFile ------//
//
PalDescFile::PalDescFile(char *fileName, int headerSize, int pitch, int size, int bitWidth, BYTE *rPtr, int rCount) :
PalDesc( mem_add(pitch*size), pitch, size, bitWidth, rPtr, rCount)
{
File palFile;
palFile.file_open( fileName );
palFile.file_seek( headerSize, SEEK_CUR );
palFile.file_read( pal, pitch*size );
}
// -------- end of function PalDescFile::PalDescFile ------//
// -------- begin of function PalDescFile::~PalDescFile ------//
//
PalDescFile::~PalDescFile()
{
mem_del( pal );
pal = NULL;
}
// -------- end of function PalDescFile::~PalDescFile ------//
| gpl-2.0 |
fzqing/linux-2.6 | arch/ppc/qe_io/qe_common.c | 2 | 7786 | /*
* arch/ppc/qe_io/qe_common.c
*
* General Purpose functions for the global management of the
* QUICC Engine (QE).
*
* Author: Shlomi Gridish <gridish@freescale.com>
*
* Copyright 2005 Freescale Semiconductor, Inc
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/immap_qe.h>
#include <asm/qe.h>
#include <asm/rheap.h>
#define QE_MAP_SIZE (0x100000) /* 1MB */
/* QE snum state
*/
typedef enum qe_snum_state {
QE_SNUM_STATE_USED, /* used */
QE_SNUM_STATE_FREE /* free */
} qe_snum_state_e;
/* QE snum
*/
typedef struct qe_snum {
u8 num; /* snum */
qe_snum_state_e state; /* state */
} qe_snum_t;
/* We allocate this here because it is used almost exclusively for
* the communication processor devices.
*/
EXPORT_SYMBOL(qe_immr);
qe_map_t *qe_immr;
static qe_snum_t snums[QE_NUM_OF_SNUM]; /* Dynamically allocated SNUMs */
static void qe_snums_init(void);
static void qe_muram_init(void);
static int qe_sdma_init(void);
void qe_reset(void)
{
qe_immr = (qe_map_t *) ioremap(QE_MAP_ADDR, QE_MAP_SIZE);
qe_snums_init();
qe_issue_cmd(QE_RESET, QE_CR_SUBBLOCK_INVALID,
(u8) QE_CR_PROTOCOL_UNSPECIFIED, 0);
/* Reclaim the MURAM memory for our use. */
qe_muram_init();
if (qe_sdma_init())
panic("sdma init failed!");
}
EXPORT_SYMBOL(qe_issue_cmd);
int qe_issue_cmd(uint cmd, uint device, u8 mcn_protocol, u32 cmd_input)
{
unsigned long flags;
u32 cecr;
u8 mcn_shift = 0, dev_shift = 0;
local_irq_save(flags);
if (cmd == QE_RESET) {
out_be32(&qe_immr->cp.cecr, (u32) (cmd | QE_CR_FLG));
} else {
if (cmd == QE_ASSIGN_PAGE) {
/* Here device is the SNUM, not sub-block */
dev_shift = QE_CR_SNUM_SHIFT;
} else if (cmd == QE_ASSIGN_RISC) {
/* Here device is the SNUM, and mcnProtocol is e_QeCmdRiscAssignment value */
dev_shift = QE_CR_SNUM_SHIFT;
mcn_shift = QE_CR_MCN_RISC_ASSIGN_SHIFT;
} else {
if (device == QE_CR_SUBBLOCK_USB)
mcn_shift = QE_CR_MCN_USB_SHIFT;
else
mcn_shift = QE_CR_MCN_NORMAL_SHIFT;
}
out_be32(&qe_immr->cp.cecdr, cmd_input);
out_be32(&qe_immr->cp.cecr,
(cmd | QE_CR_FLG | ((u32) device << dev_shift) | (u32)
mcn_protocol << mcn_shift));
}
/* wait for the QE_CR_FLG to clear */
do {
cecr = in_be32(&qe_immr->cp.cecr);
} while (cecr & QE_CR_FLG);
local_irq_restore(flags);
return 0;
}
/* Set a baud rate generator. This needs lots of work. There are
* 16 BRGs, which can be connected to the QE channels or output
* as clocks. The BRGs are in two different block of internal
* memory mapped space.
* The baud rate clock is the system clock divided by something.
* It was set up long ago during the initial boot phase and is
* given to us.
* Baud rate clocks are zero-based in the driver code (as that maps
* to port numbers). Documentation uses 1-based numbering.
*/
#define BRG_CLK (((bd_t *)__res)->bi_brgfreq)
/* This function is used by UARTS, or anything else that uses a 16x
* oversampled clock.
*/
void qe_setbrg(uint brg, uint rate)
{
volatile uint *bp;
u32 divisor;
int div16 = 0;
bp = (uint *) & qe_immr->brg.brgc1;
bp += brg;
divisor = (BRG_CLK / rate);
if (divisor > QE_BRGC_DIVISOR_MAX + 1) {
div16 = 1;
divisor /= 16;
}
*bp = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) | QE_BRGC_ENABLE;
if (div16)
*bp |= QE_BRGC_DIV16;
}
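/* Editorial sketch (not from the original file): the divisor math above,
 * worked for one hypothetical setup. Assuming a 166 MHz BRG clock and the
 * usual 12-bit divider field, a 115200 baud 16x-oversampled UART needs:
 *   divisor = 166000000 / 115200 = 1440
 * which fits the field directly, so DIV16 stays off. At 300 baud the raw
 * divisor is 553333; even after the /16 path it is 34583, still too large,
 * so very low rates need a slower BRG clock.
 */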
static void qe_snums_init(void)
{
int i;
/* Initialize the SNUMs array. */
for (i = 0; i < QE_NUM_OF_SNUM; i++)
snums[i].state = QE_SNUM_STATE_FREE;
/* Initialize SNUMs (thread serial numbers) according to QE spec chapter 4, SNUM table */
i = 0;
snums[i++].num = 0x04;
snums[i++].num = 0x05;
snums[i++].num = 0x0C;
snums[i++].num = 0x0D;
snums[i++].num = 0x14;
snums[i++].num = 0x15;
snums[i++].num = 0x1C;
snums[i++].num = 0x1D;
snums[i++].num = 0x24;
snums[i++].num = 0x25;
snums[i++].num = 0x2C;
snums[i++].num = 0x2D;
snums[i++].num = 0x34;
snums[i++].num = 0x35;
snums[i++].num = 0x88;
snums[i++].num = 0x89;
snums[i++].num = 0x98;
snums[i++].num = 0x99;
snums[i++].num = 0xA8;
snums[i++].num = 0xA9;
snums[i++].num = 0xB8;
snums[i++].num = 0xB9;
snums[i++].num = 0xC8;
snums[i++].num = 0xC9;
snums[i++].num = 0xD8;
snums[i++].num = 0xD9;
snums[i++].num = 0xE8;
snums[i++].num = 0xE9;
}
int qe_get_snum(void)
{
unsigned long flags;
int snum = -EBUSY;
int i;
local_irq_save(flags);
for (i = 0; i < QE_NUM_OF_SNUM; i++) {
if (snums[i].state == QE_SNUM_STATE_FREE) {
snums[i].state = QE_SNUM_STATE_USED;
snum = snums[i].num;
break;
}
}
local_irq_restore(flags);
return snum;
}
EXPORT_SYMBOL(qe_get_snum);
void qe_put_snum(u8 snum)
{
int i;
for (i = 0; i < QE_NUM_OF_SNUM; i++) {
if (snums[i].num == snum) {
snums[i].state = QE_SNUM_STATE_FREE;
break;
}
}
}
EXPORT_SYMBOL(qe_put_snum);
static int qe_sdma_init(void)
{
sdma_t *sdma = &qe_immr->sdma;
uint sdma_buf_offset;
if (!sdma)
return -ENODEV;
/* allocate 2 internal temporary buffers (512 bytes size each) for the SDMA */
sdma_buf_offset = qe_muram_alloc(512 * 2, 64);
if (IS_MURAM_ERR(sdma_buf_offset))
return -ENOMEM;
out_be32(&sdma->sdebcr, sdma_buf_offset & QE_SDEBCR_BA_MASK);
out_be32(&sdma->sdmr, (QE_SDMR_GLB_1_MSK | (0x1 << QE_SDMR_CEN_SHIFT)));
return 0;
}
/*
* muram_alloc / muram_free bits.
*/
static spinlock_t qe_muram_lock;
/* 16 blocks should be enough to satisfy all requests
* until the memory subsystem goes up... */
static rh_block_t qe_boot_muram_rh_block[16];
static rh_info_t qe_muram_info;
static void qe_muram_init(void)
{
spin_lock_init(&qe_muram_lock);
/* initialize the info header */
rh_init(&qe_muram_info, 1,
sizeof(qe_boot_muram_rh_block) /
sizeof(qe_boot_muram_rh_block[0]), qe_boot_muram_rh_block);
/* Attach the usable muram area */
/* XXX: This is actually crap. QE_DATAONLY_BASE and
* QE_DATAONLY_SIZE is only a subset of the available muram. It
* varies with the processor and the microcode patches activated.
* But the following should be at least safe.
*/
rh_attach_region(&qe_muram_info,
(void *)QE_MURAM_DATAONLY_BASE,
QE_MURAM_DATAONLY_SIZE);
}
/* This function returns an index into the MURAM area.
*/
uint qe_muram_alloc(uint size, uint align)
{
void *start;
unsigned long flags;
spin_lock_irqsave(&qe_muram_lock, flags);
start = rh_alloc_align(&qe_muram_info, size, align, "QE");
spin_unlock_irqrestore(&qe_muram_lock, flags);
return (uint) start;
}
EXPORT_SYMBOL(qe_muram_alloc);
int qe_muram_free(uint offset)
{
int ret;
unsigned long flags;
spin_lock_irqsave(&qe_muram_lock, flags);
ret = rh_free(&qe_muram_info, (void *)offset);
spin_unlock_irqrestore(&qe_muram_lock, flags);
return ret;
}
EXPORT_SYMBOL(qe_muram_free);
/* not sure if this is ever needed */
uint qe_muram_alloc_fixed(uint offset, uint size)
{
void *start;
unsigned long flags;
spin_lock_irqsave(&qe_muram_lock, flags);
start =
rh_alloc_fixed(&qe_muram_info, (void *)offset, size, "commproc");
spin_unlock_irqrestore(&qe_muram_lock, flags);
return (uint) start;
}
EXPORT_SYMBOL(qe_muram_alloc_fixed);
void qe_muram_dump(void)
{
rh_dump(&qe_muram_info);
}
EXPORT_SYMBOL(qe_muram_dump);
void *qe_muram_addr(uint offset)
{
return (void *)&qe_immr->muram[offset];
}
EXPORT_SYMBOL(qe_muram_addr);
| gpl-2.0 |
charlesDGY/coflo | coflo-0.0.4/third_party/boost_1_48_0/libs/geometry/doc/src/examples/algorithms/comparable_distance.cpp | 2 | 1461 | // Boost.Geometry (aka GGL, Generic Geometry Library)
// QuickBook Example
// Copyright (c) 2011 Barend Gehrels, Amsterdam, the Netherlands.
// Use, modification and distribution is subject to the Boost Software License,
// Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//[comparable_distance
//` Shows how to efficiently get the closest point
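// (For cartesian points comparable_distance returns the squared distance,
// so the sqrt is skipped inside the loop and paid only once, in the final
// call to distance() below.)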
#include <iostream>
#include <boost/geometry.hpp>
#include <boost/geometry/geometries/point_xy.hpp>
#include <boost/numeric/conversion/bounds.hpp>
#include <boost/foreach.hpp>
int main()
{
typedef boost::geometry::model::d2::point_xy<double> point_type;
point_type p(1.4, 2.6);
std::vector<point_type> v;
for (double x = 0.0; x <= 4.0; x++)
{
for (double y = 0.0; y <= 4.0; y++)
{
v.push_back(point_type(x, y));
}
}
point_type min_p;
double min_d = boost::numeric::bounds<double>::highest();
BOOST_FOREACH(point_type const& pv, v)
{
double d = boost::geometry::comparable_distance(p, pv);
if (d < min_d)
{
min_d = d;
min_p = pv;
}
}
std::cout
<< "Closest: " << boost::geometry::dsv(min_p) << std::endl
<< "At: " << boost::geometry::distance(p, min_p) << std::endl;
return 0;
}
//]
//[comparable_distance_output
/*`
Output:
[pre
Closest: (1, 3)
At: 0.565685
]
*/
//]
| gpl-2.0 |
c10ud/CHDK | platform/sx530hs/sub/boot_hdr.c | 2 | 2211 | #include "lolevel.h"
#include "platform.h"
#include "core.h"
#include "dryos31.h"
#define offsetof(TYPE, MEMBER) ((int) &((TYPE *)0)->MEMBER)
const char * const new_sa = &_end;
extern void task_CaptSeq();
extern void task_InitFileModules();
extern void task_RotaryEncoder();
//extern void task_MovieRecord();
extern void task_ExpDrv();
//extern void task_FileWrite();
/*----------------------------------------------------------------------
spytask
-----------------------------------------------------------------------*/
void spytask(long ua, long ub, long uc, long ud, long ue, long uf)
{
core_spytask();
}
/*----------------------------------------------------------------------
CreateTask_spytask
-----------------------------------------------------------------------*/
void CreateTask_spytask()
{
_CreateTask("SpyTask", 0x19, 0x2000, spytask, 0);
}
///*----------------------------------------------------------------------
// Pointer to stack location where jogdial task records previous and current
// jogdial positions
short *jog_position;
#define GREEN_LED 0xC022D1FC
#define AF_LED 0xC022D034
//debug use only
int debug_blink(int save_R0) {
int i;
*((volatile int *) GREEN_LED) = 0x93d800; // Turn on LED
for (i=0; i<800000; i++) // Wait a while
{
asm volatile ( "nop\n" );
}
*((volatile int *) GREEN_LED) = 0x83dc00; // Turn off LED
for (i=0; i<800000; i++) // Wait a while
{
asm volatile ( "nop\n" );
}
return save_R0;
};
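// Editorial note: debug_blink takes R0 in and returns it unchanged, so the
// call can be patched into a hooked firmware code path for tracing without
// disturbing the value the surrounding code expects in R0.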
void __attribute__((naked,noinline)) my_blinker(int n) {
asm volatile (
" STMFD SP!, {R0-R9,LR}\n"
);
int i, j;
for (j=0; j<n; j++)
{
*((volatile int *) GREEN_LED) = 0x93d800; // Turn on LED
for (i=0; i<0x200000; i++) { asm volatile ( "nop \n" ); }
*((volatile int *) GREEN_LED) = 0x83dc00; // Turn off LED
for (i=0; i<0x400000; i++) { asm volatile ( "nop \n" ); }
}
for (i=0; i<0x900000; i++) { asm volatile ( "nop \n" ); }
asm volatile (
" LDMFD SP!, {R0-R9,PC}\n"
);
}
/*----------------------------------------------------------------------
boot()
Main entry point for the CHDK code
-----------------------------------------------------------------------*/
| gpl-2.0 |
cloudlinux/cl7-kernel | net/ipv4/gre_offload.c | 2 | 6443 | /*
* IPV4 GSO/GRO offload support
* Linux INET implementation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* GRE GSO support
*/
#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <net/gre.h>
static int gre_gso_send_check(struct sk_buff *skb)
{
if (!skb->encapsulation)
return -EINVAL;
return 0;
}
static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
netdev_features_t features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
netdev_features_t enc_features;
int ghl;
struct gre_base_hdr *greh;
u16 mac_offset = skb->mac_header;
int mac_len = skb->mac_len;
__be16 protocol = skb->protocol;
int tnl_hlen;
bool csum;
if (unlikely(skb_shinfo(skb)->gso_type &
~(SKB_GSO_TCPV4 |
SKB_GSO_TCPV6 |
SKB_GSO_UDP |
SKB_GSO_DODGY |
SKB_GSO_TCP_ECN |
SKB_GSO_GRE |
SKB_GSO_GRE_CSUM |
SKB_GSO_IPIP)))
goto out;
if (unlikely(!pskb_may_pull(skb, sizeof(*greh))))
goto out;
greh = (struct gre_base_hdr *)skb_transport_header(skb);
ghl = skb_inner_mac_header(skb) - skb_transport_header(skb);
if (unlikely(ghl < sizeof(*greh)))
goto out;
csum = !!(greh->flags & GRE_CSUM);
if (csum)
skb->encap_hdr_csum = 1;
/* setup inner skb. */
skb->protocol = greh->protocol;
skb->encapsulation = 0;
if (unlikely(!pskb_may_pull(skb, ghl)))
goto out;
__skb_pull(skb, ghl);
skb_reset_mac_header(skb);
skb_set_network_header(skb, skb_inner_network_offset(skb));
skb->mac_len = skb_inner_network_offset(skb);
/* segment inner packet. */
enc_features = skb->dev->hw_enc_features & features;
segs = skb_mac_gso_segment(skb, enc_features);
if (IS_ERR_OR_NULL(segs)) {
skb_gso_error_unwind(skb, protocol, ghl, mac_offset, mac_len);
goto out;
}
skb = segs;
tnl_hlen = skb_tnl_header_len(skb);
do {
__skb_push(skb, ghl);
if (csum) {
__be32 *pcsum;
if (skb_has_shared_frag(skb)) {
int err;
err = __skb_linearize(skb);
if (err) {
kfree_skb_list(segs);
segs = ERR_PTR(err);
goto out;
}
}
skb_reset_transport_header(skb);
greh = (struct gre_base_hdr *)
skb_transport_header(skb);
pcsum = (__be32 *)(greh + 1);
*pcsum = 0;
*(__sum16 *)pcsum = gso_make_checksum(skb, 0);
}
__skb_push(skb, tnl_hlen - ghl);
skb_reset_inner_headers(skb);
skb->encapsulation = 1;
skb_reset_mac_header(skb);
skb_set_network_header(skb, mac_len);
skb->mac_len = mac_len;
skb->protocol = protocol;
} while ((skb = skb->next));
out:
return segs;
}
static struct sk_buff **gre_gro_receive(struct sk_buff **head,
struct sk_buff *skb)
{
struct sk_buff **pp = NULL;
struct sk_buff *p;
const struct gre_base_hdr *greh;
unsigned int hlen, grehlen;
unsigned int off;
int flush = 1;
struct packet_offload *ptype;
__be16 type;
off = skb_gro_offset(skb);
hlen = off + sizeof(*greh);
greh = skb_gro_header_fast(skb, off);
if (skb_gro_header_hard(skb, hlen)) {
greh = skb_gro_header_slow(skb, hlen, off);
if (unlikely(!greh))
goto out;
}
/* Only support version 0 and K (key), C (csum) flags. Note that
* although the support for the S (seq#) flag can be added easily
* for GRO, this is problematic for GSO hence can not be enabled
* here because a GRO pkt may end up in the forwarding path, thus
* requiring GSO support to break it up correctly.
*/
if ((greh->flags & ~(GRE_KEY|GRE_CSUM)) != 0)
goto out;
type = greh->protocol;
rcu_read_lock();
ptype = gro_find_receive_by_type(type);
if (ptype == NULL)
goto out_unlock;
grehlen = GRE_HEADER_SECTION;
if (greh->flags & GRE_KEY)
grehlen += GRE_HEADER_SECTION;
if (greh->flags & GRE_CSUM)
grehlen += GRE_HEADER_SECTION;
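	/* GRE_HEADER_SECTION is 4 bytes: the base header is one section, and
	 * each optional field present adds one more -- checksum+reserved for
	 * GRE_CSUM, the 32-bit key for GRE_KEY (RFC 2890 layout).
	 */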
hlen = off + grehlen;
if (skb_gro_header_hard(skb, hlen)) {
greh = skb_gro_header_slow(skb, hlen, off);
if (unlikely(!greh))
goto out_unlock;
}
/* Don't bother verifying checksum if we're going to flush anyway. */
if ((greh->flags & GRE_CSUM) && !NAPI_GRO_CB(skb)->flush) {
if (skb_gro_checksum_simple_validate(skb))
goto out_unlock;
skb_gro_checksum_try_convert(skb, IPPROTO_GRE, 0,
null_compute_pseudo);
}
flush = 0;
for (p = *head; p; p = p->next) {
const struct gre_base_hdr *greh2;
if (!NAPI_GRO_CB(p)->same_flow)
continue;
/* The following checks are needed to ensure only pkts
* from the same tunnel are considered for aggregation.
* The criteria for "the same tunnel" includes:
* 1) same version (we only support version 0 here)
* 2) same protocol (we only support ETH_P_IP for now)
* 3) same set of flags
* 4) same key if the key field is present.
*/
greh2 = (struct gre_base_hdr *)(p->data + off);
if (greh2->flags != greh->flags ||
greh2->protocol != greh->protocol) {
NAPI_GRO_CB(p)->same_flow = 0;
continue;
}
if (greh->flags & GRE_KEY) {
/* compare keys */
if (*(__be32 *)(greh2+1) != *(__be32 *)(greh+1)) {
NAPI_GRO_CB(p)->same_flow = 0;
continue;
}
}
}
skb_gro_pull(skb, grehlen);
/* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull()*/
skb_gro_postpull_rcsum(skb, greh, grehlen);
pp = ptype->callbacks.gro_receive(head, skb);
out_unlock:
rcu_read_unlock();
out:
NAPI_GRO_CB(skb)->flush |= flush;
return pp;
}
static int gre_gro_complete(struct sk_buff *skb, int nhoff)
{
struct gre_base_hdr *greh = (struct gre_base_hdr *)(skb->data + nhoff);
struct packet_offload *ptype;
unsigned int grehlen = sizeof(*greh);
int err = -ENOENT;
__be16 type;
skb->encapsulation = 1;
skb_shinfo(skb)->gso_type = SKB_GSO_GRE;
type = greh->protocol;
if (greh->flags & GRE_KEY)
grehlen += GRE_HEADER_SECTION;
if (greh->flags & GRE_CSUM)
grehlen += GRE_HEADER_SECTION;
rcu_read_lock();
ptype = gro_find_complete_by_type(type);
if (ptype != NULL)
err = ptype->callbacks.gro_complete(skb, nhoff + grehlen);
rcu_read_unlock();
return err;
}
static const struct net_offload gre_offload = {
.callbacks = {
.gso_send_check = gre_gso_send_check,
.gso_segment = gre_gso_segment,
.gro_receive = gre_gro_receive,
.gro_complete = gre_gro_complete,
},
};
static int __init gre_offload_init(void)
{
return inet_add_offload(&gre_offload, IPPROTO_GRE);
}
device_initcall(gre_offload_init);
| gpl-2.0 |
teamfx/openjfx-10-dev-rt | modules/javafx.web/src/main/native/Source/WebCore/rendering/svg/RenderSVGText.cpp | 2 | 20863 | /*
* Copyright (C) 2006 Apple Inc.
* Copyright (C) 2006 Alexander Kellett <lypanov@kde.org>
* Copyright (C) 2006 Oliver Hunt <ojh16@student.canterbury.ac.nz>
* Copyright (C) 2007 Nikolas Zimmermann <zimmermann@kde.org>
* Copyright (C) 2008 Rob Buis <buis@kde.org>
* Copyright (C) 2009 Dirk Schulze <krit@webkit.org>
* Copyright (C) Research In Motion Limited 2010-2012. All rights reserved.
* Copyright (C) 2012 Google Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public License
* along with this library; see the file COPYING.LIB. If not, write to
* the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301, USA.
*/
#include "config.h"
#include "RenderSVGText.h"
#include "FloatQuad.h"
#include "Font.h"
#include "GraphicsContext.h"
#include "HitTestRequest.h"
#include "HitTestResult.h"
#include "LayoutRepainter.h"
#include "PointerEventsHitRules.h"
#include "RenderIterator.h"
#include "RenderSVGInline.h"
#include "RenderSVGInlineText.h"
#include "RenderSVGResource.h"
#include "RenderSVGRoot.h"
#include "SVGLengthList.h"
#include "SVGResourcesCache.h"
#include "SVGRootInlineBox.h"
#include "SVGTextElement.h"
#include "SVGURIReference.h"
#include "TransformState.h"
#include "VisiblePosition.h"
#include <wtf/StackStats.h>
namespace WebCore {
RenderSVGText::RenderSVGText(SVGTextElement& element, RenderStyle&& style)
: RenderSVGBlock(element, WTFMove(style))
, m_needsReordering(false)
, m_needsPositioningValuesUpdate(false)
, m_needsTransformUpdate(true)
, m_needsTextMetricsUpdate(false)
{
}
RenderSVGText::~RenderSVGText()
{
ASSERT(m_layoutAttributes.isEmpty());
}
SVGTextElement& RenderSVGText::textElement() const
{
return downcast<SVGTextElement>(RenderSVGBlock::graphicsElement());
}
bool RenderSVGText::isChildAllowed(const RenderObject& child, const RenderStyle&) const
{
return child.isInline();
}
RenderSVGText* RenderSVGText::locateRenderSVGTextAncestor(RenderObject& start)
{
return lineageOfType<RenderSVGText>(start).first();
}
const RenderSVGText* RenderSVGText::locateRenderSVGTextAncestor(const RenderObject& start)
{
return lineageOfType<RenderSVGText>(start).first();
}
LayoutRect RenderSVGText::clippedOverflowRectForRepaint(const RenderLayerModelObject* repaintContainer) const
{
return SVGRenderSupport::clippedOverflowRectForRepaint(*this, repaintContainer);
}
LayoutRect RenderSVGText::computeRectForRepaint(const LayoutRect& rect, const RenderLayerModelObject* repaintContainer, RepaintContext context) const
{
return enclosingLayoutRect(computeFloatRectForRepaint(rect, repaintContainer, context.m_hasPositionFixedDescendant));
}
FloatRect RenderSVGText::computeFloatRectForRepaint(const FloatRect& repaintRect, const RenderLayerModelObject* repaintContainer, bool fixed) const
{
return SVGRenderSupport::computeFloatRectForRepaint(*this, repaintRect, repaintContainer, fixed);
}
void RenderSVGText::mapLocalToContainer(const RenderLayerModelObject* repaintContainer, TransformState& transformState, MapCoordinatesFlags, bool* wasFixed) const
{
SVGRenderSupport::mapLocalToContainer(*this, repaintContainer, transformState, wasFixed);
}
const RenderObject* RenderSVGText::pushMappingToContainer(const RenderLayerModelObject* ancestorToStopAt, RenderGeometryMap& geometryMap) const
{
return SVGRenderSupport::pushMappingToContainer(*this, ancestorToStopAt, geometryMap);
}
static inline void collectLayoutAttributes(RenderObject* text, Vector<SVGTextLayoutAttributes*>& attributes)
{
for (RenderObject* descendant = text; descendant; descendant = descendant->nextInPreOrder(text)) {
if (is<RenderSVGInlineText>(*descendant))
attributes.append(downcast<RenderSVGInlineText>(*descendant).layoutAttributes());
}
}
static inline bool findPreviousAndNextAttributes(RenderElement& start, RenderSVGInlineText* locateElement, bool& stopAfterNext, SVGTextLayoutAttributes*& previous, SVGTextLayoutAttributes*& next)
{
ASSERT(locateElement);
// FIXME: Make this iterative.
for (auto& child : childrenOfType<RenderObject>(start)) {
if (is<RenderSVGInlineText>(child)) {
auto& text = downcast<RenderSVGInlineText>(child);
if (locateElement != &text) {
if (stopAfterNext) {
next = text.layoutAttributes();
return true;
}
previous = text.layoutAttributes();
continue;
}
stopAfterNext = true;
continue;
}
if (!is<RenderSVGInline>(child))
continue;
if (findPreviousAndNextAttributes(downcast<RenderElement>(child), locateElement, stopAfterNext, previous, next))
return true;
}
return false;
}
inline bool RenderSVGText::shouldHandleSubtreeMutations() const
{
if (beingDestroyed() || !everHadLayout()) {
ASSERT(m_layoutAttributes.isEmpty());
ASSERT(!m_layoutAttributesBuilder.numberOfTextPositioningElements());
return false;
}
return true;
}
void RenderSVGText::subtreeChildWasAdded(RenderObject* child)
{
ASSERT(child);
if (!shouldHandleSubtreeMutations() || renderTreeBeingDestroyed())
return;
// The positioning elements cache doesn't include the new 'child' yet. Clear the
// cache, as the next buildLayoutAttributesForTextRenderer() call rebuilds it.
m_layoutAttributesBuilder.clearTextPositioningElements();
if (!child->isSVGInlineText() && !child->isSVGInline())
return;
// Detect changes in layout attributes and only measure those text parts that have changed!
Vector<SVGTextLayoutAttributes*> newLayoutAttributes;
collectLayoutAttributes(this, newLayoutAttributes);
if (newLayoutAttributes.isEmpty()) {
ASSERT(m_layoutAttributes.isEmpty());
return;
}
// Compare m_layoutAttributes with newLayoutAttributes to figure out which attribute got added.
size_t size = newLayoutAttributes.size();
SVGTextLayoutAttributes* attributes = 0;
for (size_t i = 0; i < size; ++i) {
attributes = newLayoutAttributes[i];
if (m_layoutAttributes.find(attributes) == notFound) {
// Every time this is invoked, there's only a single new entry in the newLayoutAttributes list, compared to the old in m_layoutAttributes.
bool stopAfterNext = false;
SVGTextLayoutAttributes* previous = 0;
SVGTextLayoutAttributes* next = 0;
ASSERT_UNUSED(child, &attributes->context() == child);
findPreviousAndNextAttributes(*this, &attributes->context(), stopAfterNext, previous, next);
if (previous)
m_layoutAttributesBuilder.buildLayoutAttributesForTextRenderer(previous->context());
m_layoutAttributesBuilder.buildLayoutAttributesForTextRenderer(attributes->context());
if (next)
m_layoutAttributesBuilder.buildLayoutAttributesForTextRenderer(next->context());
break;
}
}
#ifndef NDEBUG
// Verify that m_layoutAttributes only differs by a maximum of one entry.
for (size_t i = 0; i < size; ++i)
ASSERT(m_layoutAttributes.find(newLayoutAttributes[i]) != notFound || newLayoutAttributes[i] == attributes);
#endif
m_layoutAttributes = newLayoutAttributes;
}
static inline void checkLayoutAttributesConsistency(RenderSVGText* text, Vector<SVGTextLayoutAttributes*>& expectedLayoutAttributes)
{
#ifndef NDEBUG
Vector<SVGTextLayoutAttributes*> newLayoutAttributes;
collectLayoutAttributes(text, newLayoutAttributes);
ASSERT(newLayoutAttributes == expectedLayoutAttributes);
#else
UNUSED_PARAM(text);
UNUSED_PARAM(expectedLayoutAttributes);
#endif
}
void RenderSVGText::willBeDestroyed()
{
m_layoutAttributes.clear();
m_layoutAttributesBuilder.clearTextPositioningElements();
RenderSVGBlock::willBeDestroyed();
}
void RenderSVGText::subtreeChildWillBeRemoved(RenderObject* child, Vector<SVGTextLayoutAttributes*, 2>& affectedAttributes)
{
ASSERT(child);
if (!shouldHandleSubtreeMutations())
return;
checkLayoutAttributesConsistency(this, m_layoutAttributes);
// The positioning elements cache depends on the size of each text renderer in the
// subtree. If this changes, clear the cache. It's going to be rebuilt below.
m_layoutAttributesBuilder.clearTextPositioningElements();
if (m_layoutAttributes.isEmpty() || !child->isSVGInlineText())
return;
// This logic requires that the 'text' child is still inserted in the tree.
auto& text = downcast<RenderSVGInlineText>(*child);
bool stopAfterNext = false;
SVGTextLayoutAttributes* previous = nullptr;
SVGTextLayoutAttributes* next = nullptr;
if (!renderTreeBeingDestroyed())
findPreviousAndNextAttributes(*this, &text, stopAfterNext, previous, next);
if (previous)
affectedAttributes.append(previous);
if (next)
affectedAttributes.append(next);
bool removed = m_layoutAttributes.removeFirst(text.layoutAttributes());
ASSERT_UNUSED(removed, removed);
}
void RenderSVGText::subtreeChildWasRemoved(const Vector<SVGTextLayoutAttributes*, 2>& affectedAttributes)
{
if (!shouldHandleSubtreeMutations() || renderTreeBeingDestroyed()) {
ASSERT(affectedAttributes.isEmpty());
return;
}
// This is called immediately after subtreeChildWillBeDestroyed, once the RenderSVGInlineText::willBeDestroyed() method
// passes on to the base class, which removes us from the render tree. At this point we can update the layout attributes.
unsigned size = affectedAttributes.size();
for (unsigned i = 0; i < size; ++i)
m_layoutAttributesBuilder.buildLayoutAttributesForTextRenderer(affectedAttributes[i]->context());
}
void RenderSVGText::subtreeStyleDidChange(RenderSVGInlineText* text)
{
ASSERT(text);
if (!shouldHandleSubtreeMutations() || renderTreeBeingDestroyed())
return;
checkLayoutAttributesConsistency(this, m_layoutAttributes);
// Only update the metrics cache, but not the text positioning element cache
// nor the layout attributes cached in the leaf #text renderers.
for (RenderObject* descendant = text; descendant; descendant = descendant->nextInPreOrder(text)) {
if (is<RenderSVGInlineText>(*descendant))
m_layoutAttributesBuilder.rebuildMetricsForTextRenderer(downcast<RenderSVGInlineText>(*descendant));
}
}
void RenderSVGText::subtreeTextDidChange(RenderSVGInlineText* text)
{
ASSERT(text);
ASSERT(!beingDestroyed());
if (!everHadLayout()) {
ASSERT(m_layoutAttributes.isEmpty());
ASSERT(!m_layoutAttributesBuilder.numberOfTextPositioningElements());
return;
}
// Text transforms can cause text change to be signaled during addChild before m_layoutAttributes has been updated.
if (!m_layoutAttributes.contains(text->layoutAttributes())) {
ASSERT(!text->everHadLayout());
return;
}
// The positioning elements cache depends on the size of each text renderer in the
// subtree. If this changes, clear the cache. It's going to be rebuilt below.
m_layoutAttributesBuilder.clearTextPositioningElements();
checkLayoutAttributesConsistency(this, m_layoutAttributes);
for (RenderObject* descendant = text; descendant; descendant = descendant->nextInPreOrder(text)) {
if (is<RenderSVGInlineText>(*descendant))
m_layoutAttributesBuilder.buildLayoutAttributesForTextRenderer(downcast<RenderSVGInlineText>(*descendant));
}
}
static inline void updateFontInAllDescendants(RenderObject* start, SVGTextLayoutAttributesBuilder* builder = nullptr)
{
for (RenderObject* descendant = start; descendant; descendant = descendant->nextInPreOrder(start)) {
if (!is<RenderSVGInlineText>(*descendant))
continue;
auto& text = downcast<RenderSVGInlineText>(*descendant);
text.updateScaledFont();
if (builder)
builder->rebuildMetricsForTextRenderer(text);
}
}
void RenderSVGText::layout()
{
StackStats::LayoutCheckPoint layoutCheckPoint;
ASSERT(needsLayout());
LayoutRepainter repainter(*this, SVGRenderSupport::checkForSVGRepaintDuringLayout(*this));
bool updateCachedBoundariesInParents = false;
if (m_needsTransformUpdate) {
m_localTransform = textElement().animatedLocalTransform();
m_needsTransformUpdate = false;
updateCachedBoundariesInParents = true;
}
if (!everHadLayout()) {
// When laying out initially, collect all layout attributes, build the character data map,
// and propagate the resulting SVGTextLayoutAttributes to all RenderSVGInlineText children in the subtree.
ASSERT(m_layoutAttributes.isEmpty());
collectLayoutAttributes(this, m_layoutAttributes);
updateFontInAllDescendants(this);
m_layoutAttributesBuilder.buildLayoutAttributesForForSubtree(*this);
m_needsReordering = true;
m_needsTextMetricsUpdate = false;
m_needsPositioningValuesUpdate = false;
updateCachedBoundariesInParents = true;
} else if (m_needsPositioningValuesUpdate) {
// When the x/y/dx/dy/rotate lists change, recompute the layout attributes, and eventually
// update the on-screen font objects as well in all descendants.
if (m_needsTextMetricsUpdate) {
updateFontInAllDescendants(this);
m_needsTextMetricsUpdate = false;
}
m_layoutAttributesBuilder.buildLayoutAttributesForForSubtree(*this);
m_needsReordering = true;
m_needsPositioningValuesUpdate = false;
updateCachedBoundariesInParents = true;
} else if (m_needsTextMetricsUpdate || SVGRenderSupport::findTreeRootObject(*this)->isLayoutSizeChanged()) {
// If the root layout size changed (eg. window size changes) or the transform to the root
// context has changed then recompute the on-screen font size.
updateFontInAllDescendants(this, &m_layoutAttributesBuilder);
ASSERT(!m_needsReordering);
ASSERT(!m_needsPositioningValuesUpdate);
m_needsTextMetricsUpdate = false;
updateCachedBoundariesInParents = true;
}
checkLayoutAttributesConsistency(this, m_layoutAttributes);
// Reduced version of RenderBlock::layoutBlock(), which only takes care of SVG text.
// All if branches that could cause early exit in RenderBlocks layoutBlock() method are turned into assertions.
ASSERT(!isInline());
ASSERT(!simplifiedLayout());
ASSERT(!scrollsOverflow());
ASSERT(!hasControlClip());
ASSERT(!multiColumnFlowThread());
ASSERT(!positionedObjects());
ASSERT(!m_overflow);
ASSERT(!isAnonymousBlock());
if (!firstChild())
setChildrenInline(true);
// FIXME: We need to find a way to only layout the child boxes, if needed.
FloatRect oldBoundaries = objectBoundingBox();
ASSERT(childrenInline());
LayoutUnit repaintLogicalTop = 0;
LayoutUnit repaintLogicalBottom = 0;
rebuildFloatingObjectSetFromIntrudingFloats();
layoutInlineChildren(true, repaintLogicalTop, repaintLogicalBottom);
if (m_needsReordering)
m_needsReordering = false;
if (!updateCachedBoundariesInParents)
updateCachedBoundariesInParents = oldBoundaries != objectBoundingBox();
// Invalidate all resources of this client if our layout changed.
if (everHadLayout() && selfNeedsLayout())
SVGResourcesCache::clientLayoutChanged(*this);
// If our bounds changed, notify the parents.
if (updateCachedBoundariesInParents)
RenderSVGBlock::setNeedsBoundariesUpdate();
repainter.repaintAfterLayout();
clearNeedsLayout();
}
std::unique_ptr<RootInlineBox> RenderSVGText::createRootInlineBox()
{
auto box = std::make_unique<SVGRootInlineBox>(*this);
box->setHasVirtualLogicalHeight();
return WTFMove(box);
}
bool RenderSVGText::nodeAtFloatPoint(const HitTestRequest& request, HitTestResult& result, const FloatPoint& pointInParent, HitTestAction hitTestAction)
{
PointerEventsHitRules hitRules(PointerEventsHitRules::SVG_TEXT_HITTESTING, request, style().pointerEvents());
bool isVisible = (style().visibility() == VISIBLE);
if (isVisible || !hitRules.requireVisible) {
if ((hitRules.canHitStroke && (style().svgStyle().hasStroke() || !hitRules.requireStroke))
|| (hitRules.canHitFill && (style().svgStyle().hasFill() || !hitRules.requireFill))) {
FloatPoint localPoint = localToParentTransform().inverse().value_or(AffineTransform()).mapPoint(pointInParent);
if (!SVGRenderSupport::pointInClippingArea(*this, localPoint))
return false;
HitTestLocation hitTestLocation(LayoutPoint(flooredIntPoint(localPoint)));
return RenderBlock::nodeAtPoint(request, result, hitTestLocation, LayoutPoint(), hitTestAction);
}
}
return false;
}
bool RenderSVGText::nodeAtPoint(const HitTestRequest&, HitTestResult&, const HitTestLocation&, const LayoutPoint&, HitTestAction)
{
ASSERT_NOT_REACHED();
return false;
}
VisiblePosition RenderSVGText::positionForPoint(const LayoutPoint& pointInContents, const RenderRegion* region)
{
RootInlineBox* rootBox = firstRootBox();
if (!rootBox)
return createVisiblePosition(0, DOWNSTREAM);
ASSERT(!rootBox->nextRootBox());
ASSERT(childrenInline());
InlineBox* closestBox = downcast<SVGRootInlineBox>(*rootBox).closestLeafChildForPosition(pointInContents);
if (!closestBox)
return createVisiblePosition(0, DOWNSTREAM);
return closestBox->renderer().positionForPoint(LayoutPoint(pointInContents.x(), closestBox->y()), region);
}
void RenderSVGText::absoluteQuads(Vector<FloatQuad>& quads, bool* wasFixed) const
{
quads.append(localToAbsoluteQuad(strokeBoundingBox(), UseTransforms, wasFixed));
}
void RenderSVGText::paint(PaintInfo& paintInfo, const LayoutPoint&)
{
if (paintInfo.context().paintingDisabled())
return;
if (paintInfo.phase != PaintPhaseForeground
&& paintInfo.phase != PaintPhaseSelection)
return;
PaintInfo blockInfo(paintInfo);
GraphicsContextStateSaver stateSaver(blockInfo.context());
blockInfo.applyTransform(localToParentTransform());
RenderBlock::paint(blockInfo, LayoutPoint());
// Paint the outlines, if any
if (paintInfo.phase == PaintPhaseForeground) {
blockInfo.phase = PaintPhaseSelfOutline;
RenderBlock::paint(blockInfo, LayoutPoint());
}
}
FloatRect RenderSVGText::strokeBoundingBox() const
{
FloatRect strokeBoundaries = objectBoundingBox();
const SVGRenderStyle& svgStyle = style().svgStyle();
if (!svgStyle.hasStroke())
return strokeBoundaries;
SVGLengthContext lengthContext(&textElement());
strokeBoundaries.inflate(lengthContext.valueForLength(style().strokeWidth()));
return strokeBoundaries;
}
FloatRect RenderSVGText::repaintRectInLocalCoordinates() const
{
FloatRect repaintRect = strokeBoundingBox();
SVGRenderSupport::intersectRepaintRectWithResources(*this, repaintRect);
if (const ShadowData* textShadow = style().textShadow())
textShadow->adjustRectForShadow(repaintRect);
return repaintRect;
}
void RenderSVGText::addChild(RenderObject* child, RenderObject* beforeChild)
{
RenderSVGBlock::addChild(child, beforeChild);
SVGResourcesCache::clientWasAddedToTree(*child);
subtreeChildWasAdded(child);
}
void RenderSVGText::removeChild(RenderObject& child)
{
SVGResourcesCache::clientWillBeRemovedFromTree(child);
Vector<SVGTextLayoutAttributes*, 2> affectedAttributes;
subtreeChildWillBeRemoved(&child, affectedAttributes);
RenderSVGBlock::removeChild(child);
subtreeChildWasRemoved(affectedAttributes);
}
// Fix for <rdar://problem/8048875>. We should not render :first-line CSS Style
// in a SVG text element context.
RenderBlock* RenderSVGText::firstLineBlock() const
{
return 0;
}
// Fix for <rdar://problem/8048875>. We should not render :first-letter CSS Style
// in a SVG text element context.
void RenderSVGText::updateFirstLetter(RenderTreeMutationIsAllowed)
{
}
}
| gpl-2.0 |
Bdaman80/BDA-ACTV | arch/ia64/kernel/topology.c | 514 | 11113 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* This file contains NUMA specific variables and functions which can
* be split away from DISCONTIGMEM and are used on NUMA machines with
* contiguous memory.
* 2002/08/07 Erich Focht <efocht@ess.nec.de>
* Populate cpu entries in sysfs for non-numa systems as well
* Intel Corporation - Ashok Raj
* 02/27/2006 Zhang, Yanmin
* Populate cpu cache entries in sysfs for cpu cache info
*/
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/node.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/nodemask.h>
#include <linux/notifier.h>
#include <asm/mmzone.h>
#include <asm/numa.h>
#include <asm/cpu.h>
static struct ia64_cpu *sysfs_cpus;
void arch_fix_phys_package_id(int num, u32 slot)
{
#ifdef CONFIG_SMP
if (cpu_data(num)->socket_id == -1)
cpu_data(num)->socket_id = slot;
#endif
}
EXPORT_SYMBOL_GPL(arch_fix_phys_package_id);
#ifdef CONFIG_HOTPLUG_CPU
int __ref arch_register_cpu(int num)
{
#ifdef CONFIG_ACPI
/*
* If CPEI can be re-targetted or if this is not
* CPEI target, then it is hotpluggable
*/
if (can_cpei_retarget() || !is_cpu_cpei_target(num))
sysfs_cpus[num].cpu.hotpluggable = 1;
map_cpu_to_node(num, node_cpuid[num].nid);
#endif
return register_cpu(&sysfs_cpus[num].cpu, num);
}
EXPORT_SYMBOL(arch_register_cpu);
void __ref arch_unregister_cpu(int num)
{
unregister_cpu(&sysfs_cpus[num].cpu);
#ifdef CONFIG_ACPI
unmap_cpu_from_node(num, cpu_to_node(num));
#endif
}
EXPORT_SYMBOL(arch_unregister_cpu);
#else
static int __init arch_register_cpu(int num)
{
return register_cpu(&sysfs_cpus[num].cpu, num);
}
#endif /*CONFIG_HOTPLUG_CPU*/
static int __init topology_init(void)
{
int i, err = 0;
#ifdef CONFIG_NUMA
/*
* MCD - Do we want to register all ONLINE nodes, or all POSSIBLE nodes?
*/
for_each_online_node(i) {
if ((err = register_one_node(i)))
goto out;
}
#endif
sysfs_cpus = kzalloc(sizeof(struct ia64_cpu) * NR_CPUS, GFP_KERNEL);
if (!sysfs_cpus)
panic("kzalloc in topology_init failed - NR_CPUS too big?");
for_each_present_cpu(i) {
if((err = arch_register_cpu(i)))
goto out;
}
out:
return err;
}
subsys_initcall(topology_init);
/*
* Export cpu cache information through sysfs
*/
/*
* A bunch of string array to get pretty printing
*/
static const char *cache_types[] = {
"", /* not used */
"Instruction",
"Data",
"Unified" /* unified */
};
static const char *cache_mattrib[]={
"WriteThrough",
"WriteBack",
"", /* reserved */
"" /* reserved */
};
struct cache_info {
pal_cache_config_info_t cci;
cpumask_t shared_cpu_map;
int level;
int type;
struct kobject kobj;
};
struct cpu_cache_info {
struct cache_info *cache_leaves;
int num_cache_leaves;
struct kobject kobj;
};
static struct cpu_cache_info all_cpu_cache_info[NR_CPUS] __cpuinitdata;
#define LEAF_KOBJECT_PTR(x,y) (&all_cpu_cache_info[x].cache_leaves[y])
#ifdef CONFIG_SMP
static void __cpuinit cache_shared_cpu_map_setup( unsigned int cpu,
struct cache_info * this_leaf)
{
pal_cache_shared_info_t csi;
int num_shared, i = 0;
unsigned int j;
if (cpu_data(cpu)->threads_per_core <= 1 &&
cpu_data(cpu)->cores_per_socket <= 1) {
cpu_set(cpu, this_leaf->shared_cpu_map);
return;
}
if (ia64_pal_cache_shared_info(this_leaf->level,
this_leaf->type,
0,
&csi) != PAL_STATUS_SUCCESS)
return;
num_shared = (int) csi.num_shared;
do {
for_each_possible_cpu(j)
if (cpu_data(cpu)->socket_id == cpu_data(j)->socket_id
&& cpu_data(j)->core_id == csi.log1_cid
&& cpu_data(j)->thread_id == csi.log1_tid)
cpu_set(j, this_leaf->shared_cpu_map);
i++;
} while (i < num_shared &&
ia64_pal_cache_shared_info(this_leaf->level,
this_leaf->type,
i,
&csi) == PAL_STATUS_SUCCESS);
}
#else
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu,
struct cache_info * this_leaf)
{
cpu_set(cpu, this_leaf->shared_cpu_map);
return;
}
#endif
static ssize_t show_coherency_line_size(struct cache_info *this_leaf,
char *buf)
{
return sprintf(buf, "%u\n", 1 << this_leaf->cci.pcci_line_size);
}
static ssize_t show_ways_of_associativity(struct cache_info *this_leaf,
char *buf)
{
return sprintf(buf, "%u\n", this_leaf->cci.pcci_assoc);
}
static ssize_t show_attributes(struct cache_info *this_leaf, char *buf)
{
return sprintf(buf,
"%s\n",
cache_mattrib[this_leaf->cci.pcci_cache_attr]);
}
static ssize_t show_size(struct cache_info *this_leaf, char *buf)
{
return sprintf(buf, "%uK\n", this_leaf->cci.pcci_cache_size / 1024);
}
static ssize_t show_number_of_sets(struct cache_info *this_leaf, char *buf)
{
unsigned number_of_sets = this_leaf->cci.pcci_cache_size;
number_of_sets /= this_leaf->cci.pcci_assoc;
number_of_sets /= 1 << this_leaf->cci.pcci_line_size;
return sprintf(buf, "%u\n", number_of_sets);
}
static ssize_t show_shared_cpu_map(struct cache_info *this_leaf, char *buf)
{
ssize_t len;
cpumask_t shared_cpu_map;
cpus_and(shared_cpu_map, this_leaf->shared_cpu_map, cpu_online_map);
len = cpumask_scnprintf(buf, NR_CPUS+1, &shared_cpu_map);
len += sprintf(buf+len, "\n");
return len;
}
static ssize_t show_type(struct cache_info *this_leaf, char *buf)
{
int type = this_leaf->type + this_leaf->cci.pcci_unified;
return sprintf(buf, "%s\n", cache_types[type]);
}
static ssize_t show_level(struct cache_info *this_leaf, char *buf)
{
return sprintf(buf, "%u\n", this_leaf->level);
}
struct cache_attr {
struct attribute attr;
ssize_t (*show)(struct cache_info *, char *);
ssize_t (*store)(struct cache_info *, const char *, size_t count);
};
#ifdef define_one_ro
#undef define_one_ro
#endif
#define define_one_ro(_name) \
static struct cache_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)
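/* For example, define_one_ro(level) expands to:
 *   static struct cache_attr level = __ATTR(level, 0444, show_level, NULL);
 * i.e. a read-only sysfs attribute wired to the show_level() helper above. */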
define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(ways_of_associativity);
define_one_ro(size);
define_one_ro(number_of_sets);
define_one_ro(shared_cpu_map);
define_one_ro(attributes);
static struct attribute * cache_default_attrs[] = {
&type.attr,
&level.attr,
&coherency_line_size.attr,
&ways_of_associativity.attr,
&attributes.attr,
&size.attr,
&number_of_sets.attr,
&shared_cpu_map.attr,
NULL
};
#define to_object(k) container_of(k, struct cache_info, kobj)
#define to_attr(a) container_of(a, struct cache_attr, attr)
static ssize_t cache_show(struct kobject * kobj, struct attribute * attr, char * buf)
{
struct cache_attr *fattr = to_attr(attr);
struct cache_info *this_leaf = to_object(kobj);
ssize_t ret;
ret = fattr->show ? fattr->show(this_leaf, buf) : 0;
return ret;
}
static struct sysfs_ops cache_sysfs_ops = {
.show = cache_show
};
static struct kobj_type cache_ktype = {
.sysfs_ops = &cache_sysfs_ops,
.default_attrs = cache_default_attrs,
};
static struct kobj_type cache_ktype_percpu_entry = {
.sysfs_ops = &cache_sysfs_ops,
};
static void __cpuinit cpu_cache_sysfs_exit(unsigned int cpu)
{
kfree(all_cpu_cache_info[cpu].cache_leaves);
all_cpu_cache_info[cpu].cache_leaves = NULL;
all_cpu_cache_info[cpu].num_cache_leaves = 0;
memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));
return;
}
static int __cpuinit cpu_cache_sysfs_init(unsigned int cpu)
{
unsigned long i, levels, unique_caches;
pal_cache_config_info_t cci;
int j;
long status;
struct cache_info *this_cache;
int num_cache_leaves = 0;
if ((status = ia64_pal_cache_summary(&levels, &unique_caches)) != 0) {
printk(KERN_ERR "ia64_pal_cache_summary=%ld\n", status);
return -1;
}
this_cache=kzalloc(sizeof(struct cache_info)*unique_caches,
GFP_KERNEL);
if (this_cache == NULL)
return -ENOMEM;
for (i=0; i < levels; i++) {
for (j=2; j >0 ; j--) {
if ((status=ia64_pal_cache_config_info(i,j, &cci)) !=
PAL_STATUS_SUCCESS)
continue;
this_cache[num_cache_leaves].cci = cci;
this_cache[num_cache_leaves].level = i + 1;
this_cache[num_cache_leaves].type = j;
cache_shared_cpu_map_setup(cpu,
&this_cache[num_cache_leaves]);
num_cache_leaves ++;
}
}
all_cpu_cache_info[cpu].cache_leaves = this_cache;
all_cpu_cache_info[cpu].num_cache_leaves = num_cache_leaves;
memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));
return 0;
}
/* Add cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
{
unsigned int cpu = sys_dev->id;
unsigned long i, j;
struct cache_info *this_object;
int retval = 0;
cpumask_t oldmask;
if (all_cpu_cache_info[cpu].kobj.parent)
return 0;
oldmask = current->cpus_allowed;
retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
if (unlikely(retval))
return retval;
retval = cpu_cache_sysfs_init(cpu);
set_cpus_allowed(current, oldmask);
if (unlikely(retval < 0))
return retval;
retval = kobject_init_and_add(&all_cpu_cache_info[cpu].kobj,
&cache_ktype_percpu_entry, &sys_dev->kobj,
"%s", "cache");
if (unlikely(retval < 0)) {
cpu_cache_sysfs_exit(cpu);
return retval;
}
for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++) {
this_object = LEAF_KOBJECT_PTR(cpu,i);
retval = kobject_init_and_add(&(this_object->kobj),
&cache_ktype,
&all_cpu_cache_info[cpu].kobj,
"index%1lu", i);
if (unlikely(retval)) {
for (j = 0; j < i; j++) {
kobject_put(&(LEAF_KOBJECT_PTR(cpu,j)->kobj));
}
kobject_put(&all_cpu_cache_info[cpu].kobj);
cpu_cache_sysfs_exit(cpu);
return retval;
}
kobject_uevent(&(this_object->kobj), KOBJ_ADD);
}
kobject_uevent(&all_cpu_cache_info[cpu].kobj, KOBJ_ADD);
return retval;
}
/* Remove cache interface for CPU device */
static int __cpuinit cache_remove_dev(struct sys_device * sys_dev)
{
unsigned int cpu = sys_dev->id;
unsigned long i;
for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++)
kobject_put(&(LEAF_KOBJECT_PTR(cpu,i)->kobj));
if (all_cpu_cache_info[cpu].kobj.parent) {
kobject_put(&all_cpu_cache_info[cpu].kobj);
memset(&all_cpu_cache_info[cpu].kobj,
0,
sizeof(struct kobject));
}
cpu_cache_sysfs_exit(cpu);
return 0;
}
/*
* When a cpu is hot-plugged, do a check and initiate
* cache kobject if necessary
*/
static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
struct sys_device *sys_dev;
sys_dev = get_cpu_sysdev(cpu);
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
cache_add_dev(sys_dev);
break;
case CPU_DEAD:
case CPU_DEAD_FROZEN:
cache_remove_dev(sys_dev);
break;
}
return NOTIFY_OK;
}
static struct notifier_block __cpuinitdata cache_cpu_notifier =
{
.notifier_call = cache_cpu_callback
};
static int __init cache_sysfs_init(void)
{
int i;
for_each_online_cpu(i) {
struct sys_device *sys_dev = get_cpu_sysdev((unsigned int)i);
cache_add_dev(sys_dev);
}
register_hotcpu_notifier(&cache_cpu_notifier);
return 0;
}
device_initcall(cache_sysfs_init);
| gpl-2.0 |
kraml/desire-sense-kernel | drivers/media/dvb/frontends/zl10039.c | 514 | 6971 | /*
* Driver for Zarlink ZL10039 DVB-S tuner
*
* Copyright 2007 Jan D. Louw <jd.louw@mweb.co.za>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/dvb/frontend.h>
#include "dvb_frontend.h"
#include "zl10039.h"
static int debug;
#define dprintk(args...) \
do { \
if (debug) \
printk(KERN_DEBUG args); \
} while (0)
enum zl10039_model_id {
ID_ZL10039 = 1
};
struct zl10039_state {
struct i2c_adapter *i2c;
u8 i2c_addr;
u8 id;
};
enum zl10039_reg_addr {
PLL0 = 0,
PLL1,
PLL2,
PLL3,
RFFE,
BASE0,
BASE1,
BASE2,
LO0,
LO1,
LO2,
LO3,
LO4,
LO5,
LO6,
GENERAL
};
static int zl10039_read(const struct zl10039_state *state,
const enum zl10039_reg_addr reg, u8 *buf,
const size_t count)
{
u8 regbuf[] = { reg };
struct i2c_msg msg[] = {
{/* Write register address */
.addr = state->i2c_addr,
.flags = 0,
.buf = regbuf,
.len = 1,
}, {/* Read count bytes */
.addr = state->i2c_addr,
.flags = I2C_M_RD,
.buf = buf,
.len = count,
},
};
dprintk("%s\n", __func__);
if (i2c_transfer(state->i2c, msg, 2) != 2) {
dprintk("%s: i2c read error\n", __func__);
return -EREMOTEIO;
}
return 0; /* Success */
}
static int zl10039_write(struct zl10039_state *state,
const enum zl10039_reg_addr reg, const u8 *src,
const size_t count)
{
u8 buf[count + 1];
struct i2c_msg msg = {
.addr = state->i2c_addr,
.flags = 0,
.buf = buf,
.len = count + 1,
};
dprintk("%s\n", __func__);
/* Write register address and data in one go */
buf[0] = reg;
memcpy(&buf[1], src, count);
if (i2c_transfer(state->i2c, &msg, 1) != 1) {
dprintk("%s: i2c write error\n", __func__);
return -EREMOTEIO;
}
return 0; /* Success */
}
static inline int zl10039_readreg(struct zl10039_state *state,
const enum zl10039_reg_addr reg, u8 *val)
{
return zl10039_read(state, reg, val, 1);
}
static inline int zl10039_writereg(struct zl10039_state *state,
const enum zl10039_reg_addr reg,
const u8 val)
{
return zl10039_write(state, reg, &val, 1);
}
static int zl10039_init(struct dvb_frontend *fe)
{
struct zl10039_state *state = fe->tuner_priv;
int ret;
dprintk("%s\n", __func__);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
/* Reset logic */
ret = zl10039_writereg(state, GENERAL, 0x40);
if (ret < 0) {
dprintk("Note: i2c write error normal when resetting the "
"tuner\n");
}
/* Wake up */
ret = zl10039_writereg(state, GENERAL, 0x01);
if (ret < 0) {
dprintk("Tuner power up failed\n");
return ret;
}
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
return 0;
}
static int zl10039_sleep(struct dvb_frontend *fe)
{
struct zl10039_state *state = fe->tuner_priv;
int ret;
dprintk("%s\n", __func__);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
ret = zl10039_writereg(state, GENERAL, 0x80);
if (ret < 0) {
dprintk("Tuner sleep failed\n");
return ret;
}
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
return 0;
}
static int zl10039_set_params(struct dvb_frontend *fe,
struct dvb_frontend_parameters *params)
{
struct zl10039_state *state = fe->tuner_priv;
u8 buf[6];
u8 bf;
u32 fbw;
u32 div;
int ret;
dprintk("%s\n", __func__);
dprintk("Set frequency = %d, symbol rate = %d\n",
params->frequency, params->u.qpsk.symbol_rate);
/* Assumed 10.111 MHz crystal oscillator */
/* Cancelled num/den 80 to prevent overflow */
div = (params->frequency * 1000) / 126387;
fbw = (params->u.qpsk.symbol_rate * 27) / 32000;
/* Cancelled num/den 10 to prevent overflow */
bf = ((fbw * 5088) / 1011100) - 1;
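	/* Worked example (hypothetical values): tuning 1550000 kHz at
	 * 27500000 sym/s:
	 *   div = 1550000 * 1000 / 126387 = 12264
	 *         (one PLL step = 10111000 Hz / 80 = 126387.5 Hz)
	 *   fbw = 27500000 * 27 / 32000 = 23203 (kHz)
	 *   bf  = 23203 * 5088 / 1011100 - 1 = 115
	 */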
/*PLL divider*/
buf[0] = (div >> 8) & 0x7f;
buf[1] = (div >> 0) & 0xff;
/*Reference divider*/
/* Select reference ratio of 80 */
buf[2] = 0x1D;
/*PLL test modes*/
buf[3] = 0x40;
/*RF Control register*/
buf[4] = 0x6E; /* Bypass enable */
/*Baseband filter cutoff */
buf[5] = bf;
/* Open i2c gate */
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
/* BR = 10, Enable filter adjustment */
ret = zl10039_writereg(state, BASE1, 0x0A);
if (ret < 0)
goto error;
/* Write new config values */
ret = zl10039_write(state, PLL0, buf, sizeof(buf));
if (ret < 0)
goto error;
/* BR = 10, Disable filter adjustment */
ret = zl10039_writereg(state, BASE1, 0x6A);
if (ret < 0)
goto error;
/* Close i2c gate */
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
return 0;
error:
dprintk("Error setting tuner\n");
return ret;
}
static int zl10039_release(struct dvb_frontend *fe)
{
struct zl10039_state *state = fe->tuner_priv;
dprintk("%s\n", __func__);
kfree(state);
fe->tuner_priv = NULL;
return 0;
}
static struct dvb_tuner_ops zl10039_ops = {
.release = zl10039_release,
.init = zl10039_init,
.sleep = zl10039_sleep,
.set_params = zl10039_set_params,
};
struct dvb_frontend *zl10039_attach(struct dvb_frontend *fe,
u8 i2c_addr, struct i2c_adapter *i2c)
{
struct zl10039_state *state = NULL;
dprintk("%s\n", __func__);
state = kmalloc(sizeof(struct zl10039_state), GFP_KERNEL);
if (state == NULL)
goto error;
state->i2c = i2c;
state->i2c_addr = i2c_addr;
/* Open i2c gate */
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
/* check if this is a valid tuner */
if (zl10039_readreg(state, GENERAL, &state->id) < 0) {
/* Close i2c gate */
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
goto error;
}
/* Close i2c gate */
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
state->id = state->id & 0x0f;
switch (state->id) {
case ID_ZL10039:
strcpy(fe->ops.tuner_ops.info.name,
"Zarlink ZL10039 DVB-S tuner");
break;
default:
dprintk("Chip ID=%x does not match a known type\n", state->id);
goto error;
}
memcpy(&fe->ops.tuner_ops, &zl10039_ops, sizeof(struct dvb_tuner_ops));
fe->tuner_priv = state;
dprintk("Tuner attached @ i2c address 0x%02x\n", i2c_addr);
return fe;
error:
kfree(state);
return NULL;
}
EXPORT_SYMBOL(zl10039_attach);
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
MODULE_DESCRIPTION("Zarlink ZL10039 DVB-S tuner driver");
MODULE_AUTHOR("Jan D. Louw <jd.louw@mweb.co.za>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
tusisma/linux-2.6.35_10.11.01 | sound/soc/codecs/wm8961.c | 514 | 32940 | /*
* wm8961.c -- WM8961 ALSA SoC Audio driver
*
* Author: Mark Brown
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Currently unimplemented features:
* - ALC
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>
#include <sound/initval.h>
#include <sound/tlv.h>
#include "wm8961.h"
#define WM8961_MAX_REGISTER 0xFC
static u16 wm8961_reg_defaults[] = {
0x009F, /* R0 - Left Input volume */
0x009F, /* R1 - Right Input volume */
0x0000, /* R2 - LOUT1 volume */
0x0000, /* R3 - ROUT1 volume */
0x0020, /* R4 - Clocking1 */
0x0008, /* R5 - ADC & DAC Control 1 */
0x0000, /* R6 - ADC & DAC Control 2 */
0x000A, /* R7 - Audio Interface 0 */
0x01F4, /* R8 - Clocking2 */
0x0000, /* R9 - Audio Interface 1 */
0x00FF, /* R10 - Left DAC volume */
0x00FF, /* R11 - Right DAC volume */
0x0000, /* R12 */
0x0000, /* R13 */
0x0040, /* R14 - Audio Interface 2 */
0x0000, /* R15 - Software Reset */
0x0000, /* R16 */
0x007B, /* R17 - ALC1 */
0x0000, /* R18 - ALC2 */
0x0032, /* R19 - ALC3 */
0x0000, /* R20 - Noise Gate */
0x00C0, /* R21 - Left ADC volume */
0x00C0, /* R22 - Right ADC volume */
0x0120, /* R23 - Additional control(1) */
0x0000, /* R24 - Additional control(2) */
0x0000, /* R25 - Pwr Mgmt (1) */
0x0000, /* R26 - Pwr Mgmt (2) */
0x0000, /* R27 - Additional Control (3) */
0x0000, /* R28 - Anti-pop */
0x0000, /* R29 */
0x005F, /* R30 - Clocking 3 */
0x0000, /* R31 */
0x0000, /* R32 - ADCL signal path */
0x0000, /* R33 - ADCR signal path */
0x0000, /* R34 */
0x0000, /* R35 */
0x0000, /* R36 */
0x0000, /* R37 */
0x0000, /* R38 */
0x0000, /* R39 */
0x0000, /* R40 - LOUT2 volume */
0x0000, /* R41 - ROUT2 volume */
0x0000, /* R42 */
0x0000, /* R43 */
0x0000, /* R44 */
0x0000, /* R45 */
0x0000, /* R46 */
0x0000, /* R47 - Pwr Mgmt (3) */
0x0023, /* R48 - Additional Control (4) */
0x0000, /* R49 - Class D Control 1 */
0x0000, /* R50 */
0x0003, /* R51 - Class D Control 2 */
0x0000, /* R52 */
0x0000, /* R53 */
0x0000, /* R54 */
0x0000, /* R55 */
0x0106, /* R56 - Clocking 4 */
0x0000, /* R57 - DSP Sidetone 0 */
0x0000, /* R58 - DSP Sidetone 1 */
0x0000, /* R59 */
0x0000, /* R60 - DC Servo 0 */
0x0000, /* R61 - DC Servo 1 */
0x0000, /* R62 */
0x015E, /* R63 - DC Servo 3 */
0x0010, /* R64 */
0x0010, /* R65 - DC Servo 5 */
0x0000, /* R66 */
0x0001, /* R67 */
0x0003, /* R68 - Analogue PGA Bias */
0x0000, /* R69 - Analogue HP 0 */
0x0060, /* R70 */
0x01FB, /* R71 - Analogue HP 2 */
0x0000, /* R72 - Charge Pump 1 */
0x0065, /* R73 */
0x005F, /* R74 */
0x0059, /* R75 */
0x006B, /* R76 */
0x0038, /* R77 */
0x000C, /* R78 */
0x000A, /* R79 */
0x006B, /* R80 */
0x0000, /* R81 */
0x0000, /* R82 - Charge Pump B */
0x0087, /* R83 */
0x0000, /* R84 */
0x005C, /* R85 */
0x0000, /* R86 */
0x0000, /* R87 - Write Sequencer 1 */
0x0000, /* R88 - Write Sequencer 2 */
0x0000, /* R89 - Write Sequencer 3 */
0x0000, /* R90 - Write Sequencer 4 */
0x0000, /* R91 - Write Sequencer 5 */
0x0000, /* R92 - Write Sequencer 6 */
0x0000, /* R93 - Write Sequencer 7 */
0x0000, /* R94 */
0x0000, /* R95 */
0x0000, /* R96 */
0x0000, /* R97 */
0x0000, /* R98 */
0x0000, /* R99 */
0x0000, /* R100 */
0x0000, /* R101 */
0x0000, /* R102 */
0x0000, /* R103 */
0x0000, /* R104 */
0x0000, /* R105 */
0x0000, /* R106 */
0x0000, /* R107 */
0x0000, /* R108 */
0x0000, /* R109 */
0x0000, /* R110 */
0x0000, /* R111 */
0x0000, /* R112 */
0x0000, /* R113 */
0x0000, /* R114 */
0x0000, /* R115 */
0x0000, /* R116 */
0x0000, /* R117 */
0x0000, /* R118 */
0x0000, /* R119 */
0x0000, /* R120 */
0x0000, /* R121 */
0x0000, /* R122 */
0x0000, /* R123 */
0x0000, /* R124 */
0x0000, /* R125 */
0x0000, /* R126 */
0x0000, /* R127 */
0x0000, /* R128 */
0x0000, /* R129 */
0x0000, /* R130 */
0x0000, /* R131 */
0x0000, /* R132 */
0x0000, /* R133 */
0x0000, /* R134 */
0x0000, /* R135 */
0x0000, /* R136 */
0x0000, /* R137 */
0x0000, /* R138 */
0x0000, /* R139 */
0x0000, /* R140 */
0x0000, /* R141 */
0x0000, /* R142 */
0x0000, /* R143 */
0x0000, /* R144 */
0x0000, /* R145 */
0x0000, /* R146 */
0x0000, /* R147 */
0x0000, /* R148 */
0x0000, /* R149 */
0x0000, /* R150 */
0x0000, /* R151 */
0x0000, /* R152 */
0x0000, /* R153 */
0x0000, /* R154 */
0x0000, /* R155 */
0x0000, /* R156 */
0x0000, /* R157 */
0x0000, /* R158 */
0x0000, /* R159 */
0x0000, /* R160 */
0x0000, /* R161 */
0x0000, /* R162 */
0x0000, /* R163 */
0x0000, /* R164 */
0x0000, /* R165 */
0x0000, /* R166 */
0x0000, /* R167 */
0x0000, /* R168 */
0x0000, /* R169 */
0x0000, /* R170 */
0x0000, /* R171 */
0x0000, /* R172 */
0x0000, /* R173 */
0x0000, /* R174 */
0x0000, /* R175 */
0x0000, /* R176 */
0x0000, /* R177 */
0x0000, /* R178 */
0x0000, /* R179 */
0x0000, /* R180 */
0x0000, /* R181 */
0x0000, /* R182 */
0x0000, /* R183 */
0x0000, /* R184 */
0x0000, /* R185 */
0x0000, /* R186 */
0x0000, /* R187 */
0x0000, /* R188 */
0x0000, /* R189 */
0x0000, /* R190 */
0x0000, /* R191 */
0x0000, /* R192 */
0x0000, /* R193 */
0x0000, /* R194 */
0x0000, /* R195 */
0x0030, /* R196 */
0x0006, /* R197 */
0x0000, /* R198 */
0x0060, /* R199 */
0x0000, /* R200 */
0x003F, /* R201 */
0x0000, /* R202 */
0x0000, /* R203 */
0x0000, /* R204 */
0x0001, /* R205 */
0x0000, /* R206 */
0x0181, /* R207 */
0x0005, /* R208 */
0x0008, /* R209 */
0x0008, /* R210 */
0x0000, /* R211 */
0x013B, /* R212 */
0x0000, /* R213 */
0x0000, /* R214 */
0x0000, /* R215 */
0x0000, /* R216 */
0x0070, /* R217 */
0x0000, /* R218 */
0x0000, /* R219 */
0x0000, /* R220 */
0x0000, /* R221 */
0x0000, /* R222 */
0x0003, /* R223 */
0x0000, /* R224 */
0x0000, /* R225 */
0x0001, /* R226 */
0x0008, /* R227 */
0x0000, /* R228 */
0x0000, /* R229 */
0x0000, /* R230 */
0x0000, /* R231 */
0x0004, /* R232 */
0x0000, /* R233 */
0x0000, /* R234 */
0x0000, /* R235 */
0x0000, /* R236 */
0x0000, /* R237 */
0x0080, /* R238 */
0x0000, /* R239 */
0x0000, /* R240 */
0x0000, /* R241 */
0x0000, /* R242 */
0x0000, /* R243 */
0x0000, /* R244 */
0x0052, /* R245 */
0x0110, /* R246 */
0x0040, /* R247 */
0x0000, /* R248 */
0x0030, /* R249 */
0x0000, /* R250 */
0x0000, /* R251 */
0x0001, /* R252 - General test 1 */
};
struct wm8961_priv {
struct snd_soc_codec codec;
int sysclk;
u16 reg_cache[WM8961_MAX_REGISTER + 1]; /* registers R0..R252 inclusive */
};
static int wm8961_volatile_register(unsigned int reg)
{
switch (reg) {
case WM8961_SOFTWARE_RESET:
case WM8961_WRITE_SEQUENCER_7:
case WM8961_DC_SERVO_1:
return 1;
default:
return 0;
}
}
static int wm8961_reset(struct snd_soc_codec *codec)
{
return snd_soc_write(codec, WM8961_SOFTWARE_RESET, 0);
}
/*
* The headphone output supports special anti-pop sequences giving
* silent power up and power down.
*/
static int wm8961_hp_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_codec *codec = w->codec;
u16 hp_reg = snd_soc_read(codec, WM8961_ANALOGUE_HP_0);
u16 cp_reg = snd_soc_read(codec, WM8961_CHARGE_PUMP_1);
u16 pwr_reg = snd_soc_read(codec, WM8961_PWR_MGMT_2);
u16 dcs_reg = snd_soc_read(codec, WM8961_DC_SERVO_1);
int timeout = 500;
if (event & SND_SOC_DAPM_POST_PMU) {
/* Make sure the output is shorted */
hp_reg &= ~(WM8961_HPR_RMV_SHORT | WM8961_HPL_RMV_SHORT);
snd_soc_write(codec, WM8961_ANALOGUE_HP_0, hp_reg);
/* Enable the charge pump */
cp_reg |= WM8961_CP_ENA;
snd_soc_write(codec, WM8961_CHARGE_PUMP_1, cp_reg);
mdelay(5);
/* Enable the PGA */
pwr_reg |= WM8961_LOUT1_PGA | WM8961_ROUT1_PGA;
snd_soc_write(codec, WM8961_PWR_MGMT_2, pwr_reg);
/* Enable the amplifier */
hp_reg |= WM8961_HPR_ENA | WM8961_HPL_ENA;
snd_soc_write(codec, WM8961_ANALOGUE_HP_0, hp_reg);
/* Second stage enable */
hp_reg |= WM8961_HPR_ENA_DLY | WM8961_HPL_ENA_DLY;
snd_soc_write(codec, WM8961_ANALOGUE_HP_0, hp_reg);
/* Enable the DC servo & trigger startup */
dcs_reg |=
WM8961_DCS_ENA_CHAN_HPR | WM8961_DCS_TRIG_STARTUP_HPR |
WM8961_DCS_ENA_CHAN_HPL | WM8961_DCS_TRIG_STARTUP_HPL;
dev_dbg(codec->dev, "Enabling DC servo\n");
snd_soc_write(codec, WM8961_DC_SERVO_1, dcs_reg);
do {
msleep(1);
dcs_reg = snd_soc_read(codec, WM8961_DC_SERVO_1);
} while (--timeout &&
dcs_reg & (WM8961_DCS_TRIG_STARTUP_HPR |
WM8961_DCS_TRIG_STARTUP_HPL));
if (dcs_reg & (WM8961_DCS_TRIG_STARTUP_HPR |
WM8961_DCS_TRIG_STARTUP_HPL))
dev_err(codec->dev, "DC servo timed out\n");
else
dev_dbg(codec->dev, "DC servo startup complete\n");
/* Enable the output stage */
hp_reg |= WM8961_HPR_ENA_OUTP | WM8961_HPL_ENA_OUTP;
snd_soc_write(codec, WM8961_ANALOGUE_HP_0, hp_reg);
/* Remove the short on the output stage */
hp_reg |= WM8961_HPR_RMV_SHORT | WM8961_HPL_RMV_SHORT;
snd_soc_write(codec, WM8961_ANALOGUE_HP_0, hp_reg);
}
if (event & SND_SOC_DAPM_PRE_PMD) {
/* Short the output */
hp_reg &= ~(WM8961_HPR_RMV_SHORT | WM8961_HPL_RMV_SHORT);
snd_soc_write(codec, WM8961_ANALOGUE_HP_0, hp_reg);
/* Disable the output stage */
hp_reg &= ~(WM8961_HPR_ENA_OUTP | WM8961_HPL_ENA_OUTP);
snd_soc_write(codec, WM8961_ANALOGUE_HP_0, hp_reg);
/* Disable DC offset cancellation */
dcs_reg &= ~(WM8961_DCS_ENA_CHAN_HPR |
WM8961_DCS_ENA_CHAN_HPL);
snd_soc_write(codec, WM8961_DC_SERVO_1, dcs_reg);
/* Finish up */
hp_reg &= ~(WM8961_HPR_ENA_DLY | WM8961_HPR_ENA |
WM8961_HPL_ENA_DLY | WM8961_HPL_ENA);
snd_soc_write(codec, WM8961_ANALOGUE_HP_0, hp_reg);
/* Disable the PGA */
pwr_reg &= ~(WM8961_LOUT1_PGA | WM8961_ROUT1_PGA);
snd_soc_write(codec, WM8961_PWR_MGMT_2, pwr_reg);
/* Disable the charge pump */
dev_dbg(codec->dev, "Disabling charge pump\n");
snd_soc_write(codec, WM8961_CHARGE_PUMP_1,
cp_reg & ~WM8961_CP_ENA);
}
return 0;
}
static int wm8961_spk_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_codec *codec = w->codec;
u16 pwr_reg = snd_soc_read(codec, WM8961_PWR_MGMT_2);
u16 spk_reg = snd_soc_read(codec, WM8961_CLASS_D_CONTROL_1);
if (event & SND_SOC_DAPM_POST_PMU) {
/* Enable the PGA */
pwr_reg |= WM8961_SPKL_PGA | WM8961_SPKR_PGA;
snd_soc_write(codec, WM8961_PWR_MGMT_2, pwr_reg);
/* Enable the amplifier */
spk_reg |= WM8961_SPKL_ENA | WM8961_SPKR_ENA;
snd_soc_write(codec, WM8961_CLASS_D_CONTROL_1, spk_reg);
}
if (event & SND_SOC_DAPM_PRE_PMD) {
/* Enable the amplifier */
spk_reg &= ~(WM8961_SPKL_ENA | WM8961_SPKR_ENA);
snd_soc_write(codec, WM8961_CLASS_D_CONTROL_1, spk_reg);
/* Enable the PGA */
pwr_reg &= ~(WM8961_SPKL_PGA | WM8961_SPKR_PGA);
snd_soc_write(codec, WM8961_PWR_MGMT_2, pwr_reg);
}
return 0;
}
static const char *adc_hpf_text[] = {
"Hi-fi", "Voice 1", "Voice 2", "Voice 3",
};
static const struct soc_enum adc_hpf =
SOC_ENUM_SINGLE(WM8961_ADC_DAC_CONTROL_2, 7, 4, adc_hpf_text);
static const char *dac_deemph_text[] = {
"None", "32kHz", "44.1kHz", "48kHz",
};
static const struct soc_enum dac_deemph =
SOC_ENUM_SINGLE(WM8961_ADC_DAC_CONTROL_1, 1, 4, dac_deemph_text);
static const DECLARE_TLV_DB_SCALE(out_tlv, -12100, 100, 1);
static const DECLARE_TLV_DB_SCALE(hp_sec_tlv, -700, 100, 0);
static const DECLARE_TLV_DB_SCALE(adc_tlv, -7200, 75, 1);
static const DECLARE_TLV_DB_SCALE(sidetone_tlv, -3600, 300, 0);
static unsigned int boost_tlv[] = {
TLV_DB_RANGE_HEAD(4),
0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
1, 1, TLV_DB_SCALE_ITEM(13, 0, 0),
2, 2, TLV_DB_SCALE_ITEM(20, 0, 0),
3, 3, TLV_DB_SCALE_ITEM(29, 0, 0),
};
static const DECLARE_TLV_DB_SCALE(pga_tlv, -2325, 75, 0);
static const struct snd_kcontrol_new wm8961_snd_controls[] = {
SOC_DOUBLE_R_TLV("Headphone Volume", WM8961_LOUT1_VOLUME, WM8961_ROUT1_VOLUME,
0, 127, 0, out_tlv),
SOC_DOUBLE_TLV("Headphone Secondary Volume", WM8961_ANALOGUE_HP_2,
6, 3, 7, 0, hp_sec_tlv),
SOC_DOUBLE_R("Headphone ZC Switch", WM8961_LOUT1_VOLUME, WM8961_ROUT1_VOLUME,
7, 1, 0),
SOC_DOUBLE_R_TLV("Speaker Volume", WM8961_LOUT2_VOLUME, WM8961_ROUT2_VOLUME,
0, 127, 0, out_tlv),
SOC_DOUBLE_R("Speaker ZC Switch", WM8961_LOUT2_VOLUME, WM8961_ROUT2_VOLUME,
7, 1, 0),
SOC_SINGLE("Speaker AC Gain", WM8961_CLASS_D_CONTROL_2, 0, 7, 0),
SOC_SINGLE("DAC x128 OSR Switch", WM8961_ADC_DAC_CONTROL_2, 0, 1, 0),
SOC_ENUM("DAC Deemphasis", dac_deemph),
SOC_SINGLE("DAC Soft Mute Switch", WM8961_ADC_DAC_CONTROL_2, 3, 1, 0),
SOC_DOUBLE_R_TLV("Sidetone Volume", WM8961_DSP_SIDETONE_0,
WM8961_DSP_SIDETONE_1, 4, 12, 0, sidetone_tlv),
SOC_SINGLE("ADC High Pass Filter Switch", WM8961_ADC_DAC_CONTROL_1, 0, 1, 0),
SOC_ENUM("ADC High Pass Filter Mode", adc_hpf),
SOC_DOUBLE_R_TLV("Capture Volume",
WM8961_LEFT_ADC_VOLUME, WM8961_RIGHT_ADC_VOLUME,
1, 119, 0, adc_tlv),
SOC_DOUBLE_R_TLV("Capture Boost Volume",
WM8961_ADCL_SIGNAL_PATH, WM8961_ADCR_SIGNAL_PATH,
4, 3, 0, boost_tlv),
SOC_DOUBLE_R_TLV("Capture PGA Volume",
WM8961_LEFT_INPUT_VOLUME, WM8961_RIGHT_INPUT_VOLUME,
0, 62, 0, pga_tlv),
SOC_DOUBLE_R("Capture PGA ZC Switch",
WM8961_LEFT_INPUT_VOLUME, WM8961_RIGHT_INPUT_VOLUME,
6, 1, 1),
SOC_DOUBLE_R("Capture PGA Switch",
WM8961_LEFT_INPUT_VOLUME, WM8961_RIGHT_INPUT_VOLUME,
7, 1, 1),
};
static const char *sidetone_text[] = {
"None", "Left", "Right"
};
static const struct soc_enum dacl_sidetone =
SOC_ENUM_SINGLE(WM8961_DSP_SIDETONE_0, 2, 3, sidetone_text);
static const struct soc_enum dacr_sidetone =
SOC_ENUM_SINGLE(WM8961_DSP_SIDETONE_1, 2, 3, sidetone_text);
static const struct snd_kcontrol_new dacl_mux =
SOC_DAPM_ENUM("DACL Sidetone", dacl_sidetone);
static const struct snd_kcontrol_new dacr_mux =
SOC_DAPM_ENUM("DACR Sidetone", dacr_sidetone);
static const struct snd_soc_dapm_widget wm8961_dapm_widgets[] = {
SND_SOC_DAPM_INPUT("LINPUT"),
SND_SOC_DAPM_INPUT("RINPUT"),
SND_SOC_DAPM_SUPPLY("CLK_DSP", WM8961_CLOCKING2, 4, 0, NULL, 0),
SND_SOC_DAPM_PGA("Left Input", WM8961_PWR_MGMT_1, 5, 0, NULL, 0),
SND_SOC_DAPM_PGA("Right Input", WM8961_PWR_MGMT_1, 4, 0, NULL, 0),
SND_SOC_DAPM_ADC("ADCL", "HiFi Capture", WM8961_PWR_MGMT_1, 3, 0),
SND_SOC_DAPM_ADC("ADCR", "HiFi Capture", WM8961_PWR_MGMT_1, 2, 0),
SND_SOC_DAPM_MICBIAS("MICBIAS", WM8961_PWR_MGMT_1, 1, 0),
SND_SOC_DAPM_MUX("DACL Sidetone", SND_SOC_NOPM, 0, 0, &dacl_mux),
SND_SOC_DAPM_MUX("DACR Sidetone", SND_SOC_NOPM, 0, 0, &dacr_mux),
SND_SOC_DAPM_DAC("DACL", "HiFi Playback", WM8961_PWR_MGMT_2, 8, 0),
SND_SOC_DAPM_DAC("DACR", "HiFi Playback", WM8961_PWR_MGMT_2, 7, 0),
/* Handle as a mono path for DCS */
SND_SOC_DAPM_PGA_E("Headphone Output", SND_SOC_NOPM,
4, 0, NULL, 0, wm8961_hp_event,
SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
SND_SOC_DAPM_PGA_E("Speaker Output", SND_SOC_NOPM,
4, 0, NULL, 0, wm8961_spk_event,
SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
SND_SOC_DAPM_OUTPUT("HP_L"),
SND_SOC_DAPM_OUTPUT("HP_R"),
SND_SOC_DAPM_OUTPUT("SPK_LN"),
SND_SOC_DAPM_OUTPUT("SPK_LP"),
SND_SOC_DAPM_OUTPUT("SPK_RN"),
SND_SOC_DAPM_OUTPUT("SPK_RP"),
};
static const struct snd_soc_dapm_route audio_paths[] = {
{ "DACL", NULL, "CLK_DSP" },
{ "DACL", NULL, "DACL Sidetone" },
{ "DACR", NULL, "CLK_DSP" },
{ "DACR", NULL, "DACR Sidetone" },
{ "DACL Sidetone", "Left", "ADCL" },
{ "DACL Sidetone", "Right", "ADCR" },
{ "DACR Sidetone", "Left", "ADCL" },
{ "DACR Sidetone", "Right", "ADCR" },
{ "HP_L", NULL, "Headphone Output" },
{ "HP_R", NULL, "Headphone Output" },
{ "Headphone Output", NULL, "DACL" },
{ "Headphone Output", NULL, "DACR" },
{ "SPK_LN", NULL, "Speaker Output" },
{ "SPK_LP", NULL, "Speaker Output" },
{ "SPK_RN", NULL, "Speaker Output" },
{ "SPK_RP", NULL, "Speaker Output" },
{ "Speaker Output", NULL, "DACL" },
{ "Speaker Output", NULL, "DACR" },
{ "ADCL", NULL, "Left Input" },
{ "ADCL", NULL, "CLK_DSP" },
{ "ADCR", NULL, "Right Input" },
{ "ADCR", NULL, "CLK_DSP" },
{ "Left Input", NULL, "LINPUT" },
{ "Right Input", NULL, "RINPUT" },
};
/* Values for CLK_SYS_RATE */
static struct {
int ratio;
u16 val;
} wm8961_clk_sys_ratio[] = {
{ 64, 0 },
{ 128, 1 },
{ 192, 2 },
{ 256, 3 },
{ 384, 4 },
{ 512, 5 },
{ 768, 6 },
{ 1024, 7 },
{ 1408, 8 },
{ 1536, 9 },
};
/* Values for SAMPLE_RATE */
static struct {
int rate;
u16 val;
} wm8961_srate[] = {
{ 48000, 0 },
{ 44100, 0 },
{ 32000, 1 },
{ 22050, 2 },
{ 24000, 2 },
{ 16000, 3 },
{ 11250, 4 },
{ 12000, 4 },
{ 8000, 5 },
};
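/*
* Worked example (illustrative): with MCLK = 12.288MHz and fs = 48kHz,
* wm8961_hw_params() below picks SRATE value 0 from wm8961_srate[], then
* searches wm8961_clk_sys_ratio[] for the first ratio >= 12288000/48000
* = 256 and programs the { 256, 3 } entry into WM8961_CLOCKING_4.
*/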
static int wm8961_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct snd_soc_codec *codec = dai->codec;
struct wm8961_priv *wm8961 = snd_soc_codec_get_drvdata(codec);
int i, best, target, fs;
u16 reg;
fs = params_rate(params);
if (!wm8961->sysclk) {
dev_err(codec->dev, "MCLK has not been specified\n");
return -EINVAL;
}
/* Find the closest sample rate for the filters */
best = 0;
for (i = 0; i < ARRAY_SIZE(wm8961_srate); i++) {
if (abs(wm8961_srate[i].rate - fs) <
abs(wm8961_srate[best].rate - fs))
best = i;
}
reg = snd_soc_read(codec, WM8961_ADDITIONAL_CONTROL_3);
reg &= ~WM8961_SAMPLE_RATE_MASK;
reg |= wm8961_srate[best].val;
snd_soc_write(codec, WM8961_ADDITIONAL_CONTROL_3, reg);
dev_dbg(codec->dev, "Selected SRATE %dHz for %dHz\n",
wm8961_srate[best].rate, fs);
/* Select a CLK_SYS/fs ratio equal to or higher than required */
target = wm8961->sysclk / fs;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK && target < 64) {
dev_err(codec->dev,
"SYSCLK must be at least 64*fs for DAC\n");
return -EINVAL;
}
if (substream->stream == SNDRV_PCM_STREAM_CAPTURE && target < 256) {
dev_err(codec->dev,
"SYSCLK must be at least 256*fs for ADC\n");
return -EINVAL;
}
for (i = 0; i < ARRAY_SIZE(wm8961_clk_sys_ratio); i++) {
if (wm8961_clk_sys_ratio[i].ratio >= target)
break;
}
if (i == ARRAY_SIZE(wm8961_clk_sys_ratio)) {
dev_err(codec->dev, "Unable to generate CLK_SYS_RATE\n");
return -EINVAL;
}
dev_dbg(codec->dev, "Selected CLK_SYS_RATE of %d for %d/%d=%d\n",
wm8961_clk_sys_ratio[i].ratio, wm8961->sysclk, fs,
wm8961->sysclk / fs);
reg = snd_soc_read(codec, WM8961_CLOCKING_4);
reg &= ~WM8961_CLK_SYS_RATE_MASK;
reg |= wm8961_clk_sys_ratio[i].val << WM8961_CLK_SYS_RATE_SHIFT;
snd_soc_write(codec, WM8961_CLOCKING_4, reg);
reg = snd_soc_read(codec, WM8961_AUDIO_INTERFACE_0);
reg &= ~WM8961_WL_MASK;
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
break;
case SNDRV_PCM_FORMAT_S20_3LE:
reg |= 1 << WM8961_WL_SHIFT;
break;
case SNDRV_PCM_FORMAT_S24_LE:
reg |= 2 << WM8961_WL_SHIFT;
break;
case SNDRV_PCM_FORMAT_S32_LE:
reg |= 3 << WM8961_WL_SHIFT;
break;
default:
return -EINVAL;
}
snd_soc_write(codec, WM8961_AUDIO_INTERFACE_0, reg);
/* Sloping stop-band filter is recommended for <= 24kHz */
reg = snd_soc_read(codec, WM8961_ADC_DAC_CONTROL_2);
if (fs <= 24000)
reg |= WM8961_DACSLOPE;
else
reg &= ~WM8961_DACSLOPE;
snd_soc_write(codec, WM8961_ADC_DAC_CONTROL_2, reg);
return 0;
}
static int wm8961_set_sysclk(struct snd_soc_dai *dai, int clk_id,
unsigned int freq,
int dir)
{
struct snd_soc_codec *codec = dai->codec;
struct wm8961_priv *wm8961 = snd_soc_codec_get_drvdata(codec);
u16 reg = snd_soc_read(codec, WM8961_CLOCKING1);
if (freq > 33000000) {
dev_err(codec->dev, "MCLK must be <33MHz\n");
return -EINVAL;
}
if (freq > 16500000) {
dev_dbg(codec->dev, "Using MCLK/2 for %dHz MCLK\n", freq);
reg |= WM8961_MCLKDIV;
freq /= 2;
} else {
dev_dbg(codec->dev, "Using MCLK/1 for %dHz MCLK\n", freq);
reg &= ~WM8961_MCLKDIV;
}
snd_soc_write(codec, WM8961_CLOCKING1, reg);
wm8961->sysclk = freq;
return 0;
}
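/*
* Usage sketch (illustrative): a machine driver would normally configure
* MCLK from its hw_params callback; the clock id 0 (ignored by this
* driver) and the 12.288MHz rate are assumed example values.
*
* ret = snd_soc_dai_set_sysclk(codec_dai, 0, 12288000, SND_SOC_CLOCK_IN);
* if (ret < 0)
* return ret;
*/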
static int wm8961_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
struct snd_soc_codec *codec = dai->codec;
u16 aif = snd_soc_read(codec, WM8961_AUDIO_INTERFACE_0);
aif &= ~(WM8961_BCLKINV | WM8961_LRP |
WM8961_MS | WM8961_FORMAT_MASK);
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBM_CFM:
aif |= WM8961_MS;
break;
case SND_SOC_DAIFMT_CBS_CFS:
break;
default:
return -EINVAL;
}
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_RIGHT_J:
break;
case SND_SOC_DAIFMT_LEFT_J:
aif |= 1;
break;
case SND_SOC_DAIFMT_I2S:
aif |= 2;
break;
case SND_SOC_DAIFMT_DSP_B:
aif |= WM8961_LRP;
/* fall through: DSP mode B is mode A with inverted frame polarity */
case SND_SOC_DAIFMT_DSP_A:
aif |= 3;
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
case SND_SOC_DAIFMT_NB_NF:
case SND_SOC_DAIFMT_IB_NF:
break;
default:
return -EINVAL;
}
break;
default:
return -EINVAL;
}
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
case SND_SOC_DAIFMT_NB_NF:
break;
case SND_SOC_DAIFMT_NB_IF:
aif |= WM8961_LRP;
break;
case SND_SOC_DAIFMT_IB_NF:
aif |= WM8961_BCLKINV;
break;
case SND_SOC_DAIFMT_IB_IF:
aif |= WM8961_BCLKINV | WM8961_LRP;
break;
default:
return -EINVAL;
}
return snd_soc_write(codec, WM8961_AUDIO_INTERFACE_0, aif);
}
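/*
* Usage sketch (illustrative): configuring this CODEC as an I2S slave
* with normal clock polarity from a machine driver would look like:
*
* ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
* SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
*/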
static int wm8961_set_tristate(struct snd_soc_dai *dai, int tristate)
{
struct snd_soc_codec *codec = dai->codec;
u16 reg = snd_soc_read(codec, WM8961_ADDITIONAL_CONTROL_2);
if (tristate)
reg |= WM8961_TRIS;
else
reg &= ~WM8961_TRIS;
return snd_soc_write(codec, WM8961_ADDITIONAL_CONTROL_2, reg);
}
static int wm8961_digital_mute(struct snd_soc_dai *dai, int mute)
{
struct snd_soc_codec *codec = dai->codec;
u16 reg = snd_soc_read(codec, WM8961_ADC_DAC_CONTROL_1);
if (mute)
reg |= WM8961_DACMU;
else
reg &= ~WM8961_DACMU;
msleep(17);
return snd_soc_write(codec, WM8961_ADC_DAC_CONTROL_1, reg);
}
static int wm8961_set_clkdiv(struct snd_soc_dai *dai, int div_id, int div)
{
struct snd_soc_codec *codec = dai->codec;
u16 reg;
switch (div_id) {
case WM8961_BCLK:
reg = snd_soc_read(codec, WM8961_CLOCKING2);
reg &= ~WM8961_BCLKDIV_MASK;
reg |= div;
snd_soc_write(codec, WM8961_CLOCKING2, reg);
break;
case WM8961_LRCLK:
reg = snd_soc_read(codec, WM8961_AUDIO_INTERFACE_2);
reg &= ~WM8961_LRCLK_RATE_MASK;
reg |= div;
snd_soc_write(codec, WM8961_AUDIO_INTERFACE_2, reg);
break;
default:
return -EINVAL;
}
return 0;
}
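/*
* Usage sketch (illustrative): the div argument is ORed straight into the
* register after masking, so callers pass a pre-shifted field value, e.g.:
*
* snd_soc_dai_set_clkdiv(codec_dai, WM8961_BCLK, div);
*
* where div is one of the BCLKDIV field values; the exact constants are
* defined in wm8961.h and are not reproduced here.
*/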
static int wm8961_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
u16 reg;
/* This is all slightly unusual since we have no bypass paths
* and the output amplifier structure means we can just slam
* the biases straight up rather than having to ramp them
* slowly.
*/
switch (level) {
case SND_SOC_BIAS_ON:
break;
case SND_SOC_BIAS_PREPARE:
if (codec->bias_level == SND_SOC_BIAS_STANDBY) {
/* Enable bias generation */
reg = snd_soc_read(codec, WM8961_ANTI_POP);
reg |= WM8961_BUFIOEN | WM8961_BUFDCOPEN;
snd_soc_write(codec, WM8961_ANTI_POP, reg);
/* VMID=2*50k, VREF */
reg = snd_soc_read(codec, WM8961_PWR_MGMT_1);
reg &= ~WM8961_VMIDSEL_MASK;
reg |= (1 << WM8961_VMIDSEL_SHIFT) | WM8961_VREF;
snd_soc_write(codec, WM8961_PWR_MGMT_1, reg);
}
break;
case SND_SOC_BIAS_STANDBY:
if (codec->bias_level == SND_SOC_BIAS_PREPARE) {
/* VREF off */
reg = snd_soc_read(codec, WM8961_PWR_MGMT_1);
reg &= ~WM8961_VREF;
snd_soc_write(codec, WM8961_PWR_MGMT_1, reg);
/* Bias generation off */
reg = snd_soc_read(codec, WM8961_ANTI_POP);
reg &= ~(WM8961_BUFIOEN | WM8961_BUFDCOPEN);
snd_soc_write(codec, WM8961_ANTI_POP, reg);
/* VMID off */
reg = snd_soc_read(codec, WM8961_PWR_MGMT_1);
reg &= ~WM8961_VMIDSEL_MASK;
snd_soc_write(codec, WM8961_PWR_MGMT_1, reg);
}
break;
case SND_SOC_BIAS_OFF:
break;
}
codec->bias_level = level;
return 0;
}
#define WM8961_RATES SNDRV_PCM_RATE_8000_48000
#define WM8961_FORMATS \
(SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
SNDRV_PCM_FMTBIT_S24_LE)
static struct snd_soc_dai_ops wm8961_dai_ops = {
.hw_params = wm8961_hw_params,
.set_sysclk = wm8961_set_sysclk,
.set_fmt = wm8961_set_fmt,
.digital_mute = wm8961_digital_mute,
.set_tristate = wm8961_set_tristate,
.set_clkdiv = wm8961_set_clkdiv,
};
struct snd_soc_dai wm8961_dai = {
.name = "WM8961",
.playback = {
.stream_name = "HiFi Playback",
.channels_min = 1,
.channels_max = 2,
.rates = WM8961_RATES,
.formats = WM8961_FORMATS,},
.capture = {
.stream_name = "HiFi Capture",
.channels_min = 1,
.channels_max = 2,
.rates = WM8961_RATES,
.formats = WM8961_FORMATS,},
.ops = &wm8961_dai_ops,
};
EXPORT_SYMBOL_GPL(wm8961_dai);
static struct snd_soc_codec *wm8961_codec;
static int wm8961_probe(struct platform_device *pdev)
{
struct snd_soc_device *socdev = platform_get_drvdata(pdev);
struct snd_soc_codec *codec;
int ret = 0;
if (wm8961_codec == NULL) {
dev_err(&pdev->dev, "Codec device not registered\n");
return -ENODEV;
}
socdev->card->codec = wm8961_codec;
codec = wm8961_codec;
/* register pcms */
ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
if (ret < 0) {
dev_err(codec->dev, "failed to create pcms: %d\n", ret);
goto pcm_err;
}
snd_soc_add_controls(codec, wm8961_snd_controls,
ARRAY_SIZE(wm8961_snd_controls));
snd_soc_dapm_new_controls(codec, wm8961_dapm_widgets,
ARRAY_SIZE(wm8961_dapm_widgets));
snd_soc_dapm_add_routes(codec, audio_paths, ARRAY_SIZE(audio_paths));
return ret;
pcm_err:
return ret;
}
static int wm8961_remove(struct platform_device *pdev)
{
struct snd_soc_device *socdev = platform_get_drvdata(pdev);
snd_soc_free_pcms(socdev);
snd_soc_dapm_free(socdev);
return 0;
}
#ifdef CONFIG_PM
static int wm8961_suspend(struct platform_device *pdev, pm_message_t state)
{
struct snd_soc_device *socdev = platform_get_drvdata(pdev);
struct snd_soc_codec *codec = socdev->card->codec;
wm8961_set_bias_level(codec, SND_SOC_BIAS_OFF);
return 0;
}
static int wm8961_resume(struct platform_device *pdev)
{
struct snd_soc_device *socdev = platform_get_drvdata(pdev);
struct snd_soc_codec *codec = socdev->card->codec;
u16 *reg_cache = codec->reg_cache;
int i;
for (i = 0; i < codec->reg_cache_size; i++) {
if (reg_cache[i] == wm8961_reg_defaults[i])
continue;
if (i == WM8961_SOFTWARE_RESET)
continue;
snd_soc_write(codec, i, reg_cache[i]);
}
wm8961_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
return 0;
}
#else
#define wm8961_suspend NULL
#define wm8961_resume NULL
#endif
struct snd_soc_codec_device soc_codec_dev_wm8961 = {
.probe = wm8961_probe,
.remove = wm8961_remove,
.suspend = wm8961_suspend,
.resume = wm8961_resume,
};
EXPORT_SYMBOL_GPL(soc_codec_dev_wm8961);
static int wm8961_register(struct wm8961_priv *wm8961)
{
struct snd_soc_codec *codec = &wm8961->codec;
int ret;
u16 reg;
if (wm8961_codec) {
dev_err(codec->dev, "Another WM8961 is registered\n");
ret = -EINVAL;
goto err;
}
mutex_init(&codec->mutex);
INIT_LIST_HEAD(&codec->dapm_widgets);
INIT_LIST_HEAD(&codec->dapm_paths);
snd_soc_codec_set_drvdata(codec, wm8961);
codec->name = "WM8961";
codec->owner = THIS_MODULE;
codec->dai = &wm8961_dai;
codec->num_dai = 1;
codec->reg_cache_size = ARRAY_SIZE(wm8961->reg_cache);
codec->reg_cache = &wm8961->reg_cache;
codec->bias_level = SND_SOC_BIAS_OFF;
codec->set_bias_level = wm8961_set_bias_level;
codec->volatile_register = wm8961_volatile_register;
memcpy(codec->reg_cache, wm8961_reg_defaults,
sizeof(wm8961_reg_defaults));
ret = snd_soc_codec_set_cache_io(codec, 8, 16, SND_SOC_I2C);
if (ret != 0) {
dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
goto err;
}
reg = snd_soc_read(codec, WM8961_SOFTWARE_RESET);
if (reg != 0x1801) {
dev_err(codec->dev, "Device is not a WM8961: ID=0x%x\n", reg);
ret = -EINVAL;
goto err;
}
/* This isn't volatile - readback doesn't correspond to write */
reg = codec->hw_read(codec, WM8961_RIGHT_INPUT_VOLUME);
dev_info(codec->dev, "WM8961 family %d revision %c\n",
(reg & WM8961_DEVICE_ID_MASK) >> WM8961_DEVICE_ID_SHIFT,
((reg & WM8961_CHIP_REV_MASK) >> WM8961_CHIP_REV_SHIFT)
+ 'A');
ret = wm8961_reset(codec);
if (ret < 0) {
dev_err(codec->dev, "Failed to issue reset\n");
goto err;
}
/* Enable class W */
reg = snd_soc_read(codec, WM8961_CHARGE_PUMP_B);
reg |= WM8961_CP_DYN_PWR_MASK;
snd_soc_write(codec, WM8961_CHARGE_PUMP_B, reg);
/* Latch volume update bits (right channel only, we always
* write both out) and default ZC on. */
reg = snd_soc_read(codec, WM8961_ROUT1_VOLUME);
snd_soc_write(codec, WM8961_ROUT1_VOLUME,
reg | WM8961_LO1ZC | WM8961_OUT1VU);
snd_soc_write(codec, WM8961_LOUT1_VOLUME, reg | WM8961_LO1ZC);
reg = snd_soc_read(codec, WM8961_ROUT2_VOLUME);
snd_soc_write(codec, WM8961_ROUT2_VOLUME,
reg | WM8961_SPKRZC | WM8961_SPKVU);
snd_soc_write(codec, WM8961_LOUT2_VOLUME, reg | WM8961_SPKLZC);
reg = snd_soc_read(codec, WM8961_RIGHT_ADC_VOLUME);
snd_soc_write(codec, WM8961_RIGHT_ADC_VOLUME, reg | WM8961_ADCVU);
reg = snd_soc_read(codec, WM8961_RIGHT_INPUT_VOLUME);
snd_soc_write(codec, WM8961_RIGHT_INPUT_VOLUME, reg | WM8961_IPVU);
/* Use soft mute by default */
reg = snd_soc_read(codec, WM8961_ADC_DAC_CONTROL_2);
reg |= WM8961_DACSMM;
snd_soc_write(codec, WM8961_ADC_DAC_CONTROL_2, reg);
/* Use automatic clocking mode by default; for now this is all
* we support.
*/
reg = snd_soc_read(codec, WM8961_CLOCKING_3);
reg &= ~WM8961_MANUAL_MODE;
snd_soc_write(codec, WM8961_CLOCKING_3, reg);
wm8961_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
wm8961_dai.dev = codec->dev;
wm8961_codec = codec;
ret = snd_soc_register_codec(codec);
if (ret != 0) {
dev_err(codec->dev, "Failed to register codec: %d\n", ret);
return ret;
}
ret = snd_soc_register_dai(&wm8961_dai);
if (ret != 0) {
dev_err(codec->dev, "Failed to register DAI: %d\n", ret);
snd_soc_unregister_codec(codec);
return ret;
}
return 0;
err:
kfree(wm8961);
return ret;
}
static void wm8961_unregister(struct wm8961_priv *wm8961)
{
wm8961_set_bias_level(&wm8961->codec, SND_SOC_BIAS_OFF);
snd_soc_unregister_dai(&wm8961_dai);
snd_soc_unregister_codec(&wm8961->codec);
kfree(wm8961);
wm8961_codec = NULL;
}
static __devinit int wm8961_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct wm8961_priv *wm8961;
struct snd_soc_codec *codec;
wm8961 = kzalloc(sizeof(struct wm8961_priv), GFP_KERNEL);
if (wm8961 == NULL)
return -ENOMEM;
codec = &wm8961->codec;
i2c_set_clientdata(i2c, wm8961);
codec->control_data = i2c;
codec->dev = &i2c->dev;
return wm8961_register(wm8961);
}
static __devexit int wm8961_i2c_remove(struct i2c_client *client)
{
struct wm8961_priv *wm8961 = i2c_get_clientdata(client);
wm8961_unregister(wm8961);
return 0;
}
static const struct i2c_device_id wm8961_i2c_id[] = {
{ "wm8961", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, wm8961_i2c_id);
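/*
* Usage sketch (illustrative): board code would normally instantiate the
* device through I2C board info at arch init time; the bus number 1 and
* the 0x4a address below are assumed example values.
*
* static struct i2c_board_info wm8961_info __initdata = {
* I2C_BOARD_INFO("wm8961", 0x4a),
* };
* i2c_register_board_info(1, &wm8961_info, 1);
*/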
static struct i2c_driver wm8961_i2c_driver = {
.driver = {
.name = "wm8961",
.owner = THIS_MODULE,
},
.probe = wm8961_i2c_probe,
.remove = __devexit_p(wm8961_i2c_remove),
.id_table = wm8961_i2c_id,
};
static int __init wm8961_modinit(void)
{
int ret;
ret = i2c_add_driver(&wm8961_i2c_driver);
if (ret != 0) {
printk(KERN_ERR "Failed to register WM8961 I2C driver: %d\n",
ret);
}
return ret;
}
module_init(wm8961_modinit);
static void __exit wm8961_exit(void)
{
i2c_del_driver(&wm8961_i2c_driver);
}
module_exit(wm8961_exit);
MODULE_DESCRIPTION("ASoC WM8961 driver");
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
vk2rq/linux-stable | drivers/gpu/drm/radeon/r300_cmdbuf.c | 514 | 32051 | /* r300_cmdbuf.c -- Command buffer emission for R300 -*- linux-c -*-
*
* Copyright (C) The Weather Channel, Inc. 2002.
* Copyright (C) 2004 Nicolai Haehnle.
* All Rights Reserved.
*
* The Weather Channel (TM) funded Tungsten Graphics to develop the
* initial release of the Radeon 8500 driver under the XFree86 license.
* This notice must be preserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Nicolai Haehnle <prefect_@gmx.net>
*/
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon_drv.h"
#include "r300_reg.h"
#include <asm/unaligned.h>
#define R300_SIMULTANEOUS_CLIPRECTS 4
/* Values for R300_RE_CLIPRECT_CNTL depending on the number of cliprects
*/
static const int r300_cliprect_cntl[4] = {
0xAAAA,
0xEEEE,
0xFEFE,
0xFFFE
};
/**
* Emit up to R300_SIMULTANEOUS_CLIPRECTS cliprects from the given command
* buffer, starting with index n.
*/
static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
drm_radeon_kcmd_buffer_t *cmdbuf, int n)
{
struct drm_clip_rect box;
int nr;
int i;
RING_LOCALS;
nr = cmdbuf->nbox - n;
if (nr > R300_SIMULTANEOUS_CLIPRECTS)
nr = R300_SIMULTANEOUS_CLIPRECTS;
DRM_DEBUG("%i cliprects\n", nr);
if (nr) {
BEGIN_RING(6 + nr * 2);
OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1));
for (i = 0; i < nr; ++i) {
if (DRM_COPY_FROM_USER_UNCHECKED
(&box, &cmdbuf->boxes[n + i], sizeof(box))) {
DRM_ERROR("copy cliprect faulted\n");
return -EFAULT;
}
box.x2--; /* Hardware expects inclusive bottom-right corner */
box.y2--;
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
box.x1 = (box.x1) &
R300_CLIPRECT_MASK;
box.y1 = (box.y1) &
R300_CLIPRECT_MASK;
box.x2 = (box.x2) &
R300_CLIPRECT_MASK;
box.y2 = (box.y2) &
R300_CLIPRECT_MASK;
} else {
box.x1 = (box.x1 + R300_CLIPRECT_OFFSET) &
R300_CLIPRECT_MASK;
box.y1 = (box.y1 + R300_CLIPRECT_OFFSET) &
R300_CLIPRECT_MASK;
box.x2 = (box.x2 + R300_CLIPRECT_OFFSET) &
R300_CLIPRECT_MASK;
box.y2 = (box.y2 + R300_CLIPRECT_OFFSET) &
R300_CLIPRECT_MASK;
}
OUT_RING((box.x1 << R300_CLIPRECT_X_SHIFT) |
(box.y1 << R300_CLIPRECT_Y_SHIFT));
OUT_RING((box.x2 << R300_CLIPRECT_X_SHIFT) |
(box.y2 << R300_CLIPRECT_Y_SHIFT));
}
OUT_RING_REG(R300_RE_CLIPRECT_CNTL, r300_cliprect_cntl[nr - 1]);
/* TODO/SECURITY: Force scissors to a safe value, otherwise the
* client might be able to trample over memory.
* The impact should be very limited, but I'd rather be safe than
* sorry.
*/
OUT_RING(CP_PACKET0(R300_RE_SCISSORS_TL, 1));
OUT_RING(0);
OUT_RING(R300_SCISSORS_X_MASK | R300_SCISSORS_Y_MASK);
ADVANCE_RING();
} else {
/* Why we allow zero cliprect rendering:
* There are some commands in a command buffer that must be submitted
* even when there are no cliprects, e.g. DMA buffer discard
* or state setting (though state setting could be avoided by
* simulating a loss of context).
*
* Now since the cmdbuf interface is so chaotic right now (and is
* bound to remain that way for a bit until things settle down),
* it is basically impossible to filter out the commands that are
* necessary and those that aren't.
*
* So I choose the safe way and don't do any filtering at all;
* instead, I simply set up the engine so that all rendering
* can't produce any fragments.
*/
BEGIN_RING(2);
OUT_RING_REG(R300_RE_CLIPRECT_CNTL, 0);
ADVANCE_RING();
}
/* flush the cache and wait for idle-clean after a cliprect change */
BEGIN_RING(2);
OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
OUT_RING(R300_RB3D_DC_FLUSH);
ADVANCE_RING();
BEGIN_RING(2);
OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
ADVANCE_RING();
/* set flush flag */
dev_priv->track_flush |= RADEON_FLUSH_EMITED;
return 0;
}
static u8 r300_reg_flags[0x10000 >> 2];
void r300_init_reg_flags(struct drm_device *dev)
{
int i;
drm_radeon_private_t *dev_priv = dev->dev_private;
memset(r300_reg_flags, 0, 0x10000 >> 2);
#define ADD_RANGE_MARK(reg, count,mark) \
for(i=((reg)>>2);i<((reg)>>2)+(count);i++)\
r300_reg_flags[i]|=(mark);
#define MARK_SAFE 1
#define MARK_CHECK_OFFSET 2
#define ADD_RANGE(reg, count) ADD_RANGE_MARK(reg, count, MARK_SAFE)
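/*
* For example, ADD_RANGE(R300_VAP_CNTL, 1) expands to a loop that sets
* r300_reg_flags[R300_VAP_CNTL >> 2] |= MARK_SAFE, whitelisting that one
* register; ranges added with MARK_CHECK_OFFSET instead force an offset
* range check on every value written (see
* r300_emit_carefully_checked_packet0 below).
*/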
/* these match cmducs() command in r300_driver/r300/r300_cmdbuf.c */
ADD_RANGE(R300_SE_VPORT_XSCALE, 6);
ADD_RANGE(R300_VAP_CNTL, 1);
ADD_RANGE(R300_SE_VTE_CNTL, 2);
ADD_RANGE(0x2134, 2);
ADD_RANGE(R300_VAP_CNTL_STATUS, 1);
ADD_RANGE(R300_VAP_INPUT_CNTL_0, 2);
ADD_RANGE(0x21DC, 1);
ADD_RANGE(R300_VAP_UNKNOWN_221C, 1);
ADD_RANGE(R300_VAP_CLIP_X_0, 4);
ADD_RANGE(R300_VAP_PVS_STATE_FLUSH_REG, 1);
ADD_RANGE(R300_VAP_UNKNOWN_2288, 1);
ADD_RANGE(R300_VAP_OUTPUT_VTX_FMT_0, 2);
ADD_RANGE(R300_VAP_PVS_CNTL_1, 3);
ADD_RANGE(R300_GB_ENABLE, 1);
ADD_RANGE(R300_GB_MSPOS0, 5);
ADD_RANGE(R300_TX_INVALTAGS, 1);
ADD_RANGE(R300_TX_ENABLE, 1);
ADD_RANGE(0x4200, 4);
ADD_RANGE(0x4214, 1);
ADD_RANGE(R300_RE_POINTSIZE, 1);
ADD_RANGE(0x4230, 3);
ADD_RANGE(R300_RE_LINE_CNT, 1);
ADD_RANGE(R300_RE_UNK4238, 1);
ADD_RANGE(0x4260, 3);
ADD_RANGE(R300_RE_SHADE, 4);
ADD_RANGE(R300_RE_POLYGON_MODE, 5);
ADD_RANGE(R300_RE_ZBIAS_CNTL, 1);
ADD_RANGE(R300_RE_ZBIAS_T_FACTOR, 4);
ADD_RANGE(R300_RE_OCCLUSION_CNTL, 1);
ADD_RANGE(R300_RE_CULL_CNTL, 1);
ADD_RANGE(0x42C0, 2);
ADD_RANGE(R300_RS_CNTL_0, 2);
ADD_RANGE(R300_SU_REG_DEST, 1);
if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530)
ADD_RANGE(RV530_FG_ZBREG_DEST, 1);
ADD_RANGE(R300_SC_HYPERZ, 2);
ADD_RANGE(0x43E8, 1);
ADD_RANGE(0x46A4, 5);
ADD_RANGE(R300_RE_FOG_STATE, 1);
ADD_RANGE(R300_FOG_COLOR_R, 3);
ADD_RANGE(R300_PP_ALPHA_TEST, 2);
ADD_RANGE(0x4BD8, 1);
ADD_RANGE(R300_PFS_PARAM_0_X, 64);
ADD_RANGE(0x4E00, 1);
ADD_RANGE(R300_RB3D_CBLEND, 2);
ADD_RANGE(R300_RB3D_COLORMASK, 1);
ADD_RANGE(R300_RB3D_BLEND_COLOR, 3);
ADD_RANGE_MARK(R300_RB3D_COLOROFFSET0, 1, MARK_CHECK_OFFSET); /* check offset */
ADD_RANGE(R300_RB3D_COLORPITCH0, 1);
ADD_RANGE(0x4E50, 9);
ADD_RANGE(0x4E88, 1);
ADD_RANGE(0x4EA0, 2);
ADD_RANGE(R300_ZB_CNTL, 3);
ADD_RANGE(R300_ZB_FORMAT, 4);
ADD_RANGE_MARK(R300_ZB_DEPTHOFFSET, 1, MARK_CHECK_OFFSET); /* check offset */
ADD_RANGE(R300_ZB_DEPTHPITCH, 1);
ADD_RANGE(R300_ZB_DEPTHCLEARVALUE, 1);
ADD_RANGE(R300_ZB_ZMASK_OFFSET, 13);
ADD_RANGE(R300_ZB_ZPASS_DATA, 2); /* ZB_ZPASS_DATA, ZB_ZPASS_ADDR */
ADD_RANGE(R300_TX_FILTER_0, 16);
ADD_RANGE(R300_TX_FILTER1_0, 16);
ADD_RANGE(R300_TX_SIZE_0, 16);
ADD_RANGE(R300_TX_FORMAT_0, 16);
ADD_RANGE(R300_TX_PITCH_0, 16);
/* Texture offset is dangerous and needs more checking */
ADD_RANGE_MARK(R300_TX_OFFSET_0, 16, MARK_CHECK_OFFSET);
ADD_RANGE(R300_TX_CHROMA_KEY_0, 16);
ADD_RANGE(R300_TX_BORDER_COLOR_0, 16);
/* Sporadic registers used as primitives are emitted */
ADD_RANGE(R300_ZB_ZCACHE_CTLSTAT, 1);
ADD_RANGE(R300_RB3D_DSTCACHE_CTLSTAT, 1);
ADD_RANGE(R300_VAP_INPUT_ROUTE_0_0, 8);
ADD_RANGE(R300_VAP_INPUT_ROUTE_1_0, 8);
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
ADD_RANGE(R500_VAP_INDEX_OFFSET, 1);
ADD_RANGE(R500_US_CONFIG, 2);
ADD_RANGE(R500_US_CODE_ADDR, 3);
ADD_RANGE(R500_US_FC_CTRL, 1);
ADD_RANGE(R500_RS_IP_0, 16);
ADD_RANGE(R500_RS_INST_0, 16);
ADD_RANGE(R500_RB3D_COLOR_CLEAR_VALUE_AR, 2);
ADD_RANGE(R500_RB3D_CONSTANT_COLOR_AR, 2);
ADD_RANGE(R500_ZB_FIFO_SIZE, 2);
} else {
ADD_RANGE(R300_PFS_CNTL_0, 3);
ADD_RANGE(R300_PFS_NODE_0, 4);
ADD_RANGE(R300_PFS_TEXI_0, 64);
ADD_RANGE(R300_PFS_INSTR0_0, 64);
ADD_RANGE(R300_PFS_INSTR1_0, 64);
ADD_RANGE(R300_PFS_INSTR2_0, 64);
ADD_RANGE(R300_PFS_INSTR3_0, 64);
ADD_RANGE(R300_RS_INTERP_0, 8);
ADD_RANGE(R300_RS_ROUTE_0, 8);
}
}
static __inline__ int r300_check_range(unsigned reg, int count)
{
int i;
if (reg & ~0xffff)
return -1;
for (i = (reg >> 2); i < (reg >> 2) + count; i++)
if (r300_reg_flags[i] != MARK_SAFE)
return 1;
return 0;
}
static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t *
dev_priv,
drm_radeon_kcmd_buffer_t
* cmdbuf,
drm_r300_cmd_header_t
header)
{
int reg;
int sz;
int i;
int values[64];
RING_LOCALS;
sz = header.packet0.count;
reg = (header.packet0.reghi << 8) | header.packet0.reglo;
if ((sz > 64) || (sz < 0)) {
DRM_ERROR
("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n",
reg, sz);
return -EINVAL;
}
for (i = 0; i < sz; i++) {
values[i] = ((int *)cmdbuf->buf)[i];
switch (r300_reg_flags[(reg >> 2) + i]) {
case MARK_SAFE:
break;
case MARK_CHECK_OFFSET:
if (!radeon_check_offset(dev_priv, (u32) values[i])) {
DRM_ERROR
("Offset failed range check (reg=%04x sz=%d)\n",
reg, sz);
return -EINVAL;
}
break;
default:
DRM_ERROR("Register %04x failed check as flag=%02x\n",
reg + i * 4, r300_reg_flags[(reg >> 2) + i]);
return -EINVAL;
}
}
BEGIN_RING(1 + sz);
OUT_RING(CP_PACKET0(reg, sz - 1));
OUT_RING_TABLE(values, sz);
ADVANCE_RING();
cmdbuf->buf += sz * 4;
cmdbuf->bufsz -= sz * 4;
return 0;
}
/**
* Emits a packet0 setting arbitrary registers.
* Called by r300_do_cp_cmdbuf.
*
* Note that checks are performed on contents and addresses of the registers
*/
static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv,
drm_radeon_kcmd_buffer_t *cmdbuf,
drm_r300_cmd_header_t header)
{
int reg;
int sz;
RING_LOCALS;
sz = header.packet0.count;
reg = (header.packet0.reghi << 8) | header.packet0.reglo;
if (!sz)
return 0;
if (sz * 4 > cmdbuf->bufsz)
return -EINVAL;
if (reg + sz * 4 >= 0x10000) {
DRM_ERROR("No such registers in hardware reg=%04x sz=%d\n", reg,
sz);
return -EINVAL;
}
if (r300_check_range(reg, sz)) {
/* go and check everything */
return r300_emit_carefully_checked_packet0(dev_priv, cmdbuf,
header);
}
/* the rest of the data is safe to emit, whatever the values the user passed */
BEGIN_RING(1 + sz);
OUT_RING(CP_PACKET0(reg, sz - 1));
OUT_RING_TABLE((int *)cmdbuf->buf, sz);
ADVANCE_RING();
cmdbuf->buf += sz * 4;
cmdbuf->bufsz -= sz * 4;
return 0;
}
/**
* Uploads user-supplied vertex program instructions or parameters onto
* the graphics card.
* Called by r300_do_cp_cmdbuf.
*/
static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
drm_radeon_kcmd_buffer_t *cmdbuf,
drm_r300_cmd_header_t header)
{
int sz;
int addr;
RING_LOCALS;
sz = header.vpu.count;
addr = (header.vpu.adrhi << 8) | header.vpu.adrlo;
if (!sz)
return 0;
if (sz * 16 > cmdbuf->bufsz)
return -EINVAL;
/* VAP is very sensitive so we purge cache before we program it
* and we also flush its state before & after */
BEGIN_RING(6);
OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
OUT_RING(R300_RB3D_DC_FLUSH);
OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
OUT_RING(CP_PACKET0(R300_VAP_PVS_STATE_FLUSH_REG, 0));
OUT_RING(0);
ADVANCE_RING();
/* set flush flag */
dev_priv->track_flush |= RADEON_FLUSH_EMITED;
BEGIN_RING(3 + sz * 4);
OUT_RING_REG(R300_VAP_PVS_UPLOAD_ADDRESS, addr);
OUT_RING(CP_PACKET0_TABLE(R300_VAP_PVS_UPLOAD_DATA, sz * 4 - 1));
OUT_RING_TABLE((int *)cmdbuf->buf, sz * 4);
ADVANCE_RING();
BEGIN_RING(2);
OUT_RING(CP_PACKET0(R300_VAP_PVS_STATE_FLUSH_REG, 0));
OUT_RING(0);
ADVANCE_RING();
cmdbuf->buf += sz * 16;
cmdbuf->bufsz -= sz * 16;
return 0;
}
/**
* Emit a clear packet from userspace.
* Called by r300_emit_packet3.
*/
static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
drm_radeon_kcmd_buffer_t *cmdbuf)
{
RING_LOCALS;
if (8 * 4 > cmdbuf->bufsz)
return -EINVAL;
BEGIN_RING(10);
OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 8));
OUT_RING(R300_PRIM_TYPE_POINT | R300_PRIM_WALK_RING |
(1 << R300_PRIM_NUM_VERTICES_SHIFT));
OUT_RING_TABLE((int *)cmdbuf->buf, 8);
ADVANCE_RING();
BEGIN_RING(4);
OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
OUT_RING(R300_RB3D_DC_FLUSH);
OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
ADVANCE_RING();
/* set flush flag */
dev_priv->track_flush |= RADEON_FLUSH_EMITED;
cmdbuf->buf += 8 * 4;
cmdbuf->bufsz -= 8 * 4;
return 0;
}
static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
drm_radeon_kcmd_buffer_t *cmdbuf,
u32 header)
{
int count, i, k;
#define MAX_ARRAY_PACKET 64
u32 payload[MAX_ARRAY_PACKET];
u32 narrays;
RING_LOCALS;
count = (header >> 16) & 0x3fff;
if ((count + 1) > MAX_ARRAY_PACKET) {
DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
count);
return -EINVAL;
}
memset(payload, 0, MAX_ARRAY_PACKET * 4);
memcpy(payload, cmdbuf->buf + 4, (count + 1) * 4);
/* carefully check packet contents */
narrays = payload[0];
k = 0;
i = 1;
while ((k < narrays) && (i < (count + 1))) {
i++; /* skip attribute field */
if (!radeon_check_offset(dev_priv, payload[i])) {
DRM_ERROR
("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
k, i);
return -EINVAL;
}
k++;
i++;
if (k == narrays)
break;
/* have one more to process, they come in pairs */
if (!radeon_check_offset(dev_priv, payload[i])) {
DRM_ERROR
("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
k, i);
return -EINVAL;
}
k++;
i++;
}
/* do the counts match what we expect ? */
if ((k != narrays) || (i != (count + 1))) {
DRM_ERROR
("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n",
k, i, narrays, count + 1);
return -EINVAL;
}
/* all clear, output packet */
BEGIN_RING(count + 2);
OUT_RING(header);
OUT_RING_TABLE(payload, count + 1);
ADVANCE_RING();
cmdbuf->buf += (count + 2) * 4;
cmdbuf->bufsz -= (count + 2) * 4;
return 0;
}
static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
drm_radeon_kcmd_buffer_t *cmdbuf)
{
u32 *cmd = (u32 *) cmdbuf->buf;
int count, ret;
RING_LOCALS;
count = (cmd[0] >> 16) & 0x3fff;
if (cmd[0] & 0x8000) {
u32 offset;
if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
| RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
offset = cmd[2] << 10;
ret = !radeon_check_offset(dev_priv, offset);
if (ret) {
DRM_ERROR("Invalid bitblt first offset is %08X\n", offset);
return -EINVAL;
}
}
if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
(cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
offset = cmd[3] << 10;
ret = !radeon_check_offset(dev_priv, offset);
if (ret) {
DRM_ERROR("Invalid bitblt second offset is %08X\n", offset);
return -EINVAL;
}
}
}
BEGIN_RING(count + 2);
OUT_RING(cmd[0]);
OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
ADVANCE_RING();
cmdbuf->buf += (count + 2) * 4;
cmdbuf->bufsz -= (count + 2) * 4;
return 0;
}
static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv,
drm_radeon_kcmd_buffer_t *cmdbuf)
{
u32 *cmd;
int count;
int expected_count;
RING_LOCALS;
cmd = (u32 *) cmdbuf->buf;
count = (cmd[0]>>16) & 0x3fff;
expected_count = cmd[1] >> 16;
if (!(cmd[1] & R300_VAP_VF_CNTL__INDEX_SIZE_32bit))
expected_count = (expected_count + 1) / 2;
if (count && count != expected_count) {
DRM_ERROR("3D_DRAW_INDX_2: packet size %i, expected %i\n",
count, expected_count);
return -EINVAL;
}
BEGIN_RING(count + 2);
OUT_RING(cmd[0]);
OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
ADVANCE_RING();
cmdbuf->buf += (count + 2) * 4;
cmdbuf->bufsz -= (count + 2) * 4;
if (!count) {
drm_r300_cmd_header_t header;
if (cmdbuf->bufsz < 4*4 + sizeof(header)) {
DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER, but stream is too short.\n");
return -EINVAL;
}
header.u = *(unsigned int *)cmdbuf->buf;
cmdbuf->buf += sizeof(header);
cmdbuf->bufsz -= sizeof(header);
cmd = (u32 *) cmdbuf->buf;
if (header.header.cmd_type != R300_CMD_PACKET3 ||
header.packet3.packet != R300_CMD_PACKET3_RAW ||
cmd[0] != CP_PACKET3(RADEON_CP_INDX_BUFFER, 2)) {
DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER.\n");
return -EINVAL;
}
if ((cmd[1] & 0x8000ffff) != 0x80000810) {
DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
return -EINVAL;
}
if (!radeon_check_offset(dev_priv, cmd[2])) {
DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
return -EINVAL;
}
if (cmd[3] != expected_count) {
DRM_ERROR("INDX_BUFFER: buffer size %i, expected %i\n",
cmd[3], expected_count);
return -EINVAL;
}
BEGIN_RING(4);
OUT_RING(cmd[0]);
OUT_RING_TABLE((int *)(cmdbuf->buf + 4), 3);
ADVANCE_RING();
cmdbuf->buf += 4 * 4;
cmdbuf->bufsz -= 4 * 4;
}
return 0;
}
static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
drm_radeon_kcmd_buffer_t *cmdbuf)
{
u32 header;
int count;
RING_LOCALS;
if (4 > cmdbuf->bufsz)
return -EINVAL;
/* Fixme !! This simply emits a packet without much checking.
We need to be smarter. */
/* obtain first word - actual packet3 header */
header = *(u32 *) cmdbuf->buf;
/* Is it packet 3 ? */
if ((header >> 30) != 0x3) {
DRM_ERROR("Not a packet3 header (0x%08x)\n", header);
return -EINVAL;
}
count = (header >> 16) & 0x3fff;
/* Check again now that we know how much data to expect */
if ((count + 2) * 4 > cmdbuf->bufsz) {
DRM_ERROR
("Expected packet3 of length %d but have only %d bytes left\n",
(count + 2) * 4, cmdbuf->bufsz);
return -EINVAL;
}
/* Is it a packet type we know about ? */
switch (header & 0xff00) {
case RADEON_3D_LOAD_VBPNTR: /* load vertex array pointers */
return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, header);
case RADEON_CNTL_BITBLT_MULTI:
return r300_emit_bitblt_multi(dev_priv, cmdbuf);
case RADEON_CP_INDX_BUFFER:
DRM_ERROR("packet3 INDX_BUFFER without preceding 3D_DRAW_INDX_2 is illegal.\n");
return -EINVAL;
case RADEON_CP_3D_DRAW_IMMD_2:
/* triggers drawing using in-packet vertex data */
case RADEON_CP_3D_DRAW_VBUF_2:
/* triggers drawing of vertex buffers setup elsewhere */
dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED |
RADEON_PURGE_EMITED);
break;
case RADEON_CP_3D_DRAW_INDX_2:
/* triggers drawing using indices to vertex buffer */
/* whenever we send vertex we clear flush & purge */
dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED |
RADEON_PURGE_EMITED);
return r300_emit_draw_indx_2(dev_priv, cmdbuf);
case RADEON_WAIT_FOR_IDLE:
case RADEON_CP_NOP:
/* these packets are safe */
break;
default:
DRM_ERROR("Unknown packet3 header (0x%08x)\n", header);
return -EINVAL;
}
BEGIN_RING(count + 2);
OUT_RING(header);
OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
ADVANCE_RING();
cmdbuf->buf += (count + 2) * 4;
cmdbuf->bufsz -= (count + 2) * 4;
return 0;
}
/**
* Emit a rendering packet3 from userspace.
* Called by r300_do_cp_cmdbuf.
*/
static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
drm_radeon_kcmd_buffer_t *cmdbuf,
drm_r300_cmd_header_t header)
{
int n;
int ret;
char *orig_buf = cmdbuf->buf;
int orig_bufsz = cmdbuf->bufsz;
/* This is a do-while-loop so that we run the interior at least once,
* even if cmdbuf->nbox is 0. Compare r300_emit_cliprects for rationale.
*/
n = 0;
do {
if (cmdbuf->nbox > R300_SIMULTANEOUS_CLIPRECTS) {
ret = r300_emit_cliprects(dev_priv, cmdbuf, n);
if (ret)
return ret;
cmdbuf->buf = orig_buf;
cmdbuf->bufsz = orig_bufsz;
}
switch (header.packet3.packet) {
case R300_CMD_PACKET3_CLEAR:
DRM_DEBUG("R300_CMD_PACKET3_CLEAR\n");
ret = r300_emit_clear(dev_priv, cmdbuf);
if (ret) {
DRM_ERROR("r300_emit_clear failed\n");
return ret;
}
break;
case R300_CMD_PACKET3_RAW:
DRM_DEBUG("R300_CMD_PACKET3_RAW\n");
ret = r300_emit_raw_packet3(dev_priv, cmdbuf);
if (ret) {
DRM_ERROR("r300_emit_raw_packet3 failed\n");
return ret;
}
break;
default:
DRM_ERROR("bad packet3 type %i at %p\n",
header.packet3.packet,
cmdbuf->buf - sizeof(header));
return -EINVAL;
}
n += R300_SIMULTANEOUS_CLIPRECTS;
} while (n < cmdbuf->nbox);
return 0;
}
/* Some of the R300 chips seem to be extremely touchy about the two registers
* that are configured in r300_pacify.
* Among the worst offenders is the R300 ND (0x4E44): When userspace
* sends a command buffer that contains only state setting commands and a
* vertex program/parameter upload sequence, this will eventually lead to a
* lockup, unless the sequence is bracketed by calls to r300_pacify.
* So we should take great care to *always* call r300_pacify before
* *anything* 3D related, and again afterwards. This is what the
* call bracket in r300_do_cp_cmdbuf is for.
*/
/**
* Emit the sequence to pacify R300.
*/
static __inline__ void r300_pacify(drm_radeon_private_t *dev_priv)
{
uint32_t cache_z, cache_3d, cache_2d;
RING_LOCALS;
cache_z = R300_ZC_FLUSH;
cache_2d = R300_RB2D_DC_FLUSH;
cache_3d = R300_RB3D_DC_FLUSH;
if (!(dev_priv->track_flush & RADEON_PURGE_EMITED)) {
/* we can purge; primitives were drawn since the last purge */
cache_z |= R300_ZC_FREE;
cache_2d |= R300_RB2D_DC_FREE;
cache_3d |= R300_RB3D_DC_FREE;
}
/* flush & purge zbuffer */
BEGIN_RING(2);
OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0));
OUT_RING(cache_z);
ADVANCE_RING();
/* flush & purge 3d */
BEGIN_RING(2);
OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
OUT_RING(cache_3d);
ADVANCE_RING();
/* flush & purge texture */
BEGIN_RING(2);
OUT_RING(CP_PACKET0(R300_TX_INVALTAGS, 0));
OUT_RING(0);
ADVANCE_RING();
/* FIXME: is this one really needed? */
BEGIN_RING(2);
OUT_RING(CP_PACKET0(R300_RB3D_AARESOLVE_CTL, 0));
OUT_RING(0);
ADVANCE_RING();
BEGIN_RING(2);
OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
ADVANCE_RING();
/* flush & purge 2d through E2 as RB2D will trigger lockup */
BEGIN_RING(4);
OUT_RING(CP_PACKET0(R300_DSTCACHE_CTLSTAT, 0));
OUT_RING(cache_2d);
OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
OUT_RING(RADEON_WAIT_2D_IDLECLEAN |
RADEON_WAIT_HOST_IDLECLEAN);
ADVANCE_RING();
/* set flush & purge flags */
dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED;
}
/**
* Called by r300_do_cp_cmdbuf to update the internal buffer age and state.
* The actual age emit is done by r300_do_cp_cmdbuf, which is why you must
* be careful about how this function is called.
*/
static void r300_discard_buffer(struct drm_device *dev, struct drm_master *master, struct drm_buf *buf)
{
drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
struct drm_radeon_master_private *master_priv = master->driver_priv;
buf_priv->age = ++master_priv->sarea_priv->last_dispatch;
buf->pending = 1;
buf->used = 0;
}
static void r300_cmd_wait(drm_radeon_private_t * dev_priv,
drm_r300_cmd_header_t header)
{
u32 wait_until;
RING_LOCALS;
if (!header.wait.flags)
return;
wait_until = 0;
switch(header.wait.flags) {
case R300_WAIT_2D:
wait_until = RADEON_WAIT_2D_IDLE;
break;
case R300_WAIT_3D:
wait_until = RADEON_WAIT_3D_IDLE;
break;
case R300_NEW_WAIT_2D_3D:
wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_3D_IDLE;
break;
case R300_NEW_WAIT_2D_2D_CLEAN:
wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_2D_IDLECLEAN;
break;
case R300_NEW_WAIT_3D_3D_CLEAN:
wait_until = RADEON_WAIT_3D_IDLE|RADEON_WAIT_3D_IDLECLEAN;
break;
case R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN:
wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_2D_IDLECLEAN;
wait_until |= RADEON_WAIT_3D_IDLE|RADEON_WAIT_3D_IDLECLEAN;
break;
default:
return;
}
BEGIN_RING(2);
OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
OUT_RING(wait_until);
ADVANCE_RING();
}
static int r300_scratch(drm_radeon_private_t *dev_priv,
drm_radeon_kcmd_buffer_t *cmdbuf,
drm_r300_cmd_header_t header)
{
u32 *ref_age_base;
u32 i, buf_idx, h_pending;
u64 ptr_addr;
RING_LOCALS;
if (cmdbuf->bufsz <
(sizeof(u64) + header.scratch.n_bufs * sizeof(buf_idx))) {
return -EINVAL;
}
if (header.scratch.reg >= 5) {
return -EINVAL;
}
dev_priv->scratch_ages[header.scratch.reg]++;
ptr_addr = get_unaligned((u64 *)cmdbuf->buf);
ref_age_base = (u32 *)(unsigned long)ptr_addr;
cmdbuf->buf += sizeof(u64);
cmdbuf->bufsz -= sizeof(u64);
for (i = 0; i < header.scratch.n_bufs; i++) {
buf_idx = *(u32 *)cmdbuf->buf;
buf_idx *= 2; /* 8 bytes per buf */
if (DRM_COPY_TO_USER(ref_age_base + buf_idx, &dev_priv->scratch_ages[header.scratch.reg], sizeof(u32))) {
return -EINVAL;
}
if (DRM_COPY_FROM_USER(&h_pending, ref_age_base + buf_idx + 1, sizeof(u32))) {
return -EINVAL;
}
if (h_pending == 0) {
return -EINVAL;
}
h_pending--;
if (DRM_COPY_TO_USER(ref_age_base + buf_idx + 1, &h_pending, sizeof(u32))) {
return -EINVAL;
}
cmdbuf->buf += sizeof(buf_idx);
cmdbuf->bufsz -= sizeof(buf_idx);
}
BEGIN_RING(2);
OUT_RING(CP_PACKET0(RADEON_SCRATCH_REG0 + header.scratch.reg * 4, 0));
OUT_RING(dev_priv->scratch_ages[header.scratch.reg]);
ADVANCE_RING();
return 0;
}
/**
* Uploads user-supplied vertex program instructions or parameters onto
* the graphics card.
* Called by r300_do_cp_cmdbuf.
*/
static inline int r300_emit_r500fp(drm_radeon_private_t *dev_priv,
drm_radeon_kcmd_buffer_t *cmdbuf,
drm_r300_cmd_header_t header)
{
int sz;
int addr;
int type;
int clamp;
int stride;
RING_LOCALS;
sz = header.r500fp.count;
/* address is 9 bits 0 - 8, bit 1 of flags is part of address */
addr = ((header.r500fp.adrhi_flags & 1) << 8) | header.r500fp.adrlo;
type = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_TYPE);
clamp = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_CLAMP);
addr |= (type << 16);
addr |= (clamp << 17);
stride = type ? 4 : 6;
DRM_DEBUG("r500fp %d %d type: %d\n", sz, addr, type);
if (!sz)
return 0;
if (sz * stride * 4 > cmdbuf->bufsz)
return -EINVAL;
BEGIN_RING(3 + sz * stride);
OUT_RING_REG(R500_GA_US_VECTOR_INDEX, addr);
OUT_RING(CP_PACKET0_TABLE(R500_GA_US_VECTOR_DATA, sz * stride - 1));
OUT_RING_TABLE((int *)cmdbuf->buf, sz * stride);
ADVANCE_RING();
cmdbuf->buf += sz * stride * 4;
cmdbuf->bufsz -= sz * stride * 4;
return 0;
}
/**
* Parses and validates a user-supplied command buffer and emits appropriate
* commands on the DMA ring buffer.
* Called by the ioctl handler function radeon_cp_cmdbuf.
*/
int r300_do_cp_cmdbuf(struct drm_device *dev,
struct drm_file *file_priv,
drm_radeon_kcmd_buffer_t *cmdbuf)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
struct drm_device_dma *dma = dev->dma;
struct drm_buf *buf = NULL;
int emit_dispatch_age = 0;
int ret = 0;
DRM_DEBUG("\n");
/* pacify */
r300_pacify(dev_priv);
if (cmdbuf->nbox <= R300_SIMULTANEOUS_CLIPRECTS) {
ret = r300_emit_cliprects(dev_priv, cmdbuf, 0);
if (ret)
goto cleanup;
}
while (cmdbuf->bufsz >= sizeof(drm_r300_cmd_header_t)) {
int idx;
drm_r300_cmd_header_t header;
header.u = *(unsigned int *)cmdbuf->buf;
cmdbuf->buf += sizeof(header);
cmdbuf->bufsz -= sizeof(header);
switch (header.header.cmd_type) {
case R300_CMD_PACKET0:
DRM_DEBUG("R300_CMD_PACKET0\n");
ret = r300_emit_packet0(dev_priv, cmdbuf, header);
if (ret) {
DRM_ERROR("r300_emit_packet0 failed\n");
goto cleanup;
}
break;
case R300_CMD_VPU:
DRM_DEBUG("R300_CMD_VPU\n");
ret = r300_emit_vpu(dev_priv, cmdbuf, header);
if (ret) {
DRM_ERROR("r300_emit_vpu failed\n");
goto cleanup;
}
break;
case R300_CMD_PACKET3:
DRM_DEBUG("R300_CMD_PACKET3\n");
ret = r300_emit_packet3(dev_priv, cmdbuf, header);
if (ret) {
DRM_ERROR("r300_emit_packet3 failed\n");
goto cleanup;
}
break;
case R300_CMD_END3D:
DRM_DEBUG("R300_CMD_END3D\n");
/* TODO:
Ideally the userspace driver should not need to issue this call,
i.e. the DRM driver should issue it automatically and prevent
lockups.
In practice, we do not understand why this call is needed or what
it does (beyond vague guesses that it has to do with cache
coherence), so the userspace driver issues it.
Once we are sure which usage patterns prevent lockups, the code
could be moved into the kernel and the userspace driver would no
longer need this command.
Note that issuing this command does not hurt anything except,
possibly, performance. */
r300_pacify(dev_priv);
break;
case R300_CMD_CP_DELAY:
/* simple enough, we can do it here */
DRM_DEBUG("R300_CMD_CP_DELAY\n");
{
int i;
RING_LOCALS;
BEGIN_RING(header.delay.count);
for (i = 0; i < header.delay.count; i++)
OUT_RING(RADEON_CP_PACKET2);
ADVANCE_RING();
}
break;
case R300_CMD_DMA_DISCARD:
DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
idx = header.dma.buf_idx;
if (idx < 0 || idx >= dma->buf_count) {
DRM_ERROR("buffer index %d (of %d max)\n",
idx, dma->buf_count - 1);
ret = -EINVAL;
goto cleanup;
}
buf = dma->buflist[idx];
if (buf->file_priv != file_priv || buf->pending) {
DRM_ERROR("bad buffer %p %p %d\n",
buf->file_priv, file_priv,
buf->pending);
ret = -EINVAL;
goto cleanup;
}
emit_dispatch_age = 1;
r300_discard_buffer(dev, file_priv->master, buf);
break;
case R300_CMD_WAIT:
DRM_DEBUG("R300_CMD_WAIT\n");
r300_cmd_wait(dev_priv, header);
break;
case R300_CMD_SCRATCH:
DRM_DEBUG("R300_CMD_SCRATCH\n");
ret = r300_scratch(dev_priv, cmdbuf, header);
if (ret) {
DRM_ERROR("r300_scratch failed\n");
goto cleanup;
}
break;
case R300_CMD_R500FP:
if ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_RV515) {
DRM_ERROR("Calling r500 command on r300 card\n");
ret = -EINVAL;
goto cleanup;
}
DRM_DEBUG("R300_CMD_R500FP\n");
ret = r300_emit_r500fp(dev_priv, cmdbuf, header);
if (ret) {
DRM_ERROR("r300_emit_r500fp failed\n");
goto cleanup;
}
break;
default:
DRM_ERROR("bad cmd_type %i at %p\n",
header.header.cmd_type,
cmdbuf->buf - sizeof(header));
ret = -EINVAL;
goto cleanup;
}
}
DRM_DEBUG("END\n");
cleanup:
r300_pacify(dev_priv);
/* We emit the vertex buffer age here, outside the pacifier "brackets"
* for two reasons:
* (1) This may coalesce multiple age emissions into a single one and
* (2) more importantly, some chips lock up hard when scratch registers
* are written inside the pacifier bracket.
*/
if (emit_dispatch_age) {
RING_LOCALS;
/* Emit the vertex buffer age */
BEGIN_RING(2);
RADEON_DISPATCH_AGE(master_priv->sarea_priv->last_dispatch);
ADVANCE_RING();
}
COMMIT_RING();
return ret;
}
| gpl-2.0 |
caibo2014/linux | arch/m68k/mvme147/config.c | 1026 | 4218 | /*
* arch/m68k/mvme147/config.c
*
* Copyright (C) 1996 Dave Frascone [chaos@mindspring.com]
* Cloned from Richard Hirst [richard@sleepie.demon.co.uk]
*
* Based on:
*
* Copyright (C) 1993 Hamish Macdonald
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file README.legal in the main directory of this archive
* for more details.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/console.h>
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/rtc.h>
#include <linux/interrupt.h>
#include <asm/bootinfo.h>
#include <asm/bootinfo-vme.h>
#include <asm/byteorder.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/rtc.h>
#include <asm/machdep.h>
#include <asm/mvme147hw.h>
static void mvme147_get_model(char *model);
extern void mvme147_sched_init(irq_handler_t handler);
extern u32 mvme147_gettimeoffset(void);
extern int mvme147_hwclk (int, struct rtc_time *);
extern int mvme147_set_clock_mmss (unsigned long);
extern void mvme147_reset (void);
static int bcd2int (unsigned char b);
/* Save the tick handler routine pointer; it will point to xtime_update() in
* kernel/time/timekeeping.c, called via mvme147_process_int() */
irq_handler_t tick_handler;
int __init mvme147_parse_bootinfo(const struct bi_record *bi)
{
uint16_t tag = be16_to_cpu(bi->tag);
if (tag == BI_VME_TYPE || tag == BI_VME_BRDINFO)
return 0;
else
return 1;
}
void mvme147_reset(void)
{
printk ("\r\n\nCalled mvme147_reset\r\n");
m147_pcc->watchdog = 0x0a; /* Clear timer */
m147_pcc->watchdog = 0xa5; /* Enable watchdog - 100ms to reset */
while (1)
;
}
static void mvme147_get_model(char *model)
{
sprintf(model, "Motorola MVME147");
}
/*
* This function is called during kernel startup to initialize
* the mvme147 IRQ handling routines.
*/
void __init mvme147_init_IRQ(void)
{
m68k_setup_user_interrupt(VEC_USER, 192);
}
void __init config_mvme147(void)
{
mach_max_dma_address = 0x01000000;
mach_sched_init = mvme147_sched_init;
mach_init_IRQ = mvme147_init_IRQ;
arch_gettimeoffset = mvme147_gettimeoffset;
mach_hwclk = mvme147_hwclk;
mach_set_clock_mmss = mvme147_set_clock_mmss;
mach_reset = mvme147_reset;
mach_get_model = mvme147_get_model;
/* Board type is only set by newer versions of vmelilo/tftplilo */
if (!vme_brdtype)
vme_brdtype = VME_TYPE_MVME147;
}
/* Using pcc tick timer 1 */
static irqreturn_t mvme147_timer_int (int irq, void *dev_id)
{
m147_pcc->t1_int_cntrl = PCC_TIMER_INT_CLR;
m147_pcc->t1_int_cntrl = PCC_INT_ENAB|PCC_LEVEL_TIMER1;
return tick_handler(irq, dev_id);
}
void mvme147_sched_init (irq_handler_t timer_routine)
{
tick_handler = timer_routine;
if (request_irq(PCC_IRQ_TIMER1, mvme147_timer_int, 0, "timer 1", NULL))
pr_err("Couldn't register timer interrupt\n");
/* Init the clock with a value */
/* our clock goes off every 6.25us */
m147_pcc->t1_preload = PCC_TIMER_PRELOAD;
m147_pcc->t1_cntrl = 0x0; /* clear timer */
m147_pcc->t1_cntrl = 0x3; /* start timer */
m147_pcc->t1_int_cntrl = PCC_TIMER_INT_CLR; /* clear pending ints */
m147_pcc->t1_int_cntrl = PCC_INT_ENAB|PCC_LEVEL_TIMER1;
}
/* This is always executed with interrupts disabled. */
/* XXX There are race hazards in this code XXX */
u32 mvme147_gettimeoffset(void)
{
volatile unsigned short *cp = (volatile unsigned short *)0xfffe1012;
unsigned short n;
n = *cp;
while (n != *cp)
n = *cp;
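/*
 * Convert elapsed counts to nanoseconds: the counter starts at
 * PCC_TIMER_PRELOAD and ticks every 6.25us, so (n * 25 / 4) yields
 * microseconds and the final * 1000 scales to nanoseconds.
 */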
n -= PCC_TIMER_PRELOAD;
return ((unsigned long)n * 25 / 4) * 1000;
}
static int bcd2int (unsigned char b)
{
return ((b>>4)*10 + (b&15));
}
int mvme147_hwclk(int op, struct rtc_time *t)
{
#warning check me!
if (!op) {
m147_rtc->ctrl = RTC_READ;
t->tm_year = bcd2int (m147_rtc->bcd_year);
t->tm_mon = bcd2int (m147_rtc->bcd_mth);
t->tm_mday = bcd2int (m147_rtc->bcd_dom);
t->tm_hour = bcd2int (m147_rtc->bcd_hr);
t->tm_min = bcd2int (m147_rtc->bcd_min);
t->tm_sec = bcd2int (m147_rtc->bcd_sec);
m147_rtc->ctrl = 0;
}
return 0;
}
int mvme147_set_clock_mmss (unsigned long nowtime)
{
return 0;
}
| gpl-2.0 |
diorahman/linux | arch/arm/mach-omap2/fb.c | 1282 | 3621 | /*
* Framebuffer device registration for TI OMAP platforms
*
* Copyright (C) 2006 Nokia Corporation
* Author: Imre Deak <imre.deak@nokia.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/memblock.h>
#include <linux/io.h>
#include <linux/omapfb.h>
#include <linux/dma-mapping.h>
#include <asm/mach/map.h>
#include "soc.h"
#include "display.h"
#ifdef CONFIG_OMAP2_VRFB
/*
* The first memory resource is the register region for VRFB,
* the rest are VRFB virtual memory areas for each VRFB context.
*/
static const struct resource omap2_vrfb_resources[] = {
DEFINE_RES_MEM_NAMED(0x68008000u, 0x40, "vrfb-regs"),
DEFINE_RES_MEM_NAMED(0x70000000u, 0x4000000, "vrfb-area-0"),
DEFINE_RES_MEM_NAMED(0x74000000u, 0x4000000, "vrfb-area-1"),
DEFINE_RES_MEM_NAMED(0x78000000u, 0x4000000, "vrfb-area-2"),
DEFINE_RES_MEM_NAMED(0x7c000000u, 0x4000000, "vrfb-area-3"),
};
static const struct resource omap3_vrfb_resources[] = {
DEFINE_RES_MEM_NAMED(0x6C000180u, 0xc0, "vrfb-regs"),
DEFINE_RES_MEM_NAMED(0x70000000u, 0x4000000, "vrfb-area-0"),
DEFINE_RES_MEM_NAMED(0x74000000u, 0x4000000, "vrfb-area-1"),
DEFINE_RES_MEM_NAMED(0x78000000u, 0x4000000, "vrfb-area-2"),
DEFINE_RES_MEM_NAMED(0x7c000000u, 0x4000000, "vrfb-area-3"),
DEFINE_RES_MEM_NAMED(0xe0000000u, 0x4000000, "vrfb-area-4"),
DEFINE_RES_MEM_NAMED(0xe4000000u, 0x4000000, "vrfb-area-5"),
DEFINE_RES_MEM_NAMED(0xe8000000u, 0x4000000, "vrfb-area-6"),
DEFINE_RES_MEM_NAMED(0xec000000u, 0x4000000, "vrfb-area-7"),
DEFINE_RES_MEM_NAMED(0xf0000000u, 0x4000000, "vrfb-area-8"),
DEFINE_RES_MEM_NAMED(0xf4000000u, 0x4000000, "vrfb-area-9"),
DEFINE_RES_MEM_NAMED(0xf8000000u, 0x4000000, "vrfb-area-10"),
DEFINE_RES_MEM_NAMED(0xfc000000u, 0x4000000, "vrfb-area-11"),
};
int __init omap_init_vrfb(void)
{
struct platform_device *pdev;
const struct resource *res;
unsigned int num_res;
if (cpu_is_omap24xx()) {
res = omap2_vrfb_resources;
num_res = ARRAY_SIZE(omap2_vrfb_resources);
} else if (cpu_is_omap34xx()) {
res = omap3_vrfb_resources;
num_res = ARRAY_SIZE(omap3_vrfb_resources);
} else {
return 0;
}
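/*
 * Register the VRFB platform device with the resources selected above;
 * PTR_RET() folds the result to 0 on success or to the ERR_PTR-encoded
 * errno on failure.
 */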
pdev = platform_device_register_resndata(NULL, "omapvrfb", -1,
res, num_res, NULL, 0);
return PTR_RET(pdev);
}
#else
int __init omap_init_vrfb(void) { return 0; }
#endif
#if defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE)
static u64 omap_fb_dma_mask = ~(u32)0;
static struct omapfb_platform_data omapfb_config;
static struct platform_device omap_fb_device = {
.name = "omapfb",
.id = -1,
.dev = {
.dma_mask = &omap_fb_dma_mask,
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &omapfb_config,
},
.num_resources = 0,
};
int __init omap_init_fb(void)
{
return platform_device_register(&omap_fb_device);
}
#else
int __init omap_init_fb(void) { return 0; }
#endif
| gpl-2.0 |
chris4824/cyberkernel-cm | drivers/staging/iio/resolver/ad2s1210.c | 2306 | 21756 | /*
* ad2s1210.c support for the ADI Resolver to Digital Converters: AD2S1210
*
* Copyright (c) 2010-2010 Analog Devices Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include "../iio.h"
#include "../sysfs.h"
#define DRV_NAME "ad2s1210"
#define DEF_CONTROL 0x7E
#define MSB_IS_HIGH 0x80
#define MSB_IS_LOW 0x7F
#define PHASE_LOCK_RANGE_44 0x20
#define ENABLE_HYSTERESIS 0x10
#define SET_ENRES1 0x08
#define SET_ENRES0 0x04
#define SET_RES1 0x02
#define SET_RES0 0x01
#define SET_ENRESOLUTION (SET_ENRES1 | SET_ENRES0)
#define SET_RESOLUTION (SET_RES1 | SET_RES0)
#define REG_POSITION 0x80
#define REG_VELOCITY 0x82
#define REG_LOS_THRD 0x88
#define REG_DOS_OVR_THRD 0x89
#define REG_DOS_MIS_THRD 0x8A
#define REG_DOS_RST_MAX_THRD 0x8B
#define REG_DOS_RST_MIN_THRD 0x8C
#define REG_LOT_HIGH_THRD 0x8D
#define REG_LOT_LOW_THRD 0x8E
#define REG_EXCIT_FREQ 0x91
#define REG_CONTROL 0x92
#define REG_SOFT_RESET 0xF0
#define REG_FAULT 0xFF
/* pins SAMPLE, A0, A1, RES0 and RES1 are controlled by the driver */
#define AD2S1210_SAA 3
#if defined(CONFIG_AD2S1210_GPIO_INPUT) || defined(CONFIG_AD2S1210_GPIO_OUTPUT)
# define AD2S1210_RES 2
#else
# define AD2S1210_RES 0
#endif
#define AD2S1210_PN (AD2S1210_SAA + AD2S1210_RES)
#define AD2S1210_MIN_CLKIN 6144000
#define AD2S1210_MAX_CLKIN 10240000
#define AD2S1210_MIN_EXCIT 2000
#define AD2S1210_MAX_EXCIT 20000
#define AD2S1210_MIN_FCW 0x4
#define AD2S1210_MAX_FCW 0x50
/* default input clock on serial interface */
#define AD2S1210_DEF_CLKIN 8192000
/* clock period in nanoseconds */
#define AD2S1210_DEF_TCK (1000000000/AD2S1210_DEF_CLKIN)
#define AD2S1210_DEF_EXCIT 10000
enum ad2s1210_mode {
MOD_POS = 0,
MOD_VEL,
MOD_RESERVED,
MOD_CONFIG,
};
enum ad2s1210_res {
RES_10 = 10,
RES_12 = 12,
RES_14 = 14,
RES_16 = 16,
};
static unsigned int resolution_value[] = {
RES_10, RES_12, RES_14, RES_16};
struct ad2s1210_state {
struct mutex lock;
struct iio_dev *idev;
struct spi_device *sdev;
struct spi_transfer xfer;
unsigned int hysteresis;
unsigned int old_data;
enum ad2s1210_mode mode;
enum ad2s1210_res resolution;
unsigned int fclkin;
unsigned int fexcit;
unsigned short sample;
unsigned short a0;
unsigned short a1;
unsigned short res0;
unsigned short res1;
u8 rx[3];
u8 tx[3];
};
static inline void start_sample(struct ad2s1210_state *st)
{
gpio_set_value(st->sample, 0);
}
static inline void stop_sample(struct ad2s1210_state *st)
{
gpio_set_value(st->sample, 1);
}
static inline void set_mode(enum ad2s1210_mode mode, struct ad2s1210_state *st)
{
switch (mode) {
case MOD_POS:
gpio_set_value(st->a0, 0);
gpio_set_value(st->a1, 0);
break;
case MOD_VEL:
gpio_set_value(st->a0, 0);
gpio_set_value(st->a1, 1);
break;
case MOD_CONFIG:
gpio_set_value(st->a0, 1);
gpio_set_value(st->a1, 1);
break;
default:
/* set to reserved mode */
gpio_set_value(st->a0, 1);
gpio_set_value(st->a1, 0);
}
st->mode = mode;
}
/* write 1 bytes (address or data) to the chip */
static int config_write(struct ad2s1210_state *st,
unsigned char data)
{
struct spi_message msg;
int ret = 0;
st->xfer.len = 1;
set_mode(MOD_CONFIG, st);
spi_message_init(&msg);
spi_message_add_tail(&st->xfer, &msg);
st->tx[0] = data;
ret = spi_sync(st->sdev, &msg);
if (ret)
return ret;
st->old_data = 1;
return ret;
}
/* read value from one of the registers */
static int config_read(struct ad2s1210_state *st,
unsigned char address,
unsigned char *data)
{
struct spi_message msg;
int ret = 0;
st->xfer.len = 2;
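/*
 * Reads are pipelined over SPI: the first byte clocks out the register
 * address (MSB set marks a read) while the second byte clocks out the
 * next address (REG_FAULT here) and simultaneously clocks the addressed
 * register's value back in on rx[1].
 */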
set_mode(MOD_CONFIG, st);
spi_message_init(&msg);
spi_message_add_tail(&st->xfer, &msg);
st->tx[0] = address | MSB_IS_HIGH;
st->tx[1] = REG_FAULT;
ret = spi_sync(st->sdev, &msg);
if (ret)
return ret;
*data = st->rx[1];
st->old_data = 1;
return ret;
}
static inline void update_frequency_control_word(struct ad2s1210_state *st)
{
unsigned char fcw;
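/*
 * FCW = fexcit * 2^15 / fclkin; with the driver defaults this is
 * 10000 * 32768 / 8192000 = 40 (0x28), inside the 0x04..0x50 window
 * enforced below. (The formula follows the code; the datasheet is
 * the authority on the exact encoding.)
 */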
fcw = (unsigned char)(st->fexcit * (1 << 15) / st->fclkin);
if (fcw >= AD2S1210_MIN_FCW && fcw <= AD2S1210_MAX_FCW) {
config_write(st, REG_EXCIT_FREQ);
config_write(st, fcw);
} else
pr_err("ad2s1210: FCW out of range\n");
}
#if defined(CONFIG_AD2S1210_GPIO_INPUT)
static inline unsigned char read_resolution_pin(struct ad2s1210_state *st)
{
unsigned int data;
data = (gpio_get_value(st->res0) << 1) |
gpio_get_value(st->res1);
return resolution_value[data];
}
#elif defined(CONFIG_AD2S1210_GPIO_OUTPUT)
static inline void set_resolution_pin(struct ad2s1210_state *st)
{
switch (st->resolution) {
case RES_10:
gpio_set_value(st->res0, 0);
gpio_set_value(st->res1, 0);
break;
case RES_12:
gpio_set_value(st->res0, 0);
gpio_set_value(st->res1, 1);
break;
case RES_14:
gpio_set_value(st->res0, 1);
gpio_set_value(st->res1, 0);
break;
case RES_16:
gpio_set_value(st->res0, 1);
gpio_set_value(st->res1, 1);
break;
}
}
#endif
static inline void soft_reset(struct ad2s1210_state *st)
{
config_write(st, REG_SOFT_RESET);
config_write(st, 0x0);
}
/* return the OLD DATA since last spi bus write */
static ssize_t ad2s1210_show_raw(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct iio_dev *idev = dev_get_drvdata(dev);
struct ad2s1210_state *st = idev->dev_data;
int ret;
mutex_lock(&st->lock);
if (st->old_data) {
ret = sprintf(buf, "0x%x\n", st->rx[0]);
st->old_data = 0;
} else
ret = 0;
mutex_unlock(&st->lock);
return ret;
}
static ssize_t ad2s1210_store_raw(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct iio_dev *idev = dev_get_drvdata(dev);
struct ad2s1210_state *st = idev->dev_data;
unsigned long udata;
unsigned char data;
int ret;
ret = strict_strtoul(buf, 16, &udata);
if (ret)
return -EINVAL;
data = udata & 0xff;
mutex_lock(&st->lock);
config_write(st, data);
mutex_unlock(&st->lock);
return 1;
}
static ssize_t ad2s1210_store_softreset(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct iio_dev *idev = dev_get_drvdata(dev);
struct ad2s1210_state *st = idev->dev_data;
mutex_lock(&st->lock);
soft_reset(st);
mutex_unlock(&st->lock);
return len;
}
static ssize_t ad2s1210_show_fclkin(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct iio_dev *idev = dev_get_drvdata(dev);
struct ad2s1210_state *st = idev->dev_data;
return sprintf(buf, "%d\n", st->fclkin);
}
static ssize_t ad2s1210_store_fclkin(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct iio_dev *idev = dev_get_drvdata(dev);
struct ad2s1210_state *st = idev->dev_data;
unsigned long fclkin;
int ret;
ret = strict_strtoul(buf, 10, &fclkin);
if (!ret && fclkin >= AD2S1210_MIN_CLKIN &&
fclkin <= AD2S1210_MAX_CLKIN) {
mutex_lock(&st->lock);
st->fclkin = fclkin;
} else {
pr_err("ad2s1210: fclkin out of range\n");
return -EINVAL;
}
update_frequency_control_word(st);
soft_reset(st);
mutex_unlock(&st->lock);
return len;
}
static ssize_t ad2s1210_show_fexcit(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct iio_dev *idev = dev_get_drvdata(dev);
struct ad2s1210_state *st = idev->dev_data;
return sprintf(buf, "%d\n", st->fexcit);
}
static ssize_t ad2s1210_store_fexcit(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct iio_dev *idev = dev_get_drvdata(dev);
struct ad2s1210_state *st = idev->dev_data;
unsigned long fexcit;
int ret;
ret = strict_strtoul(buf, 10, &fexcit);
if (!ret && fexcit >= AD2S1210_MIN_EXCIT &&
fexcit <= AD2S1210_MAX_EXCIT) {
mutex_lock(&st->lock);
st->fexcit = fexcit;
} else {
pr_err("ad2s1210: excitation frequency out of range\n");
return -EINVAL;
}
update_frequency_control_word(st);
soft_reset(st);
mutex_unlock(&st->lock);
return len;
}
static ssize_t ad2s1210_show_control(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct iio_dev *idev = dev_get_drvdata(dev);
struct ad2s1210_state *st = idev->dev_data;
unsigned char data;
mutex_lock(&st->lock);
config_read(st, REG_CONTROL, &data);
mutex_unlock(&st->lock);
return sprintf(buf, "0x%x\n", data);
}
static ssize_t ad2s1210_store_control(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct iio_dev *idev = dev_get_drvdata(dev);
struct ad2s1210_state *st = idev->dev_data;
unsigned long udata;
unsigned char data;
int ret;
ret = strict_strtoul(buf, 16, &udata);
if (ret) {
ret = -EINVAL;
goto error_ret;
}
mutex_lock(&st->lock);
config_write(st, REG_CONTROL);
data = udata & MSB_IS_LOW;
config_write(st, data);
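/*
 * Register values are 7 bits wide (the MSB marks an address byte), so
 * a readback with the MSB set is treated as a failed write.
 */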
config_read(st, REG_CONTROL, &data);
if (data & MSB_IS_HIGH) {
ret = -EIO;
pr_err("ad2s1210: write control register fail\n");
goto error_ret;
}
st->resolution = resolution_value[data & SET_RESOLUTION];
#if defined(CONFIG_AD2S1210_GPIO_INPUT)
data = read_resolution_pin(st);
if (data != st->resolution)
pr_warning("ad2s1210: resolution settings not match\n");
#elif defined(CONFIG_AD2S1210_GPIO_OUTPUT)
set_resolution_pin(st);
#endif
ret = len;
if (data & ENABLE_HYSTERESIS)
st->hysteresis = 1;
else
st->hysteresis = 0;
error_ret:
mutex_unlock(&st->lock);
return ret;
}
static ssize_t ad2s1210_show_resolution(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct iio_dev *idev = dev_get_drvdata(dev);
struct ad2s1210_state *st = idev->dev_data;
return sprintf(buf, "%d\n", st->resolution);
}
static ssize_t ad2s1210_store_resolution(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct iio_dev *idev = dev_get_drvdata(dev);
struct ad2s1210_state *st = idev->dev_data;
unsigned char data;
unsigned long udata;
int ret;
ret = strict_strtoul(buf, 10, &udata);
if (ret || udata < RES_10 || udata > RES_16) {
pr_err("ad2s1210: resolution out of range\n");
return -EINVAL;
}
mutex_lock(&st->lock);
config_read(st, REG_CONTROL, &data);
data &= ~SET_RESOLUTION;
data |= (udata - RES_10) >> 1;
config_write(st, REG_CONTROL);
config_write(st, data & MSB_IS_LOW);
config_read(st, REG_CONTROL, &data);
if (data & MSB_IS_HIGH) {
ret = -EIO;
pr_err("ad2s1210: setting resolution fail\n");
goto error_ret;
}
st->resolution = resolution_value[data & SET_RESOLUTION];
#if defined(CONFIG_AD2S1210_GPIO_INPUT)
data = read_resolution_pin(st);
if (data != st->resolution)
pr_warning("ad2s1210: resolution settings not match\n");
#elif defined(CONFIG_AD2S1210_GPIO_OUTPUT)
set_resolution_pin(st);
#endif
ret = len;
error_ret:
mutex_unlock(&st->lock);
return ret;
}
/* read the fault register since last sample */
static ssize_t ad2s1210_show_fault(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret = 0;
ssize_t len = 0;
unsigned char data;
struct iio_dev *idev = dev_get_drvdata(dev);
struct ad2s1210_state *st = idev->dev_data;
mutex_lock(&st->lock);
ret = config_read(st, REG_FAULT, &data);
if (ret)
goto error_ret;
len = sprintf(buf, "0x%x\n", data);
error_ret:
mutex_unlock(&st->lock);
return ret ? ret : len;
}
static ssize_t ad2s1210_clear_fault(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct iio_dev *idev = dev_get_drvdata(dev);
struct ad2s1210_state *st = idev->dev_data;
unsigned char data;
mutex_lock(&st->lock);
start_sample(st);
/* delay (2 * tck + 20) nanoseconds */
udelay(1);
stop_sample(st);
config_read(st, REG_FAULT, &data);
start_sample(st);
stop_sample(st);
mutex_unlock(&st->lock);
return 0;
}
static ssize_t ad2s1210_show_reg(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct iio_dev *idev = dev_get_drvdata(dev);
struct ad2s1210_state *st = idev->dev_data;
unsigned char data;
struct iio_dev_attr *iattr = to_iio_dev_attr(attr);
mutex_lock(&st->lock);
config_read(st, iattr->address, &data);
mutex_unlock(&st->lock);
return sprintf(buf, "%d\n", data);
}
static ssize_t ad2s1210_store_reg(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
struct iio_dev *idev = dev_get_drvdata(dev);
struct ad2s1210_state *st = idev->dev_data;
unsigned long data;
int ret;
struct iio_dev_attr *iattr = to_iio_dev_attr(attr);
ret = strict_strtoul(buf, 10, &data);
if (ret)
return -EINVAL;
mutex_lock(&st->lock);
config_write(st, iattr->address);
config_write(st, data & MSB_IS_LOW);
mutex_unlock(&st->lock);
return len;
}
static ssize_t ad2s1210_show_pos(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct spi_message msg;
int ret = 0;
ssize_t len = 0;
u16 pos;
struct iio_dev *idev = dev_get_drvdata(dev);
struct ad2s1210_state *st = idev->dev_data;
st->xfer.len = 2;
mutex_lock(&st->lock);
start_sample(st);
/* delay (6 * tck + 20) nanoseconds */
udelay(1);
set_mode(MOD_POS, st);
spi_message_init(&msg);
spi_message_add_tail(&st->xfer, &msg);
ret = spi_sync(st->sdev, &msg);
if (ret)
goto error_ret;
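/*
 * The position is clocked out MSB first, left-justified in the 16-bit
 * frame; when hysteresis is enabled the unused low bits are dropped so
 * the value is expressed in resolution counts.
 */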
pos = ((((u16)(st->rx[0])) << 8) | (st->rx[1]));
if (st->hysteresis)
pos >>= 16 - st->resolution;
len = sprintf(buf, "%d\n", pos);
error_ret:
stop_sample(st);
/* delay (2 * tck + 20) nanoseconds */
udelay(1);
mutex_unlock(&st->lock);
return ret ? ret : len;
}
static ssize_t ad2s1210_show_vel(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct spi_message msg;
unsigned short negative;
int ret = 0;
ssize_t len = 0;
s16 vel;
struct iio_dev *idev = dev_get_drvdata(dev);
struct ad2s1210_state *st = idev->dev_data;
st->xfer.len = 2;
mutex_lock(&st->lock);
start_sample(st);
/* delay (6 * tck + 20) nanoseconds */
udelay(1);
set_mode(MOD_VEL, st);
spi_message_init(&msg);
spi_message_add_tail(&st->xfer, &msg);
ret = spi_sync(st->sdev, &msg);
if (ret)
goto error_ret;
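/*
 * The velocity is a signed, left-justified value. The sign bits are
 * re-extended by hand after the shift because right-shifting a
 * negative signed quantity is implementation-defined in C.
 */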
negative = st->rx[0] & 0x80;
vel = ((((s16)(st->rx[0])) << 8) | (st->rx[1]));
vel >>= 16 - st->resolution;
if (negative) {
negative = (0xffff >> st->resolution) << st->resolution;
vel |= negative;
}
len = sprintf(buf, "%d\n", vel);
error_ret:
stop_sample(st);
/* delay (2 * tck + 20) nanoseconds */
udelay(1);
mutex_unlock(&st->lock);
return ret ? ret : len;
}
static ssize_t ad2s1210_show_pos_vel(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct spi_message msg;
unsigned short negative;
int ret = 0;
ssize_t len = 0;
u16 pos;
s16 vel;
struct iio_dev *idev = dev_get_drvdata(dev);
struct ad2s1210_state *st = idev->dev_data;
st->xfer.len = 2;
mutex_lock(&st->lock);
start_sample(st);
/* delay (6 * tck + 20) nanoseconds */
udelay(1);
set_mode(MOD_POS, st);
spi_message_init(&msg);
spi_message_add_tail(&st->xfer, &msg);
ret = spi_sync(st->sdev, &msg);
if (ret)
goto error_ret;
pos = ((((u16)(st->rx[0])) << 8) | (st->rx[1]));
if (st->hysteresis)
pos >>= 16 - st->resolution;
len = sprintf(buf, "%d ", pos);
st->xfer.len = 2;
set_mode(MOD_VEL, st);
spi_message_init(&msg);
spi_message_add_tail(&st->xfer, &msg);
ret = spi_sync(st->sdev, &msg);
if (ret)
goto error_ret;
negative = st->rx[0] & 0x80;
vel = ((((s16)(st->rx[0])) << 8) | (st->rx[1]));
vel >>= 16 - st->resolution;
if (negative) {
negative = (0xffff >> st->resolution) << st->resolution;
vel |= negative;
}
len += sprintf(buf + len, "%d\n", vel);
error_ret:
stop_sample(st);
/* delay (2 * tck + 20) nanoseconds */
udelay(1);
mutex_unlock(&st->lock);
return ret ? ret : len;
}
static IIO_CONST_ATTR(description,
"Variable Resolution, 10-Bit to 16Bit R/D\n\
Converter with Reference Oscillator");
static IIO_DEVICE_ATTR(raw_io, S_IRUGO | S_IWUSR,
ad2s1210_show_raw, ad2s1210_store_raw, 0);
static IIO_DEVICE_ATTR(reset, S_IWUSR,
NULL, ad2s1210_store_softreset, 0);
static IIO_DEVICE_ATTR(fclkin, S_IRUGO | S_IWUSR,
ad2s1210_show_fclkin, ad2s1210_store_fclkin, 0);
static IIO_DEVICE_ATTR(fexcit, S_IRUGO | S_IWUSR,
ad2s1210_show_fexcit, ad2s1210_store_fexcit, 0);
static IIO_DEVICE_ATTR(control, S_IRUGO | S_IWUSR,
ad2s1210_show_control, ad2s1210_store_control, 0);
static IIO_DEVICE_ATTR(bits, S_IRUGO | S_IWUSR,
ad2s1210_show_resolution, ad2s1210_store_resolution, 0);
static IIO_DEVICE_ATTR(fault, S_IRUGO | S_IWUSR,
ad2s1210_show_fault, ad2s1210_clear_fault, 0);
static IIO_DEVICE_ATTR(pos, S_IRUGO,
ad2s1210_show_pos, NULL, 0);
static IIO_DEVICE_ATTR(vel, S_IRUGO,
ad2s1210_show_vel, NULL, 0);
static IIO_DEVICE_ATTR(pos_vel, S_IRUGO,
ad2s1210_show_pos_vel, NULL, 0);
static IIO_DEVICE_ATTR(los_thrd, S_IRUGO | S_IWUSR,
ad2s1210_show_reg, ad2s1210_store_reg, REG_LOS_THRD);
static IIO_DEVICE_ATTR(dos_ovr_thrd, S_IRUGO | S_IWUSR,
ad2s1210_show_reg, ad2s1210_store_reg, REG_DOS_OVR_THRD);
static IIO_DEVICE_ATTR(dos_mis_thrd, S_IRUGO | S_IWUSR,
ad2s1210_show_reg, ad2s1210_store_reg, REG_DOS_MIS_THRD);
static IIO_DEVICE_ATTR(dos_rst_max_thrd, S_IRUGO | S_IWUSR,
ad2s1210_show_reg, ad2s1210_store_reg, REG_DOS_RST_MAX_THRD);
static IIO_DEVICE_ATTR(dos_rst_min_thrd, S_IRUGO | S_IWUSR,
ad2s1210_show_reg, ad2s1210_store_reg, REG_DOS_RST_MIN_THRD);
static IIO_DEVICE_ATTR(lot_high_thrd, S_IRUGO | S_IWUSR,
ad2s1210_show_reg, ad2s1210_store_reg, REG_LOT_HIGH_THRD);
static IIO_DEVICE_ATTR(lot_low_thrd, S_IRUGO | S_IWUSR,
ad2s1210_show_reg, ad2s1210_store_reg, REG_LOT_LOW_THRD);
static struct attribute *ad2s1210_attributes[] = {
&iio_const_attr_description.dev_attr.attr,
&iio_dev_attr_raw_io.dev_attr.attr,
&iio_dev_attr_reset.dev_attr.attr,
&iio_dev_attr_fclkin.dev_attr.attr,
&iio_dev_attr_fexcit.dev_attr.attr,
&iio_dev_attr_control.dev_attr.attr,
&iio_dev_attr_bits.dev_attr.attr,
&iio_dev_attr_fault.dev_attr.attr,
&iio_dev_attr_pos.dev_attr.attr,
&iio_dev_attr_vel.dev_attr.attr,
&iio_dev_attr_pos_vel.dev_attr.attr,
&iio_dev_attr_los_thrd.dev_attr.attr,
&iio_dev_attr_dos_ovr_thrd.dev_attr.attr,
&iio_dev_attr_dos_mis_thrd.dev_attr.attr,
&iio_dev_attr_dos_rst_max_thrd.dev_attr.attr,
&iio_dev_attr_dos_rst_min_thrd.dev_attr.attr,
&iio_dev_attr_lot_high_thrd.dev_attr.attr,
&iio_dev_attr_lot_low_thrd.dev_attr.attr,
NULL,
};
static const struct attribute_group ad2s1210_attribute_group = {
.name = DRV_NAME,
.attrs = ad2s1210_attributes,
};
static int __devinit ad2s1210_initial(struct ad2s1210_state *st)
{
unsigned char data;
int ret;
mutex_lock(&st->lock);
#if defined(CONFIG_AD2S1210_GPIO_INPUT)
st->resolution = read_resolution_pin(st);
#elif defined(CONFIG_AD2S1210_GPIO_OUTPUT)
set_resolution_pin(st);
#endif
config_write(st, REG_CONTROL);
data = DEF_CONTROL & ~(SET_RESOLUTION);
data |= (st->resolution - RES_10) >> 1;
config_write(st, data);
ret = config_read(st, REG_CONTROL, &data);
if (ret)
goto error_ret;
if (data & MSB_IS_HIGH) {
ret = -EIO;
goto error_ret;
}
update_frequency_control_word(st);
soft_reset(st);
error_ret:
mutex_unlock(&st->lock);
return ret;
}
static const struct iio_info ad2s1210_info = {
.attrs = &ad2s1210_attribute_group,
.driver_module = THIS_MODULE,
};
static int __devinit ad2s1210_probe(struct spi_device *spi)
{
struct ad2s1210_state *st;
int pn, ret = 0;
unsigned short *pins = spi->dev.platform_data;
for (pn = 0; pn < AD2S1210_PN; pn++) {
if (gpio_request(pins[pn], DRV_NAME)) {
pr_err("%s: request gpio pin %d failed\n",
DRV_NAME, pins[pn]);
goto error_ret;
}
if (pn < AD2S1210_SAA)
gpio_direction_output(pins[pn], 1);
else {
#if defined(CONFIG_AD2S1210_GPIO_INPUT)
gpio_direction_input(pins[pn]);
#elif defined(CONFIG_AD2S1210_GPIO_OUTPUT)
gpio_direction_output(pins[pn], 1);
#endif
}
}
st = kzalloc(sizeof(*st), GFP_KERNEL);
if (st == NULL) {
ret = -ENOMEM;
goto error_ret;
}
spi_set_drvdata(spi, st);
mutex_init(&st->lock);
st->sdev = spi;
st->xfer.tx_buf = st->tx;
st->xfer.rx_buf = st->rx;
st->hysteresis = 1;
st->mode = MOD_CONFIG;
st->resolution = RES_12;
st->fclkin = AD2S1210_DEF_CLKIN;
st->fexcit = AD2S1210_DEF_EXCIT;
st->sample = pins[0];
st->a0 = pins[1];
st->a1 = pins[2];
st->res0 = pins[3];
st->res1 = pins[4];
st->idev = iio_allocate_device(0);
if (st->idev == NULL) {
ret = -ENOMEM;
goto error_free_st;
}
st->idev->dev.parent = &spi->dev;
st->idev->info = &ad2s1210_info;
st->idev->dev_data = (void *)(st);
st->idev->modes = INDIO_DIRECT_MODE;
ret = iio_device_register(st->idev);
if (ret)
goto error_free_dev;
if (spi->max_speed_hz != AD2S1210_DEF_CLKIN)
st->fclkin = spi->max_speed_hz;
spi->mode = SPI_MODE_3;
spi_setup(spi);
ad2s1210_initial(st);
return 0;
error_free_dev:
iio_free_device(st->idev);
error_free_st:
kfree(st);
error_ret:
for (--pn; pn >= 0; pn--)
gpio_free(pins[pn]);
return ret;
}
static int __devexit ad2s1210_remove(struct spi_device *spi)
{
struct ad2s1210_state *st = spi_get_drvdata(spi);
iio_device_unregister(st->idev);
kfree(st);
return 0;
}
static struct spi_driver ad2s1210_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
},
.probe = ad2s1210_probe,
.remove = __devexit_p(ad2s1210_remove),
};
static __init int ad2s1210_spi_init(void)
{
return spi_register_driver(&ad2s1210_driver);
}
module_init(ad2s1210_spi_init);
static __exit void ad2s1210_spi_exit(void)
{
spi_unregister_driver(&ad2s1210_driver);
}
module_exit(ad2s1210_spi_exit);
MODULE_AUTHOR("Graff Yang <graff.yang@gmail.com>");
MODULE_DESCRIPTION("Analog Devices AD2S1210 Resolver to Digital SPI driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
MikeC84/android_kernel_moto_shamu | drivers/gpu/drm/nouveau/nouveau_agp.c | 2306 | 3733 | #include <linux/module.h>
#include <core/device.h>
#include "nouveau_drm.h"
#include "nouveau_agp.h"
#include "nouveau_reg.h"
#if __OS_HAS_AGP
MODULE_PARM_DESC(agpmode, "AGP mode (0 to disable AGP)");
static int nouveau_agpmode = -1;
module_param_named(agpmode, nouveau_agpmode, int, 0400);
static unsigned long
get_agp_mode(struct nouveau_drm *drm, unsigned long mode)
{
struct nouveau_device *device = nv_device(drm->device);
/*
* FW seems to be broken on nv18, it makes the card lock up
* randomly.
*/
if (device->chipset == 0x18)
mode &= ~PCI_AGP_COMMAND_FW;
/*
* AGP mode set in the command line.
*/
if (nouveau_agpmode > 0) {
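/*
 * AGPSTAT encodes the rate differently per revision: AGP 2.x uses
 * 1/2/4 for 1x/2x/4x, while AGP 3.0 (bit 3 set) uses 1/2 for 4x/8x,
 * so a user-supplied agpmode of 8 on a v3 bridge becomes rate 2.
 */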
bool agpv3 = mode & 0x8;
int rate = agpv3 ? nouveau_agpmode / 4 : nouveau_agpmode;
mode = (mode & ~0x7) | (rate & 0x7);
}
return mode;
}
static bool
nouveau_agp_enabled(struct nouveau_drm *drm)
{
struct drm_device *dev = drm->dev;
if (!drm_pci_device_is_agp(dev) || !dev->agp)
return false;
if (drm->agp.stat == UNKNOWN) {
if (!nouveau_agpmode)
return false;
#ifdef __powerpc__
/* Disable AGP by default on all PowerPC machines for
* now -- At least some UniNorth-2 AGP bridges are
* known to be broken: DMA from the host to the card
* works just fine, but writeback from the card to the
* host goes straight to memory untranslated bypassing
* the GATT somehow, making them quite painful to deal
* with...
*/
if (nouveau_agpmode == -1)
return false;
#endif
return true;
}
return (drm->agp.stat == ENABLED);
}
#endif
void
nouveau_agp_reset(struct nouveau_drm *drm)
{
#if __OS_HAS_AGP
struct nouveau_device *device = nv_device(drm->device);
struct drm_device *dev = drm->dev;
u32 save[2];
int ret;
if (!nouveau_agp_enabled(drm))
return;
/* First of all, disable fast writes, otherwise if they're
* already enabled in the AGP bridge and we disable the card's
* AGP controller we might be locking ourselves out of it. */
if ((nv_rd32(device, NV04_PBUS_PCI_NV_19) |
dev->agp->mode) & PCI_AGP_COMMAND_FW) {
struct drm_agp_info info;
struct drm_agp_mode mode;
ret = drm_agp_info(dev, &info);
if (ret)
return;
mode.mode = get_agp_mode(drm, info.mode);
mode.mode &= ~PCI_AGP_COMMAND_FW;
ret = drm_agp_enable(dev, mode);
if (ret)
return;
}
/* clear busmaster bit, and disable AGP */
save[0] = nv_mask(device, NV04_PBUS_PCI_NV_1, 0x00000004, 0x00000000);
nv_wr32(device, NV04_PBUS_PCI_NV_19, 0);
/* reset PGRAPH, PFIFO and PTIMER */
save[1] = nv_mask(device, 0x000200, 0x00011100, 0x00000000);
nv_mask(device, 0x000200, 0x00011100, save[1]);
/* and restore the busmaster bit (this has the effect of resetting AGP) */
nv_wr32(device, NV04_PBUS_PCI_NV_1, save[0]);
#endif
}
void
nouveau_agp_init(struct nouveau_drm *drm)
{
#if __OS_HAS_AGP
struct nouveau_device *device = nv_device(drm->device);
struct drm_device *dev = drm->dev;
struct drm_agp_info info;
struct drm_agp_mode mode;
int ret;
if (!nouveau_agp_enabled(drm))
return;
drm->agp.stat = DISABLE;
ret = drm_agp_acquire(dev);
if (ret) {
nv_error(device, "unable to acquire AGP: %d\n", ret);
return;
}
ret = drm_agp_info(dev, &info);
if (ret) {
nv_error(device, "unable to get AGP info: %d\n", ret);
return;
}
/* see agp.h for the AGPSTAT_* modes available */
mode.mode = get_agp_mode(drm, info.mode);
ret = drm_agp_enable(dev, mode);
if (ret) {
nv_error(device, "unable to enable AGP: %d\n", ret);
return;
}
drm->agp.stat = ENABLED;
drm->agp.base = info.aperture_base;
drm->agp.size = info.aperture_size;
#endif
}
void
nouveau_agp_fini(struct nouveau_drm *drm)
{
#if __OS_HAS_AGP
struct drm_device *dev = drm->dev;
if (dev->agp && dev->agp->acquired)
drm_agp_release(dev);
#endif
}
| gpl-2.0 |
EloYGomeZ/android_kernel_huawei_alice | drivers/hwmon/max1111.c | 2562 | 7144 | /*
* max1111.c - +2.7V, Low-Power, Multichannel, Serial 8-bit ADCs
*
* Based on arch/arm/mach-pxa/corgi_ssp.c
*
* Copyright (C) 2004-2005 Richard Purdie
*
* Copyright (C) 2008 Marvell International Ltd.
* Eric Miao <eric.miao@marvell.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
enum chips { max1110, max1111, max1112, max1113 };
#define MAX1111_TX_BUF_SIZE 1
#define MAX1111_RX_BUF_SIZE 2
/* MAX1111 Commands */
#define MAX1111_CTRL_PD0 (1u << 0)
#define MAX1111_CTRL_PD1 (1u << 1)
#define MAX1111_CTRL_SGL (1u << 2)
#define MAX1111_CTRL_UNI (1u << 3)
#define MAX1110_CTRL_SEL_SH (4)
#define MAX1111_CTRL_SEL_SH (5) /* NOTE: bit 4 is ignored */
#define MAX1111_CTRL_STR (1u << 7)
struct max1111_data {
struct spi_device *spi;
struct device *hwmon_dev;
struct spi_message msg;
struct spi_transfer xfer[2];
uint8_t tx_buf[MAX1111_TX_BUF_SIZE];
uint8_t rx_buf[MAX1111_RX_BUF_SIZE];
struct mutex drvdata_lock;
/* protect msg, xfer and buffers from multiple access */
int sel_sh;
int lsb;
};
static int max1111_read(struct device *dev, int channel)
{
struct max1111_data *data = dev_get_drvdata(dev);
uint8_t v1, v2;
int err;
/* writing to drvdata struct is not thread safe, wait on mutex */
mutex_lock(&data->drvdata_lock);
data->tx_buf[0] = (channel << data->sel_sh) |
MAX1111_CTRL_PD0 | MAX1111_CTRL_PD1 |
MAX1111_CTRL_SGL | MAX1111_CTRL_UNI | MAX1111_CTRL_STR;
err = spi_sync(data->spi, &data->msg);
if (err < 0) {
dev_err(dev, "spi_sync failed with %d\n", err);
mutex_unlock(&data->drvdata_lock);
return err;
}
v1 = data->rx_buf[0];
v2 = data->rx_buf[1];
mutex_unlock(&data->drvdata_lock);
if ((v1 & 0xc0) || (v2 & 0x3f))
return -EINVAL;
return (v1 << 2) | (v2 >> 6);
}
#ifdef CONFIG_SHARPSL_PM
static struct max1111_data *the_max1111;
int max1111_read_channel(int channel)
{
return max1111_read(&the_max1111->spi->dev, channel);
}
EXPORT_SYMBOL(max1111_read_channel);
#endif
/*
* NOTE: SPI devices do not have a default 'name' attribute, which is
* likely to be used by hwmon applications to distinguish between
* different devices, explicitly add a name attribute here.
*/
static ssize_t show_name(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%s\n", to_spi_device(dev)->modalias);
}
static ssize_t show_adc(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct max1111_data *data = dev_get_drvdata(dev);
int channel = to_sensor_dev_attr(attr)->index;
int ret;
ret = max1111_read(dev, channel);
if (ret < 0)
return ret;
/*
* Assume the reference voltage to be 2.048V or 4.096V, with an 8-bit
* sample. The LSB weight is 8mV or 16mV depending on the chip type.
*/
return sprintf(buf, "%d\n", ret * data->lsb);
}
#define MAX1111_ADC_ATTR(_id) \
SENSOR_DEVICE_ATTR(in##_id##_input, S_IRUGO, show_adc, NULL, _id)
static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
static MAX1111_ADC_ATTR(0);
static MAX1111_ADC_ATTR(1);
static MAX1111_ADC_ATTR(2);
static MAX1111_ADC_ATTR(3);
static MAX1111_ADC_ATTR(4);
static MAX1111_ADC_ATTR(5);
static MAX1111_ADC_ATTR(6);
static MAX1111_ADC_ATTR(7);
static struct attribute *max1111_attributes[] = {
&dev_attr_name.attr,
&sensor_dev_attr_in0_input.dev_attr.attr,
&sensor_dev_attr_in1_input.dev_attr.attr,
&sensor_dev_attr_in2_input.dev_attr.attr,
&sensor_dev_attr_in3_input.dev_attr.attr,
NULL,
};
static const struct attribute_group max1111_attr_group = {
.attrs = max1111_attributes,
};
static struct attribute *max1110_attributes[] = {
&sensor_dev_attr_in4_input.dev_attr.attr,
&sensor_dev_attr_in5_input.dev_attr.attr,
&sensor_dev_attr_in6_input.dev_attr.attr,
&sensor_dev_attr_in7_input.dev_attr.attr,
NULL,
};
static const struct attribute_group max1110_attr_group = {
.attrs = max1110_attributes,
};
static int setup_transfer(struct max1111_data *data)
{
struct spi_message *m;
struct spi_transfer *x;
m = &data->msg;
x = &data->xfer[0];
spi_message_init(m);
x->tx_buf = &data->tx_buf[0];
x->len = MAX1111_TX_BUF_SIZE;
spi_message_add_tail(x, m);
x++;
x->rx_buf = &data->rx_buf[0];
x->len = MAX1111_RX_BUF_SIZE;
spi_message_add_tail(x, m);
return 0;
}
static int max1111_probe(struct spi_device *spi)
{
enum chips chip = spi_get_device_id(spi)->driver_data;
struct max1111_data *data;
int err;
spi->bits_per_word = 8;
spi->mode = SPI_MODE_0;
err = spi_setup(spi);
if (err < 0)
return err;
data = devm_kzalloc(&spi->dev, sizeof(struct max1111_data), GFP_KERNEL);
if (data == NULL) {
dev_err(&spi->dev, "failed to allocate memory\n");
return -ENOMEM;
}
switch (chip) {
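/* With 8-bit samples, a 2.048V reference gives 8mV/LSB and a 4.096V
* reference gives 16mV/LSB; the chip type determines which applies. */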
case max1110:
data->lsb = 8;
data->sel_sh = MAX1110_CTRL_SEL_SH;
break;
case max1111:
data->lsb = 8;
data->sel_sh = MAX1111_CTRL_SEL_SH;
break;
case max1112:
data->lsb = 16;
data->sel_sh = MAX1110_CTRL_SEL_SH;
break;
case max1113:
data->lsb = 16;
data->sel_sh = MAX1111_CTRL_SEL_SH;
break;
}
err = setup_transfer(data);
if (err)
return err;
mutex_init(&data->drvdata_lock);
data->spi = spi;
spi_set_drvdata(spi, data);
err = sysfs_create_group(&spi->dev.kobj, &max1111_attr_group);
if (err) {
dev_err(&spi->dev, "failed to create attribute group\n");
return err;
}
if (chip == max1110 || chip == max1112) {
err = sysfs_create_group(&spi->dev.kobj, &max1110_attr_group);
if (err) {
dev_err(&spi->dev,
"failed to create extended attribute group\n");
goto err_remove;
}
}
data->hwmon_dev = hwmon_device_register(&spi->dev);
if (IS_ERR(data->hwmon_dev)) {
dev_err(&spi->dev, "failed to create hwmon device\n");
err = PTR_ERR(data->hwmon_dev);
goto err_remove;
}
#ifdef CONFIG_SHARPSL_PM
the_max1111 = data;
#endif
return 0;
err_remove:
sysfs_remove_group(&spi->dev.kobj, &max1110_attr_group);
sysfs_remove_group(&spi->dev.kobj, &max1111_attr_group);
return err;
}
static int max1111_remove(struct spi_device *spi)
{
struct max1111_data *data = spi_get_drvdata(spi);
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&spi->dev.kobj, &max1110_attr_group);
sysfs_remove_group(&spi->dev.kobj, &max1111_attr_group);
mutex_destroy(&data->drvdata_lock);
return 0;
}
static const struct spi_device_id max1111_ids[] = {
{ "max1110", max1110 },
{ "max1111", max1111 },
{ "max1112", max1112 },
{ "max1113", max1113 },
{ },
};
MODULE_DEVICE_TABLE(spi, max1111_ids);
static struct spi_driver max1111_driver = {
.driver = {
.name = "max1111",
.owner = THIS_MODULE,
},
.id_table = max1111_ids,
.probe = max1111_probe,
.remove = max1111_remove,
};
module_spi_driver(max1111_driver);
MODULE_AUTHOR("Eric Miao <eric.miao@marvell.com>");
MODULE_DESCRIPTION("MAX1110/MAX1111/MAX1112/MAX1113 ADC Driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
moongtaeng/android_kernel_pantech_ef56s | drivers/mtd/nand/nand_bbt.c | 2818 | 39001 | /*
* drivers/mtd/nand_bbt.c
*
* Overview:
* Bad block table support for the NAND driver
*
* Copyright (C) 2004 Thomas Gleixner (tglx@linutronix.de)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Description:
*
* When nand_scan_bbt is called, it tries to find the bad block table
* depending on the options in the BBT descriptor(s). If no flash based BBT
* (NAND_BBT_USE_FLASH) is specified then the device is scanned for factory
* marked good / bad blocks. This information is used to create a memory BBT.
* Once a new bad block is discovered, the "factory" information is updated
* on the device.
* If a flash based BBT is specified then the function first tries to find the
* BBT on flash. If a BBT is found then the contents are read and the memory
* based BBT is created. If a mirrored BBT is selected then the mirror is
* searched too and the versions are compared. If the mirror has a greater
* version number then the mirror BBT is used to build the memory based BBT.
* If the tables are not versioned, then we "or" the bad block information.
* If one of the BBTs is out of date or does not exist it is (re)created.
* If no BBT exists at all then the device is scanned for factory marked
* good / bad blocks and the bad block tables are created.
*
* For manufacturer created BBTs like the one found on M-SYS DOC devices
* the BBT is searched and read but never created
*
* The auto generated bad block table is located in the last good blocks
* of the device. The table is mirrored, so it can be updated eventually.
* The table is marked in the OOB area with an ident pattern and a version
* number which indicates which of both tables is more up to date. If the NAND
* controller needs the complete OOB area for the ECC information then the
* option NAND_BBT_NO_OOB should be used (along with NAND_BBT_USE_FLASH, of
* course): it moves the ident pattern and the version byte into the data area
* and the OOB area will remain untouched.
*
* The table uses 2 bits per block
* 11b: block is good
* 00b: block is factory marked bad
* 01b, 10b: block is marked bad due to wear
*
* The memory bad block table uses the following scheme:
* 00b: block is good
* 01b: block is marked bad due to wear
* 10b: block is reserved (to protect the bbt area)
* 11b: block is factory marked bad
*
* Multichip devices like DOC store the bad block info per floor.
*
* Following assumptions are made:
* - bbts start at a page boundary; if autolocated, on a block boundary
* - the space necessary for a bbt in FLASH does not exceed a block boundary
*
*/
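/*
 * For illustration only (not used by the driver): with the in-memory
 * scheme above, the 2-bit state of eraseblock 'block' can be fetched as
 *
 *	state = (bbt[block >> 2] >> ((block & 0x3) << 1)) & 0x3;
 *
 * which, allowing for the doubled block numbering used internally,
 * matches the lookup done when a block is queried for badness.
 */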
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand_ecc.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
static int check_pattern_no_oob(uint8_t *buf, struct nand_bbt_descr *td)
{
int ret;
ret = memcmp(buf, td->pattern, td->len);
if (!ret)
return ret;
return -1;
}
/**
* check_pattern - [GENERIC] check if a pattern is in the buffer
* @buf: the buffer to search
* @len: the length of buffer to search
* @paglen: the pagelength
* @td: search pattern descriptor
*
* Check for a pattern at the given place. Used to search bad block tables and
* good / bad block identifiers. If the SCAN_EMPTY option is set then check, if
* all bytes except the pattern area contain 0xff.
*/
static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td)
{
int i, end = 0;
uint8_t *p = buf;
if (td->options & NAND_BBT_NO_OOB)
return check_pattern_no_oob(buf, td);
end = paglen + td->offs;
if (td->options & NAND_BBT_SCANEMPTY) {
for (i = 0; i < end; i++) {
if (p[i] != 0xff)
return -1;
}
}
p += end;
/* Compare the pattern */
if (memcmp(p, td->pattern, td->len))
return -1;
if (td->options & NAND_BBT_SCANEMPTY) {
p += td->len;
end += td->len;
for (i = end; i < len; i++) {
if (*p++ != 0xff)
return -1;
}
}
return 0;
}
/**
* check_short_pattern - [GENERIC] check if a pattern is in the buffer
* @buf: the buffer to search
* @td: search pattern descriptor
*
* Check for a pattern at the given place. Used to search bad block tables and
* good / bad block identifiers. Same as check_pattern, but no optional empty
* check.
*/
static int check_short_pattern(uint8_t *buf, struct nand_bbt_descr *td)
{
int i;
uint8_t *p = buf;
/* Compare the pattern */
for (i = 0; i < td->len; i++) {
if (p[td->offs + i] != td->pattern[i])
return -1;
}
return 0;
}
/**
* add_marker_len - compute the length of the marker in data area
* @td: BBT descriptor used for computation
*
* The length will be 0 if the marker is located in OOB area.
*/
static u32 add_marker_len(struct nand_bbt_descr *td)
{
u32 len;
if (!(td->options & NAND_BBT_NO_OOB))
return 0;
len = td->len;
if (td->options & NAND_BBT_VERSION)
len++;
return len;
}
/**
* read_bbt - [GENERIC] Read the bad block table starting from page
* @mtd: MTD device structure
* @buf: temporary buffer
* @page: the starting page
* @num: the number of bbt descriptors to read
* @td: the bbt description table
* @offs: offset in the memory table
*
* Read the bad block table starting from page.
*/
static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
struct nand_bbt_descr *td, int offs)
{
int res, ret = 0, i, j, act = 0;
struct nand_chip *this = mtd->priv;
size_t retlen, len, totlen;
loff_t from;
int bits = td->options & NAND_BBT_NRBITS_MSK;
uint8_t msk = (uint8_t)((1 << bits) - 1);
u32 marker_len;
int reserved_block_code = td->reserved_block_code;
totlen = (num * bits) >> 3;
marker_len = add_marker_len(td);
from = ((loff_t)page) << this->page_shift;
while (totlen) {
len = min(totlen, (size_t)(1 << this->bbt_erase_shift));
if (marker_len) {
/*
* In case the BBT marker is not in the OOB area it
* will be just in the first page.
*/
len -= marker_len;
from += marker_len;
marker_len = 0;
}
res = mtd_read(mtd, from, len, &retlen, buf);
if (res < 0) {
if (mtd_is_eccerr(res)) {
pr_info("nand_bbt: ECC error in BBT at "
"0x%012llx\n", from & ~mtd->writesize);
return res;
} else if (mtd_is_bitflip(res)) {
pr_info("nand_bbt: corrected error in BBT at "
"0x%012llx\n", from & ~mtd->writesize);
ret = res;
} else {
pr_info("nand_bbt: error reading BBT\n");
return res;
}
}
/* Analyse data */
for (i = 0; i < len; i++) {
uint8_t dat = buf[i];
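/*
 * Each byte packs 8 / bits table entries; msk isolates one entry at a
 * time, and an all-ones entry (tmp == msk) marks a good block.
 */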
for (j = 0; j < 8; j += bits, act += 2) {
uint8_t tmp = (dat >> j) & msk;
if (tmp == msk)
continue;
if (reserved_block_code && (tmp == reserved_block_code)) {
pr_info("nand_read_bbt: reserved block at 0x%012llx\n",
(loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
this->bbt[offs + (act >> 3)] |= 0x2 << (act & 0x06);
mtd->ecc_stats.bbtblocks++;
continue;
}
/*
* Leave it for now; once it has matured we can
* move this message to pr_debug.
*/
pr_info("nand_read_bbt: bad block at 0x%012llx\n",
(loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
/* Factory marked bad or worn out? */
if (tmp == 0)
this->bbt[offs + (act >> 3)] |= 0x3 << (act & 0x06);
else
this->bbt[offs + (act >> 3)] |= 0x1 << (act & 0x06);
mtd->ecc_stats.badblocks++;
}
}
totlen -= len;
from += len;
}
return ret;
}
/**
* read_abs_bbt - [GENERIC] Read the bad block table starting at a given page
* @mtd: MTD device structure
* @buf: temporary buffer
* @td: descriptor for the bad block table
* @chip: read the table for a specific chip, -1 read all chips; applies only if
* NAND_BBT_PERCHIP option is set
*
* Read the bad block table for all chips starting at a given page. We assume
* that the bbt bits are in consecutive order.
*/
static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td, int chip)
{
struct nand_chip *this = mtd->priv;
int res = 0, i;
if (td->options & NAND_BBT_PERCHIP) {
int offs = 0;
for (i = 0; i < this->numchips; i++) {
if (chip == -1 || chip == i)
res = read_bbt(mtd, buf, td->pages[i],
this->chipsize >> this->bbt_erase_shift,
td, offs);
if (res)
return res;
offs += this->chipsize >> (this->bbt_erase_shift + 2);
}
} else {
res = read_bbt(mtd, buf, td->pages[0],
mtd->size >> this->bbt_erase_shift, td, 0);
if (res)
return res;
}
return 0;
}
/* BBT marker is in the first page, no OOB */
static int scan_read_raw_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
struct nand_bbt_descr *td)
{
size_t retlen;
size_t len;
len = td->len;
if (td->options & NAND_BBT_VERSION)
len++;
return mtd_read(mtd, offs, len, &retlen, buf);
}
/* Scan read raw data from flash */
static int scan_read_raw_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
size_t len)
{
struct mtd_oob_ops ops;
int res;
ops.mode = MTD_OPS_RAW;
ops.ooboffs = 0;
ops.ooblen = mtd->oobsize;
while (len > 0) {
ops.datbuf = buf;
ops.len = min(len, (size_t)mtd->writesize);
ops.oobbuf = buf + ops.len;
res = mtd_read_oob(mtd, offs, &ops);
if (res)
return res;
buf += mtd->oobsize + mtd->writesize;
len -= mtd->writesize;
}
return 0;
}
static int scan_read_raw(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
size_t len, struct nand_bbt_descr *td)
{
if (td->options & NAND_BBT_NO_OOB)
return scan_read_raw_data(mtd, buf, offs, td);
else
return scan_read_raw_oob(mtd, buf, offs, len);
}
/* Scan write data with oob to flash */
static int scan_write_bbt(struct mtd_info *mtd, loff_t offs, size_t len,
uint8_t *buf, uint8_t *oob)
{
struct mtd_oob_ops ops;
ops.mode = MTD_OPS_PLACE_OOB;
ops.ooboffs = 0;
ops.ooblen = mtd->oobsize;
ops.datbuf = buf;
ops.oobbuf = oob;
ops.len = len;
return mtd_write_oob(mtd, offs, &ops);
}
static u32 bbt_get_ver_offs(struct mtd_info *mtd, struct nand_bbt_descr *td)
{
u32 ver_offs = td->veroffs;
if (!(td->options & NAND_BBT_NO_OOB))
ver_offs += mtd->writesize;
return ver_offs;
}
/**
* read_abs_bbts - [GENERIC] Read the bad block table(s) for all chips starting at a given page
* @mtd: MTD device structure
* @buf: temporary buffer
* @td: descriptor for the bad block table
* @md: descriptor for the bad block table mirror
*
* Read the bad block table(s) for all chips starting at a given page. We
* assume that the bbt bits are in consecutive order.
*/
static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
struct nand_bbt_descr *td, struct nand_bbt_descr *md)
{
struct nand_chip *this = mtd->priv;
/* Read the primary version, if available */
if (td->options & NAND_BBT_VERSION) {
scan_read_raw(mtd, buf, (loff_t)td->pages[0] << this->page_shift,
mtd->writesize, td);
td->version[0] = buf[bbt_get_ver_offs(mtd, td)];
pr_info("Bad block table at page %d, version 0x%02X\n",
td->pages[0], td->version[0]);
}
/* Read the mirror version, if available */
if (md && (md->options & NAND_BBT_VERSION)) {
scan_read_raw(mtd, buf, (loff_t)md->pages[0] << this->page_shift,
mtd->writesize, td);
md->version[0] = buf[bbt_get_ver_offs(mtd, md)];
pr_info("Bad block table at page %d, version 0x%02X\n",
md->pages[0], md->version[0]);
}
return 1;
}
/* Scan a given block full */
static int scan_block_full(struct mtd_info *mtd, struct nand_bbt_descr *bd,
loff_t offs, uint8_t *buf, size_t readlen,
int scanlen, int len)
{
int ret, j;
ret = scan_read_raw_oob(mtd, buf, offs, readlen);
/* Ignore ECC errors when checking for BBM */
if (ret && !mtd_is_bitflip_or_eccerr(ret))
return ret;
for (j = 0; j < len; j++, buf += scanlen) {
if (check_pattern(buf, scanlen, mtd->writesize, bd))
return 1;
}
return 0;
}
/* Scan a given block partially */
static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
loff_t offs, uint8_t *buf, int len)
{
struct mtd_oob_ops ops;
int j, ret;
ops.ooblen = mtd->oobsize;
ops.oobbuf = buf;
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.mode = MTD_OPS_PLACE_OOB;
for (j = 0; j < len; j++) {
/*
* Read the full oob until read_oob is fixed to handle single
* byte reads for 16 bit buswidth.
*/
ret = mtd_read_oob(mtd, offs, &ops);
/* Ignore ECC errors when checking for BBM */
if (ret && !mtd_is_bitflip_or_eccerr(ret))
return ret;
if (check_short_pattern(buf, bd))
return 1;
offs += mtd->writesize;
}
return 0;
}
/**
* create_bbt - [GENERIC] Create a bad block table by scanning the device
* @mtd: MTD device structure
* @buf: temporary buffer
* @bd: descriptor for the good/bad block search pattern
* @chip: create the table for a specific chip, -1 read all chips; applies only
* if NAND_BBT_PERCHIP option is set
*
* Create a bad block table by scanning the device for the given good/bad block
* identify pattern.
*/
static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
struct nand_bbt_descr *bd, int chip)
{
struct nand_chip *this = mtd->priv;
int i, numblocks, len, scanlen;
int startblock;
loff_t from;
size_t readlen;
pr_info("Scanning device for bad blocks\n");
if (bd->options & NAND_BBT_SCANALLPAGES)
len = 1 << (this->bbt_erase_shift - this->page_shift);
else if (bd->options & NAND_BBT_SCAN2NDPAGE)
len = 2;
else
len = 1;
if (!(bd->options & NAND_BBT_SCANEMPTY)) {
/* We need only read few bytes from the OOB area */
scanlen = 0;
readlen = bd->len;
} else {
/* Full page content should be read */
scanlen = mtd->writesize + mtd->oobsize;
readlen = len * mtd->writesize;
}
if (chip == -1) {
/*
* Note that numblocks is 2 * (real numblocks) here, see i+=2
* below as it makes shifting and masking less painful
*/
numblocks = mtd->size >> (this->bbt_erase_shift - 1);
startblock = 0;
from = 0;
} else {
if (chip >= this->numchips) {
pr_warn("create_bbt(): chipnr (%d) > available chips (%d)\n",
chip + 1, this->numchips);
return -EINVAL;
}
numblocks = this->chipsize >> (this->bbt_erase_shift - 1);
startblock = chip * numblocks;
numblocks += startblock;
from = (loff_t)startblock << (this->bbt_erase_shift - 1);
}
if (this->bbt_options & NAND_BBT_SCANLASTPAGE)
from += mtd->erasesize - (mtd->writesize * len);
for (i = startblock; i < numblocks;) {
int ret;
BUG_ON(bd->options & NAND_BBT_NO_OOB);
if (bd->options & NAND_BBT_SCANALLPAGES)
ret = scan_block_full(mtd, bd, from, buf, readlen,
scanlen, len);
else
ret = scan_block_fast(mtd, bd, from, buf, len);
if (ret < 0)
return ret;
if (ret) {
this->bbt[i >> 3] |= 0x03 << (i & 0x6);
pr_warn("Bad eraseblock %d at 0x%012llx\n",
i >> 1, (unsigned long long)from);
mtd->ecc_stats.badblocks++;
}
i += 2;
from += (1 << this->bbt_erase_shift);
}
return 0;
}
/**
* search_bbt - [GENERIC] scan the device for a specific bad block table
* @mtd: MTD device structure
* @buf: temporary buffer
* @td: descriptor for the bad block table
*
* Read the bad block table by searching for a given ident pattern. Search is
* performed either from the beginning up or from the end of the device
* downwards. The search starts always at the start of a block. If the option
* NAND_BBT_PERCHIP is given, each chip is searched for a bbt, which contains
* the bad block information of this chip. This is necessary to provide support
* for certain DOC devices.
*
* The bbt ident pattern resides in the oob area of the first page in a block.
*/
static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td)
{
struct nand_chip *this = mtd->priv;
int i, chips;
int bits, startblock, block, dir;
int scanlen = mtd->writesize + mtd->oobsize;
int bbtblocks;
int blocktopage = this->bbt_erase_shift - this->page_shift;
/* Search direction top -> down? */
if (td->options & NAND_BBT_LASTBLOCK) {
startblock = (mtd->size >> this->bbt_erase_shift) - 1;
dir = -1;
} else {
startblock = 0;
dir = 1;
}
/* Do we have a bbt per chip? */
if (td->options & NAND_BBT_PERCHIP) {
chips = this->numchips;
bbtblocks = this->chipsize >> this->bbt_erase_shift;
startblock &= bbtblocks - 1;
} else {
chips = 1;
bbtblocks = mtd->size >> this->bbt_erase_shift;
}
/* Number of bits for each erase block in the bbt */
bits = td->options & NAND_BBT_NRBITS_MSK;
for (i = 0; i < chips; i++) {
/* Reset version information */
td->version[i] = 0;
td->pages[i] = -1;
/* Scan the maximum number of blocks */
for (block = 0; block < td->maxblocks; block++) {
int actblock = startblock + dir * block;
loff_t offs = (loff_t)actblock << this->bbt_erase_shift;
/* Read first page */
scan_read_raw(mtd, buf, offs, mtd->writesize, td);
if (!check_pattern(buf, scanlen, mtd->writesize, td)) {
td->pages[i] = actblock << blocktopage;
if (td->options & NAND_BBT_VERSION) {
offs = bbt_get_ver_offs(mtd, td);
td->version[i] = buf[offs];
}
break;
}
}
startblock += this->chipsize >> this->bbt_erase_shift;
}
	/* Check if we found a bbt for each requested chip */
for (i = 0; i < chips; i++) {
if (td->pages[i] == -1)
pr_warn("Bad block table not found for chip %d\n", i);
else
pr_info("Bad block table found at page %d, version "
"0x%02X\n", td->pages[i], td->version[i]);
}
return 0;
}
/**
* search_read_bbts - [GENERIC] scan the device for bad block table(s)
* @mtd: MTD device structure
* @buf: temporary buffer
* @td: descriptor for the bad block table
* @md: descriptor for the bad block table mirror
*
* Search and read the bad block table(s).
*/
static int search_read_bbts(struct mtd_info *mtd, uint8_t * buf, struct nand_bbt_descr *td, struct nand_bbt_descr *md)
{
/* Search the primary table */
search_bbt(mtd, buf, td);
/* Search the mirror table */
if (md)
search_bbt(mtd, buf, md);
/* Force result check */
return 1;
}
/**
* write_bbt - [GENERIC] (Re)write the bad block table
* @mtd: MTD device structure
* @buf: temporary buffer
* @td: descriptor for the bad block table
* @md: descriptor for the bad block table mirror
* @chipsel: selector for a specific chip, -1 for all
*
* (Re)write the bad block table.
*/
static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
struct nand_bbt_descr *td, struct nand_bbt_descr *md,
int chipsel)
{
struct nand_chip *this = mtd->priv;
struct erase_info einfo;
int i, j, res, chip = 0;
int bits, startblock, dir, page, offs, numblocks, sft, sftmsk;
int nrchips, bbtoffs, pageoffs, ooboffs;
uint8_t msk[4];
uint8_t rcode = td->reserved_block_code;
size_t retlen, len = 0;
loff_t to;
struct mtd_oob_ops ops;
ops.ooblen = mtd->oobsize;
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.mode = MTD_OPS_PLACE_OOB;
if (!rcode)
rcode = 0xff;
/* Write bad block table per chip rather than per device? */
if (td->options & NAND_BBT_PERCHIP) {
numblocks = (int)(this->chipsize >> this->bbt_erase_shift);
/* Full device write or specific chip? */
if (chipsel == -1) {
nrchips = this->numchips;
} else {
nrchips = chipsel + 1;
chip = chipsel;
}
} else {
numblocks = (int)(mtd->size >> this->bbt_erase_shift);
nrchips = 1;
}
/* Loop through the chips */
for (; chip < nrchips; chip++) {
/*
		 * If there was already a version of the table, reuse the page.
		 * This applies to absolute placement too, as we have the
* page nr. in td->pages.
*/
if (td->pages[chip] != -1) {
page = td->pages[chip];
goto write;
}
/*
* Automatic placement of the bad block table. Search direction
* top -> down?
*/
if (td->options & NAND_BBT_LASTBLOCK) {
startblock = numblocks * (chip + 1) - 1;
dir = -1;
} else {
startblock = chip * numblocks;
dir = 1;
}
for (i = 0; i < td->maxblocks; i++) {
int block = startblock + dir * i;
			/* Check if the block is bad */
switch ((this->bbt[block >> 2] >>
(2 * (block & 0x03))) & 0x03) {
case 0x01:
case 0x03:
continue;
}
page = block <<
(this->bbt_erase_shift - this->page_shift);
			/* Check if the block is used by the mirror table */
if (!md || md->pages[chip] != page)
goto write;
}
pr_err("No space left to write bad block table\n");
return -ENOSPC;
write:
/* Set up shift count and masks for the flash table */
bits = td->options & NAND_BBT_NRBITS_MSK;
msk[2] = ~rcode;
switch (bits) {
case 1: sft = 3; sftmsk = 0x07; msk[0] = 0x00; msk[1] = 0x01;
msk[3] = 0x01;
break;
case 2: sft = 2; sftmsk = 0x06; msk[0] = 0x00; msk[1] = 0x01;
msk[3] = 0x03;
break;
case 4: sft = 1; sftmsk = 0x04; msk[0] = 0x00; msk[1] = 0x0C;
msk[3] = 0x0f;
break;
case 8: sft = 0; sftmsk = 0x00; msk[0] = 0x00; msk[1] = 0x0F;
msk[3] = 0xff;
break;
default: return -EINVAL;
}
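		/*
		 * Illustrative note restating the table above: the flash
		 * table is preset to 0xff and msk[state] selects the bits
		 * to clear per block, so good blocks (state 0) stay
		 * all-ones. msk[2] = ~rcode makes a reserved block come
		 * out as exactly rcode in the 8-bit layout, since
		 * 0xff & ~(~rcode) == rcode; with the default rcode of
		 * 0xff reserved blocks are simply recorded as good.
		 */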
bbtoffs = chip * (numblocks >> 2);
to = ((loff_t)page) << this->page_shift;
/* Must we save the block contents? */
if (td->options & NAND_BBT_SAVECONTENT) {
/* Make it block aligned */
to &= ~((loff_t)((1 << this->bbt_erase_shift) - 1));
len = 1 << this->bbt_erase_shift;
res = mtd_read(mtd, to, len, &retlen, buf);
if (res < 0) {
if (retlen != len) {
pr_info("nand_bbt: error reading block "
"for writing the bad block table\n");
return res;
}
pr_warn("nand_bbt: ECC error while reading "
"block for writing bad block table\n");
}
/* Read oob data */
ops.ooblen = (len >> this->page_shift) * mtd->oobsize;
ops.oobbuf = &buf[len];
res = mtd_read_oob(mtd, to + mtd->writesize, &ops);
if (res < 0 || ops.oobretlen != ops.ooblen)
goto outerr;
/* Calc the byte offset in the buffer */
pageoffs = page - (int)(to >> this->page_shift);
offs = pageoffs << this->page_shift;
/* Preset the bbt area with 0xff */
memset(&buf[offs], 0xff, (size_t)(numblocks >> sft));
ooboffs = len + (pageoffs * mtd->oobsize);
} else if (td->options & NAND_BBT_NO_OOB) {
ooboffs = 0;
offs = td->len;
/* The version byte */
if (td->options & NAND_BBT_VERSION)
offs++;
/* Calc length */
len = (size_t)(numblocks >> sft);
len += offs;
/* Make it page aligned! */
len = ALIGN(len, mtd->writesize);
/* Preset the buffer with 0xff */
memset(buf, 0xff, len);
			/* Pattern is located at the beginning of the first page */
memcpy(buf, td->pattern, td->len);
} else {
/* Calc length */
len = (size_t)(numblocks >> sft);
/* Make it page aligned! */
len = ALIGN(len, mtd->writesize);
/* Preset the buffer with 0xff */
memset(buf, 0xff, len +
(len >> this->page_shift)* mtd->oobsize);
offs = 0;
ooboffs = len;
/* Pattern is located in oob area of first page */
memcpy(&buf[ooboffs + td->offs], td->pattern, td->len);
}
if (td->options & NAND_BBT_VERSION)
buf[ooboffs + td->veroffs] = td->version[chip];
/* Walk through the memory table */
for (i = 0; i < numblocks;) {
uint8_t dat;
dat = this->bbt[bbtoffs + (i >> 2)];
for (j = 0; j < 4; j++, i++) {
int sftcnt = (i << (3 - sft)) & sftmsk;
/* Do not store the reserved bbt blocks! */
buf[offs + (i >> sft)] &=
~(msk[dat & 0x03] << sftcnt);
dat >>= 2;
}
}
memset(&einfo, 0, sizeof(einfo));
einfo.mtd = mtd;
einfo.addr = to;
einfo.len = 1 << this->bbt_erase_shift;
res = nand_erase_nand(mtd, &einfo, 1);
if (res < 0)
goto outerr;
res = scan_write_bbt(mtd, to, len, buf,
td->options & NAND_BBT_NO_OOB ? NULL :
&buf[len]);
if (res < 0)
goto outerr;
pr_info("Bad block table written to 0x%012llx, version 0x%02X\n",
(unsigned long long)to, td->version[chip]);
/* Mark it as used */
td->pages[chip] = page;
}
return 0;
outerr:
pr_warn("nand_bbt: error while writing bad block table %d\n", res);
return res;
}
/**
* nand_memory_bbt - [GENERIC] create a memory based bad block table
* @mtd: MTD device structure
* @bd: descriptor for the good/bad block search pattern
*
* The function creates a memory based bbt by scanning the device for
* manufacturer / software marked good / bad blocks.
*/
static inline int nand_memory_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
{
struct nand_chip *this = mtd->priv;
bd->options &= ~NAND_BBT_SCANEMPTY;
return create_bbt(mtd, this->buffers->databuf, bd, -1);
}
/**
* check_create - [GENERIC] create and write bbt(s) if necessary
* @mtd: MTD device structure
* @buf: temporary buffer
* @bd: descriptor for the good/bad block search pattern
*
* The function checks the results of the previous call to read_bbt and creates
* / updates the bbt(s) if necessary. Creation is necessary if no bbt was found
* for the chip/device. Update is necessary if one of the tables is missing or
* the version nr. of one table is less than the other.
*/
static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd)
{
int i, chips, writeops, create, chipsel, res, res2;
struct nand_chip *this = mtd->priv;
struct nand_bbt_descr *td = this->bbt_td;
struct nand_bbt_descr *md = this->bbt_md;
struct nand_bbt_descr *rd, *rd2;
/* Do we have a bbt per chip? */
if (td->options & NAND_BBT_PERCHIP)
chips = this->numchips;
else
chips = 1;
for (i = 0; i < chips; i++) {
writeops = 0;
create = 0;
rd = NULL;
rd2 = NULL;
res = res2 = 0;
/* Per chip or per device? */
chipsel = (td->options & NAND_BBT_PERCHIP) ? i : -1;
/* Mirrored table available? */
if (md) {
if (td->pages[i] == -1 && md->pages[i] == -1) {
create = 1;
writeops = 0x03;
} else if (td->pages[i] == -1) {
rd = md;
writeops = 0x01;
} else if (md->pages[i] == -1) {
rd = td;
writeops = 0x02;
} else if (td->version[i] == md->version[i]) {
rd = td;
if (!(td->options & NAND_BBT_VERSION))
rd2 = md;
} else if (((int8_t)(td->version[i] - md->version[i])) > 0) {
rd = td;
writeops = 0x02;
} else {
rd = md;
writeops = 0x01;
}
} else {
if (td->pages[i] == -1) {
create = 1;
writeops = 0x01;
} else {
rd = td;
}
}
if (create) {
/* Create the bad block table by scanning the device? */
if (!(td->options & NAND_BBT_CREATE))
continue;
/* Create the table in memory by scanning the chip(s) */
if (!(this->bbt_options & NAND_BBT_CREATE_EMPTY))
create_bbt(mtd, buf, bd, chipsel);
td->version[i] = 1;
if (md)
md->version[i] = 1;
}
/* Read back first? */
if (rd) {
res = read_abs_bbt(mtd, buf, rd, chipsel);
if (mtd_is_eccerr(res)) {
/* Mark table as invalid */
rd->pages[i] = -1;
rd->version[i] = 0;
i--;
continue;
}
}
/* If they weren't versioned, read both */
if (rd2) {
res2 = read_abs_bbt(mtd, buf, rd2, chipsel);
if (mtd_is_eccerr(res2)) {
/* Mark table as invalid */
rd2->pages[i] = -1;
rd2->version[i] = 0;
i--;
continue;
}
}
/* Scrub the flash table(s)? */
if (mtd_is_bitflip(res) || mtd_is_bitflip(res2))
writeops = 0x03;
/* Update version numbers before writing */
if (md) {
td->version[i] = max(td->version[i], md->version[i]);
md->version[i] = td->version[i];
}
/* Write the bad block table to the device? */
if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) {
res = write_bbt(mtd, buf, td, md, chipsel);
if (res < 0)
return res;
}
/* Write the mirror bad block table to the device? */
if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) {
res = write_bbt(mtd, buf, md, td, chipsel);
if (res < 0)
return res;
}
}
return 0;
}
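/*
 * Illustrative note restating the logic above: writeops is a bitmask in
 * which 0x01 means the main table needs (re)writing and 0x02 the mirror;
 * 0x03 rewrites both, and is also forced when a bitflip was seen while
 * reading either table back.
 */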
/**
 * mark_bbt_region - [GENERIC] mark the bad block table regions
* @mtd: MTD device structure
* @td: bad block table descriptor
*
* The bad block table regions are marked as "bad" to prevent accidental
* erasures / writes. The regions are identified by the mark 0x02.
*/
static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
{
struct nand_chip *this = mtd->priv;
int i, j, chips, block, nrblocks, update;
uint8_t oldval, newval;
/* Do we have a bbt per chip? */
if (td->options & NAND_BBT_PERCHIP) {
chips = this->numchips;
nrblocks = (int)(this->chipsize >> this->bbt_erase_shift);
} else {
chips = 1;
nrblocks = (int)(mtd->size >> this->bbt_erase_shift);
}
for (i = 0; i < chips; i++) {
if ((td->options & NAND_BBT_ABSPAGE) ||
!(td->options & NAND_BBT_WRITE)) {
if (td->pages[i] == -1)
continue;
block = td->pages[i] >> (this->bbt_erase_shift - this->page_shift);
block <<= 1;
oldval = this->bbt[(block >> 3)];
newval = oldval | (0x2 << (block & 0x06));
this->bbt[(block >> 3)] = newval;
if ((oldval != newval) && td->reserved_block_code)
nand_update_bbt(mtd, (loff_t)block << (this->bbt_erase_shift - 1));
continue;
}
update = 0;
if (td->options & NAND_BBT_LASTBLOCK)
block = ((i + 1) * nrblocks) - td->maxblocks;
else
block = i * nrblocks;
block <<= 1;
for (j = 0; j < td->maxblocks; j++) {
oldval = this->bbt[(block >> 3)];
newval = oldval | (0x2 << (block & 0x06));
this->bbt[(block >> 3)] = newval;
if (oldval != newval)
update = 1;
block += 2;
}
/*
* If we want reserved blocks to be recorded to flash, and some
* new ones have been marked, then we need to update the stored
* bbts. This should only happen once.
*/
if (update && td->reserved_block_code)
nand_update_bbt(mtd, (loff_t)(block - 2) << (this->bbt_erase_shift - 1));
}
}
/**
* verify_bbt_descr - verify the bad block description
* @mtd: MTD device structure
* @bd: the table to verify
*
* This functions performs a few sanity checks on the bad block description
* table.
*/
static void verify_bbt_descr(struct mtd_info *mtd, struct nand_bbt_descr *bd)
{
struct nand_chip *this = mtd->priv;
u32 pattern_len;
u32 bits;
u32 table_size;
if (!bd)
return;
pattern_len = bd->len;
bits = bd->options & NAND_BBT_NRBITS_MSK;
BUG_ON((this->bbt_options & NAND_BBT_NO_OOB) &&
!(this->bbt_options & NAND_BBT_USE_FLASH));
BUG_ON(!bits);
if (bd->options & NAND_BBT_VERSION)
pattern_len++;
if (bd->options & NAND_BBT_NO_OOB) {
BUG_ON(!(this->bbt_options & NAND_BBT_USE_FLASH));
BUG_ON(!(this->bbt_options & NAND_BBT_NO_OOB));
BUG_ON(bd->offs);
if (bd->options & NAND_BBT_VERSION)
BUG_ON(bd->veroffs != bd->len);
BUG_ON(bd->options & NAND_BBT_SAVECONTENT);
}
if (bd->options & NAND_BBT_PERCHIP)
table_size = this->chipsize >> this->bbt_erase_shift;
else
table_size = mtd->size >> this->bbt_erase_shift;
table_size >>= 3;
table_size *= bits;
if (bd->options & NAND_BBT_NO_OOB)
table_size += pattern_len;
BUG_ON(table_size > (1 << this->bbt_erase_shift));
}
/**
* nand_scan_bbt - [NAND Interface] scan, find, read and maybe create bad block table(s)
* @mtd: MTD device structure
* @bd: descriptor for the good/bad block search pattern
*
 * The function checks if a bad block table is already available. If not, it
 * scans the device for manufacturer-marked good / bad blocks and writes the
 * bad block table(s) to the selected place.
*
* The bad block table memory is allocated here. It must be freed by calling
* the nand_free_bbt function.
*/
int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
{
struct nand_chip *this = mtd->priv;
int len, res = 0;
uint8_t *buf;
struct nand_bbt_descr *td = this->bbt_td;
struct nand_bbt_descr *md = this->bbt_md;
len = mtd->size >> (this->bbt_erase_shift + 2);
/*
	 * Allocate memory (2 bits per block) and clear the memory bad block
* table.
*/
this->bbt = kzalloc(len, GFP_KERNEL);
if (!this->bbt)
return -ENOMEM;
/*
	 * If no primary table descriptor is given, scan the device to build a
* memory based bad block table.
*/
if (!td) {
if ((res = nand_memory_bbt(mtd, bd))) {
pr_err("nand_bbt: can't scan flash and build the RAM-based BBT\n");
kfree(this->bbt);
this->bbt = NULL;
}
return res;
}
verify_bbt_descr(mtd, td);
verify_bbt_descr(mtd, md);
/* Allocate a temporary buffer for one eraseblock incl. oob */
len = (1 << this->bbt_erase_shift);
len += (len >> this->page_shift) * mtd->oobsize;
buf = vmalloc(len);
if (!buf) {
kfree(this->bbt);
this->bbt = NULL;
return -ENOMEM;
}
/* Is the bbt at a given page? */
if (td->options & NAND_BBT_ABSPAGE) {
res = read_abs_bbts(mtd, buf, td, md);
} else {
/* Search the bad block table using a pattern in oob */
res = search_read_bbts(mtd, buf, td, md);
}
if (res)
res = check_create(mtd, buf, bd);
/* Prevent the bbt regions from erasing / writing */
mark_bbt_region(mtd, td);
if (md)
mark_bbt_region(mtd, md);
vfree(buf);
return res;
}
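/*
 * Illustrative note, not part of the driver: the temporary buffer
 * allocated above holds one eraseblock of main data followed by the OOB
 * area of every page in that block, i.e.
 *
 *	len  = 1 << bbt_erase_shift;            eraseblock payload
 *	len += (len >> page_shift) * oobsize;   one OOB chunk per page
 *
 * which is the layout write_bbt() relies on via &buf[len].
 */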
/**
* nand_update_bbt - [NAND Interface] update bad block table(s)
* @mtd: MTD device structure
* @offs: the offset of the newly marked block
*
* The function updates the bad block table(s).
*/
int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
{
struct nand_chip *this = mtd->priv;
int len, res = 0;
int chip, chipsel;
uint8_t *buf;
struct nand_bbt_descr *td = this->bbt_td;
struct nand_bbt_descr *md = this->bbt_md;
if (!this->bbt || !td)
return -EINVAL;
/* Allocate a temporary buffer for one eraseblock incl. oob */
len = (1 << this->bbt_erase_shift);
len += (len >> this->page_shift) * mtd->oobsize;
buf = kmalloc(len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
/* Do we have a bbt per chip? */
if (td->options & NAND_BBT_PERCHIP) {
chip = (int)(offs >> this->chip_shift);
chipsel = chip;
} else {
chip = 0;
chipsel = -1;
}
td->version[chip]++;
if (md)
md->version[chip]++;
/* Write the bad block table to the device? */
if (td->options & NAND_BBT_WRITE) {
res = write_bbt(mtd, buf, td, md, chipsel);
if (res < 0)
goto out;
}
/* Write the mirror bad block table to the device? */
if (md && (md->options & NAND_BBT_WRITE)) {
res = write_bbt(mtd, buf, md, td, chipsel);
}
out:
kfree(buf);
return res;
}
/*
 * Define some generic bad / good block scan patterns which are used
* while scanning a device for factory marked good / bad blocks.
*/
static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
static uint8_t scan_agand_pattern[] = { 0x1C, 0x71, 0xC7, 0x1C, 0x71, 0xC7 };
static struct nand_bbt_descr agand_flashbased = {
.options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES,
.offs = 0x20,
.len = 6,
.pattern = scan_agand_pattern
};
/* Generic flash bbt descriptors */
static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };
static struct nand_bbt_descr bbt_main_descr = {
.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
.offs = 8,
.len = 4,
.veroffs = 12,
.maxblocks = 4,
.pattern = bbt_pattern
};
static struct nand_bbt_descr bbt_mirror_descr = {
.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
.offs = 8,
.len = 4,
.veroffs = 12,
.maxblocks = 4,
.pattern = mirror_pattern
};
static struct nand_bbt_descr bbt_main_no_bbt_descr = {
.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP
| NAND_BBT_NO_OOB,
.len = 4,
.veroffs = 4,
.maxblocks = 4,
.pattern = bbt_pattern
};
static struct nand_bbt_descr bbt_mirror_no_bbt_descr = {
.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP
| NAND_BBT_NO_OOB,
.len = 4,
.veroffs = 4,
.maxblocks = 4,
.pattern = mirror_pattern
};
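/*
 * Illustrative note derived from the descriptors above: with
 * bbt_main_descr the ident pattern "Bbt0" occupies OOB bytes 8-11 of
 * the first page of the table block and the version byte sits at OOB
 * byte 12; maxblocks = 4 restricts the table to one of the last four
 * eraseblocks of each chip. The mirror uses the reversed pattern "1tbB"
 * so the two tables can be told apart.
 */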
#define BADBLOCK_SCAN_MASK (~NAND_BBT_NO_OOB)
/**
* nand_create_badblock_pattern - [INTERN] Creates a BBT descriptor structure
* @this: NAND chip to create descriptor for
*
* This function allocates and initializes a nand_bbt_descr for BBM detection
* based on the properties of @this. The new descriptor is stored in
* this->badblock_pattern. Thus, this->badblock_pattern should be NULL when
* passed to this function.
*/
static int nand_create_badblock_pattern(struct nand_chip *this)
{
struct nand_bbt_descr *bd;
if (this->badblock_pattern) {
pr_warn("Bad block pattern already allocated; not replacing\n");
return -EINVAL;
}
bd = kzalloc(sizeof(*bd), GFP_KERNEL);
if (!bd)
return -ENOMEM;
bd->options = this->bbt_options & BADBLOCK_SCAN_MASK;
bd->offs = this->badblockpos;
bd->len = (this->options & NAND_BUSWIDTH_16) ? 2 : 1;
bd->pattern = scan_ff_pattern;
bd->options |= NAND_BBT_DYNAMICSTRUCT;
this->badblock_pattern = bd;
return 0;
}
/**
* nand_default_bbt - [NAND Interface] Select a default bad block table for the device
* @mtd: MTD device structure
*
* This function selects the default bad block table support for the device and
* calls the nand_scan_bbt function.
*/
int nand_default_bbt(struct mtd_info *mtd)
{
struct nand_chip *this = mtd->priv;
/*
* Default for AG-AND. We must use a flash based bad block table as the
* devices have factory marked _good_ blocks. Erasing those blocks
* leads to loss of the good / bad information, so we _must_ store this
* information in a good / bad table during startup.
*/
if (this->options & NAND_IS_AND) {
/* Use the default pattern descriptors */
if (!this->bbt_td) {
this->bbt_td = &bbt_main_descr;
this->bbt_md = &bbt_mirror_descr;
}
this->bbt_options |= NAND_BBT_USE_FLASH;
return nand_scan_bbt(mtd, &agand_flashbased);
}
/* Is a flash based bad block table requested? */
if (this->bbt_options & NAND_BBT_USE_FLASH) {
/* Use the default pattern descriptors */
if (!this->bbt_td) {
if (this->bbt_options & NAND_BBT_NO_OOB) {
this->bbt_td = &bbt_main_no_bbt_descr;
this->bbt_md = &bbt_mirror_no_bbt_descr;
} else {
this->bbt_td = &bbt_main_descr;
this->bbt_md = &bbt_mirror_descr;
}
}
} else {
this->bbt_td = NULL;
this->bbt_md = NULL;
}
if (!this->badblock_pattern)
nand_create_badblock_pattern(this);
return nand_scan_bbt(mtd, this->badblock_pattern);
}
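/*
 * Illustrative sketch, hypothetical board code rather than part of this
 * file: a driver that wants the flash-based tables with the default
 * Bbt0/1tbB descriptors would typically set
 *
 *	this->bbt_options |= NAND_BBT_USE_FLASH;
 *
 * before nand_scan() completes, so that nand_default_bbt() above picks
 * bbt_main_descr and bbt_mirror_descr.
 */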
/**
* nand_isbad_bbt - [NAND Interface] Check if a block is bad
* @mtd: MTD device structure
* @offs: offset in the device
* @allowbbt: allow access to bad block table region
*/
int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
{
struct nand_chip *this = mtd->priv;
int block;
uint8_t res;
/* Get block number * 2 */
block = (int)(offs >> (this->bbt_erase_shift - 1));
res = (this->bbt[block >> 3] >> (block & 0x06)) & 0x03;
pr_debug("nand_isbad_bbt(): bbt info for offs 0x%08x: "
"(block %d) 0x%02x\n",
(unsigned int)offs, block >> 1, res);
switch ((int)res) {
case 0x00:
return 0;
case 0x01:
return 1;
case 0x02:
return allowbbt ? 0 : 1;
}
return 1;
}
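/*
 * Illustrative sketch, not part of the driver: a standalone decode of
 * the packed 2-bit state used by nand_isbad_bbt() above. The returned
 * codes match its switch: 0x00 good, 0x01 bad, 0x02 reserved (bbt
 * area), anything else treated as bad. bbt_sketch_get_state() is a
 * made-up helper name used only for this example.
 */
static inline uint8_t bbt_sketch_get_state(const uint8_t *bbt, int block)
{
	int i = block << 1;	/* two bits per erase block */

	return (bbt[i >> 3] >> (i & 0x06)) & 0x03;
}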
EXPORT_SYMBOL(nand_scan_bbt);
EXPORT_SYMBOL(nand_default_bbt);
| gpl-2.0 |
zarboz/Ville-Z_Blackout_edition | drivers/ata/pata_rdc.c | 3586 | 10956 | /*
* pata_rdc - Driver for later RDC PATA controllers
*
* This is actually a driver for hardware meeting
* INCITS 370-2004 (1510D): ATA Host Adapter Standards
*
* Based on ata_piix.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gfp.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/dmi.h>
#define DRV_NAME "pata_rdc"
#define DRV_VERSION "0.01"
struct rdc_host_priv {
u32 saved_iocfg;
};
/**
* rdc_pata_cable_detect - Probe host controller cable detect info
* @ap: Port for which cable detect info is desired
*
* Read 80c cable indicator from ATA PCI device's PCI config
* register. This register is normally set by firmware (BIOS).
*
* LOCKING:
* None (inherited from caller).
*/
static int rdc_pata_cable_detect(struct ata_port *ap)
{
struct rdc_host_priv *hpriv = ap->host->private_data;
u8 mask;
/* check BIOS cable detect results */
mask = 0x30 << (2 * ap->port_no);
if ((hpriv->saved_iocfg & mask) == 0)
return ATA_CBL_PATA40;
return ATA_CBL_PATA80;
}
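/*
 * Illustrative note, following from the mask arithmetic above: the 80c
 * indicator for port 0 lives in IOCFG bits 4-5 (mask 0x30) and for
 * port 1 in bits 6-7 (mask 0xc0); an all-zero field means firmware saw
 * only a 40-wire cable.
 */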
/**
* rdc_pata_prereset - prereset for PATA host controller
* @link: Target link
* @deadline: deadline jiffies for the operation
*
* LOCKING:
* None (inherited from caller).
*/
static int rdc_pata_prereset(struct ata_link *link, unsigned long deadline)
{
struct ata_port *ap = link->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
static const struct pci_bits rdc_enable_bits[] = {
{ 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */
{ 0x43U, 1U, 0x80UL, 0x80UL }, /* port 1 */
};
if (!pci_test_config_bits(pdev, &rdc_enable_bits[ap->port_no]))
return -ENOENT;
return ata_sff_prereset(link, deadline);
}
/**
* rdc_set_piomode - Initialize host controller PATA PIO timings
* @ap: Port whose timings we are configuring
 * @adev: Device whose timings we are configuring
*
* Set PIO mode for device, in host controller PCI config space.
*
* LOCKING:
* None (inherited from caller).
*/
static void rdc_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
unsigned int pio = adev->pio_mode - XFER_PIO_0;
struct pci_dev *dev = to_pci_dev(ap->host->dev);
unsigned int is_slave = (adev->devno != 0);
unsigned int master_port= ap->port_no ? 0x42 : 0x40;
unsigned int slave_port = 0x44;
u16 master_data;
u8 slave_data;
u8 udma_enable;
int control = 0;
static const /* ISP RTC */
u8 timings[][2] = { { 0, 0 },
{ 0, 0 },
{ 1, 0 },
{ 2, 1 },
{ 2, 3 }, };
if (pio >= 2)
control |= 1; /* TIME1 enable */
if (ata_pio_need_iordy(adev))
control |= 2; /* IE enable */
if (adev->class == ATA_DEV_ATA)
control |= 4; /* PPE enable */
/* PIO configuration clears DTE unconditionally. It will be
* programmed in set_dmamode which is guaranteed to be called
* after set_piomode if any DMA mode is available.
*/
pci_read_config_word(dev, master_port, &master_data);
if (is_slave) {
/* clear TIME1|IE1|PPE1|DTE1 */
master_data &= 0xff0f;
/* Enable SITRE (separate slave timing register) */
master_data |= 0x4000;
/* enable PPE1, IE1 and TIME1 as needed */
master_data |= (control << 4);
pci_read_config_byte(dev, slave_port, &slave_data);
slave_data &= (ap->port_no ? 0x0f : 0xf0);
/* Load the timing nibble for this slave */
slave_data |= ((timings[pio][0] << 2) | timings[pio][1])
<< (ap->port_no ? 4 : 0);
} else {
/* clear ISP|RCT|TIME0|IE0|PPE0|DTE0 */
master_data &= 0xccf0;
/* Enable PPE, IE and TIME as appropriate */
master_data |= control;
/* load ISP and RCT */
master_data |=
(timings[pio][0] << 12) |
(timings[pio][1] << 8);
}
pci_write_config_word(dev, master_port, master_data);
if (is_slave)
pci_write_config_byte(dev, slave_port, slave_data);
/* Ensure the UDMA bit is off - it will be turned back on if
UDMA is selected */
pci_read_config_byte(dev, 0x48, &udma_enable);
udma_enable &= ~(1 << (2 * ap->port_no + adev->devno));
pci_write_config_byte(dev, 0x48, udma_enable);
}
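/*
 * Worked example, illustrative only: for PIO4 on an ATA disk on the
 * master device, control becomes TIME1|IE|PPE (0x7, IORDY being
 * required for PIO4) and timings[4] contributes ISP/RCT as
 * master_data |= (2 << 12) | (3 << 8), all merged into the 0x40/0x42
 * timing register.
 */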
/**
 * rdc_set_dmamode - Initialize host controller PATA DMA timings
* @ap: Port whose timings we are configuring
* @adev: Drive in question
*
* Set UDMA mode for device, in host controller PCI config space.
*
* LOCKING:
* None (inherited from caller).
*/
static void rdc_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *dev = to_pci_dev(ap->host->dev);
u8 master_port = ap->port_no ? 0x42 : 0x40;
u16 master_data;
u8 speed = adev->dma_mode;
int devid = adev->devno + 2 * ap->port_no;
u8 udma_enable = 0;
static const /* ISP RTC */
u8 timings[][2] = { { 0, 0 },
{ 0, 0 },
{ 1, 0 },
{ 2, 1 },
{ 2, 3 }, };
pci_read_config_word(dev, master_port, &master_data);
pci_read_config_byte(dev, 0x48, &udma_enable);
if (speed >= XFER_UDMA_0) {
unsigned int udma = adev->dma_mode - XFER_UDMA_0;
u16 udma_timing;
u16 ideconf;
int u_clock, u_speed;
/*
* UDMA is handled by a combination of clock switching and
* selection of dividers
*
* Handy rule: Odd modes are UDMATIMx 01, even are 02
* except UDMA0 which is 00
*/
u_speed = min(2 - (udma & 1), udma);
if (udma == 5)
			u_clock = 0x1000; /* 100 MHz */
		else if (udma > 2)
			u_clock = 1; /* 66 MHz */
		else
			u_clock = 0; /* 33 MHz */
udma_enable |= (1 << devid);
/* Load the CT/RP selection */
pci_read_config_word(dev, 0x4A, &udma_timing);
udma_timing &= ~(3 << (4 * devid));
udma_timing |= u_speed << (4 * devid);
pci_write_config_word(dev, 0x4A, udma_timing);
		/* Select a 33/66/100 MHz clock */
pci_read_config_word(dev, 0x54, &ideconf);
ideconf &= ~(0x1001 << devid);
ideconf |= u_clock << devid;
pci_write_config_word(dev, 0x54, ideconf);
} else {
/*
* MWDMA is driven by the PIO timings. We must also enable
* IORDY unconditionally along with TIME1. PPE has already
* been set when the PIO timing was set.
*/
unsigned int mwdma = adev->dma_mode - XFER_MW_DMA_0;
unsigned int control;
u8 slave_data;
const unsigned int needed_pio[3] = {
XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
};
int pio = needed_pio[mwdma] - XFER_PIO_0;
control = 3; /* IORDY|TIME1 */
/* If the drive MWDMA is faster than it can do PIO then
we must force PIO into PIO0 */
if (adev->pio_mode < needed_pio[mwdma])
/* Enable DMA timing only */
control |= 8; /* PIO cycles in PIO0 */
if (adev->devno) { /* Slave */
master_data &= 0xFF4F; /* Mask out IORDY|TIME1|DMAONLY */
master_data |= control << 4;
pci_read_config_byte(dev, 0x44, &slave_data);
slave_data &= (ap->port_no ? 0x0f : 0xf0);
/* Load the matching timing */
slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (ap->port_no ? 4 : 0);
pci_write_config_byte(dev, 0x44, slave_data);
} else { /* Master */
master_data &= 0xCCF4; /* Mask out IORDY|TIME1|DMAONLY
and master timing bits */
master_data |= control;
master_data |=
(timings[pio][0] << 12) |
(timings[pio][1] << 8);
}
udma_enable &= ~(1 << devid);
pci_write_config_word(dev, master_port, master_data);
}
pci_write_config_byte(dev, 0x48, udma_enable);
}
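/*
 * Worked example, illustrative only: UDMA5 yields
 * u_speed = min(2 - (5 & 1), 5) = 1 for the UDMATIM field and
 * u_clock = 0x1000, so "ideconf |= u_clock << devid" sets the
 * per-device 100 MHz clock bit after the ~(0x1001 << devid) mask has
 * cleared both the 66 MHz and 100 MHz selections.
 */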
static struct ata_port_operations rdc_pata_ops = {
.inherits = &ata_bmdma32_port_ops,
.cable_detect = rdc_pata_cable_detect,
.set_piomode = rdc_set_piomode,
.set_dmamode = rdc_set_dmamode,
.prereset = rdc_pata_prereset,
};
static struct ata_port_info rdc_port_info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA12_ONLY,
.udma_mask = ATA_UDMA5,
.port_ops = &rdc_pata_ops,
};
static struct scsi_host_template rdc_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
/**
* rdc_init_one - Register PIIX ATA PCI device with kernel services
* @pdev: PCI device to register
* @ent: Entry in rdc_pci_tbl matching with @pdev
*
* Called from kernel PCI layer. We probe for combined mode (sigh),
* and then hand over control to libata, for it to do the rest.
*
* LOCKING:
* Inherited from PCI layer (may sleep).
*
* RETURNS:
* Zero on success, or -ERRNO value.
*/
static int __devinit rdc_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
static int printed_version;
struct device *dev = &pdev->dev;
struct ata_port_info port_info[2];
const struct ata_port_info *ppi[] = { &port_info[0], &port_info[1] };
unsigned long port_flags;
struct ata_host *host;
struct rdc_host_priv *hpriv;
int rc;
if (!printed_version++)
dev_printk(KERN_DEBUG, &pdev->dev,
"version " DRV_VERSION "\n");
port_info[0] = rdc_port_info;
port_info[1] = rdc_port_info;
port_flags = port_info[0].flags;
/* enable device and prepare host */
rc = pcim_enable_device(pdev);
if (rc)
return rc;
hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
if (!hpriv)
return -ENOMEM;
	/* Save IOCFG; this will be used for cable detection, quirk
* detection and restoration on detach.
*/
pci_read_config_dword(pdev, 0x54, &hpriv->saved_iocfg);
rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
if (rc)
return rc;
host->private_data = hpriv;
pci_intx(pdev, 1);
host->flags |= ATA_HOST_PARALLEL_SCAN;
pci_set_master(pdev);
return ata_pci_sff_activate_host(host, ata_bmdma_interrupt, &rdc_sht);
}
static void rdc_remove_one(struct pci_dev *pdev)
{
struct ata_host *host = dev_get_drvdata(&pdev->dev);
struct rdc_host_priv *hpriv = host->private_data;
pci_write_config_dword(pdev, 0x54, hpriv->saved_iocfg);
ata_pci_remove_one(pdev);
}
static const struct pci_device_id rdc_pci_tbl[] = {
{ PCI_DEVICE(0x17F3, 0x1011), },
{ PCI_DEVICE(0x17F3, 0x1012), },
{ } /* terminate list */
};
static struct pci_driver rdc_pci_driver = {
.name = DRV_NAME,
.id_table = rdc_pci_tbl,
.probe = rdc_init_one,
.remove = rdc_remove_one,
};
static int __init rdc_init(void)
{
return pci_register_driver(&rdc_pci_driver);
}
static void __exit rdc_exit(void)
{
pci_unregister_driver(&rdc_pci_driver);
}
module_init(rdc_init);
module_exit(rdc_exit);
MODULE_AUTHOR("Alan Cox (based on ata_piix)");
MODULE_DESCRIPTION("SCSI low-level driver for RDC PATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, rdc_pci_tbl);
MODULE_VERSION(DRV_VERSION);
| gpl-2.0 |
alwold/rpi-linux | drivers/s390/cio/crw.c | 4354 | 4147 | /*
* Channel report handling code
*
* Copyright IBM Corp. 2000, 2009
* Author(s): Ingo Adlung <adlung@de.ibm.com>,
* Martin Schwidefsky <schwidefsky@de.ibm.com>,
* Cornelia Huck <cornelia.huck@de.ibm.com>,
* Heiko Carstens <heiko.carstens@de.ibm.com>,
*/
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <asm/crw.h>
#include <asm/ctl_reg.h>
static DEFINE_MUTEX(crw_handler_mutex);
static crw_handler_t crw_handlers[NR_RSCS];
static atomic_t crw_nr_req = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(crw_handler_wait_q);
/**
* crw_register_handler() - register a channel report word handler
* @rsc: reporting source code to handle
* @handler: handler to be registered
*
* Returns %0 on success and a negative error value otherwise.
*/
int crw_register_handler(int rsc, crw_handler_t handler)
{
int rc = 0;
if ((rsc < 0) || (rsc >= NR_RSCS))
return -EINVAL;
mutex_lock(&crw_handler_mutex);
if (crw_handlers[rsc])
rc = -EBUSY;
else
crw_handlers[rsc] = handler;
mutex_unlock(&crw_handler_mutex);
return rc;
}
/**
* crw_unregister_handler() - unregister a channel report word handler
* @rsc: reporting source code to handle
*/
void crw_unregister_handler(int rsc)
{
if ((rsc < 0) || (rsc >= NR_RSCS))
return;
mutex_lock(&crw_handler_mutex);
crw_handlers[rsc] = NULL;
mutex_unlock(&crw_handler_mutex);
}
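/*
 * Illustrative sketch, a hypothetical consumer rather than part of this
 * file: a subsystem handling subchannel CRWs would register a callback
 * for its reporting source code and honour the overflow flag by
 * re-evaluating all of its objects:
 *
 *	static void my_crw_handler(struct crw *crw0, struct crw *crw1,
 *				   int overflow)
 *	{
 *		if (overflow || !crw0)
 *			return;
 *		handle_rsid(crw0->rsid);
 *	}
 *
 *	rc = crw_register_handler(CRW_RSC_SCH, my_crw_handler);
 *
 * my_crw_handler() and handle_rsid() are made-up names; CRW_RSC_SCH and
 * the crw_handler_t signature come from asm/crw.h.
 */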
/*
* Retrieve CRWs and call function to handle event.
*/
static int crw_collect_info(void *unused)
{
struct crw crw[2];
int ccode, signal;
unsigned int chain;
repeat:
signal = wait_event_interruptible(crw_handler_wait_q,
atomic_read(&crw_nr_req) > 0);
if (unlikely(signal))
atomic_inc(&crw_nr_req);
chain = 0;
while (1) {
crw_handler_t handler;
if (unlikely(chain > 1)) {
struct crw tmp_crw;
printk(KERN_WARNING"%s: Code does not support more "
"than two chained crws; please report to "
"linux390@de.ibm.com!\n", __func__);
ccode = stcrw(&tmp_crw);
printk(KERN_WARNING"%s: crw reports slct=%d, oflw=%d, "
"chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
__func__, tmp_crw.slct, tmp_crw.oflw,
tmp_crw.chn, tmp_crw.rsc, tmp_crw.anc,
tmp_crw.erc, tmp_crw.rsid);
printk(KERN_WARNING"%s: This was crw number %x in the "
"chain\n", __func__, chain);
if (ccode != 0)
break;
chain = tmp_crw.chn ? chain + 1 : 0;
continue;
}
ccode = stcrw(&crw[chain]);
if (ccode != 0)
break;
printk(KERN_DEBUG "crw_info : CRW reports slct=%d, oflw=%d, "
"chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
crw[chain].slct, crw[chain].oflw, crw[chain].chn,
crw[chain].rsc, crw[chain].anc, crw[chain].erc,
crw[chain].rsid);
/* Check for overflows. */
if (crw[chain].oflw) {
int i;
pr_debug("%s: crw overflow detected!\n", __func__);
mutex_lock(&crw_handler_mutex);
for (i = 0; i < NR_RSCS; i++) {
if (crw_handlers[i])
crw_handlers[i](NULL, NULL, 1);
}
mutex_unlock(&crw_handler_mutex);
chain = 0;
continue;
}
if (crw[0].chn && !chain) {
chain++;
continue;
}
mutex_lock(&crw_handler_mutex);
handler = crw_handlers[crw[chain].rsc];
if (handler)
handler(&crw[0], chain ? &crw[1] : NULL, 0);
mutex_unlock(&crw_handler_mutex);
/* chain is always 0 or 1 here. */
chain = crw[chain].chn ? chain + 1 : 0;
}
if (atomic_dec_and_test(&crw_nr_req))
wake_up(&crw_handler_wait_q);
goto repeat;
return 0;
}
void crw_handle_channel_report(void)
{
atomic_inc(&crw_nr_req);
wake_up(&crw_handler_wait_q);
}
void crw_wait_for_channel_report(void)
{
crw_handle_channel_report();
wait_event(crw_handler_wait_q, atomic_read(&crw_nr_req) == 0);
}
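/*
 * Illustrative note on the pattern above: machine-check code calls
 * crw_handle_channel_report() from interrupt context to bump crw_nr_req
 * and wake the kmcheck thread, while crw_wait_for_channel_report()
 * additionally blocks until the thread has drained every pending
 * request and crw_nr_req has dropped back to zero.
 */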
/*
* Machine checks for the channel subsystem must be enabled
* after the channel subsystem is initialized
*/
static int __init crw_machine_check_init(void)
{
struct task_struct *task;
task = kthread_run(crw_collect_info, NULL, "kmcheck");
if (IS_ERR(task))
return PTR_ERR(task);
ctl_set_bit(14, 28); /* enable channel report MCH */
return 0;
}
device_initcall(crw_machine_check_init);
| gpl-2.0 |