repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
Smartandroidtech/platform_kernel_lge_hammerhead | arch/powerpc/platforms/ps3/interrupt.c | 7601 | 20118 | /*
* PS3 interrupt routines.
*
* Copyright (C) 2006 Sony Computer Entertainment Inc.
* Copyright 2006 Sony Corp.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/lv1call.h>
#include <asm/smp.h>
#include "platform.h"
#if defined(DEBUG)
#define DBG udbg_printf
#define FAIL udbg_printf
#else
#define DBG pr_devel
#define FAIL pr_debug
#endif
/**
* struct ps3_bmp - a per cpu irq status and mask bitmap structure
* @status: 256 bit status bitmap indexed by plug
* @unused_1: Alignment
* @mask: 256 bit mask bitmap indexed by plug
* @unused_2: Alignment
*
* The HV maintains per SMT thread mappings of HV outlet to HV plug on
* behalf of the guest. These mappings are implemented as 256 bit guest
* supplied bitmaps indexed by plug number. The addresses of the bitmaps
* are registered with the HV through lv1_configure_irq_state_bitmap().
* The HV requires that the 512 bits of status + mask not cross a page
* boundary. PS3_BMP_MINALIGN is used to define this minimal 64 byte
* alignment.
*
* The HV supports 256 plugs per thread, assigned as {0..255}, for a total
* of 512 plugs supported on a processor. To simplify the logic this
* implementation equates HV plug value to Linux virq value, constrains each
* interrupt to have a system wide unique plug number, and limits the range
* of the plug values to map into the first dword of the bitmaps. This
* gives a usable range of plug values of {NUM_ISA_INTERRUPTS..63}. Note
* that there is no constraint on how many in this set an individual thread
* can acquire.
*
* The mask is declared as unsigned long so we can use set/clear_bit on it.
*/
#define PS3_BMP_MINALIGN 64
struct ps3_bmp {
struct {
u64 status;
u64 unused_1[3];
unsigned long mask;
u64 unused_2[3];
};
};
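/*
 * Illustrative sketch (not part of the original driver): plug numbers
 * index the first 64-bit word of the bitmaps from the most significant
 * bit down, which is why the chip ops below manipulate bit (63 - virq).
 * A hypothetical helper showing the mapping:
 *
 *	static inline u64 ps3_plug_to_bit(unsigned int plug)
 *	{
 *		return 1UL << (63 - plug); // plug 0 -> MSB, plug 63 -> LSB
 *	}
 */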
/**
* struct ps3_private - a per cpu data structure
* @bmp: ps3_bmp structure
* @bmp_lock: Synchronize access to bmp.
* @ipi_debug_brk_mask: Mask for debug break IPIs
* @ppe_id: HV logical_ppe_id
* @thread_id: HV thread_id
* @ipi_mask: Mask of IPI virqs
*/
struct ps3_private {
struct ps3_bmp bmp __attribute__ ((aligned (PS3_BMP_MINALIGN)));
spinlock_t bmp_lock;
u64 ppe_id;
u64 thread_id;
unsigned long ipi_debug_brk_mask;
unsigned long ipi_mask;
};
static DEFINE_PER_CPU(struct ps3_private, ps3_private);
/**
* ps3_chip_mask - Mask an interrupt in ps3_bmp.
* @d: The irq_data of the assigned Linux virq.
*
* Clears the virq's bit in ps3_bmp.mask and calls
* lv1_did_update_interrupt_mask().
*/
static void ps3_chip_mask(struct irq_data *d)
{
struct ps3_private *pd = irq_data_get_irq_chip_data(d);
unsigned long flags;
DBG("%s:%d: thread_id %llu, virq %d\n", __func__, __LINE__,
pd->thread_id, d->irq);
local_irq_save(flags);
clear_bit(63 - d->irq, &pd->bmp.mask);
lv1_did_update_interrupt_mask(pd->ppe_id, pd->thread_id);
local_irq_restore(flags);
}
/**
* ps3_chip_unmask - Unmask an interrupt in ps3_bmp.
* @d: The irq_data of the assigned Linux virq.
*
* Sets the virq's bit in ps3_bmp.mask and calls
* lv1_did_update_interrupt_mask().
*/
static void ps3_chip_unmask(struct irq_data *d)
{
struct ps3_private *pd = irq_data_get_irq_chip_data(d);
unsigned long flags;
DBG("%s:%d: thread_id %llu, virq %d\n", __func__, __LINE__,
pd->thread_id, d->irq);
local_irq_save(flags);
set_bit(63 - d->irq, &pd->bmp.mask);
lv1_did_update_interrupt_mask(pd->ppe_id, pd->thread_id);
local_irq_restore(flags);
}
/**
* ps3_chip_eoi - HV end-of-interrupt.
* @d: The irq_data of the assigned Linux virq.
*
* Calls lv1_end_of_interrupt_ext().
*/
static void ps3_chip_eoi(struct irq_data *d)
{
const struct ps3_private *pd = irq_data_get_irq_chip_data(d);
/* non-IPIs are EOIed here. */
if (!test_bit(63 - d->irq, &pd->ipi_mask))
lv1_end_of_interrupt_ext(pd->ppe_id, pd->thread_id, d->irq);
}
/**
* ps3_irq_chip - Represents the ps3_bmp as a Linux struct irq_chip.
*/
static struct irq_chip ps3_irq_chip = {
.name = "ps3",
.irq_mask = ps3_chip_mask,
.irq_unmask = ps3_chip_unmask,
.irq_eoi = ps3_chip_eoi,
};
/**
* ps3_virq_setup - virq related setup.
* @cpu: enum ps3_cpu_binding indicating the cpu the interrupt should be
* serviced on.
* @outlet: The HV outlet from the various create outlet routines.
* @virq: The assigned Linux virq.
*
* Calls irq_create_mapping() to get a virq and sets the chip data to
* ps3_private data.
*/
static int ps3_virq_setup(enum ps3_cpu_binding cpu, unsigned long outlet,
unsigned int *virq)
{
int result;
struct ps3_private *pd;
/* This defines the default interrupt distribution policy. */
if (cpu == PS3_BINDING_CPU_ANY)
cpu = 0;
pd = &per_cpu(ps3_private, cpu);
*virq = irq_create_mapping(NULL, outlet);
if (*virq == NO_IRQ) {
FAIL("%s:%d: irq_create_mapping failed: outlet %lu\n",
__func__, __LINE__, outlet);
result = -ENOMEM;
goto fail_create;
}
DBG("%s:%d: outlet %lu => cpu %u, virq %u\n", __func__, __LINE__,
outlet, cpu, *virq);
result = irq_set_chip_data(*virq, pd);
if (result) {
FAIL("%s:%d: irq_set_chip_data failed\n",
__func__, __LINE__);
goto fail_set;
}
ps3_chip_mask(irq_get_irq_data(*virq));
return result;
fail_set:
irq_dispose_mapping(*virq);
fail_create:
return result;
}
/**
* ps3_virq_destroy - virq related teardown.
* @virq: The assigned Linux virq.
*
* Clears chip data and calls irq_dispose_mapping() for the virq.
*/
static int ps3_virq_destroy(unsigned int virq)
{
const struct ps3_private *pd = irq_get_chip_data(virq);
DBG("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__,
__LINE__, pd->ppe_id, pd->thread_id, virq);
irq_set_chip_data(virq, NULL);
irq_dispose_mapping(virq);
DBG("%s:%d <-\n", __func__, __LINE__);
return 0;
}
/**
* ps3_irq_plug_setup - Generic outlet and virq related setup.
* @cpu: enum ps3_cpu_binding indicating the cpu the interrupt should be
* serviced on.
* @outlet: The HV outlet from the various create outlet routines.
* @virq: The assigned Linux virq.
*
* Sets up virq and connects the irq plug.
*/
int ps3_irq_plug_setup(enum ps3_cpu_binding cpu, unsigned long outlet,
unsigned int *virq)
{
int result;
struct ps3_private *pd;
result = ps3_virq_setup(cpu, outlet, virq);
if (result) {
FAIL("%s:%d: ps3_virq_setup failed\n", __func__, __LINE__);
goto fail_setup;
}
pd = irq_get_chip_data(*virq);
/* Binds outlet to cpu + virq. */
result = lv1_connect_irq_plug_ext(pd->ppe_id, pd->thread_id, *virq,
outlet, 0);
if (result) {
FAIL("%s:%d: lv1_connect_irq_plug_ext failed: %s\n",
__func__, __LINE__, ps3_result(result));
result = -EPERM;
goto fail_connect;
}
return result;
fail_connect:
ps3_virq_destroy(*virq);
fail_setup:
return result;
}
EXPORT_SYMBOL_GPL(ps3_irq_plug_setup);
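/*
 * Usage sketch (hypothetical caller, for illustration only): a driver
 * that has obtained an HV outlet would typically pair the plug setup
 * with request_irq(); my_isr and my_dev here are assumed names, not
 * part of this file:
 *
 *	unsigned int virq;
 *	int result = ps3_irq_plug_setup(PS3_BINDING_CPU_ANY, outlet, &virq);
 *
 *	if (!result)
 *		result = request_irq(virq, my_isr, 0, "my-dev", my_dev);
 */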
/**
* ps3_irq_plug_destroy - Generic outlet and virq related teardown.
* @virq: The assigned Linux virq.
*
* Disconnects the irq plug and tears down virq.
* Do not call for system bus event interrupts setup with
* ps3_sb_event_receive_port_setup().
*/
int ps3_irq_plug_destroy(unsigned int virq)
{
int result;
const struct ps3_private *pd = irq_get_chip_data(virq);
DBG("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__,
__LINE__, pd->ppe_id, pd->thread_id, virq);
ps3_chip_mask(irq_get_irq_data(virq));
result = lv1_disconnect_irq_plug_ext(pd->ppe_id, pd->thread_id, virq);
if (result)
FAIL("%s:%d: lv1_disconnect_irq_plug_ext failed: %s\n",
__func__, __LINE__, ps3_result(result));
ps3_virq_destroy(virq);
return result;
}
EXPORT_SYMBOL_GPL(ps3_irq_plug_destroy);
/**
* ps3_event_receive_port_setup - Setup an event receive port.
* @cpu: enum ps3_cpu_binding indicating the cpu the interrupt should be
* serviced on.
* @virq: The assigned Linux virq.
*
* The virq can be used with lv1_connect_interrupt_event_receive_port() to
* arrange to receive interrupts from system-bus devices, or with
* ps3_send_event_locally() to signal events.
*/
int ps3_event_receive_port_setup(enum ps3_cpu_binding cpu, unsigned int *virq)
{
int result;
u64 outlet;
result = lv1_construct_event_receive_port(&outlet);
if (result) {
FAIL("%s:%d: lv1_construct_event_receive_port failed: %s\n",
__func__, __LINE__, ps3_result(result));
*virq = NO_IRQ;
return result;
}
result = ps3_irq_plug_setup(cpu, outlet, virq);
BUG_ON(result);
return result;
}
EXPORT_SYMBOL_GPL(ps3_event_receive_port_setup);
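/*
 * Usage sketch (hypothetical, for illustration only): a caller can set up
 * a receive port and later signal it locally; error handling is elided:
 *
 *	unsigned int virq;
 *
 *	ps3_event_receive_port_setup(PS3_BINDING_CPU_ANY, &virq);
 *	ps3_send_event_locally(virq);
 */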
/**
* ps3_event_receive_port_destroy - Destroy an event receive port.
* @virq: The assigned Linux virq.
*
* Since ps3_event_receive_port_destroy destroys the receive port outlet,
* SB devices need to call lv1_disconnect_interrupt_event_receive_port()
* before this.
*/
int ps3_event_receive_port_destroy(unsigned int virq)
{
int result;
DBG(" -> %s:%d virq %u\n", __func__, __LINE__, virq);
ps3_chip_mask(irq_get_irq_data(virq));
result = lv1_destruct_event_receive_port(virq_to_hw(virq));
if (result)
FAIL("%s:%d: lv1_destruct_event_receive_port failed: %s\n",
__func__, __LINE__, ps3_result(result));
/*
* Don't call ps3_virq_destroy() here since ps3_smp_cleanup_cpu()
* calls from interrupt context (smp_call_function) when kexecing.
*/
DBG(" <- %s:%d\n", __func__, __LINE__);
return result;
}
int ps3_send_event_locally(unsigned int virq)
{
return lv1_send_event_locally(virq_to_hw(virq));
}
/**
* ps3_sb_event_receive_port_setup - Setup a system bus event receive port.
* @cpu: enum ps3_cpu_binding indicating the cpu the interrupt should be
* serviced on.
* @dev: The system bus device instance.
* @virq: The assigned Linux virq.
*
* An event irq represents a virtual device interrupt. The interrupt_id
* corresponds to the software interrupt number.
*/
int ps3_sb_event_receive_port_setup(struct ps3_system_bus_device *dev,
enum ps3_cpu_binding cpu, unsigned int *virq)
{
/* this should go in system-bus.c */
int result;
result = ps3_event_receive_port_setup(cpu, virq);
if (result)
return result;
result = lv1_connect_interrupt_event_receive_port(dev->bus_id,
dev->dev_id, virq_to_hw(*virq), dev->interrupt_id);
if (result) {
FAIL("%s:%d: lv1_connect_interrupt_event_receive_port"
" failed: %s\n", __func__, __LINE__,
ps3_result(result));
ps3_event_receive_port_destroy(*virq);
*virq = NO_IRQ;
return result;
}
DBG("%s:%d: interrupt_id %u, virq %u\n", __func__, __LINE__,
dev->interrupt_id, *virq);
return 0;
}
EXPORT_SYMBOL(ps3_sb_event_receive_port_setup);
int ps3_sb_event_receive_port_destroy(struct ps3_system_bus_device *dev,
unsigned int virq)
{
/* this should go in system-bus.c */
int result;
DBG(" -> %s:%d: interrupt_id %u, virq %u\n", __func__, __LINE__,
dev->interrupt_id, virq);
result = lv1_disconnect_interrupt_event_receive_port(dev->bus_id,
dev->dev_id, virq_to_hw(virq), dev->interrupt_id);
if (result)
FAIL("%s:%d: lv1_disconnect_interrupt_event_receive_port"
" failed: %s\n", __func__, __LINE__,
ps3_result(result));
result = ps3_event_receive_port_destroy(virq);
BUG_ON(result);
/*
* ps3_event_receive_port_destroy() destroys the IRQ plug,
* so don't call ps3_irq_plug_destroy() here.
*/
result = ps3_virq_destroy(virq);
BUG_ON(result);
DBG(" <- %s:%d\n", __func__, __LINE__);
return result;
}
EXPORT_SYMBOL(ps3_sb_event_receive_port_destroy);
/**
* ps3_io_irq_setup - Setup a system bus io irq.
* @cpu: enum ps3_cpu_binding indicating the cpu the interrupt should be
* serviced on.
* @interrupt_id: The device interrupt id read from the system repository.
* @virq: The assigned Linux virq.
*
* An io irq represents a non-virtualized device interrupt. interrupt_id
* corresponds to the interrupt number of the interrupt controller.
*/
int ps3_io_irq_setup(enum ps3_cpu_binding cpu, unsigned int interrupt_id,
unsigned int *virq)
{
int result;
u64 outlet;
result = lv1_construct_io_irq_outlet(interrupt_id, &outlet);
if (result) {
FAIL("%s:%d: lv1_construct_io_irq_outlet failed: %s\n",
__func__, __LINE__, ps3_result(result));
return result;
}
result = ps3_irq_plug_setup(cpu, outlet, virq);
BUG_ON(result);
return result;
}
EXPORT_SYMBOL_GPL(ps3_io_irq_setup);
int ps3_io_irq_destroy(unsigned int virq)
{
int result;
unsigned long outlet = virq_to_hw(virq);
ps3_chip_mask(irq_get_irq_data(virq));
/*
* lv1_destruct_io_irq_outlet() will destroy the IRQ plug,
* so call ps3_irq_plug_destroy() first.
*/
result = ps3_irq_plug_destroy(virq);
BUG_ON(result);
result = lv1_destruct_io_irq_outlet(outlet);
if (result)
FAIL("%s:%d: lv1_destruct_io_irq_outlet failed: %s\n",
__func__, __LINE__, ps3_result(result));
return result;
}
EXPORT_SYMBOL_GPL(ps3_io_irq_destroy);
/**
* ps3_vuart_irq_setup - Setup the system virtual uart virq.
* @cpu: enum ps3_cpu_binding indicating the cpu the interrupt should be
* serviced on.
* @virt_addr_bmp: The caller supplied virtual uart interrupt bitmap.
* @virq: The assigned Linux virq.
*
* The system supports only a single virtual uart, so multiple calls without
* freeing the interrupt will return a wrong state error.
*/
int ps3_vuart_irq_setup(enum ps3_cpu_binding cpu, void* virt_addr_bmp,
unsigned int *virq)
{
int result;
u64 outlet;
u64 lpar_addr;
BUG_ON(!is_kernel_addr((u64)virt_addr_bmp));
lpar_addr = ps3_mm_phys_to_lpar(__pa(virt_addr_bmp));
result = lv1_configure_virtual_uart_irq(lpar_addr, &outlet);
if (result) {
FAIL("%s:%d: lv1_configure_virtual_uart_irq failed: %s\n",
__func__, __LINE__, ps3_result(result));
return result;
}
result = ps3_irq_plug_setup(cpu, outlet, virq);
BUG_ON(result);
return result;
}
EXPORT_SYMBOL_GPL(ps3_vuart_irq_setup);
int ps3_vuart_irq_destroy(unsigned int virq)
{
int result;
ps3_chip_mask(irq_get_irq_data(virq));
result = lv1_deconfigure_virtual_uart_irq();
if (result) {
FAIL("%s:%d: lv1_configure_virtual_uart_irq failed: %s\n",
__func__, __LINE__, ps3_result(result));
return result;
}
result = ps3_irq_plug_destroy(virq);
BUG_ON(result);
return result;
}
EXPORT_SYMBOL_GPL(ps3_vuart_irq_destroy);
/**
* ps3_spe_irq_setup - Setup an spe virq.
* @cpu: enum ps3_cpu_binding indicating the cpu the interrupt should be
* serviced on.
* @spe_id: The spe_id returned from lv1_construct_logical_spe().
* @class: The spe interrupt class {0,1,2}.
* @virq: The assigned Linux virq.
*
*/
int ps3_spe_irq_setup(enum ps3_cpu_binding cpu, unsigned long spe_id,
unsigned int class, unsigned int *virq)
{
int result;
u64 outlet;
BUG_ON(class > 2);
result = lv1_get_spe_irq_outlet(spe_id, class, &outlet);
if (result) {
FAIL("%s:%d: lv1_get_spe_irq_outlet failed: %s\n",
__func__, __LINE__, ps3_result(result));
return result;
}
result = ps3_irq_plug_setup(cpu, outlet, virq);
BUG_ON(result);
return result;
}
int ps3_spe_irq_destroy(unsigned int virq)
{
int result;
ps3_chip_mask(irq_get_irq_data(virq));
result = ps3_irq_plug_destroy(virq);
BUG_ON(result);
return result;
}
#define PS3_INVALID_OUTLET ((irq_hw_number_t)-1)
#define PS3_PLUG_MAX 63
#if defined(DEBUG)
static void _dump_64_bmp(const char *header, const u64 *p, unsigned cpu,
const char* func, int line)
{
pr_debug("%s:%d: %s %u {%04llx_%04llx_%04llx_%04llx}\n",
func, line, header, cpu,
*p >> 48, (*p >> 32) & 0xffff, (*p >> 16) & 0xffff,
*p & 0xffff);
}
static void __maybe_unused _dump_256_bmp(const char *header,
const u64 *p, unsigned cpu, const char* func, int line)
{
pr_debug("%s:%d: %s %u {%016llx:%016llx:%016llx:%016llx}\n",
func, line, header, cpu, p[0], p[1], p[2], p[3]);
}
#define dump_bmp(_x) _dump_bmp(_x, __func__, __LINE__)
static void _dump_bmp(struct ps3_private* pd, const char* func, int line)
{
unsigned long flags;
spin_lock_irqsave(&pd->bmp_lock, flags);
_dump_64_bmp("stat", &pd->bmp.status, pd->thread_id, func, line);
_dump_64_bmp("mask", (u64*)&pd->bmp.mask, pd->thread_id, func, line);
spin_unlock_irqrestore(&pd->bmp_lock, flags);
}
#define dump_mask(_x) _dump_mask(_x, __func__, __LINE__)
static void __maybe_unused _dump_mask(struct ps3_private *pd,
const char* func, int line)
{
unsigned long flags;
spin_lock_irqsave(&pd->bmp_lock, flags);
_dump_64_bmp("mask", (u64*)&pd->bmp.mask, pd->thread_id, func, line);
spin_unlock_irqrestore(&pd->bmp_lock, flags);
}
#else
static void dump_bmp(struct ps3_private *pd) {}
#endif /* defined(DEBUG) */
static int ps3_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hwirq)
{
DBG("%s:%d: hwirq %lu, virq %u\n", __func__, __LINE__, hwirq,
virq);
irq_set_chip_and_handler(virq, &ps3_irq_chip, handle_fasteoi_irq);
return 0;
}
static int ps3_host_match(struct irq_domain *h, struct device_node *np)
{
/* Match all */
return 1;
}
static const struct irq_domain_ops ps3_host_ops = {
.map = ps3_host_map,
.match = ps3_host_match,
};
void __init ps3_register_ipi_debug_brk(unsigned int cpu, unsigned int virq)
{
struct ps3_private *pd = &per_cpu(ps3_private, cpu);
set_bit(63 - virq, &pd->ipi_debug_brk_mask);
DBG("%s:%d: cpu %u, virq %u, mask %lxh\n", __func__, __LINE__,
cpu, virq, pd->ipi_debug_brk_mask);
}
void __init ps3_register_ipi_irq(unsigned int cpu, unsigned int virq)
{
struct ps3_private *pd = &per_cpu(ps3_private, cpu);
set_bit(63 - virq, &pd->ipi_mask);
DBG("%s:%d: cpu %u, virq %u, ipi_mask %lxh\n", __func__, __LINE__,
cpu, virq, pd->ipi_mask);
}
static unsigned int ps3_get_irq(void)
{
struct ps3_private *pd = &__get_cpu_var(ps3_private);
u64 x = (pd->bmp.status & pd->bmp.mask);
unsigned int plug;
/* check for ipi break first to stop this cpu ASAP */
if (x & pd->ipi_debug_brk_mask)
x &= pd->ipi_debug_brk_mask;
asm volatile("cntlzd %0,%1" : "=r" (plug) : "r" (x));
plug &= 0x3f;
if (unlikely(plug == NO_IRQ)) {
DBG("%s:%d: no plug found: thread_id %llu\n", __func__,
__LINE__, pd->thread_id);
dump_bmp(&per_cpu(ps3_private, 0));
dump_bmp(&per_cpu(ps3_private, 1));
return NO_IRQ;
}
#if defined(DEBUG)
if (unlikely(plug < NUM_ISA_INTERRUPTS || plug > PS3_PLUG_MAX)) {
dump_bmp(&per_cpu(ps3_private, 0));
dump_bmp(&per_cpu(ps3_private, 1));
BUG();
}
#endif
/* IPIs are EOIed here. */
if (test_bit(63 - plug, &pd->ipi_mask))
lv1_end_of_interrupt_ext(pd->ppe_id, pd->thread_id, plug);
return plug;
}
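/*
 * Illustrative note (not in the original source): cntlzd counts leading
 * zeros of a 64-bit value, so for x != 0 the asm above is equivalent to
 * this portable sketch using a GCC builtin:
 *
 *	plug = __builtin_clzll(x) & 0x3f; // lowest plug == highest set bit
 */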
void __init ps3_init_IRQ(void)
{
int result;
unsigned cpu;
struct irq_domain *host;
host = irq_domain_add_nomap(NULL, PS3_PLUG_MAX + 1, &ps3_host_ops, NULL);
irq_set_default_host(host);
for_each_possible_cpu(cpu) {
struct ps3_private *pd = &per_cpu(ps3_private, cpu);
lv1_get_logical_ppe_id(&pd->ppe_id);
pd->thread_id = get_hard_smp_processor_id(cpu);
spin_lock_init(&pd->bmp_lock);
DBG("%s:%d: ppe_id %llu, thread_id %llu, bmp %lxh\n",
__func__, __LINE__, pd->ppe_id, pd->thread_id,
ps3_mm_phys_to_lpar(__pa(&pd->bmp)));
result = lv1_configure_irq_state_bitmap(pd->ppe_id,
pd->thread_id, ps3_mm_phys_to_lpar(__pa(&pd->bmp)));
if (result)
FAIL("%s:%d: lv1_configure_irq_state_bitmap failed:"
" %s\n", __func__, __LINE__,
ps3_result(result));
}
ppc_md.get_irq = ps3_get_irq;
}
void ps3_shutdown_IRQ(int cpu)
{
int result;
u64 ppe_id;
u64 thread_id = get_hard_smp_processor_id(cpu);
lv1_get_logical_ppe_id(&ppe_id);
result = lv1_configure_irq_state_bitmap(ppe_id, thread_id, 0);
DBG("%s:%d: lv1_configure_irq_state_bitmap (%llu:%llu/%d) %s\n", __func__,
__LINE__, ppe_id, thread_id, cpu, ps3_result(result));
}
| gpl-2.0 |
nitroglycerine33/kernel_asus_grouper | drivers/staging/vt6655/mac.c | 8369 | 44114 | /*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
*
* File: mac.c
*
* Purpose: MAC routines
*
* Author: Tevin Chen
*
* Date: May 21, 1996
*
* Functions:
* MACvReadAllRegs - Read All MAC Registers to buffer
* MACbIsRegBitsOn - Test if All test Bits On
* MACbIsRegBitsOff - Test if All test Bits Off
* MACbIsIntDisable - Test if MAC interrupt disable
* MACbyReadMultiAddr - Read Multicast Address Mask Pattern
* MACvWriteMultiAddr - Write Multicast Address Mask Pattern
* MACvSetMultiAddrByHash - Set Multicast Address Mask by Hash value
* MACvResetMultiAddrByHash - Clear Multicast Address Mask by Hash value
* MACvSetRxThreshold - Set Rx Threshold value
* MACvGetRxThreshold - Get Rx Threshold value
* MACvSetTxThreshold - Set Tx Threshold value
* MACvGetTxThreshold - Get Tx Threshold value
* MACvSetDmaLength - Set Dma Length value
* MACvGetDmaLength - Get Dma Length value
* MACvSetShortRetryLimit - Set 802.11 Short Retry limit
* MACvGetShortRetryLimit - Get 802.11 Short Retry limit
* MACvSetLongRetryLimit - Set 802.11 Long Retry limit
* MACvGetLongRetryLimit - Get 802.11 Long Retry limit
* MACvSetLoopbackMode - Set MAC Loopback Mode
* MACbIsInLoopbackMode - Test if MAC in Loopback mode
* MACvSetPacketFilter - Set MAC Address Filter
* MACvSaveContext - Save Context of MAC Registers
* MACvRestoreContext - Restore Context of MAC Registers
* MACbCompareContext - Compare if values of MAC Registers same as Context
* MACbSoftwareReset - Software Reset MAC
* MACbSafeRxOff - Turn Off MAC Rx
* MACbSafeTxOff - Turn Off MAC Tx
* MACbSafeStop - Stop MAC function
* MACbShutdown - Shut down MAC
* MACvInitialize - Initialize MAC
* MACvSetCurrRxDescAddr - Set Rx Descriptor Address
* MACvSetCurrTx0DescAddr - Set Tx0 Descriptor Address
* MACvSetCurrTx1DescAddr - Set Tx1 Descriptor Address
* MACvTimer0MicroSDelay - Micro Second Delay Loop by MAC
*
* Revision History:
* 08-22-2003 Kyle Hsu : Porting MAC functions from sim53
* 09-03-2003 Bryan YC Fan : Add MACvClearBusSusInd()& MACvEnableBusSusEn()
* 09-18-2003 Jerry Chen : Add MACvSetKeyEntry & MACvDisableKeyEntry
*
*/
#include "tmacro.h"
#include "tether.h"
#include "mac.h"
unsigned short TxRate_iwconfig; /* 2008-5-8 <add> by chester */
/*--------------------- Static Definitions -------------------------*/
//static int msglevel = MSG_LEVEL_DEBUG;
static int msglevel = MSG_LEVEL_INFO;
/*--------------------- Static Classes ----------------------------*/
/*--------------------- Static Variables --------------------------*/
/*--------------------- Static Functions --------------------------*/
/*--------------------- Export Variables --------------------------*/
/*--------------------- Export Functions --------------------------*/
/*
* Description:
* Read All MAC Registers to buffer
*
* Parameters:
* In:
* dwIoBase - Base Address for MAC
* Out:
* pbyMacRegs - buffer to read
*
* Return Value: none
*
*/
void MACvReadAllRegs (unsigned long dwIoBase, unsigned char *pbyMacRegs)
{
int ii;
// read page0 register
for (ii = 0; ii < MAC_MAX_CONTEXT_SIZE_PAGE0; ii++) {
VNSvInPortB(dwIoBase + ii, pbyMacRegs);
pbyMacRegs++;
}
MACvSelectPage1(dwIoBase);
// read page1 register
for (ii = 0; ii < MAC_MAX_CONTEXT_SIZE_PAGE1; ii++) {
VNSvInPortB(dwIoBase + ii, pbyMacRegs);
pbyMacRegs++;
}
MACvSelectPage0(dwIoBase);
}
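/*
 * Usage sketch (hypothetical, for illustration only): the destination
 * buffer must hold both register pages, since page1 is appended directly
 * after page0:
 *
 *	unsigned char abyRegs[MAC_MAX_CONTEXT_SIZE_PAGE0 +
 *			      MAC_MAX_CONTEXT_SIZE_PAGE1];
 *
 *	MACvReadAllRegs(dwIoBase, abyRegs);
 */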
/*
* Description:
* Test if all test bits on
*
* Parameters:
* In:
* dwIoBase - Base Address for MAC
* byRegOfs - Offset of MAC Register
* byTestBits - Test bits
* Out:
* none
*
* Return Value: true if all test bits On; otherwise false
*
*/
bool MACbIsRegBitsOn (unsigned long dwIoBase, unsigned char byRegOfs, unsigned char byTestBits)
{
unsigned char byData;
VNSvInPortB(dwIoBase + byRegOfs, &byData);
return (byData & byTestBits) == byTestBits;
}
/*
* Description:
* Test if all test bits off
*
* Parameters:
* In:
* dwIoBase - Base Address for MAC
* byRegOfs - Offset of MAC Register
* byTestBits - Test bits
* Out:
* none
*
* Return Value: true if all test bits Off; otherwise false
*
*/
bool MACbIsRegBitsOff (unsigned long dwIoBase, unsigned char byRegOfs, unsigned char byTestBits)
{
unsigned char byData;
VNSvInPortB(dwIoBase + byRegOfs, &byData);
return !(byData & byTestBits);
}
/*
* Description:
* Test if MAC interrupt disable
*
* Parameters:
* In:
* dwIoBase - Base Address for MAC
* Out:
* none
*
* Return Value: true if interrupt is disable; otherwise false
*
*/
bool MACbIsIntDisable (unsigned long dwIoBase)
{
unsigned long dwData;
VNSvInPortD(dwIoBase + MAC_REG_IMR, &dwData);
if (dwData != 0)
return false;
return true;
}
/*
* Description:
* Read MAC Multicast Address Mask
*
* Parameters:
* In:
* dwIoBase - Base Address for MAC
* uByteidx - Index of Mask
* Out:
* none
*
* Return Value: Mask Value read
*
*/
unsigned char MACbyReadMultiAddr (unsigned long dwIoBase, unsigned int uByteIdx)
{
unsigned char byData;
MACvSelectPage1(dwIoBase);
VNSvInPortB(dwIoBase + MAC_REG_MAR0 + uByteIdx, &byData);
MACvSelectPage0(dwIoBase);
return byData;
}
/*
* Description:
* Write MAC Multicast Address Mask
*
* Parameters:
* In:
* dwIoBase - Base Address for MAC
* uByteidx - Index of Mask
* byData - Mask Value to write
* Out:
* none
*
* Return Value: none
*
*/
void MACvWriteMultiAddr (unsigned long dwIoBase, unsigned int uByteIdx, unsigned char byData)
{
MACvSelectPage1(dwIoBase);
VNSvOutPortB(dwIoBase + MAC_REG_MAR0 + uByteIdx, byData);
MACvSelectPage0(dwIoBase);
}
/*
* Description:
* Set this hash index into multicast address register bit
*
* Parameters:
* In:
* dwIoBase - Base Address for MAC
* byHashIdx - Hash index to set
* Out:
* none
*
* Return Value: none
*
*/
void MACvSetMultiAddrByHash (unsigned long dwIoBase, unsigned char byHashIdx)
{
unsigned int uByteIdx;
unsigned char byBitMask;
unsigned char byOrgValue;
// calculate byte position
uByteIdx = byHashIdx / 8;
ASSERT(uByteIdx < 8);
// calculate bit position
byBitMask = 1;
byBitMask <<= (byHashIdx % 8);
// turn on the bit
byOrgValue = MACbyReadMultiAddr(dwIoBase, uByteIdx);
MACvWriteMultiAddr(dwIoBase, uByteIdx, (unsigned char)(byOrgValue | byBitMask));
}
/*
* Description:
* Reset this hash index into multicast address register bit
*
* Parameters:
* In:
* dwIoBase - Base Address for MAC
* byHashIdx - Hash index to clear
* Out:
* none
*
* Return Value: none
*
*/
void MACvResetMultiAddrByHash (unsigned long dwIoBase, unsigned char byHashIdx)
{
unsigned int uByteIdx;
unsigned char byBitMask;
unsigned char byOrgValue;
// calculate byte position
uByteIdx = byHashIdx / 8;
ASSERT(uByteIdx < 8);
// calculate bit position
byBitMask = 1;
byBitMask <<= (byHashIdx % 8);
// turn off the bit
byOrgValue = MACbyReadMultiAddr(dwIoBase, uByteIdx);
MACvWriteMultiAddr(dwIoBase, uByteIdx, (unsigned char)(byOrgValue & (~byBitMask)));
}
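/*
 * Worked example (illustrative, not in the original source): hash index
 * 35 selects byte 35 / 8 == 4 and bit 35 % 8 == 3, so
 * MACvSetMultiAddrByHash(dwIoBase, 35) ORs 0x08 into MAR4, and
 * MACvResetMultiAddrByHash(dwIoBase, 35) clears that same bit.
 */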
/*
* Description:
* Set Rx Threshold
*
* Parameters:
* In:
* dwIoBase - Base Address for MAC
* byThreshold - Threshold Value
* Out:
* none
*
* Return Value: none
*
*/
void MACvSetRxThreshold (unsigned long dwIoBase, unsigned char byThreshold)
{
unsigned char byOrgValue;
ASSERT(byThreshold < 4);
// set FCR0
VNSvInPortB(dwIoBase + MAC_REG_FCR0, &byOrgValue);
byOrgValue = (byOrgValue & 0xCF) | (byThreshold << 4);
VNSvOutPortB(dwIoBase + MAC_REG_FCR0, byOrgValue);
}
/*
* Description:
* Get Rx Threshold
*
* Parameters:
* In:
* dwIoBase - Base Address for MAC
* Out:
* pbyThreshold- Threshold Value Get
*
* Return Value: none
*
*/
void MACvGetRxThreshold (unsigned long dwIoBase, unsigned char *pbyThreshold)
{
// get FCR0
VNSvInPortB(dwIoBase + MAC_REG_FCR0, pbyThreshold);
*pbyThreshold = (*pbyThreshold >> 4) & 0x03;
}
/*
* Description:
* Set Tx Threshold
*
* Parameters:
* In:
* dwIoBase - Base Address for MAC
* byThreshold - Threshold Value
* Out:
* none
*
* Return Value: none
*
*/
void MACvSetTxThreshold (unsigned long dwIoBase, unsigned char byThreshold)
{
unsigned char byOrgValue;
ASSERT(byThreshold < 4);
// set FCR0
VNSvInPortB(dwIoBase + MAC_REG_FCR0, &byOrgValue);
byOrgValue = (byOrgValue & 0xF3) | (byThreshold << 2);
VNSvOutPortB(dwIoBase + MAC_REG_FCR0, byOrgValue);
}
/*
* Description:
* Get Tx Threshold
*
* Parameters:
* In:
* dwIoBase - Base Address for MAC
* Out:
* pbyThreshold- Threshold Value Get
*
* Return Value: none
*
*/
void MACvGetTxThreshold (unsigned long dwIoBase, unsigned char *pbyThreshold)
{
// get FCR0
VNSvInPortB(dwIoBase + MAC_REG_FCR0, pbyThreshold);
*pbyThreshold = (*pbyThreshold >> 2) & 0x03;
}
/*
* Description:
* Set Dma Length
*
* Parameters:
* In:
* dwIoBase - Base Address for MAC
* byDmaLength - Dma Length Value
* Out:
* none
*
* Return Value: none
*
*/
void MACvSetDmaLength (unsigned long dwIoBase, unsigned char byDmaLength)
{
unsigned char byOrgValue;
ASSERT(byDmaLength < 4);
// set FCR0
VNSvInPortB(dwIoBase + MAC_REG_FCR0, &byOrgValue);
byOrgValue = (byOrgValue & 0xFC) | byDmaLength;
VNSvOutPortB(dwIoBase + MAC_REG_FCR0, byOrgValue);
}
/*
* Description:
* Get Dma Length
*
* Parameters:
* In:
* dwIoBase - Base Address for MAC
* Out:
* pbyDmaLength- Dma Length Value Get
*
* Return Value: none
*
*/
void MACvGetDmaLength (unsigned long dwIoBase, unsigned char *pbyDmaLength)
{
// get FCR0
VNSvInPortB(dwIoBase + MAC_REG_FCR0, pbyDmaLength);
*pbyDmaLength &= 0x03;
}
/*
* Description:
* Set 802.11 Short Retry Limit
*
* Parameters:
* In:
* dwIoBase - Base Address for MAC
* byRetryLimit- Retry Limit
* Out:
* none
*
* Return Value: none
*
*/
void MACvSetShortRetryLimit (unsigned long dwIoBase, unsigned char byRetryLimit)
{
// set SRT
VNSvOutPortB(dwIoBase + MAC_REG_SRT, byRetryLimit);
}
/*
* Description:
* Get 802.11 Short Retry Limit
*
* Parameters:
* In:
* dwIoBase - Base Address for MAC
* Out:
* pbyRetryLimit - Retry Limit Get
*
* Return Value: none
*
*/
void MACvGetShortRetryLimit (unsigned long dwIoBase, unsigned char *pbyRetryLimit)
{
// get SRT
VNSvInPortB(dwIoBase + MAC_REG_SRT, pbyRetryLimit);
}
/*
* Description:
* Set 802.11 Long Retry Limit
*
* Parameters:
* In:
* dwIoBase - Base Address for MAC
* byRetryLimit- Retry Limit
* Out:
* none
*
* Return Value: none
*
*/
void MACvSetLongRetryLimit (unsigned long dwIoBase, unsigned char byRetryLimit)
{
// set LRT
VNSvOutPortB(dwIoBase + MAC_REG_LRT, byRetryLimit);
}
/*
* Description:
* Get 802.11 Long Retry Limit
*
* Parameters:
* In:
* dwIoBase - Base Address for MAC
* Out:
* pbyRetryLimit - Retry Limit Get
*
* Return Value: none
*
*/
void MACvGetLongRetryLimit (unsigned long dwIoBase, unsigned char *pbyRetryLimit)
{
// get LRT
VNSvInPortB(dwIoBase + MAC_REG_LRT, pbyRetryLimit);
}
/*
* Description:
* Set MAC Loopback mode
*
* Parameters:
* In:
* dwIoBase - Base Address for MAC
* byLoopbackMode - Loopback Mode
* Out:
* none
*
* Return Value: none
*
*/
void MACvSetLoopbackMode (unsigned long dwIoBase, unsigned char byLoopbackMode)
{
unsigned char byOrgValue;
ASSERT(byLoopbackMode < 3);
byLoopbackMode <<= 6;
// set TCR
VNSvInPortB(dwIoBase + MAC_REG_TEST, &byOrgValue);
byOrgValue = byOrgValue & 0x3F;
byOrgValue = byOrgValue | byLoopbackMode;
VNSvOutPortB(dwIoBase + MAC_REG_TEST, byOrgValue);
}
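/*
 * Illustrative note (not in the original source): the loopback mode
 * occupies bits 7:6 of the TEST register, so byLoopbackMode is expected
 * to be one of the MAC_LB_* values in {0, 1, 2} before the shift, e.g.:
 *
 *	MACvSetLoopbackMode(dwIoBase, MAC_LB_INTERNAL);
 */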
/*
* Description:
* Test if MAC in Loopback mode
*
* Parameters:
* In:
* dwIoBase - Base Address for MAC
* Out:
* none
*
* Return Value: true if in Loopback mode; otherwise false
*
*/
bool MACbIsInLoopbackMode (unsigned long dwIoBase)
{
unsigned char byOrgValue;
VNSvInPortB(dwIoBase + MAC_REG_TEST, &byOrgValue);
if (byOrgValue & (TEST_LBINT | TEST_LBEXT))
return true;
return false;
}
/*
* Description:
* Set MAC Address filter
*
* Parameters:
* In:
* dwIoBase - Base Address for MAC
* wFilterType - Filter Type
* Out:
* none
*
* Return Value: none
*
*/
void MACvSetPacketFilter (unsigned long dwIoBase, unsigned short wFilterType)
{
unsigned char byOldRCR;
unsigned char byNewRCR = 0;
// If only DIRECTED mode is set, the multicast address mask is cleared,
// but if any other mode is also set (e.g. PROMISCUOUS), the multicast
// address mask is fully opened.
if (wFilterType & PKT_TYPE_DIRECTED) {
// set multicast address to accept none
MACvSelectPage1(dwIoBase);
VNSvOutPortD(dwIoBase + MAC_REG_MAR0, 0L);
VNSvOutPortD(dwIoBase + MAC_REG_MAR0 + sizeof(unsigned long), 0L);
MACvSelectPage0(dwIoBase);
}
if (wFilterType & (PKT_TYPE_PROMISCUOUS | PKT_TYPE_ALL_MULTICAST)) {
// set multicast address to accept all
MACvSelectPage1(dwIoBase);
VNSvOutPortD(dwIoBase + MAC_REG_MAR0, 0xFFFFFFFFL);
VNSvOutPortD(dwIoBase + MAC_REG_MAR0 + sizeof(unsigned long), 0xFFFFFFFFL);
MACvSelectPage0(dwIoBase);
}
if (wFilterType & PKT_TYPE_PROMISCUOUS) {
byNewRCR |= (RCR_RXALLTYPE | RCR_UNICAST | RCR_MULTICAST | RCR_BROADCAST);
byNewRCR &= ~RCR_BSSID;
}
if (wFilterType & (PKT_TYPE_ALL_MULTICAST | PKT_TYPE_MULTICAST))
byNewRCR |= RCR_MULTICAST;
if (wFilterType & PKT_TYPE_BROADCAST)
byNewRCR |= RCR_BROADCAST;
if (wFilterType & PKT_TYPE_ERROR_CRC)
byNewRCR |= RCR_ERRCRC;
VNSvInPortB(dwIoBase + MAC_REG_RCR, &byOldRCR);
if (byNewRCR != byOldRCR) {
// Modify the Receive Command Register
VNSvOutPortB(dwIoBase + MAC_REG_RCR, byNewRCR);
}
}
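/*
 * Usage sketch (hypothetical, for illustration only): a typical station
 * configuration accepts directed, multicast and broadcast frames:
 *
 *	MACvSetPacketFilter(dwIoBase, PKT_TYPE_DIRECTED | PKT_TYPE_MULTICAST |
 *			    PKT_TYPE_BROADCAST);
 */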
/*
* Description:
* Save MAC registers to context buffer
*
* Parameters:
* In:
* dwIoBase - Base Address for MAC
* Out:
* pbyCxtBuf - Context buffer
*
* Return Value: none
*
*/
void MACvSaveContext (unsigned long dwIoBase, unsigned char *pbyCxtBuf)
{
int ii;
// read page0 register
for (ii = 0; ii < MAC_MAX_CONTEXT_SIZE_PAGE0; ii++) {
VNSvInPortB((dwIoBase + ii), (pbyCxtBuf + ii));
}
MACvSelectPage1(dwIoBase);
// read page1 register
for (ii = 0; ii < MAC_MAX_CONTEXT_SIZE_PAGE1; ii++) {
VNSvInPortB((dwIoBase + ii), (pbyCxtBuf + MAC_MAX_CONTEXT_SIZE_PAGE0 + ii));
}
MACvSelectPage0(dwIoBase);
}
/*
* Description:
* Restore MAC registers from context buffer
*
* Parameters:
* In:
* dwIoBase - Base Address for MAC
* pbyCxtBuf - Context buffer
* Out:
* none
*
* Return Value: none
*
*/
void MACvRestoreContext (unsigned long dwIoBase, unsigned char *pbyCxtBuf)
{
int ii;
MACvSelectPage1(dwIoBase);
// restore page1
for (ii = 0; ii < MAC_MAX_CONTEXT_SIZE_PAGE1; ii++) {
VNSvOutPortB((dwIoBase + ii), *(pbyCxtBuf + MAC_MAX_CONTEXT_SIZE_PAGE0 + ii));
}
MACvSelectPage0(dwIoBase);
// restore RCR,TCR,IMR...
for (ii = MAC_REG_RCR; ii < MAC_REG_ISR; ii++) {
VNSvOutPortB(dwIoBase + ii, *(pbyCxtBuf + ii));
}
// restore MAC Config.
for (ii = MAC_REG_LRT; ii < MAC_REG_PAGE1SEL; ii++) {
VNSvOutPortB(dwIoBase + ii, *(pbyCxtBuf + ii));
}
VNSvOutPortB(dwIoBase + MAC_REG_CFG, *(pbyCxtBuf + MAC_REG_CFG));
// restore PS Config.
for (ii = MAC_REG_PSCFG; ii < MAC_REG_BBREGCTL; ii++) {
VNSvOutPortB(dwIoBase + ii, *(pbyCxtBuf + ii));
}
// restore CURR_RX_DESC_ADDR, CURR_TX_DESC_ADDR
VNSvOutPortD(dwIoBase + MAC_REG_TXDMAPTR0, *(unsigned long *)(pbyCxtBuf + MAC_REG_TXDMAPTR0));
VNSvOutPortD(dwIoBase + MAC_REG_AC0DMAPTR, *(unsigned long *)(pbyCxtBuf + MAC_REG_AC0DMAPTR));
VNSvOutPortD(dwIoBase + MAC_REG_BCNDMAPTR, *(unsigned long *)(pbyCxtBuf + MAC_REG_BCNDMAPTR));
VNSvOutPortD(dwIoBase + MAC_REG_RXDMAPTR0, *(unsigned long *)(pbyCxtBuf + MAC_REG_RXDMAPTR0));
VNSvOutPortD(dwIoBase + MAC_REG_RXDMAPTR1, *(unsigned long *)(pbyCxtBuf + MAC_REG_RXDMAPTR1));
}
/*
* Description:
* Compare if MAC registers same as context buffer
*
* Parameters:
* In:
* dwIoBase - Base Address for MAC
* pbyCxtBuf - Context buffer
* Out:
* none
*
* Return Value: true if all values are the same; otherwise false
*
*/
bool MACbCompareContext (unsigned long dwIoBase, unsigned char *pbyCxtBuf)
{
unsigned long dwData;
// compare MAC context to determine if this is a power lost init,
// return true for power remaining init, return false for power lost init
// compare CURR_RX_DESC_ADDR, CURR_TX_DESC_ADDR
VNSvInPortD(dwIoBase + MAC_REG_TXDMAPTR0, &dwData);
if (dwData != *(unsigned long *)(pbyCxtBuf + MAC_REG_TXDMAPTR0)) {
return false;
}
VNSvInPortD(dwIoBase + MAC_REG_AC0DMAPTR, &dwData);
if (dwData != *(unsigned long *)(pbyCxtBuf + MAC_REG_AC0DMAPTR)) {
return false;
}
VNSvInPortD(dwIoBase + MAC_REG_RXDMAPTR0, &dwData);
if (dwData != *(unsigned long *)(pbyCxtBuf + MAC_REG_RXDMAPTR0)) {
return false;
}
VNSvInPortD(dwIoBase + MAC_REG_RXDMAPTR1, &dwData);
if (dwData != *(unsigned long *)(pbyCxtBuf + MAC_REG_RXDMAPTR1)) {
return false;
}
return true;
}
/*
* Description:
* Software Reset MAC
*
* Parameters:
* In:
* dwIoBase - Base Address for MAC
* Out:
* none
*
* Return Value: true if Reset Success; otherwise false
*
*/
bool MACbSoftwareReset (unsigned long dwIoBase)
{
unsigned char byData;
unsigned short ww;
// turn on HOSTCR_SOFTRST, just write 0x01 to reset
//MACvRegBitsOn(dwIoBase, MAC_REG_HOSTCR, HOSTCR_SOFTRST);
VNSvOutPortB(dwIoBase + MAC_REG_HOSTCR, 0x01);
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
VNSvInPortB(dwIoBase + MAC_REG_HOSTCR, &byData);
if ( !(byData & HOSTCR_SOFTRST))
break;
}
if (ww == W_MAX_TIMEOUT)
return false;
return true;
}
/*
* Description:
* Save the values of some important registers, do a reset, then restore them
*
* Parameters:
* In:
* dwIoBase - Base Address for MAC
* Out:
* none
*
* Return Value: true if success; otherwise false
*
*/
bool MACbSafeSoftwareReset (unsigned long dwIoBase)
{
unsigned char abyTmpRegData[MAC_MAX_CONTEXT_SIZE_PAGE0+MAC_MAX_CONTEXT_SIZE_PAGE1];
bool bRetVal;
// PATCH....
// save some important register's value, then do
// reset, then restore register's value
// save MAC context
MACvSaveContext(dwIoBase, abyTmpRegData);
// do reset
bRetVal = MACbSoftwareReset(dwIoBase);
//BBvSoftwareReset(pDevice->PortOffset);
// restore MAC context, except CR0
MACvRestoreContext(dwIoBase, abyTmpRegData);
return bRetVal;
}
/*
* Description:
* Turn Off MAC Rx
*
* Parameters:
* In:
* dwIoBase - Base Address for MAC
* Out:
* none
*
* Return Value: true if success; otherwise false
*
*/
bool MACbSafeRxOff (unsigned long dwIoBase)
{
unsigned short ww;
unsigned long dwData;
unsigned char byData;
// turn off wow temp for turn off Rx safely
// Clear RX DMA0,1
VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL0, DMACTL_CLRRUN);
VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL1, DMACTL_CLRRUN);
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
VNSvInPortD(dwIoBase + MAC_REG_RXDMACTL0, &dwData);
if (!(dwData & DMACTL_RUN))
break;
}
if (ww == W_MAX_TIMEOUT) {
DBG_PORT80(0x10);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" DBG_PORT80(0x10)\n");
return(false);
}
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
VNSvInPortD(dwIoBase + MAC_REG_RXDMACTL1, &dwData);
if ( !(dwData & DMACTL_RUN))
break;
}
if (ww == W_MAX_TIMEOUT) {
DBG_PORT80(0x11);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" DBG_PORT80(0x11)\n");
return(false);
}
// try to safe shutdown RX
MACvRegBitsOff(dwIoBase, MAC_REG_HOSTCR, HOSTCR_RXON);
// W_MAX_TIMEOUT is the timeout period
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
VNSvInPortB(dwIoBase + MAC_REG_HOSTCR, &byData);
if ( !(byData & HOSTCR_RXONST))
break;
}
if (ww == W_MAX_TIMEOUT) {
DBG_PORT80(0x12);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" DBG_PORT80(0x12)\n");
return(false);
}
return true;
}
/*
* Description:
* Turn Off MAC Tx
*
* Parameters:
* In:
* dwIoBase - Base Address for MAC
* Out:
* none
*
* Return Value: true if success; otherwise false
*
*/
bool MACbSafeTxOff (unsigned long dwIoBase)
{
unsigned short ww;
unsigned long dwData;
unsigned char byData;
// Clear TX DMA
//Tx0
VNSvOutPortD(dwIoBase + MAC_REG_TXDMACTL0, DMACTL_CLRRUN);
//AC0
VNSvOutPortD(dwIoBase + MAC_REG_AC0DMACTL, DMACTL_CLRRUN);
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
VNSvInPortD(dwIoBase + MAC_REG_TXDMACTL0, &dwData);
if ( !(dwData & DMACTL_RUN))
break;
}
if (ww == W_MAX_TIMEOUT) {
DBG_PORT80(0x20);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" DBG_PORT80(0x20)\n");
return(false);
}
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
VNSvInPortD(dwIoBase + MAC_REG_AC0DMACTL, &dwData);
if ( !(dwData & DMACTL_RUN))
break;
}
if (ww == W_MAX_TIMEOUT) {
DBG_PORT80(0x21);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" DBG_PORT80(0x21)\n");
return(false);
}
// try to safe shutdown TX
MACvRegBitsOff(dwIoBase, MAC_REG_HOSTCR, HOSTCR_TXON);
// W_MAX_TIMEOUT is the timeout period
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
VNSvInPortB(dwIoBase + MAC_REG_HOSTCR, &byData);
if ( !(byData & HOSTCR_TXONST))
break;
}
if (ww == W_MAX_TIMEOUT) {
DBG_PORT80(0x24);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" DBG_PORT80(0x24)\n");
return(false);
}
return true;
}
/*
* Description:
* Stop MAC function
*
* Parameters:
* In:
* dwIoBase - Base Address for MAC
* Out:
* none
*
* Return Value: true if success; otherwise false
*
*/
bool MACbSafeStop (unsigned long dwIoBase)
{
MACvRegBitsOff(dwIoBase, MAC_REG_TCR, TCR_AUTOBCNTX);
if (MACbSafeRxOff(dwIoBase) == false) {
DBG_PORT80(0xA1);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" MACbSafeRxOff == false)\n");
MACbSafeSoftwareReset(dwIoBase);
return false;
}
if (MACbSafeTxOff(dwIoBase) == false) {
DBG_PORT80(0xA2);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" MACbSafeTxOff == false)\n");
MACbSafeSoftwareReset(dwIoBase);
return false;
}
MACvRegBitsOff(dwIoBase, MAC_REG_HOSTCR, HOSTCR_MACEN);
return true;
}
/*
* Description:
* Shut Down MAC
*
* Parameters:
* In:
* dwIoBase - Base Address for MAC
* Out:
* none
*
* Return Value: true if success; otherwise false
*
*/
bool MACbShutdown (unsigned long dwIoBase)
{
// disable MAC IMR
MACvIntDisable(dwIoBase);
MACvSetLoopbackMode(dwIoBase, MAC_LB_INTERNAL);
// stop the adapter
if (!MACbSafeStop(dwIoBase)) {
MACvSetLoopbackMode(dwIoBase, MAC_LB_NONE);
return false;
}
MACvSetLoopbackMode(dwIoBase, MAC_LB_NONE);
return true;
}
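/*
 * Usage sketch (hypothetical, for illustration only): a driver's remove
 * or suspend path would typically shut the MAC down before releasing the
 * I/O region:
 *
 *	if (!MACbShutdown(dwIoBase))
 *		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "MAC shutdown failed\n");
 */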
/*
* Description:
* Initialize MAC
*
* Parameters:
* In:
* dwIoBase - Base Address for MAC
* Out:
* none
*
* Return Value: none
*
*/
void MACvInitialize (unsigned long dwIoBase)
{
// clear sticky bits
MACvClearStckDS(dwIoBase);
// disable force PME-enable
VNSvOutPortB(dwIoBase + MAC_REG_PMC1, PME_OVR);
// only 3253 A
/*
MACvPwrEvntDisable(dwIoBase);
// clear power status
VNSvOutPortW(dwIoBase + MAC_REG_WAKEUPSR0, 0x0F0F);
*/
// do reset
MACbSoftwareReset(dwIoBase);
// issue AUTOLD in EECSR to reload eeprom
//MACvRegBitsOn(dwIoBase, MAC_REG_I2MCSR, I2MCSR_AUTOLD);
// wait until EEPROM loading complete
//while (true) {
// u8 u8Data;
// VNSvInPortB(dwIoBase + MAC_REG_I2MCSR, &u8Data);
// if ( !(u8Data & I2MCSR_AUTOLD))
// break;
//}
// reset TSF counter
VNSvOutPortB(dwIoBase + MAC_REG_TFTCTL, TFTCTL_TSFCNTRST);
// enable TSF counter
VNSvOutPortB(dwIoBase + MAC_REG_TFTCTL, TFTCTL_TSFCNTREN);
// set packet filter
// receive directed and broadcast address
MACvSetPacketFilter(dwIoBase, PKT_TYPE_DIRECTED | PKT_TYPE_BROADCAST);
}
/*
* Description:
* Set the chip with current rx descriptor address
*
* Parameters:
* In:
* dwIoBase - Base Address for MAC
* dwCurrDescAddr - Descriptor Address
* Out:
* none
*
* Return Value: none
*
*/
void MACvSetCurrRx0DescAddr (unsigned long dwIoBase, unsigned long dwCurrDescAddr)
{
unsigned short ww;
unsigned char byData;
unsigned char byOrgDMACtl;
VNSvInPortB(dwIoBase + MAC_REG_RXDMACTL0, &byOrgDMACtl);
if (byOrgDMACtl & DMACTL_RUN) {
VNSvOutPortB(dwIoBase + MAC_REG_RXDMACTL0+2, DMACTL_RUN);
}
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
VNSvInPortB(dwIoBase + MAC_REG_RXDMACTL0, &byData);
if ( !(byData & DMACTL_RUN))
break;
}
if (ww == W_MAX_TIMEOUT) {
DBG_PORT80(0x13);
}
VNSvOutPortD(dwIoBase + MAC_REG_RXDMAPTR0, dwCurrDescAddr);
if (byOrgDMACtl & DMACTL_RUN) {
VNSvOutPortB(dwIoBase + MAC_REG_RXDMACTL0, DMACTL_RUN);
}
}
/*
* Description:
* Set the chip with current rx descriptor address
*
* Parameters:
* In:
* dwIoBase - Base Address for MAC
* dwCurrDescAddr - Descriptor Address
* Out:
* none
*
* Return Value: none
*
*/
void MACvSetCurrRx1DescAddr (unsigned long dwIoBase, unsigned long dwCurrDescAddr)
{
unsigned short ww;
unsigned char byData;
unsigned char byOrgDMACtl;
VNSvInPortB(dwIoBase + MAC_REG_RXDMACTL1, &byOrgDMACtl);
if (byOrgDMACtl & DMACTL_RUN) {
VNSvOutPortB(dwIoBase + MAC_REG_RXDMACTL1+2, DMACTL_RUN);
}
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
VNSvInPortB(dwIoBase + MAC_REG_RXDMACTL1, &byData);
if ( !(byData & DMACTL_RUN))
break;
}
if (ww == W_MAX_TIMEOUT) {
DBG_PORT80(0x14);
}
VNSvOutPortD(dwIoBase + MAC_REG_RXDMAPTR1, dwCurrDescAddr);
if (byOrgDMACtl & DMACTL_RUN) {
VNSvOutPortB(dwIoBase + MAC_REG_RXDMACTL1, DMACTL_RUN);
}
}
/*
* Description:
* Set the chip with current tx0 descriptor address
*
* Parameters:
* In:
* dwIoBase - Base Address for MAC
* dwCurrDescAddr - Descriptor Address
* Out:
* none
*
* Return Value: none
*
*/
void MACvSetCurrTx0DescAddrEx (unsigned long dwIoBase, unsigned long dwCurrDescAddr)
{
unsigned short ww;
unsigned char byData;
unsigned char byOrgDMACtl;
VNSvInPortB(dwIoBase + MAC_REG_TXDMACTL0, &byOrgDMACtl);
if (byOrgDMACtl & DMACTL_RUN) {
VNSvOutPortB(dwIoBase + MAC_REG_TXDMACTL0+2, DMACTL_RUN);
}
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
VNSvInPortB(dwIoBase + MAC_REG_TXDMACTL0, &byData);
if ( !(byData & DMACTL_RUN))
break;
}
if (ww == W_MAX_TIMEOUT) {
DBG_PORT80(0x25);
}
VNSvOutPortD(dwIoBase + MAC_REG_TXDMAPTR0, dwCurrDescAddr);
if (byOrgDMACtl & DMACTL_RUN) {
VNSvOutPortB(dwIoBase + MAC_REG_TXDMACTL0, DMACTL_RUN);
}
}
/*
* Description:
* Set the chip with current AC0 descriptor address
*
* Parameters:
* In:
* dwIoBase - Base Address for MAC
* dwCurrDescAddr - Descriptor Address
* Out:
* none
*
* Return Value: none
*
*/
//TxDMA1 = AC0DMA
void MACvSetCurrAC0DescAddrEx (unsigned long dwIoBase, unsigned long dwCurrDescAddr)
{
unsigned short ww;
unsigned char byData;
unsigned char byOrgDMACtl;
VNSvInPortB(dwIoBase + MAC_REG_AC0DMACTL, &byOrgDMACtl);
if (byOrgDMACtl & DMACTL_RUN) {
VNSvOutPortB(dwIoBase + MAC_REG_AC0DMACTL+2, DMACTL_RUN);
}
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
VNSvInPortB(dwIoBase + MAC_REG_AC0DMACTL, &byData);
if (!(byData & DMACTL_RUN))
break;
}
if (ww == W_MAX_TIMEOUT) {
DBG_PORT80(0x26);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" DBG_PORT80(0x26)\n");
}
VNSvOutPortD(dwIoBase + MAC_REG_AC0DMAPTR, dwCurrDescAddr);
if (byOrgDMACtl & DMACTL_RUN) {
VNSvOutPortB(dwIoBase + MAC_REG_AC0DMACTL, DMACTL_RUN);
}
}
void MACvSetCurrTXDescAddr (int iTxType, unsigned long dwIoBase, unsigned long dwCurrDescAddr)
{
if (iTxType == TYPE_AC0DMA) {
MACvSetCurrAC0DescAddrEx(dwIoBase, dwCurrDescAddr);
} else if (iTxType == TYPE_TXDMA0) {
MACvSetCurrTx0DescAddrEx(dwIoBase, dwCurrDescAddr);
}
}
/*
* Description:
* Micro Second Delay via MAC
*
* Parameters:
* In:
* dwIoBase - Base Address for MAC
* uDelay - Delay time (timer resolution is 4 us)
* Out:
* none
*
* Return Value: none
*
*/
void MACvTimer0MicroSDelay (unsigned long dwIoBase, unsigned int uDelay)
{
unsigned char byValue;
unsigned int uu,ii;
VNSvOutPortB(dwIoBase + MAC_REG_TMCTL0, 0);
VNSvOutPortD(dwIoBase + MAC_REG_TMDATA0, uDelay);
VNSvOutPortB(dwIoBase + MAC_REG_TMCTL0, (TMCTL_TMD | TMCTL_TE));
for (ii = 0; ii < 66; ii++) { // assume max PCI clock is 66 MHz
for (uu = 0; uu < uDelay; uu++) {
VNSvInPortB(dwIoBase + MAC_REG_TMCTL0, &byValue);
if ((byValue == 0) ||
(byValue & TMCTL_TSUSP)) {
VNSvOutPortB(dwIoBase + MAC_REG_TMCTL0, 0);
return;
}
}
}
VNSvOutPortB(dwIoBase + MAC_REG_TMCTL0, 0);
}
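/*
 * Usage sketch (hypothetical, for illustration only): busy-wait roughly
 * 100 microseconds via MAC timer 0; with a 4 us timer resolution, short
 * delays are effectively rounded up:
 *
 *	MACvTimer0MicroSDelay(dwIoBase, 100);
 */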
/*
* Description:
* Micro Second One shot timer via MAC
*
* Parameters:
* In:
* dwIoBase - Base Address for MAC
* uDelay - Delay time
* Out:
* none
*
* Return Value: none
*
*/
void MACvOneShotTimer0MicroSec (unsigned long dwIoBase, unsigned int uDelayTime)
{
VNSvOutPortB(dwIoBase + MAC_REG_TMCTL0, 0);
VNSvOutPortD(dwIoBase + MAC_REG_TMDATA0, uDelayTime);
VNSvOutPortB(dwIoBase + MAC_REG_TMCTL0, (TMCTL_TMD | TMCTL_TE));
}
/*
* Description:
* Micro Second One shot timer via MAC
*
* Parameters:
* In:
* dwIoBase - Base Address for MAC
* uDelay - Delay time
* Out:
* none
*
* Return Value: none
*
*/
void MACvOneShotTimer1MicroSec (unsigned long dwIoBase, unsigned int uDelayTime)
{
VNSvOutPortB(dwIoBase + MAC_REG_TMCTL1, 0);
VNSvOutPortD(dwIoBase + MAC_REG_TMDATA1, uDelayTime);
VNSvOutPortB(dwIoBase + MAC_REG_TMCTL1, (TMCTL_TMD | TMCTL_TE));
}
void MACvSetMISCFifo (unsigned long dwIoBase, unsigned short wOffset, unsigned long dwData)
{
if (wOffset > 273)
return;
VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset);
VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData);
VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE);
}
bool MACbTxDMAOff (unsigned long dwIoBase, unsigned int idx)
{
unsigned char byData;
unsigned int ww = 0;
if (idx == TYPE_TXDMA0) {
VNSvOutPortB(dwIoBase + MAC_REG_TXDMACTL0+2, DMACTL_RUN);
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
VNSvInPortB(dwIoBase + MAC_REG_TXDMACTL0, &byData);
if ( !(byData & DMACTL_RUN))
break;
}
} else if (idx == TYPE_AC0DMA) {
VNSvOutPortB(dwIoBase + MAC_REG_AC0DMACTL+2, DMACTL_RUN);
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
VNSvInPortB(dwIoBase + MAC_REG_AC0DMACTL, &byData);
if ( !(byData & DMACTL_RUN))
break;
}
}
if (ww == W_MAX_TIMEOUT) {
DBG_PORT80(0x29);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" DBG_PORT80(0x29)\n");
return false;
}
return true;
}
void MACvClearBusSusInd (unsigned long dwIoBase)
{
unsigned long dwOrgValue;
unsigned int ww;
// check if BcnSusInd enabled
VNSvInPortD(dwIoBase + MAC_REG_ENCFG , &dwOrgValue);
if( !(dwOrgValue & EnCFG_BcnSusInd))
return;
//Set BcnSusClr
dwOrgValue = dwOrgValue | EnCFG_BcnSusClr;
VNSvOutPortD(dwIoBase + MAC_REG_ENCFG, dwOrgValue);
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
VNSvInPortD(dwIoBase + MAC_REG_ENCFG , &dwOrgValue);
if( !(dwOrgValue & EnCFG_BcnSusInd))
break;
}
if (ww == W_MAX_TIMEOUT) {
DBG_PORT80(0x33);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" DBG_PORT80(0x33)\n");
}
}
void MACvEnableBusSusEn (unsigned long dwIoBase)
{
unsigned char byOrgValue;
unsigned long dwOrgValue;
unsigned int ww;
// read CFG
VNSvInPortB(dwIoBase + MAC_REG_CFG, &byOrgValue);
//Set BcnSusEn
byOrgValue = byOrgValue | CFG_BCNSUSEN;
VNSvOutPortB(dwIoBase + MAC_REG_ENCFG, byOrgValue);
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
VNSvInPortD(dwIoBase + MAC_REG_ENCFG , &dwOrgValue);
if(dwOrgValue & EnCFG_BcnSusInd)
break;
}
if (ww == W_MAX_TIMEOUT) {
DBG_PORT80(0x34);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" DBG_PORT80(0x34)\n");
}
}
bool MACbFlushSYNCFifo (unsigned long dwIoBase)
{
unsigned char byOrgValue;
unsigned int ww;
// Read MACCR
VNSvInPortB(dwIoBase + MAC_REG_MACCR , &byOrgValue);
// Set SYNCFLUSH
byOrgValue = byOrgValue | MACCR_SYNCFLUSH;
VNSvOutPortB(dwIoBase + MAC_REG_MACCR, byOrgValue);
// Check if SyncFlushOK
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
VNSvInPortB(dwIoBase + MAC_REG_MACCR , &byOrgValue);
if(byOrgValue & MACCR_SYNCFLUSHOK)
break;
}
if (ww == W_MAX_TIMEOUT) {
DBG_PORT80(0x35);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" DBG_PORT80(0x35)\n");
}
return true;
}
bool MACbPSWakeup (unsigned long dwIoBase)
{
unsigned char byOrgValue;
unsigned int ww;
// Read PSCTL
if (MACbIsRegBitsOff(dwIoBase, MAC_REG_PSCTL, PSCTL_PS)) {
return true;
}
// Disable PS
MACvRegBitsOff(dwIoBase, MAC_REG_PSCTL, PSCTL_PSEN);
// Wait for wake-up to complete
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
VNSvInPortB(dwIoBase + MAC_REG_PSCTL , &byOrgValue);
if(byOrgValue & PSCTL_WAKEDONE)
break;
}
if (ww == W_MAX_TIMEOUT) {
DBG_PORT80(0x36);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" DBG_PORT80(0x36)\n");
return false;
}
return true;
}
/*
* Description:
* Set the Key by MISCFIFO
*
* Parameters:
* In:
* dwIoBase - Base Address for MAC
*
* Out:
* none
*
* Return Value: none
*
*/
void MACvSetKeyEntry (unsigned long dwIoBase, unsigned short wKeyCtl, unsigned int uEntryIdx,
unsigned int uKeyIdx, unsigned char *pbyAddr, unsigned long *pdwKey, unsigned char byLocalID)
{
unsigned short wOffset;
unsigned long dwData;
int ii;
if (byLocalID <= 1)
return;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MACvSetKeyEntry\n");
wOffset = MISCFIFO_KEYETRY0;
wOffset += (uEntryIdx * MISCFIFO_KEYENTRYSIZE);
dwData = 0;
dwData |= wKeyCtl;
dwData <<= 16;
dwData |= MAKEWORD(*(pbyAddr+4), *(pbyAddr+5));
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"1. wOffset: %d, Data: %lX, KeyCtl:%X\n", wOffset, dwData, wKeyCtl);
VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset);
VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData);
VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE);
wOffset++;
dwData = 0;
dwData |= *(pbyAddr+3);
dwData <<= 8;
dwData |= *(pbyAddr+2);
dwData <<= 8;
dwData |= *(pbyAddr+1);
dwData <<= 8;
dwData |= *(pbyAddr+0);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"2. wOffset: %d, Data: %lX\n", wOffset, dwData);
VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset);
VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData);
VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE);
wOffset++;
wOffset += (uKeyIdx * 4);
for (ii=0;ii<4;ii++) {
// always push 128 bits
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"3.(%d) wOffset: %d, Data: %lX\n", ii, wOffset+ii, *pdwKey);
VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset+ii);
VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, *pdwKey++);
VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE);
}
}
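/*
 * Layout sketch (illustrative, reconstructed from the writes above): each
 * MISCFIFO key entry begins at MISCFIFO_KEYETRY0 + uEntryIdx *
 * MISCFIFO_KEYENTRYSIZE and holds
 *
 *	word 0: (wKeyCtl << 16) | addr[4..5]
 *	word 1: addr[3..0] packed little-endian
 *	word 2 + uKeyIdx * 4 onward: four 32-bit words of key material
 */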
/*
* Description:
* Disable the Key Entry by MISCFIFO
*
* Parameters:
* In:
* dwIoBase - Base Address for MAC
*
* Out:
* none
*
* Return Value: none
*
*/
void MACvDisableKeyEntry (unsigned long dwIoBase, unsigned int uEntryIdx)
{
unsigned short wOffset;
wOffset = MISCFIFO_KEYETRY0;
wOffset += (uEntryIdx * MISCFIFO_KEYENTRYSIZE);
VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset);
VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, 0);
VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE);
}
/*
* Description:
* Set the default Key (KeyEntry[10]) by MISCFIFO
*
* Parameters:
* In:
* dwIoBase - Base Address for MAC
*
* Out:
* none
*
* Return Value: none
*
*/
void MACvSetDefaultKeyEntry (unsigned long dwIoBase, unsigned int uKeyLen,
unsigned int uKeyIdx, unsigned long *pdwKey, unsigned char byLocalID)
{
unsigned short wOffset;
unsigned long dwData;
int ii;
if (byLocalID <= 1)
return;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MACvSetDefaultKeyEntry\n");
wOffset = MISCFIFO_KEYETRY0;
wOffset += (10 * MISCFIFO_KEYENTRYSIZE);
wOffset++;
wOffset++;
wOffset += (uKeyIdx * 4);
// always push 128 bits
for (ii=0; ii<3; ii++) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"(%d) wOffset: %d, Data: %lX\n", ii, wOffset+ii, *pdwKey);
VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset+ii);
VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, *pdwKey++);
VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE);
}
dwData = *pdwKey;
if (uKeyLen == WLAN_WEP104_KEYLEN) {
dwData |= 0x80000000;
}
VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset+3);
VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData);
VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"End. wOffset: %d, Data: %lX\n", wOffset+3, dwData);
}
/*
* Description:
* Enable default Key (KeyEntry[10]) by MISCFIFO
*
* Parameters:
* In:
* dwIoBase - Base Address for MAC
*
* Out:
* none
*
* Return Value: none
*
*/
/*
void MACvEnableDefaultKey (unsigned long dwIoBase, unsigned char byLocalID)
{
unsigned short wOffset;
unsigned long dwData;
if (byLocalID <= 1)
return;
wOffset = MISCFIFO_KEYETRY0;
wOffset += (10 * MISCFIFO_KEYENTRYSIZE);
dwData = 0xC0440000;
VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset);
VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData);
VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MACvEnableDefaultKey: wOffset: %d, Data: %lX\n", wOffset, dwData);
}
*/
/*
* Description:
* Disable default Key (KeyEntry[10]) by MISCFIFO
*
* Parameters:
* In:
* dwIoBase - Base Address for MAC
*
* Out:
* none
*
* Return Value: none
*
*/
void MACvDisableDefaultKey (unsigned long dwIoBase)
{
unsigned short wOffset;
unsigned long dwData;
wOffset = MISCFIFO_KEYETRY0;
wOffset += (10 * MISCFIFO_KEYENTRYSIZE);
dwData = 0x0;
VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset);
VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData);
VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MACvDisableDefaultKey: wOffset: %d, Data: %lX\n", wOffset, dwData);
}
/*
* Description:
* Set the default TKIP Group Key (KeyEntry[10]) by MISCFIFO
*
* Parameters:
* In:
* dwIoBase - Base Address for MAC
*
* Out:
* none
*
* Return Value: none
*
*/
void MACvSetDefaultTKIPKeyEntry (unsigned long dwIoBase, unsigned int uKeyLen,
unsigned int uKeyIdx, unsigned long *pdwKey, unsigned char byLocalID)
{
unsigned short wOffset;
unsigned long dwData;
int ii;
if (byLocalID <= 1)
return;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MACvSetDefaultTKIPKeyEntry\n");
wOffset = MISCFIFO_KEYETRY0;
// Kyle test : change offset from 10 -> 0
wOffset += (10 * MISCFIFO_KEYENTRYSIZE);
dwData = 0xC0660000;
VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset);
VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData);
VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE);
wOffset++;
dwData = 0;
VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset);
VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData);
VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE);
wOffset++;
wOffset += (uKeyIdx * 4);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"1. wOffset: %d, Data: %lX, idx:%d\n", wOffset, *pdwKey, uKeyIdx);
// always push 128 bits
for (ii=0; ii<4; ii++) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"2.(%d) wOffset: %d, Data: %lX\n", ii, wOffset+ii, *pdwKey);
VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset+ii);
VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, *pdwKey++);
VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE);
}
}
/*
* Description:
* Set the Key Control by MISCFIFO
*
* Parameters:
* In:
* dwIoBase - Base Address for MAC
*
* Out:
* none
*
* Return Value: none
*
*/
void MACvSetDefaultKeyCtl (unsigned long dwIoBase, unsigned short wKeyCtl, unsigned int uEntryIdx, unsigned char byLocalID)
{
unsigned short wOffset;
unsigned long dwData;
if (byLocalID <= 1)
return;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MACvSetDefaultKeyCtl\n");
wOffset = MISCFIFO_KEYETRY0;
wOffset += (uEntryIdx * MISCFIFO_KEYENTRYSIZE);
dwData = 0;
dwData |= wKeyCtl;
dwData <<= 16;
dwData |= 0xffff;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"1. wOffset: %d, Data: %lX, KeyCtl:%X\n", wOffset, dwData, wKeyCtl);
VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset);
VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData);
VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE);
}
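/*
 * Reference note (added): the dword written by MACvSetDefaultKeyCtl
 * packs the key-control flags into the upper 16 bits; the lower 16
 * bits appear to hold the trailing bytes of the broadcast address
 * matched by default keys. A hypothetical helper making the layout
 * explicit:
 */
#if 0
static inline unsigned long example_keyctl_word(unsigned short wKeyCtl)
{
	return ((unsigned long)wKeyCtl << 16) | 0xffff;
}
#endif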
| gpl-2.0 |
kejar31/kernel-caf-msm | drivers/s390/char/tape_class.c | 9137 | 3017 | /*
* (C) Copyright IBM Corp. 2004
* tape_class.c
*
* Tape class device support
*
* Author: Stefan Bader <shbader@de.ibm.com>
* Based on simple class device code by Greg K-H
*/
#define KMSG_COMPONENT "tape"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/slab.h>
#include "tape_class.h"
MODULE_AUTHOR("Stefan Bader <shbader@de.ibm.com>");
MODULE_DESCRIPTION(
"(C) Copyright IBM Corp. 2004 All Rights Reserved.\n"
"tape_class.c"
);
MODULE_LICENSE("GPL");
static struct class *tape_class;
/*
 * Register a tape device and return a pointer to the tape_class_device
 * structure.
 *
 * device
 *	The pointer to the struct device of the physical (base) device.
 * dev
 *	The intended major/minor number. The major number may be 0 to
 *	get a dynamic major number.
 * fops
 *	The pointer to the driver's file operations for the tape device.
 * device_name
 *	The pointer to the name of the character device.
 * mode_name
 *	The name used for the sysfs link from the base device to the
 *	class device.
 */
struct tape_class_device *register_tape_dev(
struct device * device,
dev_t dev,
const struct file_operations *fops,
char * device_name,
char * mode_name)
{
struct tape_class_device * tcd;
int rc;
char * s;
tcd = kzalloc(sizeof(struct tape_class_device), GFP_KERNEL);
if (!tcd)
return ERR_PTR(-ENOMEM);
strncpy(tcd->device_name, device_name, TAPECLASS_NAME_LEN);
for (s = strchr(tcd->device_name, '/'); s; s = strchr(s, '/'))
*s = '!';
strncpy(tcd->mode_name, mode_name, TAPECLASS_NAME_LEN);
for (s = strchr(tcd->mode_name, '/'); s; s = strchr(s, '/'))
*s = '!';
tcd->char_device = cdev_alloc();
if (!tcd->char_device) {
rc = -ENOMEM;
goto fail_with_tcd;
}
tcd->char_device->owner = fops->owner;
tcd->char_device->ops = fops;
tcd->char_device->dev = dev;
rc = cdev_add(tcd->char_device, tcd->char_device->dev, 1);
if (rc)
goto fail_with_cdev;
tcd->class_device = device_create(tape_class, device,
tcd->char_device->dev, NULL,
"%s", tcd->device_name);
rc = IS_ERR(tcd->class_device) ? PTR_ERR(tcd->class_device) : 0;
if (rc)
goto fail_with_cdev;
rc = sysfs_create_link(
&device->kobj,
&tcd->class_device->kobj,
tcd->mode_name
);
if (rc)
goto fail_with_class_device;
return tcd;
fail_with_class_device:
device_destroy(tape_class, tcd->char_device->dev);
fail_with_cdev:
cdev_del(tcd->char_device);
fail_with_tcd:
kfree(tcd);
return ERR_PTR(rc);
}
EXPORT_SYMBOL(register_tape_dev);
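/*
 * Minimal usage sketch (added; hypothetical, not part of this file):
 * a tape discipline would call register_tape_dev() once per access
 * mode from its setup path. The device and mode names are illustrative
 * placeholders, as is "my_fops".
 */
#if 0
static struct tape_class_device *example_setup(struct device *device,
	dev_t dev, const struct file_operations *my_fops)
{
	/* creates the char device plus a "non-rewinding" sysfs link */
	return register_tape_dev(device, dev, my_fops,
				 "ntibm0", "non-rewinding");
}
#endif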
void unregister_tape_dev(struct device *device, struct tape_class_device *tcd)
{
if (tcd != NULL && !IS_ERR(tcd)) {
sysfs_remove_link(&device->kobj, tcd->mode_name);
device_destroy(tape_class, tcd->char_device->dev);
cdev_del(tcd->char_device);
kfree(tcd);
}
}
EXPORT_SYMBOL(unregister_tape_dev);
static int __init tape_init(void)
{
tape_class = class_create(THIS_MODULE, "tape390");
if (IS_ERR(tape_class))
	return PTR_ERR(tape_class);
return 0;
}
static void __exit tape_exit(void)
{
class_destroy(tape_class);
tape_class = NULL;
}
postcore_initcall(tape_init);
module_exit(tape_exit);
| gpl-2.0 |
Maxr1998/hellsCore-mako | net/bridge/netfilter/ebt_nflog.c | 9393 | 1769 | /*
* ebt_nflog
*
* Author:
* Peter Warasin <peter@endian.com>
*
* February, 2008
*
* Based on:
* xt_NFLOG.c, (C) 2006 by Patrick McHardy <kaber@trash.net>
* ebt_ulog.c, (C) 2004 by Bart De Schuymer <bdschuym@pandora.be>
*
*/
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_nflog.h>
#include <net/netfilter/nf_log.h>
static unsigned int
ebt_nflog_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ebt_nflog_info *info = par->targinfo;
struct nf_loginfo li;
li.type = NF_LOG_TYPE_ULOG;
li.u.ulog.copy_len = info->len;
li.u.ulog.group = info->group;
li.u.ulog.qthreshold = info->threshold;
nf_log_packet(PF_BRIDGE, par->hooknum, skb, par->in, par->out,
&li, "%s", info->prefix);
return EBT_CONTINUE;
}
static int ebt_nflog_tg_check(const struct xt_tgchk_param *par)
{
struct ebt_nflog_info *info = par->targinfo;
if (info->flags & ~EBT_NFLOG_MASK)
return -EINVAL;
info->prefix[EBT_NFLOG_PREFIX_SIZE - 1] = '\0';
return 0;
}
static struct xt_target ebt_nflog_tg_reg __read_mostly = {
.name = "nflog",
.revision = 0,
.family = NFPROTO_BRIDGE,
.target = ebt_nflog_tg,
.checkentry = ebt_nflog_tg_check,
.targetsize = sizeof(struct ebt_nflog_info),
.me = THIS_MODULE,
};
static int __init ebt_nflog_init(void)
{
return xt_register_target(&ebt_nflog_tg_reg);
}
static void __exit ebt_nflog_fini(void)
{
xt_unregister_target(&ebt_nflog_tg_reg);
}
module_init(ebt_nflog_init);
module_exit(ebt_nflog_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Peter Warasin <peter@endian.com>");
MODULE_DESCRIPTION("ebtables NFLOG netfilter logging module");
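/*
 * Usage note (added; illustrative): this target is configured from
 * userspace through ebtables, e.g.
 *
 *	ebtables -A FORWARD -j NFLOG --nflog-group 1 --nflog-prefix "br:"
 *
 * which fills the group, prefix and threshold members of struct
 * ebt_nflog_info validated in ebt_nflog_tg_check() above. Option
 * names may vary with the installed ebtables version.
 */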
| gpl-2.0 |
civato/KK_Xplorer-9005 | fs/jffs2/erase.c | 9649 | 14584 | /*
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2001-2007 Red Hat, Inc.
* Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
*
* Created by David Woodhouse <dwmw2@infradead.org>
*
* For licensing information, see the file 'LICENCE' in this directory.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include <linux/crc32.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include "nodelist.h"
struct erase_priv_struct {
struct jffs2_eraseblock *jeb;
struct jffs2_sb_info *c;
};
#ifndef __ECOS
static void jffs2_erase_callback(struct erase_info *);
#endif
static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset);
static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
static void jffs2_erase_block(struct jffs2_sb_info *c,
struct jffs2_eraseblock *jeb)
{
int ret;
uint32_t bad_offset;
#ifdef __ECOS
ret = jffs2_flash_erase(c, jeb);
if (!ret) {
jffs2_erase_succeeded(c, jeb);
return;
}
bad_offset = jeb->offset;
#else /* Linux */
struct erase_info *instr;
jffs2_dbg(1, "%s(): erase block %#08x (range %#08x-%#08x)\n",
__func__,
jeb->offset, jeb->offset, jeb->offset + c->sector_size);
instr = kmalloc(sizeof(struct erase_info) + sizeof(struct erase_priv_struct), GFP_KERNEL);
if (!instr) {
pr_warn("kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n");
mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
list_move(&jeb->list, &c->erase_pending_list);
c->erasing_size -= c->sector_size;
c->dirty_size += c->sector_size;
jeb->dirty_size = c->sector_size;
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->erase_free_sem);
return;
}
memset(instr, 0, sizeof(*instr));
instr->mtd = c->mtd;
instr->addr = jeb->offset;
instr->len = c->sector_size;
instr->callback = jffs2_erase_callback;
instr->priv = (unsigned long)(&instr[1]);
((struct erase_priv_struct *)instr->priv)->jeb = jeb;
((struct erase_priv_struct *)instr->priv)->c = c;
ret = mtd_erase(c->mtd, instr);
if (!ret)
return;
bad_offset = instr->fail_addr;
kfree(instr);
#endif /* __ECOS */
if (ret == -ENOMEM || ret == -EAGAIN) {
/* Erase failed immediately. Refile it on the list */
jffs2_dbg(1, "Erase at 0x%08x failed: %d. Refiling on erase_pending_list\n",
jeb->offset, ret);
mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
list_move(&jeb->list, &c->erase_pending_list);
c->erasing_size -= c->sector_size;
c->dirty_size += c->sector_size;
jeb->dirty_size = c->sector_size;
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->erase_free_sem);
return;
}
if (ret == -EROFS)
pr_warn("Erase at 0x%08x failed immediately: -EROFS. Is the sector locked?\n",
jeb->offset);
else
pr_warn("Erase at 0x%08x failed immediately: errno %d\n",
jeb->offset, ret);
jffs2_erase_failed(c, jeb, bad_offset);
}
int jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count)
{
struct jffs2_eraseblock *jeb;
int work_done = 0;
mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
while (!list_empty(&c->erase_complete_list) ||
!list_empty(&c->erase_pending_list)) {
if (!list_empty(&c->erase_complete_list)) {
jeb = list_entry(c->erase_complete_list.next, struct jffs2_eraseblock, list);
list_move(&jeb->list, &c->erase_checking_list);
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->erase_free_sem);
jffs2_mark_erased_block(c, jeb);
work_done++;
if (!--count) {
jffs2_dbg(1, "Count reached. jffs2_erase_pending_blocks leaving\n");
goto done;
}
} else if (!list_empty(&c->erase_pending_list)) {
jeb = list_entry(c->erase_pending_list.next, struct jffs2_eraseblock, list);
jffs2_dbg(1, "Starting erase of pending block 0x%08x\n",
jeb->offset);
list_del(&jeb->list);
c->erasing_size += c->sector_size;
c->wasted_size -= jeb->wasted_size;
c->free_size -= jeb->free_size;
c->used_size -= jeb->used_size;
c->dirty_size -= jeb->dirty_size;
jeb->wasted_size = jeb->used_size = jeb->dirty_size = jeb->free_size = 0;
jffs2_free_jeb_node_refs(c, jeb);
list_add(&jeb->list, &c->erasing_list);
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->erase_free_sem);
jffs2_erase_block(c, jeb);
} else {
BUG();
}
/* Be nice */
cond_resched();
mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
}
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->erase_free_sem);
done:
jffs2_dbg(1, "jffs2_erase_pending_blocks completed\n");
return work_done;
}
static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
jffs2_dbg(1, "Erase completed successfully at 0x%08x\n", jeb->offset);
mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
list_move_tail(&jeb->list, &c->erase_complete_list);
/* Wake the GC thread to mark them clean */
jffs2_garbage_collect_trigger(c);
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->erase_free_sem);
wake_up(&c->erase_wait);
}
static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
{
/* For NAND, if the failure did not occur at the device level for a
specific physical page, don't bother updating the bad block table. */
if (jffs2_cleanmarker_oob(c) && (bad_offset != (uint32_t)MTD_FAIL_ADDR_UNKNOWN)) {
/* We had a device-level failure to erase. Let's see if we've
failed too many times. */
if (!jffs2_write_nand_badblock(c, jeb, bad_offset)) {
/* We'd like to give this block another try. */
mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
list_move(&jeb->list, &c->erase_pending_list);
c->erasing_size -= c->sector_size;
c->dirty_size += c->sector_size;
jeb->dirty_size = c->sector_size;
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->erase_free_sem);
return;
}
}
mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
c->erasing_size -= c->sector_size;
c->bad_size += c->sector_size;
list_move(&jeb->list, &c->bad_list);
c->nr_erasing_blocks--;
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->erase_free_sem);
wake_up(&c->erase_wait);
}
#ifndef __ECOS
static void jffs2_erase_callback(struct erase_info *instr)
{
struct erase_priv_struct *priv = (void *)instr->priv;
if(instr->state != MTD_ERASE_DONE) {
pr_warn("Erase at 0x%08llx finished, but state != MTD_ERASE_DONE. State is 0x%x instead.\n",
(unsigned long long)instr->addr, instr->state);
jffs2_erase_failed(priv->c, priv->jeb, instr->fail_addr);
} else {
jffs2_erase_succeeded(priv->c, priv->jeb);
}
kfree(instr);
}
#endif /* !__ECOS */
/* Hmmm. Maybe we should accept the extra space it takes and make
this a standard doubly-linked list? */
static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c,
struct jffs2_raw_node_ref *ref, struct jffs2_eraseblock *jeb)
{
struct jffs2_inode_cache *ic = NULL;
struct jffs2_raw_node_ref **prev;
prev = &ref->next_in_ino;
/* Walk the inode's list once, removing any nodes from this eraseblock */
while (1) {
if (!(*prev)->next_in_ino) {
/* We're looking at the jffs2_inode_cache, which is
at the end of the linked list. Stash it and continue
from the beginning of the list */
ic = (struct jffs2_inode_cache *)(*prev);
prev = &ic->nodes;
continue;
}
if (SECTOR_ADDR((*prev)->flash_offset) == jeb->offset) {
/* It's in the block we're erasing */
struct jffs2_raw_node_ref *this;
this = *prev;
*prev = this->next_in_ino;
this->next_in_ino = NULL;
if (this == ref)
break;
continue;
}
/* Not to be deleted. Skip */
prev = &((*prev)->next_in_ino);
}
/* PARANOIA */
if (!ic) {
JFFS2_WARNING("inode_cache/xattr_datum/xattr_ref"
" not found in remove_node_refs()!!\n");
return;
}
jffs2_dbg(1, "Removed nodes in range 0x%08x-0x%08x from ino #%u\n",
jeb->offset, jeb->offset + c->sector_size, ic->ino);
D2({
int i=0;
struct jffs2_raw_node_ref *this;
printk(KERN_DEBUG "After remove_node_refs_from_ino_list: \n");
this = ic->nodes;
printk(KERN_DEBUG);
while(this) {
pr_cont("0x%08x(%d)->",
ref_offset(this), ref_flags(this));
if (++i == 5) {
printk(KERN_DEBUG);
i=0;
}
this = this->next_in_ino;
}
pr_cont("\n");
});
switch (ic->class) {
#ifdef CONFIG_JFFS2_FS_XATTR
case RAWNODE_CLASS_XATTR_DATUM:
jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
break;
case RAWNODE_CLASS_XATTR_REF:
jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
break;
#endif
default:
if (ic->nodes == (void *)ic && ic->pino_nlink == 0)
jffs2_del_ino_cache(c, ic);
}
}
void jffs2_free_jeb_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
struct jffs2_raw_node_ref *block, *ref;
jffs2_dbg(1, "Freeing all node refs for eraseblock offset 0x%08x\n",
jeb->offset);
block = ref = jeb->first_node;
while (ref) {
if (ref->flash_offset == REF_LINK_NODE) {
ref = ref->next_in_ino;
jffs2_free_refblock(block);
block = ref;
continue;
}
if (ref->flash_offset != REF_EMPTY_NODE && ref->next_in_ino)
jffs2_remove_node_refs_from_ino_list(c, ref, jeb);
/* else it was a non-inode node or already removed, so don't bother */
ref++;
}
jeb->first_node = jeb->last_node = NULL;
}
static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t *bad_offset)
{
void *ebuf;
uint32_t ofs;
size_t retlen;
int ret;
unsigned long *wordebuf;
ret = mtd_point(c->mtd, jeb->offset, c->sector_size, &retlen,
&ebuf, NULL);
if (ret != -EOPNOTSUPP) {
if (ret) {
jffs2_dbg(1, "MTD point failed %d\n", ret);
goto do_flash_read;
}
if (retlen < c->sector_size) {
/* Don't muck about if it won't let us point to the whole erase sector */
jffs2_dbg(1, "MTD point returned len too short: 0x%zx\n",
retlen);
mtd_unpoint(c->mtd, jeb->offset, retlen);
goto do_flash_read;
}
wordebuf = ebuf-sizeof(*wordebuf);
retlen /= sizeof(*wordebuf);
do {
if (*++wordebuf != ~0)
break;
} while(--retlen);
mtd_unpoint(c->mtd, jeb->offset, c->sector_size);
if (retlen) {
pr_warn("Newly-erased block contained word 0x%lx at offset 0x%08tx\n",
*wordebuf,
jeb->offset +
c->sector_size-retlen * sizeof(*wordebuf));
return -EIO;
}
return 0;
}
do_flash_read:
ebuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!ebuf) {
pr_warn("Failed to allocate page buffer for verifying erase at 0x%08x. Refiling\n",
jeb->offset);
return -EAGAIN;
}
jffs2_dbg(1, "Verifying erase at 0x%08x\n", jeb->offset);
for (ofs = jeb->offset; ofs < jeb->offset + c->sector_size; ) {
uint32_t readlen = min((uint32_t)PAGE_SIZE, jeb->offset + c->sector_size - ofs);
int i;
*bad_offset = ofs;
ret = mtd_read(c->mtd, ofs, readlen, &retlen, ebuf);
if (ret) {
pr_warn("Read of newly-erased block at 0x%08x failed: %d. Putting on bad_list\n",
ofs, ret);
ret = -EIO;
goto fail;
}
if (retlen != readlen) {
pr_warn("Short read from newly-erased block at 0x%08x. Wanted %d, got %zd\n",
ofs, readlen, retlen);
ret = -EIO;
goto fail;
}
for (i=0; i<readlen; i += sizeof(unsigned long)) {
/* It's OK. We know it's properly aligned */
unsigned long *datum = ebuf + i;
if (*datum + 1) {
*bad_offset += i;
pr_warn("Newly-erased block contained word 0x%lx at offset 0x%08x\n",
*datum, *bad_offset);
ret = -EIO;
goto fail;
}
}
ofs += readlen;
cond_resched();
}
ret = 0;
fail:
kfree(ebuf);
return ret;
}
static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
size_t retlen;
int ret;
uint32_t uninitialized_var(bad_offset);
switch (jffs2_block_check_erase(c, jeb, &bad_offset)) {
case -EAGAIN: goto refile;
case -EIO: goto filebad;
}
/* Write the erase complete marker */
jffs2_dbg(1, "Writing erased marker to block at 0x%08x\n", jeb->offset);
bad_offset = jeb->offset;
/* Cleanmarker in oob area or no cleanmarker at all ? */
if (jffs2_cleanmarker_oob(c) || c->cleanmarker_size == 0) {
if (jffs2_cleanmarker_oob(c)) {
if (jffs2_write_nand_cleanmarker(c, jeb))
goto filebad;
}
} else {
struct kvec vecs[1];
struct jffs2_unknown_node marker = {
.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
.nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
.totlen = cpu_to_je32(c->cleanmarker_size)
};
jffs2_prealloc_raw_node_refs(c, jeb, 1);
marker.hdr_crc = cpu_to_je32(crc32(0, &marker, sizeof(struct jffs2_unknown_node)-4));
vecs[0].iov_base = (unsigned char *) ▮
vecs[0].iov_len = sizeof(marker);
ret = jffs2_flash_direct_writev(c, vecs, 1, jeb->offset, &retlen);
if (ret || retlen != sizeof(marker)) {
if (ret)
pr_warn("Write clean marker to block at 0x%08x failed: %d\n",
jeb->offset, ret);
else
pr_warn("Short write to newly-erased block at 0x%08x: Wanted %zd, got %zd\n",
jeb->offset, sizeof(marker), retlen);
goto filebad;
}
}
/* Everything else got zeroed before the erase */
jeb->free_size = c->sector_size;
mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
c->erasing_size -= c->sector_size;
c->free_size += c->sector_size;
/* Account for cleanmarker now, if it's in-band */
if (c->cleanmarker_size && !jffs2_cleanmarker_oob(c))
jffs2_link_node_ref(c, jeb, jeb->offset | REF_NORMAL, c->cleanmarker_size, NULL);
list_move_tail(&jeb->list, &c->free_list);
c->nr_erasing_blocks--;
c->nr_free_blocks++;
jffs2_dbg_acct_sanity_check_nolock(c, jeb);
jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->erase_free_sem);
wake_up(&c->erase_wait);
return;
filebad:
jffs2_erase_failed(c, jeb, bad_offset);
return;
refile:
/* Stick it back on the list from whence it came and come back later */
mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
jffs2_garbage_collect_trigger(c);
list_move(&jeb->list, &c->erase_complete_list);
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->erase_free_sem);
return;
}
| gpl-2.0 |
IllusionRom-deprecated/android_kernel_lge_msm8974 | drivers/tty/serial/pnx8xxx_uart.c | 9905 | 21362 | /*
* UART driver for PNX8XXX SoCs
*
* Author: Per Hallsmark per.hallsmark@mvista.com
* Ported to 2.6 kernel by EmbeddedAlley
* Reworked by Vitaly Wool <vitalywool@gmail.com>
*
* Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
* Copyright (C) 2000 Deep Blue Solutions Ltd.
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of
* any kind, whether express or implied.
*
*/
#if defined(CONFIG_SERIAL_PNX8XXX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/serial_pnx8xxx.h>
#include <asm/io.h>
#include <asm/irq.h>
/* We'll be using StrongARM sa1100 serial port major/minor */
#define SERIAL_PNX8XXX_MAJOR 204
#define MINOR_START 5
#define NR_PORTS 2
#define PNX8XXX_ISR_PASS_LIMIT 256
/*
* Convert from ignore_status_mask or read_status_mask to FIFO
* and interrupt status bits
*/
#define SM_TO_FIFO(x) ((x) >> 10)
#define SM_TO_ISTAT(x) ((x) & 0x000001ff)
#define FIFO_TO_SM(x) ((x) << 10)
#define ISTAT_TO_SM(x) ((x) & 0x000001ff)
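/*
 * Worked example (added): the status-mask word keeps the nine
 * interrupt-status bits in bits 0-8 and the FIFO status bits from bit
 * 10 upward, so both sources can be OR'ed into one mask without
 * colliding:
 *
 *	mask = FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFE) |
 *	       ISTAT_TO_SM(PNX8XXX_UART_INT_RX);
 *
 * which is exactly how pnx8xxx_set_termios() builds read_status_mask
 * below.
 */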
/*
* This is the size of our serial port register set.
*/
#define UART_PORT_SIZE 0x1000
/*
* This determines how often we check the modem status signals
* for any change. They generally aren't connected to an IRQ
* so we have to poll them. We also check immediately before
 * filling the TX FIFO in case CTS has been dropped.
*/
#define MCTRL_TIMEOUT (250*HZ/1000)
extern struct pnx8xxx_port pnx8xxx_ports[];
static inline int serial_in(struct pnx8xxx_port *sport, int offset)
{
return (__raw_readl(sport->port.membase + offset));
}
static inline void serial_out(struct pnx8xxx_port *sport, int offset, int value)
{
__raw_writel(value, sport->port.membase + offset);
}
/*
* Handle any change of modem status signal since we were last called.
*/
static void pnx8xxx_mctrl_check(struct pnx8xxx_port *sport)
{
unsigned int status, changed;
status = sport->port.ops->get_mctrl(&sport->port);
changed = status ^ sport->old_status;
if (changed == 0)
return;
sport->old_status = status;
if (changed & TIOCM_RI)
sport->port.icount.rng++;
if (changed & TIOCM_DSR)
sport->port.icount.dsr++;
if (changed & TIOCM_CAR)
uart_handle_dcd_change(&sport->port, status & TIOCM_CAR);
if (changed & TIOCM_CTS)
uart_handle_cts_change(&sport->port, status & TIOCM_CTS);
wake_up_interruptible(&sport->port.state->port.delta_msr_wait);
}
/*
* This is our per-port timeout handler, for checking the
* modem status signals.
*/
static void pnx8xxx_timeout(unsigned long data)
{
struct pnx8xxx_port *sport = (struct pnx8xxx_port *)data;
unsigned long flags;
if (sport->port.state) {
spin_lock_irqsave(&sport->port.lock, flags);
pnx8xxx_mctrl_check(sport);
spin_unlock_irqrestore(&sport->port.lock, flags);
mod_timer(&sport->timer, jiffies + MCTRL_TIMEOUT);
}
}
/*
* interrupts disabled on entry
*/
static void pnx8xxx_stop_tx(struct uart_port *port)
{
struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
u32 ien;
/* Disable TX intr */
ien = serial_in(sport, PNX8XXX_IEN);
serial_out(sport, PNX8XXX_IEN, ien & ~PNX8XXX_UART_INT_ALLTX);
/* Clear all pending TX intr */
serial_out(sport, PNX8XXX_ICLR, PNX8XXX_UART_INT_ALLTX);
}
/*
* interrupts may not be disabled on entry
*/
static void pnx8xxx_start_tx(struct uart_port *port)
{
struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
u32 ien;
/* Clear all pending TX intr */
serial_out(sport, PNX8XXX_ICLR, PNX8XXX_UART_INT_ALLTX);
/* Enable TX intr */
ien = serial_in(sport, PNX8XXX_IEN);
serial_out(sport, PNX8XXX_IEN, ien | PNX8XXX_UART_INT_ALLTX);
}
/*
* Interrupts enabled
*/
static void pnx8xxx_stop_rx(struct uart_port *port)
{
struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
u32 ien;
/* Disable RX intr */
ien = serial_in(sport, PNX8XXX_IEN);
serial_out(sport, PNX8XXX_IEN, ien & ~PNX8XXX_UART_INT_ALLRX);
/* Clear all pending RX intr */
serial_out(sport, PNX8XXX_ICLR, PNX8XXX_UART_INT_ALLRX);
}
/*
* Set the modem control timer to fire immediately.
*/
static void pnx8xxx_enable_ms(struct uart_port *port)
{
struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
mod_timer(&sport->timer, jiffies);
}
static void pnx8xxx_rx_chars(struct pnx8xxx_port *sport)
{
struct tty_struct *tty = sport->port.state->port.tty;
unsigned int status, ch, flg;
status = FIFO_TO_SM(serial_in(sport, PNX8XXX_FIFO)) |
ISTAT_TO_SM(serial_in(sport, PNX8XXX_ISTAT));
while (status & FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFIFO)) {
ch = serial_in(sport, PNX8XXX_FIFO) & 0xff;
sport->port.icount.rx++;
flg = TTY_NORMAL;
/*
* note that the error handling code is
* out of the main execution path
*/
if (status & (FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFE |
PNX8XXX_UART_FIFO_RXPAR |
PNX8XXX_UART_FIFO_RXBRK) |
ISTAT_TO_SM(PNX8XXX_UART_INT_RXOVRN))) {
if (status & FIFO_TO_SM(PNX8XXX_UART_FIFO_RXBRK)) {
status &= ~(FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFE) |
FIFO_TO_SM(PNX8XXX_UART_FIFO_RXPAR));
sport->port.icount.brk++;
if (uart_handle_break(&sport->port))
goto ignore_char;
} else if (status & FIFO_TO_SM(PNX8XXX_UART_FIFO_RXPAR))
sport->port.icount.parity++;
else if (status & FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFE))
sport->port.icount.frame++;
if (status & ISTAT_TO_SM(PNX8XXX_UART_INT_RXOVRN))
sport->port.icount.overrun++;
status &= sport->port.read_status_mask;
if (status & FIFO_TO_SM(PNX8XXX_UART_FIFO_RXPAR))
flg = TTY_PARITY;
else if (status & FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFE))
flg = TTY_FRAME;
#ifdef SUPPORT_SYSRQ
sport->port.sysrq = 0;
#endif
}
if (uart_handle_sysrq_char(&sport->port, ch))
goto ignore_char;
uart_insert_char(&sport->port, status,
ISTAT_TO_SM(PNX8XXX_UART_INT_RXOVRN), ch, flg);
ignore_char:
serial_out(sport, PNX8XXX_LCR, serial_in(sport, PNX8XXX_LCR) |
PNX8XXX_UART_LCR_RX_NEXT);
status = FIFO_TO_SM(serial_in(sport, PNX8XXX_FIFO)) |
ISTAT_TO_SM(serial_in(sport, PNX8XXX_ISTAT));
}
tty_flip_buffer_push(tty);
}
static void pnx8xxx_tx_chars(struct pnx8xxx_port *sport)
{
struct circ_buf *xmit = &sport->port.state->xmit;
if (sport->port.x_char) {
serial_out(sport, PNX8XXX_FIFO, sport->port.x_char);
sport->port.icount.tx++;
sport->port.x_char = 0;
return;
}
/*
* Check the modem control lines before
* transmitting anything.
*/
pnx8xxx_mctrl_check(sport);
if (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port)) {
pnx8xxx_stop_tx(&sport->port);
return;
}
/*
* TX while bytes available
*/
while (((serial_in(sport, PNX8XXX_FIFO) &
PNX8XXX_UART_FIFO_TXFIFO) >> 16) < 16) {
serial_out(sport, PNX8XXX_FIFO, xmit->buf[xmit->tail]);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
sport->port.icount.tx++;
if (uart_circ_empty(xmit))
break;
}
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&sport->port);
if (uart_circ_empty(xmit))
pnx8xxx_stop_tx(&sport->port);
}
static irqreturn_t pnx8xxx_int(int irq, void *dev_id)
{
struct pnx8xxx_port *sport = dev_id;
unsigned int status;
spin_lock(&sport->port.lock);
/* Get the interrupts */
status = serial_in(sport, PNX8XXX_ISTAT) & serial_in(sport, PNX8XXX_IEN);
/* Byte or break signal received */
if (status & (PNX8XXX_UART_INT_RX | PNX8XXX_UART_INT_BREAK))
pnx8xxx_rx_chars(sport);
/* TX holding register empty - transmit a byte */
if (status & PNX8XXX_UART_INT_TX)
pnx8xxx_tx_chars(sport);
/* Clear the ISTAT register */
serial_out(sport, PNX8XXX_ICLR, status);
spin_unlock(&sport->port.lock);
return IRQ_HANDLED;
}
/*
* Return TIOCSER_TEMT when transmitter is not busy.
*/
static unsigned int pnx8xxx_tx_empty(struct uart_port *port)
{
struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
return serial_in(sport, PNX8XXX_FIFO) & PNX8XXX_UART_FIFO_TXFIFO_STA ? 0 : TIOCSER_TEMT;
}
static unsigned int pnx8xxx_get_mctrl(struct uart_port *port)
{
struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
unsigned int mctrl = TIOCM_DSR;
unsigned int msr;
/* REVISIT */
msr = serial_in(sport, PNX8XXX_MCR);
mctrl |= msr & PNX8XXX_UART_MCR_CTS ? TIOCM_CTS : 0;
mctrl |= msr & PNX8XXX_UART_MCR_DCD ? TIOCM_CAR : 0;
return mctrl;
}
static void pnx8xxx_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
#if 0 /* FIXME */
struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
unsigned int msr;
#endif
}
/*
* Interrupts always disabled.
*/
static void pnx8xxx_break_ctl(struct uart_port *port, int break_state)
{
struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
unsigned long flags;
unsigned int lcr;
spin_lock_irqsave(&sport->port.lock, flags);
lcr = serial_in(sport, PNX8XXX_LCR);
if (break_state == -1)
lcr |= PNX8XXX_UART_LCR_TXBREAK;
else
lcr &= ~PNX8XXX_UART_LCR_TXBREAK;
serial_out(sport, PNX8XXX_LCR, lcr);
spin_unlock_irqrestore(&sport->port.lock, flags);
}
static int pnx8xxx_startup(struct uart_port *port)
{
struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
int retval;
/*
* Allocate the IRQ
*/
retval = request_irq(sport->port.irq, pnx8xxx_int, 0,
"pnx8xxx-uart", sport);
if (retval)
return retval;
/*
* Finally, clear and enable interrupts
*/
serial_out(sport, PNX8XXX_ICLR, PNX8XXX_UART_INT_ALLRX |
PNX8XXX_UART_INT_ALLTX);
serial_out(sport, PNX8XXX_IEN, serial_in(sport, PNX8XXX_IEN) |
PNX8XXX_UART_INT_ALLRX |
PNX8XXX_UART_INT_ALLTX);
/*
* Enable modem status interrupts
*/
spin_lock_irq(&sport->port.lock);
pnx8xxx_enable_ms(&sport->port);
spin_unlock_irq(&sport->port.lock);
return 0;
}
static void pnx8xxx_shutdown(struct uart_port *port)
{
struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
int lcr;
/*
* Stop our timer.
*/
del_timer_sync(&sport->timer);
/*
* Disable all interrupts
*/
serial_out(sport, PNX8XXX_IEN, 0);
/*
* Reset the Tx and Rx FIFOS, disable the break condition
*/
lcr = serial_in(sport, PNX8XXX_LCR);
lcr &= ~PNX8XXX_UART_LCR_TXBREAK;
lcr |= PNX8XXX_UART_LCR_TX_RST | PNX8XXX_UART_LCR_RX_RST;
serial_out(sport, PNX8XXX_LCR, lcr);
/*
* Clear all interrupts
*/
serial_out(sport, PNX8XXX_ICLR, PNX8XXX_UART_INT_ALLRX |
PNX8XXX_UART_INT_ALLTX);
/*
* Free the interrupt
*/
free_irq(sport->port.irq, sport);
}
static void
pnx8xxx_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
unsigned long flags;
unsigned int lcr_fcr, old_ien, baud, quot;
unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;
/*
* We only support CS7 and CS8.
*/
while ((termios->c_cflag & CSIZE) != CS7 &&
(termios->c_cflag & CSIZE) != CS8) {
termios->c_cflag &= ~CSIZE;
termios->c_cflag |= old_csize;
old_csize = CS8;
}
if ((termios->c_cflag & CSIZE) == CS8)
lcr_fcr = PNX8XXX_UART_LCR_8BIT;
else
lcr_fcr = 0;
if (termios->c_cflag & CSTOPB)
lcr_fcr |= PNX8XXX_UART_LCR_2STOPB;
if (termios->c_cflag & PARENB) {
lcr_fcr |= PNX8XXX_UART_LCR_PAREN;
if (!(termios->c_cflag & PARODD))
lcr_fcr |= PNX8XXX_UART_LCR_PAREVN;
}
/*
* Ask the core to calculate the divisor for us.
*/
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
quot = uart_get_divisor(port, baud);
spin_lock_irqsave(&sport->port.lock, flags);
sport->port.read_status_mask = ISTAT_TO_SM(PNX8XXX_UART_INT_RXOVRN) |
ISTAT_TO_SM(PNX8XXX_UART_INT_EMPTY) |
ISTAT_TO_SM(PNX8XXX_UART_INT_RX);
if (termios->c_iflag & INPCK)
sport->port.read_status_mask |=
FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFE) |
FIFO_TO_SM(PNX8XXX_UART_FIFO_RXPAR);
if (termios->c_iflag & (BRKINT | PARMRK))
sport->port.read_status_mask |=
ISTAT_TO_SM(PNX8XXX_UART_INT_BREAK);
/*
* Characters to ignore
*/
sport->port.ignore_status_mask = 0;
if (termios->c_iflag & IGNPAR)
sport->port.ignore_status_mask |=
FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFE) |
FIFO_TO_SM(PNX8XXX_UART_FIFO_RXPAR);
if (termios->c_iflag & IGNBRK) {
sport->port.ignore_status_mask |=
ISTAT_TO_SM(PNX8XXX_UART_INT_BREAK);
/*
* If we're ignoring parity and break indicators,
* ignore overruns too (for real raw support).
*/
if (termios->c_iflag & IGNPAR)
sport->port.ignore_status_mask |=
ISTAT_TO_SM(PNX8XXX_UART_INT_RXOVRN);
}
/*
* ignore all characters if CREAD is not set
*/
if ((termios->c_cflag & CREAD) == 0)
sport->port.ignore_status_mask |=
ISTAT_TO_SM(PNX8XXX_UART_INT_RX);
del_timer_sync(&sport->timer);
/*
* Update the per-port timeout.
*/
uart_update_timeout(port, termios->c_cflag, baud);
/*
* disable interrupts and drain transmitter
*/
old_ien = serial_in(sport, PNX8XXX_IEN);
serial_out(sport, PNX8XXX_IEN, old_ien & ~(PNX8XXX_UART_INT_ALLTX |
PNX8XXX_UART_INT_ALLRX));
while (serial_in(sport, PNX8XXX_FIFO) & PNX8XXX_UART_FIFO_TXFIFO_STA)
barrier();
/* then, disable everything */
serial_out(sport, PNX8XXX_IEN, 0);
/* Reset the Rx and Tx FIFOs too */
lcr_fcr |= PNX8XXX_UART_LCR_TX_RST;
lcr_fcr |= PNX8XXX_UART_LCR_RX_RST;
/* set the parity, stop bits and data size */
serial_out(sport, PNX8XXX_LCR, lcr_fcr);
/* set the baud rate */
quot -= 1;
serial_out(sport, PNX8XXX_BAUD, quot);
serial_out(sport, PNX8XXX_ICLR, -1);
serial_out(sport, PNX8XXX_IEN, old_ien);
if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
pnx8xxx_enable_ms(&sport->port);
spin_unlock_irqrestore(&sport->port.lock, flags);
}
static const char *pnx8xxx_type(struct uart_port *port)
{
struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
return sport->port.type == PORT_PNX8XXX ? "PNX8XXX" : NULL;
}
/*
* Release the memory region(s) being used by 'port'.
*/
static void pnx8xxx_release_port(struct uart_port *port)
{
struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
release_mem_region(sport->port.mapbase, UART_PORT_SIZE);
}
/*
* Request the memory region(s) being used by 'port'.
*/
static int pnx8xxx_request_port(struct uart_port *port)
{
struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
return request_mem_region(sport->port.mapbase, UART_PORT_SIZE,
"pnx8xxx-uart") != NULL ? 0 : -EBUSY;
}
/*
* Configure/autoconfigure the port.
*/
static void pnx8xxx_config_port(struct uart_port *port, int flags)
{
struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
if (flags & UART_CONFIG_TYPE &&
pnx8xxx_request_port(&sport->port) == 0)
sport->port.type = PORT_PNX8XXX;
}
/*
* Verify the new serial_struct (for TIOCSSERIAL).
* The only change we allow are to the flags and type, and
* even then only between PORT_PNX8XXX and PORT_UNKNOWN
*/
static int
pnx8xxx_verify_port(struct uart_port *port, struct serial_struct *ser)
{
struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
int ret = 0;
if (ser->type != PORT_UNKNOWN && ser->type != PORT_PNX8XXX)
ret = -EINVAL;
if (sport->port.irq != ser->irq)
ret = -EINVAL;
if (ser->io_type != SERIAL_IO_MEM)
ret = -EINVAL;
if (sport->port.uartclk / 16 != ser->baud_base)
ret = -EINVAL;
if ((void *)sport->port.mapbase != ser->iomem_base)
ret = -EINVAL;
if (sport->port.iobase != ser->port)
ret = -EINVAL;
if (ser->hub6 != 0)
ret = -EINVAL;
return ret;
}
static struct uart_ops pnx8xxx_pops = {
.tx_empty = pnx8xxx_tx_empty,
.set_mctrl = pnx8xxx_set_mctrl,
.get_mctrl = pnx8xxx_get_mctrl,
.stop_tx = pnx8xxx_stop_tx,
.start_tx = pnx8xxx_start_tx,
.stop_rx = pnx8xxx_stop_rx,
.enable_ms = pnx8xxx_enable_ms,
.break_ctl = pnx8xxx_break_ctl,
.startup = pnx8xxx_startup,
.shutdown = pnx8xxx_shutdown,
.set_termios = pnx8xxx_set_termios,
.type = pnx8xxx_type,
.release_port = pnx8xxx_release_port,
.request_port = pnx8xxx_request_port,
.config_port = pnx8xxx_config_port,
.verify_port = pnx8xxx_verify_port,
};
/*
* Setup the PNX8XXX serial ports.
*
* Note also that we support "console=ttySx" where "x" is either 0 or 1.
*/
static void __init pnx8xxx_init_ports(void)
{
static int first = 1;
int i;
if (!first)
return;
first = 0;
for (i = 0; i < NR_PORTS; i++) {
init_timer(&pnx8xxx_ports[i].timer);
pnx8xxx_ports[i].timer.function = pnx8xxx_timeout;
pnx8xxx_ports[i].timer.data = (unsigned long)&pnx8xxx_ports[i];
pnx8xxx_ports[i].port.ops = &pnx8xxx_pops;
}
}
#ifdef CONFIG_SERIAL_PNX8XXX_CONSOLE
static void pnx8xxx_console_putchar(struct uart_port *port, int ch)
{
struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
int status;
do {
/* Wait for UART_TX register to empty */
status = serial_in(sport, PNX8XXX_FIFO);
} while (status & PNX8XXX_UART_FIFO_TXFIFO);
serial_out(sport, PNX8XXX_FIFO, ch);
}
/*
* Interrupts are disabled on entering
 */
static void
pnx8xxx_console_write(struct console *co, const char *s, unsigned int count)
{
struct pnx8xxx_port *sport = &pnx8xxx_ports[co->index];
unsigned int old_ien, status;
/*
* First, save IEN and then disable interrupts
*/
old_ien = serial_in(sport, PNX8XXX_IEN);
serial_out(sport, PNX8XXX_IEN, old_ien & ~(PNX8XXX_UART_INT_ALLTX |
PNX8XXX_UART_INT_ALLRX));
uart_console_write(&sport->port, s, count, pnx8xxx_console_putchar);
/*
* Finally, wait for transmitter to become empty
* and restore IEN
*/
do {
/* Wait for UART_TX register to empty */
status = serial_in(sport, PNX8XXX_FIFO);
} while (status & PNX8XXX_UART_FIFO_TXFIFO);
/* Clear TX and EMPTY interrupt */
serial_out(sport, PNX8XXX_ICLR, PNX8XXX_UART_INT_TX |
PNX8XXX_UART_INT_EMPTY);
serial_out(sport, PNX8XXX_IEN, old_ien);
}
static int __init
pnx8xxx_console_setup(struct console *co, char *options)
{
struct pnx8xxx_port *sport;
int baud = 38400;
int bits = 8;
int parity = 'n';
int flow = 'n';
/*
* Check whether an invalid uart number has been specified, and
* if so, search for the first available port that does have
* console support.
*/
if (co->index == -1 || co->index >= NR_PORTS)
co->index = 0;
sport = &pnx8xxx_ports[co->index];
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
return uart_set_options(&sport->port, co, baud, parity, bits, flow);
}
static struct uart_driver pnx8xxx_reg;
static struct console pnx8xxx_console = {
.name = "ttyS",
.write = pnx8xxx_console_write,
.device = uart_console_device,
.setup = pnx8xxx_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &pnx8xxx_reg,
};
static int __init pnx8xxx_rs_console_init(void)
{
pnx8xxx_init_ports();
register_console(&pnx8xxx_console);
return 0;
}
console_initcall(pnx8xxx_rs_console_init);
#define PNX8XXX_CONSOLE &pnx8xxx_console
#else
#define PNX8XXX_CONSOLE NULL
#endif
static struct uart_driver pnx8xxx_reg = {
.owner = THIS_MODULE,
.driver_name = "ttyS",
.dev_name = "ttyS",
.major = SERIAL_PNX8XXX_MAJOR,
.minor = MINOR_START,
.nr = NR_PORTS,
.cons = PNX8XXX_CONSOLE,
};
static int pnx8xxx_serial_suspend(struct platform_device *pdev, pm_message_t state)
{
struct pnx8xxx_port *sport = platform_get_drvdata(pdev);
return uart_suspend_port(&pnx8xxx_reg, &sport->port);
}
static int pnx8xxx_serial_resume(struct platform_device *pdev)
{
struct pnx8xxx_port *sport = platform_get_drvdata(pdev);
return uart_resume_port(&pnx8xxx_reg, &sport->port);
}
static int pnx8xxx_serial_probe(struct platform_device *pdev)
{
struct resource *res = pdev->resource;
int i;
for (i = 0; i < pdev->num_resources; i++, res++) {
if (!(res->flags & IORESOURCE_MEM))
continue;
for (i = 0; i < NR_PORTS; i++) {
if (pnx8xxx_ports[i].port.mapbase != res->start)
continue;
pnx8xxx_ports[i].port.dev = &pdev->dev;
uart_add_one_port(&pnx8xxx_reg, &pnx8xxx_ports[i].port);
platform_set_drvdata(pdev, &pnx8xxx_ports[i]);
break;
}
}
return 0;
}
static int pnx8xxx_serial_remove(struct platform_device *pdev)
{
struct pnx8xxx_port *sport = platform_get_drvdata(pdev);
platform_set_drvdata(pdev, NULL);
if (sport)
uart_remove_one_port(&pnx8xxx_reg, &sport->port);
return 0;
}
static struct platform_driver pnx8xxx_serial_driver = {
.driver = {
.name = "pnx8xxx-uart",
.owner = THIS_MODULE,
},
.probe = pnx8xxx_serial_probe,
.remove = pnx8xxx_serial_remove,
.suspend = pnx8xxx_serial_suspend,
.resume = pnx8xxx_serial_resume,
};
static int __init pnx8xxx_serial_init(void)
{
int ret;
printk(KERN_INFO "Serial: PNX8XXX driver\n");
pnx8xxx_init_ports();
ret = uart_register_driver(&pnx8xxx_reg);
if (ret == 0) {
ret = platform_driver_register(&pnx8xxx_serial_driver);
if (ret)
uart_unregister_driver(&pnx8xxx_reg);
}
return ret;
}
static void __exit pnx8xxx_serial_exit(void)
{
platform_driver_unregister(&pnx8xxx_serial_driver);
uart_unregister_driver(&pnx8xxx_reg);
}
module_init(pnx8xxx_serial_init);
module_exit(pnx8xxx_serial_exit);
MODULE_AUTHOR("Embedded Alley Solutions, Inc.");
MODULE_DESCRIPTION("PNX8XXX SoCs serial port driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(SERIAL_PNX8XXX_MAJOR);
MODULE_ALIAS("platform:pnx8xxx-uart");
| gpl-2.0 |
ea4862/boeffla_cm12.1 | drivers/s390/char/monwriter.c | 10417 | 9781 | /*
* Character device driver for writing z/VM *MONITOR service records.
*
* Copyright IBM Corp. 2006, 2009
*
* Author(s): Melissa Howland <Melissa.Howland@us.ibm.com>
*/
#define KMSG_COMPONENT "monwriter"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/ctype.h>
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
#include <asm/appldata.h>
#include <asm/monwriter.h>
#define MONWRITE_MAX_DATALEN 4010
static int mon_max_bufs = 255;
static int mon_buf_count;
struct mon_buf {
struct list_head list;
struct monwrite_hdr hdr;
int diag_done;
char *data;
};
static LIST_HEAD(mon_priv_list);
struct mon_private {
struct list_head priv_list;
struct list_head list;
struct monwrite_hdr hdr;
size_t hdr_to_read;
size_t data_to_read;
struct mon_buf *current_buf;
struct mutex thread_mutex;
};
/*
* helper functions
*/
static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn)
{
struct appldata_product_id id;
int rc;
memcpy(id.prod_nr, "LNXAPPL", 7);	/* prod_nr is 7 bytes, not NUL-terminated */
id.prod_fn = myhdr->applid;
id.record_nr = myhdr->record_num;
id.version_nr = myhdr->version;
id.release_nr = myhdr->release;
id.mod_lvl = myhdr->mod_level;
rc = appldata_asm(&id, fcn, (void *) buffer, myhdr->datalen);
if (rc <= 0)
return rc;
pr_err("Writing monitor data failed with rc=%i\n", rc);
if (rc == 5)
return -EPERM;
return -EINVAL;
}
static struct mon_buf *monwrite_find_hdr(struct mon_private *monpriv,
struct monwrite_hdr *monhdr)
{
struct mon_buf *entry, *next;
list_for_each_entry_safe(entry, next, &monpriv->list, list)
if ((entry->hdr.mon_function == monhdr->mon_function ||
monhdr->mon_function == MONWRITE_STOP_INTERVAL) &&
entry->hdr.applid == monhdr->applid &&
entry->hdr.record_num == monhdr->record_num &&
entry->hdr.version == monhdr->version &&
entry->hdr.release == monhdr->release &&
entry->hdr.mod_level == monhdr->mod_level)
return entry;
return NULL;
}
static int monwrite_new_hdr(struct mon_private *monpriv)
{
struct monwrite_hdr *monhdr = &monpriv->hdr;
struct mon_buf *monbuf;
int rc = 0;
if (monhdr->datalen > MONWRITE_MAX_DATALEN ||
monhdr->mon_function > MONWRITE_START_CONFIG ||
monhdr->hdrlen != sizeof(struct monwrite_hdr))
return -EINVAL;
monbuf = NULL;
if (monhdr->mon_function != MONWRITE_GEN_EVENT)
monbuf = monwrite_find_hdr(monpriv, monhdr);
if (monbuf) {
if (monhdr->mon_function == MONWRITE_STOP_INTERVAL) {
monhdr->datalen = monbuf->hdr.datalen;
rc = monwrite_diag(monhdr, monbuf->data,
APPLDATA_STOP_REC);
list_del(&monbuf->list);
mon_buf_count--;
kfree(monbuf->data);
kfree(monbuf);
monbuf = NULL;
}
} else if (monhdr->mon_function != MONWRITE_STOP_INTERVAL) {
if (mon_buf_count >= mon_max_bufs)
return -ENOSPC;
monbuf = kzalloc(sizeof(struct mon_buf), GFP_KERNEL);
if (!monbuf)
return -ENOMEM;
monbuf->data = kzalloc(monhdr->datalen,
GFP_KERNEL | GFP_DMA);
if (!monbuf->data) {
kfree(monbuf);
return -ENOMEM;
}
monbuf->hdr = *monhdr;
list_add_tail(&monbuf->list, &monpriv->list);
if (monhdr->mon_function != MONWRITE_GEN_EVENT)
mon_buf_count++;
}
monpriv->current_buf = monbuf;
return rc;
}
static int monwrite_new_data(struct mon_private *monpriv)
{
struct monwrite_hdr *monhdr = &monpriv->hdr;
struct mon_buf *monbuf = monpriv->current_buf;
int rc = 0;
switch (monhdr->mon_function) {
case MONWRITE_START_INTERVAL:
if (!monbuf->diag_done) {
rc = monwrite_diag(monhdr, monbuf->data,
APPLDATA_START_INTERVAL_REC);
monbuf->diag_done = 1;
}
break;
case MONWRITE_START_CONFIG:
if (!monbuf->diag_done) {
rc = monwrite_diag(monhdr, monbuf->data,
APPLDATA_START_CONFIG_REC);
monbuf->diag_done = 1;
}
break;
case MONWRITE_GEN_EVENT:
rc = monwrite_diag(monhdr, monbuf->data,
APPLDATA_GEN_EVENT_REC);
list_del(&monpriv->current_buf->list);
kfree(monpriv->current_buf->data);
kfree(monpriv->current_buf);
monpriv->current_buf = NULL;
break;
default:
/* monhdr->mon_function is checked in monwrite_new_hdr */
BUG();
}
return rc;
}
/*
* file operations
*/
static int monwrite_open(struct inode *inode, struct file *filp)
{
struct mon_private *monpriv;
monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL);
if (!monpriv)
return -ENOMEM;
INIT_LIST_HEAD(&monpriv->list);
monpriv->hdr_to_read = sizeof(monpriv->hdr);
mutex_init(&monpriv->thread_mutex);
filp->private_data = monpriv;
list_add_tail(&monpriv->priv_list, &mon_priv_list);
return nonseekable_open(inode, filp);
}
static int monwrite_close(struct inode *inode, struct file *filp)
{
struct mon_private *monpriv = filp->private_data;
struct mon_buf *entry, *next;
list_for_each_entry_safe(entry, next, &monpriv->list, list) {
if (entry->hdr.mon_function != MONWRITE_GEN_EVENT)
monwrite_diag(&entry->hdr, entry->data,
APPLDATA_STOP_REC);
mon_buf_count--;
list_del(&entry->list);
kfree(entry->data);
kfree(entry);
}
list_del(&monpriv->priv_list);
kfree(monpriv);
return 0;
}
static ssize_t monwrite_write(struct file *filp, const char __user *data,
size_t count, loff_t *ppos)
{
struct mon_private *monpriv = filp->private_data;
size_t len, written;
void *to;
int rc;
mutex_lock(&monpriv->thread_mutex);
for (written = 0; written < count; ) {
if (monpriv->hdr_to_read) {
len = min(count - written, monpriv->hdr_to_read);
to = (char *) &monpriv->hdr +
sizeof(monpriv->hdr) - monpriv->hdr_to_read;
if (copy_from_user(to, data + written, len)) {
rc = -EFAULT;
goto out_error;
}
monpriv->hdr_to_read -= len;
written += len;
if (monpriv->hdr_to_read > 0)
continue;
rc = monwrite_new_hdr(monpriv);
if (rc)
goto out_error;
monpriv->data_to_read = monpriv->current_buf ?
monpriv->current_buf->hdr.datalen : 0;
}
if (monpriv->data_to_read) {
len = min(count - written, monpriv->data_to_read);
to = monpriv->current_buf->data +
monpriv->hdr.datalen - monpriv->data_to_read;
if (copy_from_user(to, data + written, len)) {
rc = -EFAULT;
goto out_error;
}
monpriv->data_to_read -= len;
written += len;
if (monpriv->data_to_read > 0)
continue;
rc = monwrite_new_data(monpriv);
if (rc)
goto out_error;
}
monpriv->hdr_to_read = sizeof(monpriv->hdr);
}
mutex_unlock(&monpriv->thread_mutex);
return written;
out_error:
monpriv->data_to_read = 0;
monpriv->hdr_to_read = sizeof(struct monwrite_hdr);
mutex_unlock(&monpriv->thread_mutex);
return rc;
}
static const struct file_operations monwrite_fops = {
.owner = THIS_MODULE,
.open = &monwrite_open,
.release = &monwrite_close,
.write = &monwrite_write,
.llseek = noop_llseek,
};
static struct miscdevice mon_dev = {
.name = "monwriter",
.fops = &monwrite_fops,
.minor = MISC_DYNAMIC_MINOR,
};
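/*
 * Userspace sketch (added; hypothetical): each write() to
 * /dev/monwriter carries a struct monwrite_hdr (from asm/monwriter.h)
 * followed by hdr.datalen bytes of record data, exactly as
 * monwrite_write() parses it above. Field names follow that header;
 * the application id is a placeholder and error handling is omitted.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <asm/monwriter.h>

static void example_write_event(const char *buf, unsigned short len)
{
	struct monwrite_hdr hdr;
	int fd = open("/dev/monwriter", O_WRONLY);

	memset(&hdr, 0, sizeof(hdr));
	hdr.mon_function = MONWRITE_GEN_EVENT;
	hdr.applid = 0x1234;		/* placeholder application id */
	hdr.datalen = len;
	hdr.hdrlen = sizeof(hdr);
	write(fd, &hdr, sizeof(hdr));	/* header first ... */
	write(fd, buf, len);		/* ... then the record data */
	close(fd);
}
#endif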
/*
* suspend/resume
*/
static int monwriter_freeze(struct device *dev)
{
struct mon_private *monpriv;
struct mon_buf *monbuf;
list_for_each_entry(monpriv, &mon_priv_list, priv_list) {
list_for_each_entry(monbuf, &monpriv->list, list) {
if (monbuf->hdr.mon_function != MONWRITE_GEN_EVENT)
monwrite_diag(&monbuf->hdr, monbuf->data,
APPLDATA_STOP_REC);
}
}
return 0;
}
static int monwriter_restore(struct device *dev)
{
struct mon_private *monpriv;
struct mon_buf *monbuf;
list_for_each_entry(monpriv, &mon_priv_list, priv_list) {
list_for_each_entry(monbuf, &monpriv->list, list) {
if (monbuf->hdr.mon_function == MONWRITE_START_INTERVAL)
monwrite_diag(&monbuf->hdr, monbuf->data,
APPLDATA_START_INTERVAL_REC);
if (monbuf->hdr.mon_function == MONWRITE_START_CONFIG)
monwrite_diag(&monbuf->hdr, monbuf->data,
APPLDATA_START_CONFIG_REC);
}
}
return 0;
}
static int monwriter_thaw(struct device *dev)
{
return monwriter_restore(dev);
}
static const struct dev_pm_ops monwriter_pm_ops = {
.freeze = monwriter_freeze,
.thaw = monwriter_thaw,
.restore = monwriter_restore,
};
static struct platform_driver monwriter_pdrv = {
.driver = {
.name = "monwriter",
.owner = THIS_MODULE,
.pm = &monwriter_pm_ops,
},
};
static struct platform_device *monwriter_pdev;
/*
* module init/exit
*/
static int __init mon_init(void)
{
int rc;
if (!MACHINE_IS_VM)
return -ENODEV;
rc = platform_driver_register(&monwriter_pdrv);
if (rc)
return rc;
monwriter_pdev = platform_device_register_simple("monwriter", -1, NULL,
0);
if (IS_ERR(monwriter_pdev)) {
rc = PTR_ERR(monwriter_pdev);
goto out_driver;
}
/*
* misc_register() has to be the last action in module_init(), because
* file operations will be available right after this.
*/
rc = misc_register(&mon_dev);
if (rc)
goto out_device;
return 0;
out_device:
platform_device_unregister(monwriter_pdev);
out_driver:
platform_driver_unregister(&monwriter_pdrv);
return rc;
}
static void __exit mon_exit(void)
{
misc_deregister(&mon_dev);
platform_device_unregister(monwriter_pdev);
platform_driver_unregister(&monwriter_pdrv);
}
module_init(mon_init);
module_exit(mon_exit);
module_param_named(max_bufs, mon_max_bufs, int, 0644);
MODULE_PARM_DESC(max_bufs, "Maximum number of sample monitor data buffers "
"that can be active at one time");
MODULE_AUTHOR("Melissa Howland <Melissa.Howland@us.ibm.com>");
MODULE_DESCRIPTION("Character device driver for writing z/VM "
"APPLDATA monitor records.");
MODULE_LICENSE("GPL");
| gpl-2.0 |
tellapart/ubuntu-precise | arch/powerpc/platforms/cell/celleb_scc_uhc.c | 12721 | 2518 | /*
* SCC (Super Companion Chip) UHC setup
*
* (C) Copyright 2006-2007 TOSHIBA CORPORATION
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <asm/delay.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include "celleb_scc.h"
#define UHC_RESET_WAIT_MAX 10000
static inline int uhc_clkctrl_ready(u32 val)
{
const u32 mask = SCC_UHC_USBCEN | SCC_UHC_USBEN;	/* both enables are set before polling */
return((val & mask) == mask);
}
/*
* UHC(usb host controller) enable function.
* affect to both of OHCI and EHCI core module.
*/
static void enable_scc_uhc(struct pci_dev *dev)
{
void __iomem *uhc_base;
u32 __iomem *uhc_clkctrl;
u32 __iomem *uhc_ecmode;
u32 val = 0;
int i;
if (!machine_is(celleb_beat) &&
!machine_is(celleb_native))
return;
uhc_base = ioremap(pci_resource_start(dev, 0),
pci_resource_len(dev, 0));
if (!uhc_base) {
printk(KERN_ERR "failed to map UHC register base.\n");
return;
}
uhc_clkctrl = uhc_base + SCC_UHC_CKRCTRL;
uhc_ecmode = uhc_base + SCC_UHC_ECMODE;
/* setup for normal mode */
val |= SCC_UHC_F48MCKLEN;
out_be32(uhc_clkctrl, val);
val |= SCC_UHC_PHY_SUSPEND_SEL;
out_be32(uhc_clkctrl, val);
udelay(10);
val |= SCC_UHC_PHYEN;
out_be32(uhc_clkctrl, val);
udelay(50);
/* disable reset */
val |= SCC_UHC_HCLKEN;
out_be32(uhc_clkctrl, val);
val |= (SCC_UHC_USBCEN | SCC_UHC_USBEN);
out_be32(uhc_clkctrl, val);
i = 0;
while (!uhc_clkctrl_ready(in_be32(uhc_clkctrl))) {
udelay(10);
if (i++ > UHC_RESET_WAIT_MAX) {
printk(KERN_ERR "Failed to disable UHC reset %x\n",
in_be32(uhc_clkctrl));
break;
}
}
/* Endian Conversion Mode for Master ALL area */
out_be32(uhc_ecmode, SCC_UHC_ECMODE_BY_BYTE);
iounmap(uhc_base);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA_2,
PCI_DEVICE_ID_TOSHIBA_SCC_USB, enable_scc_uhc);
| gpl-2.0 |
oppo-source/Find7-kernel-source | sound/oss/v_midi.c | 12721 | 6457 | /*
* sound/oss/v_midi.c
*
 * The low level driver for the virtual (loopback) MIDI ports.
*
*
* Copyright (C) by Hannu Savolainen 1993-1996
*
* USS/Lite for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
* Version 2 (June 1991). See the "COPYING" file distributed with this software
* for more info.
*
* Changes
* Alan Cox Modularisation, changed memory allocations
* Christoph Hellwig Adapted to module_init/module_exit
*
* Status
* Untested
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include "sound_config.h"
#include "v_midi.h"
static vmidi_devc *v_devc[2] = { NULL, NULL};
static int midi1,midi2;
static void *midi_mem = NULL;
/*
 * The port can be used either for input or output. The mode is selected
 * the first time the program calls read or write after open. The current
 * version doesn't support mode changes without closing and reopening the
 * device. Support for this feature may be implemented in a future
 * version of this driver.
*/
static int v_midi_open (int dev, int mode,
void (*input) (int dev, unsigned char data),
void (*output) (int dev)
)
{
vmidi_devc *devc = midi_devs[dev]->devc;
unsigned long flags;
if (devc == NULL)
return -(ENXIO);
spin_lock_irqsave(&devc->lock,flags);
if (devc->opened)
{
spin_unlock_irqrestore(&devc->lock,flags);
return -(EBUSY);
}
devc->opened = 1;
spin_unlock_irqrestore(&devc->lock,flags);
devc->intr_active = 1;
if (mode & OPEN_READ)
{
devc->input_opened = 1;
devc->midi_input_intr = input;
}
return 0;
}
static void v_midi_close (int dev)
{
vmidi_devc *devc = midi_devs[dev]->devc;
unsigned long flags;
if (devc == NULL)
return;
spin_lock_irqsave(&devc->lock,flags);
devc->intr_active = 0;
devc->input_opened = 0;
devc->opened = 0;
spin_unlock_irqrestore(&devc->lock,flags);
}
static int v_midi_out (int dev, unsigned char midi_byte)
{
vmidi_devc *devc = midi_devs[dev]->devc;
vmidi_devc *pdevc;
if (devc == NULL)
return -ENXIO;
pdevc = midi_devs[devc->pair_mididev]->devc;
if (pdevc->input_opened > 0){
if (MIDIbuf_avail(pdevc->my_mididev) > 500)
return 0;
pdevc->midi_input_intr (pdevc->my_mididev, midi_byte);
}
return 1;
}
static inline int v_midi_start_read (int dev)
{
return 0;
}
static int v_midi_end_read (int dev)
{
vmidi_devc *devc = midi_devs[dev]->devc;
if (devc == NULL)
return -ENXIO;
devc->intr_active = 0;
return 0;
}
/* why -EPERM and not -EINVAL?? */
static inline int v_midi_ioctl (int dev, unsigned cmd, void __user *arg)
{
return -EPERM;
}
#define MIDI_SYNTH_NAME "Loopback MIDI"
#define MIDI_SYNTH_CAPS SYNTH_CAP_INPUT
#include "midi_synth.h"
static struct midi_operations v_midi_operations =
{
.owner = THIS_MODULE,
.info = {"Loopback MIDI Port 1", 0, 0, SNDCARD_VMIDI},
.converter = &std_midi_synth,
.in_info = {0},
.open = v_midi_open,
.close = v_midi_close,
.ioctl = v_midi_ioctl,
.outputc = v_midi_out,
.start_read = v_midi_start_read,
.end_read = v_midi_end_read,
};
static struct midi_operations v_midi_operations2 =
{
.owner = THIS_MODULE,
.info = {"Loopback MIDI Port 2", 0, 0, SNDCARD_VMIDI},
.converter = &std_midi_synth,
.in_info = {0},
.open = v_midi_open,
.close = v_midi_close,
.ioctl = v_midi_ioctl,
.outputc = v_midi_out,
.start_read = v_midi_start_read,
.end_read = v_midi_end_read,
};
/*
* We kmalloc just one of these - it makes life simpler and the code
* cleaner and the memory handling far more efficient
*/
struct vmidi_memory
{
/* Must be first */
struct midi_operations m_ops[2];
struct synth_operations s_ops[2];
struct vmidi_devc v_ops[2];
};
static void __init attach_v_midi (struct address_info *hw_config)
{
struct vmidi_memory *m;
/* printk("Attaching v_midi device.....\n"); */
midi1 = sound_alloc_mididev();
if (midi1 == -1)
{
printk(KERN_ERR "v_midi: Too many midi devices detected\n");
return;
}
m = kmalloc(sizeof(struct vmidi_memory), GFP_KERNEL);
if (m == NULL)
{
printk(KERN_WARNING "Loopback MIDI: Failed to allocate memory\n");
sound_unload_mididev(midi1);
return;
}
midi_mem = m;
midi_devs[midi1] = &m->m_ops[0];
midi2 = sound_alloc_mididev();
if (midi2 == -1)
{
printk (KERN_ERR "v_midi: Too many midi devices detected\n");
kfree(m);
sound_unload_mididev(midi1);
return;
}
midi_devs[midi2] = &m->m_ops[1];
/* printk("VMIDI1: %d VMIDI2: %d\n",midi1,midi2); */
/* for MIDI-1 */
v_devc[0] = &m->v_ops[0];
memcpy ((char *) midi_devs[midi1], (char *) &v_midi_operations,
sizeof (struct midi_operations));
v_devc[0]->my_mididev = midi1;
v_devc[0]->pair_mididev = midi2;
v_devc[0]->opened = v_devc[0]->input_opened = 0;
v_devc[0]->intr_active = 0;
v_devc[0]->midi_input_intr = NULL;
spin_lock_init(&v_devc[0]->lock);
midi_devs[midi1]->devc = v_devc[0];
midi_devs[midi1]->converter = &m->s_ops[0];
std_midi_synth.midi_dev = midi1;
memcpy ((char *) midi_devs[midi1]->converter, (char *) &std_midi_synth,
sizeof (struct synth_operations));
midi_devs[midi1]->converter->id = "V_MIDI 1";
/* for MIDI-2 */
v_devc[1] = &m->v_ops[1];
memcpy ((char *) midi_devs[midi2], (char *) &v_midi_operations2,
sizeof (struct midi_operations));
v_devc[1]->my_mididev = midi2;
v_devc[1]->pair_mididev = midi1;
v_devc[1]->opened = v_devc[1]->input_opened = 0;
v_devc[1]->intr_active = 0;
v_devc[1]->midi_input_intr = NULL;
spin_lock_init(&v_devc[1]->lock);
midi_devs[midi2]->devc = v_devc[1];
midi_devs[midi2]->converter = &m->s_ops[1];
std_midi_synth.midi_dev = midi2;
memcpy ((char *) midi_devs[midi2]->converter, (char *) &std_midi_synth,
sizeof (struct synth_operations));
midi_devs[midi2]->converter->id = "V_MIDI 2";
sequencer_init();
/* printk("Attached v_midi device\n"); */
}
static inline int __init probe_v_midi(struct address_info *hw_config)
{
return(1); /* always OK */
}
static void __exit unload_v_midi(struct address_info *hw_config)
{
sound_unload_mididev(midi1);
sound_unload_mididev(midi2);
kfree(midi_mem);
}
static struct address_info cfg; /* dummy */
static int __init init_vmidi(void)
{
printk("MIDI Loopback device driver\n");
if (!probe_v_midi(&cfg))
return -ENODEV;
attach_v_midi(&cfg);
return 0;
}
static void __exit cleanup_vmidi(void)
{
unload_v_midi(&cfg);
}
module_init(init_vmidi);
module_exit(cleanup_vmidi);
MODULE_LICENSE("GPL");
| gpl-2.0 |
miiicmueller/android_kernel_raspberryPi_rpiv2 | drivers/video/console/tileblit.c | 13233 | 3928 | /*
* linux/drivers/video/console/tileblit.c -- Tile Blitting Operation
*
* Copyright (C) 2004 Antonino Daplas <adaplas@pol.net>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*/
#include <linux/module.h>
#include <linux/string.h>
#include <linux/fb.h>
#include <linux/vt_kern.h>
#include <linux/console.h>
#include <asm/types.h>
#include "fbcon.h"
static void tile_bmove(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int dy, int dx, int height, int width)
{
struct fb_tilearea area;
area.sx = sx;
area.sy = sy;
area.dx = dx;
area.dy = dy;
area.height = height;
area.width = width;
info->tileops->fb_tilecopy(info, &area);
}
static void tile_clear(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int height, int width)
{
struct fb_tilerect rect;
int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
int fgshift = (vc->vc_hi_font_mask) ? 9 : 8;
rect.index = vc->vc_video_erase_char &
((vc->vc_hi_font_mask) ? 0x1ff : 0xff);
rect.fg = attr_fgcol_ec(fgshift, vc, info);
rect.bg = attr_bgcol_ec(bgshift, vc, info);
rect.sx = sx;
rect.sy = sy;
rect.width = width;
rect.height = height;
rect.rop = ROP_COPY;
info->tileops->fb_tilefill(info, &rect);
}
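/*
 * Worked example (illustration only): with a standard 256-glyph font a
 * console cell packs the attribute in the high byte, so for the common
 * erase char 0x0720 the glyph index is 0x20 (space), the foreground is
 * roughly (0x0720 >> 8) & 0x0f = 7 and the background (0x0720 >> 12) &
 * 0x0f = 0. A 512-glyph font steals one attribute bit for the index,
 * which is why the shifts above move up to 9/13 and the index mask
 * widens from 0xff to 0x1ff.
 */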
static void tile_putcs(struct vc_data *vc, struct fb_info *info,
const unsigned short *s, int count, int yy, int xx,
int fg, int bg)
{
struct fb_tileblit blit;
unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
int size = sizeof(u32) * count, i;
blit.sx = xx;
blit.sy = yy;
blit.width = count;
blit.height = 1;
blit.fg = fg;
blit.bg = bg;
blit.length = count;
blit.indices = (u32 *) fb_get_buffer_offset(info, &info->pixmap, size);
for (i = 0; i < count; i++)
blit.indices[i] = (u32)(scr_readw(s++) & charmask);
info->tileops->fb_tileblit(info, &blit);
}
static void tile_clear_margins(struct vc_data *vc, struct fb_info *info,
int bottom_only)
{
return;
}
static void tile_cursor(struct vc_data *vc, struct fb_info *info, int mode,
int softback_lines, int fg, int bg)
{
struct fb_tilecursor cursor;
int use_sw = (vc->vc_cursor_type & 0x10);
cursor.sx = vc->vc_x;
cursor.sy = vc->vc_y;
cursor.mode = (mode == CM_ERASE || use_sw) ? 0 : 1;
cursor.fg = fg;
cursor.bg = bg;
switch (vc->vc_cursor_type & 0x0f) {
case CUR_NONE:
cursor.shape = FB_TILE_CURSOR_NONE;
break;
case CUR_UNDERLINE:
cursor.shape = FB_TILE_CURSOR_UNDERLINE;
break;
case CUR_LOWER_THIRD:
cursor.shape = FB_TILE_CURSOR_LOWER_THIRD;
break;
case CUR_LOWER_HALF:
cursor.shape = FB_TILE_CURSOR_LOWER_HALF;
break;
case CUR_TWO_THIRDS:
cursor.shape = FB_TILE_CURSOR_TWO_THIRDS;
break;
case CUR_BLOCK:
default:
cursor.shape = FB_TILE_CURSOR_BLOCK;
break;
}
info->tileops->fb_tilecursor(info, &cursor);
}
static int tile_update_start(struct fb_info *info)
{
struct fbcon_ops *ops = info->fbcon_par;
int err;
err = fb_pan_display(info, &ops->var);
ops->var.xoffset = info->var.xoffset;
ops->var.yoffset = info->var.yoffset;
ops->var.vmode = info->var.vmode;
return err;
}
void fbcon_set_tileops(struct vc_data *vc, struct fb_info *info)
{
struct fb_tilemap map;
struct fbcon_ops *ops = info->fbcon_par;
ops->bmove = tile_bmove;
ops->clear = tile_clear;
ops->putcs = tile_putcs;
ops->clear_margins = tile_clear_margins;
ops->cursor = tile_cursor;
ops->update_start = tile_update_start;
if (ops->p) {
map.width = vc->vc_font.width;
map.height = vc->vc_font.height;
map.depth = 1;
map.length = (ops->p->userfont) ?
FNTCHARCNT(ops->p->fontdata) : 256;
map.data = ops->p->fontdata;
info->tileops->fb_settile(info, &map);
}
}
EXPORT_SYMBOL(fbcon_set_tileops);
MODULE_AUTHOR("Antonino Daplas <adaplas@pol.net>");
MODULE_DESCRIPTION("Tile Blitting Operation");
MODULE_LICENSE("GPL");
| gpl-2.0 |
xjljian/huawei-kernel-3.4 | drivers/staging/android/lowmemorykiller.c | 178 | 5605 | /* drivers/misc/lowmemorykiller.c
*
* The lowmemorykiller driver lets user-space specify a set of memory thresholds
* where processes with a range of oom_score_adj values will get killed. Specify
* the minimum oom_score_adj values in
* /sys/module/lowmemorykiller/parameters/adj and the number of free pages in
* /sys/module/lowmemorykiller/parameters/minfree. Both files take a comma
* separated list of numbers in ascending order.
*
* For example, write "0,8" to /sys/module/lowmemorykiller/parameters/adj and
* "1024,4096" to /sys/module/lowmemorykiller/parameters/minfree to kill
processes with an oom_score_adj value of 8 or higher when the free memory
drops below 4096 pages and kill processes with an oom_score_adj value of 0 or
* higher when the free memory drops below 1024 pages.
*
* The driver considers memory used for caches to be free, but if a large
* percentage of the cached memory is locked this can be very inaccurate
* and processes may not get killed until the normal oom killer is triggered.
*
* Copyright (C) 2007-2008 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
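/*
 * Worked example (illustration only): the "0,8" / "1024,4096" settings
 * described above can be applied from user space by writing the module
 * parameters. A minimal sketch, assuming the module is loaded and the
 * caller may write to sysfs:
 */
#if 0
#include <stdio.h>
static int lmk_set_thresholds(void)
{
FILE *f;
/* kill adj >= 0 below 1024 free pages, adj >= 8 below 4096 */
f = fopen("/sys/module/lowmemorykiller/parameters/adj", "w");
if (!f)
return -1;
fprintf(f, "0,8");
fclose(f);
f = fopen("/sys/module/lowmemorykiller/parameters/minfree", "w");
if (!f)
return -1;
fprintf(f, "1024,4096");
fclose(f);
return 0;
}
#endif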
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/oom.h>
#include <linux/sched.h>
#include <linux/rcupdate.h>
#include <linux/notifier.h>
static uint32_t lowmem_debug_level = 2;
static int lowmem_adj[6] = {
0,
1,
6,
12,
};
static int lowmem_adj_size = 4;
static int lowmem_minfree[6] = {
3 * 512, /* 6MB */
2 * 1024, /* 8MB */
4 * 1024, /* 16MB */
16 * 1024, /* 64MB */
};
static int lowmem_minfree_size = 4;
static unsigned long lowmem_deathpending_timeout;
#define lowmem_print(level, x...) \
do { \
if (lowmem_debug_level >= (level)) \
printk(x); \
} while (0)
static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
struct task_struct *tsk;
struct task_struct *selected = NULL;
int rem = 0;
int tasksize;
int i;
int min_score_adj = OOM_SCORE_ADJ_MAX + 1;
int selected_tasksize = 0;
int selected_oom_score_adj;
int array_size = ARRAY_SIZE(lowmem_adj);
int other_free = global_page_state(NR_FREE_PAGES);
int other_file = global_page_state(NR_FILE_PAGES) -
global_page_state(NR_SHMEM);
if (lowmem_adj_size < array_size)
array_size = lowmem_adj_size;
if (lowmem_minfree_size < array_size)
array_size = lowmem_minfree_size;
for (i = 0; i < array_size; i++) {
if (other_free < lowmem_minfree[i] &&
other_file < lowmem_minfree[i]) {
min_score_adj = lowmem_adj[i];
break;
}
}
if (sc->nr_to_scan > 0)
lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n",
sc->nr_to_scan, sc->gfp_mask, other_free,
other_file, min_score_adj);
rem = global_page_state(NR_ACTIVE_ANON) +
global_page_state(NR_ACTIVE_FILE) +
global_page_state(NR_INACTIVE_ANON) +
global_page_state(NR_INACTIVE_FILE);
if (sc->nr_to_scan <= 0 || min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
sc->nr_to_scan, sc->gfp_mask, rem);
return rem;
}
selected_oom_score_adj = min_score_adj;
rcu_read_lock();
for_each_process(tsk) {
struct task_struct *p;
int oom_score_adj;
if (tsk->flags & PF_KTHREAD)
continue;
p = find_lock_task_mm(tsk);
if (!p)
continue;
if (test_tsk_thread_flag(p, TIF_MEMDIE) &&
time_before_eq(jiffies, lowmem_deathpending_timeout)) {
task_unlock(p);
rcu_read_unlock();
return 0;
}
oom_score_adj = p->signal->oom_score_adj;
if (oom_score_adj < min_score_adj) {
task_unlock(p);
continue;
}
tasksize = get_mm_rss(p->mm);
task_unlock(p);
if (tasksize <= 0)
continue;
if (selected) {
if (oom_score_adj < selected_oom_score_adj)
continue;
if (oom_score_adj == selected_oom_score_adj &&
tasksize <= selected_tasksize)
continue;
}
selected = p;
selected_tasksize = tasksize;
selected_oom_score_adj = oom_score_adj;
lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n",
p->pid, p->comm, oom_score_adj, tasksize);
}
if (selected) {
lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n",
selected->pid, selected->comm,
selected_oom_score_adj, selected_tasksize);
lowmem_deathpending_timeout = jiffies + HZ;
send_sig(SIGKILL, selected, 0);
set_tsk_thread_flag(selected, TIF_MEMDIE);
rem -= selected_tasksize;
}
lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
sc->nr_to_scan, sc->gfp_mask, rem);
rcu_read_unlock();
return rem;
}
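/*
 * Worked example (illustration only): with the default tables above,
 * minfree = {1536, 2048, 4096, 16384} pages and adj = {0, 1, 6, 12}.
 * If other_free = 3000 and other_file = 3500, the first index where both
 * fall below minfree[i] is i = 2, so min_score_adj = 6: tasks with
 * oom_score_adj < 6 are skipped, and among the rest the one with the
 * highest adj (largest RSS on a tie) is killed.
 */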
static struct shrinker lowmem_shrinker = {
.shrink = lowmem_shrink,
.seeks = DEFAULT_SEEKS * 16
};
static int __init lowmem_init(void)
{
register_shrinker(&lowmem_shrinker);
return 0;
}
static void __exit lowmem_exit(void)
{
unregister_shrinker(&lowmem_shrinker);
}
module_param_named(cost, lowmem_shrinker.seeks, int, S_IRUGO | S_IWUSR);
module_param_array_named(adj, lowmem_adj, int, &lowmem_adj_size,
S_IRUGO | S_IWUSR);
module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size,
S_IRUGO | S_IWUSR);
module_param_named(debug_level, lowmem_debug_level, uint, S_IRUGO | S_IWUSR);
module_init(lowmem_init);
module_exit(lowmem_exit);
MODULE_LICENSE("GPL");
| gpl-2.0 |
frustreated/linux | arch/x86/kernel/apic/apic_noop.c | 434 | 3467 | // SPDX-License-Identifier: GPL-2.0
/*
* NOOP APIC driver.
*
* Does almost nothing and should be substituted by a real apic driver via
* probe routine.
*
* Though, in case the APIC is disabled (for some reason), we try
* not to uglify the caller's code and still allow calling (some)
* APIC routines like self-IPI, etc...
*/
#include <linux/cpumask.h>
#include <linux/thread_info.h>
#include <asm/apic.h>
static void noop_init_apic_ldr(void) { }
static void noop_send_IPI(int cpu, int vector) { }
static void noop_send_IPI_mask(const struct cpumask *cpumask, int vector) { }
static void noop_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector) { }
static void noop_send_IPI_allbutself(int vector) { }
static void noop_send_IPI_all(int vector) { }
static void noop_send_IPI_self(int vector) { }
static void noop_apic_wait_icr_idle(void) { }
static void noop_apic_icr_write(u32 low, u32 id) { }
static int noop_wakeup_secondary_cpu(int apicid, unsigned long start_eip)
{
return -1;
}
static u32 noop_safe_apic_wait_icr_idle(void)
{
return 0;
}
static u64 noop_apic_icr_read(void)
{
return 0;
}
static int noop_phys_pkg_id(int cpuid_apic, int index_msb)
{
return 0;
}
static unsigned int noop_get_apic_id(unsigned long x)
{
return 0;
}
static int noop_probe(void)
{
/*
* The NOOP APIC should never be
* enabled via the probe routine
*/
return 0;
}
static int noop_apic_id_registered(void)
{
/*
* if we would be really "pedantic"
* we should pass read_apic_id() here
* but since NOOP suppose APIC ID = 0
* lets save a few cycles
*/
return physid_isset(0, phys_cpu_present_map);
}
static u32 noop_apic_read(u32 reg)
{
WARN_ON_ONCE(boot_cpu_has(X86_FEATURE_APIC) && !disable_apic);
return 0;
}
static void noop_apic_write(u32 reg, u32 v)
{
WARN_ON_ONCE(boot_cpu_has(X86_FEATURE_APIC) && !disable_apic);
}
#ifdef CONFIG_X86_32
static int noop_x86_32_early_logical_apicid(int cpu)
{
return BAD_APICID;
}
#endif
struct apic apic_noop __ro_after_init = {
.name = "noop",
.probe = noop_probe,
.acpi_madt_oem_check = NULL,
.apic_id_valid = default_apic_id_valid,
.apic_id_registered = noop_apic_id_registered,
.delivery_mode = APIC_DELIVERY_MODE_FIXED,
.dest_mode_logical = true,
.disable_esr = 0,
.check_apicid_used = default_check_apicid_used,
.init_apic_ldr = noop_init_apic_ldr,
.ioapic_phys_id_map = default_ioapic_phys_id_map,
.setup_apic_routing = NULL,
.cpu_present_to_apicid = default_cpu_present_to_apicid,
.apicid_to_cpu_present = physid_set_mask_of_physid,
.check_phys_apicid_present = default_check_phys_apicid_present,
.phys_pkg_id = noop_phys_pkg_id,
.get_apic_id = noop_get_apic_id,
.set_apic_id = NULL,
.calc_dest_apicid = apic_flat_calc_apicid,
.send_IPI = noop_send_IPI,
.send_IPI_mask = noop_send_IPI_mask,
.send_IPI_mask_allbutself = noop_send_IPI_mask_allbutself,
.send_IPI_allbutself = noop_send_IPI_allbutself,
.send_IPI_all = noop_send_IPI_all,
.send_IPI_self = noop_send_IPI_self,
.wakeup_secondary_cpu = noop_wakeup_secondary_cpu,
.inquire_remote_apic = NULL,
.read = noop_apic_read,
.write = noop_apic_write,
.eoi_write = noop_apic_write,
.icr_read = noop_apic_icr_read,
.icr_write = noop_apic_icr_write,
.wait_icr_idle = noop_apic_wait_icr_idle,
.safe_wait_icr_idle = noop_safe_apic_wait_icr_idle,
#ifdef CONFIG_X86_32
.x86_32_early_logical_apicid = noop_x86_32_early_logical_apicid,
#endif
};
| gpl-2.0 |
ma34s/so03c_kernel | drivers/block/ub.c | 434 | 63135 | /*
* The low performance USB storage driver (ub).
*
* Copyright (c) 1999, 2000 Matthew Dharm (mdharm-usb@one-eyed-alien.net)
* Copyright (C) 2004 Pete Zaitcev (zaitcev@yahoo.com)
*
* This work is a part of Linux kernel, is derived from it,
* and is not licensed separately. See file COPYING for details.
*
* TODO (sorted by decreasing priority)
* -- Return sense now that rq allows it (we always auto-sense anyway).
* -- set readonly flag for CDs, set removable flag for CF readers
* -- do inquiry and verify we got a disk and not a tape (for LUN mismatch)
* -- verify the 13 conditions and do bulk resets
* -- highmem
* -- move top_sense and work_bcs into separate allocations (if they survive)
* for cache purists and esoteric architectures.
* -- Allocate structure for LUN 0 before the first ub_sync_tur, avoid NULL. ?
* -- prune comments, they are too voluminous
* -- Resolve XXX's
* -- CLEAR, CLR2STS, CLRRS seem to be ripe for refactoring.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb_usual.h>
#include <linux/blkdev.h>
#include <linux/timer.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#define DRV_NAME "ub"
#define UB_MAJOR 180
/*
* The command state machine is the key model for understanding of this driver.
*
* The general rule is that all transitions are done towards the bottom
* of the diagram, thus preventing any loops.
*
* An exception to that is how the STAT state is handled. A counter allows it
* to be re-entered along the path marked with [C].
*
* +--------+
* ! INIT !
* +--------+
* !
* ub_scsi_cmd_start fails ->--------------------------------------\
* ! !
* V !
* +--------+ !
* ! CMD ! !
* +--------+ !
* ! +--------+ !
* was -EPIPE -->-------------------------------->! CLEAR ! !
* ! +--------+ !
* ! ! !
* was error -->------------------------------------- ! --------->\
* ! ! !
* /--<-- cmd->dir == NONE ? ! !
* ! ! ! !
* ! V ! !
* ! +--------+ ! !
* ! ! DATA ! ! !
* ! +--------+ ! !
* ! ! +---------+ ! !
* ! was -EPIPE -->--------------->! CLR2STS ! ! !
* ! ! +---------+ ! !
* ! ! ! ! !
* ! ! was error -->---- ! --------->\
* ! was error -->--------------------- ! ------------- ! --------->\
* ! ! ! ! !
* ! V ! ! !
* \--->+--------+ ! ! !
* ! STAT !<--------------------------/ ! !
* /--->+--------+ ! !
* ! ! ! !
* [C] was -EPIPE -->-----------\ ! !
* ! ! ! ! !
* +<---- len == 0 ! ! !
* ! ! ! ! !
* ! was error -->--------------------------------------!---------->\
* ! ! ! ! !
* +<---- bad CSW ! ! !
* +<---- bad tag ! ! !
* ! ! V ! !
* ! ! +--------+ ! !
* ! ! ! CLRRS ! ! !
* ! ! +--------+ ! !
* ! ! ! ! !
* \------- ! --------------------[C]--------\ ! !
* ! ! ! !
* cmd->error---\ +--------+ ! !
* ! +--------------->! SENSE !<----------/ !
* STAT_FAIL----/ +--------+ !
* ! ! V
* ! V +--------+
* \--------------------------------\--------------------->! DONE !
* +--------+
*/
/*
* This many LUNs per USB device.
* Every one of them takes a host, see UB_MAX_HOSTS.
*/
#define UB_MAX_LUNS 9
/*
*/
#define UB_PARTS_PER_LUN 8
#define UB_MAX_CDB_SIZE 16 /* Corresponds to Bulk */
#define UB_SENSE_SIZE 18
/*
*/
/* command block wrapper */
struct bulk_cb_wrap {
__le32 Signature; /* contains 'USBC' */
u32 Tag; /* unique per command id */
__le32 DataTransferLength; /* size of data */
u8 Flags; /* direction in bit 0 */
u8 Lun; /* LUN */
u8 Length; /* length of the CDB */
u8 CDB[UB_MAX_CDB_SIZE]; /* max command */
};
#define US_BULK_CB_WRAP_LEN 31
#define US_BULK_CB_SIGN 0x43425355 /* spells out 'USBC' */
#define US_BULK_FLAG_IN 1
#define US_BULK_FLAG_OUT 0
/* command status wrapper */
struct bulk_cs_wrap {
__le32 Signature; /* should = 'USBS' */
u32 Tag; /* same as original command */
__le32 Residue; /* amount not transferred */
u8 Status; /* see below */
};
#define US_BULK_CS_WRAP_LEN 13
#define US_BULK_CS_SIGN 0x53425355 /* spells out 'USBS' */
#define US_BULK_STAT_OK 0
#define US_BULK_STAT_FAIL 1
#define US_BULK_STAT_PHASE 2
/* bulk-only class specific requests */
#define US_BULK_RESET_REQUEST 0xff
#define US_BULK_GET_MAX_LUN 0xfe
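/*
 * Worked example (illustration only): filling a CBW for a dataless
 * TEST UNIT READY, following the Bulk-Only layout above; the real
 * driver does the equivalent in ub_scsi_cmd_start().
 */
#if 0
static void example_fill_cbw(struct bulk_cb_wrap *bcb, u32 tag)
{
memset(bcb, 0, sizeof(*bcb));
bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); /* 'USBC' */
bcb->Tag = tag; /* echoed back in the CSW */
bcb->DataTransferLength = cpu_to_le32(0); /* no data phase */
bcb->Flags = US_BULK_FLAG_OUT; /* direction bit, moot when len is 0 */
bcb->Lun = 0;
bcb->Length = 6; /* TEST UNIT READY uses a 6-byte CDB */
bcb->CDB[0] = TEST_UNIT_READY;
/* all 31 bytes (US_BULK_CB_WRAP_LEN) go out over the bulk-out pipe */
}
#endif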
/*
*/
struct ub_dev;
#define UB_MAX_REQ_SG 9 /* cdrecord requires 32KB and maybe a header */
#define UB_MAX_SECTORS 64
/*
* A second is more than enough for a 32K transfer (UB_MAX_SECTORS)
* even if a webcam hogs the bus, but some devices need time to spin up.
*/
#define UB_URB_TIMEOUT (HZ*2)
#define UB_DATA_TIMEOUT (HZ*5) /* ZIP does spin-ups in the data phase */
#define UB_STAT_TIMEOUT (HZ*5) /* Same spinups and eject for a dataless cmd. */
#define UB_CTRL_TIMEOUT (HZ/2) /* 500ms ought to be enough to clear a stall */
/*
* An instance of a SCSI command in transit.
*/
#define UB_DIR_NONE 0
#define UB_DIR_READ 1
#define UB_DIR_ILLEGAL2 2
#define UB_DIR_WRITE 3
#define UB_DIR_CHAR(c) (((c)==UB_DIR_WRITE)? 'w': \
(((c)==UB_DIR_READ)? 'r': 'n'))
enum ub_scsi_cmd_state {
UB_CMDST_INIT, /* Initial state */
UB_CMDST_CMD, /* Command submitted */
UB_CMDST_DATA, /* Data phase */
UB_CMDST_CLR2STS, /* Clearing before requesting status */
UB_CMDST_STAT, /* Status phase */
UB_CMDST_CLEAR, /* Clearing a stall (halt, actually) */
UB_CMDST_CLRRS, /* Clearing before retrying status */
UB_CMDST_SENSE, /* Sending Request Sense */
UB_CMDST_DONE /* Final state */
};
struct ub_scsi_cmd {
unsigned char cdb[UB_MAX_CDB_SIZE];
unsigned char cdb_len;
unsigned char dir; /* 0 - none, 1 - read, 3 - write. */
enum ub_scsi_cmd_state state;
unsigned int tag;
struct ub_scsi_cmd *next;
int error; /* Return code - valid upon done */
unsigned int act_len; /* Return size */
unsigned char key, asc, ascq; /* May be valid if error==-EIO */
int stat_count; /* Retries getting status. */
unsigned int timeo; /* jiffies until rq->timeout changes */
unsigned int len; /* Requested length */
unsigned int current_sg;
unsigned int nsg; /* sgv[nsg] */
struct scatterlist sgv[UB_MAX_REQ_SG];
struct ub_lun *lun;
void (*done)(struct ub_dev *, struct ub_scsi_cmd *);
void *back;
};
struct ub_request {
struct request *rq;
unsigned int current_try;
unsigned int nsg; /* sgv[nsg] */
struct scatterlist sgv[UB_MAX_REQ_SG];
};
/*
*/
struct ub_capacity {
unsigned long nsec; /* Linux size - 512 byte sectors */
unsigned int bsize; /* Linux hardsect_size */
unsigned int bshift; /* Shift between 512 and hard sects */
};
/*
* This is a direct take-off from linux/include/completion.h
* The difference is that I do not wait on this thing, just poll.
* When I want to wait (ub_probe), I just use the stock completion.
*
* Note that INIT_COMPLETION takes no lock. It is correct. But why
* in the bloody hell that thing takes struct instead of pointer to struct
* is quite beyond me. I just copied it from the stock completion.
*/
struct ub_completion {
unsigned int done;
spinlock_t lock;
};
static inline void ub_init_completion(struct ub_completion *x)
{
x->done = 0;
spin_lock_init(&x->lock);
}
#define UB_INIT_COMPLETION(x) ((x).done = 0)
static void ub_complete(struct ub_completion *x)
{
unsigned long flags;
spin_lock_irqsave(&x->lock, flags);
x->done++;
spin_unlock_irqrestore(&x->lock, flags);
}
static int ub_is_completed(struct ub_completion *x)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&x->lock, flags);
ret = x->done;
spin_unlock_irqrestore(&x->lock, flags);
return ret;
}
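/*
 * Usage pattern (for orientation): the submit paths below arm this with
 * UB_INIT_COMPLETION() before usb_submit_urb(), the URB callback calls
 * ub_complete(), and the tasklet polls ub_is_completed() instead of
 * sleeping, since it runs in softirq context.
 */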
/*
*/
struct ub_scsi_cmd_queue {
int qlen, qmax;
struct ub_scsi_cmd *head, *tail;
};
/*
* The block device instance (one per LUN).
*/
struct ub_lun {
struct ub_dev *udev;
struct list_head link;
struct gendisk *disk;
int id; /* Host index */
int num; /* LUN number */
char name[16];
int changed; /* Media was changed */
int removable;
int readonly;
struct ub_request urq;
/* Use Ingo's mempool if or when we have more than one command. */
/*
* Currently we never need more than one command for the whole device.
* However, giving every LUN a command is a cheap and automatic way
* to enforce fairness between them.
*/
int cmda[1];
struct ub_scsi_cmd cmdv[1];
struct ub_capacity capacity;
};
/*
* The USB device instance.
*/
struct ub_dev {
spinlock_t *lock;
atomic_t poison; /* The USB device is disconnected */
int openc; /* protected by ub_lock! */
/* kref is too implicit for our taste */
int reset; /* Reset is running */
int bad_resid;
unsigned int tagcnt;
char name[12];
struct usb_device *dev;
struct usb_interface *intf;
struct list_head luns;
unsigned int send_bulk_pipe; /* cached pipe values */
unsigned int recv_bulk_pipe;
unsigned int send_ctrl_pipe;
unsigned int recv_ctrl_pipe;
struct tasklet_struct tasklet;
struct ub_scsi_cmd_queue cmd_queue;
struct ub_scsi_cmd top_rqs_cmd; /* REQUEST SENSE */
unsigned char top_sense[UB_SENSE_SIZE];
struct ub_completion work_done;
struct urb work_urb;
struct timer_list work_timer;
int last_pipe; /* What might need clearing */
__le32 signature; /* Learned signature */
struct bulk_cb_wrap work_bcb;
struct bulk_cs_wrap work_bcs;
struct usb_ctrlrequest work_cr;
struct work_struct reset_work;
wait_queue_head_t reset_wait;
};
/*
*/
static void ub_cleanup(struct ub_dev *sc);
static int ub_request_fn_1(struct ub_lun *lun, struct request *rq);
static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
struct ub_scsi_cmd *cmd, struct ub_request *urq);
static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
struct ub_scsi_cmd *cmd, struct ub_request *urq);
static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_end_rq(struct request *rq, unsigned int status);
static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
struct ub_request *urq, struct ub_scsi_cmd *cmd);
static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_urb_complete(struct urb *urb);
static void ub_scsi_action(unsigned long _dev);
static void ub_scsi_dispatch(struct ub_dev *sc);
static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc);
static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
int stalled_pipe);
static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd);
static void ub_reset_enter(struct ub_dev *sc, int try);
static void ub_reset_task(struct work_struct *work);
static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun);
static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
struct ub_capacity *ret);
static int ub_sync_reset(struct ub_dev *sc);
static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe);
static int ub_probe_lun(struct ub_dev *sc, int lnum);
/*
*/
#ifdef CONFIG_USB_LIBUSUAL
#define ub_usb_ids usb_storage_usb_ids
#else
static struct usb_device_id ub_usb_ids[] = {
{ USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_SCSI, US_PR_BULK) },
{ }
};
MODULE_DEVICE_TABLE(usb, ub_usb_ids);
#endif /* CONFIG_USB_LIBUSUAL */
/*
* Find me a way to identify "next free minor" for add_disk(),
* and the array disappears the next day. However, the number of
* hosts has something to do with the naming and /proc/partitions.
* This has to be thought out in detail before changing.
* If UB_MAX_HOSTS were 1000, we'd use a bitmap. Or a better data structure.
*/
#define UB_MAX_HOSTS 26
static char ub_hostv[UB_MAX_HOSTS];
#define UB_QLOCK_NUM 5
static spinlock_t ub_qlockv[UB_QLOCK_NUM];
static int ub_qlock_next = 0;
static DEFINE_SPINLOCK(ub_lock); /* Locks globals and ->openc */
/*
* The id allocator.
*
* This also stores the host for indexing by minor, which is somewhat dirty.
*/
static int ub_id_get(void)
{
unsigned long flags;
int i;
spin_lock_irqsave(&ub_lock, flags);
for (i = 0; i < UB_MAX_HOSTS; i++) {
if (ub_hostv[i] == 0) {
ub_hostv[i] = 1;
spin_unlock_irqrestore(&ub_lock, flags);
return i;
}
}
spin_unlock_irqrestore(&ub_lock, flags);
return -1;
}
static void ub_id_put(int id)
{
unsigned long flags;
if (id < 0 || id >= UB_MAX_HOSTS) {
printk(KERN_ERR DRV_NAME ": bad host ID %d\n", id);
return;
}
spin_lock_irqsave(&ub_lock, flags);
if (ub_hostv[id] == 0) {
spin_unlock_irqrestore(&ub_lock, flags);
printk(KERN_ERR DRV_NAME ": freeing free host ID %d\n", id);
return;
}
ub_hostv[id] = 0;
spin_unlock_irqrestore(&ub_lock, flags);
}
/*
* This is necessitated by the fact that blk_cleanup_queue does not
* necessarily destroy the queue. Instead, it may merely decrease q->refcnt.
* Since our blk_init_queue() passes a spinlock common with ub_dev,
* we have life time issues when ub_cleanup frees ub_dev.
*/
static spinlock_t *ub_next_lock(void)
{
unsigned long flags;
spinlock_t *ret;
spin_lock_irqsave(&ub_lock, flags);
ret = &ub_qlockv[ub_qlock_next];
ub_qlock_next = (ub_qlock_next + 1) % UB_QLOCK_NUM;
spin_unlock_irqrestore(&ub_lock, flags);
return ret;
}
/*
* Downcount for deallocation. This rides on two assumptions:
* - once something is poisoned, its refcount cannot grow
* - opens cannot happen at this time (del_gendisk was done)
* If the above is true, we can drop the lock, which we need for
* blk_cleanup_queue(): the silly thing may attempt to sleep.
* [Actually, it never needs to sleep for us, but it calls might_sleep()]
*/
static void ub_put(struct ub_dev *sc)
{
unsigned long flags;
spin_lock_irqsave(&ub_lock, flags);
--sc->openc;
if (sc->openc == 0 && atomic_read(&sc->poison)) {
spin_unlock_irqrestore(&ub_lock, flags);
ub_cleanup(sc);
} else {
spin_unlock_irqrestore(&ub_lock, flags);
}
}
/*
* Final cleanup and deallocation.
*/
static void ub_cleanup(struct ub_dev *sc)
{
struct list_head *p;
struct ub_lun *lun;
struct request_queue *q;
while (!list_empty(&sc->luns)) {
p = sc->luns.next;
lun = list_entry(p, struct ub_lun, link);
list_del(p);
/* I don't think queue can be NULL. But... Stolen from sx8.c */
if ((q = lun->disk->queue) != NULL)
blk_cleanup_queue(q);
/*
* If we zero disk->private_data BEFORE put_disk, we have
* to check for NULL all over the place in open, release,
* check_media and revalidate, because the block level
* semaphore is well inside the put_disk.
* But we cannot zero after the call, because *disk is gone.
* The sd.c is blatantly racy in this area.
*/
/* disk->private_data = NULL; */
put_disk(lun->disk);
lun->disk = NULL;
ub_id_put(lun->id);
kfree(lun);
}
usb_set_intfdata(sc->intf, NULL);
usb_put_intf(sc->intf);
usb_put_dev(sc->dev);
kfree(sc);
}
/*
* The "command allocator".
*/
static struct ub_scsi_cmd *ub_get_cmd(struct ub_lun *lun)
{
struct ub_scsi_cmd *ret;
if (lun->cmda[0])
return NULL;
ret = &lun->cmdv[0];
lun->cmda[0] = 1;
return ret;
}
static void ub_put_cmd(struct ub_lun *lun, struct ub_scsi_cmd *cmd)
{
if (cmd != &lun->cmdv[0]) {
printk(KERN_WARNING "%s: releasing a foreign cmd %p\n",
lun->name, cmd);
return;
}
if (!lun->cmda[0]) {
printk(KERN_WARNING "%s: releasing a free cmd\n", lun->name);
return;
}
lun->cmda[0] = 0;
}
/*
* The command queue.
*/
static void ub_cmdq_add(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
if (t->qlen++ == 0) {
t->head = cmd;
t->tail = cmd;
} else {
t->tail->next = cmd;
t->tail = cmd;
}
if (t->qlen > t->qmax)
t->qmax = t->qlen;
}
static void ub_cmdq_insert(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
if (t->qlen++ == 0) {
t->head = cmd;
t->tail = cmd;
} else {
cmd->next = t->head;
t->head = cmd;
}
if (t->qlen > t->qmax)
t->qmax = t->qlen;
}
static struct ub_scsi_cmd *ub_cmdq_pop(struct ub_dev *sc)
{
struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
struct ub_scsi_cmd *cmd;
if (t->qlen == 0)
return NULL;
if (--t->qlen == 0)
t->tail = NULL;
cmd = t->head;
t->head = cmd->next;
cmd->next = NULL;
return cmd;
}
#define ub_cmdq_peek(sc) ((sc)->cmd_queue.head)
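/*
 * Note: ub_cmdq_add() appends at the tail for normal submission, while
 * ub_cmdq_insert() pushes at the head so the auto-sense command queued
 * by ub_state_sense() runs ahead of anything already waiting.
 */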
/*
* The request function is our main entry point
*/
static void ub_request_fn(struct request_queue *q)
{
struct ub_lun *lun = q->queuedata;
struct request *rq;
while ((rq = blk_peek_request(q)) != NULL) {
if (ub_request_fn_1(lun, rq) != 0) {
blk_stop_queue(q);
break;
}
}
}
static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
{
struct ub_dev *sc = lun->udev;
struct ub_scsi_cmd *cmd;
struct ub_request *urq;
int n_elem;
if (atomic_read(&sc->poison)) {
blk_start_request(rq);
ub_end_rq(rq, DID_NO_CONNECT << 16);
return 0;
}
if (lun->changed && !blk_pc_request(rq)) {
blk_start_request(rq);
ub_end_rq(rq, SAM_STAT_CHECK_CONDITION);
return 0;
}
if (lun->urq.rq != NULL)
return -1;
if ((cmd = ub_get_cmd(lun)) == NULL)
return -1;
memset(cmd, 0, sizeof(struct ub_scsi_cmd));
blk_start_request(rq);
urq = &lun->urq;
memset(urq, 0, sizeof(struct ub_request));
urq->rq = rq;
/*
* get scatterlist from block layer
*/
sg_init_table(&urq->sgv[0], UB_MAX_REQ_SG);
n_elem = blk_rq_map_sg(lun->disk->queue, rq, &urq->sgv[0]);
if (n_elem < 0) {
/* Impossible, because blk_rq_map_sg should not hit ENOMEM. */
printk(KERN_INFO "%s: failed request map (%d)\n",
lun->name, n_elem);
goto drop;
}
if (n_elem > UB_MAX_REQ_SG) { /* Paranoia */
printk(KERN_WARNING "%s: request with %d segments\n",
lun->name, n_elem);
goto drop;
}
urq->nsg = n_elem;
if (blk_pc_request(rq)) {
ub_cmd_build_packet(sc, lun, cmd, urq);
} else {
ub_cmd_build_block(sc, lun, cmd, urq);
}
cmd->state = UB_CMDST_INIT;
cmd->lun = lun;
cmd->done = ub_rw_cmd_done;
cmd->back = urq;
cmd->tag = sc->tagcnt++;
if (ub_submit_scsi(sc, cmd) != 0)
goto drop;
return 0;
drop:
ub_put_cmd(lun, cmd);
ub_end_rq(rq, DID_ERROR << 16);
return 0;
}
static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
struct ub_scsi_cmd *cmd, struct ub_request *urq)
{
struct request *rq = urq->rq;
unsigned int block, nblks;
if (rq_data_dir(rq) == WRITE)
cmd->dir = UB_DIR_WRITE;
else
cmd->dir = UB_DIR_READ;
cmd->nsg = urq->nsg;
memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg);
/*
* build the command
*
* The call to blk_queue_logical_block_size() guarantees that request
* is aligned, but it is given in terms of 512 byte units, always.
*/
block = blk_rq_pos(rq) >> lun->capacity.bshift;
nblks = blk_rq_sectors(rq) >> lun->capacity.bshift;
cmd->cdb[0] = (cmd->dir == UB_DIR_READ)? READ_10: WRITE_10;
/* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */
cmd->cdb[2] = block >> 24;
cmd->cdb[3] = block >> 16;
cmd->cdb[4] = block >> 8;
cmd->cdb[5] = block;
cmd->cdb[7] = nblks >> 8;
cmd->cdb[8] = nblks;
cmd->cdb_len = 10;
cmd->len = blk_rq_bytes(rq);
}
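/*
 * Worked example (illustration only): a read of 8 x 512-byte sectors
 * starting at sector 2048 on a 512-byte-block device (bshift == 0)
 * yields READ(10) with LBA 2048 and length 8:
 *
 * cdb[0] = 0x28 (READ_10)
 * cdb[2..5] = 00 00 08 00 (LBA 2048, big-endian)
 * cdb[7..8] = 00 08 (8 blocks)
 *
 * On a 2048-byte-block device (bshift == 2) the same request becomes
 * LBA 512, length 2: both values shift down by bshift.
 */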
static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
struct ub_scsi_cmd *cmd, struct ub_request *urq)
{
struct request *rq = urq->rq;
if (blk_rq_bytes(rq) == 0) {
cmd->dir = UB_DIR_NONE;
} else {
if (rq_data_dir(rq) == WRITE)
cmd->dir = UB_DIR_WRITE;
else
cmd->dir = UB_DIR_READ;
}
cmd->nsg = urq->nsg;
memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg);
memcpy(&cmd->cdb, rq->cmd, rq->cmd_len);
cmd->cdb_len = rq->cmd_len;
cmd->len = blk_rq_bytes(rq);
/*
* To reapply this to every URB is not as incorrect as it looks.
* In return, we avoid any complicated tracking calculations.
*/
cmd->timeo = rq->timeout;
}
static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
struct ub_lun *lun = cmd->lun;
struct ub_request *urq = cmd->back;
struct request *rq;
unsigned int scsi_status;
rq = urq->rq;
if (cmd->error == 0) {
if (blk_pc_request(rq)) {
if (cmd->act_len >= rq->resid_len)
rq->resid_len = 0;
else
rq->resid_len -= cmd->act_len;
scsi_status = 0;
} else {
if (cmd->act_len != cmd->len) {
scsi_status = SAM_STAT_CHECK_CONDITION;
} else {
scsi_status = 0;
}
}
} else {
if (blk_pc_request(rq)) {
/* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */
memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE);
rq->sense_len = UB_SENSE_SIZE;
if (sc->top_sense[0] != 0)
scsi_status = SAM_STAT_CHECK_CONDITION;
else
scsi_status = DID_ERROR << 16;
} else {
if (cmd->error == -EIO &&
(cmd->key == 0 ||
cmd->key == MEDIUM_ERROR ||
cmd->key == UNIT_ATTENTION)) {
if (ub_rw_cmd_retry(sc, lun, urq, cmd) == 0)
return;
}
scsi_status = SAM_STAT_CHECK_CONDITION;
}
}
urq->rq = NULL;
ub_put_cmd(lun, cmd);
ub_end_rq(rq, scsi_status);
blk_start_queue(lun->disk->queue);
}
static void ub_end_rq(struct request *rq, unsigned int scsi_status)
{
int error;
if (scsi_status == 0) {
error = 0;
} else {
error = -EIO;
rq->errors = scsi_status;
}
__blk_end_request_all(rq, error);
}
static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
struct ub_request *urq, struct ub_scsi_cmd *cmd)
{
if (atomic_read(&sc->poison))
return -ENXIO;
ub_reset_enter(sc, urq->current_try);
if (urq->current_try >= 3)
return -EIO;
urq->current_try++;
/* Remove this if anyone complains of flooding. */
printk(KERN_DEBUG "%s: dir %c len/act %d/%d "
"[sense %x %02x %02x] retry %d\n",
sc->name, UB_DIR_CHAR(cmd->dir), cmd->len, cmd->act_len,
cmd->key, cmd->asc, cmd->ascq, urq->current_try);
memset(cmd, 0, sizeof(struct ub_scsi_cmd));
ub_cmd_build_block(sc, lun, cmd, urq);
cmd->state = UB_CMDST_INIT;
cmd->lun = lun;
cmd->done = ub_rw_cmd_done;
cmd->back = urq;
cmd->tag = sc->tagcnt++;
#if 0 /* Wasteful */
return ub_submit_scsi(sc, cmd);
#else
ub_cmdq_add(sc, cmd);
return 0;
#endif
}
/*
* Submit a regular SCSI operation (not an auto-sense).
*
* The Iron Law of Good Submit Routine is:
* Zero return - callback is done, Nonzero return - callback is not done.
* No exceptions.
*
* Host is assumed locked.
*/
static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
if (cmd->state != UB_CMDST_INIT ||
(cmd->dir != UB_DIR_NONE && cmd->len == 0)) {
return -EINVAL;
}
ub_cmdq_add(sc, cmd);
/*
* We can call ub_scsi_dispatch(sc) right away here, but it's a little
* safer to jump to a tasklet, in case upper layers do something silly.
*/
tasklet_schedule(&sc->tasklet);
return 0;
}
/*
* Submit the first URB for the queued command.
* This function does not deal with queueing in any way.
*/
static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
struct bulk_cb_wrap *bcb;
int rc;
bcb = &sc->work_bcb;
/*
* ``If the allocation length is eighteen or greater, and a device
* server returns less than eighteen bytes of data, the application
* client should assume that the bytes not transferred would have been
* zeroes had the device server returned those bytes.''
*
* We zero sense for all commands so that when a packet request
* fails it does not return a stale sense.
*/
memset(&sc->top_sense, 0, UB_SENSE_SIZE);
/* set up the command wrapper */
bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
bcb->Tag = cmd->tag; /* Endianness is not important */
bcb->DataTransferLength = cpu_to_le32(cmd->len);
bcb->Flags = (cmd->dir == UB_DIR_READ) ? 0x80 : 0;
bcb->Lun = (cmd->lun != NULL) ? cmd->lun->num : 0;
bcb->Length = cmd->cdb_len;
/* copy the command payload */
memcpy(bcb->CDB, cmd->cdb, UB_MAX_CDB_SIZE);
UB_INIT_COMPLETION(sc->work_done);
sc->last_pipe = sc->send_bulk_pipe;
usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->send_bulk_pipe,
bcb, US_BULK_CB_WRAP_LEN, ub_urb_complete, sc);
if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
/* XXX Clear stalls */
ub_complete(&sc->work_done);
return rc;
}
sc->work_timer.expires = jiffies + UB_URB_TIMEOUT;
add_timer(&sc->work_timer);
cmd->state = UB_CMDST_CMD;
return 0;
}
/*
* Timeout handler.
*/
static void ub_urb_timeout(unsigned long arg)
{
struct ub_dev *sc = (struct ub_dev *) arg;
unsigned long flags;
spin_lock_irqsave(sc->lock, flags);
if (!ub_is_completed(&sc->work_done))
usb_unlink_urb(&sc->work_urb);
spin_unlock_irqrestore(sc->lock, flags);
}
/*
* Completion routine for the work URB.
*
* This can be called directly from usb_submit_urb (while we have
* the sc->lock taken) and from an interrupt (while we do NOT have
* the sc->lock taken). Therefore, bounce this off to a tasklet.
*/
static void ub_urb_complete(struct urb *urb)
{
struct ub_dev *sc = urb->context;
ub_complete(&sc->work_done);
tasklet_schedule(&sc->tasklet);
}
static void ub_scsi_action(unsigned long _dev)
{
struct ub_dev *sc = (struct ub_dev *) _dev;
unsigned long flags;
spin_lock_irqsave(sc->lock, flags);
ub_scsi_dispatch(sc);
spin_unlock_irqrestore(sc->lock, flags);
}
static void ub_scsi_dispatch(struct ub_dev *sc)
{
struct ub_scsi_cmd *cmd;
int rc;
while (!sc->reset && (cmd = ub_cmdq_peek(sc)) != NULL) {
if (cmd->state == UB_CMDST_DONE) {
ub_cmdq_pop(sc);
(*cmd->done)(sc, cmd);
} else if (cmd->state == UB_CMDST_INIT) {
if ((rc = ub_scsi_cmd_start(sc, cmd)) == 0)
break;
cmd->error = rc;
cmd->state = UB_CMDST_DONE;
} else {
if (!ub_is_completed(&sc->work_done))
break;
del_timer(&sc->work_timer);
ub_scsi_urb_compl(sc, cmd);
}
}
}
static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
struct urb *urb = &sc->work_urb;
struct bulk_cs_wrap *bcs;
int endp;
int len;
int rc;
if (atomic_read(&sc->poison)) {
ub_state_done(sc, cmd, -ENODEV);
return;
}
endp = usb_pipeendpoint(sc->last_pipe);
if (usb_pipein(sc->last_pipe))
endp |= USB_DIR_IN;
if (cmd->state == UB_CMDST_CLEAR) {
if (urb->status == -EPIPE) {
/*
* STALL while clearing a STALL.
* The control pipe clears itself - nothing to do.
*/
printk(KERN_NOTICE "%s: stall on control pipe\n",
sc->name);
goto Bad_End;
}
/*
* We ignore the result for the halt clear.
*/
usb_reset_endpoint(sc->dev, endp);
ub_state_sense(sc, cmd);
} else if (cmd->state == UB_CMDST_CLR2STS) {
if (urb->status == -EPIPE) {
printk(KERN_NOTICE "%s: stall on control pipe\n",
sc->name);
goto Bad_End;
}
/*
* We ignore the result for the halt clear.
*/
usb_reset_endpoint(sc->dev, endp);
ub_state_stat(sc, cmd);
} else if (cmd->state == UB_CMDST_CLRRS) {
if (urb->status == -EPIPE) {
printk(KERN_NOTICE "%s: stall on control pipe\n",
sc->name);
goto Bad_End;
}
/*
* We ignore the result for the halt clear.
*/
usb_reset_endpoint(sc->dev, endp);
ub_state_stat_counted(sc, cmd);
} else if (cmd->state == UB_CMDST_CMD) {
switch (urb->status) {
case 0:
break;
case -EOVERFLOW:
goto Bad_End;
case -EPIPE:
rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
if (rc != 0) {
printk(KERN_NOTICE "%s: "
"unable to submit clear (%d)\n",
sc->name, rc);
/*
* This is typically ENOMEM or some other such shit.
* Retrying is pointless. Just do Bad End on it...
*/
ub_state_done(sc, cmd, rc);
return;
}
cmd->state = UB_CMDST_CLEAR;
return;
case -ESHUTDOWN: /* unplug */
case -EILSEQ: /* unplug timeout on uhci */
ub_state_done(sc, cmd, -ENODEV);
return;
default:
goto Bad_End;
}
if (urb->actual_length != US_BULK_CB_WRAP_LEN) {
goto Bad_End;
}
if (cmd->dir == UB_DIR_NONE || cmd->nsg < 1) {
ub_state_stat(sc, cmd);
return;
}
// udelay(125); // usb-storage has this
ub_data_start(sc, cmd);
} else if (cmd->state == UB_CMDST_DATA) {
if (urb->status == -EPIPE) {
rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
if (rc != 0) {
printk(KERN_NOTICE "%s: "
"unable to submit clear (%d)\n",
sc->name, rc);
ub_state_done(sc, cmd, rc);
return;
}
cmd->state = UB_CMDST_CLR2STS;
return;
}
if (urb->status == -EOVERFLOW) {
/*
* A babble? Failure, but we must transfer CSW now.
*/
cmd->error = -EOVERFLOW; /* A cheap trick... */
ub_state_stat(sc, cmd);
return;
}
if (cmd->dir == UB_DIR_WRITE) {
/*
* Do not continue writes in case of a failure.
* Doing so would cause sectors to be mixed up,
* which is worse than sectors lost.
*
* We must try to read the CSW, or many devices
* get confused.
*/
len = urb->actual_length;
if (urb->status != 0 ||
len != cmd->sgv[cmd->current_sg].length) {
cmd->act_len += len;
cmd->error = -EIO;
ub_state_stat(sc, cmd);
return;
}
} else {
/*
* If an error occurs on read, we record it and
* continue to fetch data in order to avoid a bubble.
*
* As a small shortcut, we stop if we detect that
* a CSW is mixed into the data.
*/
if (urb->status != 0)
cmd->error = -EIO;
len = urb->actual_length;
if (urb->status != 0 ||
len != cmd->sgv[cmd->current_sg].length) {
if ((len & 0x1FF) == US_BULK_CS_WRAP_LEN)
goto Bad_End;
}
}
cmd->act_len += urb->actual_length;
if (++cmd->current_sg < cmd->nsg) {
ub_data_start(sc, cmd);
return;
}
ub_state_stat(sc, cmd);
} else if (cmd->state == UB_CMDST_STAT) {
if (urb->status == -EPIPE) {
rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
if (rc != 0) {
printk(KERN_NOTICE "%s: "
"unable to submit clear (%d)\n",
sc->name, rc);
ub_state_done(sc, cmd, rc);
return;
}
/*
* Having a stall when getting CSW is an error, so
* make sure upper levels are not oblivious to it.
*/
cmd->error = -EIO; /* A cheap trick... */
cmd->state = UB_CMDST_CLRRS;
return;
}
/* Catch everything, including -EOVERFLOW and other nasties. */
if (urb->status != 0)
goto Bad_End;
if (urb->actual_length == 0) {
ub_state_stat_counted(sc, cmd);
return;
}
/*
* Check the returned Bulk protocol status.
* The status block has to be validated first.
*/
bcs = &sc->work_bcs;
if (sc->signature == cpu_to_le32(0)) {
/*
* This is the first reply, so do not perform the check.
* Instead, remember the signature the device uses
* for future checks. But do not allow a nul.
*/
sc->signature = bcs->Signature;
if (sc->signature == cpu_to_le32(0)) {
ub_state_stat_counted(sc, cmd);
return;
}
} else {
if (bcs->Signature != sc->signature) {
ub_state_stat_counted(sc, cmd);
return;
}
}
if (bcs->Tag != cmd->tag) {
/*
* This usually happens when we disagree with the
* device's microcode about something. For instance,
* a few of them throw this after timeouts. They buffer
* commands and reply to commands we timed out before.
* Without flushing these replies we loop forever.
*/
ub_state_stat_counted(sc, cmd);
return;
}
if (!sc->bad_resid) {
len = le32_to_cpu(bcs->Residue);
if (len != cmd->len - cmd->act_len) {
/*
* Only start ignoring if this cmd ended well.
*/
if (cmd->len == cmd->act_len) {
printk(KERN_NOTICE "%s: "
"bad residual %d of %d, ignoring\n",
sc->name, len, cmd->len);
sc->bad_resid = 1;
}
}
}
switch (bcs->Status) {
case US_BULK_STAT_OK:
break;
case US_BULK_STAT_FAIL:
ub_state_sense(sc, cmd);
return;
case US_BULK_STAT_PHASE:
goto Bad_End;
default:
printk(KERN_INFO "%s: unknown CSW status 0x%x\n",
sc->name, bcs->Status);
ub_state_done(sc, cmd, -EINVAL);
return;
}
/* Not zeroing error to preserve a babble indicator */
if (cmd->error != 0) {
ub_state_sense(sc, cmd);
return;
}
cmd->state = UB_CMDST_DONE;
ub_cmdq_pop(sc);
(*cmd->done)(sc, cmd);
} else if (cmd->state == UB_CMDST_SENSE) {
ub_state_done(sc, cmd, -EIO);
} else {
printk(KERN_WARNING "%s: wrong command state %d\n",
sc->name, cmd->state);
ub_state_done(sc, cmd, -EINVAL);
return;
}
return;
Bad_End: /* Little Excel is dead */
ub_state_done(sc, cmd, -EIO);
}
/*
* Factorization helper for the command state machine:
* Initiate a data segment transfer.
*/
static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
struct scatterlist *sg = &cmd->sgv[cmd->current_sg];
int pipe;
int rc;
UB_INIT_COMPLETION(sc->work_done);
if (cmd->dir == UB_DIR_READ)
pipe = sc->recv_bulk_pipe;
else
pipe = sc->send_bulk_pipe;
sc->last_pipe = pipe;
usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe, sg_virt(sg),
sg->length, ub_urb_complete, sc);
if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
/* XXX Clear stalls */
ub_complete(&sc->work_done);
ub_state_done(sc, cmd, rc);
return;
}
if (cmd->timeo)
sc->work_timer.expires = jiffies + cmd->timeo;
else
sc->work_timer.expires = jiffies + UB_DATA_TIMEOUT;
add_timer(&sc->work_timer);
cmd->state = UB_CMDST_DATA;
}
/*
* Factorization helper for the command state machine:
* Finish the command.
*/
static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc)
{
cmd->error = rc;
cmd->state = UB_CMDST_DONE;
ub_cmdq_pop(sc);
(*cmd->done)(sc, cmd);
}
/*
* Factorization helper for the command state machine:
* Submit a CSW read.
*/
static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
int rc;
UB_INIT_COMPLETION(sc->work_done);
sc->last_pipe = sc->recv_bulk_pipe;
usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->recv_bulk_pipe,
&sc->work_bcs, US_BULK_CS_WRAP_LEN, ub_urb_complete, sc);
if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
/* XXX Clear stalls */
ub_complete(&sc->work_done);
ub_state_done(sc, cmd, rc);
return -1;
}
if (cmd->timeo)
sc->work_timer.expires = jiffies + cmd->timeo;
else
sc->work_timer.expires = jiffies + UB_STAT_TIMEOUT;
add_timer(&sc->work_timer);
return 0;
}
/*
* Factorization helper for the command state machine:
* Submit a CSW read and go to STAT state.
*/
static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
if (__ub_state_stat(sc, cmd) != 0)
return;
cmd->stat_count = 0;
cmd->state = UB_CMDST_STAT;
}
/*
* Factorization helper for the command state machine:
* Submit a CSW read and go to STAT state with counter (along [C] path).
*/
static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
if (++cmd->stat_count >= 4) {
ub_state_sense(sc, cmd);
return;
}
if (__ub_state_stat(sc, cmd) != 0)
return;
cmd->state = UB_CMDST_STAT;
}
/*
* Factorization helper for the command state machine:
* Submit a REQUEST SENSE and go to SENSE state.
*/
static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
struct ub_scsi_cmd *scmd;
struct scatterlist *sg;
int rc;
if (cmd->cdb[0] == REQUEST_SENSE) {
rc = -EPIPE;
goto error;
}
scmd = &sc->top_rqs_cmd;
memset(scmd, 0, sizeof(struct ub_scsi_cmd));
scmd->cdb[0] = REQUEST_SENSE;
scmd->cdb[4] = UB_SENSE_SIZE;
scmd->cdb_len = 6;
scmd->dir = UB_DIR_READ;
scmd->state = UB_CMDST_INIT;
scmd->nsg = 1;
sg = &scmd->sgv[0];
sg_init_table(sg, UB_MAX_REQ_SG);
sg_set_page(sg, virt_to_page(sc->top_sense), UB_SENSE_SIZE,
(unsigned long)sc->top_sense & (PAGE_SIZE-1));
scmd->len = UB_SENSE_SIZE;
scmd->lun = cmd->lun;
scmd->done = ub_top_sense_done;
scmd->back = cmd;
scmd->tag = sc->tagcnt++;
cmd->state = UB_CMDST_SENSE;
ub_cmdq_insert(sc, scmd);
return;
error:
ub_state_done(sc, cmd, rc);
}
/*
* A helper for the command's state machine:
* Submit a stall clear.
*/
static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
int stalled_pipe)
{
int endp;
struct usb_ctrlrequest *cr;
int rc;
endp = usb_pipeendpoint(stalled_pipe);
if (usb_pipein (stalled_pipe))
endp |= USB_DIR_IN;
cr = &sc->work_cr;
cr->bRequestType = USB_RECIP_ENDPOINT;
cr->bRequest = USB_REQ_CLEAR_FEATURE;
cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT);
cr->wIndex = cpu_to_le16(endp);
cr->wLength = cpu_to_le16(0);
UB_INIT_COMPLETION(sc->work_done);
usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
(unsigned char*) cr, NULL, 0, ub_urb_complete, sc);
if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
ub_complete(&sc->work_done);
return rc;
}
sc->work_timer.expires = jiffies + UB_CTRL_TIMEOUT;
add_timer(&sc->work_timer);
return 0;
}
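/*
 * Worked example (illustration only): for a stalled bulk-in endpoint 1
 * the resulting setup packet is the standard CLEAR_FEATURE(ENDPOINT_HALT):
 *
 * bmRequestType = 0x02 (host-to-device, standard, endpoint)
 * bRequest = 0x01 (CLEAR_FEATURE)
 * wValue = 0x0000 (ENDPOINT_HALT)
 * wIndex = 0x0081 (IN endpoint 1)
 * wLength = 0
 */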
/*
*/
static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd)
{
unsigned char *sense = sc->top_sense;
struct ub_scsi_cmd *cmd;
/*
* Find the command which triggered the unit attention or a check,
* save the sense into it, and advance its state machine.
*/
if ((cmd = ub_cmdq_peek(sc)) == NULL) {
printk(KERN_WARNING "%s: sense done while idle\n", sc->name);
return;
}
if (cmd != scmd->back) {
printk(KERN_WARNING "%s: "
"sense done for wrong command 0x%x\n",
sc->name, cmd->tag);
return;
}
if (cmd->state != UB_CMDST_SENSE) {
printk(KERN_WARNING "%s: sense done with bad cmd state %d\n",
sc->name, cmd->state);
return;
}
/*
* Ignoring scmd->act_len, because the buffer was pre-zeroed.
*/
cmd->key = sense[2] & 0x0F;
cmd->asc = sense[12];
cmd->ascq = sense[13];
ub_scsi_urb_compl(sc, cmd);
}
/*
* Reset management
*/
static void ub_reset_enter(struct ub_dev *sc, int try)
{
if (sc->reset) {
/* This happens often on multi-LUN devices. */
return;
}
sc->reset = try + 1;
#if 0 /* Not needed because the disconnect waits for us. */
unsigned long flags;
spin_lock_irqsave(&ub_lock, flags);
sc->openc++;
spin_unlock_irqrestore(&ub_lock, flags);
#endif
#if 0 /* We let them stop themselves. */
struct ub_lun *lun;
list_for_each_entry(lun, &sc->luns, link) {
blk_stop_queue(lun->disk->queue);
}
#endif
schedule_work(&sc->reset_work);
}
static void ub_reset_task(struct work_struct *work)
{
struct ub_dev *sc = container_of(work, struct ub_dev, reset_work);
unsigned long flags;
struct ub_lun *lun;
int rc;
if (!sc->reset) {
printk(KERN_WARNING "%s: Running reset unrequested\n",
sc->name);
return;
}
if (atomic_read(&sc->poison)) {
;
} else if ((sc->reset & 1) == 0) {
ub_sync_reset(sc);
msleep(700); /* usb-storage sleeps 6s (!) */
ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
ub_probe_clear_stall(sc, sc->send_bulk_pipe);
} else if (sc->dev->actconfig->desc.bNumInterfaces != 1) {
;
} else {
rc = usb_lock_device_for_reset(sc->dev, sc->intf);
if (rc < 0) {
printk(KERN_NOTICE
"%s: usb_lock_device_for_reset failed (%d)\n",
sc->name, rc);
} else {
rc = usb_reset_device(sc->dev);
if (rc < 0) {
printk(KERN_NOTICE "%s: "
"usb_lock_device_for_reset failed (%d)\n",
sc->name, rc);
}
usb_unlock_device(sc->dev);
}
}
/*
* In theory, no commands can be running while reset is active,
* so nobody can ask for another reset, and so we do not need any
* queues of resets or anything. We do need a spinlock though,
* to interact with block layer.
*/
spin_lock_irqsave(sc->lock, flags);
sc->reset = 0;
tasklet_schedule(&sc->tasklet);
list_for_each_entry(lun, &sc->luns, link) {
blk_start_queue(lun->disk->queue);
}
wake_up(&sc->reset_wait);
spin_unlock_irqrestore(sc->lock, flags);
}
/*
* XXX Reset brackets are too much hassle to implement, so just stub them
* in order to prevent forced unbinding (which deadlocks solid when our
* ->disconnect method waits for the reset to complete and this kills keventd).
*
* XXX Tell Alan to move usb_unlock_device inside of usb_reset_device,
* or else the post_reset is invoked, and restarts I/O on a locked device.
*/
static int ub_pre_reset(struct usb_interface *iface) {
return 0;
}
static int ub_post_reset(struct usb_interface *iface) {
return 0;
}
/*
* This is called from a process context.
*/
static void ub_revalidate(struct ub_dev *sc, struct ub_lun *lun)
{
lun->readonly = 0; /* XXX Query this from the device */
lun->capacity.nsec = 0;
lun->capacity.bsize = 512;
lun->capacity.bshift = 0;
if (ub_sync_tur(sc, lun) != 0)
return; /* Not ready */
lun->changed = 0;
if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
/*
* The retry here means something is wrong, either with the
* device, with the transport, or with our code.
* We keep this because sd.c has retries for capacity.
*/
if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
lun->capacity.nsec = 0;
lun->capacity.bsize = 512;
lun->capacity.bshift = 0;
}
}
}
/*
* The open function.
* This is mostly needed to keep refcounting, but also to support
* media checks on removable media drives.
*/
static int ub_bd_open(struct block_device *bdev, fmode_t mode)
{
struct ub_lun *lun = bdev->bd_disk->private_data;
struct ub_dev *sc = lun->udev;
unsigned long flags;
int rc;
spin_lock_irqsave(&ub_lock, flags);
if (atomic_read(&sc->poison)) {
spin_unlock_irqrestore(&ub_lock, flags);
return -ENXIO;
}
sc->openc++;
spin_unlock_irqrestore(&ub_lock, flags);
if (lun->removable || lun->readonly)
check_disk_change(bdev);
/*
* The sd.c considers ->media_present and ->changed not equivalent,
* under some pretty murky conditions (a failure of READ CAPACITY).
* We may need it one day.
*/
if (lun->removable && lun->changed && !(mode & FMODE_NDELAY)) {
rc = -ENOMEDIUM;
goto err_open;
}
if (lun->readonly && (mode & FMODE_WRITE)) {
rc = -EROFS;
goto err_open;
}
return 0;
err_open:
ub_put(sc);
return rc;
}
/*
*/
static int ub_bd_release(struct gendisk *disk, fmode_t mode)
{
struct ub_lun *lun = disk->private_data;
struct ub_dev *sc = lun->udev;
ub_put(sc);
return 0;
}
/*
* The ioctl interface.
*/
static int ub_bd_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
struct gendisk *disk = bdev->bd_disk;
void __user *usermem = (void __user *) arg;
return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, usermem);
}
/*
* This is called by check_disk_change if we reported a media change.
* The main objective here is to discover the features of the media such as
* the capacity, read-only status, etc. USB storage generally does not
* need to be spun up, but if we needed it, this would be the place.
*
* This call can sleep.
*
* The return code is not used.
*/
static int ub_bd_revalidate(struct gendisk *disk)
{
struct ub_lun *lun = disk->private_data;
ub_revalidate(lun->udev, lun);
/* XXX Support sector size switching like in sr.c */
blk_queue_logical_block_size(disk->queue, lun->capacity.bsize);
set_capacity(disk, lun->capacity.nsec);
// set_disk_ro(sdkp->disk, lun->readonly);
return 0;
}
/*
* The check is called by the block layer to verify if the media
* is still available. It is supposed to be harmless, lightweight and
* non-intrusive in case the media was not changed.
*
* This call can sleep.
*
* The return code is bool!
*/
static int ub_bd_media_changed(struct gendisk *disk)
{
struct ub_lun *lun = disk->private_data;
if (!lun->removable)
return 0;
/*
* We always clear check conditions after every command, so this is
* not as dangerous as it looks. If TEST_UNIT_READY fails here, the
* device really is not ready, and operator or software intervention
* is required. One dangerous case is a drive that spins itself down:
* come the time to write dirty pages, the writes fail and the block
* layer discards the data. Since we never spin drives up, such
* devices simply cannot be used with ub anyway.
*/
if (ub_sync_tur(lun->udev, lun) != 0) {
lun->changed = 1;
return 1;
}
return lun->changed;
}
static const struct block_device_operations ub_bd_fops = {
.owner = THIS_MODULE,
.open = ub_bd_open,
.release = ub_bd_release,
.locked_ioctl = ub_bd_ioctl,
.media_changed = ub_bd_media_changed,
.revalidate_disk = ub_bd_revalidate,
};
/*
* Common ->done routine for commands executed synchronously.
*/
static void ub_probe_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
struct completion *cop = cmd->back;
complete(cop);
}
/*
* Test if the device has a check condition on it, synchronously.
*/
static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun)
{
struct ub_scsi_cmd *cmd;
enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) };
unsigned long flags;
struct completion compl;
int rc;
init_completion(&compl);
rc = -ENOMEM;
if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
goto err_alloc;
cmd->cdb[0] = TEST_UNIT_READY;
cmd->cdb_len = 6;
cmd->dir = UB_DIR_NONE;
cmd->state = UB_CMDST_INIT;
cmd->lun = lun; /* This may be NULL, but that's ok */
cmd->done = ub_probe_done;
cmd->back = &compl;
spin_lock_irqsave(sc->lock, flags);
cmd->tag = sc->tagcnt++;
rc = ub_submit_scsi(sc, cmd);
spin_unlock_irqrestore(sc->lock, flags);
if (rc != 0)
goto err_submit;
wait_for_completion(&compl);
rc = cmd->error;
if (rc == -EIO && cmd->key != 0) /* Retries for benh's key */
rc = cmd->key;
err_submit:
kfree(cmd);
err_alloc:
return rc;
}
/*
* Read the SCSI capacity synchronously (for probing).
*/
static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
struct ub_capacity *ret)
{
struct ub_scsi_cmd *cmd;
struct scatterlist *sg;
char *p;
enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) + 8 };
unsigned long flags;
unsigned int bsize, shift;
unsigned long nsec;
struct completion compl;
int rc;
init_completion(&compl);
rc = -ENOMEM;
if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
goto err_alloc;
p = (char *)cmd + sizeof(struct ub_scsi_cmd);
cmd->cdb[0] = 0x25;
cmd->cdb_len = 10;
cmd->dir = UB_DIR_READ;
cmd->state = UB_CMDST_INIT;
cmd->nsg = 1;
sg = &cmd->sgv[0];
sg_init_table(sg, UB_MAX_REQ_SG);
sg_set_page(sg, virt_to_page(p), 8, (unsigned long)p & (PAGE_SIZE-1));
cmd->len = 8;
cmd->lun = lun;
cmd->done = ub_probe_done;
cmd->back = &compl;
spin_lock_irqsave(sc->lock, flags);
cmd->tag = sc->tagcnt++;
rc = ub_submit_scsi(sc, cmd);
spin_unlock_irqrestore(sc->lock, flags);
if (rc != 0)
goto err_submit;
wait_for_completion(&compl);
if (cmd->error != 0) {
rc = -EIO;
goto err_read;
}
if (cmd->act_len != 8) {
rc = -EIO;
goto err_read;
}
/* sd.c special-cases sector size of 0 to mean 512. Needed? Safe? */
nsec = be32_to_cpu(*(__be32 *)p) + 1;
bsize = be32_to_cpu(*(__be32 *)(p + 4));
switch (bsize) {
case 512: shift = 0; break;
case 1024: shift = 1; break;
case 2048: shift = 2; break;
case 4096: shift = 3; break;
default:
rc = -EDOM;
goto err_inv_bsize;
}
ret->bsize = bsize;
ret->bshift = shift;
ret->nsec = nsec << shift;
rc = 0;
err_inv_bsize:
err_read:
err_submit:
kfree(cmd);
err_alloc:
return rc;
}
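/*
* Illustrative only (not compiled): decoding the 8-byte READ
* CAPACITY(10) reply parsed above. The device and values are made up.
*/
#if 0
/* A device whose last LBA is 0x003fffff with 512-byte sectors
replies with these big-endian bytes: */
u8 p[8] = { 0x00, 0x3f, 0xff, 0xff, 0x00, 0x00, 0x02, 0x00 };
nsec = be32_to_cpu(*(__be32 *)p) + 1; /* 0x00400000 sectors */
bsize = be32_to_cpu(*(__be32 *)(p + 4)); /* 512, so shift == 0 */
/* ret->nsec = 0x400000 << 0, i.e. a 2 GiB device. */
#endif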
/*
* Completion callback shared by URBs submitted synchronously at
* probe time.
*/
static void ub_probe_urb_complete(struct urb *urb)
{
struct completion *cop = urb->context;
complete(cop);
}
static void ub_probe_timeout(unsigned long arg)
{
struct completion *cop = (struct completion *) arg;
complete(cop);
}
/*
* Reset with a Bulk reset.
*/
static int ub_sync_reset(struct ub_dev *sc)
{
int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber;
struct usb_ctrlrequest *cr;
struct completion compl;
struct timer_list timer;
int rc;
init_completion(&compl);
cr = &sc->work_cr;
cr->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE;
cr->bRequest = US_BULK_RESET_REQUEST;
cr->wValue = cpu_to_le16(0);
cr->wIndex = cpu_to_le16(ifnum);
cr->wLength = cpu_to_le16(0);
usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
(unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);
if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
printk(KERN_WARNING
"%s: Unable to submit a bulk reset (%d)\n", sc->name, rc);
return rc;
}
init_timer(&timer);
timer.function = ub_probe_timeout;
timer.data = (unsigned long) &compl;
timer.expires = jiffies + UB_CTRL_TIMEOUT;
add_timer(&timer);
wait_for_completion(&compl);
del_timer_sync(&timer);
usb_kill_urb(&sc->work_urb);
return sc->work_urb.status;
}
/*
* Get the number of LUNs by way of the Bulk GetMaxLUN command.
*/
static int ub_sync_getmaxlun(struct ub_dev *sc)
{
int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber;
unsigned char *p;
enum { ALLOC_SIZE = 1 };
struct usb_ctrlrequest *cr;
struct completion compl;
struct timer_list timer;
int nluns;
int rc;
init_completion(&compl);
rc = -ENOMEM;
if ((p = kmalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
goto err_alloc;
*p = 55;
cr = &sc->work_cr;
cr->bRequestType = USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
cr->bRequest = US_BULK_GET_MAX_LUN;
cr->wValue = cpu_to_le16(0);
cr->wIndex = cpu_to_le16(ifnum);
cr->wLength = cpu_to_le16(1);
usb_fill_control_urb(&sc->work_urb, sc->dev, sc->recv_ctrl_pipe,
(unsigned char*) cr, p, 1, ub_probe_urb_complete, &compl);
if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0)
goto err_submit;
init_timer(&timer);
timer.function = ub_probe_timeout;
timer.data = (unsigned long) &compl;
timer.expires = jiffies + UB_CTRL_TIMEOUT;
add_timer(&timer);
wait_for_completion(&compl);
del_timer_sync(&timer);
usb_kill_urb(&sc->work_urb);
if ((rc = sc->work_urb.status) < 0)
goto err_io;
if (sc->work_urb.actual_length != 1) {
nluns = 0;
} else {
if ((nluns = *p) == 55) {
nluns = 0;
} else {
/* GetMaxLUN returns the maximum LUN number */
nluns += 1;
if (nluns > UB_MAX_LUNS)
nluns = UB_MAX_LUNS;
}
}
kfree(p);
return nluns;
err_io:
err_submit:
kfree(p);
err_alloc:
return rc;
}
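/*
* Illustrative recap of the sentinel dance above: *p is preloaded
* with 55, so a reply that still reads 55 (or a short reply) is
* treated as "no answer" and we return 0, letting the caller fall
* back to a single LUN. A device answering 1 (its maximum LUN
* number) yields nluns == 2, clamped to UB_MAX_LUNS.
*/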
/*
* Clear initial stalls.
*/
static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe)
{
int endp;
struct usb_ctrlrequest *cr;
struct completion compl;
struct timer_list timer;
int rc;
init_completion(&compl);
endp = usb_pipeendpoint(stalled_pipe);
if (usb_pipein (stalled_pipe))
endp |= USB_DIR_IN;
cr = &sc->work_cr;
cr->bRequestType = USB_RECIP_ENDPOINT;
cr->bRequest = USB_REQ_CLEAR_FEATURE;
cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT);
cr->wIndex = cpu_to_le16(endp);
cr->wLength = cpu_to_le16(0);
usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
(unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);
if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
printk(KERN_WARNING
"%s: Unable to submit a probe clear (%d)\n", sc->name, rc);
return rc;
}
init_timer(&timer);
timer.function = ub_probe_timeout;
timer.data = (unsigned long) &compl;
timer.expires = jiffies + UB_CTRL_TIMEOUT;
add_timer(&timer);
wait_for_completion(&compl);
del_timer_sync(&timer);
usb_kill_urb(&sc->work_urb);
usb_reset_endpoint(sc->dev, endp);
return 0;
}
/*
* Get the pipe settings.
*/
static int ub_get_pipes(struct ub_dev *sc, struct usb_device *dev,
struct usb_interface *intf)
{
struct usb_host_interface *altsetting = intf->cur_altsetting;
struct usb_endpoint_descriptor *ep_in = NULL;
struct usb_endpoint_descriptor *ep_out = NULL;
struct usb_endpoint_descriptor *ep;
int i;
/*
* Find the endpoints we need.
* We are expecting a minimum of 2 endpoints - in and out (bulk).
* We will ignore any others.
*/
for (i = 0; i < altsetting->desc.bNumEndpoints; i++) {
ep = &altsetting->endpoint[i].desc;
/* Is it a BULK endpoint? */
if (usb_endpoint_xfer_bulk(ep)) {
/* BULK in or out? */
if (usb_endpoint_dir_in(ep)) {
if (ep_in == NULL)
ep_in = ep;
} else {
if (ep_out == NULL)
ep_out = ep;
}
}
}
if (ep_in == NULL || ep_out == NULL) {
printk(KERN_NOTICE "%s: failed endpoint check\n", sc->name);
return -ENODEV;
}
/* Calculate and store the pipe values */
sc->send_ctrl_pipe = usb_sndctrlpipe(dev, 0);
sc->recv_ctrl_pipe = usb_rcvctrlpipe(dev, 0);
sc->send_bulk_pipe = usb_sndbulkpipe(dev,
usb_endpoint_num(ep_out));
sc->recv_bulk_pipe = usb_rcvbulkpipe(dev,
usb_endpoint_num(ep_in));
return 0;
}
/*
* Probing is done in process context, which allows us to cheat
* and not build a state machine for the discovery.
*/
static int ub_probe(struct usb_interface *intf,
const struct usb_device_id *dev_id)
{
struct ub_dev *sc;
int nluns;
int rc;
int i;
if (usb_usual_check_type(dev_id, USB_US_TYPE_UB))
return -ENXIO;
rc = -ENOMEM;
if ((sc = kzalloc(sizeof(struct ub_dev), GFP_KERNEL)) == NULL)
goto err_core;
sc->lock = ub_next_lock();
INIT_LIST_HEAD(&sc->luns);
usb_init_urb(&sc->work_urb);
tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc);
atomic_set(&sc->poison, 0);
INIT_WORK(&sc->reset_work, ub_reset_task);
init_waitqueue_head(&sc->reset_wait);
init_timer(&sc->work_timer);
sc->work_timer.data = (unsigned long) sc;
sc->work_timer.function = ub_urb_timeout;
ub_init_completion(&sc->work_done);
sc->work_done.done = 1; /* A little yuk, but oh well... */
sc->dev = interface_to_usbdev(intf);
sc->intf = intf;
// sc->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
usb_set_intfdata(intf, sc);
usb_get_dev(sc->dev);
/*
* Since we give the interface struct to the block level through
* disk->driverfs_dev, we have to pin it. Otherwise, block_uevent
* oopses on close after a disconnect (kernels 2.6.16 and up).
*/
usb_get_intf(sc->intf);
snprintf(sc->name, 12, DRV_NAME "(%d.%d)",
sc->dev->bus->busnum, sc->dev->devnum);
/* XXX Verify that we can handle the device (from descriptors) */
if (ub_get_pipes(sc, sc->dev, intf) != 0)
goto err_dev_desc;
/*
* At this point, all USB initialization is done, do upper layer.
* We really hate halfway initialized structures, so from the
* invariants perspective, this ub_dev is fully constructed at
* this point.
*/
/*
* This is needed to clear toggles. It is a problem only if we do
* `rmmod ub && modprobe ub` without disconnects, but we like that.
*/
#if 0 /* iPod Mini fails if we do this (big white iPod works) */
ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
ub_probe_clear_stall(sc, sc->send_bulk_pipe);
#endif
/*
* The way this is used by the startup code is a little specific.
* A SCSI check causes a USB stall. Our common case code sees it
* and clears the check, after which the device is ready for use.
* But if a check was not present, any command other than
* TEST_UNIT_READY ends with a lockup (including REQUEST_SENSE).
*
* If we neglect to clear the SCSI check, the first real command fails
* (which is the capacity readout). We clear that and retry, but why
* cause spurious retries for no reason?
*
* Revalidation may start with its own TEST_UNIT_READY, but that one
* has to succeed, so we clear checks with an additional one here.
* In any case it's not our business how revalidation is implemented.
*/
for (i = 0; i < 3; i++) { /* Retries for the schwag key from KS'04 */
if ((rc = ub_sync_tur(sc, NULL)) <= 0) break;
if (rc != 0x6) break; /* 0x6 is the UNIT ATTENTION sense key */
msleep(10);
}
nluns = 1;
for (i = 0; i < 3; i++) {
if ((rc = ub_sync_getmaxlun(sc)) < 0)
break;
if (rc != 0) {
nluns = rc;
break;
}
msleep(100);
}
for (i = 0; i < nluns; i++) {
ub_probe_lun(sc, i);
}
return 0;
err_dev_desc:
usb_set_intfdata(intf, NULL);
usb_put_intf(sc->intf);
usb_put_dev(sc->dev);
kfree(sc);
err_core:
return rc;
}
static int ub_probe_lun(struct ub_dev *sc, int lnum)
{
struct ub_lun *lun;
struct request_queue *q;
struct gendisk *disk;
int rc;
rc = -ENOMEM;
if ((lun = kzalloc(sizeof(struct ub_lun), GFP_KERNEL)) == NULL)
goto err_alloc;
lun->num = lnum;
rc = -ENOSR;
if ((lun->id = ub_id_get()) == -1)
goto err_id;
lun->udev = sc;
snprintf(lun->name, 16, DRV_NAME "%c(%d.%d.%d)",
lun->id + 'a', sc->dev->bus->busnum, sc->dev->devnum, lun->num);
lun->removable = 1; /* XXX Query this from the device */
lun->changed = 1; /* ub_revalidate clears only */
ub_revalidate(sc, lun);
rc = -ENOMEM;
if ((disk = alloc_disk(UB_PARTS_PER_LUN)) == NULL)
goto err_diskalloc;
sprintf(disk->disk_name, DRV_NAME "%c", lun->id + 'a');
disk->major = UB_MAJOR;
disk->first_minor = lun->id * UB_PARTS_PER_LUN;
disk->fops = &ub_bd_fops;
disk->private_data = lun;
disk->driverfs_dev = &sc->intf->dev;
rc = -ENOMEM;
if ((q = blk_init_queue(ub_request_fn, sc->lock)) == NULL)
goto err_blkqinit;
disk->queue = q;
blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
blk_queue_max_hw_segments(q, UB_MAX_REQ_SG);
blk_queue_max_phys_segments(q, UB_MAX_REQ_SG);
blk_queue_segment_boundary(q, 0xffffffff); /* Dubious. */
blk_queue_max_sectors(q, UB_MAX_SECTORS);
blk_queue_logical_block_size(q, lun->capacity.bsize);
lun->disk = disk;
q->queuedata = lun;
list_add(&lun->link, &sc->luns);
set_capacity(disk, lun->capacity.nsec);
if (lun->removable)
disk->flags |= GENHD_FL_REMOVABLE;
add_disk(disk);
return 0;
err_blkqinit:
put_disk(disk);
err_diskalloc:
ub_id_put(lun->id);
err_id:
kfree(lun);
err_alloc:
return rc;
}
static void ub_disconnect(struct usb_interface *intf)
{
struct ub_dev *sc = usb_get_intfdata(intf);
struct ub_lun *lun;
unsigned long flags;
/*
* Prevent ub_bd_release from pulling the rug from under us.
* XXX This is starting to look like a kref.
* XXX Why not take this ref at probe time?
*/
spin_lock_irqsave(&ub_lock, flags);
sc->openc++;
spin_unlock_irqrestore(&ub_lock, flags);
/*
* Fence stall clearings, operations triggered by unlinkings and so on.
* We do not attempt to unlink any URBs, because we do not trust the
* unlink paths in HC drivers. Also, we get -84 upon disconnect anyway.
*/
atomic_set(&sc->poison, 1);
/*
* Wait for reset to end, if any.
*/
wait_event(sc->reset_wait, !sc->reset);
/*
* Blow away queued commands.
*
* Actually, this never works, because before we get here
* the HCD terminates outstanding URB(s). It causes our
* SCSI command queue to advance, commands fail to submit,
* and the whole queue drains. So, we just use this code to
* print warnings.
*/
spin_lock_irqsave(sc->lock, flags);
{
struct ub_scsi_cmd *cmd;
int cnt = 0;
while ((cmd = ub_cmdq_peek(sc)) != NULL) {
cmd->error = -ENOTCONN;
cmd->state = UB_CMDST_DONE;
ub_cmdq_pop(sc);
(*cmd->done)(sc, cmd);
cnt++;
}
if (cnt != 0) {
printk(KERN_WARNING "%s: "
"%d was queued after shutdown\n", sc->name, cnt);
}
}
spin_unlock_irqrestore(sc->lock, flags);
/*
* Unregister the upper layer.
*/
list_for_each_entry(lun, &sc->luns, link) {
del_gendisk(lun->disk);
/*
* I wish I could do:
* queue_flag_set(QUEUE_FLAG_DEAD, q);
* As it is, we rely on our internal poisoning and let
* the upper levels spin furiously, failing all the I/O.
*/
}
/*
* Testing for -EINPROGRESS is always a bug, so we are bending
* the rules a little.
*/
spin_lock_irqsave(sc->lock, flags);
if (sc->work_urb.status == -EINPROGRESS) { /* janitors: ignore */
printk(KERN_WARNING "%s: "
"URB is active after disconnect\n", sc->name);
}
spin_unlock_irqrestore(sc->lock, flags);
/*
* There is virtually no chance that another CPU runs a timeout so long
* after ub_urb_complete should have called del_timer, but only if the
* HCD didn't forget to deliver a callback on unlink.
*/
del_timer_sync(&sc->work_timer);
/*
* At this point there must be no commands coming from anyone
* and no URBs left in transit.
*/
ub_put(sc);
}
static struct usb_driver ub_driver = {
.name = "ub",
.probe = ub_probe,
.disconnect = ub_disconnect,
.id_table = ub_usb_ids,
.pre_reset = ub_pre_reset,
.post_reset = ub_post_reset,
};
static int __init ub_init(void)
{
int rc;
int i;
for (i = 0; i < UB_QLOCK_NUM; i++)
spin_lock_init(&ub_qlockv[i]);
if ((rc = register_blkdev(UB_MAJOR, DRV_NAME)) != 0)
goto err_regblkdev;
if ((rc = usb_register(&ub_driver)) != 0)
goto err_register;
usb_usual_set_present(USB_US_TYPE_UB);
return 0;
err_register:
unregister_blkdev(UB_MAJOR, DRV_NAME);
err_regblkdev:
return rc;
}
static void __exit ub_exit(void)
{
usb_deregister(&ub_driver);
unregister_blkdev(UB_MAJOR, DRV_NAME);
usb_usual_clear_present(USB_US_TYPE_UB);
}
module_init(ub_init);
module_exit(ub_exit);
MODULE_LICENSE("GPL");
| gpl-2.0 |
DRHAX34/android_kernel_zte_sailboat | fs/smbfs/proc.c | 1458 | 85803 | /*
* proc.c
*
* Copyright (C) 1995, 1996 by Paal-Kr. Engstad and Volker Lendecke
* Copyright (C) 1997 by Volker Lendecke
*
* Please add a note about your changes to smbfs in the ChangeLog file.
*/
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/dcache.h>
#include <linux/nls.h>
#include <linux/smp_lock.h>
#include <linux/net.h>
#include <linux/vfs.h>
#include <linux/smb_fs.h>
#include <linux/smbno.h>
#include <linux/smb_mount.h>
#include <net/sock.h>
#include <asm/string.h>
#include <asm/div64.h>
#include "smb_debug.h"
#include "proto.h"
#include "request.h"
/* Features. Undefine if they cause problems; this should perhaps be a
config option. */
#define SMBFS_POSIX_UNLINK 1
/* Allow smb_retry to be interrupted. */
#define SMB_RETRY_INTR
#define SMB_VWV(packet) ((packet) + SMB_HEADER_LEN)
#define SMB_CMD(packet) (*(packet+8))
#define SMB_WCT(packet) (*(packet+SMB_HEADER_LEN - 1))
#define SMB_DIRINFO_SIZE 43
#define SMB_STATUS_SIZE 21
#define SMB_ST_BLKSIZE (PAGE_SIZE)
#define SMB_ST_BLKSHIFT (PAGE_SHIFT)
static struct smb_ops smb_ops_core;
static struct smb_ops smb_ops_os2;
static struct smb_ops smb_ops_win95;
static struct smb_ops smb_ops_winNT;
static struct smb_ops smb_ops_unix;
static struct smb_ops smb_ops_null;
static void
smb_init_dirent(struct smb_sb_info *server, struct smb_fattr *fattr);
static void
smb_finish_dirent(struct smb_sb_info *server, struct smb_fattr *fattr);
static int
smb_proc_getattr_core(struct smb_sb_info *server, struct dentry *dir,
struct smb_fattr *fattr);
static int
smb_proc_getattr_ff(struct smb_sb_info *server, struct dentry *dentry,
struct smb_fattr *fattr);
static int
smb_proc_setattr_core(struct smb_sb_info *server, struct dentry *dentry,
u16 attr);
static int
smb_proc_setattr_ext(struct smb_sb_info *server,
struct inode *inode, struct smb_fattr *fattr);
static int
smb_proc_query_cifsunix(struct smb_sb_info *server);
static void
install_ops(struct smb_ops *dst, struct smb_ops *src);
static void
str_upper(char *name, int len)
{
while (len--)
{
if (*name >= 'a' && *name <= 'z')
*name -= ('a' - 'A');
name++;
}
}
#if 0
static void
str_lower(char *name, int len)
{
while (len--)
{
if (*name >= 'A' && *name <= 'Z')
*name += ('a' - 'A');
name++;
}
}
#endif
/* reverse a string inline. This is used by the dircache walking routines */
static void reverse_string(char *buf, int len)
{
char c;
char *end = buf+len-1;
while(buf < end) {
c = *buf;
*(buf++) = *end;
*(end--) = c;
}
}
/* no conversion, just a wrapper for memcpy. */
static int convert_memcpy(unsigned char *output, int olen,
const unsigned char *input, int ilen,
struct nls_table *nls_from,
struct nls_table *nls_to)
{
if (olen < ilen)
return -ENAMETOOLONG;
memcpy(output, input, ilen);
return ilen;
}
static inline int write_char(unsigned char ch, char *output, int olen)
{
if (olen < 4)
return -ENAMETOOLONG;
sprintf(output, ":x%02x", ch);
return 4;
}
static inline int write_unichar(wchar_t ch, char *output, int olen)
{
if (olen < 5)
return -ENAMETOOLONG;
sprintf(output, ":%04x", ch);
return 5;
}
/* convert from one "codepage" to another (possibly being utf8). */
static int convert_cp(unsigned char *output, int olen,
const unsigned char *input, int ilen,
struct nls_table *nls_from,
struct nls_table *nls_to)
{
int len = 0;
int n;
wchar_t ch;
while (ilen > 0) {
/* convert by changing to unicode and back to the new cp */
n = nls_from->char2uni(input, ilen, &ch);
if (n == -EINVAL) {
ilen--;
n = write_char(*input++, output, olen);
if (n < 0)
goto fail;
output += n;
olen -= n;
len += n;
continue;
} else if (n < 0)
goto fail;
input += n;
ilen -= n;
n = nls_to->uni2char(ch, output, olen);
if (n == -EINVAL)
n = write_unichar(ch, output, olen);
if (n < 0)
goto fail;
output += n;
olen -= n;
len += n;
}
return len;
fail:
return n;
}
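/*
* Illustrative only: a source byte the input nls cannot map, say 0xe9,
* is escaped by write_char() as the 4-char sequence ":xe9"; a unicode
* char the output nls cannot map, say U+20AC, is escaped by
* write_unichar() as the 5-char sequence ":20ac".
*/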
/* ----------------------------------------------------------- */
/*
* nls_unicode
*
* This encodes/decodes little endian unicode format
*/
static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
{
if (boundlen < 2)
return -EINVAL;
*out++ = uni & 0xff;
*out++ = uni >> 8;
return 2;
}
static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
{
if (boundlen < 2)
return -EINVAL;
*uni = (rawstring[1] << 8) | rawstring[0];
return 2;
}
static struct nls_table unicode_table = {
.charset = "unicode",
.uni2char = uni2char,
.char2uni = char2uni,
};
/* ----------------------------------------------------------- */
static int setcodepage(struct nls_table **p, char *name)
{
struct nls_table *nls;
if (!name || !*name) {
nls = NULL;
} else if ( (nls = load_nls(name)) == NULL) {
printk (KERN_ERR "smbfs: failed to load nls '%s'\n", name);
return -EINVAL;
}
/* if already set, unload the previous one. */
if (*p && *p != &unicode_table)
unload_nls(*p);
*p = nls;
return 0;
}
/* Handles all changes to codepage settings. */
int smb_setcodepage(struct smb_sb_info *server, struct smb_nls_codepage *cp)
{
int n = 0;
smb_lock_server(server);
/* Don't load any nls_* at all, if no remote is requested */
if (!*cp->remote_name)
goto out;
/* local */
n = setcodepage(&server->local_nls, cp->local_name);
if (n != 0)
goto out;
/* remote */
if (!strcmp(cp->remote_name, "unicode")) {
server->remote_nls = &unicode_table;
} else {
n = setcodepage(&server->remote_nls, cp->remote_name);
if (n != 0)
setcodepage(&server->local_nls, NULL);
}
out:
if (server->local_nls != NULL && server->remote_nls != NULL)
server->ops->convert = convert_cp;
else
server->ops->convert = convert_memcpy;
smb_unlock_server(server);
return n;
}
/*****************************************************************************/
/* */
/* Encoding/Decoding section */
/* */
/*****************************************************************************/
static __u8 *
smb_encode_smb_length(__u8 * p, __u32 len)
{
*p = 0;
*(p+1) = 0;
*(p+2) = (len & 0xFF00) >> 8;
*(p+3) = (len & 0xFF);
if (len > 0xFFFF)
{
*(p+1) = 1;
}
return p + 4;
}
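/*
* Illustrative only (not compiled): the length is a 17-bit big-endian
* quantity with bit 16 carried in byte 1, e.g. for 70000 (0x11170):
*/
#if 0
__u8 hdr[4];
smb_encode_smb_length(hdr, 70000);
/* hdr now holds { 0x00, 0x01, 0x11, 0x70 } */
#endif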
/*
* smb_build_path: build the path to entry and name storing it in buf.
* The path returned will have the trailing '\0'.
*/
static int smb_build_path(struct smb_sb_info *server, unsigned char *buf,
int maxlen,
struct dentry *entry, struct qstr *name)
{
unsigned char *path = buf;
int len;
int unicode = (server->mnt->flags & SMB_MOUNT_UNICODE) != 0;
if (maxlen < (2<<unicode))
return -ENAMETOOLONG;
if (maxlen > SMB_MAXPATHLEN + 1)
maxlen = SMB_MAXPATHLEN + 1;
if (entry == NULL)
goto test_name_and_out;
/*
* If IS_ROOT, we have to do no walking at all.
*/
if (IS_ROOT(entry) && !name) {
*path++ = '\\';
if (unicode) *path++ = '\0';
*path++ = '\0';
if (unicode) *path++ = '\0';
return path-buf;
}
/*
* Build the path string walking the tree backward from end to ROOT
* and store it in reversed order [see reverse_string()]
*/
dget(entry);
spin_lock(&entry->d_lock);
while (!IS_ROOT(entry)) {
struct dentry *parent;
if (maxlen < (3<<unicode)) {
spin_unlock(&entry->d_lock);
dput(entry);
return -ENAMETOOLONG;
}
len = server->ops->convert(path, maxlen-2,
entry->d_name.name, entry->d_name.len,
server->local_nls, server->remote_nls);
if (len < 0) {
spin_unlock(&entry->d_lock);
dput(entry);
return len;
}
reverse_string(path, len);
path += len;
if (unicode) {
/* Note: reverse order */
*path++ = '\0';
maxlen--;
}
*path++ = '\\';
maxlen -= len+1;
parent = entry->d_parent;
dget(parent);
spin_unlock(&entry->d_lock);
dput(entry);
entry = parent;
spin_lock(&entry->d_lock);
}
spin_unlock(&entry->d_lock);
dput(entry);
reverse_string(buf, path-buf);
/* maxlen has space for at least one char */
test_name_and_out:
if (name) {
if (maxlen < (3<<unicode))
return -ENAMETOOLONG;
*path++ = '\\';
if (unicode) {
*path++ = '\0';
maxlen--;
}
len = server->ops->convert(path, maxlen-2,
name->name, name->len,
server->local_nls, server->remote_nls);
if (len < 0)
return len;
path += len;
maxlen -= len+1;
}
/* maxlen has space for at least one char */
*path++ = '\0';
if (unicode) *path++ = '\0';
return path-buf;
}
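/*
* Illustrative walk (ASCII mount, no unicode) for the dentry
* "dir/file": the loop above emits each component reversed, leaf
* first, producing "elif\rid\"; the final reverse_string() over the
* whole buffer then yields "\dir\file" in one pass.
*/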
static int smb_encode_path(struct smb_sb_info *server, char *buf, int maxlen,
struct dentry *dir, struct qstr *name)
{
int result;
result = smb_build_path(server, buf, maxlen, dir, name);
if (result < 0)
goto out;
if (server->opt.protocol <= SMB_PROTOCOL_COREPLUS)
str_upper(buf, result);
out:
return result;
}
/* encode_path for non-trans2 request SMBs */
static int smb_simple_encode_path(struct smb_request *req, char **p,
struct dentry * entry, struct qstr * name)
{
struct smb_sb_info *server = req->rq_server;
char *s = *p;
int res;
int maxlen = ((char *)req->rq_buffer + req->rq_bufsize) - s;
int unicode = (server->mnt->flags & SMB_MOUNT_UNICODE);
if (!maxlen)
return -ENAMETOOLONG;
*s++ = 4; /* ASCII data format */
/*
* SMB Unicode strings must be 16-bit aligned relative to the start of
* the packet. If they are not, they must be padded with 0.
*/
if (unicode) {
int align = s - (char *)req->rq_buffer;
if (!(align & 1)) {
*s++ = '\0';
maxlen--;
}
}
res = smb_encode_path(server, s, maxlen-1, entry, name);
if (res < 0)
return res;
*p = s + res;
return 0;
}
/* The following are taken directly from msdos-fs */
/* Linear day numbers of the respective 1sts in non-leap years. */
static int day_n[] =
{0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 0, 0, 0, 0};
/* Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec */
static time_t
utc2local(struct smb_sb_info *server, time_t time)
{
return time - server->opt.serverzone*60;
}
static time_t
local2utc(struct smb_sb_info *server, time_t time)
{
return time + server->opt.serverzone*60;
}
/* Convert a MS-DOS time/date pair to a UNIX date (seconds since 1 1 70). */
static time_t
date_dos2unix(struct smb_sb_info *server, __u16 date, __u16 time)
{
int month, year;
time_t secs;
/* first subtract and mask after that... Otherwise, if
date == 0, bad things happen */
month = ((date >> 5) - 1) & 15;
year = date >> 9;
secs = (time & 31) * 2 + 60 * ((time >> 5) & 63) + (time >> 11) * 3600 + 86400 *
((date & 31) - 1 + day_n[month] + (year / 4) + year * 365 - ((year & 3) == 0 &&
month < 2 ? 1 : 0) + 3653);
/* days since 1.1.70 plus 80's leap day */
return local2utc(server, secs);
}
/* Convert linear UNIX date to a MS-DOS time/date pair. */
static void
date_unix2dos(struct smb_sb_info *server,
int unix_date, __u16 *date, __u16 *time)
{
int day, year, nl_day, month;
unix_date = utc2local(server, unix_date);
if (unix_date < 315532800)
unix_date = 315532800;
*time = (unix_date % 60) / 2 +
(((unix_date / 60) % 60) << 5) +
(((unix_date / 3600) % 24) << 11);
day = unix_date / 86400 - 3652;
year = day / 365;
if ((year + 3) / 4 + 365 * year > day)
year--;
day -= (year + 3) / 4 + 365 * year;
if (day == 59 && !(year & 3)) {
nl_day = day;
month = 2;
} else {
nl_day = (year & 3) || day <= 59 ? day : day - 1;
for (month = 1; month < 12; month++)
if (day_n[month] > nl_day)
break;
}
*date = nl_day - day_n[month - 1] + 1 + (month << 5) + (year << 9);
}
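/*
* Field layout assumed by the two conversions above (standard FAT
* packing; the sample value is illustrative):
* date: bits 15-9 year-1980, 8-5 month (1-12), 4-0 day (1-31)
* time: bits 15-11 hours, 10-5 minutes, 4-0 two-second units
* e.g. date 0x2821, time 0x0000 is 2000-01-01 00:00:00 server time.
*/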
/* The following are taken from fs/ntfs/util.c */
#define NTFS_TIME_OFFSET ((u64)(369*365 + 89) * 24 * 3600 * 10000000)
/*
* Convert the NT UTC (based 1601-01-01, in hundred nanosecond units)
* into Unix UTC (based 1970-01-01, in seconds).
*/
static struct timespec
smb_ntutc2unixutc(u64 ntutc)
{
struct timespec ts;
/* FIXME: what about the timezone difference? */
/* Subtract the NTFS time offset, then convert to 1s intervals. */
u64 t = ntutc - NTFS_TIME_OFFSET;
ts.tv_nsec = do_div(t, 10000000) * 100;
ts.tv_sec = t;
return ts;
}
/* Convert the Unix UTC into NT time */
static u64
smb_unixutc2ntutc(struct timespec ts)
{
/* Note: timezone conversion is probably wrong. */
/* return ((u64)utc2local(server, t)) * 10000000 + NTFS_TIME_OFFSET; */
return ((u64)ts.tv_sec) * 10000000 + ts.tv_nsec/100 + NTFS_TIME_OFFSET;
}
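/*
* Sanity check (illustrative): NTFS_TIME_OFFSET is (369*365 + 89) days
* of 86400 seconds in 100ns units, i.e. 116444736000000000 ticks
* between 1601-01-01 and 1970-01-01. So smb_unixutc2ntutc() of the
* Unix epoch {0, 0} returns exactly NTFS_TIME_OFFSET, and
* smb_ntutc2unixutc(NTFS_TIME_OFFSET) is {0, 0} again.
*/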
#define MAX_FILE_MODE 6
static mode_t file_mode[] = {
S_IFREG, S_IFDIR, S_IFLNK, S_IFCHR, S_IFBLK, S_IFIFO, S_IFSOCK
};
static int smb_filetype_to_mode(u32 filetype)
{
if (filetype > MAX_FILE_MODE) {
PARANOIA("Filetype out of range: %d\n", filetype);
return S_IFREG;
}
return file_mode[filetype];
}
static u32 smb_filetype_from_mode(int mode)
{
if (S_ISREG(mode))
return UNIX_TYPE_FILE;
if (S_ISDIR(mode))
return UNIX_TYPE_DIR;
if (S_ISLNK(mode))
return UNIX_TYPE_SYMLINK;
if (S_ISCHR(mode))
return UNIX_TYPE_CHARDEV;
if (S_ISBLK(mode))
return UNIX_TYPE_BLKDEV;
if (S_ISFIFO(mode))
return UNIX_TYPE_FIFO;
if (S_ISSOCK(mode))
return UNIX_TYPE_SOCKET;
return UNIX_TYPE_UNKNOWN;
}
/*****************************************************************************/
/* */
/* Support section. */
/* */
/*****************************************************************************/
__u32
smb_len(__u8 * p)
{
return ((*(p+1) & 0x1) << 16L) | (*(p+2) << 8L) | *(p+3);
}
static __u16
smb_bcc(__u8 * packet)
{
int pos = SMB_HEADER_LEN + SMB_WCT(packet) * sizeof(__u16);
return WVAL(packet, pos);
}
/* smb_valid_packet: We check if the packet fulfills the basic
requirements of an SMB packet */
static int
smb_valid_packet(__u8 * packet)
{
return (packet[4] == 0xff
&& packet[5] == 'S'
&& packet[6] == 'M'
&& packet[7] == 'B'
&& (smb_len(packet) + 4 == SMB_HEADER_LEN
+ SMB_WCT(packet) * 2 + smb_bcc(packet)));
}
/* smb_verify: We check if we got the answer we expected, and if we
got enough data. If bcc == -1, we don't care. */
static int
smb_verify(__u8 * packet, int command, int wct, int bcc)
{
if (SMB_CMD(packet) != command)
goto bad_command;
if (SMB_WCT(packet) < wct)
goto bad_wct;
if (bcc != -1 && smb_bcc(packet) < bcc)
goto bad_bcc;
return 0;
bad_command:
printk(KERN_ERR "smb_verify: command=%x, SMB_CMD=%x??\n",
command, SMB_CMD(packet));
goto fail;
bad_wct:
printk(KERN_ERR "smb_verify: command=%x, wct=%d, SMB_WCT=%d??\n",
command, wct, SMB_WCT(packet));
goto fail;
bad_bcc:
printk(KERN_ERR "smb_verify: command=%x, bcc=%d, SMB_BCC=%d??\n",
command, bcc, smb_bcc(packet));
fail:
return -EIO;
}
/*
* Returns the maximum read or write size for the "payload", making the
* whole packet fit within the negotiated max_xmit size.
*
* N.B. Since this value is usually computed before locking the server,
* the server's packet size must never be decreased!
*/
static inline int
smb_get_xmitsize(struct smb_sb_info *server, int overhead)
{
return server->opt.max_xmit - overhead;
}
/*
* Calculate the maximum read size
*/
int
smb_get_rsize(struct smb_sb_info *server)
{
/* readX has 12 parameters, read has 5 */
int overhead = SMB_HEADER_LEN + 12 * sizeof(__u16) + 2 + 1 + 2;
int size = smb_get_xmitsize(server, overhead);
VERBOSE("xmit=%d, size=%d\n", server->opt.max_xmit, size);
return size;
}
/*
* Calculate the maximum write size
*/
int
smb_get_wsize(struct smb_sb_info *server)
{
/* writeX has 14 parameters, write has 5 */
int overhead = SMB_HEADER_LEN + 14 * sizeof(__u16) + 2 + 1 + 2;
int size = smb_get_xmitsize(server, overhead);
VERBOSE("xmit=%d, size=%d\n", server->opt.max_xmit, size);
return size;
}
/*
* Convert SMB error codes to -E... errno values.
*/
int
smb_errno(struct smb_request *req)
{
int errcls = req->rq_rcls;
int error = req->rq_err;
char *class = "Unknown";
VERBOSE("errcls %d code %d from command 0x%x\n",
errcls, error, SMB_CMD(req->rq_header));
if (errcls == ERRDOS) {
switch (error) {
case ERRbadfunc:
return -EINVAL;
case ERRbadfile:
case ERRbadpath:
return -ENOENT;
case ERRnofids:
return -EMFILE;
case ERRnoaccess:
return -EACCES;
case ERRbadfid:
return -EBADF;
case ERRbadmcb:
return -EREMOTEIO;
case ERRnomem:
return -ENOMEM;
case ERRbadmem:
return -EFAULT;
case ERRbadenv:
case ERRbadformat:
return -EREMOTEIO;
case ERRbadaccess:
return -EACCES;
case ERRbaddata:
return -E2BIG;
case ERRbaddrive:
return -ENXIO;
case ERRremcd:
return -EREMOTEIO;
case ERRdiffdevice:
return -EXDEV;
case ERRnofiles:
return -ENOENT;
case ERRbadshare:
return -ETXTBSY;
case ERRlock:
return -EDEADLK;
case ERRfilexists:
return -EEXIST;
case ERROR_INVALID_PARAMETER:
return -EINVAL;
case ERROR_DISK_FULL:
return -ENOSPC;
case ERROR_INVALID_NAME:
return -ENOENT;
case ERROR_DIR_NOT_EMPTY:
return -ENOTEMPTY;
case ERROR_NOT_LOCKED:
return -ENOLCK;
case ERROR_ALREADY_EXISTS:
return -EEXIST;
default:
class = "ERRDOS";
goto err_unknown;
}
} else if (errcls == ERRSRV) {
switch (error) {
/* N.B. This is wrong ... EIO ? */
case ERRerror:
return -ENFILE;
case ERRbadpw:
return -EINVAL;
case ERRbadtype:
case ERRtimeout:
return -EIO;
case ERRaccess:
return -EACCES;
/*
* This is a fatal error, as it means the "tree ID"
* for this connection is no longer valid. We map
* to a special error code and get a new connection.
*/
case ERRinvnid:
return -EBADSLT;
default:
class = "ERRSRV";
goto err_unknown;
}
} else if (errcls == ERRHRD) {
switch (error) {
case ERRnowrite:
return -EROFS;
case ERRbadunit:
return -ENODEV;
case ERRnotready:
return -EUCLEAN;
case ERRbadcmd:
case ERRdata:
return -EIO;
case ERRbadreq:
return -ERANGE;
case ERRbadshare:
return -ETXTBSY;
case ERRlock:
return -EDEADLK;
case ERRdiskfull:
return -ENOSPC;
default:
class = "ERRHRD";
goto err_unknown;
}
} else if (errcls == ERRCMD) {
class = "ERRCMD";
} else if (errcls == SUCCESS) {
return 0; /* This is the only valid 0 return */
}
err_unknown:
printk(KERN_ERR "smb_errno: class %s, code %d from command 0x%x\n",
class, error, SMB_CMD(req->rq_header));
return -EIO;
}
/* smb_request_ok: We expect the server to be locked. Then we do the
request and check the answer completely. When smb_request_ok
returns 0, you can be quite sure that everything went well. When
the answer is <=0, the returned number is a valid unix errno. */
static int
smb_request_ok(struct smb_request *req, int command, int wct, int bcc)
{
int result;
req->rq_resp_wct = wct;
req->rq_resp_bcc = bcc;
result = smb_add_request(req);
if (result != 0) {
DEBUG1("smb_request failed\n");
goto out;
}
if (smb_valid_packet(req->rq_header) != 0) {
PARANOIA("invalid packet!\n");
goto out;
}
result = smb_verify(req->rq_header, command, wct, bcc);
out:
return result;
}
/*
* This implements the NEWCONN ioctl. It installs the server pid,
* sets server->state to CONN_VALID, and wakes up the waiting process.
*/
int
smb_newconn(struct smb_sb_info *server, struct smb_conn_opt *opt)
{
struct file *filp;
struct sock *sk;
int error;
VERBOSE("fd=%d, pid=%d\n", opt->fd, current->pid);
smb_lock_server(server);
/*
* Make sure we don't already have a valid connection ...
*/
error = -EINVAL;
if (server->state == CONN_VALID)
goto out;
error = -EACCES;
if (current_uid() != server->mnt->mounted_uid &&
!capable(CAP_SYS_ADMIN))
goto out;
error = -EBADF;
filp = fget(opt->fd);
if (!filp)
goto out;
if (!smb_valid_socket(filp->f_path.dentry->d_inode))
goto out_putf;
server->sock_file = filp;
server->conn_pid = get_pid(task_pid(current));
server->opt = *opt;
server->generation += 1;
server->state = CONN_VALID;
error = 0;
if (server->conn_error) {
/*
* conn_error is the return code we originally decided to
* drop the old connection on. This message should be positive
* and not make people ask questions about why smbfs is printing
* error messages ...
*/
printk(KERN_INFO "SMB connection re-established (%d)\n",
server->conn_error);
server->conn_error = 0;
}
/*
* Store the server in sock user_data (Only used by sunrpc)
*/
sk = SOCKET_I(filp->f_path.dentry->d_inode)->sk;
sk->sk_user_data = server;
/* chain into the data_ready callback */
server->data_ready = xchg(&sk->sk_data_ready, smb_data_ready);
/* check if we have an old smbmount that uses seconds for the
serverzone */
if (server->opt.serverzone > 12*60 || server->opt.serverzone < -12*60)
server->opt.serverzone /= 60;
/* now that we have an established connection we can detect the server
type and enable bug workarounds */
if (server->opt.protocol < SMB_PROTOCOL_LANMAN2)
install_ops(server->ops, &smb_ops_core);
else if (server->opt.protocol == SMB_PROTOCOL_LANMAN2)
install_ops(server->ops, &smb_ops_os2);
else if (server->opt.protocol == SMB_PROTOCOL_NT1 &&
(server->opt.max_xmit < 0x1000) &&
!(server->opt.capabilities & SMB_CAP_NT_SMBS)) {
/* FIXME: can we kill the WIN95 flag now? */
server->mnt->flags |= SMB_MOUNT_WIN95;
VERBOSE("detected WIN95 server\n");
install_ops(server->ops, &smb_ops_win95);
} else {
/*
* Samba has max_xmit 65535
* NT4spX has max_xmit 4536 (or something like that)
* win2k has ...
*/
VERBOSE("detected NT1 (Samba, NT4/5) server\n");
install_ops(server->ops, &smb_ops_winNT);
}
/* FIXME: the win9x code wants to modify these ... (seek/trunc bug) */
if (server->mnt->flags & SMB_MOUNT_OLDATTR) {
server->ops->getattr = smb_proc_getattr_core;
} else if (server->mnt->flags & SMB_MOUNT_DIRATTR) {
server->ops->getattr = smb_proc_getattr_ff;
}
/* Decode server capabilities */
if (server->opt.capabilities & SMB_CAP_LARGE_FILES) {
/* Should be ok to set this now, as no one can access the
mount until the connection has been established. */
SB_of(server)->s_maxbytes = ~0ULL >> 1;
VERBOSE("LFS enabled\n");
}
if (server->opt.capabilities & SMB_CAP_UNICODE) {
server->mnt->flags |= SMB_MOUNT_UNICODE;
VERBOSE("Unicode enabled\n");
} else {
server->mnt->flags &= ~SMB_MOUNT_UNICODE;
}
#if 0
/* flags we may test for other patches ... */
if (server->opt.capabilities & SMB_CAP_LARGE_READX) {
VERBOSE("Large reads enabled\n");
}
if (server->opt.capabilities & SMB_CAP_LARGE_WRITEX) {
VERBOSE("Large writes enabled\n");
}
#endif
if (server->opt.capabilities & SMB_CAP_UNIX) {
struct inode *inode;
VERBOSE("Using UNIX CIFS extensions\n");
install_ops(server->ops, &smb_ops_unix);
inode = SB_of(server)->s_root->d_inode;
if (inode)
inode->i_op = &smb_dir_inode_operations_unix;
}
VERBOSE("protocol=%d, max_xmit=%d, pid=%d capabilities=0x%x\n",
server->opt.protocol, server->opt.max_xmit,
pid_nr(server->conn_pid), server->opt.capabilities);
/* FIXME: this really should be done by smbmount. */
if (server->opt.max_xmit > SMB_MAX_PACKET_SIZE) {
server->opt.max_xmit = SMB_MAX_PACKET_SIZE;
}
smb_unlock_server(server);
smbiod_wake_up();
if (server->opt.capabilities & SMB_CAP_UNIX)
smb_proc_query_cifsunix(server);
server->conn_complete++;
wake_up_interruptible_all(&server->conn_wq);
return error;
out:
smb_unlock_server(server);
smbiod_wake_up();
return error;
out_putf:
fput(filp);
goto out;
}
/* smb_setup_header: We completely set up the packet. You only have to
insert the command-specific fields */
__u8 *
smb_setup_header(struct smb_request *req, __u8 command, __u16 wct, __u16 bcc)
{
__u32 xmit_len = SMB_HEADER_LEN + wct * sizeof(__u16) + bcc + 2;
__u8 *p = req->rq_header;
struct smb_sb_info *server = req->rq_server;
p = smb_encode_smb_length(p, xmit_len - 4);
*p++ = 0xff;
*p++ = 'S';
*p++ = 'M';
*p++ = 'B';
*p++ = command;
memset(p, '\0', 19);
p += 19;
p += 8;
if (server->opt.protocol > SMB_PROTOCOL_CORE) {
int flags = SMB_FLAGS_CASELESS_PATHNAMES;
int flags2 = SMB_FLAGS2_LONG_PATH_COMPONENTS |
SMB_FLAGS2_EXTENDED_ATTRIBUTES; /* EA? not really ... */
*(req->rq_header + smb_flg) = flags;
if (server->mnt->flags & SMB_MOUNT_UNICODE)
flags2 |= SMB_FLAGS2_UNICODE_STRINGS;
WSET(req->rq_header, smb_flg2, flags2);
}
*p++ = wct; /* wct */
p += 2 * wct;
WSET(p, 0, bcc);
/* Include the header in the data to send */
req->rq_iovlen = 1;
req->rq_iov[0].iov_base = req->rq_header;
req->rq_iov[0].iov_len = xmit_len - bcc;
return req->rq_buffer;
}
static void
smb_setup_bcc(struct smb_request *req, __u8 *p)
{
u16 bcc = p - req->rq_buffer;
u8 *pbcc = req->rq_header + SMB_HEADER_LEN + 2*SMB_WCT(req->rq_header);
WSET(pbcc, 0, bcc);
smb_encode_smb_length(req->rq_header, SMB_HEADER_LEN +
2*SMB_WCT(req->rq_header) - 2 + bcc);
/* Include the "bytes" in the data to send */
req->rq_iovlen = 2;
req->rq_iov[1].iov_base = req->rq_buffer;
req->rq_iov[1].iov_len = bcc;
}
static int
smb_proc_seek(struct smb_sb_info *server, __u16 fileid,
__u16 mode, off_t offset)
{
int result;
struct smb_request *req;
result = -ENOMEM;
if (! (req = smb_alloc_request(server, 0)))
goto out;
smb_setup_header(req, SMBlseek, 4, 0);
WSET(req->rq_header, smb_vwv0, fileid);
WSET(req->rq_header, smb_vwv1, mode);
DSET(req->rq_header, smb_vwv2, offset);
req->rq_flags |= SMB_REQ_NORETRY;
result = smb_request_ok(req, SMBlseek, 2, 0);
if (result < 0) {
result = 0;
goto out_free;
}
result = DVAL(req->rq_header, smb_vwv0);
out_free:
smb_rput(req);
out:
return result;
}
static int
smb_proc_open(struct smb_sb_info *server, struct dentry *dentry, int wish)
{
struct inode *ino = dentry->d_inode;
struct smb_inode_info *ei = SMB_I(ino);
int mode, read_write = 0x42, read_only = 0x40;
int res;
char *p;
struct smb_request *req;
/*
* Attempt to open r/w, unless there are no write privileges.
*/
mode = read_write;
if (!(ino->i_mode & (S_IWUSR | S_IWGRP | S_IWOTH)))
mode = read_only;
#if 0
/* FIXME: why is this code not in? below we fix it so that a caller
wanting RO doesn't get RW. smb_revalidate_inode does some
optimization based on access mode. tail -f needs it to be correct.
We must open rw since we don't do the open if called a second time
with different 'wish'. Is that not supported by smb servers? */
if (!(wish & (O_WRONLY | O_RDWR)))
mode = read_only;
#endif
res = -ENOMEM;
if (! (req = smb_alloc_request(server, PAGE_SIZE)))
goto out;
retry:
p = smb_setup_header(req, SMBopen, 2, 0);
WSET(req->rq_header, smb_vwv0, mode);
WSET(req->rq_header, smb_vwv1, aSYSTEM | aHIDDEN | aDIR);
res = smb_simple_encode_path(req, &p, dentry, NULL);
if (res < 0)
goto out_free;
smb_setup_bcc(req, p);
res = smb_request_ok(req, SMBopen, 7, 0);
if (res != 0) {
if (mode == read_write &&
(res == -EACCES || res == -ETXTBSY || res == -EROFS))
{
VERBOSE("%s/%s R/W failed, error=%d, retrying R/O\n",
DENTRY_PATH(dentry), res);
mode = read_only;
req->rq_flags = 0;
goto retry;
}
goto out_free;
}
/* We should now have data in vwv[0..6]. */
ei->fileid = WVAL(req->rq_header, smb_vwv0);
ei->attr = WVAL(req->rq_header, smb_vwv1);
/* smb_vwv2 has mtime */
/* smb_vwv4 has size */
ei->access = (WVAL(req->rq_header, smb_vwv6) & SMB_ACCMASK);
ei->open = server->generation;
out_free:
smb_rput(req);
out:
return res;
}
/*
* Make sure the file is open, and check that the access
* is compatible with the desired access.
*/
int
smb_open(struct dentry *dentry, int wish)
{
struct inode *inode = dentry->d_inode;
int result;
__u16 access;
result = -ENOENT;
if (!inode) {
printk(KERN_ERR "smb_open: no inode for dentry %s/%s\n",
DENTRY_PATH(dentry));
goto out;
}
if (!smb_is_open(inode)) {
struct smb_sb_info *server = server_from_inode(inode);
result = 0;
if (!smb_is_open(inode))
result = smb_proc_open(server, dentry, wish);
if (result)
goto out;
/*
* A successful open means the path is still valid ...
*/
smb_renew_times(dentry);
}
/*
* Check whether the access is compatible with the desired mode.
*/
result = 0;
access = SMB_I(inode)->access;
if (access != wish && access != SMB_O_RDWR) {
PARANOIA("%s/%s access denied, access=%x, wish=%x\n",
DENTRY_PATH(dentry), access, wish);
result = -EACCES;
}
out:
return result;
}
static int
smb_proc_close(struct smb_sb_info *server, __u16 fileid, __u32 mtime)
{
struct smb_request *req;
int result = -ENOMEM;
if (! (req = smb_alloc_request(server, 0)))
goto out;
smb_setup_header(req, SMBclose, 3, 0);
WSET(req->rq_header, smb_vwv0, fileid);
DSET(req->rq_header, smb_vwv1, utc2local(server, mtime));
req->rq_flags |= SMB_REQ_NORETRY;
result = smb_request_ok(req, SMBclose, 0, 0);
smb_rput(req);
out:
return result;
}
/*
* Win NT 4.0 has an apparent bug in that it fails to update the
* modify time when writing to a file. As a workaround, we update
* both modify and access time locally, and post the times to the
* server when closing the file.
*/
static int
smb_proc_close_inode(struct smb_sb_info *server, struct inode * ino)
{
struct smb_inode_info *ei = SMB_I(ino);
int result = 0;
if (smb_is_open(ino))
{
/*
* We clear the open flag in advance, in case another
* process observes the value while we block below.
*/
ei->open = 0;
/*
* Kludge alert: SMB timestamps are accurate only to
* two seconds ... round the times to avoid needless
* cache invalidations!
*/
if (ino->i_mtime.tv_sec & 1) {
ino->i_mtime.tv_sec--;
ino->i_mtime.tv_nsec = 0;
}
if (ino->i_atime.tv_sec & 1) {
ino->i_atime.tv_sec--;
ino->i_atime.tv_nsec = 0;
}
/*
* If the file is open with write permissions,
* update the time stamps to sync mtime and atime.
*/
if ((server->opt.capabilities & SMB_CAP_UNIX) == 0 &&
(server->opt.protocol >= SMB_PROTOCOL_LANMAN2) &&
!(ei->access == SMB_O_RDONLY))
{
struct smb_fattr fattr;
smb_get_inode_attr(ino, &fattr);
smb_proc_setattr_ext(server, ino, &fattr);
}
result = smb_proc_close(server, ei->fileid, ino->i_mtime.tv_sec);
/*
* Force a revalidation after closing ... some servers
* don't post the size until the file has been closed.
*/
if (server->opt.protocol < SMB_PROTOCOL_NT1)
ei->oldmtime = 0;
ei->closed = jiffies;
}
return result;
}
int
smb_close(struct inode *ino)
{
int result = 0;
if (smb_is_open(ino)) {
struct smb_sb_info *server = server_from_inode(ino);
result = smb_proc_close_inode(server, ino);
}
return result;
}
/*
* This is used to close a file following a failed instantiate.
* Since we don't have an inode, we can't use any of the above.
*/
int
smb_close_fileid(struct dentry *dentry, __u16 fileid)
{
struct smb_sb_info *server = server_from_dentry(dentry);
int result;
result = smb_proc_close(server, fileid, get_seconds());
return result;
}
/* In smb_proc_read and smb_proc_write we do not retry, because the
file-id would not be valid after a reconnection. */
static void
smb_proc_read_data(struct smb_request *req)
{
req->rq_iov[0].iov_base = req->rq_buffer;
req->rq_iov[0].iov_len = 3;
req->rq_iov[1].iov_base = req->rq_page;
req->rq_iov[1].iov_len = req->rq_rsize;
req->rq_iovlen = 2;
req->rq_rlen = smb_len(req->rq_header) + 4 - req->rq_bytes_recvd;
}
static int
smb_proc_read(struct inode *inode, loff_t offset, int count, char *data)
{
struct smb_sb_info *server = server_from_inode(inode);
__u16 returned_count, data_len;
unsigned char *buf;
int result;
struct smb_request *req;
u8 rbuf[4];
result = -ENOMEM;
if (! (req = smb_alloc_request(server, 0)))
goto out;
smb_setup_header(req, SMBread, 5, 0);
buf = req->rq_header;
WSET(buf, smb_vwv0, SMB_I(inode)->fileid);
WSET(buf, smb_vwv1, count);
DSET(buf, smb_vwv2, offset);
WSET(buf, smb_vwv4, 0);
req->rq_page = data;
req->rq_rsize = count;
req->rq_callback = smb_proc_read_data;
req->rq_buffer = rbuf;
req->rq_flags |= SMB_REQ_NORETRY | SMB_REQ_STATIC;
result = smb_request_ok(req, SMBread, 5, -1);
if (result < 0)
goto out_free;
returned_count = WVAL(req->rq_header, smb_vwv0);
data_len = WVAL(rbuf, 1);
if (returned_count != data_len) {
printk(KERN_NOTICE "smb_proc_read: returned != data_len\n");
printk(KERN_NOTICE "smb_proc_read: ret_c=%d, data_len=%d\n",
returned_count, data_len);
}
result = data_len;
out_free:
smb_rput(req);
out:
VERBOSE("ino=%ld, fileid=%d, count=%d, result=%d\n",
inode->i_ino, SMB_I(inode)->fileid, count, result);
return result;
}
static int
smb_proc_write(struct inode *inode, loff_t offset, int count, const char *data)
{
struct smb_sb_info *server = server_from_inode(inode);
int result;
u16 fileid = SMB_I(inode)->fileid;
u8 buf[4];
struct smb_request *req;
result = -ENOMEM;
if (! (req = smb_alloc_request(server, 0)))
goto out;
VERBOSE("ino=%ld, fileid=%d, count=%d@%Ld\n",
inode->i_ino, fileid, count, offset);
smb_setup_header(req, SMBwrite, 5, count + 3);
WSET(req->rq_header, smb_vwv0, fileid);
WSET(req->rq_header, smb_vwv1, count);
DSET(req->rq_header, smb_vwv2, offset);
WSET(req->rq_header, smb_vwv4, 0);
buf[0] = 1;
WSET(buf, 1, count); /* yes, again ... */
req->rq_iov[1].iov_base = buf;
req->rq_iov[1].iov_len = 3;
req->rq_iov[2].iov_base = (char *) data;
req->rq_iov[2].iov_len = count;
req->rq_iovlen = 3;
req->rq_flags |= SMB_REQ_NORETRY;
result = smb_request_ok(req, SMBwrite, 1, 0);
if (result >= 0)
result = WVAL(req->rq_header, smb_vwv0);
smb_rput(req);
out:
return result;
}
/*
* In smb_proc_readX and smb_proc_writeX we do not retry, because the
* file-id would not be valid after a reconnection.
*/
#define SMB_READX_MAX_PAD 64
static void
smb_proc_readX_data(struct smb_request *req)
{
/* header length, excluding the netbios length (-4) */
int hdrlen = SMB_HEADER_LEN + req->rq_resp_wct*2 - 2;
int data_off = WVAL(req->rq_header, smb_vwv6);
/*
* Some genius made the padding to the data bytes arbitrary.
* So we must first calculate the amount of padding used by the server.
*/
data_off -= hdrlen;
if (data_off > SMB_READX_MAX_PAD || data_off < 0) {
PARANOIA("offset is larger than SMB_READX_MAX_PAD or negative!\n");
PARANOIA("%d > %d || %d < 0\n", data_off, SMB_READX_MAX_PAD, data_off);
req->rq_rlen = req->rq_bufsize + 1;
return;
}
req->rq_iov[0].iov_base = req->rq_buffer;
req->rq_iov[0].iov_len = data_off;
req->rq_iov[1].iov_base = req->rq_page;
req->rq_iov[1].iov_len = req->rq_rsize;
req->rq_iovlen = 2;
req->rq_rlen = smb_len(req->rq_header) + 4 - req->rq_bytes_recvd;
}
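/*
* Illustrative only: with rq_resp_wct == 12 the header length is
* SMB_HEADER_LEN + 22. If the server placed the data at byte offset
* 64 (smb_vwv6), the difference is the pad, which is received into
* the static pad[] buffer via rq_iov[0] before the payload lands in
* rq_page.
*/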
static int
smb_proc_readX(struct inode *inode, loff_t offset, int count, char *data)
{
struct smb_sb_info *server = server_from_inode(inode);
unsigned char *buf;
int result;
struct smb_request *req;
static char pad[SMB_READX_MAX_PAD];
result = -ENOMEM;
if (! (req = smb_alloc_request(server, 0)))
goto out;
smb_setup_header(req, SMBreadX, 12, 0);
buf = req->rq_header;
WSET(buf, smb_vwv0, 0x00ff);
WSET(buf, smb_vwv1, 0);
WSET(buf, smb_vwv2, SMB_I(inode)->fileid);
DSET(buf, smb_vwv3, (u32)offset); /* low 32 bits */
WSET(buf, smb_vwv5, count);
WSET(buf, smb_vwv6, 0);
DSET(buf, smb_vwv7, 0);
WSET(buf, smb_vwv9, 0);
DSET(buf, smb_vwv10, (u32)(offset >> 32)); /* high 32 bits */
WSET(buf, smb_vwv11, 0);
req->rq_page = data;
req->rq_rsize = count;
req->rq_callback = smb_proc_readX_data;
req->rq_buffer = pad;
req->rq_bufsize = SMB_READX_MAX_PAD;
req->rq_flags |= SMB_REQ_STATIC | SMB_REQ_NORETRY;
result = smb_request_ok(req, SMBreadX, 12, -1);
if (result < 0)
goto out_free;
result = WVAL(req->rq_header, smb_vwv5);
out_free:
smb_rput(req);
out:
VERBOSE("ino=%ld, fileid=%d, count=%d, result=%d\n",
inode->i_ino, SMB_I(inode)->fileid, count, result);
return result;
}
static int
smb_proc_writeX(struct inode *inode, loff_t offset, int count, const char *data)
{
struct smb_sb_info *server = server_from_inode(inode);
int result;
u8 *p;
static u8 pad[4];
struct smb_request *req;
result = -ENOMEM;
if (! (req = smb_alloc_request(server, 0)))
goto out;
VERBOSE("ino=%ld, fileid=%d, count=%d@%Ld\n",
inode->i_ino, SMB_I(inode)->fileid, count, offset);
p = smb_setup_header(req, SMBwriteX, 14, count + 1);
WSET(req->rq_header, smb_vwv0, 0x00ff);
WSET(req->rq_header, smb_vwv1, 0);
WSET(req->rq_header, smb_vwv2, SMB_I(inode)->fileid);
DSET(req->rq_header, smb_vwv3, (u32)offset); /* low 32 bits */
DSET(req->rq_header, smb_vwv5, 0);
WSET(req->rq_header, smb_vwv7, 0); /* write mode */
WSET(req->rq_header, smb_vwv8, 0);
WSET(req->rq_header, smb_vwv9, 0);
WSET(req->rq_header, smb_vwv10, count); /* data length */
WSET(req->rq_header, smb_vwv11, smb_vwv12 + 2 + 1);
DSET(req->rq_header, smb_vwv12, (u32)(offset >> 32));
req->rq_iov[1].iov_base = pad;
req->rq_iov[1].iov_len = 1;
req->rq_iov[2].iov_base = (char *) data;
req->rq_iov[2].iov_len = count;
req->rq_iovlen = 3;
req->rq_flags |= SMB_REQ_NORETRY;
result = smb_request_ok(req, SMBwriteX, 6, 0);
if (result >= 0)
result = WVAL(req->rq_header, smb_vwv2);
smb_rput(req);
out:
return result;
}
int
smb_proc_create(struct dentry *dentry, __u16 attr, time_t ctime, __u16 *fileid)
{
struct smb_sb_info *server = server_from_dentry(dentry);
char *p;
int result;
struct smb_request *req;
result = -ENOMEM;
if (! (req = smb_alloc_request(server, PAGE_SIZE)))
goto out;
p = smb_setup_header(req, SMBcreate, 3, 0);
WSET(req->rq_header, smb_vwv0, attr);
DSET(req->rq_header, smb_vwv1, utc2local(server, ctime));
result = smb_simple_encode_path(req, &p, dentry, NULL);
if (result < 0)
goto out_free;
smb_setup_bcc(req, p);
result = smb_request_ok(req, SMBcreate, 1, 0);
if (result < 0)
goto out_free;
*fileid = WVAL(req->rq_header, smb_vwv0);
result = 0;
out_free:
smb_rput(req);
out:
return result;
}
int
smb_proc_mv(struct dentry *old_dentry, struct dentry *new_dentry)
{
struct smb_sb_info *server = server_from_dentry(old_dentry);
char *p;
int result;
struct smb_request *req;
result = -ENOMEM;
if (! (req = smb_alloc_request(server, PAGE_SIZE)))
goto out;
p = smb_setup_header(req, SMBmv, 1, 0);
WSET(req->rq_header, smb_vwv0, aSYSTEM | aHIDDEN | aDIR);
result = smb_simple_encode_path(req, &p, old_dentry, NULL);
if (result < 0)
goto out_free;
result = smb_simple_encode_path(req, &p, new_dentry, NULL);
if (result < 0)
goto out_free;
smb_setup_bcc(req, p);
if ((result = smb_request_ok(req, SMBmv, 0, 0)) < 0)
goto out_free;
result = 0;
out_free:
smb_rput(req);
out:
return result;
}
/*
* Code common to mkdir and rmdir.
*/
static int
smb_proc_generic_command(struct dentry *dentry, __u8 command)
{
struct smb_sb_info *server = server_from_dentry(dentry);
char *p;
int result;
struct smb_request *req;
result = -ENOMEM;
if (! (req = smb_alloc_request(server, PAGE_SIZE)))
goto out;
p = smb_setup_header(req, command, 0, 0);
result = smb_simple_encode_path(req, &p, dentry, NULL);
if (result < 0)
goto out_free;
smb_setup_bcc(req, p);
result = smb_request_ok(req, command, 0, 0);
if (result < 0)
goto out_free;
result = 0;
out_free:
smb_rput(req);
out:
return result;
}
int
smb_proc_mkdir(struct dentry *dentry)
{
return smb_proc_generic_command(dentry, SMBmkdir);
}
int
smb_proc_rmdir(struct dentry *dentry)
{
return smb_proc_generic_command(dentry, SMBrmdir);
}
#if SMBFS_POSIX_UNLINK
/*
* Removes readonly attribute from a file. Used by unlink to give posix
* semantics.
*/
static int
smb_set_rw(struct dentry *dentry,struct smb_sb_info *server)
{
int result;
struct smb_fattr fattr;
/* FIXME: cifsUE should allow removing a readonly file. */
/* first get current attribute */
smb_init_dirent(server, &fattr);
result = server->ops->getattr(server, dentry, &fattr);
smb_finish_dirent(server, &fattr);
if (result < 0)
return result;
/* if RONLY attribute is set, remove it */
if (fattr.attr & aRONLY) { /* read only attribute is set */
fattr.attr &= ~aRONLY;
result = smb_proc_setattr_core(server, dentry, fattr.attr);
}
return result;
}
#endif
int
smb_proc_unlink(struct dentry *dentry)
{
struct smb_sb_info *server = server_from_dentry(dentry);
int flag = 0;
char *p;
int result;
struct smb_request *req;
result = -ENOMEM;
if (! (req = smb_alloc_request(server, PAGE_SIZE)))
goto out;
retry:
p = smb_setup_header(req, SMBunlink, 1, 0);
WSET(req->rq_header, smb_vwv0, aSYSTEM | aHIDDEN);
result = smb_simple_encode_path(req, &p, dentry, NULL);
if (result < 0)
goto out_free;
smb_setup_bcc(req, p);
if ((result = smb_request_ok(req, SMBunlink, 0, 0)) < 0) {
#if SMBFS_POSIX_UNLINK
if (result == -EACCES && !flag) {
/* POSIX semantics is for the read-only state
of a file to be ignored in unlink(). In the
SMB world an unlink() is refused on a
read-only file. To make things easier for
unix users we try to override the file's
permissions if the unlink fails with the
right error.
This introduces a race condition that could
lead to a file being written by someone who
shouldn't have access, but as far as I can
tell that is unavoidable. */
/* remove RONLY attribute and try again */
result = smb_set_rw(dentry,server);
if (result == 0) {
flag = 1;
req->rq_flags = 0;
goto retry;
}
}
#endif
goto out_free;
}
result = 0;
out_free:
smb_rput(req);
out:
return result;
}
int
smb_proc_flush(struct smb_sb_info *server, __u16 fileid)
{
int result;
struct smb_request *req;
result = -ENOMEM;
if (! (req = smb_alloc_request(server, 0)))
goto out;
smb_setup_header(req, SMBflush, 1, 0);
WSET(req->rq_header, smb_vwv0, fileid);
req->rq_flags |= SMB_REQ_NORETRY;
result = smb_request_ok(req, SMBflush, 0, 0);
smb_rput(req);
out:
return result;
}
static int
smb_proc_trunc32(struct inode *inode, loff_t length)
{
/*
* Writing 0 bytes is old-SMB magic for truncating files.
* MAX_NON_LFS should prevent this from being called with too
* large an offset.
*/
return smb_proc_write(inode, length, 0, NULL);
}
static int
smb_proc_trunc64(struct inode *inode, loff_t length)
{
struct smb_sb_info *server = server_from_inode(inode);
int result;
char *param;
char *data;
struct smb_request *req;
result = -ENOMEM;
if (! (req = smb_alloc_request(server, 14)))
goto out;
param = req->rq_buffer;
data = req->rq_buffer + 6;
/* FIXME: must we also set allocation size? winNT seems to do that */
WSET(param, 0, SMB_I(inode)->fileid);
WSET(param, 2, SMB_SET_FILE_END_OF_FILE_INFO);
WSET(param, 4, 0);
LSET(data, 0, length);
req->rq_trans2_command = TRANSACT2_SETFILEINFO;
req->rq_ldata = 8;
req->rq_data = data;
req->rq_lparm = 6;
req->rq_parm = param;
req->rq_flags |= SMB_REQ_NORETRY;
result = smb_add_request(req);
if (result < 0)
goto out_free;
result = 0;
if (req->rq_rcls != 0)
result = smb_errno(req);
out_free:
smb_rput(req);
out:
return result;
}
static int
smb_proc_trunc95(struct inode *inode, loff_t length)
{
struct smb_sb_info *server = server_from_inode(inode);
int result = smb_proc_trunc32(inode, length);
/*
* win9x doesn't appear to update the size immediately.
* It will return the old file size after the truncate,
* confusing smbfs. So we force an update.
*
* FIXME: is this still necessary?
*/
smb_proc_flush(server, SMB_I(inode)->fileid);
return result;
}
static void
smb_init_dirent(struct smb_sb_info *server, struct smb_fattr *fattr)
{
memset(fattr, 0, sizeof(*fattr));
fattr->f_nlink = 1;
fattr->f_uid = server->mnt->uid;
fattr->f_gid = server->mnt->gid;
fattr->f_unix = 0;
}
static void
smb_finish_dirent(struct smb_sb_info *server, struct smb_fattr *fattr)
{
if (fattr->f_unix)
return;
fattr->f_mode = server->mnt->file_mode;
if (fattr->attr & aDIR) {
fattr->f_mode = server->mnt->dir_mode;
fattr->f_size = SMB_ST_BLKSIZE;
}
/* Check the read-only flag */
if (fattr->attr & aRONLY)
fattr->f_mode &= ~(S_IWUSR | S_IWGRP | S_IWOTH);
/* How many 512-byte blocks do we need for this file? */
fattr->f_blocks = 0;
if (fattr->f_size != 0)
fattr->f_blocks = 1 + ((fattr->f_size-1) >> 9);
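/* e.g. f_size 1..512 gives 1 block, 513..1024 gives 2; 0 stays 0. */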
return;
}
void
smb_init_root_dirent(struct smb_sb_info *server, struct smb_fattr *fattr,
struct super_block *sb)
{
smb_init_dirent(server, fattr);
fattr->attr = aDIR;
fattr->f_ino = 2; /* traditional root inode number */
fattr->f_mtime = current_fs_time(sb);
smb_finish_dirent(server, fattr);
}
/*
* Decode a dirent for old protocols
*
* qname is filled with the decoded, and possibly translated, name.
* fattr receives decoded attributes
*
* Bugs Noted:
* (1) Pathworks servers may pad the name with extra spaces.
*/
static char *
smb_decode_short_dirent(struct smb_sb_info *server, char *p,
struct qstr *qname, struct smb_fattr *fattr,
unsigned char *name_buf)
{
int len;
/*
* SMB doesn't have a concept of inode numbers ...
*/
smb_init_dirent(server, fattr);
fattr->f_ino = 0; /* FIXME: do we need this? */
p += SMB_STATUS_SIZE; /* reserved (search_status) */
fattr->attr = *p;
fattr->f_mtime.tv_sec = date_dos2unix(server, WVAL(p, 3), WVAL(p, 1));
fattr->f_mtime.tv_nsec = 0;
fattr->f_size = DVAL(p, 5);
fattr->f_ctime = fattr->f_mtime;
fattr->f_atime = fattr->f_mtime;
qname->name = p + 9;
len = strnlen(qname->name, 12);
/*
* Trim trailing blanks for Pathworks servers
*/
while (len > 2 && qname->name[len-1] == ' ')
len--;
smb_finish_dirent(server, fattr);
#if 0
/* FIXME: These only work for ascii chars, and recent smbmount doesn't
allow the flag to be set anyway. It kills const. Remove? */
switch (server->opt.case_handling) {
case SMB_CASE_UPPER:
str_upper(entry->name, len);
break;
case SMB_CASE_LOWER:
str_lower(entry->name, len);
break;
default:
break;
}
#endif
qname->len = 0;
len = server->ops->convert(name_buf, SMB_MAXNAMELEN,
qname->name, len,
server->remote_nls, server->local_nls);
if (len > 0) {
qname->len = len;
qname->name = name_buf;
DEBUG1("len=%d, name=%.*s\n",qname->len,qname->len,qname->name);
}
return p + 22;
}
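/*
* For reference, the entry layout implied by the decode above (an
* assumption reconstructed from the offsets used, not taken from the
* protocol docs): after the 21-byte resume key, byte 0 holds the
* attributes, word 1 the DOS time, word 3 the DOS date, dword 5 the
* file size and bytes 9..21 the space-padded 8.3 name, giving the
* fixed 22-byte stride returned as p + 22.
*/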
/*
* This routine is used to read in directory entries from the network.
* Note that it is for short directory name seeks, i.e. protocol <
* SMB_PROTOCOL_LANMAN2
*/
static int
smb_proc_readdir_short(struct file *filp, void *dirent, filldir_t filldir,
struct smb_cache_control *ctl)
{
struct dentry *dir = filp->f_path.dentry;
struct smb_sb_info *server = server_from_dentry(dir);
struct qstr qname;
struct smb_fattr fattr;
char *p;
int result;
int i, first, entries_seen, entries;
int entries_asked = (server->opt.max_xmit - 100) / SMB_DIRINFO_SIZE;
__u16 bcc;
__u16 count;
char status[SMB_STATUS_SIZE];
static struct qstr mask = {
.name = "*.*",
.len = 3,
};
unsigned char *last_status;
struct smb_request *req;
unsigned char *name_buf;
VERBOSE("%s/%s\n", DENTRY_PATH(dir));
lock_kernel();
result = -ENOMEM;
if (! (name_buf = kmalloc(SMB_MAXNAMELEN, GFP_KERNEL)))
goto out;
first = 1;
entries = 0;
entries_seen = 2; /* implicit . and .. */
result = -ENOMEM;
if (! (req = smb_alloc_request(server, server->opt.max_xmit)))
goto out_name;
while (1) {
p = smb_setup_header(req, SMBsearch, 2, 0);
WSET(req->rq_header, smb_vwv0, entries_asked);
WSET(req->rq_header, smb_vwv1, aDIR);
if (first == 1) {
result = smb_simple_encode_path(req, &p, dir, &mask);
if (result < 0)
goto out_free;
if (p + 3 > (char *)req->rq_buffer + req->rq_bufsize) {
result = -ENAMETOOLONG;
goto out_free;
}
*p++ = 5;
WSET(p, 0, 0);
p += 2;
first = 0;
} else {
if (p + 5 + SMB_STATUS_SIZE >
(char *)req->rq_buffer + req->rq_bufsize) {
result = -ENAMETOOLONG;
goto out_free;
}
*p++ = 4;
*p++ = 0;
*p++ = 5;
WSET(p, 0, SMB_STATUS_SIZE);
p += 2;
memcpy(p, status, SMB_STATUS_SIZE);
p += SMB_STATUS_SIZE;
}
smb_setup_bcc(req, p);
result = smb_request_ok(req, SMBsearch, 1, -1);
if (result < 0) {
if ((req->rq_rcls == ERRDOS) &&
(req->rq_err == ERRnofiles))
break;
goto out_free;
}
count = WVAL(req->rq_header, smb_vwv0);
if (count == 0)
break;
result = -EIO;
bcc = smb_bcc(req->rq_header);
if (bcc != count * SMB_DIRINFO_SIZE + 3)
goto out_free;
p = req->rq_buffer + 3;
/* Make sure the response fits in the buffer. Fixed-size
entries mean we don't have to check in the decode loop. */
last_status = req->rq_buffer + 3 + (count-1) * SMB_DIRINFO_SIZE;
if (last_status + SMB_DIRINFO_SIZE >=
req->rq_buffer + req->rq_bufsize) {
printk(KERN_ERR "smb_proc_readdir_short: "
"last dir entry outside buffer! "
"%d@%p %d@%p\n", SMB_DIRINFO_SIZE, last_status,
req->rq_bufsize, req->rq_buffer);
goto out_free;
}
/* Read the last entry into the status field. */
memcpy(status, last_status, SMB_STATUS_SIZE);
/* Now we are ready to parse smb directory entries. */
for (i = 0; i < count; i++) {
p = smb_decode_short_dirent(server, p,
&qname, &fattr, name_buf);
if (qname.len == 0)
continue;
if (entries_seen == 2 && qname.name[0] == '.') {
if (qname.len == 1)
continue;
if (qname.name[1] == '.' && qname.len == 2)
continue;
}
if (!smb_fill_cache(filp, dirent, filldir, ctl,
&qname, &fattr))
; /* stop reading? */
entries_seen++;
}
}
result = entries;
out_free:
smb_rput(req);
out_name:
kfree(name_buf);
out:
unlock_kernel();
return result;
}
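/*
* Sketch of the request body built in the loop above, reconstructed
* from the code rather than from the CIFS documentation: the first
* SMBsearch carries the path and an empty resume key,
*
*	<0x04> "path\0" <0x05> <word 0>
*
* while continuations send an empty path plus the 21-byte resume key
* copied from the last entry of the previous reply:
*
*	<0x04> <0x00> <0x05> <word 21> <21 key bytes>
*/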
static void smb_decode_unix_basic(struct smb_fattr *fattr, struct smb_sb_info *server, char *p)
{
u64 size, disk_bytes;
/* FIXME: verify nls support. all is sent as utf8? */
fattr->f_unix = 1;
fattr->f_mode = 0;
/* FIXME: use the uniqueID from the remote instead? */
/* 0 L file size in bytes */
/* 8 L file size on disk in bytes (block count) */
/* 40 L uid */
/* 48 L gid */
/* 56 W file type */
/* 60 L devmajor */
/* 68 L devminor */
/* 76 L unique ID (inode) */
/* 84 L permissions */
/* 92 L link count */
size = LVAL(p, 0);
disk_bytes = LVAL(p, 8);
/*
* Some samba versions round up on-disk byte usage
* to 1MB boundaries, making it useless. When seeing
* that, use the size instead.
*/
if (!(disk_bytes & 0xfffff))
disk_bytes = size+511;
fattr->f_size = size;
fattr->f_blocks = disk_bytes >> 9;
fattr->f_ctime = smb_ntutc2unixutc(LVAL(p, 16));
fattr->f_atime = smb_ntutc2unixutc(LVAL(p, 24));
fattr->f_mtime = smb_ntutc2unixutc(LVAL(p, 32));
if (server->mnt->flags & SMB_MOUNT_UID)
fattr->f_uid = server->mnt->uid;
else
fattr->f_uid = LVAL(p, 40);
if (server->mnt->flags & SMB_MOUNT_GID)
fattr->f_gid = server->mnt->gid;
else
fattr->f_gid = LVAL(p, 48);
fattr->f_mode |= smb_filetype_to_mode(WVAL(p, 56));
if (S_ISBLK(fattr->f_mode) || S_ISCHR(fattr->f_mode)) {
__u64 major = LVAL(p, 60);
__u64 minor = LVAL(p, 68);
fattr->f_rdev = MKDEV(major & 0xffffffff, minor & 0xffffffff);
if (MAJOR(fattr->f_rdev) != (major & 0xffffffff) ||
MINOR(fattr->f_rdev) != (minor & 0xffffffff))
fattr->f_rdev = 0;
}
fattr->f_mode |= LVAL(p, 84);
if ( (server->mnt->flags & SMB_MOUNT_DMODE) &&
(S_ISDIR(fattr->f_mode)) )
fattr->f_mode = (server->mnt->dir_mode & S_IRWXUGO) | S_IFDIR;
else if ( (server->mnt->flags & SMB_MOUNT_FMODE) &&
!(S_ISDIR(fattr->f_mode)) )
fattr->f_mode = (server->mnt->file_mode & S_IRWXUGO) |
(fattr->f_mode & S_IFMT);
}
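/*
* Illustrative sketch of the disk-usage heuristic above (my reading
* of the code, not authoritative): a value whose low 20 bits are all
* zero is an exact multiple of 1MB, the granularity some samba
* versions round up to. When that happens the block count is derived
* from the file size instead, rounded up to a full 512-byte block.
*/
static inline u64 smb_sketch_disk_bytes(u64 size, u64 disk_bytes)
{
if (!(disk_bytes & 0xfffff))	/* multiple of 1MB: distrust it */
disk_bytes = size + 511;	/* so >> 9 rounds up to a block */
return disk_bytes;
}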
/*
* Interpret a long filename structure using the specified info level:
* level 1 for anything below NT1 protocol
* level 260 for NT1 protocol
*
* qname is filled with the decoded, and possibly translated, name
* fattr receives decoded attributes.
*
* Bugs Noted:
* (1) Win NT 4.0 appends a null byte to names and counts it in the length!
*/
static char *
smb_decode_long_dirent(struct smb_sb_info *server, char *p, int level,
struct qstr *qname, struct smb_fattr *fattr,
unsigned char *name_buf)
{
char *result;
unsigned int len = 0;
int n;
__u16 date, time;
int unicode = (server->mnt->flags & SMB_MOUNT_UNICODE);
/*
* SMB doesn't have a concept of inode numbers ...
*/
smb_init_dirent(server, fattr);
fattr->f_ino = 0; /* FIXME: do we need this? */
switch (level) {
case 1:
len = *((unsigned char *) p + 22);
qname->name = p + 23;
result = p + 24 + len;
date = WVAL(p, 0);
time = WVAL(p, 2);
fattr->f_ctime.tv_sec = date_dos2unix(server, date, time);
fattr->f_ctime.tv_nsec = 0;
date = WVAL(p, 4);
time = WVAL(p, 6);
fattr->f_atime.tv_sec = date_dos2unix(server, date, time);
fattr->f_atime.tv_nsec = 0;
date = WVAL(p, 8);
time = WVAL(p, 10);
fattr->f_mtime.tv_sec = date_dos2unix(server, date, time);
fattr->f_mtime.tv_nsec = 0;
fattr->f_size = DVAL(p, 12);
/* ULONG allocation size */
fattr->attr = WVAL(p, 20);
VERBOSE("info 1 at %p, len=%d, name=%.*s\n",
p, len, len, qname->name);
break;
case 260:
result = p + WVAL(p, 0);
len = DVAL(p, 60);
if (len > 255) len = 255;
/* NT4 null terminates, unless we are using unicode ... */
qname->name = p + 94;
if (!unicode && len && qname->name[len-1] == '\0')
len--;
fattr->f_ctime = smb_ntutc2unixutc(LVAL(p, 8));
fattr->f_atime = smb_ntutc2unixutc(LVAL(p, 16));
fattr->f_mtime = smb_ntutc2unixutc(LVAL(p, 24));
/* change time (32) */
fattr->f_size = LVAL(p, 40);
/* alloc size (48) */
fattr->attr = DVAL(p, 56);
VERBOSE("info 260 at %p, len=%d, name=%.*s\n",
p, len, len, qname->name);
break;
case SMB_FIND_FILE_UNIX:
result = p + WVAL(p, 0);
qname->name = p + 108;
len = strlen(qname->name);
/* FIXME: should we check the length?? */
p += 8;
smb_decode_unix_basic(fattr, server, p);
VERBOSE("info SMB_FIND_FILE_UNIX at %p, len=%d, name=%.*s\n",
p, len, len, qname->name);
break;
default:
PARANOIA("Unknown info level %d\n", level);
result = p + WVAL(p, 0);
goto out;
}
smb_finish_dirent(server, fattr);
#if 0
/* FIXME: These only work for ascii chars, and recent smbmount doesn't
allow the flag to be set anyway. Remove? */
switch (server->opt.case_handling) {
case SMB_CASE_UPPER:
str_upper(qname->name, len);
break;
case SMB_CASE_LOWER:
str_lower(qname->name, len);
break;
default:
break;
}
#endif
qname->len = 0;
n = server->ops->convert(name_buf, SMB_MAXNAMELEN,
qname->name, len,
server->remote_nls, server->local_nls);
if (n > 0) {
qname->len = n;
qname->name = name_buf;
}
out:
return result;
}
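/*
* For reference, the offsets used above (reconstructed from the
* decode, so treat as an assumption): info level 1 packs DOS
* date/time pairs at 0/2 (ctime), 4/6 (atime) and 8/10 (mtime), the
* size at 12, the attributes at 20 and a length-prefixed name at
* 22/23. Level 260 stores the next-entry offset in dword 0, NT times
* at 8/16/24/32, the end-of-file size at 40, the attributes at 56,
* the name length at 60 and the name itself at 94.
*/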
/* findfirst/findnext flags */
#define SMB_CLOSE_AFTER_FIRST (1<<0)
#define SMB_CLOSE_IF_END (1<<1)
#define SMB_REQUIRE_RESUME_KEY (1<<2)
#define SMB_CONTINUE_BIT (1<<3)
/*
* Note: samba-2.0.7 (at least) has a very similar routine, cli_list, in
* source/libsmb/clilist.c. When looking for smb bugs in the readdir code,
* go there for advice.
*
* Bugs Noted:
* (1) When using Info Level 1 Win NT 4.0 truncates directory listings
* for certain patterns of names and/or lengths. The breakage pattern
* is completely reproducible and can be toggled by the creation of a
* single file. (E.g. echo hi >foo breaks, rm -f foo works.)
*/
static int
smb_proc_readdir_long(struct file *filp, void *dirent, filldir_t filldir,
struct smb_cache_control *ctl)
{
struct dentry *dir = filp->f_path.dentry;
struct smb_sb_info *server = server_from_dentry(dir);
struct qstr qname;
struct smb_fattr fattr;
unsigned char *p, *lastname;
char *mask, *param;
__u16 command;
int first, entries_seen;
/* Both NT and OS/2 accept info level 1 (but see note below). */
int info_level = 260;
const int max_matches = 512;
unsigned int ff_searchcount = 0;
unsigned int ff_eos = 0;
unsigned int ff_lastname = 0;
unsigned int ff_dir_handle = 0;
unsigned int loop_count = 0;
unsigned int mask_len, i;
int result;
struct smb_request *req;
unsigned char *name_buf;
static struct qstr star = {
.name = "*",
.len = 1,
};
lock_kernel();
/*
* We always prefer unix style. Use info level 1 for older
* servers that don't do 260.
*/
if (server->opt.capabilities & SMB_CAP_UNIX)
info_level = SMB_FIND_FILE_UNIX;
else if (server->opt.protocol < SMB_PROTOCOL_NT1)
info_level = 1;
result = -ENOMEM;
if (! (name_buf = kmalloc(SMB_MAXNAMELEN+2, GFP_KERNEL)))
goto out;
if (! (req = smb_alloc_request(server, server->opt.max_xmit)))
goto out_name;
param = req->rq_buffer;
/*
* Encode the initial path
*/
mask = param + 12;
result = smb_encode_path(server, mask, SMB_MAXPATHLEN+1, dir, &star);
if (result <= 0)
goto out_free;
mask_len = result - 1; /* mask_len is strlen, not #bytes */
result = 0;
first = 1;
VERBOSE("starting mask_len=%d, mask=%s\n", mask_len, mask);
entries_seen = 2;
ff_eos = 0;
while (ff_eos == 0) {
loop_count += 1;
if (loop_count > 10) {
printk(KERN_WARNING "smb_proc_readdir_long: "
"Looping in FIND_NEXT??\n");
result = -EIO;
break;
}
if (first != 0) {
command = TRANSACT2_FINDFIRST;
WSET(param, 0, aSYSTEM | aHIDDEN | aDIR);
WSET(param, 2, max_matches); /* max count */
WSET(param, 4, SMB_CLOSE_IF_END);
WSET(param, 6, info_level);
DSET(param, 8, 0);
} else {
command = TRANSACT2_FINDNEXT;
VERBOSE("handle=0x%X, lastname=%d, mask=%.*s\n",
ff_dir_handle, ff_lastname, mask_len, mask);
WSET(param, 0, ff_dir_handle); /* search handle */
WSET(param, 2, max_matches); /* max count */
WSET(param, 4, info_level);
DSET(param, 6, 0);
WSET(param, 10, SMB_CONTINUE_BIT|SMB_CLOSE_IF_END);
}
req->rq_trans2_command = command;
req->rq_ldata = 0;
req->rq_data = NULL;
req->rq_lparm = 12 + mask_len + 1;
req->rq_parm = param;
req->rq_flags = 0;
result = smb_add_request(req);
if (result < 0) {
PARANOIA("error=%d, breaking\n", result);
break;
}
if (req->rq_rcls == ERRSRV && req->rq_err == ERRerror) {
/* a damn Win95 bug - sometimes it clags if you
ask it too fast */
schedule_timeout_interruptible(msecs_to_jiffies(200));
continue;
}
if (req->rq_rcls != 0) {
result = smb_errno(req);
PARANOIA("name=%s, result=%d, rcls=%d, err=%d\n",
mask, result, req->rq_rcls, req->rq_err);
break;
}
/* parse out some important return info */
if (first != 0) {
ff_dir_handle = WVAL(req->rq_parm, 0);
ff_searchcount = WVAL(req->rq_parm, 2);
ff_eos = WVAL(req->rq_parm, 4);
ff_lastname = WVAL(req->rq_parm, 8);
} else {
ff_searchcount = WVAL(req->rq_parm, 0);
ff_eos = WVAL(req->rq_parm, 2);
ff_lastname = WVAL(req->rq_parm, 6);
}
if (ff_searchcount == 0)
break;
/* Now we are ready to parse smb directory entries. */
/* point to the data bytes */
p = req->rq_data;
for (i = 0; i < ff_searchcount; i++) {
/* make sure we stay within the buffer */
if (p >= req->rq_data + req->rq_ldata) {
printk(KERN_ERR "smb_proc_readdir_long: "
"dirent pointer outside buffer! "
"%p %d@%p\n",
p, req->rq_ldata, req->rq_data);
result = -EIO; /* always a comm. error? */
goto out_free;
}
p = smb_decode_long_dirent(server, p, info_level,
&qname, &fattr, name_buf);
/* ignore . and .. from the server */
if (entries_seen == 2 && qname.name[0] == '.') {
if (qname.len == 1)
continue;
if (qname.name[1] == '.' && qname.len == 2)
continue;
}
if (!smb_fill_cache(filp, dirent, filldir, ctl,
&qname, &fattr))
; /* stop reading? */
entries_seen++;
}
VERBOSE("received %d entries, eos=%d\n", ff_searchcount,ff_eos);
/*
* We might need the lastname for continuations.
*
* Note that some servers (win95?) point to the filename and
* others (NT4, Samba using NT1) to the dir entry. We assume
* here that those who do not point to a filename do not need
* this info to continue the listing.
*
* OS/2 needs this and talks infolevel 1.
* NetApps want lastname with infolevel 260.
* win2k wants lastname with infolevel 260, and points to
* the record not to the name.
* Samba+CifsUnixExt doesn't need lastname.
*
* Both are happy if we return the data they point to. So we do.
* (FIXME: above is not true with win2k)
*/
mask_len = 0;
if (info_level != SMB_FIND_FILE_UNIX &&
ff_lastname > 0 && ff_lastname < req->rq_ldata) {
lastname = req->rq_data + ff_lastname;
switch (info_level) {
case 260:
mask_len = req->rq_ldata - ff_lastname;
break;
case 1:
/* lastname points to a length byte */
mask_len = *lastname++;
if (ff_lastname + 1 + mask_len > req->rq_ldata)
mask_len = req->rq_ldata - ff_lastname - 1;
break;
}
/*
* Update the mask string for the next message.
*/
if (mask_len > 255)
mask_len = 255;
if (mask_len)
strncpy(mask, lastname, mask_len);
}
mask_len = strnlen(mask, mask_len);
VERBOSE("new mask, len=%d@%d of %d, mask=%.*s\n",
mask_len, ff_lastname, req->rq_ldata, mask_len, mask);
first = 0;
loop_count = 0;
}
out_free:
smb_rput(req);
out_name:
kfree(name_buf);
out:
unlock_kernel();
return result;
}
/*
* This version uses the trans2 TRANSACT2_FINDFIRST message
* to get the attribute data.
*
* Bugs Noted:
*/
static int
smb_proc_getattr_ff(struct smb_sb_info *server, struct dentry *dentry,
struct smb_fattr *fattr)
{
char *param, *mask;
__u16 date, time;
int mask_len, result;
struct smb_request *req;
result = -ENOMEM;
if (! (req = smb_alloc_request(server, PAGE_SIZE)))
goto out;
param = req->rq_buffer;
mask = param + 12;
mask_len = smb_encode_path(server, mask, SMB_MAXPATHLEN+1, dentry,NULL);
if (mask_len < 0) {
result = mask_len;
goto out_free;
}
VERBOSE("name=%s, len=%d\n", mask, mask_len);
WSET(param, 0, aSYSTEM | aHIDDEN | aDIR);
WSET(param, 2, 1); /* max count */
WSET(param, 4, 1); /* close after this call */
WSET(param, 6, 1); /* info_level */
DSET(param, 8, 0);
req->rq_trans2_command = TRANSACT2_FINDFIRST;
req->rq_ldata = 0;
req->rq_data = NULL;
req->rq_lparm = 12 + mask_len;
req->rq_parm = param;
req->rq_flags = 0;
result = smb_add_request(req);
if (result < 0)
goto out_free;
if (req->rq_rcls != 0) {
result = smb_errno(req);
#ifdef SMBFS_PARANOIA
if (result != -ENOENT)
PARANOIA("error for %s, rcls=%d, err=%d\n",
mask, req->rq_rcls, req->rq_err);
#endif
goto out_free;
}
/* Make sure we got enough data ... */
result = -EINVAL;
if (req->rq_ldata < 22 || WVAL(req->rq_parm, 2) != 1) {
PARANOIA("bad result for %s, len=%d, count=%d\n",
mask, req->rq_ldata, WVAL(req->rq_parm, 2));
goto out_free;
}
/*
* Decode the response into the fattr ...
*/
date = WVAL(req->rq_data, 0);
time = WVAL(req->rq_data, 2);
fattr->f_ctime.tv_sec = date_dos2unix(server, date, time);
fattr->f_ctime.tv_nsec = 0;
date = WVAL(req->rq_data, 4);
time = WVAL(req->rq_data, 6);
fattr->f_atime.tv_sec = date_dos2unix(server, date, time);
fattr->f_atime.tv_nsec = 0;
date = WVAL(req->rq_data, 8);
time = WVAL(req->rq_data, 10);
fattr->f_mtime.tv_sec = date_dos2unix(server, date, time);
fattr->f_mtime.tv_nsec = 0;
VERBOSE("name=%s, date=%x, time=%x, mtime=%ld\n",
mask, date, time, fattr->f_mtime.tv_sec);
fattr->f_size = DVAL(req->rq_data, 12);
/* ULONG allocation size */
fattr->attr = WVAL(req->rq_data, 20);
result = 0;
out_free:
smb_rput(req);
out:
return result;
}
static int
smb_proc_getattr_core(struct smb_sb_info *server, struct dentry *dir,
struct smb_fattr *fattr)
{
int result;
char *p;
struct smb_request *req;
result = -ENOMEM;
if (! (req = smb_alloc_request(server, PAGE_SIZE)))
goto out;
p = smb_setup_header(req, SMBgetatr, 0, 0);
result = smb_simple_encode_path(req, &p, dir, NULL);
if (result < 0)
goto out_free;
smb_setup_bcc(req, p);
if ((result = smb_request_ok(req, SMBgetatr, 10, 0)) < 0)
goto out_free;
fattr->attr = WVAL(req->rq_header, smb_vwv0);
fattr->f_mtime.tv_sec = local2utc(server, DVAL(req->rq_header, smb_vwv1));
fattr->f_mtime.tv_nsec = 0;
fattr->f_size = DVAL(req->rq_header, smb_vwv3);
fattr->f_ctime = fattr->f_mtime;
fattr->f_atime = fattr->f_mtime;
#ifdef SMBFS_DEBUG_TIMESTAMP
printk("getattr_core: %s/%s, mtime=%ld\n",
DENTRY_PATH(dir), fattr->f_mtime);
#endif
result = 0;
out_free:
smb_rput(req);
out:
return result;
}
/*
* Bugs Noted:
* (1) Win 95 swaps the date and time fields in the standard info level.
*/
static int
smb_proc_getattr_trans2(struct smb_sb_info *server, struct dentry *dir,
struct smb_request *req, int infolevel)
{
char *p, *param;
int result;
param = req->rq_buffer;
WSET(param, 0, infolevel);
DSET(param, 2, 0);
result = smb_encode_path(server, param+6, SMB_MAXPATHLEN+1, dir, NULL);
if (result < 0)
goto out;
p = param + 6 + result;
req->rq_trans2_command = TRANSACT2_QPATHINFO;
req->rq_ldata = 0;
req->rq_data = NULL;
req->rq_lparm = p - param;
req->rq_parm = param;
req->rq_flags = 0;
result = smb_add_request(req);
if (result < 0)
goto out;
if (req->rq_rcls != 0) {
VERBOSE("for %s: result=%d, rcls=%d, err=%d\n",
&param[6], result, req->rq_rcls, req->rq_err);
result = smb_errno(req);
goto out;
}
result = -ENOENT;
if (req->rq_ldata < 22) {
PARANOIA("not enough data for %s, len=%d\n",
&param[6], req->rq_ldata);
goto out;
}
result = 0;
out:
return result;
}
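/*
* Minimal sketch of the TRANSACT2_QPATHINFO parameter block built
* above; the layout is taken from the code itself and the helper is
* purely illustrative:
*/
static inline int smb_sketch_qpathinfo_params(char *param, int infolevel,
int encoded_path_len)
{
WSET(param, 0, infolevel);	/* word: information level */
DSET(param, 2, 0);		/* dword: reserved, must be zero */
/* the path itself is encoded at param + 6 by smb_encode_path() */
return 6 + encoded_path_len;	/* total parameter length (rq_lparm) */
}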
static int
smb_proc_getattr_trans2_std(struct smb_sb_info *server, struct dentry *dir,
struct smb_fattr *attr)
{
u16 date, time;
int off_date = 0, off_time = 2;
int result;
struct smb_request *req;
result = -ENOMEM;
if (! (req = smb_alloc_request(server, PAGE_SIZE)))
goto out;
result = smb_proc_getattr_trans2(server, dir, req, SMB_INFO_STANDARD);
if (result < 0)
goto out_free;
/*
* Kludge alert: Win 95 swaps the date and time field,
* contrary to the CIFS docs and Win NT practice.
*/
if (server->mnt->flags & SMB_MOUNT_WIN95) {
off_date = 2;
off_time = 0;
}
date = WVAL(req->rq_data, off_date);
time = WVAL(req->rq_data, off_time);
attr->f_ctime.tv_sec = date_dos2unix(server, date, time);
attr->f_ctime.tv_nsec = 0;
date = WVAL(req->rq_data, 4 + off_date);
time = WVAL(req->rq_data, 4 + off_time);
attr->f_atime.tv_sec = date_dos2unix(server, date, time);
attr->f_atime.tv_nsec = 0;
date = WVAL(req->rq_data, 8 + off_date);
time = WVAL(req->rq_data, 8 + off_time);
attr->f_mtime.tv_sec = date_dos2unix(server, date, time);
attr->f_mtime.tv_nsec = 0;
#ifdef SMBFS_DEBUG_TIMESTAMP
printk(KERN_DEBUG "getattr_trans2: %s/%s, date=%x, time=%x, mtime=%ld\n",
DENTRY_PATH(dir), date, time, attr->f_mtime);
#endif
attr->f_size = DVAL(req->rq_data, 12);
attr->attr = WVAL(req->rq_data, 20);
out_free:
smb_rput(req);
out:
return result;
}
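/*
* Worked example of the Win 95 kludge above (illustrative only): with
* SMB_MOUNT_WIN95 set, off_date/off_time become 2/0, so the ctime
* pair is read as date = WVAL(data, 2), time = WVAL(data, 0) instead
* of the documented date-first order, and likewise at +4 and +8 for
* atime and mtime.
*/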
static int
smb_proc_getattr_trans2_all(struct smb_sb_info *server, struct dentry *dir,
struct smb_fattr *attr)
{
struct smb_request *req;
int result;
result = -ENOMEM;
if (! (req = smb_alloc_request(server, PAGE_SIZE)))
goto out;
result = smb_proc_getattr_trans2(server, dir, req,
SMB_QUERY_FILE_ALL_INFO);
if (result < 0)
goto out_free;
attr->f_ctime = smb_ntutc2unixutc(LVAL(req->rq_data, 0));
attr->f_atime = smb_ntutc2unixutc(LVAL(req->rq_data, 8));
attr->f_mtime = smb_ntutc2unixutc(LVAL(req->rq_data, 16));
/* change (24) */
attr->attr = WVAL(req->rq_data, 32);
/* pad? (34) */
/* allocated size (40) */
attr->f_size = LVAL(req->rq_data, 48);
out_free:
smb_rput(req);
out:
return result;
}
static int
smb_proc_getattr_unix(struct smb_sb_info *server, struct dentry *dir,
struct smb_fattr *attr)
{
struct smb_request *req;
int result;
result = -ENOMEM;
if (! (req = smb_alloc_request(server, PAGE_SIZE)))
goto out;
result = smb_proc_getattr_trans2(server, dir, req,
SMB_QUERY_FILE_UNIX_BASIC);
if (result < 0)
goto out_free;
smb_decode_unix_basic(attr, server, req->rq_data);
out_free:
smb_rput(req);
out:
return result;
}
static int
smb_proc_getattr_95(struct smb_sb_info *server, struct dentry *dir,
struct smb_fattr *attr)
{
struct inode *inode = dir->d_inode;
int result;
/* FIXME: why not use the "all" version? */
result = smb_proc_getattr_trans2_std(server, dir, attr);
if (result < 0)
goto out;
/*
* None of the getattr versions here can make win9x return the right
* filesize if there are changes made to an open file.
* A seek-to-end does return the right size, but we only need to do
* that on files we have written.
*/
if (inode && SMB_I(inode)->flags & SMB_F_LOCALWRITE &&
smb_is_open(inode))
{
__u16 fileid = SMB_I(inode)->fileid;
attr->f_size = smb_proc_seek(server, fileid, 2, 0);
}
out:
return result;
}
static int
smb_proc_ops_wait(struct smb_sb_info *server)
{
int result;
result = wait_event_interruptible_timeout(server->conn_wq,
server->conn_complete, 30*HZ);
if (!result || signal_pending(current))
return -EIO;
return 0;
}
static int
smb_proc_getattr_null(struct smb_sb_info *server, struct dentry *dir,
struct smb_fattr *fattr)
{
int result;
if (smb_proc_ops_wait(server) < 0)
return -EIO;
smb_init_dirent(server, fattr);
result = server->ops->getattr(server, dir, fattr);
smb_finish_dirent(server, fattr);
return result;
}
static int
smb_proc_readdir_null(struct file *filp, void *dirent, filldir_t filldir,
struct smb_cache_control *ctl)
{
struct smb_sb_info *server = server_from_dentry(filp->f_path.dentry);
if (smb_proc_ops_wait(server) < 0)
return -EIO;
return server->ops->readdir(filp, dirent, filldir, ctl);
}
int
smb_proc_getattr(struct dentry *dir, struct smb_fattr *fattr)
{
struct smb_sb_info *server = server_from_dentry(dir);
int result;
smb_init_dirent(server, fattr);
result = server->ops->getattr(server, dir, fattr);
smb_finish_dirent(server, fattr);
return result;
}
/*
* Because of bugs in the core protocol, we use this only to set
* attributes. See smb_proc_settime() below for timestamp handling.
*
* Bugs Noted:
* (1) If mtime is non-zero, both Win 3.1 and Win 95 fail
* with an undocumented error (ERRDOS code 50). Setting
* mtime to 0 allows the attributes to be set.
* (2) The extra parameters following the name string aren't
* in the CIFS docs, but seem to be necessary for operation.
*/
static int
smb_proc_setattr_core(struct smb_sb_info *server, struct dentry *dentry,
__u16 attr)
{
char *p;
int result;
struct smb_request *req;
result = -ENOMEM;
if (! (req = smb_alloc_request(server, PAGE_SIZE)))
goto out;
p = smb_setup_header(req, SMBsetatr, 8, 0);
WSET(req->rq_header, smb_vwv0, attr);
DSET(req->rq_header, smb_vwv1, 0); /* mtime */
WSET(req->rq_header, smb_vwv3, 0); /* reserved values */
WSET(req->rq_header, smb_vwv4, 0);
WSET(req->rq_header, smb_vwv5, 0);
WSET(req->rq_header, smb_vwv6, 0);
WSET(req->rq_header, smb_vwv7, 0);
result = smb_simple_encode_path(req, &p, dentry, NULL);
if (result < 0)
goto out_free;
if (p + 2 > (char *)req->rq_buffer + req->rq_bufsize) {
result = -ENAMETOOLONG;
goto out_free;
}
*p++ = 4;
*p++ = 0;
smb_setup_bcc(req, p);
result = smb_request_ok(req, SMBsetatr, 0, 0);
if (result < 0)
goto out_free;
result = 0;
out_free:
smb_rput(req);
out:
return result;
}
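/*
* Sketch of the SMBsetatr word block filled in above, reconstructed
* from the code: vwv0 carries the attributes, vwv1/vwv2 the mtime
* (forced to zero here, see bug (1)), vwv3..vwv7 the undocumented
* reserved zeros from bug (2); the byte block then holds the encoded
* path followed by an empty <0x04><0x00> string.
*/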
/*
* Because of bugs in the trans2 setattr messages, we must set
* attributes and timestamps separately. The core SMBsetatr
* message seems to be the only reliable way to set attributes.
*/
int
smb_proc_setattr(struct dentry *dir, struct smb_fattr *fattr)
{
struct smb_sb_info *server = server_from_dentry(dir);
int result;
VERBOSE("setting %s/%s, open=%d\n",
DENTRY_PATH(dir), smb_is_open(dir->d_inode));
result = smb_proc_setattr_core(server, dir, fattr->attr);
return result;
}
/*
* Sets the timestamps for a file opened with write permissions.
*/
static int
smb_proc_setattr_ext(struct smb_sb_info *server,
struct inode *inode, struct smb_fattr *fattr)
{
__u16 date, time;
int result;
struct smb_request *req;
result = -ENOMEM;
if (! (req = smb_alloc_request(server, 0)))
goto out;
smb_setup_header(req, SMBsetattrE, 7, 0);
WSET(req->rq_header, smb_vwv0, SMB_I(inode)->fileid);
/* We don't change the creation time */
WSET(req->rq_header, smb_vwv1, 0);
WSET(req->rq_header, smb_vwv2, 0);
date_unix2dos(server, fattr->f_atime.tv_sec, &date, &time);
WSET(req->rq_header, smb_vwv3, date);
WSET(req->rq_header, smb_vwv4, time);
date_unix2dos(server, fattr->f_mtime.tv_sec, &date, &time);
WSET(req->rq_header, smb_vwv5, date);
WSET(req->rq_header, smb_vwv6, time);
#ifdef SMBFS_DEBUG_TIMESTAMP
printk(KERN_DEBUG "smb_proc_setattr_ext: date=%d, time=%d, mtime=%ld\n",
date, time, fattr->f_mtime);
#endif
req->rq_flags |= SMB_REQ_NORETRY;
result = smb_request_ok(req, SMBsetattrE, 0, 0);
if (result < 0)
goto out_free;
result = 0;
out_free:
smb_rput(req);
out:
return result;
}
/*
* Bugs Noted:
* (1) The TRANSACT2_SETPATHINFO message under Win NT 4.0 doesn't
* set the file's attribute flags.
*/
static int
smb_proc_setattr_trans2(struct smb_sb_info *server,
struct dentry *dir, struct smb_fattr *fattr)
{
__u16 date, time;
char *p, *param;
int result;
char data[26];
struct smb_request *req;
result = -ENOMEM;
if (! (req = smb_alloc_request(server, PAGE_SIZE)))
goto out;
param = req->rq_buffer;
WSET(param, 0, 1); /* Info level SMB_INFO_STANDARD */
DSET(param, 2, 0);
result = smb_encode_path(server, param+6, SMB_MAXPATHLEN+1, dir, NULL);
if (result < 0)
goto out_free;
p = param + 6 + result;
WSET(data, 0, 0); /* creation time */
WSET(data, 2, 0);
date_unix2dos(server, fattr->f_atime.tv_sec, &date, &time);
WSET(data, 4, date);
WSET(data, 6, time);
date_unix2dos(server, fattr->f_mtime.tv_sec, &date, &time);
WSET(data, 8, date);
WSET(data, 10, time);
#ifdef SMBFS_DEBUG_TIMESTAMP
printk(KERN_DEBUG "setattr_trans2: %s/%s, date=%x, time=%x, mtime=%ld\n",
DENTRY_PATH(dir), date, time, fattr->f_mtime);
#endif
DSET(data, 12, 0); /* size */
DSET(data, 16, 0); /* blksize */
WSET(data, 20, 0); /* attr */
DSET(data, 22, 0); /* ULONG EA size */
req->rq_trans2_command = TRANSACT2_SETPATHINFO;
req->rq_ldata = 26;
req->rq_data = data;
req->rq_lparm = p - param;
req->rq_parm = param;
req->rq_flags = 0;
result = smb_add_request(req);
if (result < 0)
goto out_free;
result = 0;
if (req->rq_rcls != 0)
result = smb_errno(req);
out_free:
smb_rput(req);
out:
return result;
}
/*
* ATTR_MODE 0x001
* ATTR_UID 0x002
* ATTR_GID 0x004
* ATTR_SIZE 0x008
* ATTR_ATIME 0x010
* ATTR_MTIME 0x020
* ATTR_CTIME 0x040
* ATTR_ATIME_SET 0x080
* ATTR_MTIME_SET 0x100
* ATTR_FORCE 0x200
* ATTR_ATTR_FLAG 0x400
*
* major/minor should only be set by mknod.
*/
int
smb_proc_setattr_unix(struct dentry *d, struct iattr *attr,
unsigned int major, unsigned int minor)
{
struct smb_sb_info *server = server_from_dentry(d);
u64 nttime;
char *p, *param;
int result;
char data[100];
struct smb_request *req;
result = -ENOMEM;
if (! (req = smb_alloc_request(server, PAGE_SIZE)))
goto out;
param = req->rq_buffer;
DEBUG1("valid flags = 0x%04x\n", attr->ia_valid);
WSET(param, 0, SMB_SET_FILE_UNIX_BASIC);
DSET(param, 2, 0);
result = smb_encode_path(server, param+6, SMB_MAXPATHLEN+1, d, NULL);
if (result < 0)
goto out_free;
p = param + 6 + result;
/* 0 L file size in bytes */
/* 8 L file size on disk in bytes (block count) */
/* 40 L uid */
/* 48 L gid */
/* 56 W file type enum */
/* 60 L devmajor */
/* 68 L devminor */
/* 76 L unique ID (inode) */
/* 84 L permissions */
/* 92 L link count */
LSET(data, 0, SMB_SIZE_NO_CHANGE);
LSET(data, 8, SMB_SIZE_NO_CHANGE);
LSET(data, 16, SMB_TIME_NO_CHANGE);
LSET(data, 24, SMB_TIME_NO_CHANGE);
LSET(data, 32, SMB_TIME_NO_CHANGE);
LSET(data, 40, SMB_UID_NO_CHANGE);
LSET(data, 48, SMB_GID_NO_CHANGE);
DSET(data, 56, smb_filetype_from_mode(attr->ia_mode));
LSET(data, 60, major);
LSET(data, 68, minor);
LSET(data, 76, 0);
LSET(data, 84, SMB_MODE_NO_CHANGE);
LSET(data, 92, 0);
if (attr->ia_valid & ATTR_SIZE) {
LSET(data, 0, attr->ia_size);
LSET(data, 8, 0); /* can't set anyway */
}
/*
* FIXME: check that the conversion function is the correct one
*
* we can't set ctime but we might as well pass this to the server
* and let it ignore it.
*/
if (attr->ia_valid & ATTR_CTIME) {
nttime = smb_unixutc2ntutc(attr->ia_ctime);
LSET(data, 16, nttime);
}
if (attr->ia_valid & ATTR_ATIME) {
nttime = smb_unixutc2ntutc(attr->ia_atime);
LSET(data, 24, nttime);
}
if (attr->ia_valid & ATTR_MTIME) {
nttime = smb_unixutc2ntutc(attr->ia_mtime);
LSET(data, 32, nttime);
}
if (attr->ia_valid & ATTR_UID) {
LSET(data, 40, attr->ia_uid);
}
if (attr->ia_valid & ATTR_GID) {
LSET(data, 48, attr->ia_gid);
}
if (attr->ia_valid & ATTR_MODE) {
LSET(data, 84, attr->ia_mode);
}
req->rq_trans2_command = TRANSACT2_SETPATHINFO;
req->rq_ldata = 100;
req->rq_data = data;
req->rq_lparm = p - param;
req->rq_parm = param;
req->rq_flags = 0;
result = smb_add_request(req);
out_free:
smb_rput(req);
out:
return result;
}
/*
* Set the modify and access timestamps for a file.
*
* Incredibly enough, in all of SMB there is no message to allow
* setting both attributes and timestamps at once.
*
* Bugs Noted:
* (1) Win 95 doesn't support the TRANSACT2_SETFILEINFO message
* with info level 1 (INFO_STANDARD).
* (2) Win 95 seems not to support setting directory timestamps.
* (3) Under the core protocol apparently the only way to set the
* timestamp is to open and close the file.
*/
int
smb_proc_settime(struct dentry *dentry, struct smb_fattr *fattr)
{
struct smb_sb_info *server = server_from_dentry(dentry);
struct inode *inode = dentry->d_inode;
int result;
VERBOSE("setting %s/%s, open=%d\n",
DENTRY_PATH(dentry), smb_is_open(inode));
/* setting the time on a Win95 server fails (tridge) */
if (server->opt.protocol >= SMB_PROTOCOL_LANMAN2 &&
!(server->mnt->flags & SMB_MOUNT_WIN95)) {
if (smb_is_open(inode) && SMB_I(inode)->access != SMB_O_RDONLY)
result = smb_proc_setattr_ext(server, inode, fattr);
else
result = smb_proc_setattr_trans2(server, dentry, fattr);
} else {
/*
* Fail silently on directories ... timestamp can't be set?
*/
result = 0;
if (S_ISREG(inode->i_mode)) {
/*
* Set the mtime by opening and closing the file.
* Note that the file is opened read-only, but this
* still allows us to set the date (tridge)
*/
result = -EACCES;
if (!smb_is_open(inode))
smb_proc_open(server, dentry, SMB_O_RDONLY);
if (smb_is_open(inode)) {
inode->i_mtime = fattr->f_mtime;
result = smb_proc_close_inode(server, inode);
}
}
}
return result;
}
int
smb_proc_dskattr(struct dentry *dentry, struct kstatfs *attr)
{
struct smb_sb_info *server = SMB_SB(dentry->d_sb);
int result;
char *p;
long unit;
struct smb_request *req;
result = -ENOMEM;
if (! (req = smb_alloc_request(server, 0)))
goto out;
smb_setup_header(req, SMBdskattr, 0, 0);
if ((result = smb_request_ok(req, SMBdskattr, 5, 0)) < 0)
goto out_free;
p = SMB_VWV(req->rq_header);
unit = (WVAL(p, 2) * WVAL(p, 4)) >> SMB_ST_BLKSHIFT;
attr->f_blocks = WVAL(p, 0) * unit;
attr->f_bsize = SMB_ST_BLKSIZE;
attr->f_bavail = attr->f_bfree = WVAL(p, 6) * unit;
result = 0;
out_free:
smb_rput(req);
out:
return result;
}
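/*
* Worked example for the arithmetic above, under the assumption of a
* server reporting 64 sectors per allocation unit (vwv2) and 512
* bytes per sector (vwv4): one unit is 32768 bytes, i.e. 64 blocks of
* SMB_ST_BLKSIZE after the SMB_ST_BLKSHIFT shift, so the unit counts
* in vwv0/vwv6 are scaled by 64. Illustrative only:
*/
static inline long smb_sketch_dskattr_unit(u16 per_unit, u16 per_sector)
{
return ((long)per_unit * per_sector) >> SMB_ST_BLKSHIFT;
}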
int
smb_proc_read_link(struct smb_sb_info *server, struct dentry *d,
char *buffer, int len)
{
char *p, *param;
int result;
struct smb_request *req;
DEBUG1("readlink of %s/%s\n", DENTRY_PATH(d));
result = -ENOMEM;
if (! (req = smb_alloc_request(server, PAGE_SIZE)))
goto out;
param = req->rq_buffer;
WSET(param, 0, SMB_QUERY_FILE_UNIX_LINK);
DSET(param, 2, 0);
result = smb_encode_path(server, param+6, SMB_MAXPATHLEN+1, d, NULL);
if (result < 0)
goto out_free;
p = param + 6 + result;
req->rq_trans2_command = TRANSACT2_QPATHINFO;
req->rq_ldata = 0;
req->rq_data = NULL;
req->rq_lparm = p - param;
req->rq_parm = param;
req->rq_flags = 0;
result = smb_add_request(req);
if (result < 0)
goto out_free;
DEBUG1("for %s: result=%d, rcls=%d, err=%d\n",
&param[6], result, req->rq_rcls, req->rq_err);
/* copy data up to the \0 or buffer length */
result = len;
if (req->rq_ldata < len)
result = req->rq_ldata;
strncpy(buffer, req->rq_data, result);
out_free:
smb_rput(req);
out:
return result;
}
/*
* Create a symlink object called dentry which points to oldpath.
* Samba does not permit dangling links but returns a suitable error message.
*/
int
smb_proc_symlink(struct smb_sb_info *server, struct dentry *d,
const char *oldpath)
{
char *p, *param;
int result;
struct smb_request *req;
result = -ENOMEM;
if (! (req = smb_alloc_request(server, PAGE_SIZE)))
goto out;
param = req->rq_buffer;
WSET(param, 0, SMB_SET_FILE_UNIX_LINK);
DSET(param, 2, 0);
result = smb_encode_path(server, param + 6, SMB_MAXPATHLEN+1, d, NULL);
if (result < 0)
goto out_free;
p = param + 6 + result;
req->rq_trans2_command = TRANSACT2_SETPATHINFO;
req->rq_ldata = strlen(oldpath) + 1;
req->rq_data = (char *) oldpath;
req->rq_lparm = p - param;
req->rq_parm = param;
req->rq_flags = 0;
result = smb_add_request(req);
if (result < 0)
goto out_free;
DEBUG1("for %s: result=%d, rcls=%d, err=%d\n",
&param[6], result, req->rq_rcls, req->rq_err);
result = 0;
out_free:
smb_rput(req);
out:
return result;
}
/*
* Create a hard link object called new_dentry which points to dentry.
*/
int
smb_proc_link(struct smb_sb_info *server, struct dentry *dentry,
struct dentry *new_dentry)
{
char *p, *param;
int result;
struct smb_request *req;
result = -ENOMEM;
if (! (req = smb_alloc_request(server, PAGE_SIZE)))
goto out;
param = req->rq_buffer;
WSET(param, 0, SMB_SET_FILE_UNIX_HLINK);
DSET(param, 2, 0);
result = smb_encode_path(server, param + 6, SMB_MAXPATHLEN+1,
new_dentry, NULL);
if (result < 0)
goto out_free;
p = param + 6 + result;
/* Grr, pointless separation of parameters and data ... */
req->rq_data = p;
req->rq_ldata = smb_encode_path(server, p, SMB_MAXPATHLEN+1,
dentry, NULL);
req->rq_trans2_command = TRANSACT2_SETPATHINFO;
req->rq_lparm = p - param;
req->rq_parm = param;
req->rq_flags = 0;
result = smb_add_request(req);
if (result < 0)
goto out_free;
DEBUG1("for %s: result=%d, rcls=%d, err=%d\n",
&param[6], result, req->rq_rcls, req->rq_err);
result = 0;
out_free:
smb_rput(req);
out:
return result;
}
static int
smb_proc_query_cifsunix(struct smb_sb_info *server)
{
int result;
int major, minor;
u64 caps;
char param[2];
struct smb_request *req;
result = -ENOMEM;
if (! (req = smb_alloc_request(server, 100)))
goto out;
WSET(param, 0, SMB_QUERY_CIFS_UNIX_INFO);
req->rq_trans2_command = TRANSACT2_QFSINFO;
req->rq_ldata = 0;
req->rq_data = NULL;
req->rq_lparm = 2;
req->rq_parm = param;
req->rq_flags = 0;
result = smb_add_request(req);
if (result < 0)
goto out_free;
if (req->rq_ldata < 12) {
PARANOIA("Not enough data\n");
goto out_free;
}
major = WVAL(req->rq_data, 0);
minor = WVAL(req->rq_data, 2);
DEBUG1("Server implements CIFS Extensions for UNIX systems v%d.%d\n",
major, minor);
/* FIXME: verify that we are ok with this major/minor? */
caps = LVAL(req->rq_data, 4);
DEBUG1("Server capabilities 0x%016llx\n", caps);
out_free:
smb_rput(req);
out:
return result;
}
static void
install_ops(struct smb_ops *dst, struct smb_ops *src)
{
memcpy(dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
}
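/*
* Note, based on reading the code rather than the original comments:
* install_ops() copies only the first SMB_OPS_NUM_STATIC function
* pointers, so a call such as the (hypothetical) example below swaps
* the protocol-dependent entry points while leaving any trailing,
* dynamically assigned members of struct smb_ops untouched:
*
*	install_ops(server->ops, &smb_ops_winNT);
*/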
/* < LANMAN2 */
static struct smb_ops smb_ops_core =
{
.read = smb_proc_read,
.write = smb_proc_write,
.readdir = smb_proc_readdir_short,
.getattr = smb_proc_getattr_core,
.truncate = smb_proc_trunc32,
};
/* LANMAN2, OS/2, others? */
static struct smb_ops smb_ops_os2 =
{
.read = smb_proc_read,
.write = smb_proc_write,
.readdir = smb_proc_readdir_long,
.getattr = smb_proc_getattr_trans2_std,
.truncate = smb_proc_trunc32,
};
/* Win95, and possibly some NetApp versions too */
static struct smb_ops smb_ops_win95 =
{
.read = smb_proc_read, /* does not support 12word readX */
.write = smb_proc_write,
.readdir = smb_proc_readdir_long,
.getattr = smb_proc_getattr_95,
.truncate = smb_proc_trunc95,
};
/* Samba, NT4 and NT5 */
static struct smb_ops smb_ops_winNT =
{
.read = smb_proc_readX,
.write = smb_proc_writeX,
.readdir = smb_proc_readdir_long,
.getattr = smb_proc_getattr_trans2_all,
.truncate = smb_proc_trunc64,
};
/* Samba w/ unix extensions. Others? */
static struct smb_ops smb_ops_unix =
{
.read = smb_proc_readX,
.write = smb_proc_writeX,
.readdir = smb_proc_readdir_long,
.getattr = smb_proc_getattr_unix,
/* FIXME: core/ext/time setattr needs to be cleaned up! */
/* .setattr = smb_proc_setattr_unix, */
.truncate = smb_proc_trunc64,
};
/* Place holder until real ops are in place */
static struct smb_ops smb_ops_null =
{
.readdir = smb_proc_readdir_null,
.getattr = smb_proc_getattr_null,
};
void smb_install_null_ops(struct smb_ops *ops)
{
install_ops(ops, &smb_ops_null);
}
| gpl-2.0 |
Hashcode/android_kernel_samsung-jf-common | fs/btrfs/inode.c | 2482 | 206715 | /*
* Copyright (C) 2007 Oracle. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License v2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 021110-1307, USA.
*/
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/mount.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "volumes.h"
#include "compression.h"
#include "locking.h"
#include "free-space-cache.h"
#include "inode-map.h"
struct btrfs_iget_args {
u64 ino;
struct btrfs_root *root;
};
static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_dir_ro_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct address_space_operations btrfs_symlink_aops;
static const struct file_operations btrfs_dir_file_operations;
static struct extent_io_ops btrfs_extent_io_ops;
static struct kmem_cache *btrfs_inode_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_path_cachep;
struct kmem_cache *btrfs_free_space_cachep;
#define S_SHIFT 12
static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
[S_IFREG >> S_SHIFT] = BTRFS_FT_REG_FILE,
[S_IFDIR >> S_SHIFT] = BTRFS_FT_DIR,
[S_IFCHR >> S_SHIFT] = BTRFS_FT_CHRDEV,
[S_IFBLK >> S_SHIFT] = BTRFS_FT_BLKDEV,
[S_IFIFO >> S_SHIFT] = BTRFS_FT_FIFO,
[S_IFSOCK >> S_SHIFT] = BTRFS_FT_SOCK,
[S_IFLNK >> S_SHIFT] = BTRFS_FT_SYMLINK,
};
static int btrfs_setsize(struct inode *inode, loff_t newsize);
static int btrfs_truncate(struct inode *inode);
static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
static noinline int cow_file_range(struct inode *inode,
struct page *locked_page,
u64 start, u64 end, int *page_started,
unsigned long *nr_written, int unlock);
static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *inode);
static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
struct inode *inode, struct inode *dir,
const struct qstr *qstr)
{
int err;
err = btrfs_init_acl(trans, inode, dir);
if (!err)
err = btrfs_xattr_security_init(trans, inode, dir, qstr);
return err;
}
/*
* this does all the hard work for inserting an inline extent into
* the btree. The caller should have done a btrfs_drop_extents so that
* no overlapping inline items exist in the btree
*/
static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *inode,
u64 start, size_t size, size_t compressed_size,
int compress_type,
struct page **compressed_pages)
{
struct btrfs_key key;
struct btrfs_path *path;
struct extent_buffer *leaf;
struct page *page = NULL;
char *kaddr;
unsigned long ptr;
struct btrfs_file_extent_item *ei;
int err = 0;
int ret;
size_t cur_size = size;
size_t datasize;
unsigned long offset;
if (compressed_size && compressed_pages)
cur_size = compressed_size;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
path->leave_spinning = 1;
key.objectid = btrfs_ino(inode);
key.offset = start;
btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
datasize = btrfs_file_extent_calc_inline_size(cur_size);
inode_add_bytes(inode, size);
ret = btrfs_insert_empty_item(trans, root, path, &key,
datasize);
if (ret) {
err = ret;
goto fail;
}
leaf = path->nodes[0];
ei = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
btrfs_set_file_extent_generation(leaf, ei, trans->transid);
btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
btrfs_set_file_extent_encryption(leaf, ei, 0);
btrfs_set_file_extent_other_encoding(leaf, ei, 0);
btrfs_set_file_extent_ram_bytes(leaf, ei, size);
ptr = btrfs_file_extent_inline_start(ei);
if (compress_type != BTRFS_COMPRESS_NONE) {
struct page *cpage;
int i = 0;
while (compressed_size > 0) {
cpage = compressed_pages[i];
cur_size = min_t(unsigned long, compressed_size,
PAGE_CACHE_SIZE);
kaddr = kmap_atomic(cpage);
write_extent_buffer(leaf, kaddr, ptr, cur_size);
kunmap_atomic(kaddr);
i++;
ptr += cur_size;
compressed_size -= cur_size;
}
btrfs_set_file_extent_compression(leaf, ei,
compress_type);
} else {
page = find_get_page(inode->i_mapping,
start >> PAGE_CACHE_SHIFT);
btrfs_set_file_extent_compression(leaf, ei, 0);
kaddr = kmap_atomic(page);
offset = start & (PAGE_CACHE_SIZE - 1);
write_extent_buffer(leaf, kaddr + offset, ptr, size);
kunmap_atomic(kaddr);
page_cache_release(page);
}
btrfs_mark_buffer_dirty(leaf);
btrfs_free_path(path);
/*
* we're an inline extent, so nobody can
* extend the file past i_size without locking
* a page we already have locked.
*
* We must do any isize and inode updates
* before we unlock the pages. Otherwise we
* could end up racing with unlink.
*/
BTRFS_I(inode)->disk_i_size = inode->i_size;
ret = btrfs_update_inode(trans, root, inode);
return ret;
fail:
btrfs_free_path(path);
return err;
}
/*
* conditionally insert an inline extent into the file. This
* does the checks required to make sure the data is small enough
* to fit as an inline extent.
*/
static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct inode *inode, u64 start, u64 end,
size_t compressed_size, int compress_type,
struct page **compressed_pages)
{
u64 isize = i_size_read(inode);
u64 actual_end = min(end + 1, isize);
u64 inline_len = actual_end - start;
u64 aligned_end = (end + root->sectorsize - 1) &
~((u64)root->sectorsize - 1);
u64 hint_byte;
u64 data_len = inline_len;
int ret;
if (compressed_size)
data_len = compressed_size;
if (start > 0 ||
actual_end >= PAGE_CACHE_SIZE ||
data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
(!compressed_size &&
(actual_end & (root->sectorsize - 1)) == 0) ||
end + 1 < isize ||
data_len > root->fs_info->max_inline) {
return 1;
}
ret = btrfs_drop_extents(trans, inode, start, aligned_end,
&hint_byte, 1);
if (ret)
return ret;
if (isize > actual_end)
inline_len = min_t(u64, isize, actual_end);
ret = insert_inline_extent(trans, root, inode, start,
inline_len, compressed_size,
compress_type, compressed_pages);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
return ret;
}
btrfs_delalloc_release_metadata(inode, end + 1 - start);
btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
return 0;
}
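/*
* Illustrative restatement of the eligibility test above (a sketch of
* my reading, not the kernel's definitive rule set): an inline extent
* is attempted only when the data starts at file offset 0, ends
* inside the first page, reaches end of file, fits under both
* BTRFS_MAX_INLINE_DATA_SIZE() and the max_inline mount option, and
* is not an exact sectorsize multiple when stored uncompressed.
*/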
struct async_extent {
u64 start;
u64 ram_size;
u64 compressed_size;
struct page **pages;
unsigned long nr_pages;
int compress_type;
struct list_head list;
};
struct async_cow {
struct inode *inode;
struct btrfs_root *root;
struct page *locked_page;
u64 start;
u64 end;
struct list_head extents;
struct btrfs_work work;
};
static noinline int add_async_extent(struct async_cow *cow,
u64 start, u64 ram_size,
u64 compressed_size,
struct page **pages,
unsigned long nr_pages,
int compress_type)
{
struct async_extent *async_extent;
async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
BUG_ON(!async_extent); /* -ENOMEM */
async_extent->start = start;
async_extent->ram_size = ram_size;
async_extent->compressed_size = compressed_size;
async_extent->pages = pages;
async_extent->nr_pages = nr_pages;
async_extent->compress_type = compress_type;
list_add_tail(&async_extent->list, &cow->extents);
return 0;
}
/*
* we create compressed extents in two phases. The first
* phase compresses a range of pages that have already been
* locked (both pages and state bits are locked).
*
* This is done inside an ordered work queue, and the compression
* is spread across many cpus. The actual IO submission is step
* two, and the ordered work queue takes care of making sure that
* happens in the same order things were put onto the queue by
* writepages and friends.
*
* If this code finds it can't get good compression, it puts an
* entry onto the work queue to write the uncompressed bytes. This
* makes sure that both compressed inodes and uncompressed inodes
* are written in the same order that pdflush sent them down.
*/
static noinline int compress_file_range(struct inode *inode,
struct page *locked_page,
u64 start, u64 end,
struct async_cow *async_cow,
int *num_added)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
u64 num_bytes;
u64 blocksize = root->sectorsize;
u64 actual_end;
u64 isize = i_size_read(inode);
int ret = 0;
struct page **pages = NULL;
unsigned long nr_pages;
unsigned long nr_pages_ret = 0;
unsigned long total_compressed = 0;
unsigned long total_in = 0;
unsigned long max_compressed = 128 * 1024;
unsigned long max_uncompressed = 128 * 1024;
int i;
int will_compress;
int compress_type = root->fs_info->compress_type;
/* if this is a small write inside eof, kick off a defrag */
if ((end - start + 1) < 16 * 1024 &&
(start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
btrfs_add_inode_defrag(NULL, inode);
actual_end = min_t(u64, isize, end + 1);
again:
will_compress = 0;
nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);
/*
* we don't want to send crud past the end of i_size through
* compression, that's just a waste of CPU time. So, if the
* end of the file is before the start of our current
* requested range of bytes, we bail out to the uncompressed
* cleanup code that can deal with all of this.
*
* It isn't really the fastest way to fix things, but this is a
* very uncommon corner.
*/
if (actual_end <= start)
goto cleanup_and_bail_uncompressed;
total_compressed = actual_end - start;
/* we want to make sure that amount of ram required to uncompress
* an extent is reasonable, so we limit the total size in ram
* of a compressed extent to 128k. This is a crucial number
* because it also controls how easily we can spread reads across
* cpus for decompression.
*
* We also want to make sure the amount of IO required to do
* a random read is reasonably small, so we limit the size of
* a compressed extent to 128k.
*/
total_compressed = min(total_compressed, max_uncompressed);
num_bytes = (end - start + blocksize) & ~(blocksize - 1);
num_bytes = max(blocksize, num_bytes);
total_in = 0;
ret = 0;
/*
* we do compression for mount -o compress and when the
* inode has not been flagged as nocompress. This flag can
* change at any time if we discover bad compression ratios.
*/
if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
(btrfs_test_opt(root, COMPRESS) ||
(BTRFS_I(inode)->force_compress) ||
(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))) {
WARN_ON(pages);
pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
if (!pages) {
/* just bail out to the uncompressed code */
goto cont;
}
if (BTRFS_I(inode)->force_compress)
compress_type = BTRFS_I(inode)->force_compress;
ret = btrfs_compress_pages(compress_type,
inode->i_mapping, start,
total_compressed, pages,
nr_pages, &nr_pages_ret,
&total_in,
&total_compressed,
max_compressed);
if (!ret) {
unsigned long offset = total_compressed &
(PAGE_CACHE_SIZE - 1);
struct page *page = pages[nr_pages_ret - 1];
char *kaddr;
/* zero the tail end of the last page, we might be
* sending it down to disk
*/
if (offset) {
kaddr = kmap_atomic(page);
memset(kaddr + offset, 0,
PAGE_CACHE_SIZE - offset);
kunmap_atomic(kaddr);
}
will_compress = 1;
}
}
cont:
if (start == 0) {
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
trans = NULL;
goto cleanup_and_out;
}
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
/* let's try to make an inline extent */
if (ret || total_in < (actual_end - start)) {
/* we didn't compress the entire range, try
* to make an uncompressed inline extent.
*/
ret = cow_file_range_inline(trans, root, inode,
start, end, 0, 0, NULL);
} else {
/* try making a compressed inline extent */
ret = cow_file_range_inline(trans, root, inode,
start, end,
total_compressed,
compress_type, pages);
}
if (ret <= 0) {
/*
* inline extent creation worked or returned error,
* we don't need to create any more async work items.
* Unlock and free up our temp pages.
*/
extent_clear_unlock_delalloc(inode,
&BTRFS_I(inode)->io_tree,
start, end, NULL,
EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
EXTENT_CLEAR_DELALLOC |
EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK);
btrfs_end_transaction(trans, root);
goto free_pages_out;
}
btrfs_end_transaction(trans, root);
}
if (will_compress) {
/*
* we aren't doing an inline extent, so round the compressed size
* up to a block size boundary so the allocator does sane
* things
*/
total_compressed = (total_compressed + blocksize - 1) &
~(blocksize - 1);
/*
* one last check to make sure the compression is really a
* win, compare the page count read with the blocks on disk
*/
total_in = (total_in + PAGE_CACHE_SIZE - 1) &
~(PAGE_CACHE_SIZE - 1);
if (total_compressed >= total_in) {
will_compress = 0;
} else {
num_bytes = total_in;
}
}
if (!will_compress && pages) {
/*
* the compression code ran but failed to make things smaller,
* free any pages it allocated and our page pointer array
*/
for (i = 0; i < nr_pages_ret; i++) {
WARN_ON(pages[i]->mapping);
page_cache_release(pages[i]);
}
kfree(pages);
pages = NULL;
total_compressed = 0;
nr_pages_ret = 0;
/* flag the file so we don't compress in the future */
if (!btrfs_test_opt(root, FORCE_COMPRESS) &&
!(BTRFS_I(inode)->force_compress)) {
BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
}
}
if (will_compress) {
*num_added += 1;
/* the async work queues will take care of doing actual
* allocation on disk for these compressed pages,
* and will submit them to the elevator.
*/
add_async_extent(async_cow, start, num_bytes,
total_compressed, pages, nr_pages_ret,
compress_type);
if (start + num_bytes < end) {
start += num_bytes;
pages = NULL;
cond_resched();
goto again;
}
} else {
cleanup_and_bail_uncompressed:
/*
* No compression, but we still need to write the pages in
* the file we've been given so far. redirty the locked
* page if it corresponds to our extent and set things up
* for the async work queue to run cow_file_range to do
* the normal delalloc dance
*/
if (page_offset(locked_page) >= start &&
page_offset(locked_page) <= end) {
__set_page_dirty_nobuffers(locked_page);
/* unlocked later on in the async handlers */
}
add_async_extent(async_cow, start, end - start + 1,
0, NULL, 0, BTRFS_COMPRESS_NONE);
*num_added += 1;
}
out:
return ret;
free_pages_out:
for (i = 0; i < nr_pages_ret; i++) {
WARN_ON(pages[i]->mapping);
page_cache_release(pages[i]);
}
kfree(pages);
goto out;
cleanup_and_out:
extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
start, end, NULL,
EXTENT_CLEAR_UNLOCK_PAGE |
EXTENT_CLEAR_DIRTY |
EXTENT_CLEAR_DELALLOC |
EXTENT_SET_WRITEBACK |
EXTENT_END_WRITEBACK);
if (!trans || IS_ERR(trans))
btrfs_error(root->fs_info, ret, "Failed to join transaction");
else
btrfs_abort_transaction(trans, root, ret);
goto free_pages_out;
}
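/*
* Sketch of the rounding idiom used throughout this file, assuming
* blocksize is a power of two: adding blocksize - 1 and masking with
* ~(blocksize - 1) rounds a length up to the next block boundary,
* e.g. 1..4096 all become 4096 for a 4K block. Illustrative helper:
*/
static inline u64 btrfs_sketch_round_up(u64 len, u64 blocksize)
{
return (len + blocksize - 1) & ~(blocksize - 1);
}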
/*
* phase two of compressed writeback. This is the ordered portion
* of the code, which only gets called in the order the work was
* queued. We walk all the async extents created by compress_file_range
* and send them down to the disk.
*/
static noinline int submit_compressed_extents(struct inode *inode,
struct async_cow *async_cow)
{
struct async_extent *async_extent;
u64 alloc_hint = 0;
struct btrfs_trans_handle *trans;
struct btrfs_key ins;
struct extent_map *em;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
struct extent_io_tree *io_tree;
int ret = 0;
if (list_empty(&async_cow->extents))
return 0;
while (!list_empty(&async_cow->extents)) {
async_extent = list_entry(async_cow->extents.next,
struct async_extent, list);
list_del(&async_extent->list);
io_tree = &BTRFS_I(inode)->io_tree;
retry:
/* did the compression code fall back to uncompressed IO? */
if (!async_extent->pages) {
int page_started = 0;
unsigned long nr_written = 0;
lock_extent(io_tree, async_extent->start,
async_extent->start +
async_extent->ram_size - 1);
/* allocate blocks */
ret = cow_file_range(inode, async_cow->locked_page,
async_extent->start,
async_extent->start +
async_extent->ram_size - 1,
&page_started, &nr_written, 0);
/* JDM XXX */
/*
* if page_started, cow_file_range inserted an
* inline extent and took care of all the unlocking
* and IO for us. Otherwise, we need to submit
* all those pages down to the drive.
*/
if (!page_started && !ret)
extent_write_locked_range(io_tree,
inode, async_extent->start,
async_extent->start +
async_extent->ram_size - 1,
btrfs_get_extent,
WB_SYNC_ALL);
kfree(async_extent);
cond_resched();
continue;
}
lock_extent(io_tree, async_extent->start,
async_extent->start + async_extent->ram_size - 1);
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
} else {
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
ret = btrfs_reserve_extent(trans, root,
async_extent->compressed_size,
async_extent->compressed_size,
0, alloc_hint, &ins, 1);
if (ret)
btrfs_abort_transaction(trans, root, ret);
btrfs_end_transaction(trans, root);
}
if (ret) {
int i;
for (i = 0; i < async_extent->nr_pages; i++) {
WARN_ON(async_extent->pages[i]->mapping);
page_cache_release(async_extent->pages[i]);
}
kfree(async_extent->pages);
async_extent->nr_pages = 0;
async_extent->pages = NULL;
unlock_extent(io_tree, async_extent->start,
async_extent->start +
async_extent->ram_size - 1);
if (ret == -ENOSPC)
goto retry;
goto out_free; /* JDM: Requeue? */
}
/*
* here we're doing allocation and writeback of the
* compressed pages
*/
btrfs_drop_extent_cache(inode, async_extent->start,
async_extent->start +
async_extent->ram_size - 1, 0);
em = alloc_extent_map();
BUG_ON(!em); /* -ENOMEM */
em->start = async_extent->start;
em->len = async_extent->ram_size;
em->orig_start = em->start;
em->block_start = ins.objectid;
em->block_len = ins.offset;
em->bdev = root->fs_info->fs_devices->latest_bdev;
em->compress_type = async_extent->compress_type;
set_bit(EXTENT_FLAG_PINNED, &em->flags);
set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
while (1) {
write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em);
write_unlock(&em_tree->lock);
if (ret != -EEXIST) {
free_extent_map(em);
break;
}
btrfs_drop_extent_cache(inode, async_extent->start,
async_extent->start +
async_extent->ram_size - 1, 0);
}
ret = btrfs_add_ordered_extent_compress(inode,
async_extent->start,
ins.objectid,
async_extent->ram_size,
ins.offset,
BTRFS_ORDERED_COMPRESSED,
async_extent->compress_type);
BUG_ON(ret); /* -ENOMEM */
/*
* clear dirty, set writeback and unlock the pages.
*/
extent_clear_unlock_delalloc(inode,
&BTRFS_I(inode)->io_tree,
async_extent->start,
async_extent->start +
async_extent->ram_size - 1,
NULL, EXTENT_CLEAR_UNLOCK_PAGE |
EXTENT_CLEAR_UNLOCK |
EXTENT_CLEAR_DELALLOC |
EXTENT_CLEAR_DIRTY | EXTENT_SET_WRITEBACK);
ret = btrfs_submit_compressed_write(inode,
async_extent->start,
async_extent->ram_size,
ins.objectid,
ins.offset, async_extent->pages,
async_extent->nr_pages);
BUG_ON(ret); /* -ENOMEM */
alloc_hint = ins.objectid + ins.offset;
kfree(async_extent);
cond_resched();
}
ret = 0;
out:
return ret;
out_free:
kfree(async_extent);
goto out;
}
static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
u64 num_bytes)
{
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
struct extent_map *em;
u64 alloc_hint = 0;
read_lock(&em_tree->lock);
em = search_extent_mapping(em_tree, start, num_bytes);
if (em) {
/*
* if block start isn't an actual block number then find the
* first block in this inode and use that as a hint. If that
* block is also bogus then just don't worry about it.
*/
if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
free_extent_map(em);
em = search_extent_mapping(em_tree, 0, 0);
if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
alloc_hint = em->block_start;
if (em)
free_extent_map(em);
} else {
alloc_hint = em->block_start;
free_extent_map(em);
}
}
read_unlock(&em_tree->lock);
return alloc_hint;
}
/*
* when extent_io.c finds a delayed allocation range in the file,
* the callbacks end up in this code. The basic idea is to
* allocate extents on disk for the range, and create ordered data structs
* in ram to track those extents.
*
* locked_page is the page that writepage had locked already. We use
* it to make sure we don't do extra locks or unlocks.
*
* *page_started is set to one if we unlock locked_page and do everything
* required to start IO on it. It may be clean and already done with
* IO when we return.
*/
static noinline int cow_file_range(struct inode *inode,
struct page *locked_page,
u64 start, u64 end, int *page_started,
unsigned long *nr_written,
int unlock)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
u64 alloc_hint = 0;
u64 num_bytes;
unsigned long ram_size;
u64 disk_num_bytes;
u64 cur_alloc_size;
u64 blocksize = root->sectorsize;
struct btrfs_key ins;
struct extent_map *em;
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
int ret = 0;
BUG_ON(btrfs_is_free_space_inode(root, inode));
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
extent_clear_unlock_delalloc(inode,
&BTRFS_I(inode)->io_tree,
start, end, NULL,
EXTENT_CLEAR_UNLOCK_PAGE |
EXTENT_CLEAR_UNLOCK |
EXTENT_CLEAR_DELALLOC |
EXTENT_CLEAR_DIRTY |
EXTENT_SET_WRITEBACK |
EXTENT_END_WRITEBACK);
return PTR_ERR(trans);
}
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
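/*
 * round the inclusive byte range up to a multiple of the block size;
 * e.g. with a 4096 byte blocksize, start 0 and end 6143 give
 * (6143 - 0 + 4096) & ~4095 = 8192, i.e. two full blocks
 */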
num_bytes = (end - start + blocksize) & ~(blocksize - 1);
num_bytes = max(blocksize, num_bytes);
disk_num_bytes = num_bytes;
ret = 0;
/* if this is a small write inside eof, kick off defrag */
if (num_bytes < 64 * 1024 &&
(start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
btrfs_add_inode_defrag(trans, inode);
if (start == 0) {
/* lets try to make an inline extent */
ret = cow_file_range_inline(trans, root, inode,
start, end, 0, 0, NULL);
if (ret == 0) {
extent_clear_unlock_delalloc(inode,
&BTRFS_I(inode)->io_tree,
start, end, NULL,
EXTENT_CLEAR_UNLOCK_PAGE |
EXTENT_CLEAR_UNLOCK |
EXTENT_CLEAR_DELALLOC |
EXTENT_CLEAR_DIRTY |
EXTENT_SET_WRITEBACK |
EXTENT_END_WRITEBACK);
*nr_written = *nr_written +
(end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
*page_started = 1;
goto out;
} else if (ret < 0) {
btrfs_abort_transaction(trans, root, ret);
goto out_unlock;
}
}
BUG_ON(disk_num_bytes >
btrfs_super_total_bytes(root->fs_info->super_copy));
alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
while (disk_num_bytes > 0) {
unsigned long op;
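/*
 * ask the allocator for the whole remaining range; it may return
 * a smaller chunk (ins.offset), in which case the loop continues
 * with what is left
 */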
cur_alloc_size = disk_num_bytes;
ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
root->sectorsize, 0, alloc_hint,
&ins, 1);
if (ret < 0) {
btrfs_abort_transaction(trans, root, ret);
goto out_unlock;
}
em = alloc_extent_map();
BUG_ON(!em); /* -ENOMEM */
em->start = start;
em->orig_start = em->start;
ram_size = ins.offset;
em->len = ins.offset;
em->block_start = ins.objectid;
em->block_len = ins.offset;
em->bdev = root->fs_info->fs_devices->latest_bdev;
set_bit(EXTENT_FLAG_PINNED, &em->flags);
while (1) {
write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em);
write_unlock(&em_tree->lock);
if (ret != -EEXIST) {
free_extent_map(em);
break;
}
btrfs_drop_extent_cache(inode, start,
start + ram_size - 1, 0);
}
cur_alloc_size = ins.offset;
ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
ram_size, cur_alloc_size, 0);
BUG_ON(ret); /* -ENOMEM */
if (root->root_key.objectid ==
BTRFS_DATA_RELOC_TREE_OBJECTID) {
ret = btrfs_reloc_clone_csums(inode, start,
cur_alloc_size);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
goto out_unlock;
}
}
if (disk_num_bytes < cur_alloc_size)
break;
/* we're not doing compressed IO, don't unlock the first
* page (which the caller expects to stay locked), don't
* clear any dirty bits and don't set any writeback bits
*
* Do set the Private2 bit so we know this page was properly
* set up for writepage
*/
op = unlock ? EXTENT_CLEAR_UNLOCK_PAGE : 0;
op |= EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
EXTENT_SET_PRIVATE2;
extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
start, start + ram_size - 1,
locked_page, op);
disk_num_bytes -= cur_alloc_size;
num_bytes -= cur_alloc_size;
alloc_hint = ins.objectid + ins.offset;
start += cur_alloc_size;
}
ret = 0;
out:
btrfs_end_transaction(trans, root);
return ret;
out_unlock:
extent_clear_unlock_delalloc(inode,
&BTRFS_I(inode)->io_tree,
start, end, NULL,
EXTENT_CLEAR_UNLOCK_PAGE |
EXTENT_CLEAR_UNLOCK |
EXTENT_CLEAR_DELALLOC |
EXTENT_CLEAR_DIRTY |
EXTENT_SET_WRITEBACK |
EXTENT_END_WRITEBACK);
goto out;
}
/*
* work queue callback to start compression on a file and its pages
*/
static noinline void async_cow_start(struct btrfs_work *work)
{
struct async_cow *async_cow;
int num_added = 0;
async_cow = container_of(work, struct async_cow, work);
compress_file_range(async_cow->inode, async_cow->locked_page,
async_cow->start, async_cow->end, async_cow,
&num_added);
if (num_added == 0)
async_cow->inode = NULL;
}
/*
* work queue callback to submit previously compressed pages
*/
static noinline void async_cow_submit(struct btrfs_work *work)
{
struct async_cow *async_cow;
struct btrfs_root *root;
unsigned long nr_pages;
async_cow = container_of(work, struct async_cow, work);
root = async_cow->root;
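/* round the inclusive byte range up to a whole number of pages */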
nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
PAGE_CACHE_SHIFT;
atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages);
if (atomic_read(&root->fs_info->async_delalloc_pages) <
5 * 1024 * 1024 &&
waitqueue_active(&root->fs_info->async_submit_wait))
wake_up(&root->fs_info->async_submit_wait);
if (async_cow->inode)
submit_compressed_extents(async_cow->inode, async_cow);
}
static noinline void async_cow_free(struct btrfs_work *work)
{
struct async_cow *async_cow;
async_cow = container_of(work, struct async_cow, work);
kfree(async_cow);
}
static int cow_file_range_async(struct inode *inode, struct page *locked_page,
u64 start, u64 end, int *page_started,
unsigned long *nr_written)
{
struct async_cow *async_cow;
struct btrfs_root *root = BTRFS_I(inode)->root;
unsigned long nr_pages;
u64 cur_end;
int limit = 10 * 1024 * 1024;
clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
1, 0, NULL, GFP_NOFS);
while (start < end) {
async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
BUG_ON(!async_cow); /* -ENOMEM */
async_cow->inode = inode;
async_cow->root = root;
async_cow->locked_page = locked_page;
async_cow->start = start;
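/*
 * carve the range into 512K work units so compression can run in
 * parallel; inodes flagged nocompress get the whole range as a
 * single unit
 */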
if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
cur_end = end;
else
cur_end = min(end, start + 512 * 1024 - 1);
async_cow->end = cur_end;
INIT_LIST_HEAD(&async_cow->extents);
async_cow->work.func = async_cow_start;
async_cow->work.ordered_func = async_cow_submit;
async_cow->work.ordered_free = async_cow_free;
async_cow->work.flags = 0;
nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
PAGE_CACHE_SHIFT;
atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);
btrfs_queue_worker(&root->fs_info->delalloc_workers,
&async_cow->work);
if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
wait_event(root->fs_info->async_submit_wait,
(atomic_read(&root->fs_info->async_delalloc_pages) <
limit));
}
while (atomic_read(&root->fs_info->async_submit_draining) &&
atomic_read(&root->fs_info->async_delalloc_pages)) {
wait_event(root->fs_info->async_submit_wait,
(atomic_read(&root->fs_info->async_delalloc_pages) ==
0));
}
*nr_written += nr_pages;
start = cur_end + 1;
}
*page_started = 1;
return 0;
}
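/*
 * returns 0 only when no csum items exist for the range
 * [bytenr, bytenr + num_bytes); the sums found are just an
 * existence test and are freed before returning 1. A failed
 * lookup also returns 1, which safely forces cow.
 */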
static noinline int csum_exist_in_range(struct btrfs_root *root,
u64 bytenr, u64 num_bytes)
{
int ret;
struct btrfs_ordered_sum *sums;
LIST_HEAD(list);
ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
bytenr + num_bytes - 1, &list, 0);
if (ret == 0 && list_empty(&list))
return 0;
while (!list_empty(&list)) {
sums = list_entry(list.next, struct btrfs_ordered_sum, list);
list_del(&sums->list);
kfree(sums);
}
return 1;
}
/*
* when nocow writeback calls back, this checks for snapshots or COW copies
* of the extents that exist in the file, and COWs the file as required.
*
* If no cow copies or snapshots exist, we write directly to the existing
* blocks on disk
*/
static noinline int run_delalloc_nocow(struct inode *inode,
struct page *locked_page,
u64 start, u64 end, int *page_started, int force,
unsigned long *nr_written)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
struct extent_buffer *leaf;
struct btrfs_path *path;
struct btrfs_file_extent_item *fi;
struct btrfs_key found_key;
u64 cow_start;
u64 cur_offset;
u64 extent_end;
u64 extent_offset;
u64 disk_bytenr;
u64 num_bytes;
int extent_type;
int ret, err;
int type;
int nocow;
int check_prev = 1;
bool nolock;
u64 ino = btrfs_ino(inode);
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
nolock = btrfs_is_free_space_inode(root, inode);
if (nolock)
trans = btrfs_join_transaction_nolock(root);
else
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
btrfs_free_path(path);
return PTR_ERR(trans);
}
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
cow_start = (u64)-1;
cur_offset = start;
while (1) {
ret = btrfs_lookup_file_extent(trans, root, path, ino,
cur_offset, 0);
if (ret < 0) {
btrfs_abort_transaction(trans, root, ret);
goto error;
}
if (ret > 0 && path->slots[0] > 0 && check_prev) {
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &found_key,
path->slots[0] - 1);
if (found_key.objectid == ino &&
found_key.type == BTRFS_EXTENT_DATA_KEY)
path->slots[0]--;
}
check_prev = 0;
next_slot:
leaf = path->nodes[0];
if (path->slots[0] >= btrfs_header_nritems(leaf)) {
ret = btrfs_next_leaf(root, path);
if (ret < 0) {
btrfs_abort_transaction(trans, root, ret);
goto error;
}
if (ret > 0)
break;
leaf = path->nodes[0];
}
nocow = 0;
disk_bytenr = 0;
num_bytes = 0;
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
if (found_key.objectid > ino ||
found_key.type > BTRFS_EXTENT_DATA_KEY ||
found_key.offset > end)
break;
if (found_key.offset > cur_offset) {
extent_end = found_key.offset;
extent_type = 0;
goto out_check;
}
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
extent_type = btrfs_file_extent_type(leaf, fi);
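/*
 * writing in place requires a real extent (not a hole) that is
 * uncompressed and unencoded, not shared with another root, not
 * read only, and has no csums in the range; regular extents also
 * need the force flag, preallocated extents do not
 */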
if (extent_type == BTRFS_FILE_EXTENT_REG ||
extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
extent_offset = btrfs_file_extent_offset(leaf, fi);
extent_end = found_key.offset +
btrfs_file_extent_num_bytes(leaf, fi);
if (extent_end <= start) {
path->slots[0]++;
goto next_slot;
}
if (disk_bytenr == 0)
goto out_check;
if (btrfs_file_extent_compression(leaf, fi) ||
btrfs_file_extent_encryption(leaf, fi) ||
btrfs_file_extent_other_encoding(leaf, fi))
goto out_check;
if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
goto out_check;
if (btrfs_extent_readonly(root, disk_bytenr))
goto out_check;
if (btrfs_cross_ref_exist(trans, root, ino,
found_key.offset -
extent_offset, disk_bytenr))
goto out_check;
disk_bytenr += extent_offset;
disk_bytenr += cur_offset - found_key.offset;
num_bytes = min(end + 1, extent_end) - cur_offset;
/*
* force cow if csums exist in the range.
* this ensures that the csums for a given extent
* are either valid or do not exist.
*/
if (csum_exist_in_range(root, disk_bytenr, num_bytes))
goto out_check;
nocow = 1;
} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
extent_end = found_key.offset +
btrfs_file_extent_inline_len(leaf, fi);
extent_end = ALIGN(extent_end, root->sectorsize);
} else {
BUG_ON(1);
}
out_check:
if (extent_end <= start) {
path->slots[0]++;
goto next_slot;
}
if (!nocow) {
if (cow_start == (u64)-1)
cow_start = cur_offset;
cur_offset = extent_end;
if (cur_offset > end)
break;
path->slots[0]++;
goto next_slot;
}
btrfs_release_path(path);
if (cow_start != (u64)-1) {
ret = cow_file_range(inode, locked_page, cow_start,
found_key.offset - 1, page_started,
nr_written, 1);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
goto error;
}
cow_start = (u64)-1;
}
if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
struct extent_map *em;
struct extent_map_tree *em_tree;
em_tree = &BTRFS_I(inode)->extent_tree;
em = alloc_extent_map();
BUG_ON(!em); /* -ENOMEM */
em->start = cur_offset;
em->orig_start = em->start;
em->len = num_bytes;
em->block_len = num_bytes;
em->block_start = disk_bytenr;
em->bdev = root->fs_info->fs_devices->latest_bdev;
set_bit(EXTENT_FLAG_PINNED, &em->flags);
while (1) {
write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em);
write_unlock(&em_tree->lock);
if (ret != -EEXIST) {
free_extent_map(em);
break;
}
btrfs_drop_extent_cache(inode, em->start,
em->start + em->len - 1, 0);
}
type = BTRFS_ORDERED_PREALLOC;
} else {
type = BTRFS_ORDERED_NOCOW;
}
ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
num_bytes, num_bytes, type);
BUG_ON(ret); /* -ENOMEM */
if (root->root_key.objectid ==
BTRFS_DATA_RELOC_TREE_OBJECTID) {
ret = btrfs_reloc_clone_csums(inode, cur_offset,
num_bytes);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
goto error;
}
}
extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
cur_offset, cur_offset + num_bytes - 1,
locked_page, EXTENT_CLEAR_UNLOCK_PAGE |
EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
EXTENT_SET_PRIVATE2);
cur_offset = extent_end;
if (cur_offset > end)
break;
}
btrfs_release_path(path);
if (cur_offset <= end && cow_start == (u64)-1)
cow_start = cur_offset;
if (cow_start != (u64)-1) {
ret = cow_file_range(inode, locked_page, cow_start, end,
page_started, nr_written, 1);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
goto error;
}
}
error:
if (nolock) {
err = btrfs_end_transaction_nolock(trans, root);
} else {
err = btrfs_end_transaction(trans, root);
}
if (!ret)
ret = err;
btrfs_free_path(path);
return ret;
}
/*
* extent_io.c callback to do delayed allocation processing
*/
static int run_delalloc_range(struct inode *inode, struct page *locked_page,
u64 start, u64 end, int *page_started,
unsigned long *nr_written)
{
int ret;
struct btrfs_root *root = BTRFS_I(inode)->root;
if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)
ret = run_delalloc_nocow(inode, locked_page, start, end,
page_started, 1, nr_written);
else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)
ret = run_delalloc_nocow(inode, locked_page, start, end,
page_started, 0, nr_written);
else if (!btrfs_test_opt(root, COMPRESS) &&
!(BTRFS_I(inode)->force_compress) &&
!(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))
ret = cow_file_range(inode, locked_page, start, end,
page_started, nr_written, 1);
else
ret = cow_file_range_async(inode, locked_page, start, end,
page_started, nr_written);
return ret;
}
static void btrfs_split_extent_hook(struct inode *inode,
struct extent_state *orig, u64 split)
{
/* not delalloc, ignore it */
if (!(orig->state & EXTENT_DELALLOC))
return;
spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->outstanding_extents++;
spin_unlock(&BTRFS_I(inode)->lock);
}
/*
* extent_io.c merge_extent_hook, used to track merged delayed allocation
* extents so we can keep track of new extents that are just merged onto old
* extents, such as when we are doing sequential writes, so we can properly
* account for the metadata space we'll need.
*/
static void btrfs_merge_extent_hook(struct inode *inode,
struct extent_state *new,
struct extent_state *other)
{
/* not delalloc, ignore it */
if (!(other->state & EXTENT_DELALLOC))
return;
spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->outstanding_extents--;
spin_unlock(&BTRFS_I(inode)->lock);
}
/*
* extent_io.c set_bit_hook, used to track delayed allocation
* bytes in this file, and to maintain the list of inodes that
* have pending delalloc work to be done.
*/
static void btrfs_set_bit_hook(struct inode *inode,
struct extent_state *state, int *bits)
{
/*
* set_bit and clear_bit hooks normally require _irqsave/restore
* but in this case, we are only testing for the DELALLOC
* bit, which is only set or cleared with irqs on
*/
if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
struct btrfs_root *root = BTRFS_I(inode)->root;
u64 len = state->end + 1 - state->start;
bool do_list = !btrfs_is_free_space_inode(root, inode);
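/*
 * EXTENT_FIRST_DELALLOC marks the hook call whose extent was
 * already counted when the metadata was reserved; clear it so
 * later calls for the same range each count one more
 * outstanding extent
 */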
if (*bits & EXTENT_FIRST_DELALLOC) {
*bits &= ~EXTENT_FIRST_DELALLOC;
} else {
spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->outstanding_extents++;
spin_unlock(&BTRFS_I(inode)->lock);
}
spin_lock(&root->fs_info->delalloc_lock);
BTRFS_I(inode)->delalloc_bytes += len;
root->fs_info->delalloc_bytes += len;
if (do_list && list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
&root->fs_info->delalloc_inodes);
}
spin_unlock(&root->fs_info->delalloc_lock);
}
}
/*
* extent_io.c clear_bit_hook, see set_bit_hook for why
*/
static void btrfs_clear_bit_hook(struct inode *inode,
struct extent_state *state, int *bits)
{
/*
* set_bit and clear_bit hooks normally require _irqsave/restore
* but in this case, we are only testing for the DELALLOC
* bit, which is only set or cleared with irqs on
*/
if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
struct btrfs_root *root = BTRFS_I(inode)->root;
u64 len = state->end + 1 - state->start;
bool do_list = !btrfs_is_free_space_inode(root, inode);
if (*bits & EXTENT_FIRST_DELALLOC) {
*bits &= ~EXTENT_FIRST_DELALLOC;
} else if (!(*bits & EXTENT_DO_ACCOUNTING)) {
spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->outstanding_extents--;
spin_unlock(&BTRFS_I(inode)->lock);
}
if (*bits & EXTENT_DO_ACCOUNTING)
btrfs_delalloc_release_metadata(inode, len);
if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
&& do_list)
btrfs_free_reserved_data_space(inode, len);
spin_lock(&root->fs_info->delalloc_lock);
root->fs_info->delalloc_bytes -= len;
BTRFS_I(inode)->delalloc_bytes -= len;
if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 &&
!list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
list_del_init(&BTRFS_I(inode)->delalloc_inodes);
}
spin_unlock(&root->fs_info->delalloc_lock);
}
}
/*
* extent_io.c merge_bio_hook, this must check the chunk tree to make sure
* we don't create bios that span stripes or chunks
*/
int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
size_t size, struct bio *bio,
unsigned long bio_flags)
{
struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
struct btrfs_mapping_tree *map_tree;
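/* bi_sector counts 512 byte sectors, so shift left by 9 to get bytes */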
u64 logical = (u64)bio->bi_sector << 9;
u64 length = 0;
u64 map_length;
int ret;
if (bio_flags & EXTENT_BIO_COMPRESSED)
return 0;
length = bio->bi_size;
map_tree = &root->fs_info->mapping_tree;
map_length = length;
ret = btrfs_map_block(map_tree, READ, logical,
&map_length, NULL, 0);
/* Will always return 0 or 1 with map_multi == NULL */
BUG_ON(ret < 0);
if (map_length < length + size)
return 1;
return 0;
}
/*
* in order to insert checksums into the metadata in large chunks,
* we wait until bio submission time. All the pages in the bio are
* checksummed and sums are attached onto the ordered extent record.
*
* At IO completion time the csums attached on the ordered extent record
* are inserted into the btree
*/
static int __btrfs_submit_bio_start(struct inode *inode, int rw,
struct bio *bio, int mirror_num,
unsigned long bio_flags,
u64 bio_offset)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
int ret = 0;
ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
BUG_ON(ret); /* -ENOMEM */
return 0;
}
/*
* in order to insert checksums into the metadata in large chunks,
* we wait until bio submission time. All the pages in the bio are
* checksummed and sums are attached onto the ordered extent record.
*
* At IO completion time the csums attached on the ordered extent record
* are inserted into the btree
*/
static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
int mirror_num, unsigned long bio_flags,
u64 bio_offset)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
return btrfs_map_bio(root, rw, bio, mirror_num, 1);
}
/*
* extent_io.c submission hook. This does the right thing for csum calculation
* on write, or reading the csums from the tree before a read
*/
static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
int mirror_num, unsigned long bio_flags,
u64 bio_offset)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
int ret = 0;
int skip_sum;
int metadata = 0;
skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
if (btrfs_is_free_space_inode(root, inode))
metadata = 2;
ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
if (ret)
return ret;
if (!(rw & REQ_WRITE)) {
if (bio_flags & EXTENT_BIO_COMPRESSED) {
return btrfs_submit_compressed_read(inode, bio,
mirror_num, bio_flags);
} else if (!skip_sum) {
ret = btrfs_lookup_bio_sums(root, inode, bio, NULL);
if (ret)
return ret;
}
goto mapit;
} else if (!skip_sum) {
/* csum items have already been cloned */
if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
goto mapit;
/* we're doing a write, do the async checksumming */
return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
inode, rw, bio, mirror_num,
bio_flags, bio_offset,
__btrfs_submit_bio_start,
__btrfs_submit_bio_done);
}
mapit:
return btrfs_map_bio(root, rw, bio, mirror_num, 0);
}
/*
* given a list of ordered sums record them in the inode. This happens
* at IO completion time based on sums calculated at bio submission time.
*/
static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
struct inode *inode, u64 file_offset,
struct list_head *list)
{
struct btrfs_ordered_sum *sum;
list_for_each_entry(sum, list, list) {
btrfs_csum_file_blocks(trans,
BTRFS_I(inode)->root->fs_info->csum_root, sum);
}
return 0;
}
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
struct extent_state **cached_state)
{
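/* end is inclusive, so a page aligned end means the caller is off by one */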
if ((end & (PAGE_CACHE_SIZE - 1)) == 0)
WARN_ON(1);
return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
cached_state, GFP_NOFS);
}
/* see btrfs_writepage_start_hook for details on why this is required */
struct btrfs_writepage_fixup {
struct page *page;
struct btrfs_work work;
};
static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
{
struct btrfs_writepage_fixup *fixup;
struct btrfs_ordered_extent *ordered;
struct extent_state *cached_state = NULL;
struct page *page;
struct inode *inode;
u64 page_start;
u64 page_end;
int ret;
fixup = container_of(work, struct btrfs_writepage_fixup, work);
page = fixup->page;
again:
lock_page(page);
if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
ClearPageChecked(page);
goto out_page;
}
inode = page->mapping->host;
page_start = page_offset(page);
page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
&cached_state);
/* already ordered? We're done */
if (PagePrivate2(page))
goto out;
ordered = btrfs_lookup_ordered_extent(inode, page_start);
if (ordered) {
unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
page_end, &cached_state, GFP_NOFS);
unlock_page(page);
btrfs_start_ordered_extent(inode, ordered, 1);
btrfs_put_ordered_extent(ordered);
goto again;
}
ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
if (ret) {
mapping_set_error(page->mapping, ret);
end_extent_writepage(page, ret, page_start, page_end);
ClearPageChecked(page);
goto out;
}
btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
ClearPageChecked(page);
set_page_dirty(page);
out:
unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
&cached_state, GFP_NOFS);
out_page:
unlock_page(page);
page_cache_release(page);
kfree(fixup);
}
/*
* There are a few paths in the higher layers of the kernel that directly
* set the page dirty bit without asking the filesystem if it is a
* good idea. This causes problems because we want to make sure COW
* properly happens and the data=ordered rules are followed.
*
* In our case any range that doesn't have the ORDERED bit set
* hasn't been properly set up for IO. We kick off an async process
* to fix it up. The async helper will wait for ordered extents, set
* the delalloc bit and make it safe to write the page.
*/
static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
{
struct inode *inode = page->mapping->host;
struct btrfs_writepage_fixup *fixup;
struct btrfs_root *root = BTRFS_I(inode)->root;
/* this page is properly in the ordered list */
if (TestClearPagePrivate2(page))
return 0;
if (PageChecked(page))
return -EAGAIN;
fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
if (!fixup)
return -EAGAIN;
SetPageChecked(page);
page_cache_get(page);
fixup->work.func = btrfs_writepage_fixup_worker;
fixup->page = page;
btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
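/*
 * -EBUSY makes extent_io.c skip writeback of this page; the
 * fixup worker queued above will redirty the page once the
 * delalloc state is properly set up
 */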
return -EBUSY;
}
static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
struct inode *inode, u64 file_pos,
u64 disk_bytenr, u64 disk_num_bytes,
u64 num_bytes, u64 ram_bytes,
u8 compression, u8 encryption,
u16 other_encoding, int extent_type)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_file_extent_item *fi;
struct btrfs_path *path;
struct extent_buffer *leaf;
struct btrfs_key ins;
u64 hint;
int ret;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
path->leave_spinning = 1;
/*
* we may be replacing one extent in the tree with another.
* The new extent is pinned in the extent map, and we don't want
* to drop it from the cache until it is completely in the btree.
*
* So, tell btrfs_drop_extents to leave this extent in the cache.
* the caller is expected to unpin it and allow it to be merged
* with the others.
*/
ret = btrfs_drop_extents(trans, inode, file_pos, file_pos + num_bytes,
&hint, 0);
if (ret)
goto out;
ins.objectid = btrfs_ino(inode);
ins.offset = file_pos;
ins.type = BTRFS_EXTENT_DATA_KEY;
ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
if (ret)
goto out;
leaf = path->nodes[0];
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
btrfs_set_file_extent_generation(leaf, fi, trans->transid);
btrfs_set_file_extent_type(leaf, fi, extent_type);
btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
btrfs_set_file_extent_offset(leaf, fi, 0);
btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
btrfs_set_file_extent_compression(leaf, fi, compression);
btrfs_set_file_extent_encryption(leaf, fi, encryption);
btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
btrfs_unlock_up_safe(path, 1);
btrfs_set_lock_blocking(leaf);
btrfs_mark_buffer_dirty(leaf);
inode_add_bytes(inode, num_bytes);
ins.objectid = disk_bytenr;
ins.offset = disk_num_bytes;
ins.type = BTRFS_EXTENT_ITEM_KEY;
ret = btrfs_alloc_reserved_file_extent(trans, root,
root->root_key.objectid,
btrfs_ino(inode), file_pos, &ins);
out:
btrfs_free_path(path);
return ret;
}
/*
 * as ordered data IO finishes, this gets called so we can finish
 * an ordered extent if the range of bytes in the file it covers
 * is fully written.
 */
static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans = NULL;
struct btrfs_ordered_extent *ordered_extent = NULL;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct extent_state *cached_state = NULL;
int compress_type = 0;
int ret;
bool nolock;
ret = btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
end - start + 1);
if (!ret)
return 0;
BUG_ON(!ordered_extent); /* Logic error */
nolock = btrfs_is_free_space_inode(root, inode);
if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
if (!ret) {
if (nolock)
trans = btrfs_join_transaction_nolock(root);
else
trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
return PTR_ERR(trans);
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
ret = btrfs_update_inode_fallback(trans, root, inode);
if (ret) /* -ENOMEM or corruption */
btrfs_abort_transaction(trans, root, ret);
}
goto out;
}
lock_extent_bits(io_tree, ordered_extent->file_offset,
ordered_extent->file_offset + ordered_extent->len - 1,
0, &cached_state);
if (nolock)
trans = btrfs_join_transaction_nolock(root);
else
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
trans = NULL;
goto out_unlock;
}
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
compress_type = ordered_extent->compress_type;
if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
BUG_ON(compress_type);
ret = btrfs_mark_extent_written(trans, inode,
ordered_extent->file_offset,
ordered_extent->file_offset +
ordered_extent->len);
} else {
BUG_ON(root == root->fs_info->tree_root);
ret = insert_reserved_file_extent(trans, inode,
ordered_extent->file_offset,
ordered_extent->start,
ordered_extent->disk_len,
ordered_extent->len,
ordered_extent->len,
compress_type, 0, 0,
BTRFS_FILE_EXTENT_REG);
unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
ordered_extent->file_offset,
ordered_extent->len);
}
unlock_extent_cached(io_tree, ordered_extent->file_offset,
ordered_extent->file_offset +
ordered_extent->len - 1, &cached_state, GFP_NOFS);
if (ret < 0) {
btrfs_abort_transaction(trans, root, ret);
goto out;
}
add_pending_csums(trans, inode, ordered_extent->file_offset,
&ordered_extent->list);
ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
ret = btrfs_update_inode_fallback(trans, root, inode);
if (ret) { /* -ENOMEM or corruption */
btrfs_abort_transaction(trans, root, ret);
goto out;
}
}
ret = 0;
out:
if (root != root->fs_info->tree_root)
btrfs_delalloc_release_metadata(inode, ordered_extent->len);
if (trans) {
if (nolock)
btrfs_end_transaction_nolock(trans, root);
else
btrfs_end_transaction(trans, root);
}
/* once for us */
btrfs_put_ordered_extent(ordered_extent);
/* once for the tree */
btrfs_put_ordered_extent(ordered_extent);
return 0;
out_unlock:
unlock_extent_cached(io_tree, ordered_extent->file_offset,
ordered_extent->file_offset +
ordered_extent->len - 1, &cached_state, GFP_NOFS);
goto out;
}
static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
struct extent_state *state, int uptodate)
{
trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
ClearPagePrivate2(page);
return btrfs_finish_ordered_io(page->mapping->host, start, end);
}
/*
* when reads are done, we need to check csums to verify the data is correct.
* If there's a match, we allow the bio to finish. If not, the code in
* extent_io.c will try to find good copies for us.
*/
static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
struct extent_state *state, int mirror)
{
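/* offset of the start of this range inside its page */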
size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
struct inode *inode = page->mapping->host;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
char *kaddr;
u64 private = ~(u32)0;
int ret;
struct btrfs_root *root = BTRFS_I(inode)->root;
u32 csum = ~(u32)0;
if (PageChecked(page)) {
ClearPageChecked(page);
goto good;
}
if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
goto good;
if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
GFP_NOFS);
return 0;
}
if (state && state->start == start) {
private = state->private;
ret = 0;
} else {
ret = get_state_private(io_tree, start, &private);
}
kaddr = kmap_atomic(page);
if (ret)
goto zeroit;
csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1);
btrfs_csum_final(csum, (char *)&csum);
if (csum != private)
goto zeroit;
kunmap_atomic(kaddr);
good:
return 0;
zeroit:
printk_ratelimited(KERN_INFO "btrfs csum failed ino %llu off %llu csum %u "
"private %llu\n",
(unsigned long long)btrfs_ino(page->mapping->host),
(unsigned long long)start, csum,
(unsigned long long)private);
memset(kaddr + offset, 1, end - start + 1);
flush_dcache_page(page);
kunmap_atomic(kaddr);
if (private == 0)
return 0;
return -EIO;
}
struct delayed_iput {
struct list_head list;
struct inode *inode;
};
/* JDM: If this is fs-wide, why can't we add a pointer to
* btrfs_inode instead and avoid the allocation? */
void btrfs_add_delayed_iput(struct inode *inode)
{
struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
struct delayed_iput *delayed;
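/*
 * drop our reference unless it is the last one; the final iput
 * can evict the inode and start a transaction, so it is deferred
 * to btrfs_run_delayed_iputs instead of running in this context
 */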
if (atomic_add_unless(&inode->i_count, -1, 1))
return;
delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL);
delayed->inode = inode;
spin_lock(&fs_info->delayed_iput_lock);
list_add_tail(&delayed->list, &fs_info->delayed_iputs);
spin_unlock(&fs_info->delayed_iput_lock);
}
void btrfs_run_delayed_iputs(struct btrfs_root *root)
{
LIST_HEAD(list);
struct btrfs_fs_info *fs_info = root->fs_info;
struct delayed_iput *delayed;
int empty;
spin_lock(&fs_info->delayed_iput_lock);
empty = list_empty(&fs_info->delayed_iputs);
spin_unlock(&fs_info->delayed_iput_lock);
if (empty)
return;
down_read(&root->fs_info->cleanup_work_sem);
spin_lock(&fs_info->delayed_iput_lock);
list_splice_init(&fs_info->delayed_iputs, &list);
spin_unlock(&fs_info->delayed_iput_lock);
while (!list_empty(&list)) {
delayed = list_entry(list.next, struct delayed_iput, list);
list_del(&delayed->list);
iput(delayed->inode);
kfree(delayed);
}
up_read(&root->fs_info->cleanup_work_sem);
}
enum btrfs_orphan_cleanup_state {
ORPHAN_CLEANUP_STARTED = 1,
ORPHAN_CLEANUP_DONE = 2,
};
/*
* This is called at transaction commit time. If there are no orphan
* files in the subvolume, it removes the orphan item and frees the
* block_rsv structure.
*/
void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
struct btrfs_block_rsv *block_rsv;
int ret;
if (!list_empty(&root->orphan_list) ||
root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE)
return;
spin_lock(&root->orphan_lock);
if (!list_empty(&root->orphan_list)) {
spin_unlock(&root->orphan_lock);
return;
}
if (root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) {
spin_unlock(&root->orphan_lock);
return;
}
block_rsv = root->orphan_block_rsv;
root->orphan_block_rsv = NULL;
spin_unlock(&root->orphan_lock);
if (root->orphan_item_inserted &&
btrfs_root_refs(&root->root_item) > 0) {
ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root,
root->root_key.objectid);
BUG_ON(ret);
root->orphan_item_inserted = 0;
}
if (block_rsv) {
WARN_ON(block_rsv->size > 0);
btrfs_free_block_rsv(root, block_rsv);
}
}
/*
* This creates an orphan entry for the given inode in case something goes
* wrong in the middle of an unlink/truncate.
*
* NOTE: caller of this function should reserve 5 units of metadata for
* this function.
*/
int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_block_rsv *block_rsv = NULL;
int reserve = 0;
int insert = 0;
int ret;
if (!root->orphan_block_rsv) {
block_rsv = btrfs_alloc_block_rsv(root);
if (!block_rsv)
return -ENOMEM;
}
spin_lock(&root->orphan_lock);
if (!root->orphan_block_rsv) {
root->orphan_block_rsv = block_rsv;
} else if (block_rsv) {
btrfs_free_block_rsv(root, block_rsv);
block_rsv = NULL;
}
if (list_empty(&BTRFS_I(inode)->i_orphan)) {
list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
#if 0
/*
* For proper ENOSPC handling, we should do orphan
* cleanup when mounting. But this introduces backward
* compatibility issue.
*/
if (!xchg(&root->orphan_item_inserted, 1))
insert = 2;
else
insert = 1;
#endif
insert = 1;
}
if (!BTRFS_I(inode)->orphan_meta_reserved) {
BTRFS_I(inode)->orphan_meta_reserved = 1;
reserve = 1;
}
spin_unlock(&root->orphan_lock);
/* grab metadata reservation from transaction handle */
if (reserve) {
ret = btrfs_orphan_reserve_metadata(trans, inode);
BUG_ON(ret); /* -ENOSPC in reservation; Logic error? JDM */
}
/* insert an orphan item to track this unlinked/truncated file */
if (insert >= 1) {
ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
if (ret && ret != -EEXIST) {
btrfs_abort_transaction(trans, root, ret);
return ret;
}
ret = 0;
}
/* insert an orphan item to record that the subvolume contains orphan files */
if (insert >= 2) {
ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
root->root_key.objectid);
if (ret && ret != -EEXIST) {
btrfs_abort_transaction(trans, root, ret);
return ret;
}
}
return 0;
}
/*
* We have done the truncate/delete so we can go ahead and remove the orphan
* item for this particular inode.
*/
int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
int delete_item = 0;
int release_rsv = 0;
int ret = 0;
spin_lock(&root->orphan_lock);
if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
list_del_init(&BTRFS_I(inode)->i_orphan);
delete_item = 1;
}
if (BTRFS_I(inode)->orphan_meta_reserved) {
BTRFS_I(inode)->orphan_meta_reserved = 0;
release_rsv = 1;
}
spin_unlock(&root->orphan_lock);
if (trans && delete_item) {
ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode));
BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */
}
if (release_rsv)
btrfs_orphan_release_metadata(inode);
return 0;
}
/*
* this cleans up any orphans that may be left on the list from the last use
* of this root.
*/
int btrfs_orphan_cleanup(struct btrfs_root *root)
{
struct btrfs_path *path;
struct extent_buffer *leaf;
struct btrfs_key key, found_key;
struct btrfs_trans_handle *trans;
struct inode *inode;
u64 last_objectid = 0;
int ret = 0, nr_unlink = 0, nr_truncate = 0;
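/* the cmpxchg lets only the first caller per root run the cleanup */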
if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
return 0;
path = btrfs_alloc_path();
if (!path) {
ret = -ENOMEM;
goto out;
}
path->reada = -1;
key.objectid = BTRFS_ORPHAN_OBJECTID;
btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
key.offset = (u64)-1;
while (1) {
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
goto out;
/*
* ret == 0 means we found what we were searching for, which
* is weird, but possible, so only mess with the path if we
* didn't find the key and see if we have stuff that matches
*/
if (ret > 0) {
ret = 0;
if (path->slots[0] == 0)
break;
path->slots[0]--;
}
/* pull out the item */
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
/* make sure the item matches what we want */
if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
break;
if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
break;
/* release the path since we're done with it */
btrfs_release_path(path);
/*
* this is where we are basically btrfs_lookup, without the
* crossing root thing. we store the inode number in the
* offset of the orphan item.
*/
if (found_key.offset == last_objectid) {
printk(KERN_ERR "btrfs: Error removing orphan entry, "
"stopping orphan cleanup\n");
ret = -EINVAL;
goto out;
}
last_objectid = found_key.offset;
found_key.objectid = found_key.offset;
found_key.type = BTRFS_INODE_ITEM_KEY;
found_key.offset = 0;
inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
ret = PTR_RET(inode);
if (ret && ret != -ESTALE)
goto out;
if (ret == -ESTALE && root == root->fs_info->tree_root) {
struct btrfs_root *dead_root;
struct btrfs_fs_info *fs_info = root->fs_info;
int is_dead_root = 0;
/*
* this is an orphan in the tree root. Currently these
* could come from 2 sources:
* a) a snapshot deletion in progress
* b) a free space cache inode
* We need to distinguish those two, as the snapshot
* orphan must not get deleted.
* find_dead_roots already ran before us, so if this
* is a snapshot deletion, we should find the root
* in the dead_roots list
*/
spin_lock(&fs_info->trans_lock);
list_for_each_entry(dead_root, &fs_info->dead_roots,
root_list) {
if (dead_root->root_key.objectid ==
found_key.objectid) {
is_dead_root = 1;
break;
}
}
spin_unlock(&fs_info->trans_lock);
if (is_dead_root) {
/* prevent this orphan from being found again */
key.offset = found_key.objectid - 1;
continue;
}
}
/*
* Inode is already gone but the orphan item is still there,
* kill the orphan item.
*/
if (ret == -ESTALE) {
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
goto out;
}
ret = btrfs_del_orphan_item(trans, root,
found_key.objectid);
BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */
btrfs_end_transaction(trans, root);
continue;
}
/*
* add this inode to the orphan list so btrfs_orphan_del does
* the proper thing when we hit it
*/
spin_lock(&root->orphan_lock);
list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
spin_unlock(&root->orphan_lock);
/* if we have links, this was a truncate, let's do that */
if (inode->i_nlink) {
if (!S_ISREG(inode->i_mode)) {
WARN_ON(1);
iput(inode);
continue;
}
nr_truncate++;
ret = btrfs_truncate(inode);
} else {
nr_unlink++;
}
/* this will do delete_inode and everything for us */
iput(inode);
if (ret)
goto out;
}
/* release the path since we're done with it */
btrfs_release_path(path);
root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
if (root->orphan_block_rsv)
btrfs_block_rsv_release(root, root->orphan_block_rsv,
(u64)-1);
if (root->orphan_block_rsv || root->orphan_item_inserted) {
trans = btrfs_join_transaction(root);
if (!IS_ERR(trans))
btrfs_end_transaction(trans, root);
}
if (nr_unlink)
printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
if (nr_truncate)
printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);
out:
if (ret)
printk(KERN_CRIT "btrfs: could not do orphan cleanup %d\n", ret);
btrfs_free_path(path);
return ret;
}
/*
* very simple check to peek ahead in the leaf looking for xattrs. If we
* don't find any xattrs, we know there can't be any acls.
*
* slot is the slot the inode is in, objectid is the objectid of the inode
*/
static noinline int acls_after_inode_item(struct extent_buffer *leaf,
int slot, u64 objectid)
{
u32 nritems = btrfs_header_nritems(leaf);
struct btrfs_key found_key;
int scanned = 0;
slot++;
while (slot < nritems) {
btrfs_item_key_to_cpu(leaf, &found_key, slot);
/* we found a different objectid, there must not be acls */
if (found_key.objectid != objectid)
return 0;
/* we found an xattr, assume we've got an acl */
if (found_key.type == BTRFS_XATTR_ITEM_KEY)
return 1;
/*
* we found a key greater than an xattr key, there can't
* be any acls later on
*/
if (found_key.type > BTRFS_XATTR_ITEM_KEY)
return 0;
slot++;
scanned++;
/*
* it goes inode, inode backrefs, xattrs, extents,
* so if there are a ton of hard links to an inode there can
* be a lot of backrefs. Don't waste time searching too hard,
* this is just an optimization
*/
if (scanned >= 8)
break;
}
/* we hit the end of the leaf before we found an xattr or
* something larger than an xattr. We have to assume the inode
* has acls
*/
return 1;
}
/*
* read an inode from the btree into the in-memory inode
*/
static void btrfs_read_locked_inode(struct inode *inode)
{
struct btrfs_path *path;
struct extent_buffer *leaf;
struct btrfs_inode_item *inode_item;
struct btrfs_timespec *tspec;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_key location;
int maybe_acls;
u32 rdev;
int ret;
bool filled = false;
ret = btrfs_fill_inode(inode, &rdev);
if (!ret)
filled = true;
path = btrfs_alloc_path();
if (!path)
goto make_bad;
path->leave_spinning = 1;
memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
if (ret)
goto make_bad;
leaf = path->nodes[0];
if (filled)
goto cache_acl;
inode_item = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_inode_item);
inode->i_mode = btrfs_inode_mode(leaf, inode_item);
set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
inode->i_uid = btrfs_inode_uid(leaf, inode_item);
inode->i_gid = btrfs_inode_gid(leaf, inode_item);
btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
tspec = btrfs_inode_atime(inode_item);
inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
tspec = btrfs_inode_mtime(inode_item);
inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
tspec = btrfs_inode_ctime(inode_item);
inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
BTRFS_I(inode)->sequence = btrfs_inode_sequence(leaf, inode_item);
inode->i_generation = BTRFS_I(inode)->generation;
inode->i_rdev = 0;
rdev = btrfs_inode_rdev(leaf, inode_item);
BTRFS_I(inode)->index_cnt = (u64)-1;
BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
cache_acl:
/*
* try to precache a NULL acl entry for files that don't have
* any xattrs or acls
*/
maybe_acls = acls_after_inode_item(leaf, path->slots[0],
btrfs_ino(inode));
if (!maybe_acls)
cache_no_acl(inode);
btrfs_free_path(path);
switch (inode->i_mode & S_IFMT) {
case S_IFREG:
inode->i_mapping->a_ops = &btrfs_aops;
inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
inode->i_fop = &btrfs_file_operations;
inode->i_op = &btrfs_file_inode_operations;
break;
case S_IFDIR:
inode->i_fop = &btrfs_dir_file_operations;
if (root == root->fs_info->tree_root)
inode->i_op = &btrfs_dir_ro_inode_operations;
else
inode->i_op = &btrfs_dir_inode_operations;
break;
case S_IFLNK:
inode->i_op = &btrfs_symlink_inode_operations;
inode->i_mapping->a_ops = &btrfs_symlink_aops;
inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
break;
default:
inode->i_op = &btrfs_special_inode_operations;
init_special_inode(inode, inode->i_mode, rdev);
break;
}
btrfs_update_iflags(inode);
return;
make_bad:
btrfs_free_path(path);
make_bad_inode(inode);
}
/*
* given a leaf and an inode, copy the inode fields into the leaf
*/
static void fill_inode_item(struct btrfs_trans_handle *trans,
struct extent_buffer *leaf,
struct btrfs_inode_item *item,
struct inode *inode)
{
btrfs_set_inode_uid(leaf, item, inode->i_uid);
btrfs_set_inode_gid(leaf, item, inode->i_gid);
btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
btrfs_set_inode_mode(leaf, item, inode->i_mode);
btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
inode->i_atime.tv_sec);
btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
inode->i_atime.tv_nsec);
btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
inode->i_mtime.tv_sec);
btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
inode->i_mtime.tv_nsec);
btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
inode->i_ctime.tv_sec);
btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
inode->i_ctime.tv_nsec);
btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
btrfs_set_inode_sequence(leaf, item, BTRFS_I(inode)->sequence);
btrfs_set_inode_transid(leaf, item, trans->transid);
btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
btrfs_set_inode_block_group(leaf, item, 0);
}
/*
* copy everything in the in-memory inode into the btree.
*/
static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *inode)
{
struct btrfs_inode_item *inode_item;
struct btrfs_path *path;
struct extent_buffer *leaf;
int ret;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
path->leave_spinning = 1;
ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location,
1);
if (ret) {
if (ret > 0)
ret = -ENOENT;
goto failed;
}
btrfs_unlock_up_safe(path, 1);
leaf = path->nodes[0];
inode_item = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_inode_item);
fill_inode_item(trans, leaf, inode_item, inode);
btrfs_mark_buffer_dirty(leaf);
btrfs_set_inode_last_trans(trans, inode);
ret = 0;
failed:
btrfs_free_path(path);
return ret;
}
/*
* copy everything in the in-memory inode into the btree.
*/
noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *inode)
{
int ret;
/*
* If the inode is a free space inode, we can deadlock during commit
* if we put it into the delayed code.
*
* The data relocation inode should also be directly updated
* without delay
*/
if (!btrfs_is_free_space_inode(root, inode)
&& root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
ret = btrfs_delayed_update_inode(trans, root, inode);
if (!ret)
btrfs_set_inode_last_trans(trans, inode);
return ret;
}
return btrfs_update_inode_item(trans, root, inode);
}
static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *inode)
{
int ret;
ret = btrfs_update_inode(trans, root, inode);
if (ret == -ENOSPC)
return btrfs_update_inode_item(trans, root, inode);
return ret;
}
/*
* unlink helper that gets used here in inode.c and in the tree logging
* recovery code. It removes a link in a directory with a given name, and
* also drops the back refs in the inode to the directory
*/
static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct inode *dir, struct inode *inode,
const char *name, int name_len)
{
struct btrfs_path *path;
int ret = 0;
struct extent_buffer *leaf;
struct btrfs_dir_item *di;
struct btrfs_key key;
u64 index;
u64 ino = btrfs_ino(inode);
u64 dir_ino = btrfs_ino(dir);
path = btrfs_alloc_path();
if (!path) {
ret = -ENOMEM;
goto out;
}
path->leave_spinning = 1;
di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
name, name_len, -1);
if (IS_ERR(di)) {
ret = PTR_ERR(di);
goto err;
}
if (!di) {
ret = -ENOENT;
goto err;
}
leaf = path->nodes[0];
btrfs_dir_item_key_to_cpu(leaf, di, &key);
ret = btrfs_delete_one_dir_name(trans, root, path, di);
if (ret)
goto err;
btrfs_release_path(path);
ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
dir_ino, &index);
if (ret) {
printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
"inode %llu parent %llu\n", name_len, name,
(unsigned long long)ino, (unsigned long long)dir_ino);
btrfs_abort_transaction(trans, root, ret);
goto err;
}
ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
goto err;
}
ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
inode, dir_ino);
if (ret != 0 && ret != -ENOENT) {
btrfs_abort_transaction(trans, root, ret);
goto err;
}
ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
dir, index);
if (ret == -ENOENT)
ret = 0;
err:
btrfs_free_path(path);
if (ret)
goto out;
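/*
 * every name is accounted twice in the directory size, once for
 * the dir item and once for the dir index, hence name_len * 2
 */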
btrfs_i_size_write(dir, dir->i_size - name_len * 2);
inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
btrfs_update_inode(trans, root, dir);
out:
return ret;
}
int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct inode *dir, struct inode *inode,
const char *name, int name_len)
{
int ret;
ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
if (!ret) {
btrfs_drop_nlink(inode);
ret = btrfs_update_inode(trans, root, inode);
}
return ret;
}
/* helper to check if there is any shared block in the path */
static int check_path_shared(struct btrfs_root *root,
struct btrfs_path *path)
{
struct extent_buffer *eb;
int level;
u64 refs = 1;
for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
int ret;
if (!path->nodes[level])
break;
eb = path->nodes[level];
if (!btrfs_block_can_be_shared(root, eb))
continue;
ret = btrfs_lookup_extent_info(NULL, root, eb->start, eb->len,
&refs, NULL);
if (refs > 1)
return 1;
}
return 0;
}
/*
* helper to start transaction for unlink and rmdir.
*
* unlink and rmdir are special in btrfs: they do not always free space,
* so in the enospc case we should make sure they will free space before
* allowing them to use the global metadata reservation.
*/
static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
struct dentry *dentry)
{
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_path *path;
struct btrfs_inode_ref *ref;
struct btrfs_dir_item *di;
struct inode *inode = dentry->d_inode;
u64 index;
int check_link = 1;
int err = -ENOSPC;
int ret;
u64 ino = btrfs_ino(inode);
u64 dir_ino = btrfs_ino(dir);
/*
* 1 for the possible orphan item
* 1 for the dir item
* 1 for the dir index
* 1 for the inode ref
* 1 for the inode ref in the tree log
* 2 for the dir entries in the log
* 1 for the inode
*/
trans = btrfs_start_transaction(root, 8);
if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
return trans;
if (ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
return ERR_PTR(-ENOSPC);
/* check if someone else holds a reference */
if (S_ISDIR(inode->i_mode) && atomic_read(&inode->i_count) > 1)
return ERR_PTR(-ENOSPC);
if (atomic_read(&inode->i_count) > 2)
return ERR_PTR(-ENOSPC);
if (xchg(&root->fs_info->enospc_unlink, 1))
return ERR_PTR(-ENOSPC);
path = btrfs_alloc_path();
if (!path) {
root->fs_info->enospc_unlink = 0;
return ERR_PTR(-ENOMEM);
}
/* 1 for the orphan item */
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans)) {
btrfs_free_path(path);
root->fs_info->enospc_unlink = 0;
return trans;
}
path->skip_locking = 1;
path->search_commit_root = 1;
ret = btrfs_lookup_inode(trans, root, path,
&BTRFS_I(dir)->location, 0);
if (ret < 0) {
err = ret;
goto out;
}
if (ret == 0) {
if (check_path_shared(root, path))
goto out;
} else {
check_link = 0;
}
btrfs_release_path(path);
ret = btrfs_lookup_inode(trans, root, path,
&BTRFS_I(inode)->location, 0);
if (ret < 0) {
err = ret;
goto out;
}
if (ret == 0) {
if (check_path_shared(root, path))
goto out;
} else {
check_link = 0;
}
btrfs_release_path(path);
if (ret == 0 && S_ISREG(inode->i_mode)) {
ret = btrfs_lookup_file_extent(trans, root, path,
ino, (u64)-1, 0);
if (ret < 0) {
err = ret;
goto out;
}
BUG_ON(ret == 0); /* Corruption */
if (check_path_shared(root, path))
goto out;
btrfs_release_path(path);
}
if (!check_link) {
err = 0;
goto out;
}
di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
dentry->d_name.name, dentry->d_name.len, 0);
if (IS_ERR(di)) {
err = PTR_ERR(di);
goto out;
}
if (di) {
if (check_path_shared(root, path))
goto out;
} else {
err = 0;
goto out;
}
btrfs_release_path(path);
ref = btrfs_lookup_inode_ref(trans, root, path,
dentry->d_name.name, dentry->d_name.len,
ino, dir_ino, 0);
if (IS_ERR(ref)) {
err = PTR_ERR(ref);
goto out;
}
BUG_ON(!ref); /* Logic error */
if (check_path_shared(root, path))
goto out;
index = btrfs_inode_ref_index(path->nodes[0], ref);
btrfs_release_path(path);
/*
* This is a commit root search; if we can look up the inode item and
* other related items in the commit root, it means the transaction of
* dir/file creation has been committed, and the dir index item that we
* delay to insert has also been inserted into the commit root. So
* we needn't worry about the delayed insertion of the dir index item
* here.
*/
di = btrfs_lookup_dir_index_item(trans, root, path, dir_ino, index,
dentry->d_name.name, dentry->d_name.len, 0);
if (IS_ERR(di)) {
err = PTR_ERR(di);
goto out;
}
BUG_ON(ret == -ENOENT);
if (check_path_shared(root, path))
goto out;
err = 0;
out:
btrfs_free_path(path);
/* Migrate the orphan reservation over */
if (!err)
err = btrfs_block_rsv_migrate(trans->block_rsv,
&root->fs_info->global_block_rsv,
trans->bytes_reserved);
if (err) {
btrfs_end_transaction(trans, root);
root->fs_info->enospc_unlink = 0;
return ERR_PTR(err);
}
trans->block_rsv = &root->fs_info->global_block_rsv;
return trans;
}
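/*
* counterpart of __unlink_start_trans: give back any bytes taken from
* the global reservation and clear the enospc_unlink flag before
* ending the transaction.
*/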
static void __unlink_end_trans(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
if (trans->block_rsv == &root->fs_info->global_block_rsv) {
btrfs_block_rsv_release(root, trans->block_rsv,
trans->bytes_reserved);
trans->block_rsv = &root->fs_info->trans_block_rsv;
BUG_ON(!root->fs_info->enospc_unlink);
root->fs_info->enospc_unlink = 0;
}
btrfs_end_transaction(trans, root);
}
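/*
* VFS unlink: remove the directory entry, and if the link count hits
* zero put the inode on the orphan list so its space is reclaimed once
* the last reference is dropped.
*/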
static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
{
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_trans_handle *trans;
struct inode *inode = dentry->d_inode;
int ret;
unsigned long nr = 0;
trans = __unlink_start_trans(dir, dentry);
if (IS_ERR(trans))
return PTR_ERR(trans);
btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);
ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
dentry->d_name.name, dentry->d_name.len);
if (ret)
goto out;
if (inode->i_nlink == 0) {
ret = btrfs_orphan_add(trans, inode);
if (ret)
goto out;
}
out:
nr = trans->blocks_used;
__unlink_end_trans(trans, root);
btrfs_btree_balance_dirty(root, nr);
return ret;
}
int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct inode *dir, u64 objectid,
const char *name, int name_len)
{
struct btrfs_path *path;
struct extent_buffer *leaf;
struct btrfs_dir_item *di;
struct btrfs_key key;
u64 index;
int ret;
u64 dir_ino = btrfs_ino(dir);
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
name, name_len, -1);
if (IS_ERR_OR_NULL(di)) {
if (!di)
ret = -ENOENT;
else
ret = PTR_ERR(di);
goto out;
}
leaf = path->nodes[0];
btrfs_dir_item_key_to_cpu(leaf, di, &key);
WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
ret = btrfs_delete_one_dir_name(trans, root, path, di);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
goto out;
}
btrfs_release_path(path);
ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
objectid, root->root_key.objectid,
dir_ino, &index, name, name_len);
if (ret < 0) {
if (ret != -ENOENT) {
btrfs_abort_transaction(trans, root, ret);
goto out;
}
di = btrfs_search_dir_index_item(root, path, dir_ino,
name, name_len);
if (IS_ERR_OR_NULL(di)) {
if (!di)
ret = -ENOENT;
else
ret = PTR_ERR(di);
btrfs_abort_transaction(trans, root, ret);
goto out;
}
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
btrfs_release_path(path);
index = key.offset;
}
btrfs_release_path(path);
ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
goto out;
}
btrfs_i_size_write(dir, dir->i_size - name_len * 2);
dir->i_mtime = dir->i_ctime = CURRENT_TIME;
ret = btrfs_update_inode(trans, root, dir);
if (ret)
btrfs_abort_transaction(trans, root, ret);
out:
btrfs_free_path(path);
return ret;
}
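/*
* VFS rmdir: non-empty directories and subvolume roots are rejected
* with -ENOTEMPTY; an empty subvolume placeholder directory is removed
* via btrfs_unlink_subvol(), everything else goes through the normal
* orphan + unlink path.
*/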
static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
int err = 0;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_trans_handle *trans;
unsigned long nr = 0;
if (inode->i_size > BTRFS_EMPTY_DIR_SIZE ||
btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID)
return -ENOTEMPTY;
trans = __unlink_start_trans(dir, dentry);
if (IS_ERR(trans))
return PTR_ERR(trans);
if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
err = btrfs_unlink_subvol(trans, root, dir,
BTRFS_I(inode)->location.objectid,
dentry->d_name.name,
dentry->d_name.len);
goto out;
}
err = btrfs_orphan_add(trans, inode);
if (err)
goto out;
/* now the directory is empty */
err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
dentry->d_name.name, dentry->d_name.len);
if (!err)
btrfs_i_size_write(inode, 0);
out:
nr = trans->blocks_used;
__unlink_end_trans(trans, root);
btrfs_btree_balance_dirty(root, nr);
return err;
}
/*
* this can truncate away extent items, csum items and directory items.
* It starts at a high offset and removes keys until it can't find
* any higher than new_size
*
* csum items that cross the new i_size are truncated to the new size
* as well.
*
* min_type is the minimum key type to truncate down to. If set to 0, this
* will kill all the items on this inode, including the INODE_ITEM_KEY.
*/
int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct inode *inode,
u64 new_size, u32 min_type)
{
struct btrfs_path *path;
struct extent_buffer *leaf;
struct btrfs_file_extent_item *fi;
struct btrfs_key key;
struct btrfs_key found_key;
u64 extent_start = 0;
u64 extent_num_bytes = 0;
u64 extent_offset = 0;
u64 item_end = 0;
u64 mask = root->sectorsize - 1;
u32 found_type = (u8)-1;
int found_extent;
int del_item;
int pending_del_nr = 0;
int pending_del_slot = 0;
int extent_type = -1;
int ret;
int err = 0;
u64 ino = btrfs_ino(inode);
BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
path->reada = -1;
if (root->ref_cows || root == root->fs_info->tree_root)
btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
/*
* This function is also used to drop the items in the log tree before
* we relog the inode, so if root != BTRFS_I(inode)->root, it means
* it is used to drop the logged items. So we shouldn't kill the
* delayed items.
*/
if (min_type == 0 && root == BTRFS_I(inode)->root)
btrfs_kill_delayed_inode_items(inode);
key.objectid = ino;
key.offset = (u64)-1;
key.type = (u8)-1;
search_again:
path->leave_spinning = 1;
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret < 0) {
err = ret;
goto out;
}
if (ret > 0) {
/* there are no items in the tree for us to truncate, we're
* done
*/
if (path->slots[0] == 0)
goto out;
path->slots[0]--;
}
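/*
* walk backwards through the items, batching contiguous deletions into
* pending_del_slot/pending_del_nr so they can be removed with a single
* btrfs_del_items() call per batch.
*/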
while (1) {
fi = NULL;
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
found_type = btrfs_key_type(&found_key);
if (found_key.objectid != ino)
break;
if (found_type < min_type)
break;
item_end = found_key.offset;
if (found_type == BTRFS_EXTENT_DATA_KEY) {
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
extent_type = btrfs_file_extent_type(leaf, fi);
if (extent_type != BTRFS_FILE_EXTENT_INLINE)
item_end +=
btrfs_file_extent_num_bytes(leaf, fi);
else
item_end += btrfs_file_extent_inline_len(leaf,
fi);
item_end--;
}
if (found_type > min_type) {
del_item = 1;
} else {
if (item_end < new_size)
break;
if (found_key.offset >= new_size)
del_item = 1;
else
del_item = 0;
}
found_extent = 0;
/* FIXME, shrink the extent if the ref count is only 1 */
if (found_type != BTRFS_EXTENT_DATA_KEY)
goto delete;
if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
u64 num_dec;
extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
if (!del_item) {
u64 orig_num_bytes =
btrfs_file_extent_num_bytes(leaf, fi);
extent_num_bytes = new_size -
found_key.offset + root->sectorsize - 1;
extent_num_bytes = extent_num_bytes &
~((u64)root->sectorsize - 1);
btrfs_set_file_extent_num_bytes(leaf, fi,
extent_num_bytes);
num_dec = (orig_num_bytes -
extent_num_bytes);
if (root->ref_cows && extent_start != 0)
inode_sub_bytes(inode, num_dec);
btrfs_mark_buffer_dirty(leaf);
} else {
extent_num_bytes =
btrfs_file_extent_disk_num_bytes(leaf,
fi);
extent_offset = found_key.offset -
btrfs_file_extent_offset(leaf, fi);
/* FIXME blocksize != 4096 */
num_dec = btrfs_file_extent_num_bytes(leaf, fi);
if (extent_start != 0) {
found_extent = 1;
if (root->ref_cows)
inode_sub_bytes(inode, num_dec);
}
}
} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
/*
* we can't truncate inline items that have had
* special encodings
*/
if (!del_item &&
btrfs_file_extent_compression(leaf, fi) == 0 &&
btrfs_file_extent_encryption(leaf, fi) == 0 &&
btrfs_file_extent_other_encoding(leaf, fi) == 0) {
u32 size = new_size - found_key.offset;
if (root->ref_cows) {
inode_sub_bytes(inode, item_end + 1 -
new_size);
}
size =
btrfs_file_extent_calc_inline_size(size);
btrfs_truncate_item(trans, root, path,
size, 1);
} else if (root->ref_cows) {
inode_sub_bytes(inode, item_end + 1 -
found_key.offset);
}
}
delete:
if (del_item) {
if (!pending_del_nr) {
/* no pending yet, add ourselves */
pending_del_slot = path->slots[0];
pending_del_nr = 1;
} else if (pending_del_nr &&
path->slots[0] + 1 == pending_del_slot) {
/* hop on the pending chunk */
pending_del_nr++;
pending_del_slot = path->slots[0];
} else {
BUG();
}
} else {
break;
}
if (found_extent && (root->ref_cows ||
root == root->fs_info->tree_root)) {
btrfs_set_path_blocking(path);
ret = btrfs_free_extent(trans, root, extent_start,
extent_num_bytes, 0,
btrfs_header_owner(leaf),
ino, extent_offset, 0);
BUG_ON(ret);
}
if (found_type == BTRFS_INODE_ITEM_KEY)
break;
if (path->slots[0] == 0 ||
path->slots[0] != pending_del_slot) {
if (root->ref_cows &&
BTRFS_I(inode)->location.objectid !=
BTRFS_FREE_INO_OBJECTID) {
err = -EAGAIN;
goto out;
}
if (pending_del_nr) {
ret = btrfs_del_items(trans, root, path,
pending_del_slot,
pending_del_nr);
if (ret) {
btrfs_abort_transaction(trans,
root, ret);
goto error;
}
pending_del_nr = 0;
}
btrfs_release_path(path);
goto search_again;
} else {
path->slots[0]--;
}
}
out:
if (pending_del_nr) {
ret = btrfs_del_items(trans, root, path, pending_del_slot,
pending_del_nr);
if (ret)
btrfs_abort_transaction(trans, root, ret);
}
error:
btrfs_free_path(path);
return err;
}
/*
* taken from block_truncate_page, but does COW as it zeros out
* any bytes left in the last page in the file.
*/
static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
{
struct inode *inode = mapping->host;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct btrfs_ordered_extent *ordered;
struct extent_state *cached_state = NULL;
char *kaddr;
u32 blocksize = root->sectorsize;
pgoff_t index = from >> PAGE_CACHE_SHIFT;
unsigned offset = from & (PAGE_CACHE_SIZE-1);
struct page *page;
gfp_t mask = btrfs_alloc_write_mask(mapping);
int ret = 0;
u64 page_start;
u64 page_end;
if ((offset & (blocksize - 1)) == 0)
goto out;
ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
if (ret)
goto out;
ret = -ENOMEM;
again:
page = find_or_create_page(mapping, index, mask);
if (!page) {
btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
goto out;
}
page_start = page_offset(page);
page_end = page_start + PAGE_CACHE_SIZE - 1;
if (!PageUptodate(page)) {
ret = btrfs_readpage(NULL, page);
lock_page(page);
if (page->mapping != mapping) {
unlock_page(page);
page_cache_release(page);
goto again;
}
if (!PageUptodate(page)) {
ret = -EIO;
goto out_unlock;
}
}
wait_on_page_writeback(page);
lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
set_page_extent_mapped(page);
ordered = btrfs_lookup_ordered_extent(inode, page_start);
if (ordered) {
unlock_extent_cached(io_tree, page_start, page_end,
&cached_state, GFP_NOFS);
unlock_page(page);
page_cache_release(page);
btrfs_start_ordered_extent(inode, ordered, 1);
btrfs_put_ordered_extent(ordered);
goto again;
}
clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
0, 0, &cached_state, GFP_NOFS);
ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
&cached_state);
if (ret) {
unlock_extent_cached(io_tree, page_start, page_end,
&cached_state, GFP_NOFS);
goto out_unlock;
}
ret = 0;
if (offset != PAGE_CACHE_SIZE) {
kaddr = kmap(page);
memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
flush_dcache_page(page);
kunmap(page);
}
ClearPageChecked(page);
set_page_dirty(page);
unlock_extent_cached(io_tree, page_start, page_end, &cached_state,
GFP_NOFS);
out_unlock:
if (ret)
btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
unlock_page(page);
page_cache_release(page);
out:
return ret;
}
/*
* This function puts in dummy file extents for the area we're creating a hole
* for. So if we are truncating this file to a larger size we need to insert
* these file extents so that btrfs_get_extent will return an EXTENT_MAP_HOLE
* for the range between oldsize and size.
*/
int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
{
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct extent_map *em = NULL;
struct extent_state *cached_state = NULL;
u64 mask = root->sectorsize - 1;
u64 hole_start = (oldsize + mask) & ~mask;
u64 block_end = (size + mask) & ~mask;
u64 last_byte;
u64 cur_offset;
u64 hole_size;
int err = 0;
if (size <= hole_start)
return 0;
while (1) {
struct btrfs_ordered_extent *ordered;
btrfs_wait_ordered_range(inode, hole_start,
block_end - hole_start);
lock_extent_bits(io_tree, hole_start, block_end - 1, 0,
&cached_state);
ordered = btrfs_lookup_ordered_extent(inode, hole_start);
if (!ordered)
break;
unlock_extent_cached(io_tree, hole_start, block_end - 1,
&cached_state, GFP_NOFS);
btrfs_put_ordered_extent(ordered);
}
cur_offset = hole_start;
while (1) {
em = btrfs_get_extent(inode, NULL, 0, cur_offset,
block_end - cur_offset, 0);
if (IS_ERR(em)) {
err = PTR_ERR(em);
break;
}
last_byte = min(extent_map_end(em), block_end);
last_byte = (last_byte + mask) & ~mask;
if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
u64 hint_byte = 0;
hole_size = last_byte - cur_offset;
trans = btrfs_start_transaction(root, 3);
if (IS_ERR(trans)) {
err = PTR_ERR(trans);
break;
}
err = btrfs_drop_extents(trans, inode, cur_offset,
cur_offset + hole_size,
&hint_byte, 1);
if (err) {
btrfs_abort_transaction(trans, root, err);
btrfs_end_transaction(trans, root);
break;
}
err = btrfs_insert_file_extent(trans, root,
btrfs_ino(inode), cur_offset, 0,
0, hole_size, 0, hole_size,
0, 0, 0);
if (err) {
btrfs_abort_transaction(trans, root, err);
btrfs_end_transaction(trans, root);
break;
}
btrfs_drop_extent_cache(inode, hole_start,
last_byte - 1, 0);
btrfs_update_inode(trans, root, inode);
btrfs_end_transaction(trans, root);
}
free_extent_map(em);
em = NULL;
cur_offset = last_byte;
if (cur_offset >= block_end)
break;
}
free_extent_map(em);
unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state,
GFP_NOFS);
return err;
}
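/*
* grow or shrink i_size. Growing fills the new range with hole extents
* via btrfs_cont_expand(); shrinking defers the heavy lifting to
* btrfs_truncate().
*/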
static int btrfs_setsize(struct inode *inode, loff_t newsize)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
loff_t oldsize = i_size_read(inode);
int ret;
if (newsize == oldsize)
return 0;
if (newsize > oldsize) {
truncate_pagecache(inode, oldsize, newsize);
ret = btrfs_cont_expand(inode, oldsize, newsize);
if (ret)
return ret;
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans))
return PTR_ERR(trans);
i_size_write(inode, newsize);
btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
ret = btrfs_update_inode(trans, root, inode);
btrfs_end_transaction(trans, root);
} else {
/*
* We're truncating a file that used to have good data down to
* zero. Make sure it gets into the ordered flush list so that
* any new writes get down to disk quickly.
*/
if (newsize == 0)
BTRFS_I(inode)->ordered_data_close = 1;
/* we don't support swapfiles, so vmtruncate shouldn't fail */
truncate_setsize(inode, newsize);
ret = btrfs_truncate(inode);
}
return ret;
}
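/*
* VFS setattr: size changes go through btrfs_setsize(), all other
* attributes are copied into the inode and written back through
* btrfs_dirty_inode().
*/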
static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = dentry->d_inode;
struct btrfs_root *root = BTRFS_I(inode)->root;
int err;
if (btrfs_root_readonly(root))
return -EROFS;
err = inode_change_ok(inode, attr);
if (err)
return err;
if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
err = btrfs_setsize(inode, attr->ia_size);
if (err)
return err;
}
if (attr->ia_valid) {
setattr_copy(inode, attr);
err = btrfs_dirty_inode(inode);
if (!err && attr->ia_valid & ATTR_MODE)
err = btrfs_acl_chmod(inode);
}
return err;
}
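/*
* final iput path for an inode. Inodes with no links left have all
* their items truncated away and the orphan item removed, looping on
* -EAGAIN so each transaction stays reasonably small.
*/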
void btrfs_evict_inode(struct inode *inode)
{
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_block_rsv *rsv, *global_rsv;
u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
unsigned long nr;
int ret;
trace_btrfs_inode_evict(inode);
truncate_inode_pages(&inode->i_data, 0);
if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 ||
btrfs_is_free_space_inode(root, inode)))
goto no_delete;
if (is_bad_inode(inode)) {
btrfs_orphan_del(NULL, inode);
goto no_delete;
}
/* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
btrfs_wait_ordered_range(inode, 0, (u64)-1);
if (root->fs_info->log_root_recovering) {
BUG_ON(!list_empty(&BTRFS_I(inode)->i_orphan));
goto no_delete;
}
if (inode->i_nlink > 0) {
BUG_ON(btrfs_root_refs(&root->root_item) != 0);
goto no_delete;
}
rsv = btrfs_alloc_block_rsv(root);
if (!rsv) {
btrfs_orphan_del(NULL, inode);
goto no_delete;
}
rsv->size = min_size;
global_rsv = &root->fs_info->global_block_rsv;
btrfs_i_size_write(inode, 0);
/*
* This is a bit simpler than btrfs_truncate since
*
* 1) We've already reserved our space for our orphan item in the
* unlink.
* 2) We're going to delete the inode item, so we don't need to update
* it at all.
*
* So we just need to reserve some slack space in case we add bytes when
* doing the truncate.
*/
while (1) {
ret = btrfs_block_rsv_refill_noflush(root, rsv, min_size);
/*
* Try and steal from the global reserve since we will
* likely not use this space anyway; we want to try as
* hard as possible to get this to work.
*/
if (ret)
ret = btrfs_block_rsv_migrate(global_rsv, rsv, min_size);
if (ret) {
printk(KERN_WARNING "Could not get space for a "
"delete, will truncate on mount %d\n", ret);
btrfs_orphan_del(NULL, inode);
btrfs_free_block_rsv(root, rsv);
goto no_delete;
}
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
btrfs_orphan_del(NULL, inode);
btrfs_free_block_rsv(root, rsv);
goto no_delete;
}
trans->block_rsv = rsv;
ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
if (ret != -EAGAIN)
break;
nr = trans->blocks_used;
btrfs_end_transaction(trans, root);
trans = NULL;
btrfs_btree_balance_dirty(root, nr);
}
btrfs_free_block_rsv(root, rsv);
if (ret == 0) {
trans->block_rsv = root->orphan_block_rsv;
ret = btrfs_orphan_del(trans, inode);
BUG_ON(ret);
}
trans->block_rsv = &root->fs_info->trans_block_rsv;
if (!(root == root->fs_info->tree_root ||
root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
btrfs_return_ino(root, btrfs_ino(inode));
nr = trans->blocks_used;
btrfs_end_transaction(trans, root);
btrfs_btree_balance_dirty(root, nr);
no_delete:
end_writeback(inode);
return;
}
/*
* this returns the key found in the dir entry in the location pointer.
* If no dir entries were found, location->objectid is 0.
*/
static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
struct btrfs_key *location)
{
const char *name = dentry->d_name.name;
int namelen = dentry->d_name.len;
struct btrfs_dir_item *di;
struct btrfs_path *path;
struct btrfs_root *root = BTRFS_I(dir)->root;
int ret = 0;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name,
namelen, 0);
if (IS_ERR(di))
ret = PTR_ERR(di);
if (IS_ERR_OR_NULL(di))
goto out_err;
btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
out:
btrfs_free_path(path);
return ret;
out_err:
location->objectid = 0;
goto out;
}
/*
* when we hit a tree root in a directory, the btrfs part of the inode
* needs to be changed to reflect the root directory of the tree root. This
* is kind of like crossing a mount point.
*/
static int fixup_tree_root_location(struct btrfs_root *root,
struct inode *dir,
struct dentry *dentry,
struct btrfs_key *location,
struct btrfs_root **sub_root)
{
struct btrfs_path *path;
struct btrfs_root *new_root;
struct btrfs_root_ref *ref;
struct extent_buffer *leaf;
int ret;
int err = 0;
path = btrfs_alloc_path();
if (!path) {
err = -ENOMEM;
goto out;
}
err = -ENOENT;
ret = btrfs_find_root_ref(root->fs_info->tree_root, path,
BTRFS_I(dir)->root->root_key.objectid,
location->objectid);
if (ret) {
if (ret < 0)
err = ret;
goto out;
}
leaf = path->nodes[0];
ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
goto out;
ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
(unsigned long)(ref + 1),
dentry->d_name.len);
if (ret)
goto out;
btrfs_release_path(path);
new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
if (IS_ERR(new_root)) {
err = PTR_ERR(new_root);
goto out;
}
if (btrfs_root_refs(&new_root->root_item) == 0) {
err = -ENOENT;
goto out;
}
*sub_root = new_root;
location->objectid = btrfs_root_dirid(&new_root->root_item);
location->type = BTRFS_INODE_ITEM_KEY;
location->offset = 0;
err = 0;
out:
btrfs_free_path(path);
return err;
}
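/*
* insert the inode into the per-root rbtree of cached inodes, keyed by
* inode number. A stale entry that is already being freed is erased
* and the insertion retried.
*/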
static void inode_tree_add(struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_inode *entry;
struct rb_node **p;
struct rb_node *parent;
u64 ino = btrfs_ino(inode);
again:
p = &root->inode_tree.rb_node;
parent = NULL;
if (inode_unhashed(inode))
return;
spin_lock(&root->inode_lock);
while (*p) {
parent = *p;
entry = rb_entry(parent, struct btrfs_inode, rb_node);
if (ino < btrfs_ino(&entry->vfs_inode))
p = &parent->rb_left;
else if (ino > btrfs_ino(&entry->vfs_inode))
p = &parent->rb_right;
else {
WARN_ON(!(entry->vfs_inode.i_state &
(I_WILL_FREE | I_FREEING)));
rb_erase(parent, &root->inode_tree);
RB_CLEAR_NODE(parent);
spin_unlock(&root->inode_lock);
goto again;
}
}
rb_link_node(&BTRFS_I(inode)->rb_node, parent, p);
rb_insert_color(&BTRFS_I(inode)->rb_node, &root->inode_tree);
spin_unlock(&root->inode_lock);
}
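/*
* remove the inode from the per-root rbtree; if that empties the tree
* of a dead root (root_refs == 0), queue the root for cleanup via
* btrfs_add_dead_root().
*/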
static void inode_tree_del(struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
int empty = 0;
spin_lock(&root->inode_lock);
if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
empty = RB_EMPTY_ROOT(&root->inode_tree);
}
spin_unlock(&root->inode_lock);
/*
* Free space cache has inodes in the tree root, but the tree root has a
* root_refs of 0, so this could end up dropping the tree root as a
* snapshot, so we need the extra root != root->fs_info->tree_root check
* to make sure we don't drop it.
*/
if (empty && btrfs_root_refs(&root->root_item) == 0 &&
root != root->fs_info->tree_root) {
synchronize_srcu(&root->fs_info->subvol_srcu);
spin_lock(&root->inode_lock);
empty = RB_EMPTY_ROOT(&root->inode_tree);
spin_unlock(&root->inode_lock);
if (empty)
btrfs_add_dead_root(root);
}
}
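/*
* walk the per-root inode rbtree in objectid order and drop every
* cached inode; only used when the whole root is going away, as the
* WARN_ON on root_refs documents.
*/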
void btrfs_invalidate_inodes(struct btrfs_root *root)
{
struct rb_node *node;
struct rb_node *prev;
struct btrfs_inode *entry;
struct inode *inode;
u64 objectid = 0;
WARN_ON(btrfs_root_refs(&root->root_item) != 0);
spin_lock(&root->inode_lock);
again:
node = root->inode_tree.rb_node;
prev = NULL;
while (node) {
prev = node;
entry = rb_entry(node, struct btrfs_inode, rb_node);
if (objectid < btrfs_ino(&entry->vfs_inode))
node = node->rb_left;
else if (objectid > btrfs_ino(&entry->vfs_inode))
node = node->rb_right;
else
break;
}
if (!node) {
while (prev) {
entry = rb_entry(prev, struct btrfs_inode, rb_node);
if (objectid <= btrfs_ino(&entry->vfs_inode)) {
node = prev;
break;
}
prev = rb_next(prev);
}
}
while (node) {
entry = rb_entry(node, struct btrfs_inode, rb_node);
objectid = btrfs_ino(&entry->vfs_inode) + 1;
inode = igrab(&entry->vfs_inode);
if (inode) {
spin_unlock(&root->inode_lock);
if (atomic_read(&inode->i_count) > 1)
d_prune_aliases(inode);
/*
* btrfs_drop_inode will have it removed from
* the inode cache when its usage count
* hits zero.
*/
iput(inode);
cond_resched();
spin_lock(&root->inode_lock);
goto again;
}
if (cond_resched_lock(&root->inode_lock))
goto again;
node = rb_next(node);
}
spin_unlock(&root->inode_lock);
}
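/*
* callbacks for iget5_locked(): initialize a freshly allocated inode
* and match an existing one by (objectid, root) pair.
*/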
static int btrfs_init_locked_inode(struct inode *inode, void *p)
{
struct btrfs_iget_args *args = p;
inode->i_ino = args->ino;
BTRFS_I(inode)->root = args->root;
btrfs_set_inode_space_info(args->root, inode);
return 0;
}
static int btrfs_find_actor(struct inode *inode, void *opaque)
{
struct btrfs_iget_args *args = opaque;
return args->ino == btrfs_ino(inode) &&
args->root == BTRFS_I(inode)->root;
}
static struct inode *btrfs_iget_locked(struct super_block *s,
u64 objectid,
struct btrfs_root *root)
{
struct inode *inode;
struct btrfs_iget_args args;
args.ino = objectid;
args.root = root;
inode = iget5_locked(s, objectid, btrfs_find_actor,
btrfs_init_locked_inode,
(void *)&args);
return inode;
}
/* Get an inode object given its location and corresponding root.
* Returns in *new if the inode was read from disk
*/
struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
struct btrfs_root *root, int *new)
{
struct inode *inode;
inode = btrfs_iget_locked(s, location->objectid, root);
if (!inode)
return ERR_PTR(-ENOMEM);
if (inode->i_state & I_NEW) {
BTRFS_I(inode)->root = root;
memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
btrfs_read_locked_inode(inode);
if (!is_bad_inode(inode)) {
inode_tree_add(inode);
unlock_new_inode(inode);
if (new)
*new = 1;
} else {
unlock_new_inode(inode);
iput(inode);
inode = ERR_PTR(-ESTALE);
}
}
return inode;
}
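/*
* build an in-memory only dummy directory inode, used from
* btrfs_lookup_dentry() when a subvolume's root reference cannot be
* resolved.
*/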
static struct inode *new_simple_dir(struct super_block *s,
struct btrfs_key *key,
struct btrfs_root *root)
{
struct inode *inode = new_inode(s);
if (!inode)
return ERR_PTR(-ENOMEM);
BTRFS_I(inode)->root = root;
memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
BTRFS_I(inode)->dummy_inode = 1;
inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
inode->i_op = &btrfs_dir_ro_inode_operations;
inode->i_fop = &simple_dir_operations;
inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
return inode;
}
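/*
* look up a name in a directory. ROOT_ITEM keys mark a subvolume
* boundary and are translated by fixup_tree_root_location() into the
* subvolume's own root directory.
*/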
struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
{
struct inode *inode;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_root *sub_root = root;
struct btrfs_key location;
int index;
int ret = 0;
if (dentry->d_name.len > BTRFS_NAME_LEN)
return ERR_PTR(-ENAMETOOLONG);
if (unlikely(d_need_lookup(dentry))) {
memcpy(&location, dentry->d_fsdata, sizeof(struct btrfs_key));
kfree(dentry->d_fsdata);
dentry->d_fsdata = NULL;
/* This thing is hashed, drop it for now */
d_drop(dentry);
} else {
ret = btrfs_inode_by_name(dir, dentry, &location);
}
if (ret < 0)
return ERR_PTR(ret);
if (location.objectid == 0)
return NULL;
if (location.type == BTRFS_INODE_ITEM_KEY) {
inode = btrfs_iget(dir->i_sb, &location, root, NULL);
return inode;
}
BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);
index = srcu_read_lock(&root->fs_info->subvol_srcu);
ret = fixup_tree_root_location(root, dir, dentry,
&location, &sub_root);
if (ret < 0) {
if (ret != -ENOENT)
inode = ERR_PTR(ret);
else
inode = new_simple_dir(dir->i_sb, &location, sub_root);
} else {
inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
}
srcu_read_unlock(&root->fs_info->subvol_srcu, index);
if (!IS_ERR(inode) && root != sub_root) {
down_read(&root->fs_info->cleanup_work_sem);
if (!(inode->i_sb->s_flags & MS_RDONLY))
ret = btrfs_orphan_cleanup(sub_root);
up_read(&root->fs_info->cleanup_work_sem);
if (ret)
inode = ERR_PTR(ret);
}
return inode;
}
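/*
* tell the dcache to delete dentries belonging to deleted snapshots
* (root_refs == 0) and the dummy empty-subvolume directory as soon as
* their refcount drops.
*/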
static int btrfs_dentry_delete(const struct dentry *dentry)
{
struct btrfs_root *root;
struct inode *inode = dentry->d_inode;
if (!inode && !IS_ROOT(dentry))
inode = dentry->d_parent->d_inode;
if (inode) {
root = BTRFS_I(inode)->root;
if (btrfs_root_refs(&root->root_item) == 0)
return 1;
if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
return 1;
}
return 0;
}
static void btrfs_dentry_release(struct dentry *dentry)
{
if (dentry->d_fsdata)
kfree(dentry->d_fsdata);
}
static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
struct nameidata *nd)
{
struct dentry *ret;
ret = d_splice_alias(btrfs_lookup_dentry(dir, dentry), dentry);
if (unlikely(d_need_lookup(dentry))) {
spin_lock(&dentry->d_lock);
dentry->d_flags &= ~DCACHE_NEED_LOOKUP;
spin_unlock(&dentry->d_lock);
}
return ret;
}
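/* map btrfs on-disk dir item types to the DT_* values readdir reports */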
unsigned char btrfs_filetype_table[] = {
DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
};
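/*
* readdir implementation. For regular directories this walks the
* DIR_INDEX items, merging in delayed (not yet committed) index
* insertions and skipping delayed deletions.
*/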
static int btrfs_real_readdir(struct file *filp, void *dirent,
filldir_t filldir)
{
struct inode *inode = filp->f_dentry->d_inode;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_item *item;
struct btrfs_dir_item *di;
struct btrfs_key key;
struct btrfs_key found_key;
struct btrfs_path *path;
struct list_head ins_list;
struct list_head del_list;
int ret;
struct extent_buffer *leaf;
int slot;
unsigned char d_type;
int over = 0;
u32 di_cur;
u32 di_total;
u32 di_len;
int key_type = BTRFS_DIR_INDEX_KEY;
char tmp_name[32];
char *name_ptr;
int name_len;
int is_curr = 0; /* filp->f_pos points to the current index? */
/* FIXME, use a real flag for deciding about the key type */
if (root->fs_info->tree_root == root)
key_type = BTRFS_DIR_ITEM_KEY;
/* special case for "." */
if (filp->f_pos == 0) {
over = filldir(dirent, ".", 1,
filp->f_pos, btrfs_ino(inode), DT_DIR);
if (over)
return 0;
filp->f_pos = 1;
}
/* special case for .., just use the back ref */
if (filp->f_pos == 1) {
u64 pino = parent_ino(filp->f_path.dentry);
over = filldir(dirent, "..", 2,
filp->f_pos, pino, DT_DIR);
if (over)
return 0;
filp->f_pos = 2;
}
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
path->reada = 1;
if (key_type == BTRFS_DIR_INDEX_KEY) {
INIT_LIST_HEAD(&ins_list);
INIT_LIST_HEAD(&del_list);
btrfs_get_delayed_items(inode, &ins_list, &del_list);
}
btrfs_set_key_type(&key, key_type);
key.offset = filp->f_pos;
key.objectid = btrfs_ino(inode);
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
goto err;
while (1) {
leaf = path->nodes[0];
slot = path->slots[0];
if (slot >= btrfs_header_nritems(leaf)) {
ret = btrfs_next_leaf(root, path);
if (ret < 0)
goto err;
else if (ret > 0)
break;
continue;
}
item = btrfs_item_nr(leaf, slot);
btrfs_item_key_to_cpu(leaf, &found_key, slot);
if (found_key.objectid != key.objectid)
break;
if (btrfs_key_type(&found_key) != key_type)
break;
if (found_key.offset < filp->f_pos)
goto next;
if (key_type == BTRFS_DIR_INDEX_KEY &&
btrfs_should_delete_dir_index(&del_list,
found_key.offset))
goto next;
filp->f_pos = found_key.offset;
is_curr = 1;
di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
di_cur = 0;
di_total = btrfs_item_size(leaf, item);
while (di_cur < di_total) {
struct btrfs_key location;
if (verify_dir_item(root, leaf, di))
break;
name_len = btrfs_dir_name_len(leaf, di);
if (name_len <= sizeof(tmp_name)) {
name_ptr = tmp_name;
} else {
name_ptr = kmalloc(name_len, GFP_NOFS);
if (!name_ptr) {
ret = -ENOMEM;
goto err;
}
}
read_extent_buffer(leaf, name_ptr,
(unsigned long)(di + 1), name_len);
d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
btrfs_dir_item_key_to_cpu(leaf, di, &location);
/* is this a reference to our own snapshot? If so
* skip it.
*
* In contrast to old kernels, we insert the snapshot's
* dir item and dir index after it has been created, so
* we won't find a reference to our own snapshot. We
* still keep the following code for backward
* compatibility.
*/
if (location.type == BTRFS_ROOT_ITEM_KEY &&
location.objectid == root->root_key.objectid) {
over = 0;
goto skip;
}
over = filldir(dirent, name_ptr, name_len,
found_key.offset, location.objectid,
d_type);
skip:
if (name_ptr != tmp_name)
kfree(name_ptr);
if (over)
goto nopos;
di_len = btrfs_dir_name_len(leaf, di) +
btrfs_dir_data_len(leaf, di) + sizeof(*di);
di_cur += di_len;
di = (struct btrfs_dir_item *)((char *)di + di_len);
}
next:
path->slots[0]++;
}
if (key_type == BTRFS_DIR_INDEX_KEY) {
if (is_curr)
filp->f_pos++;
ret = btrfs_readdir_delayed_dir_index(filp, dirent, filldir,
&ins_list);
if (ret)
goto nopos;
}
/* Reached end of directory/root. Bump pos past the last item. */
if (key_type == BTRFS_DIR_INDEX_KEY)
/*
* 32-bit glibc will use getdents64, but then strtol -
* so the last number we can serve is this.
*/
filp->f_pos = 0x7fffffff;
else
filp->f_pos++;
nopos:
ret = 0;
err:
if (key_type == BTRFS_DIR_INDEX_KEY)
btrfs_put_delayed_items(&ins_list, &del_list);
btrfs_free_path(path);
return ret;
}
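/*
* ->write_inode: inode metadata lives in the btree and is written via
* transactions, so there is nothing to do unless the caller wants a
* synchronous commit (WB_SYNC_ALL).
*/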
int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
int ret = 0;
bool nolock = false;
if (BTRFS_I(inode)->dummy_inode)
return 0;
if (btrfs_fs_closing(root->fs_info) && btrfs_is_free_space_inode(root, inode))
nolock = true;
if (wbc->sync_mode == WB_SYNC_ALL) {
if (nolock)
trans = btrfs_join_transaction_nolock(root);
else
trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
return PTR_ERR(trans);
if (nolock)
ret = btrfs_end_transaction_nolock(trans, root);
else
ret = btrfs_commit_transaction(trans, root);
}
return ret;
}
/*
* This is somewhat expensive, updating the tree every time the
* inode changes. But, it is most likely to find the inode in cache.
* FIXME, needs more benchmarking... there are no reasons other than
* performance to keep or drop this code.
*/
int btrfs_dirty_inode(struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
int ret;
if (BTRFS_I(inode)->dummy_inode)
return 0;
trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
return PTR_ERR(trans);
ret = btrfs_update_inode(trans, root, inode);
if (ret && ret == -ENOSPC) {
/* whoops, lets try again with the full transaction */
btrfs_end_transaction(trans, root);
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans))
return PTR_ERR(trans);
ret = btrfs_update_inode(trans, root, inode);
}
btrfs_end_transaction(trans, root);
if (BTRFS_I(inode)->delayed_node)
btrfs_balance_delayed_items(root);
return ret;
}
/*
* This is a copy of file_update_time. We need this so we can return error on
* ENOSPC for updating the inode in the case of file write and mmap writes.
*/
int btrfs_update_time(struct file *file)
{
struct inode *inode = file->f_path.dentry->d_inode;
struct timespec now;
int ret;
enum { S_MTIME = 1, S_CTIME = 2, S_VERSION = 4 } sync_it = 0;
/* First try to exhaust all avenues to not sync */
if (IS_NOCMTIME(inode))
return 0;
now = current_fs_time(inode->i_sb);
if (!timespec_equal(&inode->i_mtime, &now))
sync_it = S_MTIME;
if (!timespec_equal(&inode->i_ctime, &now))
sync_it |= S_CTIME;
if (IS_I_VERSION(inode))
sync_it |= S_VERSION;
if (!sync_it)
return 0;
/* Finally allowed to write? Takes lock. */
if (mnt_want_write_file(file))
return 0;
/* Only change inode inside the lock region */
if (sync_it & S_VERSION)
inode_inc_iversion(inode);
if (sync_it & S_CTIME)
inode->i_ctime = now;
if (sync_it & S_MTIME)
inode->i_mtime = now;
ret = btrfs_dirty_inode(inode);
if (!ret)
mark_inode_dirty_sync(inode);
mnt_drop_write(file->f_path.mnt);
return ret;
}
/*
* find the highest existing sequence number in a directory
* and then set the in-memory index_cnt variable to reflect
* free sequence numbers
*/
static int btrfs_set_inode_index_count(struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_key key, found_key;
struct btrfs_path *path;
struct extent_buffer *leaf;
int ret;
key.objectid = btrfs_ino(inode);
btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
key.offset = (u64)-1;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
goto out;
/* FIXME: we should be able to handle this */
if (ret == 0)
goto out;
ret = 0;
/*
* MAGIC NUMBER EXPLANATION:
* since we search a directory based on f_pos we have to start at 2
* since '.' and '..' have f_pos of 0 and 1 respectively, so everybody
* else has to start at 2
*/
if (path->slots[0] == 0) {
BTRFS_I(inode)->index_cnt = 2;
goto out;
}
path->slots[0]--;
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
if (found_key.objectid != btrfs_ino(inode) ||
btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
BTRFS_I(inode)->index_cnt = 2;
goto out;
}
BTRFS_I(inode)->index_cnt = found_key.offset + 1;
out:
btrfs_free_path(path);
return ret;
}
/*
* helper to find a free sequence number in a given directory. The
* current code is very simple; later versions will do smarter things
* in the btree.
*/
int btrfs_set_inode_index(struct inode *dir, u64 *index)
{
int ret = 0;
if (BTRFS_I(dir)->index_cnt == (u64)-1) {
ret = btrfs_inode_delayed_dir_index_count(dir);
if (ret) {
ret = btrfs_set_inode_index_count(dir);
if (ret)
return ret;
}
}
*index = BTRFS_I(dir)->index_cnt;
BTRFS_I(dir)->index_cnt++;
return ret;
}
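/*
* allocate a new in-memory inode and insert its INODE_ITEM and
* INODE_REF btree items in a single btrfs_insert_empty_items() call,
* then inherit flags from the parent directory.
*/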
static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct inode *dir,
const char *name, int name_len,
u64 ref_objectid, u64 objectid,
umode_t mode, u64 *index)
{
struct inode *inode;
struct btrfs_inode_item *inode_item;
struct btrfs_key *location;
struct btrfs_path *path;
struct btrfs_inode_ref *ref;
struct btrfs_key key[2];
u32 sizes[2];
unsigned long ptr;
int ret;
int owner;
path = btrfs_alloc_path();
if (!path)
return ERR_PTR(-ENOMEM);
inode = new_inode(root->fs_info->sb);
if (!inode) {
btrfs_free_path(path);
return ERR_PTR(-ENOMEM);
}
/*
* we have to initialize this early, so we can reclaim the inode
* number if we fail afterwards in this function.
*/
inode->i_ino = objectid;
if (dir) {
trace_btrfs_inode_request(dir);
ret = btrfs_set_inode_index(dir, index);
if (ret) {
btrfs_free_path(path);
iput(inode);
return ERR_PTR(ret);
}
}
/*
* index_cnt is ignored for everything but a dir,
* btrfs_set_inode_index_count has an explanation for the magic
* number
*/
BTRFS_I(inode)->index_cnt = 2;
BTRFS_I(inode)->root = root;
BTRFS_I(inode)->generation = trans->transid;
inode->i_generation = BTRFS_I(inode)->generation;
btrfs_set_inode_space_info(root, inode);
if (S_ISDIR(mode))
owner = 0;
else
owner = 1;
key[0].objectid = objectid;
btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
key[0].offset = 0;
key[1].objectid = objectid;
btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
key[1].offset = ref_objectid;
sizes[0] = sizeof(struct btrfs_inode_item);
sizes[1] = name_len + sizeof(*ref);
path->leave_spinning = 1;
ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
if (ret != 0)
goto fail;
inode_init_owner(inode, dir, mode);
inode_set_bytes(inode, 0);
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_inode_item);
fill_inode_item(trans, path->nodes[0], inode_item, inode);
ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
struct btrfs_inode_ref);
btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
ptr = (unsigned long)(ref + 1);
write_extent_buffer(path->nodes[0], name, ptr, name_len);
btrfs_mark_buffer_dirty(path->nodes[0]);
btrfs_free_path(path);
location = &BTRFS_I(inode)->location;
location->objectid = objectid;
location->offset = 0;
btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
btrfs_inherit_iflags(inode, dir);
if (S_ISREG(mode)) {
if (btrfs_test_opt(root, NODATASUM))
BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
if (btrfs_test_opt(root, NODATACOW) ||
(BTRFS_I(dir)->flags & BTRFS_INODE_NODATACOW))
BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
}
insert_inode_hash(inode);
inode_tree_add(inode);
trace_btrfs_inode_new(inode);
btrfs_set_inode_last_trans(trans, inode);
return inode;
fail:
if (dir)
BTRFS_I(dir)->index_cnt--;
btrfs_free_path(path);
iput(inode);
return ERR_PTR(ret);
}
static inline u8 btrfs_inode_type(struct inode *inode)
{
return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
}
/*
* utility function to add 'inode' into 'parent_inode' with
* a given name and a given sequence number.
* if 'add_backref' is true, also insert a backref from the
* inode to the parent directory.
*/
int btrfs_add_link(struct btrfs_trans_handle *trans,
struct inode *parent_inode, struct inode *inode,
const char *name, int name_len, int add_backref, u64 index)
{
int ret = 0;
struct btrfs_key key;
struct btrfs_root *root = BTRFS_I(parent_inode)->root;
u64 ino = btrfs_ino(inode);
u64 parent_ino = btrfs_ino(parent_inode);
if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key));
} else {
key.objectid = ino;
btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
key.offset = 0;
}
if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
key.objectid, root->root_key.objectid,
parent_ino, index, name, name_len);
} else if (add_backref) {
ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
parent_ino, index);
}
/* Nothing to clean up yet */
if (ret)
return ret;
ret = btrfs_insert_dir_item(trans, root, name, name_len,
parent_inode, &key,
btrfs_inode_type(inode), index);
if (ret == -EEXIST)
goto fail_dir_item;
else if (ret) {
btrfs_abort_transaction(trans, root, ret);
return ret;
}
btrfs_i_size_write(parent_inode, parent_inode->i_size +
name_len * 2);
parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
ret = btrfs_update_inode(trans, root, parent_inode);
if (ret)
btrfs_abort_transaction(trans, root, ret);
return ret;
fail_dir_item:
if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
u64 local_index;
int err;
err = btrfs_del_root_ref(trans, root->fs_info->tree_root,
key.objectid, root->root_key.objectid,
parent_ino, &local_index, name, name_len);
} else if (add_backref) {
u64 local_index;
int err;
err = btrfs_del_inode_ref(trans, root, name, name_len,
ino, parent_ino, &local_index);
}
return ret;
}
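/*
* thin wrapper around btrfs_add_link() that turns a positive return
* value into -EEXIST for the VFS.
*/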
static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
struct inode *dir, struct dentry *dentry,
struct inode *inode, int backref, u64 index)
{
int err = btrfs_add_link(trans, dir, inode,
dentry->d_name.name, dentry->d_name.len,
backref, index);
if (err > 0)
err = -EEXIST;
return err;
}
static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
umode_t mode, dev_t rdev)
{
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct inode *inode = NULL;
int err;
int drop_inode = 0;
u64 objectid;
unsigned long nr = 0;
u64 index = 0;
if (!new_valid_dev(rdev))
return -EINVAL;
/*
* 2 for inode item and ref
* 2 for dir items
* 1 for xattr if selinux is on
*/
trans = btrfs_start_transaction(root, 5);
if (IS_ERR(trans))
return PTR_ERR(trans);
err = btrfs_find_free_ino(root, &objectid);
if (err)
goto out_unlock;
inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
dentry->d_name.len, btrfs_ino(dir), objectid,
mode, &index);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto out_unlock;
}
err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
if (err) {
drop_inode = 1;
goto out_unlock;
}
/*
* If the active LSM wants to access the inode during
* d_instantiate it needs these. Smack checks to see
* if the filesystem supports xattrs by looking at the
* ops vector.
*/
inode->i_op = &btrfs_special_inode_operations;
err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
if (err)
drop_inode = 1;
else {
init_special_inode(inode, inode->i_mode, rdev);
btrfs_update_inode(trans, root, inode);
d_instantiate(dentry, inode);
}
out_unlock:
nr = trans->blocks_used;
btrfs_end_transaction(trans, root);
btrfs_btree_balance_dirty(root, nr);
if (drop_inode) {
inode_dec_link_count(inode);
iput(inode);
}
return err;
}
static int btrfs_create(struct inode *dir, struct dentry *dentry,
umode_t mode, struct nameidata *nd)
{
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct inode *inode = NULL;
int drop_inode = 0;
int err;
unsigned long nr = 0;
u64 objectid;
u64 index = 0;
/*
* 2 for inode item and ref
* 2 for dir items
* 1 for xattr if selinux is on
*/
trans = btrfs_start_transaction(root, 5);
if (IS_ERR(trans))
return PTR_ERR(trans);
err = btrfs_find_free_ino(root, &objectid);
if (err)
goto out_unlock;
inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
dentry->d_name.len, btrfs_ino(dir), objectid,
mode, &index);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto out_unlock;
}
err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
if (err) {
drop_inode = 1;
goto out_unlock;
}
/*
* If the active LSM wants to access the inode during
* d_instantiate it needs these. Smack checks to see
* if the filesystem supports xattrs by looking at the
* ops vector.
*/
inode->i_fop = &btrfs_file_operations;
inode->i_op = &btrfs_file_inode_operations;
err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
if (err)
drop_inode = 1;
else {
inode->i_mapping->a_ops = &btrfs_aops;
inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
d_instantiate(dentry, inode);
}
out_unlock:
nr = trans->blocks_used;
btrfs_end_transaction(trans, root);
if (drop_inode) {
inode_dec_link_count(inode);
iput(inode);
}
btrfs_btree_balance_dirty(root, nr);
return err;
}
static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *dentry)
{
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct inode *inode = old_dentry->d_inode;
u64 index;
unsigned long nr = 0;
int err;
int drop_inode = 0;
/* do not allow sys_link's with other subvols of the same device */
if (root->objectid != BTRFS_I(inode)->root->objectid)
return -EXDEV;
if (inode->i_nlink == ~0U)
return -EMLINK;
err = btrfs_set_inode_index(dir, &index);
if (err)
goto fail;
/*
* 2 items for inode and inode ref
* 2 items for dir items
* 1 item for parent inode
*/
trans = btrfs_start_transaction(root, 5);
if (IS_ERR(trans)) {
err = PTR_ERR(trans);
goto fail;
}
btrfs_inc_nlink(inode);
inode->i_ctime = CURRENT_TIME;
ihold(inode);
err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index);
if (err) {
drop_inode = 1;
} else {
struct dentry *parent = dentry->d_parent;
err = btrfs_update_inode(trans, root, inode);
if (err)
goto fail;
d_instantiate(dentry, inode);
btrfs_log_new_name(trans, inode, NULL, parent);
}
nr = trans->blocks_used;
btrfs_end_transaction(trans, root);
fail:
if (drop_inode) {
inode_dec_link_count(inode);
iput(inode);
}
btrfs_btree_balance_dirty(root, nr);
return err;
}
static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
struct inode *inode = NULL;
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
int err = 0;
int drop_on_err = 0;
u64 objectid = 0;
u64 index = 0;
unsigned long nr = 1;
/*
* 2 items for inode and ref
* 2 items for dir items
* 1 for xattr if selinux is on
*/
trans = btrfs_start_transaction(root, 5);
if (IS_ERR(trans))
return PTR_ERR(trans);
err = btrfs_find_free_ino(root, &objectid);
if (err)
goto out_fail;
inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
dentry->d_name.len, btrfs_ino(dir), objectid,
S_IFDIR | mode, &index);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto out_fail;
}
drop_on_err = 1;
err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
if (err)
goto out_fail;
inode->i_op = &btrfs_dir_inode_operations;
inode->i_fop = &btrfs_dir_file_operations;
btrfs_i_size_write(inode, 0);
err = btrfs_update_inode(trans, root, inode);
if (err)
goto out_fail;
err = btrfs_add_link(trans, dir, inode, dentry->d_name.name,
dentry->d_name.len, 0, index);
if (err)
goto out_fail;
d_instantiate(dentry, inode);
drop_on_err = 0;
out_fail:
nr = trans->blocks_used;
btrfs_end_transaction(trans, root);
if (drop_on_err)
iput(inode);
btrfs_btree_balance_dirty(root, nr);
return err;
}
/* helper for btrfs_get_extent. Given an existing extent in the tree,
* and an extent that you want to insert, deal with overlap and insert
* the new extent into the tree.
*/
static int merge_extent_mapping(struct extent_map_tree *em_tree,
struct extent_map *existing,
struct extent_map *em,
u64 map_start, u64 map_len)
{
u64 start_diff;
BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
start_diff = map_start - em->start;
em->start = map_start;
em->len = map_len;
if (em->block_start < EXTENT_MAP_LAST_BYTE &&
!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
em->block_start += start_diff;
em->block_len -= start_diff;
}
return add_extent_mapping(em_tree, em);
}
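/*
* decompress an inline extent directly into the page; if the
* decompressor fails, the affected range is zeroed so the read still
* returns deterministic contents.
*/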
static noinline int uncompress_inline(struct btrfs_path *path,
struct inode *inode, struct page *page,
size_t pg_offset, u64 extent_offset,
struct btrfs_file_extent_item *item)
{
int ret;
struct extent_buffer *leaf = path->nodes[0];
char *tmp;
size_t max_size;
unsigned long inline_size;
unsigned long ptr;
int compress_type;
WARN_ON(pg_offset != 0);
compress_type = btrfs_file_extent_compression(leaf, item);
max_size = btrfs_file_extent_ram_bytes(leaf, item);
inline_size = btrfs_file_extent_inline_item_len(leaf,
btrfs_item_nr(leaf, path->slots[0]));
tmp = kmalloc(inline_size, GFP_NOFS);
if (!tmp)
return -ENOMEM;
ptr = btrfs_file_extent_inline_start(item);
read_extent_buffer(leaf, tmp, ptr, inline_size);
max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
ret = btrfs_decompress(compress_type, tmp, page,
extent_offset, inline_size, max_size);
if (ret) {
char *kaddr = kmap_atomic(page);
unsigned long copy_size = min_t(u64,
PAGE_CACHE_SIZE - pg_offset,
max_size - extent_offset);
memset(kaddr + pg_offset, 0, copy_size);
kunmap_atomic(kaddr);
}
kfree(tmp);
return 0;
}
/*
* a bit scary, this does extent mapping from logical file offset to the disk.
* the ugly parts come from merging extents from the disk with the in-ram
* representation. This gets more complex because of the data=ordered code,
* where the in-ram extents might be locked pending data=ordered completion.
*
* This also copies inline extents directly into the page.
*/
struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
size_t pg_offset, u64 start, u64 len,
int create)
{
int ret;
int err = 0;
u64 bytenr;
u64 extent_start = 0;
u64 extent_end = 0;
u64 objectid = btrfs_ino(inode);
u32 found_type;
struct btrfs_path *path = NULL;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_file_extent_item *item;
struct extent_buffer *leaf;
struct btrfs_key found_key;
struct extent_map *em = NULL;
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct btrfs_trans_handle *trans = NULL;
int compress_type;
again:
read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, start, len);
if (em)
em->bdev = root->fs_info->fs_devices->latest_bdev;
read_unlock(&em_tree->lock);
if (em) {
if (em->start > start || em->start + em->len <= start)
free_extent_map(em);
else if (em->block_start == EXTENT_MAP_INLINE && page)
free_extent_map(em);
else
goto out;
}
em = alloc_extent_map();
if (!em) {
err = -ENOMEM;
goto out;
}
em->bdev = root->fs_info->fs_devices->latest_bdev;
em->start = EXTENT_MAP_HOLE;
em->orig_start = EXTENT_MAP_HOLE;
em->len = (u64)-1;
em->block_len = (u64)-1;
if (!path) {
path = btrfs_alloc_path();
if (!path) {
err = -ENOMEM;
goto out;
}
/*
* Chances are we'll be called again, so go ahead and do
* readahead
*/
path->reada = 1;
}
ret = btrfs_lookup_file_extent(trans, root, path,
objectid, start, trans != NULL);
if (ret < 0) {
err = ret;
goto out;
}
if (ret != 0) {
if (path->slots[0] == 0)
goto not_found;
path->slots[0]--;
}
leaf = path->nodes[0];
item = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
/* are we inside the extent that was found? */
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
found_type = btrfs_key_type(&found_key);
if (found_key.objectid != objectid ||
found_type != BTRFS_EXTENT_DATA_KEY) {
goto not_found;
}
found_type = btrfs_file_extent_type(leaf, item);
extent_start = found_key.offset;
compress_type = btrfs_file_extent_compression(leaf, item);
if (found_type == BTRFS_FILE_EXTENT_REG ||
found_type == BTRFS_FILE_EXTENT_PREALLOC) {
extent_end = extent_start +
btrfs_file_extent_num_bytes(leaf, item);
} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
size_t size;
size = btrfs_file_extent_inline_len(leaf, item);
extent_end = (extent_start + size + root->sectorsize - 1) &
~((u64)root->sectorsize - 1);
}
if (start >= extent_end) {
path->slots[0]++;
if (path->slots[0] >= btrfs_header_nritems(leaf)) {
ret = btrfs_next_leaf(root, path);
if (ret < 0) {
err = ret;
goto out;
}
if (ret > 0)
goto not_found;
leaf = path->nodes[0];
}
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
if (found_key.objectid != objectid ||
found_key.type != BTRFS_EXTENT_DATA_KEY)
goto not_found;
if (start + len <= found_key.offset)
goto not_found;
em->start = start;
em->len = found_key.offset - start;
goto not_found_em;
}
if (found_type == BTRFS_FILE_EXTENT_REG ||
found_type == BTRFS_FILE_EXTENT_PREALLOC) {
em->start = extent_start;
em->len = extent_end - extent_start;
em->orig_start = extent_start -
btrfs_file_extent_offset(leaf, item);
bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
if (bytenr == 0) {
em->block_start = EXTENT_MAP_HOLE;
goto insert;
}
if (compress_type != BTRFS_COMPRESS_NONE) {
set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
em->compress_type = compress_type;
em->block_start = bytenr;
em->block_len = btrfs_file_extent_disk_num_bytes(leaf,
item);
} else {
bytenr += btrfs_file_extent_offset(leaf, item);
em->block_start = bytenr;
em->block_len = em->len;
if (found_type == BTRFS_FILE_EXTENT_PREALLOC)
set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
}
goto insert;
} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
unsigned long ptr;
char *map;
size_t size;
size_t extent_offset;
size_t copy_size;
em->block_start = EXTENT_MAP_INLINE;
if (!page || create) {
em->start = extent_start;
em->len = extent_end - extent_start;
goto out;
}
size = btrfs_file_extent_inline_len(leaf, item);
extent_offset = page_offset(page) + pg_offset - extent_start;
copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
size - extent_offset);
em->start = extent_start + extent_offset;
em->len = (copy_size + root->sectorsize - 1) &
~((u64)root->sectorsize - 1);
em->orig_start = EXTENT_MAP_INLINE;
if (compress_type) {
set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
em->compress_type = compress_type;
}
ptr = btrfs_file_extent_inline_start(item) + extent_offset;
if (create == 0 && !PageUptodate(page)) {
if (btrfs_file_extent_compression(leaf, item) !=
BTRFS_COMPRESS_NONE) {
ret = uncompress_inline(path, inode, page,
pg_offset,
extent_offset, item);
BUG_ON(ret); /* -ENOMEM */
} else {
map = kmap(page);
read_extent_buffer(leaf, map + pg_offset, ptr,
copy_size);
if (pg_offset + copy_size < PAGE_CACHE_SIZE) {
memset(map + pg_offset + copy_size, 0,
PAGE_CACHE_SIZE - pg_offset -
copy_size);
}
kunmap(page);
}
flush_dcache_page(page);
} else if (create && PageUptodate(page)) {
BUG();
if (!trans) {
kunmap(page);
free_extent_map(em);
em = NULL;
btrfs_release_path(path);
trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
return ERR_CAST(trans);
goto again;
}
map = kmap(page);
write_extent_buffer(leaf, map + pg_offset, ptr,
copy_size);
kunmap(page);
btrfs_mark_buffer_dirty(leaf);
}
set_extent_uptodate(io_tree, em->start,
extent_map_end(em) - 1, NULL, GFP_NOFS);
goto insert;
} else {
printk(KERN_ERR "btrfs unknown found_type %d\n", found_type);
WARN_ON(1);
}
not_found:
em->start = start;
em->len = len;
not_found_em:
em->block_start = EXTENT_MAP_HOLE;
set_bit(EXTENT_FLAG_VACANCY, &em->flags);
insert:
btrfs_release_path(path);
if (em->start > start || extent_map_end(em) <= start) {
printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed "
"[%llu %llu]\n", (unsigned long long)em->start,
(unsigned long long)em->len,
(unsigned long long)start,
(unsigned long long)len);
err = -EIO;
goto out;
}
err = 0;
write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em);
/* it is possible that someone inserted the extent into the tree
* while we had the lock dropped. It is also possible that
* an overlapping map exists in the tree
*/
if (ret == -EEXIST) {
struct extent_map *existing;
ret = 0;
existing = lookup_extent_mapping(em_tree, start, len);
if (existing && (existing->start > start ||
existing->start + existing->len <= start)) {
free_extent_map(existing);
existing = NULL;
}
if (!existing) {
existing = lookup_extent_mapping(em_tree, em->start,
em->len);
if (existing) {
err = merge_extent_mapping(em_tree, existing,
em, start,
root->sectorsize);
free_extent_map(existing);
if (err) {
free_extent_map(em);
em = NULL;
}
} else {
err = -EIO;
free_extent_map(em);
em = NULL;
}
} else {
free_extent_map(em);
em = existing;
err = 0;
}
}
write_unlock(&em_tree->lock);
out:
trace_btrfs_get_extent(root, em);
if (path)
btrfs_free_path(path);
if (trans) {
ret = btrfs_end_transaction(trans, root);
if (!err)
err = ret;
}
if (err) {
free_extent_map(em);
return ERR_PTR(err);
}
BUG_ON(!em); /* Error is always set */
return em;
}
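/*
* fiemap variant of btrfs_get_extent(): if the plain lookup comes back
* as a hole, also check for delalloc bytes behind it and report those
* as an EXTENT_MAP_DELALLOC mapping.
*/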
struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
size_t pg_offset, u64 start, u64 len,
int create)
{
struct extent_map *em;
struct extent_map *hole_em = NULL;
u64 range_start = start;
u64 end;
u64 found;
u64 found_end;
int err = 0;
em = btrfs_get_extent(inode, page, pg_offset, start, len, create);
if (IS_ERR(em))
return em;
if (em) {
/*
* if our em maps to a hole, there might
* actually be delalloc bytes behind it
*/
if (em->block_start != EXTENT_MAP_HOLE)
return em;
else
hole_em = em;
}
/* check to see if we've wrapped (len == -1 or similar) */
end = start + len;
if (end < start)
end = (u64)-1;
else
end -= 1;
em = NULL;
/* ok, we didn't find anything, let's look for delalloc */
found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start,
end, len, EXTENT_DELALLOC, 1);
found_end = range_start + found;
if (found_end < range_start)
found_end = (u64)-1;
/*
* we didn't find anything useful, return
* the original results from get_extent()
*/
if (range_start > end || found_end <= start) {
em = hole_em;
hole_em = NULL;
goto out;
}
/* adjust the range_start to make sure it doesn't
* go backwards from the start they passed in
*/
range_start = max(start, range_start);
found = found_end - range_start;
if (found > 0) {
u64 hole_start = start;
u64 hole_len = len;
em = alloc_extent_map();
if (!em) {
err = -ENOMEM;
goto out;
}
/*
* when btrfs_get_extent can't find anything it
* returns one huge hole
*
* make sure what it found really fits our range, and
* adjust to make sure it is based on the start from
* the caller
*/
if (hole_em) {
u64 calc_end = extent_map_end(hole_em);
if (calc_end <= start || (hole_em->start > end)) {
free_extent_map(hole_em);
hole_em = NULL;
} else {
hole_start = max(hole_em->start, start);
hole_len = calc_end - hole_start;
}
}
em->bdev = NULL;
if (hole_em && range_start > hole_start) {
/* our hole starts before our delalloc, so we
* have to return just the parts of the hole
* that go until the delalloc starts
*/
em->len = min(hole_len,
range_start - hole_start);
em->start = hole_start;
em->orig_start = hole_start;
/*
* don't adjust block start at all,
* it is fixed at EXTENT_MAP_HOLE
*/
em->block_start = hole_em->block_start;
em->block_len = hole_len;
} else {
em->start = range_start;
em->len = found;
em->orig_start = range_start;
em->block_start = EXTENT_MAP_DELALLOC;
em->block_len = found;
}
} else if (hole_em) {
return hole_em;
}
out:
free_extent_map(hole_em);
if (err) {
free_extent_map(em);
return ERR_PTR(err);
}
return em;
}
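/*
* allocate an on-disk extent for a direct IO write. The extent map we
* were handed is reused when it is exactly the hole we want; otherwise
* the cached range is dropped, a new extent is reserved, the mapping is
* pinned and a DIO ordered extent is queued for it.
*/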
static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
struct extent_map *em,
u64 start, u64 len)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
struct btrfs_key ins;
u64 alloc_hint;
int ret;
bool insert = false;
/*
* Ok if the extent map we looked up is a hole and is for the exact
* range we want, there is no reason to allocate a new one, however if
* it is not right then we need to free this one and drop the cache for
* our range.
*/
if (em->block_start != EXTENT_MAP_HOLE || em->start != start ||
em->len != len) {
free_extent_map(em);
em = NULL;
insert = true;
btrfs_drop_extent_cache(inode, start, start + len - 1, 0);
}
trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
return ERR_CAST(trans);
if (start <= BTRFS_I(inode)->disk_i_size && len < 64 * 1024)
btrfs_add_inode_defrag(trans, inode);
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
alloc_hint = get_extent_allocation_hint(inode, start, len);
ret = btrfs_reserve_extent(trans, root, len, root->sectorsize, 0,
alloc_hint, &ins, 1);
if (ret) {
em = ERR_PTR(ret);
goto out;
}
if (!em) {
em = alloc_extent_map();
if (!em) {
em = ERR_PTR(-ENOMEM);
goto out;
}
}
em->start = start;
em->orig_start = em->start;
em->len = ins.offset;
em->block_start = ins.objectid;
em->block_len = ins.offset;
em->bdev = root->fs_info->fs_devices->latest_bdev;
/*
* We need to do this because if we're using the original em we searched
* for, we could have EXTENT_FLAG_VACANCY set, and we don't want that.
*/
em->flags = 0;
set_bit(EXTENT_FLAG_PINNED, &em->flags);
while (insert) {
write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em);
write_unlock(&em_tree->lock);
if (ret != -EEXIST)
break;
btrfs_drop_extent_cache(inode, start, start + em->len - 1, 0);
}
ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid,
ins.offset, ins.offset, 0);
if (ret) {
btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
em = ERR_PTR(ret);
}
out:
btrfs_end_transaction(trans, root);
return em;
}
/*
* returns 1 when the nocow is safe, < 1 on error, 0 if the
* block must be cow'd
*/
static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
struct inode *inode, u64 offset, u64 len)
{
struct btrfs_path *path;
int ret;
struct extent_buffer *leaf;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_file_extent_item *fi;
struct btrfs_key key;
u64 disk_bytenr;
u64 backref_offset;
u64 extent_end;
u64 num_bytes;
int slot;
int found_type;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
offset, 0);
if (ret < 0)
goto out;
slot = path->slots[0];
if (ret == 1) {
if (slot == 0) {
/* can't find the item, must cow */
ret = 0;
goto out;
}
slot--;
}
ret = 0;
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, slot);
if (key.objectid != btrfs_ino(inode) ||
key.type != BTRFS_EXTENT_DATA_KEY) {
/* not our file or wrong item type, must cow */
goto out;
}
if (key.offset > offset) {
/* Wrong offset, must cow */
goto out;
}
fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
found_type = btrfs_file_extent_type(leaf, fi);
if (found_type != BTRFS_FILE_EXTENT_REG &&
found_type != BTRFS_FILE_EXTENT_PREALLOC) {
/* not a regular extent, must cow */
goto out;
}
disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
backref_offset = btrfs_file_extent_offset(leaf, fi);
extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
if (extent_end < offset + len) {
/* extent doesn't include our full range, must cow */
goto out;
}
if (btrfs_extent_readonly(root, disk_bytenr))
goto out;
/*
* look for other files referencing this extent, if we
* find any we must cow
*/
if (btrfs_cross_ref_exist(trans, root, btrfs_ino(inode),
key.offset - backref_offset, disk_bytenr))
goto out;
/*
* adjust disk_bytenr and num_bytes to cover just the bytes
* in this extent we are about to write. If there
* are any csums in that range we have to cow in order
* to keep the csums correct
*/
disk_bytenr += backref_offset;
disk_bytenr += offset - key.offset;
num_bytes = min(offset + len, extent_end) - offset;
if (csum_exist_in_range(root, disk_bytenr, num_bytes))
goto out;
/*
* all of the above have passed, it is safe to overwrite this extent
* without cow
*/
ret = 1;
out:
btrfs_free_path(path);
return ret;
}
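/*
* get_block callback for O_DIRECT. Inline and compressed extents force a
* fallback to buffered IO (-ENOTBLK), plain holes on reads just unlock a
* sector, nocow/prealloc extents are written in place when
* can_nocow_odirect() says it is safe, and everything else gets a fresh
* extent from btrfs_new_extent_direct().
*/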
static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
struct extent_map *em;
struct btrfs_root *root = BTRFS_I(inode)->root;
u64 start = iblock << inode->i_blkbits;
u64 len = bh_result->b_size;
struct btrfs_trans_handle *trans;
em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
if (IS_ERR(em))
return PTR_ERR(em);
/*
* Ok for INLINE and COMPRESSED extents we need to fallback on buffered
* io. INLINE is special, and we could probably kludge it in here, but
* it's still buffered so for safety let's just fall back to the generic
* buffered path.
*
* For COMPRESSED we _have_ to read the entire extent in so we can
* decompress it, so there will be buffering required no matter what we
* do, so go ahead and fallback to buffered.
*
* We return -ENOTBLK because that's what makes DIO go ahead and go back
* to buffered IO. Don't blame me, this is the price we pay for using
* the generic code.
*/
if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
em->block_start == EXTENT_MAP_INLINE) {
free_extent_map(em);
return -ENOTBLK;
}
/* Just a good old fashioned hole, return */
if (!create && (em->block_start == EXTENT_MAP_HOLE ||
test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
free_extent_map(em);
/* DIO will do one hole at a time, so just unlock a sector */
unlock_extent(&BTRFS_I(inode)->io_tree, start,
start + root->sectorsize - 1);
return 0;
}
/*
* We don't allocate a new extent in the following cases
*
* 1) The inode is marked as NODATACOW. In this case we'll just use the
* existing extent.
* 2) The extent is marked as PREALLOC. We're good to go here and can
* just use the extent.
*
*/
if (!create) {
len = em->len - (start - em->start);
goto map;
}
if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
em->block_start != EXTENT_MAP_HOLE)) {
int type;
int ret;
u64 block_start;
if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
type = BTRFS_ORDERED_PREALLOC;
else
type = BTRFS_ORDERED_NOCOW;
len = min(len, em->len - (start - em->start));
block_start = em->block_start + (start - em->start);
/*
* we're not going to log anything, but we do need
* to make sure the current transaction stays open
* while we look for nocow cross refs
*/
trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
goto must_cow;
if (can_nocow_odirect(trans, inode, start, len) == 1) {
ret = btrfs_add_ordered_extent_dio(inode, start,
block_start, len, len, type);
btrfs_end_transaction(trans, root);
if (ret) {
free_extent_map(em);
return ret;
}
goto unlock;
}
btrfs_end_transaction(trans, root);
}
must_cow:
/*
* this will cow the extent, reset the len in case we changed
* it above
*/
len = bh_result->b_size;
em = btrfs_new_extent_direct(inode, em, start, len);
if (IS_ERR(em))
return PTR_ERR(em);
len = min(len, em->len - (start - em->start));
unlock:
clear_extent_bit(&BTRFS_I(inode)->io_tree, start, start + len - 1,
EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DIRTY, 1,
0, NULL, GFP_NOFS);
map:
bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
inode->i_blkbits;
bh_result->b_size = len;
bh_result->b_bdev = em->bdev;
set_buffer_mapped(bh_result);
if (create && !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
set_buffer_new(bh_result);
free_extent_map(em);
return 0;
}
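/*
* private state for a single direct IO request, shared by all of the
* bios it is split into and freed by the final endio handler
*/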
struct btrfs_dio_private {
struct inode *inode;
u64 logical_offset;
u64 disk_bytenr;
u64 bytes;
u32 *csums;
void *private;
/* number of bios pending for this dio */
atomic_t pending_bios;
/* IO errors */
int errors;
struct bio *orig_bio;
};
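/*
* read completion for direct IO: verify each page against the csums we
* looked up at submit time, unlock the file range and pass the result on
* to the generic dio code
*/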
static void btrfs_endio_direct_read(struct bio *bio, int err)
{
struct btrfs_dio_private *dip = bio->bi_private;
struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
struct bio_vec *bvec = bio->bi_io_vec;
struct inode *inode = dip->inode;
struct btrfs_root *root = BTRFS_I(inode)->root;
u64 start;
u32 *private = dip->csums;
start = dip->logical_offset;
do {
if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
struct page *page = bvec->bv_page;
char *kaddr;
u32 csum = ~(u32)0;
unsigned long flags;
local_irq_save(flags);
kaddr = kmap_atomic(page);
csum = btrfs_csum_data(root, kaddr + bvec->bv_offset,
csum, bvec->bv_len);
btrfs_csum_final(csum, (char *)&csum);
kunmap_atomic(kaddr);
local_irq_restore(flags);
flush_dcache_page(bvec->bv_page);
if (csum != *private) {
printk(KERN_ERR "btrfs csum failed ino %llu off"
" %llu csum %u private %u\n",
(unsigned long long)btrfs_ino(inode),
(unsigned long long)start,
csum, *private);
err = -EIO;
}
}
start += bvec->bv_len;
private++;
bvec++;
} while (bvec <= bvec_end);
unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
dip->logical_offset + dip->bytes - 1);
bio->bi_private = dip->private;
kfree(dip->csums);
kfree(dip);
/* If we had a csum failure make sure to clear the uptodate flag */
if (err)
clear_bit(BIO_UPTODATE, &bio->bi_flags);
dio_end_io(bio, err);
}
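/*
* write completion for direct IO: finish every ordered extent the bio
* covered, either marking preallocated extents written or inserting the
* new file extent items, then update i_size and hand the bio back to the
* generic dio code
*/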
static void btrfs_endio_direct_write(struct bio *bio, int err)
{
struct btrfs_dio_private *dip = bio->bi_private;
struct inode *inode = dip->inode;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
struct btrfs_ordered_extent *ordered = NULL;
struct extent_state *cached_state = NULL;
u64 ordered_offset = dip->logical_offset;
u64 ordered_bytes = dip->bytes;
int ret;
if (err)
goto out_done;
again:
ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
&ordered_offset,
ordered_bytes);
if (!ret)
goto out_test;
BUG_ON(!ordered);
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
err = -ENOMEM;
goto out;
}
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
if (test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) {
ret = btrfs_ordered_update_i_size(inode, 0, ordered);
if (!ret)
err = btrfs_update_inode_fallback(trans, root, inode);
goto out;
}
lock_extent_bits(&BTRFS_I(inode)->io_tree, ordered->file_offset,
ordered->file_offset + ordered->len - 1, 0,
&cached_state);
if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) {
ret = btrfs_mark_extent_written(trans, inode,
ordered->file_offset,
ordered->file_offset +
ordered->len);
if (ret) {
err = ret;
goto out_unlock;
}
} else {
ret = insert_reserved_file_extent(trans, inode,
ordered->file_offset,
ordered->start,
ordered->disk_len,
ordered->len,
ordered->len,
0, 0, 0,
BTRFS_FILE_EXTENT_REG);
unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
ordered->file_offset, ordered->len);
if (ret) {
err = ret;
WARN_ON(1);
goto out_unlock;
}
}
add_pending_csums(trans, inode, ordered->file_offset, &ordered->list);
ret = btrfs_ordered_update_i_size(inode, 0, ordered);
if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags))
btrfs_update_inode_fallback(trans, root, inode);
ret = 0;
out_unlock:
unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset,
ordered->file_offset + ordered->len - 1,
&cached_state, GFP_NOFS);
out:
btrfs_delalloc_release_metadata(inode, ordered->len);
btrfs_end_transaction(trans, root);
ordered_offset = ordered->file_offset + ordered->len;
btrfs_put_ordered_extent(ordered);
btrfs_put_ordered_extent(ordered);
out_test:
/*
* our bio might span multiple ordered extents. If we haven't
* completed the accounting for the whole dio, go back and try again
*/
if (ordered_offset < dip->logical_offset + dip->bytes) {
ordered_bytes = dip->logical_offset + dip->bytes -
ordered_offset;
goto again;
}
out_done:
bio->bi_private = dip->private;
kfree(dip->csums);
kfree(dip);
/* If we had an error make sure to clear the uptodate flag */
if (err)
clear_bit(BIO_UPTODATE, &bio->bi_flags);
dio_end_io(bio, err);
}
static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw,
struct bio *bio, int mirror_num,
unsigned long bio_flags, u64 offset)
{
int ret;
struct btrfs_root *root = BTRFS_I(inode)->root;
ret = btrfs_csum_one_bio(root, inode, bio, offset, 1);
BUG_ON(ret); /* -ENOMEM */
return 0;
}
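/*
* per split-bio completion handler: record any error, and once the last
* pending bio of this dio finishes, complete the original bio
*/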
static void btrfs_end_dio_bio(struct bio *bio, int err)
{
struct btrfs_dio_private *dip = bio->bi_private;
if (err) {
printk(KERN_ERR "btrfs direct IO failed ino %llu rw %lu "
"sector %#Lx len %u err no %d\n",
(unsigned long long)btrfs_ino(dip->inode), bio->bi_rw,
(unsigned long long)bio->bi_sector, bio->bi_size, err);
dip->errors = 1;
/*
* before the atomic variable goes to zero, we must make sure
* dip->errors is perceived to be set.
*/
smp_mb__before_atomic_dec();
}
/* if there are more bios still pending for this dio, just exit */
if (!atomic_dec_and_test(&dip->pending_bios))
goto out;
if (dip->errors)
bio_io_error(dip->orig_bio);
else {
set_bit(BIO_UPTODATE, &dip->orig_bio->bi_flags);
bio_endio(dip->orig_bio, 0);
}
out:
bio_put(bio);
}
static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
u64 first_sector, gfp_t gfp_flags)
{
int nr_vecs = bio_get_nr_vecs(bdev);
return btrfs_bio_alloc(bdev, first_sector, nr_vecs, gfp_flags);
}
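/*
* submit one piece of a direct IO: hook up the endio workqueue, csum
* writes (inline or via the async helpers) or look up the expected csums
* for reads, then map the bio down to the device
*/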
static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
int rw, u64 file_offset, int skip_sum,
u32 *csums, int async_submit)
{
int write = rw & REQ_WRITE;
struct btrfs_root *root = BTRFS_I(inode)->root;
int ret;
bio_get(bio);
ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
if (ret)
goto err;
if (skip_sum)
goto map;
if (write && async_submit) {
ret = btrfs_wq_submit_bio(root->fs_info,
inode, rw, bio, 0, 0,
file_offset,
__btrfs_submit_bio_start_direct_io,
__btrfs_submit_bio_done);
goto err;
} else if (write) {
/*
* If we aren't doing async submit, calculate the csum of the
* bio now.
*/
ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1);
if (ret)
goto err;
} else if (!skip_sum) {
ret = btrfs_lookup_bio_sums_dio(root, inode, bio,
file_offset, csums);
if (ret)
goto err;
}
map:
ret = btrfs_map_bio(root, rw, bio, 0, async_submit);
err:
bio_put(bio);
return ret;
}
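/*
* split the original dio bio so that no piece crosses a chunk boundary
* as reported by btrfs_map_block(), submitting each piece as it fills
* up. pending_bios keeps the dip alive until the last piece completes.
*/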
static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
int skip_sum)
{
struct inode *inode = dip->inode;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
struct bio *bio;
struct bio *orig_bio = dip->orig_bio;
struct bio_vec *bvec = orig_bio->bi_io_vec;
u64 start_sector = orig_bio->bi_sector;
u64 file_offset = dip->logical_offset;
u64 submit_len = 0;
u64 map_length;
int nr_pages = 0;
u32 *csums = dip->csums;
int ret = 0;
int async_submit = 0;
int write = rw & REQ_WRITE;
map_length = orig_bio->bi_size;
ret = btrfs_map_block(map_tree, READ, start_sector << 9,
&map_length, NULL, 0);
if (ret) {
bio_put(orig_bio);
return -EIO;
}
if (map_length >= orig_bio->bi_size) {
bio = orig_bio;
goto submit;
}
async_submit = 1;
bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
if (!bio)
return -ENOMEM;
bio->bi_private = dip;
bio->bi_end_io = btrfs_end_dio_bio;
atomic_inc(&dip->pending_bios);
while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
if (unlikely(map_length < submit_len + bvec->bv_len ||
bio_add_page(bio, bvec->bv_page, bvec->bv_len,
bvec->bv_offset) < bvec->bv_len)) {
/*
* inc the count before we submit the bio so
* we know the end IO handler won't happen before
* we inc the count. Otherwise, the dip might get freed
* before we're done setting it up
*/
atomic_inc(&dip->pending_bios);
ret = __btrfs_submit_dio_bio(bio, inode, rw,
file_offset, skip_sum,
csums, async_submit);
if (ret) {
bio_put(bio);
atomic_dec(&dip->pending_bios);
goto out_err;
}
/* Writes use the ordered csums */
if (!write && !skip_sum)
csums = csums + nr_pages;
start_sector += submit_len >> 9;
file_offset += submit_len;
submit_len = 0;
nr_pages = 0;
bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev,
start_sector, GFP_NOFS);
if (!bio)
goto out_err;
bio->bi_private = dip;
bio->bi_end_io = btrfs_end_dio_bio;
map_length = orig_bio->bi_size;
ret = btrfs_map_block(map_tree, READ, start_sector << 9,
&map_length, NULL, 0);
if (ret) {
bio_put(bio);
goto out_err;
}
} else {
submit_len += bvec->bv_len;
nr_pages++;
bvec++;
}
}
submit:
ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
csums, async_submit);
if (!ret)
return 0;
bio_put(bio);
out_err:
dip->errors = 1;
/*
* before the atomic variable goes to zero, we must
* make sure dip->errors is perceived to be set.
*/
smp_mb__before_atomic_dec();
if (atomic_dec_and_test(&dip->pending_bios))
bio_io_error(dip->orig_bio);
/* bio_end_io() will handle error, so we needn't return it */
return 0;
}
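/*
* entry point handed to __blockdev_direct_IO: build the btrfs_dio_private
* for this bio (allocating csum space for reads), pick the matching endio
* handler and submit. On failure the reserved space and ordered extent
* of a write are cleaned up here.
*/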
static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
loff_t file_offset)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_dio_private *dip;
struct bio_vec *bvec = bio->bi_io_vec;
int skip_sum;
int write = rw & REQ_WRITE;
int ret = 0;
skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
dip = kmalloc(sizeof(*dip), GFP_NOFS);
if (!dip) {
ret = -ENOMEM;
goto free_ordered;
}
dip->csums = NULL;
/* Writes use the ordered csum stuff, so we don't need dip->csums */
if (!write && !skip_sum) {
dip->csums = kmalloc(sizeof(u32) * bio->bi_vcnt, GFP_NOFS);
if (!dip->csums) {
kfree(dip);
ret = -ENOMEM;
goto free_ordered;
}
}
dip->private = bio->bi_private;
dip->inode = inode;
dip->logical_offset = file_offset;
dip->bytes = 0;
do {
dip->bytes += bvec->bv_len;
bvec++;
} while (bvec <= (bio->bi_io_vec + bio->bi_vcnt - 1));
dip->disk_bytenr = (u64)bio->bi_sector << 9;
bio->bi_private = dip;
dip->errors = 0;
dip->orig_bio = bio;
atomic_set(&dip->pending_bios, 0);
if (write)
bio->bi_end_io = btrfs_endio_direct_write;
else
bio->bi_end_io = btrfs_endio_direct_read;
ret = btrfs_submit_direct_hook(rw, dip, skip_sum);
if (!ret)
return;
free_ordered:
/*
* If this is a write, we need to clean up the reserved space and kill
* the ordered extent.
*/
if (write) {
struct btrfs_ordered_extent *ordered;
ordered = btrfs_lookup_ordered_extent(inode, file_offset);
if (!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags) &&
!test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags))
btrfs_free_reserved_extent(root, ordered->start,
ordered->disk_len);
btrfs_put_ordered_extent(ordered);
btrfs_put_ordered_extent(ordered);
}
bio_endio(bio, ret);
}
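/*
* sanity check a direct IO request: the file offset and every iovec must
* be sector aligned, and reads reject duplicate iov_base pointers since
* those would trip the csum verification on completion
*/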
static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb,
const struct iovec *iov, loff_t offset,
unsigned long nr_segs)
{
int seg;
int i;
size_t size;
unsigned long addr;
unsigned blocksize_mask = root->sectorsize - 1;
ssize_t retval = -EINVAL;
loff_t end = offset;
if (offset & blocksize_mask)
goto out;
/* Check the memory alignment. Blocks cannot straddle pages */
for (seg = 0; seg < nr_segs; seg++) {
addr = (unsigned long)iov[seg].iov_base;
size = iov[seg].iov_len;
end += size;
if ((addr & blocksize_mask) || (size & blocksize_mask))
goto out;
/* If this is a write we don't need to check anymore */
if (rw & WRITE)
continue;
/*
* Check to make sure we don't have duplicate iov_base's in this
* iovec, if so return EINVAL, otherwise we'll get csum errors
* when reading back.
*/
for (i = seg + 1; i < nr_segs; i++) {
if (iov[seg].iov_base == iov[i].iov_base)
goto out;
}
}
retval = 0;
out:
return retval;
}
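/*
* top level direct IO routine: lock the range, flush any ordered extents
* inside it, tag writes as delalloc for accounting, then let
* __blockdev_direct_IO drive btrfs_get_blocks_direct/btrfs_submit_direct.
* A failed or partial IO unlocks whatever part of the range was not used.
*/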
static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
const struct iovec *iov, loff_t offset,
unsigned long nr_segs)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
struct btrfs_ordered_extent *ordered;
struct extent_state *cached_state = NULL;
u64 lockstart, lockend;
ssize_t ret;
int writing = rw & WRITE;
int write_bits = 0;
size_t count = iov_length(iov, nr_segs);
if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov,
offset, nr_segs)) {
return 0;
}
lockstart = offset;
lockend = offset + count - 1;
if (writing) {
ret = btrfs_delalloc_reserve_space(inode, count);
if (ret)
goto out;
}
while (1) {
lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
0, &cached_state);
/*
* We're concerned with the entire range that we're going to be
* doing DIO to, so we need to make sure there's no ordered
* extents in this range.
*/
ordered = btrfs_lookup_ordered_range(inode, lockstart,
lockend - lockstart + 1);
if (!ordered)
break;
unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
&cached_state, GFP_NOFS);
btrfs_start_ordered_extent(inode, ordered, 1);
btrfs_put_ordered_extent(ordered);
cond_resched();
}
/*
* we don't use btrfs_set_extent_delalloc because we don't want
* the dirty or uptodate bits
*/
if (writing) {
write_bits = EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING;
ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
EXTENT_DELALLOC, NULL, &cached_state,
GFP_NOFS);
if (ret) {
clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
lockend, EXTENT_LOCKED | write_bits,
1, 0, &cached_state, GFP_NOFS);
goto out;
}
}
free_extent_state(cached_state);
cached_state = NULL;
ret = __blockdev_direct_IO(rw, iocb, inode,
BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
iov, offset, nr_segs, btrfs_get_blocks_direct, NULL,
btrfs_submit_direct, 0);
if (ret < 0 && ret != -EIOCBQUEUED) {
clear_extent_bit(&BTRFS_I(inode)->io_tree, offset,
offset + iov_length(iov, nr_segs) - 1,
EXTENT_LOCKED | write_bits, 1, 0,
&cached_state, GFP_NOFS);
} else if (ret >= 0 && ret < iov_length(iov, nr_segs)) {
/*
* We're falling back to buffered, unlock the section we didn't
* do IO on.
*/
clear_extent_bit(&BTRFS_I(inode)->io_tree, offset + ret,
offset + iov_length(iov, nr_segs) - 1,
EXTENT_LOCKED | write_bits, 1, 0,
&cached_state, GFP_NOFS);
}
out:
free_extent_state(cached_state);
return ret;
}
static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
__u64 start, __u64 len)
{
return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap);
}
int btrfs_readpage(struct file *file, struct page *page)
{
struct extent_io_tree *tree;
tree = &BTRFS_I(page->mapping->host)->io_tree;
return extent_read_full_page(tree, page, btrfs_get_extent, 0);
}
static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
{
struct extent_io_tree *tree;
if (current->flags & PF_MEMALLOC) {
redirty_page_for_writepage(wbc, page);
unlock_page(page);
return 0;
}
tree = &BTRFS_I(page->mapping->host)->io_tree;
return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
}
int btrfs_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
struct extent_io_tree *tree;
tree = &BTRFS_I(mapping->host)->io_tree;
return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
}
static int
btrfs_readpages(struct file *file, struct address_space *mapping,
struct list_head *pages, unsigned nr_pages)
{
struct extent_io_tree *tree;
tree = &BTRFS_I(mapping->host)->io_tree;
return extent_readpages(tree, mapping, pages, nr_pages,
btrfs_get_extent);
}
static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
{
struct extent_io_tree *tree;
struct extent_map_tree *map;
int ret;
tree = &BTRFS_I(page->mapping->host)->io_tree;
map = &BTRFS_I(page->mapping->host)->extent_tree;
ret = try_release_extent_mapping(map, tree, page, gfp_flags);
if (ret == 1) {
ClearPagePrivate(page);
set_page_private(page, 0);
page_cache_release(page);
}
return ret;
}
static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
{
if (PageWriteback(page) || PageDirty(page))
return 0;
return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
}
static void btrfs_invalidatepage(struct page *page, unsigned long offset)
{
struct extent_io_tree *tree;
struct btrfs_ordered_extent *ordered;
struct extent_state *cached_state = NULL;
u64 page_start = page_offset(page);
u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
/*
* we have the page locked, so new writeback can't start,
* and the dirty bit won't be cleared while we are here.
*
* Wait for IO on this page so that we can safely clear
* the PagePrivate2 bit and do ordered accounting
*/
wait_on_page_writeback(page);
tree = &BTRFS_I(page->mapping->host)->io_tree;
if (offset) {
btrfs_releasepage(page, GFP_NOFS);
return;
}
lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
ordered = btrfs_lookup_ordered_extent(page->mapping->host,
page_offset(page));
if (ordered) {
/*
* IO on this page will never be started, so we need
* to account for any ordered extents now
*/
clear_extent_bit(tree, page_start, page_end,
EXTENT_DIRTY | EXTENT_DELALLOC |
EXTENT_LOCKED | EXTENT_DO_ACCOUNTING, 1, 0,
&cached_state, GFP_NOFS);
/*
* whoever cleared the private bit is responsible
* for the finish_ordered_io
*/
if (TestClearPagePrivate2(page)) {
btrfs_finish_ordered_io(page->mapping->host,
page_start, page_end);
}
btrfs_put_ordered_extent(ordered);
cached_state = NULL;
lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
}
clear_extent_bit(tree, page_start, page_end,
EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
EXTENT_DO_ACCOUNTING, 1, 1, &cached_state, GFP_NOFS);
__btrfs_releasepage(page, GFP_NOFS);
ClearPageChecked(page);
if (PagePrivate(page)) {
ClearPagePrivate(page);
set_page_private(page, 0);
page_cache_release(page);
}
}
/*
* btrfs_page_mkwrite() is not allowed to change the file size as it gets
* called from a page fault handler when a page is first dirtied. Hence we must
* be careful to check for EOF conditions here. We set the page up correctly
* for a written page which means we get ENOSPC checking when writing into
* holes and correct delalloc and unwritten extent mapping on filesystems that
* support these features.
*
* We are not allowed to take the i_mutex here so we have to play games to
* protect against truncate races as the page could now be beyond EOF. Because
* vmtruncate() writes the inode size before removing pages, once we have the
* page lock we can determine safely if the page is beyond EOF. If it is not
* beyond EOF, then the page is guaranteed safe against truncation until we
* unlock the page.
*/
int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct page *page = vmf->page;
struct inode *inode = fdentry(vma->vm_file)->d_inode;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct btrfs_ordered_extent *ordered;
struct extent_state *cached_state = NULL;
char *kaddr;
unsigned long zero_start;
loff_t size;
int ret;
int reserved = 0;
u64 page_start;
u64 page_end;
ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
if (!ret) {
ret = btrfs_update_time(vma->vm_file);
reserved = 1;
}
if (ret) {
if (ret == -ENOMEM)
ret = VM_FAULT_OOM;
else /* -ENOSPC, -EIO, etc */
ret = VM_FAULT_SIGBUS;
if (reserved)
goto out;
goto out_noreserve;
}
ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
again:
lock_page(page);
size = i_size_read(inode);
page_start = page_offset(page);
page_end = page_start + PAGE_CACHE_SIZE - 1;
if ((page->mapping != inode->i_mapping) ||
(page_start >= size)) {
/* page got truncated out from underneath us */
goto out_unlock;
}
wait_on_page_writeback(page);
lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
set_page_extent_mapped(page);
/*
* we can't set the delalloc bits if there are pending ordered
* extents. Drop our locks and wait for them to finish
*/
ordered = btrfs_lookup_ordered_extent(inode, page_start);
if (ordered) {
unlock_extent_cached(io_tree, page_start, page_end,
&cached_state, GFP_NOFS);
unlock_page(page);
btrfs_start_ordered_extent(inode, ordered, 1);
btrfs_put_ordered_extent(ordered);
goto again;
}
/*
* XXX - page_mkwrite gets called every time the page is dirtied, even
* if it was already dirty, so for space accounting reasons we need to
* clear any delalloc bits for the range we are fixing to save. There
* is probably a better way to do this, but for now keep it consistent with
* prepare_pages in the normal write path.
*/
clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
0, 0, &cached_state, GFP_NOFS);
ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
&cached_state);
if (ret) {
unlock_extent_cached(io_tree, page_start, page_end,
&cached_state, GFP_NOFS);
ret = VM_FAULT_SIGBUS;
goto out_unlock;
}
ret = 0;
/* page is wholly or partially inside EOF */
if (page_start + PAGE_CACHE_SIZE > size)
zero_start = size & ~PAGE_CACHE_MASK;
else
zero_start = PAGE_CACHE_SIZE;
if (zero_start != PAGE_CACHE_SIZE) {
kaddr = kmap(page);
memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
flush_dcache_page(page);
kunmap(page);
}
ClearPageChecked(page);
set_page_dirty(page);
SetPageUptodate(page);
BTRFS_I(inode)->last_trans = root->fs_info->generation;
BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);
out_unlock:
if (!ret)
return VM_FAULT_LOCKED;
unlock_page(page);
out:
btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
out_noreserve:
return ret;
}
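/*
* truncate the inode down to i_size: zero the tail page, wait for ordered
* IO past the new size, then drop file extent items in restartable
* transactions (see the reservation dance documented below)
*/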
static int btrfs_truncate(struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_block_rsv *rsv;
int ret;
int err = 0;
struct btrfs_trans_handle *trans;
unsigned long nr;
u64 mask = root->sectorsize - 1;
u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
ret = btrfs_truncate_page(inode->i_mapping, inode->i_size);
if (ret)
return ret;
btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
/*
* Yes ladies and gentlemen, this is indeed ugly. The fact is we have
* 3 things going on here
*
* 1) We need to reserve space for our orphan item and the space to
* delete our orphan item. Lord knows we don't want to have a dangling
* orphan item because we didn't reserve space to remove it.
*
* 2) We need to reserve space to update our inode.
*
* 3) We need to have something to cache all the space that is going to
* be free'd up by the truncate operation, but also have some slack
* space reserved in case it uses space during the truncate (thank you
* very much snapshotting).
*
* And we need these to all be separate. The fact is we can use a lot of
* space doing the truncate, and we have no earthly idea how much space
* we will use, so we need the truncate reservation to be separate so it
* doesn't end up using space reserved for updating the inode or
* removing the orphan item. We also need to be able to stop the
* transaction and start a new one, which means we need to be able to
* update the inode several times, and we have no way of knowing how
* many times that will be, so we can't just reserve 1 item for the
* entirety of the operation, so that has to be done separately as well.
* Then there is the orphan item, which does indeed need to be held on
* to for the whole operation, and we need nobody to touch this reserved
* space except the orphan code.
*
* So that leaves us with
*
* 1) root->orphan_block_rsv - for the orphan deletion.
* 2) rsv - for the truncate reservation, which we will steal from the
* transaction reservation.
* 3) fs_info->trans_block_rsv - this will have 1 items worth left for
* updating the inode.
*/
rsv = btrfs_alloc_block_rsv(root);
if (!rsv)
return -ENOMEM;
rsv->size = min_size;
/*
* 1 for the truncate slack space
* 1 for the orphan item we're going to add
* 1 for the orphan item deletion
* 1 for updating the inode.
*/
trans = btrfs_start_transaction(root, 4);
if (IS_ERR(trans)) {
err = PTR_ERR(trans);
goto out;
}
/* Migrate the slack space for the truncate to our reserve */
ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
min_size);
BUG_ON(ret);
ret = btrfs_orphan_add(trans, inode);
if (ret) {
btrfs_end_transaction(trans, root);
goto out;
}
/*
* setattr is responsible for setting the ordered_data_close flag,
* but that is only tested during the last file release. That
* could happen well after the next commit, leaving a great big
* window where new writes may get lost if someone chooses to write
* to this file after truncating to zero
*
* The inode doesn't have any dirty data here, and so if we commit
* this is a noop. If someone immediately starts writing to the inode
* it is very likely we'll catch some of their writes in this
* transaction, and the commit will find this file on the ordered
* data list with good things to send down.
*
* This is a best effort solution, there is still a window where
* using truncate to replace the contents of the file will
* end up with a zero length file after a crash.
*/
if (inode->i_size == 0 && BTRFS_I(inode)->ordered_data_close)
btrfs_add_ordered_operation(trans, root, inode);
while (1) {
ret = btrfs_block_rsv_refill(root, rsv, min_size);
if (ret) {
/*
* This can only happen with the original transaction we
* started above, every other time we shouldn't have a
* transaction started yet.
*/
if (ret == -EAGAIN)
goto end_trans;
err = ret;
break;
}
if (!trans) {
/* Just need the 1 for updating the inode */
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans)) {
ret = err = PTR_ERR(trans);
trans = NULL;
break;
}
}
trans->block_rsv = rsv;
ret = btrfs_truncate_inode_items(trans, root, inode,
inode->i_size,
BTRFS_EXTENT_DATA_KEY);
if (ret != -EAGAIN) {
err = ret;
break;
}
trans->block_rsv = &root->fs_info->trans_block_rsv;
ret = btrfs_update_inode(trans, root, inode);
if (ret) {
err = ret;
break;
}
end_trans:
nr = trans->blocks_used;
btrfs_end_transaction(trans, root);
trans = NULL;
btrfs_btree_balance_dirty(root, nr);
}
if (ret == 0 && inode->i_nlink > 0) {
trans->block_rsv = root->orphan_block_rsv;
ret = btrfs_orphan_del(trans, inode);
if (ret)
err = ret;
} else if (ret && inode->i_nlink > 0) {
/*
* Failed to do the truncate, remove us from the in memory
* orphan list.
*/
ret = btrfs_orphan_del(NULL, inode);
}
if (trans) {
trans->block_rsv = &root->fs_info->trans_block_rsv;
ret = btrfs_update_inode(trans, root, inode);
if (ret && !err)
err = ret;
nr = trans->blocks_used;
ret = btrfs_end_transaction(trans, root);
btrfs_btree_balance_dirty(root, nr);
}
out:
btrfs_free_block_rsv(root, rsv);
if (ret && !err)
err = ret;
return err;
}
/*
* create a new subvolume directory/inode (helper for the ioctl).
*/
int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
struct btrfs_root *new_root, u64 new_dirid)
{
struct inode *inode;
int err;
u64 index = 0;
inode = btrfs_new_inode(trans, new_root, NULL, "..", 2,
new_dirid, new_dirid,
S_IFDIR | (~current_umask() & S_IRWXUGO),
&index);
if (IS_ERR(inode))
return PTR_ERR(inode);
inode->i_op = &btrfs_dir_inode_operations;
inode->i_fop = &btrfs_dir_file_operations;
set_nlink(inode, 1);
btrfs_i_size_write(inode, 0);
err = btrfs_update_inode(trans, new_root, inode);
iput(inode);
return err;
}
struct inode *btrfs_alloc_inode(struct super_block *sb)
{
struct btrfs_inode *ei;
struct inode *inode;
ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
if (!ei)
return NULL;
ei->root = NULL;
ei->space_info = NULL;
ei->generation = 0;
ei->sequence = 0;
ei->last_trans = 0;
ei->last_sub_trans = 0;
ei->logged_trans = 0;
ei->delalloc_bytes = 0;
ei->disk_i_size = 0;
ei->flags = 0;
ei->csum_bytes = 0;
ei->index_cnt = (u64)-1;
ei->last_unlink_trans = 0;
spin_lock_init(&ei->lock);
ei->outstanding_extents = 0;
ei->reserved_extents = 0;
ei->ordered_data_close = 0;
ei->orphan_meta_reserved = 0;
ei->dummy_inode = 0;
ei->in_defrag = 0;
ei->delalloc_meta_reserved = 0;
ei->force_compress = BTRFS_COMPRESS_NONE;
ei->delayed_node = NULL;
inode = &ei->vfs_inode;
extent_map_tree_init(&ei->extent_tree);
extent_io_tree_init(&ei->io_tree, &inode->i_data);
extent_io_tree_init(&ei->io_failure_tree, &inode->i_data);
ei->io_tree.track_uptodate = 1;
ei->io_failure_tree.track_uptodate = 1;
mutex_init(&ei->log_mutex);
mutex_init(&ei->delalloc_mutex);
btrfs_ordered_inode_tree_init(&ei->ordered_tree);
INIT_LIST_HEAD(&ei->i_orphan);
INIT_LIST_HEAD(&ei->delalloc_inodes);
INIT_LIST_HEAD(&ei->ordered_operations);
RB_CLEAR_NODE(&ei->rb_node);
return inode;
}
static void btrfs_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
}
void btrfs_destroy_inode(struct inode *inode)
{
struct btrfs_ordered_extent *ordered;
struct btrfs_root *root = BTRFS_I(inode)->root;
WARN_ON(!list_empty(&inode->i_dentry));
WARN_ON(inode->i_data.nrpages);
WARN_ON(BTRFS_I(inode)->outstanding_extents);
WARN_ON(BTRFS_I(inode)->reserved_extents);
WARN_ON(BTRFS_I(inode)->delalloc_bytes);
WARN_ON(BTRFS_I(inode)->csum_bytes);
/*
* This can happen where we create an inode, but somebody else also
* created the same inode and we need to destroy the one we already
* created.
*/
if (!root)
goto free;
/*
* Make sure we're properly removed from the ordered operation
* lists.
*/
smp_mb();
if (!list_empty(&BTRFS_I(inode)->ordered_operations)) {
spin_lock(&root->fs_info->ordered_extent_lock);
list_del_init(&BTRFS_I(inode)->ordered_operations);
spin_unlock(&root->fs_info->ordered_extent_lock);
}
spin_lock(&root->orphan_lock);
if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
printk(KERN_INFO "BTRFS: inode %llu still on the orphan list\n",
(unsigned long long)btrfs_ino(inode));
list_del_init(&BTRFS_I(inode)->i_orphan);
}
spin_unlock(&root->orphan_lock);
while (1) {
ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
if (!ordered)
break;
else {
printk(KERN_ERR "btrfs found ordered "
"extent %llu %llu on inode cleanup\n",
(unsigned long long)ordered->file_offset,
(unsigned long long)ordered->len);
btrfs_remove_ordered_extent(inode, ordered);
btrfs_put_ordered_extent(ordered);
btrfs_put_ordered_extent(ordered);
}
}
inode_tree_del(inode);
btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
free:
btrfs_remove_delayed_node(inode);
call_rcu(&inode->i_rcu, btrfs_i_callback);
}
int btrfs_drop_inode(struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
if (btrfs_root_refs(&root->root_item) == 0 &&
!btrfs_is_free_space_inode(root, inode))
return 1;
else
return generic_drop_inode(inode);
}
static void init_once(void *foo)
{
struct btrfs_inode *ei = (struct btrfs_inode *) foo;
inode_init_once(&ei->vfs_inode);
}
void btrfs_destroy_cachep(void)
{
if (btrfs_inode_cachep)
kmem_cache_destroy(btrfs_inode_cachep);
if (btrfs_trans_handle_cachep)
kmem_cache_destroy(btrfs_trans_handle_cachep);
if (btrfs_transaction_cachep)
kmem_cache_destroy(btrfs_transaction_cachep);
if (btrfs_path_cachep)
kmem_cache_destroy(btrfs_path_cachep);
if (btrfs_free_space_cachep)
kmem_cache_destroy(btrfs_free_space_cachep);
}
int btrfs_init_cachep(void)
{
btrfs_inode_cachep = kmem_cache_create("btrfs_inode_cache",
sizeof(struct btrfs_inode), 0,
SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once);
if (!btrfs_inode_cachep)
goto fail;
btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle_cache",
sizeof(struct btrfs_trans_handle), 0,
SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
if (!btrfs_trans_handle_cachep)
goto fail;
btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction_cache",
sizeof(struct btrfs_transaction), 0,
SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
if (!btrfs_transaction_cachep)
goto fail;
btrfs_path_cachep = kmem_cache_create("btrfs_path_cache",
sizeof(struct btrfs_path), 0,
SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
if (!btrfs_path_cachep)
goto fail;
btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space_cache",
sizeof(struct btrfs_free_space), 0,
SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
if (!btrfs_free_space_cachep)
goto fail;
return 0;
fail:
btrfs_destroy_cachep();
return -ENOMEM;
}
static int btrfs_getattr(struct vfsmount *mnt,
struct dentry *dentry, struct kstat *stat)
{
struct inode *inode = dentry->d_inode;
u32 blocksize = inode->i_sb->s_blocksize;
generic_fillattr(inode, stat);
stat->dev = BTRFS_I(inode)->root->anon_dev;
stat->blksize = PAGE_CACHE_SIZE;
stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) +
ALIGN(BTRFS_I(inode)->delalloc_bytes, blocksize)) >> 9;
return 0;
}
/*
* If a file is moved, it will inherit the cow and compression flags of the new
* directory.
*/
static void fixup_inode_flags(struct inode *dir, struct inode *inode)
{
struct btrfs_inode *b_dir = BTRFS_I(dir);
struct btrfs_inode *b_inode = BTRFS_I(inode);
if (b_dir->flags & BTRFS_INODE_NODATACOW)
b_inode->flags |= BTRFS_INODE_NODATACOW;
else
b_inode->flags &= ~BTRFS_INODE_NODATACOW;
if (b_dir->flags & BTRFS_INODE_COMPRESS)
b_inode->flags |= BTRFS_INODE_COMPRESS;
else
b_inode->flags &= ~BTRFS_INODE_COMPRESS;
}
static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(old_dir)->root;
struct btrfs_root *dest = BTRFS_I(new_dir)->root;
struct inode *new_inode = new_dentry->d_inode;
struct inode *old_inode = old_dentry->d_inode;
struct timespec ctime = CURRENT_TIME;
u64 index = 0;
u64 root_objectid;
int ret;
u64 old_ino = btrfs_ino(old_inode);
if (btrfs_ino(new_dir) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
return -EPERM;
/* we only allow rename subvolume link between subvolumes */
if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
return -EXDEV;
if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
(new_inode && btrfs_ino(new_inode) == BTRFS_FIRST_FREE_OBJECTID))
return -ENOTEMPTY;
if (S_ISDIR(old_inode->i_mode) && new_inode &&
new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
return -ENOTEMPTY;
/*
* we're using rename to replace one file with another,
* and the replacement file is large. Start IO on it now so
* we don't add too much work to the end of the transaction
*/
if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size &&
old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
filemap_flush(old_inode->i_mapping);
/* close the racy window with snapshot create/destroy ioctl */
if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
down_read(&root->fs_info->subvol_sem);
/*
* We want to reserve the absolute worst case amount of items. So if
* both inodes are subvols and we need to unlink them then that would
* require 4 item modifications, but if they are both normal inodes it
* would require 5 item modifications, so we'll assume they're normal
* inodes. So 5 * 2 is 10, plus 1 for the new link, so 11 total items
* should cover the worst case number of items we'll modify.
*/
trans = btrfs_start_transaction(root, 20);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
goto out_notrans;
}
if (dest != root)
btrfs_record_root_in_trans(trans, dest);
ret = btrfs_set_inode_index(new_dir, &index);
if (ret)
goto out_fail;
if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
/* force full log commit if subvolume involved. */
root->fs_info->last_trans_log_full_commit = trans->transid;
} else {
ret = btrfs_insert_inode_ref(trans, dest,
new_dentry->d_name.name,
new_dentry->d_name.len,
old_ino,
btrfs_ino(new_dir), index);
if (ret)
goto out_fail;
/*
* this is an ugly little race, but the rename is required
* to make sure that if we crash, the inode is either at the
* old name or the new one. pinning the log transaction lets
* us make sure we don't allow a log commit to come in after
* we unlink the name but before we add the new name back in.
*/
btrfs_pin_log_trans(root);
}
/*
* make sure the inode gets flushed if it is replacing
* something.
*/
if (new_inode && new_inode->i_size && S_ISREG(old_inode->i_mode))
btrfs_add_ordered_operation(trans, root, old_inode);
old_dir->i_ctime = old_dir->i_mtime = ctime;
new_dir->i_ctime = new_dir->i_mtime = ctime;
old_inode->i_ctime = ctime;
if (old_dentry->d_parent != new_dentry->d_parent)
btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);
if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
old_dentry->d_name.name,
old_dentry->d_name.len);
} else {
ret = __btrfs_unlink_inode(trans, root, old_dir,
old_dentry->d_inode,
old_dentry->d_name.name,
old_dentry->d_name.len);
if (!ret)
ret = btrfs_update_inode(trans, root, old_inode);
}
if (ret) {
btrfs_abort_transaction(trans, root, ret);
goto out_fail;
}
if (new_inode) {
new_inode->i_ctime = CURRENT_TIME;
if (unlikely(btrfs_ino(new_inode) ==
BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
root_objectid = BTRFS_I(new_inode)->location.objectid;
ret = btrfs_unlink_subvol(trans, dest, new_dir,
root_objectid,
new_dentry->d_name.name,
new_dentry->d_name.len);
BUG_ON(new_inode->i_nlink == 0);
} else {
ret = btrfs_unlink_inode(trans, dest, new_dir,
new_dentry->d_inode,
new_dentry->d_name.name,
new_dentry->d_name.len);
}
if (!ret && new_inode->i_nlink == 0) {
ret = btrfs_orphan_add(trans, new_dentry->d_inode);
BUG_ON(ret);
}
if (ret) {
btrfs_abort_transaction(trans, root, ret);
goto out_fail;
}
}
fixup_inode_flags(new_dir, old_inode);
ret = btrfs_add_link(trans, new_dir, old_inode,
new_dentry->d_name.name,
new_dentry->d_name.len, 0, index);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
goto out_fail;
}
if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
struct dentry *parent = new_dentry->d_parent;
btrfs_log_new_name(trans, old_inode, old_dir, parent);
btrfs_end_log_trans(root);
}
out_fail:
btrfs_end_transaction(trans, root);
out_notrans:
if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
up_read(&root->fs_info->subvol_sem);
return ret;
}
/*
* some fairly slow code that needs optimization. This walks the list
* of all the inodes with pending delalloc and forces them to disk.
*/
int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
{
struct list_head *head = &root->fs_info->delalloc_inodes;
struct btrfs_inode *binode;
struct inode *inode;
if (root->fs_info->sb->s_flags & MS_RDONLY)
return -EROFS;
spin_lock(&root->fs_info->delalloc_lock);
while (!list_empty(head)) {
binode = list_entry(head->next, struct btrfs_inode,
delalloc_inodes);
inode = igrab(&binode->vfs_inode);
if (!inode)
list_del_init(&binode->delalloc_inodes);
spin_unlock(&root->fs_info->delalloc_lock);
if (inode) {
filemap_flush(inode->i_mapping);
if (delay_iput)
btrfs_add_delayed_iput(inode);
else
iput(inode);
}
cond_resched();
spin_lock(&root->fs_info->delalloc_lock);
}
spin_unlock(&root->fs_info->delalloc_lock);
/* the filemap_flush will queue IO into the worker threads, but
* we have to make sure the IO is actually started and that
* ordered extents get created before we return
*/
atomic_inc(&root->fs_info->async_submit_draining);
while (atomic_read(&root->fs_info->nr_async_submits) ||
atomic_read(&root->fs_info->async_delalloc_pages)) {
wait_event(root->fs_info->async_submit_wait,
(atomic_read(&root->fs_info->nr_async_submits) == 0 &&
atomic_read(&root->fs_info->async_delalloc_pages) == 0));
}
atomic_dec(&root->fs_info->async_submit_draining);
return 0;
}
static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
const char *symname)
{
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_path *path;
struct btrfs_key key;
struct inode *inode = NULL;
int err;
int drop_inode = 0;
u64 objectid;
u64 index = 0;
int name_len;
int datasize;
unsigned long ptr;
struct btrfs_file_extent_item *ei;
struct extent_buffer *leaf;
unsigned long nr = 0;
name_len = strlen(symname) + 1;
if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
return -ENAMETOOLONG;
/*
* 2 items for inode item and ref
* 2 items for dir items
* 1 item for xattr if selinux is on
*/
trans = btrfs_start_transaction(root, 5);
if (IS_ERR(trans))
return PTR_ERR(trans);
err = btrfs_find_free_ino(root, &objectid);
if (err)
goto out_unlock;
inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
dentry->d_name.len, btrfs_ino(dir), objectid,
S_IFLNK|S_IRWXUGO, &index);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto out_unlock;
}
err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
if (err) {
drop_inode = 1;
goto out_unlock;
}
/*
* If the active LSM wants to access the inode during
* d_instantiate it needs these. Smack checks to see
* if the filesystem supports xattrs by looking at the
* ops vector.
*/
inode->i_fop = &btrfs_file_operations;
inode->i_op = &btrfs_file_inode_operations;
err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
if (err)
drop_inode = 1;
else {
inode->i_mapping->a_ops = &btrfs_aops;
inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
}
if (drop_inode)
goto out_unlock;
path = btrfs_alloc_path();
if (!path) {
err = -ENOMEM;
drop_inode = 1;
goto out_unlock;
}
key.objectid = btrfs_ino(inode);
key.offset = 0;
btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
datasize = btrfs_file_extent_calc_inline_size(name_len);
err = btrfs_insert_empty_item(trans, root, path, &key,
datasize);
if (err) {
drop_inode = 1;
btrfs_free_path(path);
goto out_unlock;
}
leaf = path->nodes[0];
ei = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
btrfs_set_file_extent_generation(leaf, ei, trans->transid);
btrfs_set_file_extent_type(leaf, ei,
BTRFS_FILE_EXTENT_INLINE);
btrfs_set_file_extent_encryption(leaf, ei, 0);
btrfs_set_file_extent_compression(leaf, ei, 0);
btrfs_set_file_extent_other_encoding(leaf, ei, 0);
btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
ptr = btrfs_file_extent_inline_start(ei);
write_extent_buffer(leaf, symname, ptr, name_len);
btrfs_mark_buffer_dirty(leaf);
btrfs_free_path(path);
inode->i_op = &btrfs_symlink_inode_operations;
inode->i_mapping->a_ops = &btrfs_symlink_aops;
inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
inode_set_bytes(inode, name_len);
btrfs_i_size_write(inode, name_len - 1);
err = btrfs_update_inode(trans, root, inode);
if (err)
drop_inode = 1;
out_unlock:
if (!err)
d_instantiate(dentry, inode);
nr = trans->blocks_used;
btrfs_end_transaction(trans, root);
if (drop_inode) {
inode_dec_link_count(inode);
iput(inode);
}
btrfs_btree_balance_dirty(root, nr);
return err;
}
static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
u64 start, u64 num_bytes, u64 min_size,
loff_t actual_len, u64 *alloc_hint,
struct btrfs_trans_handle *trans)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_key ins;
u64 cur_offset = start;
u64 i_size;
int ret = 0;
bool own_trans = true;
if (trans)
own_trans = false;
while (num_bytes > 0) {
if (own_trans) {
trans = btrfs_start_transaction(root, 3);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
break;
}
}
ret = btrfs_reserve_extent(trans, root, num_bytes, min_size,
0, *alloc_hint, &ins, 1);
if (ret) {
if (own_trans)
btrfs_end_transaction(trans, root);
break;
}
ret = insert_reserved_file_extent(trans, inode,
cur_offset, ins.objectid,
ins.offset, ins.offset,
ins.offset, 0, 0, 0,
BTRFS_FILE_EXTENT_PREALLOC);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
if (own_trans)
btrfs_end_transaction(trans, root);
break;
}
btrfs_drop_extent_cache(inode, cur_offset,
cur_offset + ins.offset - 1, 0);
num_bytes -= ins.offset;
cur_offset += ins.offset;
*alloc_hint = ins.objectid + ins.offset;
inode->i_ctime = CURRENT_TIME;
BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
if (!(mode & FALLOC_FL_KEEP_SIZE) &&
(actual_len > inode->i_size) &&
(cur_offset > inode->i_size)) {
if (cur_offset > actual_len)
i_size = actual_len;
else
i_size = cur_offset;
i_size_write(inode, i_size);
btrfs_ordered_update_i_size(inode, i_size, NULL);
}
ret = btrfs_update_inode(trans, root, inode);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
if (own_trans)
btrfs_end_transaction(trans, root);
break;
}
if (own_trans)
btrfs_end_transaction(trans, root);
}
return ret;
}
int btrfs_prealloc_file_range(struct inode *inode, int mode,
u64 start, u64 num_bytes, u64 min_size,
loff_t actual_len, u64 *alloc_hint)
{
return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
min_size, actual_len, alloc_hint,
NULL);
}
int btrfs_prealloc_file_range_trans(struct inode *inode,
struct btrfs_trans_handle *trans, int mode,
u64 start, u64 num_bytes, u64 min_size,
loff_t actual_len, u64 *alloc_hint)
{
return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
min_size, actual_len, alloc_hint, trans);
}
static int btrfs_set_page_dirty(struct page *page)
{
return __set_page_dirty_nobuffers(page);
}
static int btrfs_permission(struct inode *inode, int mask)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
umode_t mode = inode->i_mode;
if (mask & MAY_WRITE &&
(S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
if (btrfs_root_readonly(root))
return -EROFS;
if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
return -EACCES;
}
return generic_permission(inode, mask);
}
static const struct inode_operations btrfs_dir_inode_operations = {
.getattr = btrfs_getattr,
.lookup = btrfs_lookup,
.create = btrfs_create,
.unlink = btrfs_unlink,
.link = btrfs_link,
.mkdir = btrfs_mkdir,
.rmdir = btrfs_rmdir,
.rename = btrfs_rename,
.symlink = btrfs_symlink,
.setattr = btrfs_setattr,
.mknod = btrfs_mknod,
.setxattr = btrfs_setxattr,
.getxattr = btrfs_getxattr,
.listxattr = btrfs_listxattr,
.removexattr = btrfs_removexattr,
.permission = btrfs_permission,
.get_acl = btrfs_get_acl,
};
static const struct inode_operations btrfs_dir_ro_inode_operations = {
.lookup = btrfs_lookup,
.permission = btrfs_permission,
.get_acl = btrfs_get_acl,
};
static const struct file_operations btrfs_dir_file_operations = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
.readdir = btrfs_real_readdir,
.unlocked_ioctl = btrfs_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = btrfs_ioctl,
#endif
.release = btrfs_release_file,
.fsync = btrfs_sync_file,
};
static struct extent_io_ops btrfs_extent_io_ops = {
.fill_delalloc = run_delalloc_range,
.submit_bio_hook = btrfs_submit_bio_hook,
.merge_bio_hook = btrfs_merge_bio_hook,
.readpage_end_io_hook = btrfs_readpage_end_io_hook,
.writepage_end_io_hook = btrfs_writepage_end_io_hook,
.writepage_start_hook = btrfs_writepage_start_hook,
.set_bit_hook = btrfs_set_bit_hook,
.clear_bit_hook = btrfs_clear_bit_hook,
.merge_extent_hook = btrfs_merge_extent_hook,
.split_extent_hook = btrfs_split_extent_hook,
};
/*
* btrfs doesn't support the bmap operation because swapfiles
* use bmap to make a mapping of extents in the file. They assume
* these extents won't change over the life of the file and they
* use the bmap result to do IO directly to the drive.
*
* the btrfs bmap call would return logical addresses that aren't
* suitable for IO and they also will change frequently as COW
* operations happen. So, swapfile + btrfs == corruption.
*
* For now we're avoiding this by dropping bmap.
*/
static const struct address_space_operations btrfs_aops = {
.readpage = btrfs_readpage,
.writepage = btrfs_writepage,
.writepages = btrfs_writepages,
.readpages = btrfs_readpages,
.direct_IO = btrfs_direct_IO,
.invalidatepage = btrfs_invalidatepage,
.releasepage = btrfs_releasepage,
.set_page_dirty = btrfs_set_page_dirty,
.error_remove_page = generic_error_remove_page,
};
static const struct address_space_operations btrfs_symlink_aops = {
.readpage = btrfs_readpage,
.writepage = btrfs_writepage,
.invalidatepage = btrfs_invalidatepage,
.releasepage = btrfs_releasepage,
};
static const struct inode_operations btrfs_file_inode_operations = {
.getattr = btrfs_getattr,
.setattr = btrfs_setattr,
.setxattr = btrfs_setxattr,
.getxattr = btrfs_getxattr,
.listxattr = btrfs_listxattr,
.removexattr = btrfs_removexattr,
.permission = btrfs_permission,
.fiemap = btrfs_fiemap,
.get_acl = btrfs_get_acl,
};
static const struct inode_operations btrfs_special_inode_operations = {
.getattr = btrfs_getattr,
.setattr = btrfs_setattr,
.permission = btrfs_permission,
.setxattr = btrfs_setxattr,
.getxattr = btrfs_getxattr,
.listxattr = btrfs_listxattr,
.removexattr = btrfs_removexattr,
.get_acl = btrfs_get_acl,
};
static const struct inode_operations btrfs_symlink_inode_operations = {
.readlink = generic_readlink,
.follow_link = page_follow_link_light,
.put_link = page_put_link,
.getattr = btrfs_getattr,
.setattr = btrfs_setattr,
.permission = btrfs_permission,
.setxattr = btrfs_setxattr,
.getxattr = btrfs_getxattr,
.listxattr = btrfs_listxattr,
.removexattr = btrfs_removexattr,
.get_acl = btrfs_get_acl,
};
const struct dentry_operations btrfs_dentry_operations = {
.d_delete = btrfs_dentry_delete,
.d_release = btrfs_dentry_release,
};
| gpl-2.0 |
PsychoGame/android_kernel_lge_apq8064 | fs/btrfs/inode.c | 2482 | 206715 | /*
* Copyright (C) 2007 Oracle. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License v2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 021110-1307, USA.
*/
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/mount.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "volumes.h"
#include "compression.h"
#include "locking.h"
#include "free-space-cache.h"
#include "inode-map.h"
struct btrfs_iget_args {
u64 ino;
struct btrfs_root *root;
};
static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_dir_ro_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct address_space_operations btrfs_symlink_aops;
static const struct file_operations btrfs_dir_file_operations;
static struct extent_io_ops btrfs_extent_io_ops;
static struct kmem_cache *btrfs_inode_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_path_cachep;
struct kmem_cache *btrfs_free_space_cachep;
#define S_SHIFT 12
static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
[S_IFREG >> S_SHIFT] = BTRFS_FT_REG_FILE,
[S_IFDIR >> S_SHIFT] = BTRFS_FT_DIR,
[S_IFCHR >> S_SHIFT] = BTRFS_FT_CHRDEV,
[S_IFBLK >> S_SHIFT] = BTRFS_FT_BLKDEV,
[S_IFIFO >> S_SHIFT] = BTRFS_FT_FIFO,
[S_IFSOCK >> S_SHIFT] = BTRFS_FT_SOCK,
[S_IFLNK >> S_SHIFT] = BTRFS_FT_SYMLINK,
};
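/*
 * Illustrative helper (a sketch, not part of the original file; the
 * name is hypothetical): the table above is indexed by the file-type
 * bits of i_mode shifted down by S_SHIFT, so the lookup is a single
 * masked array access. e.g. S_IFDIR is 0040000, and 0040000 >> 12 == 4
 * selects BTRFS_FT_DIR.
 */
static inline unsigned char btrfs_type_from_mode(umode_t mode)
{
return btrfs_type_by_mode[(mode & S_IFMT) >> S_SHIFT];
}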
static int btrfs_setsize(struct inode *inode, loff_t newsize);
static int btrfs_truncate(struct inode *inode);
static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
static noinline int cow_file_range(struct inode *inode,
struct page *locked_page,
u64 start, u64 end, int *page_started,
unsigned long *nr_written, int unlock);
static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *inode);
static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
struct inode *inode, struct inode *dir,
const struct qstr *qstr)
{
int err;
err = btrfs_init_acl(trans, inode, dir);
if (!err)
err = btrfs_xattr_security_init(trans, inode, dir, qstr);
return err;
}
/*
* this does all the hard work for inserting an inline extent into
* the btree. The caller should have done a btrfs_drop_extents so that
* no overlapping inline items exist in the btree
*/
static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *inode,
u64 start, size_t size, size_t compressed_size,
int compress_type,
struct page **compressed_pages)
{
struct btrfs_key key;
struct btrfs_path *path;
struct extent_buffer *leaf;
struct page *page = NULL;
char *kaddr;
unsigned long ptr;
struct btrfs_file_extent_item *ei;
int err = 0;
int ret;
size_t cur_size = size;
size_t datasize;
unsigned long offset;
if (compressed_size && compressed_pages)
cur_size = compressed_size;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
path->leave_spinning = 1;
key.objectid = btrfs_ino(inode);
key.offset = start;
btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
datasize = btrfs_file_extent_calc_inline_size(cur_size);
inode_add_bytes(inode, size);
ret = btrfs_insert_empty_item(trans, root, path, &key,
datasize);
if (ret) {
err = ret;
goto fail;
}
leaf = path->nodes[0];
ei = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
btrfs_set_file_extent_generation(leaf, ei, trans->transid);
btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
btrfs_set_file_extent_encryption(leaf, ei, 0);
btrfs_set_file_extent_other_encoding(leaf, ei, 0);
btrfs_set_file_extent_ram_bytes(leaf, ei, size);
ptr = btrfs_file_extent_inline_start(ei);
if (compress_type != BTRFS_COMPRESS_NONE) {
struct page *cpage;
int i = 0;
while (compressed_size > 0) {
cpage = compressed_pages[i];
cur_size = min_t(unsigned long, compressed_size,
PAGE_CACHE_SIZE);
kaddr = kmap_atomic(cpage);
write_extent_buffer(leaf, kaddr, ptr, cur_size);
kunmap_atomic(kaddr);
i++;
ptr += cur_size;
compressed_size -= cur_size;
}
btrfs_set_file_extent_compression(leaf, ei,
compress_type);
} else {
page = find_get_page(inode->i_mapping,
start >> PAGE_CACHE_SHIFT);
btrfs_set_file_extent_compression(leaf, ei, 0);
kaddr = kmap_atomic(page);
offset = start & (PAGE_CACHE_SIZE - 1);
write_extent_buffer(leaf, kaddr + offset, ptr, size);
kunmap_atomic(kaddr);
page_cache_release(page);
}
btrfs_mark_buffer_dirty(leaf);
btrfs_free_path(path);
/*
* we're an inline extent, so nobody can
* extend the file past i_size without locking
* a page we already have locked.
*
* We must do any isize and inode updates
* before we unlock the pages. Otherwise we
* could end up racing with unlink.
*/
BTRFS_I(inode)->disk_i_size = inode->i_size;
ret = btrfs_update_inode(trans, root, inode);
return ret;
fail:
btrfs_free_path(path);
return err;
}
/*
* conditionally insert an inline extent into the file. This
* does the checks required to make sure the data is small enough
* to fit as an inline extent.
*/
static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct inode *inode, u64 start, u64 end,
size_t compressed_size, int compress_type,
struct page **compressed_pages)
{
u64 isize = i_size_read(inode);
u64 actual_end = min(end + 1, isize);
u64 inline_len = actual_end - start;
u64 aligned_end = (end + root->sectorsize - 1) &
~((u64)root->sectorsize - 1);
u64 hint_byte;
u64 data_len = inline_len;
int ret;
if (compressed_size)
data_len = compressed_size;
if (start > 0 ||
actual_end >= PAGE_CACHE_SIZE ||
data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
(!compressed_size &&
(actual_end & (root->sectorsize - 1)) == 0) ||
end + 1 < isize ||
data_len > root->fs_info->max_inline) {
return 1;
}
ret = btrfs_drop_extents(trans, inode, start, aligned_end,
&hint_byte, 1);
if (ret)
return ret;
if (isize > actual_end)
inline_len = min_t(u64, isize, actual_end);
ret = insert_inline_extent(trans, root, inode, start,
inline_len, compressed_size,
compress_type, compressed_pages);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
return ret;
}
btrfs_delalloc_release_metadata(inode, end + 1 - start);
btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
return 0;
}
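/*
 * Worked example for the masking arithmetic above (illustrative
 * numbers only): with root->sectorsize == 4096 and end == 4999,
 * aligned_end = (4999 + 4095) & ~4095 == 8192, i.e. the range is
 * rounded up to the next sector boundary before the old extents are
 * dropped. A small write starting at offset 0 can become an inline
 * extent; any write with start > 0 fails the first check and returns
 * 1, meaning "not inlined, fall back to a regular extent".
 */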
struct async_extent {
u64 start;
u64 ram_size;
u64 compressed_size;
struct page **pages;
unsigned long nr_pages;
int compress_type;
struct list_head list;
};
struct async_cow {
struct inode *inode;
struct btrfs_root *root;
struct page *locked_page;
u64 start;
u64 end;
struct list_head extents;
struct btrfs_work work;
};
static noinline int add_async_extent(struct async_cow *cow,
u64 start, u64 ram_size,
u64 compressed_size,
struct page **pages,
unsigned long nr_pages,
int compress_type)
{
struct async_extent *async_extent;
async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
BUG_ON(!async_extent); /* -ENOMEM */
async_extent->start = start;
async_extent->ram_size = ram_size;
async_extent->compressed_size = compressed_size;
async_extent->pages = pages;
async_extent->nr_pages = nr_pages;
async_extent->compress_type = compress_type;
list_add_tail(&async_extent->list, &cow->extents);
return 0;
}
/*
* we create compressed extents in two phases. The first
* phase compresses a range of pages that have already been
* locked (both pages and state bits are locked).
*
* This is done inside an ordered work queue, and the compression
* is spread across many cpus. The actual IO submission is step
* two, and the ordered work queue takes care of making sure that
* happens in the same order things were put onto the queue by
* writepages and friends.
*
* If this code finds it can't get good compression, it puts an
* entry onto the work queue to write the uncompressed bytes. This
* makes sure that both compressed inodes and uncompressed inodes
* are written in the same order that pdflush sent them down.
*/
static noinline int compress_file_range(struct inode *inode,
struct page *locked_page,
u64 start, u64 end,
struct async_cow *async_cow,
int *num_added)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
u64 num_bytes;
u64 blocksize = root->sectorsize;
u64 actual_end;
u64 isize = i_size_read(inode);
int ret = 0;
struct page **pages = NULL;
unsigned long nr_pages;
unsigned long nr_pages_ret = 0;
unsigned long total_compressed = 0;
unsigned long total_in = 0;
unsigned long max_compressed = 128 * 1024;
unsigned long max_uncompressed = 128 * 1024;
int i;
int will_compress;
int compress_type = root->fs_info->compress_type;
/* if this is a small write inside eof, kick off a defrag */
if ((end - start + 1) < 16 * 1024 &&
(start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
btrfs_add_inode_defrag(NULL, inode);
actual_end = min_t(u64, isize, end + 1);
again:
will_compress = 0;
nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);
/*
* we don't want to send crud past the end of i_size through
* compression, that's just a waste of CPU time. So, if the
* end of the file is before the start of our current
* requested range of bytes, we bail out to the uncompressed
* cleanup code that can deal with all of this.
*
* It isn't really the fastest way to fix things, but this is a
* very uncommon corner.
*/
if (actual_end <= start)
goto cleanup_and_bail_uncompressed;
total_compressed = actual_end - start;
/* we want to make sure that the amount of ram required to uncompress
* an extent is reasonable, so we limit the total size in ram
* of a compressed extent to 128k. This is a crucial number
* because it also controls how easily we can spread reads across
* cpus for decompression.
*
* We also want to make sure the amount of IO required to do
* a random read is reasonably small, so we limit the size of
* a compressed extent to 128k.
*/
total_compressed = min(total_compressed, max_uncompressed);
num_bytes = (end - start + blocksize) & ~(blocksize - 1);
num_bytes = max(blocksize, num_bytes);
total_in = 0;
ret = 0;
/*
* we do compression for mount -o compress and when the
* inode has not been flagged as nocompress. This flag can
* change at any time if we discover bad compression ratios.
*/
if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
(btrfs_test_opt(root, COMPRESS) ||
(BTRFS_I(inode)->force_compress) ||
(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))) {
WARN_ON(pages);
pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
if (!pages) {
/* just bail out to the uncompressed code */
goto cont;
}
if (BTRFS_I(inode)->force_compress)
compress_type = BTRFS_I(inode)->force_compress;
ret = btrfs_compress_pages(compress_type,
inode->i_mapping, start,
total_compressed, pages,
nr_pages, &nr_pages_ret,
&total_in,
&total_compressed,
max_compressed);
if (!ret) {
unsigned long offset = total_compressed &
(PAGE_CACHE_SIZE - 1);
struct page *page = pages[nr_pages_ret - 1];
char *kaddr;
/* zero the tail end of the last page, we might be
* sending it down to disk
*/
if (offset) {
kaddr = kmap_atomic(page);
memset(kaddr + offset, 0,
PAGE_CACHE_SIZE - offset);
kunmap_atomic(kaddr);
}
will_compress = 1;
}
}
cont:
if (start == 0) {
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
trans = NULL;
goto cleanup_and_out;
}
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
/* let's try to make an inline extent */
if (ret || total_in < (actual_end - start)) {
/* we didn't compress the entire range, try
* to make an uncompressed inline extent.
*/
ret = cow_file_range_inline(trans, root, inode,
start, end, 0, 0, NULL);
} else {
/* try making a compressed inline extent */
ret = cow_file_range_inline(trans, root, inode,
start, end,
total_compressed,
compress_type, pages);
}
if (ret <= 0) {
/*
* inline extent creation worked or returned error,
* we don't need to create any more async work items.
* Unlock and free up our temp pages.
*/
extent_clear_unlock_delalloc(inode,
&BTRFS_I(inode)->io_tree,
start, end, NULL,
EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
EXTENT_CLEAR_DELALLOC |
EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK);
btrfs_end_transaction(trans, root);
goto free_pages_out;
}
btrfs_end_transaction(trans, root);
}
if (will_compress) {
/*
* we aren't doing an inline extent, so round the compressed size
* up to a block size boundary so that the allocator does sane
* things
*/
total_compressed = (total_compressed + blocksize - 1) &
~(blocksize - 1);
/*
* one last check to make sure the compression is really a
* win, compare the page count read with the blocks on disk
*/
total_in = (total_in + PAGE_CACHE_SIZE - 1) &
~(PAGE_CACHE_SIZE - 1);
if (total_compressed >= total_in) {
will_compress = 0;
} else {
num_bytes = total_in;
}
}
if (!will_compress && pages) {
/*
* the compression code ran but failed to make things smaller,
* free any pages it allocated and our page pointer array
*/
for (i = 0; i < nr_pages_ret; i++) {
WARN_ON(pages[i]->mapping);
page_cache_release(pages[i]);
}
kfree(pages);
pages = NULL;
total_compressed = 0;
nr_pages_ret = 0;
/* flag the file so we don't compress in the future */
if (!btrfs_test_opt(root, FORCE_COMPRESS) &&
!(BTRFS_I(inode)->force_compress)) {
BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
}
}
if (will_compress) {
*num_added += 1;
/* the async work queues will take care of doing actual
* allocation on disk for these compressed pages,
* and will submit them to the elevator.
*/
add_async_extent(async_cow, start, num_bytes,
total_compressed, pages, nr_pages_ret,
compress_type);
if (start + num_bytes < end) {
start += num_bytes;
pages = NULL;
cond_resched();
goto again;
}
} else {
cleanup_and_bail_uncompressed:
/*
* No compression, but we still need to write the pages in
* the file we've been given so far. Redirty the locked
* page if it corresponds to our extent and set things up
* for the async work queue to run cow_file_range to do
* the normal delalloc dance
*/
if (page_offset(locked_page) >= start &&
page_offset(locked_page) <= end) {
__set_page_dirty_nobuffers(locked_page);
/* unlocked later on in the async handlers */
}
add_async_extent(async_cow, start, end - start + 1,
0, NULL, 0, BTRFS_COMPRESS_NONE);
*num_added += 1;
}
out:
return ret;
free_pages_out:
for (i = 0; i < nr_pages_ret; i++) {
WARN_ON(pages[i]->mapping);
page_cache_release(pages[i]);
}
kfree(pages);
goto out;
cleanup_and_out:
extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
start, end, NULL,
EXTENT_CLEAR_UNLOCK_PAGE |
EXTENT_CLEAR_DIRTY |
EXTENT_CLEAR_DELALLOC |
EXTENT_SET_WRITEBACK |
EXTENT_END_WRITEBACK);
if (!trans || IS_ERR(trans))
btrfs_error(root->fs_info, ret, "Failed to join transaction");
else
btrfs_abort_transaction(trans, root, ret);
goto free_pages_out;
}
/*
* phase two of compressed writeback. This is the ordered portion
* of the code, which only gets called in the order the work was
* queued. We walk all the async extents created by compress_file_range
* and send them down to the disk.
*/
static noinline int submit_compressed_extents(struct inode *inode,
struct async_cow *async_cow)
{
struct async_extent *async_extent;
u64 alloc_hint = 0;
struct btrfs_trans_handle *trans;
struct btrfs_key ins;
struct extent_map *em;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
struct extent_io_tree *io_tree;
int ret = 0;
if (list_empty(&async_cow->extents))
return 0;
while (!list_empty(&async_cow->extents)) {
async_extent = list_entry(async_cow->extents.next,
struct async_extent, list);
list_del(&async_extent->list);
io_tree = &BTRFS_I(inode)->io_tree;
retry:
/* did the compression code fall back to uncompressed IO? */
if (!async_extent->pages) {
int page_started = 0;
unsigned long nr_written = 0;
lock_extent(io_tree, async_extent->start,
async_extent->start +
async_extent->ram_size - 1);
/* allocate blocks */
ret = cow_file_range(inode, async_cow->locked_page,
async_extent->start,
async_extent->start +
async_extent->ram_size - 1,
&page_started, &nr_written, 0);
/* JDM XXX */
/*
* if page_started, cow_file_range inserted an
* inline extent and took care of all the unlocking
* and IO for us. Otherwise, we need to submit
* all those pages down to the drive.
*/
if (!page_started && !ret)
extent_write_locked_range(io_tree,
inode, async_extent->start,
async_extent->start +
async_extent->ram_size - 1,
btrfs_get_extent,
WB_SYNC_ALL);
kfree(async_extent);
cond_resched();
continue;
}
lock_extent(io_tree, async_extent->start,
async_extent->start + async_extent->ram_size - 1);
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
} else {
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
ret = btrfs_reserve_extent(trans, root,
async_extent->compressed_size,
async_extent->compressed_size,
0, alloc_hint, &ins, 1);
if (ret)
btrfs_abort_transaction(trans, root, ret);
btrfs_end_transaction(trans, root);
}
if (ret) {
int i;
for (i = 0; i < async_extent->nr_pages; i++) {
WARN_ON(async_extent->pages[i]->mapping);
page_cache_release(async_extent->pages[i]);
}
kfree(async_extent->pages);
async_extent->nr_pages = 0;
async_extent->pages = NULL;
unlock_extent(io_tree, async_extent->start,
async_extent->start +
async_extent->ram_size - 1);
if (ret == -ENOSPC)
goto retry;
goto out_free; /* JDM: Requeue? */
}
/*
* here we're doing allocation and writeback of the
* compressed pages
*/
btrfs_drop_extent_cache(inode, async_extent->start,
async_extent->start +
async_extent->ram_size - 1, 0);
em = alloc_extent_map();
BUG_ON(!em); /* -ENOMEM */
em->start = async_extent->start;
em->len = async_extent->ram_size;
em->orig_start = em->start;
em->block_start = ins.objectid;
em->block_len = ins.offset;
em->bdev = root->fs_info->fs_devices->latest_bdev;
em->compress_type = async_extent->compress_type;
set_bit(EXTENT_FLAG_PINNED, &em->flags);
set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
while (1) {
write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em);
write_unlock(&em_tree->lock);
if (ret != -EEXIST) {
free_extent_map(em);
break;
}
btrfs_drop_extent_cache(inode, async_extent->start,
async_extent->start +
async_extent->ram_size - 1, 0);
}
ret = btrfs_add_ordered_extent_compress(inode,
async_extent->start,
ins.objectid,
async_extent->ram_size,
ins.offset,
BTRFS_ORDERED_COMPRESSED,
async_extent->compress_type);
BUG_ON(ret); /* -ENOMEM */
/*
* clear dirty, set writeback and unlock the pages.
*/
extent_clear_unlock_delalloc(inode,
&BTRFS_I(inode)->io_tree,
async_extent->start,
async_extent->start +
async_extent->ram_size - 1,
NULL, EXTENT_CLEAR_UNLOCK_PAGE |
EXTENT_CLEAR_UNLOCK |
EXTENT_CLEAR_DELALLOC |
EXTENT_CLEAR_DIRTY | EXTENT_SET_WRITEBACK);
ret = btrfs_submit_compressed_write(inode,
async_extent->start,
async_extent->ram_size,
ins.objectid,
ins.offset, async_extent->pages,
async_extent->nr_pages);
BUG_ON(ret); /* -ENOMEM */
alloc_hint = ins.objectid + ins.offset;
kfree(async_extent);
cond_resched();
}
ret = 0;
out:
return ret;
out_free:
kfree(async_extent);
goto out;
}
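/*
 * A note on the add_extent_mapping() retry idiom used above (and again
 * in cow_file_range() and run_delalloc_nocow() below): -EEXIST means a
 * stale cached mapping overlaps the new one, so the loop drops the
 * overlapping range with btrfs_drop_extent_cache() and retries until
 * the insert lands. Once add_extent_mapping() returns anything other
 * than -EEXIST, success or a hard error, the local reference taken by
 * alloc_extent_map() is released with free_extent_map() and the loop
 * exits.
 */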
static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
u64 num_bytes)
{
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
struct extent_map *em;
u64 alloc_hint = 0;
read_lock(&em_tree->lock);
em = search_extent_mapping(em_tree, start, num_bytes);
if (em) {
/*
* if block start isn't an actual block number then find the
* first block in this inode and use that as a hint. If that
* block is also bogus then just don't worry about it.
*/
if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
free_extent_map(em);
em = search_extent_mapping(em_tree, 0, 0);
if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
alloc_hint = em->block_start;
if (em)
free_extent_map(em);
} else {
alloc_hint = em->block_start;
free_extent_map(em);
}
}
read_unlock(&em_tree->lock);
return alloc_hint;
}
/*
* when extent_io.c finds a delayed allocation range in the file,
* the call backs end up in this code. The basic idea is to
* allocate extents on disk for the range, and create ordered data structs
* in ram to track those extents.
*
* locked_page is the page that writepage had locked already. We use
* it to make sure we don't do extra locks or unlocks.
*
* *page_started is set to one if we unlock locked_page and do everything
* required to start IO on it. It may be clean and already done with
* IO when we return.
*/
static noinline int cow_file_range(struct inode *inode,
struct page *locked_page,
u64 start, u64 end, int *page_started,
unsigned long *nr_written,
int unlock)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
u64 alloc_hint = 0;
u64 num_bytes;
unsigned long ram_size;
u64 disk_num_bytes;
u64 cur_alloc_size;
u64 blocksize = root->sectorsize;
struct btrfs_key ins;
struct extent_map *em;
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
int ret = 0;
BUG_ON(btrfs_is_free_space_inode(root, inode));
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
extent_clear_unlock_delalloc(inode,
&BTRFS_I(inode)->io_tree,
start, end, NULL,
EXTENT_CLEAR_UNLOCK_PAGE |
EXTENT_CLEAR_UNLOCK |
EXTENT_CLEAR_DELALLOC |
EXTENT_CLEAR_DIRTY |
EXTENT_SET_WRITEBACK |
EXTENT_END_WRITEBACK);
return PTR_ERR(trans);
}
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
num_bytes = (end - start + blocksize) & ~(blocksize - 1);
num_bytes = max(blocksize, num_bytes);
disk_num_bytes = num_bytes;
ret = 0;
/* if this is a small write inside eof, kick off defrag */
if (num_bytes < 64 * 1024 &&
(start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
btrfs_add_inode_defrag(trans, inode);
if (start == 0) {
/* let's try to make an inline extent */
ret = cow_file_range_inline(trans, root, inode,
start, end, 0, 0, NULL);
if (ret == 0) {
extent_clear_unlock_delalloc(inode,
&BTRFS_I(inode)->io_tree,
start, end, NULL,
EXTENT_CLEAR_UNLOCK_PAGE |
EXTENT_CLEAR_UNLOCK |
EXTENT_CLEAR_DELALLOC |
EXTENT_CLEAR_DIRTY |
EXTENT_SET_WRITEBACK |
EXTENT_END_WRITEBACK);
*nr_written = *nr_written +
(end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
*page_started = 1;
goto out;
} else if (ret < 0) {
btrfs_abort_transaction(trans, root, ret);
goto out_unlock;
}
}
BUG_ON(disk_num_bytes >
btrfs_super_total_bytes(root->fs_info->super_copy));
alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
while (disk_num_bytes > 0) {
unsigned long op;
cur_alloc_size = disk_num_bytes;
ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
root->sectorsize, 0, alloc_hint,
&ins, 1);
if (ret < 0) {
btrfs_abort_transaction(trans, root, ret);
goto out_unlock;
}
em = alloc_extent_map();
BUG_ON(!em); /* -ENOMEM */
em->start = start;
em->orig_start = em->start;
ram_size = ins.offset;
em->len = ins.offset;
em->block_start = ins.objectid;
em->block_len = ins.offset;
em->bdev = root->fs_info->fs_devices->latest_bdev;
set_bit(EXTENT_FLAG_PINNED, &em->flags);
while (1) {
write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em);
write_unlock(&em_tree->lock);
if (ret != -EEXIST) {
free_extent_map(em);
break;
}
btrfs_drop_extent_cache(inode, start,
start + ram_size - 1, 0);
}
cur_alloc_size = ins.offset;
ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
ram_size, cur_alloc_size, 0);
BUG_ON(ret); /* -ENOMEM */
if (root->root_key.objectid ==
BTRFS_DATA_RELOC_TREE_OBJECTID) {
ret = btrfs_reloc_clone_csums(inode, start,
cur_alloc_size);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
goto out_unlock;
}
}
if (disk_num_bytes < cur_alloc_size)
break;
/* we're not doing compressed IO, don't unlock the first
* page (which the caller expects to stay locked), don't
* clear any dirty bits and don't set any writeback bits
*
* Do set the Private2 bit so we know this page was properly
* setup for writepage
*/
op = unlock ? EXTENT_CLEAR_UNLOCK_PAGE : 0;
op |= EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
EXTENT_SET_PRIVATE2;
extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
start, start + ram_size - 1,
locked_page, op);
disk_num_bytes -= cur_alloc_size;
num_bytes -= cur_alloc_size;
alloc_hint = ins.objectid + ins.offset;
start += cur_alloc_size;
}
ret = 0;
out:
btrfs_end_transaction(trans, root);
return ret;
out_unlock:
extent_clear_unlock_delalloc(inode,
&BTRFS_I(inode)->io_tree,
start, end, NULL,
EXTENT_CLEAR_UNLOCK_PAGE |
EXTENT_CLEAR_UNLOCK |
EXTENT_CLEAR_DELALLOC |
EXTENT_CLEAR_DIRTY |
EXTENT_SET_WRITEBACK |
EXTENT_END_WRITEBACK);
goto out;
}
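/*
 * Allocation-loop sketch for cow_file_range() (hypothetical numbers):
 * asked to cover 1M of delalloc on a fragmented filesystem, the loop
 * might get back extents of 768K and then 256K. Each pass inserts an
 * extent map and an ordered extent for ins.offset bytes, bumps
 * alloc_hint to ins.objectid + ins.offset so the next reservation
 * tries to stay contiguous, and advances start and disk_num_bytes
 * until the whole range is covered.
 */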
/*
* work queue callback to start compression on a file's pages
*/
static noinline void async_cow_start(struct btrfs_work *work)
{
struct async_cow *async_cow;
int num_added = 0;
async_cow = container_of(work, struct async_cow, work);
compress_file_range(async_cow->inode, async_cow->locked_page,
async_cow->start, async_cow->end, async_cow,
&num_added);
if (num_added == 0)
async_cow->inode = NULL;
}
/*
* work queue callback to submit previously compressed pages
*/
static noinline void async_cow_submit(struct btrfs_work *work)
{
struct async_cow *async_cow;
struct btrfs_root *root;
unsigned long nr_pages;
async_cow = container_of(work, struct async_cow, work);
root = async_cow->root;
nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
PAGE_CACHE_SHIFT;
atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages);
if (atomic_read(&root->fs_info->async_delalloc_pages) <
5 * 1024 * 1024 &&
waitqueue_active(&root->fs_info->async_submit_wait))
wake_up(&root->fs_info->async_submit_wait);
if (async_cow->inode)
submit_compressed_extents(async_cow->inode, async_cow);
}
static noinline void async_cow_free(struct btrfs_work *work)
{
struct async_cow *async_cow;
async_cow = container_of(work, struct async_cow, work);
kfree(async_cow);
}
static int cow_file_range_async(struct inode *inode, struct page *locked_page,
u64 start, u64 end, int *page_started,
unsigned long *nr_written)
{
struct async_cow *async_cow;
struct btrfs_root *root = BTRFS_I(inode)->root;
unsigned long nr_pages;
u64 cur_end;
int limit = 10 * 1024 * 1024;
clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
1, 0, NULL, GFP_NOFS);
while (start < end) {
async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
BUG_ON(!async_cow); /* -ENOMEM */
async_cow->inode = inode;
async_cow->root = root;
async_cow->locked_page = locked_page;
async_cow->start = start;
if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
cur_end = end;
else
cur_end = min(end, start + 512 * 1024 - 1);
async_cow->end = cur_end;
INIT_LIST_HEAD(&async_cow->extents);
async_cow->work.func = async_cow_start;
async_cow->work.ordered_func = async_cow_submit;
async_cow->work.ordered_free = async_cow_free;
async_cow->work.flags = 0;
nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
PAGE_CACHE_SHIFT;
atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);
btrfs_queue_worker(&root->fs_info->delalloc_workers,
&async_cow->work);
if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
wait_event(root->fs_info->async_submit_wait,
(atomic_read(&root->fs_info->async_delalloc_pages) <
limit));
}
while (atomic_read(&root->fs_info->async_submit_draining) &&
atomic_read(&root->fs_info->async_delalloc_pages)) {
wait_event(root->fs_info->async_submit_wait,
(atomic_read(&root->fs_info->async_delalloc_pages) ==
0));
}
*nr_written += nr_pages;
start = cur_end + 1;
}
*page_started = 1;
return 0;
}
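/*
 * A minimal sketch of the work-item lifecycle wired up above, using
 * only the fields set in this function:
 *
 * work.func         -> async_cow_start()   (phase one: compression)
 * work.ordered_func -> async_cow_submit()  (phase two: in queue order)
 * work.ordered_free -> async_cow_free()    (kfree of the async_cow)
 *
 * With start == 0 and end == 1M - 1 the loop queues two items covering
 * [0, 512K - 1] and [512K, 1M - 1], and phase two is guaranteed to run
 * in that order even if the compression steps finish out of order.
 */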
static noinline int csum_exist_in_range(struct btrfs_root *root,
u64 bytenr, u64 num_bytes)
{
int ret;
struct btrfs_ordered_sum *sums;
LIST_HEAD(list);
ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
bytenr + num_bytes - 1, &list, 0);
if (ret == 0 && list_empty(&list))
return 0;
while (!list_empty(&list)) {
sums = list_entry(list.next, struct btrfs_ordered_sum, list);
list_del(&sums->list);
kfree(sums);
}
return 1;
}
/*
* when the nocow writeback callback runs. This checks for snapshots or COW copies
* of the extents that exist in the file, and COWs the file as required.
*
* If no cow copies or snapshots exist, we write directly to the existing
* blocks on disk
*/
static noinline int run_delalloc_nocow(struct inode *inode,
struct page *locked_page,
u64 start, u64 end, int *page_started, int force,
unsigned long *nr_written)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
struct extent_buffer *leaf;
struct btrfs_path *path;
struct btrfs_file_extent_item *fi;
struct btrfs_key found_key;
u64 cow_start;
u64 cur_offset;
u64 extent_end;
u64 extent_offset;
u64 disk_bytenr;
u64 num_bytes;
int extent_type;
int ret, err;
int type;
int nocow;
int check_prev = 1;
bool nolock;
u64 ino = btrfs_ino(inode);
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
nolock = btrfs_is_free_space_inode(root, inode);
if (nolock)
trans = btrfs_join_transaction_nolock(root);
else
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
btrfs_free_path(path);
return PTR_ERR(trans);
}
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
cow_start = (u64)-1;
cur_offset = start;
while (1) {
ret = btrfs_lookup_file_extent(trans, root, path, ino,
cur_offset, 0);
if (ret < 0) {
btrfs_abort_transaction(trans, root, ret);
goto error;
}
if (ret > 0 && path->slots[0] > 0 && check_prev) {
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &found_key,
path->slots[0] - 1);
if (found_key.objectid == ino &&
found_key.type == BTRFS_EXTENT_DATA_KEY)
path->slots[0]--;
}
check_prev = 0;
next_slot:
leaf = path->nodes[0];
if (path->slots[0] >= btrfs_header_nritems(leaf)) {
ret = btrfs_next_leaf(root, path);
if (ret < 0) {
btrfs_abort_transaction(trans, root, ret);
goto error;
}
if (ret > 0)
break;
leaf = path->nodes[0];
}
nocow = 0;
disk_bytenr = 0;
num_bytes = 0;
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
if (found_key.objectid > ino ||
found_key.type > BTRFS_EXTENT_DATA_KEY ||
found_key.offset > end)
break;
if (found_key.offset > cur_offset) {
extent_end = found_key.offset;
extent_type = 0;
goto out_check;
}
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
extent_type = btrfs_file_extent_type(leaf, fi);
if (extent_type == BTRFS_FILE_EXTENT_REG ||
extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
extent_offset = btrfs_file_extent_offset(leaf, fi);
extent_end = found_key.offset +
btrfs_file_extent_num_bytes(leaf, fi);
if (extent_end <= start) {
path->slots[0]++;
goto next_slot;
}
if (disk_bytenr == 0)
goto out_check;
if (btrfs_file_extent_compression(leaf, fi) ||
btrfs_file_extent_encryption(leaf, fi) ||
btrfs_file_extent_other_encoding(leaf, fi))
goto out_check;
if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
goto out_check;
if (btrfs_extent_readonly(root, disk_bytenr))
goto out_check;
if (btrfs_cross_ref_exist(trans, root, ino,
found_key.offset -
extent_offset, disk_bytenr))
goto out_check;
disk_bytenr += extent_offset;
disk_bytenr += cur_offset - found_key.offset;
num_bytes = min(end + 1, extent_end) - cur_offset;
/*
* force cow if csum exists in the range.
* this ensures that the csums for a given extent are
* either valid or do not exist.
*/
if (csum_exist_in_range(root, disk_bytenr, num_bytes))
goto out_check;
nocow = 1;
} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
extent_end = found_key.offset +
btrfs_file_extent_inline_len(leaf, fi);
extent_end = ALIGN(extent_end, root->sectorsize);
} else {
BUG_ON(1);
}
out_check:
if (extent_end <= start) {
path->slots[0]++;
goto next_slot;
}
if (!nocow) {
if (cow_start == (u64)-1)
cow_start = cur_offset;
cur_offset = extent_end;
if (cur_offset > end)
break;
path->slots[0]++;
goto next_slot;
}
btrfs_release_path(path);
if (cow_start != (u64)-1) {
ret = cow_file_range(inode, locked_page, cow_start,
found_key.offset - 1, page_started,
nr_written, 1);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
goto error;
}
cow_start = (u64)-1;
}
if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
struct extent_map *em;
struct extent_map_tree *em_tree;
em_tree = &BTRFS_I(inode)->extent_tree;
em = alloc_extent_map();
BUG_ON(!em); /* -ENOMEM */
em->start = cur_offset;
em->orig_start = em->start;
em->len = num_bytes;
em->block_len = num_bytes;
em->block_start = disk_bytenr;
em->bdev = root->fs_info->fs_devices->latest_bdev;
set_bit(EXTENT_FLAG_PINNED, &em->flags);
while (1) {
write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em);
write_unlock(&em_tree->lock);
if (ret != -EEXIST) {
free_extent_map(em);
break;
}
btrfs_drop_extent_cache(inode, em->start,
em->start + em->len - 1, 0);
}
type = BTRFS_ORDERED_PREALLOC;
} else {
type = BTRFS_ORDERED_NOCOW;
}
ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
num_bytes, num_bytes, type);
BUG_ON(ret); /* -ENOMEM */
if (root->root_key.objectid ==
BTRFS_DATA_RELOC_TREE_OBJECTID) {
ret = btrfs_reloc_clone_csums(inode, cur_offset,
num_bytes);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
goto error;
}
}
extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
cur_offset, cur_offset + num_bytes - 1,
locked_page, EXTENT_CLEAR_UNLOCK_PAGE |
EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
EXTENT_SET_PRIVATE2);
cur_offset = extent_end;
if (cur_offset > end)
break;
}
btrfs_release_path(path);
if (cur_offset <= end && cow_start == (u64)-1)
cow_start = cur_offset;
if (cow_start != (u64)-1) {
ret = cow_file_range(inode, locked_page, cow_start, end,
page_started, nr_written, 1);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
goto error;
}
}
error:
if (nolock) {
err = btrfs_end_transaction_nolock(trans, root);
} else {
err = btrfs_end_transaction(trans, root);
}
if (!ret)
ret = err;
btrfs_free_path(path);
return ret;
}
/*
* extent_io.c callback to do delayed allocation processing
*/
static int run_delalloc_range(struct inode *inode, struct page *locked_page,
u64 start, u64 end, int *page_started,
unsigned long *nr_written)
{
int ret;
struct btrfs_root *root = BTRFS_I(inode)->root;
if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)
ret = run_delalloc_nocow(inode, locked_page, start, end,
page_started, 1, nr_written);
else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)
ret = run_delalloc_nocow(inode, locked_page, start, end,
page_started, 0, nr_written);
else if (!btrfs_test_opt(root, COMPRESS) &&
!(BTRFS_I(inode)->force_compress) &&
!(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))
ret = cow_file_range(inode, locked_page, start, end,
page_started, nr_written, 1);
else
ret = cow_file_range_async(inode, locked_page, start, end,
page_started, nr_written);
return ret;
}
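/*
 * Dispatch summary for run_delalloc_range(), derived from the chain of
 * checks above:
 *
 * NODATACOW set            -> run_delalloc_nocow(force = 1)
 * PREALLOC set             -> run_delalloc_nocow(force = 0)
 * no compression anywhere  -> cow_file_range(), synchronously
 * otherwise                -> cow_file_range_async(), which compresses
 */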
static void btrfs_split_extent_hook(struct inode *inode,
struct extent_state *orig, u64 split)
{
/* not delalloc, ignore it */
if (!(orig->state & EXTENT_DELALLOC))
return;
spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->outstanding_extents++;
spin_unlock(&BTRFS_I(inode)->lock);
}
/*
* extent_io.c merge_extent_hook, used to track merged delayed allocation
* extents so we can keep track of new extents that are just merged onto old
* extents, such as when we are doing sequential writes, so we can properly
* account for the metadata space we'll need.
*/
static void btrfs_merge_extent_hook(struct inode *inode,
struct extent_state *new,
struct extent_state *other)
{
/* not delalloc, ignore it */
if (!(other->state & EXTENT_DELALLOC))
return;
spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->outstanding_extents--;
spin_unlock(&BTRFS_I(inode)->lock);
}
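/*
 * Worked example of the outstanding_extents accounting (illustrative):
 * dirtying pages 0, 1 and 2 of a file creates three delalloc ranges,
 * and as extent_io.c merges the adjacent ranges the hook above runs
 * twice, so a fully merged run is accounted as a single outstanding
 * extent, matching the single allocation it will eventually need.
 * btrfs_split_extent_hook() above is the mirror image and increments
 * the count when a delalloc range is split.
 */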
/*
* extent_io.c set_bit_hook, used to track delayed allocation
* bytes in this file, and to maintain the list of inodes that
* have pending delalloc work to be done.
*/
static void btrfs_set_bit_hook(struct inode *inode,
struct extent_state *state, int *bits)
{
/*
* set_bit and clear_bit hooks normally require _irqsave/restore
* but in this case, we are only testing for the DELALLOC
* bit, which is only set or cleared with irqs on
*/
if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
struct btrfs_root *root = BTRFS_I(inode)->root;
u64 len = state->end + 1 - state->start;
bool do_list = !btrfs_is_free_space_inode(root, inode);
if (*bits & EXTENT_FIRST_DELALLOC) {
*bits &= ~EXTENT_FIRST_DELALLOC;
} else {
spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->outstanding_extents++;
spin_unlock(&BTRFS_I(inode)->lock);
}
spin_lock(&root->fs_info->delalloc_lock);
BTRFS_I(inode)->delalloc_bytes += len;
root->fs_info->delalloc_bytes += len;
if (do_list && list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
&root->fs_info->delalloc_inodes);
}
spin_unlock(&root->fs_info->delalloc_lock);
}
}
/*
* extent_io.c clear_bit_hook, see set_bit_hook for why
*/
static void btrfs_clear_bit_hook(struct inode *inode,
struct extent_state *state, int *bits)
{
/*
* set_bit and clear_bit hooks normally require _irqsave/restore
* but in this case, we are only testing for the DELALLOC
* bit, which is only set or cleared with irqs on
*/
if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
struct btrfs_root *root = BTRFS_I(inode)->root;
u64 len = state->end + 1 - state->start;
bool do_list = !btrfs_is_free_space_inode(root, inode);
if (*bits & EXTENT_FIRST_DELALLOC) {
*bits &= ~EXTENT_FIRST_DELALLOC;
} else if (!(*bits & EXTENT_DO_ACCOUNTING)) {
spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->outstanding_extents--;
spin_unlock(&BTRFS_I(inode)->lock);
}
if (*bits & EXTENT_DO_ACCOUNTING)
btrfs_delalloc_release_metadata(inode, len);
if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
&& do_list)
btrfs_free_reserved_data_space(inode, len);
spin_lock(&root->fs_info->delalloc_lock);
root->fs_info->delalloc_bytes -= len;
BTRFS_I(inode)->delalloc_bytes -= len;
if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 &&
!list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
list_del_init(&BTRFS_I(inode)->delalloc_inodes);
}
spin_unlock(&root->fs_info->delalloc_lock);
}
}
/*
* extent_io.c merge_bio_hook, this must check the chunk tree to make sure
* we don't create bios that span stripes or chunks
*/
int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
size_t size, struct bio *bio,
unsigned long bio_flags)
{
struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
struct btrfs_mapping_tree *map_tree;
u64 logical = (u64)bio->bi_sector << 9;
u64 length = 0;
u64 map_length;
int ret;
if (bio_flags & EXTENT_BIO_COMPRESSED)
return 0;
length = bio->bi_size;
map_tree = &root->fs_info->mapping_tree;
map_length = length;
ret = btrfs_map_block(map_tree, READ, logical,
&map_length, NULL, 0);
/* Will always return 0 or 1 with map_multi == NULL */
BUG_ON(ret < 0);
if (map_length < length + size)
return 1;
return 0;
}
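/*
 * Worked example for the logical address math above: bi_sector counts
 * 512-byte sectors, so logical = (u64)bio->bi_sector << 9 maps sector
 * 2048 to byte offset 1048576 (1MiB). The chunk layer is then asked
 * how many contiguous bytes live at that logical address; if the
 * current bio plus the new page would cross that stripe/chunk boundary
 * (map_length < length + size), the hook returns 1 and the page is not
 * merged into this bio.
 */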
/*
* in order to insert checksums into the metadata in large chunks,
* we wait until bio submission time. All the pages in the bio are
* checksummed and sums are attached onto the ordered extent record.
*
* At IO completion time the csums attached to the ordered extent record
* are inserted into the btree
*/
static int __btrfs_submit_bio_start(struct inode *inode, int rw,
struct bio *bio, int mirror_num,
unsigned long bio_flags,
u64 bio_offset)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
int ret = 0;
ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
BUG_ON(ret); /* -ENOMEM */
return 0;
}
/*
* in order to insert checksums into the metadata in large chunks,
* we wait until bio submission time. All the pages in the bio are
* checksummed and sums are attached onto the ordered extent record.
*
* At IO completion time the csums attached to the ordered extent record
* are inserted into the btree
*/
static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
int mirror_num, unsigned long bio_flags,
u64 bio_offset)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
return btrfs_map_bio(root, rw, bio, mirror_num, 1);
}
/*
* extent_io.c submission hook. This does the right thing for csum calculation
* on write, or reading the csums from the tree before a read
*/
static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
int mirror_num, unsigned long bio_flags,
u64 bio_offset)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
int ret = 0;
int skip_sum;
int metadata = 0;
skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
if (btrfs_is_free_space_inode(root, inode))
metadata = 2;
ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
if (ret)
return ret;
if (!(rw & REQ_WRITE)) {
if (bio_flags & EXTENT_BIO_COMPRESSED) {
return btrfs_submit_compressed_read(inode, bio,
mirror_num, bio_flags);
} else if (!skip_sum) {
ret = btrfs_lookup_bio_sums(root, inode, bio, NULL);
if (ret)
return ret;
}
goto mapit;
} else if (!skip_sum) {
/* csum items have already been cloned */
if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
goto mapit;
/* we're doing a write, do the async checksumming */
return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
inode, rw, bio, mirror_num,
bio_flags, bio_offset,
__btrfs_submit_bio_start,
__btrfs_submit_bio_done);
}
mapit:
return btrfs_map_bio(root, rw, bio, mirror_num, 0);
}
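/*
 * Path summary for the submission hook above:
 *
 * read, compressed   -> btrfs_submit_compressed_read()
 * read, checksummed  -> btrfs_lookup_bio_sums(), then btrfs_map_bio()
 * write, checksummed -> async via btrfs_wq_submit_bio():
 *                       __btrfs_submit_bio_start() csums the pages,
 *                       __btrfs_submit_bio_done() maps the bio
 * NODATASUM set, or a write to the relocation tree (csums already
 * cloned) -> straight to btrfs_map_bio()
 */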
/*
* given a list of ordered sums record them in the inode. This happens
* at IO completion time based on sums calculated at bio submission time.
*/
static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
struct inode *inode, u64 file_offset,
struct list_head *list)
{
struct btrfs_ordered_sum *sum;
list_for_each_entry(sum, list, list) {
btrfs_csum_file_blocks(trans,
BTRFS_I(inode)->root->fs_info->csum_root, sum);
}
return 0;
}
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
struct extent_state **cached_state)
{
if ((end & (PAGE_CACHE_SIZE - 1)) == 0)
WARN_ON(1);
return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
cached_state, GFP_NOFS);
}
/* see btrfs_writepage_start_hook for details on why this is required */
struct btrfs_writepage_fixup {
struct page *page;
struct btrfs_work work;
};
static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
{
struct btrfs_writepage_fixup *fixup;
struct btrfs_ordered_extent *ordered;
struct extent_state *cached_state = NULL;
struct page *page;
struct inode *inode;
u64 page_start;
u64 page_end;
int ret;
fixup = container_of(work, struct btrfs_writepage_fixup, work);
page = fixup->page;
again:
lock_page(page);
if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
ClearPageChecked(page);
goto out_page;
}
inode = page->mapping->host;
page_start = page_offset(page);
page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
&cached_state);
/* already ordered? We're done */
if (PagePrivate2(page))
goto out;
ordered = btrfs_lookup_ordered_extent(inode, page_start);
if (ordered) {
unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
page_end, &cached_state, GFP_NOFS);
unlock_page(page);
btrfs_start_ordered_extent(inode, ordered, 1);
btrfs_put_ordered_extent(ordered);
goto again;
}
ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
if (ret) {
mapping_set_error(page->mapping, ret);
end_extent_writepage(page, ret, page_start, page_end);
ClearPageChecked(page);
goto out;
}
btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
ClearPageChecked(page);
set_page_dirty(page);
out:
unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
&cached_state, GFP_NOFS);
out_page:
unlock_page(page);
page_cache_release(page);
kfree(fixup);
}
/*
* There are a few paths in the higher layers of the kernel that directly
* set the page dirty bit without asking the filesystem if it is a
* good idea. This causes problems because we want to make sure COW
* properly happens and the data=ordered rules are followed.
*
* In our case any range that doesn't have the ORDERED bit set
* hasn't been properly setup for IO. We kick off an async process
* to fix it up. The async helper will wait for ordered extents, set
* the delalloc bit and make it safe to write the page.
*/
static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
{
struct inode *inode = page->mapping->host;
struct btrfs_writepage_fixup *fixup;
struct btrfs_root *root = BTRFS_I(inode)->root;
/* this page is properly in the ordered list */
if (TestClearPagePrivate2(page))
return 0;
if (PageChecked(page))
return -EAGAIN;
fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
if (!fixup)
return -EAGAIN;
SetPageChecked(page);
page_cache_get(page);
fixup->work.func = btrfs_writepage_fixup_worker;
fixup->page = page;
btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
return -EBUSY;
}
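/*
 * Flow sketch for the fixup path: writepage calls the hook above, and
 * a page with Private2 set was set up by the normal delalloc path and
 * proceeds (return 0). A dirty page that skipped that path gets
 * PageChecked set, a fixup work item queued, and -EBUSY returned so
 * writepage backs off. The worker then reserves space, re-marks the
 * range delalloc and re-dirties the page, so the next writepage pass
 * finds it properly set up.
 */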
static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
struct inode *inode, u64 file_pos,
u64 disk_bytenr, u64 disk_num_bytes,
u64 num_bytes, u64 ram_bytes,
u8 compression, u8 encryption,
u16 other_encoding, int extent_type)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_file_extent_item *fi;
struct btrfs_path *path;
struct extent_buffer *leaf;
struct btrfs_key ins;
u64 hint;
int ret;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
path->leave_spinning = 1;
/*
* we may be replacing one extent in the tree with another.
* The new extent is pinned in the extent map, and we don't want
* to drop it from the cache until it is completely in the btree.
*
* So, tell btrfs_drop_extents to leave this extent in the cache.
* the caller is expected to unpin it and allow it to be merged
* with the others.
*/
ret = btrfs_drop_extents(trans, inode, file_pos, file_pos + num_bytes,
&hint, 0);
if (ret)
goto out;
ins.objectid = btrfs_ino(inode);
ins.offset = file_pos;
ins.type = BTRFS_EXTENT_DATA_KEY;
ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
if (ret)
goto out;
leaf = path->nodes[0];
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
btrfs_set_file_extent_generation(leaf, fi, trans->transid);
btrfs_set_file_extent_type(leaf, fi, extent_type);
btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
btrfs_set_file_extent_offset(leaf, fi, 0);
btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
btrfs_set_file_extent_compression(leaf, fi, compression);
btrfs_set_file_extent_encryption(leaf, fi, encryption);
btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
btrfs_unlock_up_safe(path, 1);
btrfs_set_lock_blocking(leaf);
btrfs_mark_buffer_dirty(leaf);
inode_add_bytes(inode, num_bytes);
ins.objectid = disk_bytenr;
ins.offset = disk_num_bytes;
ins.type = BTRFS_EXTENT_ITEM_KEY;
ret = btrfs_alloc_reserved_file_extent(trans, root,
root->root_key.objectid,
btrfs_ino(inode), file_pos, &ins);
out:
btrfs_free_path(path);
return ret;
}
/* as ordered data IO finishes, this gets called so we can finish
* an ordered extent if the range of bytes in the file it covers are
* fully written.
*/
static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans = NULL;
struct btrfs_ordered_extent *ordered_extent = NULL;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct extent_state *cached_state = NULL;
int compress_type = 0;
int ret;
bool nolock;
ret = btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
end - start + 1);
if (!ret)
return 0;
BUG_ON(!ordered_extent); /* Logic error */
nolock = btrfs_is_free_space_inode(root, inode);
if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
if (!ret) {
if (nolock)
trans = btrfs_join_transaction_nolock(root);
else
trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
return PTR_ERR(trans);
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
ret = btrfs_update_inode_fallback(trans, root, inode);
if (ret) /* -ENOMEM or corruption */
btrfs_abort_transaction(trans, root, ret);
}
goto out;
}
lock_extent_bits(io_tree, ordered_extent->file_offset,
ordered_extent->file_offset + ordered_extent->len - 1,
0, &cached_state);
if (nolock)
trans = btrfs_join_transaction_nolock(root);
else
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
trans = NULL;
goto out_unlock;
}
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
compress_type = ordered_extent->compress_type;
if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
BUG_ON(compress_type);
ret = btrfs_mark_extent_written(trans, inode,
ordered_extent->file_offset,
ordered_extent->file_offset +
ordered_extent->len);
} else {
BUG_ON(root == root->fs_info->tree_root);
ret = insert_reserved_file_extent(trans, inode,
ordered_extent->file_offset,
ordered_extent->start,
ordered_extent->disk_len,
ordered_extent->len,
ordered_extent->len,
compress_type, 0, 0,
BTRFS_FILE_EXTENT_REG);
unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
ordered_extent->file_offset,
ordered_extent->len);
}
unlock_extent_cached(io_tree, ordered_extent->file_offset,
ordered_extent->file_offset +
ordered_extent->len - 1, &cached_state, GFP_NOFS);
if (ret < 0) {
btrfs_abort_transaction(trans, root, ret);
goto out;
}
add_pending_csums(trans, inode, ordered_extent->file_offset,
&ordered_extent->list);
ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
ret = btrfs_update_inode_fallback(trans, root, inode);
if (ret) { /* -ENOMEM or corruption */
btrfs_abort_transaction(trans, root, ret);
goto out;
}
}
ret = 0;
out:
if (root != root->fs_info->tree_root)
btrfs_delalloc_release_metadata(inode, ordered_extent->len);
if (trans) {
if (nolock)
btrfs_end_transaction_nolock(trans, root);
else
btrfs_end_transaction(trans, root);
}
/* once for us */
btrfs_put_ordered_extent(ordered_extent);
/* once for the tree */
btrfs_put_ordered_extent(ordered_extent);
return 0;
out_unlock:
unlock_extent_cached(io_tree, ordered_extent->file_offset,
ordered_extent->file_offset +
ordered_extent->len - 1, &cached_state, GFP_NOFS);
goto out;
}
static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
struct extent_state *state, int uptodate)
{
trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
ClearPagePrivate2(page);
return btrfs_finish_ordered_io(page->mapping->host, start, end);
}
/*
* when reads are done, we need to check csums to verify the data is correct
* if there's a match, we allow the bio to finish. If not, the code in
* extent_io.c will try to find good copies for us.
*/
static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
struct extent_state *state, int mirror)
{
size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
struct inode *inode = page->mapping->host;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
char *kaddr;
u64 private = ~(u32)0;
int ret;
struct btrfs_root *root = BTRFS_I(inode)->root;
u32 csum = ~(u32)0;
if (PageChecked(page)) {
ClearPageChecked(page);
goto good;
}
if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
goto good;
if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
GFP_NOFS);
return 0;
}
if (state && state->start == start) {
private = state->private;
ret = 0;
} else {
ret = get_state_private(io_tree, start, &private);
}
kaddr = kmap_atomic(page);
if (ret)
goto zeroit;
csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1);
btrfs_csum_final(csum, (char *)&csum);
if (csum != private)
goto zeroit;
kunmap_atomic(kaddr);
good:
return 0;
zeroit:
printk_ratelimited(KERN_INFO "btrfs csum failed ino %llu off %llu csum %u "
"private %llu\n",
(unsigned long long)btrfs_ino(page->mapping->host),
(unsigned long long)start, csum,
(unsigned long long)private);
memset(kaddr + offset, 1, end - start + 1);
flush_dcache_page(page);
kunmap_atomic(kaddr);
if (private == 0)
return 0;
return -EIO;
}
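/*
 * Failure handling note for the hook above: the expected csum comes
 * from the extent_state private stored at write time. On a mismatch
 * the range is deliberately overwritten with 0x01 bytes (the "zeroit"
 * label is historical) and -EIO is returned, which tells extent_io.c
 * to go look for a good copy on another mirror.
 */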
struct delayed_iput {
struct list_head list;
struct inode *inode;
};
/* JDM: If this is fs-wide, why can't we add a pointer to
* btrfs_inode instead and avoid the allocation? */
void btrfs_add_delayed_iput(struct inode *inode)
{
struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
struct delayed_iput *delayed;
if (atomic_add_unless(&inode->i_count, -1, 1))
return;
delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL);
delayed->inode = inode;
spin_lock(&fs_info->delayed_iput_lock);
list_add_tail(&delayed->list, &fs_info->delayed_iputs);
spin_unlock(&fs_info->delayed_iput_lock);
}
void btrfs_run_delayed_iputs(struct btrfs_root *root)
{
LIST_HEAD(list);
struct btrfs_fs_info *fs_info = root->fs_info;
struct delayed_iput *delayed;
int empty;
spin_lock(&fs_info->delayed_iput_lock);
empty = list_empty(&fs_info->delayed_iputs);
spin_unlock(&fs_info->delayed_iput_lock);
if (empty)
return;
down_read(&root->fs_info->cleanup_work_sem);
spin_lock(&fs_info->delayed_iput_lock);
list_splice_init(&fs_info->delayed_iputs, &list);
spin_unlock(&fs_info->delayed_iput_lock);
while (!list_empty(&list)) {
delayed = list_entry(list.next, struct delayed_iput, list);
list_del(&delayed->list);
iput(delayed->inode);
kfree(delayed);
}
up_read(&root->fs_info->cleanup_work_sem);
}
enum btrfs_orphan_cleanup_state {
ORPHAN_CLEANUP_STARTED = 1,
ORPHAN_CLEANUP_DONE = 2,
};
/*
* This is called at transaction commit time. If there are no orphan
* files in the subvolume, it removes orphan item and frees block_rsv
* structure.
*/
void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
struct btrfs_block_rsv *block_rsv;
int ret;
if (!list_empty(&root->orphan_list) ||
root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE)
return;
spin_lock(&root->orphan_lock);
if (!list_empty(&root->orphan_list)) {
spin_unlock(&root->orphan_lock);
return;
}
if (root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) {
spin_unlock(&root->orphan_lock);
return;
}
block_rsv = root->orphan_block_rsv;
root->orphan_block_rsv = NULL;
spin_unlock(&root->orphan_lock);
if (root->orphan_item_inserted &&
btrfs_root_refs(&root->root_item) > 0) {
ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root,
root->root_key.objectid);
BUG_ON(ret);
root->orphan_item_inserted = 0;
}
if (block_rsv) {
WARN_ON(block_rsv->size > 0);
btrfs_free_block_rsv(root, block_rsv);
}
}
/*
* This creates an orphan entry for the given inode in case something goes
* wrong in the middle of an unlink/truncate.
*
* NOTE: the caller of this function should reserve 5 units of metadata
* for this function.
*/
int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_block_rsv *block_rsv = NULL;
int reserve = 0;
int insert = 0;
int ret;
if (!root->orphan_block_rsv) {
block_rsv = btrfs_alloc_block_rsv(root);
if (!block_rsv)
return -ENOMEM;
}
spin_lock(&root->orphan_lock);
if (!root->orphan_block_rsv) {
root->orphan_block_rsv = block_rsv;
} else if (block_rsv) {
btrfs_free_block_rsv(root, block_rsv);
block_rsv = NULL;
}
if (list_empty(&BTRFS_I(inode)->i_orphan)) {
list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
#if 0
/*
* For proper ENOSPC handling, we should do orphan
* cleanup when mounting. But this introduces backward
* compatibility issue.
*/
if (!xchg(&root->orphan_item_inserted, 1))
insert = 2;
else
insert = 1;
#endif
insert = 1;
}
if (!BTRFS_I(inode)->orphan_meta_reserved) {
BTRFS_I(inode)->orphan_meta_reserved = 1;
reserve = 1;
}
spin_unlock(&root->orphan_lock);
/* grab metadata reservation from transaction handle */
if (reserve) {
ret = btrfs_orphan_reserve_metadata(trans, inode);
BUG_ON(ret); /* -ENOSPC in reservation; Logic error? JDM */
}
/* insert an orphan item to track this unlinked/truncated file */
if (insert >= 1) {
ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
if (ret && ret != -EEXIST) {
btrfs_abort_transaction(trans, root, ret);
return ret;
}
ret = 0;
}
/* insert an orphan item to record that the subvolume contains orphan files */
if (insert >= 2) {
ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
root->root_key.objectid);
if (ret && ret != -EEXIST) {
btrfs_abort_transaction(trans, root, ret);
return ret;
}
}
return 0;
}
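/*
* Illustrative sketch, not part of the original source: orphan tracking
* is two-level. With insert >= 1 an orphan item keyed roughly as
*
*   (BTRFS_ORPHAN_OBJECTID, BTRFS_ORPHAN_ITEM_KEY, btrfs_ino(inode))
*
* is inserted in the subvolume tree so an interrupted unlink/truncate
* can be finished by btrfs_orphan_cleanup(). The insert >= 2 branch
* (disabled above) would additionally record in the tree root that
* this subvolume contains orphans.
*/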
/*
* We have done the truncate/delete so we can go ahead and remove the orphan
* item for this particular inode.
*/
int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
int delete_item = 0;
int release_rsv = 0;
int ret = 0;
spin_lock(&root->orphan_lock);
if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
list_del_init(&BTRFS_I(inode)->i_orphan);
delete_item = 1;
}
if (BTRFS_I(inode)->orphan_meta_reserved) {
BTRFS_I(inode)->orphan_meta_reserved = 0;
release_rsv = 1;
}
spin_unlock(&root->orphan_lock);
if (trans && delete_item) {
ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode));
BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */
}
if (release_rsv)
btrfs_orphan_release_metadata(inode);
return 0;
}
/*
* this cleans up any orphans that may be left on the list from the last use
* of this root.
*/
int btrfs_orphan_cleanup(struct btrfs_root *root)
{
struct btrfs_path *path;
struct extent_buffer *leaf;
struct btrfs_key key, found_key;
struct btrfs_trans_handle *trans;
struct inode *inode;
u64 last_objectid = 0;
int ret = 0, nr_unlink = 0, nr_truncate = 0;
if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
return 0;
path = btrfs_alloc_path();
if (!path) {
ret = -ENOMEM;
goto out;
}
path->reada = -1;
key.objectid = BTRFS_ORPHAN_OBJECTID;
btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
key.offset = (u64)-1;
while (1) {
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
goto out;
/*
* ret == 0 means we found what we were searching for, which
* is weird, but possible. So only screw with the path if we
* didn't find the key, and see if we have stuff that matches.
*/
if (ret > 0) {
ret = 0;
if (path->slots[0] == 0)
break;
path->slots[0]--;
}
/* pull out the item */
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
/* make sure the item matches what we want */
if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
break;
if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
break;
/* release the path since we're done with it */
btrfs_release_path(path);
/*
* this is basically btrfs_lookup, without the crossing-root
* thing. we store the inode number in the offset of the
* orphan item.
*/
if (found_key.offset == last_objectid) {
printk(KERN_ERR "btrfs: Error removing orphan entry, "
"stopping orphan cleanup\n");
ret = -EINVAL;
goto out;
}
last_objectid = found_key.offset;
found_key.objectid = found_key.offset;
found_key.type = BTRFS_INODE_ITEM_KEY;
found_key.offset = 0;
inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
ret = PTR_RET(inode);
if (ret && ret != -ESTALE)
goto out;
if (ret == -ESTALE && root == root->fs_info->tree_root) {
struct btrfs_root *dead_root;
struct btrfs_fs_info *fs_info = root->fs_info;
int is_dead_root = 0;
/*
* this is an orphan in the tree root. Currently these
* could come from 2 sources:
* a) a snapshot deletion in progress
* b) a free space cache inode
* We need to distinguish those two, as the snapshot
* orphan must not get deleted.
* find_dead_roots already ran before us, so if this
* is a snapshot deletion, we should find the root
* in the dead_roots list
*/
spin_lock(&fs_info->trans_lock);
list_for_each_entry(dead_root, &fs_info->dead_roots,
root_list) {
if (dead_root->root_key.objectid ==
found_key.objectid) {
is_dead_root = 1;
break;
}
}
spin_unlock(&fs_info->trans_lock);
if (is_dead_root) {
/* prevent this orphan from being found again */
key.offset = found_key.objectid - 1;
continue;
}
}
/*
* Inode is already gone but the orphan item is still there,
* kill the orphan item.
*/
if (ret == -ESTALE) {
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
goto out;
}
ret = btrfs_del_orphan_item(trans, root,
found_key.objectid);
BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */
btrfs_end_transaction(trans, root);
continue;
}
/*
* add this inode to the orphan list so btrfs_orphan_del does
* the proper thing when we hit it
*/
spin_lock(&root->orphan_lock);
list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
spin_unlock(&root->orphan_lock);
/* if we have links, this was a truncate, let's do that */
if (inode->i_nlink) {
if (!S_ISREG(inode->i_mode)) {
WARN_ON(1);
iput(inode);
continue;
}
nr_truncate++;
ret = btrfs_truncate(inode);
} else {
nr_unlink++;
}
/* this will do delete_inode and everything for us */
iput(inode);
if (ret)
goto out;
}
/* release the path since we're done with it */
btrfs_release_path(path);
root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
if (root->orphan_block_rsv)
btrfs_block_rsv_release(root, root->orphan_block_rsv,
(u64)-1);
if (root->orphan_block_rsv || root->orphan_item_inserted) {
trans = btrfs_join_transaction(root);
if (!IS_ERR(trans))
btrfs_end_transaction(trans, root);
}
if (nr_unlink)
printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
if (nr_truncate)
printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);
out:
if (ret)
printk(KERN_CRIT "btrfs: could not do orphan cleanup %d\n", ret);
btrfs_free_path(path);
return ret;
}
/*
* very simple check to peek ahead in the leaf looking for xattrs. If we
* don't find any xattrs, we know there can't be any acls.
*
* slot is the slot the inode is in, objectid is the objectid of the inode
*/
static noinline int acls_after_inode_item(struct extent_buffer *leaf,
int slot, u64 objectid)
{
u32 nritems = btrfs_header_nritems(leaf);
struct btrfs_key found_key;
int scanned = 0;
slot++;
while (slot < nritems) {
btrfs_item_key_to_cpu(leaf, &found_key, slot);
/* we found a different objectid, there must not be acls */
if (found_key.objectid != objectid)
return 0;
/* we found an xattr, assume we've got an acl */
if (found_key.type == BTRFS_XATTR_ITEM_KEY)
return 1;
/*
* we found a key greater than an xattr key, there can't
* be any acls later on
*/
if (found_key.type > BTRFS_XATTR_ITEM_KEY)
return 0;
slot++;
scanned++;
/*
* it goes inode, inode backrefs, xattrs, extents,
* so if there are a ton of hard links to an inode there can
* be a lot of backrefs. Don't waste time searching too hard,
* this is just an optimization
*/
if (scanned >= 8)
break;
}
/* we hit the end of the leaf before we found an xattr or
* something larger than an xattr. We have to assume the inode
* has acls
*/
return 1;
}
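/*
* Illustrative example, not part of the original source: items in a
* leaf are sorted by (objectid, type, offset), and for one inode the
* item types are ordered INODE_ITEM < INODE_REF < XATTR_ITEM <
* EXTENT_DATA. So scanning forward from the inode's slot over, say,
*
*   (257 INODE_ITEM 0) (257 INODE_REF 256) (257 EXTENT_DATA 0)
*
* hits an EXTENT_DATA key (greater than any XATTR_ITEM key) before any
* xattr, proving inode 257 has no acls without a second tree search.
*/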
/*
* read an inode from the btree into the in-memory inode
*/
static void btrfs_read_locked_inode(struct inode *inode)
{
struct btrfs_path *path;
struct extent_buffer *leaf;
struct btrfs_inode_item *inode_item;
struct btrfs_timespec *tspec;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_key location;
int maybe_acls;
u32 rdev;
int ret;
bool filled = false;
ret = btrfs_fill_inode(inode, &rdev);
if (!ret)
filled = true;
path = btrfs_alloc_path();
if (!path)
goto make_bad;
path->leave_spinning = 1;
memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
if (ret)
goto make_bad;
leaf = path->nodes[0];
if (filled)
goto cache_acl;
inode_item = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_inode_item);
inode->i_mode = btrfs_inode_mode(leaf, inode_item);
set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
inode->i_uid = btrfs_inode_uid(leaf, inode_item);
inode->i_gid = btrfs_inode_gid(leaf, inode_item);
btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
tspec = btrfs_inode_atime(inode_item);
inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
tspec = btrfs_inode_mtime(inode_item);
inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
tspec = btrfs_inode_ctime(inode_item);
inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
BTRFS_I(inode)->sequence = btrfs_inode_sequence(leaf, inode_item);
inode->i_generation = BTRFS_I(inode)->generation;
inode->i_rdev = 0;
rdev = btrfs_inode_rdev(leaf, inode_item);
BTRFS_I(inode)->index_cnt = (u64)-1;
BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
cache_acl:
/*
* try to precache a NULL acl entry for files that don't have
* any xattrs or acls
*/
maybe_acls = acls_after_inode_item(leaf, path->slots[0],
btrfs_ino(inode));
if (!maybe_acls)
cache_no_acl(inode);
btrfs_free_path(path);
switch (inode->i_mode & S_IFMT) {
case S_IFREG:
inode->i_mapping->a_ops = &btrfs_aops;
inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
inode->i_fop = &btrfs_file_operations;
inode->i_op = &btrfs_file_inode_operations;
break;
case S_IFDIR:
inode->i_fop = &btrfs_dir_file_operations;
if (root == root->fs_info->tree_root)
inode->i_op = &btrfs_dir_ro_inode_operations;
else
inode->i_op = &btrfs_dir_inode_operations;
break;
case S_IFLNK:
inode->i_op = &btrfs_symlink_inode_operations;
inode->i_mapping->a_ops = &btrfs_symlink_aops;
inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
break;
default:
inode->i_op = &btrfs_special_inode_operations;
init_special_inode(inode, inode->i_mode, rdev);
break;
}
btrfs_update_iflags(inode);
return;
make_bad:
btrfs_free_path(path);
make_bad_inode(inode);
}
/*
* given a leaf and an inode, copy the inode fields into the leaf
*/
static void fill_inode_item(struct btrfs_trans_handle *trans,
struct extent_buffer *leaf,
struct btrfs_inode_item *item,
struct inode *inode)
{
btrfs_set_inode_uid(leaf, item, inode->i_uid);
btrfs_set_inode_gid(leaf, item, inode->i_gid);
btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
btrfs_set_inode_mode(leaf, item, inode->i_mode);
btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
inode->i_atime.tv_sec);
btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
inode->i_atime.tv_nsec);
btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
inode->i_mtime.tv_sec);
btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
inode->i_mtime.tv_nsec);
btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
inode->i_ctime.tv_sec);
btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
inode->i_ctime.tv_nsec);
btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
btrfs_set_inode_sequence(leaf, item, BTRFS_I(inode)->sequence);
btrfs_set_inode_transid(leaf, item, trans->transid);
btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
btrfs_set_inode_block_group(leaf, item, 0);
}
/*
* copy everything in the in-memory inode into the btree.
*/
static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *inode)
{
struct btrfs_inode_item *inode_item;
struct btrfs_path *path;
struct extent_buffer *leaf;
int ret;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
path->leave_spinning = 1;
ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location,
1);
if (ret) {
if (ret > 0)
ret = -ENOENT;
goto failed;
}
btrfs_unlock_up_safe(path, 1);
leaf = path->nodes[0];
inode_item = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_inode_item);
fill_inode_item(trans, leaf, inode_item, inode);
btrfs_mark_buffer_dirty(leaf);
btrfs_set_inode_last_trans(trans, inode);
ret = 0;
failed:
btrfs_free_path(path);
return ret;
}
/*
* copy everything in the in-memory inode into the btree.
*/
noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *inode)
{
int ret;
/*
* If the inode is a free space inode, we can deadlock during commit
* if we put it into the delayed code.
*
* The data relocation inode should also be directly updated
* without delay
*/
if (!btrfs_is_free_space_inode(root, inode)
&& root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
ret = btrfs_delayed_update_inode(trans, root, inode);
if (!ret)
btrfs_set_inode_last_trans(trans, inode);
return ret;
}
return btrfs_update_inode_item(trans, root, inode);
}
static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *inode)
{
int ret;
ret = btrfs_update_inode(trans, root, inode);
if (ret == -ENOSPC)
return btrfs_update_inode_item(trans, root, inode);
return ret;
}
/*
* unlink helper that gets used here in inode.c and in the tree logging
* recovery code. It removes a link in a directory with a given name, and
* also drops the inode's back refs to the directory.
*/
static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct inode *dir, struct inode *inode,
const char *name, int name_len)
{
struct btrfs_path *path;
int ret = 0;
struct extent_buffer *leaf;
struct btrfs_dir_item *di;
struct btrfs_key key;
u64 index;
u64 ino = btrfs_ino(inode);
u64 dir_ino = btrfs_ino(dir);
path = btrfs_alloc_path();
if (!path) {
ret = -ENOMEM;
goto out;
}
path->leave_spinning = 1;
di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
name, name_len, -1);
if (IS_ERR(di)) {
ret = PTR_ERR(di);
goto err;
}
if (!di) {
ret = -ENOENT;
goto err;
}
leaf = path->nodes[0];
btrfs_dir_item_key_to_cpu(leaf, di, &key);
ret = btrfs_delete_one_dir_name(trans, root, path, di);
if (ret)
goto err;
btrfs_release_path(path);
ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
dir_ino, &index);
if (ret) {
printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
"inode %llu parent %llu\n", name_len, name,
(unsigned long long)ino, (unsigned long long)dir_ino);
btrfs_abort_transaction(trans, root, ret);
goto err;
}
ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
goto err;
}
ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
inode, dir_ino);
if (ret != 0 && ret != -ENOENT) {
btrfs_abort_transaction(trans, root, ret);
goto err;
}
ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
dir, index);
if (ret == -ENOENT)
ret = 0;
err:
btrfs_free_path(path);
if (ret)
goto out;
btrfs_i_size_write(dir, dir->i_size - name_len * 2);
inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
btrfs_update_inode(trans, root, dir);
out:
return ret;
}
int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct inode *dir, struct inode *inode,
const char *name, int name_len)
{
int ret;
ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
if (!ret) {
btrfs_drop_nlink(inode);
ret = btrfs_update_inode(trans, root, inode);
}
return ret;
}
/* helper to check if there is any shared block in the path */
static int check_path_shared(struct btrfs_root *root,
struct btrfs_path *path)
{
struct extent_buffer *eb;
int level;
u64 refs = 1;
for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
int ret;
if (!path->nodes[level])
break;
eb = path->nodes[level];
if (!btrfs_block_can_be_shared(root, eb))
continue;
ret = btrfs_lookup_extent_info(NULL, root, eb->start, eb->len,
&refs, NULL);
if (refs > 1)
return 1;
}
return 0;
}
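/*
* Illustrative note, not part of the original source: a refcount above
* 1 on any block in the path means the block is shared, e.g. with a
* snapshot, so deleting from it would COW a new block and *allocate*
* space. The ENOSPC fast path below only proceeds when every block it
* would touch is exclusively owned.
*/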
/*
* helper to start transaction for unlink and rmdir.
*
* unlink and rmdir are special in btrfs: they do not always free space,
* so in the enospc case we should make sure they will free space before
* allowing them to use the global metadata reservation.
*/
static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
struct dentry *dentry)
{
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_path *path;
struct btrfs_inode_ref *ref;
struct btrfs_dir_item *di;
struct inode *inode = dentry->d_inode;
u64 index;
int check_link = 1;
int err = -ENOSPC;
int ret;
u64 ino = btrfs_ino(inode);
u64 dir_ino = btrfs_ino(dir);
/*
* 1 for the possible orphan item
* 1 for the dir item
* 1 for the dir index
* 1 for the inode ref
* 1 for the inode ref in the tree log
* 2 for the dir entries in the log
* 1 for the inode
*/
trans = btrfs_start_transaction(root, 8);
if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
return trans;
if (ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
return ERR_PTR(-ENOSPC);
/* check if someone else holds a reference */
if (S_ISDIR(inode->i_mode) && atomic_read(&inode->i_count) > 1)
return ERR_PTR(-ENOSPC);
if (atomic_read(&inode->i_count) > 2)
return ERR_PTR(-ENOSPC);
if (xchg(&root->fs_info->enospc_unlink, 1))
return ERR_PTR(-ENOSPC);
path = btrfs_alloc_path();
if (!path) {
root->fs_info->enospc_unlink = 0;
return ERR_PTR(-ENOMEM);
}
/* 1 for the orphan item */
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans)) {
btrfs_free_path(path);
root->fs_info->enospc_unlink = 0;
return trans;
}
path->skip_locking = 1;
path->search_commit_root = 1;
ret = btrfs_lookup_inode(trans, root, path,
&BTRFS_I(dir)->location, 0);
if (ret < 0) {
err = ret;
goto out;
}
if (ret == 0) {
if (check_path_shared(root, path))
goto out;
} else {
check_link = 0;
}
btrfs_release_path(path);
ret = btrfs_lookup_inode(trans, root, path,
&BTRFS_I(inode)->location, 0);
if (ret < 0) {
err = ret;
goto out;
}
if (ret == 0) {
if (check_path_shared(root, path))
goto out;
} else {
check_link = 0;
}
btrfs_release_path(path);
if (ret == 0 && S_ISREG(inode->i_mode)) {
ret = btrfs_lookup_file_extent(trans, root, path,
ino, (u64)-1, 0);
if (ret < 0) {
err = ret;
goto out;
}
BUG_ON(ret == 0); /* Corruption */
if (check_path_shared(root, path))
goto out;
btrfs_release_path(path);
}
if (!check_link) {
err = 0;
goto out;
}
di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
dentry->d_name.name, dentry->d_name.len, 0);
if (IS_ERR(di)) {
err = PTR_ERR(di);
goto out;
}
if (di) {
if (check_path_shared(root, path))
goto out;
} else {
err = 0;
goto out;
}
btrfs_release_path(path);
ref = btrfs_lookup_inode_ref(trans, root, path,
dentry->d_name.name, dentry->d_name.len,
ino, dir_ino, 0);
if (IS_ERR(ref)) {
err = PTR_ERR(ref);
goto out;
}
BUG_ON(!ref); /* Logic error */
if (check_path_shared(root, path))
goto out;
index = btrfs_inode_ref_index(path->nodes[0], ref);
btrfs_release_path(path);
/*
* This is a commit root search; if we can look up the inode item and
* other related items in the commit root, it means the transaction
* that created the dir/file has been committed, and the dir index
* item whose insertion we delay has also been inserted into the
* commit root. So we needn't worry about the delayed insertion of
* the dir index item here.
*/
di = btrfs_lookup_dir_index_item(trans, root, path, dir_ino, index,
dentry->d_name.name, dentry->d_name.len, 0);
if (IS_ERR(di)) {
err = PTR_ERR(di);
goto out;
}
BUG_ON(ret == -ENOENT);
if (check_path_shared(root, path))
goto out;
err = 0;
out:
btrfs_free_path(path);
/* Migrate the orphan reservation over */
if (!err)
err = btrfs_block_rsv_migrate(trans->block_rsv,
&root->fs_info->global_block_rsv,
trans->bytes_reserved);
if (err) {
btrfs_end_transaction(trans, root);
root->fs_info->enospc_unlink = 0;
return ERR_PTR(err);
}
trans->block_rsv = &root->fs_info->global_block_rsv;
return trans;
}
static void __unlink_end_trans(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
if (trans->block_rsv == &root->fs_info->global_block_rsv) {
btrfs_block_rsv_release(root, trans->block_rsv,
trans->bytes_reserved);
trans->block_rsv = &root->fs_info->trans_block_rsv;
BUG_ON(!root->fs_info->enospc_unlink);
root->fs_info->enospc_unlink = 0;
}
btrfs_end_transaction(trans, root);
}
static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
{
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_trans_handle *trans;
struct inode *inode = dentry->d_inode;
int ret;
unsigned long nr = 0;
trans = __unlink_start_trans(dir, dentry);
if (IS_ERR(trans))
return PTR_ERR(trans);
btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);
ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
dentry->d_name.name, dentry->d_name.len);
if (ret)
goto out;
if (inode->i_nlink == 0) {
ret = btrfs_orphan_add(trans, inode);
if (ret)
goto out;
}
out:
nr = trans->blocks_used;
__unlink_end_trans(trans, root);
btrfs_btree_balance_dirty(root, nr);
return ret;
}
int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct inode *dir, u64 objectid,
const char *name, int name_len)
{
struct btrfs_path *path;
struct extent_buffer *leaf;
struct btrfs_dir_item *di;
struct btrfs_key key;
u64 index;
int ret;
u64 dir_ino = btrfs_ino(dir);
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
name, name_len, -1);
if (IS_ERR_OR_NULL(di)) {
if (!di)
ret = -ENOENT;
else
ret = PTR_ERR(di);
goto out;
}
leaf = path->nodes[0];
btrfs_dir_item_key_to_cpu(leaf, di, &key);
WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
ret = btrfs_delete_one_dir_name(trans, root, path, di);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
goto out;
}
btrfs_release_path(path);
ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
objectid, root->root_key.objectid,
dir_ino, &index, name, name_len);
if (ret < 0) {
if (ret != -ENOENT) {
btrfs_abort_transaction(trans, root, ret);
goto out;
}
di = btrfs_search_dir_index_item(root, path, dir_ino,
name, name_len);
if (IS_ERR_OR_NULL(di)) {
if (!di)
ret = -ENOENT;
else
ret = PTR_ERR(di);
btrfs_abort_transaction(trans, root, ret);
goto out;
}
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
btrfs_release_path(path);
index = key.offset;
}
btrfs_release_path(path);
ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
goto out;
}
btrfs_i_size_write(dir, dir->i_size - name_len * 2);
dir->i_mtime = dir->i_ctime = CURRENT_TIME;
ret = btrfs_update_inode(trans, root, dir);
if (ret)
btrfs_abort_transaction(trans, root, ret);
out:
btrfs_free_path(path);
return ret;
}
static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
int err = 0;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_trans_handle *trans;
unsigned long nr = 0;
if (inode->i_size > BTRFS_EMPTY_DIR_SIZE ||
btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID)
return -ENOTEMPTY;
trans = __unlink_start_trans(dir, dentry);
if (IS_ERR(trans))
return PTR_ERR(trans);
if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
err = btrfs_unlink_subvol(trans, root, dir,
BTRFS_I(inode)->location.objectid,
dentry->d_name.name,
dentry->d_name.len);
goto out;
}
err = btrfs_orphan_add(trans, inode);
if (err)
goto out;
/* now the directory is empty */
err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
dentry->d_name.name, dentry->d_name.len);
if (!err)
btrfs_i_size_write(inode, 0);
out:
nr = trans->blocks_used;
__unlink_end_trans(trans, root);
btrfs_btree_balance_dirty(root, nr);
return err;
}
/*
* this can truncate away extent items, csum items and directory items.
* It starts at a high offset and removes keys until it can't find
* any higher than new_size
*
* csum items that cross the new i_size are truncated to the new size
* as well.
*
* min_type is the minimum key type to truncate down to. If set to 0, this
* will kill all the items on this inode, including the INODE_ITEM_KEY.
*/
int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct inode *inode,
u64 new_size, u32 min_type)
{
struct btrfs_path *path;
struct extent_buffer *leaf;
struct btrfs_file_extent_item *fi;
struct btrfs_key key;
struct btrfs_key found_key;
u64 extent_start = 0;
u64 extent_num_bytes = 0;
u64 extent_offset = 0;
u64 item_end = 0;
u64 mask = root->sectorsize - 1;
u32 found_type = (u8)-1;
int found_extent;
int del_item;
int pending_del_nr = 0;
int pending_del_slot = 0;
int extent_type = -1;
int ret;
int err = 0;
u64 ino = btrfs_ino(inode);
BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
path->reada = -1;
if (root->ref_cows || root == root->fs_info->tree_root)
btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
/*
* This function is also used to drop the items in the log tree before
* we relog the inode, so if root != BTRFS_I(inode)->root, it means
* it is used to drop the logged items. So we shouldn't kill the delayed
* items.
*/
if (min_type == 0 && root == BTRFS_I(inode)->root)
btrfs_kill_delayed_inode_items(inode);
key.objectid = ino;
key.offset = (u64)-1;
key.type = (u8)-1;
search_again:
path->leave_spinning = 1;
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret < 0) {
err = ret;
goto out;
}
if (ret > 0) {
/* there are no items in the tree for us to truncate, we're
* done
*/
if (path->slots[0] == 0)
goto out;
path->slots[0]--;
}
while (1) {
fi = NULL;
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
found_type = btrfs_key_type(&found_key);
if (found_key.objectid != ino)
break;
if (found_type < min_type)
break;
item_end = found_key.offset;
if (found_type == BTRFS_EXTENT_DATA_KEY) {
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
extent_type = btrfs_file_extent_type(leaf, fi);
if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
item_end +=
btrfs_file_extent_num_bytes(leaf, fi);
} else {
item_end += btrfs_file_extent_inline_len(leaf,
fi);
}
item_end--;
}
if (found_type > min_type) {
del_item = 1;
} else {
if (item_end < new_size)
break;
if (found_key.offset >= new_size)
del_item = 1;
else
del_item = 0;
}
found_extent = 0;
/* FIXME, shrink the extent if the ref count is only 1 */
if (found_type != BTRFS_EXTENT_DATA_KEY)
goto delete;
if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
u64 num_dec;
extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
if (!del_item) {
u64 orig_num_bytes =
btrfs_file_extent_num_bytes(leaf, fi);
extent_num_bytes = new_size -
found_key.offset + root->sectorsize - 1;
extent_num_bytes = extent_num_bytes &
~((u64)root->sectorsize - 1);
btrfs_set_file_extent_num_bytes(leaf, fi,
extent_num_bytes);
num_dec = (orig_num_bytes -
extent_num_bytes);
if (root->ref_cows && extent_start != 0)
inode_sub_bytes(inode, num_dec);
btrfs_mark_buffer_dirty(leaf);
} else {
extent_num_bytes =
btrfs_file_extent_disk_num_bytes(leaf,
fi);
extent_offset = found_key.offset -
btrfs_file_extent_offset(leaf, fi);
/* FIXME blocksize != 4096 */
num_dec = btrfs_file_extent_num_bytes(leaf, fi);
if (extent_start != 0) {
found_extent = 1;
if (root->ref_cows)
inode_sub_bytes(inode, num_dec);
}
}
} else {
/*
* we can't truncate inline items that have had
* special encodings
*/
if (!del_item &&
btrfs_file_extent_compression(leaf, fi) == 0 &&
btrfs_file_extent_encryption(leaf, fi) == 0 &&
btrfs_file_extent_other_encoding(leaf, fi) == 0) {
u32 size = new_size - found_key.offset;
if (root->ref_cows) {
inode_sub_bytes(inode, item_end + 1 -
new_size);
}
size =
btrfs_file_extent_calc_inline_size(size);
btrfs_truncate_item(trans, root, path,
size, 1);
} else if (root->ref_cows) {
inode_sub_bytes(inode, item_end + 1 -
found_key.offset);
}
}
delete:
if (del_item) {
if (!pending_del_nr) {
/* no pending yet, add ourselves */
pending_del_slot = path->slots[0];
pending_del_nr = 1;
} else if (pending_del_nr &&
path->slots[0] + 1 == pending_del_slot) {
/* hop on the pending chunk */
pending_del_nr++;
pending_del_slot = path->slots[0];
} else {
BUG();
}
} else {
break;
}
if (found_extent && (root->ref_cows ||
root == root->fs_info->tree_root)) {
btrfs_set_path_blocking(path);
ret = btrfs_free_extent(trans, root, extent_start,
extent_num_bytes, 0,
btrfs_header_owner(leaf),
ino, extent_offset, 0);
BUG_ON(ret);
}
if (found_type == BTRFS_INODE_ITEM_KEY)
break;
if (path->slots[0] == 0 ||
path->slots[0] != pending_del_slot) {
if (root->ref_cows &&
BTRFS_I(inode)->location.objectid !=
BTRFS_FREE_INO_OBJECTID) {
err = -EAGAIN;
goto out;
}
if (pending_del_nr) {
ret = btrfs_del_items(trans, root, path,
pending_del_slot,
pending_del_nr);
if (ret) {
btrfs_abort_transaction(trans,
root, ret);
goto error;
}
pending_del_nr = 0;
}
btrfs_release_path(path);
goto search_again;
} else {
path->slots[0]--;
}
}
out:
if (pending_del_nr) {
ret = btrfs_del_items(trans, root, path, pending_del_slot,
pending_del_nr);
if (ret)
btrfs_abort_transaction(trans, root, ret);
}
error:
btrfs_free_path(path);
return err;
}
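/*
* Illustrative sketch, not part of the original source: the loop above
* walks the inode's items from the highest key downwards. Starting the
* search at
*
*   key = (ino, (u8)-1, (u64)-1)
*
* lands just past the inode's last item; path->slots[0]-- then steps
* through the items in descending key order, batching adjacent slots
* into (pending_del_slot, pending_del_nr) so one btrfs_del_items()
* call can remove a whole run of keys.
*/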
/*
* taken from block_truncate_page, but does cow as it zeros out
* any bytes left in the last page in the file.
*/
static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
{
struct inode *inode = mapping->host;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct btrfs_ordered_extent *ordered;
struct extent_state *cached_state = NULL;
char *kaddr;
u32 blocksize = root->sectorsize;
pgoff_t index = from >> PAGE_CACHE_SHIFT;
unsigned offset = from & (PAGE_CACHE_SIZE-1);
struct page *page;
gfp_t mask = btrfs_alloc_write_mask(mapping);
int ret = 0;
u64 page_start;
u64 page_end;
if ((offset & (blocksize - 1)) == 0)
goto out;
ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
if (ret)
goto out;
ret = -ENOMEM;
again:
page = find_or_create_page(mapping, index, mask);
if (!page) {
btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
goto out;
}
page_start = page_offset(page);
page_end = page_start + PAGE_CACHE_SIZE - 1;
if (!PageUptodate(page)) {
ret = btrfs_readpage(NULL, page);
lock_page(page);
if (page->mapping != mapping) {
unlock_page(page);
page_cache_release(page);
goto again;
}
if (!PageUptodate(page)) {
ret = -EIO;
goto out_unlock;
}
}
wait_on_page_writeback(page);
lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
set_page_extent_mapped(page);
ordered = btrfs_lookup_ordered_extent(inode, page_start);
if (ordered) {
unlock_extent_cached(io_tree, page_start, page_end,
&cached_state, GFP_NOFS);
unlock_page(page);
page_cache_release(page);
btrfs_start_ordered_extent(inode, ordered, 1);
btrfs_put_ordered_extent(ordered);
goto again;
}
clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
0, 0, &cached_state, GFP_NOFS);
ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
&cached_state);
if (ret) {
unlock_extent_cached(io_tree, page_start, page_end,
&cached_state, GFP_NOFS);
goto out_unlock;
}
ret = 0;
if (offset != PAGE_CACHE_SIZE) {
kaddr = kmap(page);
memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
flush_dcache_page(page);
kunmap(page);
}
ClearPageChecked(page);
set_page_dirty(page);
unlock_extent_cached(io_tree, page_start, page_end, &cached_state,
GFP_NOFS);
out_unlock:
if (ret)
btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
unlock_page(page);
page_cache_release(page);
out:
return ret;
}
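/*
* Worked example, not part of the original source, assuming 4K pages
* and 4K sectors: truncating to from = 10000 gives
*
*   index  = 10000 >> PAGE_CACHE_SHIFT      = 2
*   offset = 10000 & (PAGE_CACHE_SIZE - 1)  = 1808
*
* so page 2 is read and locked, and bytes 1808..4095 are zeroed before
* the page is marked dirty, ensuring the tail of the last block never
* leaks stale data.
*/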
/*
* This function puts in dummy file extents for the area we're creating a hole
* for. So if we are truncating this file to a larger size we need to insert
* these file extents so that btrfs_get_extent will return an
* EXTENT_MAP_HOLE for the range between oldsize and size.
*/
int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
{
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct extent_map *em = NULL;
struct extent_state *cached_state = NULL;
u64 mask = root->sectorsize - 1;
u64 hole_start = (oldsize + mask) & ~mask;
u64 block_end = (size + mask) & ~mask;
u64 last_byte;
u64 cur_offset;
u64 hole_size;
int err = 0;
if (size <= hole_start)
return 0;
while (1) {
struct btrfs_ordered_extent *ordered;
btrfs_wait_ordered_range(inode, hole_start,
block_end - hole_start);
lock_extent_bits(io_tree, hole_start, block_end - 1, 0,
&cached_state);
ordered = btrfs_lookup_ordered_extent(inode, hole_start);
if (!ordered)
break;
unlock_extent_cached(io_tree, hole_start, block_end - 1,
&cached_state, GFP_NOFS);
btrfs_put_ordered_extent(ordered);
}
cur_offset = hole_start;
while (1) {
em = btrfs_get_extent(inode, NULL, 0, cur_offset,
block_end - cur_offset, 0);
if (IS_ERR(em)) {
err = PTR_ERR(em);
break;
}
last_byte = min(extent_map_end(em), block_end);
last_byte = (last_byte + mask) & ~mask;
if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
u64 hint_byte = 0;
hole_size = last_byte - cur_offset;
trans = btrfs_start_transaction(root, 3);
if (IS_ERR(trans)) {
err = PTR_ERR(trans);
break;
}
err = btrfs_drop_extents(trans, inode, cur_offset,
cur_offset + hole_size,
&hint_byte, 1);
if (err) {
btrfs_abort_transaction(trans, root, err);
btrfs_end_transaction(trans, root);
break;
}
err = btrfs_insert_file_extent(trans, root,
btrfs_ino(inode), cur_offset, 0,
0, hole_size, 0, hole_size,
0, 0, 0);
if (err) {
btrfs_abort_transaction(trans, root, err);
btrfs_end_transaction(trans, root);
break;
}
btrfs_drop_extent_cache(inode, hole_start,
last_byte - 1, 0);
btrfs_update_inode(trans, root, inode);
btrfs_end_transaction(trans, root);
}
free_extent_map(em);
em = NULL;
cur_offset = last_byte;
if (cur_offset >= block_end)
break;
}
free_extent_map(em);
unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state,
GFP_NOFS);
return err;
}
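/*
* Worked example, not part of the original source, with a 4K
* sectorsize (mask = 4095): expanding a file from oldsize = 10000 to
* size = 20000 gives
*
*   hole_start = (10000 + 4095) & ~4095 = 12288
*   block_end  = (20000 + 4095) & ~4095 = 20480
*
* so dummy file extents are inserted for [12288, 20480) wherever no
* existing extent (or prealloc) already covers the range.
*/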
static int btrfs_setsize(struct inode *inode, loff_t newsize)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
loff_t oldsize = i_size_read(inode);
int ret;
if (newsize == oldsize)
return 0;
if (newsize > oldsize) {
truncate_pagecache(inode, oldsize, newsize);
ret = btrfs_cont_expand(inode, oldsize, newsize);
if (ret)
return ret;
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans))
return PTR_ERR(trans);
i_size_write(inode, newsize);
btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
ret = btrfs_update_inode(trans, root, inode);
btrfs_end_transaction(trans, root);
} else {
/*
* We're truncating a file that used to have good data down to
* zero. Make sure it gets into the ordered flush list so that
* any new writes get down to disk quickly.
*/
if (newsize == 0)
BTRFS_I(inode)->ordered_data_close = 1;
/* we don't support swapfiles, so vmtruncate shouldn't fail */
truncate_setsize(inode, newsize);
ret = btrfs_truncate(inode);
}
return ret;
}
static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = dentry->d_inode;
struct btrfs_root *root = BTRFS_I(inode)->root;
int err;
if (btrfs_root_readonly(root))
return -EROFS;
err = inode_change_ok(inode, attr);
if (err)
return err;
if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
err = btrfs_setsize(inode, attr->ia_size);
if (err)
return err;
}
if (attr->ia_valid) {
setattr_copy(inode, attr);
err = btrfs_dirty_inode(inode);
if (!err && attr->ia_valid & ATTR_MODE)
err = btrfs_acl_chmod(inode);
}
return err;
}
void btrfs_evict_inode(struct inode *inode)
{
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_block_rsv *rsv, *global_rsv;
u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
unsigned long nr;
int ret;
trace_btrfs_inode_evict(inode);
truncate_inode_pages(&inode->i_data, 0);
if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 ||
btrfs_is_free_space_inode(root, inode)))
goto no_delete;
if (is_bad_inode(inode)) {
btrfs_orphan_del(NULL, inode);
goto no_delete;
}
/* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
btrfs_wait_ordered_range(inode, 0, (u64)-1);
if (root->fs_info->log_root_recovering) {
BUG_ON(!list_empty(&BTRFS_I(inode)->i_orphan));
goto no_delete;
}
if (inode->i_nlink > 0) {
BUG_ON(btrfs_root_refs(&root->root_item) != 0);
goto no_delete;
}
rsv = btrfs_alloc_block_rsv(root);
if (!rsv) {
btrfs_orphan_del(NULL, inode);
goto no_delete;
}
rsv->size = min_size;
global_rsv = &root->fs_info->global_block_rsv;
btrfs_i_size_write(inode, 0);
/*
* This is a bit simpler than btrfs_truncate since
*
* 1) We've already reserved our space for our orphan item in the
* unlink.
* 2) We're going to delete the inode item, so we don't need to update
* it at all.
*
* So we just need to reserve some slack space in case we add bytes when
* doing the truncate.
*/
while (1) {
ret = btrfs_block_rsv_refill_noflush(root, rsv, min_size);
/*
* Try and steal from the global reserve since we will
* likely not use this space anyway, we want to try as
* hard as possible to get this to work.
*/
if (ret)
ret = btrfs_block_rsv_migrate(global_rsv, rsv, min_size);
if (ret) {
printk(KERN_WARNING "Could not get space for a "
"delete, will truncate on mount %d\n", ret);
btrfs_orphan_del(NULL, inode);
btrfs_free_block_rsv(root, rsv);
goto no_delete;
}
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
btrfs_orphan_del(NULL, inode);
btrfs_free_block_rsv(root, rsv);
goto no_delete;
}
trans->block_rsv = rsv;
ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
if (ret != -EAGAIN)
break;
nr = trans->blocks_used;
btrfs_end_transaction(trans, root);
trans = NULL;
btrfs_btree_balance_dirty(root, nr);
}
btrfs_free_block_rsv(root, rsv);
if (ret == 0) {
trans->block_rsv = root->orphan_block_rsv;
ret = btrfs_orphan_del(trans, inode);
BUG_ON(ret);
}
trans->block_rsv = &root->fs_info->trans_block_rsv;
if (!(root == root->fs_info->tree_root ||
root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
btrfs_return_ino(root, btrfs_ino(inode));
nr = trans->blocks_used;
btrfs_end_transaction(trans, root);
btrfs_btree_balance_dirty(root, nr);
no_delete:
end_writeback(inode);
return;
}
/*
* this returns the key found in the dir entry in the location pointer.
* If no dir entries were found, location->objectid is 0.
*/
static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
struct btrfs_key *location)
{
const char *name = dentry->d_name.name;
int namelen = dentry->d_name.len;
struct btrfs_dir_item *di;
struct btrfs_path *path;
struct btrfs_root *root = BTRFS_I(dir)->root;
int ret = 0;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name,
namelen, 0);
if (IS_ERR(di))
ret = PTR_ERR(di);
if (IS_ERR_OR_NULL(di))
goto out_err;
btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
out:
btrfs_free_path(path);
return ret;
out_err:
location->objectid = 0;
goto out;
}
/*
* when we hit a tree root in a directory, the btrfs part of the inode
* needs to be changed to reflect the root directory of the tree root. This
* is kind of like crossing a mount point.
*/
static int fixup_tree_root_location(struct btrfs_root *root,
struct inode *dir,
struct dentry *dentry,
struct btrfs_key *location,
struct btrfs_root **sub_root)
{
struct btrfs_path *path;
struct btrfs_root *new_root;
struct btrfs_root_ref *ref;
struct extent_buffer *leaf;
int ret;
int err = 0;
path = btrfs_alloc_path();
if (!path) {
err = -ENOMEM;
goto out;
}
err = -ENOENT;
ret = btrfs_find_root_ref(root->fs_info->tree_root, path,
BTRFS_I(dir)->root->root_key.objectid,
location->objectid);
if (ret) {
if (ret < 0)
err = ret;
goto out;
}
leaf = path->nodes[0];
ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
goto out;
ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
(unsigned long)(ref + 1),
dentry->d_name.len);
if (ret)
goto out;
btrfs_release_path(path);
new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
if (IS_ERR(new_root)) {
err = PTR_ERR(new_root);
goto out;
}
if (btrfs_root_refs(&new_root->root_item) == 0) {
err = -ENOENT;
goto out;
}
*sub_root = new_root;
location->objectid = btrfs_root_dirid(&new_root->root_item);
location->type = BTRFS_INODE_ITEM_KEY;
location->offset = 0;
err = 0;
out:
btrfs_free_path(path);
return err;
}
static void inode_tree_add(struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_inode *entry;
struct rb_node **p;
struct rb_node *parent;
u64 ino = btrfs_ino(inode);
again:
p = &root->inode_tree.rb_node;
parent = NULL;
if (inode_unhashed(inode))
return;
spin_lock(&root->inode_lock);
while (*p) {
parent = *p;
entry = rb_entry(parent, struct btrfs_inode, rb_node);
if (ino < btrfs_ino(&entry->vfs_inode))
p = &parent->rb_left;
else if (ino > btrfs_ino(&entry->vfs_inode))
p = &parent->rb_right;
else {
WARN_ON(!(entry->vfs_inode.i_state &
(I_WILL_FREE | I_FREEING)));
rb_erase(parent, &root->inode_tree);
RB_CLEAR_NODE(parent);
spin_unlock(&root->inode_lock);
goto again;
}
}
rb_link_node(&BTRFS_I(inode)->rb_node, parent, p);
rb_insert_color(&BTRFS_I(inode)->rb_node, &root->inode_tree);
spin_unlock(&root->inode_lock);
}
static void inode_tree_del(struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
int empty = 0;
spin_lock(&root->inode_lock);
if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
empty = RB_EMPTY_ROOT(&root->inode_tree);
}
spin_unlock(&root->inode_lock);
/*
* Free space cache has inodes in the tree root, but the tree root has a
* root_refs of 0, so this could end up dropping the tree root as a
* snapshot, so we need the extra !root->fs_info->tree_root check to
* make sure we don't drop it.
*/
if (empty && btrfs_root_refs(&root->root_item) == 0 &&
root != root->fs_info->tree_root) {
synchronize_srcu(&root->fs_info->subvol_srcu);
spin_lock(&root->inode_lock);
empty = RB_EMPTY_ROOT(&root->inode_tree);
spin_unlock(&root->inode_lock);
if (empty)
btrfs_add_dead_root(root);
}
}
void btrfs_invalidate_inodes(struct btrfs_root *root)
{
struct rb_node *node;
struct rb_node *prev;
struct btrfs_inode *entry;
struct inode *inode;
u64 objectid = 0;
WARN_ON(btrfs_root_refs(&root->root_item) != 0);
spin_lock(&root->inode_lock);
again:
node = root->inode_tree.rb_node;
prev = NULL;
while (node) {
prev = node;
entry = rb_entry(node, struct btrfs_inode, rb_node);
if (objectid < btrfs_ino(&entry->vfs_inode))
node = node->rb_left;
else if (objectid > btrfs_ino(&entry->vfs_inode))
node = node->rb_right;
else
break;
}
if (!node) {
while (prev) {
entry = rb_entry(prev, struct btrfs_inode, rb_node);
if (objectid <= btrfs_ino(&entry->vfs_inode)) {
node = prev;
break;
}
prev = rb_next(prev);
}
}
while (node) {
entry = rb_entry(node, struct btrfs_inode, rb_node);
objectid = btrfs_ino(&entry->vfs_inode) + 1;
inode = igrab(&entry->vfs_inode);
if (inode) {
spin_unlock(&root->inode_lock);
if (atomic_read(&inode->i_count) > 1)
d_prune_aliases(inode);
/*
* btrfs_drop_inode will have it removed from
* the inode cache when its usage count
* hits zero.
*/
iput(inode);
cond_resched();
spin_lock(&root->inode_lock);
goto again;
}
if (cond_resched_lock(&root->inode_lock))
goto again;
node = rb_next(node);
}
spin_unlock(&root->inode_lock);
}
static int btrfs_init_locked_inode(struct inode *inode, void *p)
{
struct btrfs_iget_args *args = p;
inode->i_ino = args->ino;
BTRFS_I(inode)->root = args->root;
btrfs_set_inode_space_info(args->root, inode);
return 0;
}
static int btrfs_find_actor(struct inode *inode, void *opaque)
{
struct btrfs_iget_args *args = opaque;
return args->ino == btrfs_ino(inode) &&
args->root == BTRFS_I(inode)->root;
}
static struct inode *btrfs_iget_locked(struct super_block *s,
u64 objectid,
struct btrfs_root *root)
{
struct inode *inode;
struct btrfs_iget_args args;
args.ino = objectid;
args.root = root;
inode = iget5_locked(s, objectid, btrfs_find_actor,
btrfs_init_locked_inode,
(void *)&args);
return inode;
}
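/*
* Illustrative note, not part of the original source: iget5_locked()
* hashes only on objectid, and inode numbers repeat across subvolumes,
* so btrfs_find_actor() must compare the root as well:
*
*   args->ino == btrfs_ino(inode) && args->root == BTRFS_I(inode)->root
*
* Two inodes with the same objectid in different subvolumes therefore
* hash to the same bucket but never alias each other.
*/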
/* Get an inode object given its location and corresponding root.
* Sets *new to 1 if the inode was read from disk.
*/
struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
struct btrfs_root *root, int *new)
{
struct inode *inode;
inode = btrfs_iget_locked(s, location->objectid, root);
if (!inode)
return ERR_PTR(-ENOMEM);
if (inode->i_state & I_NEW) {
BTRFS_I(inode)->root = root;
memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
btrfs_read_locked_inode(inode);
if (!is_bad_inode(inode)) {
inode_tree_add(inode);
unlock_new_inode(inode);
if (new)
*new = 1;
} else {
unlock_new_inode(inode);
iput(inode);
inode = ERR_PTR(-ESTALE);
}
}
return inode;
}
static struct inode *new_simple_dir(struct super_block *s,
struct btrfs_key *key,
struct btrfs_root *root)
{
struct inode *inode = new_inode(s);
if (!inode)
return ERR_PTR(-ENOMEM);
BTRFS_I(inode)->root = root;
memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
BTRFS_I(inode)->dummy_inode = 1;
inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
inode->i_op = &btrfs_dir_ro_inode_operations;
inode->i_fop = &simple_dir_operations;
inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
return inode;
}
struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
{
struct inode *inode;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_root *sub_root = root;
struct btrfs_key location;
int index;
int ret = 0;
if (dentry->d_name.len > BTRFS_NAME_LEN)
return ERR_PTR(-ENAMETOOLONG);
if (unlikely(d_need_lookup(dentry))) {
memcpy(&location, dentry->d_fsdata, sizeof(struct btrfs_key));
kfree(dentry->d_fsdata);
dentry->d_fsdata = NULL;
/* This thing is hashed, drop it for now */
d_drop(dentry);
} else {
ret = btrfs_inode_by_name(dir, dentry, &location);
}
if (ret < 0)
return ERR_PTR(ret);
if (location.objectid == 0)
return NULL;
if (location.type == BTRFS_INODE_ITEM_KEY) {
inode = btrfs_iget(dir->i_sb, &location, root, NULL);
return inode;
}
BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);
index = srcu_read_lock(&root->fs_info->subvol_srcu);
ret = fixup_tree_root_location(root, dir, dentry,
&location, &sub_root);
if (ret < 0) {
if (ret != -ENOENT)
inode = ERR_PTR(ret);
else
inode = new_simple_dir(dir->i_sb, &location, sub_root);
} else {
inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
}
srcu_read_unlock(&root->fs_info->subvol_srcu, index);
if (!IS_ERR(inode) && root != sub_root) {
down_read(&root->fs_info->cleanup_work_sem);
if (!(inode->i_sb->s_flags & MS_RDONLY))
ret = btrfs_orphan_cleanup(sub_root);
up_read(&root->fs_info->cleanup_work_sem);
if (ret)
inode = ERR_PTR(ret);
}
return inode;
}
static int btrfs_dentry_delete(const struct dentry *dentry)
{
struct btrfs_root *root;
struct inode *inode = dentry->d_inode;
if (!inode && !IS_ROOT(dentry))
inode = dentry->d_parent->d_inode;
if (inode) {
root = BTRFS_I(inode)->root;
if (btrfs_root_refs(&root->root_item) == 0)
return 1;
if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
return 1;
}
return 0;
}
static void btrfs_dentry_release(struct dentry *dentry)
{
if (dentry->d_fsdata)
kfree(dentry->d_fsdata);
}
static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
struct nameidata *nd)
{
struct dentry *ret;
ret = d_splice_alias(btrfs_lookup_dentry(dir, dentry), dentry);
if (unlikely(d_need_lookup(dentry))) {
spin_lock(&dentry->d_lock);
dentry->d_flags &= ~DCACHE_NEED_LOOKUP;
spin_unlock(&dentry->d_lock);
}
return ret;
}
unsigned char btrfs_filetype_table[] = {
DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
};
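/*
* Illustrative note, not part of the original source: the table is
* indexed by the on-disk dir item type (BTRFS_FT_*), so e.g.
*
*   btrfs_filetype_table[1] == DT_REG   (BTRFS_FT_REG_FILE)
*   btrfs_filetype_table[2] == DT_DIR   (BTRFS_FT_DIR)
*   btrfs_filetype_table[7] == DT_LNK   (BTRFS_FT_SYMLINK)
*
* which is how btrfs_real_readdir() below turns btrfs_dir_type() into
* the d_type reported to filldir.
*/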
static int btrfs_real_readdir(struct file *filp, void *dirent,
filldir_t filldir)
{
struct inode *inode = filp->f_dentry->d_inode;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_item *item;
struct btrfs_dir_item *di;
struct btrfs_key key;
struct btrfs_key found_key;
struct btrfs_path *path;
struct list_head ins_list;
struct list_head del_list;
int ret;
struct extent_buffer *leaf;
int slot;
unsigned char d_type;
int over = 0;
u32 di_cur;
u32 di_total;
u32 di_len;
int key_type = BTRFS_DIR_INDEX_KEY;
char tmp_name[32];
char *name_ptr;
int name_len;
int is_curr = 0; /* filp->f_pos points to the current index? */
/* FIXME, use a real flag for deciding about the key type */
if (root->fs_info->tree_root == root)
key_type = BTRFS_DIR_ITEM_KEY;
/* special case for "." */
if (filp->f_pos == 0) {
over = filldir(dirent, ".", 1,
filp->f_pos, btrfs_ino(inode), DT_DIR);
if (over)
return 0;
filp->f_pos = 1;
}
/* special case for .., just use the back ref */
if (filp->f_pos == 1) {
u64 pino = parent_ino(filp->f_path.dentry);
over = filldir(dirent, "..", 2,
filp->f_pos, pino, DT_DIR);
if (over)
return 0;
filp->f_pos = 2;
}
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
path->reada = 1;
if (key_type == BTRFS_DIR_INDEX_KEY) {
INIT_LIST_HEAD(&ins_list);
INIT_LIST_HEAD(&del_list);
btrfs_get_delayed_items(inode, &ins_list, &del_list);
}
btrfs_set_key_type(&key, key_type);
key.offset = filp->f_pos;
key.objectid = btrfs_ino(inode);
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
goto err;
while (1) {
leaf = path->nodes[0];
slot = path->slots[0];
if (slot >= btrfs_header_nritems(leaf)) {
ret = btrfs_next_leaf(root, path);
if (ret < 0)
goto err;
else if (ret > 0)
break;
continue;
}
item = btrfs_item_nr(leaf, slot);
btrfs_item_key_to_cpu(leaf, &found_key, slot);
if (found_key.objectid != key.objectid)
break;
if (btrfs_key_type(&found_key) != key_type)
break;
if (found_key.offset < filp->f_pos)
goto next;
if (key_type == BTRFS_DIR_INDEX_KEY &&
btrfs_should_delete_dir_index(&del_list,
found_key.offset))
goto next;
filp->f_pos = found_key.offset;
is_curr = 1;
di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
di_cur = 0;
di_total = btrfs_item_size(leaf, item);
while (di_cur < di_total) {
struct btrfs_key location;
if (verify_dir_item(root, leaf, di))
break;
name_len = btrfs_dir_name_len(leaf, di);
if (name_len <= sizeof(tmp_name)) {
name_ptr = tmp_name;
} else {
name_ptr = kmalloc(name_len, GFP_NOFS);
if (!name_ptr) {
ret = -ENOMEM;
goto err;
}
}
read_extent_buffer(leaf, name_ptr,
(unsigned long)(di + 1), name_len);
d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
btrfs_dir_item_key_to_cpu(leaf, di, &location);
/* is this a reference to our own snapshot? If so
* skip it.
*
* In contrast to old kernels, we insert the snapshot's
* dir item and dir index after it has been created, so
* we won't find a reference to our own snapshot. We
* still keep the following code for backward
* compatibility.
*/
if (location.type == BTRFS_ROOT_ITEM_KEY &&
location.objectid == root->root_key.objectid) {
over = 0;
goto skip;
}
over = filldir(dirent, name_ptr, name_len,
found_key.offset, location.objectid,
d_type);
skip:
if (name_ptr != tmp_name)
kfree(name_ptr);
if (over)
goto nopos;
di_len = btrfs_dir_name_len(leaf, di) +
btrfs_dir_data_len(leaf, di) + sizeof(*di);
di_cur += di_len;
di = (struct btrfs_dir_item *)((char *)di + di_len);
}
next:
path->slots[0]++;
}
if (key_type == BTRFS_DIR_INDEX_KEY) {
if (is_curr)
filp->f_pos++;
ret = btrfs_readdir_delayed_dir_index(filp, dirent, filldir,
&ins_list);
if (ret)
goto nopos;
}
/* Reached end of directory/root. Bump pos past the last item. */
if (key_type == BTRFS_DIR_INDEX_KEY)
/*
* 32-bit glibc will use getdents64, but then strtol -
* so the last number we can serve is this.
*/
filp->f_pos = 0x7fffffff;
else
filp->f_pos++;
nopos:
ret = 0;
err:
if (key_type == BTRFS_DIR_INDEX_KEY)
btrfs_put_delayed_items(&ins_list, &del_list);
btrfs_free_path(path);
return ret;
}
int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
int ret = 0;
bool nolock = false;
if (BTRFS_I(inode)->dummy_inode)
return 0;
if (btrfs_fs_closing(root->fs_info) && btrfs_is_free_space_inode(root, inode))
nolock = true;
if (wbc->sync_mode == WB_SYNC_ALL) {
if (nolock)
trans = btrfs_join_transaction_nolock(root);
else
trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
return PTR_ERR(trans);
if (nolock)
ret = btrfs_end_transaction_nolock(trans, root);
else
ret = btrfs_commit_transaction(trans, root);
}
return ret;
}
/*
* This is somewhat expensive, updating the tree every time the
* inode changes. But, it is most likely to find the inode in cache.
* FIXME, needs more benchmarking; there are no reasons other than performance
* to keep or drop this code.
*/
int btrfs_dirty_inode(struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
int ret;
if (BTRFS_I(inode)->dummy_inode)
return 0;
trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
return PTR_ERR(trans);
ret = btrfs_update_inode(trans, root, inode);
if (ret == -ENOSPC) {
/* whoops, lets try again with the full transaction */
btrfs_end_transaction(trans, root);
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans))
return PTR_ERR(trans);
ret = btrfs_update_inode(trans, root, inode);
}
btrfs_end_transaction(trans, root);
if (BTRFS_I(inode)->delayed_node)
btrfs_balance_delayed_items(root);
return ret;
}
/*
* This is a copy of file_update_time. We need this so we can return error on
* ENOSPC for updating the inode in the case of file write and mmap writes.
*/
int btrfs_update_time(struct file *file)
{
struct inode *inode = file->f_path.dentry->d_inode;
struct timespec now;
int ret;
enum { S_MTIME = 1, S_CTIME = 2, S_VERSION = 4 } sync_it = 0;
/* First try to exhaust all avenues to not sync */
if (IS_NOCMTIME(inode))
return 0;
now = current_fs_time(inode->i_sb);
if (!timespec_equal(&inode->i_mtime, &now))
sync_it = S_MTIME;
if (!timespec_equal(&inode->i_ctime, &now))
sync_it |= S_CTIME;
if (IS_I_VERSION(inode))
sync_it |= S_VERSION;
if (!sync_it)
return 0;
/* Finally allowed to write? Takes lock. */
if (mnt_want_write_file(file))
return 0;
/* Only change inode inside the lock region */
if (sync_it & S_VERSION)
inode_inc_iversion(inode);
if (sync_it & S_CTIME)
inode->i_ctime = now;
if (sync_it & S_MTIME)
inode->i_mtime = now;
ret = btrfs_dirty_inode(inode);
if (!ret)
mark_inode_dirty_sync(inode);
mnt_drop_write(file->f_path.mnt);
return ret;
}
/*
* find the highest existing sequence number in a directory
* and then set the in-memory index_cnt variable to reflect
* free sequence numbers
*/
static int btrfs_set_inode_index_count(struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_key key, found_key;
struct btrfs_path *path;
struct extent_buffer *leaf;
int ret;
key.objectid = btrfs_ino(inode);
btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
key.offset = (u64)-1;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
goto out;
/* FIXME: we should be able to handle this */
if (ret == 0)
goto out;
ret = 0;
/*
* MAGIC NUMBER EXPLANATION:
* since we search a directory based on f_pos, and '.' and '..' have
* f_pos of 0 and 1 respectively, everybody else has to start at 2
*/
if (path->slots[0] == 0) {
BTRFS_I(inode)->index_cnt = 2;
goto out;
}
path->slots[0]--;
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
if (found_key.objectid != btrfs_ino(inode) ||
btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
BTRFS_I(inode)->index_cnt = 2;
goto out;
}
BTRFS_I(inode)->index_cnt = found_key.offset + 1;
out:
btrfs_free_path(path);
return ret;
}
/*
* helper to find a free sequence number in a given directory. The
* current code is very simple; later versions will do smarter things
* in the btree
*/
int btrfs_set_inode_index(struct inode *dir, u64 *index)
{
int ret = 0;
if (BTRFS_I(dir)->index_cnt == (u64)-1) {
ret = btrfs_inode_delayed_dir_index_count(dir);
if (ret) {
ret = btrfs_set_inode_index_count(dir);
if (ret)
return ret;
}
}
*index = BTRFS_I(dir)->index_cnt;
BTRFS_I(dir)->index_cnt++;
return ret;
}
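/*
 * A minimal usage sketch (hypothetical caller), mirroring how link
 * creation uses this helper: reserve a directory index for the new
 * entry, then add the link with it:
 *
 *	u64 index;
 *	int err = btrfs_set_inode_index(dir, &index);
 *	if (!err)
 *		err = btrfs_add_link(trans, dir, inode, name, name_len,
 *				     1, index);
 */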
static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct inode *dir,
const char *name, int name_len,
u64 ref_objectid, u64 objectid,
umode_t mode, u64 *index)
{
struct inode *inode;
struct btrfs_inode_item *inode_item;
struct btrfs_key *location;
struct btrfs_path *path;
struct btrfs_inode_ref *ref;
struct btrfs_key key[2];
u32 sizes[2];
unsigned long ptr;
int ret;
int owner;
path = btrfs_alloc_path();
if (!path)
return ERR_PTR(-ENOMEM);
inode = new_inode(root->fs_info->sb);
if (!inode) {
btrfs_free_path(path);
return ERR_PTR(-ENOMEM);
}
/*
* we have to initialize this early, so we can reclaim the inode
* number if we fail afterwards in this function.
*/
inode->i_ino = objectid;
if (dir) {
trace_btrfs_inode_request(dir);
ret = btrfs_set_inode_index(dir, index);
if (ret) {
btrfs_free_path(path);
iput(inode);
return ERR_PTR(ret);
}
}
/*
* index_cnt is ignored for everything but a dir,
* btrfs_set_inode_index_count has an explanation for the magic
* number
*/
BTRFS_I(inode)->index_cnt = 2;
BTRFS_I(inode)->root = root;
BTRFS_I(inode)->generation = trans->transid;
inode->i_generation = BTRFS_I(inode)->generation;
btrfs_set_inode_space_info(root, inode);
if (S_ISDIR(mode))
owner = 0;
else
owner = 1;
key[0].objectid = objectid;
btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
key[0].offset = 0;
key[1].objectid = objectid;
btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
key[1].offset = ref_objectid;
sizes[0] = sizeof(struct btrfs_inode_item);
sizes[1] = name_len + sizeof(*ref);
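/*
 * The inode item and the inode ref (which carries the name) are
 * inserted back to back with one btree operation so they land in the
 * same leaf; sizes[1] accounts for the variable length name stored
 * inline after struct btrfs_inode_ref.
 */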
path->leave_spinning = 1;
ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
if (ret != 0)
goto fail;
inode_init_owner(inode, dir, mode);
inode_set_bytes(inode, 0);
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_inode_item);
fill_inode_item(trans, path->nodes[0], inode_item, inode);
ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
struct btrfs_inode_ref);
btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
ptr = (unsigned long)(ref + 1);
write_extent_buffer(path->nodes[0], name, ptr, name_len);
btrfs_mark_buffer_dirty(path->nodes[0]);
btrfs_free_path(path);
location = &BTRFS_I(inode)->location;
location->objectid = objectid;
location->offset = 0;
btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
btrfs_inherit_iflags(inode, dir);
if (S_ISREG(mode)) {
if (btrfs_test_opt(root, NODATASUM))
BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
if (btrfs_test_opt(root, NODATACOW) ||
(BTRFS_I(dir)->flags & BTRFS_INODE_NODATACOW))
BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
}
insert_inode_hash(inode);
inode_tree_add(inode);
trace_btrfs_inode_new(inode);
btrfs_set_inode_last_trans(trans, inode);
return inode;
fail:
if (dir)
BTRFS_I(dir)->index_cnt--;
btrfs_free_path(path);
iput(inode);
return ERR_PTR(ret);
}
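/*
 * Map the S_IFMT bits of i_mode to the BTRFS_FT_* value stored in
 * directory items, via the btrfs_type_by_mode lookup table.
 */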
static inline u8 btrfs_inode_type(struct inode *inode)
{
return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
}
/*
* utility function to add 'inode' into 'parent_inode' with
* a given name and a given sequence number.
* if 'add_backref' is true, also insert a backref from the
* inode to the parent directory.
*/
int btrfs_add_link(struct btrfs_trans_handle *trans,
struct inode *parent_inode, struct inode *inode,
const char *name, int name_len, int add_backref, u64 index)
{
int ret = 0;
struct btrfs_key key;
struct btrfs_root *root = BTRFS_I(parent_inode)->root;
u64 ino = btrfs_ino(inode);
u64 parent_ino = btrfs_ino(parent_inode);
if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key));
} else {
key.objectid = ino;
btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
key.offset = 0;
}
if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
key.objectid, root->root_key.objectid,
parent_ino, index, name, name_len);
} else if (add_backref) {
ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
parent_ino, index);
}
/* Nothing to clean up yet */
if (ret)
return ret;
ret = btrfs_insert_dir_item(trans, root, name, name_len,
parent_inode, &key,
btrfs_inode_type(inode), index);
if (ret == -EEXIST)
goto fail_dir_item;
else if (ret) {
btrfs_abort_transaction(trans, root, ret);
return ret;
}
btrfs_i_size_write(parent_inode, parent_inode->i_size +
name_len * 2);
parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
ret = btrfs_update_inode(trans, root, parent_inode);
if (ret)
btrfs_abort_transaction(trans, root, ret);
return ret;
fail_dir_item:
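/*
 * The dir item insert failed after the backref (or root ref) was
 * already inserted, so delete it again to keep the tree consistent.
 * The deletion errors are intentionally ignored; we already have a
 * failure to report in ret.
 */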
if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
u64 local_index;
int err;
err = btrfs_del_root_ref(trans, root->fs_info->tree_root,
key.objectid, root->root_key.objectid,
parent_ino, &local_index, name, name_len);
} else if (add_backref) {
u64 local_index;
int err;
err = btrfs_del_inode_ref(trans, root, name, name_len,
ino, parent_ino, &local_index);
}
return ret;
}
static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
struct inode *dir, struct dentry *dentry,
struct inode *inode, int backref, u64 index)
{
int err = btrfs_add_link(trans, dir, inode,
dentry->d_name.name, dentry->d_name.len,
backref, index);
if (err > 0)
err = -EEXIST;
return err;
}
static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
umode_t mode, dev_t rdev)
{
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct inode *inode = NULL;
int err;
int drop_inode = 0;
u64 objectid;
unsigned long nr = 0;
u64 index = 0;
if (!new_valid_dev(rdev))
return -EINVAL;
/*
* 2 for inode item and ref
* 2 for dir items
* 1 for xattr if selinux is on
*/
trans = btrfs_start_transaction(root, 5);
if (IS_ERR(trans))
return PTR_ERR(trans);
err = btrfs_find_free_ino(root, &objectid);
if (err)
goto out_unlock;
inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
dentry->d_name.len, btrfs_ino(dir), objectid,
mode, &index);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto out_unlock;
}
err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
if (err) {
drop_inode = 1;
goto out_unlock;
}
/*
* If the active LSM wants to access the inode during
* d_instantiate it needs these. Smack checks to see
* if the filesystem supports xattrs by looking at the
* ops vector.
*/
inode->i_op = &btrfs_special_inode_operations;
err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
if (err)
drop_inode = 1;
else {
init_special_inode(inode, inode->i_mode, rdev);
btrfs_update_inode(trans, root, inode);
d_instantiate(dentry, inode);
}
out_unlock:
nr = trans->blocks_used;
btrfs_end_transaction(trans, root);
btrfs_btree_balance_dirty(root, nr);
if (drop_inode) {
inode_dec_link_count(inode);
iput(inode);
}
return err;
}
static int btrfs_create(struct inode *dir, struct dentry *dentry,
umode_t mode, struct nameidata *nd)
{
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct inode *inode = NULL;
int drop_inode = 0;
int err;
unsigned long nr = 0;
u64 objectid;
u64 index = 0;
/*
* 2 for inode item and ref
* 2 for dir items
* 1 for xattr if selinux is on
*/
trans = btrfs_start_transaction(root, 5);
if (IS_ERR(trans))
return PTR_ERR(trans);
err = btrfs_find_free_ino(root, &objectid);
if (err)
goto out_unlock;
inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
dentry->d_name.len, btrfs_ino(dir), objectid,
mode, &index);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto out_unlock;
}
err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
if (err) {
drop_inode = 1;
goto out_unlock;
}
/*
* If the active LSM wants to access the inode during
* d_instantiate it needs these. Smack checks to see
* if the filesystem supports xattrs by looking at the
* ops vector.
*/
inode->i_fop = &btrfs_file_operations;
inode->i_op = &btrfs_file_inode_operations;
err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
if (err)
drop_inode = 1;
else {
inode->i_mapping->a_ops = &btrfs_aops;
inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
d_instantiate(dentry, inode);
}
out_unlock:
nr = trans->blocks_used;
btrfs_end_transaction(trans, root);
if (drop_inode) {
inode_dec_link_count(inode);
iput(inode);
}
btrfs_btree_balance_dirty(root, nr);
return err;
}
static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *dentry)
{
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct inode *inode = old_dentry->d_inode;
u64 index;
unsigned long nr = 0;
int err;
int drop_inode = 0;
/* do not allow sys_link across subvolumes of the same device */
if (root->objectid != BTRFS_I(inode)->root->objectid)
return -EXDEV;
if (inode->i_nlink == ~0U)
return -EMLINK;
err = btrfs_set_inode_index(dir, &index);
if (err)
goto fail;
/*
* 2 items for inode and inode ref
* 2 items for dir items
* 1 item for parent inode
*/
trans = btrfs_start_transaction(root, 5);
if (IS_ERR(trans)) {
err = PTR_ERR(trans);
goto fail;
}
btrfs_inc_nlink(inode);
inode->i_ctime = CURRENT_TIME;
ihold(inode);
err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index);
if (err) {
drop_inode = 1;
} else {
struct dentry *parent = dentry->d_parent;
err = btrfs_update_inode(trans, root, inode);
if (err)
goto fail;
d_instantiate(dentry, inode);
btrfs_log_new_name(trans, inode, NULL, parent);
}
nr = trans->blocks_used;
btrfs_end_transaction(trans, root);
fail:
if (drop_inode) {
inode_dec_link_count(inode);
iput(inode);
}
btrfs_btree_balance_dirty(root, nr);
return err;
}
static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
struct inode *inode = NULL;
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
int err = 0;
int drop_on_err = 0;
u64 objectid = 0;
u64 index = 0;
unsigned long nr = 1;
/*
* 2 items for inode and ref
* 2 items for dir items
* 1 for xattr if selinux is on
*/
trans = btrfs_start_transaction(root, 5);
if (IS_ERR(trans))
return PTR_ERR(trans);
err = btrfs_find_free_ino(root, &objectid);
if (err)
goto out_fail;
inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
dentry->d_name.len, btrfs_ino(dir), objectid,
S_IFDIR | mode, &index);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto out_fail;
}
drop_on_err = 1;
err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
if (err)
goto out_fail;
inode->i_op = &btrfs_dir_inode_operations;
inode->i_fop = &btrfs_dir_file_operations;
btrfs_i_size_write(inode, 0);
err = btrfs_update_inode(trans, root, inode);
if (err)
goto out_fail;
err = btrfs_add_link(trans, dir, inode, dentry->d_name.name,
dentry->d_name.len, 0, index);
if (err)
goto out_fail;
d_instantiate(dentry, inode);
drop_on_err = 0;
out_fail:
nr = trans->blocks_used;
btrfs_end_transaction(trans, root);
if (drop_on_err)
iput(inode);
btrfs_btree_balance_dirty(root, nr);
return err;
}
/* helper for btrfs_get_extent. Given an existing extent in the tree,
* and an extent that you want to insert, deal with overlap and insert
* the new extent into the tree.
*/
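/*
 * For example, if the new 'em' covers [0, 8k) but the caller only
 * wants [4k, 4k + map_len) inserted, start_diff is 4k: em is clipped
 * to begin at map_start and block_start/block_len are shifted by the
 * same 4k so the logical to physical mapping stays consistent.
 */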
static int merge_extent_mapping(struct extent_map_tree *em_tree,
struct extent_map *existing,
struct extent_map *em,
u64 map_start, u64 map_len)
{
u64 start_diff;
BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
start_diff = map_start - em->start;
em->start = map_start;
em->len = map_len;
if (em->block_start < EXTENT_MAP_LAST_BYTE &&
!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
em->block_start += start_diff;
em->block_len -= start_diff;
}
return add_extent_mapping(em_tree, em);
}
static noinline int uncompress_inline(struct btrfs_path *path,
struct inode *inode, struct page *page,
size_t pg_offset, u64 extent_offset,
struct btrfs_file_extent_item *item)
{
int ret;
struct extent_buffer *leaf = path->nodes[0];
char *tmp;
size_t max_size;
unsigned long inline_size;
unsigned long ptr;
int compress_type;
WARN_ON(pg_offset != 0);
compress_type = btrfs_file_extent_compression(leaf, item);
max_size = btrfs_file_extent_ram_bytes(leaf, item);
inline_size = btrfs_file_extent_inline_item_len(leaf,
btrfs_item_nr(leaf, path->slots[0]));
tmp = kmalloc(inline_size, GFP_NOFS);
if (!tmp)
return -ENOMEM;
ptr = btrfs_file_extent_inline_start(item);
read_extent_buffer(leaf, tmp, ptr, inline_size);
max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
ret = btrfs_decompress(compress_type, tmp, page,
extent_offset, inline_size, max_size);
if (ret) {
char *kaddr = kmap_atomic(page);
unsigned long copy_size = min_t(u64,
PAGE_CACHE_SIZE - pg_offset,
max_size - extent_offset);
memset(kaddr + pg_offset, 0, copy_size);
kunmap_atomic(kaddr);
}
kfree(tmp);
return 0;
}
/*
* a bit scary, this does extent mapping from logical file offset to the disk.
* the ugly parts come from merging extents from the disk with the in-ram
* representation. This gets more complex because of the data=ordered code,
* where the in-ram extents might be locked pending data=ordered completion.
*
* This also copies inline extents directly into the page.
*/
struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
size_t pg_offset, u64 start, u64 len,
int create)
{
int ret;
int err = 0;
u64 bytenr;
u64 extent_start = 0;
u64 extent_end = 0;
u64 objectid = btrfs_ino(inode);
u32 found_type;
struct btrfs_path *path = NULL;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_file_extent_item *item;
struct extent_buffer *leaf;
struct btrfs_key found_key;
struct extent_map *em = NULL;
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct btrfs_trans_handle *trans = NULL;
int compress_type;
again:
read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, start, len);
if (em)
em->bdev = root->fs_info->fs_devices->latest_bdev;
read_unlock(&em_tree->lock);
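/*
 * A cached extent map is only reusable if it actually covers the
 * requested start; inline extents are re-read when a page is supplied
 * so the inline data can be copied into it.
 */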
if (em) {
if (em->start > start || em->start + em->len <= start)
free_extent_map(em);
else if (em->block_start == EXTENT_MAP_INLINE && page)
free_extent_map(em);
else
goto out;
}
em = alloc_extent_map();
if (!em) {
err = -ENOMEM;
goto out;
}
em->bdev = root->fs_info->fs_devices->latest_bdev;
em->start = EXTENT_MAP_HOLE;
em->orig_start = EXTENT_MAP_HOLE;
em->len = (u64)-1;
em->block_len = (u64)-1;
if (!path) {
path = btrfs_alloc_path();
if (!path) {
err = -ENOMEM;
goto out;
}
/*
* Chances are we'll be called again, so go ahead and do
* readahead
*/
path->reada = 1;
}
ret = btrfs_lookup_file_extent(trans, root, path,
objectid, start, trans != NULL);
if (ret < 0) {
err = ret;
goto out;
}
if (ret != 0) {
if (path->slots[0] == 0)
goto not_found;
path->slots[0]--;
}
leaf = path->nodes[0];
item = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
/* are we inside the extent that was found? */
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
found_type = btrfs_key_type(&found_key);
if (found_key.objectid != objectid ||
found_type != BTRFS_EXTENT_DATA_KEY) {
goto not_found;
}
found_type = btrfs_file_extent_type(leaf, item);
extent_start = found_key.offset;
compress_type = btrfs_file_extent_compression(leaf, item);
if (found_type == BTRFS_FILE_EXTENT_REG ||
found_type == BTRFS_FILE_EXTENT_PREALLOC) {
extent_end = extent_start +
btrfs_file_extent_num_bytes(leaf, item);
} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
size_t size;
size = btrfs_file_extent_inline_len(leaf, item);
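/*
 * Round the inline extent up to a sector boundary; e.g. with a
 * 4k sectorsize, a 100 byte inline extent starting at offset 0
 * yields extent_end == 4096.
 */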
extent_end = (extent_start + size + root->sectorsize - 1) &
~((u64)root->sectorsize - 1);
}
if (start >= extent_end) {
path->slots[0]++;
if (path->slots[0] >= btrfs_header_nritems(leaf)) {
ret = btrfs_next_leaf(root, path);
if (ret < 0) {
err = ret;
goto out;
}
if (ret > 0)
goto not_found;
leaf = path->nodes[0];
}
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
if (found_key.objectid != objectid ||
found_key.type != BTRFS_EXTENT_DATA_KEY)
goto not_found;
if (start + len <= found_key.offset)
goto not_found;
em->start = start;
em->len = found_key.offset - start;
goto not_found_em;
}
if (found_type == BTRFS_FILE_EXTENT_REG ||
found_type == BTRFS_FILE_EXTENT_PREALLOC) {
em->start = extent_start;
em->len = extent_end - extent_start;
em->orig_start = extent_start -
btrfs_file_extent_offset(leaf, item);
bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
if (bytenr == 0) {
em->block_start = EXTENT_MAP_HOLE;
goto insert;
}
if (compress_type != BTRFS_COMPRESS_NONE) {
set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
em->compress_type = compress_type;
em->block_start = bytenr;
em->block_len = btrfs_file_extent_disk_num_bytes(leaf,
item);
} else {
bytenr += btrfs_file_extent_offset(leaf, item);
em->block_start = bytenr;
em->block_len = em->len;
if (found_type == BTRFS_FILE_EXTENT_PREALLOC)
set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
}
goto insert;
} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
unsigned long ptr;
char *map;
size_t size;
size_t extent_offset;
size_t copy_size;
em->block_start = EXTENT_MAP_INLINE;
if (!page || create) {
em->start = extent_start;
em->len = extent_end - extent_start;
goto out;
}
size = btrfs_file_extent_inline_len(leaf, item);
extent_offset = page_offset(page) + pg_offset - extent_start;
copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
size - extent_offset);
em->start = extent_start + extent_offset;
em->len = (copy_size + root->sectorsize - 1) &
~((u64)root->sectorsize - 1);
em->orig_start = EXTENT_MAP_INLINE;
if (compress_type) {
set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
em->compress_type = compress_type;
}
ptr = btrfs_file_extent_inline_start(item) + extent_offset;
if (create == 0 && !PageUptodate(page)) {
if (btrfs_file_extent_compression(leaf, item) !=
BTRFS_COMPRESS_NONE) {
ret = uncompress_inline(path, inode, page,
pg_offset,
extent_offset, item);
BUG_ON(ret); /* -ENOMEM */
} else {
map = kmap(page);
read_extent_buffer(leaf, map + pg_offset, ptr,
copy_size);
if (pg_offset + copy_size < PAGE_CACHE_SIZE) {
memset(map + pg_offset + copy_size, 0,
PAGE_CACHE_SIZE - pg_offset -
copy_size);
}
kunmap(page);
}
flush_dcache_page(page);
} else if (create && PageUptodate(page)) {
BUG();
if (!trans) {
kunmap(page);
free_extent_map(em);
em = NULL;
btrfs_release_path(path);
trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
return ERR_CAST(trans);
goto again;
}
map = kmap(page);
write_extent_buffer(leaf, map + pg_offset, ptr,
copy_size);
kunmap(page);
btrfs_mark_buffer_dirty(leaf);
}
set_extent_uptodate(io_tree, em->start,
extent_map_end(em) - 1, NULL, GFP_NOFS);
goto insert;
} else {
printk(KERN_ERR "btrfs unknown found_type %d\n", found_type);
WARN_ON(1);
}
not_found:
em->start = start;
em->len = len;
not_found_em:
em->block_start = EXTENT_MAP_HOLE;
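/* mark this em as a synthesized hole: no extent item covers the range */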
set_bit(EXTENT_FLAG_VACANCY, &em->flags);
insert:
btrfs_release_path(path);
if (em->start > start || extent_map_end(em) <= start) {
printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed "
"[%llu %llu]\n", (unsigned long long)em->start,
(unsigned long long)em->len,
(unsigned long long)start,
(unsigned long long)len);
err = -EIO;
goto out;
}
err = 0;
write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em);
/* it is possible that someone inserted the extent into the tree
* while we had the lock dropped. It is also possible that
* an overlapping map exists in the tree
*/
if (ret == -EEXIST) {
struct extent_map *existing;
ret = 0;
existing = lookup_extent_mapping(em_tree, start, len);
if (existing && (existing->start > start ||
existing->start + existing->len <= start)) {
free_extent_map(existing);
existing = NULL;
}
if (!existing) {
existing = lookup_extent_mapping(em_tree, em->start,
em->len);
if (existing) {
err = merge_extent_mapping(em_tree, existing,
em, start,
root->sectorsize);
free_extent_map(existing);
if (err) {
free_extent_map(em);
em = NULL;
}
} else {
err = -EIO;
free_extent_map(em);
em = NULL;
}
} else {
free_extent_map(em);
em = existing;
err = 0;
}
}
write_unlock(&em_tree->lock);
out:
trace_btrfs_get_extent(root, em);
if (path)
btrfs_free_path(path);
if (trans) {
ret = btrfs_end_transaction(trans, root);
if (!err)
err = ret;
}
if (err) {
free_extent_map(em);
return ERR_PTR(err);
}
BUG_ON(!em); /* Error is always set */
return em;
}
struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
size_t pg_offset, u64 start, u64 len,
int create)
{
struct extent_map *em;
struct extent_map *hole_em = NULL;
u64 range_start = start;
u64 end;
u64 found;
u64 found_end;
int err = 0;
em = btrfs_get_extent(inode, page, pg_offset, start, len, create);
if (IS_ERR(em))
return em;
if (em) {
/*
* if our em maps to a hole, there might
* actually be delalloc bytes behind it
*/
if (em->block_start != EXTENT_MAP_HOLE)
return em;
else
hole_em = em;
}
/* check to see if we've wrapped (len == -1 or similar) */
end = start + len;
if (end < start)
end = (u64)-1;
else
end -= 1;
em = NULL;
/* ok, we didn't find anything, let's look for delalloc */
found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start,
end, len, EXTENT_DELALLOC, 1);
found_end = range_start + found;
if (found_end < range_start)
found_end = (u64)-1;
/*
* we didn't find anything useful, return
* the original results from get_extent()
*/
if (range_start > end || found_end <= start) {
em = hole_em;
hole_em = NULL;
goto out;
}
/* adjust the range_start to make sure it doesn't
* go backwards from the start they passed in
*/
range_start = max(start, range_start);
found = found_end - range_start;
if (found > 0) {
u64 hole_start = start;
u64 hole_len = len;
em = alloc_extent_map();
if (!em) {
err = -ENOMEM;
goto out;
}
/*
* when btrfs_get_extent can't find anything it
* returns one huge hole
*
* make sure what it found really fits our range, and
* adjust to make sure it is based on the start from
* the caller
*/
if (hole_em) {
u64 calc_end = extent_map_end(hole_em);
if (calc_end <= start || (hole_em->start > end)) {
free_extent_map(hole_em);
hole_em = NULL;
} else {
hole_start = max(hole_em->start, start);
hole_len = calc_end - hole_start;
}
}
em->bdev = NULL;
if (hole_em && range_start > hole_start) {
/* our hole starts before our delalloc, so we
* have to return just the parts of the hole
* that go until the delalloc starts
*/
em->len = min(hole_len,
range_start - hole_start);
em->start = hole_start;
em->orig_start = hole_start;
/*
* don't adjust block start at all,
* it is fixed at EXTENT_MAP_HOLE
*/
em->block_start = hole_em->block_start;
em->block_len = hole_len;
} else {
em->start = range_start;
em->len = found;
em->orig_start = range_start;
em->block_start = EXTENT_MAP_DELALLOC;
em->block_len = found;
}
} else if (hole_em) {
return hole_em;
}
out:
free_extent_map(hole_em);
if (err) {
free_extent_map(em);
return ERR_PTR(err);
}
return em;
}
static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
struct extent_map *em,
u64 start, u64 len)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
struct btrfs_key ins;
u64 alloc_hint;
int ret;
bool insert = false;
/*
* Ok, if the extent map we looked up is a hole and is for the exact
* range we want, there is no reason to allocate a new one. However,
* if it is not right, then we need to free this one and drop the
* cache for our range.
*/
if (em->block_start != EXTENT_MAP_HOLE || em->start != start ||
em->len != len) {
free_extent_map(em);
em = NULL;
insert = true;
btrfs_drop_extent_cache(inode, start, start + len - 1, 0);
}
trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
return ERR_CAST(trans);
if (start <= BTRFS_I(inode)->disk_i_size && len < 64 * 1024)
btrfs_add_inode_defrag(trans, inode);
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
alloc_hint = get_extent_allocation_hint(inode, start, len);
ret = btrfs_reserve_extent(trans, root, len, root->sectorsize, 0,
alloc_hint, &ins, 1);
if (ret) {
em = ERR_PTR(ret);
goto out;
}
if (!em) {
em = alloc_extent_map();
if (!em) {
em = ERR_PTR(-ENOMEM);
goto out;
}
}
em->start = start;
em->orig_start = em->start;
em->len = ins.offset;
em->block_start = ins.objectid;
em->block_len = ins.offset;
em->bdev = root->fs_info->fs_devices->latest_bdev;
/*
* We need to do this because if we're using the original em we searched
* for, we could have EXTENT_FLAG_VACANCY set, and we don't want that.
*/
em->flags = 0;
set_bit(EXTENT_FLAG_PINNED, &em->flags);
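/*
 * Someone may have raced a conflicting extent map into the tree;
 * keep dropping the cached range and retrying until the insert no
 * longer collides.
 */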
while (insert) {
write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em);
write_unlock(&em_tree->lock);
if (ret != -EEXIST)
break;
btrfs_drop_extent_cache(inode, start, start + em->len - 1, 0);
}
ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid,
ins.offset, ins.offset, 0);
if (ret) {
btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
em = ERR_PTR(ret);
}
out:
btrfs_end_transaction(trans, root);
return em;
}
/*
* returns 1 when the nocow is safe, < 0 on error, 0 if the
* block must be cow'd
*/
static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
struct inode *inode, u64 offset, u64 len)
{
struct btrfs_path *path;
int ret;
struct extent_buffer *leaf;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_file_extent_item *fi;
struct btrfs_key key;
u64 disk_bytenr;
u64 backref_offset;
u64 extent_end;
u64 num_bytes;
int slot;
int found_type;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
offset, 0);
if (ret < 0)
goto out;
slot = path->slots[0];
if (ret == 1) {
if (slot == 0) {
/* can't find the item, must cow */
ret = 0;
goto out;
}
slot--;
}
ret = 0;
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, slot);
if (key.objectid != btrfs_ino(inode) ||
key.type != BTRFS_EXTENT_DATA_KEY) {
/* not our file or wrong item type, must cow */
goto out;
}
if (key.offset > offset) {
/* Wrong offset, must cow */
goto out;
}
fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
found_type = btrfs_file_extent_type(leaf, fi);
if (found_type != BTRFS_FILE_EXTENT_REG &&
found_type != BTRFS_FILE_EXTENT_PREALLOC) {
/* not a regular extent, must cow */
goto out;
}
disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
backref_offset = btrfs_file_extent_offset(leaf, fi);
extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
if (extent_end < offset + len) {
/* extent doesn't include our full range, must cow */
goto out;
}
if (btrfs_extent_readonly(root, disk_bytenr))
goto out;
/*
* look for other files referencing this extent, if we
* find any we must cow
*/
if (btrfs_cross_ref_exist(trans, root, btrfs_ino(inode),
key.offset - backref_offset, disk_bytenr))
goto out;
/*
* adjust disk_bytenr and num_bytes to cover just the bytes
* in this extent we are about to write. If there
* are any csums in that range we have to cow in order
* to keep the csums correct
*/
disk_bytenr += backref_offset;
disk_bytenr += offset - key.offset;
num_bytes = min(offset + len, extent_end) - offset;
if (csum_exist_in_range(root, disk_bytenr, num_bytes))
goto out;
/*
* all of the above have passed, it is safe to overwrite this extent
* without cow
*/
ret = 1;
out:
btrfs_free_path(path);
return ret;
}
static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
struct extent_map *em;
struct btrfs_root *root = BTRFS_I(inode)->root;
u64 start = iblock << inode->i_blkbits;
u64 len = bh_result->b_size;
struct btrfs_trans_handle *trans;
em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
if (IS_ERR(em))
return PTR_ERR(em);
/*
* Ok, for INLINE and COMPRESSED extents we need to fall back to buffered
* IO. INLINE is special, and we could probably kludge it in here, but
* it's still buffered so for safety let's just fall back to the generic
* buffered path.
*
* For COMPRESSED we _have_ to read the entire extent in so we can
* decompress it, so there will be buffering required no matter what we
* do, so go ahead and fall back to buffered.
*
* We return -ENOTBLK because that's what makes DIO go ahead and go back
* to buffered IO. Don't blame me, this is the price we pay for using
* the generic code.
*/
if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
em->block_start == EXTENT_MAP_INLINE) {
free_extent_map(em);
return -ENOTBLK;
}
/* Just a good old fashioned hole, return */
if (!create && (em->block_start == EXTENT_MAP_HOLE ||
test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
free_extent_map(em);
/* DIO will do one hole at a time, so just unlock a sector */
unlock_extent(&BTRFS_I(inode)->io_tree, start,
start + root->sectorsize - 1);
return 0;
}
/*
* We don't allocate a new extent in the following cases
*
* 1) The inode is marked as NODATACOW. In this case we'll just use the
* existing extent.
* 2) The extent is marked as PREALLOC. We're good to go here and can
* just use the extent.
*
*/
if (!create) {
len = em->len - (start - em->start);
goto map;
}
if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
em->block_start != EXTENT_MAP_HOLE)) {
int type;
int ret;
u64 block_start;
if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
type = BTRFS_ORDERED_PREALLOC;
else
type = BTRFS_ORDERED_NOCOW;
len = min(len, em->len - (start - em->start));
block_start = em->block_start + (start - em->start);
/*
* we're not going to log anything, but we do need
* to make sure the current transaction stays open
* while we look for nocow cross refs
*/
trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
goto must_cow;
if (can_nocow_odirect(trans, inode, start, len) == 1) {
ret = btrfs_add_ordered_extent_dio(inode, start,
block_start, len, len, type);
btrfs_end_transaction(trans, root);
if (ret) {
free_extent_map(em);
return ret;
}
goto unlock;
}
btrfs_end_transaction(trans, root);
}
must_cow:
/*
* this will cow the extent, reset the len in case we changed
* it above
*/
len = bh_result->b_size;
em = btrfs_new_extent_direct(inode, em, start, len);
if (IS_ERR(em))
return PTR_ERR(em);
len = min(len, em->len - (start - em->start));
unlock:
clear_extent_bit(&BTRFS_I(inode)->io_tree, start, start + len - 1,
EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DIRTY, 1,
0, NULL, GFP_NOFS);
map:
bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
inode->i_blkbits;
bh_result->b_size = len;
bh_result->b_bdev = em->bdev;
set_buffer_mapped(bh_result);
if (create && !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
set_buffer_new(bh_result);
free_extent_map(em);
return 0;
}
struct btrfs_dio_private {
struct inode *inode;
u64 logical_offset;
u64 disk_bytenr;
u64 bytes;
u32 *csums;
void *private;
/* number of bios pending for this dio */
atomic_t pending_bios;
/* IO errors */
int errors;
struct bio *orig_bio;
};
static void btrfs_endio_direct_read(struct bio *bio, int err)
{
struct btrfs_dio_private *dip = bio->bi_private;
struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
struct bio_vec *bvec = bio->bi_io_vec;
struct inode *inode = dip->inode;
struct btrfs_root *root = BTRFS_I(inode)->root;
u64 start;
u32 *private = dip->csums;
start = dip->logical_offset;
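/*
 * For reads the csums were looked up at submit time, one u32 per
 * bio_vec page; walk the completed bio and verify each page against
 * its saved csum.
 */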
do {
if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
struct page *page = bvec->bv_page;
char *kaddr;
u32 csum = ~(u32)0;
unsigned long flags;
local_irq_save(flags);
kaddr = kmap_atomic(page);
csum = btrfs_csum_data(root, kaddr + bvec->bv_offset,
csum, bvec->bv_len);
btrfs_csum_final(csum, (char *)&csum);
kunmap_atomic(kaddr);
local_irq_restore(flags);
flush_dcache_page(bvec->bv_page);
if (csum != *private) {
printk(KERN_ERR "btrfs csum failed ino %llu off"
" %llu csum %u private %u\n",
(unsigned long long)btrfs_ino(inode),
(unsigned long long)start,
csum, *private);
err = -EIO;
}
}
start += bvec->bv_len;
private++;
bvec++;
} while (bvec <= bvec_end);
unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
dip->logical_offset + dip->bytes - 1);
bio->bi_private = dip->private;
kfree(dip->csums);
kfree(dip);
/* If we had a csum failure make sure to clear the uptodate flag */
if (err)
clear_bit(BIO_UPTODATE, &bio->bi_flags);
dio_end_io(bio, err);
}
static void btrfs_endio_direct_write(struct bio *bio, int err)
{
struct btrfs_dio_private *dip = bio->bi_private;
struct inode *inode = dip->inode;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
struct btrfs_ordered_extent *ordered = NULL;
struct extent_state *cached_state = NULL;
u64 ordered_offset = dip->logical_offset;
u64 ordered_bytes = dip->bytes;
int ret;
if (err)
goto out_done;
again:
ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
&ordered_offset,
ordered_bytes);
if (!ret)
goto out_test;
BUG_ON(!ordered);
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
err = -ENOMEM;
goto out;
}
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
if (test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) {
ret = btrfs_ordered_update_i_size(inode, 0, ordered);
if (!ret)
err = btrfs_update_inode_fallback(trans, root, inode);
goto out;
}
lock_extent_bits(&BTRFS_I(inode)->io_tree, ordered->file_offset,
ordered->file_offset + ordered->len - 1, 0,
&cached_state);
if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) {
ret = btrfs_mark_extent_written(trans, inode,
ordered->file_offset,
ordered->file_offset +
ordered->len);
if (ret) {
err = ret;
goto out_unlock;
}
} else {
ret = insert_reserved_file_extent(trans, inode,
ordered->file_offset,
ordered->start,
ordered->disk_len,
ordered->len,
ordered->len,
0, 0, 0,
BTRFS_FILE_EXTENT_REG);
unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
ordered->file_offset, ordered->len);
if (ret) {
err = ret;
WARN_ON(1);
goto out_unlock;
}
}
add_pending_csums(trans, inode, ordered->file_offset, &ordered->list);
ret = btrfs_ordered_update_i_size(inode, 0, ordered);
if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags))
btrfs_update_inode_fallback(trans, root, inode);
ret = 0;
out_unlock:
unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset,
ordered->file_offset + ordered->len - 1,
&cached_state, GFP_NOFS);
out:
btrfs_delalloc_release_metadata(inode, ordered->len);
btrfs_end_transaction(trans, root);
ordered_offset = ordered->file_offset + ordered->len;
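/* one ref for us, one for the ordered tree it was removed from */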
btrfs_put_ordered_extent(ordered);
btrfs_put_ordered_extent(ordered);
out_test:
/*
* our bio might span multiple ordered extents. If we haven't
* completed the accounting for the whole dio, go back and try again
*/
if (ordered_offset < dip->logical_offset + dip->bytes) {
ordered_bytes = dip->logical_offset + dip->bytes -
ordered_offset;
goto again;
}
out_done:
bio->bi_private = dip->private;
kfree(dip->csums);
kfree(dip);
/* If we had an error make sure to clear the uptodate flag */
if (err)
clear_bit(BIO_UPTODATE, &bio->bi_flags);
dio_end_io(bio, err);
}
static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw,
struct bio *bio, int mirror_num,
unsigned long bio_flags, u64 offset)
{
int ret;
struct btrfs_root *root = BTRFS_I(inode)->root;
ret = btrfs_csum_one_bio(root, inode, bio, offset, 1);
BUG_ON(ret); /* -ENOMEM */
return 0;
}
static void btrfs_end_dio_bio(struct bio *bio, int err)
{
struct btrfs_dio_private *dip = bio->bi_private;
if (err) {
printk(KERN_ERR "btrfs direct IO failed ino %llu rw %lu "
"sector %#Lx len %u err no %d\n",
(unsigned long long)btrfs_ino(dip->inode), bio->bi_rw,
(unsigned long long)bio->bi_sector, bio->bi_size, err);
dip->errors = 1;
/*
* before the atomic variable goes to zero, we must make sure
* dip->errors is perceived to be set.
*/
smp_mb__before_atomic_dec();
}
/* if there are more bios still pending for this dio, just exit */
if (!atomic_dec_and_test(&dip->pending_bios))
goto out;
if (dip->errors)
bio_io_error(dip->orig_bio);
else {
set_bit(BIO_UPTODATE, &dip->orig_bio->bi_flags);
bio_endio(dip->orig_bio, 0);
}
out:
bio_put(bio);
}
static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
u64 first_sector, gfp_t gfp_flags)
{
int nr_vecs = bio_get_nr_vecs(bdev);
return btrfs_bio_alloc(bdev, first_sector, nr_vecs, gfp_flags);
}
static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
int rw, u64 file_offset, int skip_sum,
u32 *csums, int async_submit)
{
int write = rw & REQ_WRITE;
struct btrfs_root *root = BTRFS_I(inode)->root;
int ret;
bio_get(bio);
ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
if (ret)
goto err;
if (skip_sum)
goto map;
if (write && async_submit) {
ret = btrfs_wq_submit_bio(root->fs_info,
inode, rw, bio, 0, 0,
file_offset,
__btrfs_submit_bio_start_direct_io,
__btrfs_submit_bio_done);
goto err;
} else if (write) {
/*
* If we aren't doing async submit, calculate the csum of the
* bio now.
*/
ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1);
if (ret)
goto err;
} else {
ret = btrfs_lookup_bio_sums_dio(root, inode, bio,
file_offset, csums);
if (ret)
goto err;
}
map:
ret = btrfs_map_bio(root, rw, bio, 0, async_submit);
err:
bio_put(bio);
return ret;
}
static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
int skip_sum)
{
struct inode *inode = dip->inode;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
struct bio *bio;
struct bio *orig_bio = dip->orig_bio;
struct bio_vec *bvec = orig_bio->bi_io_vec;
u64 start_sector = orig_bio->bi_sector;
u64 file_offset = dip->logical_offset;
u64 submit_len = 0;
u64 map_length;
int nr_pages = 0;
u32 *csums = dip->csums;
int ret = 0;
int async_submit = 0;
int write = rw & REQ_WRITE;
map_length = orig_bio->bi_size;
ret = btrfs_map_block(map_tree, READ, start_sector << 9,
&map_length, NULL, 0);
if (ret) {
bio_put(orig_bio);
return -EIO;
}
if (map_length >= orig_bio->bi_size) {
bio = orig_bio;
goto submit;
}
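/*
 * The original bio spans more than one stripe of the chunk mapping,
 * so it has to be split into multiple bios that each cover a
 * physically contiguous range; submit those asynchronously.
 */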
async_submit = 1;
bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
if (!bio)
return -ENOMEM;
bio->bi_private = dip;
bio->bi_end_io = btrfs_end_dio_bio;
atomic_inc(&dip->pending_bios);
while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
if (unlikely(map_length < submit_len + bvec->bv_len ||
bio_add_page(bio, bvec->bv_page, bvec->bv_len,
bvec->bv_offset) < bvec->bv_len)) {
/*
* inc the count before we submit the bio so
* we know the end IO handler won't happen before
* we inc the count. Otherwise, the dip might get freed
* before we're done setting it up
*/
atomic_inc(&dip->pending_bios);
ret = __btrfs_submit_dio_bio(bio, inode, rw,
file_offset, skip_sum,
csums, async_submit);
if (ret) {
bio_put(bio);
atomic_dec(&dip->pending_bios);
goto out_err;
}
/* Writes use the ordered csums */
if (!write && !skip_sum)
csums = csums + nr_pages;
start_sector += submit_len >> 9;
file_offset += submit_len;
submit_len = 0;
nr_pages = 0;
bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev,
start_sector, GFP_NOFS);
if (!bio)
goto out_err;
bio->bi_private = dip;
bio->bi_end_io = btrfs_end_dio_bio;
map_length = orig_bio->bi_size;
ret = btrfs_map_block(map_tree, READ, start_sector << 9,
&map_length, NULL, 0);
if (ret) {
bio_put(bio);
goto out_err;
}
} else {
submit_len += bvec->bv_len;
nr_pages++;
bvec++;
}
}
submit:
ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
csums, async_submit);
if (!ret)
return 0;
bio_put(bio);
out_err:
dip->errors = 1;
/*
* before the atomic variable goes to zero, we must
* make sure dip->errors is perceived to be set.
*/
smp_mb__before_atomic_dec();
if (atomic_dec_and_test(&dip->pending_bios))
bio_io_error(dip->orig_bio);
/* bio_end_io() will handle error, so we needn't return it */
return 0;
}
static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
loff_t file_offset)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_dio_private *dip;
struct bio_vec *bvec = bio->bi_io_vec;
int skip_sum;
int write = rw & REQ_WRITE;
int ret = 0;
skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
dip = kmalloc(sizeof(*dip), GFP_NOFS);
if (!dip) {
ret = -ENOMEM;
goto free_ordered;
}
dip->csums = NULL;
/* Writes use the ordered csum stuff, so we don't need dip->csums */
if (!write && !skip_sum) {
dip->csums = kmalloc(sizeof(u32) * bio->bi_vcnt, GFP_NOFS);
if (!dip->csums) {
kfree(dip);
ret = -ENOMEM;
goto free_ordered;
}
}
dip->private = bio->bi_private;
dip->inode = inode;
dip->logical_offset = file_offset;
dip->bytes = 0;
do {
dip->bytes += bvec->bv_len;
bvec++;
} while (bvec <= (bio->bi_io_vec + bio->bi_vcnt - 1));
dip->disk_bytenr = (u64)bio->bi_sector << 9;
bio->bi_private = dip;
dip->errors = 0;
dip->orig_bio = bio;
atomic_set(&dip->pending_bios, 0);
if (write)
bio->bi_end_io = btrfs_endio_direct_write;
else
bio->bi_end_io = btrfs_endio_direct_read;
ret = btrfs_submit_direct_hook(rw, dip, skip_sum);
if (!ret)
return;
free_ordered:
/*
* If this is a write, we need to clean up the reserved space and kill
* the ordered extent.
*/
if (write) {
struct btrfs_ordered_extent *ordered;
ordered = btrfs_lookup_ordered_extent(inode, file_offset);
if (!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags) &&
!test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags))
btrfs_free_reserved_extent(root, ordered->start,
ordered->disk_len);
btrfs_put_ordered_extent(ordered);
btrfs_put_ordered_extent(ordered);
}
bio_endio(bio, ret);
}
static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb,
const struct iovec *iov, loff_t offset,
unsigned long nr_segs)
{
int seg;
int i;
size_t size;
unsigned long addr;
unsigned blocksize_mask = root->sectorsize - 1;
ssize_t retval = -EINVAL;
loff_t end = offset;
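/*
 * E.g. with a 4k sectorsize the mask is 0xfff: any offset, buffer
 * address or segment length with low bits set is misaligned and
 * must fall back to buffered IO.
 */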
if (offset & blocksize_mask)
goto out;
/* Check the memory alignment. Blocks cannot straddle pages */
for (seg = 0; seg < nr_segs; seg++) {
addr = (unsigned long)iov[seg].iov_base;
size = iov[seg].iov_len;
end += size;
if ((addr & blocksize_mask) || (size & blocksize_mask))
goto out;
/* If this is a write we don't need to check anymore */
if (rw & WRITE)
continue;
/*
* Check to make sure we don't have duplicate iov_base's in this
* iovec, if so return EINVAL, otherwise we'll get csum errors
* when reading back.
*/
for (i = seg + 1; i < nr_segs; i++) {
if (iov[seg].iov_base == iov[i].iov_base)
goto out;
}
}
retval = 0;
out:
return retval;
}
static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
const struct iovec *iov, loff_t offset,
unsigned long nr_segs)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
struct btrfs_ordered_extent *ordered;
struct extent_state *cached_state = NULL;
u64 lockstart, lockend;
ssize_t ret;
int writing = rw & WRITE;
int write_bits = 0;
size_t count = iov_length(iov, nr_segs);
if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov,
offset, nr_segs)) {
return 0;
}
lockstart = offset;
lockend = offset + count - 1;
if (writing) {
ret = btrfs_delalloc_reserve_space(inode, count);
if (ret)
goto out;
}
while (1) {
lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
0, &cached_state);
/*
* We're concerned with the entire range that we're going to be
* doing DIO to, so we need to make sure there are no ordered
* extents in this range.
*/
ordered = btrfs_lookup_ordered_range(inode, lockstart,
lockend - lockstart + 1);
if (!ordered)
break;
unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
&cached_state, GFP_NOFS);
btrfs_start_ordered_extent(inode, ordered, 1);
btrfs_put_ordered_extent(ordered);
cond_resched();
}
/*
* we don't use btrfs_set_extent_delalloc because we don't want
* the dirty or uptodate bits
*/
if (writing) {
write_bits = EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING;
ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
EXTENT_DELALLOC, NULL, &cached_state,
GFP_NOFS);
if (ret) {
clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
lockend, EXTENT_LOCKED | write_bits,
1, 0, &cached_state, GFP_NOFS);
goto out;
}
}
free_extent_state(cached_state);
cached_state = NULL;
ret = __blockdev_direct_IO(rw, iocb, inode,
BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
iov, offset, nr_segs, btrfs_get_blocks_direct, NULL,
btrfs_submit_direct, 0);
if (ret < 0 && ret != -EIOCBQUEUED) {
clear_extent_bit(&BTRFS_I(inode)->io_tree, offset,
offset + iov_length(iov, nr_segs) - 1,
EXTENT_LOCKED | write_bits, 1, 0,
&cached_state, GFP_NOFS);
} else if (ret >= 0 && ret < iov_length(iov, nr_segs)) {
/*
* We're falling back to buffered, unlock the section we didn't
* do IO on.
*/
clear_extent_bit(&BTRFS_I(inode)->io_tree, offset + ret,
offset + iov_length(iov, nr_segs) - 1,
EXTENT_LOCKED | write_bits, 1, 0,
&cached_state, GFP_NOFS);
}
out:
free_extent_state(cached_state);
return ret;
}
static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
__u64 start, __u64 len)
{
return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap);
}
int btrfs_readpage(struct file *file, struct page *page)
{
struct extent_io_tree *tree;
tree = &BTRFS_I(page->mapping->host)->io_tree;
return extent_read_full_page(tree, page, btrfs_get_extent, 0);
}
static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
{
struct extent_io_tree *tree;
if (current->flags & PF_MEMALLOC) {
redirty_page_for_writepage(wbc, page);
unlock_page(page);
return 0;
}
tree = &BTRFS_I(page->mapping->host)->io_tree;
return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
}
int btrfs_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
struct extent_io_tree *tree;
tree = &BTRFS_I(mapping->host)->io_tree;
return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
}
static int
btrfs_readpages(struct file *file, struct address_space *mapping,
struct list_head *pages, unsigned nr_pages)
{
struct extent_io_tree *tree;
tree = &BTRFS_I(mapping->host)->io_tree;
return extent_readpages(tree, mapping, pages, nr_pages,
btrfs_get_extent);
}
static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
{
struct extent_io_tree *tree;
struct extent_map_tree *map;
int ret;
tree = &BTRFS_I(page->mapping->host)->io_tree;
map = &BTRFS_I(page->mapping->host)->extent_tree;
ret = try_release_extent_mapping(map, tree, page, gfp_flags);
if (ret == 1) {
ClearPagePrivate(page);
set_page_private(page, 0);
page_cache_release(page);
}
return ret;
}
static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
{
if (PageWriteback(page) || PageDirty(page))
return 0;
return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
}
static void btrfs_invalidatepage(struct page *page, unsigned long offset)
{
struct extent_io_tree *tree;
struct btrfs_ordered_extent *ordered;
struct extent_state *cached_state = NULL;
u64 page_start = page_offset(page);
u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
/*
* we have the page locked, so new writeback can't start,
* and the dirty bit won't be cleared while we are here.
*
* Wait for IO on this page so that we can safely clear
* the PagePrivate2 bit and do ordered accounting
*/
wait_on_page_writeback(page);
tree = &BTRFS_I(page->mapping->host)->io_tree;
if (offset) {
btrfs_releasepage(page, GFP_NOFS);
return;
}
lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
ordered = btrfs_lookup_ordered_extent(page->mapping->host,
page_offset(page));
if (ordered) {
/*
* IO on this page will never be started, so we need
* to account for any ordered extents now
*/
clear_extent_bit(tree, page_start, page_end,
EXTENT_DIRTY | EXTENT_DELALLOC |
EXTENT_LOCKED | EXTENT_DO_ACCOUNTING, 1, 0,
&cached_state, GFP_NOFS);
/*
* whoever cleared the private bit is responsible
* for the finish_ordered_io
*/
if (TestClearPagePrivate2(page)) {
btrfs_finish_ordered_io(page->mapping->host,
page_start, page_end);
}
btrfs_put_ordered_extent(ordered);
cached_state = NULL;
lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
}
clear_extent_bit(tree, page_start, page_end,
EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
EXTENT_DO_ACCOUNTING, 1, 1, &cached_state, GFP_NOFS);
__btrfs_releasepage(page, GFP_NOFS);
ClearPageChecked(page);
if (PagePrivate(page)) {
ClearPagePrivate(page);
set_page_private(page, 0);
page_cache_release(page);
}
}
/*
* btrfs_page_mkwrite() is not allowed to change the file size as it gets
* called from a page fault handler when a page is first dirtied. Hence we must
* be careful to check for EOF conditions here. We set the page up correctly
* for a written page which means we get ENOSPC checking when writing into
* holes and correct delalloc and unwritten extent mapping on filesystems that
* support these features.
*
* We are not allowed to take the i_mutex here so we have to play games to
* protect against truncate races as the page could now be beyond EOF. Because
* vmtruncate() writes the inode size before removing pages, once we have the
* page lock we can determine safely if the page is beyond EOF. If it is not
* beyond EOF, then the page is guaranteed safe against truncation until we
* unlock the page.
*/
int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct page *page = vmf->page;
struct inode *inode = fdentry(vma->vm_file)->d_inode;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct btrfs_ordered_extent *ordered;
struct extent_state *cached_state = NULL;
char *kaddr;
unsigned long zero_start;
loff_t size;
int ret;
int reserved = 0;
u64 page_start;
u64 page_end;
ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
if (!ret) {
ret = btrfs_update_time(vma->vm_file);
reserved = 1;
}
if (ret) {
if (ret == -ENOMEM)
ret = VM_FAULT_OOM;
else /* -ENOSPC, -EIO, etc */
ret = VM_FAULT_SIGBUS;
if (reserved)
goto out;
goto out_noreserve;
}
ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
again:
lock_page(page);
size = i_size_read(inode);
page_start = page_offset(page);
page_end = page_start + PAGE_CACHE_SIZE - 1;
if ((page->mapping != inode->i_mapping) ||
(page_start >= size)) {
/* page got truncated out from underneath us */
goto out_unlock;
}
wait_on_page_writeback(page);
lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
set_page_extent_mapped(page);
/*
* we can't set the delalloc bits if there are pending ordered
* extents. Drop our locks and wait for them to finish
*/
ordered = btrfs_lookup_ordered_extent(inode, page_start);
if (ordered) {
unlock_extent_cached(io_tree, page_start, page_end,
&cached_state, GFP_NOFS);
unlock_page(page);
btrfs_start_ordered_extent(inode, ordered, 1);
btrfs_put_ordered_extent(ordered);
goto again;
}
/*
* XXX - page_mkwrite gets called every time the page is dirtied, even
* if it was already dirty, so for space accounting reasons we need to
* clear any delalloc bits for the range we are fixing to save. There
* is probably a better way to do this, but for now keep consistent with
* prepare_pages in the normal write path.
*/
clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
0, 0, &cached_state, GFP_NOFS);
ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
&cached_state);
if (ret) {
unlock_extent_cached(io_tree, page_start, page_end,
&cached_state, GFP_NOFS);
ret = VM_FAULT_SIGBUS;
goto out_unlock;
}
ret = 0;
/* page is wholly or partially inside EOF */
if (page_start + PAGE_CACHE_SIZE > size)
zero_start = size & ~PAGE_CACHE_MASK;
else
zero_start = PAGE_CACHE_SIZE;
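/*
 * E.g. with 4k pages and i_size == 6k, the page covering [4k, 8k)
 * gets zero_start == 2k, so the [6k, 8k) tail beyond EOF is zeroed
 * below before the page is marked dirty.
 */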
if (zero_start != PAGE_CACHE_SIZE) {
kaddr = kmap(page);
memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
flush_dcache_page(page);
kunmap(page);
}
ClearPageChecked(page);
set_page_dirty(page);
SetPageUptodate(page);
BTRFS_I(inode)->last_trans = root->fs_info->generation;
BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);
out_unlock:
if (!ret)
return VM_FAULT_LOCKED;
unlock_page(page);
out:
btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
out_noreserve:
return ret;
}
static int btrfs_truncate(struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_block_rsv *rsv;
int ret;
int err = 0;
struct btrfs_trans_handle *trans;
unsigned long nr;
u64 mask = root->sectorsize - 1;
u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
ret = btrfs_truncate_page(inode->i_mapping, inode->i_size);
if (ret)
return ret;
btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
/*
* Yes, ladies and gentlemen, this is indeed ugly. The fact is we have
* 3 things going on here
*
* 1) We need to reserve space for our orphan item and the space to
* delete our orphan item. Lord knows we don't want to have a dangling
* orphan item because we didn't reserve space to remove it.
*
* 2) We need to reserve space to update our inode.
*
* 3) We need to have something to cache all the space that is going to
* be free'd up by the truncate operation, but also have some slack
* space reserved in case it uses space during the truncate (thank you
* very much snapshotting).
*
* And we need these to all be separate. The fact is we can use a lot
* of space doing the truncate, and we have no earthly idea how much
* space we will use, so we need the truncate reservation to be separate
* so it doesn't end up using space reserved for updating the inode or
* removing the orphan item. We also need to be able to stop the
* transaction and start a new one, which means we need to be able to
* update the inode several times, and we have no way of knowing how
* many times that will be, so we can't just reserve 1 item for the
* entirety of the operation, so that has to be done separately as well.
* Then there is the orphan item, which does indeed need to be held on
* to for the whole operation, and we need nobody to touch this reserved
* space except the orphan code.
*
* So that leaves us with
*
* 1) root->orphan_block_rsv - for the orphan deletion.
* 2) rsv - for the truncate reservation, which we will steal from the
* transaction reservation.
* 3) fs_info->trans_block_rsv - this will have 1 items worth left for
* updating the inode.
*/
rsv = btrfs_alloc_block_rsv(root);
if (!rsv)
return -ENOMEM;
rsv->size = min_size;
/*
* 1 for the truncate slack space
* 1 for the orphan item we're going to add
* 1 for the orphan item deletion
* 1 for updating the inode.
*/
trans = btrfs_start_transaction(root, 4);
if (IS_ERR(trans)) {
err = PTR_ERR(trans);
goto out;
}
/* Migrate the slack space for the truncate to our reserve */
ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
min_size);
BUG_ON(ret);
ret = btrfs_orphan_add(trans, inode);
if (ret) {
btrfs_end_transaction(trans, root);
goto out;
}
/*
* setattr is responsible for setting the ordered_data_close flag,
* but that is only tested during the last file release. That
* could happen well after the next commit, leaving a great big
* window where new writes may get lost if someone chooses to write
* to this file after truncating to zero
*
* The inode doesn't have any dirty data here, and so if we commit
* this is a noop. If someone immediately starts writing to the inode
* it is very likely we'll catch some of their writes in this
* transaction, and the commit will find this file on the ordered
* data list with good things to send down.
*
* This is a best effort solution, there is still a window where
* using truncate to replace the contents of the file will
* end up with a zero length file after a crash.
*/
if (inode->i_size == 0 && BTRFS_I(inode)->ordered_data_close)
btrfs_add_ordered_operation(trans, root, inode);
while (1) {
ret = btrfs_block_rsv_refill(root, rsv, min_size);
if (ret) {
/*
* This can only happen with the original transaction we
* started above, every other time we shouldn't have a
* transaction started yet.
*/
if (ret == -EAGAIN)
goto end_trans;
err = ret;
break;
}
if (!trans) {
/* Just need the 1 for updating the inode */
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans)) {
ret = err = PTR_ERR(trans);
trans = NULL;
break;
}
}
trans->block_rsv = rsv;
ret = btrfs_truncate_inode_items(trans, root, inode,
inode->i_size,
BTRFS_EXTENT_DATA_KEY);
if (ret != -EAGAIN) {
err = ret;
break;
}
trans->block_rsv = &root->fs_info->trans_block_rsv;
ret = btrfs_update_inode(trans, root, inode);
if (ret) {
err = ret;
break;
}
end_trans:
nr = trans->blocks_used;
btrfs_end_transaction(trans, root);
trans = NULL;
btrfs_btree_balance_dirty(root, nr);
}
if (ret == 0 && inode->i_nlink > 0) {
trans->block_rsv = root->orphan_block_rsv;
ret = btrfs_orphan_del(trans, inode);
if (ret)
err = ret;
} else if (ret && inode->i_nlink > 0) {
/*
* Failed to do the truncate, remove us from the in memory
* orphan list.
*/
ret = btrfs_orphan_del(NULL, inode);
}
if (trans) {
trans->block_rsv = &root->fs_info->trans_block_rsv;
ret = btrfs_update_inode(trans, root, inode);
if (ret && !err)
err = ret;
nr = trans->blocks_used;
ret = btrfs_end_transaction(trans, root);
btrfs_btree_balance_dirty(root, nr);
}
out:
btrfs_free_block_rsv(root, rsv);
if (ret && !err)
err = ret;
return err;
}
/*
* create a new subvolume directory/inode (helper for the ioctl).
*/
int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
struct btrfs_root *new_root, u64 new_dirid)
{
struct inode *inode;
int err;
u64 index = 0;
inode = btrfs_new_inode(trans, new_root, NULL, "..", 2,
new_dirid, new_dirid,
S_IFDIR | (~current_umask() & S_IRWXUGO),
&index);
if (IS_ERR(inode))
return PTR_ERR(inode);
inode->i_op = &btrfs_dir_inode_operations;
inode->i_fop = &btrfs_dir_file_operations;
set_nlink(inode, 1);
btrfs_i_size_write(inode, 0);
err = btrfs_update_inode(trans, new_root, inode);
iput(inode);
return err;
}
struct inode *btrfs_alloc_inode(struct super_block *sb)
{
struct btrfs_inode *ei;
struct inode *inode;
ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
if (!ei)
return NULL;
ei->root = NULL;
ei->space_info = NULL;
ei->generation = 0;
ei->sequence = 0;
ei->last_trans = 0;
ei->last_sub_trans = 0;
ei->logged_trans = 0;
ei->delalloc_bytes = 0;
ei->disk_i_size = 0;
ei->flags = 0;
ei->csum_bytes = 0;
ei->index_cnt = (u64)-1;
ei->last_unlink_trans = 0;
spin_lock_init(&ei->lock);
ei->outstanding_extents = 0;
ei->reserved_extents = 0;
ei->ordered_data_close = 0;
ei->orphan_meta_reserved = 0;
ei->dummy_inode = 0;
ei->in_defrag = 0;
ei->delalloc_meta_reserved = 0;
ei->force_compress = BTRFS_COMPRESS_NONE;
ei->delayed_node = NULL;
inode = &ei->vfs_inode;
extent_map_tree_init(&ei->extent_tree);
extent_io_tree_init(&ei->io_tree, &inode->i_data);
extent_io_tree_init(&ei->io_failure_tree, &inode->i_data);
ei->io_tree.track_uptodate = 1;
ei->io_failure_tree.track_uptodate = 1;
mutex_init(&ei->log_mutex);
mutex_init(&ei->delalloc_mutex);
btrfs_ordered_inode_tree_init(&ei->ordered_tree);
INIT_LIST_HEAD(&ei->i_orphan);
INIT_LIST_HEAD(&ei->delalloc_inodes);
INIT_LIST_HEAD(&ei->ordered_operations);
RB_CLEAR_NODE(&ei->rb_node);
return inode;
}
static void btrfs_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
}
void btrfs_destroy_inode(struct inode *inode)
{
struct btrfs_ordered_extent *ordered;
struct btrfs_root *root = BTRFS_I(inode)->root;
WARN_ON(!list_empty(&inode->i_dentry));
WARN_ON(inode->i_data.nrpages);
WARN_ON(BTRFS_I(inode)->outstanding_extents);
WARN_ON(BTRFS_I(inode)->reserved_extents);
WARN_ON(BTRFS_I(inode)->delalloc_bytes);
WARN_ON(BTRFS_I(inode)->csum_bytes);
/*
* This can happen where we create an inode, but somebody else also
* created the same inode and we need to destroy the one we already
* created.
*/
if (!root)
goto free;
/*
* Make sure we're properly removed from the ordered operation
* lists.
*/
smp_mb();
if (!list_empty(&BTRFS_I(inode)->ordered_operations)) {
spin_lock(&root->fs_info->ordered_extent_lock);
list_del_init(&BTRFS_I(inode)->ordered_operations);
spin_unlock(&root->fs_info->ordered_extent_lock);
}
spin_lock(&root->orphan_lock);
if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
printk(KERN_INFO "BTRFS: inode %llu still on the orphan list\n",
(unsigned long long)btrfs_ino(inode));
list_del_init(&BTRFS_I(inode)->i_orphan);
}
spin_unlock(&root->orphan_lock);
while (1) {
ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
if (!ordered)
break;
else {
printk(KERN_ERR "btrfs found ordered "
"extent %llu %llu on inode cleanup\n",
(unsigned long long)ordered->file_offset,
(unsigned long long)ordered->len);
btrfs_remove_ordered_extent(inode, ordered);
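/* once for the lookup above, once for the ordered extent itself */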
btrfs_put_ordered_extent(ordered);
btrfs_put_ordered_extent(ordered);
}
}
inode_tree_del(inode);
btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
free:
btrfs_remove_delayed_node(inode);
call_rcu(&inode->i_rcu, btrfs_i_callback);
}
int btrfs_drop_inode(struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
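/*
* Evict immediately once the subvolume is dead; the free space cache
* inode still takes the generic path so it can be reclaimed normally.
*/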
if (btrfs_root_refs(&root->root_item) == 0 &&
!btrfs_is_free_space_inode(root, inode))
return 1;
else
return generic_drop_inode(inode);
}
static void init_once(void *foo)
{
struct btrfs_inode *ei = (struct btrfs_inode *) foo;
inode_init_once(&ei->vfs_inode);
}
void btrfs_destroy_cachep(void)
{
if (btrfs_inode_cachep)
kmem_cache_destroy(btrfs_inode_cachep);
if (btrfs_trans_handle_cachep)
kmem_cache_destroy(btrfs_trans_handle_cachep);
if (btrfs_transaction_cachep)
kmem_cache_destroy(btrfs_transaction_cachep);
if (btrfs_path_cachep)
kmem_cache_destroy(btrfs_path_cachep);
if (btrfs_free_space_cachep)
kmem_cache_destroy(btrfs_free_space_cachep);
}
int btrfs_init_cachep(void)
{
btrfs_inode_cachep = kmem_cache_create("btrfs_inode_cache",
sizeof(struct btrfs_inode), 0,
SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once);
if (!btrfs_inode_cachep)
goto fail;
btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle_cache",
sizeof(struct btrfs_trans_handle), 0,
SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
if (!btrfs_trans_handle_cachep)
goto fail;
btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction_cache",
sizeof(struct btrfs_transaction), 0,
SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
if (!btrfs_transaction_cachep)
goto fail;
btrfs_path_cachep = kmem_cache_create("btrfs_path_cache",
sizeof(struct btrfs_path), 0,
SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
if (!btrfs_path_cachep)
goto fail;
btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space_cache",
sizeof(struct btrfs_free_space), 0,
SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
if (!btrfs_free_space_cachep)
goto fail;
return 0;
fail:
btrfs_destroy_cachep();
return -ENOMEM;
}
static int btrfs_getattr(struct vfsmount *mnt,
struct dentry *dentry, struct kstat *stat)
{
struct inode *inode = dentry->d_inode;
u32 blocksize = inode->i_sb->s_blocksize;
generic_fillattr(inode, stat);
stat->dev = BTRFS_I(inode)->root->anon_dev;
stat->blksize = PAGE_CACHE_SIZE;
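/* report blocks in 512-byte units, counting bytes already on disk
* plus outstanding delalloc that will be written back */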
stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) +
ALIGN(BTRFS_I(inode)->delalloc_bytes, blocksize)) >> 9;
return 0;
}
/*
* If a file is moved, it will inherit the cow and compression flags of the new
* directory.
*/
static void fixup_inode_flags(struct inode *dir, struct inode *inode)
{
struct btrfs_inode *b_dir = BTRFS_I(dir);
struct btrfs_inode *b_inode = BTRFS_I(inode);
if (b_dir->flags & BTRFS_INODE_NODATACOW)
b_inode->flags |= BTRFS_INODE_NODATACOW;
else
b_inode->flags &= ~BTRFS_INODE_NODATACOW;
if (b_dir->flags & BTRFS_INODE_COMPRESS)
b_inode->flags |= BTRFS_INODE_COMPRESS;
else
b_inode->flags &= ~BTRFS_INODE_COMPRESS;
}
static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(old_dir)->root;
struct btrfs_root *dest = BTRFS_I(new_dir)->root;
struct inode *new_inode = new_dentry->d_inode;
struct inode *old_inode = old_dentry->d_inode;
struct timespec ctime = CURRENT_TIME;
u64 index = 0;
u64 root_objectid;
int ret;
u64 old_ino = btrfs_ino(old_inode);
if (btrfs_ino(new_dir) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
return -EPERM;
/* we only allow rename subvolume link between subvolumes */
if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
return -EXDEV;
if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
(new_inode && btrfs_ino(new_inode) == BTRFS_FIRST_FREE_OBJECTID))
return -ENOTEMPTY;
if (S_ISDIR(old_inode->i_mode) && new_inode &&
new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
return -ENOTEMPTY;
/*
* we're using rename to replace one file with another, and the
* replacement file is large. Start IO on it now so
* we don't add too much work to the end of the transaction
*/
if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size &&
old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
filemap_flush(old_inode->i_mapping);
/* close the racy window with snapshot create/destroy ioctl */
if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
down_read(&root->fs_info->subvol_sem);
/*
* We want to reserve the absolute worst case amount of items. So if
* both inodes are subvols and we need to unlink them then that would
* require 4 item modifications, but if they are both normal inodes it
* would require 5 item modifications, so we'll assume they're normal
* inodes. So 5 * 2 is 10, plus 1 for the new link, so 11 total items
* should cover the worst case number of items we'll modify. (Note the
* transaction below reserves 20 units, well above that estimate.)
*/
trans = btrfs_start_transaction(root, 20);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
goto out_notrans;
}
if (dest != root)
btrfs_record_root_in_trans(trans, dest);
ret = btrfs_set_inode_index(new_dir, &index);
if (ret)
goto out_fail;
if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
/* force full log commit if subvolume involved. */
root->fs_info->last_trans_log_full_commit = trans->transid;
} else {
ret = btrfs_insert_inode_ref(trans, dest,
new_dentry->d_name.name,
new_dentry->d_name.len,
old_ino,
btrfs_ino(new_dir), index);
if (ret)
goto out_fail;
/*
* this is an ugly little race, but the rename is required
* to make sure that if we crash, the inode is either at the
* old name or the new one. pinning the log transaction lets
* us make sure we don't allow a log commit to come in after
* we unlink the name but before we add the new name back in.
*/
btrfs_pin_log_trans(root);
}
/*
* make sure the inode gets flushed if it is replacing
* something.
*/
if (new_inode && new_inode->i_size && S_ISREG(old_inode->i_mode))
btrfs_add_ordered_operation(trans, root, old_inode);
old_dir->i_ctime = old_dir->i_mtime = ctime;
new_dir->i_ctime = new_dir->i_mtime = ctime;
old_inode->i_ctime = ctime;
if (old_dentry->d_parent != new_dentry->d_parent)
btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);
if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
old_dentry->d_name.name,
old_dentry->d_name.len);
} else {
ret = __btrfs_unlink_inode(trans, root, old_dir,
old_dentry->d_inode,
old_dentry->d_name.name,
old_dentry->d_name.len);
if (!ret)
ret = btrfs_update_inode(trans, root, old_inode);
}
if (ret) {
btrfs_abort_transaction(trans, root, ret);
goto out_fail;
}
if (new_inode) {
new_inode->i_ctime = CURRENT_TIME;
if (unlikely(btrfs_ino(new_inode) ==
BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
root_objectid = BTRFS_I(new_inode)->location.objectid;
ret = btrfs_unlink_subvol(trans, dest, new_dir,
root_objectid,
new_dentry->d_name.name,
new_dentry->d_name.len);
BUG_ON(new_inode->i_nlink == 0);
} else {
ret = btrfs_unlink_inode(trans, dest, new_dir,
new_dentry->d_inode,
new_dentry->d_name.name,
new_dentry->d_name.len);
}
if (!ret && new_inode->i_nlink == 0) {
ret = btrfs_orphan_add(trans, new_dentry->d_inode);
BUG_ON(ret);
}
if (ret) {
btrfs_abort_transaction(trans, root, ret);
goto out_fail;
}
}
fixup_inode_flags(new_dir, old_inode);
ret = btrfs_add_link(trans, new_dir, old_inode,
new_dentry->d_name.name,
new_dentry->d_name.len, 0, index);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
goto out_fail;
}
if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
struct dentry *parent = new_dentry->d_parent;
btrfs_log_new_name(trans, old_inode, old_dir, parent);
btrfs_end_log_trans(root);
}
out_fail:
btrfs_end_transaction(trans, root);
out_notrans:
if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
up_read(&root->fs_info->subvol_sem);
return ret;
}
/*
* some fairly slow code that needs optimization. This walks the list
* of all the inodes with pending delalloc and forces them to disk.
*/
int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
{
struct list_head *head = &root->fs_info->delalloc_inodes;
struct btrfs_inode *binode;
struct inode *inode;
if (root->fs_info->sb->s_flags & MS_RDONLY)
return -EROFS;
spin_lock(&root->fs_info->delalloc_lock);
while (!list_empty(head)) {
binode = list_entry(head->next, struct btrfs_inode,
delalloc_inodes);
inode = igrab(&binode->vfs_inode);
if (!inode)
list_del_init(&binode->delalloc_inodes);
spin_unlock(&root->fs_info->delalloc_lock);
if (inode) {
filemap_flush(inode->i_mapping);
if (delay_iput)
btrfs_add_delayed_iput(inode);
else
iput(inode);
}
cond_resched();
spin_lock(&root->fs_info->delalloc_lock);
}
spin_unlock(&root->fs_info->delalloc_lock);
/* the filemap_flush will queue IO into the worker threads, but
* we have to make sure the IO is actually started and that
* ordered extents get created before we return
*/
atomic_inc(&root->fs_info->async_submit_draining);
while (atomic_read(&root->fs_info->nr_async_submits) ||
atomic_read(&root->fs_info->async_delalloc_pages)) {
wait_event(root->fs_info->async_submit_wait,
(atomic_read(&root->fs_info->nr_async_submits) == 0 &&
atomic_read(&root->fs_info->async_delalloc_pages) == 0));
}
atomic_dec(&root->fs_info->async_submit_draining);
return 0;
}
static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
const char *symname)
{
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_path *path;
struct btrfs_key key;
struct inode *inode = NULL;
int err;
int drop_inode = 0;
u64 objectid;
u64 index = 0;
int name_len;
int datasize;
unsigned long ptr;
struct btrfs_file_extent_item *ei;
struct extent_buffer *leaf;
unsigned long nr = 0;
name_len = strlen(symname) + 1;
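/* name_len counts the trailing NUL: the inline extent below stores
* it, while i_size is set to name_len - 1 to exclude it */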
if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
return -ENAMETOOLONG;
/*
* 2 items for inode item and ref
* 2 items for dir items
* 1 item for xattr if selinux is on
*/
trans = btrfs_start_transaction(root, 5);
if (IS_ERR(trans))
return PTR_ERR(trans);
err = btrfs_find_free_ino(root, &objectid);
if (err)
goto out_unlock;
inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
dentry->d_name.len, btrfs_ino(dir), objectid,
S_IFLNK|S_IRWXUGO, &index);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto out_unlock;
}
err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
if (err) {
drop_inode = 1;
goto out_unlock;
}
/*
* If the active LSM wants to access the inode during
* d_instantiate it needs these. Smack checks to see
* if the filesystem supports xattrs by looking at the
* ops vector.
*/
inode->i_fop = &btrfs_file_operations;
inode->i_op = &btrfs_file_inode_operations;
err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
if (err)
drop_inode = 1;
else {
inode->i_mapping->a_ops = &btrfs_aops;
inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
}
if (drop_inode)
goto out_unlock;
path = btrfs_alloc_path();
if (!path) {
err = -ENOMEM;
drop_inode = 1;
goto out_unlock;
}
key.objectid = btrfs_ino(inode);
key.offset = 0;
btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
datasize = btrfs_file_extent_calc_inline_size(name_len);
err = btrfs_insert_empty_item(trans, root, path, &key,
datasize);
if (err) {
drop_inode = 1;
btrfs_free_path(path);
goto out_unlock;
}
leaf = path->nodes[0];
ei = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
btrfs_set_file_extent_generation(leaf, ei, trans->transid);
btrfs_set_file_extent_type(leaf, ei,
BTRFS_FILE_EXTENT_INLINE);
btrfs_set_file_extent_encryption(leaf, ei, 0);
btrfs_set_file_extent_compression(leaf, ei, 0);
btrfs_set_file_extent_other_encoding(leaf, ei, 0);
btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
ptr = btrfs_file_extent_inline_start(ei);
write_extent_buffer(leaf, symname, ptr, name_len);
btrfs_mark_buffer_dirty(leaf);
btrfs_free_path(path);
inode->i_op = &btrfs_symlink_inode_operations;
inode->i_mapping->a_ops = &btrfs_symlink_aops;
inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
inode_set_bytes(inode, name_len);
btrfs_i_size_write(inode, name_len - 1);
err = btrfs_update_inode(trans, root, inode);
if (err)
drop_inode = 1;
out_unlock:
if (!err)
d_instantiate(dentry, inode);
nr = trans->blocks_used;
btrfs_end_transaction(trans, root);
if (drop_inode) {
inode_dec_link_count(inode);
iput(inode);
}
btrfs_btree_balance_dirty(root, nr);
return err;
}
static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
u64 start, u64 num_bytes, u64 min_size,
loff_t actual_len, u64 *alloc_hint,
struct btrfs_trans_handle *trans)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_key ins;
u64 cur_offset = start;
u64 i_size;
int ret = 0;
bool own_trans = true;
if (trans)
own_trans = false;
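/* with a caller-supplied transaction we neither start nor end one
* per iteration below */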
while (num_bytes > 0) {
if (own_trans) {
trans = btrfs_start_transaction(root, 3);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
break;
}
}
ret = btrfs_reserve_extent(trans, root, num_bytes, min_size,
0, *alloc_hint, &ins, 1);
if (ret) {
if (own_trans)
btrfs_end_transaction(trans, root);
break;
}
ret = insert_reserved_file_extent(trans, inode,
cur_offset, ins.objectid,
ins.offset, ins.offset,
ins.offset, 0, 0, 0,
BTRFS_FILE_EXTENT_PREALLOC);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
if (own_trans)
btrfs_end_transaction(trans, root);
break;
}
btrfs_drop_extent_cache(inode, cur_offset,
cur_offset + ins.offset - 1, 0);
num_bytes -= ins.offset;
cur_offset += ins.offset;
*alloc_hint = ins.objectid + ins.offset;
inode->i_ctime = CURRENT_TIME;
BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
if (!(mode & FALLOC_FL_KEEP_SIZE) &&
(actual_len > inode->i_size) &&
(cur_offset > inode->i_size)) {
if (cur_offset > actual_len)
i_size = actual_len;
else
i_size = cur_offset;
i_size_write(inode, i_size);
btrfs_ordered_update_i_size(inode, i_size, NULL);
}
ret = btrfs_update_inode(trans, root, inode);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
if (own_trans)
btrfs_end_transaction(trans, root);
break;
}
if (own_trans)
btrfs_end_transaction(trans, root);
}
return ret;
}
int btrfs_prealloc_file_range(struct inode *inode, int mode,
u64 start, u64 num_bytes, u64 min_size,
loff_t actual_len, u64 *alloc_hint)
{
return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
min_size, actual_len, alloc_hint,
NULL);
}
int btrfs_prealloc_file_range_trans(struct inode *inode,
struct btrfs_trans_handle *trans, int mode,
u64 start, u64 num_bytes, u64 min_size,
loff_t actual_len, u64 *alloc_hint)
{
return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
min_size, actual_len, alloc_hint, trans);
}
static int btrfs_set_page_dirty(struct page *page)
{
return __set_page_dirty_nobuffers(page);
}
static int btrfs_permission(struct inode *inode, int mask)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
umode_t mode = inode->i_mode;
if (mask & MAY_WRITE &&
(S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
if (btrfs_root_readonly(root))
return -EROFS;
if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
return -EACCES;
}
return generic_permission(inode, mask);
}
static const struct inode_operations btrfs_dir_inode_operations = {
.getattr = btrfs_getattr,
.lookup = btrfs_lookup,
.create = btrfs_create,
.unlink = btrfs_unlink,
.link = btrfs_link,
.mkdir = btrfs_mkdir,
.rmdir = btrfs_rmdir,
.rename = btrfs_rename,
.symlink = btrfs_symlink,
.setattr = btrfs_setattr,
.mknod = btrfs_mknod,
.setxattr = btrfs_setxattr,
.getxattr = btrfs_getxattr,
.listxattr = btrfs_listxattr,
.removexattr = btrfs_removexattr,
.permission = btrfs_permission,
.get_acl = btrfs_get_acl,
};
static const struct inode_operations btrfs_dir_ro_inode_operations = {
.lookup = btrfs_lookup,
.permission = btrfs_permission,
.get_acl = btrfs_get_acl,
};
static const struct file_operations btrfs_dir_file_operations = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
.readdir = btrfs_real_readdir,
.unlocked_ioctl = btrfs_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = btrfs_ioctl,
#endif
.release = btrfs_release_file,
.fsync = btrfs_sync_file,
};
static struct extent_io_ops btrfs_extent_io_ops = {
.fill_delalloc = run_delalloc_range,
.submit_bio_hook = btrfs_submit_bio_hook,
.merge_bio_hook = btrfs_merge_bio_hook,
.readpage_end_io_hook = btrfs_readpage_end_io_hook,
.writepage_end_io_hook = btrfs_writepage_end_io_hook,
.writepage_start_hook = btrfs_writepage_start_hook,
.set_bit_hook = btrfs_set_bit_hook,
.clear_bit_hook = btrfs_clear_bit_hook,
.merge_extent_hook = btrfs_merge_extent_hook,
.split_extent_hook = btrfs_split_extent_hook,
};
/*
* btrfs doesn't support the bmap operation because swapfiles
* use bmap to make a mapping of extents in the file. They assume
* these extents won't change over the life of the file and they
* use the bmap result to do IO directly to the drive.
*
* the btrfs bmap call would return logical addresses that aren't
* suitable for IO and they also will change frequently as COW
* operations happen. So, swapfile + btrfs == corruption.
*
* For now we're avoiding this by dropping bmap.
*/
static const struct address_space_operations btrfs_aops = {
.readpage = btrfs_readpage,
.writepage = btrfs_writepage,
.writepages = btrfs_writepages,
.readpages = btrfs_readpages,
.direct_IO = btrfs_direct_IO,
.invalidatepage = btrfs_invalidatepage,
.releasepage = btrfs_releasepage,
.set_page_dirty = btrfs_set_page_dirty,
.error_remove_page = generic_error_remove_page,
};
static const struct address_space_operations btrfs_symlink_aops = {
.readpage = btrfs_readpage,
.writepage = btrfs_writepage,
.invalidatepage = btrfs_invalidatepage,
.releasepage = btrfs_releasepage,
};
static const struct inode_operations btrfs_file_inode_operations = {
.getattr = btrfs_getattr,
.setattr = btrfs_setattr,
.setxattr = btrfs_setxattr,
.getxattr = btrfs_getxattr,
.listxattr = btrfs_listxattr,
.removexattr = btrfs_removexattr,
.permission = btrfs_permission,
.fiemap = btrfs_fiemap,
.get_acl = btrfs_get_acl,
};
static const struct inode_operations btrfs_special_inode_operations = {
.getattr = btrfs_getattr,
.setattr = btrfs_setattr,
.permission = btrfs_permission,
.setxattr = btrfs_setxattr,
.getxattr = btrfs_getxattr,
.listxattr = btrfs_listxattr,
.removexattr = btrfs_removexattr,
.get_acl = btrfs_get_acl,
};
static const struct inode_operations btrfs_symlink_inode_operations = {
.readlink = generic_readlink,
.follow_link = page_follow_link_light,
.put_link = page_put_link,
.getattr = btrfs_getattr,
.setattr = btrfs_setattr,
.permission = btrfs_permission,
.setxattr = btrfs_setxattr,
.getxattr = btrfs_getxattr,
.listxattr = btrfs_listxattr,
.removexattr = btrfs_removexattr,
.get_acl = btrfs_get_acl,
};
const struct dentry_operations btrfs_dentry_operations = {
.d_delete = btrfs_dentry_delete,
.d_release = btrfs_dentry_release,
};
| gpl-2.0 |
cretin45/htc-kernel-pyramid | drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 2482 | 8850 | /**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "vmwgfx_drv.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM |
TTM_PL_FLAG_CACHED;
static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
TTM_PL_FLAG_CACHED |
TTM_PL_FLAG_NO_EVICT;
static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
TTM_PL_FLAG_CACHED;
static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR |
TTM_PL_FLAG_CACHED;
struct ttm_placement vmw_vram_placement = {
.fpfn = 0,
.lpfn = 0,
.num_placement = 1,
.placement = &vram_placement_flags,
.num_busy_placement = 1,
.busy_placement = &vram_placement_flags
};
static uint32_t vram_gmr_placement_flags[] = {
TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};
struct ttm_placement vmw_vram_gmr_placement = {
.fpfn = 0,
.lpfn = 0,
.num_placement = 2,
.placement = vram_gmr_placement_flags,
.num_busy_placement = 1,
.busy_placement = &gmr_placement_flags
};
struct ttm_placement vmw_vram_sys_placement = {
.fpfn = 0,
.lpfn = 0,
.num_placement = 1,
.placement = &vram_placement_flags,
.num_busy_placement = 1,
.busy_placement = &sys_placement_flags
};
struct ttm_placement vmw_vram_ne_placement = {
.fpfn = 0,
.lpfn = 0,
.num_placement = 1,
.placement = &vram_ne_placement_flags,
.num_busy_placement = 1,
.busy_placement = &vram_ne_placement_flags
};
struct ttm_placement vmw_sys_placement = {
.fpfn = 0,
.lpfn = 0,
.num_placement = 1,
.placement = &sys_placement_flags,
.num_busy_placement = 1,
.busy_placement = &sys_placement_flags
};
struct vmw_ttm_backend {
struct ttm_backend backend;
struct page **pages;
unsigned long num_pages;
struct vmw_private *dev_priv;
int gmr_id;
};
static int vmw_ttm_populate(struct ttm_backend *backend,
unsigned long num_pages, struct page **pages,
struct page *dummy_read_page,
dma_addr_t *dma_addrs)
{
struct vmw_ttm_backend *vmw_be =
container_of(backend, struct vmw_ttm_backend, backend);
vmw_be->pages = pages;
vmw_be->num_pages = num_pages;
return 0;
}
static int vmw_ttm_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
{
struct vmw_ttm_backend *vmw_be =
container_of(backend, struct vmw_ttm_backend, backend);
vmw_be->gmr_id = bo_mem->start;
return vmw_gmr_bind(vmw_be->dev_priv, vmw_be->pages,
vmw_be->num_pages, vmw_be->gmr_id);
}
static int vmw_ttm_unbind(struct ttm_backend *backend)
{
struct vmw_ttm_backend *vmw_be =
container_of(backend, struct vmw_ttm_backend, backend);
vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
return 0;
}
static void vmw_ttm_clear(struct ttm_backend *backend)
{
struct vmw_ttm_backend *vmw_be =
container_of(backend, struct vmw_ttm_backend, backend);
vmw_be->pages = NULL;
vmw_be->num_pages = 0;
}
static void vmw_ttm_destroy(struct ttm_backend *backend)
{
struct vmw_ttm_backend *vmw_be =
container_of(backend, struct vmw_ttm_backend, backend);
kfree(vmw_be);
}
static struct ttm_backend_func vmw_ttm_func = {
.populate = vmw_ttm_populate,
.clear = vmw_ttm_clear,
.bind = vmw_ttm_bind,
.unbind = vmw_ttm_unbind,
.destroy = vmw_ttm_destroy,
};
struct ttm_backend *vmw_ttm_backend_init(struct ttm_bo_device *bdev)
{
struct vmw_ttm_backend *vmw_be;
vmw_be = kmalloc(sizeof(*vmw_be), GFP_KERNEL);
if (!vmw_be)
return NULL;
vmw_be->backend.func = &vmw_ttm_func;
vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
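/* hand back the embedded ttm_backend; the hooks above recover the
* containing vmw_ttm_backend again via container_of() */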
return &vmw_be->backend;
}
int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
return 0;
}
int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
struct ttm_mem_type_manager *man)
{
switch (type) {
case TTM_PL_SYSTEM:
/* System memory */
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_CACHED;
man->default_caching = TTM_PL_FLAG_CACHED;
break;
case TTM_PL_VRAM:
/* "On-card" video ram */
man->func = &ttm_bo_manager_func;
man->gpu_offset = 0;
man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_CACHED;
man->default_caching = TTM_PL_FLAG_CACHED;
break;
case VMW_PL_GMR:
/*
* "Guest Memory Regions" is an aperture-like feature with
* one slot per bo. There is an upper limit on the number of
* slots as well as on the bo size.
*/
man->func = &vmw_gmrid_manager_func;
man->gpu_offset = 0;
man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_CACHED;
man->default_caching = TTM_PL_FLAG_CACHED;
break;
default:
DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
return -EINVAL;
}
return 0;
}
void vmw_evict_flags(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
*placement = vmw_sys_placement;
}
/**
* FIXME: Proper access checks on buffers.
*/
static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
return 0;
}
static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
mem->bus.addr = NULL;
mem->bus.is_iomem = false;
mem->bus.offset = 0;
mem->bus.size = mem->num_pages << PAGE_SHIFT;
mem->bus.base = 0;
if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
return -EINVAL;
switch (mem->mem_type) {
case TTM_PL_SYSTEM:
case VMW_PL_GMR:
return 0;
case TTM_PL_VRAM:
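/* VRAM is a linear aperture: bus address = vram_start + page offset */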
mem->bus.offset = mem->start << PAGE_SHIFT;
mem->bus.base = dev_priv->vram_start;
mem->bus.is_iomem = true;
break;
default:
return -EINVAL;
}
return 0;
}
static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}
static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
return 0;
}
/**
* FIXME: We're using the old vmware polling method to sync.
* Do this with fences instead.
*/
static void *vmw_sync_obj_ref(void *sync_obj)
{
return sync_obj;
}
static void vmw_sync_obj_unref(void **sync_obj)
{
*sync_obj = NULL;
}
static int vmw_sync_obj_flush(void *sync_obj, void *sync_arg)
{
struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
mutex_unlock(&dev_priv->hw_mutex);
return 0;
}
static bool vmw_sync_obj_signaled(void *sync_obj, void *sync_arg)
{
struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
uint32_t sequence = (unsigned long) sync_obj;
return vmw_fence_signaled(dev_priv, sequence);
}
static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg,
bool lazy, bool interruptible)
{
struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
uint32_t sequence = (unsigned long) sync_obj;
return vmw_wait_fence(dev_priv, false, sequence, false, 3*HZ);
}
struct ttm_bo_driver vmw_bo_driver = {
.create_ttm_backend_entry = vmw_ttm_backend_init,
.invalidate_caches = vmw_invalidate_caches,
.init_mem_type = vmw_init_mem_type,
.evict_flags = vmw_evict_flags,
.move = NULL,
.verify_access = vmw_verify_access,
.sync_obj_signaled = vmw_sync_obj_signaled,
.sync_obj_wait = vmw_sync_obj_wait,
.sync_obj_flush = vmw_sync_obj_flush,
.sync_obj_unref = vmw_sync_obj_unref,
.sync_obj_ref = vmw_sync_obj_ref,
.move_notify = NULL,
.swap_notify = NULL,
.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
.io_mem_reserve = &vmw_ttm_io_mem_reserve,
.io_mem_free = &vmw_ttm_io_mem_free,
};
| gpl-2.0 |
profglavcho/mt6735-kernel-3.10.61 | drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c | 2738 | 17518 | /******************************************************************************
*
* Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
* Hsinchu 300, Taiwan.
*
* Larry Finger <Larry.Finger@lwfinger.net>
*
*****************************************************************************/
#include "hal_bt_coexist.h"
#include "../pci.h"
#include "dm.h"
#include "fw.h"
#include "phy.h"
#include "reg.h"
#include "hal_btc.h"
void rtl8723ae_dm_bt_reject_ap_aggregated_packet(struct ieee80211_hw *hw,
bool reject)
{
}
void _rtl8723_dm_bt_check_wifi_state(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
if (rtlpriv->link_info.busytraffic) {
rtlpcipriv->bt_coexist.cstate &= ~BT_COEX_STATE_WIFI_IDLE;
if (rtlpriv->link_info.tx_busy_traffic)
rtlpcipriv->bt_coexist.cstate |=
BT_COEX_STATE_WIFI_UPLINK;
else
rtlpcipriv->bt_coexist.cstate &=
~BT_COEX_STATE_WIFI_UPLINK;
if (rtlpriv->link_info.rx_busy_traffic)
rtlpcipriv->bt_coexist.cstate |=
BT_COEX_STATE_WIFI_DOWNLINK;
else
rtlpcipriv->bt_coexist.cstate &=
~BT_COEX_STATE_WIFI_DOWNLINK;
} else {
rtlpcipriv->bt_coexist.cstate |= BT_COEX_STATE_WIFI_IDLE;
rtlpcipriv->bt_coexist.cstate &= ~BT_COEX_STATE_WIFI_UPLINK;
rtlpcipriv->bt_coexist.cstate &= ~BT_COEX_STATE_WIFI_DOWNLINK;
}
if (rtlpriv->mac80211.mode == WIRELESS_MODE_G ||
rtlpriv->mac80211.mode == WIRELESS_MODE_B) {
rtlpcipriv->bt_coexist.cstate |= BT_COEX_STATE_WIFI_LEGACY;
rtlpcipriv->bt_coexist.cstate &= ~BT_COEX_STATE_WIFI_HT20;
rtlpcipriv->bt_coexist.cstate &= ~BT_COEX_STATE_WIFI_HT40;
} else {
rtlpcipriv->bt_coexist.cstate &= ~BT_COEX_STATE_WIFI_LEGACY;
if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
rtlpcipriv->bt_coexist.cstate |=
BT_COEX_STATE_WIFI_HT40;
rtlpcipriv->bt_coexist.cstate &=
~BT_COEX_STATE_WIFI_HT20;
} else {
rtlpcipriv->bt_coexist.cstate |=
BT_COEX_STATE_WIFI_HT20;
rtlpcipriv->bt_coexist.cstate &=
~BT_COEX_STATE_WIFI_HT40;
}
}
if (rtlpriv->bt_operation_on)
rtlpcipriv->bt_coexist.cstate |= BT_COEX_STATE_BT30;
else
rtlpcipriv->bt_coexist.cstate &= ~BT_COEX_STATE_BT30;
}
u8 rtl8723ae_dm_bt_check_coex_rssi_state1(struct ieee80211_hw *hw,
u8 level_num, u8 rssi_thresh,
u8 rssi_thresh1)
{
struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
struct rtl_priv *rtlpriv = rtl_priv(hw);
long smooth;
u8 bt_rssi_state = 0;
smooth = rtl8723ae_dm_bt_get_rx_ss(hw);
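/*
* Classic hysteresis: climb a level only once smooth exceeds the
* threshold plus BT_FW_COEX_THRESH_TOL, drop only once it falls below
* the bare threshold, and otherwise stay in the current state.
*/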
if (level_num == 2) {
rtlpcipriv->bt_coexist.cstate &=
~BT_COEX_STATE_WIFI_RSSI_1_MEDIUM;
if ((rtlpcipriv->bt_coexist.bt_pre_rssi_state ==
BT_RSSI_STATE_LOW) ||
(rtlpcipriv->bt_coexist.bt_pre_rssi_state ==
BT_RSSI_STATE_STAY_LOW)) {
if (smooth >= (rssi_thresh +
BT_FW_COEX_THRESH_TOL)) {
bt_rssi_state = BT_RSSI_STATE_HIGH;
rtlpcipriv->bt_coexist.cstate |=
BT_COEX_STATE_WIFI_RSSI_1_HIGH;
rtlpcipriv->bt_coexist.cstate &=
~BT_COEX_STATE_WIFI_RSSI_1_LOW;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[DM][BT], RSSI_1 state switch to High\n");
} else {
bt_rssi_state = BT_RSSI_STATE_STAY_LOW;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[DM][BT], RSSI_1 state stay at Low\n");
}
} else {
if (smooth < rssi_thresh) {
bt_rssi_state = BT_RSSI_STATE_LOW;
rtlpcipriv->bt_coexist.cstate |=
BT_COEX_STATE_WIFI_RSSI_1_LOW;
rtlpcipriv->bt_coexist.cstate &=
~BT_COEX_STATE_WIFI_RSSI_1_HIGH;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[DM][BT], RSSI_1 state switch to Low\n");
} else {
bt_rssi_state = BT_RSSI_STATE_STAY_HIGH;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[DM][BT], RSSI_1 state stay at High\n");
}
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[DM][BT], RSSI_1 thresh error!!\n");
return rtlpcipriv->bt_coexist.bt_pre_rssi_state;
}
if ((rtlpcipriv->bt_coexist.bt_pre_rssi_state ==
BT_RSSI_STATE_LOW) ||
(rtlpcipriv->bt_coexist.bt_pre_rssi_state ==
BT_RSSI_STATE_STAY_LOW)) {
if (smooth >=
(rssi_thresh+BT_FW_COEX_THRESH_TOL)) {
bt_rssi_state = BT_RSSI_STATE_MEDIUM;
rtlpcipriv->bt_coexist.cstate |=
BT_COEX_STATE_WIFI_RSSI_1_MEDIUM;
rtlpcipriv->bt_coexist.cstate &=
~BT_COEX_STATE_WIFI_RSSI_1_LOW;
rtlpcipriv->bt_coexist.cstate &=
~BT_COEX_STATE_WIFI_RSSI_1_HIGH;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[DM][BT], RSSI_1 state switch to Medium\n");
} else {
bt_rssi_state = BT_RSSI_STATE_STAY_LOW;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[DM][BT], RSSI_1 state stay at Low\n");
}
} else if ((rtlpcipriv->bt_coexist.bt_pre_rssi_state ==
BT_RSSI_STATE_MEDIUM) ||
(rtlpcipriv->bt_coexist.bt_pre_rssi_state ==
BT_RSSI_STATE_STAY_MEDIUM)) {
if (smooth >= (rssi_thresh1 +
BT_FW_COEX_THRESH_TOL)) {
bt_rssi_state = BT_RSSI_STATE_HIGH;
rtlpcipriv->bt_coexist.cstate |=
BT_COEX_STATE_WIFI_RSSI_1_HIGH;
rtlpcipriv->bt_coexist.cstate &=
~BT_COEX_STATE_WIFI_RSSI_1_LOW;
rtlpcipriv->bt_coexist.cstate &=
~BT_COEX_STATE_WIFI_RSSI_1_MEDIUM;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[DM][BT], RSSI_1 state switch to High\n");
} else if (smooth < rssi_thresh) {
bt_rssi_state = BT_RSSI_STATE_LOW;
rtlpcipriv->bt_coexist.cstate |=
BT_COEX_STATE_WIFI_RSSI_1_LOW;
rtlpcipriv->bt_coexist.cstate &=
~BT_COEX_STATE_WIFI_RSSI_1_HIGH;
rtlpcipriv->bt_coexist.cstate &=
~BT_COEX_STATE_WIFI_RSSI_1_MEDIUM;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[DM][BT], RSSI_1 state switch to Low\n");
} else {
bt_rssi_state = BT_RSSI_STATE_STAY_MEDIUM;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[DM][BT], RSSI_1 state stay at Medium\n");
}
} else {
if (smooth < rssi_thresh1) {
bt_rssi_state = BT_RSSI_STATE_MEDIUM;
rtlpcipriv->bt_coexist.cstate |=
BT_COEX_STATE_WIFI_RSSI_1_MEDIUM;
rtlpcipriv->bt_coexist.cstate &=
~BT_COEX_STATE_WIFI_RSSI_1_HIGH;
rtlpcipriv->bt_coexist.cstate &=
~BT_COEX_STATE_WIFI_RSSI_1_LOW;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[DM][BT], RSSI_1 state switch to Medium\n");
} else {
bt_rssi_state = BT_RSSI_STATE_STAY_HIGH;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[DM][BT], RSSI_1 state stay at High\n");
}
}
}
rtlpcipriv->bt_coexist.bt_pre_rssi_state1 = bt_rssi_state;
return bt_rssi_state;
}
u8 rtl8723ae_dm_bt_check_coex_rssi_state(struct ieee80211_hw *hw,
u8 level_num, u8 rssi_thresh,
u8 rssi_thresh1)
{
struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
struct rtl_priv *rtlpriv = rtl_priv(hw);
long smooth;
u8 bt_rssi_state = 0;
smooth = rtl8723ae_dm_bt_get_rx_ss(hw);
if (level_num == 2) {
rtlpcipriv->bt_coexist.cstate &=
~BT_COEX_STATE_WIFI_RSSI_MEDIUM;
if ((rtlpcipriv->bt_coexist.bt_pre_rssi_state ==
BT_RSSI_STATE_LOW) ||
(rtlpcipriv->bt_coexist.bt_pre_rssi_state ==
BT_RSSI_STATE_STAY_LOW)) {
if (smooth >=
(rssi_thresh + BT_FW_COEX_THRESH_TOL)) {
bt_rssi_state = BT_RSSI_STATE_HIGH;
rtlpcipriv->bt_coexist.cstate |=
BT_COEX_STATE_WIFI_RSSI_HIGH;
rtlpcipriv->bt_coexist.cstate &=
~BT_COEX_STATE_WIFI_RSSI_LOW;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[DM][BT], RSSI state switch to High\n");
} else {
bt_rssi_state = BT_RSSI_STATE_STAY_LOW;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[DM][BT], RSSI state stay at Low\n");
}
} else {
if (smooth < rssi_thresh) {
bt_rssi_state = BT_RSSI_STATE_LOW;
rtlpcipriv->bt_coexist.cstate |=
BT_COEX_STATE_WIFI_RSSI_LOW;
rtlpcipriv->bt_coexist.cstate &=
~BT_COEX_STATE_WIFI_RSSI_HIGH;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[DM][BT], RSSI state switch to Low\n");
} else {
bt_rssi_state = BT_RSSI_STATE_STAY_HIGH;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[DM][BT], RSSI state stay at High\n");
}
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[DM][BT], RSSI thresh error!!\n");
return rtlpcipriv->bt_coexist.bt_pre_rssi_state;
}
if ((rtlpcipriv->bt_coexist.bt_pre_rssi_state ==
BT_RSSI_STATE_LOW) ||
(rtlpcipriv->bt_coexist.bt_pre_rssi_state ==
BT_RSSI_STATE_STAY_LOW)) {
if (smooth >=
(rssi_thresh + BT_FW_COEX_THRESH_TOL)) {
bt_rssi_state = BT_RSSI_STATE_MEDIUM;
rtlpcipriv->bt_coexist.cstate
|= BT_COEX_STATE_WIFI_RSSI_MEDIUM;
rtlpcipriv->bt_coexist.cstate
&= ~BT_COEX_STATE_WIFI_RSSI_LOW;
rtlpcipriv->bt_coexist.cstate
&= ~BT_COEX_STATE_WIFI_RSSI_HIGH;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[DM][BT], RSSI state switch to Medium\n");
} else {
bt_rssi_state = BT_RSSI_STATE_STAY_LOW;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[DM][BT], RSSI state stay at Low\n");
}
} else if ((rtlpcipriv->bt_coexist.bt_pre_rssi_state ==
BT_RSSI_STATE_MEDIUM) ||
(rtlpcipriv->bt_coexist.bt_pre_rssi_state ==
BT_RSSI_STATE_STAY_MEDIUM)) {
if (smooth >=
(rssi_thresh1 + BT_FW_COEX_THRESH_TOL)) {
bt_rssi_state = BT_RSSI_STATE_HIGH;
rtlpcipriv->bt_coexist.cstate
|= BT_COEX_STATE_WIFI_RSSI_HIGH;
rtlpcipriv->bt_coexist.cstate
&= ~BT_COEX_STATE_WIFI_RSSI_LOW;
rtlpcipriv->bt_coexist.cstate
&= ~BT_COEX_STATE_WIFI_RSSI_MEDIUM;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[DM][BT], RSSI state switch to High\n");
} else if (smooth < rssi_thresh) {
bt_rssi_state = BT_RSSI_STATE_LOW;
rtlpcipriv->bt_coexist.cstate
|= BT_COEX_STATE_WIFI_RSSI_LOW;
rtlpcipriv->bt_coexist.cstate
&= ~BT_COEX_STATE_WIFI_RSSI_HIGH;
rtlpcipriv->bt_coexist.cstate
&= ~BT_COEX_STATE_WIFI_RSSI_MEDIUM;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[DM][BT], RSSI state switch to Low\n");
} else {
bt_rssi_state = BT_RSSI_STATE_STAY_MEDIUM;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[DM][BT], RSSI state stay at Medium\n");
}
} else {
if (smooth < rssi_thresh1) {
bt_rssi_state = BT_RSSI_STATE_MEDIUM;
rtlpcipriv->bt_coexist.cstate
|= BT_COEX_STATE_WIFI_RSSI_MEDIUM;
rtlpcipriv->bt_coexist.cstate
&= ~BT_COEX_STATE_WIFI_RSSI_HIGH;
rtlpcipriv->bt_coexist.cstate
&= ~BT_COEX_STATE_WIFI_RSSI_LOW;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[DM][BT], RSSI state switch to Medium\n");
} else {
bt_rssi_state = BT_RSSI_STATE_STAY_HIGH;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[DM][BT], RSSI state stay at High\n");
}
}
}
rtlpcipriv->bt_coexist.bt_pre_rssi_state = bt_rssi_state;
return bt_rssi_state;
}
long rtl8723ae_dm_bt_get_rx_ss(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
long smooth = 0;
if (rtlpriv->mac80211.link_state >= MAC80211_LINKED)
smooth = GET_UNDECORATED_AVERAGE_RSSI(rtlpriv);
else
smooth = rtlpriv->dm.entry_min_undec_sm_pwdb;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"rtl8723ae_dm_bt_get_rx_ss() = %ld\n", smooth);
return smooth;
}
void rtl8723ae_dm_bt_balance(struct ieee80211_hw *hw,
bool balance_on, u8 ms0, u8 ms1)
{
struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 h2c_parameter[3] = {0};
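/* H2C cmd 0xc payload: [0] = ms0, [1] = ms1, [2] = balance on/off */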
if (balance_on) {
h2c_parameter[2] = 1;
h2c_parameter[1] = ms1;
h2c_parameter[0] = ms0;
rtlpcipriv->bt_coexist.fw_coexist_all_off = false;
} else {
h2c_parameter[2] = 0;
h2c_parameter[1] = 0;
h2c_parameter[0] = 0;
}
rtlpcipriv->bt_coexist.balance_on = balance_on;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[DM][BT], Balance=[%s:%dms:%dms], write 0xc=0x%x\n",
balance_on ? "ON" : "OFF", ms0, ms1,
h2c_parameter[0]<<16 | h2c_parameter[1]<<8 | h2c_parameter[2]);
rtl8723ae_fill_h2c_cmd(hw, 0xc, 3, h2c_parameter);
}
void rtl8723ae_dm_bt_agc_table(struct ieee80211_hw *hw, u8 type)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
if (type == BT_AGCTABLE_OFF) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BT]AGCTable Off!\n");
rtl_write_dword(rtlpriv, 0xc78, 0x641c0001);
rtl_write_dword(rtlpriv, 0xc78, 0x631d0001);
rtl_write_dword(rtlpriv, 0xc78, 0x621e0001);
rtl_write_dword(rtlpriv, 0xc78, 0x611f0001);
rtl_write_dword(rtlpriv, 0xc78, 0x60200001);
rtl8723ae_phy_set_rf_reg(hw, RF90_PATH_A,
RF_RX_AGC_HP, 0xfffff, 0x32000);
rtl8723ae_phy_set_rf_reg(hw, RF90_PATH_A,
RF_RX_AGC_HP, 0xfffff, 0x71000);
rtl8723ae_phy_set_rf_reg(hw, RF90_PATH_A,
RF_RX_AGC_HP, 0xfffff, 0xb0000);
rtl8723ae_phy_set_rf_reg(hw, RF90_PATH_A,
RF_RX_AGC_HP, 0xfffff, 0xfc000);
rtl8723ae_phy_set_rf_reg(hw, RF90_PATH_A,
RF_RX_G1, 0xfffff, 0x30355);
} else if (type == BT_AGCTABLE_ON) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BT]AGCTable On!\n");
rtl_write_dword(rtlpriv, 0xc78, 0x4e1c0001);
rtl_write_dword(rtlpriv, 0xc78, 0x4d1d0001);
rtl_write_dword(rtlpriv, 0xc78, 0x4c1e0001);
rtl_write_dword(rtlpriv, 0xc78, 0x4b1f0001);
rtl_write_dword(rtlpriv, 0xc78, 0x4a200001);
rtl8723ae_phy_set_rf_reg(hw, RF90_PATH_A,
RF_RX_AGC_HP, 0xfffff, 0xdc000);
rtl8723ae_phy_set_rf_reg(hw, RF90_PATH_A,
RF_RX_AGC_HP, 0xfffff, 0x90000);
rtl8723ae_phy_set_rf_reg(hw, RF90_PATH_A,
RF_RX_AGC_HP, 0xfffff, 0x51000);
rtl8723ae_phy_set_rf_reg(hw, RF90_PATH_A,
RF_RX_AGC_HP, 0xfffff, 0x12000);
rtl8723ae_phy_set_rf_reg(hw, RF90_PATH_A,
RF_RX_G1, 0xfffff, 0x00355);
rtlpcipriv->bt_coexist.sw_coexist_all_off = false;
}
}
void rtl8723ae_dm_bt_bback_off_level(struct ieee80211_hw *hw, u8 type)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
if (type == BT_BB_BACKOFF_OFF) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BT]BBBackOffLevel Off!\n");
rtl_write_dword(rtlpriv, 0xc04, 0x3a05611);
} else if (type == BT_BB_BACKOFF_ON) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BT]BBBackOffLevel On!\n");
rtl_write_dword(rtlpriv, 0xc04, 0x3a07611);
rtlpcipriv->bt_coexist.sw_coexist_all_off = false;
}
}
void rtl8723ae_dm_bt_fw_coex_all_off(struct ieee80211_hw *hw)
{
struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
struct rtl_priv *rtlpriv = rtl_priv(hw);
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"rtl8723ae_dm_bt_fw_coex_all_off()\n");
if (rtlpcipriv->bt_coexist.fw_coexist_all_off)
return;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"rtl8723ae_dm_bt_fw_coex_all_off(), real Do\n");
rtl8723ae_dm_bt_fw_coex_all_off_8723a(hw);
rtlpcipriv->bt_coexist.fw_coexist_all_off = true;
}
void rtl8723ae_dm_bt_sw_coex_all_off(struct ieee80211_hw *hw)
{
struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
struct rtl_priv *rtlpriv = rtl_priv(hw);
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"rtl8723ae_dm_bt_sw_coex_all_off()\n");
if (rtlpcipriv->bt_coexist.sw_coexist_all_off)
return;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"rtl8723ae_dm_bt_sw_coex_all_off(), real Do\n");
rtl8723ae_dm_bt_sw_coex_all_off_8723a(hw);
rtlpcipriv->bt_coexist.sw_coexist_all_off = true;
}
void rtl8723ae_dm_bt_hw_coex_all_off(struct ieee80211_hw *hw)
{
struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
struct rtl_priv *rtlpriv = rtl_priv(hw);
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"rtl8723ae_dm_bt_hw_coex_all_off()\n");
if (rtlpcipriv->bt_coexist.hw_coexist_all_off)
return;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"rtl8723ae_dm_bt_hw_coex_all_off(), real Do\n");
rtl8723ae_dm_bt_hw_coex_all_off_8723a(hw);
rtlpcipriv->bt_coexist.hw_coexist_all_off = true;
}
void rtl8723ae_btdm_coex_all_off(struct ieee80211_hw *hw)
{
rtl8723ae_dm_bt_fw_coex_all_off(hw);
rtl8723ae_dm_bt_sw_coex_all_off(hw);
rtl8723ae_dm_bt_hw_coex_all_off(hw);
}
bool rtl8723ae_dm_bt_is_coexist_state_changed(struct ieee80211_hw *hw)
{
struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
if ((rtlpcipriv->bt_coexist.previous_state ==
rtlpcipriv->bt_coexist.cstate) &&
(rtlpcipriv->bt_coexist.previous_state_h ==
rtlpcipriv->bt_coexist.cstate_h))
return false;
else
return true;
}
bool rtl8723ae_dm_bt_is_wifi_up_link(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (rtlpriv->link_info.tx_busy_traffic)
return true;
else
return false;
}
| gpl-2.0 |
primiano/udoo_kernel_imx | drivers/ide/ht6560b.c | 3250 | 10678 | /*
* Copyright (C) 1995-2000 Linus Torvalds & author (see below)
*/
/*
* HT-6560B EIDE-controller support
* To activate controller support use kernel parameter "ide0=ht6560b".
* Use hdparm utility to enable PIO mode support.
*
* Author: Mikko Ala-Fossi <maf@iki.fi>
* Jan Evert van Grootheest <j.e.van.grootheest@caiway.nl>
*
*/
#define DRV_NAME "ht6560b"
#define HT6560B_VERSION "v0.08"
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/ioport.h>
#include <linux/blkdev.h>
#include <linux/ide.h>
#include <linux/init.h>
#include <asm/io.h>
/* #define DEBUG */ /* remove comments for DEBUG messages */
/*
* The special i/o-port that HT-6560B uses to configuration:
* bit0 (0x01): "1" selects secondary interface
* bit2 (0x04): "1" enables FIFO function
* bit5 (0x20): "1" enables prefetched data read function (???)
*
* The special i/o-port that HT-6560A uses to configuration:
* bit0 (0x01): "1" selects secondary interface
* bit1 (0x02): "1" enables prefetched data read function
* bit2 (0x04): "0" enables multi-master system (?)
* bit3 (0x08): "1" 3 cycle time, "0" 2 cycle time (?)
*/
#define HT_CONFIG_PORT 0x3e6
static inline u8 HT_CONFIG(ide_drive_t *drive)
{
return ((unsigned long)ide_get_drivedata(drive) & 0xff00) >> 8;
}
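/*
* Per-drive data word layout (see ht6560b_init_dev() below): bits 15..8
* hold the config byte for HT_CONFIG_PORT, bits 7..0 the timing byte.
*/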
/*
* FIFO + PREFETCH (both a/b-model)
*/
#define HT_CONFIG_DEFAULT 0x1c /* no prefetch */
/* #define HT_CONFIG_DEFAULT 0x3c */ /* with prefetch */
#define HT_SECONDARY_IF 0x01
#define HT_PREFETCH_MODE 0x20
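/*
* Note that the prefetching variant of HT_CONFIG_DEFAULT above is
* simply HT_CONFIG_DEFAULT | HT_PREFETCH_MODE (0x1c | 0x20 == 0x3c).
*/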
/*
* ht6560b Timing values:
*
* I reviewed some assembler source listings of htide drivers and found
* out how they set up those cycle time interfacing values, as they are
* called at Holtek. IDESETUP.COM, which is supplied with the drivers,
* figures out optimal values and feeds those values to the drivers. I
* found out that they use the Select register to fetch timings to the
* ide board right after interface switching. After that it was quite
* easy to add code to ht6560b.c.
*
* IDESETUP.COM gave me values 0x24, 0x45, 0xaa, 0xff that worked fine
* for hda and hdc. But hdb needed higher values to work, so I guess
* that sometimes it is necessary to give a higher value than IDESETUP
* gives. [see cmd640.c for an extreme example of this. -ml]
*
* Perhaps I should explain something about these timing values:
* The higher nibble of the value is the Recovery Time (rt) and the lower
* nibble is the Active Time (at). The minimum value 2 is the fastest and
* the maximum value 15 is the slowest. Default values should be 15 for
* both. So 0x24 means 2 for rt and 4 for at. Each of the drives should
* have both values, and IDESETUP automatically gives rt=15 at=15 for
* CDROMs or similar. If the value is too small there will be all sorts
* of failures.
*
* Timing byte consists of
* High nibble: Recovery Cycle Time (rt)
* The valid values range from 2 to 15. The default is 15.
*
* Low nibble: Active Cycle Time (at)
* The valid values range from 2 to 15. The default is 15.
*
* You can obtain optimized timing values by running Holtek IDESETUP.COM
* for DOS. DOS drivers get their timing values from the command line,
* where the first value is the Recovery Time and the second value is
* the Active Time for each drive. A smaller value gives higher speed.
* In case of failures you should probably fall back to a higher value.
*/
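/*
* Illustrative helper (a sketch, not part of the original driver):
* packing separate rt/at values into one timing byte as described
* above, assuming both are already clamped to the valid 2..15 range.
*/
static inline u8 ht_make_timing(u8 rt, u8 at)
{
return (u8)((rt << 4) | (at & 0x0f)); /* e.g. rt=2, at=4 -> 0x24 */
}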
static inline u8 HT_TIMING(ide_drive_t *drive)
{
return (unsigned long)ide_get_drivedata(drive) & 0x00ff;
}
#define HT_TIMING_DEFAULT 0xff
/*
* This routine handles interface switching for the peculiar hardware design
* on the F.G.I./Holtek HT-6560B VLB IDE interface.
* The HT-6560B can only enable one IDE port at a time, and requires a
* silly sequence (below) whenever we switch between primary and secondary.
*/
/*
* This routine is invoked from ide.c to prepare for access to a given drive.
*/
static void ht6560b_dev_select(ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
unsigned long flags;
static u8 current_select = 0;
static u8 current_timing = 0;
u8 select, timing;
local_irq_save(flags);
select = HT_CONFIG(drive);
timing = HT_TIMING(drive);
/*
* Need to enforce prefetch sometimes because otherwise
* it'll hang (hard).
*/
if (drive->media != ide_disk ||
(drive->dev_flags & IDE_DFLAG_PRESENT) == 0)
select |= HT_PREFETCH_MODE;
if (select != current_select || timing != current_timing) {
current_select = select;
current_timing = timing;
(void)inb(HT_CONFIG_PORT);
(void)inb(HT_CONFIG_PORT);
(void)inb(HT_CONFIG_PORT);
(void)inb(HT_CONFIG_PORT);
outb(select, HT_CONFIG_PORT);
/*
* Set timing for this drive:
*/
outb(timing, hwif->io_ports.device_addr);
(void)inb(hwif->io_ports.status_addr);
#ifdef DEBUG
printk("ht6560b: %s: select=%#x timing=%#x\n",
drive->name, select, timing);
#endif
}
local_irq_restore(flags);
outb(drive->select | ATA_DEVICE_OBS, hwif->io_ports.device_addr);
}
/*
* Autodetection and initialization of ht6560b
*/
static int __init try_to_init_ht6560b(void)
{
u8 orig_value;
int i;
/* Autodetect ht6560b */
if ((orig_value = inb(HT_CONFIG_PORT)) == 0xff)
return 0;
for (i = 3; i > 0; i--) {
outb(0x00, HT_CONFIG_PORT);
if (!((~inb(HT_CONFIG_PORT)) & 0x3f)) {
outb(orig_value, HT_CONFIG_PORT);
return 0;
}
}
outb(0x00, HT_CONFIG_PORT);
if ((~inb(HT_CONFIG_PORT)) & 0x3f) {
outb(orig_value, HT_CONFIG_PORT);
return 0;
}
/*
* Ht6560b autodetected
*/
outb(HT_CONFIG_DEFAULT, HT_CONFIG_PORT);
outb(HT_TIMING_DEFAULT, 0x1f6); /* Select register */
(void)inb(0x1f7); /* Status register */
printk("ht6560b " HT6560B_VERSION
": chipset detected and initialized"
#ifdef DEBUG
" with debug enabled"
#endif
"\n"
);
return 1;
}
static u8 ht_pio2timings(ide_drive_t *drive, const u8 pio)
{
int active_time, recovery_time;
int active_cycles, recovery_cycles;
int bus_speed = ide_vlb_clk ? ide_vlb_clk : 50;
if (pio) {
unsigned int cycle_time;
struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio);
cycle_time = ide_pio_cycle_time(drive, pio);
/*
* Just like opti621.c we try to calculate the
* actual cycle time for recovery and activity
* according system bus speed.
*/
active_time = t->active;
recovery_time = cycle_time - active_time - t->setup;
/*
* Cycle times should be Vesa bus cycles
*/
active_cycles = (active_time * bus_speed + 999) / 1000;
recovery_cycles = (recovery_time * bus_speed + 999) / 1000;
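/*
* Worked example: active_time = 165 ns on a 50 MHz VLB bus gives
* (165 * 50 + 999) / 1000 = 9 cycles; the +999 rounds up.
*/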
/*
* Upper and lower limits
*/
if (active_cycles < 2) active_cycles = 2;
if (recovery_cycles < 2) recovery_cycles = 2;
if (active_cycles > 15) active_cycles = 15;
if (recovery_cycles > 15) recovery_cycles = 0; /* 0==16 */
#ifdef DEBUG
printk("ht6560b: drive %s setting pio=%d recovery=%d (%dns) active=%d (%dns)\n", drive->name, pio, recovery_cycles, recovery_time, active_cycles, active_time);
#endif
return (u8)((recovery_cycles << 4) | active_cycles);
} else {
#ifdef DEBUG
printk("ht6560b: drive %s setting pio=0\n", drive->name);
#endif
return HT_TIMING_DEFAULT; /* default setting */
}
}
static DEFINE_SPINLOCK(ht6560b_lock);
/*
* Enable/Disable so called prefetch mode
*/
static void ht_set_prefetch(ide_drive_t *drive, u8 state)
{
unsigned long flags, config;
int t = HT_PREFETCH_MODE << 8;
spin_lock_irqsave(&ht6560b_lock, flags);
config = (unsigned long)ide_get_drivedata(drive);
/*
* Prefetch mode and unmask irq seems to conflict
*/
if (state) {
config |= t; /* enable prefetch mode */
drive->dev_flags |= IDE_DFLAG_NO_UNMASK;
drive->dev_flags &= ~IDE_DFLAG_UNMASK;
} else {
config &= ~t; /* disable prefetch mode */
drive->dev_flags &= ~IDE_DFLAG_NO_UNMASK;
}
ide_set_drivedata(drive, (void *)config);
spin_unlock_irqrestore(&ht6560b_lock, flags);
#ifdef DEBUG
printk("ht6560b: drive %s prefetch mode %sabled\n", drive->name, (state ? "en" : "dis"));
#endif
}
static void ht6560b_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
unsigned long flags, config;
const u8 pio = drive->pio_mode - XFER_PIO_0;
u8 timing;
switch (pio) {
case 8: /* set prefetch off */
case 9: /* set prefetch on */
ht_set_prefetch(drive, pio & 1);
return;
}
timing = ht_pio2timings(drive, pio);
spin_lock_irqsave(&ht6560b_lock, flags);
config = (unsigned long)ide_get_drivedata(drive);
config &= 0xff00;
config |= timing;
ide_set_drivedata(drive, (void *)config);
spin_unlock_irqrestore(&ht6560b_lock, flags);
#ifdef DEBUG
printk("ht6560b: drive %s tuned to pio mode %#x timing=%#x\n", drive->name, pio, timing);
#endif
}
static void __init ht6560b_init_dev(ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
/* Setting default configurations for drives. */
int t = (HT_CONFIG_DEFAULT << 8) | HT_TIMING_DEFAULT;
if (hwif->channel)
t |= (HT_SECONDARY_IF << 8);
ide_set_drivedata(drive, (void *)t);
}
static int probe_ht6560b;
module_param_named(probe, probe_ht6560b, bool, 0);
MODULE_PARM_DESC(probe, "probe for HT6560B chipset");
static const struct ide_tp_ops ht6560b_tp_ops = {
.exec_command = ide_exec_command,
.read_status = ide_read_status,
.read_altstatus = ide_read_altstatus,
.write_devctl = ide_write_devctl,
.dev_select = ht6560b_dev_select,
.tf_load = ide_tf_load,
.tf_read = ide_tf_read,
.input_data = ide_input_data,
.output_data = ide_output_data,
};
static const struct ide_port_ops ht6560b_port_ops = {
.init_dev = ht6560b_init_dev,
.set_pio_mode = ht6560b_set_pio_mode,
};
static const struct ide_port_info ht6560b_port_info __initdata = {
.name = DRV_NAME,
.chipset = ide_ht6560b,
.tp_ops = &ht6560b_tp_ops,
.port_ops = &ht6560b_port_ops,
.host_flags = IDE_HFLAG_SERIALIZE | /* is this needed? */
IDE_HFLAG_NO_DMA |
IDE_HFLAG_ABUSE_PREFETCH,
.pio_mask = ATA_PIO4,
};
static int __init ht6560b_init(void)
{
if (probe_ht6560b == 0)
return -ENODEV;
if (!request_region(HT_CONFIG_PORT, 1, DRV_NAME)) {
printk(KERN_NOTICE "%s: HT_CONFIG_PORT not found\n",
__func__);
return -ENODEV;
}
if (!try_to_init_ht6560b()) {
printk(KERN_NOTICE "%s: HBA not found\n", __func__);
goto release_region;
}
return ide_legacy_device_add(&ht6560b_port_info, 0);
release_region:
release_region(HT_CONFIG_PORT, 1);
return -ENODEV;
}
module_init(ht6560b_init);
MODULE_AUTHOR("See Local File");
MODULE_DESCRIPTION("HT-6560B EIDE-controller support");
MODULE_LICENSE("GPL");
| gpl-2.0 |
zaventh/android_kernel_lge_hammerhead | drivers/video/msm/mipi_NT35510_video_wvga_pt.c | 3506 | 3315 | /* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include "msm_fb.h"
#include "mipi_dsi.h"
#include "mipi_NT35510.h"
static struct msm_panel_info pinfo;
static struct mipi_dsi_phy_ctrl dsi_video_mode_phy_db = {
/* DSI Bit Clock at 500 MHz, 2 lane, RGB888 */
/* regulator */
{0x03, 0x01, 0x01, 0x00},
/* timing */
{0xb9, 0x8e, 0x1f, 0x00, 0x98, 0x9c, 0x22, 0x90,
0x18, 0x03, 0x04},
/* phy ctrl */
{0x7f, 0x00, 0x00, 0x00},
/* strength */
{0xbb, 0x02, 0x06, 0x00},
/* pll control */
{0x00, 0xec, 0x31, 0xd2, 0x00, 0x40, 0x37, 0x62,
0x01, 0x0f, 0x07,
0x05, 0x14, 0x03, 0x0, 0x0, 0x0, 0x20, 0x0, 0x02, 0x0},
};
static int mipi_video_nt35510_wvga_pt_init(void)
{
int ret;
if (msm_fb_detect_client("mipi_video_nt35510_wvga"))
return 0;
pinfo.xres = 480;
pinfo.yres = 800;
pinfo.type = MIPI_VIDEO_PANEL;
pinfo.pdest = DISPLAY_1;
pinfo.wait_cycle = 0;
pinfo.bpp = 24;
pinfo.lcdc.h_back_porch = 100;
pinfo.lcdc.h_front_porch = 100;
pinfo.lcdc.h_pulse_width = 8;
pinfo.lcdc.v_back_porch = 20;
pinfo.lcdc.v_front_porch = 20;
pinfo.lcdc.v_pulse_width = 1;
pinfo.lcdc.border_clr = 0; /* blk */
pinfo.lcdc.underflow_clr = 0xff; /* blue */
/* number of dot_clk cycles HSYNC active edge is
delayed from VSYNC active edge */
pinfo.lcdc.hsync_skew = 0;
pinfo.clk_rate = 499000000;
pinfo.bl_max = 255;
pinfo.bl_min = 1;
pinfo.fb_num = 2;
pinfo.mipi.mode = DSI_VIDEO_MODE;
/* send HSA and HE following VS/VE packet */
pinfo.mipi.pulse_mode_hsa_he = TRUE;
pinfo.mipi.hfp_power_stop = TRUE; /* LP-11 during the HFP period */
pinfo.mipi.hbp_power_stop = TRUE; /* LP-11 during the HBP period */
pinfo.mipi.hsa_power_stop = TRUE; /* LP-11 during the HSA period */
/* LP-11 or let Command Mode Engine send packets in
HS or LP mode for the BLLP of the last line of a frame */
pinfo.mipi.eof_bllp_power_stop = TRUE;
/* LP-11 or let Command Mode Engine send packets in
HS or LP mode for packets sent during BLLP period */
pinfo.mipi.bllp_power_stop = TRUE;
pinfo.mipi.traffic_mode = DSI_BURST_MODE;
pinfo.mipi.dst_format = DSI_VIDEO_DST_FORMAT_RGB888;
pinfo.mipi.vc = 0;
pinfo.mipi.rgb_swap = DSI_RGB_SWAP_RGB; /* RGB */
pinfo.mipi.data_lane0 = TRUE;
pinfo.mipi.data_lane1 = TRUE;
pinfo.mipi.t_clk_post = 0x20;
pinfo.mipi.t_clk_pre = 0x2f;
pinfo.mipi.stream = 0; /* dma_p */
pinfo.mipi.mdp_trigger = DSI_CMD_TRIGGER_NONE;
pinfo.mipi.dma_trigger = DSI_CMD_TRIGGER_SW;
pinfo.mipi.frame_rate = 60; /* FIXME */
pinfo.mipi.dsi_phy_db = &dsi_video_mode_phy_db;
pinfo.mipi.dlane_swap = 0x01;
/* append EOT at the end of data burst */
pinfo.mipi.tx_eot_append = 0x01;
ret = mipi_nt35510_device_register(&pinfo, MIPI_DSI_PRIM,
MIPI_DSI_PANEL_WVGA_PT);
if (ret)
pr_err("%s: failed to register device!\n", __func__);
return ret;
}
module_init(mipi_video_nt35510_wvga_pt_init);
| gpl-2.0 |
lnfamous/Kernel_CyanogenMod11_Pico | drivers/media/dvb/frontends/sp8870.c | 4786 | 14719 | /*
Driver for Spase SP8870 demodulator
Copyright (C) 1999 Juergen Peitz
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
* This driver needs external firmware. Please use the command
* "<kerneldir>/Documentation/dvb/get_dvb_firmware alps_tdlb7" to
* download/extract it, and then copy it to /usr/lib/hotplug/firmware
* or /lib/firmware (depending on configuration of firmware hotplug).
*/
#define SP8870_DEFAULT_FIRMWARE "dvb-fe-sp8870.fw"
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/slab.h>
#include "dvb_frontend.h"
#include "sp8870.h"
struct sp8870_state {
struct i2c_adapter* i2c;
const struct sp8870_config* config;
struct dvb_frontend frontend;
/* demodulator private data */
u8 initialised:1;
};
static int debug;
#define dprintk(args...) \
do { \
if (debug) printk(KERN_DEBUG "sp8870: " args); \
} while (0)
/* firmware size for sp8870 */
#define SP8870_FIRMWARE_SIZE 16382
/* starting point for firmware in file 'Sc_main.mc' */
#define SP8870_FIRMWARE_OFFSET 0x0A
static int sp8870_writereg (struct sp8870_state* state, u16 reg, u16 data)
{
u8 buf [] = { reg >> 8, reg & 0xff, data >> 8, data & 0xff };
struct i2c_msg msg = { .addr = state->config->demod_address, .flags = 0, .buf = buf, .len = 4 };
int err;
if ((err = i2c_transfer (state->i2c, &msg, 1)) != 1) {
dprintk ("%s: writereg error (err == %i, reg == 0x%02x, data == 0x%02x)\n", __func__, err, reg, data);
return -EREMOTEIO;
}
return 0;
}
static int sp8870_readreg (struct sp8870_state* state, u16 reg)
{
int ret;
u8 b0 [] = { reg >> 8 , reg & 0xff };
u8 b1 [] = { 0, 0 };
struct i2c_msg msg [] = { { .addr = state->config->demod_address, .flags = 0, .buf = b0, .len = 2 },
{ .addr = state->config->demod_address, .flags = I2C_M_RD, .buf = b1, .len = 2 } };
ret = i2c_transfer (state->i2c, msg, 2);
if (ret != 2) {
dprintk("%s: readreg error (ret == %i)\n", __func__, ret);
return -1;
}
return (b1[0] << 8 | b1[1]);
}
static int sp8870_firmware_upload (struct sp8870_state* state, const struct firmware *fw)
{
struct i2c_msg msg;
const char *fw_buf = fw->data;
int fw_pos;
u8 tx_buf[255];
int tx_len;
int err = 0;
dprintk ("%s: ...\n", __func__);
if (fw->size < SP8870_FIRMWARE_SIZE + SP8870_FIRMWARE_OFFSET)
return -EINVAL;
// system controller stop
sp8870_writereg(state, 0x0F00, 0x0000);
// instruction RAM register hiword
sp8870_writereg(state, 0x8F08, ((SP8870_FIRMWARE_SIZE / 2) & 0xFFFF));
// instruction RAM MWR
sp8870_writereg(state, 0x8F0A, ((SP8870_FIRMWARE_SIZE / 2) >> 16));
// do firmware upload
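/*
 * The firmware is streamed in chunks of at most 252 bytes; together with
 * the two register-address bytes prepended below, each i2c message fills
 * at most 254 of the 255 bytes in tx_buf.
 */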
fw_pos = SP8870_FIRMWARE_OFFSET;
while (fw_pos < SP8870_FIRMWARE_SIZE + SP8870_FIRMWARE_OFFSET){
tx_len = (fw_pos <= SP8870_FIRMWARE_SIZE + SP8870_FIRMWARE_OFFSET - 252) ? 252 : SP8870_FIRMWARE_SIZE + SP8870_FIRMWARE_OFFSET - fw_pos;
// write register 0xCF0A
tx_buf[0] = 0xCF;
tx_buf[1] = 0x0A;
memcpy(&tx_buf[2], fw_buf + fw_pos, tx_len);
msg.addr = state->config->demod_address;
msg.flags = 0;
msg.buf = tx_buf;
msg.len = tx_len + 2;
if ((err = i2c_transfer (state->i2c, &msg, 1)) != 1) {
printk("%s: firmware upload failed!\n", __func__);
printk ("%s: i2c error (err == %i)\n", __func__, err);
return err;
}
fw_pos += tx_len;
}
dprintk ("%s: done!\n", __func__);
return 0;
}
static void sp8870_microcontroller_stop (struct sp8870_state* state)
{
sp8870_writereg(state, 0x0F08, 0x000);
sp8870_writereg(state, 0x0F09, 0x000);
// microcontroller STOP
sp8870_writereg(state, 0x0F00, 0x000);
}
static void sp8870_microcontroller_start (struct sp8870_state* state)
{
sp8870_writereg(state, 0x0F08, 0x000);
sp8870_writereg(state, 0x0F09, 0x000);
// microcontroller START
sp8870_writereg(state, 0x0F00, 0x001);
// not documented but if we don't read 0x0D01 out here
// we don't get a correct data valid signal
sp8870_readreg(state, 0x0D01);
}
static int sp8870_read_data_valid_signal(struct sp8870_state* state)
{
return (sp8870_readreg(state, 0x0D02) > 0);
}
static int configure_reg0xc05 (struct dvb_frontend_parameters *p, u16 *reg0xc05)
{
int known_parameters = 1;
*reg0xc05 = 0x000;
switch (p->u.ofdm.constellation) {
case QPSK:
break;
case QAM_16:
*reg0xc05 |= (1 << 10);
break;
case QAM_64:
*reg0xc05 |= (2 << 10);
break;
case QAM_AUTO:
known_parameters = 0;
break;
default:
return -EINVAL;
}
switch (p->u.ofdm.hierarchy_information) {
case HIERARCHY_NONE:
break;
case HIERARCHY_1:
*reg0xc05 |= (1 << 7);
break;
case HIERARCHY_2:
*reg0xc05 |= (2 << 7);
break;
case HIERARCHY_4:
*reg0xc05 |= (3 << 7);
break;
case HIERARCHY_AUTO:
known_parameters = 0;
break;
default:
return -EINVAL;
}
switch (p->u.ofdm.code_rate_HP) {
case FEC_1_2:
break;
case FEC_2_3:
*reg0xc05 |= (1 << 3);
break;
case FEC_3_4:
*reg0xc05 |= (2 << 3);
break;
case FEC_5_6:
*reg0xc05 |= (3 << 3);
break;
case FEC_7_8:
*reg0xc05 |= (4 << 3);
break;
case FEC_AUTO:
known_parameters = 0;
break;
default:
return -EINVAL;
}
if (known_parameters)
*reg0xc05 |= (2 << 1); /* use specified parameters */
else
*reg0xc05 |= (1 << 1); /* enable autoprobing */
return 0;
}
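/*
 * Resulting layout of register 0xC05 as assembled above: bits 11..10
 * constellation, bits 9..7 hierarchy, bits 5..3 HP code rate, and
 * bits 2..1 select fixed parameters (2) or autoprobing (1).
 */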
static int sp8870_wake_up(struct sp8870_state* state)
{
// enable TS output and interface pins
return sp8870_writereg(state, 0xC18, 0x00D);
}
static int sp8870_set_frontend_parameters (struct dvb_frontend* fe,
struct dvb_frontend_parameters *p)
{
struct sp8870_state* state = fe->demodulator_priv;
int err;
u16 reg0xc05;
if ((err = configure_reg0xc05(p, &reg0xc05)))
return err;
// system controller stop
sp8870_microcontroller_stop(state);
// set tuner parameters
if (fe->ops.tuner_ops.set_params) {
fe->ops.tuner_ops.set_params(fe, p);
if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0);
}
// sample rate correction bit [23..17]
sp8870_writereg(state, 0x0319, 0x000A);
// sample rate correction bit [16..0]
sp8870_writereg(state, 0x031A, 0x0AAB);
// integer carrier offset
sp8870_writereg(state, 0x0309, 0x0400);
// fractional carrier offset
sp8870_writereg(state, 0x030A, 0x0000);
// filter for 6/7/8 MHz channel
if (p->u.ofdm.bandwidth == BANDWIDTH_6_MHZ)
sp8870_writereg(state, 0x0311, 0x0002);
else if (p->u.ofdm.bandwidth == BANDWIDTH_7_MHZ)
sp8870_writereg(state, 0x0311, 0x0001);
else
sp8870_writereg(state, 0x0311, 0x0000);
// scan order: 2k first = 0x0000, 8k first = 0x0001
if (p->u.ofdm.transmission_mode == TRANSMISSION_MODE_2K)
sp8870_writereg(state, 0x0338, 0x0000);
else
sp8870_writereg(state, 0x0338, 0x0001);
sp8870_writereg(state, 0xc05, reg0xc05);
// read status reg in order to clear pending irqs
sp8870_readreg(state, 0x200);
// system controller start
sp8870_microcontroller_start(state);
return 0;
}
static int sp8870_init (struct dvb_frontend* fe)
{
struct sp8870_state* state = fe->demodulator_priv;
const struct firmware *fw = NULL;
sp8870_wake_up(state);
if (state->initialised) return 0;
state->initialised = 1;
dprintk ("%s\n", __func__);
/* request the firmware, this will block until someone uploads it */
printk("sp8870: waiting for firmware upload (%s)...\n", SP8870_DEFAULT_FIRMWARE);
if (state->config->request_firmware(fe, &fw, SP8870_DEFAULT_FIRMWARE)) {
printk("sp8870: no firmware upload (timeout or file not found?)\n");
return -EIO;
}
if (sp8870_firmware_upload(state, fw)) {
printk("sp8870: writing firmware to device failed\n");
release_firmware(fw);
return -EIO;
}
release_firmware(fw);
printk("sp8870: firmware upload complete\n");
/* enable TS output and interface pins */
sp8870_writereg(state, 0xc18, 0x00d);
// system controller stop
sp8870_microcontroller_stop(state);
// ADC mode
sp8870_writereg(state, 0x0301, 0x0003);
// Reed Solomon parity bytes passed to output
sp8870_writereg(state, 0x0C13, 0x0001);
// MPEG clock is suppressed if no valid data
sp8870_writereg(state, 0x0C14, 0x0001);
/* bit 0x010: enable data valid signal */
sp8870_writereg(state, 0x0D00, 0x010);
sp8870_writereg(state, 0x0D01, 0x000);
return 0;
}
static int sp8870_read_status (struct dvb_frontend* fe, fe_status_t * fe_status)
{
struct sp8870_state* state = fe->demodulator_priv;
int status;
int signal;
*fe_status = 0;
status = sp8870_readreg (state, 0x0200);
if (status < 0)
return -EIO;
signal = sp8870_readreg (state, 0x0303);
if (signal < 0)
return -EIO;
if (signal > 0x0F)
*fe_status |= FE_HAS_SIGNAL;
if (status & 0x08)
*fe_status |= FE_HAS_SYNC;
if (status & 0x04)
*fe_status |= FE_HAS_LOCK | FE_HAS_CARRIER | FE_HAS_VITERBI;
return 0;
}
static int sp8870_read_ber (struct dvb_frontend* fe, u32 * ber)
{
struct sp8870_state* state = fe->demodulator_priv;
int ret;
u32 tmp;
*ber = 0;
ret = sp8870_readreg(state, 0xC08);
if (ret < 0)
return -EIO;
tmp = ret & 0x3F;
ret = sp8870_readreg(state, 0xC07);
if (ret < 0)
return -EIO;
tmp |= ret << 6; /* merge upper bits from 0xC07 with the low 6 bits from 0xC08 */
if (tmp >= 0x3FFF0)
tmp = ~0;
*ber = tmp;
return 0;
}
static int sp8870_read_signal_strength(struct dvb_frontend* fe, u16 * signal)
{
struct sp8870_state* state = fe->demodulator_priv;
int ret;
u16 tmp;
*signal = 0;
ret = sp8870_readreg (state, 0x306);
if (ret < 0)
return -EIO;
tmp = ret << 8;
ret = sp8870_readreg (state, 0x303);
if (ret < 0)
return -EIO;
tmp |= ret;
if (tmp)
*signal = 0xFFFF - tmp;
return 0;
}
static int sp8870_read_uncorrected_blocks (struct dvb_frontend* fe, u32* ublocks)
{
struct sp8870_state* state = fe->demodulator_priv;
int ret;
*ublocks = 0;
ret = sp8870_readreg(state, 0xC0C);
if (ret < 0)
return -EIO;
if (ret == 0xFFFF)
ret = ~0;
*ublocks = ret;
return 0;
}
/* number of trials to recover from lockup */
#define MAXTRIALS 5
/* maximum checks for data valid signal */
#define MAXCHECKS 100
/* only for debugging: counter for detected lockups */
static int lockups;
/* only for debugging: counter for channel switches */
static int switches;
static int sp8870_set_frontend (struct dvb_frontend* fe, struct dvb_frontend_parameters *p)
{
struct sp8870_state* state = fe->demodulator_priv;
/*
The firmware of the sp8870 sometimes locks up after setting frontend parameters.
We try to detect this by checking the data valid signal.
If it is not set after MAXCHECKS we try to recover the lockup by setting
the frontend parameters again.
*/
int err = 0;
int valid = 0;
int trials = 0;
int check_count = 0;
dprintk("%s: frequency = %i\n", __func__, p->frequency);
for (trials = 1; trials <= MAXTRIALS; trials++) {
if ((err = sp8870_set_frontend_parameters(fe, p)))
return err;
for (check_count = 0; check_count < MAXCHECKS; check_count++) {
// valid = ((sp8870_readreg(i2c, 0x0200) & 4) == 0);
valid = sp8870_read_data_valid_signal(state);
if (valid) {
dprintk("%s: delay = %i usec\n",
__func__, check_count * 10);
break;
}
udelay(10);
}
if (valid)
break;
}
if (!valid) {
printk("%s: firmware crash!!!!!!\n", __func__);
return -EIO;
}
if (debug) {
if (valid) {
if (trials > 1) {
printk("%s: firmware lockup!!!\n", __func__);
printk("%s: recovered after %i trial(s))\n", __func__, trials - 1);
lockups++;
}
}
switches++;
printk("%s: switches = %i lockups = %i\n", __func__, switches, lockups);
}
return 0;
}
static int sp8870_sleep(struct dvb_frontend* fe)
{
struct sp8870_state* state = fe->demodulator_priv;
// tristate TS output and disable interface pins
return sp8870_writereg(state, 0xC18, 0x000);
}
static int sp8870_get_tune_settings(struct dvb_frontend* fe, struct dvb_frontend_tune_settings* fesettings)
{
fesettings->min_delay_ms = 350;
fesettings->step_size = 0;
fesettings->max_drift = 0;
return 0;
}
static int sp8870_i2c_gate_ctrl(struct dvb_frontend* fe, int enable)
{
struct sp8870_state* state = fe->demodulator_priv;
if (enable) {
return sp8870_writereg(state, 0x206, 0x001);
} else {
return sp8870_writereg(state, 0x206, 0x000);
}
}
static void sp8870_release(struct dvb_frontend* fe)
{
struct sp8870_state* state = fe->demodulator_priv;
kfree(state);
}
static struct dvb_frontend_ops sp8870_ops;
struct dvb_frontend* sp8870_attach(const struct sp8870_config* config,
struct i2c_adapter* i2c)
{
struct sp8870_state* state = NULL;
/* allocate memory for the internal state */
state = kzalloc(sizeof(struct sp8870_state), GFP_KERNEL);
if (state == NULL) goto error;
/* setup the state */
state->config = config;
state->i2c = i2c;
state->initialised = 0;
/* check if the demod is there */
if (sp8870_readreg(state, 0x0200) < 0) goto error;
/* create dvb_frontend */
memcpy(&state->frontend.ops, &sp8870_ops, sizeof(struct dvb_frontend_ops));
state->frontend.demodulator_priv = state;
return &state->frontend;
error:
kfree(state);
return NULL;
}
static struct dvb_frontend_ops sp8870_ops = {
.info = {
.name = "Spase SP8870 DVB-T",
.type = FE_OFDM,
.frequency_min = 470000000,
.frequency_max = 860000000,
.frequency_stepsize = 166666,
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 |
FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 |
FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
FE_CAN_QPSK | FE_CAN_QAM_16 |
FE_CAN_QAM_64 | FE_CAN_QAM_AUTO |
FE_CAN_HIERARCHY_AUTO | FE_CAN_RECOVER
},
.release = sp8870_release,
.init = sp8870_init,
.sleep = sp8870_sleep,
.i2c_gate_ctrl = sp8870_i2c_gate_ctrl,
.set_frontend = sp8870_set_frontend,
.get_tune_settings = sp8870_get_tune_settings,
.read_status = sp8870_read_status,
.read_ber = sp8870_read_ber,
.read_signal_strength = sp8870_read_signal_strength,
.read_ucblocks = sp8870_read_uncorrected_blocks,
};
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
MODULE_DESCRIPTION("Spase SP8870 DVB-T Demodulator driver");
MODULE_AUTHOR("Juergen Peitz");
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(sp8870_attach);
| gpl-2.0 |
mapleshadow/M7-4.3-Kernel | arch/s390/boot/compressed/misc.c | 4786 | 3788 | /*
* Definitions and wrapper functions for kernel decompressor
*
* Copyright IBM Corp. 2010
*
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/ipl.h>
#include "sizes.h"
/*
* gzip declarations
*/
#define STATIC static
#undef memset
#undef memcpy
#undef memmove
#define memmove memmove
#define memzero(s, n) memset((s), 0, (n))
/* Symbols defined by linker scripts */
extern char input_data[];
extern int input_len;
extern char _text, _end;
extern char _bss, _ebss;
static void error(char *m);
static unsigned long free_mem_ptr;
static unsigned long free_mem_end_ptr;
#ifdef CONFIG_HAVE_KERNEL_BZIP2
#define HEAP_SIZE 0x400000
#else
#define HEAP_SIZE 0x10000
#endif
#ifdef CONFIG_KERNEL_GZIP
#include "../../../../lib/decompress_inflate.c"
#endif
#ifdef CONFIG_KERNEL_BZIP2
#include "../../../../lib/decompress_bunzip2.c"
#endif
#ifdef CONFIG_KERNEL_LZMA
#include "../../../../lib/decompress_unlzma.c"
#endif
#ifdef CONFIG_KERNEL_LZO
#include "../../../../lib/decompress_unlzo.c"
#endif
#ifdef CONFIG_KERNEL_XZ
#include "../../../../lib/decompress_unxz.c"
#endif
extern int _sclp_print_early(const char *);
static int puts(const char *s)
{
_sclp_print_early(s);
return 0;
}
void *memset(void *s, int c, size_t n)
{
char *xs;
if (c == 0)
return __builtin_memset(s, 0, n);
xs = (char *) s;
if (n > 0)
do {
*xs++ = c;
} while (--n > 0);
return s;
}
void *memcpy(void *__dest, __const void *__src, size_t __n)
{
return __builtin_memcpy(__dest, __src, __n);
}
void *memmove(void *__dest, __const void *__src, size_t __n)
{
char *d;
const char *s;
if (__dest <= __src)
return __builtin_memcpy(__dest, __src, __n);
d = __dest + __n;
s = __src + __n;
while (__n--)
*--d = *--s;
return __dest;
}
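/*
 * Illustrative note: for a forward-overlapping move such as
 * memmove(buf + 1, buf, n) the destination lies above the source, so the
 * backward byte copy above prevents source bytes from being overwritten
 * before they are read.
 */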
static void error(char *x)
{
unsigned long long psw = 0x000a0000deadbeefULL;
puts("\n\n");
puts(x);
puts("\n\n -- System halted");
asm volatile("lpsw %0" : : "Q" (psw));
}
/*
* Safeguard the ipl parameter block against a memory area that will be
* overwritten. The validity check for the ipl parameter block is complex
* (see cio_get_iplinfo and ipl_save_parameters) but if the pointer to
* the ipl parameter block intersects with the passed memory area we can
* safely assume that we can read from that memory. In that case just copy
* the memory to IPL_PARMBLOCK_ORIGIN even if there is no ipl parameter
* block.
*/
static void check_ipl_parmblock(void *start, unsigned long size)
{
void *src, *dst;
src = (void *)(unsigned long) S390_lowcore.ipl_parmblock_ptr;
if (src + PAGE_SIZE <= start || src >= start + size)
return;
dst = (void *) IPL_PARMBLOCK_ORIGIN;
memmove(dst, src, PAGE_SIZE);
S390_lowcore.ipl_parmblock_ptr = IPL_PARMBLOCK_ORIGIN;
}
unsigned long decompress_kernel(void)
{
unsigned long output_addr;
unsigned char *output;
output_addr = ((unsigned long) &_end + HEAP_SIZE + 4095UL) & -4096UL;
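/*
 * The expression above rounds up to the next 4 KiB boundary (adding 4095
 * and masking with -4096UL, i.e. ~4095UL), placing the decompressed image
 * on the first page above the heap.
 */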
check_ipl_parmblock((void *) 0, output_addr + SZ__bss_start);
memset(&_bss, 0, &_ebss - &_bss);
free_mem_ptr = (unsigned long)&_end;
free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
output = (unsigned char *) output_addr;
#ifdef CONFIG_BLK_DEV_INITRD
/*
* Move the initrd right behind the end of the decompressed
* kernel image.
*/
if (INITRD_START && INITRD_SIZE &&
INITRD_START < (unsigned long) output + SZ__bss_start) {
check_ipl_parmblock(output + SZ__bss_start,
INITRD_START + INITRD_SIZE);
memmove(output + SZ__bss_start,
(void *) INITRD_START, INITRD_SIZE);
INITRD_START = (unsigned long) output + SZ__bss_start;
}
#endif
puts("Uncompressing Linux... ");
decompress(input_data, input_len, NULL, NULL, output, NULL, error);
puts("Ok, booting the kernel.\n");
return (unsigned long) output;
}
| gpl-2.0 |
El-Nath/biji-find5-kernel | drivers/staging/winbond/wb35reg.c | 8114 | 20746 | #include "wb35reg_f.h"
#include <linux/usb.h>
#include <linux/slab.h>
extern void phy_calibration_winbond(struct hw_data *phw_data, u32 frequency);
/*
* true : read command process successfully
* false : register not support
* RegisterNo : start base
* pRegisterData : data point
* NumberOfData : number of register data
* Flag : AUTO_INCREMENT - RegisterNo will auto-increment by 4
* NO_INCREMENT - Function will write data into the same register
*/
unsigned char Wb35Reg_BurstWrite(struct hw_data *pHwData, u16 RegisterNo, u32 *pRegisterData, u8 NumberOfData, u8 Flag)
{
struct wb35_reg *reg = &pHwData->reg;
struct urb *urb = NULL;
struct wb35_reg_queue *reg_queue = NULL;
u16 UrbSize;
struct usb_ctrlrequest *dr;
u16 i, DataSize = NumberOfData * 4;
/* Module shutdown */
if (pHwData->SurpriseRemove)
return false;
/* Trying to use burst write function if use new hardware */
UrbSize = sizeof(struct wb35_reg_queue) + DataSize + sizeof(struct usb_ctrlrequest);
reg_queue = kzalloc(UrbSize, GFP_ATOMIC);
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (urb && reg_queue) {
reg_queue->DIRECT = 2; /* burst write register */
reg_queue->INDEX = RegisterNo;
reg_queue->pBuffer = (u32 *)((u8 *)reg_queue + sizeof(struct wb35_reg_queue));
memcpy(reg_queue->pBuffer, pRegisterData, DataSize);
/* convert the register data to little endian for the USB control transfer */
for (i = 0; i < NumberOfData; i++)
reg_queue->pBuffer[i] = cpu_to_le32(reg_queue->pBuffer[i]);
dr = (struct usb_ctrlrequest *)((u8 *)reg_queue + sizeof(struct wb35_reg_queue) + DataSize);
dr->bRequestType = USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE;
dr->bRequest = 0x04; /* USB or vendor-defined request code, burst mode */
dr->wValue = cpu_to_le16(Flag); /* 0: Register number auto-increment, 1: No auto increment */
dr->wIndex = cpu_to_le16(RegisterNo);
dr->wLength = cpu_to_le16(DataSize);
reg_queue->Next = NULL;
reg_queue->pUsbReq = dr;
reg_queue->urb = urb;
spin_lock_irq(&reg->EP0VM_spin_lock);
if (reg->reg_first == NULL)
reg->reg_first = reg_queue;
else
reg->reg_last->Next = reg_queue;
reg->reg_last = reg_queue;
spin_unlock_irq(&reg->EP0VM_spin_lock);
/* Start EP0VM */
Wb35Reg_EP0VM_start(pHwData);
return true;
} else {
if (urb)
usb_free_urb(urb);
kfree(reg_queue);
return false;
}
}
void Wb35Reg_Update(struct hw_data *pHwData, u16 RegisterNo, u32 RegisterValue)
{
struct wb35_reg *reg = &pHwData->reg;
switch (RegisterNo) {
case 0x3b0: reg->U1B0 = RegisterValue; break;
case 0x3bc: reg->U1BC_LEDConfigure = RegisterValue; break;
case 0x400: reg->D00_DmaControl = RegisterValue; break;
case 0x800: reg->M00_MacControl = RegisterValue; break;
case 0x804: reg->M04_MulticastAddress1 = RegisterValue; break;
case 0x808: reg->M08_MulticastAddress2 = RegisterValue; break;
case 0x824: reg->M24_MacControl = RegisterValue; break;
case 0x828: reg->M28_MacControl = RegisterValue; break;
case 0x82c: reg->M2C_MacControl = RegisterValue; break;
case 0x838: reg->M38_MacControl = RegisterValue; break;
case 0x840: reg->M40_MacControl = RegisterValue; break;
case 0x844: reg->M44_MacControl = RegisterValue; break;
case 0x848: reg->M48_MacControl = RegisterValue; break;
case 0x84c: reg->M4C_MacStatus = RegisterValue; break;
case 0x860: reg->M60_MacControl = RegisterValue; break;
case 0x868: reg->M68_MacControl = RegisterValue; break;
case 0x870: reg->M70_MacControl = RegisterValue; break;
case 0x874: reg->M74_MacControl = RegisterValue; break;
case 0x878: reg->M78_ERPInformation = RegisterValue; break;
case 0x87C: reg->M7C_MacControl = RegisterValue; break;
case 0x880: reg->M80_MacControl = RegisterValue; break;
case 0x884: reg->M84_MacControl = RegisterValue; break;
case 0x888: reg->M88_MacControl = RegisterValue; break;
case 0x898: reg->M98_MacControl = RegisterValue; break;
case 0x100c: reg->BB0C = RegisterValue; break;
case 0x102c: reg->BB2C = RegisterValue; break;
case 0x1030: reg->BB30 = RegisterValue; break;
case 0x103c: reg->BB3C = RegisterValue; break;
case 0x1048: reg->BB48 = RegisterValue; break;
case 0x104c: reg->BB4C = RegisterValue; break;
case 0x1050: reg->BB50 = RegisterValue; break;
case 0x1054: reg->BB54 = RegisterValue; break;
case 0x1058: reg->BB58 = RegisterValue; break;
case 0x105c: reg->BB5C = RegisterValue; break;
case 0x1060: reg->BB60 = RegisterValue; break;
}
}
/*
* true : read command process successfully
* false : register not support
*/
unsigned char Wb35Reg_WriteSync(struct hw_data *pHwData, u16 RegisterNo, u32 RegisterValue)
{
struct wb35_reg *reg = &pHwData->reg;
int ret = -1;
/* Module shutdown */
if (pHwData->SurpriseRemove)
return false;
RegisterValue = cpu_to_le32(RegisterValue);
/* update the register by send usb message */
reg->SyncIoPause = 1;
/* Wait until EP0VM stop */
while (reg->EP0vm_state != VM_STOP)
msleep(10);
/* Sync IoCallDriver */
reg->EP0vm_state = VM_RUNNING;
ret = usb_control_msg(pHwData->udev,
usb_sndctrlpipe(pHwData->udev, 0),
0x03, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
0x0, RegisterNo, &RegisterValue, 4, HZ * 100);
reg->EP0vm_state = VM_STOP;
reg->SyncIoPause = 0;
Wb35Reg_EP0VM_start(pHwData);
if (ret < 0) {
pr_debug("EP0 Write register usb message sending error\n");
pHwData->SurpriseRemove = 1;
return false;
}
return true;
}
/*
* true : read command process successfully
* false : register not support
*/
unsigned char Wb35Reg_Write(struct hw_data *pHwData, u16 RegisterNo, u32 RegisterValue)
{
struct wb35_reg *reg = &pHwData->reg;
struct usb_ctrlrequest *dr;
struct urb *urb = NULL;
struct wb35_reg_queue *reg_queue = NULL;
u16 UrbSize;
/* Module shutdown */
if (pHwData->SurpriseRemove)
return false;
/* update the register by send urb request */
UrbSize = sizeof(struct wb35_reg_queue) + sizeof(struct usb_ctrlrequest);
reg_queue = kzalloc(UrbSize, GFP_ATOMIC);
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (urb && reg_queue) {
reg_queue->DIRECT = 1; /* single register write */
reg_queue->INDEX = RegisterNo;
reg_queue->VALUE = cpu_to_le32(RegisterValue);
reg_queue->RESERVED_VALID = false;
dr = (struct usb_ctrlrequest *)((u8 *)reg_queue + sizeof(struct wb35_reg_queue));
dr->bRequestType = USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE;
dr->bRequest = 0x03; /* USB or vendor-defined request code, burst mode */
dr->wValue = cpu_to_le16(0x0);
dr->wIndex = cpu_to_le16(RegisterNo);
dr->wLength = cpu_to_le16(4);
/* Enter the sending queue */
reg_queue->Next = NULL;
reg_queue->pUsbReq = dr;
reg_queue->urb = urb;
spin_lock_irq(&reg->EP0VM_spin_lock);
if (reg->reg_first == NULL)
reg->reg_first = reg_queue;
else
reg->reg_last->Next = reg_queue;
reg->reg_last = reg_queue;
spin_unlock_irq(&reg->EP0VM_spin_lock);
/* Start EP0VM */
Wb35Reg_EP0VM_start(pHwData);
return true;
} else {
if (urb)
usb_free_urb(urb);
kfree(reg_queue);
return false;
}
}
/*
* This command will be executed with a user defined value. When it completes,
* this value is useful. For example, hal_set_current_channel will use it.
* true : read command process successfully
* false : register not support
*/
unsigned char Wb35Reg_WriteWithCallbackValue(struct hw_data *pHwData,
u16 RegisterNo,
u32 RegisterValue,
s8 *pValue,
s8 Len)
{
struct wb35_reg *reg = &pHwData->reg;
struct usb_ctrlrequest *dr;
struct urb *urb = NULL;
struct wb35_reg_queue *reg_queue = NULL;
u16 UrbSize;
/* Module shutdown */
if (pHwData->SurpriseRemove)
return false;
/* update the register by send urb request */
UrbSize = sizeof(struct wb35_reg_queue) + sizeof(struct usb_ctrlrequest);
reg_queue = kzalloc(UrbSize, GFP_ATOMIC);
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (urb && reg_queue) {
reg_queue->DIRECT = 1; /* single register write */
reg_queue->INDEX = RegisterNo;
reg_queue->VALUE = cpu_to_le32(RegisterValue);
/* NOTE : Users must guarantee the size of value will not exceed the buffer size. */
memcpy(reg_queue->RESERVED, pValue, Len);
reg_queue->RESERVED_VALID = true;
dr = (struct usb_ctrlrequest *)((u8 *)reg_queue + sizeof(struct wb35_reg_queue));
dr->bRequestType = USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE;
dr->bRequest = 0x03; /* USB or vendor-defined request code, burst mode */
dr->wValue = cpu_to_le16(0x0);
dr->wIndex = cpu_to_le16(RegisterNo);
dr->wLength = cpu_to_le16(4);
/* Enter the sending queue */
reg_queue->Next = NULL;
reg_queue->pUsbReq = dr;
reg_queue->urb = urb;
spin_lock_irq(&reg->EP0VM_spin_lock);
if (reg->reg_first == NULL)
reg->reg_first = reg_queue;
else
reg->reg_last->Next = reg_queue;
reg->reg_last = reg_queue;
spin_unlock_irq(&reg->EP0VM_spin_lock);
/* Start EP0VM */
Wb35Reg_EP0VM_start(pHwData);
return true;
} else {
if (urb)
usb_free_urb(urb);
kfree(reg_queue);
return false;
}
}
/*
* true : read command process successfully
* false : register not support
* pRegisterValue : It must be a resident buffer due to
* asynchronous read register.
*/
unsigned char Wb35Reg_ReadSync(struct hw_data *pHwData, u16 RegisterNo, u32 *pRegisterValue)
{
struct wb35_reg *reg = &pHwData->reg;
u32 *pltmp = pRegisterValue;
int ret = -1;
/* Module shutdown */
if (pHwData->SurpriseRemove)
return false;
/* Read the register by send usb message */
reg->SyncIoPause = 1;
/* Wait until EP0VM stop */
while (reg->EP0vm_state != VM_STOP)
msleep(10);
reg->EP0vm_state = VM_RUNNING;
ret = usb_control_msg(pHwData->udev,
usb_rcvctrlpipe(pHwData->udev, 0),
0x01, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
0x0, RegisterNo, pltmp, 4, HZ * 100);
*pRegisterValue = le32_to_cpu(*pltmp);
reg->EP0vm_state = VM_STOP;
Wb35Reg_Update(pHwData, RegisterNo, *pRegisterValue);
reg->SyncIoPause = 0;
Wb35Reg_EP0VM_start(pHwData);
if (ret < 0) {
pr_debug("EP0 Read register usb message sending error\n");
pHwData->SurpriseRemove = 1;
return false;
}
return true;
}
/*
* true : read command process successfully
* false : register not support
* pRegisterValue : It must be a resident buffer due to
* asynchronous read register.
*/
unsigned char Wb35Reg_Read(struct hw_data *pHwData, u16 RegisterNo, u32 *pRegisterValue)
{
struct wb35_reg *reg = &pHwData->reg;
struct usb_ctrlrequest *dr;
struct urb *urb;
struct wb35_reg_queue *reg_queue;
u16 UrbSize;
/* Module shutdown */
if (pHwData->SurpriseRemove)
return false;
/* update the variable by send Urb to read register */
UrbSize = sizeof(struct wb35_reg_queue) + sizeof(struct usb_ctrlrequest);
reg_queue = kzalloc(UrbSize, GFP_ATOMIC);
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (urb && reg_queue) {
reg_queue->DIRECT = 0; /* read register */
reg_queue->INDEX = RegisterNo;
reg_queue->pBuffer = pRegisterValue;
dr = (struct usb_ctrlrequest *)((u8 *)reg_queue + sizeof(struct wb35_reg_queue));
dr->bRequestType = USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN;
dr->bRequest = 0x01; /* USB or vendor-defined request code, burst mode */
dr->wValue = cpu_to_le16(0x0);
dr->wIndex = cpu_to_le16(RegisterNo);
dr->wLength = cpu_to_le16(4);
/* Enter the sending queue */
reg_queue->Next = NULL;
reg_queue->pUsbReq = dr;
reg_queue->urb = urb;
spin_lock_irq(&reg->EP0VM_spin_lock);
if (reg->reg_first == NULL)
reg->reg_first = reg_queue;
else
reg->reg_last->Next = reg_queue;
reg->reg_last = reg_queue;
spin_unlock_irq(&reg->EP0VM_spin_lock);
/* Start EP0VM */
Wb35Reg_EP0VM_start(pHwData);
return true;
} else {
if (urb)
usb_free_urb(urb);
kfree(reg_queue);
return false;
}
}
void Wb35Reg_EP0VM_start(struct hw_data *pHwData)
{
struct wb35_reg *reg = &pHwData->reg;
if (atomic_inc_return(&reg->RegFireCount) == 1) {
reg->EP0vm_state = VM_RUNNING;
Wb35Reg_EP0VM(pHwData);
} else
atomic_dec(&reg->RegFireCount);
}
void Wb35Reg_EP0VM(struct hw_data *pHwData)
{
struct wb35_reg *reg = &pHwData->reg;
struct urb *urb;
struct usb_ctrlrequest *dr;
u32 *pBuffer;
int ret = -1;
struct wb35_reg_queue *reg_queue;
if (reg->SyncIoPause)
goto cleanup;
if (pHwData->SurpriseRemove)
goto cleanup;
/* Get the register data and send to USB through Irp */
spin_lock_irq(&reg->EP0VM_spin_lock);
reg_queue = reg->reg_first;
spin_unlock_irq(&reg->EP0VM_spin_lock);
if (!reg_queue)
goto cleanup;
/* Get an Urb, send it */
urb = reg_queue->urb;
dr = reg_queue->pUsbReq;
pBuffer = reg_queue->pBuffer;
if (reg_queue->DIRECT == 1) /* output */
pBuffer = &reg_queue->VALUE;
usb_fill_control_urb(urb, pHwData->udev,
REG_DIRECTION(pHwData->udev, reg_queue),
(u8 *)dr, pBuffer, cpu_to_le16(dr->wLength),
Wb35Reg_EP0VM_complete, (void *)pHwData);
reg->EP0vm_state = VM_RUNNING;
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret < 0) {
pr_debug("EP0 Irp sending error\n");
goto cleanup;
}
return;
cleanup:
reg->EP0vm_state = VM_STOP;
atomic_dec(&reg->RegFireCount);
}
void Wb35Reg_EP0VM_complete(struct urb *urb)
{
struct hw_data *pHwData = (struct hw_data *)urb->context;
struct wb35_reg *reg = &pHwData->reg;
struct wb35_reg_queue *reg_queue;
/* Variable setting */
reg->EP0vm_state = VM_COMPLETED;
reg->EP0VM_status = urb->status;
if (pHwData->SurpriseRemove) { /* Let WbWlanHalt to handle surprise remove */
reg->EP0vm_state = VM_STOP;
atomic_dec(&reg->RegFireCount);
} else {
/* Complete to send, remove the URB from the first */
spin_lock_irq(&reg->EP0VM_spin_lock);
reg_queue = reg->reg_first;
if (reg_queue == reg->reg_last)
reg->reg_last = NULL;
reg->reg_first = reg->reg_first->Next;
spin_unlock_irq(&reg->EP0VM_spin_lock);
if (reg->EP0VM_status) {
pr_debug("EP0 IoCompleteRoutine return error\n");
reg->EP0vm_state = VM_STOP;
pHwData->SurpriseRemove = 1;
} else {
/* Success. Update the result */
/* Start the next send */
Wb35Reg_EP0VM(pHwData);
}
kfree(reg_queue);
}
usb_free_urb(urb);
}
void Wb35Reg_destroy(struct hw_data *pHwData)
{
struct wb35_reg *reg = &pHwData->reg;
struct urb *urb;
struct wb35_reg_queue *reg_queue;
Uxx_power_off_procedure(pHwData);
/* Wait for Reg operation completed */
do {
msleep(10); /* Delay for waiting function enter */
} while (reg->EP0vm_state != VM_STOP);
msleep(10); /* Delay for waiting function enter */
/* Release all the data in RegQueue */
spin_lock_irq(&reg->EP0VM_spin_lock);
reg_queue = reg->reg_first;
while (reg_queue) {
if (reg_queue == reg->reg_last)
reg->reg_last = NULL;
reg->reg_first = reg->reg_first->Next;
urb = reg_queue->urb;
spin_unlock_irq(&reg->EP0VM_spin_lock);
if (urb) {
usb_free_urb(urb);
kfree(reg_queue);
} else {
pr_debug("EP0 queue release error\n");
}
spin_lock_irq(&reg->EP0VM_spin_lock);
reg_queue = reg->reg_first;
}
spin_unlock_irq(&reg->EP0VM_spin_lock);
}
/*
* =======================================================================
* The function can be run in passive-level only.
* =========================================================================
*/
unsigned char Wb35Reg_initial(struct hw_data *pHwData)
{
struct wb35_reg *reg = &pHwData->reg;
u32 ltmp;
u32 SoftwareSet, VCO_trim, TxVga, Region_ScanInterval;
/* Spin lock is acquired for read and write IRP command */
spin_lock_init(&reg->EP0VM_spin_lock);
/* Getting RF module type from EEPROM */
Wb35Reg_WriteSync(pHwData, 0x03b4, 0x080d0000); /* Start EEPROM access + Read + address(0x0d) */
Wb35Reg_ReadSync(pHwData, 0x03b4, &ltmp);
/* Update RF module type and determine the PHY type by inf or EEPROM */
reg->EEPROMPhyType = (u8)(ltmp & 0xff);
/*
* 0 - MAX2825, 1 - MAX2827, 2 - MAX2828, 3 - MAX2829
* 16 - AL2230, 17 - AL7230, 18 - AL2230S
* 32 Reserved
* 33 - W89RF242(TxVGA 0~19), 34 - W89RF242(TxVGA 0~34)
*/
if (reg->EEPROMPhyType != RF_DECIDE_BY_INF) {
if ((reg->EEPROMPhyType == RF_MAXIM_2825) ||
(reg->EEPROMPhyType == RF_MAXIM_2827) ||
(reg->EEPROMPhyType == RF_MAXIM_2828) ||
(reg->EEPROMPhyType == RF_MAXIM_2829) ||
(reg->EEPROMPhyType == RF_MAXIM_V1) ||
(reg->EEPROMPhyType == RF_AIROHA_2230) ||
(reg->EEPROMPhyType == RF_AIROHA_2230S) ||
(reg->EEPROMPhyType == RF_AIROHA_7230) ||
(reg->EEPROMPhyType == RF_WB_242) ||
(reg->EEPROMPhyType == RF_WB_242_1))
pHwData->phy_type = reg->EEPROMPhyType;
}
/* Power On procedure running. The relative parameter will be set according to phy_type */
Uxx_power_on_procedure(pHwData);
/* Reading MAC address */
Uxx_ReadEthernetAddress(pHwData);
/* Read VCO trim for RF parameter */
Wb35Reg_WriteSync(pHwData, 0x03b4, 0x08200000);
Wb35Reg_ReadSync(pHwData, 0x03b4, &VCO_trim);
/* Read Antenna On/Off of software flag */
Wb35Reg_WriteSync(pHwData, 0x03b4, 0x08210000);
Wb35Reg_ReadSync(pHwData, 0x03b4, &SoftwareSet);
/* Read TXVGA */
Wb35Reg_WriteSync(pHwData, 0x03b4, 0x08100000);
Wb35Reg_ReadSync(pHwData, 0x03b4, &TxVga);
/* Get Scan interval setting from EEPROM offset 0x1c */
Wb35Reg_WriteSync(pHwData, 0x03b4, 0x081d0000);
Wb35Reg_ReadSync(pHwData, 0x03b4, &Region_ScanInterval);
/* Update Ethernet address */
memcpy(pHwData->CurrentMacAddress, pHwData->PermanentMacAddress, ETH_ALEN);
/* Update software variable */
pHwData->SoftwareSet = (u16)(SoftwareSet & 0xffff);
TxVga &= 0x000000ff;
pHwData->PowerIndexFromEEPROM = (u8)TxVga;
pHwData->VCO_trim = (u8)VCO_trim & 0xff;
if (pHwData->VCO_trim == 0xff)
pHwData->VCO_trim = 0x28;
reg->EEPROMRegion = (u8)(Region_ScanInterval >> 8);
if (reg->EEPROMRegion < 1 || reg->EEPROMRegion > 6)
reg->EEPROMRegion = REGION_AUTO;
/* For Get Tx VGA from EEPROM */
GetTxVgaFromEEPROM(pHwData);
/* Set Scan Interval */
pHwData->Scan_Interval = (u8)(Region_ScanInterval & 0xff) * 10;
if ((pHwData->Scan_Interval == 2550) || (pHwData->Scan_Interval < 10)) /* 2550 == 0xff * 10, the erased-EEPROM default */
pHwData->Scan_Interval = SCAN_MAX_CHNL_TIME;
/* Initial register */
RFSynthesizer_initial(pHwData);
BBProcessor_initial(pHwData); /* Async write, must wait until complete */
Wb35Reg_phy_calibration(pHwData);
Mxx_initial(pHwData);
Dxx_initial(pHwData);
if (pHwData->SurpriseRemove)
return false;
else
return true; /* initialization succeeded */
}
/*
* ================================================================
* CardComputeCrc --
*
* Description:
* Runs the AUTODIN II CRC algorithm on buffer Buffer of length, Length.
*
* Arguments:
* Buffer - the input buffer
* Length - the length of Buffer
*
* Return Value:
* The 32-bit CRC value.
* ===================================================================
*/
u32 CardComputeCrc(u8 *Buffer, u32 Length)
{
u32 Crc, Carry;
u32 i, j;
u8 CurByte;
Crc = 0xffffffff;
for (i = 0; i < Length; i++) {
CurByte = Buffer[i];
for (j = 0; j < 8; j++) {
Carry = ((Crc & 0x80000000) ? 1 : 0) ^ (CurByte & 0x01);
Crc <<= 1;
CurByte >>= 1;
if (Carry)
Crc = (Crc ^ 0x04c11db6) | Carry;
}
}
return Crc;
}
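/*
 * Note on the update step above: when Carry is set, the shifted CRC has a
 * clear low bit, so (Crc ^ 0x04c11db6) | 1 is equivalent to XOR-ing with
 * the full CRC-32 polynomial 0x04c11db7 used by AUTODIN II.
 */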
/*
* ==================================================================
* BitReverse --
* Reverse the bits in the input argument, dwData, which is
* regarded as a string of bits with the length, DataLength.
*
* Arguments:
* dwData :
* DataLength :
*
* Return:
* The converted value.
* ==================================================================
*/
u32 BitReverse(u32 dwData, u32 DataLength)
{
u32 HalfLength, i, j;
u32 BitA, BitB;
if (DataLength <= 0)
return 0; /* No conversion is done. */
dwData = dwData & (0xffffffff >> (32 - DataLength));
HalfLength = DataLength / 2;
for (i = 0, j = DataLength - 1; i < HalfLength; i++, j--) {
BitA = GetBit(dwData, i);
BitB = GetBit(dwData, j);
if (BitA && !BitB) {
dwData = ClearBit(dwData, i);
dwData = SetBit(dwData, j);
} else if (!BitA && BitB) {
dwData = SetBit(dwData, i);
dwData = ClearBit(dwData, j);
} else {
/* Do nothing since these two bits have the same value. */
}
}
return dwData;
}
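/*
 * Worked example (illustrative): BitReverse(0xd, 4) swaps bit 0 with
 * bit 3 and bit 1 with bit 2, returning 0xb (0b1101 -> 0b1011).
 */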
void Wb35Reg_phy_calibration(struct hw_data *pHwData)
{
u32 BB3c, BB54;
if ((pHwData->phy_type == RF_WB_242) ||
(pHwData->phy_type == RF_WB_242_1)) {
phy_calibration_winbond(pHwData, 2412); /* Sync operation */
Wb35Reg_ReadSync(pHwData, 0x103c, &BB3c);
Wb35Reg_ReadSync(pHwData, 0x1054, &BB54);
pHwData->BB3c_cal = BB3c;
pHwData->BB54_cal = BB54;
RFSynthesizer_initial(pHwData);
BBProcessor_initial(pHwData); /* Async operation */
Wb35Reg_WriteSync(pHwData, 0x103c, BB3c);
Wb35Reg_WriteSync(pHwData, 0x1054, BB54);
}
}
| gpl-2.0 |
golden-guy/android_kernel_asus_grouper | drivers/staging/vt6655/tether.c | 8370 | 2852 | /*
* Copyright (c) 2003 VIA Networking, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
*
* File: tether.c
*
* Purpose:
*
* Author: Tevin Chen
*
* Date: May 21, 1996
*
* Functions:
* ETHbyGetHashIndexByCrc32 - Calculate multicast hash value by CRC32
* ETHbIsBufferCrc32Ok - Check whether the CRC value of the buffer is OK
*
* Revision History:
*
*/
#include "device.h"
#include "tmacro.h"
#include "tcrc.h"
#include "tether.h"
/*--------------------- Static Definitions -------------------------*/
/*--------------------- Static Classes ----------------------------*/
/*--------------------- Static Variables --------------------------*/
/*--------------------- Static Functions --------------------------*/
/*--------------------- Export Variables --------------------------*/
/*
* Description: Calculate multicast hash value by CRC32
*
* Parameters:
* In:
* pbyMultiAddr - Multicast Address
* Out:
* none
*
* Return Value: Hash value
*
*/
unsigned char ETHbyGetHashIndexByCrc32 (unsigned char *pbyMultiAddr)
{
int ii;
unsigned char byTmpHash;
unsigned char byHash = 0;
// get the least 6-bits from CRC generator
byTmpHash = (unsigned char)(CRCdwCrc32(pbyMultiAddr, ETH_ALEN,
0xFFFFFFFFL) & 0x3F);
// reverse most bit to least bit
for (ii = 0; ii < (sizeof(byTmpHash) * 8); ii++) {
byHash <<= 1;
if (byTmpHash & 0x01)
byHash |= 1;
byTmpHash >>= 1;
}
// adjust 6-bits to the right most
return (byHash >> 2);
}
/*
* Description: Check whether the CRC value of the buffer is OK
*
* Parameters:
* In:
* pbyBuffer - pointer of buffer (normally is rx buffer)
* cbFrameLength - length of buffer, including CRC portion
* Out:
* none
*
* Return Value: true if ok; false if error.
*
*/
bool ETHbIsBufferCrc32Ok (unsigned char *pbyBuffer, unsigned int cbFrameLength)
{
unsigned long dwCRC;
dwCRC = CRCdwGetCrc32(pbyBuffer, cbFrameLength - 4);
if (cpu_to_le32(*((unsigned long *)(pbyBuffer + cbFrameLength - 4))) != dwCRC) {
return false;
}
return true;
}
| gpl-2.0 |
razrqcom-dev-team/android_kernel_motorola_apq8084 | net/bridge/netfilter/ebt_redirect.c | 11186 | 2054 | /*
* ebt_redirect
*
* Authors:
* Bart De Schuymer <bdschuym@pandora.be>
*
* April, 2002
*
*/
#include <linux/module.h>
#include <net/sock.h>
#include "../br_private.h"
#include <linux/netfilter.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_redirect.h>
static unsigned int
ebt_redirect_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ebt_redirect_info *info = par->targinfo;
if (!skb_make_writable(skb, 0))
return EBT_DROP;
if (par->hooknum != NF_BR_BROUTING)
/* rcu_read_lock()ed by nf_hook_slow */
memcpy(eth_hdr(skb)->h_dest,
br_port_get_rcu(par->in)->br->dev->dev_addr, ETH_ALEN);
else
memcpy(eth_hdr(skb)->h_dest, par->in->dev_addr, ETH_ALEN);
skb->pkt_type = PACKET_HOST;
return info->target;
}
static int ebt_redirect_tg_check(const struct xt_tgchk_param *par)
{
const struct ebt_redirect_info *info = par->targinfo;
unsigned int hook_mask;
if (BASE_CHAIN && info->target == EBT_RETURN)
return -EINVAL;
hook_mask = par->hook_mask & ~(1 << NF_BR_NUMHOOKS);
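/*
 * Redirecting only makes sense where the destination MAC can still be
 * rewritten: the PRE_ROUTING hook of the "nat" table or the BROUTING
 * hook of the "broute" table; any other combination is rejected below.
 */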
if ((strcmp(par->table, "nat") != 0 ||
hook_mask & ~(1 << NF_BR_PRE_ROUTING)) &&
(strcmp(par->table, "broute") != 0 ||
hook_mask & ~(1 << NF_BR_BROUTING)))
return -EINVAL;
if (INVALID_TARGET)
return -EINVAL;
return 0;
}
static struct xt_target ebt_redirect_tg_reg __read_mostly = {
.name = "redirect",
.revision = 0,
.family = NFPROTO_BRIDGE,
.hooks = (1 << NF_BR_NUMHOOKS) | (1 << NF_BR_PRE_ROUTING) |
(1 << NF_BR_BROUTING),
.target = ebt_redirect_tg,
.checkentry = ebt_redirect_tg_check,
.targetsize = sizeof(struct ebt_redirect_info),
.me = THIS_MODULE,
};
static int __init ebt_redirect_init(void)
{
return xt_register_target(&ebt_redirect_tg_reg);
}
static void __exit ebt_redirect_fini(void)
{
xt_unregister_target(&ebt_redirect_tg_reg);
}
module_init(ebt_redirect_init);
module_exit(ebt_redirect_fini);
MODULE_DESCRIPTION("Ebtables: Packet redirection to localhost");
MODULE_LICENSE("GPL");
| gpl-2.0 |
InsomniaAOSP/android_kernel_samsung_d2 | arch/ia64/lib/csum_partial_copy.c | 13746 | 3069 | /*
* Network Checksum & Copy routine
*
* Copyright (C) 1999, 2003-2004 Hewlett-Packard Co
* Stephane Eranian <eranian@hpl.hp.com>
*
* Most of the code has been imported from Linux/Alpha
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <asm/uaccess.h>
/*
* XXX Fixme: those 2 inlines are meant for debugging and will go away
*/
static inline unsigned short from64to16(unsigned long x)
{
/* add up 32-bit words for 33 bits */
x = (x & 0xffffffff) + (x >> 32);
/* add up 16-bit and 17-bit words for 17+c bits */
x = (x & 0xffff) + (x >> 16);
/* add up 16-bit and 2-bit for 16+c bit */
x = (x & 0xffff) + (x >> 16);
/* add up carry.. */
x = (x & 0xffff) + (x >> 16);
return x;
}
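/*
 * Folding sketch: each step above adds the upper half of the value into
 * the lower half, so carries produced while summing are folded back in,
 * leaving the 16-bit one's-complement sum the Internet checksum needs.
 */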
static inline
unsigned long do_csum_c(const unsigned char * buff, int len, unsigned int psum)
{
int odd, count;
unsigned long result = (unsigned long)psum;
if (len <= 0)
goto out;
odd = 1 & (unsigned long) buff;
if (odd) {
result = *buff << 8;
len--;
buff++;
}
count = len >> 1; /* nr of 16-bit words.. */
if (count) {
if (2 & (unsigned long) buff) {
result += *(unsigned short *) buff;
count--;
len -= 2;
buff += 2;
}
count >>= 1; /* nr of 32-bit words.. */
if (count) {
if (4 & (unsigned long) buff) {
result += *(unsigned int *) buff;
count--;
len -= 4;
buff += 4;
}
count >>= 1; /* nr of 64-bit words.. */
if (count) {
unsigned long carry = 0;
do {
unsigned long w = *(unsigned long *) buff;
count--;
buff += 8;
result += carry;
result += w;
carry = (w > result);
} while (count);
result += carry;
result = (result & 0xffffffff) + (result >> 32);
}
if (len & 4) {
result += *(unsigned int *) buff;
buff += 4;
}
}
if (len & 2) {
result += *(unsigned short *) buff;
buff += 2;
}
}
if (len & 1)
result += *buff;
result = from64to16(result);
if (odd)
result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
out:
return result;
}
/*
* XXX Fixme
*
* This is very ugly but temporary. THIS NEEDS SERIOUS ENHANCEMENTS.
* But it's very tricky to get right even in C.
*/
extern unsigned long do_csum(const unsigned char *, long);
__wsum
csum_partial_copy_from_user(const void __user *src, void *dst,
int len, __wsum psum, int *errp)
{
unsigned long result;
/* XXX Fixme
* for now we separate the copy from checksum for obvious
* alignment difficulties. Look at the Alpha code and you'll be
* scared.
*/
if (__copy_from_user(dst, src, len) != 0 && errp)
*errp = -EFAULT;
result = do_csum(dst, len);
/* add in old sum, and carry.. */
result += (__force u32)psum;
/* 32+c bits -> 32 bits */
result = (result & 0xffffffff) + (result >> 32);
return (__force __wsum)result;
}
EXPORT_SYMBOL(csum_partial_copy_from_user);
__wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
{
return csum_partial_copy_from_user((__force const void __user *)src,
dst, len, sum, NULL);
}
EXPORT_SYMBOL(csum_partial_copy_nocheck);
| gpl-2.0 |
dpuyosa/android_kernel_wiko_l5460 | drivers/misc/mediatek/gpu/mt6735/mali-EAC/drivers/gpu/arm/midgard/platform/vexpress_virtex7_40mhz/mali_kbase_config_vexpress.c | 179 | 9291 | /*
*
* (C) COPYRIGHT ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* A copy of the licence is included with the program, and can also be obtained
* from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301, USA.
*
*/
#include <linux/ioport.h>
#include <mali_kbase.h>
#include <mali_kbase_defs.h>
#include <mali_kbase_config.h>
#include "mali_kbase_cpu_vexpress.h"
/* Versatile Express (VE) configuration defaults shared between config_attributes[]
* and config_attributes_hw_issue_8408[]. Settings are not shared for
* JS_HARD_STOP_TICKS_SS and JS_RESET_TICKS_SS.
*/
#define KBASE_VE_JS_SCHEDULING_TICK_NS_DEBUG 15000000u /* 15ms, an aggressive tick for testing purposes. This will reduce performance significantly */
#define KBASE_VE_JS_SOFT_STOP_TICKS_DEBUG 1 /* between 15ms and 30ms before soft-stop a job */
#define KBASE_VE_JS_SOFT_STOP_TICKS_CL_DEBUG 1 /* between 15ms and 30ms before soft-stop a CL job */
#define KBASE_VE_JS_HARD_STOP_TICKS_SS_DEBUG 333 /* 5s before hard-stop */
#define KBASE_VE_JS_HARD_STOP_TICKS_SS_8401_DEBUG 2000 /* 30s before hard-stop, for a certain GLES2 test at 128x128 (bound by combined vertex+tiler job) - for issue 8401 */
#define KBASE_VE_JS_HARD_STOP_TICKS_CL_DEBUG 166 /* 2.5s before hard-stop */
#define KBASE_VE_JS_HARD_STOP_TICKS_NSS_DEBUG 100000 /* 1500s (25mins) before NSS hard-stop */
#define KBASE_VE_JS_RESET_TICKS_SS_DEBUG 500 /* 45s before resetting GPU, for a certain GLES2 test at 128x128 (bound by combined vertex+tiler job) */
#define KBASE_VE_JS_RESET_TICKS_SS_8401_DEBUG 3000 /* 7.5s before resetting GPU - for issue 8401 */
#define KBASE_VE_JS_RESET_TICKS_CL_DEBUG 500 /* 45s before resetting GPU */
#define KBASE_VE_JS_RESET_TICKS_NSS_DEBUG 100166 /* 1502s before resetting GPU */
#define KBASE_VE_JS_SCHEDULING_TICK_NS 1250000000u /* 1.25s */
#define KBASE_VE_JS_SOFT_STOP_TICKS 2 /* 2.5s before soft-stop a job */
#define KBASE_VE_JS_SOFT_STOP_TICKS_CL 1 /* 1.25s before soft-stop a CL job */
#define KBASE_VE_JS_HARD_STOP_TICKS_SS 4 /* 5s before hard-stop */
#define KBASE_VE_JS_HARD_STOP_TICKS_SS_8401 24 /* 30s before hard-stop, for a certain GLES2 test at 128x128 (bound by combined vertex+tiler job) - for issue 8401 */
#define KBASE_VE_JS_HARD_STOP_TICKS_CL 2 /* 2.5s before hard-stop */
#define KBASE_VE_JS_HARD_STOP_TICKS_NSS 1200 /* 1500s before NSS hard-stop */
#define KBASE_VE_JS_RESET_TICKS_SS 6 /* 7.5s before resetting GPU */
#define KBASE_VE_JS_RESET_TICKS_SS_8401 36 /* 45s before resetting GPU, for a certain GLES2 test at 128x128 (bound by combined vertex+tiler job) - for issue 8401 */
#define KBASE_VE_JS_RESET_TICKS_CL 3 /* 3.75s before resetting GPU */
#define KBASE_VE_JS_RESET_TICKS_NSS 1201 /* 1502s before resetting GPU */
#define KBASE_VE_JS_RESET_TIMEOUT_MS 3000 /* 3s before cancelling stuck jobs */
#define KBASE_VE_JS_CTX_TIMESLICE_NS 1000000 /* 1ms - an aggressive timeslice for testing purposes (causes lots of scheduling out for >4 ctxs) */
#define KBASE_VE_POWER_MANAGEMENT_CALLBACKS ((uintptr_t)&pm_callbacks)
#define KBASE_VE_CPU_SPEED_FUNC ((uintptr_t)&kbase_get_vexpress_cpu_clock_speed)
#define HARD_RESET_AT_POWER_OFF 0
#ifndef CONFIG_OF
static kbase_io_resources io_resources = {
.job_irq_number = 68,
.mmu_irq_number = 69,
.gpu_irq_number = 70,
.io_memory_region = {
.start = 0xFC010000,
.end = 0xFC010000 + (4096 * 4) - 1
}
};
#endif /* CONFIG_OF */
static int pm_callback_power_on(struct kbase_device *kbdev)
{
/* Nothing is needed on VExpress, but we may have destroyed GPU state (if the below HARD_RESET code is active) */
return 1;
}
static void pm_callback_power_off(struct kbase_device *kbdev)
{
#if HARD_RESET_AT_POWER_OFF
/* Cause a GPU hard reset to test whether we have actually idled the GPU
* and that we properly reconfigure the GPU on power up.
* Usually this would be dangerous, but if the GPU is working correctly it should
* be completely safe as the GPU should not be active at this point.
* However this is disabled normally because it will most likely interfere with
* bus logging etc.
*/
KBASE_TRACE_ADD(kbdev, CORE_GPU_HARD_RESET, NULL, NULL, 0u, 0);
kbase_os_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), GPU_COMMAND_HARD_RESET);
#endif
}
static struct kbase_pm_callback_conf pm_callbacks = {
.power_on_callback = pm_callback_power_on,
.power_off_callback = pm_callback_power_off,
.power_suspend_callback = NULL,
.power_resume_callback = NULL
};
/* Please keep table config_attributes in sync with config_attributes_hw_issue_8408 */
static struct kbase_attribute config_attributes[] = {
#ifdef CONFIG_MALI_DEBUG
/* Use more aggressive scheduling timeouts in debug builds for testing purposes */
{
KBASE_CONFIG_ATTR_JS_SCHEDULING_TICK_NS,
KBASE_VE_JS_SCHEDULING_TICK_NS_DEBUG},
{
KBASE_CONFIG_ATTR_JS_SOFT_STOP_TICKS,
KBASE_VE_JS_SOFT_STOP_TICKS_DEBUG},
{
KBASE_CONFIG_ATTR_JS_SOFT_STOP_TICKS_CL,
KBASE_VE_JS_SOFT_STOP_TICKS_CL_DEBUG},
{
KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_SS,
KBASE_VE_JS_HARD_STOP_TICKS_SS_DEBUG},
{
KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_CL,
KBASE_VE_JS_HARD_STOP_TICKS_CL_DEBUG},
{
KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_NSS,
KBASE_VE_JS_HARD_STOP_TICKS_NSS_DEBUG},
{
KBASE_CONFIG_ATTR_JS_RESET_TICKS_SS,
KBASE_VE_JS_RESET_TICKS_SS_DEBUG},
{
KBASE_CONFIG_ATTR_JS_RESET_TICKS_CL,
KBASE_VE_JS_RESET_TICKS_CL_DEBUG},
{
KBASE_CONFIG_ATTR_JS_RESET_TICKS_NSS,
KBASE_VE_JS_RESET_TICKS_NSS_DEBUG},
#else /* CONFIG_MALI_DEBUG */
/* In release builds same as the defaults but scaled for 5MHz FPGA */
{
KBASE_CONFIG_ATTR_JS_SCHEDULING_TICK_NS,
KBASE_VE_JS_SCHEDULING_TICK_NS},
{
KBASE_CONFIG_ATTR_JS_SOFT_STOP_TICKS,
KBASE_VE_JS_SOFT_STOP_TICKS},
{
KBASE_CONFIG_ATTR_JS_SOFT_STOP_TICKS_CL,
KBASE_VE_JS_SOFT_STOP_TICKS_CL},
{
KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_SS,
KBASE_VE_JS_HARD_STOP_TICKS_SS},
{
KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_CL,
KBASE_VE_JS_HARD_STOP_TICKS_CL},
{
KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_NSS,
KBASE_VE_JS_HARD_STOP_TICKS_NSS},
{
KBASE_CONFIG_ATTR_JS_RESET_TICKS_SS,
KBASE_VE_JS_RESET_TICKS_SS},
{
KBASE_CONFIG_ATTR_JS_RESET_TICKS_CL,
KBASE_VE_JS_RESET_TICKS_CL},
{
KBASE_CONFIG_ATTR_JS_RESET_TICKS_NSS,
KBASE_VE_JS_RESET_TICKS_NSS},
#endif /* CONFIG_MALI_DEBUG */
{
KBASE_CONFIG_ATTR_JS_RESET_TIMEOUT_MS,
KBASE_VE_JS_RESET_TIMEOUT_MS},
{
KBASE_CONFIG_ATTR_JS_CTX_TIMESLICE_NS,
KBASE_VE_JS_CTX_TIMESLICE_NS},
{
KBASE_CONFIG_ATTR_POWER_MANAGEMENT_CALLBACKS,
KBASE_VE_POWER_MANAGEMENT_CALLBACKS},
{
KBASE_CONFIG_ATTR_CPU_SPEED_FUNC,
KBASE_VE_CPU_SPEED_FUNC},
{
KBASE_CONFIG_ATTR_END,
0}
};
/* as config_attributes array above except with different settings for
* JS_HARD_STOP_TICKS_SS, JS_RESET_TICKS_SS that
* are needed for BASE_HW_ISSUE_8408.
*/
struct kbase_attribute config_attributes_hw_issue_8408[] = {
#ifdef CONFIG_MALI_DEBUG
/* Use more aggressive scheduling timeouts in debug builds for testing purposes */
{
KBASE_CONFIG_ATTR_JS_SCHEDULING_TICK_NS,
KBASE_VE_JS_SCHEDULING_TICK_NS_DEBUG},
{
KBASE_CONFIG_ATTR_JS_SOFT_STOP_TICKS,
KBASE_VE_JS_SOFT_STOP_TICKS_DEBUG},
{
KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_SS,
KBASE_VE_JS_HARD_STOP_TICKS_SS_8401_DEBUG},
{
KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_NSS,
KBASE_VE_JS_HARD_STOP_TICKS_NSS_DEBUG},
{
KBASE_CONFIG_ATTR_JS_RESET_TICKS_SS,
KBASE_VE_JS_RESET_TICKS_SS_8401_DEBUG},
{
KBASE_CONFIG_ATTR_JS_RESET_TICKS_NSS,
KBASE_VE_JS_RESET_TICKS_NSS_DEBUG},
#else /* CONFIG_MALI_DEBUG */
/* In release builds same as the defaults but scaled for 5MHz FPGA */
{
KBASE_CONFIG_ATTR_JS_SCHEDULING_TICK_NS,
KBASE_VE_JS_SCHEDULING_TICK_NS},
{
KBASE_CONFIG_ATTR_JS_SOFT_STOP_TICKS,
KBASE_VE_JS_SOFT_STOP_TICKS},
{
KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_SS,
KBASE_VE_JS_HARD_STOP_TICKS_SS_8401},
{
KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_NSS,
KBASE_VE_JS_HARD_STOP_TICKS_NSS},
{
KBASE_CONFIG_ATTR_JS_RESET_TICKS_SS,
KBASE_VE_JS_RESET_TICKS_SS_8401},
{
KBASE_CONFIG_ATTR_JS_RESET_TICKS_NSS,
KBASE_VE_JS_RESET_TICKS_NSS},
#endif /* CONFIG_MALI_DEBUG */
{
KBASE_CONFIG_ATTR_JS_RESET_TIMEOUT_MS,
KBASE_VE_JS_RESET_TIMEOUT_MS},
{
KBASE_CONFIG_ATTR_JS_CTX_TIMESLICE_NS,
KBASE_VE_JS_CTX_TIMESLICE_NS},
{
KBASE_CONFIG_ATTR_POWER_MANAGEMENT_CALLBACKS,
KBASE_VE_POWER_MANAGEMENT_CALLBACKS},
{
KBASE_CONFIG_ATTR_CPU_SPEED_FUNC,
KBASE_VE_CPU_SPEED_FUNC},
{
KBASE_CONFIG_ATTR_END,
0}
};
static struct kbase_platform_config virtex7_platform_config = {
.attributes = config_attributes,
#ifndef CONFIG_OF
.io_resources = &io_resources
#endif
};
struct kbase_platform_config *kbase_get_platform_config(void)
{
return &virtex7_platform_config;
}
int kbase_platform_early_init(void)
{
/* Nothing needed at this stage */
return 0;
}
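/* Illustrative sketch (not part of the original platform file): how a
* consumer might walk an attribute table like the ones above. The helper
* below is hypothetical; the real driver resolves attributes through its
* own kbasep lookup routines. It assumes the old kbase_attribute layout of
* an id plus a uintptr_t data value, with the table terminated by a
* KBASE_CONFIG_ATTR_END sentinel entry.
*/
static uintptr_t example_lookup_attribute(const struct kbase_attribute *attrs,
		int attribute_id)
{
	for (; attrs->id != KBASE_CONFIG_ATTR_END; ++attrs)
		if (attrs->id == attribute_id)
			return attrs->data;
	return 0; /* not found; a real lookup would fall back to a default */
}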
| gpl-2.0 |
ziozzang/linux | drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 179 | 204322 | /*
* This file is part of the Chelsio T4 Ethernet driver for Linux.
*
* Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/delay.h>
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4fw_api.h"
/**
* t4_wait_op_done_val - wait until an operation is completed
* @adapter: the adapter performing the operation
* @reg: the register to check for completion
* @mask: a single-bit field within @reg that indicates completion
* @polarity: the value of the field when the operation is completed
* @attempts: number of check iterations
* @delay: delay in usecs between iterations
* @valp: where to store the value of the register at completion time
*
* Wait until an operation is completed by checking a bit in a register
* up to @attempts times. If @valp is not NULL the value of the register
* at the time it indicated completion is stored there. Returns 0 if the
* operation completes and -EAGAIN otherwise.
*/
static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
int polarity, int attempts, int delay, u32 *valp)
{
while (1) {
u32 val = t4_read_reg(adapter, reg);
if (!!(val & mask) == polarity) {
if (valp)
*valp = val;
return 0;
}
if (--attempts == 0)
return -EAGAIN;
if (delay)
udelay(delay);
}
}
static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
int polarity, int attempts, int delay)
{
return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
delay, NULL);
}
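/* Usage sketch (illustrative, not part of the driver): a thin, hypothetical
* wrapper showing the calling convention -- wait up to 10 polls, 5us apart,
* for a single "done" bit to become set (polarity 1). __maybe_unused keeps
* the compiler quiet since nothing in this file calls it.
*/
static int __maybe_unused example_wait_done(struct adapter *adap, int reg,
					    u32 done_mask)
{
	return t4_wait_op_done(adap, reg, done_mask, 1, 10, 5);
}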
/**
* t4_set_reg_field - set a register field to a value
* @adapter: the adapter to program
* @addr: the register address
* @mask: specifies the portion of the register to modify
* @val: the new value for the register field
*
* Sets a register field specified by the supplied mask to the
* given value.
*/
void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
u32 val)
{
u32 v = t4_read_reg(adapter, addr) & ~mask;
t4_write_reg(adapter, addr, v | val);
(void) t4_read_reg(adapter, addr); /* flush */
}
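/* Example (illustrative): to update only bits 7:4 of a hypothetical
* register REG_X while leaving the other bits untouched:
*
*	t4_set_reg_field(adap, REG_X, 0xf0, 0x30);
*
* which reads REG_X, clears bits 7:4, ORs in 0x30 and writes the result
* back.
*/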
/**
* t4_read_indirect - read indirectly addressed registers
* @adap: the adapter
* @addr_reg: register holding the indirect address
* @data_reg: register holding the value of the indirect register
* @vals: where the read register values are stored
* @nregs: how many indirect registers to read
* @start_idx: index of first indirect register to read
*
* Reads registers that are accessed indirectly through an address/data
* register pair.
*/
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
unsigned int data_reg, u32 *vals,
unsigned int nregs, unsigned int start_idx)
{
while (nregs--) {
t4_write_reg(adap, addr_reg, start_idx);
*vals++ = t4_read_reg(adap, data_reg);
start_idx++;
}
}
/**
* t4_write_indirect - write indirectly addressed registers
* @adap: the adapter
* @addr_reg: register holding the indirect addresses
* @data_reg: register holding the value for the indirect registers
* @vals: values to write
* @nregs: how many indirect registers to write
* @start_idx: address of first indirect register to write
*
* Writes a sequential block of registers that are accessed indirectly
* through an address/data register pair.
*/
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
unsigned int data_reg, const u32 *vals,
unsigned int nregs, unsigned int start_idx)
{
while (nregs--) {
t4_write_reg(adap, addr_reg, start_idx++);
t4_write_reg(adap, data_reg, *vals++);
}
}
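/* Illustrative sketch (hypothetical helper, not in the original file): read
* a single indirectly-addressed register by index through an address/data
* register pair.
*/
static u32 __maybe_unused example_read_one_indirect(struct adapter *adap,
						    unsigned int addr_reg,
						    unsigned int data_reg,
						    unsigned int idx)
{
	u32 val;

	t4_read_indirect(adap, addr_reg, data_reg, &val, 1, idx);
	return val;
}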
/*
* Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
* mechanism. This guarantees that we get the real value even if we're
* operating within a Virtual Machine and the Hypervisor is trapping our
* Configuration Space accesses.
*/
void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
{
u32 req = FUNCTION_V(adap->pf) | REGISTER_V(reg);
if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
req |= ENABLE_F;
else
req |= T6_ENABLE_F;
if (is_t4(adap->params.chip))
req |= LOCALCFG_F;
t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, req);
*val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA_A);
/* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
* Configuration Space read. (None of the other fields matter when
* ENABLE is 0 so a simple register write is easier than a
* read-modify-write via t4_set_reg_field().)
*/
t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, 0);
}
/*
* t4_report_fw_error - report firmware error
* @adap: the adapter
*
* The adapter firmware can indicate error conditions to the host.
* If the firmware has indicated an error, print out the reason for
* the firmware error.
*/
static void t4_report_fw_error(struct adapter *adap)
{
static const char *const reason[] = {
"Crash", /* PCIE_FW_EVAL_CRASH */
"During Device Preparation", /* PCIE_FW_EVAL_PREP */
"During Device Configuration", /* PCIE_FW_EVAL_CONF */
"During Device Initialization", /* PCIE_FW_EVAL_INIT */
"Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
"Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
"Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
"Reserved", /* reserved */
};
u32 pcie_fw;
pcie_fw = t4_read_reg(adap, PCIE_FW_A);
if (pcie_fw & PCIE_FW_ERR_F)
dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
reason[PCIE_FW_EVAL_G(pcie_fw)]);
}
/*
* Get the reply to a mailbox command and store it in @rpl in big-endian order.
*/
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
u32 mbox_addr)
{
for ( ; nflit; nflit--, mbox_addr += 8)
*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
}
/*
* Handle a FW assertion reported in a mailbox.
*/
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
{
struct fw_debug_cmd asrt;
get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
dev_alert(adap->pdev_dev,
"FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
asrt.u.assert.filename_0_7, be32_to_cpu(asrt.u.assert.line),
be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y));
}
static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
{
dev_err(adap->pdev_dev,
"mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
(unsigned long long)t4_read_reg64(adap, data_reg),
(unsigned long long)t4_read_reg64(adap, data_reg + 8),
(unsigned long long)t4_read_reg64(adap, data_reg + 16),
(unsigned long long)t4_read_reg64(adap, data_reg + 24),
(unsigned long long)t4_read_reg64(adap, data_reg + 32),
(unsigned long long)t4_read_reg64(adap, data_reg + 40),
(unsigned long long)t4_read_reg64(adap, data_reg + 48),
(unsigned long long)t4_read_reg64(adap, data_reg + 56));
}
/**
* t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
* @adap: the adapter
* @mbox: index of the mailbox to use
* @cmd: the command to write
* @size: command length in bytes
* @rpl: where to optionally store the reply
* @sleep_ok: if true we may sleep while awaiting command completion
* @timeout: time to wait for command to finish before timing out
*
* Sends the given command to FW through the selected mailbox and waits
* for the FW to execute the command. If @rpl is not %NULL it is used to
* store the FW's reply to the command. The command and its optional
* reply are of the same length. FW can take up to %FW_CMD_MAX_TIMEOUT ms
* to respond. @sleep_ok determines whether we may sleep while awaiting
* the response. If sleeping is allowed we use progressive backoff
* otherwise we spin.
*
* The return value is 0 on success or a negative errno on failure. A
* failure can happen either because we are not able to execute the
* command or FW executes it but signals an error. In the latter case
* the return value is the error code indicated by FW (negated).
*/
int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
int size, void *rpl, bool sleep_ok, int timeout)
{
static const int delay[] = {
1, 1, 3, 5, 10, 10, 20, 50, 100, 200
};
u32 v;
u64 res;
int i, ms, delay_idx;
const __be64 *p = cmd;
u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL_A);
if ((size & 15) || size > MBOX_LEN)
return -EINVAL;
/*
* If the device is off-line, as in EEH, commands will time out.
* Fail them early so we don't waste time waiting.
*/
if (adap->pdev->error_state != pci_channel_io_normal)
return -EIO;
v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
if (v != MBOX_OWNER_DRV)
return v ? -EBUSY : -ETIMEDOUT;
for (i = 0; i < size; i += 8)
t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
t4_write_reg(adap, ctl_reg, MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
t4_read_reg(adap, ctl_reg); /* flush write */
delay_idx = 0;
ms = delay[0];
for (i = 0; i < timeout; i += ms) {
if (sleep_ok) {
ms = delay[delay_idx]; /* last element may repeat */
if (delay_idx < ARRAY_SIZE(delay) - 1)
delay_idx++;
msleep(ms);
} else
mdelay(ms);
v = t4_read_reg(adap, ctl_reg);
if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
if (!(v & MBMSGVALID_F)) {
t4_write_reg(adap, ctl_reg, 0);
continue;
}
res = t4_read_reg64(adap, data_reg);
if (FW_CMD_OP_G(res >> 32) == FW_DEBUG_CMD) {
fw_asrt(adap, data_reg);
res = FW_CMD_RETVAL_V(EIO);
} else if (rpl) {
get_mbox_rpl(adap, rpl, size / 8, data_reg);
}
if (FW_CMD_RETVAL_G((int)res))
dump_mbox(adap, mbox, data_reg);
t4_write_reg(adap, ctl_reg, 0);
return -FW_CMD_RETVAL_G((int)res);
}
}
dump_mbox(adap, mbox, data_reg);
dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
*(const u8 *)cmd, mbox);
t4_report_fw_error(adap);
return -ETIMEDOUT;
}
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
void *rpl, bool sleep_ok)
{
return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, sleep_ok,
FW_CMD_MAX_TIMEOUT);
}
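/* Usage sketch (hedged; command and field names as in t4fw_api.h): callers
* build a firmware command in big-endian form and hand it to t4_wr_mbox(),
* the sleeping wrapper around t4_wr_mbox_meat() defined in cxgb4.h:
*
*	struct fw_reset_cmd c;
*	int ret;
*
*	memset(&c, 0, sizeof(c));
*	c.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RESET_CMD) |
*				    FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
*	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
*	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
*
* A negative return is either a transport failure (e.g. -ETIMEDOUT) or the
* firmware's own retval, negated.
*/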
/**
* t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
* @adap: the adapter
* @win: PCI-E Memory Window to use
* @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
* @addr: address within indicated memory type
* @len: amount of memory to transfer
* @hbuf: host memory buffer
* @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
*
* Reads/writes an [almost] arbitrary memory region in the firmware: the
* firmware memory address and host buffer must be aligned on 32-bit
* boundaries; the length may be arbitrary. The memory is transferred as
* a raw byte sequence from/to the firmware's memory. If this memory
* contains data structures which contain multi-byte integers, it's the
* caller's responsibility to perform appropriate byte order conversions.
*/
int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
u32 len, void *hbuf, int dir)
{
u32 pos, offset, resid, memoffset;
u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;
u32 *buf;
/* Argument sanity checks ...
*/
if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
return -EINVAL;
buf = (u32 *)hbuf;
/* It's convenient to be able to handle lengths which aren't a
* multiple of 32-bits because we often end up transferring files to
* the firmware. So we'll handle that by normalizing the length here
* and then handling any residual transfer at the end.
*/
resid = len & 0x3;
len -= resid;
/* Offset into the region of memory which is being accessed
* MEM_EDC0 = 0
* MEM_EDC1 = 1
* MEM_MC = 2 -- MEM_MC for chips with only 1 memory controller
* MEM_MC1 = 3 -- for chips with 2 memory controllers (e.g. T5)
*/
edc_size = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
if (mtype != MEM_MC1)
memoffset = (mtype * (edc_size * 1024 * 1024));
else {
mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
MA_EXT_MEMORY0_BAR_A));
memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
}
/* Determine the PCIE_MEM_ACCESS_OFFSET */
addr = addr + memoffset;
/* Each PCI-E Memory Window is programmed with a window size -- or
* "aperture" -- which controls the granularity of its mapping onto
* adapter memory. We need to grab that aperture in order to know
* how to use the specified window. The window is also programmed
* with the base address of the Memory Window in BAR0's address
* space. For T4 this is an absolute PCI-E Bus Address. For T5
* the address is relative to BAR0.
*/
mem_reg = t4_read_reg(adap,
PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A,
win));
mem_aperture = 1 << (WINDOW_G(mem_reg) + WINDOW_SHIFT_X);
mem_base = PCIEOFST_G(mem_reg) << PCIEOFST_SHIFT_X;
if (is_t4(adap->params.chip))
mem_base -= adap->t4_bar0;
win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->pf);
/* Calculate our initial PCI-E Memory Window Position and Offset into
* that Window.
*/
pos = addr & ~(mem_aperture-1);
offset = addr - pos;
/* Set up initial PCI-E Memory Window to cover the start of our
* transfer. (Read it back to ensure that changes propagate before we
* attempt to use the new value.)
*/
t4_write_reg(adap,
PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win),
pos | win_pf);
t4_read_reg(adap,
PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));
/* Transfer data to/from the adapter as long as there's an integral
* number of 32-bit transfers to complete.
*
* A note on Endianness issues:
*
* The "register" reads and writes below from/to the PCI-E Memory
* Window invoke the standard adapter Big-Endian to PCI-E Link
* Little-Endian "swizzel." As a result, if we have the following
* data in adapter memory:
*
* Memory: ... | b0 | b1 | b2 | b3 | ...
* Address: i+0 i+1 i+2 i+3
*
* Then a read of the adapter memory via the PCI-E Memory Window
* will yield:
*
* x = readl(i)
* 31 0
* [ b3 | b2 | b1 | b0 ]
*
* If this value is stored into local memory on a Little-Endian system
* it will show up correctly in local memory as:
*
* ( ..., b0, b1, b2, b3, ... )
*
* But on a Big-Endian system, the store will show up in memory
* incorrectly swizzled as:
*
* ( ..., b3, b2, b1, b0, ... )
*
* So we need to account for this in the reads and writes to the
* PCI-E Memory Window below by undoing the register read/write
* swizzles.
*/
while (len > 0) {
if (dir == T4_MEMORY_READ)
*buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap,
mem_base + offset));
else
t4_write_reg(adap, mem_base + offset,
(__force u32)cpu_to_le32(*buf++));
offset += sizeof(__be32);
len -= sizeof(__be32);
/* If we've reached the end of our current window aperture,
* move the PCI-E Memory Window on to the next. Note that
* doing this here, even when "len" has already reached 0,
* allows us to set up the PCI-E Memory Window for a
* possible final residual transfer below ...
*/
if (offset == mem_aperture) {
pos += mem_aperture;
offset = 0;
t4_write_reg(adap,
PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
win), pos | win_pf);
t4_read_reg(adap,
PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
win));
}
}
/* If the original transfer had a length which wasn't a multiple of
* 32-bits, now's where we need to finish off the transfer of the
* residual amount. The PCI-E Memory Window has already been moved
* above (if necessary) to cover this final transfer.
*/
if (resid) {
union {
u32 word;
char byte[4];
} last;
unsigned char *bp;
int i;
if (dir == T4_MEMORY_READ) {
last.word = le32_to_cpu(
(__force __le32)t4_read_reg(adap,
mem_base + offset));
/* Copy only the first "resid" bytes of the final word. */
for (bp = (unsigned char *)buf, i = 0; i < resid; i++)
bp[i] = last.byte[i];
} else {
last.word = *buf;
for (i = resid; i < 4; i++)
last.byte[i] = 0;
t4_write_reg(adap, mem_base + offset,
(__force u32)cpu_to_le32(last.word));
}
}
return 0;
}
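/* Worked example of the swizzle note above (illustrative): with adapter
* bytes b0 b1 b2 b3 at addresses i..i+3, t4_read_reg() yields the host
* value 0xb3b2b1b0; treating it as __le32 and applying le32_to_cpu() makes
* the subsequent store place b0 b1 b2 b3 in host memory in adapter order on
* little- and big-endian systems alike, which is exactly what the
* T4_MEMORY_READ path above relies on.
*/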
/* Return the specified PCI-E Configuration Space register from our Physical
* Function. We try first via a Firmware LDST Command since we prefer to let
* the firmware own all of these registers, but if that fails we go for it
* directly ourselves.
*/
u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
{
u32 val, ldst_addrspace;
/* Construct and send the Firmware LDST Command to retrieve the
* specified PCI-E Configuration Space register.
*/
struct fw_ldst_cmd ldst_cmd;
int ret;
memset(&ldst_cmd, 0, sizeof(ldst_cmd));
ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE);
ldst_cmd.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
FW_CMD_REQUEST_F |
FW_CMD_READ_F |
ldst_addrspace);
ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
ldst_cmd.u.pcie.ctrl_to_fn =
(FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->pf));
ldst_cmd.u.pcie.r = reg;
/* If the LDST Command succeeds, return the result, otherwise
* fall through to reading it directly ourselves ...
*/
ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
&ldst_cmd);
if (ret == 0)
val = be32_to_cpu(ldst_cmd.u.pcie.data[0]);
else
/* Read the desired Configuration Space register via the PCI-E
* Backdoor mechanism.
*/
t4_hw_pci_read_cfg4(adap, reg, &val);
return val;
}
/* Get the memory window base address corresponding to the PCI base
* register passed in. The window aperture is currently unhandled, but
* there is no use case for it right now.
*/
static u32 t4_get_window(struct adapter *adap, u32 pci_base, u64 pci_mask,
u32 memwin_base)
{
u32 ret;
if (is_t4(adap->params.chip)) {
u32 bar0;
/* Truncation intentional: we only read the bottom 32-bits of
* the 64-bit BAR0/BAR1 ... We use the hardware backdoor
* mechanism to read BAR0 instead of using
* pci_resource_start() because we could be operating from
* within a Virtual Machine which is trapping our accesses to
* our Configuration Space and we need to set up the PCI-E
* Memory Window decoders with the actual addresses which will
* be coming across the PCI-E link.
*/
bar0 = t4_read_pcie_cfg4(adap, pci_base);
bar0 &= pci_mask;
adap->t4_bar0 = bar0;
ret = bar0 + memwin_base;
} else {
/* For T5, only relative offset inside the PCIe BAR is passed */
ret = memwin_base;
}
return ret;
}
/* Get the default utility window (win0) used by everyone */
u32 t4_get_util_window(struct adapter *adap)
{
return t4_get_window(adap, PCI_BASE_ADDRESS_0,
PCI_BASE_ADDRESS_MEM_MASK, MEMWIN0_BASE);
}
/* Set up memory window for accessing adapter memory ranges. (Read
* back MA register to ensure that changes propagate before we attempt
* to use the new values.)
*/
void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window)
{
t4_write_reg(adap,
PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window),
memwin_base | BIR_V(0) |
WINDOW_V(ilog2(MEMWIN0_APERTURE) - WINDOW_SHIFT_X));
t4_read_reg(adap,
PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window));
}
/**
* t4_get_regs_len - return the size of the chip's register set
* @adapter: the adapter
*
* Returns the size of the chip's BAR0 register space.
*/
unsigned int t4_get_regs_len(struct adapter *adapter)
{
unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
switch (chip_version) {
case CHELSIO_T4:
return T4_REGMAP_SIZE;
case CHELSIO_T5:
case CHELSIO_T6:
return T5_REGMAP_SIZE;
}
dev_err(adapter->pdev_dev,
"Unsupported chip version %d\n", chip_version);
return 0;
}
/**
* t4_get_regs - read chip registers into provided buffer
* @adap: the adapter
* @buf: register buffer
* @buf_size: size (in bytes) of register buffer
*
* If the provided register buffer isn't large enough for the chip's
* full register range, the register dump will be truncated to the
* register buffer's size.
*/
void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
{
static const unsigned int t4_reg_ranges[] = {
0x1008, 0x1108,
0x1180, 0x11b4,
0x11fc, 0x123c,
0x1300, 0x173c,
0x1800, 0x18fc,
0x3000, 0x305c,
0x3068, 0x30d8,
0x30e0, 0x5924,
0x5960, 0x59d4,
0x5a00, 0x5af8,
0x6000, 0x6098,
0x6100, 0x6150,
0x6200, 0x6208,
0x6240, 0x6248,
0x6280, 0x6338,
0x6370, 0x638c,
0x6400, 0x643c,
0x6500, 0x6524,
0x6a00, 0x6a38,
0x6a60, 0x6a78,
0x6b00, 0x6b84,
0x6bf0, 0x6c84,
0x6cf0, 0x6d84,
0x6df0, 0x6e84,
0x6ef0, 0x6f84,
0x6ff0, 0x7084,
0x70f0, 0x7184,
0x71f0, 0x7284,
0x72f0, 0x7384,
0x73f0, 0x7450,
0x7500, 0x7530,
0x7600, 0x761c,
0x7680, 0x76cc,
0x7700, 0x7798,
0x77c0, 0x77fc,
0x7900, 0x79fc,
0x7b00, 0x7c38,
0x7d00, 0x7efc,
0x8dc0, 0x8e1c,
0x8e30, 0x8e78,
0x8ea0, 0x8f6c,
0x8fc0, 0x9074,
0x90fc, 0x90fc,
0x9400, 0x9458,
0x9600, 0x96bc,
0x9800, 0x9808,
0x9820, 0x983c,
0x9850, 0x9864,
0x9c00, 0x9c6c,
0x9c80, 0x9cec,
0x9d00, 0x9d6c,
0x9d80, 0x9dec,
0x9e00, 0x9e6c,
0x9e80, 0x9eec,
0x9f00, 0x9f6c,
0x9f80, 0x9fec,
0xd004, 0xd03c,
0xdfc0, 0xdfe0,
0xe000, 0xea7c,
0xf000, 0x11110,
0x11118, 0x11190,
0x19040, 0x1906c,
0x19078, 0x19080,
0x1908c, 0x19124,
0x19150, 0x191b0,
0x191d0, 0x191e8,
0x19238, 0x1924c,
0x193f8, 0x19474,
0x19490, 0x194f8,
0x19800, 0x19f4c,
0x1a000, 0x1a06c,
0x1a0b0, 0x1a120,
0x1a128, 0x1a138,
0x1a190, 0x1a1c4,
0x1a1fc, 0x1a1fc,
0x1e040, 0x1e04c,
0x1e284, 0x1e28c,
0x1e2c0, 0x1e2c0,
0x1e2e0, 0x1e2e0,
0x1e300, 0x1e384,
0x1e3c0, 0x1e3c8,
0x1e440, 0x1e44c,
0x1e684, 0x1e68c,
0x1e6c0, 0x1e6c0,
0x1e6e0, 0x1e6e0,
0x1e700, 0x1e784,
0x1e7c0, 0x1e7c8,
0x1e840, 0x1e84c,
0x1ea84, 0x1ea8c,
0x1eac0, 0x1eac0,
0x1eae0, 0x1eae0,
0x1eb00, 0x1eb84,
0x1ebc0, 0x1ebc8,
0x1ec40, 0x1ec4c,
0x1ee84, 0x1ee8c,
0x1eec0, 0x1eec0,
0x1eee0, 0x1eee0,
0x1ef00, 0x1ef84,
0x1efc0, 0x1efc8,
0x1f040, 0x1f04c,
0x1f284, 0x1f28c,
0x1f2c0, 0x1f2c0,
0x1f2e0, 0x1f2e0,
0x1f300, 0x1f384,
0x1f3c0, 0x1f3c8,
0x1f440, 0x1f44c,
0x1f684, 0x1f68c,
0x1f6c0, 0x1f6c0,
0x1f6e0, 0x1f6e0,
0x1f700, 0x1f784,
0x1f7c0, 0x1f7c8,
0x1f840, 0x1f84c,
0x1fa84, 0x1fa8c,
0x1fac0, 0x1fac0,
0x1fae0, 0x1fae0,
0x1fb00, 0x1fb84,
0x1fbc0, 0x1fbc8,
0x1fc40, 0x1fc4c,
0x1fe84, 0x1fe8c,
0x1fec0, 0x1fec0,
0x1fee0, 0x1fee0,
0x1ff00, 0x1ff84,
0x1ffc0, 0x1ffc8,
0x20000, 0x2002c,
0x20100, 0x2013c,
0x20190, 0x201c8,
0x20200, 0x20318,
0x20400, 0x20528,
0x20540, 0x20614,
0x21000, 0x21040,
0x2104c, 0x21060,
0x210c0, 0x210ec,
0x21200, 0x21268,
0x21270, 0x21284,
0x212fc, 0x21388,
0x21400, 0x21404,
0x21500, 0x21518,
0x2152c, 0x2153c,
0x21550, 0x21554,
0x21600, 0x21600,
0x21608, 0x21628,
0x21630, 0x2163c,
0x21700, 0x2171c,
0x21780, 0x2178c,
0x21800, 0x21c38,
0x21c80, 0x21d7c,
0x21e00, 0x21e04,
0x22000, 0x2202c,
0x22100, 0x2213c,
0x22190, 0x221c8,
0x22200, 0x22318,
0x22400, 0x22528,
0x22540, 0x22614,
0x23000, 0x23040,
0x2304c, 0x23060,
0x230c0, 0x230ec,
0x23200, 0x23268,
0x23270, 0x23284,
0x232fc, 0x23388,
0x23400, 0x23404,
0x23500, 0x23518,
0x2352c, 0x2353c,
0x23550, 0x23554,
0x23600, 0x23600,
0x23608, 0x23628,
0x23630, 0x2363c,
0x23700, 0x2371c,
0x23780, 0x2378c,
0x23800, 0x23c38,
0x23c80, 0x23d7c,
0x23e00, 0x23e04,
0x24000, 0x2402c,
0x24100, 0x2413c,
0x24190, 0x241c8,
0x24200, 0x24318,
0x24400, 0x24528,
0x24540, 0x24614,
0x25000, 0x25040,
0x2504c, 0x25060,
0x250c0, 0x250ec,
0x25200, 0x25268,
0x25270, 0x25284,
0x252fc, 0x25388,
0x25400, 0x25404,
0x25500, 0x25518,
0x2552c, 0x2553c,
0x25550, 0x25554,
0x25600, 0x25600,
0x25608, 0x25628,
0x25630, 0x2563c,
0x25700, 0x2571c,
0x25780, 0x2578c,
0x25800, 0x25c38,
0x25c80, 0x25d7c,
0x25e00, 0x25e04,
0x26000, 0x2602c,
0x26100, 0x2613c,
0x26190, 0x261c8,
0x26200, 0x26318,
0x26400, 0x26528,
0x26540, 0x26614,
0x27000, 0x27040,
0x2704c, 0x27060,
0x270c0, 0x270ec,
0x27200, 0x27268,
0x27270, 0x27284,
0x272fc, 0x27388,
0x27400, 0x27404,
0x27500, 0x27518,
0x2752c, 0x2753c,
0x27550, 0x27554,
0x27600, 0x27600,
0x27608, 0x27628,
0x27630, 0x2763c,
0x27700, 0x2771c,
0x27780, 0x2778c,
0x27800, 0x27c38,
0x27c80, 0x27d7c,
0x27e00, 0x27e04,
};
static const unsigned int t5_reg_ranges[] = {
0x1008, 0x1148,
0x1180, 0x11b4,
0x11fc, 0x123c,
0x1280, 0x173c,
0x1800, 0x18fc,
0x3000, 0x3028,
0x3068, 0x30d8,
0x30e0, 0x30fc,
0x3140, 0x357c,
0x35a8, 0x35cc,
0x35ec, 0x35ec,
0x3600, 0x5624,
0x56cc, 0x575c,
0x580c, 0x5814,
0x5890, 0x58bc,
0x5940, 0x59dc,
0x59fc, 0x5a18,
0x5a60, 0x5a9c,
0x5b94, 0x5bfc,
0x6000, 0x6040,
0x6058, 0x614c,
0x7700, 0x7798,
0x77c0, 0x78fc,
0x7b00, 0x7c54,
0x7d00, 0x7efc,
0x8dc0, 0x8de0,
0x8df8, 0x8e84,
0x8ea0, 0x8f84,
0x8fc0, 0x90f8,
0x9400, 0x9470,
0x9600, 0x96f4,
0x9800, 0x9808,
0x9820, 0x983c,
0x9850, 0x9864,
0x9c00, 0x9c6c,
0x9c80, 0x9cec,
0x9d00, 0x9d6c,
0x9d80, 0x9dec,
0x9e00, 0x9e6c,
0x9e80, 0x9eec,
0x9f00, 0x9f6c,
0x9f80, 0xa020,
0xd004, 0xd03c,
0xdfc0, 0xdfe0,
0xe000, 0x11088,
0x1109c, 0x11110,
0x11118, 0x1117c,
0x11190, 0x11204,
0x19040, 0x1906c,
0x19078, 0x19080,
0x1908c, 0x19124,
0x19150, 0x191b0,
0x191d0, 0x191e8,
0x19238, 0x19290,
0x193f8, 0x19474,
0x19490, 0x194cc,
0x194f0, 0x194f8,
0x19c00, 0x19c60,
0x19c94, 0x19e10,
0x19e50, 0x19f34,
0x19f40, 0x19f50,
0x19f90, 0x19fe4,
0x1a000, 0x1a06c,
0x1a0b0, 0x1a120,
0x1a128, 0x1a138,
0x1a190, 0x1a1c4,
0x1a1fc, 0x1a1fc,
0x1e008, 0x1e00c,
0x1e040, 0x1e04c,
0x1e284, 0x1e290,
0x1e2c0, 0x1e2c0,
0x1e2e0, 0x1e2e0,
0x1e300, 0x1e384,
0x1e3c0, 0x1e3c8,
0x1e408, 0x1e40c,
0x1e440, 0x1e44c,
0x1e684, 0x1e690,
0x1e6c0, 0x1e6c0,
0x1e6e0, 0x1e6e0,
0x1e700, 0x1e784,
0x1e7c0, 0x1e7c8,
0x1e808, 0x1e80c,
0x1e840, 0x1e84c,
0x1ea84, 0x1ea90,
0x1eac0, 0x1eac0,
0x1eae0, 0x1eae0,
0x1eb00, 0x1eb84,
0x1ebc0, 0x1ebc8,
0x1ec08, 0x1ec0c,
0x1ec40, 0x1ec4c,
0x1ee84, 0x1ee90,
0x1eec0, 0x1eec0,
0x1eee0, 0x1eee0,
0x1ef00, 0x1ef84,
0x1efc0, 0x1efc8,
0x1f008, 0x1f00c,
0x1f040, 0x1f04c,
0x1f284, 0x1f290,
0x1f2c0, 0x1f2c0,
0x1f2e0, 0x1f2e0,
0x1f300, 0x1f384,
0x1f3c0, 0x1f3c8,
0x1f408, 0x1f40c,
0x1f440, 0x1f44c,
0x1f684, 0x1f690,
0x1f6c0, 0x1f6c0,
0x1f6e0, 0x1f6e0,
0x1f700, 0x1f784,
0x1f7c0, 0x1f7c8,
0x1f808, 0x1f80c,
0x1f840, 0x1f84c,
0x1fa84, 0x1fa90,
0x1fac0, 0x1fac0,
0x1fae0, 0x1fae0,
0x1fb00, 0x1fb84,
0x1fbc0, 0x1fbc8,
0x1fc08, 0x1fc0c,
0x1fc40, 0x1fc4c,
0x1fe84, 0x1fe90,
0x1fec0, 0x1fec0,
0x1fee0, 0x1fee0,
0x1ff00, 0x1ff84,
0x1ffc0, 0x1ffc8,
0x30000, 0x30030,
0x30100, 0x30144,
0x30190, 0x301d0,
0x30200, 0x30318,
0x30400, 0x3052c,
0x30540, 0x3061c,
0x30800, 0x30834,
0x308c0, 0x30908,
0x30910, 0x309ac,
0x30a00, 0x30a2c,
0x30a44, 0x30a50,
0x30a74, 0x30c24,
0x30d00, 0x30d00,
0x30d08, 0x30d14,
0x30d1c, 0x30d20,
0x30d3c, 0x30d50,
0x31200, 0x3120c,
0x31220, 0x31220,
0x31240, 0x31240,
0x31600, 0x3160c,
0x31a00, 0x31a1c,
0x31e00, 0x31e20,
0x31e38, 0x31e3c,
0x31e80, 0x31e80,
0x31e88, 0x31ea8,
0x31eb0, 0x31eb4,
0x31ec8, 0x31ed4,
0x31fb8, 0x32004,
0x32200, 0x32200,
0x32208, 0x32240,
0x32248, 0x32280,
0x32288, 0x322c0,
0x322c8, 0x322fc,
0x32600, 0x32630,
0x32a00, 0x32abc,
0x32b00, 0x32b70,
0x33000, 0x33048,
0x33060, 0x3309c,
0x330f0, 0x33148,
0x33160, 0x3319c,
0x331f0, 0x332e4,
0x332f8, 0x333e4,
0x333f8, 0x33448,
0x33460, 0x3349c,
0x334f0, 0x33548,
0x33560, 0x3359c,
0x335f0, 0x336e4,
0x336f8, 0x337e4,
0x337f8, 0x337fc,
0x33814, 0x33814,
0x3382c, 0x3382c,
0x33880, 0x3388c,
0x338e8, 0x338ec,
0x33900, 0x33948,
0x33960, 0x3399c,
0x339f0, 0x33ae4,
0x33af8, 0x33b10,
0x33b28, 0x33b28,
0x33b3c, 0x33b50,
0x33bf0, 0x33c10,
0x33c28, 0x33c28,
0x33c3c, 0x33c50,
0x33cf0, 0x33cfc,
0x34000, 0x34030,
0x34100, 0x34144,
0x34190, 0x341d0,
0x34200, 0x34318,
0x34400, 0x3452c,
0x34540, 0x3461c,
0x34800, 0x34834,
0x348c0, 0x34908,
0x34910, 0x349ac,
0x34a00, 0x34a2c,
0x34a44, 0x34a50,
0x34a74, 0x34c24,
0x34d00, 0x34d00,
0x34d08, 0x34d14,
0x34d1c, 0x34d20,
0x34d3c, 0x34d50,
0x35200, 0x3520c,
0x35220, 0x35220,
0x35240, 0x35240,
0x35600, 0x3560c,
0x35a00, 0x35a1c,
0x35e00, 0x35e20,
0x35e38, 0x35e3c,
0x35e80, 0x35e80,
0x35e88, 0x35ea8,
0x35eb0, 0x35eb4,
0x35ec8, 0x35ed4,
0x35fb8, 0x36004,
0x36200, 0x36200,
0x36208, 0x36240,
0x36248, 0x36280,
0x36288, 0x362c0,
0x362c8, 0x362fc,
0x36600, 0x36630,
0x36a00, 0x36abc,
0x36b00, 0x36b70,
0x37000, 0x37048,
0x37060, 0x3709c,
0x370f0, 0x37148,
0x37160, 0x3719c,
0x371f0, 0x372e4,
0x372f8, 0x373e4,
0x373f8, 0x37448,
0x37460, 0x3749c,
0x374f0, 0x37548,
0x37560, 0x3759c,
0x375f0, 0x376e4,
0x376f8, 0x377e4,
0x377f8, 0x377fc,
0x37814, 0x37814,
0x3782c, 0x3782c,
0x37880, 0x3788c,
0x378e8, 0x378ec,
0x37900, 0x37948,
0x37960, 0x3799c,
0x379f0, 0x37ae4,
0x37af8, 0x37b10,
0x37b28, 0x37b28,
0x37b3c, 0x37b50,
0x37bf0, 0x37c10,
0x37c28, 0x37c28,
0x37c3c, 0x37c50,
0x37cf0, 0x37cfc,
0x38000, 0x38030,
0x38100, 0x38144,
0x38190, 0x381d0,
0x38200, 0x38318,
0x38400, 0x3852c,
0x38540, 0x3861c,
0x38800, 0x38834,
0x388c0, 0x38908,
0x38910, 0x389ac,
0x38a00, 0x38a2c,
0x38a44, 0x38a50,
0x38a74, 0x38c24,
0x38d00, 0x38d00,
0x38d08, 0x38d14,
0x38d1c, 0x38d20,
0x38d3c, 0x38d50,
0x39200, 0x3920c,
0x39220, 0x39220,
0x39240, 0x39240,
0x39600, 0x3960c,
0x39a00, 0x39a1c,
0x39e00, 0x39e20,
0x39e38, 0x39e3c,
0x39e80, 0x39e80,
0x39e88, 0x39ea8,
0x39eb0, 0x39eb4,
0x39ec8, 0x39ed4,
0x39fb8, 0x3a004,
0x3a200, 0x3a200,
0x3a208, 0x3a240,
0x3a248, 0x3a280,
0x3a288, 0x3a2c0,
0x3a2c8, 0x3a2fc,
0x3a600, 0x3a630,
0x3aa00, 0x3aabc,
0x3ab00, 0x3ab70,
0x3b000, 0x3b048,
0x3b060, 0x3b09c,
0x3b0f0, 0x3b148,
0x3b160, 0x3b19c,
0x3b1f0, 0x3b2e4,
0x3b2f8, 0x3b3e4,
0x3b3f8, 0x3b448,
0x3b460, 0x3b49c,
0x3b4f0, 0x3b548,
0x3b560, 0x3b59c,
0x3b5f0, 0x3b6e4,
0x3b6f8, 0x3b7e4,
0x3b7f8, 0x3b7fc,
0x3b814, 0x3b814,
0x3b82c, 0x3b82c,
0x3b880, 0x3b88c,
0x3b8e8, 0x3b8ec,
0x3b900, 0x3b948,
0x3b960, 0x3b99c,
0x3b9f0, 0x3bae4,
0x3baf8, 0x3bb10,
0x3bb28, 0x3bb28,
0x3bb3c, 0x3bb50,
0x3bbf0, 0x3bc10,
0x3bc28, 0x3bc28,
0x3bc3c, 0x3bc50,
0x3bcf0, 0x3bcfc,
0x3c000, 0x3c030,
0x3c100, 0x3c144,
0x3c190, 0x3c1d0,
0x3c200, 0x3c318,
0x3c400, 0x3c52c,
0x3c540, 0x3c61c,
0x3c800, 0x3c834,
0x3c8c0, 0x3c908,
0x3c910, 0x3c9ac,
0x3ca00, 0x3ca2c,
0x3ca44, 0x3ca50,
0x3ca74, 0x3cc24,
0x3cd00, 0x3cd00,
0x3cd08, 0x3cd14,
0x3cd1c, 0x3cd20,
0x3cd3c, 0x3cd50,
0x3d200, 0x3d20c,
0x3d220, 0x3d220,
0x3d240, 0x3d240,
0x3d600, 0x3d60c,
0x3da00, 0x3da1c,
0x3de00, 0x3de20,
0x3de38, 0x3de3c,
0x3de80, 0x3de80,
0x3de88, 0x3dea8,
0x3deb0, 0x3deb4,
0x3dec8, 0x3ded4,
0x3dfb8, 0x3e004,
0x3e200, 0x3e200,
0x3e208, 0x3e240,
0x3e248, 0x3e280,
0x3e288, 0x3e2c0,
0x3e2c8, 0x3e2fc,
0x3e600, 0x3e630,
0x3ea00, 0x3eabc,
0x3eb00, 0x3eb70,
0x3f000, 0x3f048,
0x3f060, 0x3f09c,
0x3f0f0, 0x3f148,
0x3f160, 0x3f19c,
0x3f1f0, 0x3f2e4,
0x3f2f8, 0x3f3e4,
0x3f3f8, 0x3f448,
0x3f460, 0x3f49c,
0x3f4f0, 0x3f548,
0x3f560, 0x3f59c,
0x3f5f0, 0x3f6e4,
0x3f6f8, 0x3f7e4,
0x3f7f8, 0x3f7fc,
0x3f814, 0x3f814,
0x3f82c, 0x3f82c,
0x3f880, 0x3f88c,
0x3f8e8, 0x3f8ec,
0x3f900, 0x3f948,
0x3f960, 0x3f99c,
0x3f9f0, 0x3fae4,
0x3faf8, 0x3fb10,
0x3fb28, 0x3fb28,
0x3fb3c, 0x3fb50,
0x3fbf0, 0x3fc10,
0x3fc28, 0x3fc28,
0x3fc3c, 0x3fc50,
0x3fcf0, 0x3fcfc,
0x40000, 0x4000c,
0x40040, 0x40068,
0x4007c, 0x40144,
0x40180, 0x4018c,
0x40200, 0x40298,
0x402ac, 0x4033c,
0x403f8, 0x403fc,
0x41304, 0x413c4,
0x41400, 0x4141c,
0x41480, 0x414d0,
0x44000, 0x44078,
0x440c0, 0x44278,
0x442c0, 0x44478,
0x444c0, 0x44678,
0x446c0, 0x44878,
0x448c0, 0x449fc,
0x45000, 0x45068,
0x45080, 0x45084,
0x450a0, 0x450b0,
0x45200, 0x45268,
0x45280, 0x45284,
0x452a0, 0x452b0,
0x460c0, 0x460e4,
0x47000, 0x4708c,
0x47200, 0x47250,
0x47400, 0x47420,
0x47600, 0x47618,
0x47800, 0x47814,
0x48000, 0x4800c,
0x48040, 0x48068,
0x4807c, 0x48144,
0x48180, 0x4818c,
0x48200, 0x48298,
0x482ac, 0x4833c,
0x483f8, 0x483fc,
0x49304, 0x493c4,
0x49400, 0x4941c,
0x49480, 0x494d0,
0x4c000, 0x4c078,
0x4c0c0, 0x4c278,
0x4c2c0, 0x4c478,
0x4c4c0, 0x4c678,
0x4c6c0, 0x4c878,
0x4c8c0, 0x4c9fc,
0x4d000, 0x4d068,
0x4d080, 0x4d084,
0x4d0a0, 0x4d0b0,
0x4d200, 0x4d268,
0x4d280, 0x4d284,
0x4d2a0, 0x4d2b0,
0x4e0c0, 0x4e0e4,
0x4f000, 0x4f08c,
0x4f200, 0x4f250,
0x4f400, 0x4f420,
0x4f600, 0x4f618,
0x4f800, 0x4f814,
0x50000, 0x500cc,
0x50400, 0x50400,
0x50800, 0x508cc,
0x50c00, 0x50c00,
0x51000, 0x5101c,
0x51300, 0x51308,
};
static const unsigned int t6_reg_ranges[] = {
0x1008, 0x114c,
0x1180, 0x11b4,
0x11fc, 0x1250,
0x1280, 0x133c,
0x1800, 0x18fc,
0x3000, 0x302c,
0x3060, 0x30d8,
0x30e0, 0x30fc,
0x3140, 0x357c,
0x35a8, 0x35cc,
0x35ec, 0x35ec,
0x3600, 0x5624,
0x56cc, 0x575c,
0x580c, 0x5814,
0x5890, 0x58bc,
0x5940, 0x595c,
0x5980, 0x598c,
0x59b0, 0x59dc,
0x59fc, 0x5a18,
0x5a60, 0x5a6c,
0x5a80, 0x5a9c,
0x5b94, 0x5bfc,
0x5c10, 0x5ec0,
0x5ec8, 0x5ec8,
0x6000, 0x6040,
0x6058, 0x6154,
0x7700, 0x7798,
0x77c0, 0x7880,
0x78cc, 0x78fc,
0x7b00, 0x7c54,
0x7d00, 0x7efc,
0x8dc0, 0x8de0,
0x8df8, 0x8e84,
0x8ea0, 0x8f88,
0x8fb8, 0x911c,
0x9400, 0x9470,
0x9600, 0x971c,
0x9800, 0x9808,
0x9820, 0x983c,
0x9850, 0x9864,
0x9c00, 0x9c6c,
0x9c80, 0x9cec,
0x9d00, 0x9d6c,
0x9d80, 0x9dec,
0x9e00, 0x9e6c,
0x9e80, 0x9eec,
0x9f00, 0x9f6c,
0x9f80, 0xa020,
0xd004, 0xd03c,
0xdfc0, 0xdfe0,
0xe000, 0xf008,
0x11000, 0x11014,
0x11048, 0x11110,
0x11118, 0x1117c,
0x11190, 0x11260,
0x11300, 0x1130c,
0x12000, 0x1205c,
0x19040, 0x1906c,
0x19078, 0x19080,
0x1908c, 0x19124,
0x19150, 0x191b0,
0x191d0, 0x191e8,
0x19238, 0x192b8,
0x193f8, 0x19474,
0x19490, 0x194cc,
0x194f0, 0x194f8,
0x19c00, 0x19c80,
0x19c94, 0x19cbc,
0x19ce4, 0x19d28,
0x19d50, 0x19d78,
0x19d94, 0x19dc8,
0x19df0, 0x19e10,
0x19e50, 0x19e6c,
0x19ea0, 0x19f34,
0x19f40, 0x19f50,
0x19f90, 0x19fac,
0x19fc4, 0x19fe4,
0x1a000, 0x1a06c,
0x1a0b0, 0x1a120,
0x1a128, 0x1a138,
0x1a190, 0x1a1c4,
0x1a1fc, 0x1a1fc,
0x1e008, 0x1e00c,
0x1e040, 0x1e04c,
0x1e284, 0x1e290,
0x1e2c0, 0x1e2c0,
0x1e2e0, 0x1e2e0,
0x1e300, 0x1e384,
0x1e3c0, 0x1e3c8,
0x1e408, 0x1e40c,
0x1e440, 0x1e44c,
0x1e684, 0x1e690,
0x1e6c0, 0x1e6c0,
0x1e6e0, 0x1e6e0,
0x1e700, 0x1e784,
0x1e7c0, 0x1e7c8,
0x1e808, 0x1e80c,
0x1e840, 0x1e84c,
0x1ea84, 0x1ea90,
0x1eac0, 0x1eac0,
0x1eae0, 0x1eae0,
0x1eb00, 0x1eb84,
0x1ebc0, 0x1ebc8,
0x1ec08, 0x1ec0c,
0x1ec40, 0x1ec4c,
0x1ee84, 0x1ee90,
0x1eec0, 0x1eec0,
0x1eee0, 0x1eee0,
0x1ef00, 0x1ef84,
0x1efc0, 0x1efc8,
0x1f008, 0x1f00c,
0x1f040, 0x1f04c,
0x1f284, 0x1f290,
0x1f2c0, 0x1f2c0,
0x1f2e0, 0x1f2e0,
0x1f300, 0x1f384,
0x1f3c0, 0x1f3c8,
0x1f408, 0x1f40c,
0x1f440, 0x1f44c,
0x1f684, 0x1f690,
0x1f6c0, 0x1f6c0,
0x1f6e0, 0x1f6e0,
0x1f700, 0x1f784,
0x1f7c0, 0x1f7c8,
0x1f808, 0x1f80c,
0x1f840, 0x1f84c,
0x1fa84, 0x1fa90,
0x1fac0, 0x1fac0,
0x1fae0, 0x1fae0,
0x1fb00, 0x1fb84,
0x1fbc0, 0x1fbc8,
0x1fc08, 0x1fc0c,
0x1fc40, 0x1fc4c,
0x1fe84, 0x1fe90,
0x1fec0, 0x1fec0,
0x1fee0, 0x1fee0,
0x1ff00, 0x1ff84,
0x1ffc0, 0x1ffc8,
0x30000, 0x30070,
0x30100, 0x3015c,
0x30190, 0x301d0,
0x30200, 0x30318,
0x30400, 0x3052c,
0x30540, 0x3061c,
0x30800, 0x3088c,
0x308c0, 0x30908,
0x30910, 0x309b8,
0x30a00, 0x30a04,
0x30a0c, 0x30a2c,
0x30a44, 0x30a50,
0x30a74, 0x30c24,
0x30d00, 0x30d3c,
0x30d44, 0x30d7c,
0x30de0, 0x30de0,
0x30e00, 0x30ed4,
0x30f00, 0x30fa4,
0x30fc0, 0x30fc4,
0x31000, 0x31004,
0x31080, 0x310fc,
0x31208, 0x31220,
0x3123c, 0x31254,
0x31300, 0x31300,
0x31308, 0x3131c,
0x31338, 0x3133c,
0x31380, 0x31380,
0x31388, 0x313a8,
0x313b4, 0x313b4,
0x31400, 0x31420,
0x31438, 0x3143c,
0x31480, 0x31480,
0x314a8, 0x314a8,
0x314b0, 0x314b4,
0x314c8, 0x314d4,
0x31a40, 0x31a4c,
0x31af0, 0x31b20,
0x31b38, 0x31b3c,
0x31b80, 0x31b80,
0x31ba8, 0x31ba8,
0x31bb0, 0x31bb4,
0x31bc8, 0x31bd4,
0x32140, 0x3218c,
0x321f0, 0x32200,
0x32218, 0x32218,
0x32400, 0x32400,
0x32408, 0x3241c,
0x32618, 0x32620,
0x32664, 0x32664,
0x326a8, 0x326a8,
0x326ec, 0x326ec,
0x32a00, 0x32abc,
0x32b00, 0x32b78,
0x32c00, 0x32c00,
0x32c08, 0x32c3c,
0x32e00, 0x32e2c,
0x32f00, 0x32f2c,
0x33000, 0x330ac,
0x330c0, 0x331ac,
0x331c0, 0x332c4,
0x332e4, 0x333c4,
0x333e4, 0x334ac,
0x334c0, 0x335ac,
0x335c0, 0x336c4,
0x336e4, 0x337c4,
0x337e4, 0x337fc,
0x33814, 0x33814,
0x33854, 0x33868,
0x33880, 0x3388c,
0x338c0, 0x338d0,
0x338e8, 0x338ec,
0x33900, 0x339ac,
0x339c0, 0x33ac4,
0x33ae4, 0x33b10,
0x33b24, 0x33b50,
0x33bf0, 0x33c10,
0x33c24, 0x33c50,
0x33cf0, 0x33cfc,
0x34000, 0x34070,
0x34100, 0x3415c,
0x34190, 0x341d0,
0x34200, 0x34318,
0x34400, 0x3452c,
0x34540, 0x3461c,
0x34800, 0x3488c,
0x348c0, 0x34908,
0x34910, 0x349b8,
0x34a00, 0x34a04,
0x34a0c, 0x34a2c,
0x34a44, 0x34a50,
0x34a74, 0x34c24,
0x34d00, 0x34d3c,
0x34d44, 0x34d7c,
0x34de0, 0x34de0,
0x34e00, 0x34ed4,
0x34f00, 0x34fa4,
0x34fc0, 0x34fc4,
0x35000, 0x35004,
0x35080, 0x350fc,
0x35208, 0x35220,
0x3523c, 0x35254,
0x35300, 0x35300,
0x35308, 0x3531c,
0x35338, 0x3533c,
0x35380, 0x35380,
0x35388, 0x353a8,
0x353b4, 0x353b4,
0x35400, 0x35420,
0x35438, 0x3543c,
0x35480, 0x35480,
0x354a8, 0x354a8,
0x354b0, 0x354b4,
0x354c8, 0x354d4,
0x35a40, 0x35a4c,
0x35af0, 0x35b20,
0x35b38, 0x35b3c,
0x35b80, 0x35b80,
0x35ba8, 0x35ba8,
0x35bb0, 0x35bb4,
0x35bc8, 0x35bd4,
0x36140, 0x3618c,
0x361f0, 0x36200,
0x36218, 0x36218,
0x36400, 0x36400,
0x36408, 0x3641c,
0x36618, 0x36620,
0x36664, 0x36664,
0x366a8, 0x366a8,
0x366ec, 0x366ec,
0x36a00, 0x36abc,
0x36b00, 0x36b78,
0x36c00, 0x36c00,
0x36c08, 0x36c3c,
0x36e00, 0x36e2c,
0x36f00, 0x36f2c,
0x37000, 0x370ac,
0x370c0, 0x371ac,
0x371c0, 0x372c4,
0x372e4, 0x373c4,
0x373e4, 0x374ac,
0x374c0, 0x375ac,
0x375c0, 0x376c4,
0x376e4, 0x377c4,
0x377e4, 0x377fc,
0x37814, 0x37814,
0x37854, 0x37868,
0x37880, 0x3788c,
0x378c0, 0x378d0,
0x378e8, 0x378ec,
0x37900, 0x379ac,
0x379c0, 0x37ac4,
0x37ae4, 0x37b10,
0x37b24, 0x37b50,
0x37bf0, 0x37c10,
0x37c24, 0x37c50,
0x37cf0, 0x37cfc,
0x40040, 0x40040,
0x40080, 0x40084,
0x40100, 0x40100,
0x40140, 0x401bc,
0x40200, 0x40214,
0x40228, 0x40228,
0x40240, 0x40258,
0x40280, 0x40280,
0x40304, 0x40304,
0x40330, 0x4033c,
0x41304, 0x413dc,
0x41400, 0x4141c,
0x41480, 0x414d0,
0x44000, 0x4407c,
0x440c0, 0x4427c,
0x442c0, 0x4447c,
0x444c0, 0x4467c,
0x446c0, 0x4487c,
0x448c0, 0x44a7c,
0x44ac0, 0x44c7c,
0x44cc0, 0x44e7c,
0x44ec0, 0x4507c,
0x450c0, 0x451fc,
0x45800, 0x45868,
0x45880, 0x45884,
0x458a0, 0x458b0,
0x45a00, 0x45a68,
0x45a80, 0x45a84,
0x45aa0, 0x45ab0,
0x460c0, 0x460e4,
0x47000, 0x4708c,
0x47200, 0x47250,
0x47400, 0x47420,
0x47600, 0x47618,
0x47800, 0x4782c,
0x50000, 0x500cc,
0x50400, 0x50400,
0x50800, 0x508cc,
0x50c00, 0x50c00,
0x51000, 0x510b0,
0x51300, 0x51324,
};
u32 *buf_end = (u32 *)((char *)buf + buf_size);
const unsigned int *reg_ranges;
int reg_ranges_size, range;
unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
/* Select the right set of register ranges to dump depending on the
* adapter chip type.
*/
switch (chip_version) {
case CHELSIO_T4:
reg_ranges = t4_reg_ranges;
reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
break;
case CHELSIO_T5:
reg_ranges = t5_reg_ranges;
reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
break;
case CHELSIO_T6:
reg_ranges = t6_reg_ranges;
reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
break;
default:
dev_err(adap->pdev_dev,
"Unsupported chip version %d\n", chip_version);
return;
}
/* Clear the register buffer and insert the appropriate register
* values selected by the above register ranges.
*/
memset(buf, 0, buf_size);
for (range = 0; range < reg_ranges_size; range += 2) {
unsigned int reg = reg_ranges[range];
unsigned int last_reg = reg_ranges[range + 1];
u32 *bufp = (u32 *)((char *)buf + reg);
/* Iterate across the register range filling in the register
* buffer but don't write past the end of the register buffer.
*/
while (reg <= last_reg && bufp < buf_end) {
*bufp++ = t4_read_reg(adap, reg);
reg += sizeof(u32);
}
}
}
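/* Usage sketch (illustrative): an ethtool-style caller sizes the dump with
* t4_get_regs_len() before filling it, e.g.
*
*	size_t len = t4_get_regs_len(adap);
*	void *regs = vzalloc(len);
*
*	if (regs) {
*		t4_get_regs(adap, regs, len);
*		... hand the buffer to userspace ...
*		vfree(regs);
*	}
*/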
#define EEPROM_STAT_ADDR 0x7bfc
#define VPD_BASE 0x400
#define VPD_BASE_OLD 0
#define VPD_LEN 1024
#define CHELSIO_VPD_UNIQUE_ID 0x82
/**
* t4_seeprom_wp - enable/disable EEPROM write protection
* @adapter: the adapter
* @enable: whether to enable or disable write protection
*
* Enables or disables write protection on the serial EEPROM.
*/
int t4_seeprom_wp(struct adapter *adapter, bool enable)
{
unsigned int v = enable ? 0xc : 0;
int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
return ret < 0 ? ret : 0;
}
/**
* t4_get_raw_vpd_params - read VPD parameters from VPD EEPROM
* @adapter: adapter to read
* @p: where to store the parameters
*
* Reads card parameters stored in VPD EEPROM.
*/
int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
int i, ret = 0, addr;
int ec, sn, pn, na;
u8 *vpd, csum;
unsigned int vpdr_len, kw_offset, id_len;
vpd = vmalloc(VPD_LEN);
if (!vpd)
return -ENOMEM;
/* Card information normally starts at VPD_BASE but early cards had
* it at 0.
*/
ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
if (ret < 0)
goto out;
/* The VPD shall have a unique identifier specified by the PCI SIG. For
* Chelsio adapters this is CHELSIO_VPD_UNIQUE_ID (0x82), which must be the
* first byte of the VPD; the VPD programming software is expected to put
* this entry at the beginning of the VPD automatically.
*/
addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
if (ret < 0)
goto out;
if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
dev_err(adapter->pdev_dev, "missing VPD ID string\n");
ret = -EINVAL;
goto out;
}
id_len = pci_vpd_lrdt_size(vpd);
if (id_len > ID_LEN)
id_len = ID_LEN;
i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
if (i < 0) {
dev_err(adapter->pdev_dev, "missing VPD-R section\n");
ret = -EINVAL;
goto out;
}
vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
if (vpdr_len + kw_offset > VPD_LEN) {
dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
ret = -EINVAL;
goto out;
}
#define FIND_VPD_KW(var, name) do { \
var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
if (var < 0) { \
dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
ret = -EINVAL; \
goto out; \
} \
var += PCI_VPD_INFO_FLD_HDR_SIZE; \
} while (0)
FIND_VPD_KW(i, "RV");
for (csum = 0; i >= 0; i--)
csum += vpd[i];
if (csum) {
dev_err(adapter->pdev_dev,
"corrupted VPD EEPROM, actual csum %u\n", csum);
ret = -EINVAL;
goto out;
}
FIND_VPD_KW(ec, "EC");
FIND_VPD_KW(sn, "SN");
FIND_VPD_KW(pn, "PN");
FIND_VPD_KW(na, "NA");
#undef FIND_VPD_KW
memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
strim(p->id);
memcpy(p->ec, vpd + ec, EC_LEN);
strim(p->ec);
i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
strim(p->sn);
i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
memcpy(p->pn, vpd + pn, min(i, PN_LEN));
strim(p->pn);
memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
strim((char *)p->na);
out:
vfree(vpd);
return ret;
}
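/* Note on the "RV" handling above (explanatory): the VPD-R RV keyword
* stores a checksum byte chosen so that the byte sum of the VPD from
* offset 0 up to and including the RV byte is zero. Accumulating vpd[i]
* from the RV byte back down to index 0 and testing for a non-zero csum is
* therefore a complete corruption check for everything before it.
*/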
/**
* t4_get_vpd_params - read VPD parameters & retrieve Core Clock
* @adapter: adapter to read
* @p: where to store the parameters
*
* Reads card parameters stored in VPD EEPROM and retrieves the Core
* Clock. This can only be called after a connection to the firmware
* is established.
*/
int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
u32 cclk_param, cclk_val;
int ret;
/* Grab the raw VPD parameters.
*/
ret = t4_get_raw_vpd_params(adapter, p);
if (ret)
return ret;
/* Ask firmware for the Core Clock since it knows how to translate the
* Reference Clock ('V2') VPD field into a Core Clock value ...
*/
cclk_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
1, &cclk_param, &cclk_val);
if (ret)
return ret;
p->cclk = cclk_val;
return 0;
}
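/* Parameter-encoding sketch (explanatory): firmware "device parameters"
* such as the Core Clock above are addressed by packing a mnemonic and
* X/Y/Z selectors into one 32-bit word, e.g.
*
*	param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
*		FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK);
*
* t4_query_params() then returns one value per packed parameter word.
*/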
/* serial flash and firmware constants */
enum {
SF_ATTEMPTS = 10, /* max retries for SF operations */
/* flash command opcodes */
SF_PROG_PAGE = 2, /* program page */
SF_WR_DISABLE = 4, /* disable writes */
SF_RD_STATUS = 5, /* read status register */
SF_WR_ENABLE = 6, /* enable writes */
SF_RD_DATA_FAST = 0xb, /* read flash */
SF_RD_ID = 0x9f, /* read ID */
SF_ERASE_SECTOR = 0xd8, /* erase sector */
FW_MAX_SIZE = 16 * SF_SEC_SIZE,
};
/**
* sf1_read - read data from the serial flash
* @adapter: the adapter
* @byte_cnt: number of bytes to read
* @cont: whether another operation will be chained
* @lock: whether to lock SF for PL access only
* @valp: where to store the read data
*
* Reads up to 4 bytes of data from the serial flash. The location of
* the read needs to be specified prior to calling this by issuing the
* appropriate commands to the serial flash.
*/
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
int lock, u32 *valp)
{
int ret;
if (!byte_cnt || byte_cnt > 4)
return -EINVAL;
if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
return -EBUSY;
t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1));
ret = t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
if (!ret)
*valp = t4_read_reg(adapter, SF_DATA_A);
return ret;
}
/**
* sf1_write - write data to the serial flash
* @adapter: the adapter
* @byte_cnt: number of bytes to write
* @cont: whether another operation will be chained
* @lock: whether to lock SF for PL access only
* @val: value to write
*
* Writes up to 4 bytes of data to the serial flash. The location of
* the write needs to be specified prior to calling this by issuing the
* appropriate commands to the serial flash.
*/
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
int lock, u32 val)
{
if (!byte_cnt || byte_cnt > 4)
return -EINVAL;
if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
return -EBUSY;
t4_write_reg(adapter, SF_DATA_A, val);
t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) | OP_V(1));
return t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
}
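/* Command-word sketch (hedged: the byte-ordering rationale is inferred
* from the swab32() usage in t4_read_flash() below): the flash expects an
* opcode byte followed by a 24-bit big-endian address, and the controller
* appears to shift the low-order byte of SF_DATA out first, so callers
* compose the word as swab32(addr) | opcode. For a fast read at address
* 0x012345:
*
*	u32 cmd = swab32(0x012345) | SF_RD_DATA_FAST;	yielding 0x4523010b
*	ret = sf1_write(adapter, 4, 1, 0, cmd);
*
* after which sf1_read() clocks the data out, as t4_read_flash()
* demonstrates.
*/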
/**
* flash_wait_op - wait for a flash operation to complete
* @adapter: the adapter
* @attempts: max number of polls of the status register
* @delay: delay between polls in ms
*
* Wait for a flash operation to complete by polling the status register.
*/
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
int ret;
u32 status;
while (1) {
if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
(ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
return ret;
if (!(status & 1))
return 0;
if (--attempts == 0)
return -EAGAIN;
if (delay)
msleep(delay);
}
}
/**
* t4_read_flash - read words from serial flash
* @adapter: the adapter
* @addr: the start address for the read
* @nwords: how many 32-bit words to read
* @data: where to store the read data
* @byte_oriented: whether to store data as bytes or as words
*
* Read the specified number of 32-bit words from the serial flash.
* If @byte_oriented is set the read data is stored as a byte array
* (i.e., big-endian), otherwise as 32-bit words in the platform's
* natural endianness.
*/
int t4_read_flash(struct adapter *adapter, unsigned int addr,
unsigned int nwords, u32 *data, int byte_oriented)
{
int ret;
if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
return -EINVAL;
addr = swab32(addr) | SF_RD_DATA_FAST;
if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
(ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
return ret;
for ( ; nwords; nwords--, data++) {
ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
if (nwords == 1)
t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
if (ret)
return ret;
if (byte_oriented)
*data = (__force __u32)(cpu_to_be32(*data));
}
return 0;
}
/**
* t4_write_flash - write up to a page of data to the serial flash
* @adapter: the adapter
* @addr: the start address to write
* @n: length of data to write in bytes
* @data: the data to write
*
* Writes up to a page of data (256 bytes) to the serial flash starting
* at the given address. All the data must be written to the same page.
*/
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
unsigned int n, const u8 *data)
{
int ret;
u32 buf[64];
unsigned int i, c, left, val, offset = addr & 0xff;
if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
return -EINVAL;
val = swab32(addr) | SF_PROG_PAGE;
if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
(ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
goto unlock;
for (left = n; left; left -= c) {
c = min(left, 4U);
for (val = 0, i = 0; i < c; ++i)
val = (val << 8) + *data++;
ret = sf1_write(adapter, c, c != left, 1, val);
if (ret)
goto unlock;
}
ret = flash_wait_op(adapter, 8, 1);
if (ret)
goto unlock;
t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
/* Read the page to verify the write succeeded */
ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
if (ret)
return ret;
if (memcmp(data - n, (u8 *)buf + offset, n)) {
dev_err(adapter->pdev_dev,
"failed to correctly write the flash page at %#x\n",
addr);
return -EIO;
}
return 0;
unlock:
t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
return ret;
}
/**
* t4_get_fw_version - read the firmware version
* @adapter: the adapter
* @vers: where to place the version
*
* Reads the FW version from flash.
*/
int t4_get_fw_version(struct adapter *adapter, u32 *vers)
{
return t4_read_flash(adapter, FLASH_FW_START +
offsetof(struct fw_hdr, fw_ver), 1,
vers, 0);
}
/**
* t4_get_tp_version - read the TP microcode version
* @adapter: the adapter
* @vers: where to place the version
*
* Reads the TP microcode version from flash.
*/
int t4_get_tp_version(struct adapter *adapter, u32 *vers)
{
return t4_read_flash(adapter, FLASH_FW_START +
offsetof(struct fw_hdr, tp_microcode_ver),
1, vers, 0);
}
/**
* t4_get_exprom_version - return the Expansion ROM version (if any)
* @adap: the adapter
* @vers: where to place the version
*
* Reads the Expansion ROM header from FLASH and returns the version
* number (if present) through the @vers return value pointer. We return
* this in the Firmware Version Format since it's convenient. Return
* 0 on success, -ENOENT if no Expansion ROM is present.
*/
int t4_get_exprom_version(struct adapter *adap, u32 *vers)
{
struct exprom_header {
unsigned char hdr_arr[16]; /* must start with 0x55aa */
unsigned char hdr_ver[4]; /* Expansion ROM version */
} *hdr;
u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
sizeof(u32))];
int ret;
ret = t4_read_flash(adap, FLASH_EXP_ROM_START,
ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
0);
if (ret)
return ret;
hdr = (struct exprom_header *)exprom_header_buf;
if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
return -ENOENT;
*vers = (FW_HDR_FW_VER_MAJOR_V(hdr->hdr_ver[0]) |
FW_HDR_FW_VER_MINOR_V(hdr->hdr_ver[1]) |
FW_HDR_FW_VER_MICRO_V(hdr->hdr_ver[2]) |
FW_HDR_FW_VER_BUILD_V(hdr->hdr_ver[3]));
return 0;
}
/* Is the given firmware API compatible with the one the driver was compiled
* with?
*/
static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
{
/* short circuit if it's the exact same firmware version */
if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
return 1;
#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
return 1;
#undef SAME_INTF
return 0;
}
/* The firmware in the filesystem is usable, but should it be installed?
* This routine explains itself in detail if it indicates the filesystem
* firmware should be installed.
*/
static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
int k, int c)
{
const char *reason;
if (!card_fw_usable) {
reason = "incompatible or unusable";
goto install;
}
if (k > c) {
reason = "older than the version supported with this driver";
goto install;
}
return 0;
install:
dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, "
"installing firmware %u.%u.%u.%u on card.\n",
FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason,
FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
return 1;
}
int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
const u8 *fw_data, unsigned int fw_size,
struct fw_hdr *card_fw, enum dev_state state,
int *reset)
{
int ret, card_fw_usable, fs_fw_usable;
const struct fw_hdr *fs_fw;
const struct fw_hdr *drv_fw;
drv_fw = &fw_info->fw_hdr;
/* Read the header of the firmware on the card */
ret = -t4_read_flash(adap, FLASH_FW_START,
sizeof(*card_fw) / sizeof(uint32_t),
(uint32_t *)card_fw, 1);
if (ret == 0) {
card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
} else {
dev_err(adap->pdev_dev,
"Unable to read card's firmware header: %d\n", ret);
card_fw_usable = 0;
}
if (fw_data != NULL) {
fs_fw = (const void *)fw_data;
fs_fw_usable = fw_compatible(drv_fw, fs_fw);
} else {
fs_fw = NULL;
fs_fw_usable = 0;
}
if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
(!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
/* Common case: the firmware on the card is an exact match and
* the filesystem one is an exact match too, or the filesystem
* one is absent/incompatible.
*/
} else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
should_install_fs_fw(adap, card_fw_usable,
be32_to_cpu(fs_fw->fw_ver),
be32_to_cpu(card_fw->fw_ver))) {
ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
fw_size, 0);
if (ret != 0) {
dev_err(adap->pdev_dev,
"failed to install firmware: %d\n", ret);
goto bye;
}
/* Installed successfully, update the cached header too. */
*card_fw = *fs_fw;
card_fw_usable = 1;
*reset = 0; /* already reset as part of load_fw */
}
if (!card_fw_usable) {
uint32_t d, c, k;
d = be32_to_cpu(drv_fw->fw_ver);
c = be32_to_cpu(card_fw->fw_ver);
k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;
dev_err(adap->pdev_dev, "Cannot find a usable firmware: "
"chip state %d, "
"driver compiled with %d.%d.%d.%d, "
"card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
state,
FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d),
FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d),
FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
ret = EINVAL;
goto bye;
}
/* We're using whatever's on the card and it's known to be good. */
adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);
bye:
return ret;
}
/**
* t4_flash_erase_sectors - erase a range of flash sectors
* @adapter: the adapter
* @start: the first sector to erase
* @end: the last sector to erase
*
* Erases the sectors in the given inclusive range.
*/
static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
int ret = 0;
if (end >= adapter->params.sf_nsec)
return -EINVAL;
while (start <= end) {
if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
(ret = sf1_write(adapter, 4, 0, 1,
SF_ERASE_SECTOR | (start << 8))) != 0 ||
(ret = flash_wait_op(adapter, 14, 500)) != 0) {
dev_err(adapter->pdev_dev,
"erase of flash sector %d failed, error %d\n",
start, ret);
break;
}
start++;
}
t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
return ret;
}
/**
* t4_flash_cfg_addr - return the address of the flash configuration file
* @adapter: the adapter
*
* Return the address within the flash where the Firmware Configuration
* File is stored.
*/
unsigned int t4_flash_cfg_addr(struct adapter *adapter)
{
if (adapter->params.sf_size == 0x100000)
return FLASH_FPGA_CFG_START;
else
return FLASH_CFG_START;
}
/* Return TRUE if the specified firmware matches the adapter. I.e. T4
* firmware for T4 adapters, T5 firmware for T5 adapters, etc. We go ahead
* and emit an error message for mismatched firmware to save our caller the
* effort ...
*/
static bool t4_fw_matches_chip(const struct adapter *adap,
const struct fw_hdr *hdr)
{
/* The expression below will return FALSE for any unsupported adapter
* which will keep us "honest" in the future ...
*/
if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) ||
(is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5) ||
(is_t6(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T6))
return true;
dev_err(adap->pdev_dev,
"FW image (%d) is not suitable for this adapter (%d)\n",
hdr->chip, CHELSIO_CHIP_VERSION(adap->params.chip));
return false;
}
/**
* t4_load_fw - download firmware
* @adap: the adapter
* @fw_data: the firmware image to write
* @size: image size
*
* Write the supplied firmware image to the card's serial flash.
*/
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
u32 csum;
int ret, addr;
unsigned int i;
u8 first_page[SF_PAGE_SIZE];
const __be32 *p = (const __be32 *)fw_data;
const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
unsigned int fw_img_start = adap->params.sf_fw_start;
unsigned int fw_start_sec = fw_img_start / sf_sec_size;
if (!size) {
dev_err(adap->pdev_dev, "FW image has no data\n");
return -EINVAL;
}
if (size & 511) {
dev_err(adap->pdev_dev,
"FW image size not multiple of 512 bytes\n");
return -EINVAL;
}
if ((unsigned int)be16_to_cpu(hdr->len512) * 512 != size) {
dev_err(adap->pdev_dev,
"FW image size differs from size in FW header\n");
return -EINVAL;
}
if (size > FW_MAX_SIZE) {
dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
FW_MAX_SIZE);
return -EFBIG;
}
if (!t4_fw_matches_chip(adap, hdr))
return -EINVAL;
for (csum = 0, i = 0; i < size / sizeof(csum); i++)
csum += be32_to_cpu(p[i]);
if (csum != 0xffffffff) {
dev_err(adap->pdev_dev,
"corrupted firmware image, checksum %#x\n", csum);
return -EINVAL;
}
i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
if (ret)
goto out;
/*
* We write the correct version at the end so the driver can see a bad
* version if the FW write fails. Start by writing a copy of the
* first page with a bad version.
*/
memcpy(first_page, fw_data, SF_PAGE_SIZE);
((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
if (ret)
goto out;
addr = fw_img_start;
for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
addr += SF_PAGE_SIZE;
fw_data += SF_PAGE_SIZE;
ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
if (ret)
goto out;
}
ret = t4_write_flash(adap,
fw_img_start + offsetof(struct fw_hdr, fw_ver),
sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
out:
if (ret)
dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
ret);
else
ret = t4_get_fw_version(adap, &adap->params.fw_vers);
return ret;
}
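/* Editor's note: the checksum rule enforced above is that all big-endian
 * 32-bit words of the image sum, modulo 2^32, to 0xffffffff. A standalone
 * verifier, as a sketch:
 *
 *	static bool fw_image_csum_ok(const __be32 *p, unsigned int size)
 *	{
 *		u32 csum = 0;
 *		unsigned int i;
 *
 *		for (i = 0; i < size / sizeof(*p); i++)
 *			csum += be32_to_cpu(p[i]);
 *		return csum == 0xffffffff;
 *	}
 */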
/**
* t4_phy_fw_ver - return current PHY firmware version
* @adap: the adapter
* @phy_fw_ver: return value buffer for PHY firmware version
*
* Returns the current version of external PHY firmware on the
* adapter.
*/
int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver)
{
u32 param, val;
int ret;
param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_VERSION));
ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
&param, &val);
if (ret < 0)
return ret;
*phy_fw_ver = val;
return 0;
}
/**
* t4_load_phy_fw - download port PHY firmware
* @adap: the adapter
* @win: the PCI-E Memory Window index to use for t4_memory_rw()
* @win_lock: the lock to use to guard the memory copy
* @phy_fw_version: function to check PHY firmware versions
* @phy_fw_data: the PHY firmware image to write
* @phy_fw_size: image size
*
* Transfer the specified PHY firmware to the adapter. If a non-NULL
* @phy_fw_version is supplied, then it will be used to determine if
* it's necessary to perform the transfer by comparing the version
* of any existing adapter PHY firmware with that of the passed in
* PHY firmware image. If @win_lock is non-NULL then it will be used
* around the call to t4_memory_rw() which transfers the PHY firmware
* to the adapter.
*
* A negative error number will be returned if an error occurs. If
* version number support is available and there's no need to upgrade
* the firmware, 0 will be returned. If firmware is successfully
* transferred to the adapter, 1 will be returned.
*
* NOTE: some adapters only have local RAM to store the PHY firmware. As
* a result, a RESET of the adapter would cause that RAM to lose its
* contents. Thus, loading PHY firmware on such adapters must happen
* after any FW_RESET_CMDs ...
*/
int t4_load_phy_fw(struct adapter *adap,
int win, spinlock_t *win_lock,
int (*phy_fw_version)(const u8 *, size_t),
const u8 *phy_fw_data, size_t phy_fw_size)
{
unsigned long mtype = 0, maddr = 0;
u32 param, val;
int cur_phy_fw_ver = 0, new_phy_fw_vers = 0;
int ret;
/* If we have version number support, then check to see if the adapter
* already has up-to-date PHY firmware loaded.
*/
if (phy_fw_version) {
new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size);
ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
if (ret < 0)
return ret;
if (cur_phy_fw_ver >= new_phy_fw_vers) {
CH_WARN(adap, "PHY Firmware already up-to-date, "
"version %#x\n", cur_phy_fw_ver);
return 0;
}
}
/* Ask the firmware where it wants us to copy the PHY firmware image.
* The file size requires a special READ variant of the PARAMS command,
* which passes the size in via the values field and retrieves the
* firmware's reply in the same buffer.
*/
param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
val = phy_fw_size;
ret = t4_query_params_rw(adap, adap->mbox, adap->pf, 0, 1,
&param, &val, 1);
if (ret < 0)
return ret;
mtype = val >> 8;
maddr = (val & 0xff) << 16;
/* Copy the supplied PHY Firmware image to the adapter memory location
* allocated by the adapter firmware.
*/
if (win_lock)
spin_lock_bh(win_lock);
ret = t4_memory_rw(adap, win, mtype, maddr,
phy_fw_size, (__be32 *)phy_fw_data,
T4_MEMORY_WRITE);
if (win_lock)
spin_unlock_bh(win_lock);
if (ret)
return ret;
/* Tell the firmware that the PHY firmware image has been written to
* RAM and it can now start copying it over to the PHYs. The chip
* firmware will RESET the affected PHYs as part of this operation
* leaving them running the new PHY firmware image.
*/
param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
&param, &val, 30000);
/* If we have version number support, then check to see that the new
* firmware got loaded properly.
*/
if (phy_fw_version) {
ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
if (ret < 0)
return ret;
if (cur_phy_fw_ver != new_phy_fw_vers) {
CH_WARN(adap, "PHY Firmware did not update: "
"version on adapter %#x, "
"version flashed %#x\n",
cur_phy_fw_ver, new_phy_fw_vers);
return -ENXIO;
}
}
return 1;
}
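/* Editor's note: the PARAMS value returned for PHYFW_DOWNLOAD above packs
 * the destination as a memory type in bits 31:8 and an address in 64KB
 * units in bits 7:0, hence the unpacking in the function body:
 *
 *	mtype = val >> 8;
 *	maddr = (val & 0xff) << 16;	(64KB units -> byte address)
 */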
/**
* t4_fwcache - firmware cache operation
* @adap: the adapter
* @op: the operation (flush or flush and invalidate)
*/
int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
{
struct fw_params_cmd c;
memset(&c, 0, sizeof(c));
c.op_to_vfn =
cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
FW_PARAMS_CMD_PFN_V(adap->pf) |
FW_PARAMS_CMD_VFN_V(0));
c.retval_len16 = cpu_to_be32(FW_LEN16(c));
c.param[0].mnem =
cpu_to_be32(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWCACHE));
c.param[0].val = (__force __be32)op;
return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
}
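/* Editor's note: a hedged usage sketch -- flushing and invalidating the
 * firmware cache, assuming the FW_PARAMS_PARAM_DEV_FWCACHE_FLUSHINV
 * enumerator from t4fw_api.h:
 *
 *	ret = t4_fwcache(adap, FW_PARAMS_PARAM_DEV_FWCACHE_FLUSHINV);
 */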
void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
unsigned int *pif_req_wrptr,
unsigned int *pif_rsp_wrptr)
{
int i, j;
u32 cfg, val, req, rsp;
cfg = t4_read_reg(adap, CIM_DEBUGCFG_A);
if (cfg & LADBGEN_F)
t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F);
val = t4_read_reg(adap, CIM_DEBUGSTS_A);
req = POLADBGWRPTR_G(val);
rsp = PILADBGWRPTR_G(val);
if (pif_req_wrptr)
*pif_req_wrptr = req;
if (pif_rsp_wrptr)
*pif_rsp_wrptr = rsp;
for (i = 0; i < CIM_PIFLA_SIZE; i++) {
for (j = 0; j < 6; j++) {
t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(req) |
PILADBGRDPTR_V(rsp));
*pif_req++ = t4_read_reg(adap, CIM_PO_LA_DEBUGDATA_A);
*pif_rsp++ = t4_read_reg(adap, CIM_PI_LA_DEBUGDATA_A);
req++;
rsp++;
}
req = (req + 2) & POLADBGRDPTR_M;
rsp = (rsp + 2) & PILADBGRDPTR_M;
}
t4_write_reg(adap, CIM_DEBUGCFG_A, cfg);
}
void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
{
u32 cfg;
int i, j, idx;
cfg = t4_read_reg(adap, CIM_DEBUGCFG_A);
if (cfg & LADBGEN_F)
t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F);
for (i = 0; i < CIM_MALA_SIZE; i++) {
for (j = 0; j < 5; j++) {
idx = 8 * i + j;
t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(idx) |
PILADBGRDPTR_V(idx));
*ma_req++ = t4_read_reg(adap, CIM_PO_LA_MADEBUGDATA_A);
*ma_rsp++ = t4_read_reg(adap, CIM_PI_LA_MADEBUGDATA_A);
}
}
t4_write_reg(adap, CIM_DEBUGCFG_A, cfg);
}
void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
{
unsigned int i, j;
for (i = 0; i < 8; i++) {
u32 *p = la_buf + i;
t4_write_reg(adap, ULP_RX_LA_CTL_A, i);
j = t4_read_reg(adap, ULP_RX_LA_WRPTR_A);
t4_write_reg(adap, ULP_RX_LA_RDPTR_A, j);
for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
*p = t4_read_reg(adap, ULP_RX_LA_RDDATA_A);
}
}
#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
FW_PORT_CAP_ANEG)
/**
* t4_link_l1cfg - apply link configuration to MAC/PHY
* @adap: the adapter
* @mbox: mbox to use for the FW command
* @port: the port id
* @lc: the requested link configuration
*
* Set up a port's MAC and PHY according to a desired link configuration.
* - If the PHY can auto-negotiate first decide what to advertise, then
* enable/disable auto-negotiation as desired, and reset.
* - If the PHY does not auto-negotiate just reset it.
* - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
* otherwise do it later based on the outcome of auto-negotiation.
*/
int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
struct link_config *lc)
{
struct fw_port_cmd c;
unsigned int fc = 0, mdi = FW_PORT_CAP_MDI_V(FW_PORT_CAP_MDI_AUTO);
lc->link_ok = 0;
if (lc->requested_fc & PAUSE_RX)
fc |= FW_PORT_CAP_FC_RX;
if (lc->requested_fc & PAUSE_TX)
fc |= FW_PORT_CAP_FC_TX;
memset(&c, 0, sizeof(c));
c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
FW_PORT_CMD_PORTID_V(port));
c.action_to_len16 =
cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
FW_LEN16(c));
if (!(lc->supported & FW_PORT_CAP_ANEG)) {
c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) |
fc);
lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
} else if (lc->autoneg == AUTONEG_DISABLE) {
c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed | fc | mdi);
lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
} else
c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc | mdi);
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
/**
* t4_restart_aneg - restart autonegotiation
* @adap: the adapter
* @mbox: mbox to use for the FW command
* @port: the port id
*
* Restarts autonegotiation for the selected port.
*/
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
{
struct fw_port_cmd c;
memset(&c, 0, sizeof(c));
c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
FW_PORT_CMD_PORTID_V(port));
c.action_to_len16 =
cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
FW_LEN16(c));
c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
typedef void (*int_handler_t)(struct adapter *adap);
struct intr_info {
unsigned int mask; /* bits to check in interrupt status */
const char *msg; /* message to print or NULL */
short stat_idx; /* stat counter to increment or -1 */
unsigned short fatal; /* whether the condition reported is fatal */
int_handler_t int_handler; /* platform-specific int handler */
};
/**
* t4_handle_intr_status - table driven interrupt handler
* @adapter: the adapter that generated the interrupt
* @reg: the interrupt status register to process
* @acts: table of interrupt actions
*
* A table driven interrupt handler that applies a set of masks to an
* interrupt status word and performs the corresponding actions if the
* interrupts described by the mask have occurred. The actions include
* optionally emitting a warning or alert message. The table is terminated
* by an entry specifying mask 0. Returns the number of fatal interrupt
* conditions.
*/
static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
const struct intr_info *acts)
{
int fatal = 0;
unsigned int mask = 0;
unsigned int status = t4_read_reg(adapter, reg);
for ( ; acts->mask; ++acts) {
if (!(status & acts->mask))
continue;
if (acts->fatal) {
fatal++;
dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
status & acts->mask);
} else if (acts->msg && printk_ratelimit())
dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
status & acts->mask);
if (acts->int_handler)
acts->int_handler(adapter);
mask |= acts->mask;
}
status &= mask;
if (status) /* clear processed interrupts */
t4_write_reg(adapter, reg, status);
return fatal;
}
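/* Editor's note: a minimal sketch of an action table for the handler above.
 * Initializer order follows struct intr_info (mask, msg, stat_idx, fatal,
 * int_handler) and the { 0 } entry terminates the walk; "example_handler"
 * is a placeholder, not a driver symbol:
 *
 *	static const struct intr_info example_intr_info[] = {
 *		{ 0x00000001, "example fatal condition", -1, 1 },
 *		{ 0x00000002, NULL, -1, 0, example_handler },
 *		{ 0 }
 *	};
 */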
/*
* Interrupt handler for the PCIE module.
*/
static void pcie_intr_handler(struct adapter *adapter)
{
static const struct intr_info sysbus_intr_info[] = {
{ RNPP_F, "RXNP array parity error", -1, 1 },
{ RPCP_F, "RXPC array parity error", -1, 1 },
{ RCIP_F, "RXCIF array parity error", -1, 1 },
{ RCCP_F, "Rx completions control array parity error", -1, 1 },
{ RFTP_F, "RXFT array parity error", -1, 1 },
{ 0 }
};
static const struct intr_info pcie_port_intr_info[] = {
{ TPCP_F, "TXPC array parity error", -1, 1 },
{ TNPP_F, "TXNP array parity error", -1, 1 },
{ TFTP_F, "TXFT array parity error", -1, 1 },
{ TCAP_F, "TXCA array parity error", -1, 1 },
{ TCIP_F, "TXCIF array parity error", -1, 1 },
{ RCAP_F, "RXCA array parity error", -1, 1 },
{ OTDD_F, "outbound request TLP discarded", -1, 1 },
{ RDPE_F, "Rx data parity error", -1, 1 },
{ TDUE_F, "Tx uncorrectable data error", -1, 1 },
{ 0 }
};
static const struct intr_info pcie_intr_info[] = {
{ MSIADDRLPERR_F, "MSI AddrL parity error", -1, 1 },
{ MSIADDRHPERR_F, "MSI AddrH parity error", -1, 1 },
{ MSIDATAPERR_F, "MSI data parity error", -1, 1 },
{ MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
{ MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
{ MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
{ MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
{ PIOCPLPERR_F, "PCI PIO completion FIFO parity error", -1, 1 },
{ PIOREQPERR_F, "PCI PIO request FIFO parity error", -1, 1 },
{ TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
{ CCNTPERR_F, "PCI CMD channel count parity error", -1, 1 },
{ CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
{ CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
{ DCNTPERR_F, "PCI DMA channel count parity error", -1, 1 },
{ DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
{ DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
{ HCNTPERR_F, "PCI HMA channel count parity error", -1, 1 },
{ HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
{ HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
{ CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
{ FIDPERR_F, "PCI FID parity error", -1, 1 },
{ INTXCLRPERR_F, "PCI INTx clear parity error", -1, 1 },
{ MATAGPERR_F, "PCI MA tag parity error", -1, 1 },
{ PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
{ RXCPLPERR_F, "PCI Rx completion parity error", -1, 1 },
{ RXWRPERR_F, "PCI Rx write parity error", -1, 1 },
{ RPLPERR_F, "PCI replay buffer parity error", -1, 1 },
{ PCIESINT_F, "PCI core secondary fault", -1, 1 },
{ PCIEPINT_F, "PCI core primary fault", -1, 1 },
{ UNXSPLCPLERR_F, "PCI unexpected split completion error",
-1, 0 },
{ 0 }
};
static const struct intr_info t5_pcie_intr_info[] = {
{ MSTGRPPERR_F, "Master Response Read Queue parity error",
-1, 1 },
{ MSTTIMEOUTPERR_F, "Master Timeout FIFO parity error", -1, 1 },
{ MSIXSTIPERR_F, "MSI-X STI SRAM parity error", -1, 1 },
{ MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
{ MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
{ MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
{ MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
{ PIOCPLGRPPERR_F, "PCI PIO completion Group FIFO parity error",
-1, 1 },
{ PIOREQGRPPERR_F, "PCI PIO request Group FIFO parity error",
-1, 1 },
{ TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
{ MSTTAGQPERR_F, "PCI master tag queue parity error", -1, 1 },
{ CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
{ CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
{ DREQWRPERR_F, "PCI DMA channel write request parity error",
-1, 1 },
{ DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
{ DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
{ HREQWRPERR_F, "PCI HMA channel write request parity error",
-1, 1 },
{ HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
{ HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
{ CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
{ FIDPERR_F, "PCI FID parity error", -1, 1 },
{ VFIDPERR_F, "PCI VFID parity error", -1, 1 },
{ MAGRPPERR_F, "PCI MA group FIFO parity error", -1, 1 },
{ PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
{ IPRXHDRGRPPERR_F, "PCI IP Rx header group parity error",
-1, 1 },
{ IPRXDATAGRPPERR_F, "PCI IP Rx data group parity error",
-1, 1 },
{ RPLPERR_F, "PCI IP replay buffer parity error", -1, 1 },
{ IPSOTPERR_F, "PCI IP SOT buffer parity error", -1, 1 },
{ TRGT1GRPPERR_F, "PCI TRGT1 group FIFOs parity error", -1, 1 },
{ READRSPERR_F, "Outbound read error", -1, 0 },
{ 0 }
};
int fat;
if (is_t4(adapter->params.chip))
fat = t4_handle_intr_status(adapter,
PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A,
sysbus_intr_info) +
t4_handle_intr_status(adapter,
PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A,
pcie_port_intr_info) +
t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
pcie_intr_info);
else
fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
t5_pcie_intr_info);
if (fat)
t4_fatal_err(adapter);
}
/*
* TP interrupt handler.
*/
static void tp_intr_handler(struct adapter *adapter)
{
static const struct intr_info tp_intr_info[] = {
{ 0x3fffffff, "TP parity error", -1, 1 },
{ FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 },
{ 0 }
};
if (t4_handle_intr_status(adapter, TP_INT_CAUSE_A, tp_intr_info))
t4_fatal_err(adapter);
}
/*
* SGE interrupt handler.
*/
static void sge_intr_handler(struct adapter *adapter)
{
u64 v;
u32 err;
static const struct intr_info sge_intr_info[] = {
{ ERR_CPL_EXCEED_IQE_SIZE_F,
"SGE received CPL exceeding IQE size", -1, 1 },
{ ERR_INVALID_CIDX_INC_F,
"SGE GTS CIDX increment too large", -1, 0 },
{ ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
{ DBFIFO_LP_INT_F, NULL, -1, 0, t4_db_full },
{ ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
"SGE IQID > 1023 received CPL for FL", -1, 0 },
{ ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
0 },
{ ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
0 },
{ ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
0 },
{ ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
0 },
{ ERR_ING_CTXT_PRIO_F,
"SGE too many priority ingress contexts", -1, 0 },
{ INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
{ EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
{ 0 }
};
static const struct intr_info t4t5_sge_intr_info[] = {
{ ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped },
{ DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full },
{ ERR_EGR_CTXT_PRIO_F,
"SGE too many priority egress contexts", -1, 0 },
{ 0 }
};
v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1_A) |
((u64)t4_read_reg(adapter, SGE_INT_CAUSE2_A) << 32);
if (v) {
dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
(unsigned long long)v);
t4_write_reg(adapter, SGE_INT_CAUSE1_A, v);
t4_write_reg(adapter, SGE_INT_CAUSE2_A, v >> 32);
}
v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info);
if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A,
t4t5_sge_intr_info);
err = t4_read_reg(adapter, SGE_ERROR_STATS_A);
if (err & ERROR_QID_VALID_F) {
dev_err(adapter->pdev_dev, "SGE error for queue %u\n",
ERROR_QID_G(err));
if (err & UNCAPTURED_ERROR_F)
dev_err(adapter->pdev_dev,
"SGE UNCAPTURED_ERROR set (clearing)\n");
t4_write_reg(adapter, SGE_ERROR_STATS_A, ERROR_QID_VALID_F |
UNCAPTURED_ERROR_F);
}
if (v != 0)
t4_fatal_err(adapter);
}
#define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
#define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)
/*
* CIM interrupt handler.
*/
static void cim_intr_handler(struct adapter *adapter)
{
static const struct intr_info cim_intr_info[] = {
{ PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 },
{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
{ MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 },
{ MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
{ TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
{ TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
{ 0 }
};
static const struct intr_info cim_upintr_info[] = {
{ RSVDSPACEINT_F, "CIM reserved space access", -1, 1 },
{ ILLTRANSINT_F, "CIM illegal transaction", -1, 1 },
{ ILLWRINT_F, "CIM illegal write", -1, 1 },
{ ILLRDINT_F, "CIM illegal read", -1, 1 },
{ ILLRDBEINT_F, "CIM illegal read BE", -1, 1 },
{ ILLWRBEINT_F, "CIM illegal write BE", -1, 1 },
{ SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 },
{ SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 },
{ BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 },
{ SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 },
{ SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 },
{ BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 },
{ SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 },
{ SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 },
{ BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 },
{ BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 },
{ SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 },
{ SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 },
{ BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 },
{ BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 },
{ SGLRDPLINT_F, "CIM single read from PL space", -1, 1 },
{ SGLWRPLINT_F, "CIM single write to PL space", -1, 1 },
{ BLKRDPLINT_F, "CIM block read from PL space", -1, 1 },
{ BLKWRPLINT_F, "CIM block write to PL space", -1, 1 },
{ REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 },
{ RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 },
{ TIMEOUTINT_F, "CIM PIF timeout", -1, 1 },
{ TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 },
{ 0 }
};
int fat;
if (t4_read_reg(adapter, PCIE_FW_A) & PCIE_FW_ERR_F)
t4_report_fw_error(adapter);
fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE_A,
cim_intr_info) +
t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE_A,
cim_upintr_info);
if (fat)
t4_fatal_err(adapter);
}
/*
* ULP RX interrupt handler.
*/
static void ulprx_intr_handler(struct adapter *adapter)
{
static const struct intr_info ulprx_intr_info[] = {
{ 0x1800000, "ULPRX context error", -1, 1 },
{ 0x7fffff, "ULPRX parity error", -1, 1 },
{ 0 }
};
if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE_A, ulprx_intr_info))
t4_fatal_err(adapter);
}
/*
* ULP TX interrupt handler.
*/
static void ulptx_intr_handler(struct adapter *adapter)
{
static const struct intr_info ulptx_intr_info[] = {
{ PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1,
0 },
{ PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1,
0 },
{ PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1,
0 },
{ PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1,
0 },
{ 0xfffffff, "ULPTX parity error", -1, 1 },
{ 0 }
};
if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE_A, ulptx_intr_info))
t4_fatal_err(adapter);
}
/*
* PM TX interrupt handler.
*/
static void pmtx_intr_handler(struct adapter *adapter)
{
static const struct intr_info pmtx_intr_info[] = {
{ PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 },
{ PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 },
{ PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 },
{ ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 },
{ PMTX_FRAMING_ERROR_F, "PMTX framing error", -1, 1 },
{ OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 },
{ DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error",
-1, 1 },
{ ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 },
{ PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1},
{ 0 }
};
if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE_A, pmtx_intr_info))
t4_fatal_err(adapter);
}
/*
* PM RX interrupt handler.
*/
static void pmrx_intr_handler(struct adapter *adapter)
{
static const struct intr_info pmrx_intr_info[] = {
{ ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 },
{ PMRX_FRAMING_ERROR_F, "PMRX framing error", -1, 1 },
{ OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 },
{ DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error",
-1, 1 },
{ IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 },
{ PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1},
{ 0 }
};
if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE_A, pmrx_intr_info))
t4_fatal_err(adapter);
}
/*
* CPL switch interrupt handler.
*/
static void cplsw_intr_handler(struct adapter *adapter)
{
static const struct intr_info cplsw_intr_info[] = {
{ CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 },
{ CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 },
{ TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 },
{ SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 },
{ CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 },
{ ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 },
{ 0 }
};
if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE_A, cplsw_intr_info))
t4_fatal_err(adapter);
}
/*
* LE interrupt handler.
*/
static void le_intr_handler(struct adapter *adap)
{
enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
static const struct intr_info le_intr_info[] = {
{ LIPMISS_F, "LE LIP miss", -1, 0 },
{ LIP0_F, "LE 0 LIP error", -1, 0 },
{ PARITYERR_F, "LE parity error", -1, 1 },
{ UNKNOWNCMD_F, "LE unknown command", -1, 1 },
{ REQQPARERR_F, "LE request queue parity error", -1, 1 },
{ 0 }
};
static const struct intr_info t6_le_intr_info[] = {
{ T6_LIPMISS_F, "LE LIP miss", -1, 0 },
{ T6_LIP0_F, "LE 0 LIP error", -1, 0 },
{ TCAMINTPERR_F, "LE parity error", -1, 1 },
{ T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 },
{ SSRAMINTPERR_F, "LE request queue parity error", -1, 1 },
{ 0 }
};
if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A,
(chip <= CHELSIO_T5) ?
le_intr_info : t6_le_intr_info))
t4_fatal_err(adap);
}
/*
* MPS interrupt handler.
*/
static void mps_intr_handler(struct adapter *adapter)
{
static const struct intr_info mps_rx_intr_info[] = {
{ 0xffffff, "MPS Rx parity error", -1, 1 },
{ 0 }
};
static const struct intr_info mps_tx_intr_info[] = {
{ TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
{ NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
{ TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
-1, 1 },
{ TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
-1, 1 },
{ BUBBLE_F, "MPS Tx underflow", -1, 1 },
{ SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
{ FRMERR_F, "MPS Tx framing error", -1, 1 },
{ 0 }
};
static const struct intr_info mps_trc_intr_info[] = {
{ FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 },
{ PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error",
-1, 1 },
{ MISCPERR_F, "MPS TRC misc parity error", -1, 1 },
{ 0 }
};
static const struct intr_info mps_stat_sram_intr_info[] = {
{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
{ 0 }
};
static const struct intr_info mps_stat_tx_intr_info[] = {
{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
{ 0 }
};
static const struct intr_info mps_stat_rx_intr_info[] = {
{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
{ 0 }
};
static const struct intr_info mps_cls_intr_info[] = {
{ MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 },
{ MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 },
{ HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 },
{ 0 }
};
int fat;
fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE_A,
mps_rx_intr_info) +
t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE_A,
mps_tx_intr_info) +
t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE_A,
mps_trc_intr_info) +
t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM_A,
mps_stat_sram_intr_info) +
t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A,
mps_stat_tx_intr_info) +
t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A,
mps_stat_rx_intr_info) +
t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE_A,
mps_cls_intr_info);
t4_write_reg(adapter, MPS_INT_CAUSE_A, 0);
t4_read_reg(adapter, MPS_INT_CAUSE_A); /* flush */
if (fat)
t4_fatal_err(adapter);
}
#define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
ECC_UE_INT_CAUSE_F)
/*
* EDC/MC interrupt handler.
*/
static void mem_intr_handler(struct adapter *adapter, int idx)
{
static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };
unsigned int addr, cnt_addr, v;
if (idx <= MEM_EDC1) {
addr = EDC_REG(EDC_INT_CAUSE_A, idx);
cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx);
} else if (idx == MEM_MC) {
if (is_t4(adapter->params.chip)) {
addr = MC_INT_CAUSE_A;
cnt_addr = MC_ECC_STATUS_A;
} else {
addr = MC_P_INT_CAUSE_A;
cnt_addr = MC_P_ECC_STATUS_A;
}
} else {
addr = MC_REG(MC_P_INT_CAUSE_A, 1);
cnt_addr = MC_REG(MC_P_ECC_STATUS_A, 1);
}
v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
if (v & PERR_INT_CAUSE_F)
dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
name[idx]);
if (v & ECC_CE_INT_CAUSE_F) {
u32 cnt = ECC_CECNT_G(t4_read_reg(adapter, cnt_addr));
t4_write_reg(adapter, cnt_addr, ECC_CECNT_V(ECC_CECNT_M));
if (printk_ratelimit())
dev_warn(adapter->pdev_dev,
"%u %s correctable ECC data error%s\n",
cnt, name[idx], cnt > 1 ? "s" : "");
}
if (v & ECC_UE_INT_CAUSE_F)
dev_alert(adapter->pdev_dev,
"%s uncorrectable ECC data error\n", name[idx]);
t4_write_reg(adapter, addr, v);
if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F))
t4_fatal_err(adapter);
}
/*
* MA interrupt handler.
*/
static void ma_intr_handler(struct adapter *adap)
{
u32 v, status = t4_read_reg(adap, MA_INT_CAUSE_A);
if (status & MEM_PERR_INT_CAUSE_F) {
dev_alert(adap->pdev_dev,
"MA parity error, parity status %#x\n",
t4_read_reg(adap, MA_PARITY_ERROR_STATUS1_A));
if (is_t5(adap->params.chip))
dev_alert(adap->pdev_dev,
"MA parity error, parity status %#x\n",
t4_read_reg(adap,
MA_PARITY_ERROR_STATUS2_A));
}
if (status & MEM_WRAP_INT_CAUSE_F) {
v = t4_read_reg(adap, MA_INT_WRAP_STATUS_A);
dev_alert(adap->pdev_dev, "MA address wrap-around error by "
"client %u to address %#x\n",
MEM_WRAP_CLIENT_NUM_G(v),
MEM_WRAP_ADDRESS_G(v) << 4);
}
t4_write_reg(adap, MA_INT_CAUSE_A, status);
t4_fatal_err(adap);
}
/*
* SMB interrupt handler.
*/
static void smb_intr_handler(struct adapter *adap)
{
static const struct intr_info smb_intr_info[] = {
{ MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 },
{ MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 },
{ SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 },
{ 0 }
};
if (t4_handle_intr_status(adap, SMB_INT_CAUSE_A, smb_intr_info))
t4_fatal_err(adap);
}
/*
* NC-SI interrupt handler.
*/
static void ncsi_intr_handler(struct adapter *adap)
{
static const struct intr_info ncsi_intr_info[] = {
{ CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 },
{ MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 },
{ TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 },
{ RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 },
{ 0 }
};
if (t4_handle_intr_status(adap, NCSI_INT_CAUSE_A, ncsi_intr_info))
t4_fatal_err(adap);
}
/*
* XGMAC interrupt handler.
*/
static void xgmac_intr_handler(struct adapter *adap, int port)
{
u32 v, int_cause_reg;
if (is_t4(adap->params.chip))
int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE_A);
else
int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A);
v = t4_read_reg(adap, int_cause_reg);
v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F;
if (!v)
return;
if (v & TXFIFO_PRTY_ERR_F)
dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
port);
if (v & RXFIFO_PRTY_ERR_F)
dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
port);
t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE_A), v);
t4_fatal_err(adap);
}
/*
* PL interrupt handler.
*/
static void pl_intr_handler(struct adapter *adap)
{
static const struct intr_info pl_intr_info[] = {
{ FATALPERR_F, "T4 fatal parity error", -1, 1 },
{ PERRVFID_F, "PL VFID_MAP parity error", -1, 1 },
{ 0 }
};
if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE_A, pl_intr_info))
t4_fatal_err(adap);
}
#define PF_INTR_MASK (PFSW_F)
#define GLBL_INTR_MASK (CIM_F | MPS_F | PL_F | PCIE_F | MC_F | EDC0_F | \
EDC1_F | LE_F | TP_F | MA_F | PM_TX_F | PM_RX_F | ULP_RX_F | \
CPL_SWITCH_F | SGE_F | ULP_TX_F)
/**
* t4_slow_intr_handler - control path interrupt handler
* @adapter: the adapter
*
* T4 interrupt handler for non-data global interrupt events, e.g., errors.
* The designation 'slow' is because it involves register reads, while
* data interrupts typically don't involve any MMIOs.
*/
int t4_slow_intr_handler(struct adapter *adapter)
{
u32 cause = t4_read_reg(adapter, PL_INT_CAUSE_A);
if (!(cause & GLBL_INTR_MASK))
return 0;
if (cause & CIM_F)
cim_intr_handler(adapter);
if (cause & MPS_F)
mps_intr_handler(adapter);
if (cause & NCSI_F)
ncsi_intr_handler(adapter);
if (cause & PL_F)
pl_intr_handler(adapter);
if (cause & SMB_F)
smb_intr_handler(adapter);
if (cause & XGMAC0_F)
xgmac_intr_handler(adapter, 0);
if (cause & XGMAC1_F)
xgmac_intr_handler(adapter, 1);
if (cause & XGMAC_KR0_F)
xgmac_intr_handler(adapter, 2);
if (cause & XGMAC_KR1_F)
xgmac_intr_handler(adapter, 3);
if (cause & PCIE_F)
pcie_intr_handler(adapter);
if (cause & MC_F)
mem_intr_handler(adapter, MEM_MC);
if (is_t5(adapter->params.chip) && (cause & MC1_F))
mem_intr_handler(adapter, MEM_MC1);
if (cause & EDC0_F)
mem_intr_handler(adapter, MEM_EDC0);
if (cause & EDC1_F)
mem_intr_handler(adapter, MEM_EDC1);
if (cause & LE_F)
le_intr_handler(adapter);
if (cause & TP_F)
tp_intr_handler(adapter);
if (cause & MA_F)
ma_intr_handler(adapter);
if (cause & PM_TX_F)
pmtx_intr_handler(adapter);
if (cause & PM_RX_F)
pmrx_intr_handler(adapter);
if (cause & ULP_RX_F)
ulprx_intr_handler(adapter);
if (cause & CPL_SWITCH_F)
cplsw_intr_handler(adapter);
if (cause & SGE_F)
sge_intr_handler(adapter);
if (cause & ULP_TX_F)
ulptx_intr_handler(adapter);
/* Clear the interrupts just processed for which we are the master. */
t4_write_reg(adapter, PL_INT_CAUSE_A, cause & GLBL_INTR_MASK);
(void)t4_read_reg(adapter, PL_INT_CAUSE_A); /* flush */
return 1;
}
/**
* t4_intr_enable - enable interrupts
* @adapter: the adapter whose interrupts should be enabled
*
* Enable PF-specific interrupts for the calling function and the top-level
* interrupt concentrator for global interrupts. Interrupts are already
* enabled at each module, here we just enable the roots of the interrupt
* hierarchies.
*
* Note: this function should be called only when the driver manages
* non PF-specific interrupts from the various HW modules. Only one PCI
* function at a time should be doing this.
*/
void t4_intr_enable(struct adapter *adapter)
{
u32 val = 0;
u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));
if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
val = ERR_DROPPED_DB_F | ERR_EGR_CTXT_PRIO_F | DBFIFO_HP_INT_F;
t4_write_reg(adapter, SGE_INT_ENABLE3_A, ERR_CPL_EXCEED_IQE_SIZE_F |
ERR_INVALID_CIDX_INC_F | ERR_CPL_OPCODE_0_F |
ERR_DATA_CPL_ON_HIGH_QID1_F | INGRESS_SIZE_ERR_F |
ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
DBFIFO_LP_INT_F | EGRESS_SIZE_ERR_F | val);
t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), PF_INTR_MASK);
t4_set_reg_field(adapter, PL_INT_MAP0_A, 0, 1 << pf);
}
/**
* t4_intr_disable - disable interrupts
* @adapter: the adapter whose interrupts should be disabled
*
* Disable interrupts. We only disable the top-level interrupt
* concentrators. The caller must be a PCI function managing global
* interrupts.
*/
void t4_intr_disable(struct adapter *adapter)
{
u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));
t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0);
t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0);
}
/**
* hash_mac_addr - return the hash value of a MAC address
* @addr: the 48-bit Ethernet MAC address
*
* Hashes a MAC address according to the hash function used by HW inexact
* (hash) address matching.
*/
static int hash_mac_addr(const u8 *addr)
{
u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
a ^= b;
a ^= (a >> 12);
a ^= (a >> 6);
return a & 0x3f;
}
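/* Editor's note: a worked example of the hash above for the (illustrative)
 * address 00:01:02:03:04:05:
 *
 *	a = 0x000102, b = 0x030405
 *	a ^= b		-> 0x030507
 *	a ^= a >> 12	-> 0x030507 ^ 0x000030 = 0x030537
 *	a ^= a >> 6	-> 0x030537 ^ 0x000c14 = 0x030923
 *	hash = 0x030923 & 0x3f = 0x23
 */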
/**
* t4_config_rss_range - configure a portion of the RSS mapping table
* @adapter: the adapter
* @mbox: mbox to use for the FW command
* @viid: virtual interface whose RSS subtable is to be written
* @start: start entry in the table to write
* @n: how many table entries to write
* @rspq: values for the response queue lookup table
* @nrspq: number of values in @rspq
*
* Programs the selected part of the VI's RSS mapping table with the
* provided values. If @nrspq < @n the supplied values are used repeatedly
* until the full table range is populated.
*
* The caller must ensure the values in @rspq are in the range allowed for
* @viid.
*/
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
int start, int n, const u16 *rspq, unsigned int nrspq)
{
int ret;
const u16 *rsp = rspq;
const u16 *rsp_end = rspq + nrspq;
struct fw_rss_ind_tbl_cmd cmd;
memset(&cmd, 0, sizeof(cmd));
cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
FW_RSS_IND_TBL_CMD_VIID_V(viid));
cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
/* each fw_rss_ind_tbl_cmd takes up to 32 entries */
while (n > 0) {
int nq = min(n, 32);
__be32 *qp = &cmd.iq0_to_iq2;
cmd.niqid = cpu_to_be16(nq);
cmd.startidx = cpu_to_be16(start);
start += nq;
n -= nq;
while (nq > 0) {
unsigned int v;
v = FW_RSS_IND_TBL_CMD_IQ0_V(*rsp);
if (++rsp >= rsp_end)
rsp = rspq;
v |= FW_RSS_IND_TBL_CMD_IQ1_V(*rsp);
if (++rsp >= rsp_end)
rsp = rspq;
v |= FW_RSS_IND_TBL_CMD_IQ2_V(*rsp);
if (++rsp >= rsp_end)
rsp = rspq;
*qp++ = cpu_to_be32(v);
nq -= 3;
}
ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
if (ret)
return ret;
}
return 0;
}
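/* Editor's note: a hedged usage sketch -- spreading a VI's 128-entry RSS
 * slice across four response queues (the queue ids are illustrative):
 *
 *	static const u16 rspq[] = { 10, 11, 12, 13 };
 *
 *	ret = t4_config_rss_range(adap, adap->mbox, viid, 0, 128,
 *				  rspq, ARRAY_SIZE(rspq));
 *
 * With nrspq < n the four ids repeat until all 128 slots are written.
 */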
/**
* t4_config_glbl_rss - configure the global RSS mode
* @adapter: the adapter
* @mbox: mbox to use for the FW command
* @mode: global RSS mode
* @flags: mode-specific flags
*
* Sets the global RSS mode.
*/
int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
unsigned int flags)
{
struct fw_rss_glb_config_cmd c;
memset(&c, 0, sizeof(c));
c.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
c.retval_len16 = cpu_to_be32(FW_LEN16(c));
if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
c.u.manual.mode_pkd =
cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
c.u.basicvirtual.mode_pkd =
cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
} else
return -EINVAL;
return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
}
/**
* t4_config_vi_rss - configure per VI RSS settings
* @adapter: the adapter
* @mbox: mbox to use for the FW command
* @viid: the VI id
* @flags: RSS flags
* @defq: id of the default RSS queue for the VI.
*
* Configures VI-specific RSS properties.
*/
int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
unsigned int flags, unsigned int defq)
{
struct fw_rss_vi_config_cmd c;
memset(&c, 0, sizeof(c));
c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
FW_RSS_VI_CONFIG_CMD_VIID_V(viid));
c.retval_len16 = cpu_to_be32(FW_LEN16(c));
c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(defq));
return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
}
/* Read an RSS table row */
static int rd_rss_row(struct adapter *adap, int row, u32 *val)
{
t4_write_reg(adap, TP_RSS_LKP_TABLE_A, 0xfff00000 | row);
return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE_A, LKPTBLROWVLD_F, 1,
5, 0, val);
}
/**
* t4_read_rss - read the contents of the RSS mapping table
* @adapter: the adapter
* @map: holds the contents of the RSS mapping table
*
* Reads the contents of the RSS hash->queue mapping table.
*/
int t4_read_rss(struct adapter *adapter, u16 *map)
{
u32 val;
int i, ret;
for (i = 0; i < RSS_NENTRIES / 2; ++i) {
ret = rd_rss_row(adapter, i, &val);
if (ret)
return ret;
*map++ = LKPTBLQUEUE0_G(val);
*map++ = LKPTBLQUEUE1_G(val);
}
return 0;
}
/**
* t4_fw_tp_pio_rw - Access TP PIO through LDST
* @adap: the adapter
* @vals: where the indirect register values are stored/written
* @nregs: how many indirect registers to read/write
* @start_index: index of first indirect register to read/write
* @rw: Read (1) or Write (0)
*
* Access TP PIO registers through LDST
*/
static void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
unsigned int start_index, unsigned int rw)
{
int ret, i;
int cmd = FW_LDST_ADDRSPC_TP_PIO;
struct fw_ldst_cmd c;
for (i = 0 ; i < nregs; i++) {
memset(&c, 0, sizeof(c));
c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
FW_CMD_REQUEST_F |
(rw ? FW_CMD_READ_F :
FW_CMD_WRITE_F) |
FW_LDST_CMD_ADDRSPACE_V(cmd));
c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
c.u.addrval.addr = cpu_to_be32(start_index + i);
c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]);
ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
if (!ret && rw)
vals[i] = be32_to_cpu(c.u.addrval.val);
}
}
/**
* t4_read_rss_key - read the global RSS key
* @adap: the adapter
* @key: 10-entry array holding the 320-bit RSS key
*
* Reads the global 320-bit RSS key.
*/
void t4_read_rss_key(struct adapter *adap, u32 *key)
{
if (adap->flags & FW_OK)
t4_fw_tp_pio_rw(adap, key, 10, TP_RSS_SECRET_KEY0_A, 1);
else
t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
TP_RSS_SECRET_KEY0_A);
}
/**
* t4_write_rss_key - program one of the RSS keys
* @adap: the adapter
* @key: 10-entry array holding the 320-bit RSS key
* @idx: which RSS key to write
*
* Writes one of the RSS keys with the given 320-bit value. If @idx is
* 0..15 the corresponding entry in the RSS key table is written,
* otherwise the global RSS key is written.
*/
void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
{
u8 rss_key_addr_cnt = 16;
u32 vrt = t4_read_reg(adap, TP_RSS_CONFIG_VRT_A);
/* On T6 and later, KeyMode 3 (per-VF and per-VF scramble) allows
* access to key addresses 16-63: KeyWrAddrX supplies the upper two
* index bits [5:4] into the key table.
*/
if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
(vrt & KEYEXTEND_F) && (KEYMODE_G(vrt) == 3))
rss_key_addr_cnt = 32;
if (adap->flags & FW_OK)
t4_fw_tp_pio_rw(adap, (void *)key, 10, TP_RSS_SECRET_KEY0_A, 0);
else
t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
TP_RSS_SECRET_KEY0_A);
if (idx >= 0 && idx < rss_key_addr_cnt) {
if (rss_key_addr_cnt > 16)
t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
KEYWRADDRX_V(idx >> 4) |
T6_VFWRADDR_V(idx) | KEYWREN_F);
else
t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
KEYWRADDR_V(idx) | KEYWREN_F);
}
}
/**
* t4_read_rss_pf_config - read PF RSS Configuration Table
* @adapter: the adapter
* @index: the entry in the PF RSS table to read
* @valp: where to store the returned value
*
* Reads the PF RSS Configuration Table at the specified index and returns
* the value found there.
*/
void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
u32 *valp)
{
if (adapter->flags & FW_OK)
t4_fw_tp_pio_rw(adapter, valp, 1,
TP_RSS_PF0_CONFIG_A + index, 1);
else
t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
valp, 1, TP_RSS_PF0_CONFIG_A + index);
}
/**
* t4_read_rss_vf_config - read VF RSS Configuration Table
* @adapter: the adapter
* @index: the entry in the VF RSS table to read
* @vfl: where to store the returned VFL
* @vfh: where to store the returned VFH
*
* Reads the VF RSS Configuration Table at the specified index and returns
* the (VFL, VFH) values found there.
*/
void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
u32 *vfl, u32 *vfh)
{
u32 vrt, mask, data;
if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
mask = VFWRADDR_V(VFWRADDR_M);
data = VFWRADDR_V(index);
} else {
mask = T6_VFWRADDR_V(T6_VFWRADDR_M);
data = T6_VFWRADDR_V(index);
}
/* Request that the index'th VF Table values be read into VFL/VFH.
*/
vrt = t4_read_reg(adapter, TP_RSS_CONFIG_VRT_A);
vrt &= ~(VFRDRG_F | VFWREN_F | KEYWREN_F | mask);
vrt |= data | VFRDEN_F;
t4_write_reg(adapter, TP_RSS_CONFIG_VRT_A, vrt);
/* Grab the VFL/VFH values ...
*/
if (adapter->flags & FW_OK) {
t4_fw_tp_pio_rw(adapter, vfl, 1, TP_RSS_VFL_CONFIG_A, 1);
t4_fw_tp_pio_rw(adapter, vfh, 1, TP_RSS_VFH_CONFIG_A, 1);
} else {
t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
vfl, 1, TP_RSS_VFL_CONFIG_A);
t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
vfh, 1, TP_RSS_VFH_CONFIG_A);
}
}
/**
* t4_read_rss_pf_map - read PF RSS Map
* @adapter: the adapter
*
* Reads the PF RSS Map register and returns its value.
*/
u32 t4_read_rss_pf_map(struct adapter *adapter)
{
u32 pfmap;
if (adapter->flags & FW_OK)
t4_fw_tp_pio_rw(adapter, &pfmap, 1, TP_RSS_PF_MAP_A, 1);
else
t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
&pfmap, 1, TP_RSS_PF_MAP_A);
return pfmap;
}
/**
* t4_read_rss_pf_mask - read PF RSS Mask
* @adapter: the adapter
*
* Reads the PF RSS Mask register and returns its value.
*/
u32 t4_read_rss_pf_mask(struct adapter *adapter)
{
u32 pfmask;
if (adapter->flags & FW_OK)
t4_fw_tp_pio_rw(adapter, &pfmask, 1, TP_RSS_PF_MSK_A, 1);
else
t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
&pfmask, 1, TP_RSS_PF_MSK_A);
return pfmask;
}
/**
* t4_tp_get_tcp_stats - read TP's TCP MIB counters
* @adap: the adapter
* @v4: holds the TCP/IP counter values
* @v6: holds the TCP/IPv6 counter values
*
* Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
* Either @v4 or @v6 may be %NULL to skip the corresponding stats.
*/
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
struct tp_tcp_stats *v6)
{
u32 val[TP_MIB_TCP_RXT_SEG_LO_A - TP_MIB_TCP_OUT_RST_A + 1];
#define STAT_IDX(x) ((TP_MIB_TCP_##x##_A) - TP_MIB_TCP_OUT_RST_A)
#define STAT(x) val[STAT_IDX(x)]
#define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
if (v4) {
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST_A);
v4->tcp_out_rsts = STAT(OUT_RST);
v4->tcp_in_segs = STAT64(IN_SEG);
v4->tcp_out_segs = STAT64(OUT_SEG);
v4->tcp_retrans_segs = STAT64(RXT_SEG);
}
if (v6) {
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST_A);
v6->tcp_out_rsts = STAT(OUT_RST);
v6->tcp_in_segs = STAT64(IN_SEG);
v6->tcp_out_segs = STAT64(OUT_SEG);
v6->tcp_retrans_segs = STAT64(RXT_SEG);
}
#undef STAT64
#undef STAT
#undef STAT_IDX
}
/**
* t4_tp_get_err_stats - read TP's error MIB counters
* @adap: the adapter
* @st: holds the counter values
*
* Returns the values of TP's error counters.
*/
void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
{
/* T4/T5 have 4 channels (NCHAN); T6 and later have 2 */
if (adap->params.arch.nchan == NCHAN) {
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
st->mac_in_errs, 12, TP_MIB_MAC_IN_ERR_0_A);
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
st->tnl_cong_drops, 8,
TP_MIB_TNL_CNG_DROP_0_A);
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
st->tnl_tx_drops, 4,
TP_MIB_TNL_DROP_0_A);
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
st->ofld_vlan_drops, 4,
TP_MIB_OFD_VLN_DROP_0_A);
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
st->tcp6_in_errs, 4,
TP_MIB_TCP_V6IN_ERR_0_A);
} else {
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
st->mac_in_errs, 2, TP_MIB_MAC_IN_ERR_0_A);
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
st->hdr_in_errs, 2, TP_MIB_HDR_IN_ERR_0_A);
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
st->tcp_in_errs, 2, TP_MIB_TCP_IN_ERR_0_A);
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
st->tnl_cong_drops, 2,
TP_MIB_TNL_CNG_DROP_0_A);
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
st->ofld_chan_drops, 2,
TP_MIB_OFD_CHN_DROP_0_A);
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
st->tnl_tx_drops, 2, TP_MIB_TNL_DROP_0_A);
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
st->ofld_vlan_drops, 2,
TP_MIB_OFD_VLN_DROP_0_A);
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
st->tcp6_in_errs, 2, TP_MIB_TCP_V6IN_ERR_0_A);
}
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
&st->ofld_no_neigh, 2, TP_MIB_OFD_ARP_DROP_A);
}
/**
* t4_tp_get_cpl_stats - read TP's CPL MIB counters
* @adap: the adapter
* @st: holds the counter values
*
* Returns the values of TP's CPL counters.
*/
void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
{
/* T4/T5 have 4 channels (NCHAN); T6 and later have 2 */
if (adap->params.arch.nchan == NCHAN) {
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
8, TP_MIB_CPL_IN_REQ_0_A);
} else {
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
2, TP_MIB_CPL_IN_REQ_0_A);
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->rsp,
2, TP_MIB_CPL_OUT_RSP_0_A);
}
}
/**
* t4_tp_get_rdma_stats - read TP's RDMA MIB counters
* @adap: the adapter
* @st: holds the counter values
*
* Returns the values of TP's RDMA counters.
*/
void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
{
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->rqe_dfr_pkt,
2, TP_MIB_RQE_DFR_PKT_A);
}
/**
* t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
* @adap: the adapter
* @idx: the port index
* @st: holds the counter values
*
* Returns the values of TP's FCoE counters for the selected port.
*/
void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
struct tp_fcoe_stats *st)
{
u32 val[2];
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->frames_ddp,
1, TP_MIB_FCOE_DDP_0_A + idx);
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->frames_drop,
1, TP_MIB_FCOE_DROP_0_A + idx);
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
2, TP_MIB_FCOE_BYTE_0_HI_A + 2 * idx);
st->octets_ddp = ((u64)val[0] << 32) | val[1];
}
/**
* t4_get_usm_stats - read TP's non-TCP DDP MIB counters
* @adap: the adapter
* @st: holds the counter values
*
* Returns the values of TP's counters for non-TCP directly-placed packets.
*/
void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
{
u32 val[4];
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val, 4,
TP_MIB_USM_PKTS_A);
st->frames = val[0];
st->drops = val[1];
st->octets = ((u64)val[2] << 32) | val[3];
}
/**
* t4_read_mtu_tbl - returns the values in the HW path MTU table
* @adap: the adapter
* @mtus: where to store the MTU values
* @mtu_log: where to store the MTU base-2 log (may be %NULL)
*
* Reads the HW path MTU table.
*/
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
{
u32 v;
int i;
for (i = 0; i < NMTUS; ++i) {
t4_write_reg(adap, TP_MTU_TABLE_A,
MTUINDEX_V(0xff) | MTUVALUE_V(i));
v = t4_read_reg(adap, TP_MTU_TABLE_A);
mtus[i] = MTUVALUE_G(v);
if (mtu_log)
mtu_log[i] = MTUWIDTH_G(v);
}
}
/**
* t4_read_cong_tbl - reads the congestion control table
* @adap: the adapter
* @incr: where to store the alpha values
*
* Reads the additive increments programmed into the HW congestion
* control table.
*/
void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
{
unsigned int mtu, w;
for (mtu = 0; mtu < NMTUS; ++mtu)
for (w = 0; w < NCCTRL_WIN; ++w) {
t4_write_reg(adap, TP_CCTRL_TABLE_A,
ROWINDEX_V(0xffff) | (mtu << 5) | w);
incr[mtu][w] = (u16)t4_read_reg(adap,
TP_CCTRL_TABLE_A) & 0x1fff;
}
}
/**
* t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
* @adap: the adapter
* @addr: the indirect TP register address
* @mask: specifies the field within the register to modify
* @val: new value for the field
*
* Sets a field of an indirect TP register to the given value.
*/
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
unsigned int mask, unsigned int val)
{
t4_write_reg(adap, TP_PIO_ADDR_A, addr);
val |= t4_read_reg(adap, TP_PIO_DATA_A) & ~mask;
t4_write_reg(adap, TP_PIO_DATA_A, val);
}
/**
* init_cong_ctrl - initialize congestion control parameters
* @a: the alpha values for congestion control
* @b: the beta values for congestion control
*
* Initialize the congestion control parameters.
*/
static void init_cong_ctrl(unsigned short *a, unsigned short *b)
{
a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
a[9] = 2;
a[10] = 3;
a[11] = 4;
a[12] = 5;
a[13] = 6;
a[14] = 7;
a[15] = 8;
a[16] = 9;
a[17] = 10;
a[18] = 14;
a[19] = 17;
a[20] = 21;
a[21] = 25;
a[22] = 30;
a[23] = 35;
a[24] = 45;
a[25] = 60;
a[26] = 80;
a[27] = 100;
a[28] = 200;
a[29] = 300;
a[30] = 400;
a[31] = 500;
b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
b[9] = b[10] = 1;
b[11] = b[12] = 2;
b[13] = b[14] = b[15] = b[16] = 3;
b[17] = b[18] = b[19] = b[20] = b[21] = 4;
b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
b[28] = b[29] = 6;
b[30] = b[31] = 7;
}
/* The minimum additive increment value for the congestion control table */
#define CC_MIN_INCR 2U
/**
* t4_load_mtus - write the MTU and congestion control HW tables
* @adap: the adapter
* @mtus: the values for the MTU table
* @alpha: the values for the congestion control alpha parameter
* @beta: the values for the congestion control beta parameter
*
* Write the HW MTU table with the supplied MTUs and the high-speed
* congestion control table with the supplied alpha, beta, and MTUs.
* We write the two tables together because the additive increments
* depend on the MTUs.
*/
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
const unsigned short *alpha, const unsigned short *beta)
{
static const unsigned int avg_pkts[NCCTRL_WIN] = {
2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
28672, 40960, 57344, 81920, 114688, 163840, 229376
};
unsigned int i, w;
for (i = 0; i < NMTUS; ++i) {
unsigned int mtu = mtus[i];
unsigned int log2 = fls(mtu);
if (!(mtu & ((1 << log2) >> 2))) /* round */
log2--;
t4_write_reg(adap, TP_MTU_TABLE_A, MTUINDEX_V(i) |
MTUWIDTH_V(log2) | MTUVALUE_V(mtu));
for (w = 0; w < NCCTRL_WIN; ++w) {
unsigned int inc;
inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
CC_MIN_INCR);
t4_write_reg(adap, TP_CCTRL_TABLE_A, (i << 21) |
(w << 16) | (beta[w] << 13) | inc);
}
}
}
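/* Editor's note: worked examples of the MTU width rounding above. fls()
 * returns the 1-based index of the highest set bit, so log2 starts one
 * past the MSB and is kept only when the next-lower bit is also set
 * (i.e. mtu >= 1.5 * 2^MSB), otherwise it is decremented:
 *
 *	mtu = 1500 (MSB = bit 10): fls = 11, bit 9 clear -> log2 = 10
 *	mtu = 1536 (bits 10, 9):   fls = 11, bit 9 set   -> log2 = 11
 */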
/* Calculates a rate in bytes/s given the number of 256-byte units per 4K core
* clocks. The formula is
*
* bytes/s = bytes256 * 256 * ClkFreq / 4096
*
* which is equivalent to
*
* bytes/s = 62.5 * bytes256 * ClkFreq_ms
*/
static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
{
u64 v = bytes256 * adap->params.vpd.cclk;
return v * 62 + v / 2;
}
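/* Editor's note: adap->params.vpd.cclk holds the core clock in kHz (i.e.
 * clocks per millisecond, the "ClkFreq_ms" above), so v * 62 + v / 2 is an
 * integer-only 62.5 * v. Worked example: bytes256 = 4 at cclk = 250000 kHz
 * gives v = 1000000 and a rate of 62,500,000 bytes/s.
 */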
/**
* t4_get_chan_txrate - get the current per channel Tx rates
* @adap: the adapter
* @nic_rate: rates for NIC traffic
* @ofld_rate: rates for offloaded traffic
*
* Return the current Tx rates in bytes/s for NIC and offloaded traffic
* for each channel.
*/
void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
{
u32 v;
v = t4_read_reg(adap, TP_TX_TRATE_A);
nic_rate[0] = chan_rate(adap, TNLRATE0_G(v));
nic_rate[1] = chan_rate(adap, TNLRATE1_G(v));
if (adap->params.arch.nchan == NCHAN) {
nic_rate[2] = chan_rate(adap, TNLRATE2_G(v));
nic_rate[3] = chan_rate(adap, TNLRATE3_G(v));
}
v = t4_read_reg(adap, TP_TX_ORATE_A);
ofld_rate[0] = chan_rate(adap, OFDRATE0_G(v));
ofld_rate[1] = chan_rate(adap, OFDRATE1_G(v));
if (adap->params.arch.nchan == NCHAN) {
ofld_rate[2] = chan_rate(adap, OFDRATE2_G(v));
ofld_rate[3] = chan_rate(adap, OFDRATE3_G(v));
}
}
/**
* t4_pmtx_get_stats - returns the HW stats from PMTX
* @adap: the adapter
* @cnt: where to store the count statistics
* @cycles: where to store the cycle statistics
*
* Returns performance statistics from PMTX.
*/
void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
{
int i;
u32 data[2];
for (i = 0; i < PM_NSTATS; i++) {
t4_write_reg(adap, PM_TX_STAT_CONFIG_A, i + 1);
cnt[i] = t4_read_reg(adap, PM_TX_STAT_COUNT_A);
if (is_t4(adap->params.chip)) {
cycles[i] = t4_read_reg64(adap, PM_TX_STAT_LSB_A);
} else {
t4_read_indirect(adap, PM_TX_DBG_CTRL_A,
PM_TX_DBG_DATA_A, data, 2,
PM_TX_DBG_STAT_MSB_A);
cycles[i] = (((u64)data[0] << 32) | data[1]);
}
}
}
/**
* t4_pmrx_get_stats - returns the HW stats from PMRX
* @adap: the adapter
* @cnt: where to store the count statistics
* @cycles: where to store the cycle statistics
*
* Returns performance statistics from PMRX.
*/
void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
{
int i;
u32 data[2];
for (i = 0; i < PM_NSTATS; i++) {
t4_write_reg(adap, PM_RX_STAT_CONFIG_A, i + 1);
cnt[i] = t4_read_reg(adap, PM_RX_STAT_COUNT_A);
if (is_t4(adap->params.chip)) {
cycles[i] = t4_read_reg64(adap, PM_RX_STAT_LSB_A);
} else {
t4_read_indirect(adap, PM_RX_DBG_CTRL_A,
PM_RX_DBG_DATA_A, data, 2,
PM_RX_DBG_STAT_MSB_A);
cycles[i] = (((u64)data[0] << 32) | data[1]);
}
}
}
/**
* t4_get_mps_bg_map - return the buffer groups associated with a port
* @adap: the adapter
* @idx: the port index
*
* Returns a bitmap indicating which MPS buffer groups are associated
* with the given port. Bit i is set if buffer group i is used by the
* port.
*/
unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
{
u32 n = NUMPORTS_G(t4_read_reg(adap, MPS_CMN_CTL_A));
if (n == 0)
return idx == 0 ? 0xf : 0;
if (n == 1)
return idx < 2 ? (3 << (2 * idx)) : 0;
return 1 << idx;
}
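/* Reading of the mapping above (illustrative): with one port (n == 0) port 0
* owns all four buffer groups (0xf); with two ports (n == 1) each port owns
* an adjacent pair (port 0 -> 0x3, port 1 -> 0xc); otherwise each port owns
* exactly one group (port i -> 1 << i).
*/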
/**
* t4_get_port_type_description - return Port Type string description
* @port_type: firmware Port Type enumeration
*/
const char *t4_get_port_type_description(enum fw_port_type port_type)
{
static const char *const port_type_description[] = {
"R XFI",
"R XAUI",
"T SGMII",
"T XFI",
"T XAUI",
"KX4",
"CX4",
"KX",
"KR",
"R SFP+",
"KR/KX",
"KR/KX/KX4",
"R QSFP_10G",
"R QSA",
"R QSFP",
"R BP40_BA",
};
if (port_type < ARRAY_SIZE(port_type_description))
return port_type_description[port_type];
return "UNKNOWN";
}
/**
* t4_get_port_stats_offset - collect port stats relative to a previous
* snapshot
* @adap: The adapter
* @idx: The port
* @stats: Current stats to fill
* @offset: Previous stats snapshot
*/
void t4_get_port_stats_offset(struct adapter *adap, int idx,
struct port_stats *stats,
struct port_stats *offset)
{
u64 *s, *o;
int i;
t4_get_port_stats(adap, idx, stats);
for (i = 0, s = (u64 *)stats, o = (u64 *)offset;
i < (sizeof(struct port_stats) / sizeof(u64));
i++, s++, o++)
*s -= *o;
}
/**
* t4_get_port_stats - collect port statistics
* @adap: the adapter
* @idx: the port index
* @p: the stats structure to fill
*
* Collect statistics related to the given port from HW.
*/
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
{
u32 bgmap = t4_get_mps_bg_map(adap, idx);
#define GET_STAT(name) \
t4_read_reg64(adap, \
(is_t4(adap->params.chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
p->tx_octets = GET_STAT(TX_PORT_BYTES);
p->tx_frames = GET_STAT(TX_PORT_FRAMES);
p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
p->tx_frames_64 = GET_STAT(TX_PORT_64B);
p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
p->tx_drop = GET_STAT(TX_PORT_DROP);
p->tx_pause = GET_STAT(TX_PORT_PAUSE);
p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
p->rx_octets = GET_STAT(RX_PORT_BYTES);
p->rx_frames = GET_STAT(RX_PORT_FRAMES);
p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
p->rx_frames_64 = GET_STAT(RX_PORT_64B);
p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
p->rx_pause = GET_STAT(RX_PORT_PAUSE);
p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
#undef GET_STAT
#undef GET_STAT_COM
}
/**
* t4_get_lb_stats - collect loopback port statistics
* @adap: the adapter
* @idx: the loopback port index
* @p: the stats structure to fill
*
* Return HW statistics for the given loopback port.
*/
void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
{
u32 bgmap = t4_get_mps_bg_map(adap, idx);
#define GET_STAT(name) \
t4_read_reg64(adap, \
(is_t4(adap->params.chip) ? \
PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L) : \
T5_PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L)))
#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
p->octets = GET_STAT(BYTES);
p->frames = GET_STAT(FRAMES);
p->bcast_frames = GET_STAT(BCAST);
p->mcast_frames = GET_STAT(MCAST);
p->ucast_frames = GET_STAT(UCAST);
p->error_frames = GET_STAT(ERROR);
p->frames_64 = GET_STAT(64B);
p->frames_65_127 = GET_STAT(65B_127B);
p->frames_128_255 = GET_STAT(128B_255B);
p->frames_256_511 = GET_STAT(256B_511B);
p->frames_512_1023 = GET_STAT(512B_1023B);
p->frames_1024_1518 = GET_STAT(1024B_1518B);
p->frames_1519_max = GET_STAT(1519B_MAX);
p->drop = GET_STAT(DROP_FRAMES);
p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
#undef GET_STAT
#undef GET_STAT_COM
}
/**
* t4_mk_filtdelwr - create a delete filter WR
* @ftid: the filter ID
* @wr: the filter work request to populate
* @qid: ingress queue to receive the delete notification
*
* Creates a filter work request to delete the supplied filter. If @qid is
* negative the delete notification is suppressed.
*/
void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
{
memset(wr, 0, sizeof(*wr));
wr->op_pkd = cpu_to_be32(FW_WR_OP_V(FW_FILTER_WR));
wr->len16_pkd = cpu_to_be32(FW_WR_LEN16_V(sizeof(*wr) / 16));
wr->tid_to_iq = cpu_to_be32(FW_FILTER_WR_TID_V(ftid) |
FW_FILTER_WR_NOREPLY_V(qid < 0));
wr->del_filter_to_l2tix = cpu_to_be32(FW_FILTER_WR_DEL_FILTER_F);
if (qid >= 0)
wr->rx_chan_rx_rpl_iq =
cpu_to_be16(FW_FILTER_WR_RX_RPL_IQ_V(qid));
}
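/* Usage sketch (hypothetical filter ID and notification queue):
*
* struct fw_filter_wr wr;
*
* t4_mk_filtdelwr(f->tid, &wr, adap->sge.fw_evtq.abs_id);
*
* Passing a negative qid instead sets FW_FILTER_WR_NOREPLY and suppresses
* the delete-completion notification.
*/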
#define INIT_CMD(var, cmd, rd_wr) do { \
(var).op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_##cmd##_CMD) | \
FW_CMD_REQUEST_F | \
FW_CMD_##rd_wr##_F); \
(var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
} while (0)
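/* For example (illustrative expansion), INIT_CMD(c, BYE, WRITE) sets
* c.op_to_write to FW_CMD_OP_V(FW_BYE_CMD) | FW_CMD_REQUEST_F |
* FW_CMD_WRITE_F and c.retval_len16 to the command length in 16-byte units
* via FW_LEN16().
*/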
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
u32 addr, u32 val)
{
u32 ldst_addrspace;
struct fw_ldst_cmd c;
memset(&c, 0, sizeof(c));
ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FIRMWARE);
c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
FW_CMD_REQUEST_F |
FW_CMD_WRITE_F |
ldst_addrspace);
c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
c.u.addrval.addr = cpu_to_be32(addr);
c.u.addrval.val = cpu_to_be32(val);
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
/**
* t4_mdio_rd - read a PHY register through MDIO
* @adap: the adapter
* @mbox: mailbox to use for the FW command
* @phy_addr: the PHY address
* @mmd: the PHY MMD to access (0 for clause 22 PHYs)
* @reg: the register to read
* @valp: where to store the value
*
* Issues a FW command through the given mailbox to read a PHY register.
*/
int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
unsigned int mmd, unsigned int reg, u16 *valp)
{
int ret;
u32 ldst_addrspace;
struct fw_ldst_cmd c;
memset(&c, 0, sizeof(c));
ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
FW_CMD_REQUEST_F | FW_CMD_READ_F |
ldst_addrspace);
c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
FW_LDST_CMD_MMD_V(mmd));
c.u.mdio.raddr = cpu_to_be16(reg);
ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
if (ret == 0)
*valp = be16_to_cpu(c.u.mdio.rval);
return ret;
}
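/* Usage sketch (hypothetical PHY address; the MDIO_* constants are the
* generic ones from <linux/mdio.h>):
*
* u16 val;
* int ret = t4_mdio_rd(adap, adap->mbox, phy_addr, MDIO_MMD_PMAPMD,
* MDIO_STAT1, &val);
*
* A clause-22 PHY is addressed by passing mmd == 0.
*/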
/**
* t4_mdio_wr - write a PHY register through MDIO
* @adap: the adapter
* @mbox: mailbox to use for the FW command
* @phy_addr: the PHY address
* @mmd: the PHY MMD to access (0 for clause 22 PHYs)
* @reg: the register to write
* @valp: value to write
*
* Issues a FW command through the given mailbox to write a PHY register.
*/
int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
unsigned int mmd, unsigned int reg, u16 val)
{
u32 ldst_addrspace;
struct fw_ldst_cmd c;
memset(&c, 0, sizeof(c));
ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
ldst_addrspace);
c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
FW_LDST_CMD_MMD_V(mmd));
c.u.mdio.raddr = cpu_to_be16(reg);
c.u.mdio.rval = cpu_to_be16(val);
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
/**
* t4_sge_decode_idma_state - decode the idma state
* @adapter: the adapter
* @state: the state idma is stuck in
*/
void t4_sge_decode_idma_state(struct adapter *adapter, int state)
{
static const char * const t4_decode[] = {
"IDMA_IDLE",
"IDMA_PUSH_MORE_CPL_FIFO",
"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
"Not used",
"IDMA_PHYSADDR_SEND_PCIEHDR",
"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
"IDMA_PHYSADDR_SEND_PAYLOAD",
"IDMA_SEND_FIFO_TO_IMSG",
"IDMA_FL_REQ_DATA_FL_PREP",
"IDMA_FL_REQ_DATA_FL",
"IDMA_FL_DROP",
"IDMA_FL_H_REQ_HEADER_FL",
"IDMA_FL_H_SEND_PCIEHDR",
"IDMA_FL_H_PUSH_CPL_FIFO",
"IDMA_FL_H_SEND_CPL",
"IDMA_FL_H_SEND_IP_HDR_FIRST",
"IDMA_FL_H_SEND_IP_HDR",
"IDMA_FL_H_REQ_NEXT_HEADER_FL",
"IDMA_FL_H_SEND_NEXT_PCIEHDR",
"IDMA_FL_H_SEND_IP_HDR_PADDING",
"IDMA_FL_D_SEND_PCIEHDR",
"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
"IDMA_FL_D_REQ_NEXT_DATA_FL",
"IDMA_FL_SEND_PCIEHDR",
"IDMA_FL_PUSH_CPL_FIFO",
"IDMA_FL_SEND_CPL",
"IDMA_FL_SEND_PAYLOAD_FIRST",
"IDMA_FL_SEND_PAYLOAD",
"IDMA_FL_REQ_NEXT_DATA_FL",
"IDMA_FL_SEND_NEXT_PCIEHDR",
"IDMA_FL_SEND_PADDING",
"IDMA_FL_SEND_COMPLETION_TO_IMSG",
"IDMA_FL_SEND_FIFO_TO_IMSG",
"IDMA_FL_REQ_DATAFL_DONE",
"IDMA_FL_REQ_HEADERFL_DONE",
};
static const char * const t5_decode[] = {
"IDMA_IDLE",
"IDMA_ALMOST_IDLE",
"IDMA_PUSH_MORE_CPL_FIFO",
"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
"IDMA_PHYSADDR_SEND_PCIEHDR",
"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
"IDMA_PHYSADDR_SEND_PAYLOAD",
"IDMA_SEND_FIFO_TO_IMSG",
"IDMA_FL_REQ_DATA_FL",
"IDMA_FL_DROP",
"IDMA_FL_DROP_SEND_INC",
"IDMA_FL_H_REQ_HEADER_FL",
"IDMA_FL_H_SEND_PCIEHDR",
"IDMA_FL_H_PUSH_CPL_FIFO",
"IDMA_FL_H_SEND_CPL",
"IDMA_FL_H_SEND_IP_HDR_FIRST",
"IDMA_FL_H_SEND_IP_HDR",
"IDMA_FL_H_REQ_NEXT_HEADER_FL",
"IDMA_FL_H_SEND_NEXT_PCIEHDR",
"IDMA_FL_H_SEND_IP_HDR_PADDING",
"IDMA_FL_D_SEND_PCIEHDR",
"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
"IDMA_FL_D_REQ_NEXT_DATA_FL",
"IDMA_FL_SEND_PCIEHDR",
"IDMA_FL_PUSH_CPL_FIFO",
"IDMA_FL_SEND_CPL",
"IDMA_FL_SEND_PAYLOAD_FIRST",
"IDMA_FL_SEND_PAYLOAD",
"IDMA_FL_REQ_NEXT_DATA_FL",
"IDMA_FL_SEND_NEXT_PCIEHDR",
"IDMA_FL_SEND_PADDING",
"IDMA_FL_SEND_COMPLETION_TO_IMSG",
};
static const u32 sge_regs[] = {
SGE_DEBUG_DATA_LOW_INDEX_2_A,
SGE_DEBUG_DATA_LOW_INDEX_3_A,
SGE_DEBUG_DATA_HIGH_INDEX_10_A,
};
const char **sge_idma_decode;
int sge_idma_decode_nstates;
int i;
if (is_t4(adapter->params.chip)) {
sge_idma_decode = (const char **)t4_decode;
sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
} else {
sge_idma_decode = (const char **)t5_decode;
sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
}
if (state < sge_idma_decode_nstates)
CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
else
CH_WARN(adapter, "idma state %d unknown\n", state);
for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
CH_WARN(adapter, "SGE register %#x value %#x\n",
sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
}
/**
* t4_sge_ctxt_flush - flush the SGE context cache
* @adap: the adapter
* @mbox: mailbox to use for the FW command
*
* Issues a FW command through the given mailbox to flush the
* SGE context cache.
*/
int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
{
int ret;
u32 ldst_addrspace;
struct fw_ldst_cmd c;
memset(&c, 0, sizeof(c));
ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_SGE_EGRC);
c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
FW_CMD_REQUEST_F | FW_CMD_READ_F |
ldst_addrspace);
c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
c.u.idctxt.msg_ctxtflush = cpu_to_be32(FW_LDST_CMD_CTXTFLUSH_F);
ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
return ret;
}
/**
* t4_fw_hello - establish communication with FW
* @adap: the adapter
* @mbox: mailbox to use for the FW command
* @evt_mbox: mailbox to receive async FW events
* @master: specifies the caller's willingness to be the device master
* @state: returns the current device state (if non-NULL)
*
* Issues a command to establish communication with FW. Returns either
* an error (negative integer) or the mailbox of the Master PF.
*/
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
enum dev_master master, enum dev_state *state)
{
int ret;
struct fw_hello_cmd c;
u32 v;
unsigned int master_mbox;
int retries = FW_CMD_HELLO_RETRIES;
retry:
memset(&c, 0, sizeof(c));
INIT_CMD(c, HELLO, WRITE);
c.err_to_clearinit = cpu_to_be32(
FW_HELLO_CMD_MASTERDIS_V(master == MASTER_CANT) |
FW_HELLO_CMD_MASTERFORCE_V(master == MASTER_MUST) |
FW_HELLO_CMD_MBMASTER_V(master == MASTER_MUST ?
mbox : FW_HELLO_CMD_MBMASTER_M) |
FW_HELLO_CMD_MBASYNCNOT_V(evt_mbox) |
FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os) |
FW_HELLO_CMD_CLEARINIT_F);
/*
* Issue the HELLO command to the firmware. If it's not successful
* but indicates that we got a "busy" or "timeout" condition, retry
* the HELLO until we exhaust our retry limit. If we do exceed our
* retry limit, check to see if the firmware left us any error
* information and report that if so.
*/
ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
if (ret < 0) {
if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
goto retry;
if (t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_ERR_F)
t4_report_fw_error(adap);
return ret;
}
v = be32_to_cpu(c.err_to_clearinit);
master_mbox = FW_HELLO_CMD_MBMASTER_G(v);
if (state) {
if (v & FW_HELLO_CMD_ERR_F)
*state = DEV_STATE_ERR;
else if (v & FW_HELLO_CMD_INIT_F)
*state = DEV_STATE_INIT;
else
*state = DEV_STATE_UNINIT;
}
/*
* If we're not the Master PF then we need to wait around for the
* Master PF Driver to finish setting up the adapter.
*
* Note that we also do this wait if we're a non-Master-capable PF and
* there is no current Master PF; a Master PF may show up momentarily
* and we wouldn't want to fail pointlessly. (This can happen when an
* OS loads lots of different drivers rapidly at the same time). In
* this case, the Master PF returned by the firmware will be
* PCIE_FW_MASTER_M so the test below will work ...
*/
if ((v & (FW_HELLO_CMD_ERR_F|FW_HELLO_CMD_INIT_F)) == 0 &&
master_mbox != mbox) {
int waiting = FW_CMD_HELLO_TIMEOUT;
/*
* Wait for the firmware to either indicate an error or
* initialized state. If we see either of these we bail out
* and report the issue to the caller. If we exhaust the
* "hello timeout" and we haven't exhausted our retries, try
* again. Otherwise bail with a timeout error.
*/
for (;;) {
u32 pcie_fw;
msleep(50);
waiting -= 50;
/*
* If neither Error nor Initialized is indicated
* by the firmware, keep waiting until we exhaust our
* timeout ... and then retry if we haven't exhausted
* our retries ...
*/
pcie_fw = t4_read_reg(adap, PCIE_FW_A);
if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {
if (waiting <= 0) {
if (retries-- > 0)
goto retry;
return -ETIMEDOUT;
}
continue;
}
/*
* We either have an Error or Initialized condition;
* report errors preferentially.
*/
if (state) {
if (pcie_fw & PCIE_FW_ERR_F)
*state = DEV_STATE_ERR;
else if (pcie_fw & PCIE_FW_INIT_F)
*state = DEV_STATE_INIT;
}
/*
* If we arrived before a Master PF was selected and
* there's now a valid Master PF, grab its identity
* for our caller.
*/
if (master_mbox == PCIE_FW_MASTER_M &&
(pcie_fw & PCIE_FW_MASTER_VLD_F))
master_mbox = PCIE_FW_MASTER_G(pcie_fw);
break;
}
}
return master_mbox;
}
/**
* t4_fw_bye - end communication with FW
* @adap: the adapter
* @mbox: mailbox to use for the FW command
*
* Issues a command to terminate communication with FW.
*/
int t4_fw_bye(struct adapter *adap, unsigned int mbox)
{
struct fw_bye_cmd c;
memset(&c, 0, sizeof(c));
INIT_CMD(c, BYE, WRITE);
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
/**
* t4_early_init - ask FW to initialize the device
* @adap: the adapter
* @mbox: mailbox to use for the FW command
*
* Issues a command to FW to partially initialize the device. This
* performs initialization that generally doesn't depend on user input.
*/
int t4_early_init(struct adapter *adap, unsigned int mbox)
{
struct fw_initialize_cmd c;
memset(&c, 0, sizeof(c));
INIT_CMD(c, INITIALIZE, WRITE);
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
/**
* t4_fw_reset - issue a reset to FW
* @adap: the adapter
* @mbox: mailbox to use for the FW command
* @reset: specifies the type of reset to perform
*
* Issues a reset command of the specified type to FW.
*/
int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
{
struct fw_reset_cmd c;
memset(&c, 0, sizeof(c));
INIT_CMD(c, RESET, WRITE);
c.val = cpu_to_be32(reset);
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
/**
* t4_fw_halt - issue a reset/halt to FW and put uP into RESET
* @adap: the adapter
* @mbox: mailbox to use for the FW RESET command (if desired)
* @force: force uP into RESET even if FW RESET command fails
*
* Issues a RESET command to firmware (if desired) with a HALT indication
* and then puts the microprocessor into RESET state. The RESET command
* will only be issued if a legitimate mailbox is provided (mbox <=
* PCIE_FW_MASTER_M).
*
* This is generally used in order for the host to safely manipulate the
* adapter without fear of conflicting with whatever the firmware might
* be doing. The only way out of this state is to RESTART the firmware
* ...
*/
static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
{
int ret = 0;
/*
* If a legitimate mailbox is provided, issue a RESET command
* with a HALT indication.
*/
if (mbox <= PCIE_FW_MASTER_M) {
struct fw_reset_cmd c;
memset(&c, 0, sizeof(c));
INIT_CMD(c, RESET, WRITE);
c.val = cpu_to_be32(PIORST_F | PIORSTMODE_F);
c.halt_pkd = cpu_to_be32(FW_RESET_CMD_HALT_F);
ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
/*
* Normally we won't complete the operation if the firmware RESET
* command fails but if our caller insists we'll go ahead and put the
* uP into RESET. This can be useful if the firmware is hung or even
* missing ... We'll have to take the risk of putting the uP into
* RESET without the cooperation of firmware in that case.
*
* We also force the firmware's HALT flag to be on in case we bypassed
* the firmware RESET command above or we're dealing with old firmware
* which doesn't have the HALT capability. This will serve as a flag
* for the incoming firmware to know that it's coming out of a HALT
* rather than a RESET ... if it's new enough to understand that ...
*/
if (ret == 0 || force) {
t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F,
PCIE_FW_HALT_F);
}
/*
* And we always return the result of the firmware RESET command
* even when we force the uP into RESET ...
*/
return ret;
}
/**
* t4_fw_restart - restart the firmware by taking the uP out of RESET
* @adap: the adapter
* @mbox: mailbox to use for the FW RESET command (if desired)
* @reset: if we want to do a RESET to restart things
*
* Restart firmware previously halted by t4_fw_halt(). On successful
* return the previous PF Master remains as the new PF Master and there
* is no need to issue a new HELLO command, etc.
*
* We do this in two ways:
*
* 1. If we're dealing with newer firmware we'll simply want to take
* the chip's microprocessor out of RESET. This will cause the
* firmware to start up from its start vector. And then we'll loop
* until the firmware indicates it's started again (PCIE_FW.HALT
* reset to 0) or we timeout.
*
* 2. If we're dealing with older firmware then we'll need to RESET
* the chip since older firmware won't recognize the PCIE_FW.HALT
* flag and automatically RESET itself on startup.
*/
static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
{
if (reset) {
/*
* Since we're directing the RESET instead of the firmware
* doing it automatically, we need to clear the PCIE_FW.HALT
* bit.
*/
t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F, 0);
/*
* If we've been given a valid mailbox, first try to get the
* firmware to do the RESET. If that works, great and we can
* return success. Otherwise, if we haven't been given a
* valid mailbox or the RESET command failed, fall back to
* hitting the chip with a hammer.
*/
if (mbox <= PCIE_FW_MASTER_M) {
t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
msleep(100);
if (t4_fw_reset(adap, mbox,
PIORST_F | PIORSTMODE_F) == 0)
return 0;
}
t4_write_reg(adap, PL_RST_A, PIORST_F | PIORSTMODE_F);
msleep(2000);
} else {
int ms;
t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
if (!(t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_HALT_F))
return 0;
msleep(100);
ms += 100;
}
return -ETIMEDOUT;
}
return 0;
}
/**
* t4_fw_upgrade - perform all of the steps necessary to upgrade FW
* @adap: the adapter
* @mbox: mailbox to use for the FW RESET command (if desired)
* @fw_data: the firmware image to write
* @size: image size
* @force: force upgrade even if firmware doesn't cooperate
*
* Perform all of the steps necessary for upgrading an adapter's
* firmware image. Normally this requires the cooperation of the
* existing firmware in order to halt all existing activities
* but if an invalid mailbox token is passed in we skip that step
* (though we'll still put the adapter microprocessor into RESET in
* that case).
*
* On successful return the new firmware will have been loaded and
* the adapter will have been fully RESET losing all previous setup
* state. On unsuccessful return the adapter may be completely hosed ...
* positive errno indicates that the adapter is ~probably~ intact, a
* negative errno indicates that things are looking bad ...
*/
int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
const u8 *fw_data, unsigned int size, int force)
{
const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
int reset, ret;
if (!t4_fw_matches_chip(adap, fw_hdr))
return -EINVAL;
ret = t4_fw_halt(adap, mbox, force);
if (ret < 0 && !force)
return ret;
ret = t4_load_fw(adap, fw_data, size);
if (ret < 0)
return ret;
/*
* Older versions of the firmware don't understand the new
* PCIE_FW.HALT flag and so won't know to perform a RESET when they
* restart. So for newly loaded older firmware we'll have to do the
* RESET for it so it starts up on a clean slate. We can tell if
* the newly loaded firmware will handle this right by checking
* its header flags to see if it advertises the capability.
*/
reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
return t4_fw_restart(adap, mbox, reset);
}
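/* The resulting upgrade sequence is: halt the uP (t4_fw_halt), write the new
* image (t4_load_fw), then restart the uP (t4_fw_restart), requesting a full
* chip RESET only when the new image does not advertise
* FW_HDR_FLAGS_RESET_HALT.
*/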
/**
* t4_fixup_host_params - fix up host-dependent parameters
* @adap: the adapter
* @page_size: the host's Base Page Size
* @cache_line_size: the host's Cache Line Size
*
* Various registers in T4 contain values which are dependent on the
* host's Base Page and Cache Line Sizes. This function will fix all of
* those registers with the appropriate values as passed in ...
*/
int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
unsigned int cache_line_size)
{
unsigned int page_shift = fls(page_size) - 1;
unsigned int sge_hps = page_shift - 10;
unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
unsigned int fl_align_log = fls(fl_align) - 1;
t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A,
HOSTPAGESIZEPF0_V(sge_hps) |
HOSTPAGESIZEPF1_V(sge_hps) |
HOSTPAGESIZEPF2_V(sge_hps) |
HOSTPAGESIZEPF3_V(sge_hps) |
HOSTPAGESIZEPF4_V(sge_hps) |
HOSTPAGESIZEPF5_V(sge_hps) |
HOSTPAGESIZEPF6_V(sge_hps) |
HOSTPAGESIZEPF7_V(sge_hps));
if (is_t4(adap->params.chip)) {
t4_set_reg_field(adap, SGE_CONTROL_A,
INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
EGRSTATUSPAGESIZE_F,
INGPADBOUNDARY_V(fl_align_log -
INGPADBOUNDARY_SHIFT_X) |
EGRSTATUSPAGESIZE_V(stat_len != 64));
} else {
/* T5 introduced the separation of the Free List Padding and
* Packing Boundaries. Thus, we can select a smaller Padding
* Boundary to avoid uselessly chewing up PCIe Link and Memory
* Bandwidth, and use a Packing Boundary which is large enough
* to avoid false sharing between CPUs, etc.
*
* For the PCI Link, the smaller the Padding Boundary the
* better. For the Memory Controller, a smaller Padding
* Boundary is better until we cross under the Memory Line
* Size (the minimum unit of transfer to/from Memory). If we
* have a Padding Boundary which is smaller than the Memory
* Line Size, that'll involve a Read-Modify-Write cycle on the
* Memory Controller which is never good. For T5 the smallest
* Padding Boundary which we can select is 32 bytes which is
* larger than any known Memory Controller Line Size so we'll
* use that.
*
* T5 has a different interpretation of the "0" value for the
* Packing Boundary. This corresponds to 16 bytes instead of
* the expected 32 bytes. We never have a Packing Boundary
* less than 32 bytes so we can't use that special value but
* on the other hand, if we wanted 32 bytes, the best we can
* really do is 64 bytes.
*/
if (fl_align <= 32) {
fl_align = 64;
fl_align_log = 6;
}
t4_set_reg_field(adap, SGE_CONTROL_A,
INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
EGRSTATUSPAGESIZE_F,
INGPADBOUNDARY_V(INGPCIEBOUNDARY_32B_X) |
EGRSTATUSPAGESIZE_V(stat_len != 64));
t4_set_reg_field(adap, SGE_CONTROL2_A,
INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
INGPACKBOUNDARY_V(fl_align_log -
INGPACKBOUNDARY_SHIFT_X));
}
/*
* Adjust various SGE Free List Host Buffer Sizes.
*
* This is something of a crock since we're using fixed indices into
* the array which are also known by the sge.c code and the T4
* Firmware Configuration File. We need to come up with a much better
* approach to managing this array. For now, the first four entries
* are:
*
* 0: Host Page Size
* 1: 64KB
* 2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
* 3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
*
* For the single-MTU buffers in unpacked mode we need to include
* space for the SGE Control Packet Shift, 14 byte Ethernet header,
* possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
* Padding boundary. All of these are accommodated in the Factory
* Default Firmware Configuration File but we need to adjust it for
* this host's cache line size.
*/
t4_write_reg(adap, SGE_FL_BUFFER_SIZE0_A, page_size);
t4_write_reg(adap, SGE_FL_BUFFER_SIZE2_A,
(t4_read_reg(adap, SGE_FL_BUFFER_SIZE2_A) + fl_align-1)
& ~(fl_align-1));
t4_write_reg(adap, SGE_FL_BUFFER_SIZE3_A,
(t4_read_reg(adap, SGE_FL_BUFFER_SIZE3_A) + fl_align-1)
& ~(fl_align-1));
t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(page_shift - 12));
return 0;
}
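/* Worked example (a typical x86-64 host, values illustrative): page_size =
* 4096 and cache_line_size = 64 give page_shift = 12 and sge_hps = 2 (the
* SGE encodes host page sizes as 2^(10 + sge_hps)), stat_len = 64 and
* fl_align = 64, so free list buffer sizes 2 and 3 are rounded up to the
* next 64-byte boundary.
*/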
/**
* t4_fw_initialize - ask FW to initialize the device
* @adap: the adapter
* @mbox: mailbox to use for the FW command
*
* Issues a command to FW to partially initialize the device. This
* performs initialization that generally doesn't depend on user input.
*/
int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
{
struct fw_initialize_cmd c;
memset(&c, 0, sizeof(c));
INIT_CMD(c, INITIALIZE, WRITE);
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
/**
* t4_query_params_rw - query FW or device parameters
* @adap: the adapter
* @mbox: mailbox to use for the FW command
* @pf: the PF
* @vf: the VF
* @nparams: the number of parameters
* @params: the parameter names
* @val: the parameter values
* @rw: non-zero if the supplied @val values are also written along with the query
*
* Reads the value of FW or device parameters. Up to 7 parameters can be
* queried at once.
*/
int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int nparams, const u32 *params,
u32 *val, int rw)
{
int i, ret;
struct fw_params_cmd c;
__be32 *p = &c.param[0].mnem;
if (nparams > 7)
return -EINVAL;
memset(&c, 0, sizeof(c));
c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
FW_CMD_REQUEST_F | FW_CMD_READ_F |
FW_PARAMS_CMD_PFN_V(pf) |
FW_PARAMS_CMD_VFN_V(vf));
c.retval_len16 = cpu_to_be32(FW_LEN16(c));
for (i = 0; i < nparams; i++) {
*p++ = cpu_to_be32(*params++);
if (rw)
*p = cpu_to_be32(*(val + i));
p++;
}
ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
if (ret == 0)
for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
*val++ = be32_to_cpu(*p);
return ret;
}
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int nparams, const u32 *params,
u32 *val)
{
return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
}
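/* Usage sketch for the wrapper above (hypothetical mnemonic; pf stands for
* the caller's physical function number):
*
* u32 param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
* FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK);
* u32 val;
* int ret = t4_query_params(adap, adap->mbox, pf, 0, 1, &param, &val);
*
* On success val holds the queried parameter's current value.
*/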
/**
* t4_set_params_timeout - sets FW or device parameters
* @adap: the adapter
* @mbox: mailbox to use for the FW command
* @pf: the PF
* @vf: the VF
* @nparams: the number of parameters
* @params: the parameter names
* @val: the parameter values
* @timeout: the timeout time
*
* Sets the value of FW or device parameters. Up to 7 parameters can be
* specified at once.
*/
int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
unsigned int pf, unsigned int vf,
unsigned int nparams, const u32 *params,
const u32 *val, int timeout)
{
struct fw_params_cmd c;
__be32 *p = &c.param[0].mnem;
if (nparams > 7)
return -EINVAL;
memset(&c, 0, sizeof(c));
c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
FW_PARAMS_CMD_PFN_V(pf) |
FW_PARAMS_CMD_VFN_V(vf));
c.retval_len16 = cpu_to_be32(FW_LEN16(c));
while (nparams--) {
*p++ = cpu_to_be32(*params++);
*p++ = cpu_to_be32(*val++);
}
return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
}
/**
* t4_set_params - sets FW or device parameters
* @adap: the adapter
* @mbox: mailbox to use for the FW command
* @pf: the PF
* @vf: the VF
* @nparams: the number of parameters
* @params: the parameter names
* @val: the parameter values
*
* Sets the value of FW or device parameters. Up to 7 parameters can be
* specified at once.
*/
int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int nparams, const u32 *params,
const u32 *val)
{
return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
FW_CMD_MAX_TIMEOUT);
}
/**
* t4_cfg_pfvf - configure PF/VF resource limits
* @adap: the adapter
* @mbox: mailbox to use for the FW command
* @pf: the PF being configured
* @vf: the VF being configured
* @txq: the max number of egress queues
* @txq_eth_ctrl: the max number of egress Ethernet or control queues
* @rxqi: the max number of interrupt-capable ingress queues
* @rxq: the max number of interruptless ingress queues
* @tc: the PCI traffic class
* @vi: the max number of virtual interfaces
* @cmask: the channel access rights mask for the PF/VF
* @pmask: the port access rights mask for the PF/VF
* @nexact: the maximum number of exact MPS filters
* @rcaps: read capabilities
* @wxcaps: write/execute capabilities
*
* Configures resource limits and capabilities for a physical or virtual
* function.
*/
int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
unsigned int rxqi, unsigned int rxq, unsigned int tc,
unsigned int vi, unsigned int cmask, unsigned int pmask,
unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
{
struct fw_pfvf_cmd c;
memset(&c, 0, sizeof(c));
c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) | FW_CMD_REQUEST_F |
FW_CMD_WRITE_F | FW_PFVF_CMD_PFN_V(pf) |
FW_PFVF_CMD_VFN_V(vf));
c.retval_len16 = cpu_to_be32(FW_LEN16(c));
c.niqflint_niq = cpu_to_be32(FW_PFVF_CMD_NIQFLINT_V(rxqi) |
FW_PFVF_CMD_NIQ_V(rxq));
c.type_to_neq = cpu_to_be32(FW_PFVF_CMD_CMASK_V(cmask) |
FW_PFVF_CMD_PMASK_V(pmask) |
FW_PFVF_CMD_NEQ_V(txq));
c.tc_to_nexactf = cpu_to_be32(FW_PFVF_CMD_TC_V(tc) |
FW_PFVF_CMD_NVI_V(vi) |
FW_PFVF_CMD_NEXACTF_V(nexact));
c.r_caps_to_nethctrl = cpu_to_be32(FW_PFVF_CMD_R_CAPS_V(rcaps) |
FW_PFVF_CMD_WX_CAPS_V(wxcaps) |
FW_PFVF_CMD_NETHCTRL_V(txq_eth_ctrl));
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
/**
* t4_alloc_vi - allocate a virtual interface
* @adap: the adapter
* @mbox: mailbox to use for the FW command
* @port: physical port associated with the VI
* @pf: the PF owning the VI
* @vf: the VF owning the VI
* @nmac: number of MAC addresses needed (1 to 5)
* @mac: the MAC addresses of the VI
* @rss_size: size of RSS table slice associated with this VI
*
* Allocates a virtual interface for the given physical port. If @mac is
* not %NULL it contains the MAC addresses of the VI as assigned by FW.
* @mac should be large enough to hold @nmac Ethernet addresses, they are
* stored consecutively so the space needed is @nmac * 6 bytes.
* Returns a negative error number or the non-negative VI id.
*/
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
unsigned int *rss_size)
{
int ret;
struct fw_vi_cmd c;
memset(&c, 0, sizeof(c));
c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | FW_CMD_REQUEST_F |
FW_CMD_WRITE_F | FW_CMD_EXEC_F |
FW_VI_CMD_PFN_V(pf) | FW_VI_CMD_VFN_V(vf));
c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_ALLOC_F | FW_LEN16(c));
c.portid_pkd = FW_VI_CMD_PORTID_V(port);
c.nmac = nmac - 1;
ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
if (ret)
return ret;
if (mac) {
memcpy(mac, c.mac, sizeof(c.mac));
switch (nmac) {
case 5:
memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
/* Fall through */
case 4:
memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
/* Fall through */
case 3:
memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
/* Fall through */
case 2:
memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
}
}
if (rss_size)
*rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(c.rsssize_pkd));
return FW_VI_CMD_VIID_G(be16_to_cpu(c.type_viid));
}
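/* Note on the @mac layout: addresses are stored back to back, 6 bytes each,
* so a caller requesting nmac = 3 needs an 18-byte buffer: mac[0..5] holds
* the first address, mac[6..11] the second and mac[12..17] the third.
*/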
/**
* t4_free_vi - free a virtual interface
* @adap: the adapter
* @mbox: mailbox to use for the FW command
* @pf: the PF owning the VI
* @vf: the VF owning the VI
* @viid: virtual interface identifier
*
* Free a previously allocated virtual interface.
*/
int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int viid)
{
struct fw_vi_cmd c;
memset(&c, 0, sizeof(c));
c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
FW_CMD_REQUEST_F |
FW_CMD_EXEC_F |
FW_VI_CMD_PFN_V(pf) |
FW_VI_CMD_VFN_V(vf));
c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_FREE_F | FW_LEN16(c));
c.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid));
return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
}
/**
* t4_set_rxmode - set Rx properties of a virtual interface
* @adap: the adapter
* @mbox: mailbox to use for the FW command
* @viid: the VI id
* @mtu: the new MTU or -1
* @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
* @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
* @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
* @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
* @sleep_ok: if true we may sleep while awaiting command completion
*
* Sets Rx properties of a virtual interface.
*/
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
int mtu, int promisc, int all_multi, int bcast, int vlanex,
bool sleep_ok)
{
struct fw_vi_rxmode_cmd c;
/* convert to FW values */
if (mtu < 0)
mtu = FW_RXMODE_MTU_NO_CHG;
if (promisc < 0)
promisc = FW_VI_RXMODE_CMD_PROMISCEN_M;
if (all_multi < 0)
all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M;
if (bcast < 0)
bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M;
if (vlanex < 0)
vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M;
memset(&c, 0, sizeof(c));
c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
FW_VI_RXMODE_CMD_VIID_V(viid));
c.retval_len16 = cpu_to_be32(FW_LEN16(c));
c.mtu_to_vlanexen =
cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu) |
FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}
/**
* t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
* @adap: the adapter
* @mbox: mailbox to use for the FW command
* @viid: the VI id
* @free: if true any existing filters for this VI id are first removed
* @naddr: the number of MAC addresses to allocate filters for
* @addr: the MAC address(es)
* @idx: where to store the index of each allocated filter
* @hash: pointer to hash address filter bitmap
* @sleep_ok: call is allowed to sleep
*
* Allocates an exact-match filter for each of the supplied addresses and
* sets it to the corresponding address. If @idx is not %NULL it should
* have at least @naddr entries, each of which will be set to the index of
* the filter allocated for the corresponding MAC address. If a filter
* could not be allocated for an address its index is set to 0xffff.
* If @hash is not %NULL addresses that fail to allocate an exact filter
* are hashed and update the hash filter bitmap pointed at by @hash.
*
* Returns a negative error number or the number of filters allocated.
*/
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
unsigned int viid, bool free, unsigned int naddr,
const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
{
int offset, ret = 0;
struct fw_vi_mac_cmd c;
unsigned int nfilters = 0;
unsigned int max_naddr = adap->params.arch.mps_tcam_size;
unsigned int rem = naddr;
if (naddr > max_naddr)
return -EINVAL;
for (offset = 0; offset < naddr ; /**/) {
unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact) ?
rem : ARRAY_SIZE(c.u.exact));
size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
u.exact[fw_naddr]), 16);
struct fw_vi_mac_exact *p;
int i;
memset(&c, 0, sizeof(c));
c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
FW_CMD_REQUEST_F |
FW_CMD_WRITE_F |
FW_CMD_EXEC_V(free) |
FW_VI_MAC_CMD_VIID_V(viid));
c.freemacs_to_len16 =
cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) |
FW_CMD_LEN16_V(len16));
for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
p->valid_to_idx =
cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
FW_VI_MAC_CMD_IDX_V(
FW_VI_MAC_ADD_MAC));
memcpy(p->macaddr, addr[offset + i],
sizeof(p->macaddr));
}
/* It's okay if we run out of space in our MAC address arena.
* Some of the addresses we submit may get stored so we need
* to run through the reply to see what the results were ...
*/
ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
if (ret && ret != -FW_ENOMEM)
break;
for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
u16 index = FW_VI_MAC_CMD_IDX_G(
be16_to_cpu(p->valid_to_idx));
if (idx)
idx[offset + i] = (index >= max_naddr ?
0xffff : index);
if (index < max_naddr)
nfilters++;
else if (hash)
*hash |= (1ULL <<
hash_mac_addr(addr[offset + i]));
}
free = false;
offset += fw_naddr;
rem -= fw_naddr;
}
if (ret == 0 || ret == -FW_ENOMEM)
ret = nfilters;
return ret;
}
/**
* t4_change_mac - modifies the exact-match filter for a MAC address
* @adap: the adapter
* @mbox: mailbox to use for the FW command
* @viid: the VI id
* @idx: index of existing filter for old value of MAC address, or -1
* @addr: the new MAC address value
* @persist: whether a new MAC allocation should be persistent
* @add_smt: if true also add the address to the HW SMT
*
* Modifies an exact-match filter and sets it to the new MAC address.
* Note that in general it is not possible to modify the value of a given
* filter so the generic way to modify an address filter is to free the one
* being used by the old address value and allocate a new filter for the
* new address value. @idx can be -1 if the address is a new addition.
*
* Returns a negative error number or the index of the filter with the new
* MAC value.
*/
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
int idx, const u8 *addr, bool persist, bool add_smt)
{
int ret, mode;
struct fw_vi_mac_cmd c;
struct fw_vi_mac_exact *p = c.u.exact;
unsigned int max_mac_addr = adap->params.arch.mps_tcam_size;
if (idx < 0) /* new allocation */
idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
memset(&c, 0, sizeof(c));
c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
FW_VI_MAC_CMD_VIID_V(viid));
c.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(1));
p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
FW_VI_MAC_CMD_SMAC_RESULT_V(mode) |
FW_VI_MAC_CMD_IDX_V(idx));
memcpy(p->macaddr, addr, sizeof(p->macaddr));
ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
if (ret == 0) {
ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
if (ret >= max_mac_addr)
ret = -ENOMEM;
}
return ret;
}
/**
* t4_set_addr_hash - program the MAC inexact-match hash filter
* @adap: the adapter
* @mbox: mailbox to use for the FW command
* @viid: the VI id
* @ucast: whether the hash filter should also match unicast addresses
* @vec: the value to be written to the hash filter
* @sleep_ok: call is allowed to sleep
*
* Sets the 64-bit inexact-match hash filter for a virtual interface.
*/
int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
bool ucast, u64 vec, bool sleep_ok)
{
struct fw_vi_mac_cmd c;
memset(&c, 0, sizeof(c));
c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
FW_VI_ENABLE_CMD_VIID_V(viid));
c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F |
FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
FW_CMD_LEN16_V(1));
c.u.hash.hashvec = cpu_to_be64(vec);
return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}
/**
* t4_enable_vi_params - enable/disable a virtual interface
* @adap: the adapter
* @mbox: mailbox to use for the FW command
* @viid: the VI id
* @rx_en: 1=enable Rx, 0=disable Rx
* @tx_en: 1=enable Tx, 0=disable Tx
* @dcb_en: 1=enable delivery of Data Center Bridging messages, 0=disable
*
* Enables/disables a virtual interface. Note that setting DCB Enable
* only makes sense when enabling a Virtual Interface ...
*/
int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
{
struct fw_vi_enable_cmd c;
memset(&c, 0, sizeof(c));
c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
FW_VI_ENABLE_CMD_VIID_V(viid));
c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
FW_VI_ENABLE_CMD_EEN_V(tx_en) |
FW_VI_ENABLE_CMD_DCB_INFO_V(dcb_en) |
FW_LEN16(c));
return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
}
/**
* t4_enable_vi - enable/disable a virtual interface
* @adap: the adapter
* @mbox: mailbox to use for the FW command
* @viid: the VI id
* @rx_en: 1=enable Rx, 0=disable Rx
* @tx_en: 1=enable Tx, 0=disable Tx
*
* Enables/disables a virtual interface.
*/
int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
bool rx_en, bool tx_en)
{
return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
}
/**
* t4_identify_port - identify a VI's port by blinking its LED
* @adap: the adapter
* @mbox: mailbox to use for the FW command
* @viid: the VI id
* @nblinks: how many times to blink LED at 2.5 Hz
*
* Identifies a VI's port by blinking its LED.
*/
int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
unsigned int nblinks)
{
struct fw_vi_enable_cmd c;
memset(&c, 0, sizeof(c));
c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
FW_VI_ENABLE_CMD_VIID_V(viid));
c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(c));
c.blinkdur = cpu_to_be16(nblinks);
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
/**
* t4_iq_free - free an ingress queue and its FLs
* @adap: the adapter
* @mbox: mailbox to use for the FW command
* @pf: the PF owning the queues
* @vf: the VF owning the queues
* @iqtype: the ingress queue type
* @iqid: ingress queue id
* @fl0id: FL0 queue id or 0xffff if no attached FL0
* @fl1id: FL1 queue id or 0xffff if no attached FL1
*
* Frees an ingress queue and its associated FLs, if any.
*/
int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int iqtype, unsigned int iqid,
unsigned int fl0id, unsigned int fl1id)
{
struct fw_iq_cmd c;
memset(&c, 0, sizeof(c));
c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
FW_IQ_CMD_VFN_V(vf));
c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F | FW_LEN16(c));
c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
c.iqid = cpu_to_be16(iqid);
c.fl0id = cpu_to_be16(fl0id);
c.fl1id = cpu_to_be16(fl1id);
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
/**
* t4_eth_eq_free - free an Ethernet egress queue
* @adap: the adapter
* @mbox: mailbox to use for the FW command
* @pf: the PF owning the queue
* @vf: the VF owning the queue
* @eqid: egress queue id
*
* Frees an Ethernet egress queue.
*/
int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int eqid)
{
struct fw_eq_eth_cmd c;
memset(&c, 0, sizeof(c));
c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
FW_EQ_ETH_CMD_PFN_V(pf) |
FW_EQ_ETH_CMD_VFN_V(vf));
c.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(c));
c.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid));
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
/**
* t4_ctrl_eq_free - free a control egress queue
* @adap: the adapter
* @mbox: mailbox to use for the FW command
* @pf: the PF owning the queue
* @vf: the VF owning the queue
* @eqid: egress queue id
*
* Frees a control egress queue.
*/
int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int eqid)
{
struct fw_eq_ctrl_cmd c;
memset(&c, 0, sizeof(c));
c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_CTRL_CMD) |
FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
FW_EQ_CTRL_CMD_PFN_V(pf) |
FW_EQ_CTRL_CMD_VFN_V(vf));
c.alloc_to_len16 = cpu_to_be32(FW_EQ_CTRL_CMD_FREE_F | FW_LEN16(c));
c.cmpliqid_eqid = cpu_to_be32(FW_EQ_CTRL_CMD_EQID_V(eqid));
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
/**
* t4_ofld_eq_free - free an offload egress queue
* @adap: the adapter
* @mbox: mailbox to use for the FW command
* @pf: the PF owning the queue
* @vf: the VF owning the queue
* @eqid: egress queue id
*
* Frees an offload egress queue.
*/
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int eqid)
{
struct fw_eq_ofld_cmd c;
memset(&c, 0, sizeof(c));
c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_OFLD_CMD) |
FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
FW_EQ_OFLD_CMD_PFN_V(pf) |
FW_EQ_OFLD_CMD_VFN_V(vf));
c.alloc_to_len16 = cpu_to_be32(FW_EQ_OFLD_CMD_FREE_F | FW_LEN16(c));
c.eqid_pkd = cpu_to_be32(FW_EQ_OFLD_CMD_EQID_V(eqid));
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
/**
* t4_handle_fw_rpl - process a FW reply message
* @adap: the adapter
* @rpl: start of the FW message
*
* Processes a FW message, such as link state change messages.
*/
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
{
u8 opcode = *(const u8 *)rpl;
if (opcode == FW_PORT_CMD) { /* link/module state change message */
int speed = 0, fc = 0;
const struct fw_port_cmd *p = (void *)rpl;
int chan = FW_PORT_CMD_PORTID_G(be32_to_cpu(p->op_to_portid));
int port = adap->chan_map[chan];
struct port_info *pi = adap2pinfo(adap, port);
struct link_config *lc = &pi->link_cfg;
u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
int link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
u32 mod = FW_PORT_CMD_MODTYPE_G(stat);
if (stat & FW_PORT_CMD_RXPAUSE_F)
fc |= PAUSE_RX;
if (stat & FW_PORT_CMD_TXPAUSE_F)
fc |= PAUSE_TX;
if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
speed = 100;
else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
speed = 1000;
else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
speed = 10000;
else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
speed = 40000;
if (link_ok != lc->link_ok || speed != lc->speed ||
fc != lc->fc) { /* something changed */
lc->link_ok = link_ok;
lc->speed = speed;
lc->fc = fc;
lc->supported = be16_to_cpu(p->u.info.pcap);
t4_os_link_changed(adap, port, link_ok);
}
if (mod != pi->mod_type) {
pi->mod_type = mod;
t4_os_portmod_changed(adap, port);
}
}
return 0;
}
static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
{
u16 val;
if (pci_is_pcie(adapter->pdev)) {
pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
p->speed = val & PCI_EXP_LNKSTA_CLS;
p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
}
}
/**
* init_link_config - initialize a link's SW state
* @lc: structure holding the link state
* @caps: link capabilities
*
* Initializes the SW state maintained for each link, including the link's
* capabilities and default speed/flow-control/autonegotiation settings.
*/
static void init_link_config(struct link_config *lc, unsigned int caps)
{
lc->supported = caps;
lc->requested_speed = 0;
lc->speed = 0;
lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
if (lc->supported & FW_PORT_CAP_ANEG) {
lc->advertising = lc->supported & ADVERT_MASK;
lc->autoneg = AUTONEG_ENABLE;
lc->requested_fc |= PAUSE_AUTONEG;
} else {
lc->advertising = 0;
lc->autoneg = AUTONEG_DISABLE;
}
}
#define CIM_PF_NOACCESS 0xeeeeeeee
int t4_wait_dev_ready(void __iomem *regs)
{
u32 whoami;
whoami = readl(regs + PL_WHOAMI_A);
if (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS)
return 0;
msleep(500);
whoami = readl(regs + PL_WHOAMI_A);
return (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS ? 0 : -EIO);
}
struct flash_desc {
u32 vendor_and_model_id;
u32 size_mb;
};
static int get_flash_params(struct adapter *adap)
{
/* Table of supported non-Numonix flash parts. Numonix parts are left
* to the preexisting code. All flash parts have 64KB sectors.
*/
static struct flash_desc supported_flash[] = {
{ 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
};
int ret;
u32 info;
ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
if (!ret)
ret = sf1_read(adap, 3, 0, 1, &info);
t4_write_reg(adap, SF_OP_A, 0); /* unlock SF */
if (ret)
return ret;
for (ret = 0; ret < ARRAY_SIZE(supported_flash); ++ret)
if (supported_flash[ret].vendor_and_model_id == info) {
adap->params.sf_size = supported_flash[ret].size_mb;
adap->params.sf_nsec =
adap->params.sf_size / SF_SEC_SIZE;
return 0;
}
if ((info & 0xff) != 0x20) /* not a Numonix flash */
return -EINVAL;
info >>= 16; /* log2 of size */
if (info >= 0x14 && info < 0x18)
adap->params.sf_nsec = 1 << (info - 16);
else if (info == 0x18)
adap->params.sf_nsec = 64;
else
return -EINVAL;
adap->params.sf_size = 1 << info;
adap->params.sf_fw_start =
t4_read_reg(adap, CIM_BOOT_CFG_A) & BOOTADDR_M;
if (adap->params.sf_size < FLASH_MIN_SIZE)
dev_warn(adap->pdev_dev, "WARNING!!! FLASH size %#x < %#x!!!\n",
adap->params.sf_size, FLASH_MIN_SIZE);
return 0;
}
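/* Illustrative decode (hypothetical part): a Numonix device whose ID has
* manufacturer byte 0x20 and size byte (bits 23:16) 0x17 yields
* sf_size = 1 << 0x17 = 8 MB and sf_nsec = 1 << (0x17 - 16) = 128 sectors
* of 64KB each.
*/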
static void set_pcie_completion_timeout(struct adapter *adapter, u8 range)
{
u16 val;
u32 pcie_cap;
pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
if (pcie_cap) {
pci_read_config_word(adapter->pdev,
pcie_cap + PCI_EXP_DEVCTL2, &val);
val &= ~PCI_EXP_DEVCTL2_COMP_TIMEOUT;
val |= range;
pci_write_config_word(adapter->pdev,
pcie_cap + PCI_EXP_DEVCTL2, val);
}
}
/**
* t4_prep_adapter - prepare SW and HW for operation
* @adapter: the adapter
*
* Initialize adapter SW state for the various HW modules, set initial
* values for some adapter tunables, take PHYs out of reset, and
* initialize the MDIO interface.
*/
int t4_prep_adapter(struct adapter *adapter)
{
int ret, ver;
uint16_t device_id;
u32 pl_rev;
get_pci_mode(adapter, &adapter->params.pci);
pl_rev = REV_G(t4_read_reg(adapter, PL_REV_A));
ret = get_flash_params(adapter);
if (ret < 0) {
dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
return ret;
}
/* Retrieve adapter's device ID
*/
pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
ver = device_id >> 12;
adapter->params.chip = 0;
switch (ver) {
case CHELSIO_T4:
adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
adapter->params.arch.sge_fl_db = DBPRIO_F;
adapter->params.arch.mps_tcam_size =
NUM_MPS_CLS_SRAM_L_INSTANCES;
adapter->params.arch.mps_rplc_size = 128;
adapter->params.arch.nchan = NCHAN;
adapter->params.arch.vfcount = 128;
break;
case CHELSIO_T5:
adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F;
adapter->params.arch.mps_tcam_size =
NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
adapter->params.arch.mps_rplc_size = 128;
adapter->params.arch.nchan = NCHAN;
adapter->params.arch.vfcount = 128;
break;
case CHELSIO_T6:
adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
adapter->params.arch.sge_fl_db = 0;
adapter->params.arch.mps_tcam_size =
NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
adapter->params.arch.mps_rplc_size = 256;
adapter->params.arch.nchan = 2;
adapter->params.arch.vfcount = 256;
break;
default:
dev_err(adapter->pdev_dev, "Device %d is not supported\n",
device_id);
return -EINVAL;
}
adapter->params.cim_la_size = CIMLA_SIZE;
init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
/*
* Default port for debugging in case we can't reach FW.
*/
adapter->params.nports = 1;
adapter->params.portvec = 1;
adapter->params.vpd.cclk = 50000;
/* Set pci completion timeout value to 4 seconds. */
set_pcie_completion_timeout(adapter, 0xd);
return 0;
}
/**
* t4_bar2_sge_qregs - return BAR2 SGE Queue register information
* @adapter: the adapter
* @qid: the Queue ID
* @qtype: the Ingress or Egress type for @qid
* @user: true if this request is for a user mode queue
* @pbar2_qoffset: BAR2 Queue Offset
* @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
*
* Returns the BAR2 SGE Queue Registers information associated with the
* indicated Absolute Queue ID. These are passed back in return value
* pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
* and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
*
* This may return an error which indicates that BAR2 SGE Queue
* registers aren't available. If an error is not returned, then the
* following values are returned:
*
* *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
* *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
*
* If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
* require the "Inferred Queue ID" ability may be used. E.g. the
* Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
* then these "Inferred Queue ID" register may not be used.
*/
int t4_bar2_sge_qregs(struct adapter *adapter,
unsigned int qid,
enum t4_bar2_qtype qtype,
int user,
u64 *pbar2_qoffset,
unsigned int *pbar2_qid)
{
unsigned int page_shift, page_size, qpp_shift, qpp_mask;
u64 bar2_page_offset, bar2_qoffset;
unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
/* T4 doesn't support BAR2 SGE Queue registers for kernel mode queues */
if (!user && is_t4(adapter->params.chip))
return -EINVAL;
/* Get our SGE Page Size parameters.
*/
page_shift = adapter->params.sge.hps + 10;
page_size = 1 << page_shift;
/* Get the right Queues per Page parameters for our Queue.
*/
qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
? adapter->params.sge.eq_qpp
: adapter->params.sge.iq_qpp);
qpp_mask = (1 << qpp_shift) - 1;
/* Calculate the basics of the BAR2 SGE Queue register area:
* o The BAR2 page the Queue registers will be in.
* o The BAR2 Queue ID.
* o The BAR2 Queue ID Offset into the BAR2 page.
*/
bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
bar2_qid = qid & qpp_mask;
bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
/* If the BAR2 Queue ID Offset is less than the Page Size, then the
* hardware will infer the Absolute Queue ID simply from the writes to
* the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
* BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply
* write to the first BAR2 SGE Queue Area within the BAR2 Page with
* the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
* from the BAR2 Page and BAR2 Queue ID.
*
* One important consequence of this is that some BAR2 SGE registers
* have a "Queue ID" field and we can write the BAR2 SGE Queue ID
* there. But other registers synthesize the SGE Queue ID purely
* from the writes to the registers -- the Write Combined Doorbell
* Buffer is a good example. These BAR2 SGE Registers are only
* available for those BAR2 SGE Register areas where the SGE Absolute
* Queue ID can be inferred from simple writes.
*/
bar2_qoffset = bar2_page_offset;
bar2_qinferred = (bar2_qid_offset < page_size);
if (bar2_qinferred) {
bar2_qoffset += bar2_qid_offset;
bar2_qid = 0;
}
*pbar2_qoffset = bar2_qoffset;
*pbar2_qid = bar2_qid;
return 0;
}
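/* Illustrative only, not part of the original driver: a minimal sketch of
 * how a caller might turn the values returned by t4_bar2_sge_qregs() into
 * a BAR2 doorbell address. The "bar2" mapping pointer and the helper name
 * are assumptions made for this example.
 */
static void __iomem *t4_bar2_qreg_addr_example(struct adapter *adapter,
void __iomem *bar2, unsigned int qid)
{
u64 bar2_qoffset;
unsigned int bar2_qid;
if (t4_bar2_sge_qregs(adapter, qid, T4_BAR2_QTYPE_EGRESS, 0,
&bar2_qoffset, &bar2_qid))
return NULL;
/* If bar2_qid is non-zero, writes must carry it in the register's
 * Queue ID field; if it is zero, the hardware infers the queue from
 * the write offset alone.
 */
return bar2 + bar2_qoffset;
}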
/**
* t4_init_devlog_params - initialize adapter->params.devlog
* @adap: the adapter
*
* Initialize various fields of the adapter's Firmware Device Log
* Parameters structure.
*/
int t4_init_devlog_params(struct adapter *adap)
{
struct devlog_params *dparams = &adap->params.devlog;
u32 pf_dparams;
unsigned int devlog_meminfo;
struct fw_devlog_cmd devlog_cmd;
int ret;
/* If we're dealing with newer firmware, the Device Log Parameters
* are stored in a designated register which allows us to access the
* Device Log even if we can't talk to the firmware.
*/
pf_dparams =
t4_read_reg(adap, PCIE_FW_REG(PCIE_FW_PF_A, PCIE_FW_PF_DEVLOG));
if (pf_dparams) {
unsigned int nentries, nentries128;
dparams->memtype = PCIE_FW_PF_DEVLOG_MEMTYPE_G(pf_dparams);
dparams->start = PCIE_FW_PF_DEVLOG_ADDR16_G(pf_dparams) << 4;
nentries128 = PCIE_FW_PF_DEVLOG_NENTRIES128_G(pf_dparams);
nentries = (nentries128 + 1) * 128;
dparams->size = nentries * sizeof(struct fw_devlog_e);
return 0;
}
/* Otherwise, ask the firmware for its Device Log Parameters.
*/
memset(&devlog_cmd, 0, sizeof(devlog_cmd));
devlog_cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_DEVLOG_CMD) |
FW_CMD_REQUEST_F | FW_CMD_READ_F);
devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
&devlog_cmd);
if (ret)
return ret;
devlog_meminfo =
be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
dparams->memtype = FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
dparams->start = FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);
return 0;
}
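/* Illustrative only: once dparams is populated, the log itself is read out
 * of adapter memory. A hedged sketch, assuming a t4_memory_rw() helper and
 * a T4_MEMORY_READ direction flag exist elsewhere in this driver with the
 * signature used here; the memory window (0) is also just an assumption.
 */
static int t4_read_devlog_example(struct adapter *adap, void *buf)
{
struct devlog_params *dparams = &adap->params.devlog;
return t4_memory_rw(adap, 0, dparams->memtype, dparams->start,
dparams->size, buf, T4_MEMORY_READ);
}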
/**
* t4_init_sge_params - initialize adap->params.sge
* @adapter: the adapter
*
* Initialize various fields of the adapter's SGE Parameters structure.
*/
int t4_init_sge_params(struct adapter *adapter)
{
struct sge_params *sge_params = &adapter->params.sge;
u32 hps, qpp;
unsigned int s_hps, s_qpp;
/* Extract the SGE Page Size for our PF.
*/
hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE_A);
s_hps = (HOSTPAGESIZEPF0_S +
(HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->pf);
sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M);
/* Extract the SGE Egress and Ingress Queues Per Page for our PF.
*/
s_qpp = (QUEUESPERPAGEPF0_S +
(QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->pf);
qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF_A);
sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF_A);
sge_params->iq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
return 0;
}
/**
* t4_init_tp_params - initialize adap->params.tp
* @adap: the adapter
*
* Initialize various fields of the adapter's TP Parameters structure.
*/
int t4_init_tp_params(struct adapter *adap)
{
int chan;
u32 v;
v = t4_read_reg(adap, TP_TIMER_RESOLUTION_A);
adap->params.tp.tre = TIMERRESOLUTION_G(v);
adap->params.tp.dack_re = DELAYEDACKRESOLUTION_G(v);
/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
for (chan = 0; chan < NCHAN; chan++)
adap->params.tp.tx_modq[chan] = chan;
/* Cache the adapter's Compressed Filter Mode and global Ingress
* Configuration.
*/
if (adap->flags & FW_OK) {
t4_fw_tp_pio_rw(adap, &adap->params.tp.vlan_pri_map, 1,
TP_VLAN_PRI_MAP_A, 1);
t4_fw_tp_pio_rw(adap, &adap->params.tp.ingress_config, 1,
TP_INGRESS_CONFIG_A, 1);
} else {
t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
&adap->params.tp.vlan_pri_map, 1,
TP_VLAN_PRI_MAP_A);
t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
&adap->params.tp.ingress_config, 1,
TP_INGRESS_CONFIG_A);
}
/* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
* shift positions of several elements of the Compressed Filter Tuple
* for this adapter which we need frequently ...
*/
adap->params.tp.vlan_shift = t4_filter_field_shift(adap, VLAN_F);
adap->params.tp.vnic_shift = t4_filter_field_shift(adap, VNIC_ID_F);
adap->params.tp.port_shift = t4_filter_field_shift(adap, PORT_F);
adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
PROTOCOL_F);
/* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
* represents the presence of an Outer VLAN instead of a VNIC ID.
*/
if ((adap->params.tp.ingress_config & VNIC_F) == 0)
adap->params.tp.vnic_shift = -1;
return 0;
}
/**
* t4_filter_field_shift - calculate filter field shift
* @adap: the adapter
* @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
*
* Return the shift position of a filter field within the Compressed
* Filter Tuple. The filter field is specified via its selection bit
* within TP_VLAN_PRI_MAP (filter mode), e.g. VLAN_F.
*/
int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
{
unsigned int filter_mode = adap->params.tp.vlan_pri_map;
unsigned int sel;
int field_shift;
if ((filter_mode & filter_sel) == 0)
return -1;
for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
switch (filter_mode & sel) {
case FCOE_F:
field_shift += FT_FCOE_W;
break;
case PORT_F:
field_shift += FT_PORT_W;
break;
case VNIC_ID_F:
field_shift += FT_VNIC_ID_W;
break;
case VLAN_F:
field_shift += FT_VLAN_W;
break;
case TOS_F:
field_shift += FT_TOS_W;
break;
case PROTOCOL_F:
field_shift += FT_PROTOCOL_W;
break;
case ETHERTYPE_F:
field_shift += FT_ETHERTYPE_W;
break;
case MACMATCH_F:
field_shift += FT_MACMATCH_W;
break;
case MPSHITTYPE_F:
field_shift += FT_MPSHITTYPE_W;
break;
case FRAGMENTATION_F:
field_shift += FT_FRAGMENTATION_W;
break;
}
}
return field_shift;
}
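/* Illustrative only: a hedged sketch of how the shifts cached by
 * t4_init_tp_params() might be used to place a field value into a
 * Compressed Filter Tuple. The helper name and the bare u64 tuple
 * encoding are assumptions made for this example.
 */
static u64 t4_port_tuple_example(const struct adapter *adap,
unsigned int port)
{
int shift = adap->params.tp.port_shift;
/* A shift of -1 means the Port field is not part of the current
 * filter mode and cannot be matched on.
 */
return (shift < 0) ? 0ULL : (u64)port << shift;
}
/**
 * t4_init_rss_mode - read and cache each port's RSS mode
 * @adap: the adapter
 * @mbox: mailbox to use for the firmware command
 *
 * Issues an FW_RSS_VI_CONFIG_CMD read for every port's virtual interface
 * and caches the returned RSS mode in the port_info structure.
 */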
int t4_init_rss_mode(struct adapter *adap, int mbox)
{
int i, ret;
struct fw_rss_vi_config_cmd rvc;
memset(&rvc, 0, sizeof(rvc));
for_each_port(adap, i) {
struct port_info *p = adap2pinfo(adap, i);
rvc.op_to_viid =
cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
FW_CMD_REQUEST_F | FW_CMD_READ_F |
FW_RSS_VI_CONFIG_CMD_VIID_V(p->viid));
rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
if (ret)
return ret;
p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
}
return 0;
}
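/**
 * t4_port_init - initialize the driver's per-port state from the firmware
 * @adap: the adapter
 * @mbox: mailbox to use for firmware commands
 * @pf: the PF to allocate virtual interfaces from
 * @vf: the VF that will own the new virtual interfaces
 *
 * Queries port information, allocates one virtual interface per port, and
 * caches the MAC address, RSS size and mode, MDIO address and link
 * capabilities for each port.
 */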
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
{
u8 addr[6];
int ret, i, j = 0;
struct fw_port_cmd c;
struct fw_rss_vi_config_cmd rvc;
memset(&c, 0, sizeof(c));
memset(&rvc, 0, sizeof(rvc));
for_each_port(adap, i) {
unsigned int rss_size;
struct port_info *p = adap2pinfo(adap, i);
while ((adap->params.portvec & (1 << j)) == 0)
j++;
c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
FW_CMD_REQUEST_F | FW_CMD_READ_F |
FW_PORT_CMD_PORTID_V(j));
c.action_to_len16 = cpu_to_be32(
FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
FW_LEN16(c));
ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
if (ret)
return ret;
ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
if (ret < 0)
return ret;
p->viid = ret;
p->tx_chan = j;
p->lport = j;
p->rss_size = rss_size;
memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
adap->port[i]->dev_port = j;
ret = be32_to_cpu(c.u.info.lstatus_to_modtype);
p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP_F) ?
FW_PORT_CMD_MDIOADDR_G(ret) : -1;
p->port_type = FW_PORT_CMD_PTYPE_G(ret);
p->mod_type = FW_PORT_MOD_TYPE_NA;
rvc.op_to_viid =
cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
FW_CMD_REQUEST_F | FW_CMD_READ_F |
FW_RSS_VI_CONFIG_CMD_VIID_V(p->viid));
rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
if (ret)
return ret;
p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
init_link_config(&p->link_cfg, be16_to_cpu(c.u.info.pcap));
j++;
}
return 0;
}
/**
* t4_read_cimq_cfg - read CIM queue configuration
* @adap: the adapter
* @base: holds the queue base addresses in bytes
* @size: holds the queue sizes in bytes
* @thres: holds the queue full thresholds in bytes
*
* Returns the current configuration of the CIM queues, starting with
* the IBQs, then the OBQs.
*/
void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
{
unsigned int i, v;
int cim_num_obq = is_t4(adap->params.chip) ?
CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
for (i = 0; i < CIM_NUM_IBQ; i++) {
t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, IBQSELECT_F |
QUENUMSELECT_V(i));
v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
/* value is in 256-byte units */
*base++ = CIMQBASE_G(v) * 256;
*size++ = CIMQSIZE_G(v) * 256;
*thres++ = QUEFULLTHRSH_G(v) * 8; /* 8-byte unit */
}
for (i = 0; i < cim_num_obq; i++) {
t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
QUENUMSELECT_V(i));
v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
/* value is in 256-byte units */
*base++ = CIMQBASE_G(v) * 256;
*size++ = CIMQSIZE_G(v) * 256;
}
}
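/* Illustrative only: @base and @size each receive one entry per IBQ plus
 * one per OBQ, while @thres is only filled in for the IBQs. A hedged
 * sketch of a correctly sized caller, using the larger T5 OBQ count as
 * the worst case; the helper name is an assumption.
 */
static void t4_read_cimq_cfg_example(struct adapter *adap)
{
u16 base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
u16 size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
u16 thres[CIM_NUM_IBQ];
t4_read_cimq_cfg(adap, base, size, thres);
}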
/**
* t4_read_cim_ibq - read the contents of a CIM inbound queue
* @adap: the adapter
* @qid: the queue index
* @data: where to store the queue contents
* @n: capacity of @data in 32-bit words
*
* Reads the contents of the selected CIM queue starting at address 0 up
* to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
* error and the number of 32-bit words actually read on success.
*/
int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
int i, err, attempts;
unsigned int addr;
const unsigned int nwords = CIM_IBQ_SIZE * 4;
if (qid >= CIM_NUM_IBQ || (n & 3))
return -EINVAL;
addr = qid * nwords;
if (n > nwords)
n = nwords;
/* It might take 3-10 ms before IBQ debug read access is allowed.
* Poll for up to 1 s in 1 us steps.
*/
attempts = 1000000;
for (i = 0; i < n; i++, addr++) {
t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, IBQDBGADDR_V(addr) |
IBQDBGEN_F);
err = t4_wait_op_done(adap, CIM_IBQ_DBG_CFG_A, IBQDBGBUSY_F, 0,
attempts, 1);
if (err)
return err;
*data++ = t4_read_reg(adap, CIM_IBQ_DBG_DATA_A);
}
t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, 0);
return i;
}
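/* Illustrative only: reading IBQ 0 in full. The capacity mirrors the
 * "nwords" computation above (CIM_IBQ_SIZE is in 16-byte units, hence
 * the factor of 4 to get 32-bit words); the helper name is an assumption
 * and @buf must hold CIM_IBQ_SIZE * 4 words.
 */
static int t4_dump_cim_ibq0_example(struct adapter *adap, u32 *buf)
{
return t4_read_cim_ibq(adap, 0, buf, CIM_IBQ_SIZE * 4);
}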
/**
* t4_read_cim_obq - read the contents of a CIM outbound queue
* @adap: the adapter
* @qid: the queue index
* @data: where to store the queue contents
* @n: capacity of @data in 32-bit words
*
* Reads the contents of the selected CIM queue starting at address 0 up
* to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
* error and the number of 32-bit words actually read on success.
*/
int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
int i, err;
unsigned int addr, v, nwords;
int cim_num_obq = is_t4(adap->params.chip) ?
CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
if ((qid > (cim_num_obq - 1)) || (n & 3))
return -EINVAL;
t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
QUENUMSELECT_V(qid));
v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
addr = CIMQBASE_G(v) * 64; /* multiple of 256 -> multiple of 4 */
nwords = CIMQSIZE_G(v) * 64; /* same */
if (n > nwords)
n = nwords;
for (i = 0; i < n; i++, addr++) {
t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, OBQDBGADDR_V(addr) |
OBQDBGEN_F);
err = t4_wait_op_done(adap, CIM_OBQ_DBG_CFG_A, OBQDBGBUSY_F, 0,
2, 1);
if (err)
return err;
*data++ = t4_read_reg(adap, CIM_OBQ_DBG_DATA_A);
}
t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, 0);
return i;
}
/**
* t4_cim_read - read a block from CIM internal address space
* @adap: the adapter
* @addr: the start address within the CIM address space
* @n: number of words to read
* @valp: where to store the result
*
* Reads a block of 4-byte words from the CIM internal address space.
*/
int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
unsigned int *valp)
{
int ret = 0;
if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
return -EBUSY;
for ( ; !ret && n--; addr += 4) {
t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr);
ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
0, 5, 2);
if (!ret)
*valp++ = t4_read_reg(adap, CIM_HOST_ACC_DATA_A);
}
return ret;
}
/**
* t4_cim_write - write a block into CIM internal address space
* @adap: the adapter
* @addr: the start address within the CIM address space
* @n: number of words to write
* @valp: set of values to write
*
* Writes a block of 4-byte words into the CIM internal address space.
*/
int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
const unsigned int *valp)
{
int ret = 0;
if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
return -EBUSY;
for ( ; !ret && n--; addr += 4) {
t4_write_reg(adap, CIM_HOST_ACC_DATA_A, *valp++);
t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr | HOSTWRITE_F);
ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
0, 5, 2);
}
return ret;
}
static int t4_cim_write1(struct adapter *adap, unsigned int addr,
unsigned int val)
{
return t4_cim_write(adap, addr, 1, &val);
}
/**
* t4_cim_read_la - read CIM LA capture buffer
* @adap: the adapter
* @la_buf: where to store the LA data
* @wrptr: the HW write pointer within the capture buffer
*
* Reads the contents of the CIM LA buffer with the most recent entry at
* the end of the returned data and with the entry at @wrptr first.
* We try to leave the LA in the running state we find it in.
*/
int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
{
int i, ret;
unsigned int cfg, val, idx;
ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
if (ret)
return ret;
if (cfg & UPDBGLAEN_F) { /* LA is running, freeze it */
ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A, 0);
if (ret)
return ret;
}
ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
if (ret)
goto restart;
idx = UPDBGLAWRPTR_G(val);
if (wrptr)
*wrptr = idx;
for (i = 0; i < adap->params.cim_la_size; i++) {
ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
UPDBGLARDPTR_V(idx) | UPDBGLARDEN_F);
if (ret)
break;
ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
if (ret)
break;
if (val & UPDBGLARDEN_F) {
ret = -ETIMEDOUT;
break;
}
ret = t4_cim_read(adap, UP_UP_DBG_LA_DATA_A, 1, &la_buf[i]);
if (ret)
break;
idx = (idx + 1) & UPDBGLARDPTR_M;
}
restart:
if (cfg & UPDBGLAEN_F) {
int r = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
cfg & ~UPDBGLARDEN_F);
if (!ret)
ret = r;
}
return ret;
}
/**
* t4_tp_read_la - read TP LA capture buffer
* @adap: the adapter
* @la_buf: where to store the LA data
* @wrptr: the HW write pointer within the capture buffer
*
* Reads the contents of the TP LA buffer with the most recent entry at
* the end of the returned data and with the entry at @wrptr first.
* We leave the LA in the running state we find it in.
*/
void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
{
bool last_incomplete;
unsigned int i, cfg, val, idx;
cfg = t4_read_reg(adap, TP_DBG_LA_CONFIG_A) & 0xffff;
if (cfg & DBGLAENABLE_F) /* freeze LA */
t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
adap->params.tp.la_mask | (cfg ^ DBGLAENABLE_F));
val = t4_read_reg(adap, TP_DBG_LA_CONFIG_A);
idx = DBGLAWPTR_G(val);
last_incomplete = DBGLAMODE_G(val) >= 2 && (val & DBGLAWHLF_F) == 0;
if (last_incomplete)
idx = (idx + 1) & DBGLARPTR_M;
if (wrptr)
*wrptr = idx;
val &= 0xffff;
val &= ~DBGLARPTR_V(DBGLARPTR_M);
val |= adap->params.tp.la_mask;
for (i = 0; i < TPLA_SIZE; i++) {
t4_write_reg(adap, TP_DBG_LA_CONFIG_A, DBGLARPTR_V(idx) | val);
la_buf[i] = t4_read_reg64(adap, TP_DBG_LA_DATAL_A);
idx = (idx + 1) & DBGLARPTR_M;
}
/* Wipe out last entry if it isn't valid */
if (last_incomplete)
la_buf[TPLA_SIZE - 1] = ~0ULL;
if (cfg & DBGLAENABLE_F) /* restore running state */
t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
cfg | adap->params.tp.la_mask);
}
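/* Illustrative only: t4_tp_read_la() fills exactly TPLA_SIZE 64-bit
 * entries, so a caller might capture the LA like this. The helper name
 * and the kmalloc()-based buffer ownership are assumptions made for the
 * example.
 */
static u64 *t4_tp_la_dump_example(struct adapter *adap)
{
u64 *buf = kmalloc(TPLA_SIZE * sizeof(u64), GFP_KERNEL);
if (buf)
t4_tp_read_la(adap, buf, NULL);
return buf;
}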
/* SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
* seconds). If we find one of the SGE Ingress DMA State Machines in the same
* state for more than the Warning Threshold then we'll issue a warning about
* a potential hang. While the SGE Ingress DMA Channel appears to be hung,
* we'll repeat the warning every Warning Repeat seconds until the
* situation clears; when it does, we'll note that as well.
*/
#define SGE_IDMA_WARN_THRESH 1
#define SGE_IDMA_WARN_REPEAT 300
/**
* t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
* @adapter: the adapter
* @idma: the adapter IDMA Monitor state
*
* Initialize the state of an SGE Ingress DMA Monitor.
*/
void t4_idma_monitor_init(struct adapter *adapter,
struct sge_idma_monitor_state *idma)
{
/* Initialize the state variables for detecting an SGE Ingress DMA
* hang. The SGE has internal counters which count up on each clock
* tick whenever the SGE finds its Ingress DMA State Engines in the
* same state they were on the previous clock tick. The clock used is
* the Core Clock so we have a limit on the maximum "time" they can
* record; typically a very small number of seconds. For instance,
* with a 600MHz Core Clock, we can only count up to a bit more than
* 7s. So we'll synthesize a larger counter in order to not run the
* risk of having the "timers" overflow and give us the flexibility to
* maintain a Hung SGE State Machine of our own which operates across
* a longer time frame.
*/
idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
idma->idma_stalled[0] = 0;
idma->idma_stalled[1] = 0;
}
/**
* t4_idma_monitor - monitor SGE Ingress DMA state
* @adapter: the adapter
* @idma: the adapter IDMA Monitor state
* @hz: number of ticks/second
* @ticks: number of ticks since the last IDMA Monitor call
*/
void t4_idma_monitor(struct adapter *adapter,
struct sge_idma_monitor_state *idma,
int hz, int ticks)
{
int i, idma_same_state_cnt[2];
/* Read the SGE Debug Ingress DMA Same State Count registers. These
* are counters inside the SGE which count up on each clock when the
* SGE finds its Ingress DMA State Engines in the same states they
* were in the previous clock. The counters will peg out at
* 0xffffffff without wrapping around, so once they pass the 1s
* threshold they'll stay above it until the IDMA state changes.
*/
t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 13);
idma_same_state_cnt[0] = t4_read_reg(adapter, SGE_DEBUG_DATA_HIGH_A);
idma_same_state_cnt[1] = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
for (i = 0; i < 2; i++) {
u32 debug0, debug11;
/* If the Ingress DMA Same State Counter ("timer") is less
* than 1s, then we can reset our synthesized Stall Timer and
* continue. If we have previously emitted warnings about a
* potential stalled Ingress Queue, issue a note indicating
* that the Ingress Queue has resumed forward progress.
*/
if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH * hz)
dev_warn(adapter->pdev_dev, "SGE idma%d, queue %u, "
"resumed after %d seconds\n",
i, idma->idma_qid[i],
idma->idma_stalled[i] / hz);
idma->idma_stalled[i] = 0;
continue;
}
/* Synthesize an SGE Ingress DMA Same State Timer in the Hz
* domain. The first time we get here it'll be because we
* passed the 1s Threshold; each additional time it'll be
* because the RX Timer Callback is being fired on its regular
* schedule.
*
* If the stall is below our Potential Hung Ingress Queue
* Warning Threshold, continue.
*/
if (idma->idma_stalled[i] == 0) {
idma->idma_stalled[i] = hz;
idma->idma_warn[i] = 0;
} else {
idma->idma_stalled[i] += ticks;
idma->idma_warn[i] -= ticks;
}
if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH * hz)
continue;
/* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
*/
if (idma->idma_warn[i] > 0)
continue;
idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT * hz;
/* Read and save the SGE IDMA State and Queue ID information.
* We do this every time in case it changes across time ...
* can't be too careful ...
*/
t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 0);
debug0 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 11);
debug11 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
dev_warn(adapter->pdev_dev, "SGE idma%u, queue %u, potentially stuck in "
"state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
i, idma->idma_qid[i], idma->idma_state[i],
idma->idma_stalled[i] / hz,
debug0, debug11);
t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
}
}
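/* Illustrative only: a hedged sketch of how a periodic service routine
 * might drive the monitor above. With a timer firing once per second,
 * both the ticks-per-second and elapsed-ticks arguments are simply HZ;
 * the wrapper name is an assumption made for the example.
 */
static void t4_idma_monitor_poll_example(struct adapter *adapter,
struct sge_idma_monitor_state *idma)
{
t4_idma_monitor(adapter, idma, HZ, HZ);
}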
| gpl-2.0 |
Elite-Kernels/HTC-10 | drivers/net/wireless/rtlwifi/rtl8188ee/hw.c | 179 | 74338 | /******************************************************************************
*
* Copyright(c) 2009-2013 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
* Hsinchu 300, Taiwan.
*
* Larry Finger <Larry.Finger@lwfinger.net>
*
*****************************************************************************/
#include "../wifi.h"
#include "../efuse.h"
#include "../base.h"
#include "../regd.h"
#include "../cam.h"
#include "../ps.h"
#include "../pci.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
#include "dm.h"
#include "fw.h"
#include "led.h"
#include "hw.h"
#include "pwrseq.h"
#define LLT_CONFIG 5
static void _rtl88ee_set_bcn_ctrl_reg(struct ieee80211_hw *hw,
u8 set_bits, u8 clear_bits)
{
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtlpci->reg_bcn_ctrl_val |= set_bits;
rtlpci->reg_bcn_ctrl_val &= ~clear_bits;
rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8) rtlpci->reg_bcn_ctrl_val);
}
static void _rtl88ee_stop_tx_beacon(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 tmp1byte;
tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte & (~BIT(6)));
rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0x64);
tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
tmp1byte &= ~(BIT(0));
rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
}
static void _rtl88ee_resume_tx_beacon(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 tmp1byte;
tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte | BIT(6));
rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0xff);
tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
tmp1byte |= BIT(0);
rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
}
static void _rtl88ee_enable_bcn_sub_func(struct ieee80211_hw *hw)
{
_rtl88ee_set_bcn_ctrl_reg(hw, 0, BIT(1));
}
static void _rtl88ee_return_beacon_queue_skb(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[BEACON_QUEUE];
unsigned long flags;
spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
while (skb_queue_len(&ring->queue)) {
struct rtl_tx_desc *entry = &ring->desc[ring->idx];
struct sk_buff *skb = __skb_dequeue(&ring->queue);
pci_unmap_single(rtlpci->pdev,
rtlpriv->cfg->ops->get_desc(
(u8 *)entry, true, HW_DESC_TXBUFF_ADDR),
skb->len, PCI_DMA_TODEVICE);
kfree_skb(skb);
ring->idx = (ring->idx + 1) % ring->entries;
}
spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
}
static void _rtl88ee_disable_bcn_sub_func(struct ieee80211_hw *hw)
{
_rtl88ee_set_bcn_ctrl_reg(hw, BIT(1), 0);
}
static void _rtl88ee_set_fw_clock_on(struct ieee80211_hw *hw,
u8 rpwm_val, bool b_need_turn_off_ckk)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
bool b_support_remote_wake_up;
u32 count = 0, isr_regaddr, content;
bool schedule_timer = b_need_turn_off_ckk;
rtlpriv->cfg->ops->get_hw_reg(hw, HAL_DEF_WOWLAN,
(u8 *)(&b_support_remote_wake_up));
if (!rtlhal->fw_ready)
return;
if (!rtlpriv->psc.fw_current_inpsmode)
return;
while (1) {
spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
if (rtlhal->fw_clk_change_in_progress) {
while (rtlhal->fw_clk_change_in_progress) {
spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
count++;
udelay(100);
if (count > 1000)
return;
spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
}
spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
} else {
rtlhal->fw_clk_change_in_progress = true;
spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
break;
}
}
if (IS_IN_LOW_POWER_STATE_88E(rtlhal->fw_ps_state)) {
rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_SET_RPWM, &rpwm_val);
if (FW_PS_IS_ACK(rpwm_val)) {
isr_regaddr = REG_HISR;
content = rtl_read_dword(rtlpriv, isr_regaddr);
while (!(content & IMR_CPWM) && (count < 500)) {
udelay(50);
count++;
content = rtl_read_dword(rtlpriv, isr_regaddr);
}
if (content & IMR_CPWM) {
rtl_write_word(rtlpriv, isr_regaddr, 0x0100);
rtlhal->fw_ps_state = FW_PS_STATE_RF_ON_88E;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"Receive CPWM INT!!! Set pHalData->FwPSState = %X\n",
rtlhal->fw_ps_state);
}
}
spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
rtlhal->fw_clk_change_in_progress = false;
spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
if (schedule_timer) {
mod_timer(&rtlpriv->works.fw_clockoff_timer,
jiffies + MSECS(10));
}
} else {
spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
rtlhal->fw_clk_change_in_progress = false;
spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
}
}
static void _rtl88ee_set_fw_clock_off(struct ieee80211_hw *hw,
u8 rpwm_val)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl8192_tx_ring *ring;
enum rf_pwrstate rtstate;
bool schedule_timer = false;
u8 queue;
if (!rtlhal->fw_ready)
return;
if (!rtlpriv->psc.fw_current_inpsmode)
return;
if (!rtlhal->allow_sw_to_change_hwclc)
return;
rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RF_STATE, (u8 *)(&rtstate));
if (rtstate == ERFOFF || rtlpriv->psc.inactive_pwrstate == ERFOFF)
return;
for (queue = 0; queue < RTL_PCI_MAX_TX_QUEUE_COUNT; queue++) {
ring = &rtlpci->tx_ring[queue];
if (skb_queue_len(&ring->queue)) {
schedule_timer = true;
break;
}
}
if (schedule_timer) {
mod_timer(&rtlpriv->works.fw_clockoff_timer,
jiffies + MSECS(10));
return;
}
if (FW_PS_STATE(rtlhal->fw_ps_state) !=
FW_PS_STATE_RF_OFF_LOW_PWR_88E) {
spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
if (!rtlhal->fw_clk_change_in_progress) {
rtlhal->fw_clk_change_in_progress = true;
spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
rtlhal->fw_ps_state = FW_PS_STATE(rpwm_val);
rtl_write_word(rtlpriv, REG_HISR, 0x0100);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
&rpwm_val);
spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
rtlhal->fw_clk_change_in_progress = false;
spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
} else {
spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
mod_timer(&rtlpriv->works.fw_clockoff_timer,
jiffies + MSECS(10));
}
}
}
static void _rtl88ee_set_fw_ps_rf_on(struct ieee80211_hw *hw)
{
u8 rpwm_val = 0;
rpwm_val |= (FW_PS_STATE_RF_OFF_88E | FW_PS_ACK);
_rtl88ee_set_fw_clock_on(hw, rpwm_val, true);
}
static void _rtl88ee_set_fw_ps_rf_off_low_power(struct ieee80211_hw *hw)
{
u8 rpwm_val = 0;
rpwm_val |= FW_PS_STATE_RF_OFF_LOW_PWR_88E;
_rtl88ee_set_fw_clock_off(hw, rpwm_val);
}
void rtl88ee_fw_clk_off_timer_callback(unsigned long data)
{
struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
_rtl88ee_set_fw_ps_rf_off_low_power(hw);
}
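/* Illustrative only (an assumption, the real registration lives elsewhere
 * in the driver): the callback above is presumably wired up with the
 * pre-timer_setup() API, roughly as sketched here.
 */
static void rtl88ee_fw_clk_timer_init_example(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
setup_timer(&rtlpriv->works.fw_clockoff_timer,
rtl88ee_fw_clk_off_timer_callback, (unsigned long)hw);
}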
static void _rtl88ee_fwlps_leave(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
bool fw_current_inps = false;
u8 rpwm_val = 0, fw_pwrmode = FW_PS_ACTIVE_MODE;
if (ppsc->low_power_enable) {
rpwm_val = (FW_PS_STATE_ALL_ON_88E|FW_PS_ACK);/* RF on */
_rtl88ee_set_fw_clock_on(hw, rpwm_val, false);
rtlhal->allow_sw_to_change_hwclc = false;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
&fw_pwrmode);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
(u8 *)(&fw_current_inps));
} else {
rpwm_val = FW_PS_STATE_ALL_ON_88E; /* RF on */
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM, &rpwm_val);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
&fw_pwrmode);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
(u8 *)(&fw_current_inps));
}
}
static void _rtl88ee_fwlps_enter(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
bool fw_current_inps = true;
u8 rpwm_val;
if (ppsc->low_power_enable) {
rpwm_val = FW_PS_STATE_RF_OFF_LOW_PWR_88E; /* RF off */
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
(u8 *)(&fw_current_inps));
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
&ppsc->fwctrl_psmode);
rtlhal->allow_sw_to_change_hwclc = true;
_rtl88ee_set_fw_clock_off(hw, rpwm_val);
} else {
rpwm_val = FW_PS_STATE_RF_OFF_88E; /* RF off */
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
(u8 *)(&fw_current_inps));
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
&ppsc->fwctrl_psmode);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM, &rpwm_val);
}
}
void rtl88ee_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
switch (variable) {
case HW_VAR_RCR:
*((u32 *)(val)) = rtlpci->receive_config;
break;
case HW_VAR_RF_STATE:
*((enum rf_pwrstate *)(val)) = ppsc->rfpwr_state;
break;
case HW_VAR_FWLPS_RF_ON:{
enum rf_pwrstate rfstate;
u32 val_rcr;
rtlpriv->cfg->ops->get_hw_reg(hw,
HW_VAR_RF_STATE,
(u8 *)(&rfstate));
if (rfstate == ERFOFF) {
*((bool *)(val)) = true;
} else {
val_rcr = rtl_read_dword(rtlpriv, REG_RCR);
val_rcr &= 0x00070000;
if (val_rcr)
*((bool *)(val)) = false;
else
*((bool *)(val)) = true;
}
break; }
case HW_VAR_FW_PSMODE_STATUS:
*((bool *)(val)) = ppsc->fw_current_inpsmode;
break;
case HW_VAR_CORRECT_TSF:{
u64 tsf;
u32 *ptsf_low = (u32 *)&tsf;
u32 *ptsf_high = ((u32 *)&tsf) + 1;
*ptsf_high = rtl_read_dword(rtlpriv, (REG_TSFTR + 4));
*ptsf_low = rtl_read_dword(rtlpriv, REG_TSFTR);
*((u64 *)(val)) = tsf;
break; }
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"switch case not process %x\n", variable);
break;
}
}
void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
u8 idx;
switch (variable) {
case HW_VAR_ETHER_ADDR:
for (idx = 0; idx < ETH_ALEN; idx++) {
rtl_write_byte(rtlpriv, (REG_MACID + idx),
val[idx]);
}
break;
case HW_VAR_BASIC_RATE:{
u16 b_rate_cfg = ((u16 *)val)[0];
u8 rate_index = 0;
b_rate_cfg = b_rate_cfg & 0x15f;
b_rate_cfg |= 0x01;
rtl_write_byte(rtlpriv, REG_RRSR, b_rate_cfg & 0xff);
rtl_write_byte(rtlpriv, REG_RRSR + 1,
(b_rate_cfg >> 8) & 0xff);
while (b_rate_cfg > 0x1) {
b_rate_cfg = (b_rate_cfg >> 1);
rate_index++;
}
rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL,
rate_index);
break;
}
case HW_VAR_BSSID:
for (idx = 0; idx < ETH_ALEN; idx++) {
rtl_write_byte(rtlpriv, (REG_BSSID + idx),
val[idx]);
}
break;
case HW_VAR_SIFS:
rtl_write_byte(rtlpriv, REG_SIFS_CTX + 1, val[0]);
rtl_write_byte(rtlpriv, REG_SIFS_TRX + 1, val[1]);
rtl_write_byte(rtlpriv, REG_SPEC_SIFS + 1, val[0]);
rtl_write_byte(rtlpriv, REG_MAC_SPEC_SIFS + 1, val[0]);
if (!mac->ht_enable)
rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM,
0x0e0e);
else
rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM,
*((u16 *)val));
break;
case HW_VAR_SLOT_TIME:{
u8 e_aci;
RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
"HW_VAR_SLOT_TIME %x\n", val[0]);
rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
for (e_aci = 0; e_aci < AC_MAX; e_aci++) {
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
&e_aci);
}
break;
}
case HW_VAR_ACK_PREAMBLE:{
u8 reg_tmp;
u8 short_preamble = (bool)*val;
reg_tmp = rtl_read_byte(rtlpriv, REG_TRXPTCL_CTL+2);
if (short_preamble) {
reg_tmp |= 0x02;
rtl_write_byte(rtlpriv, REG_TRXPTCL_CTL +
2, reg_tmp);
} else {
reg_tmp &= 0xFD;
rtl_write_byte(rtlpriv, REG_TRXPTCL_CTL +
2, reg_tmp);
}
break; }
case HW_VAR_WPA_CONFIG:
rtl_write_byte(rtlpriv, REG_SECCFG, *val);
break;
case HW_VAR_AMPDU_MIN_SPACE:{
u8 min_spacing_to_set;
u8 sec_min_space;
min_spacing_to_set = *val;
if (min_spacing_to_set <= 7) {
sec_min_space = 0;
if (min_spacing_to_set < sec_min_space)
min_spacing_to_set = sec_min_space;
mac->min_space_cfg = ((mac->min_space_cfg &
0xf8) |
min_spacing_to_set);
*val = min_spacing_to_set;
RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
"Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
mac->min_space_cfg);
rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
mac->min_space_cfg);
}
break; }
case HW_VAR_SHORTGI_DENSITY:{
u8 density_to_set;
density_to_set = *val;
mac->min_space_cfg |= (density_to_set << 3);
RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
"Set HW_VAR_SHORTGI_DENSITY: %#x\n",
mac->min_space_cfg);
rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
mac->min_space_cfg);
break;
}
case HW_VAR_AMPDU_FACTOR:{
u8 regtoset_normal[4] = { 0x41, 0xa8, 0x72, 0xb9 };
u8 factor_toset;
u8 *p_regtoset = NULL;
u8 index = 0;
p_regtoset = regtoset_normal;
factor_toset = *val;
if (factor_toset <= 3) {
factor_toset = (1 << (factor_toset + 2));
if (factor_toset > 0xf)
factor_toset = 0xf;
for (index = 0; index < 4; index++) {
if ((p_regtoset[index] & 0xf0) >
(factor_toset << 4))
p_regtoset[index] =
(p_regtoset[index] & 0x0f) |
(factor_toset << 4);
if ((p_regtoset[index] & 0x0f) >
factor_toset)
p_regtoset[index] =
(p_regtoset[index] & 0xf0) |
(factor_toset);
rtl_write_byte(rtlpriv,
(REG_AGGLEN_LMT + index),
p_regtoset[index]);
}
RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
"Set HW_VAR_AMPDU_FACTOR: %#x\n",
factor_toset);
}
break; }
case HW_VAR_AC_PARAM:{
u8 e_aci = *val;
rtl88e_dm_init_edca_turbo(hw);
if (rtlpci->acm_method != EACMWAY2_SW)
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_ACM_CTRL,
&e_aci);
break; }
case HW_VAR_ACM_CTRL:{
u8 e_aci = *val;
union aci_aifsn *p_aci_aifsn =
(union aci_aifsn *)(&(mac->ac[0].aifs));
u8 acm = p_aci_aifsn->f.acm;
u8 acm_ctrl = rtl_read_byte(rtlpriv, REG_ACMHWCTRL);
acm_ctrl = acm_ctrl |
((rtlpci->acm_method == 2) ? 0x0 : 0x1);
if (acm) {
switch (e_aci) {
case AC0_BE:
acm_ctrl |= ACMHW_BEQEN;
break;
case AC2_VI:
acm_ctrl |= ACMHW_VIQEN;
break;
case AC3_VO:
acm_ctrl |= ACMHW_VOQEN;
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
"HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
e_aci);
break;
}
} else {
switch (e_aci) {
case AC0_BE:
acm_ctrl &= (~ACMHW_BEQEN);
break;
case AC2_VI:
acm_ctrl &= (~ACMHW_VIQEN);
break;
case AC3_VO:
acm_ctrl &= (~ACMHW_VOQEN);
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"switch case not process\n");
break;
}
}
RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE,
"SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
acm_ctrl);
rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl);
break; }
case HW_VAR_RCR:
rtl_write_dword(rtlpriv, REG_RCR, ((u32 *)(val))[0]);
rtlpci->receive_config = ((u32 *)(val))[0];
break;
case HW_VAR_RETRY_LIMIT:{
u8 retry_limit = *val;
rtl_write_word(rtlpriv, REG_RL,
retry_limit << RETRY_LIMIT_SHORT_SHIFT |
retry_limit << RETRY_LIMIT_LONG_SHIFT);
break; }
case HW_VAR_DUAL_TSF_RST:
rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, (BIT(0) | BIT(1)));
break;
case HW_VAR_EFUSE_BYTES:
rtlefuse->efuse_usedbytes = *((u16 *)val);
break;
case HW_VAR_EFUSE_USAGE:
rtlefuse->efuse_usedpercentage = *val;
break;
case HW_VAR_IO_CMD:
rtl88e_phy_set_io_cmd(hw, (*(enum io_type *)val));
break;
case HW_VAR_SET_RPWM:{
u8 rpwm_val;
rpwm_val = rtl_read_byte(rtlpriv, REG_PCIE_HRPWM);
udelay(1);
if (rpwm_val & BIT(7)) {
rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, *val);
} else {
rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, *val | BIT(7));
}
break; }
case HW_VAR_H2C_FW_PWRMODE:
rtl88e_set_fw_pwrmode_cmd(hw, *val);
break;
case HW_VAR_FW_PSMODE_STATUS:
ppsc->fw_current_inpsmode = *((bool *)val);
break;
case HW_VAR_RESUME_CLK_ON:
_rtl88ee_set_fw_ps_rf_on(hw);
break;
case HW_VAR_FW_LPS_ACTION:{
bool enter_fwlps = *((bool *)val);
if (enter_fwlps)
_rtl88ee_fwlps_enter(hw);
else
_rtl88ee_fwlps_leave(hw);
break; }
case HW_VAR_H2C_FW_JOINBSSRPT:{
u8 mstatus = *val;
u8 tmp_regcr, tmp_reg422, bcnvalid_reg;
u8 count = 0, dlbcn_count = 0;
bool b_recover = false;
if (mstatus == RT_MEDIA_CONNECT) {
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AID,
NULL);
tmp_regcr = rtl_read_byte(rtlpriv, REG_CR + 1);
rtl_write_byte(rtlpriv, REG_CR + 1,
(tmp_regcr | BIT(0)));
_rtl88ee_set_bcn_ctrl_reg(hw, 0, BIT(3));
_rtl88ee_set_bcn_ctrl_reg(hw, BIT(4), 0);
tmp_reg422 =
rtl_read_byte(rtlpriv,
REG_FWHW_TXQ_CTRL + 2);
rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
tmp_reg422 & (~BIT(6)));
if (tmp_reg422 & BIT(6))
b_recover = true;
do {
bcnvalid_reg = rtl_read_byte(rtlpriv,
REG_TDECTRL+2);
rtl_write_byte(rtlpriv, REG_TDECTRL+2,
(bcnvalid_reg | BIT(0)));
_rtl88ee_return_beacon_queue_skb(hw);
rtl88e_set_fw_rsvdpagepkt(hw, 0);
bcnvalid_reg = rtl_read_byte(rtlpriv,
REG_TDECTRL+2);
count = 0;
while (!(bcnvalid_reg & BIT(0)) && count < 20) {
count++;
udelay(10);
bcnvalid_reg =
rtl_read_byte(rtlpriv, REG_TDECTRL+2);
}
dlbcn_count++;
} while (!(bcnvalid_reg & BIT(0)) && dlbcn_count < 5);
if (bcnvalid_reg & BIT(0))
rtl_write_byte(rtlpriv, REG_TDECTRL+2, BIT(0));
_rtl88ee_set_bcn_ctrl_reg(hw, BIT(3), 0);
_rtl88ee_set_bcn_ctrl_reg(hw, 0, BIT(4));
if (b_recover) {
rtl_write_byte(rtlpriv,
REG_FWHW_TXQ_CTRL + 2,
tmp_reg422);
}
rtl_write_byte(rtlpriv, REG_CR + 1,
(tmp_regcr & ~(BIT(0))));
}
rtl88e_set_fw_joinbss_report_cmd(hw, (*(u8 *)val));
break; }
case HW_VAR_H2C_FW_P2P_PS_OFFLOAD:
rtl88e_set_p2p_ps_offload_cmd(hw, *val);
break;
case HW_VAR_AID:{
u16 u2btmp;
u2btmp = rtl_read_word(rtlpriv, REG_BCN_PSR_RPT);
u2btmp &= 0xC000;
rtl_write_word(rtlpriv, REG_BCN_PSR_RPT, (u2btmp |
mac->assoc_id));
break; }
case HW_VAR_CORRECT_TSF:{
u8 btype_ibss = *val;
if (btype_ibss)
_rtl88ee_stop_tx_beacon(hw);
_rtl88ee_set_bcn_ctrl_reg(hw, 0, BIT(3));
rtl_write_dword(rtlpriv, REG_TSFTR,
(u32)(mac->tsf & 0xffffffff));
rtl_write_dword(rtlpriv, REG_TSFTR + 4,
(u32)((mac->tsf >> 32) & 0xffffffff));
_rtl88ee_set_bcn_ctrl_reg(hw, BIT(3), 0);
if (btype_ibss)
_rtl88ee_resume_tx_beacon(hw);
break; }
case HW_VAR_KEEP_ALIVE: {
u8 array[2];
array[0] = 0xff;
array[1] = *((u8 *)val);
rtl88e_fill_h2c_cmd(hw, H2C_88E_KEEP_ALIVE_CTRL,
2, array);
break; }
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"switch case not process %x\n", variable);
break;
}
}
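/* Illustrative only: callers normally reach the setter above through the
 * cfg->ops indirection rather than calling it directly. A hedged example
 * programming a 9 us short slot time; the value is just for illustration.
 */
static void rtl88ee_set_slot_time_example(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 slot_time = 9;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME, &slot_time);
}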
static bool _rtl88ee_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
bool status = true;
long count = 0;
u32 value = _LLT_INIT_ADDR(address) | _LLT_INIT_DATA(data) |
_LLT_OP(_LLT_WRITE_ACCESS);
rtl_write_dword(rtlpriv, REG_LLT_INIT, value);
do {
value = rtl_read_dword(rtlpriv, REG_LLT_INIT);
if (_LLT_NO_ACTIVE == _LLT_OP_VALUE(value))
break;
if (count > POLLING_LLT_THRESHOLD) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"Failed to polling write LLT done at address %d!\n",
address);
status = false;
break;
}
} while (++count);
return status;
}
static bool _rtl88ee_llt_table_init(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
unsigned short i;
u8 txpktbuf_bndy;
u8 maxpage;
bool status;
maxpage = 0xAF;
txpktbuf_bndy = 0xAB;
rtl_write_byte(rtlpriv, REG_RQPN_NPQ, 0x01);
rtl_write_dword(rtlpriv, REG_RQPN, 0x80730d29);
/*0x2600 MaxRxBuff=10k-max(TxReportSize(64*8), WOLPattern(16*24)) */
rtl_write_dword(rtlpriv, REG_TRXFF_BNDY, (0x25FF0000 | txpktbuf_bndy));
rtl_write_byte(rtlpriv, REG_TDECTRL + 1, txpktbuf_bndy);
rtl_write_byte(rtlpriv, REG_TXPKTBUF_BCNQ_BDNY, txpktbuf_bndy);
rtl_write_byte(rtlpriv, REG_TXPKTBUF_MGQ_BDNY, txpktbuf_bndy);
rtl_write_byte(rtlpriv, 0x45D, txpktbuf_bndy);
rtl_write_byte(rtlpriv, REG_PBP, 0x11);
rtl_write_byte(rtlpriv, REG_RX_DRVINFO_SZ, 0x4);
for (i = 0; i < (txpktbuf_bndy - 1); i++) {
status = _rtl88ee_llt_write(hw, i, i + 1);
if (!status)
return status;
}
status = _rtl88ee_llt_write(hw, (txpktbuf_bndy - 1), 0xFF);
if (!status)
return status;
for (i = txpktbuf_bndy; i < maxpage; i++) {
status = _rtl88ee_llt_write(hw, i, (i + 1));
if (!status)
return status;
}
status = _rtl88ee_llt_write(hw, maxpage, txpktbuf_bndy);
if (!status)
return status;
return true;
}
static void _rtl88ee_gen_refresh_led_state(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
if (rtlpriv->rtlhal.up_first_time)
return;
if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS)
rtl88ee_sw_led_on(hw, pLed0);
else if (ppsc->rfoff_reason == RF_CHANGE_BY_INIT)
rtl88ee_sw_led_on(hw, pLed0);
else
rtl88ee_sw_led_off(hw, pLed0);
}
static bool _rtl88ee_init_mac(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
u8 bytetmp;
u16 wordtmp;
/*Disable XTAL OUTPUT for power saving. YJ,add,111206. */
bytetmp = rtl_read_byte(rtlpriv, REG_XCK_OUT_CTRL) & (~BIT(0));
rtl_write_byte(rtlpriv, REG_XCK_OUT_CTRL, bytetmp);
/*Auto Power Down to CHIP-off State*/
bytetmp = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1) & (~BIT(7));
rtl_write_byte(rtlpriv, REG_APS_FSMCO + 1, bytetmp);
rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x00);
/* HW Power on sequence */
if (!rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK,
PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,
RTL8188EE_NIC_ENABLE_FLOW)) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"init MAC Fail as rtl_hal_pwrseqcmdparsing\n");
return false;
}
bytetmp = rtl_read_byte(rtlpriv, REG_APS_FSMCO) | BIT(4);
rtl_write_byte(rtlpriv, REG_APS_FSMCO, bytetmp);
bytetmp = rtl_read_byte(rtlpriv, REG_PCIE_CTRL_REG+2);
rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG+2, bytetmp|BIT(2));
bytetmp = rtl_read_byte(rtlpriv, REG_WATCH_DOG+1);
rtl_write_byte(rtlpriv, REG_WATCH_DOG+1, bytetmp|BIT(7));
bytetmp = rtl_read_byte(rtlpriv, REG_AFE_XTAL_CTRL_EXT+1);
rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL_EXT+1, bytetmp|BIT(1));
bytetmp = rtl_read_byte(rtlpriv, REG_TX_RPT_CTRL);
rtl_write_byte(rtlpriv, REG_TX_RPT_CTRL, bytetmp|BIT(1)|BIT(0));
rtl_write_byte(rtlpriv, REG_TX_RPT_CTRL+1, 2);
rtl_write_word(rtlpriv, REG_TX_RPT_TIME, 0xcdf0);
/*Add for wake up online*/
bytetmp = rtl_read_byte(rtlpriv, REG_SYS_CLKR);
rtl_write_byte(rtlpriv, REG_SYS_CLKR, bytetmp|BIT(3));
bytetmp = rtl_read_byte(rtlpriv, REG_GPIO_MUXCFG+1);
rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG+1, (bytetmp & (~BIT(4))));
rtl_write_byte(rtlpriv, 0x367, 0x80);
rtl_write_word(rtlpriv, REG_CR, 0x2ff);
rtl_write_byte(rtlpriv, REG_CR+1, 0x06);
rtl_write_byte(rtlpriv, REG_CR+2, 0x00);
if (!rtlhal->mac_func_enable) {
if (!_rtl88ee_llt_table_init(hw)) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"LLT table init fail\n");
return false;
}
}
rtl_write_dword(rtlpriv, REG_HISR, 0xffffffff);
rtl_write_dword(rtlpriv, REG_HISRE, 0xffffffff);
wordtmp = rtl_read_word(rtlpriv, REG_TRXDMA_CTRL);
wordtmp &= 0xf;
wordtmp |= 0xE771;
rtl_write_word(rtlpriv, REG_TRXDMA_CTRL, wordtmp);
rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
rtl_write_word(rtlpriv, REG_RXFLTMAP2, 0xffff);
rtl_write_dword(rtlpriv, REG_TCR, rtlpci->transmit_config);
rtl_write_dword(rtlpriv, REG_BCNQ_DESA,
((u64) rtlpci->tx_ring[BEACON_QUEUE].dma) &
DMA_BIT_MASK(32));
rtl_write_dword(rtlpriv, REG_MGQ_DESA,
(u64) rtlpci->tx_ring[MGNT_QUEUE].dma &
DMA_BIT_MASK(32));
rtl_write_dword(rtlpriv, REG_VOQ_DESA,
(u64) rtlpci->tx_ring[VO_QUEUE].dma & DMA_BIT_MASK(32));
rtl_write_dword(rtlpriv, REG_VIQ_DESA,
(u64) rtlpci->tx_ring[VI_QUEUE].dma & DMA_BIT_MASK(32));
rtl_write_dword(rtlpriv, REG_BEQ_DESA,
(u64) rtlpci->tx_ring[BE_QUEUE].dma & DMA_BIT_MASK(32));
rtl_write_dword(rtlpriv, REG_BKQ_DESA,
(u64) rtlpci->tx_ring[BK_QUEUE].dma & DMA_BIT_MASK(32));
rtl_write_dword(rtlpriv, REG_HQ_DESA,
(u64) rtlpci->tx_ring[HIGH_QUEUE].dma &
DMA_BIT_MASK(32));
rtl_write_dword(rtlpriv, REG_RX_DESA,
(u64) rtlpci->rx_ring[RX_MPDU_QUEUE].dma &
DMA_BIT_MASK(32));
/* if we want to support 64 bit DMA, we should set it here,
* but now we do not support 64 bit DMA
*/
rtl_write_dword(rtlpriv, REG_INT_MIG, 0);
rtl_write_dword(rtlpriv, REG_MCUTST_1, 0x0);
rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG+1, 0);/*Enable RX DMA */
if (rtlhal->earlymode_enable) {/*Early mode enable*/
bytetmp = rtl_read_byte(rtlpriv, REG_EARLY_MODE_CONTROL);
bytetmp |= 0x1f;
rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, bytetmp);
rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL+3, 0x81);
}
_rtl88ee_gen_refresh_led_state(hw);
return true;
}
static void _rtl88ee_hw_configure(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 reg_bw_opmode;
u32 reg_ratr, reg_prsr;
reg_bw_opmode = BW_OPMODE_20MHZ;
reg_ratr = RATE_ALL_CCK | RATE_ALL_OFDM_AG |
RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS;
reg_prsr = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
rtl_write_dword(rtlpriv, REG_RRSR, reg_prsr);
rtl_write_byte(rtlpriv, REG_HWSEQ_CTRL, 0xFF);
}
static void _rtl88ee_enable_aspm_back_door(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
u8 tmp1byte = 0;
u32 tmp4byte = 0, count = 0;
rtl_write_word(rtlpriv, 0x354, 0x8104);
rtl_write_word(rtlpriv, 0x358, 0x24);
rtl_write_word(rtlpriv, 0x350, 0x70c);
rtl_write_byte(rtlpriv, 0x352, 0x2);
tmp1byte = rtl_read_byte(rtlpriv, 0x352);
count = 0;
while (tmp1byte && count < 20) {
udelay(10);
tmp1byte = rtl_read_byte(rtlpriv, 0x352);
count++;
}
if (0 == tmp1byte) {
tmp4byte = rtl_read_dword(rtlpriv, 0x34c);
rtl_write_dword(rtlpriv, 0x348, tmp4byte|BIT(31));
rtl_write_word(rtlpriv, 0x350, 0xf70c);
rtl_write_byte(rtlpriv, 0x352, 0x1);
}
tmp1byte = rtl_read_byte(rtlpriv, 0x352);
count = 0;
while (tmp1byte && count < 20) {
udelay(10);
tmp1byte = rtl_read_byte(rtlpriv, 0x352);
count++;
}
rtl_write_word(rtlpriv, 0x350, 0x718);
rtl_write_byte(rtlpriv, 0x352, 0x2);
tmp1byte = rtl_read_byte(rtlpriv, 0x352);
count = 0;
while (tmp1byte && count < 20) {
udelay(10);
tmp1byte = rtl_read_byte(rtlpriv, 0x352);
count++;
}
if (ppsc->support_backdoor || (0 == tmp1byte)) {
tmp4byte = rtl_read_dword(rtlpriv, 0x34c);
rtl_write_dword(rtlpriv, 0x348, tmp4byte|BIT(11)|BIT(12));
rtl_write_word(rtlpriv, 0x350, 0xf718);
rtl_write_byte(rtlpriv, 0x352, 0x1);
}
tmp1byte = rtl_read_byte(rtlpriv, 0x352);
count = 0;
while (tmp1byte && count < 20) {
udelay(10);
tmp1byte = rtl_read_byte(rtlpriv, 0x352);
count++;
}
}
void rtl88ee_enable_hw_security_config(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 sec_reg_value;
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
"PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
rtlpriv->sec.pairwise_enc_algorithm,
rtlpriv->sec.group_enc_algorithm);
if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
"not open hw encryption\n");
return;
}
sec_reg_value = SCR_TXENCENABLE | SCR_RXDECENABLE;
if (rtlpriv->sec.use_defaultkey) {
sec_reg_value |= SCR_TXUSEDK;
sec_reg_value |= SCR_RXUSEDK;
}
sec_reg_value |= (SCR_RXBCUSEDK | SCR_TXBCUSEDK);
rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
"The SECR-value %x\n", sec_reg_value);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
}
int rtl88ee_hw_init(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
bool rtstatus = true;
int err = 0;
u8 tmp_u1b, u1byte;
unsigned long flags;
rtlpriv->rtlhal.being_init_adapter = true;
/* As this function can take a very long time (up to 350 ms)
* and can be called with irqs disabled, reenable the irqs
* to let the other devices continue being serviced.
*
* It is safe to do so since our own interrupts will only be enabled
* in a subsequent step.
*/
local_save_flags(flags);
local_irq_enable();
rtlhal->fw_ready = false;
rtlpriv->intf_ops->disable_aspm(hw);
tmp_u1b = rtl_read_byte(rtlpriv, REG_SYS_CLKR+1);
u1byte = rtl_read_byte(rtlpriv, REG_CR);
if ((tmp_u1b & BIT(3)) && (u1byte != 0 && u1byte != 0xEA)) {
rtlhal->mac_func_enable = true;
} else {
rtlhal->mac_func_enable = false;
rtlhal->fw_ps_state = FW_PS_STATE_ALL_ON_88E;
}
rtstatus = _rtl88ee_init_mac(hw);
if (!rtstatus) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n");
err = 1;
goto exit;
}
err = rtl88e_download_fw(hw, false);
if (err) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
"Failed to download FW. Init HW without FW now..\n");
err = 1;
goto exit;
}
rtlhal->fw_ready = true;
/*fw related variable initialize */
rtlhal->last_hmeboxnum = 0;
rtlhal->fw_ps_state = FW_PS_STATE_ALL_ON_88E;
rtlhal->fw_clk_change_in_progress = false;
rtlhal->allow_sw_to_change_hwclc = false;
ppsc->fw_current_inpsmode = false;
rtl88e_phy_mac_config(hw);
/* The previous function modified the RCR, so refresh our cached
* receive_config here; otherwise throughput becomes unstable because the
* cached value is stale: RCR_ACRC32 makes RX throughput unstable and
* RCR_APP_ICV makes mac80211 disassociate from Cisco 1252 APs.
*/
rtlpci->receive_config &= ~(RCR_ACRC32 | RCR_AICV);
rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
rtl88e_phy_bb_config(hw);
rtl_set_bbreg(hw, RFPGA0_RFMOD, BCCKEN, 0x1);
rtl_set_bbreg(hw, RFPGA0_RFMOD, BOFDMEN, 0x1);
rtlphy->rf_mode = RF_OP_BY_SW_3WIRE;
rtl88e_phy_rf_config(hw);
rtlphy->rfreg_chnlval[0] = rtl_get_rfreg(hw, (enum radio_path)0,
RF_CHNLBW, RFREG_OFFSET_MASK);
rtlphy->rfreg_chnlval[0] = rtlphy->rfreg_chnlval[0] & 0xfff00fff;
_rtl88ee_hw_configure(hw);
rtl_cam_reset_all_entry(hw);
rtl88ee_enable_hw_security_config(hw);
rtlhal->mac_func_enable = true;
ppsc->rfpwr_state = ERFON;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR, mac->mac_addr);
_rtl88ee_enable_aspm_back_door(hw);
rtlpriv->intf_ops->enable_aspm(hw);
if (ppsc->rfpwr_state == ERFON) {
if ((rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV) ||
((rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) &&
(rtlhal->oem_id == RT_CID_819X_HP))) {
rtl88e_phy_set_rfpath_switch(hw, true);
rtlpriv->dm.fat_table.rx_idle_ant = MAIN_ANT;
} else {
rtl88e_phy_set_rfpath_switch(hw, false);
rtlpriv->dm.fat_table.rx_idle_ant = AUX_ANT;
}
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "rx idle ant %s\n",
(rtlpriv->dm.fat_table.rx_idle_ant == MAIN_ANT) ?
("MAIN_ANT") : ("AUX_ANT"));
if (rtlphy->iqk_initialized) {
rtl88e_phy_iq_calibrate(hw, true);
} else {
rtl88e_phy_iq_calibrate(hw, false);
rtlphy->iqk_initialized = true;
}
rtl88e_dm_check_txpower_tracking(hw);
rtl88e_phy_lc_calibrate(hw);
}
tmp_u1b = efuse_read_1byte(hw, 0x1FA);
if (!(tmp_u1b & BIT(0))) {
rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0F, 0x05);
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "PA BIAS path A\n");
}
if (!(tmp_u1b & BIT(4))) {
tmp_u1b = rtl_read_byte(rtlpriv, 0x16);
tmp_u1b &= 0x0F;
rtl_write_byte(rtlpriv, 0x16, tmp_u1b | 0x80);
udelay(10);
rtl_write_byte(rtlpriv, 0x16, tmp_u1b | 0x90);
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "under 1.5V\n");
}
rtl_write_byte(rtlpriv, REG_NAV_CTRL+2, ((30000+127)/128));
rtl88e_dm_init(hw);
exit:
local_irq_restore(flags);
rtlpriv->rtlhal.being_init_adapter = false;
return err;
}
static enum version_8188e _rtl88ee_read_chip_version(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
enum version_8188e version = VERSION_UNKNOWN;
u32 value32;
value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG);
if (value32 & TRP_VAUX_EN) {
version = (enum version_8188e) VERSION_TEST_CHIP_88E;
} else {
version = NORMAL_CHIP;
version = version | ((value32 & TYPE_ID) ? RF_TYPE_2T2R : 0);
version = version | ((value32 & VENDOR_ID) ?
CHIP_VENDOR_UMC : 0);
}
rtlphy->rf_type = RF_1T1R;
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"Chip RF Type: %s\n", (rtlphy->rf_type == RF_2T2R) ?
"RF_2T2R" : "RF_1T1R");
return version;
}
static int _rtl88ee_set_media_status(struct ieee80211_hw *hw,
enum nl80211_iftype type)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 bt_msr = rtl_read_byte(rtlpriv, MSR) & 0xfc;
enum led_ctl_mode ledaction = LED_CTL_NO_LINK;
u8 mode = MSR_NOLINK;
switch (type) {
case NL80211_IFTYPE_UNSPECIFIED:
mode = MSR_NOLINK;
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
"Set Network type to NO LINK!\n");
break;
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_MESH_POINT:
mode = MSR_ADHOC;
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
"Set Network type to Ad Hoc!\n");
break;
case NL80211_IFTYPE_STATION:
mode = MSR_INFRA;
ledaction = LED_CTL_LINK;
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
"Set Network type to STA!\n");
break;
case NL80211_IFTYPE_AP:
mode = MSR_AP;
ledaction = LED_CTL_LINK;
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
"Set Network type to AP!\n");
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"Network type %d not support!\n", type);
return 1;
break;
}
/* MSR_INFRA == Link in infrastructure network;
* MSR_ADHOC == Link in ad hoc network;
* Therefore, checking the link state is necessary.
*
* MSR_AP == AP mode; link state is not cared here.
*/
if (mode != MSR_AP && rtlpriv->mac80211.link_state < MAC80211_LINKED) {
mode = MSR_NOLINK;
ledaction = LED_CTL_NO_LINK;
}
if (mode == MSR_NOLINK || mode == MSR_INFRA) {
_rtl88ee_stop_tx_beacon(hw);
_rtl88ee_enable_bcn_sub_func(hw);
} else if (mode == MSR_ADHOC || mode == MSR_AP) {
_rtl88ee_resume_tx_beacon(hw);
_rtl88ee_disable_bcn_sub_func(hw);
} else {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
"Set HW_VAR_MEDIA_STATUS: No such media status(%x).\n",
mode);
}
rtl_write_byte(rtlpriv, (MSR), bt_msr | mode);
rtlpriv->cfg->ops->led_control(hw, ledaction);
if (mode == MSR_AP)
rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
else
rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66);
return 0;
}
void rtl88ee_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
u32 reg_rcr = rtlpci->receive_config;
if (rtlpriv->psc.rfpwr_state != ERFON)
return;
if (check_bssid) {
reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
(u8 *)(&reg_rcr));
_rtl88ee_set_bcn_ctrl_reg(hw, 0, BIT(4));
} else {
reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN));
_rtl88ee_set_bcn_ctrl_reg(hw, BIT(4), 0);
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_RCR, (u8 *)(&reg_rcr));
}
}
int rtl88ee_set_network_type(struct ieee80211_hw *hw,
enum nl80211_iftype type)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (_rtl88ee_set_media_status(hw, type))
return -EOPNOTSUPP;
if (rtlpriv->mac80211.link_state == MAC80211_LINKED) {
if (type != NL80211_IFTYPE_AP &&
type != NL80211_IFTYPE_MESH_POINT)
rtl88ee_set_check_bssid(hw, true);
} else {
rtl88ee_set_check_bssid(hw, false);
}
return 0;
}
/* don't set REG_EDCA_BE_PARAM here
* because mac80211 will send pkt when scan
*/
void rtl88ee_set_qos(struct ieee80211_hw *hw, int aci)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtl88e_dm_init_edca_turbo(hw);
switch (aci) {
case AC1_BK:
rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, 0xa44f);
break;
case AC0_BE:
break;
case AC2_VI:
rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, 0x5e4322);
break;
case AC3_VO:
rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222);
break;
default:
RT_ASSERT(false, "invalid aci: %d !\n", aci);
break;
}
}
void rtl88ee_enable_interrupt(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
rtl_write_dword(rtlpriv, REG_HIMR,
rtlpci->irq_mask[0] & 0xFFFFFFFF);
rtl_write_dword(rtlpriv, REG_HIMRE,
rtlpci->irq_mask[1] & 0xFFFFFFFF);
rtlpci->irq_enabled = true;
/* Some C2H commands (e.g. C2H, CPWM) may have been sent
* before the system interrupt was enabled, so we need to
* clear all C2H events that the FW has already posted;
* otherwise the FW won't schedule any commands anymore.
*/
rtl_write_byte(rtlpriv, REG_C2HEVT_CLEAR, 0);
/*enable system interrupt*/
rtl_write_dword(rtlpriv, REG_HSIMR,
rtlpci->sys_irq_mask & 0xFFFFFFFF);
}
void rtl88ee_disable_interrupt(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
rtl_write_dword(rtlpriv, REG_HIMR, IMR_DISABLED);
rtl_write_dword(rtlpriv, REG_HIMRE, IMR_DISABLED);
rtlpci->irq_enabled = false;
/*synchronize_irq(rtlpci->pdev->irq);*/
}
static void _rtl88ee_poweroff_adapter(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
u8 u1b_tmp;
u32 count = 0;
rtlhal->mac_func_enable = false;
rtlpriv->intf_ops->enable_aspm(hw);
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "POWER OFF adapter\n");
u1b_tmp = rtl_read_byte(rtlpriv, REG_TX_RPT_CTRL);
rtl_write_byte(rtlpriv, REG_TX_RPT_CTRL, u1b_tmp & (~BIT(1)));
u1b_tmp = rtl_read_byte(rtlpriv, REG_RXDMA_CONTROL);
while (!(u1b_tmp & BIT(1)) && (count++ < 100)) {
udelay(10);
u1b_tmp = rtl_read_byte(rtlpriv, REG_RXDMA_CONTROL);
}
rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG+1, 0xFF);
rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
PWR_INTF_PCI_MSK,
RTL8188EE_NIC_LPS_ENTER_FLOW);
rtl_write_byte(rtlpriv, REG_RF_CTRL, 0x00);
if ((rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) && rtlhal->fw_ready)
rtl88e_firmware_selfreset(hw);
u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN+1);
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, (u1b_tmp & (~BIT(2))));
rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00);
u1b_tmp = rtl_read_byte(rtlpriv, REG_32K_CTRL);
rtl_write_byte(rtlpriv, REG_32K_CTRL, (u1b_tmp & (~BIT(0))));
rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
PWR_INTF_PCI_MSK, RTL8188EE_NIC_DISABLE_FLOW);
u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL+1);
rtl_write_byte(rtlpriv, REG_RSV_CTRL+1, (u1b_tmp & (~BIT(3))));
u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL+1);
rtl_write_byte(rtlpriv, REG_RSV_CTRL+1, (u1b_tmp | BIT(3)));
rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0E);
u1b_tmp = rtl_read_byte(rtlpriv, GPIO_IN);
rtl_write_byte(rtlpriv, GPIO_OUT, u1b_tmp);
rtl_write_byte(rtlpriv, GPIO_IO_SEL, 0x7F);
u1b_tmp = rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL);
rtl_write_byte(rtlpriv, REG_GPIO_IO_SEL, (u1b_tmp << 4) | u1b_tmp);
u1b_tmp = rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL+1);
rtl_write_byte(rtlpriv, REG_GPIO_IO_SEL+1, u1b_tmp | 0x0F);
rtl_write_dword(rtlpriv, REG_GPIO_IO_SEL_2+2, 0x00080808);
}
void rtl88ee_card_disable(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
enum nl80211_iftype opmode;
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "RTL8188ee card disable\n");
mac->link_state = MAC80211_NOLINK;
opmode = NL80211_IFTYPE_UNSPECIFIED;
_rtl88ee_set_media_status(hw, opmode);
if (rtlpriv->rtlhal.driver_is_goingto_unload ||
ppsc->rfoff_reason > RF_CHANGE_BY_PS)
rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);
RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
_rtl88ee_poweroff_adapter(hw);
/* after power off we should do iqk again */
rtlpriv->phy.iqk_initialized = false;
}
void rtl88ee_interrupt_recognized(struct ieee80211_hw *hw,
u32 *p_inta, u32 *p_intb)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
*p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
rtl_write_dword(rtlpriv, ISR, *p_inta);
*p_intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
rtl_write_dword(rtlpriv, REG_HISRE, *p_intb);
}
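/* Note on the pattern above: the ISR reads sample the pending bits
* masked by the enabled IRQ mask, and writing the same bits back
* acknowledges them - presumably the usual write-1-to-clear
* convention for these status registers.
*/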
void rtl88ee_set_beacon_related_registers(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
u16 bcn_interval, atim_window;
bcn_interval = mac->beacon_interval;
atim_window = 2; /*FIX MERGE */
rtl88ee_disable_interrupt(hw);
rtl_write_word(rtlpriv, REG_ATIMWND, atim_window);
rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660f);
rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_CCK, 0x18);
rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_OFDM, 0x18);
rtl_write_byte(rtlpriv, 0x606, 0x30);
rtlpci->reg_bcn_ctrl_val |= BIT(3);
rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8) rtlpci->reg_bcn_ctrl_val);
/*rtl88ee_enable_interrupt(hw);*/
}
void rtl88ee_set_beacon_interval(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
u16 bcn_interval = mac->beacon_interval;
RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG,
"beacon_interval:%d\n", bcn_interval);
/*rtl88ee_disable_interrupt(hw);*/
rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
/*rtl88ee_enable_interrupt(hw);*/
}
void rtl88ee_update_interrupt_mask(struct ieee80211_hw *hw,
u32 add_msr, u32 rm_msr)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD,
"add_msr:%x, rm_msr:%x\n", add_msr, rm_msr);
if (add_msr)
rtlpci->irq_mask[0] |= add_msr;
if (rm_msr)
rtlpci->irq_mask[0] &= (~rm_msr);
rtl88ee_disable_interrupt(hw);
rtl88ee_enable_interrupt(hw);
}
static u8 _rtl88e_get_chnl_group(u8 chnl)
{
u8 group = 0;
if (chnl < 3)
group = 0;
else if (chnl < 6)
group = 1;
else if (chnl < 9)
group = 2;
else if (chnl < 12)
group = 3;
else if (chnl < 14)
group = 4;
else if (chnl == 14)
group = 5;
return group;
}
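/* Channel-to-group mapping implemented above, for reference:
* group 0: ch 1-2, group 1: ch 3-5, group 2: ch 6-8,
* group 3: ch 9-11, group 4: ch 12-13, group 5: ch 14.
*/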
static void set_24g_base(struct txpower_info_2g *pwrinfo24g, u32 rfpath)
{
int group, txcnt;
for (group = 0 ; group < MAX_CHNL_GROUP_24G; group++) {
pwrinfo24g->index_cck_base[rfpath][group] = 0x2D;
pwrinfo24g->index_bw40_base[rfpath][group] = 0x2D;
}
for (txcnt = 0; txcnt < MAX_TX_COUNT; txcnt++) {
if (txcnt == 0) {
pwrinfo24g->bw20_diff[rfpath][0] = 0x02;
pwrinfo24g->ofdm_diff[rfpath][0] = 0x04;
} else {
pwrinfo24g->bw20_diff[rfpath][txcnt] = 0xFE;
pwrinfo24g->bw40_diff[rfpath][txcnt] = 0xFE;
pwrinfo24g->cck_diff[rfpath][txcnt] = 0xFE;
pwrinfo24g->ofdm_diff[rfpath][txcnt] = 0xFE;
}
}
}
static void read_power_value_fromprom(struct ieee80211_hw *hw,
struct txpower_info_2g *pwrinfo24g,
struct txpower_info_5g *pwrinfo5g,
bool autoload_fail, u8 *hwinfo)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 rfpath, eeaddr = EEPROM_TX_PWR_INX, group, txcnt = 0;
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"hal_ReadPowerValueFromPROM88E():PROMContent[0x%x]=0x%x\n",
(eeaddr+1), hwinfo[eeaddr+1]);
if (hwinfo[eeaddr+1] == 0xFF) /*YJ,add,120316*/
autoload_fail = true;
if (autoload_fail) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"auto load fail : Use Default value!\n");
for (rfpath = 0 ; rfpath < MAX_RF_PATH ; rfpath++) {
/* 2.4G default value */
set_24g_base(pwrinfo24g, rfpath);
}
return;
}
for (rfpath = 0 ; rfpath < MAX_RF_PATH ; rfpath++) {
/*2.4G default value*/
for (group = 0 ; group < MAX_CHNL_GROUP_24G; group++) {
pwrinfo24g->index_cck_base[rfpath][group] =
hwinfo[eeaddr++];
if (pwrinfo24g->index_cck_base[rfpath][group] == 0xFF)
pwrinfo24g->index_cck_base[rfpath][group] =
0x2D;
}
for (group = 0 ; group < MAX_CHNL_GROUP_24G-1; group++) {
pwrinfo24g->index_bw40_base[rfpath][group] =
hwinfo[eeaddr++];
if (pwrinfo24g->index_bw40_base[rfpath][group] == 0xFF)
pwrinfo24g->index_bw40_base[rfpath][group] =
0x2D;
}
pwrinfo24g->bw40_diff[rfpath][0] = 0;
if (hwinfo[eeaddr] == 0xFF) {
pwrinfo24g->bw20_diff[rfpath][0] = 0x02;
} else {
pwrinfo24g->bw20_diff[rfpath][0] =
(hwinfo[eeaddr]&0xf0)>>4;
/* sign-extend the 4-bit signed value to 8 bits */
if (pwrinfo24g->bw20_diff[rfpath][0] & BIT(3))
pwrinfo24g->bw20_diff[rfpath][0] |= 0xF0;
}
if (hwinfo[eeaddr] == 0xFF) {
pwrinfo24g->ofdm_diff[rfpath][0] = 0x04;
} else {
pwrinfo24g->ofdm_diff[rfpath][0] =
(hwinfo[eeaddr]&0x0f);
/* sign-extend the 4-bit signed value to 8 bits */
if (pwrinfo24g->ofdm_diff[rfpath][0] & BIT(3))
pwrinfo24g->ofdm_diff[rfpath][0] |= 0xF0;
}
pwrinfo24g->cck_diff[rfpath][0] = 0;
eeaddr++;
for (txcnt = 1; txcnt < MAX_TX_COUNT; txcnt++) {
if (hwinfo[eeaddr] == 0xFF) {
pwrinfo24g->bw40_diff[rfpath][txcnt] = 0xFE;
} else {
pwrinfo24g->bw40_diff[rfpath][txcnt] =
(hwinfo[eeaddr]&0xf0)>>4;
if (pwrinfo24g->bw40_diff[rfpath][txcnt] &
BIT(3))
pwrinfo24g->bw40_diff[rfpath][txcnt] |=
0xF0;
}
if (hwinfo[eeaddr] == 0xFF) {
pwrinfo24g->bw20_diff[rfpath][txcnt] =
0xFE;
} else {
pwrinfo24g->bw20_diff[rfpath][txcnt] =
(hwinfo[eeaddr]&0x0f);
if (pwrinfo24g->bw20_diff[rfpath][txcnt] &
BIT(3))
pwrinfo24g->bw20_diff[rfpath][txcnt] |=
0xF0;
}
eeaddr++;
if (hwinfo[eeaddr] == 0xFF) {
pwrinfo24g->ofdm_diff[rfpath][txcnt] = 0xFE;
} else {
pwrinfo24g->ofdm_diff[rfpath][txcnt] =
(hwinfo[eeaddr]&0xf0)>>4;
if (pwrinfo24g->ofdm_diff[rfpath][txcnt] &
BIT(3))
pwrinfo24g->ofdm_diff[rfpath][txcnt] |=
0xF0;
}
if (hwinfo[eeaddr] == 0xFF) {
pwrinfo24g->cck_diff[rfpath][txcnt] = 0xFE;
} else {
pwrinfo24g->cck_diff[rfpath][txcnt] =
(hwinfo[eeaddr]&0x0f);
if (pwrinfo24g->cck_diff[rfpath][txcnt] &
BIT(3))
pwrinfo24g->cck_diff[rfpath][txcnt] |=
0xF0;
}
eeaddr++;
}
/*5G default value*/
for (group = 0 ; group < MAX_CHNL_GROUP_5G; group++) {
pwrinfo5g->index_bw40_base[rfpath][group] =
hwinfo[eeaddr++];
if (pwrinfo5g->index_bw40_base[rfpath][group] == 0xFF)
pwrinfo5g->index_bw40_base[rfpath][group] =
0xFE;
}
pwrinfo5g->bw40_diff[rfpath][0] = 0;
if (hwinfo[eeaddr] == 0xFF) {
pwrinfo5g->bw20_diff[rfpath][0] = 0;
} else {
pwrinfo5g->bw20_diff[rfpath][0] =
(hwinfo[eeaddr]&0xf0)>>4;
if (pwrinfo5g->bw20_diff[rfpath][0] & BIT(3))
pwrinfo5g->bw20_diff[rfpath][0] |= 0xF0;
}
if (hwinfo[eeaddr] == 0xFF) {
pwrinfo5g->ofdm_diff[rfpath][0] = 0x04;
} else {
pwrinfo5g->ofdm_diff[rfpath][0] = (hwinfo[eeaddr]&0x0f);
if (pwrinfo5g->ofdm_diff[rfpath][0] & BIT(3))
pwrinfo5g->ofdm_diff[rfpath][0] |= 0xF0;
}
eeaddr++;
for (txcnt = 1; txcnt < MAX_TX_COUNT; txcnt++) {
if (hwinfo[eeaddr] == 0xFF) {
pwrinfo5g->bw40_diff[rfpath][txcnt] = 0xFE;
} else {
pwrinfo5g->bw40_diff[rfpath][txcnt] =
(hwinfo[eeaddr]&0xf0)>>4;
if (pwrinfo5g->bw40_diff[rfpath][txcnt] &
BIT(3))
pwrinfo5g->bw40_diff[rfpath][txcnt] |=
0xF0;
}
if (hwinfo[eeaddr] == 0xFF) {
pwrinfo5g->bw20_diff[rfpath][txcnt] = 0xFE;
} else {
pwrinfo5g->bw20_diff[rfpath][txcnt] =
(hwinfo[eeaddr]&0x0f);
if (pwrinfo5g->bw20_diff[rfpath][txcnt] &
BIT(3))
pwrinfo5g->bw20_diff[rfpath][txcnt] |=
0xF0;
}
eeaddr++;
}
if (hwinfo[eeaddr] == 0xFF) {
pwrinfo5g->ofdm_diff[rfpath][1] = 0xFE;
pwrinfo5g->ofdm_diff[rfpath][2] = 0xFE;
} else {
pwrinfo5g->ofdm_diff[rfpath][1] =
(hwinfo[eeaddr]&0xf0)>>4;
pwrinfo5g->ofdm_diff[rfpath][2] =
(hwinfo[eeaddr]&0x0f);
}
eeaddr++;
if (hwinfo[eeaddr] == 0xFF)
pwrinfo5g->ofdm_diff[rfpath][3] = 0xFE;
else
pwrinfo5g->ofdm_diff[rfpath][3] = (hwinfo[eeaddr]&0x0f);
eeaddr++;
for (txcnt = 1; txcnt < MAX_TX_COUNT; txcnt++) {
if (pwrinfo5g->ofdm_diff[rfpath][txcnt] == 0xFF)
pwrinfo5g->ofdm_diff[rfpath][txcnt] = 0xFE;
else if (pwrinfo5g->ofdm_diff[rfpath][txcnt] & BIT(3))
pwrinfo5g->ofdm_diff[rfpath][txcnt] |= 0xF0;
}
}
}
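/* The "| 0xF0 when BIT(3) is set" pattern above sign-extends a 4-bit
* two's-complement field to 8 bits. A minimal helper sketch showing the
* same operation (hypothetical; not used by the original driver):
*/
static inline u8 rtl88e_sign_extend_4bit(u8 nibble)
{
/* bit 3 is the sign bit of the 4-bit field; replicate it upward */
return (nibble & BIT(3)) ? (nibble | 0xF0) : nibble;
}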
static void _rtl88ee_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
bool autoload_fail,
u8 *hwinfo)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
struct txpower_info_2g pwrinfo24g;
struct txpower_info_5g pwrinfo5g;
u8 rf_path, index;
u8 i;
read_power_value_fromprom(hw, &pwrinfo24g,
&pwrinfo5g, autoload_fail, hwinfo);
for (rf_path = 0; rf_path < 2; rf_path++) {
for (i = 0; i < 14; i++) {
index = _rtl88e_get_chnl_group(i+1);
rtlefuse->txpwrlevel_cck[rf_path][i] =
pwrinfo24g.index_cck_base[rf_path][index];
rtlefuse->txpwrlevel_ht40_1s[rf_path][i] =
pwrinfo24g.index_bw40_base[rf_path][index];
rtlefuse->txpwr_ht20diff[rf_path][i] =
pwrinfo24g.bw20_diff[rf_path][0];
rtlefuse->txpwr_legacyhtdiff[rf_path][i] =
pwrinfo24g.ofdm_diff[rf_path][0];
}
for (i = 0; i < 14; i++) {
RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
"RF(%d)-Ch(%d) [CCK / HT40_1S ] = [0x%x / 0x%x ]\n",
rf_path, i,
rtlefuse->txpwrlevel_cck[rf_path][i],
rtlefuse->txpwrlevel_ht40_1s[rf_path][i]);
}
}
if (!autoload_fail)
rtlefuse->eeprom_thermalmeter =
hwinfo[EEPROM_THERMAL_METER_88E];
else
rtlefuse->eeprom_thermalmeter = EEPROM_DEFAULT_THERMALMETER;
if (rtlefuse->eeprom_thermalmeter == 0xff || autoload_fail) {
rtlefuse->apk_thermalmeterignore = true;
rtlefuse->eeprom_thermalmeter = EEPROM_DEFAULT_THERMALMETER;
}
rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter;
RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
"thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter);
if (!autoload_fail) {
rtlefuse->eeprom_regulatory =
hwinfo[EEPROM_RF_BOARD_OPTION_88E] & 0x07;/*bit0~2*/
if (hwinfo[EEPROM_RF_BOARD_OPTION_88E] == 0xFF)
rtlefuse->eeprom_regulatory = 0;
} else {
rtlefuse->eeprom_regulatory = 0;
}
RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
"eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory);
}
static void _rtl88ee_read_adapter_info(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
u16 i, usvalue;
u8 hwinfo[HWSET_MAX_SIZE];
u16 eeprom_id;
if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) {
rtl_efuse_shadow_map_update(hw);
memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
HWSET_MAX_SIZE);
} else if (rtlefuse->epromtype == EEPROM_93C46) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"RTL819X Not boot from eeprom, check it !!");
return;
} else {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"boot from neither eeprom nor efuse, check it !!");
return;
}
RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP\n",
hwinfo, HWSET_MAX_SIZE);
eeprom_id = *((u16 *)&hwinfo[0]);
if (eeprom_id != RTL8188E_EEPROM_ID) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
"EEPROM ID(%#x) is invalid!!\n", eeprom_id);
rtlefuse->autoload_failflag = true;
} else {
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
rtlefuse->autoload_failflag = false;
}
if (rtlefuse->autoload_failflag)
return;
/*VID DID SVID SDID*/
rtlefuse->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID];
rtlefuse->eeprom_did = *(u16 *)&hwinfo[EEPROM_DID];
rtlefuse->eeprom_svid = *(u16 *)&hwinfo[EEPROM_SVID];
rtlefuse->eeprom_smid = *(u16 *)&hwinfo[EEPROM_SMID];
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"EEPROMId = 0x%4x\n", eeprom_id);
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid);
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did);
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid);
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid);
/*customer ID*/
rtlefuse->eeprom_oemid = hwinfo[EEPROM_CUSTOMER_ID];
if (rtlefuse->eeprom_oemid == 0xFF)
rtlefuse->eeprom_oemid = 0;
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
/*EEPROM version*/
rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
/*mac address*/
for (i = 0; i < 6; i += 2) {
usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR + i];
*((u16 *)(&rtlefuse->dev_addr[i])) = usvalue;
}
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
"dev_addr: %pM\n", rtlefuse->dev_addr);
/*channel plan */
rtlefuse->eeprom_channelplan = hwinfo[EEPROM_CHANNELPLAN];
/* set channel plan to world wide 13 */
rtlefuse->channel_plan = COUNTRY_CODE_WORLD_WIDE_13;
/*tx power*/
_rtl88ee_read_txpower_info_from_hwpg(hw,
rtlefuse->autoload_failflag,
hwinfo);
rtlefuse->txpwr_fromeprom = true;
rtl8188ee_read_bt_coexist_info_from_hwpg(hw,
rtlefuse->autoload_failflag,
hwinfo);
/*board type*/
rtlefuse->board_type =
((hwinfo[EEPROM_RF_BOARD_OPTION_88E] & 0xE0) >> 5);
rtlhal->board_type = rtlefuse->board_type;
/*Wake on wlan*/
rtlefuse->wowlan_enable =
((hwinfo[EEPROM_RF_FEATURE_OPTION_88E] & 0x40) >> 6);
/*parse xtal*/
rtlefuse->crystalcap = hwinfo[EEPROM_XTAL_88E];
if (hwinfo[EEPROM_XTAL_88E] == 0xFF)
rtlefuse->crystalcap = 0x20;
/*antenna diversity*/
rtlefuse->antenna_div_cfg =
(hwinfo[EEPROM_RF_BOARD_OPTION_88E] & 0x18) >> 3;
if (hwinfo[EEPROM_RF_BOARD_OPTION_88E] == 0xFF)
rtlefuse->antenna_div_cfg = 0;
if (rtlpriv->btcoexist.eeprom_bt_coexist != 0 &&
rtlpriv->btcoexist.eeprom_bt_ant_num == ANT_X1)
rtlefuse->antenna_div_cfg = 0;
rtlefuse->antenna_div_type = hwinfo[EEPROM_RF_ANTENNA_OPT_88E];
if (rtlefuse->antenna_div_type == 0xFF)
rtlefuse->antenna_div_type = 0x01;
if (rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV ||
rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV)
rtlefuse->antenna_div_cfg = 1;
if (rtlhal->oem_id == RT_CID_DEFAULT) {
switch (rtlefuse->eeprom_oemid) {
case EEPROM_CID_DEFAULT:
if (rtlefuse->eeprom_did == 0x8179) {
if (rtlefuse->eeprom_svid == 0x1025) {
rtlhal->oem_id = RT_CID_819X_ACER;
} else if ((rtlefuse->eeprom_svid == 0x10EC &&
rtlefuse->eeprom_smid == 0x0179) ||
(rtlefuse->eeprom_svid == 0x17AA &&
rtlefuse->eeprom_smid == 0x0179)) {
rtlhal->oem_id = RT_CID_819X_LENOVO;
} else if (rtlefuse->eeprom_svid == 0x103c &&
rtlefuse->eeprom_smid == 0x197d) {
rtlhal->oem_id = RT_CID_819X_HP;
} else {
rtlhal->oem_id = RT_CID_DEFAULT;
}
} else {
rtlhal->oem_id = RT_CID_DEFAULT;
}
break;
case EEPROM_CID_TOSHIBA:
rtlhal->oem_id = RT_CID_TOSHIBA;
break;
case EEPROM_CID_QMI:
rtlhal->oem_id = RT_CID_819X_QMI;
break;
case EEPROM_CID_WHQL:
default:
rtlhal->oem_id = RT_CID_DEFAULT;
break;
}
}
}
static void _rtl88ee_hal_customized_behavior(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
pcipriv->ledctl.led_opendrain = true;
switch (rtlhal->oem_id) {
case RT_CID_819X_HP:
pcipriv->ledctl.led_opendrain = true;
break;
case RT_CID_819X_LENOVO:
case RT_CID_DEFAULT:
case RT_CID_TOSHIBA:
case RT_CID_CCX:
case RT_CID_819X_ACER:
case RT_CID_WHQL:
default:
break;
}
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
"RT Customized ID: 0x%02X\n", rtlhal->oem_id);
}
void rtl88ee_read_eeprom_info(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
u8 tmp_u1b;
rtlhal->version = _rtl88ee_read_chip_version(hw);
if (get_rf_type(rtlphy) == RF_1T1R)
rtlpriv->dm.rfpath_rxenable[0] = true;
else
rtlpriv->dm.rfpath_rxenable[0] =
rtlpriv->dm.rfpath_rxenable[1] = true;
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "VersionID = 0x%4x\n",
rtlhal->version);
tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
if (tmp_u1b & BIT(4)) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
rtlefuse->epromtype = EEPROM_93C46;
} else {
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
rtlefuse->epromtype = EEPROM_BOOT_EFUSE;
}
if (tmp_u1b & BIT(5)) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
rtlefuse->autoload_failflag = false;
_rtl88ee_read_adapter_info(hw);
} else {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n");
}
_rtl88ee_hal_customized_behavior(hw);
}
static void rtl88ee_update_hal_rate_table(struct ieee80211_hw *hw,
struct ieee80211_sta *sta)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
u32 ratr_value;
u8 ratr_index = 0;
u8 b_nmode = mac->ht_enable;
/*u8 mimo_ps = IEEE80211_SMPS_OFF;*/
u16 shortgi_rate;
u32 tmp_ratr_value;
u8 curtxbw_40mhz = mac->bw_40;
u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1 : 0;
u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1 : 0;
enum wireless_mode wirelessmode = mac->mode;
u32 ratr_mask;
if (rtlhal->current_bandtype == BAND_ON_5G)
ratr_value = sta->supp_rates[1] << 4;
else
ratr_value = sta->supp_rates[0];
if (mac->opmode == NL80211_IFTYPE_ADHOC)
ratr_value = 0xfff;
ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
sta->ht_cap.mcs.rx_mask[0] << 12);
switch (wirelessmode) {
case WIRELESS_MODE_B:
if (ratr_value & 0x0000000c)
ratr_value &= 0x0000000d;
else
ratr_value &= 0x0000000f;
break;
case WIRELESS_MODE_G:
ratr_value &= 0x00000FF5;
break;
case WIRELESS_MODE_N_24G:
case WIRELESS_MODE_N_5G:
b_nmode = 1;
if (get_rf_type(rtlphy) == RF_1T2R ||
get_rf_type(rtlphy) == RF_1T1R)
ratr_mask = 0x000ff005;
else
ratr_mask = 0x0f0ff005;
ratr_value &= ratr_mask;
break;
default:
if (rtlphy->rf_type == RF_1T2R)
ratr_value &= 0x000ff0ff;
else
ratr_value &= 0x0f0ff0ff;
break;
}
if ((rtlpriv->btcoexist.bt_coexistence) &&
(rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC4) &&
(rtlpriv->btcoexist.bt_cur_state) &&
(rtlpriv->btcoexist.bt_ant_isolation) &&
((rtlpriv->btcoexist.bt_service == BT_SCO) ||
(rtlpriv->btcoexist.bt_service == BT_BUSY)))
ratr_value &= 0x0fffcfc0;
else
ratr_value &= 0x0FFFFFFF;
if (b_nmode &&
((curtxbw_40mhz && curshortgi_40mhz) ||
(!curtxbw_40mhz && curshortgi_20mhz))) {
ratr_value |= 0x10000000;
tmp_ratr_value = (ratr_value >> 12);
for (shortgi_rate = 15; shortgi_rate > 0; shortgi_rate--) {
if ((1 << shortgi_rate) & tmp_ratr_value)
break;
}
shortgi_rate = (shortgi_rate << 12) | (shortgi_rate << 8) |
(shortgi_rate << 4) | (shortgi_rate);
}
rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
"%x\n", rtl_read_dword(rtlpriv, REG_ARFR0));
}
static void rtl88ee_update_hal_rate_mask(struct ieee80211_hw *hw,
struct ieee80211_sta *sta, u8 rssi_level)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct rtl_sta_info *sta_entry = NULL;
u32 ratr_bitmap;
u8 ratr_index;
u8 curtxbw_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
? 1 : 0;
u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1 : 0;
u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1 : 0;
enum wireless_mode wirelessmode = 0;
bool b_shortgi = false;
u8 rate_mask[5];
u8 macid = 0;
/*u8 mimo_ps = IEEE80211_SMPS_OFF;*/
sta_entry = (struct rtl_sta_info *)sta->drv_priv;
wirelessmode = sta_entry->wireless_mode;
if (mac->opmode == NL80211_IFTYPE_STATION ||
mac->opmode == NL80211_IFTYPE_MESH_POINT)
curtxbw_40mhz = mac->bw_40;
else if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_ADHOC)
macid = sta->aid + 1;
if (rtlhal->current_bandtype == BAND_ON_5G)
ratr_bitmap = sta->supp_rates[1] << 4;
else
ratr_bitmap = sta->supp_rates[0];
if (mac->opmode == NL80211_IFTYPE_ADHOC)
ratr_bitmap = 0xfff;
ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
sta->ht_cap.mcs.rx_mask[0] << 12);
switch (wirelessmode) {
case WIRELESS_MODE_B:
ratr_index = RATR_INX_WIRELESS_B;
if (ratr_bitmap & 0x0000000c)
ratr_bitmap &= 0x0000000d;
else
ratr_bitmap &= 0x0000000f;
break;
case WIRELESS_MODE_G:
ratr_index = RATR_INX_WIRELESS_GB;
if (rssi_level == 1)
ratr_bitmap &= 0x00000f00;
else if (rssi_level == 2)
ratr_bitmap &= 0x00000ff0;
else
ratr_bitmap &= 0x00000ff5;
break;
case WIRELESS_MODE_N_24G:
case WIRELESS_MODE_N_5G:
ratr_index = RATR_INX_WIRELESS_NGB;
if (rtlphy->rf_type == RF_1T2R ||
rtlphy->rf_type == RF_1T1R) {
if (curtxbw_40mhz) {
if (rssi_level == 1)
ratr_bitmap &= 0x000f0000;
else if (rssi_level == 2)
ratr_bitmap &= 0x000ff000;
else
ratr_bitmap &= 0x000ff015;
} else {
if (rssi_level == 1)
ratr_bitmap &= 0x000f0000;
else if (rssi_level == 2)
ratr_bitmap &= 0x000ff000;
else
ratr_bitmap &= 0x000ff005;
}
} else {
if (curtxbw_40mhz) {
if (rssi_level == 1)
ratr_bitmap &= 0x0f8f0000;
else if (rssi_level == 2)
ratr_bitmap &= 0x0f8ff000;
else
ratr_bitmap &= 0x0f8ff015;
} else {
if (rssi_level == 1)
ratr_bitmap &= 0x0f8f0000;
else if (rssi_level == 2)
ratr_bitmap &= 0x0f8ff000;
else
ratr_bitmap &= 0x0f8ff005;
}
}
if ((curtxbw_40mhz && curshortgi_40mhz) ||
(!curtxbw_40mhz && curshortgi_20mhz)) {
if (macid == 0)
b_shortgi = true;
else if (macid == 1)
b_shortgi = false;
}
break;
default:
ratr_index = RATR_INX_WIRELESS_NGB;
if (rtlphy->rf_type == RF_1T2R)
ratr_bitmap &= 0x000ff0ff;
else
ratr_bitmap &= 0x0f0ff0ff;
break;
}
sta_entry->ratr_index = ratr_index;
RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
"ratr_bitmap :%x\n", ratr_bitmap);
*(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) |
(ratr_index << 28);
rate_mask[4] = macid | (b_shortgi ? 0x20 : 0x00) | 0x80;
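/* H2C RA_MASK payload as built above: bytes 0-3 carry the rate bitmap
* with the RATR index packed into the top nibble; byte 4 packs the MAC
* ID, short-GI in bit 5, and bit 7 (presumably a valid/update flag
* expected by the firmware).
*/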
RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
"Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x\n",
ratr_index, ratr_bitmap,
rate_mask[0], rate_mask[1],
rate_mask[2], rate_mask[3],
rate_mask[4]);
rtl88e_fill_h2c_cmd(hw, H2C_88E_RA_MASK, 5, rate_mask);
_rtl88ee_set_bcn_ctrl_reg(hw, BIT(3), 0);
}
void rtl88ee_update_hal_rate_tbl(struct ieee80211_hw *hw,
struct ieee80211_sta *sta, u8 rssi_level)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (rtlpriv->dm.useramask)
rtl88ee_update_hal_rate_mask(hw, sta, rssi_level);
else
rtl88ee_update_hal_rate_table(hw, sta);
}
void rtl88ee_update_channel_access_setting(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
u16 sifs_timer;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME, &mac->slot_time);
if (!mac->ht_enable)
sifs_timer = 0x0a0a;
else
sifs_timer = 0x0e0e;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SIFS, (u8 *)&sifs_timer);
}
bool rtl88ee_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate;
u32 u4tmp;
bool b_actuallyset = false;
if (rtlpriv->rtlhal.being_init_adapter)
return false;
if (ppsc->swrf_processing)
return false;
spin_lock(&rtlpriv->locks.rf_ps_lock);
if (ppsc->rfchange_inprogress) {
spin_unlock(&rtlpriv->locks.rf_ps_lock);
return false;
} else {
ppsc->rfchange_inprogress = true;
spin_unlock(&rtlpriv->locks.rf_ps_lock);
}
cur_rfstate = ppsc->rfpwr_state;
u4tmp = rtl_read_dword(rtlpriv, REG_GPIO_OUTPUT);
e_rfpowerstate_toset = (u4tmp & BIT(31)) ? ERFON : ERFOFF;
if (ppsc->hwradiooff && (e_rfpowerstate_toset == ERFON)) {
RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
"GPIOChangeRF - HW Radio ON, RF ON\n");
e_rfpowerstate_toset = ERFON;
ppsc->hwradiooff = false;
b_actuallyset = true;
} else if ((!ppsc->hwradiooff) &&
(e_rfpowerstate_toset == ERFOFF)) {
RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
"GPIOChangeRF - HW Radio OFF, RF OFF\n");
e_rfpowerstate_toset = ERFOFF;
ppsc->hwradiooff = true;
b_actuallyset = true;
}
if (b_actuallyset) {
spin_lock(&rtlpriv->locks.rf_ps_lock);
ppsc->rfchange_inprogress = false;
spin_unlock(&rtlpriv->locks.rf_ps_lock);
} else {
if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC)
RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
spin_lock(&rtlpriv->locks.rf_ps_lock);
ppsc->rfchange_inprogress = false;
spin_unlock(&rtlpriv->locks.rf_ps_lock);
}
*valid = 1;
return !ppsc->hwradiooff;
}
void rtl88ee_set_key(struct ieee80211_hw *hw, u32 key_index,
u8 *p_macaddr, bool is_group, u8 enc_algo,
bool is_wepkey, bool clear_all)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
u8 *macaddr = p_macaddr;
u32 entry_id = 0;
bool is_pairwise = false;
static u8 cam_const_addr[4][6] = {
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
{0x00, 0x00, 0x00, 0x00, 0x00, 0x02},
{0x00, 0x00, 0x00, 0x00, 0x00, 0x03}
};
static u8 cam_const_broad[] = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
if (clear_all) {
u8 idx = 0;
u8 cam_offset = 0;
u8 clear_number = 5;
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
for (idx = 0; idx < clear_number; idx++) {
rtl_cam_mark_invalid(hw, cam_offset + idx);
rtl_cam_empty_entry(hw, cam_offset + idx);
if (idx < 5) {
memset(rtlpriv->sec.key_buf[idx], 0,
MAX_KEY_LEN);
rtlpriv->sec.key_len[idx] = 0;
}
}
} else {
switch (enc_algo) {
case WEP40_ENCRYPTION:
enc_algo = CAM_WEP40;
break;
case WEP104_ENCRYPTION:
enc_algo = CAM_WEP104;
break;
case TKIP_ENCRYPTION:
enc_algo = CAM_TKIP;
break;
case AESCCMP_ENCRYPTION:
enc_algo = CAM_AES;
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"switch case not process\n");
enc_algo = CAM_TKIP;
break;
}
if (is_wepkey || rtlpriv->sec.use_defaultkey) {
macaddr = cam_const_addr[key_index];
entry_id = key_index;
} else {
if (is_group) {
macaddr = cam_const_broad;
entry_id = key_index;
} else {
if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_MESH_POINT) {
entry_id =
rtl_cam_get_free_entry(hw, p_macaddr);
if (entry_id >= TOTAL_CAM_ENTRY) {
RT_TRACE(rtlpriv, COMP_SEC,
DBG_EMERG,
"Can not find free hw security cam entry\n");
return;
}
} else {
entry_id = CAM_PAIRWISE_KEY_POSITION;
}
key_index = PAIRWISE_KEYIDX;
is_pairwise = true;
}
}
if (rtlpriv->sec.key_len[key_index] == 0) {
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
"delete one entry, entry_id is %d\n",
entry_id);
if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_MESH_POINT)
rtl_cam_del_entry(hw, p_macaddr);
rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
} else {
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
"add one entry\n");
if (is_pairwise) {
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
"set Pairwise key\n");
rtl_cam_add_one_entry(hw, macaddr, key_index,
entry_id, enc_algo,
CAM_CONFIG_NO_USEDK,
rtlpriv->sec.key_buf[key_index]);
} else {
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
"set group key\n");
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
rtl_cam_add_one_entry(hw,
rtlefuse->dev_addr,
PAIRWISE_KEYIDX,
CAM_PAIRWISE_KEY_POSITION,
enc_algo,
CAM_CONFIG_NO_USEDK,
rtlpriv->sec.key_buf
[entry_id]);
}
rtl_cam_add_one_entry(hw, macaddr, key_index,
entry_id, enc_algo,
CAM_CONFIG_NO_USEDK,
rtlpriv->sec.key_buf[entry_id]);
}
}
}
}
static void rtl8188ee_bt_var_init(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtlpriv->btcoexist.bt_coexistence =
rtlpriv->btcoexist.eeprom_bt_coexist;
rtlpriv->btcoexist.bt_ant_num = rtlpriv->btcoexist.eeprom_bt_ant_num;
rtlpriv->btcoexist.bt_coexist_type = rtlpriv->btcoexist.eeprom_bt_type;
if (rtlpriv->btcoexist.reg_bt_iso == 2)
rtlpriv->btcoexist.bt_ant_isolation =
rtlpriv->btcoexist.eeprom_bt_ant_isol;
else
rtlpriv->btcoexist.bt_ant_isolation =
rtlpriv->btcoexist.reg_bt_iso;
rtlpriv->btcoexist.bt_radio_shared_type =
rtlpriv->btcoexist.eeprom_bt_radio_shared;
if (rtlpriv->btcoexist.bt_coexistence) {
if (rtlpriv->btcoexist.reg_bt_sco == 1)
rtlpriv->btcoexist.bt_service = BT_OTHER_ACTION;
else if (rtlpriv->btcoexist.reg_bt_sco == 2)
rtlpriv->btcoexist.bt_service = BT_SCO;
else if (rtlpriv->btcoexist.reg_bt_sco == 4)
rtlpriv->btcoexist.bt_service = BT_BUSY;
else if (rtlpriv->btcoexist.reg_bt_sco == 5)
rtlpriv->btcoexist.bt_service = BT_OTHERBUSY;
else
rtlpriv->btcoexist.bt_service = BT_IDLE;
rtlpriv->btcoexist.bt_edca_ul = 0;
rtlpriv->btcoexist.bt_edca_dl = 0;
rtlpriv->btcoexist.bt_rssi_state = 0xff;
}
}
void rtl8188ee_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
bool auto_load_fail, u8 *hwinfo)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 value;
if (!auto_load_fail) {
rtlpriv->btcoexist.eeprom_bt_coexist =
((hwinfo[EEPROM_RF_FEATURE_OPTION_88E] & 0xe0) >> 5);
if (hwinfo[EEPROM_RF_FEATURE_OPTION_88E] == 0xFF)
rtlpriv->btcoexist.eeprom_bt_coexist = 0;
value = hwinfo[EEPROM_RF_BT_SETTING_88E];
rtlpriv->btcoexist.eeprom_bt_type = ((value & 0xe) >> 1);
rtlpriv->btcoexist.eeprom_bt_ant_num = (value & 0x1);
rtlpriv->btcoexist.eeprom_bt_ant_isol = ((value & 0x10) >> 4);
rtlpriv->btcoexist.eeprom_bt_radio_shared =
((value & 0x20) >> 5);
} else {
rtlpriv->btcoexist.eeprom_bt_coexist = 0;
rtlpriv->btcoexist.eeprom_bt_type = BT_2WIRE;
rtlpriv->btcoexist.eeprom_bt_ant_num = ANT_X2;
rtlpriv->btcoexist.eeprom_bt_ant_isol = 0;
rtlpriv->btcoexist.eeprom_bt_radio_shared = BT_RADIO_SHARED;
}
rtl8188ee_bt_var_init(hw);
}
void rtl8188ee_bt_reg_init(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
/* 0:Low, 1:High, 2:From Efuse. */
rtlpriv->btcoexist.reg_bt_iso = 2;
/* 0:Idle, 1:None-SCO, 2:SCO, 3:From Counter. */
rtlpriv->btcoexist.reg_bt_sco = 3;
/* 0:Disable BT control A-MPDU, 1:Enable BT control A-MPDU. */
rtlpriv->btcoexist.reg_bt_sco = 0;
}
void rtl8188ee_bt_hw_init(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &rtlpriv->phy;
u8 u1_tmp;
if (rtlpriv->btcoexist.bt_coexistence &&
((rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC4) ||
rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC8)) {
if (rtlpriv->btcoexist.bt_ant_isolation)
rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0xa0);
u1_tmp = rtl_read_byte(rtlpriv, 0x4fd) &
BIT_OFFSET_LEN_MASK_32(0, 1);
u1_tmp = u1_tmp |
((rtlpriv->btcoexist.bt_ant_isolation == 1) ?
0 : BIT_OFFSET_LEN_MASK_32(1, 1)) |
((rtlpriv->btcoexist.bt_service == BT_SCO) ?
0 : BIT_OFFSET_LEN_MASK_32(2, 1));
rtl_write_byte(rtlpriv, 0x4fd, u1_tmp);
rtl_write_dword(rtlpriv, REG_BT_COEX_TABLE+4, 0xaaaa9aaa);
rtl_write_dword(rtlpriv, REG_BT_COEX_TABLE+8, 0xffbd0040);
rtl_write_dword(rtlpriv, REG_BT_COEX_TABLE+0xc, 0x40000010);
/* Config to 1T1R. */
if (rtlphy->rf_type == RF_1T1R) {
u1_tmp = rtl_read_byte(rtlpriv, ROFDM0_TRXPATHENABLE);
u1_tmp &= ~(BIT_OFFSET_LEN_MASK_32(1, 1));
rtl_write_byte(rtlpriv, ROFDM0_TRXPATHENABLE, u1_tmp);
u1_tmp = rtl_read_byte(rtlpriv, ROFDM1_TRXPATHENABLE);
u1_tmp &= ~(BIT_OFFSET_LEN_MASK_32(1, 1));
rtl_write_byte(rtlpriv, ROFDM1_TRXPATHENABLE, u1_tmp);
}
}
}
void rtl88ee_suspend(struct ieee80211_hw *hw)
{
}
void rtl88ee_resume(struct ieee80211_hw *hw)
{
}
| gpl-2.0 |
crypta-io/android_kernel_samsung_prevail2spr-stock-galaxy-rush | arch/powerpc/kernel/time.c | 691 | 30829 | /*
* Common time routines among all ppc machines.
*
* Written by Cort Dougan (cort@cs.nmt.edu) to merge
* Paul Mackerras' version and mine for PReP and Pmac.
* MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
* Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
*
* First round of bugfixes by Gabriel Paubert (paubert@iram.es)
* to make clock more stable (2.4.0-test5). The only thing
* that this code assumes is that the timebases have been synchronized
* by firmware on SMP and are never stopped (never do sleep
* on SMP then, nap and doze are OK).
*
* Speeded up do_gettimeofday by getting rid of references to
* xtime (which required locks for consistency). (mikejc@us.ibm.com)
*
* TODO (not necessarily in this file):
* - improve precision and reproducibility of timebase frequency
* measurement at boot time. (for iSeries, we calibrate the timebase
* against the Titan chip's clock.)
* - for astronomical applications: add a new function to get
* non ambiguous timestamps even around leap seconds. This needs
* a new timestamp format and a good name.
*
* 1997-09-10 Updated NTP code according to technical memorandum Jan '96
* "A Kernel Model for Precision Timekeeping" by Dave Mills
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/kernel_stat.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/cpu.h>
#include <linux/security.h>
#include <linux/percpu.h>
#include <linux/rtc.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/irq_work.h>
#include <asm/trace.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/machdep.h>
#include <asm/uaccess.h>
#include <asm/time.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/smp.h>
#include <asm/vdso_datapage.h>
#include <asm/firmware.h>
#include <asm/cputime.h>
#ifdef CONFIG_PPC_ISERIES
#include <asm/iseries/it_lp_queue.h>
#include <asm/iseries/hv_call_xm.h>
#endif
/* powerpc clocksource/clockevent code */
#include <linux/clockchips.h>
#include <linux/clocksource.h>
static cycle_t rtc_read(struct clocksource *);
static struct clocksource clocksource_rtc = {
.name = "rtc",
.rating = 400,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
.mask = CLOCKSOURCE_MASK(64),
.shift = 22,
.mult = 0, /* To be filled in */
.read = rtc_read,
};
static cycle_t timebase_read(struct clocksource *);
static struct clocksource clocksource_timebase = {
.name = "timebase",
.rating = 400,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
.mask = CLOCKSOURCE_MASK(64),
.shift = 22,
.mult = 0, /* To be filled in */
.read = timebase_read,
};
#define DECREMENTER_MAX 0x7fffffff
static int decrementer_set_next_event(unsigned long evt,
struct clock_event_device *dev);
static void decrementer_set_mode(enum clock_event_mode mode,
struct clock_event_device *dev);
static struct clock_event_device decrementer_clockevent = {
.name = "decrementer",
.rating = 200,
.shift = 0, /* To be filled in */
.mult = 0, /* To be filled in */
.irq = 0,
.set_next_event = decrementer_set_next_event,
.set_mode = decrementer_set_mode,
.features = CLOCK_EVT_FEAT_ONESHOT,
};
struct decrementer_clock {
struct clock_event_device event;
u64 next_tb;
};
static DEFINE_PER_CPU(struct decrementer_clock, decrementers);
#ifdef CONFIG_PPC_ISERIES
static unsigned long __initdata iSeries_recal_titan;
static signed long __initdata iSeries_recal_tb;
/* Forward declaration is only needed for iSeries compiles */
static void __init clocksource_init(void);
#endif
#define XSEC_PER_SEC (1024*1024)
#ifdef CONFIG_PPC64
#define SCALE_XSEC(xsec, max) (((xsec) * max) / XSEC_PER_SEC)
#else
/* compute ((xsec << 12) * max) >> 32 */
#define SCALE_XSEC(xsec, max) mulhwu((xsec) << 12, max)
#endif
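/* Illustration: an "xsec" is 1/XSEC_PER_SEC = 1/2^20 of a second, so
* SCALE_XSEC(xsec, max) maps a sub-second fraction onto [0, max); e.g.
* SCALE_XSEC(XSEC_PER_SEC / 2, 1000) evaluates to 500 on both variants.
*/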
unsigned long tb_ticks_per_jiffy;
unsigned long tb_ticks_per_usec = 100; /* sane default */
EXPORT_SYMBOL(tb_ticks_per_usec);
unsigned long tb_ticks_per_sec;
EXPORT_SYMBOL(tb_ticks_per_sec); /* for cputime_t conversions */
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);
static u64 tb_to_ns_scale __read_mostly;
static unsigned tb_to_ns_shift __read_mostly;
static u64 boot_tb __read_mostly;
extern struct timezone sys_tz;
static long timezone_offset;
unsigned long ppc_proc_freq;
EXPORT_SYMBOL_GPL(ppc_proc_freq);
unsigned long ppc_tb_freq;
EXPORT_SYMBOL_GPL(ppc_tb_freq);
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/*
* Factors for converting from cputime_t (timebase ticks) to
* jiffies, milliseconds, seconds, and clock_t (1/USER_HZ seconds).
* These are all stored as 0.64 fixed-point binary fractions.
*/
u64 __cputime_jiffies_factor;
EXPORT_SYMBOL(__cputime_jiffies_factor);
u64 __cputime_msec_factor;
EXPORT_SYMBOL(__cputime_msec_factor);
u64 __cputime_sec_factor;
EXPORT_SYMBOL(__cputime_sec_factor);
u64 __cputime_clockt_factor;
EXPORT_SYMBOL(__cputime_clockt_factor);
DEFINE_PER_CPU(unsigned long, cputime_last_delta);
DEFINE_PER_CPU(unsigned long, cputime_scaled_last_delta);
cputime_t cputime_one_jiffy;
void (*dtl_consumer)(struct dtl_entry *, u64);
static void calc_cputime_factors(void)
{
struct div_result res;
div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
__cputime_jiffies_factor = res.result_low;
div128_by_32(1000, 0, tb_ticks_per_sec, &res);
__cputime_msec_factor = res.result_low;
div128_by_32(1, 0, tb_ticks_per_sec, &res);
__cputime_sec_factor = res.result_low;
div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
__cputime_clockt_factor = res.result_low;
}
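/* Usage sketch (an assumption, not code from this file): with a 0.64
* fixed-point factor f = (HZ << 64) / tb_ticks_per_sec, timebase ticks
* convert to jiffies by taking the high 64 bits of the 128-bit product,
* i.e. jiffies = mulhdu(ticks, __cputime_jiffies_factor).
*/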
/*
* Read the SPURR on systems that have it, otherwise the PURR,
* or if that doesn't exist return the timebase value passed in.
*/
static u64 read_spurr(u64 tb)
{
if (cpu_has_feature(CPU_FTR_SPURR))
return mfspr(SPRN_SPURR);
if (cpu_has_feature(CPU_FTR_PURR))
return mfspr(SPRN_PURR);
return tb;
}
#ifdef CONFIG_PPC_SPLPAR
/*
* Scan the dispatch trace log and count up the stolen time.
* Should be called with interrupts disabled.
*/
static u64 scan_dispatch_log(u64 stop_tb)
{
u64 i = local_paca->dtl_ridx;
struct dtl_entry *dtl = local_paca->dtl_curr;
struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
struct lppaca *vpa = local_paca->lppaca_ptr;
u64 tb_delta;
u64 stolen = 0;
u64 dtb;
if (!dtl)
return 0;
if (i == vpa->dtl_idx)
return 0;
while (i < vpa->dtl_idx) {
if (dtl_consumer)
dtl_consumer(dtl, i);
dtb = dtl->timebase;
tb_delta = dtl->enqueue_to_dispatch_time +
dtl->ready_to_enqueue_time;
barrier();
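/* If the hypervisor has lapped us since this entry was read, the
* data above may already have been overwritten; resynchronize to
* the oldest entry still guaranteed valid.
*/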
if (i + N_DISPATCH_LOG < vpa->dtl_idx) {
/* buffer has overflowed */
i = vpa->dtl_idx - N_DISPATCH_LOG;
dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
continue;
}
if (dtb > stop_tb)
break;
stolen += tb_delta;
++i;
++dtl;
if (dtl == dtl_end)
dtl = local_paca->dispatch_log;
}
local_paca->dtl_ridx = i;
local_paca->dtl_curr = dtl;
return stolen;
}
/*
* Accumulate stolen time by scanning the dispatch trace log.
* Called on entry from user mode.
*/
void accumulate_stolen_time(void)
{
u64 sst, ust;
u8 save_soft_enabled = local_paca->soft_enabled;
u8 save_hard_enabled = local_paca->hard_enabled;
/* We are called early in the exception entry, before
* soft/hard_enabled are sync'ed to the expected state
* for the exception. We are hard disabled but the PACA
* needs to reflect that so various debug stuff doesn't
* complain
*/
local_paca->soft_enabled = 0;
local_paca->hard_enabled = 0;
sst = scan_dispatch_log(local_paca->starttime_user);
ust = scan_dispatch_log(local_paca->starttime);
local_paca->system_time -= sst;
local_paca->user_time -= ust;
local_paca->stolen_time += ust + sst;
local_paca->soft_enabled = save_soft_enabled;
local_paca->hard_enabled = save_hard_enabled;
}
static inline u64 calculate_stolen_time(u64 stop_tb)
{
u64 stolen = 0;
if (get_paca()->dtl_ridx != get_paca()->lppaca_ptr->dtl_idx) {
stolen = scan_dispatch_log(stop_tb);
get_paca()->system_time -= stolen;
}
stolen += get_paca()->stolen_time;
get_paca()->stolen_time = 0;
return stolen;
}
#else /* CONFIG_PPC_SPLPAR */
static inline u64 calculate_stolen_time(u64 stop_tb)
{
return 0;
}
#endif /* CONFIG_PPC_SPLPAR */
/*
* Account time for a transition between system, hard irq
* or soft irq state.
*/
void account_system_vtime(struct task_struct *tsk)
{
u64 now, nowscaled, delta, deltascaled;
unsigned long flags;
u64 stolen, udelta, sys_scaled, user_scaled;
local_irq_save(flags);
now = mftb();
nowscaled = read_spurr(now);
get_paca()->system_time += now - get_paca()->starttime;
get_paca()->starttime = now;
deltascaled = nowscaled - get_paca()->startspurr;
get_paca()->startspurr = nowscaled;
stolen = calculate_stolen_time(now);
delta = get_paca()->system_time;
get_paca()->system_time = 0;
udelta = get_paca()->user_time - get_paca()->utime_sspurr;
get_paca()->utime_sspurr = get_paca()->user_time;
/*
* Because we don't read the SPURR on every kernel entry/exit,
* deltascaled includes both user and system SPURR ticks.
* Apportion these ticks to system SPURR ticks and user
* SPURR ticks in the same ratio as the system time (delta)
* and user time (udelta) values obtained from the timebase
* over the same interval. The system ticks get accounted here;
* the user ticks get saved up in paca->user_time_scaled to be
* used by account_process_tick.
*/
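/*
* Worked example (illustrative numbers only): with delta = 300 and
* udelta = 100 timebase ticks but deltascaled = 200 SPURR ticks,
* sys_scaled = 200 * 300 / 400 = 150 and user_scaled = 50, i.e. the
* SPURR ticks are split 3:1 just like the timebase ticks.
*/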
sys_scaled = delta;
user_scaled = udelta;
if (deltascaled != delta + udelta) {
if (udelta) {
sys_scaled = deltascaled * delta / (delta + udelta);
user_scaled = deltascaled - sys_scaled;
} else {
sys_scaled = deltascaled;
}
}
get_paca()->user_time_scaled += user_scaled;
if (in_interrupt() || idle_task(smp_processor_id()) != tsk) {
account_system_time(tsk, 0, delta, sys_scaled);
if (stolen)
account_steal_time(stolen);
} else {
account_idle_time(delta + stolen);
}
local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(account_system_vtime);
/*
* Transfer the user and system times accumulated in the paca
* by the exception entry and exit code to the generic process
* user and system time records.
* Must be called with interrupts disabled.
* Assumes that account_system_vtime() has been called recently
* (i.e. since the last entry from usermode) so that
* get_paca()->user_time_scaled is up to date.
*/
void account_process_tick(struct task_struct *tsk, int user_tick)
{
cputime_t utime, utimescaled;
utime = get_paca()->user_time;
utimescaled = get_paca()->user_time_scaled;
get_paca()->user_time = 0;
get_paca()->user_time_scaled = 0;
get_paca()->utime_sspurr = 0;
account_user_time(tsk, utime, utimescaled);
}
#else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
#define calc_cputime_factors()
#endif
void __delay(unsigned long loops)
{
unsigned long start;
int diff;
if (__USE_RTC()) {
start = get_rtcl();
do {
/* the RTCL register wraps at 1000000000 */
diff = get_rtcl() - start;
if (diff < 0)
diff += 1000000000;
} while (diff < loops);
} else {
start = get_tbl();
while (get_tbl() - start < loops)
HMT_low();
HMT_medium();
}
}
EXPORT_SYMBOL(__delay);
void udelay(unsigned long usecs)
{
__delay(tb_ticks_per_usec * usecs);
}
EXPORT_SYMBOL(udelay);
#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
unsigned long pc = instruction_pointer(regs);
if (in_lock_functions(pc))
return regs->link;
return pc;
}
EXPORT_SYMBOL(profile_pc);
#endif
#ifdef CONFIG_PPC_ISERIES
/*
* This function recalibrates the timebase based on the 49-bit time-of-day
* value in the Titan chip. The Titan is much more accurate than the value
* returned by the service processor for the timebase frequency.
*/
static int __init iSeries_tb_recal(void)
{
unsigned long titan, tb;
/* Make sure we only run on iSeries */
if (!firmware_has_feature(FW_FEATURE_ISERIES))
return -ENODEV;
tb = get_tb();
titan = HvCallXm_loadTod();
if ( iSeries_recal_titan ) {
unsigned long tb_ticks = tb - iSeries_recal_tb;
unsigned long titan_usec = (titan - iSeries_recal_titan) >> 12;
unsigned long new_tb_ticks_per_sec = (tb_ticks * USEC_PER_SEC)/titan_usec;
unsigned long new_tb_ticks_per_jiffy =
DIV_ROUND_CLOSEST(new_tb_ticks_per_sec, HZ);
long tick_diff = new_tb_ticks_per_jiffy - tb_ticks_per_jiffy;
char sign = '+';
/* make sure tb_ticks_per_sec and tb_ticks_per_jiffy are consistent */
new_tb_ticks_per_sec = new_tb_ticks_per_jiffy * HZ;
if ( tick_diff < 0 ) {
tick_diff = -tick_diff;
sign = '-';
}
if ( tick_diff ) {
if ( tick_diff < tb_ticks_per_jiffy/25 ) {
printk( "Titan recalibrate: new tb_ticks_per_jiffy = %lu (%c%ld)\n",
new_tb_ticks_per_jiffy, sign, tick_diff );
tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
tb_ticks_per_sec = new_tb_ticks_per_sec;
calc_cputime_factors();
vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
setup_cputime_one_jiffy();
}
else {
printk( "Titan recalibrate: FAILED (difference > 4 percent)\n"
" new tb_ticks_per_jiffy = %lu\n"
" old tb_ticks_per_jiffy = %lu\n",
new_tb_ticks_per_jiffy, tb_ticks_per_jiffy );
}
}
}
iSeries_recal_titan = titan;
iSeries_recal_tb = tb;
/* Called here as now we know accurate values for the timebase */
clocksource_init();
return 0;
}
late_initcall(iSeries_tb_recal);
/* Called from platform early init */
void __init iSeries_time_init_early(void)
{
iSeries_recal_tb = get_tb();
iSeries_recal_titan = HvCallXm_loadTod();
}
#endif /* CONFIG_PPC_ISERIES */
#ifdef CONFIG_IRQ_WORK
/*
* 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
*/
#ifdef CONFIG_PPC64
static inline unsigned long test_irq_work_pending(void)
{
unsigned long x;
asm volatile("lbz %0,%1(13)"
: "=r" (x)
: "i" (offsetof(struct paca_struct, irq_work_pending)));
return x;
}
static inline void set_irq_work_pending_flag(void)
{
asm volatile("stb %0,%1(13)" : :
"r" (1),
"i" (offsetof(struct paca_struct, irq_work_pending)));
}
static inline void clear_irq_work_pending(void)
{
asm volatile("stb %0,%1(13)" : :
"r" (0),
"i" (offsetof(struct paca_struct, irq_work_pending)));
}
#else /* 32-bit */
DEFINE_PER_CPU(u8, irq_work_pending);
#define set_irq_work_pending_flag() __get_cpu_var(irq_work_pending) = 1
#define test_irq_work_pending() __get_cpu_var(irq_work_pending)
#define clear_irq_work_pending() __get_cpu_var(irq_work_pending) = 0
#endif /* 32 vs 64 bit */
void arch_irq_work_raise(void)
{
preempt_disable();
set_irq_work_pending_flag();
set_dec(1);
preempt_enable();
}
#else /* CONFIG_IRQ_WORK */
#define test_irq_work_pending() 0
#define clear_irq_work_pending()
#endif /* CONFIG_IRQ_WORK */
/*
* For iSeries shared processors, we have to let the hypervisor
* set the hardware decrementer. We set a virtual decrementer
* in the lppaca and call the hypervisor if the virtual
* decrementer is less than the current value in the hardware
* decrementer. (Almost always the new decrementer value will
* be greater than the current hardware decrementer, so the
* hypervisor call will not be needed.)
*/
/*
* timer_interrupt - gets called when the decrementer overflows,
* with interrupts disabled.
*/
void timer_interrupt(struct pt_regs * regs)
{
struct pt_regs *old_regs;
struct decrementer_clock *decrementer = &__get_cpu_var(decrementers);
struct clock_event_device *evt = &decrementer->event;
u64 now;
/* Ensure a positive value is written to the decrementer, or else
* some CPUs will continue to take decrementer exceptions.
*/
set_dec(DECREMENTER_MAX);
/* Some implementations of hotplug will get timer interrupts while
* offline, just ignore these
*/
if (!cpu_online(smp_processor_id()))
return;
trace_timer_interrupt_entry(regs);
__get_cpu_var(irq_stat).timer_irqs++;
#if defined(CONFIG_PPC32) && defined(CONFIG_PMAC)
if (atomic_read(&ppc_n_lost_interrupts) != 0)
do_IRQ(regs);
#endif
old_regs = set_irq_regs(regs);
irq_enter();
if (test_irq_work_pending()) {
clear_irq_work_pending();
irq_work_run();
}
#ifdef CONFIG_PPC_ISERIES
if (firmware_has_feature(FW_FEATURE_ISERIES))
get_lppaca()->int_dword.fields.decr_int = 0;
#endif
now = get_tb_or_rtc();
if (now >= decrementer->next_tb) {
decrementer->next_tb = ~(u64)0;
if (evt->event_handler)
evt->event_handler(evt);
} else {
now = decrementer->next_tb - now;
if (now <= DECREMENTER_MAX)
set_dec((int)now);
}
#ifdef CONFIG_PPC_ISERIES
if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending())
process_hvlpevents();
#endif
#ifdef CONFIG_PPC64
/* collect purr register values often, for accurate calculations */
if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
cu->current_tb = mfspr(SPRN_PURR);
}
#endif
irq_exit();
set_irq_regs(old_regs);
trace_timer_interrupt_exit(regs);
}
#ifdef CONFIG_SUSPEND
static void generic_suspend_disable_irqs(void)
{
/* Disable the decrementer, so that it doesn't interfere
* with suspending.
*/
set_dec(0x7fffffff);
local_irq_disable();
set_dec(0x7fffffff);
}
static void generic_suspend_enable_irqs(void)
{
local_irq_enable();
}
/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_disable_irqs(void)
{
if (ppc_md.suspend_disable_irqs)
ppc_md.suspend_disable_irqs();
generic_suspend_disable_irqs();
}
/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_enable_irqs(void)
{
generic_suspend_enable_irqs();
if (ppc_md.suspend_enable_irqs)
ppc_md.suspend_enable_irqs();
}
#endif
/*
* Scheduler clock - returns current time in nanosec units.
*
* Note: mulhdu(a, b) (multiply high double unsigned) returns
* the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
* are 64-bit unsigned numbers.
*/
unsigned long long sched_clock(void)
{
if (__USE_RTC())
return get_rtc();
return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
}
static int __init get_freq(char *name, int cells, unsigned long *val)
{
struct device_node *cpu;
const unsigned int *fp;
int found = 0;
/* The cpu node should have timebase and clock frequency properties */
cpu = of_find_node_by_type(NULL, "cpu");
if (cpu) {
fp = of_get_property(cpu, name, NULL);
if (fp) {
found = 1;
*val = of_read_ulong(fp, cells);
}
of_node_put(cpu);
}
return found;
}
/* should become __cpuinit when secondary_cpu_time_init also is */
void start_cpu_decrementer(void)
{
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
/* Clear any pending timer interrupts */
mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);
/* Enable decrementer interrupt */
mtspr(SPRN_TCR, TCR_DIE);
#endif /* defined(CONFIG_BOOKE) || defined(CONFIG_40x) */
}
void __init generic_calibrate_decr(void)
{
ppc_tb_freq = DEFAULT_TB_FREQ; /* hardcoded default */
if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
!get_freq("timebase-frequency", 1, &ppc_tb_freq)) {
printk(KERN_ERR "WARNING: Estimating decrementer frequency "
"(not found)\n");
}
ppc_proc_freq = DEFAULT_PROC_FREQ; /* hardcoded default */
if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
!get_freq("clock-frequency", 1, &ppc_proc_freq)) {
printk(KERN_ERR "WARNING: Estimating processor frequency "
"(not found)\n");
}
}
int update_persistent_clock(struct timespec now)
{
struct rtc_time tm;
if (!ppc_md.set_rtc_time)
return 0;
to_tm(now.tv_sec + 1 + timezone_offset, &tm);
tm.tm_year -= 1900;
tm.tm_mon -= 1;
return ppc_md.set_rtc_time(&tm);
}
static void __read_persistent_clock(struct timespec *ts)
{
struct rtc_time tm;
static int first = 1;
ts->tv_nsec = 0;
/* XXX this is a little fragile but will work okay in the short term */
if (first) {
first = 0;
if (ppc_md.time_init)
timezone_offset = ppc_md.time_init();
/* get_boot_time() isn't guaranteed to be safe to call late */
if (ppc_md.get_boot_time) {
ts->tv_sec = ppc_md.get_boot_time() - timezone_offset;
return;
}
}
if (!ppc_md.get_rtc_time) {
ts->tv_sec = 0;
return;
}
ppc_md.get_rtc_time(&tm);
ts->tv_sec = mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
tm.tm_hour, tm.tm_min, tm.tm_sec);
}
void read_persistent_clock(struct timespec *ts)
{
__read_persistent_clock(ts);
/* Sanitize it in case real time clock is set below EPOCH */
if (ts->tv_sec < 0) {
ts->tv_sec = 0;
ts->tv_nsec = 0;
}
}
/* clocksource code */
static cycle_t rtc_read(struct clocksource *cs)
{
return (cycle_t)get_rtc();
}
static cycle_t timebase_read(struct clocksource *cs)
{
return (cycle_t)get_tb();
}
void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
struct clocksource *clock, u32 mult)
{
u64 new_tb_to_xs, new_stamp_xsec;
u32 frac_sec;
if (clock != &clocksource_timebase)
return;
/* Make userspace gettimeofday spin until we're done. */
++vdso_data->tb_update_count;
smp_mb();
/* XXX this assumes clock->shift == 22 */
/* 4611686018 ~= 2^(20+64-22) / 1e9 */
new_tb_to_xs = (u64) mult * 4611686018ULL;
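/* Derivation of the constant: mult converts ticks to (ns << 22), so
* seconds-per-tick = mult / (2^22 * 1e9); the 0.64 xsec-per-tick
* factor is then mult * 2^(20 + 64 - 22) / 1e9, and 2^62 / 1e9 is
* approximately 4611686018.
*/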
new_stamp_xsec = (u64) wall_time->tv_nsec * XSEC_PER_SEC;
do_div(new_stamp_xsec, 1000000000);
new_stamp_xsec += (u64) wall_time->tv_sec * XSEC_PER_SEC;
BUG_ON(wall_time->tv_nsec >= NSEC_PER_SEC);
/* this is tv_nsec / 1e9 as a 0.32 fraction */
frac_sec = ((u64) wall_time->tv_nsec * 18446744073ULL) >> 32;
/*
* tb_update_count is used to allow the userspace gettimeofday code
* to assure itself that it sees a consistent view of the tb_to_xs and
* stamp_xsec variables. It reads the tb_update_count, then reads
* tb_to_xs and stamp_xsec and then reads tb_update_count again. If
* the two values of tb_update_count match and are even then the
* tb_to_xs and stamp_xsec values are consistent. If not, then it
* loops back and reads them again until this criterion is met.
* We expect the caller to have done the first increment of
* vdso_data->tb_update_count already.
*/
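/*
* Read-side sketch of this protocol (illustrative; the real VDSO
* reader lives elsewhere):
*
*	do {
*		seq = vdso_data->tb_update_count;
*		read tb_orig_stamp, tb_to_xs, stamp_xsec, ...;
*	} while ((seq & 1) || seq != vdso_data->tb_update_count);
*/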
vdso_data->tb_orig_stamp = clock->cycle_last;
vdso_data->stamp_xsec = new_stamp_xsec;
vdso_data->tb_to_xs = new_tb_to_xs;
vdso_data->wtom_clock_sec = wtm->tv_sec;
vdso_data->wtom_clock_nsec = wtm->tv_nsec;
vdso_data->stamp_xtime = *wall_time;
vdso_data->stamp_sec_fraction = frac_sec;
smp_wmb();
++(vdso_data->tb_update_count);
}
void update_vsyscall_tz(void)
{
/* Make userspace gettimeofday spin until we're done. */
++vdso_data->tb_update_count;
smp_mb();
vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
vdso_data->tz_dsttime = sys_tz.tz_dsttime;
smp_mb();
++vdso_data->tb_update_count;
}
static void __init clocksource_init(void)
{
struct clocksource *clock;
if (__USE_RTC())
clock = &clocksource_rtc;
else
clock = &clocksource_timebase;
clock->mult = clocksource_hz2mult(tb_ticks_per_sec, clock->shift);
if (clocksource_register(clock)) {
printk(KERN_ERR "clocksource: %s is already registered\n",
clock->name);
return;
}
printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n",
clock->name, clock->mult, clock->shift);
}
static int decrementer_set_next_event(unsigned long evt,
struct clock_event_device *dev)
{
__get_cpu_var(decrementers).next_tb = get_tb_or_rtc() + evt;
set_dec(evt);
return 0;
}
static void decrementer_set_mode(enum clock_event_mode mode,
struct clock_event_device *dev)
{
if (mode != CLOCK_EVT_MODE_ONESHOT)
decrementer_set_next_event(DECREMENTER_MAX, dev);
}
static inline uint64_t div_sc64(unsigned long ticks, unsigned long nsec,
int shift)
{
uint64_t tmp = ((uint64_t)ticks) << shift;
do_div(tmp, nsec);
return tmp;
}
static void __init setup_clockevent_multiplier(unsigned long hz)
{
u64 mult, shift = 32;
while (1) {
mult = div_sc64(hz, NSEC_PER_SEC, shift);
if (mult && (mult >> 32UL) == 0UL)
break;
shift--;
}
decrementer_clockevent.shift = shift;
decrementer_clockevent.mult = mult;
}
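/*
 * Worked example (illustrative only): with mult = hz * 2^shift /
 * NSEC_PER_SEC from the loop above, a nanosecond delta converts to
 * decrementer ticks by one multiply and one shift. The numbers below
 * assume a 100 MHz timebase, not real hardware.
 */
static unsigned long long example_ns_to_ticks(unsigned long long ns)
{
	/* hz = 100000000 keeps shift at 32; mult = 2^32 / 10 = 429496729 */
	unsigned long long mult = 429496729ULL;
	unsigned int shift = 32;

	/*
	 * 1 s = 1e9 ns -> 99999999 ticks, i.e. ~hz (truncation loses one).
	 * The 64-bit product bounds the usable range, which is why the
	 * clockevent also carries max_delta_ns.
	 */
	return (ns * mult) >> shift;
}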
static void register_decrementer_clockevent(int cpu)
{
struct clock_event_device *dec = &per_cpu(decrementers, cpu).event;
*dec = decrementer_clockevent;
dec->cpumask = cpumask_of(cpu);
printk_once(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n",
dec->name, dec->mult, dec->shift, cpu);
clockevents_register_device(dec);
}
static void __init init_decrementer_clockevent(void)
{
int cpu = smp_processor_id();
setup_clockevent_multiplier(ppc_tb_freq);
decrementer_clockevent.max_delta_ns =
clockevent_delta2ns(DECREMENTER_MAX, &decrementer_clockevent);
decrementer_clockevent.min_delta_ns =
clockevent_delta2ns(2, &decrementer_clockevent);
register_decrementer_clockevent(cpu);
}
void secondary_cpu_time_init(void)
{
/* Start the decrementer on CPUs that have manual control
* such as BookE
*/
start_cpu_decrementer();
/* FIXME: Should make an unrelated change to move the snapshot_timebase
 * call here! */
register_decrementer_clockevent(smp_processor_id());
}
/* This function is only called on the boot processor */
void __init time_init(void)
{
struct div_result res;
u64 scale;
unsigned shift;
if (__USE_RTC()) {
/* 601 processor: dec counts down by 128 every 128ns */
ppc_tb_freq = 1000000000;
} else {
/* Normal PowerPC with timebase register */
ppc_md.calibrate_decr();
printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
printk(KERN_DEBUG "time_init: processor frequency = %lu.%.6lu MHz\n",
ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
}
tb_ticks_per_jiffy = ppc_tb_freq / HZ;
tb_ticks_per_sec = ppc_tb_freq;
tb_ticks_per_usec = ppc_tb_freq / 1000000;
calc_cputime_factors();
setup_cputime_one_jiffy();
/*
* Compute scale factor for sched_clock.
* The calibrate_decr() function has set tb_ticks_per_sec,
* which is the timebase frequency.
* We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
* the 128-bit result as a 64.64 fixed-point number.
* We then shift that number right until it is less than 1.0,
* giving us the scale factor and shift count to use in
* sched_clock().
*/
div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
scale = res.result_low;
for (shift = 0; res.result_high != 0; ++shift) {
scale = (scale >> 1) | (res.result_high << 63);
res.result_high >>= 1;
}
tb_to_ns_scale = scale;
tb_to_ns_shift = shift;
/* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
boot_tb = get_tb_or_rtc();
/* If platform provided a timezone (pmac), we correct the time */
if (timezone_offset) {
sys_tz.tz_minuteswest = -timezone_offset / 60;
sys_tz.tz_dsttime = 0;
}
vdso_data->tb_update_count = 0;
vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
/* Start the decrementer on CPUs that have manual control
* such as BookE
*/
start_cpu_decrementer();
/* Register the clocksource, if we're not running on iSeries */
if (!firmware_has_feature(FW_FEATURE_ISERIES))
clocksource_init();
init_decrementer_clockevent();
}
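/*
 * Illustrative sketch only: how a sched_clock()-style consumer applies
 * the tb_to_ns_scale/tb_to_ns_shift pair computed above. Since scale
 * holds (1e9 * 2^64 / tb_ticks_per_sec) >> shift, nanoseconds are the
 * high half of a 64x64->128 multiply, shifted back left; GCC's
 * __int128 stands in here for the mulhdu step of the real code.
 */
static unsigned long long example_tb_to_ns(unsigned long long tb,
					   unsigned long long scale,
					   unsigned int shift)
{
	unsigned __int128 prod = (unsigned __int128)tb * scale;

	return (unsigned long long)(prod >> 64) << shift;
}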
#define FEBRUARY 2
#define STARTOFTIME 1970
#define SECDAY 86400L
#define SECYR (SECDAY * 365)
#define leapyear(year) ((year) % 4 == 0 && \
((year) % 100 != 0 || (year) % 400 == 0))
#define days_in_year(a) (leapyear(a) ? 366 : 365)
#define days_in_month(a) (month_days[(a) - 1])
static int month_days[12] = {
31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};
/*
* This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
*/
void GregorianDay(struct rtc_time * tm)
{
int leapsToDate;
int lastYear;
int day;
int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };
lastYear = tm->tm_year - 1;
/*
* Number of leap corrections to apply up to end of last year
*/
leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;
/*
* This year is a leap year if it is divisible by 4 except when it is
* divisible by 100 unless it is divisible by 400
*
* e.g. 1904 was a leap year, 1900 was not, 1996 was, and 2000 was
*/
day = tm->tm_mon > 2 && leapyear(tm->tm_year);
day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
tm->tm_mday;
tm->tm_wday = day % 7;
}
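/*
 * Worked example (illustrative only): 1 Jan 1970 through the
 * arithmetic above. Note that in this file tm_year is the full year
 * and tm_mon is 1-based, unlike userspace struct tm.
 *
 *   lastYear    = 1969
 *   leapsToDate = 1969/4 - 1969/100 + 1969/400 = 492 - 19 + 4 = 477
 *   day         = 0 + 1969*365 + 477 + MonthOffset[0] + 1 = 719163
 *   tm_wday     = 719163 % 7 = 4 (0 = Sunday, so a Thursday)
 */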
void to_tm(int tim, struct rtc_time * tm)
{
register int i;
register long hms, day;
day = tim / SECDAY;
hms = tim % SECDAY;
/* Hours, minutes, seconds are easy */
tm->tm_hour = hms / 3600;
tm->tm_min = (hms % 3600) / 60;
tm->tm_sec = (hms % 3600) % 60;
/* Number of years in days */
for (i = STARTOFTIME; day >= days_in_year(i); i++)
day -= days_in_year(i);
tm->tm_year = i;
/* Number of months in days left */
if (leapyear(tm->tm_year))
days_in_month(FEBRUARY) = 29;
for (i = 1; day >= days_in_month(i); i++)
day -= days_in_month(i);
days_in_month(FEBRUARY) = 28;
tm->tm_mon = i;
/* Days are what is left over (+1) from all that. */
tm->tm_mday = day + 1;
/*
* Determine the day of week
*/
GregorianDay(tm);
}
/*
* Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
* result.
*/
void div128_by_32(u64 dividend_high, u64 dividend_low,
unsigned divisor, struct div_result *dr)
{
unsigned long a, b, c, d;
unsigned long w, x, y, z;
u64 ra, rb, rc;
a = dividend_high >> 32;
b = dividend_high & 0xffffffff;
c = dividend_low >> 32;
d = dividend_low & 0xffffffff;
w = a / divisor;
ra = ((u64)(a - (w * divisor)) << 32) + b;
rb = ((u64) do_div(ra, divisor) << 32) + c;
x = ra;
rc = ((u64) do_div(rb, divisor) << 32) + d;
y = rb;
do_div(rc, divisor);
z = rc;
dr->result_high = ((u64)w << 32) + x;
dr->result_low = ((u64)y << 32) + z;
}
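/*
 * Illustrative self-check only (not kernel code): the same division
 * done natively with GCC's unsigned __int128, against which the
 * schoolbook long division above can be compared for any input.
 */
static void example_div128_check(unsigned long long hi, unsigned long long lo,
				 unsigned int divisor, struct div_result *dr)
{
	unsigned __int128 n = ((unsigned __int128)hi << 64) | lo;

	n /= divisor;			/* one native 128-by-32 divide */
	dr->result_high = (unsigned long long)(n >> 64);
	dr->result_low = (unsigned long long)n;
}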
/* We don't need to calibrate delay, we use the CPU timebase for that */
void calibrate_delay(void)
{
/* Some generic code (such as spinlock debug) uses loops_per_jiffy
* as the number of __delay(1) in a jiffy, so make it so
*/
loops_per_jiffy = tb_ticks_per_jiffy;
}
static int __init rtc_init(void)
{
struct platform_device *pdev;
if (!ppc_md.get_rtc_time)
return -ENODEV;
pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0);
if (IS_ERR(pdev))
return PTR_ERR(pdev);
return 0;
}
module_init(rtc_init);
| gpl-2.0 |
galaxyishere/samsung-kernel-latona | arch/sh/kernel/irq_32.c | 947 | 1145 | /*
* SHcompact irqflags support
*
* Copyright (C) 2006 - 2009 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/irqflags.h>
#include <linux/module.h>
void notrace raw_local_irq_restore(unsigned long flags)
{
unsigned long __dummy0, __dummy1;
if (flags == RAW_IRQ_DISABLED) {
__asm__ __volatile__ (
"stc sr, %0\n\t"
"or #0xf0, %0\n\t"
"ldc %0, sr\n\t"
: "=&z" (__dummy0)
: /* no inputs */
: "memory"
);
} else {
__asm__ __volatile__ (
"stc sr, %0\n\t"
"and %1, %0\n\t"
#ifdef CONFIG_CPU_HAS_SR_RB
"stc r6_bank, %1\n\t"
"or %1, %0\n\t"
#endif
"ldc %0, sr\n\t"
: "=&r" (__dummy0), "=r" (__dummy1)
: "1" (~RAW_IRQ_DISABLED)
: "memory"
);
}
}
EXPORT_SYMBOL(raw_local_irq_restore);
unsigned long notrace __raw_local_save_flags(void)
{
unsigned long flags;
__asm__ __volatile__ (
"stc sr, %0\n\t"
"and #0xf0, %0\n\t"
: "=&z" (flags)
: /* no inputs */
: "memory"
);
return flags;
}
EXPORT_SYMBOL(__raw_local_save_flags);
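/*
 * Worked example (illustrative only): on SHcompact the interrupt mask
 * is SR[7:4], so these helpers only ever exchange those four bits.
 * Assuming RAW_IRQ_DISABLED is 0xf0, __raw_local_save_flags() returns
 * 0xf0 (disabled) or 0x00 (enabled); raw_local_irq_restore(0xf0) ORs
 * the mask bits back into SR, while any other value clears them (also
 * merging in r6_bank state on CONFIG_CPU_HAS_SR_RB parts before the
 * write-back).
 */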
| gpl-2.0 |
regalstreak/S7262-Kernel | drivers/input/keyboard/nomadik-ske-keypad.c | 2995 | 10052 | /*
* Copyright (C) ST-Ericsson SA 2010
*
* Author: Naveen Kumar G <naveen.gaddipati@stericsson.com> for ST-Ericsson
* Author: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson
*
* License terms: GNU General Public License (GPL) version 2
*
* Keypad controller driver for the SKE (Scroll Key Encoder) module used in
* the Nomadik 8815 and Ux500 platforms.
*/
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/input.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <plat/ske.h>
/* SKE_CR bits */
#define SKE_KPMLT (0x1 << 6)
#define SKE_KPCN (0x7 << 3)
#define SKE_KPASEN (0x1 << 2)
#define SKE_KPASON (0x1 << 7)
/* SKE_IMSC bits */
#define SKE_KPIMA (0x1 << 2)
/* SKE_ICR bits */
#define SKE_KPICS (0x1 << 3)
#define SKE_KPICA (0x1 << 2)
/* SKE_RIS bits */
#define SKE_KPRISA (0x1 << 2)
#define SKE_KEYPAD_ROW_SHIFT 3
#define SKE_KPD_KEYMAP_SIZE (8 * 8)
/* keypad auto scan registers */
#define SKE_ASR0 0x20
#define SKE_ASR1 0x24
#define SKE_ASR2 0x28
#define SKE_ASR3 0x2C
#define SKE_NUM_ASRX_REGISTERS (4)
/**
* struct ske_keypad - data structure used by keypad driver
* @irq: irq no
* @reg_base: SKE registers base address
* @input: pointer to input device object
* @board: keypad platform device
* @keymap: matrix scan code table for keycodes
* @clk: clock structure pointer
*/
struct ske_keypad {
int irq;
void __iomem *reg_base;
struct input_dev *input;
const struct ske_keypad_platform_data *board;
unsigned short keymap[SKE_KPD_KEYMAP_SIZE];
struct clk *clk;
spinlock_t ske_keypad_lock;
};
static void ske_keypad_set_bits(struct ske_keypad *keypad, u16 addr,
u8 mask, u8 data)
{
u32 ret;
spin_lock(&keypad->ske_keypad_lock);
ret = readl(keypad->reg_base + addr);
ret &= ~mask;
ret |= data;
writel(ret, keypad->reg_base + addr);
spin_unlock(&keypad->ske_keypad_lock);
}
/*
* ske_keypad_chip_init: init keypad controller configuration
*
* Enable Multi key press detection, auto scan mode
*/
static int __devinit ske_keypad_chip_init(struct ske_keypad *keypad)
{
u32 value;
int timeout = 50;
/* check SKE_RIS to be 0 */
while ((readl(keypad->reg_base + SKE_RIS) != 0x00000000) && --timeout)
cpu_relax();
if (!timeout)
return -EINVAL;
/*
* set debounce value
* keypad debounce is configured in DBCR[15:8];
* debounce value is in steps of 32/32.768 ms
*/
spin_lock(&keypad->ske_keypad_lock);
value = readl(keypad->reg_base + SKE_DBCR);
value = value & 0xff;
value |= ((keypad->board->debounce_ms * 32000)/32768) << 8;
writel(value, keypad->reg_base + SKE_DBCR);
spin_unlock(&keypad->ske_keypad_lock);
/* enable multi key detection */
ske_keypad_set_bits(keypad, SKE_CR, 0x0, SKE_KPMLT);
/*
* set up the number of columns
* KPCN[5:3] defines no. of keypad columns to be auto scanned
*/
value = (keypad->board->kcol - 1) << 3;
ske_keypad_set_bits(keypad, SKE_CR, SKE_KPCN, value);
/* clear keypad interrupts for auto (and pending SW) scans */
ske_keypad_set_bits(keypad, SKE_ICR, 0x0, SKE_KPICA | SKE_KPICS);
/* un-mask keypad interrupts */
ske_keypad_set_bits(keypad, SKE_IMSC, 0x0, SKE_KPIMA);
/* enable automatic scan */
ske_keypad_set_bits(keypad, SKE_CR, 0x0, SKE_KPASEN);
return 0;
}
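/*
 * Worked example (illustrative only): for board->debounce_ms = 20 the
 * value programmed into DBCR[15:8] above is
 *
 *   (20 * 32000) / 32768 = 640000 / 32768 = 19
 *
 * i.e. 19 steps of 32/32.768 ms, roughly 18.55 ms of debounce after
 * the integer truncation.
 */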
static void ske_keypad_read_data(struct ske_keypad *keypad)
{
struct input_dev *input = keypad->input;
u16 status;
int col = 0, row = 0, code;
int ske_asr, ske_ris, key_pressed, i;
/*
* Read the auto scan registers
*
* Each SKE_ASRx (x=0 to x=3) contains two row values.
* lower byte contains row value for column 2*x,
* upper byte contains row value for column 2*x + 1
*/
for (i = 0; i < SKE_NUM_ASRX_REGISTERS; i++) {
ske_asr = readl(keypad->reg_base + SKE_ASR0 + (4 * i));
if (!ske_asr)
continue;
/* ASRx is non-zero; find out the column x and row y */
if (ske_asr & 0xff) {
col = i * 2;
status = ske_asr & 0xff;
} else {
col = (i * 2) + 1;
status = (ske_asr & 0xff00) >> 8;
}
/* find out the row */
row = __ffs(status);
code = MATRIX_SCAN_CODE(row, col, SKE_KEYPAD_ROW_SHIFT);
ske_ris = readl(keypad->reg_base + SKE_RIS);
key_pressed = ske_ris & SKE_KPRISA;
input_event(input, EV_MSC, MSC_SCAN, code);
input_report_key(input, keypad->keymap[code], key_pressed);
input_sync(input);
}
}
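/*
 * Worked example (illustrative only): suppose SKE_ASR1 reads 0x0400
 * during a scan. The low byte is zero, so the hit is in the upper
 * byte: col = 1 * 2 + 1 = 3, status = 0x04, row = __ffs(0x04) = 2,
 * and with SKE_KEYPAD_ROW_SHIFT = 3 the scan code is
 * (2 << 3) + 3 = 19, which indexes keypad->keymap for the keycode.
 */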
static irqreturn_t ske_keypad_irq(int irq, void *dev_id)
{
struct ske_keypad *keypad = dev_id;
int retries = 20;
/* disable auto scan interrupt; mask the interrupt generated */
ske_keypad_set_bits(keypad, SKE_IMSC, ~SKE_KPIMA, 0x0);
ske_keypad_set_bits(keypad, SKE_ICR, 0x0, SKE_KPICA);
while ((readl(keypad->reg_base + SKE_CR) & SKE_KPASON) && --retries)
msleep(5);
if (retries) {
/* SKEx registers are stable and can be read */
ske_keypad_read_data(keypad);
}
/* enable auto scan interrupts */
ske_keypad_set_bits(keypad, SKE_IMSC, 0x0, SKE_KPIMA);
return IRQ_HANDLED;
}
static int __devinit ske_keypad_probe(struct platform_device *pdev)
{
const struct ske_keypad_platform_data *plat = pdev->dev.platform_data;
struct ske_keypad *keypad;
struct input_dev *input;
struct resource *res;
int irq;
int error;
if (!plat) {
dev_err(&pdev->dev, "invalid keypad platform data\n");
return -EINVAL;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "failed to get keypad irq\n");
return -EINVAL;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev, "missing platform resources\n");
return -EINVAL;
}
keypad = kzalloc(sizeof(struct ske_keypad), GFP_KERNEL);
input = input_allocate_device();
if (!keypad || !input) {
dev_err(&pdev->dev, "failed to allocate keypad memory\n");
error = -ENOMEM;
goto err_free_mem;
}
keypad->irq = irq;
keypad->board = plat;
keypad->input = input;
spin_lock_init(&keypad->ske_keypad_lock);
if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
dev_err(&pdev->dev, "failed to request I/O memory\n");
error = -EBUSY;
goto err_free_mem;
}
keypad->reg_base = ioremap(res->start, resource_size(res));
if (!keypad->reg_base) {
dev_err(&pdev->dev, "failed to remap I/O memory\n");
error = -ENXIO;
goto err_free_mem_region;
}
keypad->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(keypad->clk)) {
dev_err(&pdev->dev, "failed to get clk\n");
error = PTR_ERR(keypad->clk);
goto err_iounmap;
}
input->id.bustype = BUS_HOST;
input->name = "ux500-ske-keypad";
input->dev.parent = &pdev->dev;
input->keycode = keypad->keymap;
input->keycodesize = sizeof(keypad->keymap[0]);
input->keycodemax = ARRAY_SIZE(keypad->keymap);
input_set_capability(input, EV_MSC, MSC_SCAN);
__set_bit(EV_KEY, input->evbit);
if (!plat->no_autorepeat)
__set_bit(EV_REP, input->evbit);
matrix_keypad_build_keymap(plat->keymap_data, SKE_KEYPAD_ROW_SHIFT,
input->keycode, input->keybit);
clk_enable(keypad->clk);
/* go through board initialization helpers */
if (keypad->board->init)
keypad->board->init();
error = ske_keypad_chip_init(keypad);
if (error) {
dev_err(&pdev->dev, "unable to init keypad hardware\n");
goto err_clk_disable;
}
error = request_threaded_irq(keypad->irq, NULL, ske_keypad_irq,
IRQF_ONESHOT, "ske-keypad", keypad);
if (error) {
dev_err(&pdev->dev, "allocate irq %d failed\n", keypad->irq);
goto err_clk_disable;
}
error = input_register_device(input);
if (error) {
dev_err(&pdev->dev,
"unable to register input device: %d\n", error);
goto err_free_irq;
}
if (plat->wakeup_enable)
device_init_wakeup(&pdev->dev, true);
platform_set_drvdata(pdev, keypad);
return 0;
err_free_irq:
free_irq(keypad->irq, keypad);
err_clk_disable:
clk_disable(keypad->clk);
clk_put(keypad->clk);
err_iounmap:
iounmap(keypad->reg_base);
err_free_mem_region:
release_mem_region(res->start, resource_size(res));
err_free_mem:
input_free_device(input);
kfree(keypad);
return error;
}
static int __devexit ske_keypad_remove(struct platform_device *pdev)
{
struct ske_keypad *keypad = platform_get_drvdata(pdev);
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
free_irq(keypad->irq, keypad);
input_unregister_device(keypad->input);
clk_disable(keypad->clk);
clk_put(keypad->clk);
if (keypad->board->exit)
keypad->board->exit();
iounmap(keypad->reg_base);
release_mem_region(res->start, resource_size(res));
kfree(keypad);
return 0;
}
#ifdef CONFIG_PM
static int ske_keypad_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct ske_keypad *keypad = platform_get_drvdata(pdev);
int irq = platform_get_irq(pdev, 0);
if (device_may_wakeup(dev))
enable_irq_wake(irq);
else
ske_keypad_set_bits(keypad, SKE_IMSC, ~SKE_KPIMA, 0x0);
return 0;
}
static int ske_keypad_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct ske_keypad *keypad = platform_get_drvdata(pdev);
int irq = platform_get_irq(pdev, 0);
if (device_may_wakeup(dev))
disable_irq_wake(irq);
else
ske_keypad_set_bits(keypad, SKE_IMSC, 0x0, SKE_KPIMA);
return 0;
}
static const struct dev_pm_ops ske_keypad_dev_pm_ops = {
.suspend = ske_keypad_suspend,
.resume = ske_keypad_resume,
};
#endif
struct platform_driver ske_keypad_driver = {
.driver = {
.name = "nmk-ske-keypad",
.owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &ske_keypad_dev_pm_ops,
#endif
},
.probe = ske_keypad_probe,
.remove = __devexit_p(ske_keypad_remove),
};
static int __init ske_keypad_init(void)
{
return platform_driver_probe(&ske_keypad_driver, ske_keypad_probe);
}
module_init(ske_keypad_init);
static void __exit ske_keypad_exit(void)
{
platform_driver_unregister(&ske_keypad_driver);
}
module_exit(ske_keypad_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Naveen Kumar <naveen.gaddipati@stericsson.com> / Sundar Iyer <sundar.iyer@stericsson.com>");
MODULE_DESCRIPTION("Nomadik Scroll-Key-Encoder Keypad Driver");
MODULE_ALIAS("platform:nomadik-ske-keypad");
| gpl-2.0 |
CyanogenMod/lge-kernel-p700 | arch/arm/plat-samsung/wakeup-mask.c | 2995 | 1106 | /* arch/arm/plat-samsung/wakeup-mask.c
*
* Copyright 2010 Ben Dooks <ben-linux@fluff.org>
*
* Support for wakeup mask interrupts on newer SoCs
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/sysdev.h>
#include <linux/types.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <plat/wakeup-mask.h>
#include <plat/pm.h>
void samsung_sync_wakemask(void __iomem *reg,
struct samsung_wakeup_mask *mask, int nr_mask)
{
struct irq_data *data;
u32 val;
val = __raw_readl(reg);
for (; nr_mask > 0; nr_mask--, mask++) {
if (mask->irq == NO_WAKEUP_IRQ) {
val |= mask->bit;
continue;
}
data = irq_get_irq_data(mask->irq);
/* bit of a liberty to read this directly from irq_data. */
if (irqd_is_wakeup_set(data))
val &= ~mask->bit;
else
val |= mask->bit;
}
printk(KERN_INFO "wakemask %08x => %08x\n", __raw_readl(reg), val);
__raw_writel(val, reg);
}
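/*
 * Illustrative sketch only: how a machine's pm code might feed this
 * helper. The register argument and the EXAMPLE_IRQ_RTC_ALARM number
 * below are placeholders for demonstration, not definitions from this
 * tree.
 */
#define EXAMPLE_IRQ_RTC_ALARM	64

static struct samsung_wakeup_mask example_wake_masks[] = {
	{ .irq = NO_WAKEUP_IRQ, .bit = (1 << 0) },	/* never a wake source */
	{ .irq = EXAMPLE_IRQ_RTC_ALARM, .bit = (1 << 1) }, /* follows irq_set_irq_wake() */
};

static void example_pm_prepare(void __iomem *wakemask_reg)
{
	samsung_sync_wakemask(wakemask_reg, example_wake_masks,
			      ARRAY_SIZE(example_wake_masks));
}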
| gpl-2.0 |
gchild320/shamu | sound/pci/ice1712/phase.c | 3251 | 25998 | /*
* ALSA driver for ICEnsemble ICE1724 (Envy24)
*
* Lowlevel functions for Terratec PHASE 22
*
* Copyright (c) 2005 Misha Zhilin <misha@epiphan.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
/* PHASE 22 overview:
* Audio controller: VIA Envy24HT-S (slightly trimmed down Envy24HT, 4in/4out)
* Analog chip: AK4524 (partially via Philips' 74HCT125)
* Digital receiver: CS8414-CS (supported in this release)
* PHASE 22 revision 2.0 and Terrasoniq/Musonik TS22 PCI have CS8416
* (support status unknown, please test and report)
*
* Envy connects to AK4524
* - CS directly from GPIO 10
* - CCLK via 74HCT125's gate #4 from GPIO 4
* - CDTI via 74HCT125's gate #2 from GPIO 5
* CDTI may be completely blocked by 74HCT125's gate #1
* controlled by GPIO 3
*/
/* PHASE 28 overview:
* Audio controller: VIA Envy24HT (full untrimmed version, 4in/8out)
* Analog chip: WM8770 (8 channel 192k DAC, 2 channel 96k ADC)
* Digital receiver: CS8414-CS (supported in this release)
*/
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <sound/core.h>
#include "ice1712.h"
#include "envy24ht.h"
#include "phase.h"
#include <sound/tlv.h>
/* AC97 register cache for Phase28 */
struct phase28_spec {
unsigned short master[2];
unsigned short vol[8];
};
/* WM8770 registers */
#define WM_DAC_ATTEN 0x00 /* DAC1-8 analog attenuation */
#define WM_DAC_MASTER_ATTEN 0x08 /* DAC master analog attenuation */
#define WM_DAC_DIG_ATTEN 0x09 /* DAC1-8 digital attenuation */
#define WM_DAC_DIG_MASTER_ATTEN 0x11 /* DAC master digital attenuation */
#define WM_PHASE_SWAP 0x12 /* DAC phase */
#define WM_DAC_CTRL1 0x13 /* DAC control bits */
#define WM_MUTE 0x14 /* mute controls */
#define WM_DAC_CTRL2 0x15 /* de-emphasis and zero-flag */
#define WM_INT_CTRL 0x16 /* interface control */
#define WM_MASTER 0x17 /* master clock and mode */
#define WM_POWERDOWN 0x18 /* power-down controls */
#define WM_ADC_GAIN 0x19 /* ADC gain L(19)/R(1a) */
#define WM_ADC_MUX 0x1b /* input MUX */
#define WM_OUT_MUX1 0x1c /* output MUX */
#define WM_OUT_MUX2 0x1e /* output MUX */
#define WM_RESET 0x1f /* software reset */
/*
* Logarithmic volume values for WM8770
* Computed as 20 * Log10(255 / x)
*/
static const unsigned char wm_vol[256] = {
127, 48, 42, 39, 36, 34, 33, 31, 30, 29, 28, 27, 27, 26, 25, 25, 24,
24, 23, 23, 22, 22, 21, 21, 21, 20, 20, 20, 19, 19, 19, 18, 18, 18, 18,
17, 17, 17, 17, 16, 16, 16, 16, 15, 15, 15, 15, 15, 15, 14, 14, 14, 14,
14, 13, 13, 13, 13, 13, 13, 13, 12, 12, 12, 12, 12, 12, 12, 11, 11, 11,
11, 11, 11, 11, 11, 11, 10, 10, 10, 10, 10, 10, 10, 10, 10, 9, 9, 9, 9,
9, 9, 9, 9, 9, 9, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
#define WM_VOL_MAX (sizeof(wm_vol) - 1)
#define WM_VOL_MUTE 0x8000
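/*
 * Illustrative sketch only (userspace, not part of this driver): a
 * generator for the wm_vol[] table above, following the
 * 20 * Log10(255 / x) formula from its comment. The x = 0 entry is
 * pinned to 127 by hand since log10 diverges there; round-to-nearest
 * is an assumption that reproduces the committed values spot-checked
 * here, and the code needs <math.h> and -lm.
 */
static void example_gen_wm_vol(unsigned char out[256])
{
	int x;

	out[0] = 127;				/* full-mute sentinel */
	for (x = 1; x < 256; x++)
		out[x] = (unsigned char)(20.0 * log10(255.0 / x) + 0.5);
}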
static struct snd_akm4xxx akm_phase22 = {
.type = SND_AK4524,
.num_dacs = 2,
.num_adcs = 2,
};
static struct snd_ak4xxx_private akm_phase22_priv = {
.caddr = 2,
.cif = 1,
.data_mask = 1 << 4,
.clk_mask = 1 << 5,
.cs_mask = 1 << 10,
.cs_addr = 1 << 10,
.cs_none = 0,
.add_flags = 1 << 3,
.mask_flags = 0,
};
static int phase22_init(struct snd_ice1712 *ice)
{
struct snd_akm4xxx *ak;
int err;
/* Configure DAC/ADC description for generic part of ice1724 */
switch (ice->eeprom.subvendor) {
case VT1724_SUBDEVICE_PHASE22:
case VT1724_SUBDEVICE_TS22:
ice->num_total_dacs = 2;
ice->num_total_adcs = 2;
ice->vt1720 = 1; /* Envy24HT-S has a 16-bit-wide GPIO */
break;
default:
snd_BUG();
return -EINVAL;
}
/* Initialize analog chips */
ice->akm = kzalloc(sizeof(struct snd_akm4xxx), GFP_KERNEL);
ak = ice->akm;
if (!ak)
return -ENOMEM;
ice->akm_codecs = 1;
switch (ice->eeprom.subvendor) {
case VT1724_SUBDEVICE_PHASE22:
case VT1724_SUBDEVICE_TS22:
err = snd_ice1712_akm4xxx_init(ak, &akm_phase22,
&akm_phase22_priv, ice);
if (err < 0)
return err;
break;
}
return 0;
}
static int phase22_add_controls(struct snd_ice1712 *ice)
{
int err = 0;
switch (ice->eeprom.subvendor) {
case VT1724_SUBDEVICE_PHASE22:
case VT1724_SUBDEVICE_TS22:
err = snd_ice1712_akm4xxx_build_controls(ice);
if (err < 0)
return err;
}
return 0;
}
static unsigned char phase22_eeprom[] = {
[ICE_EEP2_SYSCONF] = 0x28, /* clock 512, mpu 401,
spdif-in/1xADC, 1xDACs */
[ICE_EEP2_ACLINK] = 0x80, /* I2S */
[ICE_EEP2_I2S] = 0xf0, /* vol, 96k, 24bit */
[ICE_EEP2_SPDIF] = 0xc3, /* out-en, out-int, spdif-in */
[ICE_EEP2_GPIO_DIR] = 0xff,
[ICE_EEP2_GPIO_DIR1] = 0xff,
[ICE_EEP2_GPIO_DIR2] = 0xff,
[ICE_EEP2_GPIO_MASK] = 0x00,
[ICE_EEP2_GPIO_MASK1] = 0x00,
[ICE_EEP2_GPIO_MASK2] = 0x00,
[ICE_EEP2_GPIO_STATE] = 0x00,
[ICE_EEP2_GPIO_STATE1] = 0x00,
[ICE_EEP2_GPIO_STATE2] = 0x00,
};
static unsigned char phase28_eeprom[] = {
[ICE_EEP2_SYSCONF] = 0x2b, /* clock 512, mpu401,
spdif-in/1xADC, 4xDACs */
[ICE_EEP2_ACLINK] = 0x80, /* I2S */
[ICE_EEP2_I2S] = 0xfc, /* vol, 96k, 24bit, 192k */
[ICE_EEP2_SPDIF] = 0xc3, /* out-en, out-int, spdif-in */
[ICE_EEP2_GPIO_DIR] = 0xff,
[ICE_EEP2_GPIO_DIR1] = 0xff,
[ICE_EEP2_GPIO_DIR2] = 0x5f,
[ICE_EEP2_GPIO_MASK] = 0x00,
[ICE_EEP2_GPIO_MASK1] = 0x00,
[ICE_EEP2_GPIO_MASK2] = 0x00,
[ICE_EEP2_GPIO_STATE] = 0x00,
[ICE_EEP2_GPIO_STATE1] = 0x00,
[ICE_EEP2_GPIO_STATE2] = 0x00,
};
/*
* write data in the SPI mode
*/
static void phase28_spi_write(struct snd_ice1712 *ice, unsigned int cs,
unsigned int data, int bits)
{
unsigned int tmp;
int i;
tmp = snd_ice1712_gpio_read(ice);
snd_ice1712_gpio_set_mask(ice, ~(PHASE28_WM_RW|PHASE28_SPI_MOSI|
PHASE28_SPI_CLK|PHASE28_WM_CS));
tmp |= PHASE28_WM_RW;
tmp &= ~cs;
snd_ice1712_gpio_write(ice, tmp);
udelay(1);
for (i = bits - 1; i >= 0; i--) {
tmp &= ~PHASE28_SPI_CLK;
snd_ice1712_gpio_write(ice, tmp);
udelay(1);
if (data & (1 << i))
tmp |= PHASE28_SPI_MOSI;
else
tmp &= ~PHASE28_SPI_MOSI;
snd_ice1712_gpio_write(ice, tmp);
udelay(1);
tmp |= PHASE28_SPI_CLK;
snd_ice1712_gpio_write(ice, tmp);
udelay(1);
}
tmp &= ~PHASE28_SPI_CLK;
tmp |= cs;
snd_ice1712_gpio_write(ice, tmp);
udelay(1);
tmp |= PHASE28_SPI_CLK;
snd_ice1712_gpio_write(ice, tmp);
udelay(1);
}
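/*
 * Worked example (illustrative only): wm_put_nocache() below sends
 * reg = 0x17 (WM_MASTER), val = 0x022 as the 16-bit frame
 *
 *   (0x17 << 9) | 0x022 = 0x2e22
 *
 * i.e. a 7-bit register address in bits 15..9 and a 9-bit data value
 * in bits 8..0, clocked out MSB first by the loop above.
 */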
/*
* get the current register value of WM codec
*/
static unsigned short wm_get(struct snd_ice1712 *ice, int reg)
{
reg <<= 1;
return ((unsigned short)ice->akm[0].images[reg] << 8) |
ice->akm[0].images[reg + 1];
}
/*
* set the register value of WM codec
*/
static void wm_put_nocache(struct snd_ice1712 *ice, int reg, unsigned short val)
{
phase28_spi_write(ice, PHASE28_WM_CS, (reg << 9) | (val & 0x1ff), 16);
}
/*
* set the register value of WM codec and remember it
*/
static void wm_put(struct snd_ice1712 *ice, int reg, unsigned short val)
{
wm_put_nocache(ice, reg, val);
reg <<= 1;
ice->akm[0].images[reg] = val >> 8;
ice->akm[0].images[reg + 1] = val;
}
static void wm_set_vol(struct snd_ice1712 *ice, unsigned int index,
unsigned short vol, unsigned short master)
{
unsigned char nvol;
if ((master & WM_VOL_MUTE) || (vol & WM_VOL_MUTE))
nvol = 0;
else
nvol = 127 - wm_vol[(((vol & ~WM_VOL_MUTE) *
(master & ~WM_VOL_MUTE)) / 127) & WM_VOL_MAX];
wm_put(ice, index, nvol);
wm_put_nocache(ice, index, 0x180 | nvol);
}
/*
* DAC mute control
*/
#define wm_pcm_mute_info snd_ctl_boolean_mono_info
static int wm_pcm_mute_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
mutex_lock(&ice->gpio_mutex);
ucontrol->value.integer.value[0] = (wm_get(ice, WM_MUTE) & 0x10) ?
0 : 1;
mutex_unlock(&ice->gpio_mutex);
return 0;
}
static int wm_pcm_mute_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
unsigned short nval, oval;
int change;
snd_ice1712_save_gpio_status(ice);
oval = wm_get(ice, WM_MUTE);
nval = (oval & ~0x10) | (ucontrol->value.integer.value[0] ? 0 : 0x10);
change = (nval != oval);
if (change)
wm_put(ice, WM_MUTE, nval);
snd_ice1712_restore_gpio_status(ice);
return change;
}
/*
* Master volume attenuation mixer control
*/
static int wm_master_vol_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = 2;
uinfo->value.integer.min = 0;
uinfo->value.integer.max = WM_VOL_MAX;
return 0;
}
static int wm_master_vol_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
struct phase28_spec *spec = ice->spec;
int i;
for (i = 0; i < 2; i++)
ucontrol->value.integer.value[i] = spec->master[i] &
~WM_VOL_MUTE;
return 0;
}
static int wm_master_vol_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
struct phase28_spec *spec = ice->spec;
int ch, change = 0;
snd_ice1712_save_gpio_status(ice);
for (ch = 0; ch < 2; ch++) {
unsigned int vol = ucontrol->value.integer.value[ch];
if (vol > WM_VOL_MAX)
continue;
vol |= spec->master[ch] & WM_VOL_MUTE;
if (vol != spec->master[ch]) {
int dac;
spec->master[ch] = vol;
for (dac = 0; dac < ice->num_total_dacs; dac += 2)
wm_set_vol(ice, WM_DAC_ATTEN + dac + ch,
spec->vol[dac + ch],
spec->master[ch]);
change = 1;
}
}
snd_ice1712_restore_gpio_status(ice);
return change;
}
static int phase28_init(struct snd_ice1712 *ice)
{
static const unsigned short wm_inits_phase28[] = {
/* These come first to reduce init pop noise */
0x1b, 0x044, /* ADC Mux (AC'97 source) */
0x1c, 0x00B, /* Out Mux1 (VOUT1 = DAC+AUX, VOUT2 = DAC) */
0x1d, 0x009, /* Out Mux2 (VOUT2 = DAC, VOUT3 = DAC) */
0x18, 0x000, /* All power-up */
0x16, 0x122, /* I2S, normal polarity, 24bit */
0x17, 0x022, /* 256fs, slave mode */
0x00, 0, /* DAC1 analog mute */
0x01, 0, /* DAC2 analog mute */
0x02, 0, /* DAC3 analog mute */
0x03, 0, /* DAC4 analog mute */
0x04, 0, /* DAC5 analog mute */
0x05, 0, /* DAC6 analog mute */
0x06, 0, /* DAC7 analog mute */
0x07, 0, /* DAC8 analog mute */
0x08, 0x100, /* master analog mute */
0x09, 0xff, /* DAC1 digital full */
0x0a, 0xff, /* DAC2 digital full */
0x0b, 0xff, /* DAC3 digital full */
0x0c, 0xff, /* DAC4 digital full */
0x0d, 0xff, /* DAC5 digital full */
0x0e, 0xff, /* DAC6 digital full */
0x0f, 0xff, /* DAC7 digital full */
0x10, 0xff, /* DAC8 digital full */
0x11, 0x1ff, /* master digital full */
0x12, 0x000, /* phase normal */
0x13, 0x090, /* unmute DAC L/R */
0x14, 0x000, /* all unmute */
0x15, 0x000, /* no deemphasis, no ZFLG */
0x19, 0x000, /* -12dB ADC/L */
0x1a, 0x000, /* -12dB ADC/R */
(unsigned short)-1
};
unsigned int tmp;
struct snd_akm4xxx *ak;
struct phase28_spec *spec;
const unsigned short *p;
int i;
ice->num_total_dacs = 8;
ice->num_total_adcs = 2;
spec = kzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
return -ENOMEM;
ice->spec = spec;
/* Initialize analog chips */
ice->akm = kzalloc(sizeof(struct snd_akm4xxx), GFP_KERNEL);
ak = ice->akm;
if (!ak)
return -ENOMEM;
ice->akm_codecs = 1;
snd_ice1712_gpio_set_dir(ice, 0x5fffff); /* fix this for the time being */
/* reset the wm codec as the SPI mode */
snd_ice1712_save_gpio_status(ice);
snd_ice1712_gpio_set_mask(ice, ~(PHASE28_WM_RESET|PHASE28_WM_CS|
PHASE28_HP_SEL));
tmp = snd_ice1712_gpio_read(ice);
tmp &= ~PHASE28_WM_RESET;
snd_ice1712_gpio_write(ice, tmp);
udelay(1);
tmp |= PHASE28_WM_CS;
snd_ice1712_gpio_write(ice, tmp);
udelay(1);
tmp |= PHASE28_WM_RESET;
snd_ice1712_gpio_write(ice, tmp);
udelay(1);
p = wm_inits_phase28;
for (; *p != (unsigned short)-1; p += 2)
wm_put(ice, p[0], p[1]);
snd_ice1712_restore_gpio_status(ice);
spec->master[0] = WM_VOL_MUTE;
spec->master[1] = WM_VOL_MUTE;
for (i = 0; i < ice->num_total_dacs; i++) {
spec->vol[i] = WM_VOL_MUTE;
wm_set_vol(ice, i, spec->vol[i], spec->master[i % 2]);
}
return 0;
}
/*
* DAC volume attenuation mixer control
*/
static int wm_vol_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
int voices = kcontrol->private_value >> 8;
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = voices;
uinfo->value.integer.min = 0; /* mute (-101dB) */
uinfo->value.integer.max = 0x7F; /* 0dB */
return 0;
}
static int wm_vol_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
struct phase28_spec *spec = ice->spec;
int i, ofs, voices;
voices = kcontrol->private_value >> 8;
ofs = kcontrol->private_value & 0xff;
for (i = 0; i < voices; i++)
ucontrol->value.integer.value[i] =
spec->vol[ofs+i] & ~WM_VOL_MUTE;
return 0;
}
static int wm_vol_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
struct phase28_spec *spec = ice->spec;
int i, idx, ofs, voices;
int change = 0;
voices = kcontrol->private_value >> 8;
ofs = kcontrol->private_value & 0xff;
snd_ice1712_save_gpio_status(ice);
for (i = 0; i < voices; i++) {
unsigned int vol;
vol = ucontrol->value.integer.value[i];
if (vol > 0x7f)
continue;
vol |= spec->vol[ofs+i] & WM_VOL_MUTE;
if (vol != spec->vol[ofs+i]) {
spec->vol[ofs+i] = vol;
idx = WM_DAC_ATTEN + ofs + i;
wm_set_vol(ice, idx, spec->vol[ofs+i],
spec->master[i]);
change = 1;
}
}
snd_ice1712_restore_gpio_status(ice);
return change;
}
/*
* WM8770 mute control
*/
static int wm_mute_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo) {
uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
uinfo->count = kcontrol->private_value >> 8;
uinfo->value.integer.min = 0;
uinfo->value.integer.max = 1;
return 0;
}
static int wm_mute_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
struct phase28_spec *spec = ice->spec;
int voices, ofs, i;
voices = kcontrol->private_value >> 8;
ofs = kcontrol->private_value & 0xFF;
for (i = 0; i < voices; i++)
ucontrol->value.integer.value[i] =
(spec->vol[ofs+i] & WM_VOL_MUTE) ? 0 : 1;
return 0;
}
static int wm_mute_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
struct phase28_spec *spec = ice->spec;
int change = 0, voices, ofs, i;
voices = kcontrol->private_value >> 8;
ofs = kcontrol->private_value & 0xFF;
snd_ice1712_save_gpio_status(ice);
for (i = 0; i < voices; i++) {
int val = (spec->vol[ofs + i] & WM_VOL_MUTE) ? 0 : 1;
if (ucontrol->value.integer.value[i] != val) {
spec->vol[ofs + i] &= ~WM_VOL_MUTE;
spec->vol[ofs + i] |=
ucontrol->value.integer.value[i] ? 0 :
WM_VOL_MUTE;
wm_set_vol(ice, ofs + i, spec->vol[ofs + i],
spec->master[i]);
change = 1;
}
}
snd_ice1712_restore_gpio_status(ice);
return change;
}
/*
* WM8770 master mute control
*/
#define wm_master_mute_info snd_ctl_boolean_stereo_info
static int wm_master_mute_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
struct phase28_spec *spec = ice->spec;
ucontrol->value.integer.value[0] =
(spec->master[0] & WM_VOL_MUTE) ? 0 : 1;
ucontrol->value.integer.value[1] =
(spec->master[1] & WM_VOL_MUTE) ? 0 : 1;
return 0;
}
static int wm_master_mute_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
struct phase28_spec *spec = ice->spec;
int change = 0, i;
snd_ice1712_save_gpio_status(ice);
for (i = 0; i < 2; i++) {
int val = (spec->master[i] & WM_VOL_MUTE) ? 0 : 1;
if (ucontrol->value.integer.value[i] != val) {
int dac;
spec->master[i] &= ~WM_VOL_MUTE;
spec->master[i] |=
ucontrol->value.integer.value[i] ? 0 :
WM_VOL_MUTE;
for (dac = 0; dac < ice->num_total_dacs; dac += 2)
wm_set_vol(ice, WM_DAC_ATTEN + dac + i,
spec->vol[dac + i],
spec->master[i]);
change = 1;
}
}
snd_ice1712_restore_gpio_status(ice);
return change;
}
/* digital master volume */
#define PCM_0dB 0xff
#define PCM_RES 128 /* -64dB */
#define PCM_MIN (PCM_0dB - PCM_RES)
static int wm_pcm_vol_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = 1;
uinfo->value.integer.min = 0; /* mute (-64dB) */
uinfo->value.integer.max = PCM_RES; /* 0dB */
return 0;
}
static int wm_pcm_vol_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
unsigned short val;
mutex_lock(&ice->gpio_mutex);
val = wm_get(ice, WM_DAC_DIG_MASTER_ATTEN) & 0xff;
val = val > PCM_MIN ? (val - PCM_MIN) : 0;
ucontrol->value.integer.value[0] = val;
mutex_unlock(&ice->gpio_mutex);
return 0;
}
static int wm_pcm_vol_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
unsigned short ovol, nvol;
int change = 0;
nvol = ucontrol->value.integer.value[0];
if (nvol > PCM_RES)
return -EINVAL;
snd_ice1712_save_gpio_status(ice);
nvol = (nvol ? (nvol + PCM_MIN) : 0) & 0xff;
ovol = wm_get(ice, WM_DAC_DIG_MASTER_ATTEN) & 0xff;
if (ovol != nvol) {
wm_put(ice, WM_DAC_DIG_MASTER_ATTEN, nvol); /* prelatch */
/* update */
wm_put_nocache(ice, WM_DAC_DIG_MASTER_ATTEN, nvol | 0x100);
change = 1;
}
snd_ice1712_restore_gpio_status(ice);
return change;
}
/*
* Deemphasis
*/
#define phase28_deemp_info snd_ctl_boolean_mono_info
static int phase28_deemp_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
ucontrol->value.integer.value[0] = (wm_get(ice, WM_DAC_CTRL2) & 0xf) ==
0xf;
return 0;
}
static int phase28_deemp_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
int temp, temp2;
temp = wm_get(ice, WM_DAC_CTRL2);
temp2 = temp;
if (ucontrol->value.integer.value[0])
temp |= 0xf;
else
temp &= ~0xf;
if (temp != temp2) {
wm_put(ice, WM_DAC_CTRL2, temp);
return 1;
}
return 0;
}
/*
* ADC Oversampling
*/
static int phase28_oversampling_info(struct snd_kcontrol *k,
struct snd_ctl_elem_info *uinfo)
{
static const char * const texts[2] = { "128x", "64x" };
uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
uinfo->count = 1;
uinfo->value.enumerated.items = 2;
if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
uinfo->value.enumerated.item = uinfo->value.enumerated.items -
1;
strcpy(uinfo->value.enumerated.name,
texts[uinfo->value.enumerated.item]);
return 0;
}
static int phase28_oversampling_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
ucontrol->value.enumerated.item[0] = (wm_get(ice, WM_MASTER) & 0x8) ==
0x8;
return 0;
}
static int phase28_oversampling_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
int temp, temp2;
struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
temp = wm_get(ice, WM_MASTER);
temp2 = temp;
if (ucontrol->value.enumerated.item[0])
temp |= 0x8;
else
temp &= ~0x8;
if (temp != temp2) {
wm_put(ice, WM_MASTER, temp);
return 1;
}
return 0;
}
static const DECLARE_TLV_DB_SCALE(db_scale_wm_dac, -12700, 100, 1);
static const DECLARE_TLV_DB_SCALE(db_scale_wm_pcm, -6400, 50, 1);
static struct snd_kcontrol_new phase28_dac_controls[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Master Playback Switch",
.info = wm_master_mute_info,
.get = wm_master_mute_get,
.put = wm_master_mute_put
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.access = (SNDRV_CTL_ELEM_ACCESS_READWRITE |
SNDRV_CTL_ELEM_ACCESS_TLV_READ),
.name = "Master Playback Volume",
.info = wm_master_vol_info,
.get = wm_master_vol_get,
.put = wm_master_vol_put,
.tlv = { .p = db_scale_wm_dac }
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Front Playback Switch",
.info = wm_mute_info,
.get = wm_mute_get,
.put = wm_mute_put,
.private_value = (2 << 8) | 0
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.access = (SNDRV_CTL_ELEM_ACCESS_READWRITE |
SNDRV_CTL_ELEM_ACCESS_TLV_READ),
.name = "Front Playback Volume",
.info = wm_vol_info,
.get = wm_vol_get,
.put = wm_vol_put,
.private_value = (2 << 8) | 0,
.tlv = { .p = db_scale_wm_dac }
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Rear Playback Switch",
.info = wm_mute_info,
.get = wm_mute_get,
.put = wm_mute_put,
.private_value = (2 << 8) | 2
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.access = (SNDRV_CTL_ELEM_ACCESS_READWRITE |
SNDRV_CTL_ELEM_ACCESS_TLV_READ),
.name = "Rear Playback Volume",
.info = wm_vol_info,
.get = wm_vol_get,
.put = wm_vol_put,
.private_value = (2 << 8) | 2,
.tlv = { .p = db_scale_wm_dac }
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Center Playback Switch",
.info = wm_mute_info,
.get = wm_mute_get,
.put = wm_mute_put,
.private_value = (1 << 8) | 4
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.access = (SNDRV_CTL_ELEM_ACCESS_READWRITE |
SNDRV_CTL_ELEM_ACCESS_TLV_READ),
.name = "Center Playback Volume",
.info = wm_vol_info,
.get = wm_vol_get,
.put = wm_vol_put,
.private_value = (1 << 8) | 4,
.tlv = { .p = db_scale_wm_dac }
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "LFE Playback Switch",
.info = wm_mute_info,
.get = wm_mute_get,
.put = wm_mute_put,
.private_value = (1 << 8) | 5
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.access = (SNDRV_CTL_ELEM_ACCESS_READWRITE |
SNDRV_CTL_ELEM_ACCESS_TLV_READ),
.name = "LFE Playback Volume",
.info = wm_vol_info,
.get = wm_vol_get,
.put = wm_vol_put,
.private_value = (1 << 8) | 5,
.tlv = { .p = db_scale_wm_dac }
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Side Playback Switch",
.info = wm_mute_info,
.get = wm_mute_get,
.put = wm_mute_put,
.private_value = (2 << 8) | 6
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.access = (SNDRV_CTL_ELEM_ACCESS_READWRITE |
SNDRV_CTL_ELEM_ACCESS_TLV_READ),
.name = "Side Playback Volume",
.info = wm_vol_info,
.get = wm_vol_get,
.put = wm_vol_put,
.private_value = (2 << 8) | 6,
.tlv = { .p = db_scale_wm_dac }
}
};
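/*
 * Worked example (illustrative only): the private_value packing used
 * above is (voices << 8) | offset. "Rear Playback Volume" carries
 * (2 << 8) | 2 = 0x202, so wm_vol_get()/wm_vol_put() act on two
 * channels starting at spec->vol[2], i.e. registers WM_DAC_ATTEN + 2
 * and WM_DAC_ATTEN + 3 (WM8770 DACs 3 and 4).
 */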
static struct snd_kcontrol_new wm_controls[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "PCM Playback Switch",
.info = wm_pcm_mute_info,
.get = wm_pcm_mute_get,
.put = wm_pcm_mute_put
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.access = (SNDRV_CTL_ELEM_ACCESS_READWRITE |
SNDRV_CTL_ELEM_ACCESS_TLV_READ),
.name = "PCM Playback Volume",
.info = wm_pcm_vol_info,
.get = wm_pcm_vol_get,
.put = wm_pcm_vol_put,
.tlv = { .p = db_scale_wm_pcm }
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "DAC Deemphasis Switch",
.info = phase28_deemp_info,
.get = phase28_deemp_get,
.put = phase28_deemp_put
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "ADC Oversampling",
.info = phase28_oversampling_info,
.get = phase28_oversampling_get,
.put = phase28_oversampling_put
}
};
static int phase28_add_controls(struct snd_ice1712 *ice)
{
unsigned int i, counts;
int err;
counts = ARRAY_SIZE(phase28_dac_controls);
for (i = 0; i < counts; i++) {
err = snd_ctl_add(ice->card,
snd_ctl_new1(&phase28_dac_controls[i],
ice));
if (err < 0)
return err;
}
for (i = 0; i < ARRAY_SIZE(wm_controls); i++) {
err = snd_ctl_add(ice->card,
snd_ctl_new1(&wm_controls[i], ice));
if (err < 0)
return err;
}
return 0;
}
struct snd_ice1712_card_info snd_vt1724_phase_cards[] = {
{
.subvendor = VT1724_SUBDEVICE_PHASE22,
.name = "Terratec PHASE 22",
.model = "phase22",
.chip_init = phase22_init,
.build_controls = phase22_add_controls,
.eeprom_size = sizeof(phase22_eeprom),
.eeprom_data = phase22_eeprom,
},
{
.subvendor = VT1724_SUBDEVICE_PHASE28,
.name = "Terratec PHASE 28",
.model = "phase28",
.chip_init = phase28_init,
.build_controls = phase28_add_controls,
.eeprom_size = sizeof(phase28_eeprom),
.eeprom_data = phase28_eeprom,
},
{
.subvendor = VT1724_SUBDEVICE_TS22,
.name = "Terrasoniq TS22 PCI",
.model = "TS22",
.chip_init = phase22_init,
.build_controls = phase22_add_controls,
.eeprom_size = sizeof(phase22_eeprom),
.eeprom_data = phase22_eeprom,
},
{ } /* terminator */
};
| gpl-2.0 |
kykc/m7u-3.4.10-g4dad4ce | arch/arm/mach-msm/board-sapphire-rfkill.c | 4531 | 2657 | /* linux/arch/arm/mach-msm/board-sapphire-rfkill.c
* Copyright (C) 2007-2009 HTC Corporation.
* Author: Thomas Tsai <thomas_tsai@htc.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/* Control bluetooth power for sapphire platform */
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/rfkill.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <asm/mach-types.h>
#include "gpio_chip.h"
#include "board-sapphire.h"
static struct rfkill *bt_rfk;
static const char bt_name[] = "brf6300";
extern int sapphire_bt_fastclock_power(int on);
static int bluetooth_set_power(void *data, bool blocked)
{
if (!blocked) {
sapphire_bt_fastclock_power(1);
gpio_set_value(SAPPHIRE_GPIO_BT_32K_EN, 1);
udelay(10);
gpio_direction_output(101, 1);
} else {
gpio_direction_output(101, 0);
gpio_set_value(SAPPHIRE_GPIO_BT_32K_EN, 0);
sapphire_bt_fastclock_power(0);
}
return 0;
}
static struct rfkill_ops sapphire_rfkill_ops = {
.set_block = bluetooth_set_power,
};
static int sapphire_rfkill_probe(struct platform_device *pdev)
{
int rc = 0;
bool default_state = true; /* off */
bluetooth_set_power(NULL, default_state);
bt_rfk = rfkill_alloc(bt_name, &pdev->dev, RFKILL_TYPE_BLUETOOTH,
&sapphire_rfkill_ops, NULL);
if (!bt_rfk)
return -ENOMEM;
/* userspace cannot take exclusive control */
rfkill_set_states(bt_rfk, default_state, false);
rc = rfkill_register(bt_rfk);
if (rc)
rfkill_destroy(bt_rfk);
return rc;
}
static int sapphire_rfkill_remove(struct platform_device *dev)
{
rfkill_unregister(bt_rfk);
rfkill_destroy(bt_rfk);
return 0;
}
static struct platform_driver sapphire_rfkill_driver = {
.probe = sapphire_rfkill_probe,
.remove = sapphire_rfkill_remove,
.driver = {
.name = "sapphire_rfkill",
.owner = THIS_MODULE,
},
};
static int __init sapphire_rfkill_init(void)
{
return platform_driver_register(&sapphire_rfkill_driver);
}
static void __exit sapphire_rfkill_exit(void)
{
platform_driver_unregister(&sapphire_rfkill_driver);
}
module_init(sapphire_rfkill_init);
module_exit(sapphire_rfkill_exit);
MODULE_DESCRIPTION("sapphire rfkill");
MODULE_AUTHOR("Nick Pelly <npelly@google.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
GalaticStryder/kernel_lge_msm8974 | drivers/mfd/asic3.c | 4787 | 26728 | /*
* drivers/mfd/asic3.c
*
* Compaq ASIC3 support.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Copyright 2001 Compaq Computer Corporation.
* Copyright 2004-2005 Phil Blundell
* Copyright 2007-2008 OpenedHand Ltd.
*
* Authors: Phil Blundell <pb@handhelds.org>,
* Samuel Ortiz <sameo@openedhand.com>
*
*/
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/gpio.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/mfd/asic3.h>
#include <linux/mfd/core.h>
#include <linux/mfd/ds1wm.h>
#include <linux/mfd/tmio.h>
enum {
ASIC3_CLOCK_SPI,
ASIC3_CLOCK_OWM,
ASIC3_CLOCK_PWM0,
ASIC3_CLOCK_PWM1,
ASIC3_CLOCK_LED0,
ASIC3_CLOCK_LED1,
ASIC3_CLOCK_LED2,
ASIC3_CLOCK_SD_HOST,
ASIC3_CLOCK_SD_BUS,
ASIC3_CLOCK_SMBUS,
ASIC3_CLOCK_EX0,
ASIC3_CLOCK_EX1,
};
struct asic3_clk {
int enabled;
unsigned int cdex;
unsigned long rate;
};
#define INIT_CDEX(_name, _rate) \
[ASIC3_CLOCK_##_name] = { \
.cdex = CLOCK_CDEX_##_name, \
.rate = _rate, \
}
static struct asic3_clk asic3_clk_init[] __initdata = {
INIT_CDEX(SPI, 0),
INIT_CDEX(OWM, 5000000),
INIT_CDEX(PWM0, 0),
INIT_CDEX(PWM1, 0),
INIT_CDEX(LED0, 0),
INIT_CDEX(LED1, 0),
INIT_CDEX(LED2, 0),
INIT_CDEX(SD_HOST, 24576000),
INIT_CDEX(SD_BUS, 12288000),
INIT_CDEX(SMBUS, 0),
INIT_CDEX(EX0, 32768),
INIT_CDEX(EX1, 24576000),
};
struct asic3 {
void __iomem *mapping;
unsigned int bus_shift;
unsigned int irq_nr;
unsigned int irq_base;
spinlock_t lock;
u16 irq_bothedge[4];
struct gpio_chip gpio;
struct device *dev;
void __iomem *tmio_cnf;
struct asic3_clk clocks[ARRAY_SIZE(asic3_clk_init)];
};
static int asic3_gpio_get(struct gpio_chip *chip, unsigned offset);
void asic3_write_register(struct asic3 *asic, unsigned int reg, u32 value)
{
iowrite16(value, asic->mapping +
(reg >> asic->bus_shift));
}
EXPORT_SYMBOL_GPL(asic3_write_register);
u32 asic3_read_register(struct asic3 *asic, unsigned int reg)
{
return ioread16(asic->mapping +
(reg >> asic->bus_shift));
}
EXPORT_SYMBOL_GPL(asic3_read_register);
static void asic3_set_register(struct asic3 *asic, u32 reg, u32 bits, bool set)
{
unsigned long flags;
u32 val;
spin_lock_irqsave(&asic->lock, flags);
val = asic3_read_register(asic, reg);
if (set)
val |= bits;
else
val &= ~bits;
asic3_write_register(asic, reg, val);
spin_unlock_irqrestore(&asic->lock, flags);
}
/* IRQs */
#define MAX_ASIC_ISR_LOOPS 20
#define ASIC3_GPIO_BASE_INCR \
(ASIC3_GPIO_B_BASE - ASIC3_GPIO_A_BASE)
static void asic3_irq_flip_edge(struct asic3 *asic,
u32 base, int bit)
{
u16 edge;
unsigned long flags;
spin_lock_irqsave(&asic->lock, flags);
edge = asic3_read_register(asic,
base + ASIC3_GPIO_EDGE_TRIGGER);
edge ^= bit;
asic3_write_register(asic,
base + ASIC3_GPIO_EDGE_TRIGGER, edge);
spin_unlock_irqrestore(&asic->lock, flags);
}
static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc)
{
struct asic3 *asic = irq_desc_get_handler_data(desc);
struct irq_data *data = irq_desc_get_irq_data(desc);
int iter, i;
unsigned long flags;
data->chip->irq_ack(data);
for (iter = 0 ; iter < MAX_ASIC_ISR_LOOPS; iter++) {
u32 status;
int bank;
spin_lock_irqsave(&asic->lock, flags);
status = asic3_read_register(asic,
ASIC3_OFFSET(INTR, P_INT_STAT));
spin_unlock_irqrestore(&asic->lock, flags);
/* Check all ten register bits */
if ((status & 0x3ff) == 0)
break;
/* Handle GPIO IRQs */
for (bank = 0; bank < ASIC3_NUM_GPIO_BANKS; bank++) {
if (status & (1 << bank)) {
unsigned long base, istat;
base = ASIC3_GPIO_A_BASE
+ bank * ASIC3_GPIO_BASE_INCR;
spin_lock_irqsave(&asic->lock, flags);
istat = asic3_read_register(asic,
base +
ASIC3_GPIO_INT_STATUS);
/* Clearing IntStatus */
asic3_write_register(asic,
base +
ASIC3_GPIO_INT_STATUS, 0);
spin_unlock_irqrestore(&asic->lock, flags);
for (i = 0; i < ASIC3_GPIOS_PER_BANK; i++) {
int bit = (1 << i);
unsigned int irqnr;
if (!(istat & bit))
continue;
irqnr = asic->irq_base +
(ASIC3_GPIOS_PER_BANK * bank)
+ i;
generic_handle_irq(irqnr);
if (asic->irq_bothedge[bank] & bit)
asic3_irq_flip_edge(asic, base,
bit);
}
}
}
/* Handle remaining IRQs in the status register */
for (i = ASIC3_NUM_GPIOS; i < ASIC3_NR_IRQS; i++) {
/* They start at bit 4 and go up */
if (status & (1 << (i - ASIC3_NUM_GPIOS + 4)))
generic_handle_irq(asic->irq_base + i);
}
}
if (iter >= MAX_ASIC_ISR_LOOPS)
dev_err(asic->dev, "interrupt processing overrun\n");
}
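/*
 * Worked example (illustrative only, with an assumed irq_base = 336):
 * an edge on GPIO C4 sets status bit 2 (bank C) and istat bit 4 in
 * that bank. After the handler has latched and cleared the bank's
 * INT_STATUS, it dispatches
 *
 *   irqnr = 336 + 16 * 2 + 4 = 372
 *
 * and, if C4 is configured for both edges, flips the edge trigger so
 * the opposite transition is caught next time.
 */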
static inline int asic3_irq_to_bank(struct asic3 *asic, int irq)
{
int n;
n = (irq - asic->irq_base) >> 4;
return (n * (ASIC3_GPIO_B_BASE - ASIC3_GPIO_A_BASE));
}
static inline int asic3_irq_to_index(struct asic3 *asic, int irq)
{
return (irq - asic->irq_base) & 0xf;
}
static void asic3_mask_gpio_irq(struct irq_data *data)
{
struct asic3 *asic = irq_data_get_irq_chip_data(data);
u32 val, bank, index;
unsigned long flags;
bank = asic3_irq_to_bank(asic, data->irq);
index = asic3_irq_to_index(asic, data->irq);
spin_lock_irqsave(&asic->lock, flags);
val = asic3_read_register(asic, bank + ASIC3_GPIO_MASK);
val |= 1 << index;
asic3_write_register(asic, bank + ASIC3_GPIO_MASK, val);
spin_unlock_irqrestore(&asic->lock, flags);
}
static void asic3_mask_irq(struct irq_data *data)
{
struct asic3 *asic = irq_data_get_irq_chip_data(data);
int regval;
unsigned long flags;
spin_lock_irqsave(&asic->lock, flags);
regval = asic3_read_register(asic,
ASIC3_INTR_BASE +
ASIC3_INTR_INT_MASK);
regval &= ~(ASIC3_INTMASK_MASK0 <<
(data->irq - (asic->irq_base + ASIC3_NUM_GPIOS)));
asic3_write_register(asic,
ASIC3_INTR_BASE +
ASIC3_INTR_INT_MASK,
regval);
spin_unlock_irqrestore(&asic->lock, flags);
}
static void asic3_unmask_gpio_irq(struct irq_data *data)
{
struct asic3 *asic = irq_data_get_irq_chip_data(data);
u32 val, bank, index;
unsigned long flags;
bank = asic3_irq_to_bank(asic, data->irq);
index = asic3_irq_to_index(asic, data->irq);
spin_lock_irqsave(&asic->lock, flags);
val = asic3_read_register(asic, bank + ASIC3_GPIO_MASK);
val &= ~(1 << index);
asic3_write_register(asic, bank + ASIC3_GPIO_MASK, val);
spin_unlock_irqrestore(&asic->lock, flags);
}
static void asic3_unmask_irq(struct irq_data *data)
{
struct asic3 *asic = irq_data_get_irq_chip_data(data);
int regval;
unsigned long flags;
spin_lock_irqsave(&asic->lock, flags);
regval = asic3_read_register(asic,
ASIC3_INTR_BASE +
ASIC3_INTR_INT_MASK);
regval |= (ASIC3_INTMASK_MASK0 <<
(data->irq - (asic->irq_base + ASIC3_NUM_GPIOS)));
asic3_write_register(asic,
ASIC3_INTR_BASE +
ASIC3_INTR_INT_MASK,
regval);
spin_unlock_irqrestore(&asic->lock, flags);
}
static int asic3_gpio_irq_type(struct irq_data *data, unsigned int type)
{
struct asic3 *asic = irq_data_get_irq_chip_data(data);
u32 bank, index;
u16 trigger, level, edge, bit;
unsigned long flags;
bank = asic3_irq_to_bank(asic, data->irq);
index = asic3_irq_to_index(asic, data->irq);
bit = 1<<index;
spin_lock_irqsave(&asic->lock, flags);
level = asic3_read_register(asic,
bank + ASIC3_GPIO_LEVEL_TRIGGER);
edge = asic3_read_register(asic,
bank + ASIC3_GPIO_EDGE_TRIGGER);
trigger = asic3_read_register(asic,
bank + ASIC3_GPIO_TRIGGER_TYPE);
asic->irq_bothedge[(data->irq - asic->irq_base) >> 4] &= ~bit;
if (type == IRQ_TYPE_EDGE_RISING) {
trigger |= bit;
edge |= bit;
} else if (type == IRQ_TYPE_EDGE_FALLING) {
trigger |= bit;
edge &= ~bit;
} else if (type == IRQ_TYPE_EDGE_BOTH) {
trigger |= bit;
if (asic3_gpio_get(&asic->gpio, data->irq - asic->irq_base))
edge &= ~bit;
else
edge |= bit;
asic->irq_bothedge[(data->irq - asic->irq_base) >> 4] |= bit;
} else if (type == IRQ_TYPE_LEVEL_LOW) {
trigger &= ~bit;
level &= ~bit;
} else if (type == IRQ_TYPE_LEVEL_HIGH) {
trigger &= ~bit;
level |= bit;
} else {
/*
* if type == IRQ_TYPE_NONE, we should mask interrupts, but
* be careful to not unmask them if mask was also called.
* Probably need internal state for mask.
*/
dev_notice(asic->dev, "irq type not changed\n");
}
asic3_write_register(asic, bank + ASIC3_GPIO_LEVEL_TRIGGER,
level);
asic3_write_register(asic, bank + ASIC3_GPIO_EDGE_TRIGGER,
edge);
asic3_write_register(asic, bank + ASIC3_GPIO_TRIGGER_TYPE,
trigger);
spin_unlock_irqrestore(&asic->lock, flags);
return 0;
}
static struct irq_chip asic3_gpio_irq_chip = {
.name = "ASIC3-GPIO",
.irq_ack = asic3_mask_gpio_irq,
.irq_mask = asic3_mask_gpio_irq,
.irq_unmask = asic3_unmask_gpio_irq,
.irq_set_type = asic3_gpio_irq_type,
};
static struct irq_chip asic3_irq_chip = {
.name = "ASIC3",
.irq_ack = asic3_mask_irq,
.irq_mask = asic3_mask_irq,
.irq_unmask = asic3_unmask_irq,
};
static int __init asic3_irq_probe(struct platform_device *pdev)
{
struct asic3 *asic = platform_get_drvdata(pdev);
unsigned long clksel = 0;
unsigned int irq, irq_base;
int ret;
ret = platform_get_irq(pdev, 0);
if (ret < 0)
return ret;
asic->irq_nr = ret;
/* turn on clock to IRQ controller */
clksel |= CLOCK_SEL_CX;
asic3_write_register(asic, ASIC3_OFFSET(CLOCK, SEL),
clksel);
irq_base = asic->irq_base;
for (irq = irq_base; irq < irq_base + ASIC3_NR_IRQS; irq++) {
if (irq < asic->irq_base + ASIC3_NUM_GPIOS)
irq_set_chip(irq, &asic3_gpio_irq_chip);
else
irq_set_chip(irq, &asic3_irq_chip);
irq_set_chip_data(irq, asic);
irq_set_handler(irq, handle_level_irq);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
asic3_write_register(asic, ASIC3_OFFSET(INTR, INT_MASK),
ASIC3_INTMASK_GINTMASK);
irq_set_chained_handler(asic->irq_nr, asic3_irq_demux);
irq_set_irq_type(asic->irq_nr, IRQ_TYPE_EDGE_RISING);
irq_set_handler_data(asic->irq_nr, asic);
return 0;
}
static void asic3_irq_remove(struct platform_device *pdev)
{
struct asic3 *asic = platform_get_drvdata(pdev);
unsigned int irq, irq_base;
irq_base = asic->irq_base;
for (irq = irq_base; irq < irq_base + ASIC3_NR_IRQS; irq++) {
set_irq_flags(irq, 0);
irq_set_chip_and_handler(irq, NULL, NULL);
irq_set_chip_data(irq, NULL);
}
irq_set_chained_handler(asic->irq_nr, NULL);
}
/* GPIOs */
static int asic3_gpio_direction(struct gpio_chip *chip,
unsigned offset, int out)
{
u32 mask = ASIC3_GPIO_TO_MASK(offset), out_reg;
unsigned int gpio_base;
unsigned long flags;
struct asic3 *asic;
asic = container_of(chip, struct asic3, gpio);
gpio_base = ASIC3_GPIO_TO_BASE(offset);
if (gpio_base > ASIC3_GPIO_D_BASE) {
dev_err(asic->dev, "Invalid base (0x%x) for gpio %d\n",
gpio_base, offset);
return -EINVAL;
}
spin_lock_irqsave(&asic->lock, flags);
out_reg = asic3_read_register(asic, gpio_base + ASIC3_GPIO_DIRECTION);
/* Input is 0, Output is 1 */
if (out)
out_reg |= mask;
else
out_reg &= ~mask;
asic3_write_register(asic, gpio_base + ASIC3_GPIO_DIRECTION, out_reg);
spin_unlock_irqrestore(&asic->lock, flags);
return 0;
}
static int asic3_gpio_direction_input(struct gpio_chip *chip,
unsigned offset)
{
return asic3_gpio_direction(chip, offset, 0);
}
static int asic3_gpio_direction_output(struct gpio_chip *chip,
unsigned offset, int value)
{
return asic3_gpio_direction(chip, offset, 1);
}
static int asic3_gpio_get(struct gpio_chip *chip,
unsigned offset)
{
unsigned int gpio_base;
u32 mask = ASIC3_GPIO_TO_MASK(offset);
struct asic3 *asic;
asic = container_of(chip, struct asic3, gpio);
gpio_base = ASIC3_GPIO_TO_BASE(offset);
if (gpio_base > ASIC3_GPIO_D_BASE) {
dev_err(asic->dev, "Invalid base (0x%x) for gpio %d\n",
gpio_base, offset);
return -EINVAL;
}
return asic3_read_register(asic, gpio_base + ASIC3_GPIO_STATUS) & mask;
}
static void asic3_gpio_set(struct gpio_chip *chip,
unsigned offset, int value)
{
u32 mask, out_reg;
unsigned int gpio_base;
unsigned long flags;
struct asic3 *asic;
asic = container_of(chip, struct asic3, gpio);
gpio_base = ASIC3_GPIO_TO_BASE(offset);
if (gpio_base > ASIC3_GPIO_D_BASE) {
dev_err(asic->dev, "Invalid base (0x%x) for gpio %d\n",
gpio_base, offset);
return;
}
mask = ASIC3_GPIO_TO_MASK(offset);
spin_lock_irqsave(&asic->lock, flags);
out_reg = asic3_read_register(asic, gpio_base + ASIC3_GPIO_OUT);
if (value)
out_reg |= mask;
else
out_reg &= ~mask;
asic3_write_register(asic, gpio_base + ASIC3_GPIO_OUT, out_reg);
spin_unlock_irqrestore(&asic->lock, flags);
return;
}
static int asic3_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
struct asic3 *asic = container_of(chip, struct asic3, gpio);
return (offset < ASIC3_NUM_GPIOS) ? asic->irq_base + offset : -ENXIO;
}
static __init int asic3_gpio_probe(struct platform_device *pdev,
u16 *gpio_config, int num)
{
struct asic3 *asic = platform_get_drvdata(pdev);
u16 alt_reg[ASIC3_NUM_GPIO_BANKS];
u16 out_reg[ASIC3_NUM_GPIO_BANKS];
u16 dir_reg[ASIC3_NUM_GPIO_BANKS];
int i;
memset(alt_reg, 0, ASIC3_NUM_GPIO_BANKS * sizeof(u16));
memset(out_reg, 0, ASIC3_NUM_GPIO_BANKS * sizeof(u16));
memset(dir_reg, 0, ASIC3_NUM_GPIO_BANKS * sizeof(u16));
/* Enable all GPIOs */
asic3_write_register(asic, ASIC3_GPIO_OFFSET(A, MASK), 0xffff);
asic3_write_register(asic, ASIC3_GPIO_OFFSET(B, MASK), 0xffff);
asic3_write_register(asic, ASIC3_GPIO_OFFSET(C, MASK), 0xffff);
asic3_write_register(asic, ASIC3_GPIO_OFFSET(D, MASK), 0xffff);
for (i = 0; i < num; i++) {
u8 alt, pin, dir, init, bank_num, bit_num;
u16 config = gpio_config[i];
pin = ASIC3_CONFIG_GPIO_PIN(config);
alt = ASIC3_CONFIG_GPIO_ALT(config);
dir = ASIC3_CONFIG_GPIO_DIR(config);
init = ASIC3_CONFIG_GPIO_INIT(config);
bank_num = ASIC3_GPIO_TO_BANK(pin);
bit_num = ASIC3_GPIO_TO_BIT(pin);
alt_reg[bank_num] |= (alt << bit_num);
out_reg[bank_num] |= (init << bit_num);
dir_reg[bank_num] |= (dir << bit_num);
}
for (i = 0; i < ASIC3_NUM_GPIO_BANKS; i++) {
asic3_write_register(asic,
ASIC3_BANK_TO_BASE(i) +
ASIC3_GPIO_DIRECTION,
dir_reg[i]);
asic3_write_register(asic,
ASIC3_BANK_TO_BASE(i) + ASIC3_GPIO_OUT,
out_reg[i]);
asic3_write_register(asic,
ASIC3_BANK_TO_BASE(i) +
ASIC3_GPIO_ALT_FUNCTION,
alt_reg[i]);
}
return gpiochip_add(&asic->gpio);
}
static int asic3_gpio_remove(struct platform_device *pdev)
{
struct asic3 *asic = platform_get_drvdata(pdev);
return gpiochip_remove(&asic->gpio);
}
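/*
 * Clock helpers. asic3_clk_enable()/asic3_clk_disable() are refcounted:
 * the CDEX register bit is only touched on the 0 -> 1 and 1 -> 0
 * transitions, always under asic->lock, so a cell that enables a clock
 * twice must balance it with two disables before the clock is gated again.
 */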
static void asic3_clk_enable(struct asic3 *asic, struct asic3_clk *clk)
{
unsigned long flags;
u32 cdex;
spin_lock_irqsave(&asic->lock, flags);
if (clk->enabled++ == 0) {
cdex = asic3_read_register(asic, ASIC3_OFFSET(CLOCK, CDEX));
cdex |= clk->cdex;
asic3_write_register(asic, ASIC3_OFFSET(CLOCK, CDEX), cdex);
}
spin_unlock_irqrestore(&asic->lock, flags);
}
static void asic3_clk_disable(struct asic3 *asic, struct asic3_clk *clk)
{
unsigned long flags;
u32 cdex;
WARN_ON(clk->enabled == 0);
spin_lock_irqsave(&asic->lock, flags);
if (--clk->enabled == 0) {
cdex = asic3_read_register(asic, ASIC3_OFFSET(CLOCK, CDEX));
cdex &= ~clk->cdex;
asic3_write_register(asic, ASIC3_OFFSET(CLOCK, CDEX), cdex);
}
spin_unlock_irqrestore(&asic->lock, flags);
}
/* MFD cells (SPI, PWM, LED, DS1WM, MMC) */
static struct ds1wm_driver_data ds1wm_pdata = {
.active_high = 1,
.reset_recover_delay = 1,
};
static struct resource ds1wm_resources[] = {
{
.start = ASIC3_OWM_BASE,
.end = ASIC3_OWM_BASE + 0x13,
.flags = IORESOURCE_MEM,
},
{
.start = ASIC3_IRQ_OWM,
.end = ASIC3_IRQ_OWM,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
},
};
static int ds1wm_enable(struct platform_device *pdev)
{
struct asic3 *asic = dev_get_drvdata(pdev->dev.parent);
/* Turn on external clocks and the OWM clock */
asic3_clk_enable(asic, &asic->clocks[ASIC3_CLOCK_EX0]);
asic3_clk_enable(asic, &asic->clocks[ASIC3_CLOCK_EX1]);
asic3_clk_enable(asic, &asic->clocks[ASIC3_CLOCK_OWM]);
msleep(1);
/* Reset and enable DS1WM */
asic3_set_register(asic, ASIC3_OFFSET(EXTCF, RESET),
ASIC3_EXTCF_OWM_RESET, 1);
msleep(1);
asic3_set_register(asic, ASIC3_OFFSET(EXTCF, RESET),
ASIC3_EXTCF_OWM_RESET, 0);
msleep(1);
asic3_set_register(asic, ASIC3_OFFSET(EXTCF, SELECT),
ASIC3_EXTCF_OWM_EN, 1);
msleep(1);
return 0;
}
static int ds1wm_disable(struct platform_device *pdev)
{
struct asic3 *asic = dev_get_drvdata(pdev->dev.parent);
asic3_set_register(asic, ASIC3_OFFSET(EXTCF, SELECT),
ASIC3_EXTCF_OWM_EN, 0);
asic3_clk_disable(asic, &asic->clocks[ASIC3_CLOCK_OWM]);
asic3_clk_disable(asic, &asic->clocks[ASIC3_CLOCK_EX0]);
asic3_clk_disable(asic, &asic->clocks[ASIC3_CLOCK_EX1]);
return 0;
}
static struct mfd_cell asic3_cell_ds1wm = {
.name = "ds1wm",
.enable = ds1wm_enable,
.disable = ds1wm_disable,
.platform_data = &ds1wm_pdata,
.pdata_size = sizeof(ds1wm_pdata),
.num_resources = ARRAY_SIZE(ds1wm_resources),
.resources = ds1wm_resources,
};
static void asic3_mmc_pwr(struct platform_device *pdev, int state)
{
struct asic3 *asic = dev_get_drvdata(pdev->dev.parent);
tmio_core_mmc_pwr(asic->tmio_cnf, 1 - asic->bus_shift, state);
}
static void asic3_mmc_clk_div(struct platform_device *pdev, int state)
{
struct asic3 *asic = dev_get_drvdata(pdev->dev.parent);
tmio_core_mmc_clk_div(asic->tmio_cnf, 1 - asic->bus_shift, state);
}
static struct tmio_mmc_data asic3_mmc_data = {
.hclk = 24576000,
.set_pwr = asic3_mmc_pwr,
.set_clk_div = asic3_mmc_clk_div,
};
static struct resource asic3_mmc_resources[] = {
{
.start = ASIC3_SD_CTRL_BASE,
.end = ASIC3_SD_CTRL_BASE + 0x3ff,
.flags = IORESOURCE_MEM,
},
{
.start = 0,
.end = 0,
.flags = IORESOURCE_IRQ,
},
};
static int asic3_mmc_enable(struct platform_device *pdev)
{
struct asic3 *asic = dev_get_drvdata(pdev->dev.parent);
/* Not sure if it must be done bit by bit, but leaving as-is */
asic3_set_register(asic, ASIC3_OFFSET(SDHWCTRL, SDCONF),
ASIC3_SDHWCTRL_LEVCD, 1);
asic3_set_register(asic, ASIC3_OFFSET(SDHWCTRL, SDCONF),
ASIC3_SDHWCTRL_LEVWP, 1);
asic3_set_register(asic, ASIC3_OFFSET(SDHWCTRL, SDCONF),
ASIC3_SDHWCTRL_SUSPEND, 0);
asic3_set_register(asic, ASIC3_OFFSET(SDHWCTRL, SDCONF),
ASIC3_SDHWCTRL_PCLR, 0);
asic3_clk_enable(asic, &asic->clocks[ASIC3_CLOCK_EX0]);
/* CLK32 used for card detection and for interrupt detection
* when HCLK is stopped.
*/
asic3_clk_enable(asic, &asic->clocks[ASIC3_CLOCK_EX1]);
msleep(1);
/* HCLK 24.576 MHz, BCLK 12.288 MHz: */
asic3_write_register(asic, ASIC3_OFFSET(CLOCK, SEL),
CLOCK_SEL_CX | CLOCK_SEL_SD_HCLK_SEL);
asic3_clk_enable(asic, &asic->clocks[ASIC3_CLOCK_SD_HOST]);
asic3_clk_enable(asic, &asic->clocks[ASIC3_CLOCK_SD_BUS]);
msleep(1);
asic3_set_register(asic, ASIC3_OFFSET(EXTCF, SELECT),
ASIC3_EXTCF_SD_MEM_ENABLE, 1);
/* Enable SD card slot 3.3V power supply */
asic3_set_register(asic, ASIC3_OFFSET(SDHWCTRL, SDCONF),
ASIC3_SDHWCTRL_SDPWR, 1);
/* ASIC3_SD_CTRL_BASE assumes 32-bit addressing, TMIO is 16-bit */
tmio_core_mmc_enable(asic->tmio_cnf, 1 - asic->bus_shift,
ASIC3_SD_CTRL_BASE >> 1);
return 0;
}
static int asic3_mmc_disable(struct platform_device *pdev)
{
struct asic3 *asic = dev_get_drvdata(pdev->dev.parent);
/* Put in suspend mode */
asic3_set_register(asic, ASIC3_OFFSET(SDHWCTRL, SDCONF),
ASIC3_SDHWCTRL_SUSPEND, 1);
/* Disable clocks */
asic3_clk_disable(asic, &asic->clocks[ASIC3_CLOCK_SD_HOST]);
asic3_clk_disable(asic, &asic->clocks[ASIC3_CLOCK_SD_BUS]);
asic3_clk_disable(asic, &asic->clocks[ASIC3_CLOCK_EX0]);
asic3_clk_disable(asic, &asic->clocks[ASIC3_CLOCK_EX1]);
return 0;
}
static struct mfd_cell asic3_cell_mmc = {
.name = "tmio-mmc",
.enable = asic3_mmc_enable,
.disable = asic3_mmc_disable,
.suspend = asic3_mmc_disable,
.resume = asic3_mmc_enable,
.platform_data = &asic3_mmc_data,
.pdata_size = sizeof(asic3_mmc_data),
.num_resources = ARRAY_SIZE(asic3_mmc_resources),
.resources = asic3_mmc_resources,
};
static const int clock_ledn[ASIC3_NUM_LEDS] = {
[0] = ASIC3_CLOCK_LED0,
[1] = ASIC3_CLOCK_LED1,
[2] = ASIC3_CLOCK_LED2,
};
static int asic3_leds_enable(struct platform_device *pdev)
{
const struct mfd_cell *cell = mfd_get_cell(pdev);
struct asic3 *asic = dev_get_drvdata(pdev->dev.parent);
asic3_clk_enable(asic, &asic->clocks[clock_ledn[cell->id]]);
return 0;
}
static int asic3_leds_disable(struct platform_device *pdev)
{
const struct mfd_cell *cell = mfd_get_cell(pdev);
struct asic3 *asic = dev_get_drvdata(pdev->dev.parent);
asic3_clk_disable(asic, &asic->clocks[clock_ledn[cell->id]]);
return 0;
}
static int asic3_leds_suspend(struct platform_device *pdev)
{
const struct mfd_cell *cell = mfd_get_cell(pdev);
struct asic3 *asic = dev_get_drvdata(pdev->dev.parent);
while (asic3_gpio_get(&asic->gpio, ASIC3_GPIO(C, cell->id)) != 0)
msleep(1);
asic3_clk_disable(asic, &asic->clocks[clock_ledn[cell->id]]);
return 0;
}
static struct mfd_cell asic3_cell_leds[ASIC3_NUM_LEDS] = {
[0] = {
.name = "leds-asic3",
.id = 0,
.enable = asic3_leds_enable,
.disable = asic3_leds_disable,
.suspend = asic3_leds_suspend,
.resume = asic3_leds_enable,
},
[1] = {
.name = "leds-asic3",
.id = 1,
.enable = asic3_leds_enable,
.disable = asic3_leds_disable,
.suspend = asic3_leds_suspend,
.resume = asic3_leds_enable,
},
[2] = {
.name = "leds-asic3",
.id = 2,
.enable = asic3_leds_enable,
.disable = asic3_leds_disable,
.suspend = asic3_leds_suspend,
.resume = asic3_leds_enable,
},
};
static int __init asic3_mfd_probe(struct platform_device *pdev,
struct asic3_platform_data *pdata,
struct resource *mem)
{
struct asic3 *asic = platform_get_drvdata(pdev);
struct resource *mem_sdio;
int irq, ret;
mem_sdio = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!mem_sdio)
dev_dbg(asic->dev, "no SDIO MEM resource\n");
irq = platform_get_irq(pdev, 1);
if (irq < 0)
dev_dbg(asic->dev, "no SDIO IRQ resource\n");
/* DS1WM */
asic3_set_register(asic, ASIC3_OFFSET(EXTCF, SELECT),
ASIC3_EXTCF_OWM_SMB, 0);
ds1wm_resources[0].start >>= asic->bus_shift;
ds1wm_resources[0].end >>= asic->bus_shift;
/* MMC */
if (mem_sdio) {
asic->tmio_cnf = ioremap((ASIC3_SD_CONFIG_BASE >> asic->bus_shift) +
mem_sdio->start,
ASIC3_SD_CONFIG_SIZE >> asic->bus_shift);
if (!asic->tmio_cnf) {
ret = -ENOMEM;
dev_dbg(asic->dev, "Couldn't ioremap SD_CONFIG\n");
goto out;
}
}
asic3_mmc_resources[0].start >>= asic->bus_shift;
asic3_mmc_resources[0].end >>= asic->bus_shift;
ret = mfd_add_devices(&pdev->dev, pdev->id,
&asic3_cell_ds1wm, 1, mem, asic->irq_base);
if (ret < 0)
goto out;
if (mem_sdio && (irq >= 0)) {
ret = mfd_add_devices(&pdev->dev, pdev->id,
&asic3_cell_mmc, 1, mem_sdio, irq);
if (ret < 0)
goto out;
}
if (pdata->leds) {
int i;
for (i = 0; i < ASIC3_NUM_LEDS; ++i) {
asic3_cell_leds[i].platform_data = &pdata->leds[i];
asic3_cell_leds[i].pdata_size = sizeof(pdata->leds[i]);
}
ret = mfd_add_devices(&pdev->dev, 0,
asic3_cell_leds, ASIC3_NUM_LEDS, NULL, 0);
}
out:
return ret;
}
static void asic3_mfd_remove(struct platform_device *pdev)
{
struct asic3 *asic = platform_get_drvdata(pdev);
mfd_remove_devices(&pdev->dev);
iounmap(asic->tmio_cnf);
}
/* Core */
static int __init asic3_probe(struct platform_device *pdev)
{
struct asic3_platform_data *pdata = pdev->dev.platform_data;
struct asic3 *asic;
struct resource *mem;
unsigned long clksel;
int ret = 0;
asic = kzalloc(sizeof(struct asic3), GFP_KERNEL);
if (asic == NULL) {
printk(KERN_ERR "kzalloc failed\n");
return -ENOMEM;
}
spin_lock_init(&asic->lock);
platform_set_drvdata(pdev, asic);
asic->dev = &pdev->dev;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem) {
ret = -ENOMEM;
dev_err(asic->dev, "no MEM resource\n");
goto out_free;
}
asic->mapping = ioremap(mem->start, resource_size(mem));
if (!asic->mapping) {
ret = -ENOMEM;
dev_err(asic->dev, "Couldn't ioremap\n");
goto out_free;
}
asic->irq_base = pdata->irq_base;
/* calculate bus shift from mem resource */
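/* Illustrative arithmetic (window sizes are examples, not from a
 * datasheet): an 8 KiB window gives resource_size(mem) >> 12 == 2 and
 * hence bus_shift 0, while a 4 KiB window gives bus_shift 1, halving
 * every register offset used below.
 */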
asic->bus_shift = 2 - (resource_size(mem) >> 12);
clksel = 0;
asic3_write_register(asic, ASIC3_OFFSET(CLOCK, SEL), clksel);
ret = asic3_irq_probe(pdev);
if (ret < 0) {
dev_err(asic->dev, "Couldn't probe IRQs\n");
goto out_unmap;
}
asic->gpio.label = "asic3";
asic->gpio.base = pdata->gpio_base;
asic->gpio.ngpio = ASIC3_NUM_GPIOS;
asic->gpio.get = asic3_gpio_get;
asic->gpio.set = asic3_gpio_set;
asic->gpio.direction_input = asic3_gpio_direction_input;
asic->gpio.direction_output = asic3_gpio_direction_output;
asic->gpio.to_irq = asic3_gpio_to_irq;
ret = asic3_gpio_probe(pdev,
pdata->gpio_config,
pdata->gpio_config_num);
if (ret < 0) {
dev_err(asic->dev, "GPIO probe failed\n");
goto out_irq;
}
/* Making a per-device copy is only needed for the
* theoretical case of multiple ASIC3s on one board:
*/
memcpy(asic->clocks, asic3_clk_init, sizeof(asic3_clk_init));
asic3_mfd_probe(pdev, pdata, mem);
dev_info(asic->dev, "ASIC3 Core driver\n");
return 0;
out_irq:
asic3_irq_remove(pdev);
out_unmap:
iounmap(asic->mapping);
out_free:
kfree(asic);
return ret;
}
static int __devexit asic3_remove(struct platform_device *pdev)
{
int ret;
struct asic3 *asic = platform_get_drvdata(pdev);
asic3_mfd_remove(pdev);
ret = asic3_gpio_remove(pdev);
if (ret < 0)
return ret;
asic3_irq_remove(pdev);
asic3_write_register(asic, ASIC3_OFFSET(CLOCK, SEL), 0);
iounmap(asic->mapping);
kfree(asic);
return 0;
}
static void asic3_shutdown(struct platform_device *pdev)
{
}
static struct platform_driver asic3_device_driver = {
.driver = {
.name = "asic3",
},
.remove = __devexit_p(asic3_remove),
.shutdown = asic3_shutdown,
};
static int __init asic3_init(void)
{
int retval = 0;
retval = platform_driver_probe(&asic3_device_driver, asic3_probe);
return retval;
}
subsys_initcall(asic3_init);
| gpl-2.0 |
tobigun/samsung-kernel-smg800h | drivers/video/omap2/dss/dss.c | 4787 | 18698 | /*
* linux/drivers/video/omap2/dss/dss.c
*
* Copyright (C) 2009 Nokia Corporation
* Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
*
* Some code and ideas taken from drivers/video/omap/ driver
* by Imre Deak.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define DSS_SUBSYS_NAME "DSS"
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <video/omapdss.h>
#include <plat/cpu.h>
#include <plat/clock.h>
#include "dss.h"
#include "dss_features.h"
#define DSS_SZ_REGS SZ_512
struct dss_reg {
u16 idx;
};
#define DSS_REG(idx) ((const struct dss_reg) { idx })
#define DSS_REVISION DSS_REG(0x0000)
#define DSS_SYSCONFIG DSS_REG(0x0010)
#define DSS_SYSSTATUS DSS_REG(0x0014)
#define DSS_CONTROL DSS_REG(0x0040)
#define DSS_SDI_CONTROL DSS_REG(0x0044)
#define DSS_PLL_CONTROL DSS_REG(0x0048)
#define DSS_SDI_STATUS DSS_REG(0x005C)
#define REG_GET(idx, start, end) \
FLD_GET(dss_read_reg(idx), start, end)
#define REG_FLD_MOD(idx, val, start, end) \
dss_write_reg(idx, FLD_MOD(dss_read_reg(idx), val, start, end))
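/* For illustration: REG_FLD_MOD(DSS_CONTROL, 1, 4, 4) read-modify-writes
 * DSS_CONTROL so that only bit 4 changes, as used for the VENC DAC
 * enable in omap_dsshw_probe() below.
 */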
static struct {
struct platform_device *pdev;
void __iomem *base;
struct clk *dpll4_m4_ck;
struct clk *dss_clk;
unsigned long cache_req_pck;
unsigned long cache_prate;
struct dss_clock_info cache_dss_cinfo;
struct dispc_clock_info cache_dispc_cinfo;
enum omap_dss_clk_source dsi_clk_source[MAX_NUM_DSI];
enum omap_dss_clk_source dispc_clk_source;
enum omap_dss_clk_source lcd_clk_source[MAX_DSS_LCD_MANAGERS];
bool ctx_valid;
u32 ctx[DSS_SZ_REGS / sizeof(u32)];
} dss;
static const char * const dss_generic_clk_source_names[] = {
[OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "DSI_PLL_HSDIV_DISPC",
[OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "DSI_PLL_HSDIV_DSI",
[OMAP_DSS_CLK_SRC_FCK] = "DSS_FCK",
};
static inline void dss_write_reg(const struct dss_reg idx, u32 val)
{
__raw_writel(val, dss.base + idx.idx);
}
static inline u32 dss_read_reg(const struct dss_reg idx)
{
return __raw_readl(dss.base + idx.idx);
}
#define SR(reg) \
dss.ctx[(DSS_##reg).idx / sizeof(u32)] = dss_read_reg(DSS_##reg)
#define RR(reg) \
dss_write_reg(DSS_##reg, dss.ctx[(DSS_##reg).idx / sizeof(u32)])
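/* SR() snapshots a DSS register into the dss.ctx[] shadow array (indexed
 * by register offset) and RR() writes the shadow copy back; the pair is
 * used by the runtime-PM save/restore paths and #undef'd right after.
 */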
static void dss_save_context(void)
{
DSSDBG("dss_save_context\n");
SR(CONTROL);
if (dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_LCD) &
OMAP_DISPLAY_TYPE_SDI) {
SR(SDI_CONTROL);
SR(PLL_CONTROL);
}
dss.ctx_valid = true;
DSSDBG("context saved\n");
}
static void dss_restore_context(void)
{
DSSDBG("dss_restore_context\n");
if (!dss.ctx_valid)
return;
RR(CONTROL);
if (dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_LCD) &
OMAP_DISPLAY_TYPE_SDI) {
RR(SDI_CONTROL);
RR(PLL_CONTROL);
}
DSSDBG("context restored\n");
}
#undef SR
#undef RR
void dss_sdi_init(u8 datapairs)
{
u32 l;
BUG_ON(datapairs > 3 || datapairs < 1);
l = dss_read_reg(DSS_SDI_CONTROL);
l = FLD_MOD(l, 0xf, 19, 15); /* SDI_PDIV */
l = FLD_MOD(l, datapairs-1, 3, 2); /* SDI_PRSEL */
l = FLD_MOD(l, 2, 1, 0); /* SDI_BWSEL */
dss_write_reg(DSS_SDI_CONTROL, l);
l = dss_read_reg(DSS_PLL_CONTROL);
l = FLD_MOD(l, 0x7, 25, 22); /* SDI_PLL_FREQSEL */
l = FLD_MOD(l, 0xb, 16, 11); /* SDI_PLL_REGN */
l = FLD_MOD(l, 0xb4, 10, 1); /* SDI_PLL_REGM */
dss_write_reg(DSS_PLL_CONTROL, l);
}
int dss_sdi_enable(void)
{
unsigned long timeout;
dispc_pck_free_enable(1);
/* Reset SDI PLL */
REG_FLD_MOD(DSS_PLL_CONTROL, 1, 18, 18); /* SDI_PLL_SYSRESET */
udelay(1); /* wait 2x PCLK */
/* Lock SDI PLL */
REG_FLD_MOD(DSS_PLL_CONTROL, 1, 28, 28); /* SDI_PLL_GOBIT */
/* Waiting for PLL lock request to complete */
timeout = jiffies + msecs_to_jiffies(500);
while (dss_read_reg(DSS_SDI_STATUS) & (1 << 6)) {
if (time_after_eq(jiffies, timeout)) {
DSSERR("PLL lock request timed out\n");
goto err1;
}
}
/* Clearing PLL_GO bit */
REG_FLD_MOD(DSS_PLL_CONTROL, 0, 28, 28);
/* Waiting for PLL to lock */
timeout = jiffies + msecs_to_jiffies(500);
while (!(dss_read_reg(DSS_SDI_STATUS) & (1 << 5))) {
if (time_after_eq(jiffies, timeout)) {
DSSERR("PLL lock timed out\n");
goto err1;
}
}
dispc_lcd_enable_signal(1);
/* Waiting for SDI reset to complete */
timeout = jiffies + msecs_to_jiffies(500);
while (!(dss_read_reg(DSS_SDI_STATUS) & (1 << 2))) {
if (time_after_eq(jiffies, timeout)) {
DSSERR("SDI reset timed out\n");
goto err2;
}
}
return 0;
err2:
dispc_lcd_enable_signal(0);
err1:
/* Reset SDI PLL */
REG_FLD_MOD(DSS_PLL_CONTROL, 0, 18, 18); /* SDI_PLL_SYSRESET */
dispc_pck_free_enable(0);
return -ETIMEDOUT;
}
void dss_sdi_disable(void)
{
dispc_lcd_enable_signal(0);
dispc_pck_free_enable(0);
/* Reset SDI PLL */
REG_FLD_MOD(DSS_PLL_CONTROL, 0, 18, 18); /* SDI_PLL_SYSRESET */
}
const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src)
{
return dss_generic_clk_source_names[clk_src];
}
void dss_dump_clocks(struct seq_file *s)
{
unsigned long dpll4_ck_rate;
unsigned long dpll4_m4_ck_rate;
const char *fclk_name, *fclk_real_name;
unsigned long fclk_rate;
if (dss_runtime_get())
return;
seq_printf(s, "- DSS -\n");
fclk_name = dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_FCK);
fclk_real_name = dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_FCK);
fclk_rate = clk_get_rate(dss.dss_clk);
if (dss.dpll4_m4_ck) {
dpll4_ck_rate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
dpll4_m4_ck_rate = clk_get_rate(dss.dpll4_m4_ck);
seq_printf(s, "dpll4_ck %lu\n", dpll4_ck_rate);
if (cpu_is_omap3630() || cpu_is_omap44xx())
seq_printf(s, "%s (%s) = %lu / %lu = %lu\n",
fclk_name, fclk_real_name,
dpll4_ck_rate,
dpll4_ck_rate / dpll4_m4_ck_rate,
fclk_rate);
else
seq_printf(s, "%s (%s) = %lu / %lu * 2 = %lu\n",
fclk_name, fclk_real_name,
dpll4_ck_rate,
dpll4_ck_rate / dpll4_m4_ck_rate,
fclk_rate);
} else {
seq_printf(s, "%s (%s) = %lu\n",
fclk_name, fclk_real_name,
fclk_rate);
}
dss_runtime_put();
}
void dss_dump_regs(struct seq_file *s)
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dss_read_reg(r))
if (dss_runtime_get())
return;
DUMPREG(DSS_REVISION);
DUMPREG(DSS_SYSCONFIG);
DUMPREG(DSS_SYSSTATUS);
DUMPREG(DSS_CONTROL);
if (dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_LCD) &
OMAP_DISPLAY_TYPE_SDI) {
DUMPREG(DSS_SDI_CONTROL);
DUMPREG(DSS_PLL_CONTROL);
DUMPREG(DSS_SDI_STATUS);
}
dss_runtime_put();
#undef DUMPREG
}
void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src)
{
struct platform_device *dsidev;
int b;
u8 start, end;
switch (clk_src) {
case OMAP_DSS_CLK_SRC_FCK:
b = 0;
break;
case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
b = 1;
dsidev = dsi_get_dsidev_from_id(0);
dsi_wait_pll_hsdiv_dispc_active(dsidev);
break;
case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC:
b = 2;
dsidev = dsi_get_dsidev_from_id(1);
dsi_wait_pll_hsdiv_dispc_active(dsidev);
break;
default:
BUG();
}
dss_feat_get_reg_field(FEAT_REG_DISPC_CLK_SWITCH, &start, &end);
REG_FLD_MOD(DSS_CONTROL, b, start, end); /* DISPC_CLK_SWITCH */
dss.dispc_clk_source = clk_src;
}
void dss_select_dsi_clk_source(int dsi_module,
enum omap_dss_clk_source clk_src)
{
struct platform_device *dsidev;
int b;
switch (clk_src) {
case OMAP_DSS_CLK_SRC_FCK:
b = 0;
break;
case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI:
BUG_ON(dsi_module != 0);
b = 1;
dsidev = dsi_get_dsidev_from_id(0);
dsi_wait_pll_hsdiv_dsi_active(dsidev);
break;
case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI:
BUG_ON(dsi_module != 1);
b = 1;
dsidev = dsi_get_dsidev_from_id(1);
dsi_wait_pll_hsdiv_dsi_active(dsidev);
break;
default:
BUG();
}
REG_FLD_MOD(DSS_CONTROL, b, 1, 1); /* DSI_CLK_SWITCH */
dss.dsi_clk_source[dsi_module] = clk_src;
}
void dss_select_lcd_clk_source(enum omap_channel channel,
enum omap_dss_clk_source clk_src)
{
struct platform_device *dsidev;
int b, ix, pos;
if (!dss_has_feature(FEAT_LCD_CLK_SRC))
return;
switch (clk_src) {
case OMAP_DSS_CLK_SRC_FCK:
b = 0;
break;
case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
BUG_ON(channel != OMAP_DSS_CHANNEL_LCD);
b = 1;
dsidev = dsi_get_dsidev_from_id(0);
dsi_wait_pll_hsdiv_dispc_active(dsidev);
break;
case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC:
BUG_ON(channel != OMAP_DSS_CHANNEL_LCD2);
b = 1;
dsidev = dsi_get_dsidev_from_id(1);
dsi_wait_pll_hsdiv_dispc_active(dsidev);
break;
default:
BUG();
}
pos = channel == OMAP_DSS_CHANNEL_LCD ? 0 : 12;
REG_FLD_MOD(DSS_CONTROL, b, pos, pos); /* LCDx_CLK_SWITCH */
ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 : 1;
dss.lcd_clk_source[ix] = clk_src;
}
enum omap_dss_clk_source dss_get_dispc_clk_source(void)
{
return dss.dispc_clk_source;
}
enum omap_dss_clk_source dss_get_dsi_clk_source(int dsi_module)
{
return dss.dsi_clk_source[dsi_module];
}
enum omap_dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel)
{
if (dss_has_feature(FEAT_LCD_CLK_SRC)) {
int ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 : 1;
return dss.lcd_clk_source[ix];
} else {
/* LCD_CLK source is the same as DISPC_FCLK source for
* OMAP2 and OMAP3 */
return dss.dispc_clk_source;
}
}
/* calculate clock rates using dividers in cinfo */
int dss_calc_clock_rates(struct dss_clock_info *cinfo)
{
if (dss.dpll4_m4_ck) {
unsigned long prate;
u16 fck_div_max = 16;
if (cpu_is_omap3630() || cpu_is_omap44xx())
fck_div_max = 32;
if (cinfo->fck_div > fck_div_max || cinfo->fck_div == 0)
return -EINVAL;
prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
cinfo->fck = prate / cinfo->fck_div;
} else {
if (cinfo->fck_div != 0)
return -EINVAL;
cinfo->fck = clk_get_rate(dss.dss_clk);
}
return 0;
}
int dss_set_clock_div(struct dss_clock_info *cinfo)
{
if (dss.dpll4_m4_ck) {
unsigned long prate;
int r;
prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
DSSDBG("dpll4_m4 = %ld\n", prate);
r = clk_set_rate(dss.dpll4_m4_ck, prate / cinfo->fck_div);
if (r)
return r;
} else {
if (cinfo->fck_div != 0)
return -EINVAL;
}
DSSDBG("fck = %ld (%d)\n", cinfo->fck, cinfo->fck_div);
return 0;
}
int dss_get_clock_div(struct dss_clock_info *cinfo)
{
cinfo->fck = clk_get_rate(dss.dss_clk);
if (dss.dpll4_m4_ck) {
unsigned long prate;
prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
if (cpu_is_omap3630() || cpu_is_omap44xx())
cinfo->fck_div = prate / (cinfo->fck);
else
cinfo->fck_div = prate / (cinfo->fck / 2);
} else {
cinfo->fck_div = 0;
}
return 0;
}
unsigned long dss_get_dpll4_rate(void)
{
if (dss.dpll4_m4_ck)
return clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
else
return 0;
}
int dss_calc_clock_div(bool is_tft, unsigned long req_pck,
struct dss_clock_info *dss_cinfo,
struct dispc_clock_info *dispc_cinfo)
{
unsigned long prate;
struct dss_clock_info best_dss;
struct dispc_clock_info best_dispc;
unsigned long fck, max_dss_fck;
u16 fck_div, fck_div_max = 16;
int match = 0;
int min_fck_per_pck;
prate = dss_get_dpll4_rate();
max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
fck = clk_get_rate(dss.dss_clk);
if (req_pck == dss.cache_req_pck &&
((cpu_is_omap34xx() && prate == dss.cache_prate) ||
dss.cache_dss_cinfo.fck == fck)) {
DSSDBG("dispc clock info found from cache.\n");
*dss_cinfo = dss.cache_dss_cinfo;
*dispc_cinfo = dss.cache_dispc_cinfo;
return 0;
}
min_fck_per_pck = CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK;
if (min_fck_per_pck &&
req_pck * min_fck_per_pck > max_dss_fck) {
DSSERR("Requested pixel clock not possible with the current "
"OMAP2_DSS_MIN_FCK_PER_PCK setting. Turning "
"the constraint off.\n");
min_fck_per_pck = 0;
}
retry:
memset(&best_dss, 0, sizeof(best_dss));
memset(&best_dispc, 0, sizeof(best_dispc));
if (dss.dpll4_m4_ck == NULL) {
struct dispc_clock_info cur_dispc;
/* XXX can we change the clock on omap2? */
fck = clk_get_rate(dss.dss_clk);
fck_div = 1;
dispc_find_clk_divs(is_tft, req_pck, fck, &cur_dispc);
match = 1;
best_dss.fck = fck;
best_dss.fck_div = fck_div;
best_dispc = cur_dispc;
goto found;
} else {
if (cpu_is_omap3630() || cpu_is_omap44xx())
fck_div_max = 32;
for (fck_div = fck_div_max; fck_div > 0; --fck_div) {
struct dispc_clock_info cur_dispc;
if (fck_div_max == 32)
fck = prate / fck_div;
else
fck = prate / fck_div * 2;
if (fck > max_dss_fck)
continue;
if (min_fck_per_pck &&
fck < req_pck * min_fck_per_pck)
continue;
match = 1;
dispc_find_clk_divs(is_tft, req_pck, fck, &cur_dispc);
if (abs(cur_dispc.pck - req_pck) <
abs(best_dispc.pck - req_pck)) {
best_dss.fck = fck;
best_dss.fck_div = fck_div;
best_dispc = cur_dispc;
if (cur_dispc.pck == req_pck)
goto found;
}
}
}
found:
if (!match) {
if (min_fck_per_pck) {
DSSERR("Could not find suitable clock settings.\n"
"Turning FCK/PCK constraint off and"
"trying again.\n");
min_fck_per_pck = 0;
goto retry;
}
DSSERR("Could not find suitable clock settings.\n");
return -EINVAL;
}
if (dss_cinfo)
*dss_cinfo = best_dss;
if (dispc_cinfo)
*dispc_cinfo = best_dispc;
dss.cache_req_pck = req_pck;
dss.cache_prate = prate;
dss.cache_dss_cinfo = best_dss;
dss.cache_dispc_cinfo = best_dispc;
return 0;
}
void dss_set_venc_output(enum omap_dss_venc_type type)
{
int l = 0;
if (type == OMAP_DSS_VENC_TYPE_COMPOSITE)
l = 0;
else if (type == OMAP_DSS_VENC_TYPE_SVIDEO)
l = 1;
else
BUG();
/* venc out selection. 0 = comp, 1 = svideo */
REG_FLD_MOD(DSS_CONTROL, l, 6, 6);
}
void dss_set_dac_pwrdn_bgz(bool enable)
{
REG_FLD_MOD(DSS_CONTROL, enable, 5, 5); /* DAC Power-Down Control */
}
void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select hdmi)
{
REG_FLD_MOD(DSS_CONTROL, hdmi, 15, 15); /* VENC_HDMI_SWITCH */
}
enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void)
{
enum omap_display_type displays;
displays = dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_DIGIT);
if ((displays & OMAP_DISPLAY_TYPE_HDMI) == 0)
return DSS_VENC_TV_CLK;
return REG_GET(DSS_CONTROL, 15, 15);
}
static int dss_get_clocks(void)
{
struct clk *clk;
int r;
clk = clk_get(&dss.pdev->dev, "fck");
if (IS_ERR(clk)) {
DSSERR("can't get clock fck\n");
r = PTR_ERR(clk);
goto err;
}
dss.dss_clk = clk;
if (cpu_is_omap34xx()) {
clk = clk_get(NULL, "dpll4_m4_ck");
if (IS_ERR(clk)) {
DSSERR("Failed to get dpll4_m4_ck\n");
r = PTR_ERR(clk);
goto err;
}
} else if (cpu_is_omap44xx()) {
clk = clk_get(NULL, "dpll_per_m5x2_ck");
if (IS_ERR(clk)) {
DSSERR("Failed to get dpll_per_m5x2_ck\n");
r = PTR_ERR(clk);
goto err;
}
} else { /* omap24xx */
clk = NULL;
}
dss.dpll4_m4_ck = clk;
return 0;
err:
if (dss.dss_clk)
clk_put(dss.dss_clk);
if (dss.dpll4_m4_ck)
clk_put(dss.dpll4_m4_ck);
return r;
}
static void dss_put_clocks(void)
{
if (dss.dpll4_m4_ck)
clk_put(dss.dpll4_m4_ck);
clk_put(dss.dss_clk);
}
int dss_runtime_get(void)
{
int r;
DSSDBG("dss_runtime_get\n");
r = pm_runtime_get_sync(&dss.pdev->dev);
WARN_ON(r < 0);
return r < 0 ? r : 0;
}
void dss_runtime_put(void)
{
int r;
DSSDBG("dss_runtime_put\n");
r = pm_runtime_put_sync(&dss.pdev->dev);
WARN_ON(r < 0);
}
/* DEBUGFS */
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
void dss_debug_dump_clocks(struct seq_file *s)
{
dss_dump_clocks(s);
dispc_dump_clocks(s);
#ifdef CONFIG_OMAP2_DSS_DSI
dsi_dump_clocks(s);
#endif
}
#endif
/* DSS HW IP initialisation */
static int omap_dsshw_probe(struct platform_device *pdev)
{
struct resource *dss_mem;
u32 rev;
int r;
dss.pdev = pdev;
dss_mem = platform_get_resource(dss.pdev, IORESOURCE_MEM, 0);
if (!dss_mem) {
DSSERR("can't get IORESOURCE_MEM DSS\n");
return -EINVAL;
}
dss.base = devm_ioremap(&pdev->dev, dss_mem->start,
resource_size(dss_mem));
if (!dss.base) {
DSSERR("can't ioremap DSS\n");
return -ENOMEM;
}
r = dss_get_clocks();
if (r)
return r;
pm_runtime_enable(&pdev->dev);
r = dss_runtime_get();
if (r)
goto err_runtime_get;
/* Select DPLL */
REG_FLD_MOD(DSS_CONTROL, 0, 0, 0);
#ifdef CONFIG_OMAP2_DSS_VENC
REG_FLD_MOD(DSS_CONTROL, 1, 4, 4); /* venc dac demen */
REG_FLD_MOD(DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */
REG_FLD_MOD(DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */
#endif
dss.dsi_clk_source[0] = OMAP_DSS_CLK_SRC_FCK;
dss.dsi_clk_source[1] = OMAP_DSS_CLK_SRC_FCK;
dss.dispc_clk_source = OMAP_DSS_CLK_SRC_FCK;
dss.lcd_clk_source[0] = OMAP_DSS_CLK_SRC_FCK;
dss.lcd_clk_source[1] = OMAP_DSS_CLK_SRC_FCK;
r = dpi_init();
if (r) {
DSSERR("Failed to initialize DPI\n");
goto err_dpi;
}
r = sdi_init();
if (r) {
DSSERR("Failed to initialize SDI\n");
goto err_sdi;
}
rev = dss_read_reg(DSS_REVISION);
printk(KERN_INFO "OMAP DSS rev %d.%d\n",
FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
dss_runtime_put();
return 0;
err_sdi:
dpi_exit();
err_dpi:
dss_runtime_put();
err_runtime_get:
pm_runtime_disable(&pdev->dev);
dss_put_clocks();
return r;
}
static int omap_dsshw_remove(struct platform_device *pdev)
{
dpi_exit();
sdi_exit();
pm_runtime_disable(&pdev->dev);
dss_put_clocks();
return 0;
}
static int dss_runtime_suspend(struct device *dev)
{
dss_save_context();
return 0;
}
static int dss_runtime_resume(struct device *dev)
{
dss_restore_context();
return 0;
}
static const struct dev_pm_ops dss_pm_ops = {
.runtime_suspend = dss_runtime_suspend,
.runtime_resume = dss_runtime_resume,
};
static struct platform_driver omap_dsshw_driver = {
.probe = omap_dsshw_probe,
.remove = omap_dsshw_remove,
.driver = {
.name = "omapdss_dss",
.owner = THIS_MODULE,
.pm = &dss_pm_ops,
},
};
int dss_init_platform_driver(void)
{
return platform_driver_register(&omap_dsshw_driver);
}
void dss_uninit_platform_driver(void)
{
return platform_driver_unregister(&omap_dsshw_driver);
}
| gpl-2.0 |
shinobisoft/android_kernel_lge_msm8226 | fs/hpfs/inode.c | 5043 | 8772 | /*
* linux/fs/hpfs/inode.c
*
* Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
*
* inode VFS functions
*/
#include <linux/slab.h>
#include "hpfs_fn.h"
void hpfs_init_inode(struct inode *i)
{
struct super_block *sb = i->i_sb;
struct hpfs_inode_info *hpfs_inode = hpfs_i(i);
i->i_uid = hpfs_sb(sb)->sb_uid;
i->i_gid = hpfs_sb(sb)->sb_gid;
i->i_mode = hpfs_sb(sb)->sb_mode;
i->i_size = -1;
i->i_blocks = -1;
hpfs_inode->i_dno = 0;
hpfs_inode->i_n_secs = 0;
hpfs_inode->i_file_sec = 0;
hpfs_inode->i_disk_sec = 0;
hpfs_inode->i_dpos = 0;
hpfs_inode->i_dsubdno = 0;
hpfs_inode->i_ea_mode = 0;
hpfs_inode->i_ea_uid = 0;
hpfs_inode->i_ea_gid = 0;
hpfs_inode->i_ea_size = 0;
hpfs_inode->i_rddir_off = NULL;
hpfs_inode->i_dirty = 0;
i->i_ctime.tv_sec = i->i_ctime.tv_nsec = 0;
i->i_mtime.tv_sec = i->i_mtime.tv_nsec = 0;
i->i_atime.tv_sec = i->i_atime.tv_nsec = 0;
}
void hpfs_read_inode(struct inode *i)
{
struct buffer_head *bh;
struct fnode *fnode;
struct super_block *sb = i->i_sb;
struct hpfs_inode_info *hpfs_inode = hpfs_i(i);
void *ea;
int ea_size;
if (!(fnode = hpfs_map_fnode(sb, i->i_ino, &bh))) {
/*i->i_mode |= S_IFREG;
i->i_mode &= ~0111;
i->i_op = &hpfs_file_iops;
i->i_fop = &hpfs_file_ops;
clear_nlink(i);*/
make_bad_inode(i);
return;
}
if (hpfs_sb(i->i_sb)->sb_eas) {
if ((ea = hpfs_get_ea(i->i_sb, fnode, "UID", &ea_size))) {
if (ea_size == 2) {
i->i_uid = le16_to_cpu(*(__le16*)ea);
hpfs_inode->i_ea_uid = 1;
}
kfree(ea);
}
if ((ea = hpfs_get_ea(i->i_sb, fnode, "GID", &ea_size))) {
if (ea_size == 2) {
i->i_gid = le16_to_cpu(*(__le16*)ea);
hpfs_inode->i_ea_gid = 1;
}
kfree(ea);
}
if ((ea = hpfs_get_ea(i->i_sb, fnode, "SYMLINK", &ea_size))) {
kfree(ea);
i->i_mode = S_IFLNK | 0777;
i->i_op = &page_symlink_inode_operations;
i->i_data.a_ops = &hpfs_symlink_aops;
set_nlink(i, 1);
i->i_size = ea_size;
i->i_blocks = 1;
brelse(bh);
return;
}
if ((ea = hpfs_get_ea(i->i_sb, fnode, "MODE", &ea_size))) {
int rdev = 0;
umode_t mode = hpfs_sb(sb)->sb_mode;
if (ea_size == 2) {
mode = le16_to_cpu(*(__le16*)ea);
hpfs_inode->i_ea_mode = 1;
}
kfree(ea);
i->i_mode = mode;
if (S_ISBLK(mode) || S_ISCHR(mode)) {
if ((ea = hpfs_get_ea(i->i_sb, fnode, "DEV", &ea_size))) {
if (ea_size == 4)
rdev = le32_to_cpu(*(__le32*)ea);
kfree(ea);
}
}
if (S_ISBLK(mode) || S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
brelse(bh);
set_nlink(i, 1);
i->i_size = 0;
i->i_blocks = 1;
init_special_inode(i, mode,
new_decode_dev(rdev));
return;
}
}
}
if (fnode->dirflag) {
int n_dnodes, n_subdirs;
i->i_mode |= S_IFDIR;
i->i_op = &hpfs_dir_iops;
i->i_fop = &hpfs_dir_ops;
hpfs_inode->i_parent_dir = le32_to_cpu(fnode->up);
hpfs_inode->i_dno = le32_to_cpu(fnode->u.external[0].disk_secno);
if (hpfs_sb(sb)->sb_chk >= 2) {
struct buffer_head *bh0;
if (hpfs_map_fnode(sb, hpfs_inode->i_parent_dir, &bh0)) brelse(bh0);
}
n_dnodes = 0; n_subdirs = 0;
hpfs_count_dnodes(i->i_sb, hpfs_inode->i_dno, &n_dnodes, &n_subdirs, NULL);
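/* A dnode is 2048 bytes, i.e. four 512-byte blocks, hence the factors
 * below. */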
i->i_blocks = 4 * n_dnodes;
i->i_size = 2048 * n_dnodes;
set_nlink(i, 2 + n_subdirs);
} else {
i->i_mode |= S_IFREG;
if (!hpfs_inode->i_ea_mode) i->i_mode &= ~0111;
i->i_op = &hpfs_file_iops;
i->i_fop = &hpfs_file_ops;
set_nlink(i, 1);
i->i_size = le32_to_cpu(fnode->file_size);
i->i_blocks = ((i->i_size + 511) >> 9) + 1;
i->i_data.a_ops = &hpfs_aops;
hpfs_i(i)->mmu_private = i->i_size;
}
brelse(bh);
}
static void hpfs_write_inode_ea(struct inode *i, struct fnode *fnode)
{
struct hpfs_inode_info *hpfs_inode = hpfs_i(i);
/*if (le32_to_cpu(fnode->acl_size_l) || le16_to_cpu(fnode->acl_size_s)) {
Some unknown structures like ACL may be in fnode,
we'd better not overwrite them
hpfs_error(i->i_sb, "fnode %08x has some unknown HPFS386 structures", i->i_ino);
} else*/ if (hpfs_sb(i->i_sb)->sb_eas >= 2) {
__le32 ea;
if ((i->i_uid != hpfs_sb(i->i_sb)->sb_uid) || hpfs_inode->i_ea_uid) {
ea = cpu_to_le32(i->i_uid);
hpfs_set_ea(i, fnode, "UID", (char*)&ea, 2);
hpfs_inode->i_ea_uid = 1;
}
if ((i->i_gid != hpfs_sb(i->i_sb)->sb_gid) || hpfs_inode->i_ea_gid) {
ea = cpu_to_le32(i->i_gid);
hpfs_set_ea(i, fnode, "GID", (char *)&ea, 2);
hpfs_inode->i_ea_gid = 1;
}
if (!S_ISLNK(i->i_mode))
if ((i->i_mode != ((hpfs_sb(i->i_sb)->sb_mode & ~(S_ISDIR(i->i_mode) ? 0 : 0111))
| (S_ISDIR(i->i_mode) ? S_IFDIR : S_IFREG))
&& i->i_mode != ((hpfs_sb(i->i_sb)->sb_mode & ~(S_ISDIR(i->i_mode) ? 0222 : 0333))
| (S_ISDIR(i->i_mode) ? S_IFDIR : S_IFREG))) || hpfs_inode->i_ea_mode) {
ea = cpu_to_le32(i->i_mode);
/* sick, but legal */
hpfs_set_ea(i, fnode, "MODE", (char *)&ea, 2);
hpfs_inode->i_ea_mode = 1;
}
if (S_ISBLK(i->i_mode) || S_ISCHR(i->i_mode)) {
ea = cpu_to_le32(new_encode_dev(i->i_rdev));
hpfs_set_ea(i, fnode, "DEV", (char *)&ea, 4);
}
}
}
void hpfs_write_inode(struct inode *i)
{
struct hpfs_inode_info *hpfs_inode = hpfs_i(i);
struct inode *parent;
if (i->i_ino == hpfs_sb(i->i_sb)->sb_root) return;
if (hpfs_inode->i_rddir_off && !atomic_read(&i->i_count)) {
if (*hpfs_inode->i_rddir_off) printk("HPFS: write_inode: some position still there\n");
kfree(hpfs_inode->i_rddir_off);
hpfs_inode->i_rddir_off = NULL;
}
if (!i->i_nlink) {
return;
}
parent = iget_locked(i->i_sb, hpfs_inode->i_parent_dir);
if (parent) {
hpfs_inode->i_dirty = 0;
if (parent->i_state & I_NEW) {
hpfs_init_inode(parent);
hpfs_read_inode(parent);
unlock_new_inode(parent);
}
hpfs_write_inode_nolock(i);
iput(parent);
}
}
void hpfs_write_inode_nolock(struct inode *i)
{
struct hpfs_inode_info *hpfs_inode = hpfs_i(i);
struct buffer_head *bh;
struct fnode *fnode;
struct quad_buffer_head qbh;
struct hpfs_dirent *de;
if (i->i_ino == hpfs_sb(i->i_sb)->sb_root) return;
if (!(fnode = hpfs_map_fnode(i->i_sb, i->i_ino, &bh))) return;
if (i->i_ino != hpfs_sb(i->i_sb)->sb_root && i->i_nlink) {
if (!(de = map_fnode_dirent(i->i_sb, i->i_ino, fnode, &qbh))) {
brelse(bh);
return;
}
} else de = NULL;
if (S_ISREG(i->i_mode)) {
fnode->file_size = cpu_to_le32(i->i_size);
if (de) de->file_size = cpu_to_le32(i->i_size);
} else if (S_ISDIR(i->i_mode)) {
fnode->file_size = cpu_to_le32(0);
if (de) de->file_size = cpu_to_le32(0);
}
hpfs_write_inode_ea(i, fnode);
if (de) {
de->write_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_mtime.tv_sec));
de->read_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_atime.tv_sec));
de->creation_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_ctime.tv_sec));
de->read_only = !(i->i_mode & 0222);
de->ea_size = cpu_to_le32(hpfs_inode->i_ea_size);
hpfs_mark_4buffers_dirty(&qbh);
hpfs_brelse4(&qbh);
}
if (S_ISDIR(i->i_mode)) {
if ((de = map_dirent(i, hpfs_inode->i_dno, "\001\001", 2, NULL, &qbh))) {
de->write_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_mtime.tv_sec));
de->read_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_atime.tv_sec));
de->creation_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_ctime.tv_sec));
de->read_only = !(i->i_mode & 0222);
de->ea_size = cpu_to_le32(/*hpfs_inode->i_ea_size*/0);
de->file_size = cpu_to_le32(0);
hpfs_mark_4buffers_dirty(&qbh);
hpfs_brelse4(&qbh);
} else
hpfs_error(i->i_sb,
"directory %08lx doesn't have '.' entry",
(unsigned long)i->i_ino);
}
mark_buffer_dirty(bh);
brelse(bh);
}
int hpfs_setattr(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = dentry->d_inode;
int error = -EINVAL;
hpfs_lock(inode->i_sb);
if (inode->i_ino == hpfs_sb(inode->i_sb)->sb_root)
goto out_unlock;
if ((attr->ia_valid & ATTR_UID) && attr->ia_uid >= 0x10000)
goto out_unlock;
if ((attr->ia_valid & ATTR_GID) && attr->ia_gid >= 0x10000)
goto out_unlock;
if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size)
goto out_unlock;
error = inode_change_ok(inode, attr);
if (error)
goto out_unlock;
if ((attr->ia_valid & ATTR_SIZE) &&
attr->ia_size != i_size_read(inode)) {
error = vmtruncate(inode, attr->ia_size);
if (error)
goto out_unlock;
}
setattr_copy(inode, attr);
hpfs_write_inode(inode);
out_unlock:
hpfs_unlock(inode->i_sb);
return error;
}
void hpfs_write_if_changed(struct inode *inode)
{
struct hpfs_inode_info *hpfs_inode = hpfs_i(inode);
if (hpfs_inode->i_dirty)
hpfs_write_inode(inode);
}
void hpfs_evict_inode(struct inode *inode)
{
truncate_inode_pages(&inode->i_data, 0);
end_writeback(inode);
if (!inode->i_nlink) {
hpfs_lock(inode->i_sb);
hpfs_remove_fnode(inode->i_sb, inode->i_ino);
hpfs_unlock(inode->i_sb);
}
}
| gpl-2.0 |
skybosi/linux | drivers/hwmon/pmbus/ucd9000.c | 9395 | 6313 | /*
* Hardware monitoring driver for UCD90xxx Sequencer and System Health
* Controller series
*
* Copyright (C) 2011 Ericsson AB.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/i2c/pmbus.h>
#include "pmbus.h"
enum chips { ucd9000, ucd90120, ucd90124, ucd9090, ucd90910 };
#define UCD9000_MONITOR_CONFIG 0xd5
#define UCD9000_NUM_PAGES 0xd6
#define UCD9000_FAN_CONFIG_INDEX 0xe7
#define UCD9000_FAN_CONFIG 0xe8
#define UCD9000_DEVICE_ID 0xfd
#define UCD9000_MON_TYPE(x) (((x) >> 5) & 0x07)
#define UCD9000_MON_PAGE(x) ((x) & 0x0f)
#define UCD9000_MON_VOLTAGE 1
#define UCD9000_MON_TEMPERATURE 2
#define UCD9000_MON_CURRENT 3
#define UCD9000_MON_VOLTAGE_HW 4
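/* For illustration: a MONITOR_CONFIG byte of 0x22 decodes to
 * UCD9000_MON_TYPE() == UCD9000_MON_VOLTAGE on UCD9000_MON_PAGE() == 2. */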
#define UCD9000_NUM_FAN 4
struct ucd9000_data {
u8 fan_data[UCD9000_NUM_FAN][I2C_SMBUS_BLOCK_MAX];
struct pmbus_driver_info info;
};
#define to_ucd9000_data(_info) container_of(_info, struct ucd9000_data, info)
static int ucd9000_get_fan_config(struct i2c_client *client, int fan)
{
int fan_config = 0;
struct ucd9000_data *data
= to_ucd9000_data(pmbus_get_driver_info(client));
if (data->fan_data[fan][3] & 1)
fan_config |= PB_FAN_2_INSTALLED; /* Use lower bit position */
/* Pulses/revolution */
fan_config |= (data->fan_data[fan][3] & 0x06) >> 1;
return fan_config;
}
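/* PMBus packs two fans per FAN_CONFIG register: ucd9000_read_byte_data()
 * below returns fan 0 (or 2) in the high nibble and fan 1 (or 3) in the
 * low nibble of FAN_CONFIG_12 (or FAN_CONFIG_34). */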
static int ucd9000_read_byte_data(struct i2c_client *client, int page, int reg)
{
int ret = 0;
int fan_config;
switch (reg) {
case PMBUS_FAN_CONFIG_12:
if (page > 0)
return -ENXIO;
ret = ucd9000_get_fan_config(client, 0);
if (ret < 0)
return ret;
fan_config = ret << 4;
ret = ucd9000_get_fan_config(client, 1);
if (ret < 0)
return ret;
fan_config |= ret;
ret = fan_config;
break;
case PMBUS_FAN_CONFIG_34:
if (page > 0)
return -ENXIO;
ret = ucd9000_get_fan_config(client, 2);
if (ret < 0)
return ret;
fan_config = ret << 4;
ret = ucd9000_get_fan_config(client, 3);
if (ret < 0)
return ret;
fan_config |= ret;
ret = fan_config;
break;
default:
ret = -ENODATA;
break;
}
return ret;
}
static const struct i2c_device_id ucd9000_id[] = {
{"ucd9000", ucd9000},
{"ucd90120", ucd90120},
{"ucd90124", ucd90124},
{"ucd9090", ucd9090},
{"ucd90910", ucd90910},
{}
};
MODULE_DEVICE_TABLE(i2c, ucd9000_id);
static int ucd9000_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
u8 block_buffer[I2C_SMBUS_BLOCK_MAX + 1];
struct ucd9000_data *data;
struct pmbus_driver_info *info;
const struct i2c_device_id *mid;
int i, ret;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_BYTE_DATA |
I2C_FUNC_SMBUS_BLOCK_DATA))
return -ENODEV;
ret = i2c_smbus_read_block_data(client, UCD9000_DEVICE_ID,
block_buffer);
if (ret < 0) {
dev_err(&client->dev, "Failed to read device ID\n");
return ret;
}
block_buffer[ret] = '\0';
dev_info(&client->dev, "Device ID %s\n", block_buffer);
for (mid = ucd9000_id; mid->name[0]; mid++) {
if (!strncasecmp(mid->name, block_buffer, strlen(mid->name)))
break;
}
if (!mid->name[0]) {
dev_err(&client->dev, "Unsupported device\n");
return -ENODEV;
}
if (id->driver_data != ucd9000 && id->driver_data != mid->driver_data)
dev_notice(&client->dev,
"Device mismatch: Configured %s, detected %s\n",
id->name, mid->name);
data = devm_kzalloc(&client->dev, sizeof(struct ucd9000_data),
GFP_KERNEL);
if (!data)
return -ENOMEM;
info = &data->info;
ret = i2c_smbus_read_byte_data(client, UCD9000_NUM_PAGES);
if (ret < 0) {
dev_err(&client->dev,
"Failed to read number of active pages\n");
return ret;
}
info->pages = ret;
if (!info->pages) {
dev_err(&client->dev, "No pages configured\n");
return -ENODEV;
}
/* The internal temperature sensor is always active */
info->func[0] = PMBUS_HAVE_TEMP;
/* Everything else is configurable */
ret = i2c_smbus_read_block_data(client, UCD9000_MONITOR_CONFIG,
block_buffer);
if (ret <= 0) {
dev_err(&client->dev, "Failed to read configuration data\n");
return -ENODEV;
}
for (i = 0; i < ret; i++) {
int page = UCD9000_MON_PAGE(block_buffer[i]);
if (page >= info->pages)
continue;
switch (UCD9000_MON_TYPE(block_buffer[i])) {
case UCD9000_MON_VOLTAGE:
case UCD9000_MON_VOLTAGE_HW:
info->func[page] |= PMBUS_HAVE_VOUT
| PMBUS_HAVE_STATUS_VOUT;
break;
case UCD9000_MON_TEMPERATURE:
info->func[page] |= PMBUS_HAVE_TEMP2
| PMBUS_HAVE_STATUS_TEMP;
break;
case UCD9000_MON_CURRENT:
info->func[page] |= PMBUS_HAVE_IOUT
| PMBUS_HAVE_STATUS_IOUT;
break;
default:
break;
}
}
/* Fan configuration */
if (mid->driver_data == ucd90124) {
for (i = 0; i < UCD9000_NUM_FAN; i++) {
i2c_smbus_write_byte_data(client,
UCD9000_FAN_CONFIG_INDEX, i);
ret = i2c_smbus_read_block_data(client,
UCD9000_FAN_CONFIG,
data->fan_data[i]);
if (ret < 0)
return ret;
}
i2c_smbus_write_byte_data(client, UCD9000_FAN_CONFIG_INDEX, 0);
info->read_byte_data = ucd9000_read_byte_data;
info->func[0] |= PMBUS_HAVE_FAN12 | PMBUS_HAVE_STATUS_FAN12
| PMBUS_HAVE_FAN34 | PMBUS_HAVE_STATUS_FAN34;
}
return pmbus_do_probe(client, mid, info);
}
/* This is the driver that will be inserted */
static struct i2c_driver ucd9000_driver = {
.driver = {
.name = "ucd9000",
},
.probe = ucd9000_probe,
.remove = pmbus_do_remove,
.id_table = ucd9000_id,
};
module_i2c_driver(ucd9000_driver);
MODULE_AUTHOR("Guenter Roeck");
MODULE_DESCRIPTION("PMBus driver for TI UCD90xxx");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Alberto97/android_kernel_lge_dory | crypto/md5.c | 9907 | 4030 | /*
* Cryptographic API.
*
* MD5 Message Digest Algorithm (RFC1321).
*
* Derived from cryptoapi implementation, originally based on the
* public domain implementation written by Colin Plumb in 1993.
*
* Copyright (c) Cryptoapi developers.
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <crypto/internal/hash.h>
#include <crypto/md5.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/cryptohash.h>
#include <asm/byteorder.h>
/* XXX: this stuff can be optimized */
static inline void le32_to_cpu_array(u32 *buf, unsigned int words)
{
while (words--) {
__le32_to_cpus(buf);
buf++;
}
}
static inline void cpu_to_le32_array(u32 *buf, unsigned int words)
{
while (words--) {
__cpu_to_le32s(buf);
buf++;
}
}
static inline void md5_transform_helper(struct md5_state *ctx)
{
le32_to_cpu_array(ctx->block, sizeof(ctx->block) / sizeof(u32));
md5_transform(ctx->hash, ctx->block);
}
static int md5_init(struct shash_desc *desc)
{
struct md5_state *mctx = shash_desc_ctx(desc);
mctx->hash[0] = 0x67452301;
mctx->hash[1] = 0xefcdab89;
mctx->hash[2] = 0x98badcfe;
mctx->hash[3] = 0x10325476;
mctx->byte_count = 0;
return 0;
}
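/* Buffer incoming data into the 64-byte block, running the transform
 * whenever a full block is available; "avail" below is the free space
 * left in the block buffer (byte_count & 0x3f is its fill level). */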
static int md5_update(struct shash_desc *desc, const u8 *data, unsigned int len)
{
struct md5_state *mctx = shash_desc_ctx(desc);
const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f);
mctx->byte_count += len;
if (avail > len) {
memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
data, len);
return 0;
}
memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
data, avail);
md5_transform_helper(mctx);
data += avail;
len -= avail;
while (len >= sizeof(mctx->block)) {
memcpy(mctx->block, data, sizeof(mctx->block));
md5_transform_helper(mctx);
data += sizeof(mctx->block);
len -= sizeof(mctx->block);
}
memcpy(mctx->block, data, len);
return 0;
}
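/* RFC 1321 padding: append a single 0x80 byte, zero-fill up to byte 56
 * of the block (spilling into an extra block when fewer than 8 bytes
 * remain), then store the message length in bits as a little-endian
 * 64-bit value in the last two words. */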
static int md5_final(struct shash_desc *desc, u8 *out)
{
struct md5_state *mctx = shash_desc_ctx(desc);
const unsigned int offset = mctx->byte_count & 0x3f;
char *p = (char *)mctx->block + offset;
int padding = 56 - (offset + 1);
*p++ = 0x80;
if (padding < 0) {
memset(p, 0x00, padding + sizeof (u64));
md5_transform_helper(mctx);
p = (char *)mctx->block;
padding = 56;
}
memset(p, 0, padding);
mctx->block[14] = mctx->byte_count << 3;
mctx->block[15] = mctx->byte_count >> 29;
le32_to_cpu_array(mctx->block, (sizeof(mctx->block) -
sizeof(u64)) / sizeof(u32));
md5_transform(mctx->hash, mctx->block);
cpu_to_le32_array(mctx->hash, sizeof(mctx->hash) / sizeof(u32));
memcpy(out, mctx->hash, sizeof(mctx->hash));
memset(mctx, 0, sizeof(*mctx));
return 0;
}
static int md5_export(struct shash_desc *desc, void *out)
{
struct md5_state *ctx = shash_desc_ctx(desc);
memcpy(out, ctx, sizeof(*ctx));
return 0;
}
static int md5_import(struct shash_desc *desc, const void *in)
{
struct md5_state *ctx = shash_desc_ctx(desc);
memcpy(ctx, in, sizeof(*ctx));
return 0;
}
static struct shash_alg alg = {
.digestsize = MD5_DIGEST_SIZE,
.init = md5_init,
.update = md5_update,
.final = md5_final,
.export = md5_export,
.import = md5_import,
.descsize = sizeof(struct md5_state),
.statesize = sizeof(struct md5_state),
.base = {
.cra_name = "md5",
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
static int __init md5_mod_init(void)
{
return crypto_register_shash(&alg);
}
static void __exit md5_mod_fini(void)
{
crypto_unregister_shash(&alg);
}
module_init(md5_mod_init);
module_exit(md5_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD5 Message Digest Algorithm");
| gpl-2.0 |
loli10K/linux-hardkernel | drivers/hid/hid-rmi.c | 180 | 32287 | /*
* Copyright (c) 2013 Andrew Duggan <aduggan@synaptics.com>
* Copyright (c) 2013 Synaptics Incorporated
* Copyright (c) 2014 Benjamin Tissoires <benjamin.tissoires@gmail.com>
* Copyright (c) 2014 Red Hat, Inc
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
#include <linux/kernel.h>
#include <linux/hid.h>
#include <linux/input.h>
#include <linux/input/mt.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include "hid-ids.h"
#define RMI_MOUSE_REPORT_ID 0x01 /* Mouse emulation Report */
#define RMI_WRITE_REPORT_ID 0x09 /* Output Report */
#define RMI_READ_ADDR_REPORT_ID 0x0a /* Output Report */
#define RMI_READ_DATA_REPORT_ID 0x0b /* Input Report */
#define RMI_ATTN_REPORT_ID 0x0c /* Input Report */
#define RMI_SET_RMI_MODE_REPORT_ID 0x0f /* Feature Report */
/* flags */
#define RMI_READ_REQUEST_PENDING 0
#define RMI_READ_DATA_PENDING 1
#define RMI_STARTED 2
/* device flags */
#define RMI_DEVICE BIT(0)
#define RMI_DEVICE_HAS_PHYS_BUTTONS BIT(1)
enum rmi_mode_type {
RMI_MODE_OFF = 0,
RMI_MODE_ATTN_REPORTS = 1,
RMI_MODE_NO_PACKED_ATTN_REPORTS = 2,
};
struct rmi_function {
unsigned page; /* page of the function */
u16 query_base_addr; /* base address for queries */
u16 command_base_addr; /* base address for commands */
u16 control_base_addr; /* base address for controls */
u16 data_base_addr; /* base address for data */
unsigned int interrupt_base; /* cross-function interrupt number
* (unique in the device) */
unsigned int interrupt_count; /* number of interrupts */
unsigned int report_size; /* size of a report */
unsigned long irq_mask; /* mask of the interrupts
* (to be applied against ATTN IRQ) */
};
/**
* struct rmi_data - stores information for hid communication
*
* @page_mutex: Locks current page to avoid changing pages in unexpected ways.
* @page: Keeps track of the current virtual page
*
* @wait: Used for waiting for read data
*
* @writeReport: output buffer when writing RMI registers
* @readReport: input buffer when reading RMI registers
*
* @input_report_size: size of an input report (advertised by HID)
* @output_report_size: size of an output report (advertised by HID)
*
* @flags: flags for the current device (started, reading, etc...)
*
* @f11: placeholder of internal RMI function F11 description
* @f30: placeholder of internal RMI function F30 description
*
* @max_fingers: maximum finger count reported by the device
* @max_x: maximum x value reported by the device
* @max_y: maximum y value reported by the device
*
* @gpio_led_count: count of GPIOs + LEDs reported by F30
* @button_count: actual physical buttons count
* @button_mask: button mask used to decode GPIO ATTN reports
* @button_state_mask: pull state of the buttons
*
* @input: pointer to the kernel input device
*
* @reset_work: worker which will be called in case of a mouse report
* @hdev: pointer to the struct hid_device
*/
struct rmi_data {
struct mutex page_mutex;
int page;
wait_queue_head_t wait;
u8 *writeReport;
u8 *readReport;
int input_report_size;
int output_report_size;
unsigned long flags;
struct rmi_function f01;
struct rmi_function f11;
struct rmi_function f30;
unsigned int max_fingers;
unsigned int max_x;
unsigned int max_y;
unsigned int x_size_mm;
unsigned int y_size_mm;
unsigned int gpio_led_count;
unsigned int button_count;
unsigned long button_mask;
unsigned long button_state_mask;
struct input_dev *input;
struct work_struct reset_work;
struct hid_device *hdev;
unsigned long device_flags;
unsigned long firmware_id;
};
#define RMI_PAGE(addr) (((addr) >> 8) & 0xff)
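/* For illustration: RMI_PAGE(0x0234) == 0x02, i.e. register 0x0234 lives
 * on page 2, which rmi_set_page() below must select before the transfer. */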
static int rmi_write_report(struct hid_device *hdev, u8 *report, int len);
/**
* rmi_set_page - Set RMI page
* @hdev: The pointer to the hid_device struct
* @page: The new page address.
*
* RMI devices have 16-bit addressing, but some of the physical
* implementations (like SMBus) only have 8-bit addressing. So RMI implements
* a page select register at 0xff of every page so that the full 16-bit
* address space can be addressed reliably, 256 registers at a time.
*
* The page_mutex lock must be held when this function is entered.
*
* Returns zero on success, non-zero on failure.
*/
static int rmi_set_page(struct hid_device *hdev, u8 page)
{
struct rmi_data *data = hid_get_drvdata(hdev);
int retval;
data->writeReport[0] = RMI_WRITE_REPORT_ID;
data->writeReport[1] = 1;
data->writeReport[2] = 0xFF;
data->writeReport[4] = page;
retval = rmi_write_report(hdev, data->writeReport,
data->output_report_size);
if (retval != data->output_report_size) {
dev_err(&hdev->dev,
"%s: set page failed: %d.", __func__, retval);
return retval;
}
data->page = page;
return 0;
}
static int rmi_set_mode(struct hid_device *hdev, u8 mode)
{
int ret;
u8 txbuf[2] = {RMI_SET_RMI_MODE_REPORT_ID, mode};
ret = hid_hw_raw_request(hdev, RMI_SET_RMI_MODE_REPORT_ID, txbuf,
sizeof(txbuf), HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
if (ret < 0) {
dev_err(&hdev->dev, "unable to set rmi mode to %d (%d)\n", mode,
ret);
return ret;
}
return 0;
}
static int rmi_write_report(struct hid_device *hdev, u8 *report, int len)
{
int ret;
ret = hid_hw_output_report(hdev, (void *)report, len);
if (ret < 0) {
dev_err(&hdev->dev, "failed to write hid report (%d)\n", ret);
return ret;
}
return ret;
}
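/* Read protocol sketch: post a READ_ADDR output report carrying the
 * 16-bit address and length, then wait for READ_DATA input reports to
 * arrive until "len" bytes have been collected, retrying up to five
 * times on timeout. */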
static int rmi_read_block(struct hid_device *hdev, u16 addr, void *buf,
const int len)
{
struct rmi_data *data = hid_get_drvdata(hdev);
int ret;
int bytes_read;
int bytes_needed;
int retries;
int read_input_count;
mutex_lock(&data->page_mutex);
if (RMI_PAGE(addr) != data->page) {
ret = rmi_set_page(hdev, RMI_PAGE(addr));
if (ret < 0)
goto exit;
}
for (retries = 5; retries > 0; retries--) {
data->writeReport[0] = RMI_READ_ADDR_REPORT_ID;
data->writeReport[1] = 0; /* old 1 byte read count */
data->writeReport[2] = addr & 0xFF;
data->writeReport[3] = (addr >> 8) & 0xFF;
data->writeReport[4] = len & 0xFF;
data->writeReport[5] = (len >> 8) & 0xFF;
set_bit(RMI_READ_REQUEST_PENDING, &data->flags);
ret = rmi_write_report(hdev, data->writeReport,
data->output_report_size);
if (ret != data->output_report_size) {
clear_bit(RMI_READ_REQUEST_PENDING, &data->flags);
dev_err(&hdev->dev,
"failed to write request output report (%d)\n",
ret);
goto exit;
}
bytes_read = 0;
bytes_needed = len;
while (bytes_read < len) {
if (!wait_event_timeout(data->wait,
test_bit(RMI_READ_DATA_PENDING, &data->flags),
msecs_to_jiffies(1000))) {
hid_warn(hdev, "%s: timeout elapsed\n",
__func__);
ret = -EAGAIN;
break;
}
read_input_count = data->readReport[1];
memcpy(buf + bytes_read, &data->readReport[2],
read_input_count < bytes_needed ?
read_input_count : bytes_needed);
bytes_read += read_input_count;
bytes_needed -= read_input_count;
clear_bit(RMI_READ_DATA_PENDING, &data->flags);
}
if (ret >= 0) {
ret = 0;
break;
}
}
exit:
clear_bit(RMI_READ_REQUEST_PENDING, &data->flags);
mutex_unlock(&data->page_mutex);
return ret;
}
static inline int rmi_read(struct hid_device *hdev, u16 addr, void *buf)
{
return rmi_read_block(hdev, addr, buf, 1);
}
static int rmi_write_block(struct hid_device *hdev, u16 addr, void *buf,
const int len)
{
struct rmi_data *data = hid_get_drvdata(hdev);
int ret;
mutex_lock(&data->page_mutex);
if (RMI_PAGE(addr) != data->page) {
ret = rmi_set_page(hdev, RMI_PAGE(addr));
if (ret < 0)
goto exit;
}
data->writeReport[0] = RMI_WRITE_REPORT_ID;
data->writeReport[1] = len;
data->writeReport[2] = addr & 0xFF;
data->writeReport[3] = (addr >> 8) & 0xFF;
memcpy(&data->writeReport[4], buf, len);
ret = rmi_write_report(hdev, data->writeReport,
data->output_report_size);
if (ret < 0) {
dev_err(&hdev->dev,
"failed to write request output report (%d)\n",
ret);
goto exit;
}
ret = 0;
exit:
mutex_unlock(&data->page_mutex);
return ret;
}
static inline int rmi_write(struct hid_device *hdev, u16 addr, void *buf)
{
return rmi_write_block(hdev, addr, buf, 1);
}
static void rmi_f11_process_touch(struct rmi_data *hdata, int slot,
u8 finger_state, u8 *touch_data)
{
int x, y, wx, wy;
int wide, major, minor;
int z;
input_mt_slot(hdata->input, slot);
input_mt_report_slot_state(hdata->input, MT_TOOL_FINGER,
finger_state == 0x01);
if (finger_state == 0x01) {
x = (touch_data[0] << 4) | (touch_data[2] & 0x0F);
y = (touch_data[1] << 4) | (touch_data[2] >> 4);
wx = touch_data[3] & 0x0F;
wy = touch_data[3] >> 4;
wide = (wx > wy);
major = max(wx, wy);
minor = min(wx, wy);
z = touch_data[4];
/* y is inverted */
y = hdata->max_y - y;
input_event(hdata->input, EV_ABS, ABS_MT_POSITION_X, x);
input_event(hdata->input, EV_ABS, ABS_MT_POSITION_Y, y);
input_event(hdata->input, EV_ABS, ABS_MT_ORIENTATION, wide);
input_event(hdata->input, EV_ABS, ABS_MT_PRESSURE, z);
input_event(hdata->input, EV_ABS, ABS_MT_TOUCH_MAJOR, major);
input_event(hdata->input, EV_ABS, ABS_MT_TOUCH_MINOR, minor);
}
}
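/*
* Worked example of the 5-byte F11 position packet decoded above, for
* touch_data = {0x12, 0x34, 0x56, 0x78, 0x9a}:
*   x  = (0x12 << 4) | (0x56 & 0x0f) = 0x126 (294)
*   y  = (0x34 << 4) | (0x56 >> 4)   = 0x345 (837), then inverted via max_y
*   wx = 0x78 & 0x0f = 8, wy = 0x78 >> 4 = 7
*   wide = (8 > 7) = 1, major = 8, minor = 7, z = 0x9a (154)
*/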
static void rmi_reset_work(struct work_struct *work)
{
struct rmi_data *hdata = container_of(work, struct rmi_data,
reset_work);
/* switch the device to RMI if we receive a generic mouse report */
rmi_set_mode(hdata->hdev, RMI_MODE_ATTN_REPORTS);
}
static inline int rmi_schedule_reset(struct hid_device *hdev)
{
struct rmi_data *hdata = hid_get_drvdata(hdev);
return schedule_work(&hdata->reset_work);
}
static int rmi_f11_input_event(struct hid_device *hdev, u8 irq, u8 *data,
int size)
{
struct rmi_data *hdata = hid_get_drvdata(hdev);
int offset;
int i;
if (!(irq & hdata->f11.irq_mask) || size <= 0)
return 0;
offset = (hdata->max_fingers >> 2) + 1;
for (i = 0; i < hdata->max_fingers; i++) {
int fs_byte_position = i >> 2;
int fs_bit_position = (i & 0x3) << 1;
int finger_state = (data[fs_byte_position] >> fs_bit_position) &
0x03;
int position = offset + 5 * i;
if (position + 5 > size) {
/* partial report, go on with what we received */
printk_once(KERN_WARNING
"%s %s: Detected incomplete finger report. Finger reports may occasionally get dropped on this platform.\n",
dev_driver_string(&hdev->dev),
dev_name(&hdev->dev));
hid_dbg(hdev, "Incomplete finger report\n");
break;
}
rmi_f11_process_touch(hdata, i, finger_state, &data[position]);
}
input_mt_sync_frame(hdata->input);
input_sync(hdata->input);
return hdata->f11.report_size;
}
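/*
* Layout note for the loop above: finger states are packed two bits per
* finger, four fingers per byte, and the 5-byte position records follow that
* header. E.g. with max_fingers = 5 the header is (5 >> 2) + 1 = 2 bytes,
* finger 4's state sits in bits 1:0 of data[1], and its position record
* starts at offset 2 + 5 * 4 = 22.
*/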
static int rmi_f30_input_event(struct hid_device *hdev, u8 irq, u8 *data,
int size)
{
struct rmi_data *hdata = hid_get_drvdata(hdev);
int i;
int button = 0;
bool value;
if (!(irq & hdata->f30.irq_mask))
return 0;
if (size < (int)hdata->f30.report_size) {
hid_warn(hdev, "Click Button pressed, but the click data is missing\n");
return 0;
}
for (i = 0; i < hdata->gpio_led_count; i++) {
if (test_bit(i, &hdata->button_mask)) {
value = (data[i / 8] >> (i & 0x07)) & BIT(0);
if (test_bit(i, &hdata->button_state_mask))
value = !value;
input_event(hdata->input, EV_KEY, BTN_LEFT + button++,
value);
}
}
return hdata->f30.report_size;
}
static int rmi_input_event(struct hid_device *hdev, u8 *data, int size)
{
struct rmi_data *hdata = hid_get_drvdata(hdev);
unsigned long irq_mask = 0;
unsigned index = 2;
if (!(test_bit(RMI_STARTED, &hdata->flags)))
return 0;
irq_mask |= hdata->f11.irq_mask;
irq_mask |= hdata->f30.irq_mask;
if (data[1] & ~irq_mask)
hid_dbg(hdev, "unknown intr source:%02lx %s:%d\n",
data[1] & ~irq_mask, __FILE__, __LINE__);
if (hdata->f11.interrupt_base < hdata->f30.interrupt_base) {
index += rmi_f11_input_event(hdev, data[1], &data[index],
size - index);
index += rmi_f30_input_event(hdev, data[1], &data[index],
size - index);
} else {
index += rmi_f30_input_event(hdev, data[1], &data[index],
size - index);
index += rmi_f11_input_event(hdev, data[1], &data[index],
size - index);
}
return 1;
}
static int rmi_read_data_event(struct hid_device *hdev, u8 *data, int size)
{
struct rmi_data *hdata = hid_get_drvdata(hdev);
if (!test_bit(RMI_READ_REQUEST_PENDING, &hdata->flags)) {
hid_dbg(hdev, "no read request pending\n");
return 0;
}
memcpy(hdata->readReport, data, size < hdata->input_report_size ?
size : hdata->input_report_size);
set_bit(RMI_READ_DATA_PENDING, &hdata->flags);
wake_up(&hdata->wait);
return 1;
}
static int rmi_check_sanity(struct hid_device *hdev, u8 *data, int size)
{
int valid_size = size;
/*
* On the Dell XPS 13 9333, the bus sometimes gets confused and fills
* the report with a sentinel value "ff". Synaptics told us that such
* behavior does not come from the touchpad itself, so we filter out
* such reports here.
*/
/* test the size first so an all-0xff report cannot read data[-1] */
while (valid_size > 0 && data[valid_size - 1] == 0xff)
valid_size--;
return valid_size;
}
static int rmi_raw_event(struct hid_device *hdev,
struct hid_report *report, u8 *data, int size)
{
size = rmi_check_sanity(hdev, data, size);
if (size < 2)
return 0;
switch (data[0]) {
case RMI_READ_DATA_REPORT_ID:
return rmi_read_data_event(hdev, data, size);
case RMI_ATTN_REPORT_ID:
return rmi_input_event(hdev, data, size);
default:
return 1;
}
return 0;
}
static int rmi_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
struct rmi_data *data = hid_get_drvdata(hdev);
if ((data->device_flags & RMI_DEVICE) &&
(field->application == HID_GD_POINTER ||
field->application == HID_GD_MOUSE)) {
if (data->device_flags & RMI_DEVICE_HAS_PHYS_BUTTONS) {
if ((usage->hid & HID_USAGE_PAGE) == HID_UP_BUTTON)
return 0;
if ((usage->hid == HID_GD_X || usage->hid == HID_GD_Y)
&& !value)
return 1;
}
rmi_schedule_reset(hdev);
return 1;
}
return 0;
}
#ifdef CONFIG_PM
static int rmi_post_reset(struct hid_device *hdev)
{
return rmi_set_mode(hdev, RMI_MODE_ATTN_REPORTS);
}
static int rmi_post_resume(struct hid_device *hdev)
{
return rmi_set_mode(hdev, RMI_MODE_ATTN_REPORTS);
}
#endif /* CONFIG_PM */
#define RMI4_MAX_PAGE 0xff
#define RMI4_PAGE_SIZE 0x0100
#define PDT_START_SCAN_LOCATION 0x00e9
#define PDT_END_SCAN_LOCATION 0x0005
#define RMI4_END_OF_PDT(id) ((id) == 0x00 || (id) == 0xff)
struct pdt_entry {
u8 query_base_addr:8;
u8 command_base_addr:8;
u8 control_base_addr:8;
u8 data_base_addr:8;
u8 interrupt_source_count:3;
u8 bits3and4:2;
u8 function_version:2;
u8 bit7:1;
u8 function_number:8;
} __attribute__((__packed__));
static inline unsigned long rmi_gen_mask(unsigned irq_base, unsigned irq_count)
{
return GENMASK(irq_count + irq_base - 1, irq_base);
}
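/*
* Example: rmi_gen_mask(2, 3) = GENMASK(4, 2) = 0x1c, i.e. a function owning
* three interrupt sources starting at interrupt base 2 masks bits 2..4 of the
* attention report's interrupt status byte.
*/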
static void rmi_register_function(struct rmi_data *data,
struct pdt_entry *pdt_entry, int page, unsigned interrupt_count)
{
struct rmi_function *f = NULL;
u16 page_base = page << 8;
switch (pdt_entry->function_number) {
case 0x01:
f = &data->f01;
break;
case 0x11:
f = &data->f11;
break;
case 0x30:
f = &data->f30;
break;
}
if (f) {
f->page = page;
f->query_base_addr = page_base | pdt_entry->query_base_addr;
f->command_base_addr = page_base | pdt_entry->command_base_addr;
f->control_base_addr = page_base | pdt_entry->control_base_addr;
f->data_base_addr = page_base | pdt_entry->data_base_addr;
f->interrupt_base = interrupt_count;
f->interrupt_count = pdt_entry->interrupt_source_count;
f->irq_mask = rmi_gen_mask(f->interrupt_base,
f->interrupt_count);
}
}
static int rmi_scan_pdt(struct hid_device *hdev)
{
struct rmi_data *data = hid_get_drvdata(hdev);
struct pdt_entry entry;
int page;
bool page_has_function;
int i;
int retval;
int interrupt = 0;
u16 page_start, pdt_start, pdt_end;
hid_info(hdev, "Scanning PDT...\n");
for (page = 0; (page <= RMI4_MAX_PAGE); page++) {
page_start = RMI4_PAGE_SIZE * page;
pdt_start = page_start + PDT_START_SCAN_LOCATION;
pdt_end = page_start + PDT_END_SCAN_LOCATION;
page_has_function = false;
for (i = pdt_start; i >= pdt_end; i -= sizeof(entry)) {
retval = rmi_read_block(hdev, i, &entry, sizeof(entry));
if (retval) {
hid_err(hdev,
"Read of PDT entry at %#06x failed.\n",
i);
goto error_exit;
}
if (RMI4_END_OF_PDT(entry.function_number))
break;
page_has_function = true;
hid_info(hdev, "Found F%02X on page %#04x\n",
entry.function_number, page);
rmi_register_function(data, &entry, page, interrupt);
interrupt += entry.interrupt_source_count;
}
if (!page_has_function)
break;
}
hid_info(hdev, "%s: Done with PDT scan.\n", __func__);
retval = 0;
error_exit:
return retval;
}
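/*
* Address arithmetic used by the scan above: sizeof(struct pdt_entry) is
* 6 bytes, so on page 0 the candidate entries are read at 0x00e9, 0x00e3,
* 0x00dd, ... down to 0x0005, and on page 1 the same window shifts up by
* RMI4_PAGE_SIZE to 0x01e9 .. 0x0105.
*/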
#define RMI_DEVICE_F01_BASIC_QUERY_LEN 11
static int rmi_populate_f01(struct hid_device *hdev)
{
struct rmi_data *data = hid_get_drvdata(hdev);
u8 basic_queries[RMI_DEVICE_F01_BASIC_QUERY_LEN];
u8 info[3];
int ret;
bool has_query42;
bool has_lts;
bool has_sensor_id;
bool has_ds4_queries = false;
bool has_build_id_query = false;
bool has_package_id_query = false;
u16 query_offset = data->f01.query_base_addr;
u16 prod_info_addr;
u8 ds4_query_len;
ret = rmi_read_block(hdev, query_offset, basic_queries,
RMI_DEVICE_F01_BASIC_QUERY_LEN);
if (ret) {
hid_err(hdev, "Can not read basic queries from Function 0x1.\n");
return ret;
}
has_lts = !!(basic_queries[0] & BIT(2));
has_sensor_id = !!(basic_queries[1] & BIT(3));
has_query42 = !!(basic_queries[1] & BIT(7));
query_offset += 11;
prod_info_addr = query_offset + 6;
query_offset += 10;
if (has_lts)
query_offset += 20;
if (has_sensor_id)
query_offset++;
if (has_query42) {
ret = rmi_read(hdev, query_offset, info);
if (ret) {
hid_err(hdev, "Can not read query42.\n");
return ret;
}
has_ds4_queries = !!(info[0] & BIT(0));
query_offset++;
}
if (has_ds4_queries) {
ret = rmi_read(hdev, query_offset, &ds4_query_len);
if (ret) {
hid_err(hdev, "Can not read DS4 Query length.\n");
return ret;
}
query_offset++;
if (ds4_query_len > 0) {
ret = rmi_read(hdev, query_offset, info);
if (ret) {
hid_err(hdev, "Can not read DS4 query.\n");
return ret;
}
has_package_id_query = !!(info[0] & BIT(0));
has_build_id_query = !!(info[0] & BIT(1));
}
}
if (has_package_id_query)
prod_info_addr++;
if (has_build_id_query) {
ret = rmi_read_block(hdev, prod_info_addr, info, 3);
if (ret) {
hid_err(hdev, "Can not read product info.\n");
return ret;
}
data->firmware_id = info[1] << 8 | info[0];
data->firmware_id += info[2] * 65536;
}
return 0;
}
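/*
* Worked example of the firmware id assembly above: a build id query
* returning info = {0x34, 0x12, 0x02} yields
*   firmware_id = 0x12 << 8 | 0x34 = 0x1234, plus 0x02 * 65536,
* i.e. 4660 + 131072 = 135732.
*/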
static int rmi_populate_f11(struct hid_device *hdev)
{
struct rmi_data *data = hid_get_drvdata(hdev);
u8 buf[20];
int ret;
bool has_query9;
bool has_query10 = false;
bool has_query11;
bool has_query12;
bool has_query27;
bool has_query28;
bool has_query36 = false;
bool has_physical_props;
bool has_gestures;
bool has_rel;
bool has_data40 = false;
bool has_dribble = false;
bool has_palm_detect = false;
unsigned x_size, y_size;
u16 query_offset;
if (!data->f11.query_base_addr) {
hid_err(hdev, "No 2D sensor found, giving up.\n");
return -ENODEV;
}
/* query 0 contains some useful information */
ret = rmi_read(hdev, data->f11.query_base_addr, buf);
if (ret) {
hid_err(hdev, "can not get query 0: %d.\n", ret);
return ret;
}
has_query9 = !!(buf[0] & BIT(3));
has_query11 = !!(buf[0] & BIT(4));
has_query12 = !!(buf[0] & BIT(5));
has_query27 = !!(buf[0] & BIT(6));
has_query28 = !!(buf[0] & BIT(7));
/* query 1 to get the max number of fingers */
ret = rmi_read(hdev, data->f11.query_base_addr + 1, buf);
if (ret) {
hid_err(hdev, "can not get NumberOfFingers: %d.\n", ret);
return ret;
}
data->max_fingers = (buf[0] & 0x07) + 1;
if (data->max_fingers > 5)
data->max_fingers = 10;
data->f11.report_size = data->max_fingers * 5 +
DIV_ROUND_UP(data->max_fingers, 4);
if (!(buf[0] & BIT(4))) {
hid_err(hdev, "No absolute events, giving up.\n");
return -ENODEV;
}
has_rel = !!(buf[0] & BIT(3));
has_gestures = !!(buf[0] & BIT(5));
ret = rmi_read(hdev, data->f11.query_base_addr + 5, buf);
if (ret) {
hid_err(hdev, "can not get absolute data sources: %d.\n", ret);
return ret;
}
has_dribble = !!(buf[0] & BIT(4));
/*
* At least 4 queries are guaranteed to be present in F11
* +1 for query 5 which is present since absolute events are
* reported and +1 for query 12.
*/
query_offset = 6;
if (has_rel)
++query_offset; /* query 6 is present */
if (has_gestures) {
/* query 8 to find out if query 10 exists */
ret = rmi_read(hdev,
data->f11.query_base_addr + query_offset + 1, buf);
if (ret) {
hid_err(hdev, "can not read gesture information: %d.\n",
ret);
return ret;
}
has_palm_detect = !!(buf[0] & BIT(0));
has_query10 = !!(buf[0] & BIT(2));
query_offset += 2; /* query 7 and 8 are present */
}
if (has_query9)
++query_offset;
if (has_query10)
++query_offset;
if (has_query11)
++query_offset;
/* query 12 to know if the physical properties are reported */
if (has_query12) {
ret = rmi_read(hdev, data->f11.query_base_addr
+ query_offset, buf);
if (ret) {
hid_err(hdev, "can not get query 12: %d.\n", ret);
return ret;
}
has_physical_props = !!(buf[0] & BIT(5));
if (has_physical_props) {
query_offset += 1;
ret = rmi_read_block(hdev,
data->f11.query_base_addr
+ query_offset, buf, 4);
if (ret) {
hid_err(hdev, "can not read query 15-18: %d.\n",
ret);
return ret;
}
x_size = buf[0] | (buf[1] << 8);
y_size = buf[2] | (buf[3] << 8);
data->x_size_mm = DIV_ROUND_CLOSEST(x_size, 10);
data->y_size_mm = DIV_ROUND_CLOSEST(y_size, 10);
hid_info(hdev, "%s: size in mm: %d x %d\n",
__func__, data->x_size_mm, data->y_size_mm);
/*
* query 15 - 18 contain the size of the sensor
* and query 19 - 26 contain bezel dimensions
*/
query_offset += 12;
}
}
if (has_query27)
++query_offset;
if (has_query28) {
ret = rmi_read(hdev, data->f11.query_base_addr
+ query_offset, buf);
if (ret) {
hid_err(hdev, "can not get query 28: %d.\n", ret);
return ret;
}
has_query36 = !!(buf[0] & BIT(6));
}
if (has_query36) {
query_offset += 2;
ret = rmi_read(hdev, data->f11.query_base_addr
+ query_offset, buf);
if (ret) {
hid_err(hdev, "can not get query 36: %d.\n", ret);
return ret;
}
has_data40 = !!(buf[0] & BIT(5));
}
if (has_data40)
data->f11.report_size += data->max_fingers * 2;
/*
* retrieve the ctrl registers
* the ctrl register has a size of 20 but a fw bug split it into 16 + 4,
* and there is no way to know if the first 20 bytes are here or not.
* We use only the first 12 bytes, so get only them.
*/
ret = rmi_read_block(hdev, data->f11.control_base_addr, buf, 12);
if (ret) {
hid_err(hdev, "can not read ctrl block of size 11: %d.\n", ret);
return ret;
}
data->max_x = buf[6] | (buf[7] << 8);
data->max_y = buf[8] | (buf[9] << 8);
if (has_dribble) {
buf[0] = buf[0] & ~BIT(6);
ret = rmi_write(hdev, data->f11.control_base_addr, buf);
if (ret) {
hid_err(hdev, "can not write to control reg 0: %d.\n",
ret);
return ret;
}
}
if (has_palm_detect) {
buf[11] = buf[11] & ~BIT(0);
ret = rmi_write(hdev, data->f11.control_base_addr + 11,
&buf[11]);
if (ret) {
hid_err(hdev, "can not write to control reg 11: %d.\n",
ret);
return ret;
}
}
return 0;
}
static int rmi_populate_f30(struct hid_device *hdev)
{
struct rmi_data *data = hid_get_drvdata(hdev);
u8 buf[20];
int ret;
bool has_gpio, has_led;
unsigned bytes_per_ctrl;
u8 ctrl2_addr;
int ctrl2_3_length;
int i;
/* function F30 is for physical buttons */
if (!data->f30.query_base_addr) {
hid_err(hdev, "No GPIO/LEDs found, giving up.\n");
return -ENODEV;
}
ret = rmi_read_block(hdev, data->f30.query_base_addr, buf, 2);
if (ret) {
hid_err(hdev, "can not get F30 query registers: %d.\n", ret);
return ret;
}
has_gpio = !!(buf[0] & BIT(3));
has_led = !!(buf[0] & BIT(2));
data->gpio_led_count = buf[1] & 0x1f;
/* retrieve ctrl 2 & 3 registers */
bytes_per_ctrl = (data->gpio_led_count + 7) / 8;
/* Ctrl0 is present only if both has_gpio and has_led are set */
ctrl2_addr = (has_gpio && has_led) ? bytes_per_ctrl : 0;
/* Ctrl1 is always present */
ctrl2_addr += bytes_per_ctrl;
ctrl2_3_length = 2 * bytes_per_ctrl;
data->f30.report_size = bytes_per_ctrl;
ret = rmi_read_block(hdev, data->f30.control_base_addr + ctrl2_addr,
buf, ctrl2_3_length);
if (ret) {
hid_err(hdev, "can not read ctrl 2&3 block of size %d: %d.\n",
ctrl2_3_length, ret);
return ret;
}
for (i = 0; i < data->gpio_led_count; i++) {
int byte_position = i >> 3;
int bit_position = i & 0x07;
u8 dir_byte = buf[byte_position];
u8 data_byte = buf[byte_position + bytes_per_ctrl];
bool dir = (dir_byte >> bit_position) & BIT(0);
bool dat = (data_byte >> bit_position) & BIT(0);
if (dir == 0) {
/* input mode */
if (dat) {
/* actual buttons have pull up resistor */
data->button_count++;
set_bit(i, &data->button_mask);
set_bit(i, &data->button_state_mask);
}
}
}
return 0;
}
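/*
* Register layout example for the code above: with gpio_led_count = 10,
* bytes_per_ctrl = (10 + 7) / 8 = 2. If both GPIOs and LEDs exist, ctrl 0
* (2 bytes) and ctrl 1 (2 bytes) precede the block we want, so
* ctrl2_addr = 4 and the 4-byte read returns ctrl 2 (direction bits)
* followed by ctrl 3 (data bits).
*/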
static int rmi_populate(struct hid_device *hdev)
{
struct rmi_data *data = hid_get_drvdata(hdev);
int ret;
ret = rmi_scan_pdt(hdev);
if (ret) {
hid_err(hdev, "PDT scan failed with code %d.\n", ret);
return ret;
}
ret = rmi_populate_f01(hdev);
if (ret) {
hid_err(hdev, "Error while initializing F01 (%d).\n", ret);
return ret;
}
ret = rmi_populate_f11(hdev);
if (ret) {
hid_err(hdev, "Error while initializing F11 (%d).\n", ret);
return ret;
}
if (!(data->device_flags & RMI_DEVICE_HAS_PHYS_BUTTONS)) {
ret = rmi_populate_f30(hdev);
if (ret)
hid_warn(hdev, "Error while initializing F30 (%d).\n", ret);
}
return 0;
}
static void rmi_input_configured(struct hid_device *hdev, struct hid_input *hi)
{
struct rmi_data *data = hid_get_drvdata(hdev);
struct input_dev *input = hi->input;
int ret;
int res_x, res_y, i;
data->input = input;
hid_dbg(hdev, "Opening low level driver\n");
ret = hid_hw_open(hdev);
if (ret)
return;
if (!(data->device_flags & RMI_DEVICE))
return;
/* Allow incoming hid reports */
hid_device_io_start(hdev);
ret = rmi_set_mode(hdev, RMI_MODE_ATTN_REPORTS);
if (ret < 0) {
dev_err(&hdev->dev, "failed to set rmi mode\n");
goto exit;
}
ret = rmi_set_page(hdev, 0);
if (ret < 0) {
dev_err(&hdev->dev, "failed to set page select to 0.\n");
goto exit;
}
ret = rmi_populate(hdev);
if (ret)
goto exit;
hid_info(hdev, "firmware id: %ld\n", data->firmware_id);
__set_bit(EV_ABS, input->evbit);
input_set_abs_params(input, ABS_MT_POSITION_X, 1, data->max_x, 0, 0);
input_set_abs_params(input, ABS_MT_POSITION_Y, 1, data->max_y, 0, 0);
if (data->x_size_mm && data->y_size_mm) {
res_x = (data->max_x - 1) / data->x_size_mm;
res_y = (data->max_y - 1) / data->y_size_mm;
input_abs_set_res(input, ABS_MT_POSITION_X, res_x);
input_abs_set_res(input, ABS_MT_POSITION_Y, res_y);
}
input_set_abs_params(input, ABS_MT_ORIENTATION, 0, 1, 0, 0);
input_set_abs_params(input, ABS_MT_PRESSURE, 0, 0xff, 0, 0);
input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 0x0f, 0, 0);
input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 0x0f, 0, 0);
input_mt_init_slots(input, data->max_fingers, INPUT_MT_POINTER);
if (data->button_count) {
__set_bit(EV_KEY, input->evbit);
for (i = 0; i < data->button_count; i++)
__set_bit(BTN_LEFT + i, input->keybit);
if (data->button_count == 1)
__set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
}
set_bit(RMI_STARTED, &data->flags);
exit:
hid_device_io_stop(hdev);
hid_hw_close(hdev);
}
static int rmi_input_mapping(struct hid_device *hdev,
struct hid_input *hi, struct hid_field *field,
struct hid_usage *usage, unsigned long **bit, int *max)
{
struct rmi_data *data = hid_get_drvdata(hdev);
/*
* we want to make HID ignore the advertised HID collection
* for RMI devices
*/
if (data->device_flags & RMI_DEVICE) {
if ((data->device_flags & RMI_DEVICE_HAS_PHYS_BUTTONS) &&
((usage->hid & HID_USAGE_PAGE) == HID_UP_BUTTON))
return 0;
return -1;
}
return 0;
}
static int rmi_check_valid_report_id(struct hid_device *hdev, unsigned type,
unsigned id, struct hid_report **report)
{
int i;
*report = hdev->report_enum[type].report_id_hash[id];
if (*report) {
for (i = 0; i < (*report)->maxfield; i++) {
unsigned app = (*report)->field[i]->application;
if ((app & HID_USAGE_PAGE) >= HID_UP_MSVENDOR)
return 1;
}
}
return 0;
}
static int rmi_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
struct rmi_data *data = NULL;
int ret;
size_t alloc_size;
struct hid_report *input_report;
struct hid_report *output_report;
struct hid_report *feature_report;
data = devm_kzalloc(&hdev->dev, sizeof(struct rmi_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
INIT_WORK(&data->reset_work, rmi_reset_work);
data->hdev = hdev;
hid_set_drvdata(hdev, data);
hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS;
ret = hid_parse(hdev);
if (ret) {
hid_err(hdev, "parse failed\n");
return ret;
}
if (id->driver_data)
data->device_flags = id->driver_data;
/*
* Check for the RMI specific report ids. If they are missing
* simply return and let the events be processed by hid-input
*/
if (!rmi_check_valid_report_id(hdev, HID_FEATURE_REPORT,
RMI_SET_RMI_MODE_REPORT_ID, &feature_report)) {
hid_dbg(hdev, "device does not have set mode feature report\n");
goto start;
}
if (!rmi_check_valid_report_id(hdev, HID_INPUT_REPORT,
RMI_ATTN_REPORT_ID, &input_report)) {
hid_dbg(hdev, "device does not have attention input report\n");
goto start;
}
data->input_report_size = hid_report_len(input_report);
if (!rmi_check_valid_report_id(hdev, HID_OUTPUT_REPORT,
RMI_WRITE_REPORT_ID, &output_report)) {
hid_dbg(hdev,
"device does not have rmi write output report\n");
goto start;
}
data->output_report_size = hid_report_len(output_report);
data->device_flags |= RMI_DEVICE;
alloc_size = data->output_report_size + data->input_report_size;
data->writeReport = devm_kzalloc(&hdev->dev, alloc_size, GFP_KERNEL);
if (!data->writeReport) {
ret = -ENOMEM;
return ret;
}
data->readReport = data->writeReport + data->output_report_size;
init_waitqueue_head(&data->wait);
mutex_init(&data->page_mutex);
start:
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (ret) {
hid_err(hdev, "hw start failed\n");
return ret;
}
if ((data->device_flags & RMI_DEVICE) &&
!test_bit(RMI_STARTED, &data->flags))
/*
* The device may be in the bootloader if rmi_input_configured
* failed to find F11 in the PDT. Print an error, but don't
* return an error from rmi_probe so that hidraw will be
* accessible from userspace. That way a userspace tool
* can be used to reload working firmware on the touchpad.
*/
hid_err(hdev, "Device failed to be properly configured\n");
return 0;
}
static void rmi_remove(struct hid_device *hdev)
{
struct rmi_data *hdata = hid_get_drvdata(hdev);
clear_bit(RMI_STARTED, &hdata->flags);
hid_hw_stop(hdev);
}
static const struct hid_device_id rmi_id[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_RAZER, USB_DEVICE_ID_RAZER_BLADE_14),
.driver_data = RMI_DEVICE_HAS_PHYS_BUTTONS },
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_RMI, HID_ANY_ID, HID_ANY_ID) },
{ }
};
MODULE_DEVICE_TABLE(hid, rmi_id);
static struct hid_driver rmi_driver = {
.name = "hid-rmi",
.id_table = rmi_id,
.probe = rmi_probe,
.remove = rmi_remove,
.event = rmi_event,
.raw_event = rmi_raw_event,
.input_mapping = rmi_input_mapping,
.input_configured = rmi_input_configured,
#ifdef CONFIG_PM
.resume = rmi_post_resume,
.reset_resume = rmi_post_reset,
#endif
};
module_hid_driver(rmi_driver);
MODULE_AUTHOR("Andrew Duggan <aduggan@synaptics.com>");
MODULE_DESCRIPTION("RMI HID driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
kirananto/RaZorKernel | arch/arm/mach-exynos/pm_domains.c | 1972 | 7075 | /*
* Exynos Generic power domain support.
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Implementation of Exynos specific power domain control which is used in
* conjunction with runtime-pm. Support for both device-tree and non-device-tree
* based power domain support is included.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/io.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/pm_domain.h>
#include <linux/delay.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/sched.h>
#include <mach/regs-pmu.h>
#include <plat/devs.h>
/*
* Exynos specific wrapper around the generic power domain
*/
struct exynos_pm_domain {
void __iomem *base;
char const *name;
bool is_off;
struct generic_pm_domain pd;
};
static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
{
struct exynos_pm_domain *pd;
void __iomem *base;
u32 timeout, pwr;
char *op;
pd = container_of(domain, struct exynos_pm_domain, pd);
base = pd->base;
pwr = power_on ? S5P_INT_LOCAL_PWR_EN : 0;
__raw_writel(pwr, base);
/* Wait max 1ms */
timeout = 10;
while ((__raw_readl(base + 0x4) & S5P_INT_LOCAL_PWR_EN) != pwr) {
if (!timeout) {
op = (power_on) ? "enable" : "disable";
pr_err("Power domain %s %s failed\n", domain->name, op);
return -ETIMEDOUT;
}
timeout--;
cpu_relax();
usleep_range(80, 100);
}
return 0;
}
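/*
* Timing sketch for the wait loop above: up to 10 polls with an 80-100us
* sleep between them gives the "max 1ms" budget, and the confirmation
* register at base + 0x4 is polled until its S5P_INT_LOCAL_PWR_EN bits match
* the value written to the control register at base.
*/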
static int exynos_pd_power_on(struct generic_pm_domain *domain)
{
return exynos_pd_power(domain, true);
}
static int exynos_pd_power_off(struct generic_pm_domain *domain)
{
return exynos_pd_power(domain, false);
}
#define EXYNOS_GPD(PD, BASE, NAME) \
static struct exynos_pm_domain PD = { \
.base = (void __iomem *)BASE, \
.name = NAME, \
.pd = { \
.power_off = exynos_pd_power_off, \
.power_on = exynos_pd_power_on, \
}, \
}
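/*
* Expansion example: EXYNOS_GPD(exynos4_pd_mfc, S5P_PMU_MFC_CONF, "pd-mfc")
* below defines a static struct exynos_pm_domain whose .base points at the
* MFC configuration register and whose generic_pm_domain callbacks are wired
* to exynos_pd_power_on()/exynos_pd_power_off().
*/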
#ifdef CONFIG_OF
static void exynos_add_device_to_domain(struct exynos_pm_domain *pd,
struct device *dev)
{
int ret;
dev_dbg(dev, "adding to power domain %s\n", pd->pd.name);
while (1) {
ret = pm_genpd_add_device(&pd->pd, dev);
if (ret != -EAGAIN)
break;
cond_resched();
}
pm_genpd_dev_need_restore(dev, true);
}
static void exynos_remove_device_from_domain(struct device *dev)
{
struct generic_pm_domain *genpd = dev_to_genpd(dev);
int ret;
dev_dbg(dev, "removing from power domain %s\n", genpd->name);
while (1) {
ret = pm_genpd_remove_device(genpd, dev);
if (ret != -EAGAIN)
break;
cond_resched();
}
}
static void exynos_read_domain_from_dt(struct device *dev)
{
struct platform_device *pd_pdev;
struct exynos_pm_domain *pd;
struct device_node *node;
node = of_parse_phandle(dev->of_node, "samsung,power-domain", 0);
if (!node)
return;
pd_pdev = of_find_device_by_node(node);
if (!pd_pdev)
return;
pd = platform_get_drvdata(pd_pdev);
exynos_add_device_to_domain(pd, dev);
}
static int exynos_pm_notifier_call(struct notifier_block *nb,
unsigned long event, void *data)
{
struct device *dev = data;
switch (event) {
case BUS_NOTIFY_BIND_DRIVER:
if (dev->of_node)
exynos_read_domain_from_dt(dev);
break;
case BUS_NOTIFY_UNBOUND_DRIVER:
exynos_remove_device_from_domain(dev);
break;
}
return NOTIFY_DONE;
}
static struct notifier_block platform_nb = {
.notifier_call = exynos_pm_notifier_call,
};
static __init int exynos_pm_dt_parse_domains(void)
{
struct platform_device *pdev;
struct device_node *np;
for_each_compatible_node(np, NULL, "samsung,exynos4210-pd") {
struct exynos_pm_domain *pd;
int on;
pdev = of_find_device_by_node(np);
pd = kzalloc(sizeof(*pd), GFP_KERNEL);
if (!pd) {
pr_err("%s: failed to allocate memory for domain\n",
__func__);
return -ENOMEM;
}
pd->pd.name = kstrdup(np->name, GFP_KERNEL);
pd->name = pd->pd.name;
pd->base = of_iomap(np, 0);
pd->pd.power_off = exynos_pd_power_off;
pd->pd.power_on = exynos_pd_power_on;
pd->pd.of_node = np;
platform_set_drvdata(pdev, pd);
on = __raw_readl(pd->base + 0x4) & S5P_INT_LOCAL_PWR_EN;
pm_genpd_init(&pd->pd, NULL, !on);
}
bus_register_notifier(&platform_bus_type, &platform_nb);
return 0;
}
#else
static __init int exynos_pm_dt_parse_domains(void)
{
return 0;
}
#endif /* CONFIG_OF */
static __init __maybe_unused void exynos_pm_add_dev_to_genpd(struct platform_device *pdev,
struct exynos_pm_domain *pd)
{
if (pdev->dev.bus) {
if (!pm_genpd_add_device(&pd->pd, &pdev->dev))
pm_genpd_dev_need_restore(&pdev->dev, true);
else
pr_info("%s: error in adding %s device to %s power"
"domain\n", __func__, dev_name(&pdev->dev),
pd->name);
}
}
EXYNOS_GPD(exynos4_pd_mfc, S5P_PMU_MFC_CONF, "pd-mfc");
EXYNOS_GPD(exynos4_pd_g3d, S5P_PMU_G3D_CONF, "pd-g3d");
EXYNOS_GPD(exynos4_pd_lcd0, S5P_PMU_LCD0_CONF, "pd-lcd0");
EXYNOS_GPD(exynos4_pd_lcd1, S5P_PMU_LCD1_CONF, "pd-lcd1");
EXYNOS_GPD(exynos4_pd_tv, S5P_PMU_TV_CONF, "pd-tv");
EXYNOS_GPD(exynos4_pd_cam, S5P_PMU_CAM_CONF, "pd-cam");
EXYNOS_GPD(exynos4_pd_gps, S5P_PMU_GPS_CONF, "pd-gps");
static struct exynos_pm_domain *exynos4_pm_domains[] = {
&exynos4_pd_mfc,
&exynos4_pd_g3d,
&exynos4_pd_lcd0,
&exynos4_pd_lcd1,
&exynos4_pd_tv,
&exynos4_pd_cam,
&exynos4_pd_gps,
};
static __init int exynos4_pm_init_power_domain(void)
{
int idx;
if (of_have_populated_dt())
return exynos_pm_dt_parse_domains();
for (idx = 0; idx < ARRAY_SIZE(exynos4_pm_domains); idx++) {
struct exynos_pm_domain *pd = exynos4_pm_domains[idx];
int on = __raw_readl(pd->base + 0x4) & S5P_INT_LOCAL_PWR_EN;
pm_genpd_init(&pd->pd, NULL, !on);
}
#ifdef CONFIG_S5P_DEV_FIMD0
exynos_pm_add_dev_to_genpd(&s5p_device_fimd0, &exynos4_pd_lcd0);
#endif
#ifdef CONFIG_S5P_DEV_TV
exynos_pm_add_dev_to_genpd(&s5p_device_hdmi, &exynos4_pd_tv);
exynos_pm_add_dev_to_genpd(&s5p_device_mixer, &exynos4_pd_tv);
#endif
#ifdef CONFIG_S5P_DEV_MFC
exynos_pm_add_dev_to_genpd(&s5p_device_mfc, &exynos4_pd_mfc);
#endif
#ifdef CONFIG_S5P_DEV_FIMC0
exynos_pm_add_dev_to_genpd(&s5p_device_fimc0, &exynos4_pd_cam);
#endif
#ifdef CONFIG_S5P_DEV_FIMC1
exynos_pm_add_dev_to_genpd(&s5p_device_fimc1, &exynos4_pd_cam);
#endif
#ifdef CONFIG_S5P_DEV_FIMC2
exynos_pm_add_dev_to_genpd(&s5p_device_fimc2, &exynos4_pd_cam);
#endif
#ifdef CONFIG_S5P_DEV_FIMC3
exynos_pm_add_dev_to_genpd(&s5p_device_fimc3, &exynos4_pd_cam);
#endif
#ifdef CONFIG_S5P_DEV_CSIS0
exynos_pm_add_dev_to_genpd(&s5p_device_mipi_csis0, &exynos4_pd_cam);
#endif
#ifdef CONFIG_S5P_DEV_CSIS1
exynos_pm_add_dev_to_genpd(&s5p_device_mipi_csis1, &exynos4_pd_cam);
#endif
#ifdef CONFIG_S5P_DEV_G2D
exynos_pm_add_dev_to_genpd(&s5p_device_g2d, &exynos4_pd_lcd0);
#endif
#ifdef CONFIG_S5P_DEV_JPEG
exynos_pm_add_dev_to_genpd(&s5p_device_jpeg, &exynos4_pd_cam);
#endif
return 0;
}
arch_initcall(exynos4_pm_init_power_domain);
int __init exynos_pm_late_initcall(void)
{
pm_genpd_poweroff_unused();
return 0;
}
| gpl-2.0 |
thanhphat11/Kernel_N4_N910SLK | fs/ntfs/super.c | 2228 | 102688 | /*
* super.c - NTFS kernel super block handling. Part of the Linux-NTFS project.
*
* Copyright (c) 2001-2012 Anton Altaparmakov and Tuxera Inc.
* Copyright (c) 2001,2002 Richard Russon
*
* This program/include file is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program/include file is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program (in the main directory of the Linux-NTFS
* distribution in the file COPYING); if not, write to the Free Software
* Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h> /* For bdev_logical_block_size(). */
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/vfs.h>
#include <linux/moduleparam.h>
#include <linux/bitmap.h>
#include "sysctl.h"
#include "logfile.h"
#include "quota.h"
#include "usnjrnl.h"
#include "dir.h"
#include "debug.h"
#include "index.h"
#include "inode.h"
#include "aops.h"
#include "layout.h"
#include "malloc.h"
#include "ntfs.h"
/* Number of mounted filesystems which have compression enabled. */
static unsigned long ntfs_nr_compression_users;
/* A global default upcase table and a corresponding reference count. */
static ntfschar *default_upcase = NULL;
static unsigned long ntfs_nr_upcase_users = 0;
/* Error constants/strings used in inode.c::ntfs_show_options(). */
typedef enum {
/* One of these must be present, default is ON_ERRORS_CONTINUE. */
ON_ERRORS_PANIC = 0x01,
ON_ERRORS_REMOUNT_RO = 0x02,
ON_ERRORS_CONTINUE = 0x04,
/* Optional, can be combined with any of the above. */
ON_ERRORS_RECOVER = 0x10,
} ON_ERRORS_ACTIONS;
const option_t on_errors_arr[] = {
{ ON_ERRORS_PANIC, "panic" },
{ ON_ERRORS_REMOUNT_RO, "remount-ro", },
{ ON_ERRORS_CONTINUE, "continue", },
{ ON_ERRORS_RECOVER, "recover" },
{ 0, NULL }
};
/**
* simple_getbool -
*
* Copied from old ntfs driver (which copied from vfat driver).
*/
static int simple_getbool(char *s, bool *setval)
{
if (s) {
if (!strcmp(s, "1") || !strcmp(s, "yes") || !strcmp(s, "true"))
*setval = true;
else if (!strcmp(s, "0") || !strcmp(s, "no") ||
!strcmp(s, "false"))
*setval = false;
else
return 0;
} else
*setval = true;
return 1;
}
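/*
* Behaviour sketch: simple_getbool(NULL, &val) and the values "1"/"yes"/
* "true" set *setval to true, "0"/"no"/"false" set it to false, and anything
* else makes it return 0 so the caller can report a bad boolean argument.
*/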
/**
* parse_options - parse the (re)mount options
* @vol: ntfs volume
* @opt: string containing the (re)mount options
*
* Parse the recognized options in @opt for the ntfs volume described by @vol.
*/
static bool parse_options(ntfs_volume *vol, char *opt)
{
char *p, *v, *ov;
static char *utf8 = "utf8";
int errors = 0, sloppy = 0;
kuid_t uid = INVALID_UID;
kgid_t gid = INVALID_GID;
umode_t fmask = (umode_t)-1, dmask = (umode_t)-1;
int mft_zone_multiplier = -1, on_errors = -1;
int show_sys_files = -1, case_sensitive = -1, disable_sparse = -1;
struct nls_table *nls_map = NULL, *old_nls;
/* I am lazy... (-8 */
#define NTFS_GETOPT_WITH_DEFAULT(option, variable, default_value) \
if (!strcmp(p, option)) { \
if (!v || !*v) \
variable = default_value; \
else { \
variable = simple_strtoul(ov = v, &v, 0); \
if (*v) \
goto needs_val; \
} \
}
#define NTFS_GETOPT(option, variable) \
if (!strcmp(p, option)) { \
if (!v || !*v) \
goto needs_arg; \
variable = simple_strtoul(ov = v, &v, 0); \
if (*v) \
goto needs_val; \
}
#define NTFS_GETOPT_UID(option, variable) \
if (!strcmp(p, option)) { \
uid_t uid_value; \
if (!v || !*v) \
goto needs_arg; \
uid_value = simple_strtoul(ov = v, &v, 0); \
if (*v) \
goto needs_val; \
variable = make_kuid(current_user_ns(), uid_value); \
if (!uid_valid(variable)) \
goto needs_val; \
}
#define NTFS_GETOPT_GID(option, variable) \
if (!strcmp(p, option)) { \
gid_t gid_value; \
if (!v || !*v) \
goto needs_arg; \
gid_value = simple_strtoul(ov = v, &v, 0); \
if (*v) \
goto needs_val; \
variable = make_kgid(current_user_ns(), gid_value); \
if (!gid_valid(variable)) \
goto needs_val; \
}
#define NTFS_GETOPT_OCTAL(option, variable) \
if (!strcmp(p, option)) { \
if (!v || !*v) \
goto needs_arg; \
variable = simple_strtoul(ov = v, &v, 8); \
if (*v) \
goto needs_val; \
}
#define NTFS_GETOPT_BOOL(option, variable) \
if (!strcmp(p, option)) { \
bool val; \
if (!simple_getbool(v, &val)) \
goto needs_bool; \
variable = val; \
}
#define NTFS_GETOPT_OPTIONS_ARRAY(option, variable, opt_array) \
if (!strcmp(p, option)) { \
int _i; \
if (!v || !*v) \
goto needs_arg; \
ov = v; \
if (variable == -1) \
variable = 0; \
for (_i = 0; opt_array[_i].str && *opt_array[_i].str; _i++) \
if (!strcmp(opt_array[_i].str, v)) { \
variable |= opt_array[_i].val; \
break; \
} \
if (!opt_array[_i].str || !*opt_array[_i].str) \
goto needs_val; \
}
if (!opt || !*opt)
goto no_mount_options;
ntfs_debug("Entering with mount options string: %s", opt);
while ((p = strsep(&opt, ","))) {
if ((v = strchr(p, '=')))
*v++ = 0;
NTFS_GETOPT_UID("uid", uid)
else NTFS_GETOPT_GID("gid", gid)
else NTFS_GETOPT_OCTAL("umask", fmask = dmask)
else NTFS_GETOPT_OCTAL("fmask", fmask)
else NTFS_GETOPT_OCTAL("dmask", dmask)
else NTFS_GETOPT("mft_zone_multiplier", mft_zone_multiplier)
else NTFS_GETOPT_WITH_DEFAULT("sloppy", sloppy, true)
else NTFS_GETOPT_BOOL("show_sys_files", show_sys_files)
else NTFS_GETOPT_BOOL("case_sensitive", case_sensitive)
else NTFS_GETOPT_BOOL("disable_sparse", disable_sparse)
else NTFS_GETOPT_OPTIONS_ARRAY("errors", on_errors,
on_errors_arr)
else if (!strcmp(p, "posix") || !strcmp(p, "show_inodes"))
ntfs_warning(vol->sb, "Ignoring obsolete option %s.",
p);
else if (!strcmp(p, "nls") || !strcmp(p, "iocharset")) {
if (!strcmp(p, "iocharset"))
ntfs_warning(vol->sb, "Option iocharset is "
"deprecated. Please use "
"option nls=<charsetname> in "
"the future.");
if (!v || !*v)
goto needs_arg;
use_utf8:
old_nls = nls_map;
nls_map = load_nls(v);
if (!nls_map) {
if (!old_nls) {
ntfs_error(vol->sb, "NLS character set "
"%s not found.", v);
return false;
}
ntfs_error(vol->sb, "NLS character set %s not "
"found. Using previous one %s.",
v, old_nls->charset);
nls_map = old_nls;
} else /* nls_map */ {
unload_nls(old_nls);
}
} else if (!strcmp(p, "utf8")) {
bool val = false;
ntfs_warning(vol->sb, "Option utf8 is no longer "
"supported, using option nls=utf8. Please "
"use option nls=utf8 in the future and "
"make sure utf8 is compiled either as a "
"module or into the kernel.");
if (!v || !*v)
val = true;
else if (!simple_getbool(v, &val))
goto needs_bool;
if (val) {
v = utf8;
goto use_utf8;
}
} else {
ntfs_error(vol->sb, "Unrecognized mount option %s.", p);
if (errors < INT_MAX)
errors++;
}
#undef NTFS_GETOPT_OPTIONS_ARRAY
#undef NTFS_GETOPT_BOOL
#undef NTFS_GETOPT
#undef NTFS_GETOPT_WITH_DEFAULT
}
no_mount_options:
if (errors && !sloppy)
return false;
if (sloppy)
ntfs_warning(vol->sb, "Sloppy option given. Ignoring "
"unrecognized mount option(s) and continuing.");
/* Keep this first! */
if (on_errors != -1) {
if (!on_errors) {
ntfs_error(vol->sb, "Invalid errors option argument "
"or bug in options parser.");
return false;
}
}
if (nls_map) {
if (vol->nls_map && vol->nls_map != nls_map) {
ntfs_error(vol->sb, "Cannot change NLS character set "
"on remount.");
return false;
} /* else (!vol->nls_map) */
ntfs_debug("Using NLS character set %s.", nls_map->charset);
vol->nls_map = nls_map;
} else /* (!nls_map) */ {
if (!vol->nls_map) {
vol->nls_map = load_nls_default();
if (!vol->nls_map) {
ntfs_error(vol->sb, "Failed to load default "
"NLS character set.");
return false;
}
ntfs_debug("Using default NLS character set (%s).",
vol->nls_map->charset);
}
}
if (mft_zone_multiplier != -1) {
if (vol->mft_zone_multiplier && vol->mft_zone_multiplier !=
mft_zone_multiplier) {
ntfs_error(vol->sb, "Cannot change mft_zone_multiplier "
"on remount.");
return false;
}
if (mft_zone_multiplier < 1 || mft_zone_multiplier > 4) {
ntfs_error(vol->sb, "Invalid mft_zone_multiplier. "
"Using default value, i.e. 1.");
mft_zone_multiplier = 1;
}
vol->mft_zone_multiplier = mft_zone_multiplier;
}
if (!vol->mft_zone_multiplier)
vol->mft_zone_multiplier = 1;
if (on_errors != -1)
vol->on_errors = on_errors;
if (!vol->on_errors || vol->on_errors == ON_ERRORS_RECOVER)
vol->on_errors |= ON_ERRORS_CONTINUE;
if (uid_valid(uid))
vol->uid = uid;
if (gid_valid(gid))
vol->gid = gid;
if (fmask != (umode_t)-1)
vol->fmask = fmask;
if (dmask != (umode_t)-1)
vol->dmask = dmask;
if (show_sys_files != -1) {
if (show_sys_files)
NVolSetShowSystemFiles(vol);
else
NVolClearShowSystemFiles(vol);
}
if (case_sensitive != -1) {
if (case_sensitive)
NVolSetCaseSensitive(vol);
else
NVolClearCaseSensitive(vol);
}
if (disable_sparse != -1) {
if (disable_sparse)
NVolClearSparseEnabled(vol);
else {
if (!NVolSparseEnabled(vol) &&
vol->major_ver && vol->major_ver < 3)
ntfs_warning(vol->sb, "Not enabling sparse "
"support due to NTFS volume "
"version %i.%i (need at least "
"version 3.0).", vol->major_ver,
vol->minor_ver);
else
NVolSetSparseEnabled(vol);
}
}
return true;
needs_arg:
ntfs_error(vol->sb, "The %s option requires an argument.", p);
return false;
needs_bool:
ntfs_error(vol->sb, "The %s option requires a boolean argument.", p);
return false;
needs_val:
ntfs_error(vol->sb, "Invalid %s option argument: %s", p, ov);
return false;
}
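/*
* Example (illustrative values): mounting with
* "uid=1000,gid=1000,umask=077,nls=utf8,errors=remount-ro" makes all files
* appear owned by uid/gid 1000, masks mode bits 077 from both files and
* directories (umask sets fmask and dmask together), loads the utf8 NLS
* table and selects the remount-ro error behaviour from on_errors_arr.
*/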
#ifdef NTFS_RW
/**
* ntfs_write_volume_flags - write new flags to the volume information flags
* @vol: ntfs volume on which to modify the flags
* @flags: new flags value for the volume information flags
*
* Internal function. You probably want to use ntfs_{set,clear}_volume_flags()
* instead (see below).
*
* Replace the volume information flags on the volume @vol with the value
* supplied in @flags. Note, this overwrites the volume information flags, so
* make sure to combine the flags you want to modify with the old flags and use
* the result when calling ntfs_write_volume_flags().
*
* Return 0 on success and -errno on error.
*/
static int ntfs_write_volume_flags(ntfs_volume *vol, const VOLUME_FLAGS flags)
{
ntfs_inode *ni = NTFS_I(vol->vol_ino);
MFT_RECORD *m;
VOLUME_INFORMATION *vi;
ntfs_attr_search_ctx *ctx;
int err;
ntfs_debug("Entering, old flags = 0x%x, new flags = 0x%x.",
le16_to_cpu(vol->vol_flags), le16_to_cpu(flags));
if (vol->vol_flags == flags)
goto done;
BUG_ON(!ni);
m = map_mft_record(ni);
if (IS_ERR(m)) {
err = PTR_ERR(m);
goto err_out;
}
ctx = ntfs_attr_get_search_ctx(ni, m);
if (!ctx) {
err = -ENOMEM;
goto put_unm_err_out;
}
err = ntfs_attr_lookup(AT_VOLUME_INFORMATION, NULL, 0, 0, 0, NULL, 0,
ctx);
if (err)
goto put_unm_err_out;
vi = (VOLUME_INFORMATION*)((u8*)ctx->attr +
le16_to_cpu(ctx->attr->data.resident.value_offset));
vol->vol_flags = vi->flags = flags;
flush_dcache_mft_record_page(ctx->ntfs_ino);
mark_mft_record_dirty(ctx->ntfs_ino);
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(ni);
done:
ntfs_debug("Done.");
return 0;
put_unm_err_out:
if (ctx)
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(ni);
err_out:
ntfs_error(vol->sb, "Failed with error code %i.", -err);
return err;
}
/**
* ntfs_set_volume_flags - set bits in the volume information flags
* @vol: ntfs volume on which to modify the flags
* @flags: flags to set on the volume
*
* Set the bits in @flags in the volume information flags on the volume @vol.
*
* Return 0 on success and -errno on error.
*/
static inline int ntfs_set_volume_flags(ntfs_volume *vol, VOLUME_FLAGS flags)
{
flags &= VOLUME_FLAGS_MASK;
return ntfs_write_volume_flags(vol, vol->vol_flags | flags);
}
/**
* ntfs_clear_volume_flags - clear bits in the volume information flags
* @vol: ntfs volume on which to modify the flags
* @flags: flags to clear on the volume
*
* Clear the bits in @flags in the volume information flags on the volume @vol.
*
* Return 0 on success and -errno on error.
*/
static inline int ntfs_clear_volume_flags(ntfs_volume *vol, VOLUME_FLAGS flags)
{
flags &= VOLUME_FLAGS_MASK;
flags = vol->vol_flags & cpu_to_le16(~le16_to_cpu(flags));
return ntfs_write_volume_flags(vol, flags);
}
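/*
* Flag arithmetic example: with vol_flags = VOLUME_IS_DIRTY,
* ntfs_set_volume_flags(vol, VOLUME_MODIFIED_BY_CHKDSK) writes the OR of
* both bits, while ntfs_clear_volume_flags(vol, VOLUME_IS_DIRTY) masks the
* bit out in little-endian form before handing the result to
* ntfs_write_volume_flags(); both first mask the argument with
* VOLUME_FLAGS_MASK so reserved bits can never be toggled.
*/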
#endif /* NTFS_RW */
/**
* ntfs_remount - change the mount options of a mounted ntfs filesystem
* @sb: superblock of mounted ntfs filesystem
* @flags: remount flags
* @opt: remount options string
*
* Change the mount options of an already mounted ntfs filesystem.
*
* NOTE: The VFS sets the @sb->s_flags remount flags to @flags after
* ntfs_remount() returns successfully (i.e. returns 0). Otherwise,
* @sb->s_flags are not changed.
*/
static int ntfs_remount(struct super_block *sb, int *flags, char *opt)
{
ntfs_volume *vol = NTFS_SB(sb);
ntfs_debug("Entering with remount options string: %s", opt);
#ifndef NTFS_RW
/* For read-only compiled driver, enforce read-only flag. */
*flags |= MS_RDONLY;
#else /* NTFS_RW */
/*
* For the read-write compiled driver, if we are remounting read-write,
* make sure there are no volume errors and that no unsupported volume
* flags are set. Also, empty the logfile journal as it would become
* stale as soon as something is written to the volume and mark the
* volume dirty so that chkdsk is run if the volume is not umounted
* cleanly. Finally, mark the quotas out of date so Windows rescans
* the volume on boot and updates them.
*
* When remounting read-only, mark the volume clean if no volume errors
* have occurred.
*/
if ((sb->s_flags & MS_RDONLY) && !(*flags & MS_RDONLY)) {
static const char *es = ". Cannot remount read-write.";
/* Remounting read-write. */
if (NVolErrors(vol)) {
ntfs_error(sb, "Volume has errors and is read-only%s",
es);
return -EROFS;
}
if (vol->vol_flags & VOLUME_IS_DIRTY) {
ntfs_error(sb, "Volume is dirty and read-only%s", es);
return -EROFS;
}
if (vol->vol_flags & VOLUME_MODIFIED_BY_CHKDSK) {
ntfs_error(sb, "Volume has been modified by chkdsk "
"and is read-only%s", es);
return -EROFS;
}
if (vol->vol_flags & VOLUME_MUST_MOUNT_RO_MASK) {
ntfs_error(sb, "Volume has unsupported flags set "
"(0x%x) and is read-only%s",
(unsigned)le16_to_cpu(vol->vol_flags),
es);
return -EROFS;
}
if (ntfs_set_volume_flags(vol, VOLUME_IS_DIRTY)) {
ntfs_error(sb, "Failed to set dirty bit in volume "
"information flags%s", es);
return -EROFS;
}
#if 0
// TODO: Enable this code once we start modifying anything that
// is different between NTFS 1.2 and 3.x...
/* Set NT4 compatibility flag on newer NTFS version volumes. */
if ((vol->major_ver > 1)) {
if (ntfs_set_volume_flags(vol, VOLUME_MOUNTED_ON_NT4)) {
ntfs_error(sb, "Failed to set NT4 "
"compatibility flag%s", es);
NVolSetErrors(vol);
return -EROFS;
}
}
#endif
if (!ntfs_empty_logfile(vol->logfile_ino)) {
ntfs_error(sb, "Failed to empty journal $LogFile%s",
es);
NVolSetErrors(vol);
return -EROFS;
}
if (!ntfs_mark_quotas_out_of_date(vol)) {
ntfs_error(sb, "Failed to mark quotas out of date%s",
es);
NVolSetErrors(vol);
return -EROFS;
}
if (!ntfs_stamp_usnjrnl(vol)) {
ntfs_error(sb, "Failed to stamp transation log "
"($UsnJrnl)%s", es);
NVolSetErrors(vol);
return -EROFS;
}
} else if (!(sb->s_flags & MS_RDONLY) && (*flags & MS_RDONLY)) {
/* Remounting read-only. */
if (!NVolErrors(vol)) {
if (ntfs_clear_volume_flags(vol, VOLUME_IS_DIRTY))
ntfs_warning(sb, "Failed to clear dirty bit "
"in volume information "
"flags. Run chkdsk.");
}
}
#endif /* NTFS_RW */
// TODO: Deal with *flags.
if (!parse_options(vol, opt))
return -EINVAL;
ntfs_debug("Done.");
return 0;
}
/**
* is_boot_sector_ntfs - check whether a boot sector is a valid NTFS boot sector
* @sb: Super block of the device to which @b belongs.
* @b: Boot sector of device @sb to check.
* @silent: If 'true', all output will be silenced.
*
* is_boot_sector_ntfs() checks whether the boot sector @b is a valid NTFS boot
* sector. Returns 'true' if it is valid and 'false' if not.
*
* @sb is only needed for warning/error output, i.e. it can be NULL when silent
* is 'true'.
*/
static bool is_boot_sector_ntfs(const struct super_block *sb,
const NTFS_BOOT_SECTOR *b, const bool silent)
{
/*
* Check that checksum == sum of u32 values from b to the checksum
* field. If checksum is zero, no checking is done. We will work when
* the checksum test fails, since some utilities update the boot sector
* ignoring the checksum which leaves the checksum out-of-date. We
* report a warning if this is the case.
*/
if ((void*)b < (void*)&b->checksum && b->checksum && !silent) {
le32 *u;
u32 i;
for (i = 0, u = (le32*)b; u < (le32*)(&b->checksum); ++u)
i += le32_to_cpup(u);
if (le32_to_cpu(b->checksum) != i)
ntfs_warning(sb, "Invalid boot sector checksum.");
}
/* Check OEM identifier is "NTFS " */
if (b->oem_id != magicNTFS)
goto not_ntfs;
/* Check bytes per sector value is between 256 and 4096. */
if (le16_to_cpu(b->bpb.bytes_per_sector) < 0x100 ||
le16_to_cpu(b->bpb.bytes_per_sector) > 0x1000)
goto not_ntfs;
/* Check sectors per cluster value is valid. */
switch (b->bpb.sectors_per_cluster) {
case 1: case 2: case 4: case 8: case 16: case 32: case 64: case 128:
break;
default:
goto not_ntfs;
}
/* Check the cluster size is not above the maximum (64kiB). */
if ((u32)le16_to_cpu(b->bpb.bytes_per_sector) *
b->bpb.sectors_per_cluster > NTFS_MAX_CLUSTER_SIZE)
goto not_ntfs;
/* Check reserved/unused fields are really zero. */
if (le16_to_cpu(b->bpb.reserved_sectors) ||
le16_to_cpu(b->bpb.root_entries) ||
le16_to_cpu(b->bpb.sectors) ||
le16_to_cpu(b->bpb.sectors_per_fat) ||
le32_to_cpu(b->bpb.large_sectors) || b->bpb.fats)
goto not_ntfs;
/* Check clusters per file mft record value is valid. */
if ((u8)b->clusters_per_mft_record < 0xe1 ||
(u8)b->clusters_per_mft_record > 0xf7)
switch (b->clusters_per_mft_record) {
case 1: case 2: case 4: case 8: case 16: case 32: case 64:
break;
default:
goto not_ntfs;
}
/* Check clusters per index block value is valid. */
if ((u8)b->clusters_per_index_record < 0xe1 ||
(u8)b->clusters_per_index_record > 0xf7)
switch (b->clusters_per_index_record) {
case 1: case 2: case 4: case 8: case 16: case 32: case 64:
break;
default:
goto not_ntfs;
}
/*
* Check for valid end of sector marker. We will work without it, but
* many BIOSes will refuse to boot from a bootsector if the magic is
* incorrect, so we emit a warning.
*/
if (!silent && b->end_of_sector_marker != cpu_to_le16(0xaa55))
ntfs_warning(sb, "Invalid end of sector marker.");
return true;
not_ntfs:
return false;
}
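/*
* Note on the 0xe1-0xf7 window accepted above: clusters_per_mft_record and
* clusters_per_index_record are signed. A negative value v means the record
* size is 2^(-v) bytes rather than a cluster count, so 0xf6 (-10) encodes
* 1024-byte mft records and the window corresponds to record sizes between
* 2^9 and 2^31 bytes.
*/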
/**
* read_ntfs_boot_sector - read the NTFS boot sector of a device
* @sb: super block of device to read the boot sector from
* @silent: if true, suppress all output
*
* Reads the boot sector from the device and validates it. If that fails, tries
* to read the backup boot sector, first from the end of the device (where
* NT4 and later place it) and then from the middle of the device (NT3.51 and
* earlier).
*
* If a valid boot sector is found but it is not the primary boot sector, we
* repair the primary boot sector silently (unless the device is read-only or
* the primary boot sector is not accessible).
*
* NOTE: To call this function, @sb must have the fields s_dev, the ntfs super
* block (u.ntfs_sb), nr_blocks and the device flags (s_flags) initialized
* to their respective values.
*
* Return the unlocked buffer head containing the boot sector or NULL on error.
*/
static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
const int silent)
{
const char *read_err_str = "Unable to read %s boot sector.";
struct buffer_head *bh_primary, *bh_backup;
sector_t nr_blocks = NTFS_SB(sb)->nr_blocks;
/* Try to read primary boot sector. */
if ((bh_primary = sb_bread(sb, 0))) {
if (is_boot_sector_ntfs(sb, (NTFS_BOOT_SECTOR*)
bh_primary->b_data, silent))
return bh_primary;
if (!silent)
ntfs_error(sb, "Primary boot sector is invalid.");
} else if (!silent)
ntfs_error(sb, read_err_str, "primary");
if (!(NTFS_SB(sb)->on_errors & ON_ERRORS_RECOVER)) {
if (bh_primary)
brelse(bh_primary);
if (!silent)
ntfs_error(sb, "Mount option errors=recover not used. "
"Aborting without trying to recover.");
return NULL;
}
/* Try to read NT4+ backup boot sector. */
if ((bh_backup = sb_bread(sb, nr_blocks - 1))) {
if (is_boot_sector_ntfs(sb, (NTFS_BOOT_SECTOR*)
bh_backup->b_data, silent))
goto hotfix_primary_boot_sector;
brelse(bh_backup);
} else if (!silent)
ntfs_error(sb, read_err_str, "backup");
/* Try to read NT3.51- backup boot sector. */
if ((bh_backup = sb_bread(sb, nr_blocks >> 1))) {
if (is_boot_sector_ntfs(sb, (NTFS_BOOT_SECTOR*)
bh_backup->b_data, silent))
goto hotfix_primary_boot_sector;
if (!silent)
ntfs_error(sb, "Could not find a valid backup boot "
"sector.");
brelse(bh_backup);
} else if (!silent)
ntfs_error(sb, read_err_str, "backup");
/* We failed. Cleanup and return. */
if (bh_primary)
brelse(bh_primary);
return NULL;
hotfix_primary_boot_sector:
if (bh_primary) {
/*
* If we managed to read sector zero and the volume is not
* read-only, copy the found, valid backup boot sector to the
* primary boot sector. Note we only copy the actual boot
* sector structure, not the actual whole device sector as that
* may be bigger and would potentially damage the $Boot system
* file (FIXME: Would be nice to know if the backup boot sector
* on a large sector device contains the whole boot loader or
* just the first 512 bytes).
*/
if (!(sb->s_flags & MS_RDONLY)) {
ntfs_warning(sb, "Hot-fix: Recovering invalid primary "
"boot sector from backup copy.");
memcpy(bh_primary->b_data, bh_backup->b_data,
NTFS_BLOCK_SIZE);
mark_buffer_dirty(bh_primary);
sync_dirty_buffer(bh_primary);
if (buffer_uptodate(bh_primary)) {
brelse(bh_backup);
return bh_primary;
}
ntfs_error(sb, "Hot-fix: Device write error while "
"recovering primary boot sector.");
} else {
ntfs_warning(sb, "Hot-fix: Recovery of primary boot "
"sector failed: Read-only mount.");
}
brelse(bh_primary);
}
ntfs_warning(sb, "Using backup boot sector.");
return bh_backup;
}
/**
* parse_ntfs_boot_sector - parse the boot sector and store the data in @vol
* @vol: volume structure to initialise with data from boot sector
* @b: boot sector to parse
*
* Parse the ntfs boot sector @b and store all important information therein in
* the ntfs super block @vol. Return 'true' on success and 'false' on error.
*/
static bool parse_ntfs_boot_sector(ntfs_volume *vol, const NTFS_BOOT_SECTOR *b)
{
unsigned int sectors_per_cluster_bits, nr_hidden_sects;
int clusters_per_mft_record, clusters_per_index_record;
s64 ll;
vol->sector_size = le16_to_cpu(b->bpb.bytes_per_sector);
vol->sector_size_bits = ffs(vol->sector_size) - 1;
ntfs_debug("vol->sector_size = %i (0x%x)", vol->sector_size,
vol->sector_size);
ntfs_debug("vol->sector_size_bits = %i (0x%x)", vol->sector_size_bits,
vol->sector_size_bits);
if (vol->sector_size < vol->sb->s_blocksize) {
ntfs_error(vol->sb, "Sector size (%i) is smaller than the "
"device block size (%lu). This is not "
"supported. Sorry.", vol->sector_size,
vol->sb->s_blocksize);
return false;
}
ntfs_debug("sectors_per_cluster = 0x%x", b->bpb.sectors_per_cluster);
sectors_per_cluster_bits = ffs(b->bpb.sectors_per_cluster) - 1;
ntfs_debug("sectors_per_cluster_bits = 0x%x",
sectors_per_cluster_bits);
nr_hidden_sects = le32_to_cpu(b->bpb.hidden_sectors);
ntfs_debug("number of hidden sectors = 0x%x", nr_hidden_sects);
vol->cluster_size = vol->sector_size << sectors_per_cluster_bits;
vol->cluster_size_mask = vol->cluster_size - 1;
vol->cluster_size_bits = ffs(vol->cluster_size) - 1;
ntfs_debug("vol->cluster_size = %i (0x%x)", vol->cluster_size,
vol->cluster_size);
ntfs_debug("vol->cluster_size_mask = 0x%x", vol->cluster_size_mask);
ntfs_debug("vol->cluster_size_bits = %i", vol->cluster_size_bits);
if (vol->cluster_size < vol->sector_size) {
ntfs_error(vol->sb, "Cluster size (%i) is smaller than the "
"sector size (%i). This is not supported. "
"Sorry.", vol->cluster_size, vol->sector_size);
return false;
}
clusters_per_mft_record = b->clusters_per_mft_record;
ntfs_debug("clusters_per_mft_record = %i (0x%x)",
clusters_per_mft_record, clusters_per_mft_record);
if (clusters_per_mft_record > 0)
vol->mft_record_size = vol->cluster_size <<
(ffs(clusters_per_mft_record) - 1);
else
/*
* When mft_record_size < cluster_size, clusters_per_mft_record
* = -log2(mft_record_size), with mft_record_size in bytes.
* mft_record_size normally is 1024 bytes, which is encoded as 0xF6
* (-10 in decimal).
*/
vol->mft_record_size = 1 << -clusters_per_mft_record;
vol->mft_record_size_mask = vol->mft_record_size - 1;
vol->mft_record_size_bits = ffs(vol->mft_record_size) - 1;
ntfs_debug("vol->mft_record_size = %i (0x%x)", vol->mft_record_size,
vol->mft_record_size);
ntfs_debug("vol->mft_record_size_mask = 0x%x",
vol->mft_record_size_mask);
ntfs_debug("vol->mft_record_size_bits = %i (0x%x)",
vol->mft_record_size_bits, vol->mft_record_size_bits);
/*
* We cannot support mft record sizes above the PAGE_CACHE_SIZE since
* we store $MFT/$DATA, the table of mft records in the page cache.
*/
if (vol->mft_record_size > PAGE_CACHE_SIZE) {
ntfs_error(vol->sb, "Mft record size (%i) exceeds the "
"PAGE_CACHE_SIZE on your system (%lu). "
"This is not supported. Sorry.",
vol->mft_record_size, PAGE_CACHE_SIZE);
return false;
}
/* We cannot support mft record sizes below the sector size. */
if (vol->mft_record_size < vol->sector_size) {
ntfs_error(vol->sb, "Mft record size (%i) is smaller than the "
"sector size (%i). This is not supported. "
"Sorry.", vol->mft_record_size,
vol->sector_size);
return false;
}
clusters_per_index_record = b->clusters_per_index_record;
ntfs_debug("clusters_per_index_record = %i (0x%x)",
clusters_per_index_record, clusters_per_index_record);
if (clusters_per_index_record > 0)
vol->index_record_size = vol->cluster_size <<
(ffs(clusters_per_index_record) - 1);
else
/*
* When index_record_size < cluster_size,
* clusters_per_index_record = -log2(index_record_size), with
* index_record_size in bytes. index_record_size normally equals
* 4096 bytes, which is encoded as 0xF4 (-12 in decimal).
*/
vol->index_record_size = 1 << -clusters_per_index_record;
vol->index_record_size_mask = vol->index_record_size - 1;
vol->index_record_size_bits = ffs(vol->index_record_size) - 1;
ntfs_debug("vol->index_record_size = %i (0x%x)",
vol->index_record_size, vol->index_record_size);
ntfs_debug("vol->index_record_size_mask = 0x%x",
vol->index_record_size_mask);
ntfs_debug("vol->index_record_size_bits = %i (0x%x)",
vol->index_record_size_bits,
vol->index_record_size_bits);
/* We cannot support index record sizes below the sector size. */
if (vol->index_record_size < vol->sector_size) {
ntfs_error(vol->sb, "Index record size (%i) is smaller than "
"the sector size (%i). This is not "
"supported. Sorry.", vol->index_record_size,
vol->sector_size);
return false;
}
/*
* Get the size of the volume in clusters and check for 64-bit-ness.
* Windows currently only uses 32 bits to save the clusters so we do
* the same as it is much faster on 32-bit CPUs.
*/
ll = sle64_to_cpu(b->number_of_sectors) >> sectors_per_cluster_bits;
if ((u64)ll >= 1ULL << 32) {
ntfs_error(vol->sb, "Cannot handle 64-bit clusters. Sorry.");
return false;
}
vol->nr_clusters = ll;
ntfs_debug("vol->nr_clusters = 0x%llx", (long long)vol->nr_clusters);
/*
* On an architecture where unsigned long is 32-bits, we restrict the
* volume size to 2TiB (2^41). On a 64-bit architecture, the compiler
* will hopefully optimize the whole check away.
*/
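/*
 * For example, with 4kiB clusters (cluster_size_bits = 12) the 2^41
 * byte limit corresponds to 2^29 clusters, so ll must stay below
 * 0x20000000 here.
 */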
if (sizeof(unsigned long) < 8) {
if ((ll << vol->cluster_size_bits) >= (1ULL << 41)) {
ntfs_error(vol->sb, "Volume size (%lluTiB) is too "
"large for this architecture. "
"Maximum supported is 2TiB. Sorry.",
(unsigned long long)ll >> (40 -
vol->cluster_size_bits));
return false;
}
}
ll = sle64_to_cpu(b->mft_lcn);
if (ll >= vol->nr_clusters) {
ntfs_error(vol->sb, "MFT LCN (%lli, 0x%llx) is beyond end of "
"volume. Weird.", (unsigned long long)ll,
(unsigned long long)ll);
return false;
}
vol->mft_lcn = ll;
ntfs_debug("vol->mft_lcn = 0x%llx", (long long)vol->mft_lcn);
ll = sle64_to_cpu(b->mftmirr_lcn);
if (ll >= vol->nr_clusters) {
ntfs_error(vol->sb, "MFTMirr LCN (%lli, 0x%llx) is beyond end "
"of volume. Weird.", (unsigned long long)ll,
(unsigned long long)ll);
return false;
}
vol->mftmirr_lcn = ll;
ntfs_debug("vol->mftmirr_lcn = 0x%llx", (long long)vol->mftmirr_lcn);
#ifdef NTFS_RW
/*
* Work out the size of the mft mirror in number of mft records. If the
* cluster size is less than or equal to the size taken by four mft
* records, the mft mirror stores the first four mft records. If the
* cluster size is bigger than the size taken by four mft records, the
* mft mirror contains as many mft records as will fit into one
* cluster.
*/
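/*
 * E.g. with 1024 byte mft records and 4096 byte clusters,
 * 4 << 10 = 4096 <= 4096 so the mirror holds the usual four records
 * ($MFT, $MFTMirr, $LogFile, and $Volume).
 */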
if (vol->cluster_size <= (4 << vol->mft_record_size_bits))
vol->mftmirr_size = 4;
else
vol->mftmirr_size = vol->cluster_size >>
vol->mft_record_size_bits;
ntfs_debug("vol->mftmirr_size = %i", vol->mftmirr_size);
#endif /* NTFS_RW */
vol->serial_no = le64_to_cpu(b->volume_serial_number);
ntfs_debug("vol->serial_no = 0x%llx",
(unsigned long long)vol->serial_no);
return true;
}
/**
* ntfs_setup_allocators - initialize the cluster and mft allocators
* @vol: volume structure for which to setup the allocators
*
* Setup the cluster (lcn) and mft allocators to the starting values.
*/
static void ntfs_setup_allocators(ntfs_volume *vol)
{
#ifdef NTFS_RW
LCN mft_zone_size, mft_lcn;
#endif /* NTFS_RW */
ntfs_debug("vol->mft_zone_multiplier = 0x%x",
vol->mft_zone_multiplier);
#ifdef NTFS_RW
/* Determine the size of the MFT zone. */
mft_zone_size = vol->nr_clusters;
switch (vol->mft_zone_multiplier) { /* % of volume size in clusters */
case 4:
mft_zone_size >>= 1; /* 50% */
break;
case 3:
mft_zone_size = (mft_zone_size +
(mft_zone_size >> 1)) >> 2; /* 37.5% */
break;
case 2:
mft_zone_size >>= 2; /* 25% */
break;
/* case 1: */
default:
mft_zone_size >>= 3; /* 12.5% */
break;
}
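/*
 * The multiplier 3 case computes (x + x/2) >> 2 = 3x/8 = 37.5% using
 * only shifts and an add, e.g. a 1000000 cluster volume would get a
 * 375000 cluster mft zone.
 */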
/* Setup the mft zone. */
vol->mft_zone_start = vol->mft_zone_pos = vol->mft_lcn;
ntfs_debug("vol->mft_zone_pos = 0x%llx",
(unsigned long long)vol->mft_zone_pos);
/*
* Calculate the mft_lcn for an unmodified NTFS volume (see mkntfs
* source) and if the actual mft_lcn is in the expected place or even
* further to the front of the volume, extend the mft_zone to cover the
* beginning of the volume as well. This is in order to protect the
* area reserved for the mft bitmap as well within the mft_zone itself.
* On non-standard volumes we do not protect it as the overhead would
* be higher than the speed increase we would get by doing it.
*/
mft_lcn = (8192 + 2 * vol->cluster_size - 1) / vol->cluster_size;
if (mft_lcn * vol->cluster_size < 16 * 1024)
mft_lcn = (16 * 1024 + vol->cluster_size - 1) /
vol->cluster_size;
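/*
 * E.g. for 4096 byte clusters the first expression yields 3 (12kiB),
 * which is below 16kiB, so mft_lcn is bumped to 16384 / 4096 = 4.
 */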
if (vol->mft_zone_start <= mft_lcn)
vol->mft_zone_start = 0;
ntfs_debug("vol->mft_zone_start = 0x%llx",
(unsigned long long)vol->mft_zone_start);
/*
* Need to cap the mft zone on non-standard volumes so that it does
* not point outside the boundaries of the volume. We do this by
* halving the zone size until we are inside the volume.
*/
vol->mft_zone_end = vol->mft_lcn + mft_zone_size;
while (vol->mft_zone_end >= vol->nr_clusters) {
mft_zone_size >>= 1;
vol->mft_zone_end = vol->mft_lcn + mft_zone_size;
}
ntfs_debug("vol->mft_zone_end = 0x%llx",
(unsigned long long)vol->mft_zone_end);
/*
* Set the current position within each data zone to the start of the
* respective zone.
*/
vol->data1_zone_pos = vol->mft_zone_end;
ntfs_debug("vol->data1_zone_pos = 0x%llx",
(unsigned long long)vol->data1_zone_pos);
vol->data2_zone_pos = 0;
ntfs_debug("vol->data2_zone_pos = 0x%llx",
(unsigned long long)vol->data2_zone_pos);
/* Set the mft data allocation position to mft record 24. */
vol->mft_data_pos = 24;
ntfs_debug("vol->mft_data_pos = 0x%llx",
(unsigned long long)vol->mft_data_pos);
#endif /* NTFS_RW */
}
#ifdef NTFS_RW
/**
* load_and_init_mft_mirror - load and setup the mft mirror inode for a volume
* @vol: ntfs super block describing device whose mft mirror to load
*
* Return 'true' on success or 'false' on error.
*/
static bool load_and_init_mft_mirror(ntfs_volume *vol)
{
struct inode *tmp_ino;
ntfs_inode *tmp_ni;
ntfs_debug("Entering.");
/* Get mft mirror inode. */
tmp_ino = ntfs_iget(vol->sb, FILE_MFTMirr);
if (IS_ERR(tmp_ino) || is_bad_inode(tmp_ino)) {
if (!IS_ERR(tmp_ino))
iput(tmp_ino);
/* Caller will display error message. */
return false;
}
/*
* Re-initialize some specifics about $MFTMirr's inode as
* ntfs_read_inode() will have set up the default ones.
*/
/* Set uid and gid to root. */
tmp_ino->i_uid = GLOBAL_ROOT_UID;
tmp_ino->i_gid = GLOBAL_ROOT_GID;
/* Regular file. No access for anyone. */
tmp_ino->i_mode = S_IFREG;
/* No VFS initiated operations allowed for $MFTMirr. */
tmp_ino->i_op = &ntfs_empty_inode_ops;
tmp_ino->i_fop = &ntfs_empty_file_ops;
/* Put in our special address space operations. */
tmp_ino->i_mapping->a_ops = &ntfs_mst_aops;
tmp_ni = NTFS_I(tmp_ino);
/* The $MFTMirr, like the $MFT, is multi sector transfer protected. */
NInoSetMstProtected(tmp_ni);
NInoSetSparseDisabled(tmp_ni);
/*
* Set up our little cheat allowing us to reuse the async read io
* completion handler for directories.
*/
tmp_ni->itype.index.block_size = vol->mft_record_size;
tmp_ni->itype.index.block_size_bits = vol->mft_record_size_bits;
vol->mftmirr_ino = tmp_ino;
ntfs_debug("Done.");
return true;
}
/**
* check_mft_mirror - compare contents of the mft mirror with the mft
* @vol: ntfs super block describing device whose mft mirror to check
*
* Return 'true' on success or 'false' on error.
*
* Note, this function also results in the mft mirror runlist being completely
* mapped into memory. The mft mirror write code requires this and will BUG()
* should it find an unmapped runlist element.
*/
static bool check_mft_mirror(ntfs_volume *vol)
{
struct super_block *sb = vol->sb;
ntfs_inode *mirr_ni;
struct page *mft_page, *mirr_page;
u8 *kmft, *kmirr;
runlist_element *rl, rl2[2];
pgoff_t index;
int mrecs_per_page, i;
ntfs_debug("Entering.");
/* Compare contents of $MFT and $MFTMirr. */
mrecs_per_page = PAGE_CACHE_SIZE / vol->mft_record_size;
BUG_ON(!mrecs_per_page);
BUG_ON(!vol->mftmirr_size);
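/* E.g. 4kiB pages and 1024 byte mft records give 4 records per page. */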
mft_page = mirr_page = NULL;
kmft = kmirr = NULL;
index = i = 0;
do {
u32 bytes;
/* Switch pages if necessary. */
if (!(i % mrecs_per_page)) {
if (index) {
ntfs_unmap_page(mft_page);
ntfs_unmap_page(mirr_page);
}
/* Get the $MFT page. */
mft_page = ntfs_map_page(vol->mft_ino->i_mapping,
index);
if (IS_ERR(mft_page)) {
ntfs_error(sb, "Failed to read $MFT.");
return false;
}
kmft = page_address(mft_page);
/* Get the $MFTMirr page. */
mirr_page = ntfs_map_page(vol->mftmirr_ino->i_mapping,
index);
if (IS_ERR(mirr_page)) {
ntfs_error(sb, "Failed to read $MFTMirr.");
goto mft_unmap_out;
}
kmirr = page_address(mirr_page);
++index;
}
/* Do not check the record if it is not in use. */
if (((MFT_RECORD*)kmft)->flags & MFT_RECORD_IN_USE) {
/* Make sure the record is ok. */
if (ntfs_is_baad_recordp((le32*)kmft)) {
ntfs_error(sb, "Incomplete multi sector "
"transfer detected in mft "
"record %i.", i);
mm_unmap_out:
ntfs_unmap_page(mirr_page);
mft_unmap_out:
ntfs_unmap_page(mft_page);
return false;
}
}
/* Do not check the mirror record if it is not in use. */
if (((MFT_RECORD*)kmirr)->flags & MFT_RECORD_IN_USE) {
if (ntfs_is_baad_recordp((le32*)kmirr)) {
ntfs_error(sb, "Incomplete multi sector "
"transfer detected in mft "
"mirror record %i.", i);
goto mm_unmap_out;
}
}
/* Get the amount of data in the current record. */
bytes = le32_to_cpu(((MFT_RECORD*)kmft)->bytes_in_use);
if (bytes < sizeof(MFT_RECORD_OLD) ||
bytes > vol->mft_record_size ||
ntfs_is_baad_recordp((le32*)kmft)) {
bytes = le32_to_cpu(((MFT_RECORD*)kmirr)->bytes_in_use);
if (bytes < sizeof(MFT_RECORD_OLD) ||
bytes > vol->mft_record_size ||
ntfs_is_baad_recordp((le32*)kmirr))
bytes = vol->mft_record_size;
}
/* Compare the two records. */
if (memcmp(kmft, kmirr, bytes)) {
ntfs_error(sb, "$MFT and $MFTMirr (record %i) do not "
"match. Run ntfsfix or chkdsk.", i);
goto mm_unmap_out;
}
kmft += vol->mft_record_size;
kmirr += vol->mft_record_size;
} while (++i < vol->mftmirr_size);
/* Release the last pages. */
ntfs_unmap_page(mft_page);
ntfs_unmap_page(mirr_page);
/* Construct the mft mirror runlist by hand. */
rl2[0].vcn = 0;
rl2[0].lcn = vol->mftmirr_lcn;
rl2[0].length = (vol->mftmirr_size * vol->mft_record_size +
vol->cluster_size - 1) / vol->cluster_size;
rl2[1].vcn = rl2[0].length;
rl2[1].lcn = LCN_ENOENT;
rl2[1].length = 0;
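/*
 * E.g. for mftmirr_size = 4 and 1024 byte mft records on a volume
 * with 4096 byte clusters the run is (4096 + 4095) / 4096 = 1
 * cluster long.
 */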
/*
* Because we have just read all of the mft mirror, we know we have
* mapped the full runlist for it.
*/
mirr_ni = NTFS_I(vol->mftmirr_ino);
down_read(&mirr_ni->runlist.lock);
rl = mirr_ni->runlist.rl;
/* Compare the two runlists. They must be identical. */
i = 0;
do {
if (rl2[i].vcn != rl[i].vcn || rl2[i].lcn != rl[i].lcn ||
rl2[i].length != rl[i].length) {
ntfs_error(sb, "$MFTMirr location mismatch. "
"Run chkdsk.");
up_read(&mirr_ni->runlist.lock);
return false;
}
} while (rl2[i++].length);
up_read(&mirr_ni->runlist.lock);
ntfs_debug("Done.");
return true;
}
/**
* load_and_check_logfile - load and check the logfile inode for a volume
* @vol: ntfs super block describing device whose logfile to load
*
* Return 'true' on success or 'false' on error.
*/
static bool load_and_check_logfile(ntfs_volume *vol,
RESTART_PAGE_HEADER **rp)
{
struct inode *tmp_ino;
ntfs_debug("Entering.");
tmp_ino = ntfs_iget(vol->sb, FILE_LogFile);
if (IS_ERR(tmp_ino) || is_bad_inode(tmp_ino)) {
if (!IS_ERR(tmp_ino))
iput(tmp_ino);
/* Caller will display error message. */
return false;
}
if (!ntfs_check_logfile(tmp_ino, rp)) {
iput(tmp_ino);
/* ntfs_check_logfile() will have displayed error output. */
return false;
}
NInoSetSparseDisabled(NTFS_I(tmp_ino));
vol->logfile_ino = tmp_ino;
ntfs_debug("Done.");
return true;
}
#define NTFS_HIBERFIL_HEADER_SIZE 4096
/**
* check_windows_hibernation_status - check if Windows is suspended on a volume
* @vol: ntfs super block of device to check
*
* Check if Windows is hibernated on the ntfs volume @vol. This is done by
* looking for the file hiberfil.sys in the root directory of the volume. If
* the file is not present, Windows is definitely not suspended.
*
* If hiberfil.sys exists and is less than 4kiB in size it means Windows is
* definitely suspended (this volume is not the system volume). Caveat: on a
* system with many volumes it is possible that the < 4kiB check is bogus but
* for now this should do fine.
*
* If hiberfil.sys exists and is larger than 4kiB in size, we need to read the
* hiberfil header (which is the first 4kiB). If this begins with "hibr",
* Windows is definitely suspended. If it is completely full of zeroes,
* Windows is definitely not hibernated. Any other case is treated as if
* Windows is suspended. This caters for the above mentioned caveat of a
* system with many volumes where no "hibr" magic would be present and there is
* no zero header.
*
* Return 0 if Windows is not hibernated on the volume, >0 if Windows is
* hibernated on the volume, and -errno on error.
*/
static int check_windows_hibernation_status(ntfs_volume *vol)
{
MFT_REF mref;
struct inode *vi;
struct page *page;
u32 *kaddr, *kend;
ntfs_name *name = NULL;
int ret = 1;
static const ntfschar hiberfil[13] = { cpu_to_le16('h'),
cpu_to_le16('i'), cpu_to_le16('b'),
cpu_to_le16('e'), cpu_to_le16('r'),
cpu_to_le16('f'), cpu_to_le16('i'),
cpu_to_le16('l'), cpu_to_le16('.'),
cpu_to_le16('s'), cpu_to_le16('y'),
cpu_to_le16('s'), 0 };
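/* Note the lookup below passes length 12, excluding the NULL terminator. */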
ntfs_debug("Entering.");
/*
* Find the inode number for the hibernation file by looking up the
* filename hiberfil.sys in the root directory.
*/
mutex_lock(&vol->root_ino->i_mutex);
mref = ntfs_lookup_inode_by_name(NTFS_I(vol->root_ino), hiberfil, 12,
&name);
mutex_unlock(&vol->root_ino->i_mutex);
if (IS_ERR_MREF(mref)) {
ret = MREF_ERR(mref);
/* If the file does not exist, Windows is not hibernated. */
if (ret == -ENOENT) {
ntfs_debug("hiberfil.sys not present. Windows is not "
"hibernated on the volume.");
return 0;
}
/* A real error occurred. */
ntfs_error(vol->sb, "Failed to find inode number for "
"hiberfil.sys.");
return ret;
}
/* We do not care for the type of match that was found. */
kfree(name);
/* Get the inode. */
vi = ntfs_iget(vol->sb, MREF(mref));
if (IS_ERR(vi) || is_bad_inode(vi)) {
if (!IS_ERR(vi))
iput(vi);
ntfs_error(vol->sb, "Failed to load hiberfil.sys.");
return IS_ERR(vi) ? PTR_ERR(vi) : -EIO;
}
if (unlikely(i_size_read(vi) < NTFS_HIBERFIL_HEADER_SIZE)) {
ntfs_debug("hiberfil.sys is smaller than 4kiB (0x%llx). "
"Windows is hibernated on the volume. This "
"is not the system volume.", i_size_read(vi));
goto iput_out;
}
page = ntfs_map_page(vi->i_mapping, 0);
if (IS_ERR(page)) {
ntfs_error(vol->sb, "Failed to read from hiberfil.sys.");
ret = PTR_ERR(page);
goto iput_out;
}
kaddr = (u32*)page_address(page);
if (*(le32*)kaddr == cpu_to_le32(0x72626968)/*'hibr'*/) {
ntfs_debug("Magic \"hibr\" found in hiberfil.sys. Windows is "
"hibernated on the volume. This is the "
"system volume.");
goto unm_iput_out;
}
kend = kaddr + NTFS_HIBERFIL_HEADER_SIZE/sizeof(*kaddr);
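/* Scan all 4096 / 4 = 1024 32-bit words of the header for non-zero. */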
do {
if (unlikely(*kaddr)) {
ntfs_debug("hiberfil.sys is larger than 4kiB "
"(0x%llx), does not contain the "
"\"hibr\" magic, and does not have a "
"zero header. Windows is hibernated "
"on the volume. This is not the "
"system volume.", i_size_read(vi));
goto unm_iput_out;
}
} while (++kaddr < kend);
ntfs_debug("hiberfil.sys contains a zero header. Windows is not "
"hibernated on the volume. This is the system "
"volume.");
ret = 0;
unm_iput_out:
ntfs_unmap_page(page);
iput_out:
iput(vi);
return ret;
}
/**
* load_and_init_quota - load and setup the quota file for a volume if present
* @vol: ntfs super block describing device whose quota file to load
*
* Return 'true' on success or 'false' on error. If $Quota is not present, we
* leave vol->quota_ino as NULL and return success.
*/
static bool load_and_init_quota(ntfs_volume *vol)
{
MFT_REF mref;
struct inode *tmp_ino;
ntfs_name *name = NULL;
static const ntfschar Quota[7] = { cpu_to_le16('$'),
cpu_to_le16('Q'), cpu_to_le16('u'),
cpu_to_le16('o'), cpu_to_le16('t'),
cpu_to_le16('a'), 0 };
static ntfschar Q[3] = { cpu_to_le16('$'),
cpu_to_le16('Q'), 0 };
ntfs_debug("Entering.");
/*
* Find the inode number for the quota file by looking up the filename
* $Quota in the extended system files directory $Extend.
*/
mutex_lock(&vol->extend_ino->i_mutex);
mref = ntfs_lookup_inode_by_name(NTFS_I(vol->extend_ino), Quota, 6,
&name);
mutex_unlock(&vol->extend_ino->i_mutex);
if (IS_ERR_MREF(mref)) {
/*
* If the file does not exist, quotas are disabled and have
* never been enabled on this volume, so just return success.
*/
if (MREF_ERR(mref) == -ENOENT) {
ntfs_debug("$Quota not present. Volume does not have "
"quotas enabled.");
/*
* No need to try to set quotas out of date if they are
* not enabled.
*/
NVolSetQuotaOutOfDate(vol);
return true;
}
/* A real error occurred. */
ntfs_error(vol->sb, "Failed to find inode number for $Quota.");
return false;
}
/* We do not care for the type of match that was found. */
kfree(name);
/* Get the inode. */
tmp_ino = ntfs_iget(vol->sb, MREF(mref));
if (IS_ERR(tmp_ino) || is_bad_inode(tmp_ino)) {
if (!IS_ERR(tmp_ino))
iput(tmp_ino);
ntfs_error(vol->sb, "Failed to load $Quota.");
return false;
}
vol->quota_ino = tmp_ino;
/* Get the $Q index allocation attribute. */
tmp_ino = ntfs_index_iget(vol->quota_ino, Q, 2);
if (IS_ERR(tmp_ino)) {
ntfs_error(vol->sb, "Failed to load $Quota/$Q index.");
return false;
}
vol->quota_q_ino = tmp_ino;
ntfs_debug("Done.");
return true;
}
/**
* load_and_init_usnjrnl - load and setup the transaction log if present
* @vol: ntfs super block describing device whose usnjrnl file to load
*
* Return 'true' on success or 'false' on error.
*
* If $UsnJrnl is not present or in the process of being disabled, we set
* NVolUsnJrnlStamped() and return success.
*
* If the $UsnJrnl $DATA/$J attribute has a size equal to the lowest valid usn,
* i.e. transaction logging has only just been enabled or the journal has been
* stamped and nothing has been logged since, we also set NVolUsnJrnlStamped()
* and return success.
*/
static bool load_and_init_usnjrnl(ntfs_volume *vol)
{
MFT_REF mref;
struct inode *tmp_ino;
ntfs_inode *tmp_ni;
struct page *page;
ntfs_name *name = NULL;
USN_HEADER *uh;
static const ntfschar UsnJrnl[9] = { cpu_to_le16('$'),
cpu_to_le16('U'), cpu_to_le16('s'),
cpu_to_le16('n'), cpu_to_le16('J'),
cpu_to_le16('r'), cpu_to_le16('n'),
cpu_to_le16('l'), 0 };
static ntfschar Max[5] = { cpu_to_le16('$'),
cpu_to_le16('M'), cpu_to_le16('a'),
cpu_to_le16('x'), 0 };
static ntfschar J[3] = { cpu_to_le16('$'),
cpu_to_le16('J'), 0 };
ntfs_debug("Entering.");
/*
* Find the inode number for the transaction log file by looking up the
* filename $UsnJrnl in the extended system files directory $Extend.
*/
mutex_lock(&vol->extend_ino->i_mutex);
mref = ntfs_lookup_inode_by_name(NTFS_I(vol->extend_ino), UsnJrnl, 8,
&name);
mutex_unlock(&vol->extend_ino->i_mutex);
if (IS_ERR_MREF(mref)) {
/*
* If the file does not exist, transaction logging is disabled, so
* just return success.
*/
if (MREF_ERR(mref) == -ENOENT) {
ntfs_debug("$UsnJrnl not present. Volume does not "
"have transaction logging enabled.");
not_enabled:
/*
* No need to try to stamp the transaction log if
* transaction logging is not enabled.
*/
NVolSetUsnJrnlStamped(vol);
return true;
}
/* A real error occurred. */
ntfs_error(vol->sb, "Failed to find inode number for "
"$UsnJrnl.");
return false;
}
/* We do not care for the type of match that was found. */
kfree(name);
/* Get the inode. */
tmp_ino = ntfs_iget(vol->sb, MREF(mref));
if (unlikely(IS_ERR(tmp_ino) || is_bad_inode(tmp_ino))) {
if (!IS_ERR(tmp_ino))
iput(tmp_ino);
ntfs_error(vol->sb, "Failed to load $UsnJrnl.");
return false;
}
vol->usnjrnl_ino = tmp_ino;
/*
* If the transaction log is in the process of being deleted, we can
* ignore it.
*/
if (unlikely(vol->vol_flags & VOLUME_DELETE_USN_UNDERWAY)) {
ntfs_debug("$UsnJrnl in the process of being disabled. "
"Volume does not have transaction logging "
"enabled.");
goto not_enabled;
}
/* Get the $DATA/$Max attribute. */
tmp_ino = ntfs_attr_iget(vol->usnjrnl_ino, AT_DATA, Max, 4);
if (IS_ERR(tmp_ino)) {
ntfs_error(vol->sb, "Failed to load $UsnJrnl/$DATA/$Max "
"attribute.");
return false;
}
vol->usnjrnl_max_ino = tmp_ino;
if (unlikely(i_size_read(tmp_ino) < sizeof(USN_HEADER))) {
ntfs_error(vol->sb, "Found corrupt $UsnJrnl/$DATA/$Max "
"attribute (size is 0x%llx but should be at "
"least 0x%zx bytes).", i_size_read(tmp_ino),
sizeof(USN_HEADER));
return false;
}
/* Get the $DATA/$J attribute. */
tmp_ino = ntfs_attr_iget(vol->usnjrnl_ino, AT_DATA, J, 2);
if (IS_ERR(tmp_ino)) {
ntfs_error(vol->sb, "Failed to load $UsnJrnl/$DATA/$J "
"attribute.");
return false;
}
vol->usnjrnl_j_ino = tmp_ino;
/* Verify $J is non-resident and sparse. */
tmp_ni = NTFS_I(vol->usnjrnl_j_ino);
if (unlikely(!NInoNonResident(tmp_ni) || !NInoSparse(tmp_ni))) {
ntfs_error(vol->sb, "$UsnJrnl/$DATA/$J attribute is resident "
"and/or not sparse.");
return false;
}
/* Read the USN_HEADER from $DATA/$Max. */
page = ntfs_map_page(vol->usnjrnl_max_ino->i_mapping, 0);
if (IS_ERR(page)) {
ntfs_error(vol->sb, "Failed to read from $UsnJrnl/$DATA/$Max "
"attribute.");
return false;
}
uh = (USN_HEADER*)page_address(page);
/* Sanity check the $Max. */
if (unlikely(sle64_to_cpu(uh->allocation_delta) >
sle64_to_cpu(uh->maximum_size))) {
ntfs_error(vol->sb, "Allocation delta (0x%llx) exceeds "
"maximum size (0x%llx). $UsnJrnl is corrupt.",
(long long)sle64_to_cpu(uh->allocation_delta),
(long long)sle64_to_cpu(uh->maximum_size));
ntfs_unmap_page(page);
return false;
}
/*
* If the transaction log has been stamped and nothing has been written
* to it since, we do not need to stamp it.
*/
if (unlikely(sle64_to_cpu(uh->lowest_valid_usn) >=
i_size_read(vol->usnjrnl_j_ino))) {
if (likely(sle64_to_cpu(uh->lowest_valid_usn) ==
i_size_read(vol->usnjrnl_j_ino))) {
ntfs_unmap_page(page);
ntfs_debug("$UsnJrnl is enabled but nothing has been "
"logged since it was last stamped. "
"Treating this as if the volume does "
"not have transaction logging "
"enabled.");
goto not_enabled;
}
ntfs_error(vol->sb, "$UsnJrnl has lowest valid usn (0x%llx) "
"which is out of bounds (0x%llx). $UsnJrnl "
"is corrupt.",
(long long)sle64_to_cpu(uh->lowest_valid_usn),
i_size_read(vol->usnjrnl_j_ino));
ntfs_unmap_page(page);
return false;
}
ntfs_unmap_page(page);
ntfs_debug("Done.");
return true;
}
/**
* load_and_init_attrdef - load the attribute definitions table for a volume
* @vol: ntfs super block describing device whose attrdef to load
*
* Return 'true' on success or 'false' on error.
*/
static bool load_and_init_attrdef(ntfs_volume *vol)
{
loff_t i_size;
struct super_block *sb = vol->sb;
struct inode *ino;
struct page *page;
pgoff_t index, max_index;
unsigned int size;
ntfs_debug("Entering.");
/* Read attrdef table and setup vol->attrdef and vol->attrdef_size. */
ino = ntfs_iget(sb, FILE_AttrDef);
if (IS_ERR(ino) || is_bad_inode(ino)) {
if (!IS_ERR(ino))
iput(ino);
goto failed;
}
NInoSetSparseDisabled(NTFS_I(ino));
/* The size of FILE_AttrDef must be above 0 and fit inside 31 bits. */
i_size = i_size_read(ino);
if (i_size <= 0 || i_size > 0x7fffffff)
goto iput_failed;
vol->attrdef = (ATTR_DEF*)ntfs_malloc_nofs(i_size);
if (!vol->attrdef)
goto iput_failed;
index = 0;
max_index = i_size >> PAGE_CACHE_SHIFT;
size = PAGE_CACHE_SIZE;
while (index < max_index) {
/* Read the attrdef table and copy it into the linear buffer. */
read_partial_attrdef_page:
page = ntfs_map_page(ino->i_mapping, index);
if (IS_ERR(page))
goto free_iput_failed;
memcpy((u8*)vol->attrdef + (index++ << PAGE_CACHE_SHIFT),
page_address(page), size);
ntfs_unmap_page(page);
}
if (size == PAGE_CACHE_SIZE) {
size = i_size & ~PAGE_CACHE_MASK;
if (size)
goto read_partial_attrdef_page;
}
vol->attrdef_size = i_size;
ntfs_debug("Read %llu bytes from $AttrDef.", i_size);
iput(ino);
return true;
free_iput_failed:
ntfs_free(vol->attrdef);
vol->attrdef = NULL;
iput_failed:
iput(ino);
failed:
ntfs_error(sb, "Failed to initialize attribute definition table.");
return false;
}
#endif /* NTFS_RW */
/**
* load_and_init_upcase - load the upcase table for an ntfs volume
* @vol: ntfs super block describing device whose upcase to load
*
* Return 'true' on success or 'false' on error.
*/
static bool load_and_init_upcase(ntfs_volume *vol)
{
loff_t i_size;
struct super_block *sb = vol->sb;
struct inode *ino;
struct page *page;
pgoff_t index, max_index;
unsigned int size;
int i, max;
ntfs_debug("Entering.");
/* Read upcase table and setup vol->upcase and vol->upcase_len. */
ino = ntfs_iget(sb, FILE_UpCase);
if (IS_ERR(ino) || is_bad_inode(ino)) {
if (!IS_ERR(ino))
iput(ino);
goto upcase_failed;
}
/*
* The upcase size must not be above 64k Unicode characters, must not
* be zero and must be a multiple of sizeof(ntfschar).
*/
i_size = i_size_read(ino);
if (!i_size || i_size & (sizeof(ntfschar) - 1) ||
i_size > 64ULL * 1024 * sizeof(ntfschar))
goto iput_upcase_failed;
vol->upcase = (ntfschar*)ntfs_malloc_nofs(i_size);
if (!vol->upcase)
goto iput_upcase_failed;
index = 0;
max_index = i_size >> PAGE_CACHE_SHIFT;
size = PAGE_CACHE_SIZE;
while (index < max_index) {
/* Read the upcase table and copy it into the linear buffer. */
read_partial_upcase_page:
page = ntfs_map_page(ino->i_mapping, index);
if (IS_ERR(page))
goto iput_upcase_failed;
memcpy((char*)vol->upcase + (index++ << PAGE_CACHE_SHIFT),
page_address(page), size);
ntfs_unmap_page(page);
}
if (size == PAGE_CACHE_SIZE) {
size = i_size & ~PAGE_CACHE_MASK;
if (size)
goto read_partial_upcase_page;
}
vol->upcase_len = i_size >> UCHAR_T_SIZE_BITS;
ntfs_debug("Read %llu bytes from $UpCase (expected %zu bytes).",
i_size, 64 * 1024 * sizeof(ntfschar));
iput(ino);
mutex_lock(&ntfs_lock);
if (!default_upcase) {
ntfs_debug("Using volume specified $UpCase since default is "
"not present.");
mutex_unlock(&ntfs_lock);
return true;
}
max = default_upcase_len;
if (max > vol->upcase_len)
max = vol->upcase_len;
for (i = 0; i < max; i++)
if (vol->upcase[i] != default_upcase[i])
break;
if (i == max) {
ntfs_free(vol->upcase);
vol->upcase = default_upcase;
vol->upcase_len = max;
ntfs_nr_upcase_users++;
mutex_unlock(&ntfs_lock);
ntfs_debug("Volume specified $UpCase matches default. Using "
"default.");
return true;
}
mutex_unlock(&ntfs_lock);
ntfs_debug("Using volume specified $UpCase since it does not match "
"the default.");
return true;
iput_upcase_failed:
iput(ino);
ntfs_free(vol->upcase);
vol->upcase = NULL;
upcase_failed:
mutex_lock(&ntfs_lock);
if (default_upcase) {
vol->upcase = default_upcase;
vol->upcase_len = default_upcase_len;
ntfs_nr_upcase_users++;
mutex_unlock(&ntfs_lock);
ntfs_error(sb, "Failed to load $UpCase from the volume. Using "
"default.");
return true;
}
mutex_unlock(&ntfs_lock);
ntfs_error(sb, "Failed to initialize upcase table.");
return false;
}
/*
* The lcn and mft bitmap inodes are NTFS-internal inodes with
* their own special locking rules:
*/
static struct lock_class_key
lcnbmp_runlist_lock_key, lcnbmp_mrec_lock_key,
mftbmp_runlist_lock_key, mftbmp_mrec_lock_key;
/**
* load_system_files - open the system files using normal functions
* @vol: ntfs super block describing device whose system files to load
*
* Open the system files with normal access functions and complete setting up
* the ntfs super block @vol.
*
* Return 'true' on success or 'false' on error.
*/
static bool load_system_files(ntfs_volume *vol)
{
struct super_block *sb = vol->sb;
MFT_RECORD *m;
VOLUME_INFORMATION *vi;
ntfs_attr_search_ctx *ctx;
#ifdef NTFS_RW
RESTART_PAGE_HEADER *rp;
int err;
#endif /* NTFS_RW */
ntfs_debug("Entering.");
#ifdef NTFS_RW
/* Get mft mirror inode and compare the contents of $MFT and $MFTMirr. */
if (!load_and_init_mft_mirror(vol) || !check_mft_mirror(vol)) {
static const char *es1 = "Failed to load $MFTMirr";
static const char *es2 = "$MFTMirr does not match $MFT";
static const char *es3 = ". Run ntfsfix and/or chkdsk.";
/* If a read-write mount, convert it to a read-only mount. */
if (!(sb->s_flags & MS_RDONLY)) {
if (!(vol->on_errors & (ON_ERRORS_REMOUNT_RO |
ON_ERRORS_CONTINUE))) {
ntfs_error(sb, "%s and neither on_errors="
"continue nor on_errors="
"remount-ro was specified%s",
!vol->mftmirr_ino ? es1 : es2,
es3);
goto iput_mirr_err_out;
}
sb->s_flags |= MS_RDONLY;
ntfs_error(sb, "%s. Mounting read-only%s",
!vol->mftmirr_ino ? es1 : es2, es3);
} else
ntfs_warning(sb, "%s. Will not be able to remount "
"read-write%s",
!vol->mftmirr_ino ? es1 : es2, es3);
/* This will prevent a read-write remount. */
NVolSetErrors(vol);
}
#endif /* NTFS_RW */
/* Get mft bitmap attribute inode. */
vol->mftbmp_ino = ntfs_attr_iget(vol->mft_ino, AT_BITMAP, NULL, 0);
if (IS_ERR(vol->mftbmp_ino)) {
ntfs_error(sb, "Failed to load $MFT/$BITMAP attribute.");
goto iput_mirr_err_out;
}
lockdep_set_class(&NTFS_I(vol->mftbmp_ino)->runlist.lock,
&mftbmp_runlist_lock_key);
lockdep_set_class(&NTFS_I(vol->mftbmp_ino)->mrec_lock,
&mftbmp_mrec_lock_key);
/* Read upcase table and setup @vol->upcase and @vol->upcase_len. */
if (!load_and_init_upcase(vol))
goto iput_mftbmp_err_out;
#ifdef NTFS_RW
/*
* Read attribute definitions table and setup @vol->attrdef and
* @vol->attrdef_size.
*/
if (!load_and_init_attrdef(vol))
goto iput_upcase_err_out;
#endif /* NTFS_RW */
/*
* Get the cluster allocation bitmap inode and verify the size. No
* locking is needed at this stage as we are running exclusively,
* being the mount-in-progress task.
*/
vol->lcnbmp_ino = ntfs_iget(sb, FILE_Bitmap);
if (IS_ERR(vol->lcnbmp_ino) || is_bad_inode(vol->lcnbmp_ino)) {
if (!IS_ERR(vol->lcnbmp_ino))
iput(vol->lcnbmp_ino);
goto bitmap_failed;
}
lockdep_set_class(&NTFS_I(vol->lcnbmp_ino)->runlist.lock,
&lcnbmp_runlist_lock_key);
lockdep_set_class(&NTFS_I(vol->lcnbmp_ino)->mrec_lock,
&lcnbmp_mrec_lock_key);
NInoSetSparseDisabled(NTFS_I(vol->lcnbmp_ino));
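/* The bitmap needs one bit per cluster, i.e. ceil(nr_clusters / 8) bytes. */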
if ((vol->nr_clusters + 7) >> 3 > i_size_read(vol->lcnbmp_ino)) {
iput(vol->lcnbmp_ino);
bitmap_failed:
ntfs_error(sb, "Failed to load $Bitmap.");
goto iput_attrdef_err_out;
}
/*
* Get the volume inode and setup our cache of the volume flags and
* version.
*/
vol->vol_ino = ntfs_iget(sb, FILE_Volume);
if (IS_ERR(vol->vol_ino) || is_bad_inode(vol->vol_ino)) {
if (!IS_ERR(vol->vol_ino))
iput(vol->vol_ino);
volume_failed:
ntfs_error(sb, "Failed to load $Volume.");
goto iput_lcnbmp_err_out;
}
m = map_mft_record(NTFS_I(vol->vol_ino));
if (IS_ERR(m)) {
iput_volume_failed:
iput(vol->vol_ino);
goto volume_failed;
}
if (!(ctx = ntfs_attr_get_search_ctx(NTFS_I(vol->vol_ino), m))) {
ntfs_error(sb, "Failed to get attribute search context.");
goto get_ctx_vol_failed;
}
if (ntfs_attr_lookup(AT_VOLUME_INFORMATION, NULL, 0, 0, 0, NULL, 0,
ctx) || ctx->attr->non_resident || ctx->attr->flags) {
err_put_vol:
ntfs_attr_put_search_ctx(ctx);
get_ctx_vol_failed:
unmap_mft_record(NTFS_I(vol->vol_ino));
goto iput_volume_failed;
}
vi = (VOLUME_INFORMATION*)((char*)ctx->attr +
le16_to_cpu(ctx->attr->data.resident.value_offset));
/* Some bounds checks. */
if ((u8*)vi < (u8*)ctx->attr || (u8*)vi +
le32_to_cpu(ctx->attr->data.resident.value_length) >
(u8*)ctx->attr + le32_to_cpu(ctx->attr->length))
goto err_put_vol;
/* Copy the volume flags and version to the ntfs_volume structure. */
vol->vol_flags = vi->flags;
vol->major_ver = vi->major_ver;
vol->minor_ver = vi->minor_ver;
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(NTFS_I(vol->vol_ino));
printk(KERN_INFO "NTFS volume version %i.%i.\n", vol->major_ver,
vol->minor_ver);
if (vol->major_ver < 3 && NVolSparseEnabled(vol)) {
ntfs_warning(vol->sb, "Disabling sparse support due to NTFS "
"volume version %i.%i (need at least version "
"3.0).", vol->major_ver, vol->minor_ver);
NVolClearSparseEnabled(vol);
}
#ifdef NTFS_RW
/* Make sure that no unsupported volume flags are set. */
if (vol->vol_flags & VOLUME_MUST_MOUNT_RO_MASK) {
static const char *es1a = "Volume is dirty";
static const char *es1b = "Volume has been modified by chkdsk";
static const char *es1c = "Volume has unsupported flags set";
static const char *es2a = ". Run chkdsk and mount in Windows.";
static const char *es2b = ". Mount in Windows.";
const char *es1, *es2;
es2 = es2a;
if (vol->vol_flags & VOLUME_IS_DIRTY)
es1 = es1a;
else if (vol->vol_flags & VOLUME_MODIFIED_BY_CHKDSK) {
es1 = es1b;
es2 = es2b;
} else {
es1 = es1c;
ntfs_warning(sb, "Unsupported volume flags 0x%x "
"encountered.",
(unsigned)le16_to_cpu(vol->vol_flags));
}
/* If a read-write mount, convert it to a read-only mount. */
if (!(sb->s_flags & MS_RDONLY)) {
if (!(vol->on_errors & (ON_ERRORS_REMOUNT_RO |
ON_ERRORS_CONTINUE))) {
ntfs_error(sb, "%s and neither on_errors="
"continue nor on_errors="
"remount-ro was specified%s",
es1, es2);
goto iput_vol_err_out;
}
sb->s_flags |= MS_RDONLY;
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
} else
ntfs_warning(sb, "%s. Will not be able to remount "
"read-write%s", es1, es2);
/*
* Do not set NVolErrors() because ntfs_remount() re-checks the
* flags which we need to do in case any flags have changed.
*/
}
/*
* Get the inode for the logfile, check it and determine if the volume
* was shutdown cleanly.
*/
rp = NULL;
if (!load_and_check_logfile(vol, &rp) ||
!ntfs_is_logfile_clean(vol->logfile_ino, rp)) {
static const char *es1a = "Failed to load $LogFile";
static const char *es1b = "$LogFile is not clean";
static const char *es2 = ". Mount in Windows.";
const char *es1;
es1 = !vol->logfile_ino ? es1a : es1b;
/* If a read-write mount, convert it to a read-only mount. */
if (!(sb->s_flags & MS_RDONLY)) {
if (!(vol->on_errors & (ON_ERRORS_REMOUNT_RO |
ON_ERRORS_CONTINUE))) {
ntfs_error(sb, "%s and neither on_errors="
"continue nor on_errors="
"remount-ro was specified%s",
es1, es2);
if (vol->logfile_ino) {
BUG_ON(!rp);
ntfs_free(rp);
}
goto iput_logfile_err_out;
}
sb->s_flags |= MS_RDONLY;
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
} else
ntfs_warning(sb, "%s. Will not be able to remount "
"read-write%s", es1, es2);
/* This will prevent a read-write remount. */
NVolSetErrors(vol);
}
ntfs_free(rp);
#endif /* NTFS_RW */
/* Get the root directory inode so we can do path lookups. */
vol->root_ino = ntfs_iget(sb, FILE_root);
if (IS_ERR(vol->root_ino) || is_bad_inode(vol->root_ino)) {
if (!IS_ERR(vol->root_ino))
iput(vol->root_ino);
ntfs_error(sb, "Failed to load root directory.");
goto iput_logfile_err_out;
}
#ifdef NTFS_RW
/*
* Check if Windows is suspended to disk on the target volume. If it
* is hibernated, we must not write *anything* to the disk so set
* NVolErrors() without setting the dirty volume flag and mount
* read-only. This will prevent read-write remounting and it will also
* prevent all writes.
*/
err = check_windows_hibernation_status(vol);
if (unlikely(err)) {
static const char *es1a = "Failed to determine if Windows is "
"hibernated";
static const char *es1b = "Windows is hibernated";
static const char *es2 = ". Run chkdsk.";
const char *es1;
es1 = err < 0 ? es1a : es1b;
/* If a read-write mount, convert it to a read-only mount. */
if (!(sb->s_flags & MS_RDONLY)) {
if (!(vol->on_errors & (ON_ERRORS_REMOUNT_RO |
ON_ERRORS_CONTINUE))) {
ntfs_error(sb, "%s and neither on_errors="
"continue nor on_errors="
"remount-ro was specified%s",
es1, es2);
goto iput_root_err_out;
}
sb->s_flags |= MS_RDONLY;
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
} else
ntfs_warning(sb, "%s. Will not be able to remount "
"read-write%s", es1, es2);
/* This will prevent a read-write remount. */
NVolSetErrors(vol);
}
/* If (still) a read-write mount, mark the volume dirty. */
if (!(sb->s_flags & MS_RDONLY) &&
ntfs_set_volume_flags(vol, VOLUME_IS_DIRTY)) {
static const char *es1 = "Failed to set dirty bit in volume "
"information flags";
static const char *es2 = ". Run chkdsk.";
/* Convert to a read-only mount. */
if (!(vol->on_errors & (ON_ERRORS_REMOUNT_RO |
ON_ERRORS_CONTINUE))) {
ntfs_error(sb, "%s and neither on_errors=continue nor "
"on_errors=remount-ro was specified%s",
es1, es2);
goto iput_root_err_out;
}
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
sb->s_flags |= MS_RDONLY;
/*
* Do not set NVolErrors() because ntfs_remount() might manage
* to set the dirty flag in which case all would be well.
*/
}
#if 0
// TODO: Enable this code once we start modifying anything that is
// different between NTFS 1.2 and 3.x...
/*
* If (still) a read-write mount, set the NT4 compatibility flag on
* newer NTFS version volumes.
*/
if (!(sb->s_flags & MS_RDONLY) && (vol->major_ver > 1) &&
ntfs_set_volume_flags(vol, VOLUME_MOUNTED_ON_NT4)) {
static const char *es1 = "Failed to set NT4 compatibility flag";
static const char *es2 = ". Run chkdsk.";
/* Convert to a read-only mount. */
if (!(vol->on_errors & (ON_ERRORS_REMOUNT_RO |
ON_ERRORS_CONTINUE))) {
ntfs_error(sb, "%s and neither on_errors=continue nor "
"on_errors=remount-ro was specified%s",
es1, es2);
goto iput_root_err_out;
}
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
sb->s_flags |= MS_RDONLY;
NVolSetErrors(vol);
}
#endif
/* If (still) a read-write mount, empty the logfile. */
if (!(sb->s_flags & MS_RDONLY) &&
!ntfs_empty_logfile(vol->logfile_ino)) {
static const char *es1 = "Failed to empty $LogFile";
static const char *es2 = ". Mount in Windows.";
/* Convert to a read-only mount. */
if (!(vol->on_errors & (ON_ERRORS_REMOUNT_RO |
ON_ERRORS_CONTINUE))) {
ntfs_error(sb, "%s and neither on_errors=continue nor "
"on_errors=remount-ro was specified%s",
es1, es2);
goto iput_root_err_out;
}
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
sb->s_flags |= MS_RDONLY;
NVolSetErrors(vol);
}
#endif /* NTFS_RW */
/* If on NTFS versions before 3.0, we are done. */
if (unlikely(vol->major_ver < 3))
return true;
/* NTFS 3.0+ specific initialization. */
/* Get the security descriptors inode. */
vol->secure_ino = ntfs_iget(sb, FILE_Secure);
if (IS_ERR(vol->secure_ino) || is_bad_inode(vol->secure_ino)) {
if (!IS_ERR(vol->secure_ino))
iput(vol->secure_ino);
ntfs_error(sb, "Failed to load $Secure.");
goto iput_root_err_out;
}
// TODO: Initialize security.
/* Get the extended system files' directory inode. */
vol->extend_ino = ntfs_iget(sb, FILE_Extend);
if (IS_ERR(vol->extend_ino) || is_bad_inode(vol->extend_ino)) {
if (!IS_ERR(vol->extend_ino))
iput(vol->extend_ino);
ntfs_error(sb, "Failed to load $Extend.");
goto iput_sec_err_out;
}
#ifdef NTFS_RW
/* Find the quota file, load it if present, and set it up. */
if (!load_and_init_quota(vol)) {
static const char *es1 = "Failed to load $Quota";
static const char *es2 = ". Run chkdsk.";
/* If a read-write mount, convert it to a read-only mount. */
if (!(sb->s_flags & MS_RDONLY)) {
if (!(vol->on_errors & (ON_ERRORS_REMOUNT_RO |
ON_ERRORS_CONTINUE))) {
ntfs_error(sb, "%s and neither on_errors="
"continue nor on_errors="
"remount-ro was specified%s",
es1, es2);
goto iput_quota_err_out;
}
sb->s_flags |= MS_RDONLY;
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
} else
ntfs_warning(sb, "%s. Will not be able to remount "
"read-write%s", es1, es2);
/* This will prevent a read-write remount. */
NVolSetErrors(vol);
}
/* If (still) a read-write mount, mark the quotas out of date. */
if (!(sb->s_flags & MS_RDONLY) &&
!ntfs_mark_quotas_out_of_date(vol)) {
static const char *es1 = "Failed to mark quotas out of date";
static const char *es2 = ". Run chkdsk.";
/* Convert to a read-only mount. */
if (!(vol->on_errors & (ON_ERRORS_REMOUNT_RO |
ON_ERRORS_CONTINUE))) {
ntfs_error(sb, "%s and neither on_errors=continue nor "
"on_errors=remount-ro was specified%s",
es1, es2);
goto iput_quota_err_out;
}
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
sb->s_flags |= MS_RDONLY;
NVolSetErrors(vol);
}
/*
* Find the transaction log file ($UsnJrnl), load it if present, check
* it, and set it up.
*/
if (!load_and_init_usnjrnl(vol)) {
static const char *es1 = "Failed to load $UsnJrnl";
static const char *es2 = ". Run chkdsk.";
/* If a read-write mount, convert it to a read-only mount. */
if (!(sb->s_flags & MS_RDONLY)) {
if (!(vol->on_errors & (ON_ERRORS_REMOUNT_RO |
ON_ERRORS_CONTINUE))) {
ntfs_error(sb, "%s and neither on_errors="
"continue nor on_errors="
"remount-ro was specified%s",
es1, es2);
goto iput_usnjrnl_err_out;
}
sb->s_flags |= MS_RDONLY;
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
} else
ntfs_warning(sb, "%s. Will not be able to remount "
"read-write%s", es1, es2);
/* This will prevent a read-write remount. */
NVolSetErrors(vol);
}
/* If (still) a read-write mount, stamp the transaction log. */
if (!(sb->s_flags & MS_RDONLY) && !ntfs_stamp_usnjrnl(vol)) {
static const char *es1 = "Failed to stamp transaction log "
"($UsnJrnl)";
static const char *es2 = ". Run chkdsk.";
/* Convert to a read-only mount. */
if (!(vol->on_errors & (ON_ERRORS_REMOUNT_RO |
ON_ERRORS_CONTINUE))) {
ntfs_error(sb, "%s and neither on_errors=continue nor "
"on_errors=remount-ro was specified%s",
es1, es2);
goto iput_usnjrnl_err_out;
}
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
sb->s_flags |= MS_RDONLY;
NVolSetErrors(vol);
}
#endif /* NTFS_RW */
return true;
#ifdef NTFS_RW
iput_usnjrnl_err_out:
if (vol->usnjrnl_j_ino)
iput(vol->usnjrnl_j_ino);
if (vol->usnjrnl_max_ino)
iput(vol->usnjrnl_max_ino);
if (vol->usnjrnl_ino)
iput(vol->usnjrnl_ino);
iput_quota_err_out:
if (vol->quota_q_ino)
iput(vol->quota_q_ino);
if (vol->quota_ino)
iput(vol->quota_ino);
iput(vol->extend_ino);
#endif /* NTFS_RW */
iput_sec_err_out:
iput(vol->secure_ino);
iput_root_err_out:
iput(vol->root_ino);
iput_logfile_err_out:
#ifdef NTFS_RW
if (vol->logfile_ino)
iput(vol->logfile_ino);
iput_vol_err_out:
#endif /* NTFS_RW */
iput(vol->vol_ino);
iput_lcnbmp_err_out:
iput(vol->lcnbmp_ino);
iput_attrdef_err_out:
vol->attrdef_size = 0;
if (vol->attrdef) {
ntfs_free(vol->attrdef);
vol->attrdef = NULL;
}
#ifdef NTFS_RW
iput_upcase_err_out:
#endif /* NTFS_RW */
vol->upcase_len = 0;
mutex_lock(&ntfs_lock);
if (vol->upcase == default_upcase) {
ntfs_nr_upcase_users--;
vol->upcase = NULL;
}
mutex_unlock(&ntfs_lock);
if (vol->upcase) {
ntfs_free(vol->upcase);
vol->upcase = NULL;
}
iput_mftbmp_err_out:
iput(vol->mftbmp_ino);
iput_mirr_err_out:
#ifdef NTFS_RW
if (vol->mftmirr_ino)
iput(vol->mftmirr_ino);
#endif /* NTFS_RW */
return false;
}
/**
* ntfs_put_super - called by the vfs to unmount a volume
* @sb: vfs superblock of volume to unmount
*
* ntfs_put_super() is called by the VFS (from fs/super.c::do_umount()) when
* the volume is being unmounted (umount system call has been invoked) and it
* releases all inodes and memory belonging to the NTFS specific part of the
* super block.
*/
static void ntfs_put_super(struct super_block *sb)
{
ntfs_volume *vol = NTFS_SB(sb);
ntfs_debug("Entering.");
#ifdef NTFS_RW
/*
* Commit all inodes while they are still open in case some of them
* cause others to be dirtied.
*/
ntfs_commit_inode(vol->vol_ino);
/* NTFS 3.0+ specific. */
if (vol->major_ver >= 3) {
if (vol->usnjrnl_j_ino)
ntfs_commit_inode(vol->usnjrnl_j_ino);
if (vol->usnjrnl_max_ino)
ntfs_commit_inode(vol->usnjrnl_max_ino);
if (vol->usnjrnl_ino)
ntfs_commit_inode(vol->usnjrnl_ino);
if (vol->quota_q_ino)
ntfs_commit_inode(vol->quota_q_ino);
if (vol->quota_ino)
ntfs_commit_inode(vol->quota_ino);
if (vol->extend_ino)
ntfs_commit_inode(vol->extend_ino);
if (vol->secure_ino)
ntfs_commit_inode(vol->secure_ino);
}
ntfs_commit_inode(vol->root_ino);
down_write(&vol->lcnbmp_lock);
ntfs_commit_inode(vol->lcnbmp_ino);
up_write(&vol->lcnbmp_lock);
down_write(&vol->mftbmp_lock);
ntfs_commit_inode(vol->mftbmp_ino);
up_write(&vol->mftbmp_lock);
if (vol->logfile_ino)
ntfs_commit_inode(vol->logfile_ino);
if (vol->mftmirr_ino)
ntfs_commit_inode(vol->mftmirr_ino);
ntfs_commit_inode(vol->mft_ino);
/*
* If a read-write mount and no volume errors have occurred, mark the
* volume clean. Also, re-commit all affected inodes.
*/
if (!(sb->s_flags & MS_RDONLY)) {
if (!NVolErrors(vol)) {
if (ntfs_clear_volume_flags(vol, VOLUME_IS_DIRTY))
ntfs_warning(sb, "Failed to clear dirty bit "
"in volume information "
"flags. Run chkdsk.");
ntfs_commit_inode(vol->vol_ino);
ntfs_commit_inode(vol->root_ino);
if (vol->mftmirr_ino)
ntfs_commit_inode(vol->mftmirr_ino);
ntfs_commit_inode(vol->mft_ino);
} else {
ntfs_warning(sb, "Volume has errors. Leaving volume "
"marked dirty. Run chkdsk.");
}
}
#endif /* NTFS_RW */
iput(vol->vol_ino);
vol->vol_ino = NULL;
/* NTFS 3.0+ specific clean up. */
if (vol->major_ver >= 3) {
#ifdef NTFS_RW
if (vol->usnjrnl_j_ino) {
iput(vol->usnjrnl_j_ino);
vol->usnjrnl_j_ino = NULL;
}
if (vol->usnjrnl_max_ino) {
iput(vol->usnjrnl_max_ino);
vol->usnjrnl_max_ino = NULL;
}
if (vol->usnjrnl_ino) {
iput(vol->usnjrnl_ino);
vol->usnjrnl_ino = NULL;
}
if (vol->quota_q_ino) {
iput(vol->quota_q_ino);
vol->quota_q_ino = NULL;
}
if (vol->quota_ino) {
iput(vol->quota_ino);
vol->quota_ino = NULL;
}
#endif /* NTFS_RW */
if (vol->extend_ino) {
iput(vol->extend_ino);
vol->extend_ino = NULL;
}
if (vol->secure_ino) {
iput(vol->secure_ino);
vol->secure_ino = NULL;
}
}
iput(vol->root_ino);
vol->root_ino = NULL;
down_write(&vol->lcnbmp_lock);
iput(vol->lcnbmp_ino);
vol->lcnbmp_ino = NULL;
up_write(&vol->lcnbmp_lock);
down_write(&vol->mftbmp_lock);
iput(vol->mftbmp_ino);
vol->mftbmp_ino = NULL;
up_write(&vol->mftbmp_lock);
#ifdef NTFS_RW
if (vol->logfile_ino) {
iput(vol->logfile_ino);
vol->logfile_ino = NULL;
}
if (vol->mftmirr_ino) {
/* Re-commit the mft mirror and mft just in case. */
ntfs_commit_inode(vol->mftmirr_ino);
ntfs_commit_inode(vol->mft_ino);
iput(vol->mftmirr_ino);
vol->mftmirr_ino = NULL;
}
/*
* We should have no dirty inodes left, due to
* mft.c::ntfs_mft_writepage() cleaning all the dirty pages as
* the underlying mft records are written out and cleaned.
*/
ntfs_commit_inode(vol->mft_ino);
write_inode_now(vol->mft_ino, 1);
#endif /* NTFS_RW */
iput(vol->mft_ino);
vol->mft_ino = NULL;
/* Throw away the table of attribute definitions. */
vol->attrdef_size = 0;
if (vol->attrdef) {
ntfs_free(vol->attrdef);
vol->attrdef = NULL;
}
vol->upcase_len = 0;
/*
* Destroy the global default upcase table if necessary. Also decrease
* the number of upcase users if we are a user.
*/
mutex_lock(&ntfs_lock);
if (vol->upcase == default_upcase) {
ntfs_nr_upcase_users--;
vol->upcase = NULL;
}
if (!ntfs_nr_upcase_users && default_upcase) {
ntfs_free(default_upcase);
default_upcase = NULL;
}
if (vol->cluster_size <= 4096 && !--ntfs_nr_compression_users)
free_compression_buffers();
mutex_unlock(&ntfs_lock);
if (vol->upcase) {
ntfs_free(vol->upcase);
vol->upcase = NULL;
}
unload_nls(vol->nls_map);
sb->s_fs_info = NULL;
kfree(vol);
}
/**
* get_nr_free_clusters - return the number of free clusters on a volume
* @vol: ntfs volume for which to obtain free cluster count
*
* Calculate the number of free clusters on the mounted NTFS volume @vol. We
* actually calculate the number of clusters in use instead because this
* allows us to not care about partial pages as these will be just zero filled
* and hence not be counted as allocated clusters.
*
* The only particularity is that clusters beyond the end of the logical ntfs
* volume will be marked as allocated to prevent errors, which means we have to
* discount those at the end. This is important as the cluster bitmap always
* has a size in multiples of 8 bytes, i.e. up to 63 bits could cover clusters
* outside the logical volume and be marked in use even though the clusters do
* not exist.
*
* If any pages cannot be read we assume all clusters in the erroring pages are
* in use. This means we return an underestimate on errors which is better than
* an overestimate.
*/
static s64 get_nr_free_clusters(ntfs_volume *vol)
{
s64 nr_free = vol->nr_clusters;
struct address_space *mapping = vol->lcnbmp_ino->i_mapping;
struct page *page;
pgoff_t index, max_index;
ntfs_debug("Entering.");
/* Serialize accesses to the cluster bitmap. */
down_read(&vol->lcnbmp_lock);
/*
* Convert the number of bits into bytes rounded up, then convert into
* multiples of PAGE_CACHE_SIZE, rounding up so that if we have one
* full and one partial page max_index = 2.
*/
max_index = (((vol->nr_clusters + 7) >> 3) + PAGE_CACHE_SIZE - 1) >>
PAGE_CACHE_SHIFT;
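/*
 * E.g. 0x10000 clusters need 0x2000 bitmap bytes, which with 4kiB
 * pages gives max_index = 2.
 */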
/* Use multiples of 4 bytes, thus max_size is PAGE_CACHE_SIZE / 4. */
ntfs_debug("Reading $Bitmap, max_index = 0x%lx, max_size = 0x%lx.",
max_index, PAGE_CACHE_SIZE / 4);
for (index = 0; index < max_index; index++) {
unsigned long *kaddr;
/*
* Read the page from page cache, getting it from backing store
* if necessary, and increment the use count.
*/
page = read_mapping_page(mapping, index, NULL);
/* Ignore pages which errored synchronously. */
if (IS_ERR(page)) {
ntfs_debug("read_mapping_page() error. Skipping "
"page (index 0x%lx).", index);
nr_free -= PAGE_CACHE_SIZE * 8;
continue;
}
kaddr = kmap_atomic(page);
/*
* Subtract the number of set bits. If this
* is the last page and it is partial we don't really care as
* it just means we do a little extra work but it won't affect
* the result as all out of range bytes are set to zero by
* ntfs_readpage().
*/
nr_free -= bitmap_weight(kaddr,
PAGE_CACHE_SIZE * BITS_PER_BYTE);
kunmap_atomic(kaddr);
page_cache_release(page);
}
ntfs_debug("Finished reading $Bitmap, last index = 0x%lx.", index - 1);
/*
* Fixup for eventual bits outside logical ntfs volume (see function
* description above).
*/
if (vol->nr_clusters & 63)
nr_free += 64 - (vol->nr_clusters & 63);
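/*
 * E.g. a 100 cluster volume has 100 & 63 = 36 valid bits in its last
 * bitmap word, so the 64 - 36 = 28 padding bits subtracted as in use
 * above are added back here.
 */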
up_read(&vol->lcnbmp_lock);
/* If errors occurred we may well have gone below zero, fix this. */
if (nr_free < 0)
nr_free = 0;
ntfs_debug("Exiting.");
return nr_free;
}
/**
* __get_nr_free_mft_records - return the number of free inodes on a volume
* @vol: ntfs volume for which to obtain free inode count
* @nr_free: number of mft records in filesystem
* @max_index: maximum number of pages containing set bits
*
* Calculate the number of free mft records (inodes) on the mounted NTFS
* volume @vol. We actually calculate the number of mft records in use instead
* because this allows us to not care about partial pages as these will be just
* zero filled and hence not be counted as allocated mft record.
*
* If any pages cannot be read we assume all mft records in the erroring pages
* are in use. This means we return an underestimate on errors which is better
* than an overestimate.
*
* NOTE: Caller must hold mftbmp_lock rw_semaphore for reading or writing.
*/
static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
s64 nr_free, const pgoff_t max_index)
{
struct address_space *mapping = vol->mftbmp_ino->i_mapping;
struct page *page;
pgoff_t index;
ntfs_debug("Entering.");
/* Use multiples of 4 bytes, thus max_size is PAGE_CACHE_SIZE / 4. */
ntfs_debug("Reading $MFT/$BITMAP, max_index = 0x%lx, max_size = "
"0x%lx.", max_index, PAGE_CACHE_SIZE / 4);
for (index = 0; index < max_index; index++) {
unsigned long *kaddr;
/*
* Read the page from page cache, getting it from backing store
* if necessary, and increment the use count.
*/
page = read_mapping_page(mapping, index, NULL);
/* Ignore pages which errored synchronously. */
if (IS_ERR(page)) {
ntfs_debug("read_mapping_page() error. Skipping "
"page (index 0x%lx).", index);
nr_free -= PAGE_CACHE_SIZE * 8;
continue;
}
kaddr = kmap_atomic(page);
/*
* Subtract the number of set bits. If this
* is the last page and it is partial we don't really care as
* it just means we do a little extra work but it won't affect
* the result as all out of range bytes are set to zero by
* ntfs_readpage().
*/
nr_free -= bitmap_weight(kaddr,
PAGE_CACHE_SIZE * BITS_PER_BYTE);
kunmap_atomic(kaddr);
page_cache_release(page);
}
ntfs_debug("Finished reading $MFT/$BITMAP, last index = 0x%lx.",
index - 1);
/* If errors occurred we may well have gone below zero, fix this. */
if (nr_free < 0)
nr_free = 0;
ntfs_debug("Exiting.");
return nr_free;
}
/**
* ntfs_statfs - return information about mounted NTFS volume
* @dentry: dentry from mounted volume
* @sfs: statfs structure in which to return the information
*
* Return information about the mounted NTFS volume @dentry in the statfs structure
* pointed to by @sfs (this is initialized with zeros before ntfs_statfs is
called). We interpret the values to be correct as of the moment in time at
* which we are called. Most values are variable otherwise and this isn't just
* the free values but the totals as well. For example we can increase the
* total number of file nodes if we run out and we can keep doing this until
* there is no more space on the volume left at all.
*
* Called from vfs_statfs which is used to handle the statfs, fstatfs, and
* ustat system calls.
*
* Return 0 on success or -errno on error.
*/
static int ntfs_statfs(struct dentry *dentry, struct kstatfs *sfs)
{
struct super_block *sb = dentry->d_sb;
s64 size;
ntfs_volume *vol = NTFS_SB(sb);
ntfs_inode *mft_ni = NTFS_I(vol->mft_ino);
pgoff_t max_index;
unsigned long flags;
ntfs_debug("Entering.");
/* Type of filesystem. */
sfs->f_type = NTFS_SB_MAGIC;
/* Optimal transfer block size. */
sfs->f_bsize = PAGE_CACHE_SIZE;
/*
* Total data blocks in filesystem in units of f_bsize and since
* inodes are also stored in data blocks ($MFT is a file), this is just
* the total clusters.
*/
sfs->f_blocks = vol->nr_clusters << vol->cluster_size_bits >>
PAGE_CACHE_SHIFT;
/* Free data blocks in filesystem in units of f_bsize. */
size = get_nr_free_clusters(vol) << vol->cluster_size_bits >>
PAGE_CACHE_SHIFT;
if (size < 0LL)
size = 0LL;
/* Free blocks avail to non-superuser, same as above on NTFS. */
sfs->f_bavail = sfs->f_bfree = size;
/* Serialize accesses to the inode bitmap. */
down_read(&vol->mftbmp_lock);
read_lock_irqsave(&mft_ni->size_lock, flags);
size = i_size_read(vol->mft_ino) >> vol->mft_record_size_bits;
/*
* Convert the maximum number of set bits into bytes rounded up, then
* convert into multiples of PAGE_CACHE_SIZE, rounding up so that if we
* have one full and one partial page max_index = 2.
*/
max_index = ((((mft_ni->initialized_size >> vol->mft_record_size_bits)
+ 7) >> 3) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
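/*
 * Illustrative numbers: with 1 KiB mft records
 * (mft_record_size_bits == 10) and initialized_size == 4 MiB there
 * are 4096 records, the bitmap needs (4096 + 7) >> 3 == 512 bytes,
 * and with 4 KiB pages max_index == (512 + 4095) >> 12 == 1.
 */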
read_unlock_irqrestore(&mft_ni->size_lock, flags);
/* Number of inodes in filesystem (at this point in time). */
sfs->f_files = size;
/* Free inodes in fs (based on current total count). */
sfs->f_ffree = __get_nr_free_mft_records(vol, size, max_index);
up_read(&vol->mftbmp_lock);
/*
* File system id. This is extremely *nix flavour dependent and even
* within Linux itself all fs do their own thing. I interpret this to
* mean a unique id associated with the mounted fs and not the id
* associated with the filesystem driver, the latter is already given
* by the filesystem type in sfs->f_type. Thus we use the 64-bit
* volume serial number splitting it into two 32-bit parts. We enter
* the least significant 32-bits in f_fsid[0] and the most significant
* 32-bits in f_fsid[1].
*/
sfs->f_fsid.val[0] = vol->serial_no & 0xffffffff;
sfs->f_fsid.val[1] = (vol->serial_no >> 32) & 0xffffffff;
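/*
 * For example, a volume serial number of 0x1234567890abcdef yields
 * f_fsid.val[0] == 0x90abcdef and f_fsid.val[1] == 0x12345678.
 */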
/* Maximum length of filenames. */
sfs->f_namelen = NTFS_MAX_NAME_LEN;
return 0;
}
#ifdef NTFS_RW
static int ntfs_write_inode(struct inode *vi, struct writeback_control *wbc)
{
return __ntfs_write_inode(vi, wbc->sync_mode == WB_SYNC_ALL);
}
#endif
/**
* The complete super operations.
*/
static const struct super_operations ntfs_sops = {
.alloc_inode = ntfs_alloc_big_inode, /* VFS: Allocate new inode. */
.destroy_inode = ntfs_destroy_big_inode, /* VFS: Deallocate inode. */
#ifdef NTFS_RW
.write_inode = ntfs_write_inode, /* VFS: Write dirty inode to
disk. */
#endif /* NTFS_RW */
.put_super = ntfs_put_super, /* Syscall: umount. */
.statfs = ntfs_statfs, /* Syscall: statfs */
.remount_fs = ntfs_remount, /* Syscall: mount -o remount. */
.evict_inode = ntfs_evict_big_inode, /* VFS: Called when an inode is
removed from memory. */
.show_options = ntfs_show_options, /* Show mount options in
proc. */
};
/**
* ntfs_fill_super - mount an ntfs filesystem
* @sb: super block of ntfs filesystem to mount
* @opt: string containing the mount options
* @silent: silence error output
*
* ntfs_fill_super() is called by the VFS to mount the device described by @sb
* with the mount options in @opt as an NTFS filesystem.
*
* If @silent is true, remain silent even if errors are detected. This is used
* during bootup, when the kernel tries to mount the root filesystem with all
* registered filesystems one after the other until one succeeds. This implies
* that all filesystems except the correct one will quite correctly and
* expectedly return an error, but nobody wants to see error messages when in
* fact this is what is supposed to happen.
*
* NOTE: @sb->s_flags contains the mount options flags.
*/
static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent)
{
ntfs_volume *vol;
struct buffer_head *bh;
struct inode *tmp_ino;
int blocksize, result;
/*
* We do a pretty difficult piece of bootstrap by reading the
* MFT (and other metadata) from disk into memory. We'll only
* release this metadata during umount, so the locking patterns
* observed during bootstrap do not count. So turn off the
* observation of locking patterns (strictly for this context
* only) while mounting NTFS. [The validator is still active
* otherwise, even for this context: it will for example record
* lock class registrations.]
*/
lockdep_off();
ntfs_debug("Entering.");
#ifndef NTFS_RW
sb->s_flags |= MS_RDONLY;
#endif /* ! NTFS_RW */
/* Allocate a new ntfs_volume and place it in sb->s_fs_info. */
sb->s_fs_info = kmalloc(sizeof(ntfs_volume), GFP_NOFS);
vol = NTFS_SB(sb);
if (!vol) {
if (!silent)
ntfs_error(sb, "Allocation of NTFS volume structure "
"failed. Aborting mount...");
lockdep_on();
return -ENOMEM;
}
/* Initialize ntfs_volume structure. */
*vol = (ntfs_volume) {
.sb = sb,
/*
* Default is group and other don't have any access to files or
* directories while owner has full access. Further, files by
* default are not executable but directories are of course
* browseable.
*/
.fmask = 0177,
.dmask = 0077,
};
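/*
 * Illustration of the default masks, assuming the usual
 * mode = 0777 & ~mask convention: a file that would otherwise be
 * 0777 appears as 0777 & ~0177 == 0600, and a directory that would
 * otherwise be 0777 appears as 0777 & ~0077 == 0700.
 */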
init_rwsem(&vol->mftbmp_lock);
init_rwsem(&vol->lcnbmp_lock);
/* By default, enable sparse support. */
NVolSetSparseEnabled(vol);
/* Important to get the mount options dealt with now. */
if (!parse_options(vol, (char*)opt))
goto err_out_now;
/* We support sector sizes up to the PAGE_CACHE_SIZE. */
if (bdev_logical_block_size(sb->s_bdev) > PAGE_CACHE_SIZE) {
if (!silent)
ntfs_error(sb, "Device has unsupported sector size "
"(%i). The maximum supported sector "
"size on this architecture is %lu "
"bytes.",
bdev_logical_block_size(sb->s_bdev),
PAGE_CACHE_SIZE);
goto err_out_now;
}
/*
* Setup the device access block size to NTFS_BLOCK_SIZE or the hard
* sector size, whichever is bigger.
*/
blocksize = sb_min_blocksize(sb, NTFS_BLOCK_SIZE);
if (blocksize < NTFS_BLOCK_SIZE) {
if (!silent)
ntfs_error(sb, "Unable to set device block size.");
goto err_out_now;
}
BUG_ON(blocksize != sb->s_blocksize);
ntfs_debug("Set device block size to %i bytes (block size bits %i).",
blocksize, sb->s_blocksize_bits);
/* Determine the size of the device in units of block_size bytes. */
if (!i_size_read(sb->s_bdev->bd_inode)) {
if (!silent)
ntfs_error(sb, "Unable to determine device size.");
goto err_out_now;
}
vol->nr_blocks = i_size_read(sb->s_bdev->bd_inode) >>
sb->s_blocksize_bits;
/* Read the boot sector and return unlocked buffer head to it. */
if (!(bh = read_ntfs_boot_sector(sb, silent))) {
if (!silent)
ntfs_error(sb, "Not an NTFS volume.");
goto err_out_now;
}
/*
* Extract the data from the boot sector and setup the ntfs volume
* using it.
*/
result = parse_ntfs_boot_sector(vol, (NTFS_BOOT_SECTOR*)bh->b_data);
brelse(bh);
if (!result) {
if (!silent)
ntfs_error(sb, "Unsupported NTFS filesystem.");
goto err_out_now;
}
/*
* If the boot sector indicates a sector size bigger than the current
* device block size, switch the device block size to the sector size.
* TODO: It may be possible to support this case even when the set
* below fails, we would just be breaking up the i/o for each sector
* into multiple blocks for i/o purposes but otherwise it should just
* work. However it is safer to leave disabled until someone hits this
* error message and then we can get them to try it without the setting
* so we know for sure that it works.
*/
if (vol->sector_size > blocksize) {
blocksize = sb_set_blocksize(sb, vol->sector_size);
if (blocksize != vol->sector_size) {
if (!silent)
ntfs_error(sb, "Unable to set device block "
"size to sector size (%i).",
vol->sector_size);
goto err_out_now;
}
BUG_ON(blocksize != sb->s_blocksize);
vol->nr_blocks = i_size_read(sb->s_bdev->bd_inode) >>
sb->s_blocksize_bits;
ntfs_debug("Changed device block size to %i bytes (block size "
"bits %i) to match volume sector size.",
blocksize, sb->s_blocksize_bits);
}
/* Initialize the cluster and mft allocators. */
ntfs_setup_allocators(vol);
/* Setup remaining fields in the super block. */
sb->s_magic = NTFS_SB_MAGIC;
/*
* Ntfs allows 63 bits for the file size, i.e. correct would be:
* sb->s_maxbytes = ~0ULL >> 1;
* But the kernel uses a long as the page cache page index which on
* 32-bit architectures is only 32-bits. MAX_LFS_FILESIZE is kernel
* defined to the maximum the page cache page index can cope with
* without overflowing the index or to 2^63 - 1, whichever is smaller.
*/
sb->s_maxbytes = MAX_LFS_FILESIZE;
/* Ntfs measures time in 100ns intervals. */
sb->s_time_gran = 100;
/*
* Now load the metadata required for the page cache and our address
* space operations to function. We do this by setting up a specialised
* read_inode method and then just calling the normal iget() to obtain
* the inode for $MFT which is sufficient to allow our normal inode
* operations and associated address space operations to function.
*/
sb->s_op = &ntfs_sops;
tmp_ino = new_inode(sb);
if (!tmp_ino) {
if (!silent)
ntfs_error(sb, "Failed to load essential metadata.");
goto err_out_now;
}
tmp_ino->i_ino = FILE_MFT;
insert_inode_hash(tmp_ino);
if (ntfs_read_inode_mount(tmp_ino) < 0) {
if (!silent)
ntfs_error(sb, "Failed to load essential metadata.");
goto iput_tmp_ino_err_out_now;
}
mutex_lock(&ntfs_lock);
/*
* The current mount is a compression user if the cluster size is
* less than or equal 4kiB.
*/
if (vol->cluster_size <= 4096 && !ntfs_nr_compression_users++) {
result = allocate_compression_buffers();
if (result) {
ntfs_error(NULL, "Failed to allocate buffers "
"for compression engine.");
ntfs_nr_compression_users--;
mutex_unlock(&ntfs_lock);
goto iput_tmp_ino_err_out_now;
}
}
/*
* Generate the global default upcase table if necessary. Also
* temporarily increment the number of upcase users to avoid race
* conditions with concurrent (u)mounts.
*/
if (!default_upcase)
default_upcase = generate_default_upcase();
ntfs_nr_upcase_users++;
mutex_unlock(&ntfs_lock);
/*
* From now on, ignore @silent parameter. If we fail below this line,
* it will be due to a corrupt fs or a system error, so we report it.
*/
/*
* Open the system files with normal access functions and complete
* setting up the ntfs super block.
*/
if (!load_system_files(vol)) {
ntfs_error(sb, "Failed to load system files.");
goto unl_upcase_iput_tmp_ino_err_out_now;
}
/* We grab a reference, simulating an ntfs_iget(). */
ihold(vol->root_ino);
if ((sb->s_root = d_make_root(vol->root_ino))) {
ntfs_debug("Exiting, status successful.");
/* Release the default upcase if it has no users. */
mutex_lock(&ntfs_lock);
if (!--ntfs_nr_upcase_users && default_upcase) {
ntfs_free(default_upcase);
default_upcase = NULL;
}
mutex_unlock(&ntfs_lock);
sb->s_export_op = &ntfs_export_ops;
lockdep_on();
return 0;
}
ntfs_error(sb, "Failed to allocate root directory.");
/* Clean up after the successful load_system_files() call from above. */
// TODO: Use ntfs_put_super() instead of repeating all this code...
// FIXME: Should mark the volume clean as the error is most likely
// -ENOMEM.
iput(vol->vol_ino);
vol->vol_ino = NULL;
/* NTFS 3.0+ specific clean up. */
if (vol->major_ver >= 3) {
#ifdef NTFS_RW
if (vol->usnjrnl_j_ino) {
iput(vol->usnjrnl_j_ino);
vol->usnjrnl_j_ino = NULL;
}
if (vol->usnjrnl_max_ino) {
iput(vol->usnjrnl_max_ino);
vol->usnjrnl_max_ino = NULL;
}
if (vol->usnjrnl_ino) {
iput(vol->usnjrnl_ino);
vol->usnjrnl_ino = NULL;
}
if (vol->quota_q_ino) {
iput(vol->quota_q_ino);
vol->quota_q_ino = NULL;
}
if (vol->quota_ino) {
iput(vol->quota_ino);
vol->quota_ino = NULL;
}
#endif /* NTFS_RW */
if (vol->extend_ino) {
iput(vol->extend_ino);
vol->extend_ino = NULL;
}
if (vol->secure_ino) {
iput(vol->secure_ino);
vol->secure_ino = NULL;
}
}
iput(vol->root_ino);
vol->root_ino = NULL;
iput(vol->lcnbmp_ino);
vol->lcnbmp_ino = NULL;
iput(vol->mftbmp_ino);
vol->mftbmp_ino = NULL;
#ifdef NTFS_RW
if (vol->logfile_ino) {
iput(vol->logfile_ino);
vol->logfile_ino = NULL;
}
if (vol->mftmirr_ino) {
iput(vol->mftmirr_ino);
vol->mftmirr_ino = NULL;
}
#endif /* NTFS_RW */
/* Throw away the table of attribute definitions. */
vol->attrdef_size = 0;
if (vol->attrdef) {
ntfs_free(vol->attrdef);
vol->attrdef = NULL;
}
vol->upcase_len = 0;
mutex_lock(&ntfs_lock);
if (vol->upcase == default_upcase) {
ntfs_nr_upcase_users--;
vol->upcase = NULL;
}
mutex_unlock(&ntfs_lock);
if (vol->upcase) {
ntfs_free(vol->upcase);
vol->upcase = NULL;
}
if (vol->nls_map) {
unload_nls(vol->nls_map);
vol->nls_map = NULL;
}
/* Error exit code path. */
unl_upcase_iput_tmp_ino_err_out_now:
/*
* Decrease the number of upcase users and destroy the global default
* upcase table if necessary.
*/
mutex_lock(&ntfs_lock);
if (!--ntfs_nr_upcase_users && default_upcase) {
ntfs_free(default_upcase);
default_upcase = NULL;
}
if (vol->cluster_size <= 4096 && !--ntfs_nr_compression_users)
free_compression_buffers();
mutex_unlock(&ntfs_lock);
iput_tmp_ino_err_out_now:
iput(tmp_ino);
if (vol->mft_ino && vol->mft_ino != tmp_ino)
iput(vol->mft_ino);
vol->mft_ino = NULL;
/* Errors at this stage are irrelevant. */
err_out_now:
sb->s_fs_info = NULL;
kfree(vol);
ntfs_debug("Failed, returning -EINVAL.");
lockdep_on();
return -EINVAL;
}
/*
* This is a slab cache to optimize allocations and deallocations of Unicode
* strings of the maximum length allowed by NTFS, which is NTFS_MAX_NAME_LEN
* (255) Unicode characters + a terminating NULL Unicode character.
*/
struct kmem_cache *ntfs_name_cache;
/* Slab caches for efficient allocation/deallocation of inodes. */
struct kmem_cache *ntfs_inode_cache;
struct kmem_cache *ntfs_big_inode_cache;
/* Init once constructor for the inode slab cache. */
static void ntfs_big_inode_init_once(void *foo)
{
ntfs_inode *ni = (ntfs_inode *)foo;
inode_init_once(VFS_I(ni));
}
/*
* Slab caches to optimize allocations and deallocations of attribute search
* contexts and index contexts, respectively.
*/
struct kmem_cache *ntfs_attr_ctx_cache;
struct kmem_cache *ntfs_index_ctx_cache;
/* Driver wide mutex. */
DEFINE_MUTEX(ntfs_lock);
static struct dentry *ntfs_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
return mount_bdev(fs_type, flags, dev_name, data, ntfs_fill_super);
}
static struct file_system_type ntfs_fs_type = {
.owner = THIS_MODULE,
.name = "ntfs",
.mount = ntfs_mount,
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ntfs");
/* Stable names for the slab caches. */
static const char ntfs_index_ctx_cache_name[] = "ntfs_index_ctx_cache";
static const char ntfs_attr_ctx_cache_name[] = "ntfs_attr_ctx_cache";
static const char ntfs_name_cache_name[] = "ntfs_name_cache";
static const char ntfs_inode_cache_name[] = "ntfs_inode_cache";
static const char ntfs_big_inode_cache_name[] = "ntfs_big_inode_cache";
static int __init init_ntfs_fs(void)
{
int err = 0;
/* This may be ugly but it results in pretty output so who cares. (-8 */
printk(KERN_INFO "NTFS driver " NTFS_VERSION " [Flags: R/"
#ifdef NTFS_RW
"W"
#else
"O"
#endif
#ifdef DEBUG
" DEBUG"
#endif
#ifdef MODULE
" MODULE"
#endif
"].\n");
ntfs_debug("Debug messages are enabled.");
ntfs_index_ctx_cache = kmem_cache_create(ntfs_index_ctx_cache_name,
sizeof(ntfs_index_context), 0 /* offset */,
SLAB_HWCACHE_ALIGN, NULL /* ctor */);
if (!ntfs_index_ctx_cache) {
printk(KERN_CRIT "NTFS: Failed to create %s!\n",
ntfs_index_ctx_cache_name);
goto ictx_err_out;
}
ntfs_attr_ctx_cache = kmem_cache_create(ntfs_attr_ctx_cache_name,
sizeof(ntfs_attr_search_ctx), 0 /* offset */,
SLAB_HWCACHE_ALIGN, NULL /* ctor */);
if (!ntfs_attr_ctx_cache) {
printk(KERN_CRIT "NTFS: Failed to create %s!\n",
ntfs_attr_ctx_cache_name);
goto actx_err_out;
}
ntfs_name_cache = kmem_cache_create(ntfs_name_cache_name,
(NTFS_MAX_NAME_LEN+1) * sizeof(ntfschar), 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!ntfs_name_cache) {
printk(KERN_CRIT "NTFS: Failed to create %s!\n",
ntfs_name_cache_name);
goto name_err_out;
}
ntfs_inode_cache = kmem_cache_create(ntfs_inode_cache_name,
sizeof(ntfs_inode), 0,
SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
if (!ntfs_inode_cache) {
printk(KERN_CRIT "NTFS: Failed to create %s!\n",
ntfs_inode_cache_name);
goto inode_err_out;
}
ntfs_big_inode_cache = kmem_cache_create(ntfs_big_inode_cache_name,
sizeof(big_ntfs_inode), 0,
SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
ntfs_big_inode_init_once);
if (!ntfs_big_inode_cache) {
printk(KERN_CRIT "NTFS: Failed to create %s!\n",
ntfs_big_inode_cache_name);
goto big_inode_err_out;
}
/* Register the ntfs sysctls. */
err = ntfs_sysctl(1);
if (err) {
printk(KERN_CRIT "NTFS: Failed to register NTFS sysctls!\n");
goto sysctl_err_out;
}
err = register_filesystem(&ntfs_fs_type);
if (!err) {
ntfs_debug("NTFS driver registered successfully.");
return 0; /* Success! */
}
printk(KERN_CRIT "NTFS: Failed to register NTFS filesystem driver!\n");
/* Unregister the ntfs sysctls. */
ntfs_sysctl(0);
sysctl_err_out:
kmem_cache_destroy(ntfs_big_inode_cache);
big_inode_err_out:
kmem_cache_destroy(ntfs_inode_cache);
inode_err_out:
kmem_cache_destroy(ntfs_name_cache);
name_err_out:
kmem_cache_destroy(ntfs_attr_ctx_cache);
actx_err_out:
kmem_cache_destroy(ntfs_index_ctx_cache);
ictx_err_out:
if (!err) {
printk(KERN_CRIT "NTFS: Aborting NTFS filesystem driver "
"registration...\n");
err = -ENOMEM;
}
return err;
}
static void __exit exit_ntfs_fs(void)
{
ntfs_debug("Unregistering NTFS driver.");
unregister_filesystem(&ntfs_fs_type);
/*
* Make sure all delayed rcu free inodes are flushed before we
* destroy cache.
*/
rcu_barrier();
kmem_cache_destroy(ntfs_big_inode_cache);
kmem_cache_destroy(ntfs_inode_cache);
kmem_cache_destroy(ntfs_name_cache);
kmem_cache_destroy(ntfs_attr_ctx_cache);
kmem_cache_destroy(ntfs_index_ctx_cache);
/* Unregister the ntfs sysctls. */
ntfs_sysctl(0);
}
MODULE_AUTHOR("Anton Altaparmakov <anton@tuxera.com>");
MODULE_DESCRIPTION("NTFS 1.2/3.x driver - Copyright (c) 2001-2011 Anton Altaparmakov and Tuxera Inc.");
MODULE_VERSION(NTFS_VERSION);
MODULE_LICENSE("GPL");
#ifdef DEBUG
module_param(debug_msgs, bint, 0);
MODULE_PARM_DESC(debug_msgs, "Enable debug messages.");
#endif
module_init(init_ntfs_fs)
module_exit(exit_ntfs_fs)
| gpl-2.0 |
hroark13/zw340 | drivers/net/ethernet/freescale/fec_mpc52xx.c | 2740 | 29091 | /*
* Driver for the MPC5200 Fast Ethernet Controller
*
* Originally written by Dale Farnsworth <dfarnsworth@mvista.com> and
* now maintained by Sylvain Munaut <tnt@246tNt.com>
*
* Copyright (C) 2007 Domen Puncer, Telargo, Inc.
* Copyright (C) 2007 Sylvain Munaut <tnt@246tNt.com>
* Copyright (C) 2003-2004 MontaVista, Software, Inc.
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*
*/
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/crc32.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <asm/io.h>
#include <asm/delay.h>
#include <asm/mpc52xx.h>
#include <sysdev/bestcomm/bestcomm.h>
#include <sysdev/bestcomm/fec.h>
#include "fec_mpc52xx.h"
#define DRIVER_NAME "mpc52xx-fec"
/* Private driver data structure */
struct mpc52xx_fec_priv {
struct net_device *ndev;
int duplex;
int speed;
int r_irq;
int t_irq;
struct mpc52xx_fec __iomem *fec;
struct bcom_task *rx_dmatsk;
struct bcom_task *tx_dmatsk;
spinlock_t lock;
int msg_enable;
/* MDIO link details */
unsigned int mdio_speed;
struct device_node *phy_node;
struct phy_device *phydev;
enum phy_state link;
int seven_wire_mode;
};
static irqreturn_t mpc52xx_fec_interrupt(int, void *);
static irqreturn_t mpc52xx_fec_rx_interrupt(int, void *);
static irqreturn_t mpc52xx_fec_tx_interrupt(int, void *);
static void mpc52xx_fec_stop(struct net_device *dev);
static void mpc52xx_fec_start(struct net_device *dev);
static void mpc52xx_fec_reset(struct net_device *dev);
static u8 mpc52xx_fec_mac_addr[6];
module_param_array_named(mac, mpc52xx_fec_mac_addr, byte, NULL, 0);
MODULE_PARM_DESC(mac, "six hex digits, ie. 0x1,0x2,0xc0,0x01,0xba,0xbe");
#define MPC52xx_MESSAGES_DEFAULT ( NETIF_MSG_DRV | NETIF_MSG_PROBE | \
NETIF_MSG_LINK | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
static int debug = -1; /* the above default */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "debugging messages level");
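/*
 * netif_msg_init() (used in the probe routine below) interprets this
 * value as follows: debug == -1 keeps MPC52xx_MESSAGES_DEFAULT,
 * debug == 0 disables all messages, and e.g. debug == 4 enables the
 * lowest four message classes ((1 << 4) - 1 == 0xf).
 */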
static void mpc52xx_fec_tx_timeout(struct net_device *dev)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
unsigned long flags;
dev_warn(&dev->dev, "transmit timed out\n");
spin_lock_irqsave(&priv->lock, flags);
mpc52xx_fec_reset(dev);
dev->stats.tx_errors++;
spin_unlock_irqrestore(&priv->lock, flags);
netif_wake_queue(dev);
}
static void mpc52xx_fec_set_paddr(struct net_device *dev, u8 *mac)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
struct mpc52xx_fec __iomem *fec = priv->fec;
out_be32(&fec->paddr1, *(u32 *)(&mac[0]));
out_be32(&fec->paddr2, (*(u16 *)(&mac[4]) << 16) | FEC_PADDR2_TYPE);
}
static void mpc52xx_fec_get_paddr(struct net_device *dev, u8 *mac)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
struct mpc52xx_fec __iomem *fec = priv->fec;
*(u32 *)(&mac[0]) = in_be32(&fec->paddr1);
*(u16 *)(&mac[4]) = in_be32(&fec->paddr2) >> 16;
}
static int mpc52xx_fec_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr *sock = addr;
memcpy(dev->dev_addr, sock->sa_data, dev->addr_len);
mpc52xx_fec_set_paddr(dev, sock->sa_data);
return 0;
}
static void mpc52xx_fec_free_rx_buffers(struct net_device *dev, struct bcom_task *s)
{
while (!bcom_queue_empty(s)) {
struct bcom_fec_bd *bd;
struct sk_buff *skb;
skb = bcom_retrieve_buffer(s, NULL, (struct bcom_bd **)&bd);
dma_unmap_single(dev->dev.parent, bd->skb_pa, skb->len,
DMA_FROM_DEVICE);
kfree_skb(skb);
}
}
static void
mpc52xx_fec_rx_submit(struct net_device *dev, struct sk_buff *rskb)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
struct bcom_fec_bd *bd;
bd = (struct bcom_fec_bd *) bcom_prepare_next_buffer(priv->rx_dmatsk);
bd->status = FEC_RX_BUFFER_SIZE;
bd->skb_pa = dma_map_single(dev->dev.parent, rskb->data,
FEC_RX_BUFFER_SIZE, DMA_FROM_DEVICE);
bcom_submit_next_buffer(priv->rx_dmatsk, rskb);
}
static int mpc52xx_fec_alloc_rx_buffers(struct net_device *dev, struct bcom_task *rxtsk)
{
struct sk_buff *skb;
while (!bcom_queue_full(rxtsk)) {
skb = netdev_alloc_skb(dev, FEC_RX_BUFFER_SIZE);
if (!skb)
return -EAGAIN;
/* zero out the initial receive buffers to aid debugging */
memset(skb->data, 0, FEC_RX_BUFFER_SIZE);
mpc52xx_fec_rx_submit(dev, skb);
}
return 0;
}
/* based on generic_adjust_link from fs_enet-main.c */
static void mpc52xx_fec_adjust_link(struct net_device *dev)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
struct phy_device *phydev = priv->phydev;
int new_state = 0;
if (phydev->link != PHY_DOWN) {
if (phydev->duplex != priv->duplex) {
struct mpc52xx_fec __iomem *fec = priv->fec;
u32 rcntrl;
u32 tcntrl;
new_state = 1;
priv->duplex = phydev->duplex;
rcntrl = in_be32(&fec->r_cntrl);
tcntrl = in_be32(&fec->x_cntrl);
rcntrl &= ~FEC_RCNTRL_DRT;
tcntrl &= ~FEC_TCNTRL_FDEN;
if (phydev->duplex == DUPLEX_FULL)
tcntrl |= FEC_TCNTRL_FDEN; /* FD enable */
else
rcntrl |= FEC_RCNTRL_DRT; /* disable Rx on Tx (HD) */
out_be32(&fec->r_cntrl, rcntrl);
out_be32(&fec->x_cntrl, tcntrl);
}
if (phydev->speed != priv->speed) {
new_state = 1;
priv->speed = phydev->speed;
}
if (priv->link == PHY_DOWN) {
new_state = 1;
priv->link = phydev->link;
}
} else if (priv->link) {
new_state = 1;
priv->link = PHY_DOWN;
priv->speed = 0;
priv->duplex = -1;
}
if (new_state && netif_msg_link(priv))
phy_print_status(phydev);
}
static int mpc52xx_fec_open(struct net_device *dev)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
int err = -EBUSY;
if (priv->phy_node) {
priv->phydev = of_phy_connect(priv->ndev, priv->phy_node,
mpc52xx_fec_adjust_link, 0, 0);
if (!priv->phydev) {
dev_err(&dev->dev, "of_phy_connect failed\n");
return -ENODEV;
}
phy_start(priv->phydev);
}
if (request_irq(dev->irq, mpc52xx_fec_interrupt, IRQF_SHARED,
DRIVER_NAME "_ctrl", dev)) {
dev_err(&dev->dev, "ctrl interrupt request failed\n");
goto free_phy;
}
if (request_irq(priv->r_irq, mpc52xx_fec_rx_interrupt, 0,
DRIVER_NAME "_rx", dev)) {
dev_err(&dev->dev, "rx interrupt request failed\n");
goto free_ctrl_irq;
}
if (request_irq(priv->t_irq, mpc52xx_fec_tx_interrupt, 0,
DRIVER_NAME "_tx", dev)) {
dev_err(&dev->dev, "tx interrupt request failed\n");
goto free_2irqs;
}
bcom_fec_rx_reset(priv->rx_dmatsk);
bcom_fec_tx_reset(priv->tx_dmatsk);
err = mpc52xx_fec_alloc_rx_buffers(dev, priv->rx_dmatsk);
if (err) {
dev_err(&dev->dev, "mpc52xx_fec_alloc_rx_buffers failed\n");
goto free_irqs;
}
bcom_enable(priv->rx_dmatsk);
bcom_enable(priv->tx_dmatsk);
mpc52xx_fec_start(dev);
netif_start_queue(dev);
return 0;
free_irqs:
free_irq(priv->t_irq, dev);
free_2irqs:
free_irq(priv->r_irq, dev);
free_ctrl_irq:
free_irq(dev->irq, dev);
free_phy:
if (priv->phydev) {
phy_stop(priv->phydev);
phy_disconnect(priv->phydev);
priv->phydev = NULL;
}
return err;
}
static int mpc52xx_fec_close(struct net_device *dev)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
netif_stop_queue(dev);
mpc52xx_fec_stop(dev);
mpc52xx_fec_free_rx_buffers(dev, priv->rx_dmatsk);
free_irq(dev->irq, dev);
free_irq(priv->r_irq, dev);
free_irq(priv->t_irq, dev);
if (priv->phydev) {
/* power down phy */
phy_stop(priv->phydev);
phy_disconnect(priv->phydev);
priv->phydev = NULL;
}
return 0;
}
/* This will only be invoked if your driver is _not_ in XOFF state.
* What this means is that you need not check it, and that this
* invariant will hold if you make sure that the netif_*_queue()
* calls are done at the proper times.
*/
static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
struct bcom_fec_bd *bd;
unsigned long flags;
if (bcom_queue_full(priv->tx_dmatsk)) {
if (net_ratelimit())
dev_err(&dev->dev, "transmit queue overrun\n");
return NETDEV_TX_BUSY;
}
spin_lock_irqsave(&priv->lock, flags);
bd = (struct bcom_fec_bd *)
bcom_prepare_next_buffer(priv->tx_dmatsk);
bd->status = skb->len | BCOM_FEC_TX_BD_TFD | BCOM_FEC_TX_BD_TC;
bd->skb_pa = dma_map_single(dev->dev.parent, skb->data, skb->len,
DMA_TO_DEVICE);
skb_tx_timestamp(skb);
bcom_submit_next_buffer(priv->tx_dmatsk, skb);
spin_unlock_irqrestore(&priv->lock, flags);
if (bcom_queue_full(priv->tx_dmatsk)) {
netif_stop_queue(dev);
}
return NETDEV_TX_OK;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void mpc52xx_fec_poll_controller(struct net_device *dev)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
disable_irq(priv->t_irq);
mpc52xx_fec_tx_interrupt(priv->t_irq, dev);
enable_irq(priv->t_irq);
disable_irq(priv->r_irq);
mpc52xx_fec_rx_interrupt(priv->r_irq, dev);
enable_irq(priv->r_irq);
}
#endif
/* This handles BestComm transmit task interrupts
*/
static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
spin_lock(&priv->lock);
while (bcom_buffer_done(priv->tx_dmatsk)) {
struct sk_buff *skb;
struct bcom_fec_bd *bd;
skb = bcom_retrieve_buffer(priv->tx_dmatsk, NULL,
(struct bcom_bd **)&bd);
dma_unmap_single(dev->dev.parent, bd->skb_pa, skb->len,
DMA_TO_DEVICE);
dev_kfree_skb_irq(skb);
}
spin_unlock(&priv->lock);
netif_wake_queue(dev);
return IRQ_HANDLED;
}
static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
struct sk_buff *rskb; /* received sk_buff */
struct sk_buff *skb; /* new sk_buff to enqueue in its place */
struct bcom_fec_bd *bd;
u32 status, physaddr;
int length;
spin_lock(&priv->lock);
while (bcom_buffer_done(priv->rx_dmatsk)) {
rskb = bcom_retrieve_buffer(priv->rx_dmatsk, &status,
(struct bcom_bd **)&bd);
physaddr = bd->skb_pa;
/* Test for errors in received frame */
if (status & BCOM_FEC_RX_BD_ERRORS) {
/* Drop packet and reuse the buffer */
mpc52xx_fec_rx_submit(dev, rskb);
dev->stats.rx_dropped++;
continue;
}
/* skbs are allocated on open, so now we allocate a new one,
* and remove the old (with the packet) */
skb = netdev_alloc_skb(dev, FEC_RX_BUFFER_SIZE);
if (!skb) {
/* Can't get a new one: reuse the same & drop pkt */
dev_notice(&dev->dev, "Low memory - dropped packet.\n");
mpc52xx_fec_rx_submit(dev, rskb);
dev->stats.rx_dropped++;
continue;
}
/* Enqueue the new sk_buff back on the hardware */
mpc52xx_fec_rx_submit(dev, skb);
/* Process the received skb - Drop the spin lock while
* calling into the network stack */
spin_unlock(&priv->lock);
dma_unmap_single(dev->dev.parent, physaddr, rskb->len,
DMA_FROM_DEVICE);
length = status & BCOM_FEC_RX_BD_LEN_MASK;
skb_put(rskb, length - 4); /* length without CRC32 */
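/*
 * The status word carries the frame length including the 4-byte
 * FCS, hence the "- 4": a minimum-size 64-byte Ethernet frame,
 * FCS included, results in skb_put(rskb, 60).
 */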
rskb->protocol = eth_type_trans(rskb, dev);
if (!skb_defer_rx_timestamp(rskb))
netif_rx(rskb);
spin_lock(&priv->lock);
}
spin_unlock(&priv->lock);
return IRQ_HANDLED;
}
static irqreturn_t mpc52xx_fec_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
struct mpc52xx_fec __iomem *fec = priv->fec;
u32 ievent;
ievent = in_be32(&fec->ievent);
ievent &= ~FEC_IEVENT_MII; /* mii is handled separately */
if (!ievent)
return IRQ_NONE;
out_be32(&fec->ievent, ievent); /* clear pending events */
/* on fifo error, soft-reset fec */
if (ievent & (FEC_IEVENT_RFIFO_ERROR | FEC_IEVENT_XFIFO_ERROR)) {
if (net_ratelimit() && (ievent & FEC_IEVENT_RFIFO_ERROR))
dev_warn(&dev->dev, "FEC_IEVENT_RFIFO_ERROR\n");
if (net_ratelimit() && (ievent & FEC_IEVENT_XFIFO_ERROR))
dev_warn(&dev->dev, "FEC_IEVENT_XFIFO_ERROR\n");
spin_lock(&priv->lock);
mpc52xx_fec_reset(dev);
spin_unlock(&priv->lock);
return IRQ_HANDLED;
}
if (ievent & ~FEC_IEVENT_TFINT)
dev_dbg(&dev->dev, "ievent: %08x\n", ievent);
return IRQ_HANDLED;
}
/*
* Get the current statistics.
* This may be called with the card open or closed.
*/
static struct net_device_stats *mpc52xx_fec_get_stats(struct net_device *dev)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
struct mpc52xx_fec __iomem *fec = priv->fec;
stats->rx_bytes = in_be32(&fec->rmon_r_octets);
stats->rx_packets = in_be32(&fec->rmon_r_packets);
stats->rx_errors = in_be32(&fec->rmon_r_crc_align) +
in_be32(&fec->rmon_r_undersize) +
in_be32(&fec->rmon_r_oversize) +
in_be32(&fec->rmon_r_frag) +
in_be32(&fec->rmon_r_jab);
stats->tx_bytes = in_be32(&fec->rmon_t_octets);
stats->tx_packets = in_be32(&fec->rmon_t_packets);
stats->tx_errors = in_be32(&fec->rmon_t_crc_align) +
in_be32(&fec->rmon_t_undersize) +
in_be32(&fec->rmon_t_oversize) +
in_be32(&fec->rmon_t_frag) +
in_be32(&fec->rmon_t_jab);
stats->multicast = in_be32(&fec->rmon_r_mc_pkt);
stats->collisions = in_be32(&fec->rmon_t_col);
/* detailed rx_errors: */
stats->rx_length_errors = in_be32(&fec->rmon_r_undersize)
+ in_be32(&fec->rmon_r_oversize)
+ in_be32(&fec->rmon_r_frag)
+ in_be32(&fec->rmon_r_jab);
stats->rx_over_errors = in_be32(&fec->r_macerr);
stats->rx_crc_errors = in_be32(&fec->ieee_r_crc);
stats->rx_frame_errors = in_be32(&fec->ieee_r_align);
stats->rx_fifo_errors = in_be32(&fec->rmon_r_drop);
stats->rx_missed_errors = in_be32(&fec->rmon_r_drop);
/* detailed tx_errors: */
stats->tx_aborted_errors = 0;
stats->tx_carrier_errors = in_be32(&fec->ieee_t_cserr);
stats->tx_fifo_errors = in_be32(&fec->rmon_t_drop);
stats->tx_heartbeat_errors = in_be32(&fec->ieee_t_sqe);
stats->tx_window_errors = in_be32(&fec->ieee_t_lcol);
return stats;
}
/*
* Read MIB counters in order to reset them,
* then zero all the stats fields in memory
*/
static void mpc52xx_fec_reset_stats(struct net_device *dev)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
struct mpc52xx_fec __iomem *fec = priv->fec;
out_be32(&fec->mib_control, FEC_MIB_DISABLE);
memset_io(&fec->rmon_t_drop, 0,
offsetof(struct mpc52xx_fec, reserved10) -
offsetof(struct mpc52xx_fec, rmon_t_drop));
out_be32(&fec->mib_control, 0);
memset(&dev->stats, 0, sizeof(dev->stats));
}
/*
* Set or clear the multicast filter for this adaptor.
*/
static void mpc52xx_fec_set_multicast_list(struct net_device *dev)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
struct mpc52xx_fec __iomem *fec = priv->fec;
u32 rx_control;
rx_control = in_be32(&fec->r_cntrl);
if (dev->flags & IFF_PROMISC) {
rx_control |= FEC_RCNTRL_PROM;
out_be32(&fec->r_cntrl, rx_control);
} else {
rx_control &= ~FEC_RCNTRL_PROM;
out_be32(&fec->r_cntrl, rx_control);
if (dev->flags & IFF_ALLMULTI) {
out_be32(&fec->gaddr1, 0xffffffff);
out_be32(&fec->gaddr2, 0xffffffff);
} else {
u32 crc;
struct netdev_hw_addr *ha;
u32 gaddr1 = 0x00000000;
u32 gaddr2 = 0x00000000;
netdev_for_each_mc_addr(ha, dev) {
crc = ether_crc_le(6, ha->addr) >> 26;
if (crc >= 32)
gaddr1 |= 1 << (crc-32);
else
gaddr2 |= 1 << crc;
}
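/*
 * Example of the hash mapping above: ether_crc_le() >> 26 leaves
 * a 6-bit index in 0..63; an index of 42 sets bit 42 - 32 == 10
 * in gaddr1, while an index of 5 sets bit 5 in gaddr2.
 */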
out_be32(&fec->gaddr1, gaddr1);
out_be32(&fec->gaddr2, gaddr2);
}
}
}
/**
* mpc52xx_fec_hw_init
* @dev: network device
*
* Setup various hardware setting, only needed once on start
*/
static void mpc52xx_fec_hw_init(struct net_device *dev)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
struct mpc52xx_fec __iomem *fec = priv->fec;
int i;
/* Whack a reset. We should wait for this. */
out_be32(&fec->ecntrl, FEC_ECNTRL_RESET);
for (i = 0; i < FEC_RESET_DELAY; ++i) {
if ((in_be32(&fec->ecntrl) & FEC_ECNTRL_RESET) == 0)
break;
udelay(1);
}
if (i == FEC_RESET_DELAY)
dev_err(&dev->dev, "FEC Reset timeout!\n");
/* set pause to 0x20 frames */
out_be32(&fec->op_pause, FEC_OP_PAUSE_OPCODE | 0x20);
/* high service request will be deasserted when there are < 7 bytes in fifo
* low service request will be deasserted when there are < 4*7 bytes in fifo
*/
out_be32(&fec->rfifo_cntrl, FEC_FIFO_CNTRL_FRAME | FEC_FIFO_CNTRL_LTG_7);
out_be32(&fec->tfifo_cntrl, FEC_FIFO_CNTRL_FRAME | FEC_FIFO_CNTRL_LTG_7);
/* alarm when <= x bytes in FIFO */
out_be32(&fec->rfifo_alarm, 0x0000030c);
out_be32(&fec->tfifo_alarm, 0x00000100);
/* begin transmission when 256 bytes are in FIFO (or EOF or FIFO full) */
out_be32(&fec->x_wmrk, FEC_FIFO_WMRK_256B);
/* enable crc generation */
out_be32(&fec->xmit_fsm, FEC_XMIT_FSM_APPEND_CRC | FEC_XMIT_FSM_ENABLE_CRC);
out_be32(&fec->iaddr1, 0x00000000); /* No individual filter */
out_be32(&fec->iaddr2, 0x00000000); /* No individual filter */
/* set phy speed.
* this can't be done in the phy driver, since it needs to be called
* before fec stuff (even on resume) */
out_be32(&fec->mii_speed, priv->mdio_speed);
}
/**
* mpc52xx_fec_start
* @dev: network device
*
* This function is called to start or restart the FEC during a link
* change. This happens on fifo errors or when switching between half
* and full duplex.
*/
static void mpc52xx_fec_start(struct net_device *dev)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
struct mpc52xx_fec __iomem *fec = priv->fec;
u32 rcntrl;
u32 tcntrl;
u32 tmp;
/* clear sticky error bits */
tmp = FEC_FIFO_STATUS_ERR | FEC_FIFO_STATUS_UF | FEC_FIFO_STATUS_OF;
out_be32(&fec->rfifo_status, in_be32(&fec->rfifo_status) & tmp);
out_be32(&fec->tfifo_status, in_be32(&fec->tfifo_status) & tmp);
/* FIFOs will reset on mpc52xx_fec_enable */
out_be32(&fec->reset_cntrl, FEC_RESET_CNTRL_ENABLE_IS_RESET);
/* Set station address. */
mpc52xx_fec_set_paddr(dev, dev->dev_addr);
mpc52xx_fec_set_multicast_list(dev);
/* set max frame len, enable flow control, select mii mode */
rcntrl = FEC_RX_BUFFER_SIZE << 16; /* max frame length */
rcntrl |= FEC_RCNTRL_FCE;
if (!priv->seven_wire_mode)
rcntrl |= FEC_RCNTRL_MII_MODE;
if (priv->duplex == DUPLEX_FULL)
tcntrl = FEC_TCNTRL_FDEN; /* FD enable */
else {
rcntrl |= FEC_RCNTRL_DRT; /* disable Rx on Tx (HD) */
tcntrl = 0;
}
out_be32(&fec->r_cntrl, rcntrl);
out_be32(&fec->x_cntrl, tcntrl);
/* Clear any outstanding interrupt. */
out_be32(&fec->ievent, 0xffffffff);
/* Enable interrupts we wish to service. */
out_be32(&fec->imask, FEC_IMASK_ENABLE);
/* And last, enable the transmit and receive processing. */
out_be32(&fec->ecntrl, FEC_ECNTRL_ETHER_EN);
out_be32(&fec->r_des_active, 0x01000000);
}
/**
* mpc52xx_fec_stop
* @dev: network device
*
* stop all activity on fec and empty dma buffers
*/
static void mpc52xx_fec_stop(struct net_device *dev)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
struct mpc52xx_fec __iomem *fec = priv->fec;
unsigned long timeout;
/* disable all interrupts */
out_be32(&fec->imask, 0);
/* Disable the rx task. */
bcom_disable(priv->rx_dmatsk);
/* Wait for tx queue to drain, but only if we're in process context */
if (!in_interrupt()) {
timeout = jiffies + msecs_to_jiffies(2000);
while (time_before(jiffies, timeout) &&
!bcom_queue_empty(priv->tx_dmatsk))
msleep(100);
if (time_after_eq(jiffies, timeout))
dev_err(&dev->dev, "queues didn't drain\n");
#if 1
if (time_after_eq(jiffies, timeout)) {
dev_err(&dev->dev, " tx: index: %i, outdex: %i\n",
priv->tx_dmatsk->index,
priv->tx_dmatsk->outdex);
dev_err(&dev->dev, " rx: index: %i, outdex: %i\n",
priv->rx_dmatsk->index,
priv->rx_dmatsk->outdex);
}
#endif
}
bcom_disable(priv->tx_dmatsk);
/* Stop FEC */
out_be32(&fec->ecntrl, in_be32(&fec->ecntrl) & ~FEC_ECNTRL_ETHER_EN);
}
/* reset fec and bestcomm tasks */
static void mpc52xx_fec_reset(struct net_device *dev)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
struct mpc52xx_fec __iomem *fec = priv->fec;
mpc52xx_fec_stop(dev);
out_be32(&fec->rfifo_status, in_be32(&fec->rfifo_status));
out_be32(&fec->reset_cntrl, FEC_RESET_CNTRL_RESET_FIFO);
mpc52xx_fec_free_rx_buffers(dev, priv->rx_dmatsk);
mpc52xx_fec_hw_init(dev);
bcom_fec_rx_reset(priv->rx_dmatsk);
bcom_fec_tx_reset(priv->tx_dmatsk);
mpc52xx_fec_alloc_rx_buffers(dev, priv->rx_dmatsk);
bcom_enable(priv->rx_dmatsk);
bcom_enable(priv->tx_dmatsk);
mpc52xx_fec_start(dev);
netif_wake_queue(dev);
}
/* ethtool interface */
static int mpc52xx_fec_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
if (!priv->phydev)
return -ENODEV;
return phy_ethtool_gset(priv->phydev, cmd);
}
static int mpc52xx_fec_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
if (!priv->phydev)
return -ENODEV;
return phy_ethtool_sset(priv->phydev, cmd);
}
static u32 mpc52xx_fec_get_msglevel(struct net_device *dev)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
return priv->msg_enable;
}
static void mpc52xx_fec_set_msglevel(struct net_device *dev, u32 level)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
priv->msg_enable = level;
}
static const struct ethtool_ops mpc52xx_fec_ethtool_ops = {
.get_settings = mpc52xx_fec_get_settings,
.set_settings = mpc52xx_fec_set_settings,
.get_link = ethtool_op_get_link,
.get_msglevel = mpc52xx_fec_get_msglevel,
.set_msglevel = mpc52xx_fec_set_msglevel,
};
static int mpc52xx_fec_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
if (!priv->phydev)
return -ENOTSUPP;
return phy_mii_ioctl(priv->phydev, rq, cmd);
}
static const struct net_device_ops mpc52xx_fec_netdev_ops = {
.ndo_open = mpc52xx_fec_open,
.ndo_stop = mpc52xx_fec_close,
.ndo_start_xmit = mpc52xx_fec_start_xmit,
.ndo_set_rx_mode = mpc52xx_fec_set_multicast_list,
.ndo_set_mac_address = mpc52xx_fec_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = mpc52xx_fec_ioctl,
.ndo_change_mtu = eth_change_mtu,
.ndo_tx_timeout = mpc52xx_fec_tx_timeout,
.ndo_get_stats = mpc52xx_fec_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = mpc52xx_fec_poll_controller,
#endif
};
/* ======================================================================== */
/* OF Driver */
/* ======================================================================== */
static int __devinit mpc52xx_fec_probe(struct platform_device *op)
{
int rv;
struct net_device *ndev;
struct mpc52xx_fec_priv *priv = NULL;
struct resource mem;
const u32 *prop;
int prop_size;
phys_addr_t rx_fifo;
phys_addr_t tx_fifo;
/* Get the ether ndev & its private zone */
ndev = alloc_etherdev(sizeof(struct mpc52xx_fec_priv));
if (!ndev)
return -ENOMEM;
priv = netdev_priv(ndev);
priv->ndev = ndev;
/* Reserve FEC control zone */
rv = of_address_to_resource(op->dev.of_node, 0, &mem);
if (rv) {
printk(KERN_ERR DRIVER_NAME ": "
"Error while parsing device node resource\n" );
goto err_netdev;
}
if (resource_size(&mem) < sizeof(struct mpc52xx_fec)) {
printk(KERN_ERR DRIVER_NAME
" - invalid resource size (%lx < %x), check mpc52xx_devices.c\n",
(unsigned long)resource_size(&mem),
sizeof(struct mpc52xx_fec));
rv = -EINVAL;
goto err_netdev;
}
if (!request_mem_region(mem.start, sizeof(struct mpc52xx_fec),
DRIVER_NAME)) {
rv = -EBUSY;
goto err_netdev;
}
/* Init ether ndev with what we have */
ndev->netdev_ops = &mpc52xx_fec_netdev_ops;
ndev->ethtool_ops = &mpc52xx_fec_ethtool_ops;
ndev->watchdog_timeo = FEC_WATCHDOG_TIMEOUT;
ndev->base_addr = mem.start;
SET_NETDEV_DEV(ndev, &op->dev);
spin_lock_init(&priv->lock);
/* ioremap the zones */
priv->fec = ioremap(mem.start, sizeof(struct mpc52xx_fec));
if (!priv->fec) {
rv = -ENOMEM;
goto err_mem_region;
}
/* Bestcomm init */
rx_fifo = ndev->base_addr + offsetof(struct mpc52xx_fec, rfifo_data);
tx_fifo = ndev->base_addr + offsetof(struct mpc52xx_fec, tfifo_data);
priv->rx_dmatsk = bcom_fec_rx_init(FEC_RX_NUM_BD, rx_fifo, FEC_RX_BUFFER_SIZE);
priv->tx_dmatsk = bcom_fec_tx_init(FEC_TX_NUM_BD, tx_fifo);
if (!priv->rx_dmatsk || !priv->tx_dmatsk) {
printk(KERN_ERR DRIVER_NAME ": Can not init SDMA tasks\n" );
rv = -ENOMEM;
goto err_rx_tx_dmatsk;
}
/* Get the IRQ we need one by one */
/* Control */
ndev->irq = irq_of_parse_and_map(op->dev.of_node, 0);
/* RX */
priv->r_irq = bcom_get_task_irq(priv->rx_dmatsk);
/* TX */
priv->t_irq = bcom_get_task_irq(priv->tx_dmatsk);
/* MAC address init */
if (!is_zero_ether_addr(mpc52xx_fec_mac_addr))
memcpy(ndev->dev_addr, mpc52xx_fec_mac_addr, 6);
else
mpc52xx_fec_get_paddr(ndev, ndev->dev_addr);
priv->msg_enable = netif_msg_init(debug, MPC52xx_MESSAGES_DEFAULT);
/*
* Link mode configuration
*/
/* Start with safe defaults for link connection */
priv->speed = 100;
priv->duplex = DUPLEX_HALF;
priv->mdio_speed = ((mpc5xxx_get_bus_frequency(op->dev.of_node) >> 20) / 5) << 1;
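/*
 * Example of the computed value: with a 66 MHz IPB bus clock,
 * (66000000 >> 20) == 62, 62 / 5 == 12, and 12 << 1 == 24, so
 * mdio_speed == 24; the divider is chosen so that the resulting
 * MDC clock stays within the usual 2.5 MHz MII management limit.
 */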
/* The current speed preconfigures the speed of the MII link */
prop = of_get_property(op->dev.of_node, "current-speed", &prop_size);
if (prop && (prop_size >= sizeof(u32) * 2)) {
priv->speed = prop[0];
priv->duplex = prop[1] ? DUPLEX_FULL : DUPLEX_HALF;
}
/* If there is a phy handle, then get the PHY node */
priv->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0);
/* the 7-wire property means don't use MII mode */
if (of_find_property(op->dev.of_node, "fsl,7-wire-mode", NULL)) {
priv->seven_wire_mode = 1;
dev_info(&ndev->dev, "using 7-wire PHY mode\n");
}
/* Hardware init */
mpc52xx_fec_hw_init(ndev);
mpc52xx_fec_reset_stats(ndev);
rv = register_netdev(ndev);
if (rv < 0)
goto err_node;
/* We're done ! */
dev_set_drvdata(&op->dev, ndev);
return 0;
err_node:
of_node_put(priv->phy_node);
irq_dispose_mapping(ndev->irq);
err_rx_tx_dmatsk:
if (priv->rx_dmatsk)
bcom_fec_rx_release(priv->rx_dmatsk);
if (priv->tx_dmatsk)
bcom_fec_tx_release(priv->tx_dmatsk);
iounmap(priv->fec);
err_mem_region:
release_mem_region(mem.start, sizeof(struct mpc52xx_fec));
err_netdev:
free_netdev(ndev);
return rv;
}
static int
mpc52xx_fec_remove(struct platform_device *op)
{
struct net_device *ndev;
struct mpc52xx_fec_priv *priv;
ndev = dev_get_drvdata(&op->dev);
priv = netdev_priv(ndev);
unregister_netdev(ndev);
if (priv->phy_node)
of_node_put(priv->phy_node);
priv->phy_node = NULL;
irq_dispose_mapping(ndev->irq);
bcom_fec_rx_release(priv->rx_dmatsk);
bcom_fec_tx_release(priv->tx_dmatsk);
iounmap(priv->fec);
release_mem_region(ndev->base_addr, sizeof(struct mpc52xx_fec));
free_netdev(ndev);
dev_set_drvdata(&op->dev, NULL);
return 0;
}
#ifdef CONFIG_PM
static int mpc52xx_fec_of_suspend(struct platform_device *op, pm_message_t state)
{
struct net_device *dev = dev_get_drvdata(&op->dev);
if (netif_running(dev))
mpc52xx_fec_close(dev);
return 0;
}
static int mpc52xx_fec_of_resume(struct platform_device *op)
{
struct net_device *dev = dev_get_drvdata(&op->dev);
mpc52xx_fec_hw_init(dev);
mpc52xx_fec_reset_stats(dev);
if (netif_running(dev))
mpc52xx_fec_open(dev);
return 0;
}
#endif
static struct of_device_id mpc52xx_fec_match[] = {
{ .compatible = "fsl,mpc5200b-fec", },
{ .compatible = "fsl,mpc5200-fec", },
{ .compatible = "mpc5200-fec", },
{ }
};
MODULE_DEVICE_TABLE(of, mpc52xx_fec_match);
static struct platform_driver mpc52xx_fec_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
.of_match_table = mpc52xx_fec_match,
},
.probe = mpc52xx_fec_probe,
.remove = mpc52xx_fec_remove,
#ifdef CONFIG_PM
.suspend = mpc52xx_fec_of_suspend,
.resume = mpc52xx_fec_of_resume,
#endif
};
/* ======================================================================== */
/* Module */
/* ======================================================================== */
static int __init
mpc52xx_fec_init(void)
{
#ifdef CONFIG_FEC_MPC52xx_MDIO
int ret;
ret = platform_driver_register(&mpc52xx_fec_mdio_driver);
if (ret) {
printk(KERN_ERR DRIVER_NAME ": failed to register mdio driver\n");
return ret;
}
#endif
return platform_driver_register(&mpc52xx_fec_driver);
}
static void __exit
mpc52xx_fec_exit(void)
{
platform_driver_unregister(&mpc52xx_fec_driver);
#ifdef CONFIG_FEC_MPC52xx_MDIO
platform_driver_unregister(&mpc52xx_fec_mdio_driver);
#endif
}
module_init(mpc52xx_fec_init);
module_exit(mpc52xx_fec_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dale Farnsworth");
MODULE_DESCRIPTION("Ethernet driver for the Freescale MPC52xx FEC");
| gpl-2.0 |
sac23/Sacs_Stock_Kernel | arch/mips/kernel/perf_event_mipsxx.c | 3764 | 41357 | /*
* Linux performance counter support for MIPS.
*
* Copyright (C) 2010 MIPS Technologies, Inc.
* Copyright (C) 2011 Cavium Networks, Inc.
* Author: Deng-Cheng Zhu
*
* This code is based on the implementation for ARM, which is in turn
* based on the sparc64 perf event code and the x86 code. Performance
* counter access is based on the MIPS Oprofile code. And the callchain
* support references the code of MIPS stacktrace.c.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/stacktrace.h>
#include <asm/time.h> /* For perf_irq */
#define MIPS_MAX_HWEVENTS 4
struct cpu_hw_events {
/* Array of events on this cpu. */
struct perf_event *events[MIPS_MAX_HWEVENTS];
/*
* Set the bit (indexed by the counter number) when the counter
* is used for an event.
*/
unsigned long used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];
/*
* Software copy of the control register for each performance counter.
* MIPS CPUs vary in performance counters. They use this differently,
* and even may not use it.
*/
unsigned int saved_ctrl[MIPS_MAX_HWEVENTS];
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
.saved_ctrl = {0},
};
/* The description of MIPS performance events. */
struct mips_perf_event {
unsigned int event_id;
/*
* MIPS performance counters are indexed starting from 0.
* CNTR_EVEN indicates the indexes of the counters to be used are
* even numbers.
*/
unsigned int cntr_mask;
#define CNTR_EVEN 0x55555555
#define CNTR_ODD 0xaaaaaaaa
#define CNTR_ALL 0xffffffff
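/*
 * For instance, an event restricted to even-numbered counters
 * carries CNTR_EVEN (0x55555555, bits 0, 2, 4, ...) in its
 * cntr_mask, so the allocation loop in mipsxx_pmu_alloc_counter()
 * below will only ever pick an even idx for it.
 */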
#ifdef CONFIG_MIPS_MT_SMP
enum {
T = 0,
V = 1,
P = 2,
} range;
#else
#define T
#define V
#define P
#endif
};
static struct mips_perf_event raw_event;
static DEFINE_MUTEX(raw_event_mutex);
#define UNSUPPORTED_PERF_EVENT_ID 0xffffffff
#define C(x) PERF_COUNT_HW_CACHE_##x
struct mips_pmu {
u64 max_period;
u64 valid_count;
u64 overflow;
const char *name;
int irq;
u64 (*read_counter)(unsigned int idx);
void (*write_counter)(unsigned int idx, u64 val);
const struct mips_perf_event *(*map_raw_event)(u64 config);
const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
const struct mips_perf_event (*cache_event_map)
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX];
unsigned int num_counters;
};
static struct mips_pmu mipspmu;
#define M_CONFIG1_PC (1 << 4)
#define M_PERFCTL_EXL (1 << 0)
#define M_PERFCTL_KERNEL (1 << 1)
#define M_PERFCTL_SUPERVISOR (1 << 2)
#define M_PERFCTL_USER (1 << 3)
#define M_PERFCTL_INTERRUPT_ENABLE (1 << 4)
#define M_PERFCTL_EVENT(event) (((event) & 0x3ff) << 5)
#define M_PERFCTL_VPEID(vpe) ((vpe) << 16)
#define M_PERFCTL_MT_EN(filter) ((filter) << 20)
#define M_TC_EN_ALL M_PERFCTL_MT_EN(0)
#define M_TC_EN_VPE M_PERFCTL_MT_EN(1)
#define M_TC_EN_TC M_PERFCTL_MT_EN(2)
#define M_PERFCTL_TCID(tcid) ((tcid) << 22)
#define M_PERFCTL_WIDE (1 << 30)
#define M_PERFCTL_MORE (1 << 31)
#define M_PERFCTL_COUNT_EVENT_WHENEVER (M_PERFCTL_EXL | \
M_PERFCTL_KERNEL | \
M_PERFCTL_USER | \
M_PERFCTL_SUPERVISOR | \
M_PERFCTL_INTERRUPT_ENABLE)
#ifdef CONFIG_MIPS_MT_SMP
#define M_PERFCTL_CONFIG_MASK 0x3fff801f
#else
#define M_PERFCTL_CONFIG_MASK 0x1f
#endif
#define M_PERFCTL_EVENT_MASK 0xfe0
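/*
 * Example control word built from the bits above: counting event
 * 0x2 in user mode with the overflow interrupt enabled gives
 * M_PERFCTL_EVENT(0x2) | M_PERFCTL_USER | M_PERFCTL_INTERRUPT_ENABLE
 * == (2 << 5) | (1 << 3) | (1 << 4) == 0x58.
 */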
#ifdef CONFIG_MIPS_MT_SMP
static int cpu_has_mipsmt_pertccounters;
static DEFINE_RWLOCK(pmuint_rwlock);
/*
* FIXME: For VSMP, vpe_id() is redefined for Perf-events, because
* cpu_data[cpuid].vpe_id reports 0 for _both_ CPUs.
*/
#if defined(CONFIG_HW_PERF_EVENTS)
#define vpe_id() (cpu_has_mipsmt_pertccounters ? \
0 : smp_processor_id())
#else
#define vpe_id() (cpu_has_mipsmt_pertccounters ? \
0 : cpu_data[smp_processor_id()].vpe_id)
#endif
/* Copied from op_model_mipsxx.c */
static unsigned int vpe_shift(void)
{
if (num_possible_cpus() > 1)
return 1;
return 0;
}
static unsigned int counters_total_to_per_cpu(unsigned int counters)
{
return counters >> vpe_shift();
}
static unsigned int counters_per_cpu_to_total(unsigned int counters)
{
return counters << vpe_shift();
}
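/*
 * Example: on a dual-VPE core without per-TC counters,
 * vpe_shift() == 1, so 4 counters in total give
 * counters_total_to_per_cpu(4) == 2 usable counters per CPU.
 */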
#else /* !CONFIG_MIPS_MT_SMP */
#define vpe_id() 0
#endif /* CONFIG_MIPS_MT_SMP */
static void resume_local_counters(void);
static void pause_local_counters(void);
static irqreturn_t mipsxx_pmu_handle_irq(int, void *);
static int mipsxx_pmu_handle_shared_irq(void);
static unsigned int mipsxx_pmu_swizzle_perf_idx(unsigned int idx)
{
if (vpe_id() == 1)
idx = (idx + 2) & 3;
return idx;
}
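/*
 * The swizzle above is a rotation by two within 0..3: on VPE 1,
 * idx 0 maps to 2, 1 to 3, 2 to 0 and 3 to 1, so the two VPEs end
 * up addressing disjoint halves of the four hardware counters.
 */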
static u64 mipsxx_pmu_read_counter(unsigned int idx)
{
idx = mipsxx_pmu_swizzle_perf_idx(idx);
switch (idx) {
case 0:
/*
* The counters are unsigned, we must cast to truncate
* off the high bits.
*/
return (u32)read_c0_perfcntr0();
case 1:
return (u32)read_c0_perfcntr1();
case 2:
return (u32)read_c0_perfcntr2();
case 3:
return (u32)read_c0_perfcntr3();
default:
WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
return 0;
}
}
static u64 mipsxx_pmu_read_counter_64(unsigned int idx)
{
idx = mipsxx_pmu_swizzle_perf_idx(idx);
switch (idx) {
case 0:
return read_c0_perfcntr0_64();
case 1:
return read_c0_perfcntr1_64();
case 2:
return read_c0_perfcntr2_64();
case 3:
return read_c0_perfcntr3_64();
default:
WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
return 0;
}
}
static void mipsxx_pmu_write_counter(unsigned int idx, u64 val)
{
idx = mipsxx_pmu_swizzle_perf_idx(idx);
switch (idx) {
case 0:
write_c0_perfcntr0(val);
return;
case 1:
write_c0_perfcntr1(val);
return;
case 2:
write_c0_perfcntr2(val);
return;
case 3:
write_c0_perfcntr3(val);
return;
}
}
static void mipsxx_pmu_write_counter_64(unsigned int idx, u64 val)
{
idx = mipsxx_pmu_swizzle_perf_idx(idx);
switch (idx) {
case 0:
write_c0_perfcntr0_64(val);
return;
case 1:
write_c0_perfcntr1_64(val);
return;
case 2:
write_c0_perfcntr2_64(val);
return;
case 3:
write_c0_perfcntr3_64(val);
return;
}
}
static unsigned int mipsxx_pmu_read_control(unsigned int idx)
{
idx = mipsxx_pmu_swizzle_perf_idx(idx);
switch (idx) {
case 0:
return read_c0_perfctrl0();
case 1:
return read_c0_perfctrl1();
case 2:
return read_c0_perfctrl2();
case 3:
return read_c0_perfctrl3();
default:
WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
return 0;
}
}
static void mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
{
idx = mipsxx_pmu_swizzle_perf_idx(idx);
switch (idx) {
case 0:
write_c0_perfctrl0(val);
return;
case 1:
write_c0_perfctrl1(val);
return;
case 2:
write_c0_perfctrl2(val);
return;
case 3:
write_c0_perfctrl3(val);
return;
}
}
static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
struct hw_perf_event *hwc)
{
int i;
/*
* We only need to care about the counter mask. The range has
* already been checked.
*/
unsigned long cntr_mask = (hwc->event_base >> 8) & 0xffff;
for (i = mipspmu.num_counters - 1; i >= 0; i--) {
/*
* Note that some MIPS perf events can be counted by both
* even and odd counters, whereas many others can be counted
* only by even _or_ odd counters. This introduces an issue:
* when the former kind of event takes the counter that the
* latter kind of event wants to use, the "counter
* allocation" for the latter event will fail. If the two
* could be swapped dynamically, both would be satisfied.
*/
if (test_bit(i, &cntr_mask) &&
!test_and_set_bit(i, cpuc->used_mask))
return i;
}
return -EAGAIN;
}
static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
(evt->config_base & M_PERFCTL_CONFIG_MASK) |
/* Make sure interrupt enabled. */
M_PERFCTL_INTERRUPT_ENABLE;
/*
* We do not actually let the counter run. Leave it until start().
*/
}
static void mipsxx_pmu_disable_event(int idx)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
unsigned long flags;
WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
local_irq_save(flags);
cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) &
~M_PERFCTL_COUNT_EVENT_WHENEVER;
mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
local_irq_restore(flags);
}
static int mipspmu_event_set_period(struct perf_event *event,
struct hw_perf_event *hwc,
int idx)
{
u64 left = local64_read(&hwc->period_left);
u64 period = hwc->sample_period;
int ret = 0;
if (unlikely((left + period) & (1ULL << 63))) {
/* left underflowed by more than period. */
left = period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
} else if (unlikely((left + period) <= period)) {
/* left underflowed by less than period. */
left += period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
}
if (left > mipspmu.max_period) {
left = mipspmu.max_period;
local64_set(&hwc->period_left, left);
}
local64_set(&hwc->prev_count, mipspmu.overflow - left);
mipspmu.write_counter(idx, mipspmu.overflow - left);
perf_event_update_userpage(event);
return ret;
}
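/*
 * Worked example, assuming the 32-bit counter configuration where
 * mipspmu.overflow == 1ULL << 31 (set up during initialization
 * elsewhere in this file): with left == 1000000 the counter is
 * programmed to 0x80000000 - 1000000 == 0x7ff0bdc0, so it reaches
 * bit 31 and raises the overflow interrupt after exactly 1000000
 * events.
 */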
static void mipspmu_event_update(struct perf_event *event,
struct hw_perf_event *hwc,
int idx)
{
u64 prev_raw_count, new_raw_count;
u64 delta;
again:
prev_raw_count = local64_read(&hwc->prev_count);
new_raw_count = mipspmu.read_counter(idx);
if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
new_raw_count) != prev_raw_count)
goto again;
delta = new_raw_count - prev_raw_count;
local64_add(delta, &event->count);
local64_sub(delta, &hwc->period_left);
}
static void mipspmu_start(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
if (flags & PERF_EF_RELOAD)
WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
hwc->state = 0;
/* Set the period for the event. */
mipspmu_event_set_period(event, hwc, hwc->idx);
/* Enable the event. */
mipsxx_pmu_enable_event(hwc, hwc->idx);
}
static void mipspmu_stop(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
if (!(hwc->state & PERF_HES_STOPPED)) {
/* We are working on a local event. */
mipsxx_pmu_disable_event(hwc->idx);
barrier();
mipspmu_event_update(event, hwc, hwc->idx);
hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}
}
static int mipspmu_add(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx;
int err = 0;
perf_pmu_disable(event->pmu);
/* Look for a free counter for this event. */
idx = mipsxx_pmu_alloc_counter(cpuc, hwc);
if (idx < 0) {
err = idx;
goto out;
}
/*
* If there is an event in the counter we are going to use then
* make sure it is disabled.
*/
event->hw.idx = idx;
mipsxx_pmu_disable_event(idx);
cpuc->events[idx] = event;
hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
if (flags & PERF_EF_START)
mipspmu_start(event, PERF_EF_RELOAD);
/* Propagate our changes to the userspace mapping. */
perf_event_update_userpage(event);
out:
perf_pmu_enable(event->pmu);
return err;
}
static void mipspmu_del(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
mipspmu_stop(event, PERF_EF_UPDATE);
cpuc->events[idx] = NULL;
clear_bit(idx, cpuc->used_mask);
perf_event_update_userpage(event);
}
static void mipspmu_read(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
/* Don't read disabled counters! */
if (hwc->idx < 0)
return;
mipspmu_event_update(event, hwc, hwc->idx);
}
static void mipspmu_enable(struct pmu *pmu)
{
#ifdef CONFIG_MIPS_MT_SMP
write_unlock(&pmuint_rwlock);
#endif
resume_local_counters();
}
/*
* MIPS performance counters can be per-TC. The control registers cannot
* be directly accessed across CPUs, so global control would require
* cross-CPU calls. on_each_cpu() could help, but we cannot guarantee
* this function is called with interrupts enabled. Instead we pause the
* local counters, grab a rwlock, and leave the counters on other CPUs
* alone. If a counter interrupt is raised while we hold the write lock,
* that CPU simply pauses its local counters and spins in the handler.
* Note that we cannot be migrated to another CPU between pausing the
* local counters and grabbing the lock.
*/
static void mipspmu_disable(struct pmu *pmu)
{
pause_local_counters();
#ifdef CONFIG_MIPS_MT_SMP
write_lock(&pmuint_rwlock);
#endif
}
static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmu_reserve_mutex);
static int (*save_perf_irq)(void);
static int mipspmu_get_irq(void)
{
int err;
if (mipspmu.irq >= 0) {
/* Request my own irq handler. */
err = request_irq(mipspmu.irq, mipsxx_pmu_handle_irq,
IRQF_PERCPU | IRQF_NOBALANCING,
"mips_perf_pmu", NULL);
if (err) {
pr_warning("Unable to request IRQ%d for MIPS "
"performance counters!\n", mipspmu.irq);
}
} else if (cp0_perfcount_irq < 0) {
/*
* We are sharing the irq number with the timer interrupt.
*/
save_perf_irq = perf_irq;
perf_irq = mipsxx_pmu_handle_shared_irq;
err = 0;
} else {
pr_warning("The platform hasn't properly defined its "
"interrupt controller.\n");
err = -ENOENT;
}
return err;
}
static void mipspmu_free_irq(void)
{
if (mipspmu.irq >= 0)
free_irq(mipspmu.irq, NULL);
else if (cp0_perfcount_irq < 0)
perf_irq = save_perf_irq;
}
/*
* mipsxx/rm9000/loongson2 have different performance counters; each has
* its own low-level init routine.
*/
static void reset_counters(void *arg);
static int __hw_perf_event_init(struct perf_event *event);
static void hw_perf_event_destroy(struct perf_event *event)
{
if (atomic_dec_and_mutex_lock(&active_events,
&pmu_reserve_mutex)) {
/*
* We must not call the destroy function with interrupts
* disabled.
*/
on_each_cpu(reset_counters,
(void *)(long)mipspmu.num_counters, 1);
mipspmu_free_irq();
mutex_unlock(&pmu_reserve_mutex);
}
}
static int mipspmu_event_init(struct perf_event *event)
{
int err = 0;
/* does not support taken branch sampling */
if (has_branch_stack(event))
return -EOPNOTSUPP;
switch (event->attr.type) {
case PERF_TYPE_RAW:
case PERF_TYPE_HARDWARE:
case PERF_TYPE_HW_CACHE:
break;
default:
return -ENOENT;
}
if (event->cpu >= nr_cpumask_bits ||
(event->cpu >= 0 && !cpu_online(event->cpu)))
return -ENODEV;
if (!atomic_inc_not_zero(&active_events)) {
mutex_lock(&pmu_reserve_mutex);
if (atomic_read(&active_events) == 0)
err = mipspmu_get_irq();
if (!err)
atomic_inc(&active_events);
mutex_unlock(&pmu_reserve_mutex);
}
if (err)
return err;
return __hw_perf_event_init(event);
}
static struct pmu pmu = {
.pmu_enable = mipspmu_enable,
.pmu_disable = mipspmu_disable,
.event_init = mipspmu_event_init,
.add = mipspmu_add,
.del = mipspmu_del,
.start = mipspmu_start,
.stop = mipspmu_stop,
.read = mipspmu_read,
};
static unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev)
{
/*
* Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for
* event_id.
*/
#ifdef CONFIG_MIPS_MT_SMP
return ((unsigned int)pev->range << 24) |
(pev->cntr_mask & 0xffff00) |
(pev->event_id & 0xff);
#else
return (pev->cntr_mask & 0xffff00) |
(pev->event_id & 0xff);
#endif
}
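/*
* Layout example (illustrative): on a CONFIG_MIPS_MT_SMP build, an
* event with range P, cntr_mask 0x5500 and event_id 0x02 encodes to
* (P << 24) | 0x5500 | 0x02. mipsxx_pmu_alloc_counter() later recovers
* the counter mask with (event_base >> 8) & 0xffff.
*/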
static const struct mips_perf_event *mipspmu_map_general_event(int idx)
{
const struct mips_perf_event *pev;
pev = ((*mipspmu.general_event_map)[idx].event_id ==
UNSUPPORTED_PERF_EVENT_ID ? ERR_PTR(-EOPNOTSUPP) :
&(*mipspmu.general_event_map)[idx]);
return pev;
}
static const struct mips_perf_event *mipspmu_map_cache_event(u64 config)
{
unsigned int cache_type, cache_op, cache_result;
const struct mips_perf_event *pev;
cache_type = (config >> 0) & 0xff;
if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
return ERR_PTR(-EINVAL);
cache_op = (config >> 8) & 0xff;
if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
return ERR_PTR(-EINVAL);
cache_result = (config >> 16) & 0xff;
if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
return ERR_PTR(-EINVAL);
pev = &((*mipspmu.cache_event_map)
[cache_type]
[cache_op]
[cache_result]);
if (pev->event_id == UNSUPPORTED_PERF_EVENT_ID)
return ERR_PTR(-EOPNOTSUPP);
return pev;
}
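/*
* The config decoded above follows the generic perf ABI layout:
* config = type | (op << 8) | (result << 16). For example, an L1D read
* miss is 0x00 | (0x00 << 8) | (0x01 << 16) == 0x10000.
*/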
static int validate_group(struct perf_event *event)
{
struct perf_event *sibling, *leader = event->group_leader;
struct cpu_hw_events fake_cpuc;
memset(&fake_cpuc, 0, sizeof(fake_cpuc));
if (mipsxx_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
return -EINVAL;
list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
if (mipsxx_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
return -EINVAL;
}
if (mipsxx_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
return -EINVAL;
return 0;
}
/* This is needed by specific irq handlers in perf_event_*.c */
static void handle_associated_event(struct cpu_hw_events *cpuc,
int idx, struct perf_sample_data *data,
struct pt_regs *regs)
{
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc = &event->hw;
mipspmu_event_update(event, hwc, idx);
data->period = event->hw.last_period;
if (!mipspmu_event_set_period(event, hwc, idx))
return;
if (perf_event_overflow(event, data, regs))
mipsxx_pmu_disable_event(idx);
}
static int __n_counters(void)
{
if (!(read_c0_config1() & M_CONFIG1_PC))
return 0;
if (!(read_c0_perfctrl0() & M_PERFCTL_MORE))
return 1;
if (!(read_c0_perfctrl1() & M_PERFCTL_MORE))
return 2;
if (!(read_c0_perfctrl2() & M_PERFCTL_MORE))
return 3;
return 4;
}
static int n_counters(void)
{
int counters;
switch (current_cpu_type()) {
case CPU_R10000:
counters = 2;
break;
case CPU_R12000:
case CPU_R14000:
counters = 4;
break;
default:
counters = __n_counters();
}
return counters;
}
static void reset_counters(void *arg)
{
int counters = (int)(long)arg;
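/*
* The cases below deliberately fall through, so that a request to
* reset N counters clears the control and count registers of
* counters N-1 down to 0.
*/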
switch (counters) {
case 4:
mipsxx_pmu_write_control(3, 0);
mipspmu.write_counter(3, 0);
case 3:
mipsxx_pmu_write_control(2, 0);
mipspmu.write_counter(2, 0);
case 2:
mipsxx_pmu_write_control(1, 0);
mipspmu.write_counter(1, 0);
case 1:
mipsxx_pmu_write_control(0, 0);
mipspmu.write_counter(0, 0);
}
}
/* 24K/34K/1004K cores can share the same event map. */
static const struct mips_perf_event mipsxxcore_event_map
[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
[PERF_COUNT_HW_CACHE_REFERENCES] = { UNSUPPORTED_PERF_EVENT_ID },
[PERF_COUNT_HW_CACHE_MISSES] = { UNSUPPORTED_PERF_EVENT_ID },
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T },
[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
[PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID },
};
/* 74K core has different branch event code. */
static const struct mips_perf_event mipsxx74Kcore_event_map
[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
[PERF_COUNT_HW_CACHE_REFERENCES] = { UNSUPPORTED_PERF_EVENT_ID },
[PERF_COUNT_HW_CACHE_MISSES] = { UNSUPPORTED_PERF_EVENT_ID },
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T },
[PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
[PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID },
};
static const struct mips_perf_event octeon_event_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
[PERF_COUNT_HW_INSTRUCTIONS] = { 0x03, CNTR_ALL },
[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x2b, CNTR_ALL },
[PERF_COUNT_HW_CACHE_MISSES] = { 0x2e, CNTR_ALL },
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x08, CNTR_ALL },
[PERF_COUNT_HW_BRANCH_MISSES] = { 0x09, CNTR_ALL },
[PERF_COUNT_HW_BUS_CYCLES] = { 0x25, CNTR_ALL },
};
/* 24K/34K/1004K cores can share the same cache event map. */
static const struct mips_perf_event mipsxxcore_cache_map
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
/*
* Like some other architectures (e.g. ARM), the performance
* counters don't differentiate between read and write
* accesses/misses, so this isn't strictly correct, but it's the
* best we can do. Writes and reads get combined.
*/
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x0a, CNTR_EVEN, T },
[C(RESULT_MISS)] = { 0x0b, CNTR_EVEN | CNTR_ODD, T },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0x0a, CNTR_EVEN, T },
[C(RESULT_MISS)] = { 0x0b, CNTR_EVEN | CNTR_ODD, T },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x09, CNTR_EVEN, T },
[C(RESULT_MISS)] = { 0x09, CNTR_ODD, T },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0x09, CNTR_EVEN, T },
[C(RESULT_MISS)] = { 0x09, CNTR_ODD, T },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { 0x14, CNTR_EVEN, T },
/*
* Note that MIPS has only "hit" events countable for
* the prefetch operation.
*/
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x15, CNTR_ODD, P },
[C(RESULT_MISS)] = { 0x16, CNTR_EVEN, P },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0x15, CNTR_ODD, P },
[C(RESULT_MISS)] = { 0x16, CNTR_EVEN, P },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
},
[C(DTLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
[C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
[C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x05, CNTR_EVEN, T },
[C(RESULT_MISS)] = { 0x05, CNTR_ODD, T },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0x05, CNTR_EVEN, T },
[C(RESULT_MISS)] = { 0x05, CNTR_ODD, T },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
},
[C(BPU)] = {
/* Using the same code for *HW_BRANCH* */
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN, T },
[C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN, T },
[C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
},
[C(NODE)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
},
};
/* 74K core has completely different cache event map. */
static const struct mips_perf_event mipsxx74Kcore_cache_map
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
/*
* Like some other architectures (e.g. ARM), the performance
* counters don't differentiate between read and write
* accesses/misses, so this isn't strictly correct, but it's the
* best we can do. Writes and reads get combined.
*/
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x17, CNTR_ODD, T },
[C(RESULT_MISS)] = { 0x18, CNTR_ODD, T },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0x17, CNTR_ODD, T },
[C(RESULT_MISS)] = { 0x18, CNTR_ODD, T },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
[C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
[C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { 0x34, CNTR_EVEN, T },
/*
* Note that MIPS has only "hit" events countable for
* the prefetch operation.
*/
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P },
[C(RESULT_MISS)] = { 0x1d, CNTR_EVEN | CNTR_ODD, P },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P },
[C(RESULT_MISS)] = { 0x1d, CNTR_EVEN | CNTR_ODD, P },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
},
[C(DTLB)] = {
/* 74K core does not have specific DTLB events. */
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T },
[C(RESULT_MISS)] = { 0x04, CNTR_ODD, T },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T },
[C(RESULT_MISS)] = { 0x04, CNTR_ODD, T },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
},
[C(BPU)] = {
/* Using the same code for *HW_BRANCH* */
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x27, CNTR_EVEN, T },
[C(RESULT_MISS)] = { 0x27, CNTR_ODD, T },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0x27, CNTR_EVEN, T },
[C(RESULT_MISS)] = { 0x27, CNTR_ODD, T },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
},
[C(NODE)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
},
};
static const struct mips_perf_event octeon_cache_map
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x2b, CNTR_ALL },
[C(RESULT_MISS)] = { 0x2e, CNTR_ALL },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0x30, CNTR_ALL },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x18, CNTR_ALL },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { 0x19, CNTR_ALL },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
},
[C(DTLB)] = {
/*
* Only general DTLB misses are counted, so the same event is
* used for both read and write.
*/
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { 0x35, CNTR_ALL },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { 0x35, CNTR_ALL },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { 0x37, CNTR_ALL },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
},
[C(BPU)] = {
/* Using the same code for *HW_BRANCH* */
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
},
};
#ifdef CONFIG_MIPS_MT_SMP
static void check_and_calc_range(struct perf_event *event,
const struct mips_perf_event *pev)
{
struct hw_perf_event *hwc = &event->hw;
if (event->cpu >= 0) {
if (pev->range > V) {
/*
* The user selected an event that is processor
* wide, while expecting it to be VPE wide.
*/
hwc->config_base |= M_TC_EN_ALL;
} else {
/*
* FIXME: cpu_data[event->cpu].vpe_id reports 0
* for both CPUs.
*/
hwc->config_base |= M_PERFCTL_VPEID(event->cpu);
hwc->config_base |= M_TC_EN_VPE;
}
} else
hwc->config_base |= M_TC_EN_ALL;
}
#else
static void check_and_calc_range(struct perf_event *event,
const struct mips_perf_event *pev)
{
}
#endif
static int __hw_perf_event_init(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
struct hw_perf_event *hwc = &event->hw;
const struct mips_perf_event *pev;
int err;
/* Returning MIPS event descriptor for generic perf event. */
if (PERF_TYPE_HARDWARE == event->attr.type) {
if (event->attr.config >= PERF_COUNT_HW_MAX)
return -EINVAL;
pev = mipspmu_map_general_event(event->attr.config);
} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
pev = mipspmu_map_cache_event(event->attr.config);
} else if (PERF_TYPE_RAW == event->attr.type) {
/* We are working on the global raw event. */
mutex_lock(&raw_event_mutex);
pev = mipspmu.map_raw_event(event->attr.config);
} else {
/* The event type is not (yet) supported. */
return -EOPNOTSUPP;
}
if (IS_ERR(pev)) {
if (PERF_TYPE_RAW == event->attr.type)
mutex_unlock(&raw_event_mutex);
return PTR_ERR(pev);
}
/*
* We allow maximum flexibility in how each individual counter shared
* by the single CPU operates (the mode exclusion and the range).
*/
hwc->config_base = M_PERFCTL_INTERRUPT_ENABLE;
/* Calculate range bits and validate it. */
if (num_possible_cpus() > 1)
check_and_calc_range(event, pev);
hwc->event_base = mipspmu_perf_event_encode(pev);
if (PERF_TYPE_RAW == event->attr.type)
mutex_unlock(&raw_event_mutex);
if (!attr->exclude_user)
hwc->config_base |= M_PERFCTL_USER;
if (!attr->exclude_kernel) {
hwc->config_base |= M_PERFCTL_KERNEL;
/* MIPS kernel mode: KSU == 00b || EXL == 1 || ERL == 1 */
hwc->config_base |= M_PERFCTL_EXL;
}
if (!attr->exclude_hv)
hwc->config_base |= M_PERFCTL_SUPERVISOR;
hwc->config_base &= M_PERFCTL_CONFIG_MASK;
/*
* The event can belong to another cpu. We do not assign a local
* counter for it for now.
*/
hwc->idx = -1;
hwc->config = 0;
if (!hwc->sample_period) {
hwc->sample_period = mipspmu.max_period;
hwc->last_period = hwc->sample_period;
local64_set(&hwc->period_left, hwc->sample_period);
}
err = 0;
if (event->group_leader != event)
err = validate_group(event);
event->destroy = hw_perf_event_destroy;
if (err)
event->destroy(event);
return err;
}
static void pause_local_counters(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int ctr = mipspmu.num_counters;
unsigned long flags;
local_irq_save(flags);
do {
ctr--;
cpuc->saved_ctrl[ctr] = mipsxx_pmu_read_control(ctr);
mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] &
~M_PERFCTL_COUNT_EVENT_WHENEVER);
} while (ctr > 0);
local_irq_restore(flags);
}
static void resume_local_counters(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int ctr = mipspmu.num_counters;
do {
ctr--;
mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]);
} while (ctr > 0);
}
static int mipsxx_pmu_handle_shared_irq(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct perf_sample_data data;
unsigned int counters = mipspmu.num_counters;
u64 counter;
int handled = IRQ_NONE;
struct pt_regs *regs;
if (cpu_has_mips_r2 && !(read_c0_cause() & (1 << 26)))
return handled;
/*
* First we pause the local counters, so that when we are locked
* here, the counters are all paused. When the lock is held for
* writing due to perf_disable(), the timer interrupt handler will
* be delayed.
*
* See also mipspmu_start().
*/
pause_local_counters();
#ifdef CONFIG_MIPS_MT_SMP
read_lock(&pmuint_rwlock);
#endif
regs = get_irq_regs();
perf_sample_data_init(&data, 0);
switch (counters) {
#define HANDLE_COUNTER(n) \
case n + 1: \
if (test_bit(n, cpuc->used_mask)) { \
counter = mipspmu.read_counter(n); \
if (counter & mipspmu.overflow) { \
handle_associated_event(cpuc, n, &data, regs); \
handled = IRQ_HANDLED; \
} \
}
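/*
* The HANDLE_COUNTER() cases deliberately fall through, so a PMU
* with N counters checks counters N-1 down to 0.
*/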
HANDLE_COUNTER(3)
HANDLE_COUNTER(2)
HANDLE_COUNTER(1)
HANDLE_COUNTER(0)
}
/*
* Do all the work for the pending perf events. We can do this
* in here because the performance counter interrupt is a regular
* interrupt, not NMI.
*/
if (handled == IRQ_HANDLED)
irq_work_run();
#ifdef CONFIG_MIPS_MT_SMP
read_unlock(&pmuint_rwlock);
#endif
resume_local_counters();
return handled;
}
static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
{
return mipsxx_pmu_handle_shared_irq();
}
/* 24K */
#define IS_BOTH_COUNTERS_24K_EVENT(b) \
((b) == 0 || (b) == 1 || (b) == 11)
/* 34K */
#define IS_BOTH_COUNTERS_34K_EVENT(b) \
((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
#define IS_RANGE_P_34K_EVENT(r, b) \
((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \
(b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 || \
(r) == 176 || ((b) >= 50 && (b) <= 55) || \
((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_34K_EVENT(r) ((r) == 47)
#endif
/* 74K */
#define IS_BOTH_COUNTERS_74K_EVENT(b) \
((b) == 0 || (b) == 1)
/* 1004K */
#define IS_BOTH_COUNTERS_1004K_EVENT(b) \
((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
#define IS_RANGE_P_1004K_EVENT(r, b) \
((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \
(b) == 25 || (b) == 36 || (b) == 39 || (r) == 44 || \
(r) == 174 || (r) == 176 || ((b) >= 50 && (b) <= 59) || \
(r) == 188 || (b) == 61 || (b) == 62 || \
((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_1004K_EVENT(r) ((r) == 47)
#endif
/*
* Users can specify raw events 0-255: 0-127 select events on the even
* counters, and 128-255 on the odd counters. That is, bit 7 indicates
* the counter parity. For example, to count Event Num 15 (from the
* user manual) on an odd counter, add 128 to 15 and pass the result,
* 143 (0x8F), as the event config.
*/
static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
{
unsigned int raw_id = config & 0xff;
unsigned int base_id = raw_id & 0x7f;
raw_event.event_id = base_id;
switch (current_cpu_type()) {
case CPU_24K:
if (IS_BOTH_COUNTERS_24K_EVENT(base_id))
raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
else
raw_event.cntr_mask =
raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
/*
* This has no real effect: non-multithreading CPUs do not
* check or calculate the range.
*/
raw_event.range = P;
#endif
break;
case CPU_34K:
if (IS_BOTH_COUNTERS_34K_EVENT(base_id))
raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
else
raw_event.cntr_mask =
raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
if (IS_RANGE_P_34K_EVENT(raw_id, base_id))
raw_event.range = P;
else if (unlikely(IS_RANGE_V_34K_EVENT(raw_id)))
raw_event.range = V;
else
raw_event.range = T;
#endif
break;
case CPU_74K:
if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
else
raw_event.cntr_mask =
raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
raw_event.range = P;
#endif
break;
case CPU_1004K:
if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
else
raw_event.cntr_mask =
raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
if (IS_RANGE_P_1004K_EVENT(raw_id, base_id))
raw_event.range = P;
else if (unlikely(IS_RANGE_V_1004K_EVENT(raw_id)))
raw_event.range = V;
else
raw_event.range = T;
#endif
break;
}
return &raw_event;
}
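/*
* Usage sketch (illustrative): with the encoding described above,
* counting manual event 15 on an odd counter from userspace could be
* requested with a raw config of 0x8f, e.g. "perf stat -e r8f ...".
*/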
static const struct mips_perf_event *octeon_pmu_map_raw_event(u64 config)
{
unsigned int raw_id = config & 0xff;
unsigned int base_id = raw_id & 0x7f;
raw_event.cntr_mask = CNTR_ALL;
raw_event.event_id = base_id;
if (current_cpu_type() == CPU_CAVIUM_OCTEON2) {
if (base_id > 0x42)
return ERR_PTR(-EOPNOTSUPP);
} else {
if (base_id > 0x3a)
return ERR_PTR(-EOPNOTSUPP);
}
switch (base_id) {
case 0x00:
case 0x0f:
case 0x1e:
case 0x1f:
case 0x2f:
case 0x34:
case 0x3b ... 0x3f:
return ERR_PTR(-EOPNOTSUPP);
default:
break;
}
return &raw_event;
}
static int __init
init_hw_perf_events(void)
{
int counters, irq;
int counter_bits;
pr_info("Performance counters: ");
counters = n_counters();
if (counters == 0) {
pr_cont("No available PMU.\n");
return -ENODEV;
}
#ifdef CONFIG_MIPS_MT_SMP
cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19);
if (!cpu_has_mipsmt_pertccounters)
counters = counters_total_to_per_cpu(counters);
#endif
#ifdef MSC01E_INT_BASE
if (cpu_has_veic) {
/*
* Using platform specific interrupt controller defines.
*/
irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
} else {
#endif
if (cp0_perfcount_irq >= 0)
irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
else
irq = -1;
#ifdef MSC01E_INT_BASE
}
#endif
mipspmu.map_raw_event = mipsxx_pmu_map_raw_event;
switch (current_cpu_type()) {
case CPU_24K:
mipspmu.name = "mips/24K";
mipspmu.general_event_map = &mipsxxcore_event_map;
mipspmu.cache_event_map = &mipsxxcore_cache_map;
break;
case CPU_34K:
mipspmu.name = "mips/34K";
mipspmu.general_event_map = &mipsxxcore_event_map;
mipspmu.cache_event_map = &mipsxxcore_cache_map;
break;
case CPU_74K:
mipspmu.name = "mips/74K";
mipspmu.general_event_map = &mipsxx74Kcore_event_map;
mipspmu.cache_event_map = &mipsxx74Kcore_cache_map;
break;
case CPU_1004K:
mipspmu.name = "mips/1004K";
mipspmu.general_event_map = &mipsxxcore_event_map;
mipspmu.cache_event_map = &mipsxxcore_cache_map;
break;
case CPU_CAVIUM_OCTEON:
case CPU_CAVIUM_OCTEON_PLUS:
case CPU_CAVIUM_OCTEON2:
mipspmu.name = "octeon";
mipspmu.general_event_map = &octeon_event_map;
mipspmu.cache_event_map = &octeon_cache_map;
mipspmu.map_raw_event = octeon_pmu_map_raw_event;
break;
default:
pr_cont("Either hardware does not support performance "
"counters, or not yet implemented.\n");
return -ENODEV;
}
mipspmu.num_counters = counters;
mipspmu.irq = irq;
if (read_c0_perfctrl0() & M_PERFCTL_WIDE) {
mipspmu.max_period = (1ULL << 63) - 1;
mipspmu.valid_count = (1ULL << 63) - 1;
mipspmu.overflow = 1ULL << 63;
mipspmu.read_counter = mipsxx_pmu_read_counter_64;
mipspmu.write_counter = mipsxx_pmu_write_counter_64;
counter_bits = 64;
} else {
mipspmu.max_period = (1ULL << 31) - 1;
mipspmu.valid_count = (1ULL << 31) - 1;
mipspmu.overflow = 1ULL << 31;
mipspmu.read_counter = mipsxx_pmu_read_counter;
mipspmu.write_counter = mipsxx_pmu_write_counter;
counter_bits = 32;
}
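/*
* Example: in the 32-bit case above, a counter seeded with
* overflow - left (see mipspmu_event_set_period()) sets bit 31 after
* "left" increments, which is exactly what the overflow test in
* mipsxx_pmu_handle_shared_irq() looks for.
*/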
on_each_cpu(reset_counters, (void *)(long)counters, 1);
pr_cont("%s PMU enabled, %d %d-bit counters available to each "
"CPU, irq %d%s\n", mipspmu.name, counters, counter_bits, irq,
irq < 0 ? " (share with timer interrupt)" : "");
perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
return 0;
}
early_initcall(init_hw_perf_events);
| gpl-2.0 |
jeeb/spica-2.6.38 | drivers/pci/hotplug/sgi_hotplug.c | 4276 | 18512 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2005-2006 Silicon Graphics, Inc. All rights reserved.
*
* This work was based on the 2.4/2.6 kernel development by Dick Reigner.
* Work to add BIOS PROM support was completed by Mike Habeck.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <asm/sn/addrs.h>
#include <asm/sn/geo.h>
#include <asm/sn/l1.h>
#include <asm/sn/module.h>
#include <asm/sn/pcibr_provider.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/sn_feature_sets.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/types.h>
#include <linux/acpi.h>
#include <asm/sn/acpi.h>
#include "../pci.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("SGI (prarit@sgi.com, dickie@sgi.com, habeck@sgi.com)");
MODULE_DESCRIPTION("SGI Altix Hot Plug PCI Controller Driver");
/* SAL call error codes. Keep in sync with prom header io/include/pcibr.h */
#define PCI_SLOT_ALREADY_UP 2 /* slot already up */
#define PCI_SLOT_ALREADY_DOWN 3 /* slot already down */
#define PCI_L1_ERR 7 /* L1 console command error */
#define PCI_EMPTY_33MHZ 15 /* empty 33 MHz bus */
#define PCIIO_ASIC_TYPE_TIOCA 4
#define PCI_L1_QSIZE 128 /* our L1 message buffer size */
#define SN_MAX_HP_SLOTS 32 /* max hotplug slots */
#define SN_SLOT_NAME_SIZE 33 /* size of name string */
/* internal list head */
static struct list_head sn_hp_list;
/* hotplug_slot struct's private pointer */
struct slot {
int device_num;
struct pci_bus *pci_bus;
/* this struct for glue internal only */
struct hotplug_slot *hotplug_slot;
struct list_head hp_list;
char physical_path[SN_SLOT_NAME_SIZE];
};
struct pcibr_slot_enable_resp {
int resp_sub_errno;
char resp_l1_msg[PCI_L1_QSIZE + 1];
};
struct pcibr_slot_disable_resp {
int resp_sub_errno;
char resp_l1_msg[PCI_L1_QSIZE + 1];
};
enum sn_pci_req_e {
PCI_REQ_SLOT_ELIGIBLE,
PCI_REQ_SLOT_DISABLE
};
static int enable_slot(struct hotplug_slot *slot);
static int disable_slot(struct hotplug_slot *slot);
static inline int get_power_status(struct hotplug_slot *slot, u8 *value);
static struct hotplug_slot_ops sn_hotplug_slot_ops = {
.enable_slot = enable_slot,
.disable_slot = disable_slot,
.get_power_status = get_power_status,
};
static DEFINE_MUTEX(sn_hotplug_mutex);
static ssize_t path_show(struct pci_slot *pci_slot, char *buf)
{
int retval = -ENOENT;
struct slot *slot = pci_slot->hotplug->private;
if (!slot)
return retval;
retval = sprintf(buf, "%s\n", slot->physical_path);
return retval;
}
static struct pci_slot_attribute sn_slot_path_attr = __ATTR_RO(path);
static int sn_pci_slot_valid(struct pci_bus *pci_bus, int device)
{
struct pcibus_info *pcibus_info;
u16 busnum, segment, ioboard_type;
pcibus_info = SN_PCIBUS_BUSSOFT_INFO(pci_bus);
/* Check to see if this is a valid slot on 'pci_bus' */
if (!(pcibus_info->pbi_valid_devices & (1 << device)))
return -EPERM;
ioboard_type = sn_ioboard_to_pci_bus(pci_bus);
busnum = pcibus_info->pbi_buscommon.bs_persist_busnum;
segment = pci_domain_nr(pci_bus) & 0xf;
/* Do not allow hotplug operations on base I/O cards */
if ((ioboard_type == L1_BRICKTYPE_IX ||
ioboard_type == L1_BRICKTYPE_IA) &&
(segment == 1 && busnum == 0 && device != 1))
return -EPERM;
return 1;
}
static int sn_pci_bus_valid(struct pci_bus *pci_bus)
{
struct pcibus_info *pcibus_info;
u32 asic_type;
u16 ioboard_type;
/* Don't register slots hanging off the TIOCA bus */
pcibus_info = SN_PCIBUS_BUSSOFT_INFO(pci_bus);
asic_type = pcibus_info->pbi_buscommon.bs_asic_type;
if (asic_type == PCIIO_ASIC_TYPE_TIOCA)
return -EPERM;
/* Only register slots in I/O Bricks that support hotplug */
ioboard_type = sn_ioboard_to_pci_bus(pci_bus);
switch (ioboard_type) {
case L1_BRICKTYPE_IX:
case L1_BRICKTYPE_PX:
case L1_BRICKTYPE_IA:
case L1_BRICKTYPE_PA:
case L1_BOARDTYPE_PCIX3SLOT:
return 1;
default:
return -EPERM;
}
return -EIO;
}
static int sn_hp_slot_private_alloc(struct hotplug_slot *bss_hotplug_slot,
struct pci_bus *pci_bus, int device,
char *name)
{
struct pcibus_info *pcibus_info;
struct slot *slot;
pcibus_info = SN_PCIBUS_BUSSOFT_INFO(pci_bus);
slot = kzalloc(sizeof(*slot), GFP_KERNEL);
if (!slot)
return -ENOMEM;
bss_hotplug_slot->private = slot;
slot->device_num = device;
slot->pci_bus = pci_bus;
sprintf(name, "%04x:%02x:%02x",
pci_domain_nr(pci_bus),
((u16)pcibus_info->pbi_buscommon.bs_persist_busnum),
device + 1);
sn_generate_path(pci_bus, slot->physical_path);
slot->hotplug_slot = bss_hotplug_slot;
list_add(&slot->hp_list, &sn_hp_list);
return 0;
}
static struct hotplug_slot * sn_hp_destroy(void)
{
struct slot *slot;
struct pci_slot *pci_slot;
struct hotplug_slot *bss_hotplug_slot = NULL;
list_for_each_entry(slot, &sn_hp_list, hp_list) {
bss_hotplug_slot = slot->hotplug_slot;
pci_slot = bss_hotplug_slot->pci_slot;
list_del(&((struct slot *)bss_hotplug_slot->private)->
hp_list);
sysfs_remove_file(&pci_slot->kobj,
&sn_slot_path_attr.attr);
break;
}
return bss_hotplug_slot;
}
static void sn_bus_free_data(struct pci_dev *dev)
{
struct pci_bus *subordinate_bus;
struct pci_dev *child;
/* Recursively clean up sn_irq_info structs */
if (dev->subordinate) {
subordinate_bus = dev->subordinate;
list_for_each_entry(child, &subordinate_bus->devices, bus_list)
sn_bus_free_data(child);
}
/*
* Some drivers may use DMA accesses during their remove
* functions, so we release the sysdata areas only after
* the driver remove functions have been called.
*/
sn_bus_store_sysdata(dev);
sn_pci_unfixup_slot(dev);
}
static int sn_slot_enable(struct hotplug_slot *bss_hotplug_slot,
int device_num, char **ssdt)
{
struct slot *slot = bss_hotplug_slot->private;
struct pcibus_info *pcibus_info;
struct pcibr_slot_enable_resp resp;
int rc;
pcibus_info = SN_PCIBUS_BUSSOFT_INFO(slot->pci_bus);
/*
* Power-on and initialize the slot in the SN
* PCI infrastructure.
*/
rc = sal_pcibr_slot_enable(pcibus_info, device_num, &resp, ssdt);
if (rc == PCI_SLOT_ALREADY_UP) {
dev_dbg(&slot->pci_bus->self->dev, "is already active\n");
return 1; /* return 1 to user */
}
if (rc == PCI_L1_ERR) {
dev_dbg(&slot->pci_bus->self->dev,
"L1 failure %d with message: %s",
resp.resp_sub_errno, resp.resp_l1_msg);
return -EPERM;
}
if (rc) {
dev_dbg(&slot->pci_bus->self->dev,
"insert failed with error %d sub-error %d\n",
rc, resp.resp_sub_errno);
return -EIO;
}
pcibus_info = SN_PCIBUS_BUSSOFT_INFO(slot->pci_bus);
pcibus_info->pbi_enabled_devices |= (1 << device_num);
return 0;
}
static int sn_slot_disable(struct hotplug_slot *bss_hotplug_slot,
int device_num, int action)
{
struct slot *slot = bss_hotplug_slot->private;
struct pcibus_info *pcibus_info;
struct pcibr_slot_disable_resp resp;
int rc;
pcibus_info = SN_PCIBUS_BUSSOFT_INFO(slot->pci_bus);
rc = sal_pcibr_slot_disable(pcibus_info, device_num, action, &resp);
if ((action == PCI_REQ_SLOT_ELIGIBLE) &&
(rc == PCI_SLOT_ALREADY_DOWN)) {
dev_dbg(&slot->pci_bus->self->dev, "Slot %s already inactive\n", slot->physical_path);
return 1; /* return 1 to user */
}
if ((action == PCI_REQ_SLOT_ELIGIBLE) && (rc == PCI_EMPTY_33MHZ)) {
dev_dbg(&slot->pci_bus->self->dev,
"Cannot remove last 33MHz card\n");
return -EPERM;
}
if ((action == PCI_REQ_SLOT_ELIGIBLE) && (rc == PCI_L1_ERR)) {
dev_dbg(&slot->pci_bus->self->dev,
"L1 failure %d with message \n%s\n",
resp.resp_sub_errno, resp.resp_l1_msg);
return -EPERM;
}
if ((action == PCI_REQ_SLOT_ELIGIBLE) && rc) {
dev_dbg(&slot->pci_bus->self->dev,
"remove failed with error %d sub-error %d\n",
rc, resp.resp_sub_errno);
return -EIO;
}
if ((action == PCI_REQ_SLOT_ELIGIBLE) && !rc)
return 0;
if ((action == PCI_REQ_SLOT_DISABLE) && !rc) {
pcibus_info = SN_PCIBUS_BUSSOFT_INFO(slot->pci_bus);
pcibus_info->pbi_enabled_devices &= ~(1 << device_num);
dev_dbg(&slot->pci_bus->self->dev, "remove successful\n");
return 0;
}
if ((action == PCI_REQ_SLOT_DISABLE) && rc) {
dev_dbg(&slot->pci_bus->self->dev, "remove failed rc = %d\n", rc);
}
return rc;
}
/*
* Power up and configure the slot via a SAL call to PROM.
* Scan slot (and any children), do any platform specific fixup,
* and find device driver.
*/
static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
{
struct slot *slot = bss_hotplug_slot->private;
struct pci_bus *new_bus = NULL;
struct pci_dev *dev;
int func, num_funcs;
int new_ppb = 0;
int rc;
char *ssdt = NULL;
void pcibios_fixup_device_resources(struct pci_dev *);
/* Serialize the Linux PCI infrastructure */
mutex_lock(&sn_hotplug_mutex);
/*
* Power-on and initialize the slot in the SN
* PCI infrastructure. Also, retrieve the ACPI SSDT
* table for the slot (if ACPI capable PROM).
*/
rc = sn_slot_enable(bss_hotplug_slot, slot->device_num, &ssdt);
if (rc) {
mutex_unlock(&sn_hotplug_mutex);
return rc;
}
if (ssdt)
ssdt = __va(ssdt);
/* Add the new SSDT for the slot to the ACPI namespace */
if (SN_ACPI_BASE_SUPPORT() && ssdt) {
acpi_status ret;
ret = acpi_load_table((struct acpi_table_header *)ssdt);
if (ACPI_FAILURE(ret)) {
printk(KERN_ERR "%s: acpi_load_table failed (0x%x)\n",
__func__, ret);
/* try to continue on */
}
}
num_funcs = pci_scan_slot(slot->pci_bus,
PCI_DEVFN(slot->device_num + 1, 0));
if (!num_funcs) {
dev_dbg(&slot->pci_bus->self->dev, "no device in slot\n");
mutex_unlock(&sn_hotplug_mutex);
return -ENODEV;
}
/*
* Map SN resources for all functions on the card
* to the Linux PCI interface and tell the drivers
* about them.
*/
for (func = 0; func < num_funcs; func++) {
dev = pci_get_slot(slot->pci_bus,
PCI_DEVFN(slot->device_num + 1,
PCI_FUNC(func)));
if (dev) {
/* Need to do slot fixup on PPB before fixup of children
* (PPB's pcidev_info needs to be in pcidev_info list
* before child's SN_PCIDEV_INFO() call to setup
* pdi_host_pcidev_info).
*/
pcibios_fixup_device_resources(dev);
if (SN_ACPI_BASE_SUPPORT())
sn_acpi_slot_fixup(dev);
else
sn_io_slot_fixup(dev);
if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
unsigned char sec_bus;
pci_read_config_byte(dev, PCI_SECONDARY_BUS,
&sec_bus);
new_bus = pci_add_new_bus(dev->bus, dev,
sec_bus);
pci_scan_child_bus(new_bus);
new_ppb = 1;
}
pci_dev_put(dev);
}
}
/* Add the slot's devices to the ACPI infrastructure */
if (SN_ACPI_BASE_SUPPORT() && ssdt) {
unsigned long long adr;
struct acpi_device *pdevice;
struct acpi_device *device;
acpi_handle phandle;
acpi_handle chandle = NULL;
acpi_handle rethandle;
acpi_status ret;
phandle = PCI_CONTROLLER(slot->pci_bus)->acpi_handle;
if (acpi_bus_get_device(phandle, &pdevice)) {
dev_dbg(&slot->pci_bus->self->dev,
"no parent device, assuming NULL\n");
pdevice = NULL;
}
/*
* Walk the rootbus node's immediate children looking for
* the slot's device node(s). There can be more than
* one for multifunction devices.
*/
for (;;) {
rethandle = NULL;
ret = acpi_get_next_object(ACPI_TYPE_DEVICE,
phandle, chandle,
&rethandle);
if (ret == AE_NOT_FOUND || rethandle == NULL)
break;
chandle = rethandle;
ret = acpi_evaluate_integer(chandle, METHOD_NAME__ADR,
NULL, &adr);
if (ACPI_SUCCESS(ret) &&
(adr>>16) == (slot->device_num + 1)) {
ret = acpi_bus_add(&device, pdevice, chandle,
ACPI_BUS_TYPE_DEVICE);
if (ACPI_FAILURE(ret)) {
printk(KERN_ERR "%s: acpi_bus_add "
"failed (0x%x) for slot %d "
"func %d\n", __func__,
ret, (int)(adr>>16),
(int)(adr&0xffff));
/* try to continue on */
} else {
acpi_bus_start(device);
}
}
}
}
/* Call the driver for the new device */
pci_bus_add_devices(slot->pci_bus);
/* Call the drivers for the new devices subordinate to PPB */
if (new_ppb)
pci_bus_add_devices(new_bus);
mutex_unlock(&sn_hotplug_mutex);
if (rc == 0)
dev_dbg(&slot->pci_bus->self->dev,
"insert operation successful\n");
else
dev_dbg(&slot->pci_bus->self->dev,
"insert operation failed rc = %d\n", rc);
return rc;
}
static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
{
struct slot *slot = bss_hotplug_slot->private;
struct pci_dev *dev;
int func;
int rc;
acpi_owner_id ssdt_id = 0;
/* Acquire update access to the bus */
mutex_lock(&sn_hotplug_mutex);
/* is it okay to bring this slot down? */
rc = sn_slot_disable(bss_hotplug_slot, slot->device_num,
PCI_REQ_SLOT_ELIGIBLE);
if (rc)
goto leaving;
/* free the ACPI resources for the slot */
if (SN_ACPI_BASE_SUPPORT() &&
PCI_CONTROLLER(slot->pci_bus)->acpi_handle) {
unsigned long long adr;
struct acpi_device *device;
acpi_handle phandle;
acpi_handle chandle = NULL;
acpi_handle rethandle;
acpi_status ret;
/* Get the rootbus node pointer */
phandle = PCI_CONTROLLER(slot->pci_bus)->acpi_handle;
/*
* Walk the rootbus node's immediate children looking for
* the slot's device node(s). There can be more than
* one for multifunction devices.
*/
for (;;) {
rethandle = NULL;
ret = acpi_get_next_object(ACPI_TYPE_DEVICE,
phandle, chandle,
&rethandle);
if (ret == AE_NOT_FOUND || rethandle == NULL)
break;
chandle = rethandle;
ret = acpi_evaluate_integer(chandle,
METHOD_NAME__ADR,
NULL, &adr);
if (ACPI_SUCCESS(ret) &&
(adr>>16) == (slot->device_num + 1)) {
/* retain the owner id */
acpi_get_id(chandle, &ssdt_id);
ret = acpi_bus_get_device(chandle,
&device);
if (ACPI_SUCCESS(ret))
acpi_bus_trim(device, 1);
}
}
}
/* Free the SN resources assigned to the Linux device. */
for (func = 0; func < 8; func++) {
dev = pci_get_slot(slot->pci_bus,
PCI_DEVFN(slot->device_num + 1,
PCI_FUNC(func)));
if (dev) {
sn_bus_free_data(dev);
pci_remove_bus_device(dev);
pci_dev_put(dev);
}
}
/* Remove the SSDT for the slot from the ACPI namespace */
if (SN_ACPI_BASE_SUPPORT() && ssdt_id) {
acpi_status ret;
ret = acpi_unload_table_id(ssdt_id);
if (ACPI_FAILURE(ret)) {
printk(KERN_ERR "%s: acpi_unload_table_id "
"failed (0x%x) for id %d\n",
__func__, ret, ssdt_id);
/* try to continue on */
}
}
/* free the collected sysdata pointers */
sn_bus_free_sysdata();
/* Deactivate slot */
rc = sn_slot_disable(bss_hotplug_slot, slot->device_num,
PCI_REQ_SLOT_DISABLE);
leaving:
/* Release the bus lock */
mutex_unlock(&sn_hotplug_mutex);
return rc;
}
static inline int get_power_status(struct hotplug_slot *bss_hotplug_slot,
u8 *value)
{
struct slot *slot = bss_hotplug_slot->private;
struct pcibus_info *pcibus_info;
u32 power;
pcibus_info = SN_PCIBUS_BUSSOFT_INFO(slot->pci_bus);
mutex_lock(&sn_hotplug_mutex);
power = pcibus_info->pbi_enabled_devices & (1 << slot->device_num);
*value = power ? 1 : 0;
mutex_unlock(&sn_hotplug_mutex);
return 0;
}
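/*
* Example: with pbi_enabled_devices == 0x06, the slots at device_num 1
* and 2 report power on, and every other slot reports power off.
*/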
static void sn_release_slot(struct hotplug_slot *bss_hotplug_slot)
{
kfree(bss_hotplug_slot->info);
kfree(bss_hotplug_slot->private);
kfree(bss_hotplug_slot);
}
static int sn_hotplug_slot_register(struct pci_bus *pci_bus)
{
int device;
struct pci_slot *pci_slot;
struct hotplug_slot *bss_hotplug_slot;
char name[SN_SLOT_NAME_SIZE];
int rc = 0;
/*
* Currently only four devices are supported; in the future
* there may be more -- up to 32.
*/
for (device = 0; device < SN_MAX_HP_SLOTS ; device++) {
if (sn_pci_slot_valid(pci_bus, device) != 1)
continue;
bss_hotplug_slot = kzalloc(sizeof(*bss_hotplug_slot),
GFP_KERNEL);
if (!bss_hotplug_slot) {
rc = -ENOMEM;
goto alloc_err;
}
bss_hotplug_slot->info =
kzalloc(sizeof(struct hotplug_slot_info),
GFP_KERNEL);
if (!bss_hotplug_slot->info) {
rc = -ENOMEM;
goto alloc_err;
}
if (sn_hp_slot_private_alloc(bss_hotplug_slot,
pci_bus, device, name)) {
rc = -ENOMEM;
goto alloc_err;
}
bss_hotplug_slot->ops = &sn_hotplug_slot_ops;
bss_hotplug_slot->release = &sn_release_slot;
rc = pci_hp_register(bss_hotplug_slot, pci_bus, device, name);
if (rc)
goto register_err;
pci_slot = bss_hotplug_slot->pci_slot;
rc = sysfs_create_file(&pci_slot->kobj,
&sn_slot_path_attr.attr);
if (rc)
goto register_err;
}
dev_dbg(&pci_bus->self->dev, "Registered bus with hotplug\n");
return rc;
register_err:
dev_dbg(&pci_bus->self->dev, "bus failed to register with err = %d\n",
rc);
alloc_err:
if (rc == -ENOMEM)
dev_dbg(&pci_bus->self->dev, "Memory allocation error\n");
/* destroy THIS element */
if (bss_hotplug_slot)
sn_release_slot(bss_hotplug_slot);
/* destroy anything else on the list */
while ((bss_hotplug_slot = sn_hp_destroy()))
pci_hp_deregister(bss_hotplug_slot);
return rc;
}
static int __init sn_pci_hotplug_init(void)
{
struct pci_bus *pci_bus = NULL;
int rc;
int registered = 0;
if (!sn_prom_feature_available(PRF_HOTPLUG_SUPPORT)) {
printk(KERN_ERR "%s: PROM version does not support hotplug.\n",
__func__);
return -EPERM;
}
INIT_LIST_HEAD(&sn_hp_list);
while ((pci_bus = pci_find_next_bus(pci_bus))) {
if (!pci_bus->sysdata)
continue;
rc = sn_pci_bus_valid(pci_bus);
if (rc != 1) {
dev_dbg(&pci_bus->self->dev, "not a valid hotplug bus\n");
continue;
}
dev_dbg(&pci_bus->self->dev, "valid hotplug bus\n");
rc = sn_hotplug_slot_register(pci_bus);
if (!rc) {
registered = 1;
} else {
registered = 0;
break;
}
}
return registered == 1 ? 0 : -ENODEV;
}
static void __exit sn_pci_hotplug_exit(void)
{
struct hotplug_slot *bss_hotplug_slot;
while ((bss_hotplug_slot = sn_hp_destroy()))
pci_hp_deregister(bss_hotplug_slot);
if (!list_empty(&sn_hp_list))
printk(KERN_ERR "%s: internal list is not empty\n", __FILE__);
}
module_init(sn_pci_hotplug_init);
module_exit(sn_pci_hotplug_exit);
| gpl-2.0 |
Dee-UK/D33_KK_RK3066 | drivers/ps3/sys-manager-core.c | 4788 | 2006 | /*
* PS3 System Manager core.
*
* Copyright (C) 2007 Sony Computer Entertainment Inc.
* Copyright 2007 Sony Corp.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <asm/lv1call.h>
#include <asm/ps3.h>
/**
* Statically linked routines that allow late binding of a loaded sys-manager
* module.
*/
static struct ps3_sys_manager_ops ps3_sys_manager_ops;
/**
* ps3_register_sys_manager_ops - Bind ps3_sys_manager_ops to a module.
* @ops: struct ps3_sys_manager_ops.
*
* To be called from ps3_sys_manager_probe() and ps3_sys_manager_remove() to
* register callback ops for power control. Copies data to the static
* variable ps3_sys_manager_ops.
*/
void ps3_sys_manager_register_ops(const struct ps3_sys_manager_ops *ops)
{
BUG_ON(!ops);
BUG_ON(!ops->dev);
ps3_sys_manager_ops = ops ? *ops : ps3_sys_manager_ops;
}
EXPORT_SYMBOL_GPL(ps3_sys_manager_register_ops);
void ps3_sys_manager_power_off(void)
{
if (ps3_sys_manager_ops.power_off)
ps3_sys_manager_ops.power_off(ps3_sys_manager_ops.dev);
ps3_sys_manager_halt();
}
void ps3_sys_manager_restart(void)
{
if (ps3_sys_manager_ops.restart)
ps3_sys_manager_ops.restart(ps3_sys_manager_ops.dev);
ps3_sys_manager_halt();
}
void ps3_sys_manager_halt(void)
{
pr_emerg("System Halted, OK to turn off power\n");
local_irq_disable();
while (1)
lv1_pause(1);
}
| gpl-2.0 |
hroark13/prevail_kernel_fixed | drivers/staging/octeon/cvmx-interrupt-decodes.c | 4788 | 13959 | /***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2009 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/*
*
* Automatically generated functions useful for enabling
* and decoding RSL_INT_BLOCKS interrupts.
*
*/
#include <asm/octeon/octeon.h>
#include "cvmx-gmxx-defs.h"
#include "cvmx-pcsx-defs.h"
#include "cvmx-pcsxx-defs.h"
#include "cvmx-spxx-defs.h"
#include "cvmx-stxx-defs.h"
#ifndef PRINT_ERROR
#define PRINT_ERROR(format, ...)
#endif
/**
* __cvmx_interrupt_gmxx_rxx_int_en_enable enables all interrupt bits in cvmx_gmxx_rxx_int_en_t
*/
void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
{
union cvmx_gmxx_rxx_int_en gmx_rx_int_en;
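/*
* Reading the interrupt register and writing the value straight back
* presumably acknowledges any already-pending (write-1-to-clear) bits
* before the enables are programmed below.
*/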
cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(index, block),
cvmx_read_csr(CVMX_GMXX_RXX_INT_REG(index, block)));
gmx_rx_int_en.u64 = 0;
if (OCTEON_IS_MODEL(OCTEON_CN56XX)) {
/* Skipping gmx_rx_int_en.s.reserved_29_63 */
gmx_rx_int_en.s.hg2cc = 1;
gmx_rx_int_en.s.hg2fld = 1;
gmx_rx_int_en.s.undat = 1;
gmx_rx_int_en.s.uneop = 1;
gmx_rx_int_en.s.unsop = 1;
gmx_rx_int_en.s.bad_term = 1;
gmx_rx_int_en.s.bad_seq = 1;
gmx_rx_int_en.s.rem_fault = 1;
gmx_rx_int_en.s.loc_fault = 1;
gmx_rx_int_en.s.pause_drp = 1;
/* Skipping gmx_rx_int_en.s.reserved_16_18 */
/*gmx_rx_int_en.s.ifgerr = 1; */
/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
gmx_rx_int_en.s.ovrerr = 1;
/* Skipping gmx_rx_int_en.s.reserved_9_9 */
gmx_rx_int_en.s.skperr = 1;
gmx_rx_int_en.s.rcverr = 1;
/* Skipping gmx_rx_int_en.s.reserved_5_6 */
/*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
gmx_rx_int_en.s.jabber = 1;
/* Skipping gmx_rx_int_en.s.reserved_2_2 */
gmx_rx_int_en.s.carext = 1;
/* Skipping gmx_rx_int_en.s.reserved_0_0 */
}
if (OCTEON_IS_MODEL(OCTEON_CN30XX)) {
/* Skipping gmx_rx_int_en.s.reserved_19_63 */
/*gmx_rx_int_en.s.phy_dupx = 1; */
/*gmx_rx_int_en.s.phy_spd = 1; */
/*gmx_rx_int_en.s.phy_link = 1; */
/*gmx_rx_int_en.s.ifgerr = 1; */
/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
gmx_rx_int_en.s.ovrerr = 1;
gmx_rx_int_en.s.niberr = 1;
gmx_rx_int_en.s.skperr = 1;
gmx_rx_int_en.s.rcverr = 1;
/*gmx_rx_int_en.s.lenerr = 1; // Length errors are handled when we get work */
gmx_rx_int_en.s.alnerr = 1;
/*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
gmx_rx_int_en.s.jabber = 1;
gmx_rx_int_en.s.maxerr = 1;
gmx_rx_int_en.s.carext = 1;
gmx_rx_int_en.s.minerr = 1;
}
if (OCTEON_IS_MODEL(OCTEON_CN50XX)) {
/* Skipping gmx_rx_int_en.s.reserved_20_63 */
gmx_rx_int_en.s.pause_drp = 1;
/*gmx_rx_int_en.s.phy_dupx = 1; */
/*gmx_rx_int_en.s.phy_spd = 1; */
/*gmx_rx_int_en.s.phy_link = 1; */
/*gmx_rx_int_en.s.ifgerr = 1; */
/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
gmx_rx_int_en.s.ovrerr = 1;
gmx_rx_int_en.s.niberr = 1;
gmx_rx_int_en.s.skperr = 1;
gmx_rx_int_en.s.rcverr = 1;
/* Skipping gmx_rx_int_en.s.reserved_6_6 */
gmx_rx_int_en.s.alnerr = 1;
/*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
gmx_rx_int_en.s.jabber = 1;
/* Skipping gmx_rx_int_en.s.reserved_2_2 */
gmx_rx_int_en.s.carext = 1;
/* Skipping gmx_rx_int_en.s.reserved_0_0 */
}
if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
/* Skipping gmx_rx_int_en.s.reserved_19_63 */
/*gmx_rx_int_en.s.phy_dupx = 1; */
/*gmx_rx_int_en.s.phy_spd = 1; */
/*gmx_rx_int_en.s.phy_link = 1; */
/*gmx_rx_int_en.s.ifgerr = 1; */
/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
gmx_rx_int_en.s.ovrerr = 1;
gmx_rx_int_en.s.niberr = 1;
gmx_rx_int_en.s.skperr = 1;
gmx_rx_int_en.s.rcverr = 1;
/*gmx_rx_int_en.s.lenerr = 1; // Length errors are handled when we get work */
gmx_rx_int_en.s.alnerr = 1;
/*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
gmx_rx_int_en.s.jabber = 1;
gmx_rx_int_en.s.maxerr = 1;
gmx_rx_int_en.s.carext = 1;
gmx_rx_int_en.s.minerr = 1;
}
if (OCTEON_IS_MODEL(OCTEON_CN31XX)) {
/* Skipping gmx_rx_int_en.s.reserved_19_63 */
/*gmx_rx_int_en.s.phy_dupx = 1; */
/*gmx_rx_int_en.s.phy_spd = 1; */
/*gmx_rx_int_en.s.phy_link = 1; */
/*gmx_rx_int_en.s.ifgerr = 1; */
/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
gmx_rx_int_en.s.ovrerr = 1;
gmx_rx_int_en.s.niberr = 1;
gmx_rx_int_en.s.skperr = 1;
gmx_rx_int_en.s.rcverr = 1;
/*gmx_rx_int_en.s.lenerr = 1; // Length errors are handled when we get work */
gmx_rx_int_en.s.alnerr = 1;
/*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
gmx_rx_int_en.s.jabber = 1;
gmx_rx_int_en.s.maxerr = 1;
gmx_rx_int_en.s.carext = 1;
gmx_rx_int_en.s.minerr = 1;
}
if (OCTEON_IS_MODEL(OCTEON_CN58XX)) {
/* Skipping gmx_rx_int_en.s.reserved_20_63 */
gmx_rx_int_en.s.pause_drp = 1;
/*gmx_rx_int_en.s.phy_dupx = 1; */
/*gmx_rx_int_en.s.phy_spd = 1; */
/*gmx_rx_int_en.s.phy_link = 1; */
/*gmx_rx_int_en.s.ifgerr = 1; */
/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
gmx_rx_int_en.s.ovrerr = 1;
gmx_rx_int_en.s.niberr = 1;
gmx_rx_int_en.s.skperr = 1;
gmx_rx_int_en.s.rcverr = 1;
/*gmx_rx_int_en.s.lenerr = 1; // Length errors are handled when we get work */
gmx_rx_int_en.s.alnerr = 1;
/*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
gmx_rx_int_en.s.jabber = 1;
gmx_rx_int_en.s.maxerr = 1;
gmx_rx_int_en.s.carext = 1;
gmx_rx_int_en.s.minerr = 1;
}
if (OCTEON_IS_MODEL(OCTEON_CN52XX)) {
/* Skipping gmx_rx_int_en.s.reserved_29_63 */
gmx_rx_int_en.s.hg2cc = 1;
gmx_rx_int_en.s.hg2fld = 1;
gmx_rx_int_en.s.undat = 1;
gmx_rx_int_en.s.uneop = 1;
gmx_rx_int_en.s.unsop = 1;
gmx_rx_int_en.s.bad_term = 1;
gmx_rx_int_en.s.bad_seq = 0;
gmx_rx_int_en.s.rem_fault = 1;
gmx_rx_int_en.s.loc_fault = 0;
gmx_rx_int_en.s.pause_drp = 1;
/* Skipping gmx_rx_int_en.s.reserved_16_18 */
/*gmx_rx_int_en.s.ifgerr = 1; */
/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
gmx_rx_int_en.s.ovrerr = 1;
/* Skipping gmx_rx_int_en.s.reserved_9_9 */
gmx_rx_int_en.s.skperr = 1;
gmx_rx_int_en.s.rcverr = 1;
/* Skipping gmx_rx_int_en.s.reserved_5_6 */
/*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
gmx_rx_int_en.s.jabber = 1;
/* Skipping gmx_rx_int_en.s.reserved_2_2 */
gmx_rx_int_en.s.carext = 1;
/* Skipping gmx_rx_int_en.s.reserved_0_0 */
}
cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(index, block), gmx_rx_int_en.u64);
}
/**
* __cvmx_interrupt_pcsx_intx_en_reg_enable enables all interrupt bits in cvmx_pcsx_intx_en_reg_t
*/
void __cvmx_interrupt_pcsx_intx_en_reg_enable(int index, int block)
{
union cvmx_pcsx_intx_en_reg pcs_int_en_reg;
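/* Write the latched status back to the interrupt register first; these
 * CSR bits follow the usual write-1-to-clear convention, so this clears
 * anything pending before the new enables are programmed. */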
cvmx_write_csr(CVMX_PCSX_INTX_REG(index, block),
cvmx_read_csr(CVMX_PCSX_INTX_REG(index, block)));
pcs_int_en_reg.u64 = 0;
if (OCTEON_IS_MODEL(OCTEON_CN56XX)) {
/* Skipping pcs_int_en_reg.s.reserved_12_63 */
/*pcs_int_en_reg.s.dup = 1; // This happens during normal operation */
pcs_int_en_reg.s.sync_bad_en = 1;
pcs_int_en_reg.s.an_bad_en = 1;
pcs_int_en_reg.s.rxlock_en = 1;
pcs_int_en_reg.s.rxbad_en = 1;
/*pcs_int_en_reg.s.rxerr_en = 1; // This happens during normal operation */
pcs_int_en_reg.s.txbad_en = 1;
pcs_int_en_reg.s.txfifo_en = 1;
pcs_int_en_reg.s.txfifu_en = 1;
pcs_int_en_reg.s.an_err_en = 1;
/*pcs_int_en_reg.s.xmit_en = 1; // This happens during normal operation */
/*pcs_int_en_reg.s.lnkspd_en = 1; // This happens during normal operation */
}
if (OCTEON_IS_MODEL(OCTEON_CN52XX)) {
/* Skipping pcs_int_en_reg.s.reserved_12_63 */
/*pcs_int_en_reg.s.dup = 1; // This happens during normal operation */
pcs_int_en_reg.s.sync_bad_en = 1;
pcs_int_en_reg.s.an_bad_en = 1;
pcs_int_en_reg.s.rxlock_en = 1;
pcs_int_en_reg.s.rxbad_en = 1;
/*pcs_int_en_reg.s.rxerr_en = 1; // This happens during normal operation */
pcs_int_en_reg.s.txbad_en = 1;
pcs_int_en_reg.s.txfifo_en = 1;
pcs_int_en_reg.s.txfifu_en = 1;
pcs_int_en_reg.s.an_err_en = 1;
/*pcs_int_en_reg.s.xmit_en = 1; // This happens during normal operation */
/*pcs_int_en_reg.s.lnkspd_en = 1; // This happens during normal operation */
}
cvmx_write_csr(CVMX_PCSX_INTX_EN_REG(index, block), pcs_int_en_reg.u64);
}
/**
* __cvmx_interrupt_pcsxx_int_en_reg_enable enables all interrupt bits in cvmx_pcsxx_int_en_reg_t
*/
void __cvmx_interrupt_pcsxx_int_en_reg_enable(int index)
{
union cvmx_pcsxx_int_en_reg pcsx_int_en_reg;
cvmx_write_csr(CVMX_PCSXX_INT_REG(index),
cvmx_read_csr(CVMX_PCSXX_INT_REG(index)));
pcsx_int_en_reg.u64 = 0;
if (OCTEON_IS_MODEL(OCTEON_CN56XX)) {
/* Skipping pcsx_int_en_reg.s.reserved_6_63 */
pcsx_int_en_reg.s.algnlos_en = 1;
pcsx_int_en_reg.s.synlos_en = 1;
pcsx_int_en_reg.s.bitlckls_en = 1;
pcsx_int_en_reg.s.rxsynbad_en = 1;
pcsx_int_en_reg.s.rxbad_en = 1;
pcsx_int_en_reg.s.txflt_en = 1;
}
if (OCTEON_IS_MODEL(OCTEON_CN52XX)) {
/* Skipping pcsx_int_en_reg.s.reserved_6_63 */
pcsx_int_en_reg.s.algnlos_en = 1;
pcsx_int_en_reg.s.synlos_en = 1;
pcsx_int_en_reg.s.bitlckls_en = 0; /* Happens if XAUI module is not installed */
pcsx_int_en_reg.s.rxsynbad_en = 1;
pcsx_int_en_reg.s.rxbad_en = 1;
pcsx_int_en_reg.s.txflt_en = 1;
}
cvmx_write_csr(CVMX_PCSXX_INT_EN_REG(index), pcsx_int_en_reg.u64);
}
/**
* __cvmx_interrupt_spxx_int_msk_enable enables all interrupt bits in cvmx_spxx_int_msk_t
*/
void __cvmx_interrupt_spxx_int_msk_enable(int index)
{
union cvmx_spxx_int_msk spx_int_msk;
cvmx_write_csr(CVMX_SPXX_INT_REG(index),
cvmx_read_csr(CVMX_SPXX_INT_REG(index)));
spx_int_msk.u64 = 0;
if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
/* Skipping spx_int_msk.s.reserved_12_63 */
spx_int_msk.s.calerr = 1;
spx_int_msk.s.syncerr = 1;
spx_int_msk.s.diperr = 1;
spx_int_msk.s.tpaovr = 1;
spx_int_msk.s.rsverr = 1;
spx_int_msk.s.drwnng = 1;
spx_int_msk.s.clserr = 1;
spx_int_msk.s.spiovr = 1;
/* Skipping spx_int_msk.s.reserved_2_3 */
spx_int_msk.s.abnorm = 1;
spx_int_msk.s.prtnxa = 1;
}
if (OCTEON_IS_MODEL(OCTEON_CN58XX)) {
/* Skipping spx_int_msk.s.reserved_12_63 */
spx_int_msk.s.calerr = 1;
spx_int_msk.s.syncerr = 1;
spx_int_msk.s.diperr = 1;
spx_int_msk.s.tpaovr = 1;
spx_int_msk.s.rsverr = 1;
spx_int_msk.s.drwnng = 1;
spx_int_msk.s.clserr = 1;
spx_int_msk.s.spiovr = 1;
/* Skipping spx_int_msk.s.reserved_2_3 */
spx_int_msk.s.abnorm = 1;
spx_int_msk.s.prtnxa = 1;
}
cvmx_write_csr(CVMX_SPXX_INT_MSK(index), spx_int_msk.u64);
}
/**
* __cvmx_interrupt_stxx_int_msk_enable enables all interrupt bits in cvmx_stxx_int_msk_t
*/
void __cvmx_interrupt_stxx_int_msk_enable(int index)
{
union cvmx_stxx_int_msk stx_int_msk;
cvmx_write_csr(CVMX_STXX_INT_REG(index),
cvmx_read_csr(CVMX_STXX_INT_REG(index)));
stx_int_msk.u64 = 0;
if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
/* Skipping stx_int_msk.s.reserved_8_63 */
stx_int_msk.s.frmerr = 1;
stx_int_msk.s.unxfrm = 1;
stx_int_msk.s.nosync = 1;
stx_int_msk.s.diperr = 1;
stx_int_msk.s.datovr = 1;
stx_int_msk.s.ovrbst = 1;
stx_int_msk.s.calpar1 = 1;
stx_int_msk.s.calpar0 = 1;
}
if (OCTEON_IS_MODEL(OCTEON_CN58XX)) {
/* Skipping stx_int_msk.s.reserved_8_63 */
stx_int_msk.s.frmerr = 1;
stx_int_msk.s.unxfrm = 1;
stx_int_msk.s.nosync = 1;
stx_int_msk.s.diperr = 1;
stx_int_msk.s.datovr = 1;
stx_int_msk.s.ovrbst = 1;
stx_int_msk.s.calpar1 = 1;
stx_int_msk.s.calpar0 = 1;
}
cvmx_write_csr(CVMX_STXX_INT_MSK(index), stx_int_msk.u64);
}
| gpl-2.0 |
davidepianca98/android_kernel_oneplus_msm8974-kexec | arch/powerpc/platforms/embedded6xx/hlwd-pic.c | 6836 | 5291 | /*
* arch/powerpc/platforms/embedded6xx/hlwd-pic.c
*
* Nintendo Wii "Hollywood" interrupt controller support.
* Copyright (C) 2009 The GameCube Linux Team
* Copyright (C) 2009 Albert Herranz
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
*/
#define DRV_MODULE_NAME "hlwd-pic"
#define pr_fmt(fmt) DRV_MODULE_NAME ": " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <asm/io.h>
#include "hlwd-pic.h"
#define HLWD_NR_IRQS 32
/*
* Each interrupt has a corresponding bit in both
* the Interrupt Cause (ICR) and Interrupt Mask (IMR) registers.
*
* Enabling/disabling an interrupt line involves asserting/clearing
* the corresponding bit in IMR. ACK'ing a request simply involves
* asserting the corresponding bit in ICR.
*/
#define HW_BROADWAY_ICR 0x00
#define HW_BROADWAY_IMR 0x04
/*
* IRQ chip hooks.
*
*/
static void hlwd_pic_mask_and_ack(struct irq_data *d)
{
int irq = irqd_to_hwirq(d);
void __iomem *io_base = irq_data_get_irq_chip_data(d);
u32 mask = 1 << irq;
clrbits32(io_base + HW_BROADWAY_IMR, mask);
out_be32(io_base + HW_BROADWAY_ICR, mask);
}
static void hlwd_pic_ack(struct irq_data *d)
{
int irq = irqd_to_hwirq(d);
void __iomem *io_base = irq_data_get_irq_chip_data(d);
out_be32(io_base + HW_BROADWAY_ICR, 1 << irq);
}
static void hlwd_pic_mask(struct irq_data *d)
{
int irq = irqd_to_hwirq(d);
void __iomem *io_base = irq_data_get_irq_chip_data(d);
clrbits32(io_base + HW_BROADWAY_IMR, 1 << irq);
}
static void hlwd_pic_unmask(struct irq_data *d)
{
int irq = irqd_to_hwirq(d);
void __iomem *io_base = irq_data_get_irq_chip_data(d);
setbits32(io_base + HW_BROADWAY_IMR, 1 << irq);
}
static struct irq_chip hlwd_pic = {
.name = "hlwd-pic",
.irq_ack = hlwd_pic_ack,
.irq_mask_ack = hlwd_pic_mask_and_ack,
.irq_mask = hlwd_pic_mask,
.irq_unmask = hlwd_pic_unmask,
};
/*
* IRQ host hooks.
*
*/
static struct irq_domain *hlwd_irq_host;
static int hlwd_pic_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hwirq)
{
irq_set_chip_data(virq, h->host_data);
irq_set_status_flags(virq, IRQ_LEVEL);
irq_set_chip_and_handler(virq, &hlwd_pic, handle_level_irq);
return 0;
}
static const struct irq_domain_ops hlwd_irq_domain_ops = {
.map = hlwd_pic_map,
};
static unsigned int __hlwd_pic_get_irq(struct irq_domain *h)
{
void __iomem *io_base = h->host_data;
int irq;
u32 irq_status;
irq_status = in_be32(io_base + HW_BROADWAY_ICR) &
in_be32(io_base + HW_BROADWAY_IMR);
if (irq_status == 0)
return NO_IRQ; /* no more IRQs pending */
irq = __ffs(irq_status);
return irq_linear_revmap(h, irq);
}
static void hlwd_pic_irq_cascade(unsigned int cascade_virq,
struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct irq_domain *irq_domain = irq_get_handler_data(cascade_virq);
unsigned int virq;
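/* Mask the (level-triggered) cascade input while the demuxed interrupt
 * runs, then ack it and unmask once the handler is done. */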
raw_spin_lock(&desc->lock);
chip->irq_mask(&desc->irq_data); /* IRQ_LEVEL */
raw_spin_unlock(&desc->lock);
virq = __hlwd_pic_get_irq(irq_domain);
if (virq != NO_IRQ)
generic_handle_irq(virq);
else
pr_err("spurious interrupt!\n");
raw_spin_lock(&desc->lock);
chip->irq_ack(&desc->irq_data); /* IRQ_LEVEL */
if (!irqd_irq_disabled(&desc->irq_data) && chip->irq_unmask)
chip->irq_unmask(&desc->irq_data);
raw_spin_unlock(&desc->lock);
}
/*
* Platform hooks.
*
*/
static void __hlwd_quiesce(void __iomem *io_base)
{
/* mask and ack all IRQs */
out_be32(io_base + HW_BROADWAY_IMR, 0);
out_be32(io_base + HW_BROADWAY_ICR, 0xffffffff);
}
struct irq_domain *hlwd_pic_init(struct device_node *np)
{
struct irq_domain *irq_domain;
struct resource res;
void __iomem *io_base;
int retval;
retval = of_address_to_resource(np, 0, &res);
if (retval) {
pr_err("no io memory range found\n");
return NULL;
}
io_base = ioremap(res.start, resource_size(&res));
if (!io_base) {
pr_err("ioremap failed\n");
return NULL;
}
pr_info("controller at 0x%08x mapped to 0x%p\n", res.start, io_base);
__hlwd_quiesce(io_base);
irq_domain = irq_domain_add_linear(np, HLWD_NR_IRQS,
&hlwd_irq_domain_ops, io_base);
if (!irq_domain) {
pr_err("failed to allocate irq_domain\n");
return NULL;
}
return irq_domain;
}
unsigned int hlwd_pic_get_irq(void)
{
return __hlwd_pic_get_irq(hlwd_irq_host);
}
/*
* Probe function.
*
*/
void hlwd_pic_probe(void)
{
struct irq_domain *host;
struct device_node *np;
const u32 *interrupts;
int cascade_virq;
for_each_compatible_node(np, NULL, "nintendo,hollywood-pic") {
interrupts = of_get_property(np, "interrupts", NULL);
if (interrupts) {
host = hlwd_pic_init(np);
BUG_ON(!host);
cascade_virq = irq_of_parse_and_map(np, 0);
irq_set_handler_data(cascade_virq, host);
irq_set_chained_handler(cascade_virq,
hlwd_pic_irq_cascade);
hlwd_irq_host = host;
break;
}
}
}
/**
* hlwd_quiesce() - quiesce hollywood irq controller
*
* Mask and ack all interrupt sources.
*
*/
void hlwd_quiesce(void)
{
void __iomem *io_base = hlwd_irq_host->host_data;
__hlwd_quiesce(io_base);
}
| gpl-2.0 |
Nicklas373/Hana-Kernel_MSM8627-AOSP_7.0 | arch/arm/mach-omap2/cm44xx.c | 7604 | 1317 | /*
* OMAP4 CM1, CM2 module low-level functions
*
* Copyright (C) 2010 Nokia Corporation
* Paul Walmsley
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* These functions are intended to be used only by the cminst44xx.c file.
* XXX Perhaps we should just move them there and make them static.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/io.h>
#include "iomap.h"
#include "common.h"
#include "cm.h"
#include "cm1_44xx.h"
#include "cm2_44xx.h"
#include "cm-regbits-44xx.h"
/* CM1 hardware module low-level functions */
/* Read a register in CM1 */
u32 omap4_cm1_read_inst_reg(s16 inst, u16 reg)
{
return __raw_readl(OMAP44XX_CM1_REGADDR(inst, reg));
}
/* Write into a register in CM1 */
void omap4_cm1_write_inst_reg(u32 val, s16 inst, u16 reg)
{
__raw_writel(val, OMAP44XX_CM1_REGADDR(inst, reg));
}
/* Read a register in CM2 */
u32 omap4_cm2_read_inst_reg(s16 inst, u16 reg)
{
return __raw_readl(OMAP44XX_CM2_REGADDR(inst, reg));
}
/* Write into a register in CM2 */
void omap4_cm2_write_inst_reg(u32 val, s16 inst, u16 reg)
{
__raw_writel(val, OMAP44XX_CM2_REGADDR(inst, reg));
}
| gpl-2.0 |
rex-xxx/Explay_A350_kernel_source_code | fs/hfs/sysdep.c | 8116 | 1041 | /*
* linux/fs/hfs/sysdep.c
*
* Copyright (C) 1996 Paul H. Hargrove
* (C) 2003 Ardis Technologies <roman@ardistech.com>
* This file may be distributed under the terms of the GNU General Public License.
*
* This file contains the code to do various system dependent things.
*/
#include <linux/namei.h>
#include "hfs_fs.h"
/* dentry case-handling: just lowercase everything */
static int hfs_revalidate_dentry(struct dentry *dentry, struct nameidata *nd)
{
struct inode *inode;
int diff;
if (nd->flags & LOOKUP_RCU)
return -ECHILD;
inode = dentry->d_inode;
if(!inode)
return 1;
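/* HFS keeps timestamps in local time; tz_secondswest records the
 * timezone offset they were last adjusted for. */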
/* fix up inode on a timezone change */
diff = sys_tz.tz_minuteswest * 60 - HFS_I(inode)->tz_secondswest;
if (diff) {
inode->i_ctime.tv_sec += diff;
inode->i_atime.tv_sec += diff;
inode->i_mtime.tv_sec += diff;
HFS_I(inode)->tz_secondswest += diff;
}
return 1;
}
const struct dentry_operations hfs_dentry_operations =
{
.d_revalidate = hfs_revalidate_dentry,
.d_hash = hfs_hash_dentry,
.d_compare = hfs_compare_dentry,
};
| gpl-2.0 |
sktjdgns1189/android_kernel_samsung_SHV-E110S | net/appletalk/sysctl_net_atalk.c | 9140 | 1370 | /*
* sysctl_net_atalk.c: sysctl interface to net AppleTalk subsystem.
*
* Begun April 1, 1996, Mike Shaver.
* Added /proc/sys/net/atalk directory entry (empty =) ). [MS]
* Dynamic registration, added aarp entries. (5/30/97 Chris Horn)
*/
#include <linux/sysctl.h>
#include <net/sock.h>
#include <linux/atalk.h>
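/* The aarp expiry/tick/resolve knobs are stored in jiffies, hence the
 * proc_dointvec_jiffies handlers; the retransmit limit is a plain count. */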
static struct ctl_table atalk_table[] = {
{
.procname = "aarp-expiry-time",
.data = &sysctl_aarp_expiry_time,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "aarp-tick-time",
.data = &sysctl_aarp_tick_time,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "aarp-retransmit-limit",
.data = &sysctl_aarp_retransmit_limit,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "aarp-resolve-time",
.data = &sysctl_aarp_resolve_time,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{ },
};
static struct ctl_path atalk_path[] = {
{ .procname = "net", },
{ .procname = "appletalk", },
{ }
};
static struct ctl_table_header *atalk_table_header;
void atalk_register_sysctl(void)
{
atalk_table_header = register_sysctl_paths(atalk_path, atalk_table);
}
void atalk_unregister_sysctl(void)
{
unregister_sysctl_table(atalk_table_header);
}
| gpl-2.0 |
zales/RamosW17pro-kernel-common | drivers/net/stmmac/stmmac_timer.c | 9140 | 3490 | /*******************************************************************************
STMMAC external timer support.
Copyright (C) 2007-2009 STMicroelectronics Ltd
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
#include <linux/kernel.h>
#include <linux/etherdevice.h>
#include "stmmac_timer.h"
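/* An external timer (RTC or TMU, selected below) periodically calls
 * stmmac_schedule() to kick the driver's deferred RX/TX processing. */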
static void stmmac_timer_handler(void *data)
{
struct net_device *dev = (struct net_device *)data;
stmmac_schedule(dev);
}
#define STMMAC_TIMER_MSG(timer, freq) \
printk(KERN_INFO "stmmac_timer: %s Timer ON (freq %dHz)\n", timer, freq);
#if defined(CONFIG_STMMAC_RTC_TIMER)
#include <linux/rtc.h>
static struct rtc_device *stmmac_rtc;
static rtc_task_t stmmac_task;
static void stmmac_rtc_start(unsigned int new_freq)
{
rtc_irq_set_freq(stmmac_rtc, &stmmac_task, new_freq);
rtc_irq_set_state(stmmac_rtc, &stmmac_task, 1);
}
static void stmmac_rtc_stop(void)
{
rtc_irq_set_state(stmmac_rtc, &stmmac_task, 0);
}
int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm)
{
stmmac_task.private_data = dev;
stmmac_task.func = stmmac_timer_handler;
stmmac_rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
if (stmmac_rtc == NULL) {
pr_err("open rtc device failed\n");
return -ENODEV;
}
rtc_irq_register(stmmac_rtc, &stmmac_task);
/* Periodic mode is not supported */
if ((rtc_irq_set_freq(stmmac_rtc, &stmmac_task, tm->freq) < 0)) {
pr_err("set periodic failed\n");
rtc_irq_unregister(stmmac_rtc, &stmmac_task);
rtc_class_close(stmmac_rtc);
return -1;
}
STMMAC_TIMER_MSG(CONFIG_RTC_HCTOSYS_DEVICE, tm->freq);
tm->timer_start = stmmac_rtc_start;
tm->timer_stop = stmmac_rtc_stop;
return 0;
}
int stmmac_close_ext_timer(void)
{
rtc_irq_set_state(stmmac_rtc, &stmmac_task, 0);
rtc_irq_unregister(stmmac_rtc, &stmmac_task);
rtc_class_close(stmmac_rtc);
return 0;
}
#elif defined(CONFIG_STMMAC_TMU_TIMER)
#include <linux/clk.h>
#define TMU_CHANNEL "tmu2_clk"
static struct clk *timer_clock;
static void stmmac_tmu_start(unsigned int new_freq)
{
clk_set_rate(timer_clock, new_freq);
clk_enable(timer_clock);
}
static void stmmac_tmu_stop(void)
{
clk_disable(timer_clock);
}
int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm)
{
timer_clock = clk_get(NULL, TMU_CHANNEL);
if (timer_clock == NULL)
return -1;
if (tmu2_register_user(stmmac_timer_handler, (void *)dev) < 0) {
timer_clock = NULL;
return -1;
}
STMMAC_TIMER_MSG("TMU2", tm->freq);
tm->timer_start = stmmac_tmu_start;
tm->timer_stop = stmmac_tmu_stop;
return 0;
}
int stmmac_close_ext_timer(void)
{
clk_disable(timer_clock);
tmu2_unregister_user();
clk_put(timer_clock);
return 0;
}
#endif
| gpl-2.0 |
emwno/android_kernel_konaxx | drivers/net/fs_enet/mac-fcc.c | 10420 | 15433 | /*
* FCC driver for Motorola MPC82xx (PQ2).
*
* Copyright (c) 2003 Intracom S.A.
* by Pantelis Antoniou <panto@intracom.gr>
*
* 2005 (c) MontaVista Software, Inc.
* Vitaly Bordug <vbordug@ru.mvista.com>
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/of_device.h>
#include <linux/gfp.h>
#include <asm/immap_cpm2.h>
#include <asm/mpc8260.h>
#include <asm/cpm2.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include "fs_enet.h"
/*************************************************/
/* FCC access macros */
/* write, read, set bits, clear bits */
#define W32(_p, _m, _v) out_be32(&(_p)->_m, (_v))
#define R32(_p, _m) in_be32(&(_p)->_m)
#define S32(_p, _m, _v) W32(_p, _m, R32(_p, _m) | (_v))
#define C32(_p, _m, _v) W32(_p, _m, R32(_p, _m) & ~(_v))
#define W16(_p, _m, _v) out_be16(&(_p)->_m, (_v))
#define R16(_p, _m) in_be16(&(_p)->_m)
#define S16(_p, _m, _v) W16(_p, _m, R16(_p, _m) | (_v))
#define C16(_p, _m, _v) W16(_p, _m, R16(_p, _m) & ~(_v))
#define W8(_p, _m, _v) out_8(&(_p)->_m, (_v))
#define R8(_p, _m) in_8(&(_p)->_m)
#define S8(_p, _m, _v) W8(_p, _m, R8(_p, _m) | (_v))
#define C8(_p, _m, _v) W8(_p, _m, R8(_p, _m) & ~(_v))
/*************************************************/
#define FCC_MAX_MULTICAST_ADDRS 64
#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18))
#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff))
#define mk_mii_end 0
#define MAX_CR_CMD_LOOPS 10000
static inline int fcc_cr_cmd(struct fs_enet_private *fep, u32 op)
{
const struct fs_platform_info *fpi = fep->fpi;
return cpm_command(fpi->cp_command, op);
}
static int do_pd_setup(struct fs_enet_private *fep)
{
struct platform_device *ofdev = to_platform_device(fep->dev);
struct fs_platform_info *fpi = fep->fpi;
int ret = -EINVAL;
fep->interrupt = of_irq_to_resource(ofdev->dev.of_node, 0, NULL);
if (fep->interrupt == NO_IRQ)
goto out;
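/* Map the three register resources: 0 = FCC registers, 1 = parameter
 * RAM, 2 = the extra FCC (fcc_c_t) registers. */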
fep->fcc.fccp = of_iomap(ofdev->dev.of_node, 0);
if (!fep->fcc.fccp)
goto out;
fep->fcc.ep = of_iomap(ofdev->dev.of_node, 1);
if (!fep->fcc.ep)
goto out_fccp;
fep->fcc.fcccp = of_iomap(ofdev->dev.of_node, 2);
if (!fep->fcc.fcccp)
goto out_ep;
fep->fcc.mem = (void __iomem *)cpm2_immr;
fpi->dpram_offset = cpm_dpalloc(128, 32);
if (IS_ERR_VALUE(fpi->dpram_offset)) {
ret = fpi->dpram_offset;
goto out_fcccp;
}
return 0;
out_fcccp:
iounmap(fep->fcc.fcccp);
out_ep:
iounmap(fep->fcc.ep);
out_fccp:
iounmap(fep->fcc.fccp);
out:
return ret;
}
#define FCC_NAPI_RX_EVENT_MSK (FCC_ENET_RXF | FCC_ENET_RXB)
#define FCC_RX_EVENT (FCC_ENET_RXF)
#define FCC_TX_EVENT (FCC_ENET_TXB)
#define FCC_ERR_EVENT_MSK (FCC_ENET_TXE)
static int setup_data(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
if (do_pd_setup(fep) != 0)
return -EINVAL;
fep->ev_napi_rx = FCC_NAPI_RX_EVENT_MSK;
fep->ev_rx = FCC_RX_EVENT;
fep->ev_tx = FCC_TX_EVENT;
fep->ev_err = FCC_ERR_EVENT_MSK;
return 0;
}
static int allocate_bd(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
const struct fs_platform_info *fpi = fep->fpi;
fep->ring_base = (void __iomem __force *)dma_alloc_coherent(fep->dev,
(fpi->tx_ring + fpi->rx_ring) *
sizeof(cbd_t), &fep->ring_mem_addr,
GFP_KERNEL);
if (fep->ring_base == NULL)
return -ENOMEM;
return 0;
}
static void free_bd(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
const struct fs_platform_info *fpi = fep->fpi;
if (fep->ring_base)
dma_free_coherent(fep->dev,
(fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t),
(void __force *)fep->ring_base, fep->ring_mem_addr);
}
static void cleanup_data(struct net_device *dev)
{
/* nothing */
}
static void set_promiscuous_mode(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
fcc_t __iomem *fccp = fep->fcc.fccp;
S32(fccp, fcc_fpsmr, FCC_PSMR_PRO);
}
static void set_multicast_start(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
fcc_enet_t __iomem *ep = fep->fcc.ep;
W32(ep, fen_gaddrh, 0);
W32(ep, fen_gaddrl, 0);
}
static void set_multicast_one(struct net_device *dev, const u8 *mac)
{
struct fs_enet_private *fep = netdev_priv(dev);
fcc_enet_t __iomem *ep = fep->fcc.ep;
u16 taddrh, taddrm, taddrl;
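/* Split the address across the three 16-bit transfer-address registers,
 * then have the CPM hash it into the group address filter. */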
taddrh = ((u16)mac[5] << 8) | mac[4];
taddrm = ((u16)mac[3] << 8) | mac[2];
taddrl = ((u16)mac[1] << 8) | mac[0];
W16(ep, fen_taddrh, taddrh);
W16(ep, fen_taddrm, taddrm);
W16(ep, fen_taddrl, taddrl);
fcc_cr_cmd(fep, CPM_CR_SET_GADDR);
}
static void set_multicast_finish(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
fcc_t __iomem *fccp = fep->fcc.fccp;
fcc_enet_t __iomem *ep = fep->fcc.ep;
/* clear promiscuous always */
C32(fccp, fcc_fpsmr, FCC_PSMR_PRO);
/* if all multi or too many multicasts; just enable all */
if ((dev->flags & IFF_ALLMULTI) != 0 ||
netdev_mc_count(dev) > FCC_MAX_MULTICAST_ADDRS) {
W32(ep, fen_gaddrh, 0xffffffff);
W32(ep, fen_gaddrl, 0xffffffff);
}
/* read back */
fep->fcc.gaddrh = R32(ep, fen_gaddrh);
fep->fcc.gaddrl = R32(ep, fen_gaddrl);
}
static void set_multicast_list(struct net_device *dev)
{
struct netdev_hw_addr *ha;
if ((dev->flags & IFF_PROMISC) == 0) {
set_multicast_start(dev);
netdev_for_each_mc_addr(ha, dev)
set_multicast_one(dev, ha->addr);
set_multicast_finish(dev);
} else
set_promiscuous_mode(dev);
}
static void restart(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
const struct fs_platform_info *fpi = fep->fpi;
fcc_t __iomem *fccp = fep->fcc.fccp;
fcc_c_t __iomem *fcccp = fep->fcc.fcccp;
fcc_enet_t __iomem *ep = fep->fcc.ep;
dma_addr_t rx_bd_base_phys, tx_bd_base_phys;
u16 paddrh, paddrm, paddrl;
const unsigned char *mac;
int i;
C32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT);
/* clear everything (slow & steady does it) */
for (i = 0; i < sizeof(*ep); i++)
out_8((u8 __iomem *)ep + i, 0);
/* get physical address */
rx_bd_base_phys = fep->ring_mem_addr;
tx_bd_base_phys = rx_bd_base_phys + sizeof(cbd_t) * fpi->rx_ring;
/* point to bds */
W32(ep, fen_genfcc.fcc_rbase, rx_bd_base_phys);
W32(ep, fen_genfcc.fcc_tbase, tx_bd_base_phys);
/* Set maximum bytes per receive buffer.
* It must be a multiple of 32.
*/
W16(ep, fen_genfcc.fcc_mrblr, PKT_MAXBLR_SIZE);
W32(ep, fen_genfcc.fcc_rstate, (CPMFCR_GBL | CPMFCR_EB) << 24);
W32(ep, fen_genfcc.fcc_tstate, (CPMFCR_GBL | CPMFCR_EB) << 24);
/* Allocate space in the reserved FCC area of DPRAM for the
* internal buffers. No one uses this space (yet), so we
* can do this. Later, we will add resource management for
* this area.
*/
W16(ep, fen_genfcc.fcc_riptr, fpi->dpram_offset);
W16(ep, fen_genfcc.fcc_tiptr, fpi->dpram_offset + 32);
W16(ep, fen_padptr, fpi->dpram_offset + 64);
/* fill with special symbol... */
memset_io(fep->fcc.mem + fpi->dpram_offset + 64, 0x88, 32);
W32(ep, fen_genfcc.fcc_rbptr, 0);
W32(ep, fen_genfcc.fcc_tbptr, 0);
W32(ep, fen_genfcc.fcc_rcrc, 0);
W32(ep, fen_genfcc.fcc_tcrc, 0);
W16(ep, fen_genfcc.fcc_res1, 0);
W32(ep, fen_genfcc.fcc_res2, 0);
/* no CAM */
W32(ep, fen_camptr, 0);
/* Set CRC preset and mask */
W32(ep, fen_cmask, 0xdebb20e3);
W32(ep, fen_cpres, 0xffffffff);
W32(ep, fen_crcec, 0); /* CRC Error counter */
W32(ep, fen_alec, 0); /* alignment error counter */
W32(ep, fen_disfc, 0); /* discard frame counter */
W16(ep, fen_retlim, 15); /* Retry limit threshold */
W16(ep, fen_pper, 0); /* Normal persistence */
/* set group address */
W32(ep, fen_gaddrh, fep->fcc.gaddrh);
W32(ep, fen_gaddrl, fep->fcc.gaddrl);
/* Clear hash filter tables */
W32(ep, fen_iaddrh, 0);
W32(ep, fen_iaddrl, 0);
/* Clear the Out-of-sequence TxBD */
W16(ep, fen_tfcstat, 0);
W16(ep, fen_tfclen, 0);
W32(ep, fen_tfcptr, 0);
W16(ep, fen_mflr, PKT_MAXBUF_SIZE); /* maximum frame length register */
W16(ep, fen_minflr, PKT_MINBUF_SIZE); /* minimum frame length register */
/* set address */
mac = dev->dev_addr;
paddrh = ((u16)mac[5] << 8) | mac[4];
paddrm = ((u16)mac[3] << 8) | mac[2];
paddrl = ((u16)mac[1] << 8) | mac[0];
W16(ep, fen_paddrh, paddrh);
W16(ep, fen_paddrm, paddrm);
W16(ep, fen_paddrl, paddrl);
W16(ep, fen_taddrh, 0);
W16(ep, fen_taddrm, 0);
W16(ep, fen_taddrl, 0);
W16(ep, fen_maxd1, 1520); /* maximum DMA1 length */
W16(ep, fen_maxd2, 1520); /* maximum DMA2 length */
/* Clear stat counters, in case we ever enable RMON */
W32(ep, fen_octc, 0);
W32(ep, fen_colc, 0);
W32(ep, fen_broc, 0);
W32(ep, fen_mulc, 0);
W32(ep, fen_uspc, 0);
W32(ep, fen_frgc, 0);
W32(ep, fen_ospc, 0);
W32(ep, fen_jbrc, 0);
W32(ep, fen_p64c, 0);
W32(ep, fen_p65c, 0);
W32(ep, fen_p128c, 0);
W32(ep, fen_p256c, 0);
W32(ep, fen_p512c, 0);
W32(ep, fen_p1024c, 0);
W16(ep, fen_rfthr, 0); /* Suggested by manual */
W16(ep, fen_rfcnt, 0);
W16(ep, fen_cftype, 0);
fs_init_bds(dev);
/* adjust to speed (for RMII mode) */
if (fpi->use_rmii) {
if (fep->phydev->speed == 100)
C8(fcccp, fcc_gfemr, 0x20);
else
S8(fcccp, fcc_gfemr, 0x20);
}
fcc_cr_cmd(fep, CPM_CR_INIT_TRX);
/* clear events */
W16(fccp, fcc_fcce, 0xffff);
/* Enable interrupts we wish to service */
W16(fccp, fcc_fccm, FCC_ENET_TXE | FCC_ENET_RXF | FCC_ENET_TXB);
/* Set GFMR to enable Ethernet operating mode */
W32(fccp, fcc_gfmr, FCC_GFMR_TCI | FCC_GFMR_MODE_ENET);
/* set sync/delimiters */
W16(fccp, fcc_fdsr, 0xd555);
W32(fccp, fcc_fpsmr, FCC_PSMR_ENCRC);
if (fpi->use_rmii)
S32(fccp, fcc_fpsmr, FCC_PSMR_RMII);
/* adjust to duplex mode */
if (fep->phydev->duplex)
S32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
else
C32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
/* Restore multicast and promiscuous settings */
set_multicast_list(dev);
S32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT);
}
static void stop(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
fcc_t __iomem *fccp = fep->fcc.fccp;
/* stop ethernet */
C32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT);
/* clear events */
W16(fccp, fcc_fcce, 0xffff);
/* clear interrupt mask */
W16(fccp, fcc_fccm, 0);
fs_cleanup_bds(dev);
}
static void napi_clear_rx_event(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
fcc_t __iomem *fccp = fep->fcc.fccp;
W16(fccp, fcc_fcce, FCC_NAPI_RX_EVENT_MSK);
}
static void napi_enable_rx(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
fcc_t __iomem *fccp = fep->fcc.fccp;
S16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK);
}
static void napi_disable_rx(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
fcc_t __iomem *fccp = fep->fcc.fccp;
C16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK);
}
static void rx_bd_done(struct net_device *dev)
{
/* nothing */
}
static void tx_kickstart(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
fcc_t __iomem *fccp = fep->fcc.fccp;
S16(fccp, fcc_ftodr, 0x8000);
}
static u32 get_int_events(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
fcc_t __iomem *fccp = fep->fcc.fccp;
return (u32)R16(fccp, fcc_fcce);
}
static void clear_int_events(struct net_device *dev, u32 int_events)
{
struct fs_enet_private *fep = netdev_priv(dev);
fcc_t __iomem *fccp = fep->fcc.fccp;
W16(fccp, fcc_fcce, int_events & 0xffff);
}
static void ev_error(struct net_device *dev, u32 int_events)
{
struct fs_enet_private *fep = netdev_priv(dev);
dev_warn(fep->dev, "FS_ENET ERROR(s) 0x%x\n", int_events);
}
static int get_regs(struct net_device *dev, void *p, int *sizep)
{
struct fs_enet_private *fep = netdev_priv(dev);
if (*sizep < sizeof(fcc_t) + sizeof(fcc_enet_t) + 1)
return -EINVAL;
memcpy_fromio(p, fep->fcc.fccp, sizeof(fcc_t));
p = (char *)p + sizeof(fcc_t);
memcpy_fromio(p, fep->fcc.ep, sizeof(fcc_enet_t));
p = (char *)p + sizeof(fcc_enet_t);
memcpy_fromio(p, fep->fcc.fcccp, 1);
return 0;
}
static int get_regs_len(struct net_device *dev)
{
return sizeof(fcc_t) + sizeof(fcc_enet_t) + 1;
}
/* Some transmit errors cause the transmitter to shut
* down. We now issue a restart transmit.
* Also, to work around 8260 device erratum CPM37, we must
* disable and then re-enable the transmitter following a
* Late Collision, Underrun, or Retry Limit error.
* In addition, tbptr may point past BDs that are still marked
* as ready due to internal pipelining, so we need to look back
* through the BDs and adjust tbptr to point to the last BD
* marked as ready. This may result in some buffers being
* retransmitted.
*/
static void tx_restart(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
fcc_t __iomem *fccp = fep->fcc.fccp;
const struct fs_platform_info *fpi = fep->fpi;
fcc_enet_t __iomem *ep = fep->fcc.ep;
cbd_t __iomem *curr_tbptr;
cbd_t __iomem *recheck_bd;
cbd_t __iomem *prev_bd;
cbd_t __iomem *last_tx_bd;
/* last BD in the ring: tx_bd_base is a cbd_t pointer, so the index
 * must not be scaled by sizeof(cbd_t) again */
last_tx_bd = fep->tx_bd_base + (fpi->tx_ring - 1);
/* get the current bd held in TBPTR and scan back from this point */
recheck_bd = curr_tbptr = (cbd_t __iomem *)
((R32(ep, fen_genfcc.fcc_tbptr) - fep->ring_mem_addr) +
fep->ring_base);
prev_bd = (recheck_bd == fep->tx_bd_base) ? last_tx_bd : recheck_bd - 1;
/* Move through the bds in reverse, look for the earliest buffer
* that is not ready. Adjust TBPTR to the following buffer */
while ((CBDR_SC(prev_bd) & BD_ENET_TX_READY) != 0) {
/* Go back one buffer */
recheck_bd = prev_bd;
/* update the previous buffer */
prev_bd = (prev_bd == fep->tx_bd_base) ? last_tx_bd : prev_bd - 1;
/* We should never see all bds marked as ready, check anyway */
if (recheck_bd == curr_tbptr)
break;
}
/* Now update the TBPTR and dirty flag to the current buffer */
W32(ep, fen_genfcc.fcc_tbptr,
(uint) (((void *)recheck_bd - fep->ring_base) +
fep->ring_mem_addr));
fep->dirty_tx = recheck_bd;
C32(fccp, fcc_gfmr, FCC_GFMR_ENT);
udelay(10);
S32(fccp, fcc_gfmr, FCC_GFMR_ENT);
fcc_cr_cmd(fep, CPM_CR_RESTART_TX);
}
/*************************************************************************/
const struct fs_ops fs_fcc_ops = {
.setup_data = setup_data,
.cleanup_data = cleanup_data,
.set_multicast_list = set_multicast_list,
.restart = restart,
.stop = stop,
.napi_clear_rx_event = napi_clear_rx_event,
.napi_enable_rx = napi_enable_rx,
.napi_disable_rx = napi_disable_rx,
.rx_bd_done = rx_bd_done,
.tx_kickstart = tx_kickstart,
.get_int_events = get_int_events,
.clear_int_events = clear_int_events,
.ev_error = ev_error,
.get_regs = get_regs,
.get_regs_len = get_regs_len,
.tx_restart = tx_restart,
.allocate_bd = allocate_bd,
.free_bd = free_bd,
};
| gpl-2.0 |
jmztaylor/kernel_hc_sense_express | arch/score/kernel/time.c | 11956 | 2829 | /*
* arch/score/kernel/time.c
*
* Score Processor version.
*
* Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
* Chen Liqin <liqin.chen@sunplusct.com>
* Lennox Wu <lennox.wu@sunplusct.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see the file COPYING, or write
* to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <asm/scoreregs.h>
static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evdev = dev_id;
/* clear timer interrupt flag */
outl(1, P_TIMER0_CPP_REG);
evdev->event_handler(evdev);
return IRQ_HANDLED;
}
static struct irqaction timer_irq = {
.handler = timer_interrupt,
.flags = IRQF_DISABLED | IRQF_TIMER,
.name = "timer",
};
static int score_timer_set_next_event(unsigned long delta,
struct clock_event_device *evdev)
{
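/* Program the preload with the requested delta and restart the timer
 * so the next event fires after "delta" clock ticks. */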
outl((TMR_M_PERIODIC | TMR_IE_ENABLE), P_TIMER0_CTRL);
outl(delta, P_TIMER0_PRELOAD);
outl(inl(P_TIMER0_CTRL) | TMR_ENABLE, P_TIMER0_CTRL);
return 0;
}
static void score_timer_set_mode(enum clock_event_mode mode,
struct clock_event_device *evdev)
{
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
outl((TMR_M_PERIODIC | TMR_IE_ENABLE), P_TIMER0_CTRL);
outl(SYSTEM_CLOCK/HZ, P_TIMER0_PRELOAD);
outl(inl(P_TIMER0_CTRL) | TMR_ENABLE, P_TIMER0_CTRL);
break;
case CLOCK_EVT_MODE_ONESHOT:
case CLOCK_EVT_MODE_SHUTDOWN:
case CLOCK_EVT_MODE_RESUME:
case CLOCK_EVT_MODE_UNUSED:
break;
default:
BUG();
}
}
static struct clock_event_device score_clockevent = {
.name = "score_clockevent",
.features = CLOCK_EVT_FEAT_PERIODIC,
.shift = 16,
.set_next_event = score_timer_set_next_event,
.set_mode = score_timer_set_mode,
};
void __init time_init(void)
{
timer_irq.dev_id = &score_clockevent;
setup_irq(IRQ_TIMER , &timer_irq);
/* setup COMPARE clockevent */
score_clockevent.mult = div_sc(SYSTEM_CLOCK, NSEC_PER_SEC,
score_clockevent.shift);
score_clockevent.max_delta_ns = clockevent_delta2ns((u32)~0,
&score_clockevent);
score_clockevent.min_delta_ns = clockevent_delta2ns(50,
&score_clockevent) + 1;
score_clockevent.cpumask = cpumask_of(0);
clockevents_register_device(&score_clockevent);
}
| gpl-2.0 |
jlyo/android_kernel_samsung_ypg1 | drivers/video/nvidia/nv_hw.c | 12468 | 51720 | /***************************************************************************\
|* *|
|* Copyright 1993-2003 NVIDIA, Corporation. All rights reserved. *|
|* *|
|* NOTICE TO USER: The source code is copyrighted under U.S. and *|
|* international laws. Users and possessors of this source code are *|
|* hereby granted a nonexclusive, royalty-free copyright license to *|
|* use this code in individual and commercial software. *|
|* *|
|* Any use of this source code must include, in the user documenta- *|
|* tion and internal comments to the code, notices to the end user *|
|* as follows: *|
|* *|
|* Copyright 1993-2003 NVIDIA, Corporation. All rights reserved. *|
|* *|
|* NVIDIA, CORPORATION MAKES NO REPRESENTATION ABOUT THE SUITABILITY *|
|* OF THIS SOURCE CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" *|
|* WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. NVIDIA, CORPOR- *|
|* ATION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOURCE CODE, *|
|* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, NONINFRINGE- *|
|* MENT, AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT SHALL *|
|* NVIDIA, CORPORATION BE LIABLE FOR ANY SPECIAL, INDIRECT, INCI- *|
|* DENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RE- *|
|* SULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION *|
|* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF *|
|* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOURCE CODE. *|
|* *|
|* U.S. Government End Users. This source code is a "commercial *|
|* item," as that term is defined at 48 C.F.R. 2.101 (OCT 1995), *|
|* consisting of "commercial computer software" and "commercial *|
|* computer software documentation," as such terms are used in *|
|* 48 C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Govern- *|
|* ment only as a commercial end item. Consistent with 48 C.F.R. *|
|* 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), *|
|* all U.S. Government End Users acquire the source code with only *|
|* those rights set forth herein. *|
|* *|
\***************************************************************************/
/*
* GPL Licensing Note - According to Mark Vojkovich, author of the Xorg/
* XFree86 'nv' driver, this source code is provided under MIT-style licensing
* where the source code is provided "as is" without warranty of any kind.
* The only usage restriction is for the copyright notices to be retained
* whenever code is used.
*
* Antonino Daplas <adaplas@pol.net> 2005-03-11
*/
/* $XFree86: xc/programs/Xserver/hw/xfree86/drivers/nv/nv_hw.c,v 1.4 2003/11/03 05:11:25 tsi Exp $ */
#include <linux/pci.h>
#include "nv_type.h"
#include "nv_local.h"
#include "nv_proto.h"
void NVLockUnlock(struct nvidia_par *par, int Lock)
{
u8 cr11;
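/* CRTC index 0x1F is the extended-register lock (0x57 unlocks, 0x99
 * locks); bit 7 of CR11 write-protects CRTC registers 0-7. */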
VGA_WR08(par->PCIO, 0x3D4, 0x1F);
VGA_WR08(par->PCIO, 0x3D5, Lock ? 0x99 : 0x57);
VGA_WR08(par->PCIO, 0x3D4, 0x11);
cr11 = VGA_RD08(par->PCIO, 0x3D5);
if (Lock)
cr11 |= 0x80;
else
cr11 &= ~0x80;
VGA_WR08(par->PCIO, 0x3D5, cr11);
}
int NVShowHideCursor(struct nvidia_par *par, int ShowHide)
{
int cur = par->CurrentState->cursor1;
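/* Bit 0 of CR31 turns the hardware cursor on or off; the previous
 * state is returned so callers can restore it. */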
par->CurrentState->cursor1 = (par->CurrentState->cursor1 & 0xFE) |
(ShowHide & 0x01);
VGA_WR08(par->PCIO, 0x3D4, 0x31);
VGA_WR08(par->PCIO, 0x3D5, par->CurrentState->cursor1);
if (par->Architecture == NV_ARCH_40)
NV_WR32(par->PRAMDAC, 0x0300, NV_RD32(par->PRAMDAC, 0x0300));
return (cur & 0x01);
}
/****************************************************************************\
* *
* The video arbitration routines calculate some "magic" numbers. Fixes *
* the snow seen when accessing the framebuffer without it. *
* It just works (I hope). *
* *
\****************************************************************************/
typedef struct {
int graphics_lwm;
int video_lwm;
int graphics_burst_size;
int video_burst_size;
int valid;
} nv4_fifo_info;
typedef struct {
int pclk_khz;
int mclk_khz;
int nvclk_khz;
char mem_page_miss;
char mem_latency;
int memory_width;
char enable_video;
char gr_during_vid;
char pix_bpp;
char mem_aligned;
char enable_mp;
} nv4_sim_state;
typedef struct {
int graphics_lwm;
int video_lwm;
int graphics_burst_size;
int video_burst_size;
int valid;
} nv10_fifo_info;
typedef struct {
int pclk_khz;
int mclk_khz;
int nvclk_khz;
char mem_page_miss;
char mem_latency;
u32 memory_type;
int memory_width;
char enable_video;
char gr_during_vid;
char pix_bpp;
char mem_aligned;
char enable_mp;
} nv10_sim_state;
static void nvGetClocks(struct nvidia_par *par, unsigned int *MClk,
unsigned int *NVClk)
{
unsigned int pll, N, M, MB, NB, P;
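/* Recover the clocks from the PLL coefficients:
 * clk = (N * NB * crystal) / (M * MB) >> P, with MB = NB = 1 when
 * the second PLL stage is bypassed. */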
if (par->Architecture >= NV_ARCH_40) {
pll = NV_RD32(par->PMC, 0x4020);
P = (pll >> 16) & 0x07;
pll = NV_RD32(par->PMC, 0x4024);
M = pll & 0xFF;
N = (pll >> 8) & 0xFF;
if (((par->Chipset & 0xfff0) == 0x0290) ||
((par->Chipset & 0xfff0) == 0x0390)) {
MB = 1;
NB = 1;
} else {
MB = (pll >> 16) & 0xFF;
NB = (pll >> 24) & 0xFF;
}
*MClk = ((N * NB * par->CrystalFreqKHz) / (M * MB)) >> P;
pll = NV_RD32(par->PMC, 0x4000);
P = (pll >> 16) & 0x07;
pll = NV_RD32(par->PMC, 0x4004);
M = pll & 0xFF;
N = (pll >> 8) & 0xFF;
MB = (pll >> 16) & 0xFF;
NB = (pll >> 24) & 0xFF;
*NVClk = ((N * NB * par->CrystalFreqKHz) / (M * MB)) >> P;
} else if (par->twoStagePLL) {
pll = NV_RD32(par->PRAMDAC0, 0x0504);
M = pll & 0xFF;
N = (pll >> 8) & 0xFF;
P = (pll >> 16) & 0x0F;
pll = NV_RD32(par->PRAMDAC0, 0x0574);
if (pll & 0x80000000) {
MB = pll & 0xFF;
NB = (pll >> 8) & 0xFF;
} else {
MB = 1;
NB = 1;
}
*MClk = ((N * NB * par->CrystalFreqKHz) / (M * MB)) >> P;
pll = NV_RD32(par->PRAMDAC0, 0x0500);
M = pll & 0xFF;
N = (pll >> 8) & 0xFF;
P = (pll >> 16) & 0x0F;
pll = NV_RD32(par->PRAMDAC0, 0x0570);
if (pll & 0x80000000) {
MB = pll & 0xFF;
NB = (pll >> 8) & 0xFF;
} else {
MB = 1;
NB = 1;
}
*NVClk = ((N * NB * par->CrystalFreqKHz) / (M * MB)) >> P;
} else
if (((par->Chipset & 0x0ff0) == 0x0300) ||
((par->Chipset & 0x0ff0) == 0x0330)) {
pll = NV_RD32(par->PRAMDAC0, 0x0504);
M = pll & 0x0F;
N = (pll >> 8) & 0xFF;
P = (pll >> 16) & 0x07;
if (pll & 0x00000080) {
MB = (pll >> 4) & 0x07;
NB = (pll >> 19) & 0x1f;
} else {
MB = 1;
NB = 1;
}
*MClk = ((N * NB * par->CrystalFreqKHz) / (M * MB)) >> P;
pll = NV_RD32(par->PRAMDAC0, 0x0500);
M = pll & 0x0F;
N = (pll >> 8) & 0xFF;
P = (pll >> 16) & 0x07;
if (pll & 0x00000080) {
MB = (pll >> 4) & 0x07;
NB = (pll >> 19) & 0x1f;
} else {
MB = 1;
NB = 1;
}
*NVClk = ((N * NB * par->CrystalFreqKHz) / (M * MB)) >> P;
} else {
pll = NV_RD32(par->PRAMDAC0, 0x0504);
M = pll & 0xFF;
N = (pll >> 8) & 0xFF;
P = (pll >> 16) & 0x0F;
*MClk = (N * par->CrystalFreqKHz / M) >> P;
pll = NV_RD32(par->PRAMDAC0, 0x0500);
M = pll & 0xFF;
N = (pll >> 8) & 0xFF;
P = (pll >> 16) & 0x0F;
*NVClk = (N * par->CrystalFreqKHz / M) >> P;
}
}
static void nv4CalcArbitration(nv4_fifo_info * fifo, nv4_sim_state * arb)
{
int data, pagemiss, cas, width, video_enable, bpp;
int nvclks, mclks, pclks, vpagemiss, crtpagemiss, vbs;
int found, mclk_extra, mclk_loop, cbs, m1, p1;
int mclk_freq, pclk_freq, nvclk_freq, mp_enable;
int us_m, us_n, us_p, video_drain_rate, crtc_drain_rate;
int vpm_us, us_video, vlwm, video_fill_us, cpm_us, us_crt, clwm;
fifo->valid = 1;
pclk_freq = arb->pclk_khz;
mclk_freq = arb->mclk_khz;
nvclk_freq = arb->nvclk_khz;
pagemiss = arb->mem_page_miss;
cas = arb->mem_latency;
width = arb->memory_width >> 6;
video_enable = arb->enable_video;
bpp = arb->pix_bpp;
mp_enable = arb->enable_mp;
clwm = 0;
vlwm = 0;
cbs = 128;
pclks = 2;
nvclks = 2;
nvclks += 2;
nvclks += 1;
mclks = 5;
mclks += 3;
mclks += 1;
mclks += cas;
mclks += 1;
mclks += 1;
mclks += 1;
mclks += 1;
mclk_extra = 3;
nvclks += 2;
nvclks += 1;
nvclks += 1;
nvclks += 1;
if (mp_enable)
mclks += 4;
nvclks += 0;
pclks += 0;
found = 0;
vbs = 0;
while (found != 1) {
fifo->valid = 1;
found = 1;
mclk_loop = mclks + mclk_extra;
us_m = mclk_loop * 1000 * 1000 / mclk_freq;
us_n = nvclks * 1000 * 1000 / nvclk_freq;
us_p = nvclks * 1000 * 1000 / pclk_freq;
if (video_enable) {
video_drain_rate = pclk_freq * 2;
crtc_drain_rate = pclk_freq * bpp / 8;
vpagemiss = 2;
vpagemiss += 1;
crtpagemiss = 2;
vpm_us =
(vpagemiss * pagemiss) * 1000 * 1000 / mclk_freq;
if (nvclk_freq * 2 > mclk_freq * width)
video_fill_us =
cbs * 1000 * 1000 / 16 / nvclk_freq;
else
video_fill_us =
cbs * 1000 * 1000 / (8 * width) /
mclk_freq;
us_video = vpm_us + us_m + us_n + us_p + video_fill_us;
vlwm = us_video * video_drain_rate / (1000 * 1000);
vlwm++;
vbs = 128;
if (vlwm > 128)
vbs = 64;
if (vlwm > (256 - 64))
vbs = 32;
if (nvclk_freq * 2 > mclk_freq * width)
video_fill_us =
vbs * 1000 * 1000 / 16 / nvclk_freq;
else
video_fill_us =
vbs * 1000 * 1000 / (8 * width) /
mclk_freq;
cpm_us =
crtpagemiss * pagemiss * 1000 * 1000 / mclk_freq;
us_crt =
us_video + video_fill_us + cpm_us + us_m + us_n +
us_p;
clwm = us_crt * crtc_drain_rate / (1000 * 1000);
clwm++;
} else {
crtc_drain_rate = pclk_freq * bpp / 8;
crtpagemiss = 2;
crtpagemiss += 1;
cpm_us =
crtpagemiss * pagemiss * 1000 * 1000 / mclk_freq;
us_crt = cpm_us + us_m + us_n + us_p;
clwm = us_crt * crtc_drain_rate / (1000 * 1000);
clwm++;
}
m1 = clwm + cbs - 512;
p1 = m1 * pclk_freq / mclk_freq;
p1 = p1 * bpp / 8;
if ((p1 < m1) && (m1 > 0)) {
fifo->valid = 0;
found = 0;
if (mclk_extra == 0)
found = 1;
mclk_extra--;
} else if (video_enable) {
if ((clwm > 511) || (vlwm > 255)) {
fifo->valid = 0;
found = 0;
if (mclk_extra == 0)
found = 1;
mclk_extra--;
}
} else {
if (clwm > 519) {
fifo->valid = 0;
found = 0;
if (mclk_extra == 0)
found = 1;
mclk_extra--;
}
}
if (clwm < 384)
clwm = 384;
if (vlwm < 128)
vlwm = 128;
data = (int)(clwm);
fifo->graphics_lwm = data;
fifo->graphics_burst_size = 128;
data = (int)((vlwm + 15));
fifo->video_lwm = data;
fifo->video_burst_size = vbs;
}
}
static void nv4UpdateArbitrationSettings(unsigned VClk,
unsigned pixelDepth,
unsigned *burst,
unsigned *lwm, struct nvidia_par *par)
{
nv4_fifo_info fifo_data;
nv4_sim_state sim_data;
unsigned int MClk, NVClk, cfg1;
nvGetClocks(par, &MClk, &NVClk);
cfg1 = NV_RD32(par->PFB, 0x00000204);
sim_data.pix_bpp = (char)pixelDepth;
sim_data.enable_video = 0;
sim_data.enable_mp = 0;
sim_data.memory_width = (NV_RD32(par->PEXTDEV, 0x0000) & 0x10) ?
128 : 64;
sim_data.mem_latency = (char)cfg1 & 0x0F;
sim_data.mem_aligned = 1;
sim_data.mem_page_miss =
(char)(((cfg1 >> 4) & 0x0F) + ((cfg1 >> 31) & 0x01));
sim_data.gr_during_vid = 0;
sim_data.pclk_khz = VClk;
sim_data.mclk_khz = MClk;
sim_data.nvclk_khz = NVClk;
nv4CalcArbitration(&fifo_data, &sim_data);
if (fifo_data.valid) {
int b = fifo_data.graphics_burst_size >> 4;
*burst = 0;
while (b >>= 1)
(*burst)++;
*lwm = fifo_data.graphics_lwm >> 3;
}
}
static void nv10CalcArbitration(nv10_fifo_info * fifo, nv10_sim_state * arb)
{
int data, pagemiss, width, video_enable, bpp;
int nvclks, mclks, pclks, vpagemiss, crtpagemiss;
int nvclk_fill;
int found, mclk_extra, mclk_loop, cbs, m1;
int mclk_freq, pclk_freq, nvclk_freq, mp_enable;
int us_m, us_m_min, us_n, us_p, crtc_drain_rate;
int vus_m;
int vpm_us, us_video, cpm_us, us_crt, clwm;
int clwm_rnd_down;
int m2us, us_pipe_min, p1clk, p2;
int min_mclk_extra;
int us_min_mclk_extra;
fifo->valid = 1;
pclk_freq = arb->pclk_khz; /* freq in KHz */
mclk_freq = arb->mclk_khz;
nvclk_freq = arb->nvclk_khz;
pagemiss = arb->mem_page_miss;
width = arb->memory_width / 64;
video_enable = arb->enable_video;
bpp = arb->pix_bpp;
mp_enable = arb->enable_mp;
clwm = 0;
cbs = 512;
pclks = 4; /* lwm detect. */
nvclks = 3; /* lwm -> sync. */
nvclks += 2; /* fbi bus cycles (1 req + 1 busy) */
/* 2 edge sync. may be very close to edge so just put one. */
mclks = 1;
mclks += 1; /* arb_hp_req */
mclks += 5; /* ap_hp_req tiling pipeline */
mclks += 2; /* tc_req latency fifo */
mclks += 2; /* fb_cas_n_ memory request to fbio block */
mclks += 7; /* sm_d_rdv data returned from fbio block */
/* fb.rd.d.Put_gc need to accumulate 256 bits for read */
if (arb->memory_type == 0)
if (arb->memory_width == 64) /* 64 bit bus */
mclks += 4;
else
mclks += 2;
else if (arb->memory_width == 64) /* 64 bit bus */
mclks += 2;
else
mclks += 1;
if ((!video_enable) && (arb->memory_width == 128)) {
mclk_extra = (bpp == 32) ? 31 : 42; /* Margin of error */
min_mclk_extra = 17;
} else {
mclk_extra = (bpp == 32) ? 8 : 4; /* Margin of error */
/* mclk_extra = 4; *//* Margin of error */
min_mclk_extra = 18;
}
/* 2 edge sync. may be very close to edge so just put one. */
nvclks += 1;
nvclks += 1; /* fbi_d_rdv_n */
nvclks += 1; /* Fbi_d_rdata */
nvclks += 1; /* crtfifo load */
if (mp_enable)
mclks += 4; /* Mp can get in with a burst of 8. */
/* Extra clocks determined by heuristics */
nvclks += 0;
pclks += 0;
found = 0;
while (found != 1) {
fifo->valid = 1;
found = 1;
mclk_loop = mclks + mclk_extra;
/* Mclk latency in us */
us_m = mclk_loop * 1000 * 1000 / mclk_freq;
/* Minimum Mclk latency in us */
us_m_min = mclks * 1000 * 1000 / mclk_freq;
us_min_mclk_extra = min_mclk_extra * 1000 * 1000 / mclk_freq;
/* nvclk latency in us */
us_n = nvclks * 1000 * 1000 / nvclk_freq;
/* nvclk latency in us */
us_p = pclks * 1000 * 1000 / pclk_freq;
us_pipe_min = us_m_min + us_n + us_p;
/* Mclk latency in us */
vus_m = mclk_loop * 1000 * 1000 / mclk_freq;
if (video_enable) {
crtc_drain_rate = pclk_freq * bpp / 8; /* MB/s */
vpagemiss = 1; /* self generating page miss */
vpagemiss += 1; /* One higher priority before */
crtpagemiss = 2; /* self generating page miss */
if (mp_enable)
crtpagemiss += 1; /* if MA0 conflict */
vpm_us =
(vpagemiss * pagemiss) * 1000 * 1000 / mclk_freq;
/* Video has separate read return path */
us_video = vpm_us + vus_m;
cpm_us =
crtpagemiss * pagemiss * 1000 * 1000 / mclk_freq;
/* Wait for video */
us_crt = us_video
+ cpm_us /* CRT Page miss */
+ us_m + us_n + us_p /* other latency */
;
clwm = us_crt * crtc_drain_rate / (1000 * 1000);
/* fixed point <= float_point - 1. Fixes that */
clwm++;
} else {
/* bpp * pclk/8 */
crtc_drain_rate = pclk_freq * bpp / 8;
crtpagemiss = 1; /* self generating page miss */
crtpagemiss += 1; /* MA0 page miss */
if (mp_enable)
crtpagemiss += 1; /* if MA0 conflict */
cpm_us =
crtpagemiss * pagemiss * 1000 * 1000 / mclk_freq;
us_crt = cpm_us + us_m + us_n + us_p;
clwm = us_crt * crtc_drain_rate / (1000 * 1000);
/* fixed point <= float_point - 1. Fixes that */
clwm++;
/* Finally, a heuristic check when width == 64 bits */
if (width == 1) {
nvclk_fill = nvclk_freq * 8;
if (crtc_drain_rate * 100 >= nvclk_fill * 102)
/*Large number to fail */
clwm = 0xfff;
else if (crtc_drain_rate * 100 >=
nvclk_fill * 98) {
clwm = 1024;
cbs = 512;
}
}
}
/*
Overfill check:
*/
clwm_rnd_down = ((int)clwm / 8) * 8;
if (clwm_rnd_down < clwm)
clwm += 8;
m1 = clwm + cbs - 1024; /* Amount of overfill */
m2us = us_pipe_min + us_min_mclk_extra;
/* pclk cycles to drain */
p1clk = m2us * pclk_freq / (1000 * 1000);
p2 = p1clk * bpp / 8; /* bytes drained. */
if ((p2 < m1) && (m1 > 0)) {
fifo->valid = 0;
found = 0;
if (min_mclk_extra == 0) {
if (cbs <= 32) {
/* Can't adjust anymore! */
found = 1;
} else {
/* reduce the burst size */
cbs = cbs / 2;
}
} else {
min_mclk_extra--;
}
} else {
if (clwm > 1023) { /* Have some margin */
fifo->valid = 0;
found = 0;
if (min_mclk_extra == 0)
/* Can't adjust anymore! */
found = 1;
else
min_mclk_extra--;
}
}
if (clwm < (1024 - cbs + 8))
clwm = 1024 - cbs + 8;
data = (int)(clwm);
/* printf("CRT LWM: %f bytes, prog: 0x%x, bs: 256\n",
clwm, data ); */
fifo->graphics_lwm = data;
fifo->graphics_burst_size = cbs;
fifo->video_lwm = 1024;
fifo->video_burst_size = 512;
}
}
static void nv10UpdateArbitrationSettings(unsigned VClk,
unsigned pixelDepth,
unsigned *burst,
unsigned *lwm,
struct nvidia_par *par)
{
nv10_fifo_info fifo_data;
nv10_sim_state sim_data;
unsigned int MClk, NVClk, cfg1;
nvGetClocks(par, &MClk, &NVClk);
cfg1 = NV_RD32(par->PFB, 0x0204);
sim_data.pix_bpp = (char)pixelDepth;
sim_data.enable_video = 1;
sim_data.enable_mp = 0;
sim_data.memory_type = (NV_RD32(par->PFB, 0x0200) & 0x01) ? 1 : 0;
sim_data.memory_width = (NV_RD32(par->PEXTDEV, 0x0000) & 0x10) ?
128 : 64;
sim_data.mem_latency = (char)cfg1 & 0x0F;
sim_data.mem_aligned = 1;
sim_data.mem_page_miss =
(char)(((cfg1 >> 4) & 0x0F) + ((cfg1 >> 31) & 0x01));
sim_data.gr_during_vid = 0;
sim_data.pclk_khz = VClk;
sim_data.mclk_khz = MClk;
sim_data.nvclk_khz = NVClk;
nv10CalcArbitration(&fifo_data, &sim_data);
if (fifo_data.valid) {
int b = fifo_data.graphics_burst_size >> 4;
*burst = 0;
while (b >>= 1)
(*burst)++;
*lwm = fifo_data.graphics_lwm >> 3;
}
}
static void nv30UpdateArbitrationSettings(struct nvidia_par *par,
unsigned int *burst,
unsigned int *lwm)
{
unsigned int MClk, NVClk;
unsigned int fifo_size, burst_size, graphics_lwm;
fifo_size = 2048;
burst_size = 512;
graphics_lwm = fifo_size - burst_size;
nvGetClocks(par, &MClk, &NVClk);
*burst = 0;
burst_size >>= 5;
while (burst_size >>= 1)
(*burst)++;
*lwm = graphics_lwm >> 3;
}
static void nForceUpdateArbitrationSettings(unsigned VClk,
unsigned pixelDepth,
unsigned *burst,
unsigned *lwm,
struct nvidia_par *par)
{
nv10_fifo_info fifo_data;
nv10_sim_state sim_data;
unsigned int M, N, P, pll, MClk, NVClk, memctrl;
struct pci_dev *dev;
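/* nForce IGPs share system memory, so the memory clock is owned by
 * the chipset and must be read from its PCI config space. */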
if ((par->Chipset & 0x0FF0) == 0x01A0) {
unsigned int uMClkPostDiv;
dev = pci_get_bus_and_slot(0, 3);
pci_read_config_dword(dev, 0x6C, &uMClkPostDiv);
uMClkPostDiv = (uMClkPostDiv >> 8) & 0xf;
if (!uMClkPostDiv)
uMClkPostDiv = 4;
MClk = 400000 / uMClkPostDiv;
} else {
dev = pci_get_bus_and_slot(0, 5);
pci_read_config_dword(dev, 0x4c, &MClk);
MClk /= 1000;
}
pci_dev_put(dev);
pll = NV_RD32(par->PRAMDAC0, 0x0500);
M = (pll >> 0) & 0xFF;
N = (pll >> 8) & 0xFF;
P = (pll >> 16) & 0x0F;
NVClk = (N * par->CrystalFreqKHz / M) >> P;
sim_data.pix_bpp = (char)pixelDepth;
sim_data.enable_video = 0;
sim_data.enable_mp = 0;
dev = pci_get_bus_and_slot(0, 1);
pci_read_config_dword(dev, 0x7C, &sim_data.memory_type);
pci_dev_put(dev);
sim_data.memory_type = (sim_data.memory_type >> 12) & 1;
sim_data.memory_width = 64;
dev = pci_get_bus_and_slot(0, 3);
pci_read_config_dword(dev, 0, &memctrl);
pci_dev_put(dev);
memctrl >>= 16;
if ((memctrl == 0x1A9) || (memctrl == 0x1AB) || (memctrl == 0x1ED)) {
u32 dimm[3];
dev = pci_get_bus_and_slot(0, 2);
pci_read_config_dword(dev, 0x40, &dimm[0]);
dimm[0] = (dimm[0] >> 8) & 0x4f;
pci_read_config_dword(dev, 0x44, &dimm[1]);
dimm[1] = (dimm[1] >> 8) & 0x4f;
pci_read_config_dword(dev, 0x48, &dimm[2]);
dimm[2] = (dimm[2] >> 8) & 0x4f;
if ((dimm[0] + dimm[1]) != dimm[2]) {
printk("nvidiafb: your nForce DIMMs are not arranged "
"in optimal banks!\n");
}
pci_dev_put(dev);
}
sim_data.mem_latency = 3;
sim_data.mem_aligned = 1;
sim_data.mem_page_miss = 10;
sim_data.gr_during_vid = 0;
sim_data.pclk_khz = VClk;
sim_data.mclk_khz = MClk;
sim_data.nvclk_khz = NVClk;
nv10CalcArbitration(&fifo_data, &sim_data);
if (fifo_data.valid) {
int b = fifo_data.graphics_burst_size >> 4;
*burst = 0;
while (b >>= 1)
(*burst)++;
*lwm = fifo_data.graphics_lwm >> 3;
}
}
/****************************************************************************\
* *
* RIVA Mode State Routines *
* *
\****************************************************************************/
/*
* Calculate the Video Clock parameters for the PLL.
*/
static void CalcVClock(int clockIn,
int *clockOut, u32 * pllOut, struct nvidia_par *par)
{
unsigned lowM, highM;
unsigned DeltaNew, DeltaOld;
unsigned VClk, Freq;
unsigned M, N, P;
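/* Exhaustive search over M, N and P for the closest achievable
 * frequency: Freq = (crystal * N / M) >> P, keeping the VCO
 * frequency (VClk << P) within its legal range. */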
DeltaOld = 0xFFFFFFFF;
VClk = (unsigned)clockIn;
if (par->CrystalFreqKHz == 13500) {
lowM = 7;
highM = 13;
} else {
lowM = 8;
highM = 14;
}
for (P = 0; P <= 4; P++) {
Freq = VClk << P;
if ((Freq >= 128000) && (Freq <= 350000)) {
for (M = lowM; M <= highM; M++) {
N = ((VClk << P) * M) / par->CrystalFreqKHz;
if (N <= 255) {
Freq =
((par->CrystalFreqKHz * N) /
M) >> P;
if (Freq > VClk)
DeltaNew = Freq - VClk;
else
DeltaNew = VClk - Freq;
if (DeltaNew < DeltaOld) {
*pllOut =
(P << 16) | (N << 8) | M;
*clockOut = Freq;
DeltaOld = DeltaNew;
}
}
}
}
}
}
static void CalcVClock2Stage(int clockIn,
int *clockOut,
u32 * pllOut,
u32 * pllBOut, struct nvidia_par *par)
{
unsigned DeltaNew, DeltaOld;
unsigned VClk, Freq;
unsigned M, N, P;
DeltaOld = 0xFFFFFFFF;
*pllBOut = 0x80000401; /* fixed at x4 for now */
VClk = (unsigned)clockIn;
for (P = 0; P <= 6; P++) {
Freq = VClk << P;
if ((Freq >= 400000) && (Freq <= 1000000)) {
for (M = 1; M <= 13; M++) {
N = ((VClk << P) * M) /
(par->CrystalFreqKHz << 2);
if ((N >= 5) && (N <= 255)) {
Freq =
(((par->CrystalFreqKHz << 2) * N) /
M) >> P;
if (Freq > VClk)
DeltaNew = Freq - VClk;
else
DeltaNew = VClk - Freq;
if (DeltaNew < DeltaOld) {
*pllOut =
(P << 16) | (N << 8) | M;
*clockOut = Freq;
DeltaOld = DeltaNew;
}
}
}
}
}
}
/*
* Calculate extended mode parameters (SVGA) and save in a
* mode state structure.
*/
void NVCalcStateExt(struct nvidia_par *par,
RIVA_HW_STATE * state,
int bpp,
int width,
int hDisplaySize, int height, int dotClock, int flags)
{
int pixelDepth, VClk = 0;
/*
* Save mode parameters.
*/
state->bpp = bpp; /* this is not bitsPerPixel, it's 8,15,16,32 */
state->width = width;
state->height = height;
/*
* Extended RIVA registers.
*/
pixelDepth = (bpp + 1) / 8;
if (par->twoStagePLL)
CalcVClock2Stage(dotClock, &VClk, &state->pll, &state->pllB,
par);
else
CalcVClock(dotClock, &VClk, &state->pll, par);
switch (par->Architecture) {
case NV_ARCH_04:
nv4UpdateArbitrationSettings(VClk,
pixelDepth * 8,
&(state->arbitration0),
&(state->arbitration1), par);
state->cursor0 = 0x00;
state->cursor1 = 0xBC;
if (flags & FB_VMODE_DOUBLE)
state->cursor1 |= 2;
state->cursor2 = 0x00000000;
state->pllsel = 0x10000700;
state->config = 0x00001114;
state->general = bpp == 16 ? 0x00101100 : 0x00100100;
state->repaint1 = hDisplaySize < 1280 ? 0x04 : 0x00;
break;
case NV_ARCH_40:
if (!par->FlatPanel)
state->control = NV_RD32(par->PRAMDAC0, 0x0580) &
0xeffffeff;
/* fallthrough */
case NV_ARCH_10:
case NV_ARCH_20:
case NV_ARCH_30:
default:
if ((par->Chipset & 0xfff0) == 0x0240 ||
(par->Chipset & 0xfff0) == 0x03d0) {
state->arbitration0 = 256;
state->arbitration1 = 0x0480;
} else if (((par->Chipset & 0xffff) == 0x01A0) ||
((par->Chipset & 0xffff) == 0x01f0)) {
nForceUpdateArbitrationSettings(VClk,
pixelDepth * 8,
&(state->arbitration0),
&(state->arbitration1),
par);
} else if (par->Architecture < NV_ARCH_30) {
nv10UpdateArbitrationSettings(VClk,
pixelDepth * 8,
&(state->arbitration0),
&(state->arbitration1),
par);
} else {
nv30UpdateArbitrationSettings(par,
&(state->arbitration0),
&(state->arbitration1));
}
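/*
 * The cursor base address is scattered across three CRTC registers:
 * bits 17 and up in cursor0, bits 11..16 (stored shifted left by two)
 * in cursor1, and bits 24 and up in cursor2.
 */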
state->cursor0 = 0x80 | (par->CursorStart >> 17);
state->cursor1 = (par->CursorStart >> 11) << 2;
state->cursor2 = par->CursorStart >> 24;
if (flags & FB_VMODE_DOUBLE)
state->cursor1 |= 2;
state->pllsel = 0x10000700;
state->config = NV_RD32(par->PFB, 0x00000200);
state->general = bpp == 16 ? 0x00101100 : 0x00100100;
state->repaint1 = hDisplaySize < 1280 ? 0x04 : 0x00;
break;
}
if (bpp != 8) /* DirectColor */
state->general |= 0x00000030;
state->repaint0 = (((width / 8) * pixelDepth) & 0x700) >> 3;
state->pixel = (pixelDepth > 2) ? 3 : pixelDepth;
}
void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
{
int i, j;
NV_WR32(par->PMC, 0x0140, 0x00000000);
NV_WR32(par->PMC, 0x0200, 0xFFFF00FF);
NV_WR32(par->PMC, 0x0200, 0xFFFFFFFF);
NV_WR32(par->PTIMER, 0x0200 * 4, 0x00000008);
NV_WR32(par->PTIMER, 0x0210 * 4, 0x00000003);
NV_WR32(par->PTIMER, 0x0140 * 4, 0x00000000);
NV_WR32(par->PTIMER, 0x0100 * 4, 0xFFFFFFFF);
if (par->Architecture == NV_ARCH_04) {
if (state)
NV_WR32(par->PFB, 0x0200, state->config);
} else if ((par->Architecture < NV_ARCH_40) ||
(par->Chipset & 0xfff0) == 0x0040) {
for (i = 0; i < 8; i++) {
NV_WR32(par->PFB, 0x0240 + (i * 0x10), 0);
NV_WR32(par->PFB, 0x0244 + (i * 0x10),
par->FbMapSize - 1);
}
} else {
int regions = 12;
if (((par->Chipset & 0xfff0) == 0x0090) ||
((par->Chipset & 0xfff0) == 0x01D0) ||
((par->Chipset & 0xfff0) == 0x0290) ||
((par->Chipset & 0xfff0) == 0x0390) ||
((par->Chipset & 0xfff0) == 0x03D0))
regions = 15;
for(i = 0; i < regions; i++) {
NV_WR32(par->PFB, 0x0600 + (i * 0x10), 0);
NV_WR32(par->PFB, 0x0604 + (i * 0x10),
par->FbMapSize - 1);
}
}
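/*
 * The PRAMIN writes below appear to populate instance memory: paired
 * handle/context entries (the 0x800000xx values look like object
 * handles), the context objects themselves, and a DMA object covering
 * the usable framebuffer.  The layout differs between NV40+ and older
 * parts, hence the two branches.
 */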
if (par->Architecture >= NV_ARCH_40) {
NV_WR32(par->PRAMIN, 0x0000 * 4, 0x80000010);
NV_WR32(par->PRAMIN, 0x0001 * 4, 0x00101202);
NV_WR32(par->PRAMIN, 0x0002 * 4, 0x80000011);
NV_WR32(par->PRAMIN, 0x0003 * 4, 0x00101204);
NV_WR32(par->PRAMIN, 0x0004 * 4, 0x80000012);
NV_WR32(par->PRAMIN, 0x0005 * 4, 0x00101206);
NV_WR32(par->PRAMIN, 0x0006 * 4, 0x80000013);
NV_WR32(par->PRAMIN, 0x0007 * 4, 0x00101208);
NV_WR32(par->PRAMIN, 0x0008 * 4, 0x80000014);
NV_WR32(par->PRAMIN, 0x0009 * 4, 0x0010120A);
NV_WR32(par->PRAMIN, 0x000A * 4, 0x80000015);
NV_WR32(par->PRAMIN, 0x000B * 4, 0x0010120C);
NV_WR32(par->PRAMIN, 0x000C * 4, 0x80000016);
NV_WR32(par->PRAMIN, 0x000D * 4, 0x0010120E);
NV_WR32(par->PRAMIN, 0x000E * 4, 0x80000017);
NV_WR32(par->PRAMIN, 0x000F * 4, 0x00101210);
NV_WR32(par->PRAMIN, 0x0800 * 4, 0x00003000);
NV_WR32(par->PRAMIN, 0x0801 * 4, par->FbMapSize - 1);
NV_WR32(par->PRAMIN, 0x0802 * 4, 0x00000002);
NV_WR32(par->PRAMIN, 0x0808 * 4, 0x02080062);
NV_WR32(par->PRAMIN, 0x0809 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x080A * 4, 0x00001200);
NV_WR32(par->PRAMIN, 0x080B * 4, 0x00001200);
NV_WR32(par->PRAMIN, 0x080C * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x080D * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0810 * 4, 0x02080043);
NV_WR32(par->PRAMIN, 0x0811 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0812 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0813 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0814 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0815 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0818 * 4, 0x02080044);
NV_WR32(par->PRAMIN, 0x0819 * 4, 0x02000000);
NV_WR32(par->PRAMIN, 0x081A * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x081B * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x081C * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x081D * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0820 * 4, 0x02080019);
NV_WR32(par->PRAMIN, 0x0821 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0822 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0823 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0824 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0825 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0828 * 4, 0x020A005C);
NV_WR32(par->PRAMIN, 0x0829 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x082A * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x082B * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x082C * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x082D * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0830 * 4, 0x0208009F);
NV_WR32(par->PRAMIN, 0x0831 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0832 * 4, 0x00001200);
NV_WR32(par->PRAMIN, 0x0833 * 4, 0x00001200);
NV_WR32(par->PRAMIN, 0x0834 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0835 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0838 * 4, 0x0208004A);
NV_WR32(par->PRAMIN, 0x0839 * 4, 0x02000000);
NV_WR32(par->PRAMIN, 0x083A * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x083B * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x083C * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x083D * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0840 * 4, 0x02080077);
NV_WR32(par->PRAMIN, 0x0841 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0842 * 4, 0x00001200);
NV_WR32(par->PRAMIN, 0x0843 * 4, 0x00001200);
NV_WR32(par->PRAMIN, 0x0844 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0845 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x084C * 4, 0x00003002);
NV_WR32(par->PRAMIN, 0x084D * 4, 0x00007FFF);
NV_WR32(par->PRAMIN, 0x084E * 4,
par->FbUsableSize | 0x00000002);
#ifdef __BIG_ENDIAN
NV_WR32(par->PRAMIN, 0x080A * 4,
NV_RD32(par->PRAMIN, 0x080A * 4) | 0x01000000);
NV_WR32(par->PRAMIN, 0x0812 * 4,
NV_RD32(par->PRAMIN, 0x0812 * 4) | 0x01000000);
NV_WR32(par->PRAMIN, 0x081A * 4,
NV_RD32(par->PRAMIN, 0x081A * 4) | 0x01000000);
NV_WR32(par->PRAMIN, 0x0822 * 4,
NV_RD32(par->PRAMIN, 0x0822 * 4) | 0x01000000);
NV_WR32(par->PRAMIN, 0x082A * 4,
NV_RD32(par->PRAMIN, 0x082A * 4) | 0x01000000);
NV_WR32(par->PRAMIN, 0x0832 * 4,
NV_RD32(par->PRAMIN, 0x0832 * 4) | 0x01000000);
NV_WR32(par->PRAMIN, 0x083A * 4,
NV_RD32(par->PRAMIN, 0x083A * 4) | 0x01000000);
NV_WR32(par->PRAMIN, 0x0842 * 4,
NV_RD32(par->PRAMIN, 0x0842 * 4) | 0x01000000);
NV_WR32(par->PRAMIN, 0x0819 * 4, 0x01000000);
NV_WR32(par->PRAMIN, 0x0839 * 4, 0x01000000);
#endif
} else {
NV_WR32(par->PRAMIN, 0x0000 * 4, 0x80000010);
NV_WR32(par->PRAMIN, 0x0001 * 4, 0x80011201);
NV_WR32(par->PRAMIN, 0x0002 * 4, 0x80000011);
NV_WR32(par->PRAMIN, 0x0003 * 4, 0x80011202);
NV_WR32(par->PRAMIN, 0x0004 * 4, 0x80000012);
NV_WR32(par->PRAMIN, 0x0005 * 4, 0x80011203);
NV_WR32(par->PRAMIN, 0x0006 * 4, 0x80000013);
NV_WR32(par->PRAMIN, 0x0007 * 4, 0x80011204);
NV_WR32(par->PRAMIN, 0x0008 * 4, 0x80000014);
NV_WR32(par->PRAMIN, 0x0009 * 4, 0x80011205);
NV_WR32(par->PRAMIN, 0x000A * 4, 0x80000015);
NV_WR32(par->PRAMIN, 0x000B * 4, 0x80011206);
NV_WR32(par->PRAMIN, 0x000C * 4, 0x80000016);
NV_WR32(par->PRAMIN, 0x000D * 4, 0x80011207);
NV_WR32(par->PRAMIN, 0x000E * 4, 0x80000017);
NV_WR32(par->PRAMIN, 0x000F * 4, 0x80011208);
NV_WR32(par->PRAMIN, 0x0800 * 4, 0x00003000);
NV_WR32(par->PRAMIN, 0x0801 * 4, par->FbMapSize - 1);
NV_WR32(par->PRAMIN, 0x0802 * 4, 0x00000002);
NV_WR32(par->PRAMIN, 0x0803 * 4, 0x00000002);
if (par->Architecture >= NV_ARCH_10)
NV_WR32(par->PRAMIN, 0x0804 * 4, 0x01008062);
else
NV_WR32(par->PRAMIN, 0x0804 * 4, 0x01008042);
NV_WR32(par->PRAMIN, 0x0805 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0806 * 4, 0x12001200);
NV_WR32(par->PRAMIN, 0x0807 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0808 * 4, 0x01008043);
NV_WR32(par->PRAMIN, 0x0809 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x080A * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x080B * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x080C * 4, 0x01008044);
NV_WR32(par->PRAMIN, 0x080D * 4, 0x00000002);
NV_WR32(par->PRAMIN, 0x080E * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x080F * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0810 * 4, 0x01008019);
NV_WR32(par->PRAMIN, 0x0811 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0812 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0813 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0814 * 4, 0x0100A05C);
NV_WR32(par->PRAMIN, 0x0815 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0816 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0817 * 4, 0x00000000);
if (par->WaitVSyncPossible)
NV_WR32(par->PRAMIN, 0x0818 * 4, 0x0100809F);
else
NV_WR32(par->PRAMIN, 0x0818 * 4, 0x0100805F);
NV_WR32(par->PRAMIN, 0x0819 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x081A * 4, 0x12001200);
NV_WR32(par->PRAMIN, 0x081B * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x081C * 4, 0x0100804A);
NV_WR32(par->PRAMIN, 0x081D * 4, 0x00000002);
NV_WR32(par->PRAMIN, 0x081E * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x081F * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0820 * 4, 0x01018077);
NV_WR32(par->PRAMIN, 0x0821 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0822 * 4, 0x12001200);
NV_WR32(par->PRAMIN, 0x0823 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0824 * 4, 0x00003002);
NV_WR32(par->PRAMIN, 0x0825 * 4, 0x00007FFF);
NV_WR32(par->PRAMIN, 0x0826 * 4,
par->FbUsableSize | 0x00000002);
NV_WR32(par->PRAMIN, 0x0827 * 4, 0x00000002);
#ifdef __BIG_ENDIAN
NV_WR32(par->PRAMIN, 0x0804 * 4,
NV_RD32(par->PRAMIN, 0x0804 * 4) | 0x00080000);
NV_WR32(par->PRAMIN, 0x0808 * 4,
NV_RD32(par->PRAMIN, 0x0808 * 4) | 0x00080000);
NV_WR32(par->PRAMIN, 0x080C * 4,
NV_RD32(par->PRAMIN, 0x080C * 4) | 0x00080000);
NV_WR32(par->PRAMIN, 0x0810 * 4,
NV_RD32(par->PRAMIN, 0x0810 * 4) | 0x00080000);
NV_WR32(par->PRAMIN, 0x0814 * 4,
NV_RD32(par->PRAMIN, 0x0814 * 4) | 0x00080000);
NV_WR32(par->PRAMIN, 0x0818 * 4,
NV_RD32(par->PRAMIN, 0x0818 * 4) | 0x00080000);
NV_WR32(par->PRAMIN, 0x081C * 4,
NV_RD32(par->PRAMIN, 0x081C * 4) | 0x00080000);
NV_WR32(par->PRAMIN, 0x0820 * 4,
NV_RD32(par->PRAMIN, 0x0820 * 4) | 0x00080000);
NV_WR32(par->PRAMIN, 0x080D * 4, 0x00000001);
NV_WR32(par->PRAMIN, 0x081D * 4, 0x00000001);
#endif
}
if (par->Architecture < NV_ARCH_10) {
if ((par->Chipset & 0x0fff) == 0x0020) {
NV_WR32(par->PRAMIN, 0x0824 * 4,
NV_RD32(par->PRAMIN, 0x0824 * 4) | 0x00020000);
NV_WR32(par->PRAMIN, 0x0826 * 4,
NV_RD32(par->PRAMIN,
0x0826 * 4) + par->FbAddress);
}
NV_WR32(par->PGRAPH, 0x0080, 0x000001FF);
NV_WR32(par->PGRAPH, 0x0080, 0x1230C000);
NV_WR32(par->PGRAPH, 0x0084, 0x72111101);
NV_WR32(par->PGRAPH, 0x0088, 0x11D5F071);
NV_WR32(par->PGRAPH, 0x008C, 0x0004FF31);
NV_WR32(par->PGRAPH, 0x008C, 0x4004FF31);
NV_WR32(par->PGRAPH, 0x0140, 0x00000000);
NV_WR32(par->PGRAPH, 0x0100, 0xFFFFFFFF);
NV_WR32(par->PGRAPH, 0x0170, 0x10010100);
NV_WR32(par->PGRAPH, 0x0710, 0xFFFFFFFF);
NV_WR32(par->PGRAPH, 0x0720, 0x00000001);
NV_WR32(par->PGRAPH, 0x0810, 0x00000000);
NV_WR32(par->PGRAPH, 0x0608, 0xFFFFFFFF);
} else {
NV_WR32(par->PGRAPH, 0x0080, 0xFFFFFFFF);
NV_WR32(par->PGRAPH, 0x0080, 0x00000000);
NV_WR32(par->PGRAPH, 0x0140, 0x00000000);
NV_WR32(par->PGRAPH, 0x0100, 0xFFFFFFFF);
NV_WR32(par->PGRAPH, 0x0144, 0x10010100);
NV_WR32(par->PGRAPH, 0x0714, 0xFFFFFFFF);
NV_WR32(par->PGRAPH, 0x0720, 0x00000001);
NV_WR32(par->PGRAPH, 0x0710,
NV_RD32(par->PGRAPH, 0x0710) & 0x0007ff00);
NV_WR32(par->PGRAPH, 0x0710,
NV_RD32(par->PGRAPH, 0x0710) | 0x00020100);
if (par->Architecture == NV_ARCH_10) {
NV_WR32(par->PGRAPH, 0x0084, 0x00118700);
NV_WR32(par->PGRAPH, 0x0088, 0x24E00810);
NV_WR32(par->PGRAPH, 0x008C, 0x55DE0030);
for (i = 0; i < 32; i++)
NV_WR32(&par->PGRAPH[(0x0B00 / 4) + i], 0,
NV_RD32(&par->PFB[(0x0240 / 4) + i],
0));
NV_WR32(par->PGRAPH, 0x640, 0);
NV_WR32(par->PGRAPH, 0x644, 0);
NV_WR32(par->PGRAPH, 0x684, par->FbMapSize - 1);
NV_WR32(par->PGRAPH, 0x688, par->FbMapSize - 1);
NV_WR32(par->PGRAPH, 0x0810, 0x00000000);
NV_WR32(par->PGRAPH, 0x0608, 0xFFFFFFFF);
} else {
if (par->Architecture >= NV_ARCH_40) {
NV_WR32(par->PGRAPH, 0x0084, 0x401287c0);
NV_WR32(par->PGRAPH, 0x008C, 0x60de8051);
NV_WR32(par->PGRAPH, 0x0090, 0x00008000);
NV_WR32(par->PGRAPH, 0x0610, 0x00be3c5f);
NV_WR32(par->PGRAPH, 0x0bc4,
NV_RD32(par->PGRAPH, 0x0bc4) |
0x00008000);
j = NV_RD32(par->REGS, 0x1540) & 0xff;
if (j) {
for (i = 0; !(j & 1); j >>= 1, i++);
NV_WR32(par->PGRAPH, 0x5000, i);
}
if ((par->Chipset & 0xfff0) == 0x0040) {
NV_WR32(par->PGRAPH, 0x09b0,
0x83280fff);
NV_WR32(par->PGRAPH, 0x09b4,
0x000000a0);
} else {
NV_WR32(par->PGRAPH, 0x0820,
0x83280eff);
NV_WR32(par->PGRAPH, 0x0824,
0x000000a0);
}
switch (par->Chipset & 0xfff0) {
case 0x0040:
case 0x0210:
NV_WR32(par->PGRAPH, 0x09b8,
0x0078e366);
NV_WR32(par->PGRAPH, 0x09bc,
0x0000014c);
NV_WR32(par->PFB, 0x033C,
NV_RD32(par->PFB, 0x33C) &
0xffff7fff);
break;
case 0x00C0:
case 0x0120:
NV_WR32(par->PGRAPH, 0x0828,
0x007596ff);
NV_WR32(par->PGRAPH, 0x082C,
0x00000108);
break;
case 0x0160:
case 0x01D0:
case 0x0240:
case 0x03D0:
NV_WR32(par->PMC, 0x1700,
NV_RD32(par->PFB, 0x020C));
NV_WR32(par->PMC, 0x1704, 0);
NV_WR32(par->PMC, 0x1708, 0);
NV_WR32(par->PMC, 0x170C,
NV_RD32(par->PFB, 0x020C));
NV_WR32(par->PGRAPH, 0x0860, 0);
NV_WR32(par->PGRAPH, 0x0864, 0);
NV_WR32(par->PRAMDAC, 0x0608,
NV_RD32(par->PRAMDAC,
0x0608) | 0x00100000);
break;
case 0x0140:
NV_WR32(par->PGRAPH, 0x0828,
0x0072cb77);
NV_WR32(par->PGRAPH, 0x082C,
0x00000108);
break;
case 0x0220:
NV_WR32(par->PGRAPH, 0x0860, 0);
NV_WR32(par->PGRAPH, 0x0864, 0);
NV_WR32(par->PRAMDAC, 0x0608,
NV_RD32(par->PRAMDAC, 0x0608) |
0x00100000);
break;
case 0x0090:
case 0x0290:
case 0x0390:
NV_WR32(par->PRAMDAC, 0x0608,
NV_RD32(par->PRAMDAC, 0x0608) |
0x00100000);
NV_WR32(par->PGRAPH, 0x0828,
0x07830610);
NV_WR32(par->PGRAPH, 0x082C,
0x0000016A);
break;
default:
break;
}
NV_WR32(par->PGRAPH, 0x0b38, 0x2ffff800);
NV_WR32(par->PGRAPH, 0x0b3c, 0x00006000);
NV_WR32(par->PGRAPH, 0x032C, 0x01000000);
NV_WR32(par->PGRAPH, 0x0220, 0x00001200);
} else if (par->Architecture == NV_ARCH_30) {
NV_WR32(par->PGRAPH, 0x0084, 0x40108700);
NV_WR32(par->PGRAPH, 0x0890, 0x00140000);
NV_WR32(par->PGRAPH, 0x008C, 0xf00e0431);
NV_WR32(par->PGRAPH, 0x0090, 0x00008000);
NV_WR32(par->PGRAPH, 0x0610, 0xf04b1f36);
NV_WR32(par->PGRAPH, 0x0B80, 0x1002d888);
NV_WR32(par->PGRAPH, 0x0B88, 0x62ff007f);
} else {
NV_WR32(par->PGRAPH, 0x0084, 0x00118700);
NV_WR32(par->PGRAPH, 0x008C, 0xF20E0431);
NV_WR32(par->PGRAPH, 0x0090, 0x00000000);
NV_WR32(par->PGRAPH, 0x009C, 0x00000040);
if ((par->Chipset & 0x0ff0) >= 0x0250) {
NV_WR32(par->PGRAPH, 0x0890,
0x00080000);
NV_WR32(par->PGRAPH, 0x0610,
0x304B1FB6);
NV_WR32(par->PGRAPH, 0x0B80,
0x18B82880);
NV_WR32(par->PGRAPH, 0x0B84,
0x44000000);
NV_WR32(par->PGRAPH, 0x0098,
0x40000080);
NV_WR32(par->PGRAPH, 0x0B88,
0x000000ff);
} else {
NV_WR32(par->PGRAPH, 0x0880,
0x00080000);
NV_WR32(par->PGRAPH, 0x0094,
0x00000005);
NV_WR32(par->PGRAPH, 0x0B80,
0x45CAA208);
NV_WR32(par->PGRAPH, 0x0B84,
0x24000000);
NV_WR32(par->PGRAPH, 0x0098,
0x00000040);
NV_WR32(par->PGRAPH, 0x0750,
0x00E00038);
NV_WR32(par->PGRAPH, 0x0754,
0x00000030);
NV_WR32(par->PGRAPH, 0x0750,
0x00E10038);
NV_WR32(par->PGRAPH, 0x0754,
0x00000030);
}
}
if ((par->Architecture < NV_ARCH_40) ||
((par->Chipset & 0xfff0) == 0x0040)) {
for (i = 0; i < 32; i++) {
NV_WR32(par->PGRAPH, 0x0900 + i*4,
NV_RD32(par->PFB, 0x0240 +i*4));
NV_WR32(par->PGRAPH, 0x6900 + i*4,
NV_RD32(par->PFB, 0x0240 +i*4));
}
} else {
if (((par->Chipset & 0xfff0) == 0x0090) ||
((par->Chipset & 0xfff0) == 0x01D0) ||
((par->Chipset & 0xfff0) == 0x0290) ||
((par->Chipset & 0xfff0) == 0x0390) ||
((par->Chipset & 0xfff0) == 0x03D0)) {
for (i = 0; i < 60; i++) {
NV_WR32(par->PGRAPH,
0x0D00 + i*4,
NV_RD32(par->PFB,
0x0600 + i*4));
NV_WR32(par->PGRAPH,
0x6900 + i*4,
NV_RD32(par->PFB,
0x0600 + i*4));
}
} else {
for (i = 0; i < 48; i++) {
NV_WR32(par->PGRAPH,
0x0900 + i*4,
NV_RD32(par->PFB,
0x0600 + i*4));
if(((par->Chipset & 0xfff0)
!= 0x0160) &&
((par->Chipset & 0xfff0)
!= 0x0220) &&
((par->Chipset & 0xfff0)
!= 0x0240))
NV_WR32(par->PGRAPH,
0x6900 + i*4,
NV_RD32(par->PFB,
0x0600 + i*4));
}
}
}
if (par->Architecture >= NV_ARCH_40) {
if ((par->Chipset & 0xfff0) == 0x0040) {
NV_WR32(par->PGRAPH, 0x09A4,
NV_RD32(par->PFB, 0x0200));
NV_WR32(par->PGRAPH, 0x09A8,
NV_RD32(par->PFB, 0x0204));
NV_WR32(par->PGRAPH, 0x69A4,
NV_RD32(par->PFB, 0x0200));
NV_WR32(par->PGRAPH, 0x69A8,
NV_RD32(par->PFB, 0x0204));
NV_WR32(par->PGRAPH, 0x0820, 0);
NV_WR32(par->PGRAPH, 0x0824, 0);
NV_WR32(par->PGRAPH, 0x0864,
par->FbMapSize - 1);
NV_WR32(par->PGRAPH, 0x0868,
par->FbMapSize - 1);
} else {
if ((par->Chipset & 0xfff0) == 0x0090 ||
(par->Chipset & 0xfff0) == 0x01D0 ||
(par->Chipset & 0xfff0) == 0x0290 ||
(par->Chipset & 0xfff0) == 0x0390) {
NV_WR32(par->PGRAPH, 0x0DF0,
NV_RD32(par->PFB, 0x0200));
NV_WR32(par->PGRAPH, 0x0DF4,
NV_RD32(par->PFB, 0x0204));
} else {
NV_WR32(par->PGRAPH, 0x09F0,
NV_RD32(par->PFB, 0x0200));
NV_WR32(par->PGRAPH, 0x09F4,
NV_RD32(par->PFB, 0x0204));
}
NV_WR32(par->PGRAPH, 0x69F0,
NV_RD32(par->PFB, 0x0200));
NV_WR32(par->PGRAPH, 0x69F4,
NV_RD32(par->PFB, 0x0204));
NV_WR32(par->PGRAPH, 0x0840, 0);
NV_WR32(par->PGRAPH, 0x0844, 0);
NV_WR32(par->PGRAPH, 0x08a0,
par->FbMapSize - 1);
NV_WR32(par->PGRAPH, 0x08a4,
par->FbMapSize - 1);
}
} else {
NV_WR32(par->PGRAPH, 0x09A4,
NV_RD32(par->PFB, 0x0200));
NV_WR32(par->PGRAPH, 0x09A8,
NV_RD32(par->PFB, 0x0204));
NV_WR32(par->PGRAPH, 0x0750, 0x00EA0000);
NV_WR32(par->PGRAPH, 0x0754,
NV_RD32(par->PFB, 0x0200));
NV_WR32(par->PGRAPH, 0x0750, 0x00EA0004);
NV_WR32(par->PGRAPH, 0x0754,
NV_RD32(par->PFB, 0x0204));
NV_WR32(par->PGRAPH, 0x0820, 0);
NV_WR32(par->PGRAPH, 0x0824, 0);
NV_WR32(par->PGRAPH, 0x0864,
par->FbMapSize - 1);
NV_WR32(par->PGRAPH, 0x0868,
par->FbMapSize - 1);
}
NV_WR32(par->PGRAPH, 0x0B20, 0x00000000);
NV_WR32(par->PGRAPH, 0x0B04, 0xFFFFFFFF);
}
}
NV_WR32(par->PGRAPH, 0x053C, 0);
NV_WR32(par->PGRAPH, 0x0540, 0);
NV_WR32(par->PGRAPH, 0x0544, 0x00007FFF);
NV_WR32(par->PGRAPH, 0x0548, 0x00007FFF);
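/*
 * Reset and reprogram the FIFO: the writes below halt it, program the
 * DMA fetch parameters (note the high endian bit set in the big-endian
 * variant of the 0x489 write), and finally re-enable it.
 */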
NV_WR32(par->PFIFO, 0x0140 * 4, 0x00000000);
NV_WR32(par->PFIFO, 0x0141 * 4, 0x00000001);
NV_WR32(par->PFIFO, 0x0480 * 4, 0x00000000);
NV_WR32(par->PFIFO, 0x0494 * 4, 0x00000000);
if (par->Architecture >= NV_ARCH_40)
NV_WR32(par->PFIFO, 0x0481 * 4, 0x00010000);
else
NV_WR32(par->PFIFO, 0x0481 * 4, 0x00000100);
NV_WR32(par->PFIFO, 0x0490 * 4, 0x00000000);
NV_WR32(par->PFIFO, 0x0491 * 4, 0x00000000);
if (par->Architecture >= NV_ARCH_40)
NV_WR32(par->PFIFO, 0x048B * 4, 0x00001213);
else
NV_WR32(par->PFIFO, 0x048B * 4, 0x00001209);
NV_WR32(par->PFIFO, 0x0400 * 4, 0x00000000);
NV_WR32(par->PFIFO, 0x0414 * 4, 0x00000000);
NV_WR32(par->PFIFO, 0x0084 * 4, 0x03000100);
NV_WR32(par->PFIFO, 0x0085 * 4, 0x00000110);
NV_WR32(par->PFIFO, 0x0086 * 4, 0x00000112);
NV_WR32(par->PFIFO, 0x0143 * 4, 0x0000FFFF);
NV_WR32(par->PFIFO, 0x0496 * 4, 0x0000FFFF);
NV_WR32(par->PFIFO, 0x0050 * 4, 0x00000000);
NV_WR32(par->PFIFO, 0x0040 * 4, 0xFFFFFFFF);
NV_WR32(par->PFIFO, 0x0415 * 4, 0x00000001);
NV_WR32(par->PFIFO, 0x048C * 4, 0x00000000);
NV_WR32(par->PFIFO, 0x04A0 * 4, 0x00000000);
#ifdef __BIG_ENDIAN
NV_WR32(par->PFIFO, 0x0489 * 4, 0x800F0078);
#else
NV_WR32(par->PFIFO, 0x0489 * 4, 0x000F0078);
#endif
NV_WR32(par->PFIFO, 0x0488 * 4, 0x00000001);
NV_WR32(par->PFIFO, 0x0480 * 4, 0x00000001);
NV_WR32(par->PFIFO, 0x0494 * 4, 0x00000001);
NV_WR32(par->PFIFO, 0x0495 * 4, 0x00000001);
NV_WR32(par->PFIFO, 0x0140 * 4, 0x00000001);
if (!state) {
par->CurrentState = NULL;
return;
}
if (par->Architecture >= NV_ARCH_10) {
if (par->twoHeads) {
NV_WR32(par->PCRTC0, 0x0860, state->head);
NV_WR32(par->PCRTC0, 0x2860, state->head2);
}
NV_WR32(par->PRAMDAC, 0x0404, NV_RD32(par->PRAMDAC, 0x0404) |
(1 << 25));
NV_WR32(par->PMC, 0x8704, 1);
NV_WR32(par->PMC, 0x8140, 0);
NV_WR32(par->PMC, 0x8920, 0);
NV_WR32(par->PMC, 0x8924, 0);
NV_WR32(par->PMC, 0x8908, par->FbMapSize - 1);
NV_WR32(par->PMC, 0x890C, par->FbMapSize - 1);
NV_WR32(par->PMC, 0x1588, 0);
NV_WR32(par->PCRTC, 0x0810, state->cursorConfig);
NV_WR32(par->PCRTC, 0x0830, state->displayV - 3);
NV_WR32(par->PCRTC, 0x0834, state->displayV - 1);
if (par->FlatPanel) {
if ((par->Chipset & 0x0ff0) == 0x0110) {
NV_WR32(par->PRAMDAC, 0x0528, state->dither);
} else if (par->twoHeads) {
NV_WR32(par->PRAMDAC, 0x083C, state->dither);
}
VGA_WR08(par->PCIO, 0x03D4, 0x53);
VGA_WR08(par->PCIO, 0x03D5, state->timingH);
VGA_WR08(par->PCIO, 0x03D4, 0x54);
VGA_WR08(par->PCIO, 0x03D5, state->timingV);
VGA_WR08(par->PCIO, 0x03D4, 0x21);
VGA_WR08(par->PCIO, 0x03D5, 0xfa);
}
VGA_WR08(par->PCIO, 0x03D4, 0x41);
VGA_WR08(par->PCIO, 0x03D5, state->extra);
}
VGA_WR08(par->PCIO, 0x03D4, 0x19);
VGA_WR08(par->PCIO, 0x03D5, state->repaint0);
VGA_WR08(par->PCIO, 0x03D4, 0x1A);
VGA_WR08(par->PCIO, 0x03D5, state->repaint1);
VGA_WR08(par->PCIO, 0x03D4, 0x25);
VGA_WR08(par->PCIO, 0x03D5, state->screen);
VGA_WR08(par->PCIO, 0x03D4, 0x28);
VGA_WR08(par->PCIO, 0x03D5, state->pixel);
VGA_WR08(par->PCIO, 0x03D4, 0x2D);
VGA_WR08(par->PCIO, 0x03D5, state->horiz);
VGA_WR08(par->PCIO, 0x03D4, 0x1C);
VGA_WR08(par->PCIO, 0x03D5, state->fifo);
VGA_WR08(par->PCIO, 0x03D4, 0x1B);
VGA_WR08(par->PCIO, 0x03D5, state->arbitration0);
VGA_WR08(par->PCIO, 0x03D4, 0x20);
VGA_WR08(par->PCIO, 0x03D5, state->arbitration1);
if(par->Architecture >= NV_ARCH_30) {
VGA_WR08(par->PCIO, 0x03D4, 0x47);
VGA_WR08(par->PCIO, 0x03D5, state->arbitration1 >> 8);
}
VGA_WR08(par->PCIO, 0x03D4, 0x30);
VGA_WR08(par->PCIO, 0x03D5, state->cursor0);
VGA_WR08(par->PCIO, 0x03D4, 0x31);
VGA_WR08(par->PCIO, 0x03D5, state->cursor1);
VGA_WR08(par->PCIO, 0x03D4, 0x2F);
VGA_WR08(par->PCIO, 0x03D5, state->cursor2);
VGA_WR08(par->PCIO, 0x03D4, 0x39);
VGA_WR08(par->PCIO, 0x03D5, state->interlace);
if (!par->FlatPanel) {
if (par->Architecture >= NV_ARCH_40)
NV_WR32(par->PRAMDAC0, 0x0580, state->control);
NV_WR32(par->PRAMDAC0, 0x050C, state->pllsel);
NV_WR32(par->PRAMDAC0, 0x0508, state->vpll);
if (par->twoHeads)
NV_WR32(par->PRAMDAC0, 0x0520, state->vpll2);
if (par->twoStagePLL) {
NV_WR32(par->PRAMDAC0, 0x0578, state->vpllB);
NV_WR32(par->PRAMDAC0, 0x057C, state->vpll2B);
}
} else {
NV_WR32(par->PRAMDAC, 0x0848, state->scale);
NV_WR32(par->PRAMDAC, 0x0828, state->crtcSync +
par->PanelTweak);
}
NV_WR32(par->PRAMDAC, 0x0600, state->general);
NV_WR32(par->PCRTC, 0x0140, 0);
NV_WR32(par->PCRTC, 0x0100, 1);
par->CurrentState = state;
}
void NVUnloadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
{
VGA_WR08(par->PCIO, 0x03D4, 0x19);
state->repaint0 = VGA_RD08(par->PCIO, 0x03D5);
VGA_WR08(par->PCIO, 0x03D4, 0x1A);
state->repaint1 = VGA_RD08(par->PCIO, 0x03D5);
VGA_WR08(par->PCIO, 0x03D4, 0x25);
state->screen = VGA_RD08(par->PCIO, 0x03D5);
VGA_WR08(par->PCIO, 0x03D4, 0x28);
state->pixel = VGA_RD08(par->PCIO, 0x03D5);
VGA_WR08(par->PCIO, 0x03D4, 0x2D);
state->horiz = VGA_RD08(par->PCIO, 0x03D5);
VGA_WR08(par->PCIO, 0x03D4, 0x1C);
state->fifo = VGA_RD08(par->PCIO, 0x03D5);
VGA_WR08(par->PCIO, 0x03D4, 0x1B);
state->arbitration0 = VGA_RD08(par->PCIO, 0x03D5);
VGA_WR08(par->PCIO, 0x03D4, 0x20);
state->arbitration1 = VGA_RD08(par->PCIO, 0x03D5);
if(par->Architecture >= NV_ARCH_30) {
VGA_WR08(par->PCIO, 0x03D4, 0x47);
state->arbitration1 |= (VGA_RD08(par->PCIO, 0x03D5) & 1) << 8;
}
VGA_WR08(par->PCIO, 0x03D4, 0x30);
state->cursor0 = VGA_RD08(par->PCIO, 0x03D5);
VGA_WR08(par->PCIO, 0x03D4, 0x31);
state->cursor1 = VGA_RD08(par->PCIO, 0x03D5);
VGA_WR08(par->PCIO, 0x03D4, 0x2F);
state->cursor2 = VGA_RD08(par->PCIO, 0x03D5);
VGA_WR08(par->PCIO, 0x03D4, 0x39);
state->interlace = VGA_RD08(par->PCIO, 0x03D5);
state->vpll = NV_RD32(par->PRAMDAC0, 0x0508);
if (par->twoHeads)
state->vpll2 = NV_RD32(par->PRAMDAC0, 0x0520);
if (par->twoStagePLL) {
state->vpllB = NV_RD32(par->PRAMDAC0, 0x0578);
state->vpll2B = NV_RD32(par->PRAMDAC0, 0x057C);
}
state->pllsel = NV_RD32(par->PRAMDAC0, 0x050C);
state->general = NV_RD32(par->PRAMDAC, 0x0600);
state->scale = NV_RD32(par->PRAMDAC, 0x0848);
state->config = NV_RD32(par->PFB, 0x0200);
if (par->Architecture >= NV_ARCH_40 && !par->FlatPanel)
state->control = NV_RD32(par->PRAMDAC0, 0x0580);
if (par->Architecture >= NV_ARCH_10) {
if (par->twoHeads) {
state->head = NV_RD32(par->PCRTC0, 0x0860);
state->head2 = NV_RD32(par->PCRTC0, 0x2860);
VGA_WR08(par->PCIO, 0x03D4, 0x44);
state->crtcOwner = VGA_RD08(par->PCIO, 0x03D5);
}
VGA_WR08(par->PCIO, 0x03D4, 0x41);
state->extra = VGA_RD08(par->PCIO, 0x03D5);
state->cursorConfig = NV_RD32(par->PCRTC, 0x0810);
if ((par->Chipset & 0x0ff0) == 0x0110) {
state->dither = NV_RD32(par->PRAMDAC, 0x0528);
} else if (par->twoHeads) {
state->dither = NV_RD32(par->PRAMDAC, 0x083C);
}
if (par->FlatPanel) {
VGA_WR08(par->PCIO, 0x03D4, 0x53);
state->timingH = VGA_RD08(par->PCIO, 0x03D5);
VGA_WR08(par->PCIO, 0x03D4, 0x54);
state->timingV = VGA_RD08(par->PCIO, 0x03D5);
}
}
}
void NVSetStartAddress(struct nvidia_par *par, u32 start)
{
NV_WR32(par->PCRTC, 0x800, start);
}
| gpl-2.0 |
nguyentu1602/gcc | gcc/testsuite/gcc.dg/compat/struct-align-2_y.c | 181 | 1695 | /* { dg-options "-Wno-psabi" { target { { i?86-*-* x86_64-*-* } && ilp32 } } } */
/* Disable this test for 16-bit targets. */
#include <limits.h>
#if !(defined __GNUC__) || (INT_MAX > 32767)
#include "compat-common.h"
#include "struct-align-2.h"
#define TEST(NAME) \
struct outer_##NAME { \
int i; \
struct epoll_event_##NAME ee; \
}; \
\
extern unsigned int v1_##NAME; \
extern unsigned int v2_##NAME; \
extern unsigned long long v3_##NAME; \
\
extern struct outer_##NAME s_##NAME[2]; \
\
extern void pass_##NAME (struct outer_##NAME); \
extern struct outer_##NAME return_##NAME (void); \
\
void \
checkp_##NAME (struct outer_##NAME *p) \
{ \
if (p->i != v1_##NAME) \
DEBUG_CHECK; \
if (p->ee.events != v2_##NAME) \
DEBUG_CHECK; \
if (p->ee.data != v3_##NAME) \
DEBUG_CHECK; \
} \
\
void \
test_##NAME (void) \
{ \
struct outer_##NAME s; \
DEBUG_FPUTS (DESC_##NAME); \
DEBUG_NL; \
DEBUG_FPUTS (" global array"); \
checkp_##NAME (&s_##NAME[0]); \
checkp_##NAME (&s_##NAME[1]); \
DEBUG_NL; \
DEBUG_FPUTS (" argument"); \
pass_##NAME (s_##NAME[0]); \
DEBUG_NL; \
DEBUG_FPUTS (" function result"); \
s = return_##NAME (); \
checkp_##NAME (&s); \
DEBUG_NL; \
}
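/*
 * Each TEST() invocation below instantiates checkp_NAME() and
 * test_NAME() for one alignment variant declared in struct-align-2.h;
 * the attribute-based variants are compiled out when SKIP_ATTRIBUTE is
 * defined.
 */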
TEST (orig)
#ifndef SKIP_ATTRIBUTE
TEST (structmax)
TEST (struct4)
TEST (struct8)
TEST (data4)
TEST (data8)
TEST (p)
TEST (pstruct4)
TEST (pstruct8)
TEST (pdata4)
TEST (pdata8)
#endif
#else
int i; /* prevent compiling an empty file */
#endif /* INT_MAX */
| gpl-2.0 |
hernstrom/linux | drivers/base/devtmpfs.c | 2229 | 9219 | /*
* devtmpfs - kernel-maintained tmpfs-based /dev
*
* Copyright (C) 2009, Kay Sievers <kay.sievers@vrfy.org>
*
* During bootup, before any driver core device is registered,
* devtmpfs, a tmpfs-based filesystem is created. Every driver-core
* device which requests a device node, will add a node in this
* filesystem.
* By default, all devices are named after the name of the device,
* owned by root and have a default mode of 0600. Subsystems can
* overwrite the default setting if needed.
*/
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/mount.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/namei.h>
#include <linux/fs.h>
#include <linux/shmem_fs.h>
#include <linux/ramfs.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include "base.h"
static struct task_struct *thread;
#if defined CONFIG_DEVTMPFS_MOUNT
static int mount_dev = 1;
#else
static int mount_dev;
#endif
static DEFINE_SPINLOCK(req_lock);
static struct req {
struct req *next;
struct completion done;
int err;
const char *name;
umode_t mode; /* 0 => delete */
kuid_t uid;
kgid_t gid;
struct device *dev;
} *requests;
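/*
 * Create/remove requests are queued on this singly-linked list and
 * serviced by the kdevtmpfs thread, which performs the actual VFS
 * operations from within its own mount namespace; submitters block on
 * req.done until their request has completed.
 */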
static int __init mount_param(char *str)
{
mount_dev = simple_strtoul(str, NULL, 0);
return 1;
}
__setup("devtmpfs.mount=", mount_param);
static struct dentry *dev_mount(struct file_system_type *fs_type, int flags,
const char *dev_name, void *data)
{
#ifdef CONFIG_TMPFS
return mount_single(fs_type, flags, data, shmem_fill_super);
#else
return mount_single(fs_type, flags, data, ramfs_fill_super);
#endif
}
static struct file_system_type dev_fs_type = {
.name = "devtmpfs",
.mount = dev_mount,
.kill_sb = kill_litter_super,
};
#ifdef CONFIG_BLOCK
static inline int is_blockdev(struct device *dev)
{
return dev->class == &block_class;
}
#else
static inline int is_blockdev(struct device *dev) { return 0; }
#endif
int devtmpfs_create_node(struct device *dev)
{
const char *tmp = NULL;
struct req req;
if (!thread)
return 0;
req.mode = 0;
req.uid = GLOBAL_ROOT_UID;
req.gid = GLOBAL_ROOT_GID;
req.name = device_get_devnode(dev, &req.mode, &req.uid, &req.gid, &tmp);
if (!req.name)
return -ENOMEM;
if (req.mode == 0)
req.mode = 0600;
if (is_blockdev(dev))
req.mode |= S_IFBLK;
else
req.mode |= S_IFCHR;
req.dev = dev;
init_completion(&req.done);
spin_lock(&req_lock);
req.next = requests;
requests = &req;
spin_unlock(&req_lock);
wake_up_process(thread);
wait_for_completion(&req.done);
kfree(tmp);
return req.err;
}
int devtmpfs_delete_node(struct device *dev)
{
const char *tmp = NULL;
struct req req;
if (!thread)
return 0;
req.name = device_get_devnode(dev, NULL, NULL, NULL, &tmp);
if (!req.name)
return -ENOMEM;
req.mode = 0;
req.dev = dev;
init_completion(&req.done);
spin_lock(&req_lock);
req.next = requests;
requests = &req;
spin_unlock(&req_lock);
wake_up_process(thread);
wait_for_completion(&req.done);
kfree(tmp);
return req.err;
}
static int dev_mkdir(const char *name, umode_t mode)
{
struct dentry *dentry;
struct path path;
int err;
dentry = kern_path_create(AT_FDCWD, name, &path, LOOKUP_DIRECTORY);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
err = vfs_mkdir(path.dentry->d_inode, dentry, mode);
if (!err)
/* mark as kernel-created inode */
dentry->d_inode->i_private = &thread;
done_path_create(&path, dentry);
return err;
}
static int create_path(const char *nodepath)
{
char *path;
char *s;
int err = 0;
/* parent directories do not exist, create them */
path = kstrdup(nodepath, GFP_KERNEL);
if (!path)
return -ENOMEM;
s = path;
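/*
 * Walk the copy left to right, temporarily NUL-terminating at each
 * '/' so that every ancestor directory is created in turn.
 */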
for (;;) {
s = strchr(s, '/');
if (!s)
break;
s[0] = '\0';
err = dev_mkdir(path, 0755);
if (err && err != -EEXIST)
break;
s[0] = '/';
s++;
}
kfree(path);
return err;
}
static int handle_create(const char *nodename, umode_t mode, kuid_t uid,
kgid_t gid, struct device *dev)
{
struct dentry *dentry;
struct path path;
int err;
dentry = kern_path_create(AT_FDCWD, nodename, &path, 0);
if (dentry == ERR_PTR(-ENOENT)) {
create_path(nodename);
dentry = kern_path_create(AT_FDCWD, nodename, &path, 0);
}
if (IS_ERR(dentry))
return PTR_ERR(dentry);
err = vfs_mknod(path.dentry->d_inode, dentry, mode, dev->devt);
if (!err) {
struct iattr newattrs;
newattrs.ia_mode = mode;
newattrs.ia_uid = uid;
newattrs.ia_gid = gid;
newattrs.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID;
mutex_lock(&dentry->d_inode->i_mutex);
notify_change(dentry, &newattrs);
mutex_unlock(&dentry->d_inode->i_mutex);
/* mark as kernel-created inode */
dentry->d_inode->i_private = &thread;
}
done_path_create(&path, dentry);
return err;
}
static int dev_rmdir(const char *name)
{
struct path parent;
struct dentry *dentry;
int err;
dentry = kern_path_locked(name, &parent);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
if (dentry->d_inode) {
if (dentry->d_inode->i_private == &thread)
err = vfs_rmdir(parent.dentry->d_inode, dentry);
else
err = -EPERM;
} else {
err = -ENOENT;
}
dput(dentry);
mutex_unlock(&parent.dentry->d_inode->i_mutex);
path_put(&parent);
return err;
}
static int delete_path(const char *nodepath)
{
const char *path;
int err = 0;
path = kstrdup(nodepath, GFP_KERNEL);
if (!path)
return -ENOMEM;
for (;;) {
char *base;
base = strrchr(path, '/');
if (!base)
break;
base[0] = '\0';
err = dev_rmdir(path);
if (err)
break;
}
kfree(path);
return err;
}
static int dev_mynode(struct device *dev, struct inode *inode, struct kstat *stat)
{
/* did we create it */
if (inode->i_private != &thread)
return 0;
/* does the dev_t match */
if (is_blockdev(dev)) {
if (!S_ISBLK(stat->mode))
return 0;
} else {
if (!S_ISCHR(stat->mode))
return 0;
}
if (stat->rdev != dev->devt)
return 0;
/* ours */
return 1;
}
static int handle_remove(const char *nodename, struct device *dev)
{
struct path parent;
struct dentry *dentry;
int deleted = 0;
int err;
dentry = kern_path_locked(nodename, &parent);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
if (dentry->d_inode) {
struct kstat stat;
struct path p = {.mnt = parent.mnt, .dentry = dentry};
err = vfs_getattr(&p, &stat);
if (!err && dev_mynode(dev, dentry->d_inode, &stat)) {
struct iattr newattrs;
/*
* before unlinking this node, reset permissions
* of possible references like hardlinks
*/
newattrs.ia_uid = GLOBAL_ROOT_UID;
newattrs.ia_gid = GLOBAL_ROOT_GID;
newattrs.ia_mode = stat.mode & ~0777;
newattrs.ia_valid =
ATTR_UID|ATTR_GID|ATTR_MODE;
mutex_lock(&dentry->d_inode->i_mutex);
notify_change(dentry, &newattrs);
mutex_unlock(&dentry->d_inode->i_mutex);
err = vfs_unlink(parent.dentry->d_inode, dentry);
if (!err || err == -ENOENT)
deleted = 1;
}
} else {
err = -ENOENT;
}
dput(dentry);
mutex_unlock(&parent.dentry->d_inode->i_mutex);
path_put(&parent);
if (deleted && strchr(nodename, '/'))
delete_path(nodename);
return err;
}
/*
* If configured, or requested by the commandline, devtmpfs will be
* auto-mounted after the kernel mounted the root filesystem.
*/
int devtmpfs_mount(const char *mntdir)
{
int err;
if (!mount_dev)
return 0;
if (!thread)
return 0;
err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
if (err)
printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
else
printk(KERN_INFO "devtmpfs: mounted\n");
return err;
}
static DECLARE_COMPLETION(setup_done);
static int handle(const char *name, umode_t mode, kuid_t uid, kgid_t gid,
struct device *dev)
{
if (mode)
return handle_create(name, mode, uid, gid, dev);
else
return handle_remove(name, dev);
}
static int devtmpfsd(void *p)
{
char options[] = "mode=0755";
int *err = p;
*err = sys_unshare(CLONE_NEWNS);
if (*err)
goto out;
*err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
if (*err)
goto out;
sys_chdir("/.."); /* will traverse into overmounted root */
sys_chroot(".");
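/*
 * From here on the thread sees the freshly mounted devtmpfs as its
 * root, so the relative node paths carried by requests resolve inside
 * it.
 */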
complete(&setup_done);
while (1) {
spin_lock(&req_lock);
while (requests) {
struct req *req = requests;
requests = NULL;
spin_unlock(&req_lock);
while (req) {
struct req *next = req->next;
req->err = handle(req->name, req->mode,
req->uid, req->gid, req->dev);
complete(&req->done);
req = next;
}
spin_lock(&req_lock);
}
__set_current_state(TASK_INTERRUPTIBLE);
spin_unlock(&req_lock);
schedule();
}
return 0;
out:
complete(&setup_done);
return *err;
}
/*
* Create devtmpfs instance, driver-core devices will add their device
* nodes here.
*/
int __init devtmpfs_init(void)
{
int err = register_filesystem(&dev_fs_type);
if (err) {
printk(KERN_ERR "devtmpfs: unable to register devtmpfs "
"type %i\n", err);
return err;
}
thread = kthread_run(devtmpfsd, &err, "kdevtmpfs");
if (!IS_ERR(thread)) {
wait_for_completion(&setup_done);
} else {
err = PTR_ERR(thread);
thread = NULL;
}
if (err) {
printk(KERN_ERR "devtmpfs: unable to create devtmpfs %i\n", err);
unregister_filesystem(&dev_fs_type);
return err;
}
printk(KERN_INFO "devtmpfs: initialized\n");
return 0;
}
| gpl-2.0 |
arter97/codeaurora | drivers/usb/serial/omninet.c | 2229 | 7090 | /*
* USB ZyXEL omni.net LCD PLUS driver
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* See Documentation/usb/usb-serial.txt for more information on using this
* driver
*
* Please report both successes and troubles to the author at omninet@kroah.com
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#define DRIVER_AUTHOR "Alessandro Zummo"
#define DRIVER_DESC "USB ZyXEL omni.net LCD PLUS Driver"
#define ZYXEL_VENDOR_ID 0x0586
#define ZYXEL_OMNINET_ID 0x1000
/* This one seems to be a re-branded ZyXEL device */
#define BT_IGNITIONPRO_ID 0x2000
/* function prototypes */
static int omninet_open(struct tty_struct *tty, struct usb_serial_port *port);
static void omninet_process_read_urb(struct urb *urb);
static void omninet_write_bulk_callback(struct urb *urb);
static int omninet_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count);
static int omninet_write_room(struct tty_struct *tty);
static void omninet_disconnect(struct usb_serial *serial);
static int omninet_port_probe(struct usb_serial_port *port);
static int omninet_port_remove(struct usb_serial_port *port);
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(ZYXEL_VENDOR_ID, ZYXEL_OMNINET_ID) },
{ USB_DEVICE(ZYXEL_VENDOR_ID, BT_IGNITIONPRO_ID) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);
static struct usb_serial_driver zyxel_omninet_device = {
.driver = {
.owner = THIS_MODULE,
.name = "omninet",
},
.description = "ZyXEL - omni.net lcd plus usb",
.id_table = id_table,
.num_ports = 1,
.port_probe = omninet_port_probe,
.port_remove = omninet_port_remove,
.open = omninet_open,
.write = omninet_write,
.write_room = omninet_write_room,
.write_bulk_callback = omninet_write_bulk_callback,
.process_read_urb = omninet_process_read_urb,
.disconnect = omninet_disconnect,
};
static struct usb_serial_driver * const serial_drivers[] = {
&zyxel_omninet_device, NULL
};
/*
* The protocol.
*
* The omni.net always exchange 64 bytes of data with the host. The first
* four bytes are the control header.
*
* oh_seq is a sequence number. Don't know if/how it's used.
* oh_len is the length of the data bytes in the packet.
* oh_xxx Bit-mapped, related to handshaking and status info.
* I normally set it to 0x03 in transmitted frames.
* 7: Active when the TA is in a CONNECTed state.
* 6: unknown
* 5: handshaking, unknown
* 4: handshaking, unknown
* 3: unknown, usually 0
* 2: unknown, usually 0
* 1: handshaking, unknown, usually set to 1 in transmitted frames
* 0: handshaking, unknown, usually set to 1 in transmitted frames
* oh_pad Probably a pad byte.
*
* After the header you will find data bytes if oh_len was greater than zero.
*/
struct omninet_header {
__u8 oh_seq;
__u8 oh_len;
__u8 oh_xxx;
__u8 oh_pad;
};
struct omninet_data {
__u8 od_outseq; /* Sequence number for bulk_out URBs */
};
static int omninet_port_probe(struct usb_serial_port *port)
{
struct omninet_data *od;
od = kzalloc(sizeof(*od), GFP_KERNEL);
if (!od)
return -ENOMEM;
usb_set_serial_port_data(port, od);
return 0;
}
static int omninet_port_remove(struct usb_serial_port *port)
{
struct omninet_data *od;
od = usb_get_serial_port_data(port);
kfree(od);
return 0;
}
static int omninet_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
struct usb_serial_port *wport;
wport = serial->port[1];
tty_port_tty_set(&wport->port, tty);
return usb_serial_generic_open(tty, port);
}
#define OMNINET_HEADERLEN 4
#define OMNINET_BULKOUTSIZE 64
#define OMNINET_PAYLOADSIZE (OMNINET_BULKOUTSIZE - OMNINET_HEADERLEN)
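/*
 * Every bulk-out transfer is a fixed 64-byte frame: a four-byte
 * omninet_header followed by up to 60 payload bytes, with oh_len
 * giving the number of payload bytes that are actually valid.
 */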
static void omninet_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
const struct omninet_header *hdr = urb->transfer_buffer;
const unsigned char *data;
size_t data_len;
if (urb->actual_length <= OMNINET_HEADERLEN || !hdr->oh_len)
return;
data = (char *)urb->transfer_buffer + OMNINET_HEADERLEN;
data_len = min_t(size_t, urb->actual_length - OMNINET_HEADERLEN,
hdr->oh_len);
tty_insert_flip_string(&port->port, data, data_len);
tty_flip_buffer_push(&port->port);
}
static int omninet_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count)
{
struct usb_serial *serial = port->serial;
struct usb_serial_port *wport = serial->port[1];
struct omninet_data *od = usb_get_serial_port_data(port);
struct omninet_header *header = (struct omninet_header *)
wport->write_urb->transfer_buffer;
int result;
if (count == 0) {
dev_dbg(&port->dev, "%s - write request of 0 bytes\n", __func__);
return 0;
}
if (!test_and_clear_bit(0, &port->write_urbs_free)) {
dev_dbg(&port->dev, "%s - already writing\n", __func__);
return 0;
}
count = (count > OMNINET_PAYLOADSIZE) ? OMNINET_PAYLOADSIZE : count;
memcpy(wport->write_urb->transfer_buffer + OMNINET_HEADERLEN,
buf, count);
usb_serial_debug_data(&port->dev, __func__, count,
wport->write_urb->transfer_buffer);
header->oh_seq = od->od_outseq++;
header->oh_len = count;
header->oh_xxx = 0x03;
header->oh_pad = 0x00;
/* send the data out the bulk port, always 64 bytes */
wport->write_urb->transfer_buffer_length = OMNINET_BULKOUTSIZE;
result = usb_submit_urb(wport->write_urb, GFP_ATOMIC);
if (result) {
set_bit(0, &wport->write_urbs_free);
dev_err_console(port,
"%s - failed submitting write urb, error %d\n",
__func__, result);
} else
result = count;
return result;
}
static int omninet_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct usb_serial *serial = port->serial;
struct usb_serial_port *wport = serial->port[1];
int room = 0; /* Default: no room */
if (test_bit(0, &wport->write_urbs_free))
room = wport->bulk_out_size - OMNINET_HEADERLEN;
dev_dbg(&port->dev, "%s - returns %d\n", __func__, room);
return room;
}
static void omninet_write_bulk_callback(struct urb *urb)
{
/* struct omninet_header *header = (struct omninet_header *)
urb->transfer_buffer; */
struct usb_serial_port *port = urb->context;
int status = urb->status;
set_bit(0, &port->write_urbs_free);
if (status) {
dev_dbg(&port->dev, "%s - nonzero write bulk status received: %d\n",
__func__, status);
return;
}
usb_serial_port_softint(port);
}
static void omninet_disconnect(struct usb_serial *serial)
{
struct usb_serial_port *wport = serial->port[1];
usb_kill_urb(wport->write_urb);
}
module_usb_serial_driver(serial_drivers, id_table);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| gpl-2.0 |
djmatt604/android_kernel_samsung_note2jb | arch/arm/mach-tcc8k/clock.c | 2997 | 14858 | /*
* Lowlevel clock handling for Telechips TCC8xxx SoCs
*
* Copyright (C) 2010 by Hans J. Koch <hjk@linutronix.de>
*
* Licensed under the terms of the GPL v2
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/clkdev.h>
#include <mach/clock.h>
#include <mach/irqs.h>
#include <mach/tcc8k-regs.h>
#include "common.h"
#define BCLKCTR0 (CKC_BASE + BCLKCTR0_OFFS)
#define BCLKCTR1 (CKC_BASE + BCLKCTR1_OFFS)
#define ACLKREF (CKC_BASE + ACLKREF_OFFS)
#define ACLKUART0 (CKC_BASE + ACLKUART0_OFFS)
#define ACLKUART1 (CKC_BASE + ACLKUART1_OFFS)
#define ACLKUART2 (CKC_BASE + ACLKUART2_OFFS)
#define ACLKUART3 (CKC_BASE + ACLKUART3_OFFS)
#define ACLKUART4 (CKC_BASE + ACLKUART4_OFFS)
#define ACLKI2C (CKC_BASE + ACLKI2C_OFFS)
#define ACLKADC (CKC_BASE + ACLKADC_OFFS)
#define ACLKUSBH (CKC_BASE + ACLKUSBH_OFFS)
#define ACLKLCD (CKC_BASE + ACLKLCD_OFFS)
#define ACLKSDH0 (CKC_BASE + ACLKSDH0_OFFS)
#define ACLKSDH1 (CKC_BASE + ACLKSDH1_OFFS)
#define ACLKSPI0 (CKC_BASE + ACLKSPI0_OFFS)
#define ACLKSPI1 (CKC_BASE + ACLKSPI1_OFFS)
#define ACLKSPDIF (CKC_BASE + ACLKSPDIF_OFFS)
#define ACLKC3DEC (CKC_BASE + ACLKC3DEC_OFFS)
#define ACLKCAN0 (CKC_BASE + ACLKCAN0_OFFS)
#define ACLKCAN1 (CKC_BASE + ACLKCAN1_OFFS)
#define ACLKGSB0 (CKC_BASE + ACLKGSB0_OFFS)
#define ACLKGSB1 (CKC_BASE + ACLKGSB1_OFFS)
#define ACLKGSB2 (CKC_BASE + ACLKGSB2_OFFS)
#define ACLKGSB3 (CKC_BASE + ACLKGSB3_OFFS)
#define ACLKTCT (CKC_BASE + ACLKTCT_OFFS)
#define ACLKTCX (CKC_BASE + ACLKTCX_OFFS)
#define ACLKTCZ (CKC_BASE + ACLKTCZ_OFFS)
#define ACLK_MAX_DIV (0xfff + 1)
/* Crystal frequencies */
static unsigned long xi_rate, xti_rate;
static void __iomem *pll_cfg_addr(int pll)
{
switch (pll) {
case 0: return (CKC_BASE + PLL0CFG_OFFS);
case 1: return (CKC_BASE + PLL1CFG_OFFS);
case 2: return (CKC_BASE + PLL2CFG_OFFS);
default:
BUG();
}
}
static int pll_enable(int pll, int enable)
{
u32 reg;
void __iomem *addr = pll_cfg_addr(pll);
reg = __raw_readl(addr);
if (enable)
reg &= ~PLLxCFG_PD;
else
reg |= PLLxCFG_PD;
__raw_writel(reg, addr);
return 0;
}
static int xi_enable(int enable)
{
u32 reg;
reg = __raw_readl(CKC_BASE + CLKCTRL_OFFS);
if (enable)
reg |= CLKCTRL_XE;
else
reg &= ~CLKCTRL_XE;
__raw_writel(reg, CKC_BASE + CLKCTRL_OFFS);
return 0;
}
static int root_clk_enable(enum root_clks src)
{
switch (src) {
case CLK_SRC_PLL0: return pll_enable(0, 1);
case CLK_SRC_PLL1: return pll_enable(1, 1);
case CLK_SRC_PLL2: return pll_enable(2, 1);
case CLK_SRC_XI: return xi_enable(1);
default:
BUG();
}
return 0;
}
static int root_clk_disable(enum root_clks src)
{
switch (src) {
case CLK_SRC_PLL0: return pll_enable(0, 0);
case CLK_SRC_PLL1: return pll_enable(1, 0);
case CLK_SRC_PLL2: return pll_enable(2, 0);
case CLK_SRC_XI: return xi_enable(0);
default:
BUG();
}
return 0;
}
static int enable_clk(struct clk *clk)
{
u32 reg;
if (clk->root_id != CLK_SRC_NOROOT)
return root_clk_enable(clk->root_id);
if (clk->aclkreg) {
reg = __raw_readl(clk->aclkreg);
reg |= ACLK_EN;
__raw_writel(reg, clk->aclkreg);
}
if (clk->bclkctr) {
reg = __raw_readl(clk->bclkctr);
reg |= 1 << clk->bclk_shift;
__raw_writel(reg, clk->bclkctr);
}
return 0;
}
static void disable_clk(struct clk *clk)
{
u32 reg;
if (clk->root_id != CLK_SRC_NOROOT) {
root_clk_disable(clk->root_id);
return;
}
if (clk->bclkctr) {
reg = __raw_readl(clk->bclkctr);
reg &= ~(1 << clk->bclk_shift);
__raw_writel(reg, clk->bclkctr);
}
if (clk->aclkreg) {
reg = __raw_readl(clk->aclkreg);
reg &= ~ACLK_EN;
__raw_writel(reg, clk->aclkreg);
}
}
static unsigned long get_rate_pll(int pll)
{
u32 reg;
unsigned long s, m, p;
void __iomem *addr = pll_cfg_addr(pll);
reg = __raw_readl(addr);
s = (reg >> 16) & 0x07;
m = (reg >> 8) & 0xff;
p = reg & 0x3f;
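/* Fout = (M * Fxi) / (P * 2^S), using the fields decoded above */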
return (m * xi_rate) / (p * (1 << s));
}
static unsigned long get_rate_pll_div(int pll)
{
u32 reg;
unsigned long div = 0;
void __iomem *addr;
switch (pll) {
case 0:
addr = CKC_BASE + CLKDIVC0_OFFS;
reg = __raw_readl(addr);
if (reg & CLKDIVC0_P0E)
div = (reg >> 24) & 0x3f;
break;
case 1:
addr = CKC_BASE + CLKDIVC0_OFFS;
reg = __raw_readl(addr);
if (reg & CLKDIVC0_P1E)
div = (reg >> 16) & 0x3f;
break;
case 2:
addr = CKC_BASE + CLKDIVC1_OFFS;
reg = __raw_readl(addr);
if (reg & CLKDIVC1_P2E)
div = reg & 0x3f;
break;
}
return get_rate_pll(pll) / (div + 1);
}
static unsigned long get_rate_xi_div(void)
{
unsigned long div = 0;
u32 reg = __raw_readl(CKC_BASE + CLKDIVC0_OFFS);
if (reg & CLKDIVC0_XE)
div = (reg >> 8) & 0x3f;
return xi_rate / (div + 1);
}
static unsigned long get_rate_xti_div(void)
{
unsigned long div = 0;
u32 reg = __raw_readl(CKC_BASE + CLKDIVC0_OFFS);
if (reg & CLKDIVC0_XTE)
div = reg & 0x3f;
return xti_rate / (div + 1);
}
static unsigned long root_clk_get_rate(enum root_clks src)
{
switch (src) {
case CLK_SRC_PLL0: return get_rate_pll(0);
case CLK_SRC_PLL1: return get_rate_pll(1);
case CLK_SRC_PLL2: return get_rate_pll(2);
case CLK_SRC_PLL0DIV: return get_rate_pll_div(0);
case CLK_SRC_PLL1DIV: return get_rate_pll_div(1);
case CLK_SRC_PLL2DIV: return get_rate_pll_div(2);
case CLK_SRC_XI: return xi_rate;
case CLK_SRC_XTI: return xti_rate;
case CLK_SRC_XIDIV: return get_rate_xi_div();
case CLK_SRC_XTIDIV: return get_rate_xti_div();
default: return 0;
}
}
static unsigned long aclk_get_rate(struct clk *clk)
{
u32 reg;
unsigned long div;
unsigned int src;
reg = __raw_readl(clk->aclkreg);
div = reg & 0x0fff;
src = (reg >> ACLK_SEL_SHIFT) & CLK_SRC_MASK;
return root_clk_get_rate(src) / (div + 1);
}
static unsigned long aclk_best_div(struct clk *clk, unsigned long rate)
{
unsigned long div, src, freq, r1, r2;
if (!rate)
return ACLK_MAX_DIV;
src = __raw_readl(clk->aclkreg) >> ACLK_SEL_SHIFT;
src &= CLK_SRC_MASK;
freq = root_clk_get_rate(src);
div = freq / rate;
if (!div)
return 1;
if (div >= ACLK_MAX_DIV)
return ACLK_MAX_DIV;
r1 = freq / div;
r2 = freq / (div + 1);
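/* round to whichever divider lands nearer the requested rate */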
if ((rate - r2) < (r1 - rate))
return div + 1;
return div;
}
static unsigned long aclk_round_rate(struct clk *clk, unsigned long rate)
{
unsigned int src;
src = __raw_readl(clk->aclkreg) >> ACLK_SEL_SHIFT;
src &= CLK_SRC_MASK;
return root_clk_get_rate(src) / aclk_best_div(clk, rate);
}
static int aclk_set_rate(struct clk *clk, unsigned long rate)
{
u32 reg;
reg = __raw_readl(clk->aclkreg) & ~ACLK_DIV_MASK;
reg |= aclk_best_div(clk, rate) - 1;
__raw_writel(reg, clk->aclkreg);
return 0;
}
static unsigned long get_rate_sys(struct clk *clk)
{
unsigned int src;
src = __raw_readl(CKC_BASE + CLKCTRL_OFFS) & CLK_SRC_MASK;
return root_clk_get_rate(src);
}
static unsigned long get_rate_bus(struct clk *clk)
{
unsigned int reg, sdiv, bdiv, rate;
reg = __raw_readl(CKC_BASE + CLKCTRL_OFFS);
rate = get_rate_sys(clk);
sdiv = (reg >> 20) & 3;
if (sdiv)
rate /= sdiv + 1;
bdiv = (reg >> 4) & 0xff;
if (bdiv)
rate /= bdiv + 1;
return rate;
}
static unsigned long get_rate_cpu(struct clk *clk)
{
unsigned int reg, div, fsys, fbus;
fbus = get_rate_bus(clk);
reg = __raw_readl(CKC_BASE + CLKCTRL_OFFS);
if (reg & (1 << 29))
return fbus;
fsys = get_rate_sys(clk);
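/*
 * The CPU clock is interpolated between the bus and system clocks in
 * sixteen steps selected by the 4-bit divider field.
 */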
div = (reg >> 16) & 0x0f;
return fbus + ((fsys - fbus) * (div + 1)) / 16;
}
static unsigned long get_rate_root(struct clk *clk)
{
return root_clk_get_rate(clk->root_id);
}
static int aclk_set_parent(struct clk *clock, struct clk *parent)
{
u32 reg;
if (clock->parent == parent)
return 0;
clock->parent = parent;
if (!parent)
return 0;
if (parent->root_id == CLK_SRC_NOROOT)
return 0;
reg = __raw_readl(clock->aclkreg);
reg &= ~ACLK_SEL_MASK;
reg |= (parent->root_id << ACLK_SEL_SHIFT) & ACLK_SEL_MASK;
__raw_writel(reg, clock->aclkreg);
return 0;
}
#define DEFINE_ROOT_CLOCK(name, ri, p) \
static struct clk name = { \
.root_id = ri, \
.get_rate = get_rate_root, \
.enable = enable_clk, \
.disable = disable_clk, \
.parent = p, \
};
#define DEFINE_SPECIAL_CLOCK(name, gr, p) \
static struct clk name = { \
.root_id = CLK_SRC_NOROOT, \
.get_rate = gr, \
.parent = p, \
};
#define DEFINE_ACLOCK(name, bc, bs, ar) \
static struct clk name = { \
.root_id = CLK_SRC_NOROOT, \
.bclkctr = bc, \
.bclk_shift = bs, \
.aclkreg = ar, \
.get_rate = aclk_get_rate, \
.set_rate = aclk_set_rate, \
.round_rate = aclk_round_rate, \
.enable = enable_clk, \
.disable = disable_clk, \
.set_parent = aclk_set_parent, \
};
#define DEFINE_BCLOCK(name, bc, bs, gr, p) \
static struct clk name = { \
.root_id = CLK_SRC_NOROOT, \
.bclkctr = bc, \
.bclk_shift = bs, \
.get_rate = gr, \
.enable = enable_clk, \
.disable = disable_clk, \
.parent = p, \
};
DEFINE_ROOT_CLOCK(xi, CLK_SRC_XI, NULL)
DEFINE_ROOT_CLOCK(xti, CLK_SRC_XTI, NULL)
DEFINE_ROOT_CLOCK(xidiv, CLK_SRC_XIDIV, &xi)
DEFINE_ROOT_CLOCK(xtidiv, CLK_SRC_XTIDIV, &xti)
DEFINE_ROOT_CLOCK(pll0, CLK_SRC_PLL0, &xi)
DEFINE_ROOT_CLOCK(pll1, CLK_SRC_PLL1, &xi)
DEFINE_ROOT_CLOCK(pll2, CLK_SRC_PLL2, &xi)
DEFINE_ROOT_CLOCK(pll0div, CLK_SRC_PLL0DIV, &pll0)
DEFINE_ROOT_CLOCK(pll1div, CLK_SRC_PLL1DIV, &pll1)
DEFINE_ROOT_CLOCK(pll2div, CLK_SRC_PLL2DIV, &pll2)
/* The following 3 clocks are special and are initialized explicitly later */
DEFINE_SPECIAL_CLOCK(sys, get_rate_sys, NULL)
DEFINE_SPECIAL_CLOCK(bus, get_rate_bus, &sys)
DEFINE_SPECIAL_CLOCK(cpu, get_rate_cpu, &sys)
DEFINE_ACLOCK(tct, NULL, 0, ACLKTCT)
DEFINE_ACLOCK(tcx, NULL, 0, ACLKTCX)
DEFINE_ACLOCK(tcz, NULL, 0, ACLKTCZ)
DEFINE_ACLOCK(ref, NULL, 0, ACLKREF)
DEFINE_ACLOCK(uart0, BCLKCTR0, 5, ACLKUART0)
DEFINE_ACLOCK(uart1, BCLKCTR0, 23, ACLKUART1)
DEFINE_ACLOCK(uart2, BCLKCTR0, 6, ACLKUART2)
DEFINE_ACLOCK(uart3, BCLKCTR0, 8, ACLKUART3)
DEFINE_ACLOCK(uart4, BCLKCTR1, 6, ACLKUART4)
DEFINE_ACLOCK(i2c, BCLKCTR0, 7, ACLKI2C)
DEFINE_ACLOCK(adc, BCLKCTR0, 10, ACLKADC)
DEFINE_ACLOCK(usbh0, BCLKCTR0, 11, ACLKUSBH)
DEFINE_ACLOCK(lcd, BCLKCTR0, 13, ACLKLCD)
DEFINE_ACLOCK(sd0, BCLKCTR0, 17, ACLKSDH0)
DEFINE_ACLOCK(sd1, BCLKCTR1, 5, ACLKSDH1)
DEFINE_ACLOCK(spi0, BCLKCTR0, 24, ACLKSPI0)
DEFINE_ACLOCK(spi1, BCLKCTR0, 30, ACLKSPI1)
DEFINE_ACLOCK(spdif, BCLKCTR1, 2, ACLKSPDIF)
DEFINE_ACLOCK(c3dec, BCLKCTR1, 9, ACLKC3DEC)
DEFINE_ACLOCK(can0, BCLKCTR1, 10, ACLKCAN0)
DEFINE_ACLOCK(can1, BCLKCTR1, 11, ACLKCAN1)
DEFINE_ACLOCK(gsb0, BCLKCTR1, 13, ACLKGSB0)
DEFINE_ACLOCK(gsb1, BCLKCTR1, 14, ACLKGSB1)
DEFINE_ACLOCK(gsb2, BCLKCTR1, 15, ACLKGSB2)
DEFINE_ACLOCK(gsb3, BCLKCTR1, 16, ACLKGSB3)
DEFINE_ACLOCK(usbh1, BCLKCTR1, 20, ACLKUSBH)
DEFINE_BCLOCK(dai0, BCLKCTR0, 0, NULL, NULL)
DEFINE_BCLOCK(pic, BCLKCTR0, 1, NULL, NULL)
DEFINE_BCLOCK(tc, BCLKCTR0, 2, NULL, NULL)
DEFINE_BCLOCK(gpio, BCLKCTR0, 3, NULL, NULL)
DEFINE_BCLOCK(usbd, BCLKCTR0, 4, NULL, NULL)
DEFINE_BCLOCK(ecc, BCLKCTR0, 9, NULL, NULL)
DEFINE_BCLOCK(gdma0, BCLKCTR0, 12, NULL, NULL)
DEFINE_BCLOCK(rtc, BCLKCTR0, 15, NULL, NULL)
DEFINE_BCLOCK(nfc, BCLKCTR0, 16, NULL, NULL)
DEFINE_BCLOCK(g2d, BCLKCTR0, 18, NULL, NULL)
DEFINE_BCLOCK(gdma1, BCLKCTR0, 22, NULL, NULL)
DEFINE_BCLOCK(mscl, BCLKCTR0, 25, NULL, NULL)
DEFINE_BCLOCK(bdma, BCLKCTR1, 0, NULL, NULL)
DEFINE_BCLOCK(adma0, BCLKCTR1, 1, NULL, NULL)
DEFINE_BCLOCK(scfg, BCLKCTR1, 3, NULL, NULL)
DEFINE_BCLOCK(cid, BCLKCTR1, 4, NULL, NULL)
DEFINE_BCLOCK(dai1, BCLKCTR1, 7, NULL, NULL)
DEFINE_BCLOCK(adma1, BCLKCTR1, 8, NULL, NULL)
DEFINE_BCLOCK(gps, BCLKCTR1, 12, NULL, NULL)
DEFINE_BCLOCK(gdma2, BCLKCTR1, 17, NULL, NULL)
DEFINE_BCLOCK(gdma3, BCLKCTR1, 18, NULL, NULL)
DEFINE_BCLOCK(ddrc, BCLKCTR1, 19, NULL, NULL)
#define _REGISTER_CLOCK(d, n, c) \
{ \
.dev_id = d, \
.con_id = n, \
.clk = &c, \
},
static struct clk_lookup lookups[] = {
_REGISTER_CLOCK(NULL, "bus", bus)
_REGISTER_CLOCK(NULL, "cpu", cpu)
_REGISTER_CLOCK(NULL, "tct", tct)
_REGISTER_CLOCK(NULL, "tcx", tcx)
_REGISTER_CLOCK(NULL, "tcz", tcz)
_REGISTER_CLOCK(NULL, "ref", ref)
_REGISTER_CLOCK(NULL, "dai0", dai0)
_REGISTER_CLOCK(NULL, "pic", pic)
_REGISTER_CLOCK(NULL, "tc", tc)
_REGISTER_CLOCK(NULL, "gpio", gpio)
_REGISTER_CLOCK(NULL, "usbd", usbd)
_REGISTER_CLOCK("tcc-uart.0", NULL, uart0)
_REGISTER_CLOCK("tcc-uart.2", NULL, uart2)
_REGISTER_CLOCK("tcc-i2c", NULL, i2c)
_REGISTER_CLOCK("tcc-uart.3", NULL, uart3)
_REGISTER_CLOCK(NULL, "ecc", ecc)
_REGISTER_CLOCK(NULL, "adc", adc)
_REGISTER_CLOCK("tcc-usbh.0", "usb", usbh0)
_REGISTER_CLOCK(NULL, "gdma0", gdma0)
_REGISTER_CLOCK(NULL, "lcd", lcd)
_REGISTER_CLOCK(NULL, "rtc", rtc)
_REGISTER_CLOCK(NULL, "nfc", nfc)
_REGISTER_CLOCK("tcc-mmc.0", NULL, sd0)
_REGISTER_CLOCK(NULL, "g2d", g2d)
_REGISTER_CLOCK(NULL, "gdma1", gdma1)
_REGISTER_CLOCK("tcc-uart.1", NULL, uart1)
_REGISTER_CLOCK("tcc-spi.0", NULL, spi0)
_REGISTER_CLOCK(NULL, "mscl", mscl)
_REGISTER_CLOCK("tcc-spi.1", NULL, spi1)
_REGISTER_CLOCK(NULL, "bdma", bdma)
_REGISTER_CLOCK(NULL, "adma0", adma0)
_REGISTER_CLOCK(NULL, "spdif", spdif)
_REGISTER_CLOCK(NULL, "scfg", scfg)
_REGISTER_CLOCK(NULL, "cid", cid)
_REGISTER_CLOCK("tcc-mmc.1", NULL, sd1)
_REGISTER_CLOCK("tcc-uart.4", NULL, uart4)
_REGISTER_CLOCK(NULL, "dai1", dai1)
_REGISTER_CLOCK(NULL, "adma1", adma1)
_REGISTER_CLOCK(NULL, "c3dec", c3dec)
_REGISTER_CLOCK("tcc-can.0", NULL, can0)
_REGISTER_CLOCK("tcc-can.1", NULL, can1)
_REGISTER_CLOCK(NULL, "gps", gps)
_REGISTER_CLOCK("tcc-gsb.0", NULL, gsb0)
_REGISTER_CLOCK("tcc-gsb.1", NULL, gsb1)
_REGISTER_CLOCK("tcc-gsb.2", NULL, gsb2)
_REGISTER_CLOCK("tcc-gsb.3", NULL, gsb3)
_REGISTER_CLOCK(NULL, "gdma2", gdma2)
_REGISTER_CLOCK(NULL, "gdma3", gdma3)
_REGISTER_CLOCK(NULL, "ddrc", ddrc)
_REGISTER_CLOCK("tcc-usbh.1", "usb", usbh1)
};
static struct clk *root_clk_by_index(enum root_clks src)
{
switch (src) {
case CLK_SRC_PLL0: return &pll0;
case CLK_SRC_PLL1: return &pll1;
case CLK_SRC_PLL2: return &pll2;
case CLK_SRC_PLL0DIV: return &pll0div;
case CLK_SRC_PLL1DIV: return &pll1div;
case CLK_SRC_PLL2DIV: return &pll2div;
case CLK_SRC_XI: return &xi;
case CLK_SRC_XTI: return &xti;
case CLK_SRC_XIDIV: return &xidiv;
case CLK_SRC_XTIDIV: return &xtidiv;
default: return NULL;
}
}
static void find_aclk_parent(struct clk *clk)
{
unsigned int src;
struct clk *clock;
if (!clk->aclkreg)
return;
src = __raw_readl(clk->aclkreg) >> ACLK_SEL_SHIFT;
src &= CLK_SRC_MASK;
clock = root_clk_by_index(src);
if (!clock)
return;
clk->parent = clock;
clk->set_parent = aclk_set_parent;
}
void __init tcc_clocks_init(unsigned long xi_freq, unsigned long xti_freq)
{
int i;
xi_rate = xi_freq;
xti_rate = xti_freq;
/* fixup parents and add the clock */
for (i = 0; i < ARRAY_SIZE(lookups); i++) {
find_aclk_parent(lookups[i].clk);
clkdev_add(&lookups[i]);
}
tcc8k_timer_init(&tcz, (void __iomem *)TIMER_BASE, INT_TC32);
}
| gpl-2.0 |
boa19861105/android_kernel_htc_b3uhl-JP | drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c | 3253 | 4267 | /*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
#include <subdev/bios/gpio.h>
#include <subdev/bios/xpio.h>
u16
dcb_gpio_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
u16 data = 0x0000;
u16 dcb = dcb_table(bios, ver, hdr, cnt, len);
if (dcb) {
if (*ver >= 0x30 && *hdr >= 0x0c)
data = nv_ro16(bios, dcb + 0x0a);
else
if (*ver >= 0x22 && nv_ro08(bios, dcb - 1) >= 0x13)
data = nv_ro16(bios, dcb - 0x0f);
if (data) {
*ver = nv_ro08(bios, data + 0x00);
if (*ver < 0x30) {
*hdr = 3;
*cnt = nv_ro08(bios, data + 0x02);
*len = nv_ro08(bios, data + 0x01);
} else
if (*ver <= 0x41) {
*hdr = nv_ro08(bios, data + 0x01);
*cnt = nv_ro08(bios, data + 0x02);
*len = nv_ro08(bios, data + 0x03);
} else {
data = 0x0000;
}
}
}
return data;
}
u16
dcb_gpio_entry(struct nouveau_bios *bios, int idx, int ent, u8 *ver, u8 *len)
{
u8 hdr, cnt, xver; /* use gpio version for xpio entry parsing */
u16 gpio;
if (!idx--)
gpio = dcb_gpio_table(bios, ver, &hdr, &cnt, len);
else
gpio = dcb_xpio_table(bios, idx, &xver, &hdr, &cnt, len);
if (gpio && ent < cnt)
return gpio + hdr + (ent * *len);
return 0x0000;
}
u16
dcb_gpio_parse(struct nouveau_bios *bios, int idx, int ent, u8 *ver, u8 *len,
struct dcb_gpio_func *gpio)
{
u16 data = dcb_gpio_entry(bios, idx, ent, ver, len);
if (data) {
if (*ver < 0x40) {
u16 info = nv_ro16(bios, data);
*gpio = (struct dcb_gpio_func) {
.line = (info & 0x001f) >> 0,
.func = (info & 0x07e0) >> 5,
.log[0] = (info & 0x1800) >> 11,
.log[1] = (info & 0x6000) >> 13,
.param = !!(info & 0x8000),
};
} else
if (*ver < 0x41) {
u32 info = nv_ro32(bios, data);
*gpio = (struct dcb_gpio_func) {
.line = (info & 0x0000001f) >> 0,
.func = (info & 0x0000ff00) >> 8,
.log[0] = (info & 0x18000000) >> 27,
.log[1] = (info & 0x60000000) >> 29,
.param = !!(info & 0x80000000),
};
} else {
u32 info = nv_ro32(bios, data + 0);
u8 info1 = nv_ro32(bios, data + 4);
*gpio = (struct dcb_gpio_func) {
.line = (info & 0x0000003f) >> 0,
.func = (info & 0x0000ff00) >> 8,
.log[0] = (info1 & 0x30) >> 4,
.log[1] = (info1 & 0xc0) >> 6,
.param = !!(info & 0x80000000),
};
}
}
return data;
}
u16
dcb_gpio_match(struct nouveau_bios *bios, int idx, u8 func, u8 line,
u8 *ver, u8 *len, struct dcb_gpio_func *gpio)
{
u8 hdr, cnt, i = 0;
u16 data;
while ((data = dcb_gpio_parse(bios, idx, i++, ver, len, gpio))) {
if ((line == 0xff || line == gpio->line) &&
(func == 0xff || func == gpio->func))
return data;
}
/* DCB 2.2, fixed TVDAC GPIO data */
if ((data = dcb_table(bios, ver, &hdr, &cnt, len))) {
if (*ver >= 0x22 && *ver < 0x30 && func == DCB_GPIO_TVDAC0) {
u8 conf = nv_ro08(bios, data - 5);
u8 addr = nv_ro08(bios, data - 4);
if (conf & 0x01) {
*gpio = (struct dcb_gpio_func) {
.func = DCB_GPIO_TVDAC0,
.line = addr >> 4,
.log[0] = !!(conf & 0x02),
.log[1] = !(conf & 0x02),
};
*ver = 0x00;
return data;
}
}
}
return 0x0000;
}
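/*
 * Hedged usage sketch (editorial addition, not in the original file):
 * callers typically probe for a function id with the 0xff "any line"
 * wildcard, as the gpio subdev code does. The helper name below is
 * invented for illustration; func would be a DCB_GPIO_* id such as
 * DCB_GPIO_PANEL_POWER.
 */
static inline bool
example_find_gpio(struct nouveau_bios *bios, u8 func,
		  struct dcb_gpio_func *gpio)
{
	u8 ver, len;

	/* idx 0 selects the DCB GPIO table itself (not an XPIO table) */
	return dcb_gpio_match(bios, 0, func, 0xff, &ver, &len, gpio) != 0x0000;
}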
| gpl-2.0 |
ShinySide/SM-T900_Kernel | net/caif/cfsrvl.c | 4789 | 5478 | /*
* Copyright (C) ST-Ericsson AB 2010
* Author: Sjur Brendeland/sjur.brandeland@stericsson.com
* License terms: GNU General Public License (GPL) version 2
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfsrvl.h>
#include <net/caif/cfpkt.h>
#define SRVL_CTRL_PKT_SIZE 1
#define SRVL_FLOW_OFF 0x81
#define SRVL_FLOW_ON 0x80
#define SRVL_SET_PIN 0x82
#define container_obj(layr) container_of(layr, struct cfsrvl, layer)
static void cfservl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
int phyid)
{
struct cfsrvl *service = container_obj(layr);
if (layr->up == NULL || layr->up->ctrlcmd == NULL)
return;
switch (ctrl) {
case CAIF_CTRLCMD_INIT_RSP:
service->open = true;
layr->up->ctrlcmd(layr->up, ctrl, phyid);
break;
case CAIF_CTRLCMD_DEINIT_RSP:
case CAIF_CTRLCMD_INIT_FAIL_RSP:
service->open = false;
layr->up->ctrlcmd(layr->up, ctrl, phyid);
break;
case _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND:
if (phyid != service->dev_info.id)
break;
if (service->modem_flow_on)
layr->up->ctrlcmd(layr->up,
CAIF_CTRLCMD_FLOW_OFF_IND, phyid);
service->phy_flow_on = false;
break;
case _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND:
if (phyid != service->dev_info.id)
return;
if (service->modem_flow_on) {
layr->up->ctrlcmd(layr->up,
CAIF_CTRLCMD_FLOW_ON_IND,
phyid);
}
service->phy_flow_on = true;
break;
case CAIF_CTRLCMD_FLOW_OFF_IND:
if (service->phy_flow_on) {
layr->up->ctrlcmd(layr->up,
CAIF_CTRLCMD_FLOW_OFF_IND, phyid);
}
service->modem_flow_on = false;
break;
case CAIF_CTRLCMD_FLOW_ON_IND:
if (service->phy_flow_on) {
layr->up->ctrlcmd(layr->up,
CAIF_CTRLCMD_FLOW_ON_IND, phyid);
}
service->modem_flow_on = true;
break;
case _CAIF_CTRLCMD_PHYIF_DOWN_IND:
/* In case interface is down, let's fake a remote shutdown */
layr->up->ctrlcmd(layr->up,
CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND, phyid);
break;
case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
layr->up->ctrlcmd(layr->up, ctrl, phyid);
break;
default:
pr_warn("Unexpected ctrl in cfsrvl (%d)\n", ctrl);
/* We have both modem and phy flow on, send flow on */
layr->up->ctrlcmd(layr->up, ctrl, phyid);
service->phy_flow_on = true;
break;
}
}
static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
{
struct cfsrvl *service = container_obj(layr);
caif_assert(layr != NULL);
caif_assert(layr->dn != NULL);
caif_assert(layr->dn->transmit != NULL);
if (!service->supports_flowctrl)
return 0;
switch (ctrl) {
case CAIF_MODEMCMD_FLOW_ON_REQ:
{
struct cfpkt *pkt;
struct caif_payload_info *info;
u8 flow_on = SRVL_FLOW_ON;
pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE);
if (!pkt)
return -ENOMEM;
if (cfpkt_add_head(pkt, &flow_on, 1) < 0) {
pr_err("Packet is erroneous!\n");
cfpkt_destroy(pkt);
return -EPROTO;
}
info = cfpkt_info(pkt);
info->channel_id = service->layer.id;
info->hdr_len = 1;
info->dev_info = &service->dev_info;
return layr->dn->transmit(layr->dn, pkt);
}
case CAIF_MODEMCMD_FLOW_OFF_REQ:
{
struct cfpkt *pkt;
struct caif_payload_info *info;
u8 flow_off = SRVL_FLOW_OFF;
pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE);
if (!pkt)
return -ENOMEM;
if (cfpkt_add_head(pkt, &flow_off, 1) < 0) {
pr_err("Packet is erroneous!\n");
cfpkt_destroy(pkt);
return -EPROTO;
}
info = cfpkt_info(pkt);
info->channel_id = service->layer.id;
info->hdr_len = 1;
info->dev_info = &service->dev_info;
return layr->dn->transmit(layr->dn, pkt);
}
default:
break;
}
return -EINVAL;
}
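/*
 * Editorial note: both request branches above build a one-byte control
 * packet (SRVL_CTRL_PKT_SIZE) whose single payload byte is SRVL_FLOW_ON
 * (0x80) or SRVL_FLOW_OFF (0x81), stamp it with the service's channel
 * id and a 1-byte header length, and hand it straight to the layer
 * below for transmission.
 */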
static void cfsrvl_release(struct cflayer *layer)
{
struct cfsrvl *service = container_of(layer, struct cfsrvl, layer);
kfree(service);
}
void cfsrvl_init(struct cfsrvl *service,
u8 channel_id,
struct dev_info *dev_info,
bool supports_flowctrl
)
{
caif_assert(offsetof(struct cfsrvl, layer) == 0);
service->open = false;
service->modem_flow_on = true;
service->phy_flow_on = true;
service->layer.id = channel_id;
service->layer.ctrlcmd = cfservl_ctrlcmd;
service->layer.modemcmd = cfservl_modemcmd;
service->dev_info = *dev_info;
service->supports_flowctrl = supports_flowctrl;
service->release = cfsrvl_release;
}
bool cfsrvl_ready(struct cfsrvl *service, int *err)
{
if (!service->open) {
*err = -ENOTCONN;
return false;
}
return true;
}
u8 cfsrvl_getphyid(struct cflayer *layer)
{
struct cfsrvl *servl = container_obj(layer);
return servl->dev_info.id;
}
bool cfsrvl_phyid_match(struct cflayer *layer, int phyid)
{
struct cfsrvl *servl = container_obj(layer);
return servl->dev_info.id == phyid;
}
void caif_free_client(struct cflayer *adap_layer)
{
struct cfsrvl *servl;
if (adap_layer == NULL || adap_layer->dn == NULL)
return;
servl = container_obj(adap_layer->dn);
servl->release(&servl->layer);
}
EXPORT_SYMBOL(caif_free_client);
void caif_client_register_refcnt(struct cflayer *adapt_layer,
void (*hold)(struct cflayer *lyr),
void (*put)(struct cflayer *lyr))
{
struct cfsrvl *service;
service = container_of(adapt_layer->dn, struct cfsrvl, layer);
WARN_ON(adapt_layer == NULL || adapt_layer->dn == NULL);
service->hold = hold;
service->put = put;
}
EXPORT_SYMBOL(caif_client_register_refcnt);
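/*
 * Hedged usage sketch (editorial addition): concrete CAIF service
 * layers embed struct cfsrvl first in their own state - hence the
 * offsetof() assert in cfsrvl_init() - and call cfsrvl_init() from
 * their create function. The my_service type below is invented for
 * illustration; a real layer would also set layer.receive and
 * layer.transmit.
 */
struct my_service {
	struct cfsrvl serv;	/* must be first member */
};

static struct cflayer * __maybe_unused
my_service_create(u8 channel_id, struct dev_info *dev_info)
{
	struct my_service *svc = kzalloc(sizeof(*svc), GFP_KERNEL);

	if (!svc)
		return NULL;
	cfsrvl_init(&svc->serv, channel_id, dev_info, true);
	return &svc->serv.layer;
}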
| gpl-2.0 |
sirmordred/android_kernel_samsung_ray | drivers/acpi/acpica/exoparg3.c | 5045 | 8040 |
/******************************************************************************
*
* Module Name: exoparg3 - AML execution - opcodes with 3 arguments
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acinterp.h"
#include "acparser.h"
#include "amlcode.h"
#define _COMPONENT ACPI_EXECUTER
ACPI_MODULE_NAME("exoparg3")
/*!
* Naming convention for AML interpreter execution routines.
*
* The routines that begin execution of AML opcodes are named with a common
* convention based upon the number of arguments, the number of target operands,
* and whether or not a value is returned:
*
* AcpiExOpcode_xA_yT_zR
*
* Where:
*
* xA - ARGUMENTS: The number of arguments (input operands) that are
* required for this opcode type (1 through 6 args).
* yT - TARGETS: The number of targets (output operands) that are required
* for this opcode type (0, 1, or 2 targets).
* zR - RETURN VALUE: Indicates whether this opcode type returns a value
* as the function return (0 or 1).
*
* The AcpiExOpcode* functions are called via the Dispatcher component with
* fully resolved operands.
!*/
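/*
 * Editorial example of the convention above: acpi_ex_opcode_3A_1T_1R
 * (defined below) takes three input operands, writes one target
 * operand, and produces one return value - exactly the shape of the
 * AML Mid (Source, Index, Length, Result) operator it implements.
 */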
/*******************************************************************************
*
* FUNCTION: acpi_ex_opcode_3A_0T_0R
*
* PARAMETERS: walk_state - Current walk state
*
* RETURN: Status
*
* DESCRIPTION: Execute Triadic operator (3 operands)
*
******************************************************************************/
acpi_status acpi_ex_opcode_3A_0T_0R(struct acpi_walk_state *walk_state)
{
union acpi_operand_object **operand = &walk_state->operands[0];
struct acpi_signal_fatal_info *fatal;
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE_STR(ex_opcode_3A_0T_0R,
acpi_ps_get_opcode_name(walk_state->opcode));
switch (walk_state->opcode) {
case AML_FATAL_OP: /* Fatal (fatal_type fatal_code fatal_arg) */
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"FatalOp: Type %X Code %X Arg %X <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n",
(u32) operand[0]->integer.value,
(u32) operand[1]->integer.value,
(u32) operand[2]->integer.value));
fatal = ACPI_ALLOCATE(sizeof(struct acpi_signal_fatal_info));
if (fatal) {
fatal->type = (u32) operand[0]->integer.value;
fatal->code = (u32) operand[1]->integer.value;
fatal->argument = (u32) operand[2]->integer.value;
}
/* Always signal the OS! */
status = acpi_os_signal(ACPI_SIGNAL_FATAL, fatal);
/* Might return while OS is shutting down, just continue */
ACPI_FREE(fatal);
break;
default:
ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
}
cleanup:
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_opcode_3A_1T_1R
*
* PARAMETERS: walk_state - Current walk state
*
* RETURN: Status
*
* DESCRIPTION: Execute Triadic operator (3 operands)
*
******************************************************************************/
acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state)
{
union acpi_operand_object **operand = &walk_state->operands[0];
union acpi_operand_object *return_desc = NULL;
char *buffer = NULL;
acpi_status status = AE_OK;
u64 index;
acpi_size length;
ACPI_FUNCTION_TRACE_STR(ex_opcode_3A_1T_1R,
acpi_ps_get_opcode_name(walk_state->opcode));
switch (walk_state->opcode) {
case AML_MID_OP: /* Mid (Source[0], Index[1], Length[2], Result[3]) */
/*
* Create the return object. The Source operand is guaranteed to be
* either a String or a Buffer, so just use its type.
*/
return_desc = acpi_ut_create_internal_object((operand[0])->
common.type);
if (!return_desc) {
status = AE_NO_MEMORY;
goto cleanup;
}
/* Get the Integer values from the objects */
index = operand[1]->integer.value;
length = (acpi_size) operand[2]->integer.value;
/*
* If the index is beyond the length of the String/Buffer, or if the
* requested length is zero, return a zero-length String/Buffer
*/
if (index >= operand[0]->string.length) {
length = 0;
}
/* Truncate request if larger than the actual String/Buffer */
else if ((index + length) > operand[0]->string.length) {
length = (acpi_size) operand[0]->string.length -
(acpi_size) index;
}
/* Strings always have a sub-pointer, not so for buffers */
switch ((operand[0])->common.type) {
case ACPI_TYPE_STRING:
/* Always allocate a new buffer for the String */
buffer = ACPI_ALLOCATE_ZEROED((acpi_size) length + 1);
if (!buffer) {
status = AE_NO_MEMORY;
goto cleanup;
}
break;
case ACPI_TYPE_BUFFER:
/* If the requested length is zero, don't allocate a buffer */
if (length > 0) {
/* Allocate a new buffer for the Buffer */
buffer = ACPI_ALLOCATE_ZEROED(length);
if (!buffer) {
status = AE_NO_MEMORY;
goto cleanup;
}
}
break;
default: /* Should not happen */
status = AE_AML_OPERAND_TYPE;
goto cleanup;
}
if (buffer) {
/* We have a buffer, copy the portion requested */
ACPI_MEMCPY(buffer, operand[0]->string.pointer + index,
length);
}
/* Set the length of the new String/Buffer */
return_desc->string.pointer = buffer;
return_desc->string.length = (u32) length;
/* Mark buffer initialized */
return_desc->buffer.flags |= AOPOBJ_DATA_VALID;
break;
default:
ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
}
/* Store the result in the target */
status = acpi_ex_store(return_desc, operand[3], walk_state);
cleanup:
/* Delete return object on error */
if (ACPI_FAILURE(status) || walk_state->result_obj) {
acpi_ut_remove_reference(return_desc);
walk_state->result_obj = NULL;
}
/* Set the return object and exit */
else {
walk_state->result_obj = return_desc;
}
return_ACPI_STATUS(status);
}
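/*
 * Editorial worked example (not ACPICA text): for Mid("ABCDEFGH", 2, 3)
 * the code above sees index = 2 < 8 (no zero-length result) and
 * index + length = 5 <= 8 (no truncation), allocates length + 1 bytes
 * for the NUL-terminated result, and copies 3 bytes starting at
 * offset 2, yielding the string "CDE".
 */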
| gpl-2.0 |
lollipop-og/bricked-geehrc | drivers/media/video/cx23885/cx23885-input.c | 5557 | 10179 | /*
* Driver for the Conexant CX23885/7/8 PCIe bridge
*
* Infrared remote control input device
*
* Most of this file is
*
* Copyright (C) 2009 Andy Walls <awalls@md.metrocast.net>
*
* However, the cx23885_input_{init,fini} functions contained herein are
* derived from Linux kernel files linux/media/video/.../...-input.c marked as:
*
* Copyright (C) 2008 <srinivasa.deevi at conexant dot com>
* Copyright (C) 2005 Ludovico Cavedon <cavedon@sssup.it>
* Markus Rechberger <mrechberger@gmail.com>
* Mauro Carvalho Chehab <mchehab@infradead.org>
* Sascha Sommer <saschasommer@freenet.de>
* Copyright (C) 2004, 2005 Chris Pascoe
* Copyright (C) 2003, 2004 Gerd Knorr
* Copyright (C) 2003 Pavel Machek
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/slab.h>
#include <media/rc-core.h>
#include <media/v4l2-subdev.h>
#include "cx23885.h"
#define MODULE_NAME "cx23885"
static void cx23885_input_process_measurements(struct cx23885_dev *dev,
bool overrun)
{
struct cx23885_kernel_ir *kernel_ir = dev->kernel_ir;
ssize_t num;
int count, i;
bool handle = false;
struct ir_raw_event ir_core_event[64];
do {
num = 0;
v4l2_subdev_call(dev->sd_ir, ir, rx_read, (u8 *) ir_core_event,
sizeof(ir_core_event), &num);
count = num / sizeof(struct ir_raw_event);
for (i = 0; i < count; i++) {
ir_raw_event_store(kernel_ir->rc,
&ir_core_event[i]);
handle = true;
}
} while (num != 0);
if (overrun)
ir_raw_event_reset(kernel_ir->rc);
else if (handle)
ir_raw_event_handle(kernel_ir->rc);
}
void cx23885_input_rx_work_handler(struct cx23885_dev *dev, u32 events)
{
struct v4l2_subdev_ir_parameters params;
int overrun, data_available;
if (dev->sd_ir == NULL || events == 0)
return;
switch (dev->board) {
case CX23885_BOARD_HAUPPAUGE_HVR1270:
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
case CX23885_BOARD_TEVII_S470:
case CX23885_BOARD_HAUPPAUGE_HVR1250:
/*
* The only boards we handle right now. However, other boards
* using the CX2388x integrated IR controller should be similar.
*/
break;
default:
return;
}
overrun = events & (V4L2_SUBDEV_IR_RX_SW_FIFO_OVERRUN |
V4L2_SUBDEV_IR_RX_HW_FIFO_OVERRUN);
data_available = events & (V4L2_SUBDEV_IR_RX_END_OF_RX_DETECTED |
V4L2_SUBDEV_IR_RX_FIFO_SERVICE_REQ);
if (overrun) {
/* If there was a FIFO overrun, stop the device */
v4l2_subdev_call(dev->sd_ir, ir, rx_g_parameters, &params);
params.enable = false;
/* Mitigate race with cx23885_input_ir_stop() */
params.shutdown = atomic_read(&dev->ir_input_stopping);
v4l2_subdev_call(dev->sd_ir, ir, rx_s_parameters, &params);
}
if (data_available)
cx23885_input_process_measurements(dev, overrun);
if (overrun) {
/* If there was a FIFO overrun, clear & restart the device */
params.enable = true;
/* Mitigate race with cx23885_input_ir_stop() */
params.shutdown = atomic_read(&dev->ir_input_stopping);
v4l2_subdev_call(dev->sd_ir, ir, rx_s_parameters, &params);
}
}
static int cx23885_input_ir_start(struct cx23885_dev *dev)
{
struct v4l2_subdev_ir_parameters params;
if (dev->sd_ir == NULL)
return -ENODEV;
atomic_set(&dev->ir_input_stopping, 0);
v4l2_subdev_call(dev->sd_ir, ir, rx_g_parameters, &params);
switch (dev->board) {
case CX23885_BOARD_HAUPPAUGE_HVR1270:
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
case CX23885_BOARD_HAUPPAUGE_HVR1250:
/*
* The IR controller on this board only returns pulse widths.
* Any other mode setting will fail to set up the device.
*/
params.mode = V4L2_SUBDEV_IR_MODE_PULSE_WIDTH;
params.enable = true;
params.interrupt_enable = true;
params.shutdown = false;
/* Setup for baseband compatible with both RC-5 and RC-6A */
params.modulation = false;
/* RC-5: 2,222,222 ns = 1/36 kHz * 32 cycles * 2 marks * 1.25*/
/* RC-6A: 3,333,333 ns = 1/36 kHz * 16 cycles * 6 marks * 1.25*/
params.max_pulse_width = 3333333; /* ns */
/* RC-5: 666,667 ns = 1/36 kHz * 32 cycles * 1 mark * 0.75 */
/* RC-6A: 333,333 ns = 1/36 kHz * 16 cycles * 1 mark * 0.75 */
params.noise_filter_min_width = 333333; /* ns */
/*
* This board has inverted receive sense:
* mark is received as low logic level;
* falling edges are detected as rising edges; etc.
*/
params.invert_level = true;
break;
case CX23885_BOARD_TEVII_S470:
/*
* The IR controller on this board only returns pulse widths.
* Any other mode setting will fail to set up the device.
*/
params.mode = V4L2_SUBDEV_IR_MODE_PULSE_WIDTH;
params.enable = true;
params.interrupt_enable = true;
params.shutdown = false;
/* Setup for a standard NEC protocol */
params.carrier_freq = 37917; /* Hz, 455 kHz/12 for NEC */
params.carrier_range_lower = 33000; /* Hz */
params.carrier_range_upper = 43000; /* Hz */
params.duty_cycle = 33; /* percent, 33 percent for NEC */
/*
* NEC max pulse width: (64/3)/(455 kHz/12) * 16 nec_units
* (64/3)/(455 kHz/12) * 16 nec_units * 1.375 = 12378022 ns
*/
params.max_pulse_width = 12378022; /* ns */
/*
* NEC noise filter min width: (64/3)/(455 kHz/12) * 1 nec_unit
* (64/3)/(455 kHz/12) * 1 nec_units * 0.625 = 351648 ns
*/
params.noise_filter_min_width = 351648; /* ns */
params.modulation = false;
params.invert_level = true;
break;
}
v4l2_subdev_call(dev->sd_ir, ir, rx_s_parameters, &params);
return 0;
}
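/*
 * Editorial worked example of the NEC timing math above: one NEC unit
 * is (64/3) carrier periods at 455 kHz / 12 ~= 37917 Hz, i.e.
 * (64/3) / 37917 Hz ~= 562,637 ns. The 16-unit maximum pulse with a
 * 1.375 margin gives 16 * 562637 * 1.375 ~= 12,378,022 ns, and the
 * 1-unit noise filter with a 0.625 margin gives 562637 * 0.625 ~=
 * 351,648 ns - the two constants programmed for the TeVii S470.
 */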
static int cx23885_input_ir_open(struct rc_dev *rc)
{
struct cx23885_kernel_ir *kernel_ir = rc->priv;
if (kernel_ir->cx == NULL)
return -ENODEV;
return cx23885_input_ir_start(kernel_ir->cx);
}
static void cx23885_input_ir_stop(struct cx23885_dev *dev)
{
struct v4l2_subdev_ir_parameters params;
if (dev->sd_ir == NULL)
return;
/*
* Stop the sd_ir subdevice from generating notifications and
* scheduling work.
* It is shutdown this way in order to mitigate a race with
* cx23885_input_rx_work_handler() in the overrun case, which could
* re-enable the subdevice.
*/
atomic_set(&dev->ir_input_stopping, 1);
v4l2_subdev_call(dev->sd_ir, ir, rx_g_parameters, &params);
while (params.shutdown == false) {
params.enable = false;
params.interrupt_enable = false;
params.shutdown = true;
v4l2_subdev_call(dev->sd_ir, ir, rx_s_parameters, &params);
v4l2_subdev_call(dev->sd_ir, ir, rx_g_parameters, &params);
}
flush_work_sync(&dev->cx25840_work);
flush_work_sync(&dev->ir_rx_work);
flush_work_sync(&dev->ir_tx_work);
}
static void cx23885_input_ir_close(struct rc_dev *rc)
{
struct cx23885_kernel_ir *kernel_ir = rc->priv;
if (kernel_ir->cx != NULL)
cx23885_input_ir_stop(kernel_ir->cx);
}
int cx23885_input_init(struct cx23885_dev *dev)
{
struct cx23885_kernel_ir *kernel_ir;
struct rc_dev *rc;
char *rc_map;
enum rc_driver_type driver_type;
unsigned long allowed_protos;
int ret;
/*
* If the IR device (hardware registers, chip, GPIO lines, etc.) isn't
* encapsulated in a v4l2_subdev, then I'm not going to deal with it.
*/
if (dev->sd_ir == NULL)
return -ENODEV;
switch (dev->board) {
case CX23885_BOARD_HAUPPAUGE_HVR1270:
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
case CX23885_BOARD_HAUPPAUGE_HVR1250:
/* Integrated CX2388[58] IR controller */
driver_type = RC_DRIVER_IR_RAW;
allowed_protos = RC_TYPE_ALL;
/* The grey Hauppauge RC-5 remote */
rc_map = RC_MAP_HAUPPAUGE;
break;
case CX23885_BOARD_TEVII_S470:
/* Integrated CX23885 IR controller */
driver_type = RC_DRIVER_IR_RAW;
allowed_protos = RC_TYPE_ALL;
/* A guess at the remote */
rc_map = RC_MAP_TEVII_NEC;
break;
default:
return -ENODEV;
}
/* cx23885 board instance kernel IR state */
kernel_ir = kzalloc(sizeof(struct cx23885_kernel_ir), GFP_KERNEL);
if (kernel_ir == NULL)
return -ENOMEM;
kernel_ir->cx = dev;
kernel_ir->name = kasprintf(GFP_KERNEL, "cx23885 IR (%s)",
cx23885_boards[dev->board].name);
kernel_ir->phys = kasprintf(GFP_KERNEL, "pci-%s/ir0",
pci_name(dev->pci));
/* input device */
rc = rc_allocate_device();
if (!rc) {
ret = -ENOMEM;
goto err_out_free;
}
kernel_ir->rc = rc;
rc->input_name = kernel_ir->name;
rc->input_phys = kernel_ir->phys;
rc->input_id.bustype = BUS_PCI;
rc->input_id.version = 1;
if (dev->pci->subsystem_vendor) {
rc->input_id.vendor = dev->pci->subsystem_vendor;
rc->input_id.product = dev->pci->subsystem_device;
} else {
rc->input_id.vendor = dev->pci->vendor;
rc->input_id.product = dev->pci->device;
}
rc->dev.parent = &dev->pci->dev;
rc->driver_type = driver_type;
rc->allowed_protos = allowed_protos;
rc->priv = kernel_ir;
rc->open = cx23885_input_ir_open;
rc->close = cx23885_input_ir_close;
rc->map_name = rc_map;
rc->driver_name = MODULE_NAME;
/* Go */
dev->kernel_ir = kernel_ir;
ret = rc_register_device(rc);
if (ret)
goto err_out_stop;
return 0;
err_out_stop:
cx23885_input_ir_stop(dev);
dev->kernel_ir = NULL;
rc_free_device(rc);
err_out_free:
kfree(kernel_ir->phys);
kfree(kernel_ir->name);
kfree(kernel_ir);
return ret;
}
void cx23885_input_fini(struct cx23885_dev *dev)
{
/* Always stop the IR hardware from generating interrupts */
cx23885_input_ir_stop(dev);
if (dev->kernel_ir == NULL)
return;
rc_unregister_device(dev->kernel_ir->rc);
kfree(dev->kernel_ir->phys);
kfree(dev->kernel_ir->name);
kfree(dev->kernel_ir);
dev->kernel_ir = NULL;
}
| gpl-2.0 |
ayushrox/GB_Kernel_Pico | arch/sh/boards/mach-sh7763rdp/irq.c | 13237 | 1119 | /*
* linux/arch/sh/boards/renesas/sh7763rdp/irq.c
*
* Renesas Solutions SH7763RDP Support.
*
* Copyright (C) 2008 Renesas Solutions Corp.
* Copyright (C) 2008 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/irq.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <mach/sh7763rdp.h>
#define INTC_BASE (0xFFD00000)
#define INTC_INT2PRI7 (INTC_BASE+0x4001C)
#define INTC_INT2MSKCR (INTC_BASE+0x4003C)
#define INTC_INT2MSKCR1 (INTC_BASE+0x400D4)
/*
* Initialize IRQ setting
*/
void __init init_sh7763rdp_IRQ(void)
{
/* GPIO enabled */
__raw_writel(1 << 25, INTC_INT2MSKCR);
/* enable GPIO interrupts */
__raw_writel((__raw_readl(INTC_INT2PRI7) & 0xFF00FFFF) | 0x000F0000,
INTC_INT2PRI7);
/* USBH enabled */
__raw_writel(1 << 17, INTC_INT2MSKCR1);
/* GETHER enabled */
__raw_writel(1 << 16, INTC_INT2MSKCR1);
/* DMAC enabled */
__raw_writel(1 << 8, INTC_INT2MSKCR);
}
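/*
 * Editorial note (assumption from the mask values above): the
 * INT2PRI7 update is a read-modify-write - 0xFF00FFFF clears the byte
 * at bits 23:16, which the comment above associates with the GPIO
 * source's priority field, and 0x000F0000 then writes priority 0xF
 * into bits 19:16 while leaving every other source's priority
 * untouched.
 */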
| gpl-2.0 |
surengrig/Milstone-XT720-kernel-upgrade | arch/arm/mach-at91/board-qil-a9260.c | 182 | 6417 | /*
* linux/arch/arm/mach-at91/board-qil-a9260.c
*
* Copyright (C) 2005 SAN People
* Copyright (C) 2006 Atmel
* Copyright (C) 2007 Calao-systems
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/gpio_keys.h>
#include <linux/input.h>
#include <linux/clk.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/irq.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <mach/hardware.h>
#include <mach/board.h>
#include <mach/gpio.h>
#include <mach/at91sam9_smc.h>
#include <mach/at91_shdwc.h>
#include "sam9_smc.h"
#include "generic.h"
static void __init ek_map_io(void)
{
/* Initialize processor: 12.000 MHz crystal */
at91sam9260_initialize(12000000);
/* DBGU on ttyS0. (Rx & Tx only) */
at91_register_uart(0, 0, 0);
/* USART0 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */
at91_register_uart(AT91SAM9260_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS
| ATMEL_UART_DTR | ATMEL_UART_DSR | ATMEL_UART_DCD
| ATMEL_UART_RI);
/* USART1 on ttyS2. (Rx, Tx, CTS, RTS) */
at91_register_uart(AT91SAM9260_ID_US1, 2, ATMEL_UART_CTS | ATMEL_UART_RTS);
/* USART2 on ttyS3. (Rx, Tx, CTS, RTS) */
at91_register_uart(AT91SAM9260_ID_US2, 3, ATMEL_UART_CTS | ATMEL_UART_RTS);
/* set serial console to ttyS1 (ie, USART0) */
at91_set_serial_console(1);
}
static void __init ek_init_irq(void)
{
at91sam9260_init_interrupts(NULL);
}
/*
* USB Host port
*/
static struct at91_usbh_data __initdata ek_usbh_data = {
.ports = 2,
};
/*
* USB Device port
*/
static struct at91_udc_data __initdata ek_udc_data = {
.vbus_pin = AT91_PIN_PC5,
.pullup_pin = 0, /* pull-up driven by UDC */
};
/*
* SPI devices.
*/
static struct spi_board_info ek_spi_devices[] = {
#if defined(CONFIG_RTC_DRV_M41T94)
{ /* M41T94 RTC */
.modalias = "m41t94",
.chip_select = 0,
.max_speed_hz = 1 * 1000 * 1000,
.bus_num = 0,
}
#endif
};
/*
* MACB Ethernet device
*/
static struct at91_eth_data __initdata ek_macb_data = {
.phy_irq_pin = AT91_PIN_PA31,
.is_rmii = 1,
};
/*
* NAND flash
*/
static struct mtd_partition __initdata ek_nand_partition[] = {
{
.name = "Uboot & Kernel",
.offset = 0,
.size = SZ_16M,
},
{
.name = "Root FS",
.offset = MTDPART_OFS_NXTBLK,
.size = 120 * SZ_1M,
},
{
.name = "FS",
.offset = MTDPART_OFS_NXTBLK,
.size = 120 * SZ_1M,
},
};
static struct mtd_partition * __init nand_partitions(int size, int *num_partitions)
{
*num_partitions = ARRAY_SIZE(ek_nand_partition);
return ek_nand_partition;
}
static struct atmel_nand_data __initdata ek_nand_data = {
.ale = 21,
.cle = 22,
// .det_pin = ... not connected
.rdy_pin = AT91_PIN_PC13,
.enable_pin = AT91_PIN_PC14,
.partition_info = nand_partitions,
};
static struct sam9_smc_config __initdata ek_nand_smc_config = {
.ncs_read_setup = 0,
.nrd_setup = 1,
.ncs_write_setup = 0,
.nwe_setup = 1,
.ncs_read_pulse = 3,
.nrd_pulse = 3,
.ncs_write_pulse = 3,
.nwe_pulse = 3,
.read_cycle = 5,
.write_cycle = 5,
.mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE | AT91_SMC_EXNWMODE_DISABLE | AT91_SMC_DBW_8,
.tdf_cycles = 2,
};
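/*
 * Editorial note (assumption based on the at91sam9 SMC timing layout):
 * each value above is in master-clock cycles. For reads, the 3-cycle
 * NRD pulse sits inside the 5-cycle read_cycle, with 1 cycle of
 * nrd_setup before the pulse and the remainder as hold; writes are
 * configured symmetrically.
 */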
static void __init ek_add_device_nand(void)
{
/* configure chip-select 3 (NAND) */
sam9_smc_configure(3, &ek_nand_smc_config);
at91_add_device_nand(&ek_nand_data);
}
/*
* MCI (SD/MMC)
*/
static struct at91_mmc_data __initdata ek_mmc_data = {
.slot_b = 0,
.wire4 = 1,
// .det_pin = ... not connected
// .wp_pin = ... not connected
// .vcc_pin = ... not connected
};
/*
* GPIO Buttons
*/
#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
static struct gpio_keys_button ek_buttons[] = {
{ /* USER PUSH BUTTON */
.code = KEY_ENTER,
.gpio = AT91_PIN_PB10,
.active_low = 1,
.desc = "user_pb",
.wakeup = 1,
}
};
static struct gpio_keys_platform_data ek_button_data = {
.buttons = ek_buttons,
.nbuttons = ARRAY_SIZE(ek_buttons),
};
static struct platform_device ek_button_device = {
.name = "gpio-keys",
.id = -1,
.num_resources = 0,
.dev = {
.platform_data = &ek_button_data,
}
};
static void __init ek_add_device_buttons(void)
{
at91_set_GPIO_periph(AT91_PIN_PB10, 1); /* user push button, pull up enabled */
at91_set_deglitch(AT91_PIN_PB10, 1);
platform_device_register(&ek_button_device);
}
#else
static void __init ek_add_device_buttons(void) {}
#endif
/*
* LEDs
*/
static struct gpio_led ek_leds[] = {
{ /* user_led (green) */
.name = "user_led",
.gpio = AT91_PIN_PB21,
.active_low = 0,
.default_trigger = "heartbeat",
}
};
static void __init ek_board_init(void)
{
/* Serial */
at91_add_device_serial();
/* USB Host */
at91_add_device_usbh(&ek_usbh_data);
/* USB Device */
at91_add_device_udc(&ek_udc_data);
/* SPI */
at91_add_device_spi(ek_spi_devices, ARRAY_SIZE(ek_spi_devices));
/* NAND */
ek_add_device_nand();
/* I2C */
at91_add_device_i2c(NULL, 0);
/* Ethernet */
at91_add_device_eth(&ek_macb_data);
/* MMC */
at91_add_device_mmc(0, &ek_mmc_data);
/* Push Buttons */
ek_add_device_buttons();
/* LEDs */
at91_gpio_leds(ek_leds, ARRAY_SIZE(ek_leds));
/* shutdown controller, wakeup button (5 msec low) */
at91_sys_write(AT91_SHDW_MR, AT91_SHDW_CPTWK0_(10) | AT91_SHDW_WKMODE0_LOW
| AT91_SHDW_RTTWKEN);
}
MACHINE_START(QIL_A9260, "CALAO QIL_A9260")
/* Maintainer: calao-systems */
.phys_io = AT91_BASE_SYS,
.io_pg_offst = (AT91_VA_BASE_SYS >> 18) & 0xfffc,
.boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
.map_io = ek_map_io,
.init_irq = ek_init_irq,
.init_machine = ek_board_init,
MACHINE_END
| gpl-2.0 |
ipts-linux-org/ipts-linux-new | drivers/ata/pata_marvell.c | 438 | 4294 | /*
* Marvell PATA driver.
*
* For the moment we drive the PATA port in legacy mode. That
* isn't making full use of the device functionality but it is
* easy to get working.
*
* (c) 2006 Red Hat
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/ata.h>
#define DRV_NAME "pata_marvell"
#define DRV_VERSION "0.1.6"
/**
* marvell_pata_active - check if PATA is active
* @pdev: PCI device
*
* Returns 1 if the PATA port may be active. We know how to check this
* for the 6145 but not the other devices
*/
static int marvell_pata_active(struct pci_dev *pdev)
{
int i;
u32 devices;
void __iomem *barp;
/* We don't yet know how to do this for other devices */
if (pdev->device != 0x6145)
return 1;
barp = pci_iomap(pdev, 5, 0x10);
if (barp == NULL)
return -ENOMEM;
printk("BAR5:");
for(i = 0; i <= 0x0F; i++)
printk("%02X:%02X ", i, ioread8(barp + i));
printk("\n");
devices = ioread32(barp + 0x0C);
pci_iounmap(pdev, barp);
if (devices & 0x10)
return 1;
return 0;
}
/**
* marvell_pre_reset - probe begin
* @link: link
* @deadline: deadline jiffies for the operation
*
* Perform the PATA port setup we need.
*/
static int marvell_pre_reset(struct ata_link *link, unsigned long deadline)
{
struct ata_port *ap = link->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
if (pdev->device == 0x6145 && ap->port_no == 0 &&
!marvell_pata_active(pdev)) /* PATA enable ? */
return -ENOENT;
return ata_sff_prereset(link, deadline);
}
static int marvell_cable_detect(struct ata_port *ap)
{
/* Cable type */
switch(ap->port_no)
{
case 0:
if (ioread8(ap->ioaddr.bmdma_addr + 1) & 1)
return ATA_CBL_PATA40;
return ATA_CBL_PATA80;
case 1: /* Legacy SATA port */
return ATA_CBL_SATA;
}
BUG();
return 0; /* Our BUG macro needs the right markup */
}
/* No PIO or DMA methods needed for this device */
static struct scsi_host_template marvell_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
static struct ata_port_operations marvell_ops = {
.inherits = &ata_bmdma_port_ops,
.cable_detect = marvell_cable_detect,
.prereset = marvell_pre_reset,
};
/**
* marvell_init_one - Register Marvell ATA PCI device with kernel services
* @pdev: PCI device to register
* @ent: Entry in marvell_pci_tbl matching with @pdev
*
* Called from kernel PCI layer.
*
* LOCKING:
* Inherited from PCI layer (may sleep).
*
* RETURNS:
* Zero on success, or -ERRNO value.
*/
static int marvell_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
{
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
.port_ops = &marvell_ops,
};
static const struct ata_port_info info_sata = {
/* Slave possible as it's magically mapped, not real */
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &marvell_ops,
};
const struct ata_port_info *ppi[] = { &info, &info_sata };
if (pdev->device == 0x6101)
ppi[1] = &ata_dummy_port_info;
#if IS_ENABLED(CONFIG_SATA_AHCI)
if (!marvell_pata_active(pdev)) {
printk(KERN_INFO DRV_NAME ": PATA port not active, deferring to AHCI driver.\n");
return -ENODEV;
}
#endif
return ata_pci_bmdma_init_one(pdev, ppi, &marvell_sht, NULL, 0);
}
static const struct pci_device_id marvell_pci_tbl[] = {
{ PCI_DEVICE(0x11AB, 0x6101), },
{ PCI_DEVICE(0x11AB, 0x6121), },
{ PCI_DEVICE(0x11AB, 0x6123), },
{ PCI_DEVICE(0x11AB, 0x6145), },
{ PCI_DEVICE(0x1B4B, 0x91A0), },
{ PCI_DEVICE(0x1B4B, 0x91A4), },
{ } /* terminate list */
};
static struct pci_driver marvell_pci_driver = {
.name = DRV_NAME,
.id_table = marvell_pci_tbl,
.probe = marvell_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
};
module_pci_driver(marvell_pci_driver);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell ATA in legacy mode");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, marvell_pci_tbl);
MODULE_VERSION(DRV_VERSION);
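/*
 * Editorial note: module_pci_driver() above generates the boilerplate
 * module_init()/module_exit() pair, equivalent to the open-coded form:
 *
 *	static int __init marvell_init(void)
 *	{
 *		return pci_register_driver(&marvell_pci_driver);
 *	}
 *	module_init(marvell_init);
 *
 *	static void __exit marvell_exit(void)
 *	{
 *		pci_unregister_driver(&marvell_pci_driver);
 *	}
 *	module_exit(marvell_exit);
 */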
| gpl-2.0 |
steev/linux-kernel | drivers/media/video/cx23885/cimax2.c | 950 | 12737 | /*
* cimax2.c
*
* CIMax2(R) SP2 driver in conjunction with NetUp Dual DVB-S2 CI card
*
* Copyright (C) 2009 NetUP Inc.
* Copyright (C) 2009 Igor M. Liplianin <liplianin@netup.ru>
* Copyright (C) 2009 Abylay Ospan <aospan@netup.ru>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "cx23885.h"
#include "dvb_ca_en50221.h"
/**** Bit definitions for MC417_RWD and MC417_OEN registers ***
bits 31-16
+-----------+
| Reserved |
+-----------+
bit 15 bit 14 bit 13 bit 12 bit 11 bit 10 bit 9 bit 8
+-------+-------+-------+-------+-------+-------+-------+-------+
| WR# | RD# | | ACK# | ADHI | ADLO | CS1# | CS0# |
+-------+-------+-------+-------+-------+-------+-------+-------+
bit 7 bit 6 bit 5 bit 4 bit 3 bit 2 bit 1 bit 0
+-------+-------+-------+-------+-------+-------+-------+-------+
| DATA7| DATA6| DATA5| DATA4| DATA3| DATA2| DATA1| DATA0|
+-------+-------+-------+-------+-------+-------+-------+-------+
***/
/* MC417 */
#define NETUP_DATA 0x000000ff
#define NETUP_WR 0x00008000
#define NETUP_RD 0x00004000
#define NETUP_ACK 0x00001000
#define NETUP_ADHI 0x00000800
#define NETUP_ADLO 0x00000400
#define NETUP_CS1 0x00000200
#define NETUP_CS0 0x00000100
#define NETUP_EN_ALL 0x00001000
#define NETUP_CTRL_OFF (NETUP_CS1 | NETUP_CS0 | NETUP_WR | NETUP_RD)
#define NETUP_CI_CTL 0x04
#define NETUP_CI_RD 1
#define NETUP_IRQ_DETAM 0x1
#define NETUP_IRQ_IRQAM 0x4
static unsigned int ci_dbg;
module_param(ci_dbg, int, 0644);
MODULE_PARM_DESC(ci_dbg, "Enable CI debugging");
static unsigned int ci_irq_enable;
module_param(ci_irq_enable, int, 0644);
MODULE_PARM_DESC(ci_irq_enable, "Enable IRQ from CAM");
#define ci_dbg_print(args...) \
do { \
if (ci_dbg) \
printk(KERN_DEBUG args); \
} while (0)
#define ci_irq_flags() (ci_irq_enable ? NETUP_IRQ_IRQAM : 0)
/* stores all private variables for communication with CI */
struct netup_ci_state {
struct dvb_ca_en50221 ca;
struct mutex ca_mutex;
struct i2c_adapter *i2c_adap;
u8 ci_i2c_addr;
int status;
struct work_struct work;
void *priv;
u8 current_irq_mode;
int current_ci_flag;
unsigned long next_status_checked_time;
};
int netup_read_i2c(struct i2c_adapter *i2c_adap, u8 addr, u8 reg,
u8 *buf, int len)
{
int ret;
struct i2c_msg msg[] = {
{
.addr = addr,
.flags = 0,
.buf = &reg,
.len = 1
}, {
.addr = addr,
.flags = I2C_M_RD,
.buf = buf,
.len = len
}
};
ret = i2c_transfer(i2c_adap, msg, 2);
if (ret != 2) {
ci_dbg_print("%s: i2c read error, Reg = 0x%02x, Status = %d\n",
__func__, reg, ret);
return -1;
}
ci_dbg_print("%s: i2c read Addr=0x%04x, Reg = 0x%02x, data = %02x\n",
__func__, addr, reg, buf[0]);
return 0;
}
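/*
 * Editorial note: the transfer above is the classic register-read
 * idiom built from two i2c messages issued in one i2c_transfer()
 * call - a plain write of the register index followed by an I2C_M_RD
 * read of len bytes - without releasing the bus in between.
 */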
int netup_write_i2c(struct i2c_adapter *i2c_adap, u8 addr, u8 reg,
u8 *buf, int len)
{
int ret;
u8 buffer[len + 1];
struct i2c_msg msg = {
.addr = addr,
.flags = 0,
.buf = &buffer[0],
.len = len + 1
};
buffer[0] = reg;
memcpy(&buffer[1], buf, len);
ret = i2c_transfer(i2c_adap, &msg, 1);
if (ret != 1) {
ci_dbg_print("%s: i2c write error, Reg=[0x%02x], Status=%d\n",
__func__, reg, ret);
return -1;
}
return 0;
}
int netup_ci_get_mem(struct cx23885_dev *dev)
{
int mem;
unsigned long timeout = jiffies + msecs_to_jiffies(1);
for (;;) {
mem = cx_read(MC417_RWD);
if ((mem & NETUP_ACK) == 0)
break;
if (time_after(jiffies, timeout))
break;
udelay(1);
}
cx_set(MC417_RWD, NETUP_CTRL_OFF);
return mem & 0xff;
}
int netup_ci_op_cam(struct dvb_ca_en50221 *en50221, int slot,
u8 flag, u8 read, int addr, u8 data)
{
struct netup_ci_state *state = en50221->data;
struct cx23885_tsport *port = state->priv;
struct cx23885_dev *dev = port->dev;
u8 store;
int mem;
int ret;
if (0 != slot)
return -EINVAL;
if (state->current_ci_flag != flag) {
ret = netup_read_i2c(state->i2c_adap, state->ci_i2c_addr,
0, &store, 1);
if (ret != 0)
return ret;
store &= ~0x0c;
store |= flag;
ret = netup_write_i2c(state->i2c_adap, state->ci_i2c_addr,
0, &store, 1);
if (ret != 0)
return ret;
}
state->current_ci_flag = flag;
mutex_lock(&dev->gpio_lock);
/* write addr */
cx_write(MC417_OEN, NETUP_EN_ALL);
cx_write(MC417_RWD, NETUP_CTRL_OFF |
NETUP_ADLO | (0xff & addr));
cx_clear(MC417_RWD, NETUP_ADLO);
cx_write(MC417_RWD, NETUP_CTRL_OFF |
NETUP_ADHI | (0xff & (addr >> 8)));
cx_clear(MC417_RWD, NETUP_ADHI);
if (read) { /* data in */
cx_write(MC417_OEN, NETUP_EN_ALL | NETUP_DATA);
} else /* data out */
cx_write(MC417_RWD, NETUP_CTRL_OFF | data);
/* choose chip */
cx_clear(MC417_RWD,
(state->ci_i2c_addr == 0x40) ? NETUP_CS0 : NETUP_CS1);
/* read/write */
cx_clear(MC417_RWD, (read) ? NETUP_RD : NETUP_WR);
mem = netup_ci_get_mem(dev);
mutex_unlock(&dev->gpio_lock);
if (!read)
if (mem < 0)
return -EREMOTEIO;
ci_dbg_print("%s: %s: chipaddr=[0x%x] addr=[0x%02x], %s=%x\n", __func__,
(read) ? "read" : "write", state->ci_i2c_addr, addr,
(flag == NETUP_CI_CTL) ? "ctl" : "mem",
(read) ? mem : data);
if (read)
return mem;
return 0;
}
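/*
 * Hedged usage sketch (editorial addition): the en50221 accessors
 * below are thin wrappers over netup_ci_op_cam(). Reading CIS byte 0
 * of the CAM in slot 0 through attribute memory, for example, reduces
 * to:
 *
 *	int b = netup_ci_read_attribute_mem(en50221, 0, 0);
 *	if (b < 0)
 *		return b;	/* -EINVAL or a bus error */
 */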
int netup_ci_read_attribute_mem(struct dvb_ca_en50221 *en50221,
int slot, int addr)
{
return netup_ci_op_cam(en50221, slot, 0, NETUP_CI_RD, addr, 0);
}
int netup_ci_write_attribute_mem(struct dvb_ca_en50221 *en50221,
int slot, int addr, u8 data)
{
return netup_ci_op_cam(en50221, slot, 0, 0, addr, data);
}
int netup_ci_read_cam_ctl(struct dvb_ca_en50221 *en50221, int slot, u8 addr)
{
return netup_ci_op_cam(en50221, slot, NETUP_CI_CTL,
NETUP_CI_RD, addr, 0);
}
int netup_ci_write_cam_ctl(struct dvb_ca_en50221 *en50221, int slot,
u8 addr, u8 data)
{
return netup_ci_op_cam(en50221, slot, NETUP_CI_CTL, 0, addr, data);
}
int netup_ci_slot_reset(struct dvb_ca_en50221 *en50221, int slot)
{
struct netup_ci_state *state = en50221->data;
u8 buf = 0x80;
int ret;
if (0 != slot)
return -EINVAL;
udelay(500);
ret = netup_write_i2c(state->i2c_adap, state->ci_i2c_addr,
0, &buf, 1);
if (ret != 0)
return ret;
udelay(500);
buf = 0x00;
ret = netup_write_i2c(state->i2c_adap, state->ci_i2c_addr,
0, &buf, 1);
msleep(1000);
dvb_ca_en50221_camready_irq(&state->ca, 0);
return 0;
}
int netup_ci_slot_shutdown(struct dvb_ca_en50221 *en50221, int slot)
{
/* not implemented */
return 0;
}
int netup_ci_set_irq(struct dvb_ca_en50221 *en50221, u8 irq_mode)
{
struct netup_ci_state *state = en50221->data;
int ret;
if (irq_mode == state->current_irq_mode)
return 0;
ci_dbg_print("%s: chipaddr=[0x%x] setting ci IRQ to [0x%x] \n",
__func__, state->ci_i2c_addr, irq_mode);
ret = netup_write_i2c(state->i2c_adap, state->ci_i2c_addr,
0x1b, &irq_mode, 1);
if (ret != 0)
return ret;
state->current_irq_mode = irq_mode;
return 0;
}
int netup_ci_slot_ts_ctl(struct dvb_ca_en50221 *en50221, int slot)
{
struct netup_ci_state *state = en50221->data;
u8 buf;
if (0 != slot)
return -EINVAL;
netup_read_i2c(state->i2c_adap, state->ci_i2c_addr,
0, &buf, 1);
buf |= 0x60;
return netup_write_i2c(state->i2c_adap, state->ci_i2c_addr,
0, &buf, 1);
}
/* work handler */
static void netup_read_ci_status(struct work_struct *work)
{
struct netup_ci_state *state =
container_of(work, struct netup_ci_state, work);
u8 buf[33];
int ret;
/* CAM module IRQ processing. Fast operation. */
dvb_ca_en50221_frda_irq(&state->ca, 0);
/* CAM module INSERT/REMOVE processing. Slow operation because of i2c
 * transfers */
if (time_after(jiffies, state->next_status_checked_time)
|| !state->status) {
ret = netup_read_i2c(state->i2c_adap, state->ci_i2c_addr,
0, &buf[0], 33);
state->next_status_checked_time = jiffies
+ msecs_to_jiffies(1000);
if (ret != 0)
return;
ci_dbg_print("%s: Slot Status Addr=[0x%04x], "
"Reg=[0x%02x], data=%02x, "
"TS config = %02x\n", __func__,
state->ci_i2c_addr, 0, buf[0],
buf[0]);
if (buf[0] & 1)
state->status = DVB_CA_EN50221_POLL_CAM_PRESENT |
DVB_CA_EN50221_POLL_CAM_READY;
else
state->status = 0;
}
}
/* CI irq handler */
int netup_ci_slot_status(struct cx23885_dev *dev, u32 pci_status)
{
struct cx23885_tsport *port = NULL;
struct netup_ci_state *state = NULL;
if (pci_status & PCI_MSK_GPIO0)
port = &dev->ts1;
else if (pci_status & PCI_MSK_GPIO1)
port = &dev->ts2;
else /* who calls ? */
return 0;
state = port->port_priv;
schedule_work(&state->work);
return 1;
}
int netup_poll_ci_slot_status(struct dvb_ca_en50221 *en50221, int slot, int open)
{
struct netup_ci_state *state = en50221->data;
if (0 != slot)
return -EINVAL;
netup_ci_set_irq(en50221, open ? (NETUP_IRQ_DETAM | ci_irq_flags())
: NETUP_IRQ_DETAM);
return state->status;
}
int netup_ci_init(struct cx23885_tsport *port)
{
struct netup_ci_state *state;
u8 cimax_init[34] = {
0x00, /* module A control*/
0x00, /* auto select mask high A */
0x00, /* auto select mask low A */
0x00, /* auto select pattern high A */
0x00, /* auto select pattern low A */
0x44, /* memory access time A */
0x00, /* invert input A */
0x00, /* RFU */
0x00, /* RFU */
0x00, /* module B control*/
0x00, /* auto select mask high B */
0x00, /* auto select mask low B */
0x00, /* auto select pattern high B */
0x00, /* auto select pattern low B */
0x44, /* memory access time B */
0x00, /* invert input B */
0x00, /* RFU */
0x00, /* RFU */
0x00, /* auto select mask high Ext */
0x00, /* auto select mask low Ext */
0x00, /* auto select pattern high Ext */
0x00, /* auto select pattern low Ext */
0x00, /* RFU */
0x02, /* destination - module A */
0x01, /* power on (use it like store place) */
0x00, /* RFU */
0x00, /* int status read only */
ci_irq_flags() | NETUP_IRQ_DETAM, /* DETAM, IRQAM unmasked */
0x05, /* EXTINT=active-high, INT=push-pull */
0x00, /* USCG1 */
0x04, /* ack active low */
0x00, /* LOCK = 0 */
0x33, /* serial mode, rising in, rising out, MSB first*/
0x31, /* synchronization */
};
int ret;
ci_dbg_print("%s\n", __func__);
state = kzalloc(sizeof(struct netup_ci_state), GFP_KERNEL);
if (!state) {
ci_dbg_print("%s: Unable create CI structure!\n", __func__);
ret = -ENOMEM;
goto err;
}
port->port_priv = state;
switch (port->nr) {
case 1:
state->ci_i2c_addr = 0x40;
break;
case 2:
state->ci_i2c_addr = 0x41;
break;
}
state->i2c_adap = &port->dev->i2c_bus[0].i2c_adap;
state->ca.owner = THIS_MODULE;
state->ca.read_attribute_mem = netup_ci_read_attribute_mem;
state->ca.write_attribute_mem = netup_ci_write_attribute_mem;
state->ca.read_cam_control = netup_ci_read_cam_ctl;
state->ca.write_cam_control = netup_ci_write_cam_ctl;
state->ca.slot_reset = netup_ci_slot_reset;
state->ca.slot_shutdown = netup_ci_slot_shutdown;
state->ca.slot_ts_enable = netup_ci_slot_ts_ctl;
state->ca.poll_slot_status = netup_poll_ci_slot_status;
state->ca.data = state;
state->priv = port;
state->current_irq_mode = ci_irq_flags() | NETUP_IRQ_DETAM;
ret = netup_write_i2c(state->i2c_adap, state->ci_i2c_addr,
0, &cimax_init[0], 34);
/* lock registers */
ret |= netup_write_i2c(state->i2c_adap, state->ci_i2c_addr,
0x1f, &cimax_init[0x18], 1);
/* power on slots */
ret |= netup_write_i2c(state->i2c_adap, state->ci_i2c_addr,
0x18, &cimax_init[0x18], 1);
if (0 != ret)
goto err;
ret = dvb_ca_en50221_init(&port->frontends.adapter,
&state->ca,
/* flags */ 0,
/* n_slots */ 1);
if (0 != ret)
goto err;
INIT_WORK(&state->work, netup_read_ci_status);
schedule_work(&state->work);
ci_dbg_print("%s: CI initialized!\n", __func__);
return 0;
err:
ci_dbg_print("%s: Cannot initialize CI: Error %d.\n", __func__, ret);
kfree(state);
return ret;
}
void netup_ci_exit(struct cx23885_tsport *port)
{
struct netup_ci_state *state;
if (NULL == port)
return;
state = (struct netup_ci_state *)port->port_priv;
if (NULL == state)
return;
if (NULL == state->ca.data)
return;
dvb_ca_en50221_release(&state->ca);
kfree(state);
}
| gpl-2.0 |
AriesVE-DevCon-TEAM/samsung-kernel-msm7x30 | drivers/net/wireless/mwifiex/sta_ioctl.c | 1206 | 41507 | /*
* Marvell Wireless LAN device driver: functions for station ioctl
*
* Copyright (C) 2011, Marvell International Ltd.
*
* This software file (the "File") is distributed by Marvell International
* Ltd. under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
* worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
*
* THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
* IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
* ARE EXPRESSLY DISCLAIMED. The License provides additional details about
* this warranty disclaimer.
*/
#include "decl.h"
#include "ioctl.h"
#include "util.h"
#include "fw.h"
#include "main.h"
#include "wmm.h"
#include "11n.h"
#include "cfg80211.h"
/*
* Copies the multicast address list from device to driver.
*
* This function does not validate the destination memory for
* size, and the calling function must ensure enough memory is
* available.
*/
int mwifiex_copy_mcast_addr(struct mwifiex_multicast_list *mlist,
struct net_device *dev)
{
int i = 0;
struct netdev_hw_addr *ha;
netdev_for_each_mc_addr(ha, dev)
memcpy(&mlist->mac_list[i++], ha->addr, ETH_ALEN);
return i;
}
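/*
 * Hedged caller sketch (mirrors, but is not, the driver's net_device
 * callback): because mwifiex_copy_mcast_addr() performs no bounds
 * checking, the caller is expected to compare netdev_mc_count()
 * against the mac_list capacity first and fall back to all-multicast.
 */
static inline void example_build_mcast_list(struct mwifiex_multicast_list *ml,
					    struct net_device *dev)
{
	if (netdev_mc_count(dev) > MWIFIEX_MAX_MULTICAST_LIST_SIZE)
		ml->mode = MWIFIEX_ALL_MULTI_MODE;	/* too many: accept all */
	else
		ml->num_multicast_addr = mwifiex_copy_mcast_addr(ml, dev);
}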
/*
* Wait queue completion handler.
*
* This function waits on a cmd wait queue. It also cancels the pending
* request after waking up, in case of errors.
*/
int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter)
{
int status;
struct cmd_ctrl_node *cmd_queued;
if (!adapter->cmd_queued)
return 0;
cmd_queued = adapter->cmd_queued;
adapter->cmd_queued = NULL;
dev_dbg(adapter->dev, "cmd pending\n");
atomic_inc(&adapter->cmd_pending);
/* Status pending, wake up main process */
queue_work(adapter->workqueue, &adapter->main_work);
/* Wait for completion */
status = wait_event_interruptible(adapter->cmd_wait_q.wait,
*(cmd_queued->condition));
if (status) {
dev_err(adapter->dev, "cmd_wait_q terminated: %d\n", status);
return status;
}
status = adapter->cmd_wait_q.status;
adapter->cmd_wait_q.status = 0;
return status;
}
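/*
 * Hedged sketch of the completion side (the driver's actual wake-up
 * lives in the command-response path, not here): the responder records
 * the status, sets the queue node's condition and wakes the waiter
 * blocked in mwifiex_wait_queue_complete() above.
 */
static inline void example_complete_cmd(struct mwifiex_adapter *adapter,
					struct cmd_ctrl_node *node)
{
	adapter->cmd_wait_q.status = 0;		/* report success */
	*node->condition = true;		/* satisfy the wait_event condition */
	wake_up_interruptible(&adapter->cmd_wait_q.wait);
}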
/*
* This function prepares the correct firmware command and
* issues it to set the multicast list.
*
* This function can be used to enable promiscuous mode, or enable all
* multicast packets, or to enable selective multicast.
*/
int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
struct mwifiex_multicast_list *mcast_list)
{
int ret = 0;
u16 old_pkt_filter;
old_pkt_filter = priv->curr_pkt_filter;
if (mcast_list->mode == MWIFIEX_PROMISC_MODE) {
dev_dbg(priv->adapter->dev, "info: Enable Promiscuous mode\n");
priv->curr_pkt_filter |= HostCmd_ACT_MAC_PROMISCUOUS_ENABLE;
priv->curr_pkt_filter &=
~HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE;
} else {
/* Multicast */
priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_PROMISCUOUS_ENABLE;
if (mcast_list->mode == MWIFIEX_ALL_MULTI_MODE) {
dev_dbg(priv->adapter->dev,
"info: Enabling All Multicast!\n");
priv->curr_pkt_filter |=
HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE;
} else {
priv->curr_pkt_filter &=
~HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE;
if (mcast_list->num_multicast_addr) {
dev_dbg(priv->adapter->dev,
"info: Set multicast list=%d\n",
mcast_list->num_multicast_addr);
/* Send multicast addresses to firmware */
ret = mwifiex_send_cmd_async(priv,
HostCmd_CMD_MAC_MULTICAST_ADR,
HostCmd_ACT_GEN_SET, 0,
mcast_list);
}
}
}
dev_dbg(priv->adapter->dev,
"info: old_pkt_filter=%#x, curr_pkt_filter=%#x\n",
old_pkt_filter, priv->curr_pkt_filter);
if (old_pkt_filter != priv->curr_pkt_filter) {
ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_MAC_CONTROL,
HostCmd_ACT_GEN_SET,
0, &priv->curr_pkt_filter);
}
return ret;
}
/*
* This function fills bss descriptor structure using provided
* information.
*/
int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
u8 *bssid, s32 rssi, u8 *ie_buf,
size_t ie_len, u16 beacon_period,
u16 cap_info_bitmap, u8 band,
struct mwifiex_bssdescriptor *bss_desc)
{
int ret;
memcpy(bss_desc->mac_address, bssid, ETH_ALEN);
bss_desc->rssi = rssi;
bss_desc->beacon_buf = ie_buf;
bss_desc->beacon_buf_size = ie_len;
bss_desc->beacon_period = beacon_period;
bss_desc->cap_info_bitmap = cap_info_bitmap;
bss_desc->bss_band = band;
if (bss_desc->cap_info_bitmap & WLAN_CAPABILITY_PRIVACY) {
dev_dbg(priv->adapter->dev, "info: InterpretIE: AP WEP enabled\n");
bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_8021X_WEP;
} else {
bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_ACCEPT_ALL;
}
if (bss_desc->cap_info_bitmap & WLAN_CAPABILITY_IBSS)
bss_desc->bss_mode = NL80211_IFTYPE_ADHOC;
else
bss_desc->bss_mode = NL80211_IFTYPE_STATION;
ret = mwifiex_update_bss_desc_with_ie(priv->adapter, bss_desc,
ie_buf, ie_len);
return ret;
}
/*
* In Ad-Hoc mode, the IBSS is created if not found in scan list.
* In both Ad-Hoc and infra mode, a deauthentication is performed
* first.
*/
int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
struct cfg80211_ssid *req_ssid)
{
int ret;
struct mwifiex_adapter *adapter = priv->adapter;
struct mwifiex_bssdescriptor *bss_desc = NULL;
u8 *beacon_ie = NULL;
priv->scan_block = false;
if (bss) {
/* Allocate and fill new bss descriptor */
bss_desc = kzalloc(sizeof(struct mwifiex_bssdescriptor),
GFP_KERNEL);
if (!bss_desc) {
dev_err(priv->adapter->dev, " failed to alloc bss_desc\n");
return -ENOMEM;
}
beacon_ie = kmemdup(bss->information_elements,
bss->len_beacon_ies, GFP_KERNEL);
if (!beacon_ie) {
kfree(bss_desc);
dev_err(priv->adapter->dev, " failed to alloc beacon_ie\n");
return -ENOMEM;
}
ret = mwifiex_fill_new_bss_desc(priv, bss->bssid, bss->signal,
beacon_ie, bss->len_beacon_ies,
bss->beacon_interval,
bss->capability,
*(u8 *)bss->priv, bss_desc);
if (ret)
goto done;
}
if (priv->bss_mode == NL80211_IFTYPE_STATION) {
/* Infra mode */
ret = mwifiex_deauthenticate(priv, NULL);
if (ret)
goto done;
ret = mwifiex_check_network_compatibility(priv, bss_desc);
if (ret)
goto done;
dev_dbg(adapter->dev, "info: SSID found in scan list ... "
"associating...\n");
if (!netif_queue_stopped(priv->netdev))
mwifiex_stop_net_dev_queue(priv->netdev, adapter);
if (netif_carrier_ok(priv->netdev))
netif_carrier_off(priv->netdev);
/* Clear any past association response stored for
* application retrieval */
priv->assoc_rsp_size = 0;
ret = mwifiex_associate(priv, bss_desc);
/* If auth type is auto and association fails using open mode,
* try to connect using shared mode */
if (ret == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG &&
priv->sec_info.is_authtype_auto &&
priv->sec_info.wep_enabled) {
priv->sec_info.authentication_mode =
NL80211_AUTHTYPE_SHARED_KEY;
ret = mwifiex_associate(priv, bss_desc);
}
if (bss)
cfg80211_put_bss(bss);
} else {
/* Adhoc mode */
/* If the requested SSID matches current SSID, return */
if (bss_desc && bss_desc->ssid.ssid_len &&
(!mwifiex_ssid_cmp(&priv->curr_bss_params.bss_descriptor.
ssid, &bss_desc->ssid))) {
kfree(bss_desc);
kfree(beacon_ie);
return 0;
}
/* Exit Adhoc mode first */
dev_dbg(adapter->dev, "info: Sending Adhoc Stop\n");
ret = mwifiex_deauthenticate(priv, NULL);
if (ret)
goto done;
priv->adhoc_is_link_sensed = false;
ret = mwifiex_check_network_compatibility(priv, bss_desc);
if (!netif_queue_stopped(priv->netdev))
mwifiex_stop_net_dev_queue(priv->netdev, adapter);
if (netif_carrier_ok(priv->netdev))
netif_carrier_off(priv->netdev);
if (!ret) {
dev_dbg(adapter->dev, "info: network found in scan"
" list. Joining...\n");
ret = mwifiex_adhoc_join(priv, bss_desc);
if (bss)
cfg80211_put_bss(bss);
} else {
dev_dbg(adapter->dev, "info: Network not found in "
"the list, creating adhoc with ssid = %s\n",
req_ssid->ssid);
ret = mwifiex_adhoc_start(priv, req_ssid);
}
}
done:
kfree(bss_desc);
kfree(beacon_ie);
return ret;
}
/*
* IOCTL request handler to set host sleep configuration.
*
* This function prepares the correct firmware command and
* issues it.
*/
static int mwifiex_set_hs_params(struct mwifiex_private *priv, u16 action,
int cmd_type, struct mwifiex_ds_hs_cfg *hs_cfg)
{
struct mwifiex_adapter *adapter = priv->adapter;
int status = 0;
u32 prev_cond = 0;
if (!hs_cfg)
return -ENOMEM;
switch (action) {
case HostCmd_ACT_GEN_SET:
if (adapter->pps_uapsd_mode) {
dev_dbg(adapter->dev, "info: Host Sleep IOCTL"
" is blocked in UAPSD/PPS mode\n");
status = -1;
break;
}
if (hs_cfg->is_invoke_hostcmd) {
if (hs_cfg->conditions == HOST_SLEEP_CFG_CANCEL) {
if (!adapter->is_hs_configured)
/* Already cancelled */
break;
/* Save previous condition */
prev_cond = le32_to_cpu(adapter->hs_cfg
.conditions);
adapter->hs_cfg.conditions =
cpu_to_le32(hs_cfg->conditions);
} else if (hs_cfg->conditions) {
adapter->hs_cfg.conditions =
cpu_to_le32(hs_cfg->conditions);
adapter->hs_cfg.gpio = (u8)hs_cfg->gpio;
if (hs_cfg->gap)
adapter->hs_cfg.gap = (u8)hs_cfg->gap;
} else if (adapter->hs_cfg.conditions
== cpu_to_le32(HOST_SLEEP_CFG_CANCEL)) {
/* Return failure if no parameters for HS
enable */
status = -1;
break;
}
if (cmd_type == MWIFIEX_SYNC_CMD)
status = mwifiex_send_cmd_sync(priv,
HostCmd_CMD_802_11_HS_CFG_ENH,
HostCmd_ACT_GEN_SET, 0,
&adapter->hs_cfg);
else
status = mwifiex_send_cmd_async(priv,
HostCmd_CMD_802_11_HS_CFG_ENH,
HostCmd_ACT_GEN_SET, 0,
&adapter->hs_cfg);
if (hs_cfg->conditions == HOST_SLEEP_CFG_CANCEL)
/* Restore previous condition */
adapter->hs_cfg.conditions =
cpu_to_le32(prev_cond);
} else {
adapter->hs_cfg.conditions =
cpu_to_le32(hs_cfg->conditions);
adapter->hs_cfg.gpio = (u8)hs_cfg->gpio;
adapter->hs_cfg.gap = (u8)hs_cfg->gap;
}
break;
case HostCmd_ACT_GEN_GET:
hs_cfg->conditions = le32_to_cpu(adapter->hs_cfg.conditions);
hs_cfg->gpio = adapter->hs_cfg.gpio;
hs_cfg->gap = adapter->hs_cfg.gap;
break;
default:
status = -1;
break;
}
return status;
}
/*
* Sends IOCTL request to cancel the existing Host Sleep configuration.
*
* This function allocates the IOCTL request buffer, fills it
* with requisite parameters and calls the IOCTL handler.
*/
int mwifiex_cancel_hs(struct mwifiex_private *priv, int cmd_type)
{
struct mwifiex_ds_hs_cfg hscfg;
hscfg.conditions = HOST_SLEEP_CFG_CANCEL;
hscfg.is_invoke_hostcmd = true;
return mwifiex_set_hs_params(priv, HostCmd_ACT_GEN_SET,
cmd_type, &hscfg);
}
EXPORT_SYMBOL_GPL(mwifiex_cancel_hs);
/*
* Sends IOCTL request to enable the Host Sleep configuration.
*
* This function allocates the IOCTL request buffer, fills it
* with requisite parameters and calls the IOCTL handler.
*/
int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
{
struct mwifiex_ds_hs_cfg hscfg;
if (adapter->hs_activated) {
dev_dbg(adapter->dev, "cmd: HS Already actived\n");
return true;
}
adapter->hs_activate_wait_q_woken = false;
memset(&hscfg, 0, sizeof(struct mwifiex_ds_hs_cfg));
hscfg.is_invoke_hostcmd = true;
if (mwifiex_set_hs_params(mwifiex_get_priv(adapter,
MWIFIEX_BSS_ROLE_STA),
HostCmd_ACT_GEN_SET, MWIFIEX_SYNC_CMD,
&hscfg)) {
dev_err(adapter->dev, "IOCTL request HS enable failed\n");
return false;
}
if (wait_event_interruptible(adapter->hs_activate_wait_q,
adapter->hs_activate_wait_q_woken)) {
dev_err(adapter->dev, "hs_activate_wait_q terminated\n");
return false;
}
return true;
}
EXPORT_SYMBOL_GPL(mwifiex_enable_hs);
/*
* IOCTL request handler to get BSS information.
*
* This function collates the information from different driver structures
* to send to the user.
*/
int mwifiex_get_bss_info(struct mwifiex_private *priv,
struct mwifiex_bss_info *info)
{
struct mwifiex_adapter *adapter = priv->adapter;
struct mwifiex_bssdescriptor *bss_desc;
if (!info)
return -1;
bss_desc = &priv->curr_bss_params.bss_descriptor;
info->bss_mode = priv->bss_mode;
memcpy(&info->ssid, &bss_desc->ssid, sizeof(struct cfg80211_ssid));
memcpy(&info->bssid, &bss_desc->mac_address, ETH_ALEN);
info->bss_chan = bss_desc->channel;
info->region_code = adapter->region_code;
info->media_connected = priv->media_connected;
info->max_power_level = priv->max_tx_power_level;
info->min_power_level = priv->min_tx_power_level;
info->adhoc_state = priv->adhoc_state;
info->bcn_nf_last = priv->bcn_nf_last;
if (priv->sec_info.wep_enabled)
info->wep_status = true;
else
info->wep_status = false;
info->is_hs_configured = adapter->is_hs_configured;
info->is_deep_sleep = adapter->is_deep_sleep;
return 0;
}
/*
* The function disables auto deep sleep mode.
*/
int mwifiex_disable_auto_ds(struct mwifiex_private *priv)
{
struct mwifiex_ds_auto_ds auto_ds;
auto_ds.auto_ds = DEEP_SLEEP_OFF;
return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_PS_MODE_ENH,
DIS_AUTO_PS, BITMAP_AUTO_DS, &auto_ds);
}
EXPORT_SYMBOL_GPL(mwifiex_disable_auto_ds);
/*
* IOCTL request handler to set/get active channel.
*
* This function performs validity checking on channel/frequency
* compatibility and returns failure if not valid.
*/
int mwifiex_bss_set_channel(struct mwifiex_private *priv,
struct mwifiex_chan_freq_power *chan)
{
struct mwifiex_adapter *adapter = priv->adapter;
struct mwifiex_chan_freq_power *cfp = NULL;
if (!chan)
return -1;
if (!chan->channel && !chan->freq)
return -1;
if (adapter->adhoc_start_band & BAND_AN)
adapter->adhoc_start_band = BAND_G | BAND_B | BAND_GN;
else if (adapter->adhoc_start_band & BAND_A)
adapter->adhoc_start_band = BAND_G | BAND_B;
if (chan->channel) {
if (chan->channel <= MAX_CHANNEL_BAND_BG)
cfp = mwifiex_get_cfp(priv, 0, (u16) chan->channel, 0);
if (!cfp) {
cfp = mwifiex_get_cfp(priv, BAND_A,
(u16) chan->channel, 0);
if (cfp) {
if (adapter->adhoc_11n_enabled)
adapter->adhoc_start_band = BAND_A
| BAND_AN;
else
adapter->adhoc_start_band = BAND_A;
}
}
} else {
if (chan->freq <= MAX_FREQUENCY_BAND_BG)
cfp = mwifiex_get_cfp(priv, 0, 0, chan->freq);
if (!cfp) {
cfp = mwifiex_get_cfp(priv, BAND_A, 0, chan->freq);
if (cfp) {
if (adapter->adhoc_11n_enabled)
adapter->adhoc_start_band = BAND_A
| BAND_AN;
else
adapter->adhoc_start_band = BAND_A;
}
}
}
if (!cfp || !cfp->channel) {
dev_err(adapter->dev, "invalid channel/freq\n");
return -1;
}
priv->adhoc_channel = (u8) cfp->channel;
chan->channel = cfp->channel;
chan->freq = cfp->freq;
return 0;
}
/*
* IOCTL request handler to set/get Ad-Hoc channel.
*
* This function prepares the correct firmware command and
* issues it to set or get the ad-hoc channel.
*/
static int mwifiex_bss_ioctl_ibss_channel(struct mwifiex_private *priv,
u16 action, u16 *channel)
{
if (action == HostCmd_ACT_GEN_GET) {
if (!priv->media_connected) {
*channel = priv->adhoc_channel;
return 0;
}
} else {
priv->adhoc_channel = (u8) *channel;
}
return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_RF_CHANNEL,
action, 0, channel);
}
/*
* IOCTL request handler to change Ad-Hoc channel.
*
* This function allocates the IOCTL request buffer, fills it
* with requisite parameters and calls the IOCTL handler.
*
* The function follows the following steps to perform the change -
* - Get current IBSS information
* - Get current channel
* - If no change is required, return
* - If not connected, change channel and return
* - If connected,
* - Disconnect
* - Change channel
* - Perform specific SSID scan with same SSID
* - Start/Join the IBSS
*/
int
mwifiex_drv_change_adhoc_chan(struct mwifiex_private *priv, u16 channel)
{
int ret;
struct mwifiex_bss_info bss_info;
struct mwifiex_ssid_bssid ssid_bssid;
u16 curr_chan = 0;
struct cfg80211_bss *bss = NULL;
struct ieee80211_channel *chan;
enum ieee80211_band band;
memset(&bss_info, 0, sizeof(bss_info));
/* Get BSS information */
if (mwifiex_get_bss_info(priv, &bss_info))
return -1;
/* Get current channel */
ret = mwifiex_bss_ioctl_ibss_channel(priv, HostCmd_ACT_GEN_GET,
&curr_chan);
if (curr_chan == channel) {
ret = 0;
goto done;
}
dev_dbg(priv->adapter->dev, "cmd: updating channel from %d to %d\n",
curr_chan, channel);
if (!bss_info.media_connected) {
ret = 0;
goto done;
}
/* Do disconnect */
memset(&ssid_bssid, 0, sizeof(ssid_bssid));
ret = mwifiex_deauthenticate(priv, ssid_bssid.bssid);
ret = mwifiex_bss_ioctl_ibss_channel(priv, HostCmd_ACT_GEN_SET,
&channel);
/* Do specific SSID scanning */
if (mwifiex_request_scan(priv, &bss_info.ssid)) {
ret = -1;
goto done;
}
band = mwifiex_band_to_radio_type(priv->curr_bss_params.band);
chan = __ieee80211_get_channel(priv->wdev->wiphy,
ieee80211_channel_to_frequency(channel,
band));
/* Find the BSS we want using available scan results */
bss = cfg80211_get_bss(priv->wdev->wiphy, chan, bss_info.bssid,
bss_info.ssid.ssid, bss_info.ssid.ssid_len,
WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
if (!bss)
wiphy_warn(priv->wdev->wiphy, "assoc: bss %pM not in scan results\n",
bss_info.bssid);
ret = mwifiex_bss_start(priv, bss, &bss_info.ssid);
done:
return ret;
}
/*
* IOCTL request handler to get rate.
*
* This function prepares the correct firmware command and
* issues it to get the current rate if it is connected,
* otherwise, the function returns the lowest supported rate
* for the band.
*/
static int mwifiex_rate_ioctl_get_rate_value(struct mwifiex_private *priv,
struct mwifiex_rate_cfg *rate_cfg)
{
rate_cfg->is_rate_auto = priv->is_data_rate_auto;
return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_TX_RATE_QUERY,
HostCmd_ACT_GEN_GET, 0, NULL);
}
/*
* IOCTL request handler to set rate.
*
* This function prepares the correct firmware command and
* issues it to set the current rate.
*
* The function also performs validation checking on the supplied value.
*/
static int mwifiex_rate_ioctl_set_rate_value(struct mwifiex_private *priv,
struct mwifiex_rate_cfg *rate_cfg)
{
u8 rates[MWIFIEX_SUPPORTED_RATES];
u8 *rate;
int rate_index, ret;
u16 bitmap_rates[MAX_BITMAP_RATES_SIZE];
u32 i;
struct mwifiex_adapter *adapter = priv->adapter;
if (rate_cfg->is_rate_auto) {
memset(bitmap_rates, 0, sizeof(bitmap_rates));
/* Support all HR/DSSS rates */
bitmap_rates[0] = 0x000F;
/* Support all OFDM rates */
bitmap_rates[1] = 0x00FF;
/* Support all HT-MCSs rate */
for (i = 0; i < ARRAY_SIZE(priv->bitmap_rates) - 3; i++)
bitmap_rates[i + 2] = 0xFFFF;
bitmap_rates[9] = 0x3FFF;
} else {
memset(rates, 0, sizeof(rates));
mwifiex_get_active_data_rates(priv, rates);
rate = rates;
for (i = 0; (i < MWIFIEX_SUPPORTED_RATES && rate[i]); i++) {
dev_dbg(adapter->dev, "info: rate=%#x wanted=%#x\n",
rate[i], rate_cfg->rate);
if ((rate[i] & 0x7f) == (rate_cfg->rate & 0x7f))
break;
}
if ((i == MWIFIEX_SUPPORTED_RATES) || !rate[i]) {
dev_err(adapter->dev, "fixed data rate %#x is out "
"of range\n", rate_cfg->rate);
return -1;
}
memset(bitmap_rates, 0, sizeof(bitmap_rates));
rate_index = mwifiex_data_rate_to_index(rate_cfg->rate);
/* Only allow b/g rates to be set */
if (rate_index >= MWIFIEX_RATE_INDEX_HRDSSS0 &&
rate_index <= MWIFIEX_RATE_INDEX_HRDSSS3) {
bitmap_rates[0] = 1 << rate_index;
} else {
rate_index -= 1; /* There is a 0x00 in the table */
if (rate_index >= MWIFIEX_RATE_INDEX_OFDM0 &&
rate_index <= MWIFIEX_RATE_INDEX_OFDM7)
bitmap_rates[1] = 1 << (rate_index -
MWIFIEX_RATE_INDEX_OFDM0);
}
}
ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_TX_RATE_CFG,
HostCmd_ACT_GEN_SET, 0, bitmap_rates);
return ret;
}
/*
* IOCTL request handler to set/get rate.
*
* This function can be used to set/get either the rate value or the
* rate index.
*/
static int mwifiex_rate_ioctl_cfg(struct mwifiex_private *priv,
struct mwifiex_rate_cfg *rate_cfg)
{
int status;
if (!rate_cfg)
return -1;
if (rate_cfg->action == HostCmd_ACT_GEN_GET)
status = mwifiex_rate_ioctl_get_rate_value(priv, rate_cfg);
else
status = mwifiex_rate_ioctl_set_rate_value(priv, rate_cfg);
return status;
}
/*
* Sends IOCTL request to get the data rate.
*
* This function allocates the IOCTL request buffer, fills it
* with requisite parameters and calls the IOCTL handler.
*/
int mwifiex_drv_get_data_rate(struct mwifiex_private *priv,
struct mwifiex_rate_cfg *rate)
{
int ret;
memset(rate, 0, sizeof(struct mwifiex_rate_cfg));
rate->action = HostCmd_ACT_GEN_GET;
ret = mwifiex_rate_ioctl_cfg(priv, rate);
if (!ret) {
if (rate->is_rate_auto)
rate->rate = mwifiex_index_to_data_rate(priv,
priv->tx_rate,
priv->tx_htinfo
);
else
rate->rate = priv->data_rate;
} else {
ret = -1;
}
return ret;
}
/*
* IOCTL request handler to set tx power configuration.
*
* This function prepares the correct firmware command and
* issues it.
*
* For non-auto power mode, all the following power groups are set -
* - Modulation class HR/DSSS
* - Modulation class OFDM
* - Modulation class HTBW20
* - Modulation class HTBW40
*/
int mwifiex_set_tx_power(struct mwifiex_private *priv,
struct mwifiex_power_cfg *power_cfg)
{
int ret;
struct host_cmd_ds_txpwr_cfg *txp_cfg;
struct mwifiex_types_power_group *pg_tlv;
struct mwifiex_power_group *pg;
u8 *buf;
u16 dbm = 0;
if (!power_cfg->is_power_auto) {
dbm = (u16) power_cfg->power_level;
if ((dbm < priv->min_tx_power_level) ||
(dbm > priv->max_tx_power_level)) {
dev_err(priv->adapter->dev, "txpower value %d dBm"
" is out of range (%d dBm-%d dBm)\n",
dbm, priv->min_tx_power_level,
priv->max_tx_power_level);
return -1;
}
}
buf = kzalloc(MWIFIEX_SIZE_OF_CMD_BUFFER, GFP_KERNEL);
if (!buf) {
dev_err(priv->adapter->dev, "%s: failed to alloc cmd buffer\n",
__func__);
return -ENOMEM;
}
txp_cfg = (struct host_cmd_ds_txpwr_cfg *) buf;
txp_cfg->action = cpu_to_le16(HostCmd_ACT_GEN_SET);
if (!power_cfg->is_power_auto) {
txp_cfg->mode = cpu_to_le32(1);
pg_tlv = (struct mwifiex_types_power_group *)
(buf + sizeof(struct host_cmd_ds_txpwr_cfg));
pg_tlv->type = TLV_TYPE_POWER_GROUP;
pg_tlv->length = 4 * sizeof(struct mwifiex_power_group);
pg = (struct mwifiex_power_group *)
(buf + sizeof(struct host_cmd_ds_txpwr_cfg)
+ sizeof(struct mwifiex_types_power_group));
/* Power group for modulation class HR/DSSS */
pg->first_rate_code = 0x00;
pg->last_rate_code = 0x03;
pg->modulation_class = MOD_CLASS_HR_DSSS;
pg->power_step = 0;
pg->power_min = (s8) dbm;
pg->power_max = (s8) dbm;
pg++;
/* Power group for modulation class OFDM */
pg->first_rate_code = 0x00;
pg->last_rate_code = 0x07;
pg->modulation_class = MOD_CLASS_OFDM;
pg->power_step = 0;
pg->power_min = (s8) dbm;
pg->power_max = (s8) dbm;
pg++;
/* Power group for modulation class HTBW20 */
pg->first_rate_code = 0x00;
pg->last_rate_code = 0x20;
pg->modulation_class = MOD_CLASS_HT;
pg->power_step = 0;
pg->power_min = (s8) dbm;
pg->power_max = (s8) dbm;
pg->ht_bandwidth = HT_BW_20;
pg++;
/* Power group for modulation class HTBW40 */
pg->first_rate_code = 0x00;
pg->last_rate_code = 0x20;
pg->modulation_class = MOD_CLASS_HT;
pg->power_step = 0;
pg->power_min = (s8) dbm;
pg->power_max = (s8) dbm;
pg->ht_bandwidth = HT_BW_40;
}
ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_TXPWR_CFG,
HostCmd_ACT_GEN_SET, 0, buf);
kfree(buf);
return ret;
}
/*
* IOCTL request handler to set power save mode.
*
* This function prepares the correct firmware command and
* issues it.
*/
int mwifiex_drv_set_power(struct mwifiex_private *priv, u32 *ps_mode)
{
int ret;
struct mwifiex_adapter *adapter = priv->adapter;
u16 sub_cmd;
if (*ps_mode)
adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_PSP;
else
adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_CAM;
sub_cmd = (*ps_mode) ? EN_AUTO_PS : DIS_AUTO_PS;
ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_PS_MODE_ENH,
sub_cmd, BITMAP_STA_PS, NULL);
if ((!ret) && (sub_cmd == DIS_AUTO_PS))
ret = mwifiex_send_cmd_async(priv,
HostCmd_CMD_802_11_PS_MODE_ENH,
GET_PS, 0, NULL);
return ret;
}
/*
* IOCTL request handler to set/reset WPA IE.
*
* The supplied WPA IE is treated as an opaque buffer. Only the first field
* is checked to determine WPA version. If buffer length is zero, the existing
* WPA IE is reset.
*/
static int mwifiex_set_wpa_ie_helper(struct mwifiex_private *priv,
u8 *ie_data_ptr, u16 ie_len)
{
if (ie_len) {
if (ie_len > sizeof(priv->wpa_ie)) {
dev_err(priv->adapter->dev,
"failed to copy WPA IE, too big\n");
return -1;
}
memcpy(priv->wpa_ie, ie_data_ptr, ie_len);
priv->wpa_ie_len = (u8) ie_len;
dev_dbg(priv->adapter->dev, "cmd: Set Wpa_ie_len=%d IE=%#x\n",
priv->wpa_ie_len, priv->wpa_ie[0]);
if (priv->wpa_ie[0] == WLAN_EID_WPA) {
priv->sec_info.wpa_enabled = true;
} else if (priv->wpa_ie[0] == WLAN_EID_RSN) {
priv->sec_info.wpa2_enabled = true;
} else {
priv->sec_info.wpa_enabled = false;
priv->sec_info.wpa2_enabled = false;
}
} else {
memset(priv->wpa_ie, 0, sizeof(priv->wpa_ie));
priv->wpa_ie_len = 0;
dev_dbg(priv->adapter->dev, "info: reset wpa_ie_len=%d IE=%#x\n",
priv->wpa_ie_len, priv->wpa_ie[0]);
priv->sec_info.wpa_enabled = false;
priv->sec_info.wpa2_enabled = false;
}
return 0;
}
/*
* IOCTL request handler to set/reset WAPI IE.
*
* The supplied WAPI IE is treated as an opaque buffer. Only the first field
* is checked to internally enable WAPI. If buffer length is zero, the existing
* WAPI IE is reset.
*/
static int mwifiex_set_wapi_ie(struct mwifiex_private *priv,
u8 *ie_data_ptr, u16 ie_len)
{
if (ie_len) {
if (ie_len > sizeof(priv->wapi_ie)) {
dev_dbg(priv->adapter->dev,
"info: failed to copy WAPI IE, too big\n");
return -1;
}
memcpy(priv->wapi_ie, ie_data_ptr, ie_len);
priv->wapi_ie_len = ie_len;
dev_dbg(priv->adapter->dev, "cmd: Set wapi_ie_len=%d IE=%#x\n",
priv->wapi_ie_len, priv->wapi_ie[0]);
if (priv->wapi_ie[0] == WLAN_EID_BSS_AC_ACCESS_DELAY)
priv->sec_info.wapi_enabled = true;
} else {
memset(priv->wapi_ie, 0, sizeof(priv->wapi_ie));
priv->wapi_ie_len = ie_len;
dev_dbg(priv->adapter->dev,
"info: Reset wapi_ie_len=%d IE=%#x\n",
priv->wapi_ie_len, priv->wapi_ie[0]);
priv->sec_info.wapi_enabled = false;
}
return 0;
}
/*
* IOCTL request handler to set WAPI key.
*
* This function prepares the correct firmware command and
* issues it.
*/
static int mwifiex_sec_ioctl_set_wapi_key(struct mwifiex_private *priv,
struct mwifiex_ds_encrypt_key *encrypt_key)
{
return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_KEY_MATERIAL,
HostCmd_ACT_GEN_SET, KEY_INFO_ENABLED,
encrypt_key);
}
/*
* IOCTL request handler to set WEP network key.
*
* This function prepares the correct firmware command and
* issues it, after validation checks.
*/
static int mwifiex_sec_ioctl_set_wep_key(struct mwifiex_private *priv,
struct mwifiex_ds_encrypt_key *encrypt_key)
{
int ret;
struct mwifiex_wep_key *wep_key;
int index;
if (priv->wep_key_curr_index >= NUM_WEP_KEYS)
priv->wep_key_curr_index = 0;
wep_key = &priv->wep_key[priv->wep_key_curr_index];
index = encrypt_key->key_index;
if (encrypt_key->key_disable) {
priv->sec_info.wep_enabled = 0;
} else if (!encrypt_key->key_len) {
/* Copy the required key as the current key */
wep_key = &priv->wep_key[index];
if (!wep_key->key_length) {
dev_err(priv->adapter->dev,
"key not set, so cannot enable it\n");
return -1;
}
priv->wep_key_curr_index = (u16) index;
priv->sec_info.wep_enabled = 1;
} else {
wep_key = &priv->wep_key[index];
memset(wep_key, 0, sizeof(struct mwifiex_wep_key));
/* Copy the key in the driver */
memcpy(wep_key->key_material,
encrypt_key->key_material,
encrypt_key->key_len);
wep_key->key_index = index;
wep_key->key_length = encrypt_key->key_len;
priv->sec_info.wep_enabled = 1;
}
if (wep_key->key_length) {
/* Send request to firmware */
ret = mwifiex_send_cmd_async(priv,
HostCmd_CMD_802_11_KEY_MATERIAL,
HostCmd_ACT_GEN_SET, 0, NULL);
if (ret)
return ret;
}
if (priv->sec_info.wep_enabled)
priv->curr_pkt_filter |= HostCmd_ACT_MAC_WEP_ENABLE;
else
priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_WEP_ENABLE;
ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_MAC_CONTROL,
HostCmd_ACT_GEN_SET, 0,
&priv->curr_pkt_filter);
return ret;
}
/*
* IOCTL request handler to set WPA key.
*
* This function prepares the correct firmware command and
* issues it, after validation checks.
*
* Current driver only supports key length of up to 32 bytes.
*
* This function can also be used to disable a currently set key.
*/
static int mwifiex_sec_ioctl_set_wpa_key(struct mwifiex_private *priv,
struct mwifiex_ds_encrypt_key *encrypt_key)
{
int ret;
u8 remove_key = false;
struct host_cmd_ds_802_11_key_material *ibss_key;
/* Current driver only supports key length of up to 32 bytes */
if (encrypt_key->key_len > WLAN_MAX_KEY_LEN) {
dev_err(priv->adapter->dev, "key length too long\n");
return -1;
}
if (priv->bss_mode == NL80211_IFTYPE_ADHOC) {
/*
* IBSS/WPA-None uses only one key (Group) for both receiving
* and sending unicast and multicast packets.
*/
/* Send the key as PTK to firmware */
encrypt_key->key_index = MWIFIEX_KEY_INDEX_UNICAST;
ret = mwifiex_send_cmd_async(priv,
HostCmd_CMD_802_11_KEY_MATERIAL,
HostCmd_ACT_GEN_SET,
KEY_INFO_ENABLED, encrypt_key);
if (ret)
return ret;
ibss_key = &priv->aes_key;
memset(ibss_key, 0,
sizeof(struct host_cmd_ds_802_11_key_material));
/* Copy the key in the driver */
memcpy(ibss_key->key_param_set.key, encrypt_key->key_material,
encrypt_key->key_len);
memcpy(&ibss_key->key_param_set.key_len, &encrypt_key->key_len,
sizeof(ibss_key->key_param_set.key_len));
ibss_key->key_param_set.key_type_id
= cpu_to_le16(KEY_TYPE_ID_TKIP);
ibss_key->key_param_set.key_info = cpu_to_le16(KEY_ENABLED);
/* Send the key as GTK to firmware */
encrypt_key->key_index = ~MWIFIEX_KEY_INDEX_UNICAST;
}
if (!encrypt_key->key_index)
encrypt_key->key_index = MWIFIEX_KEY_INDEX_UNICAST;
if (remove_key)
ret = mwifiex_send_cmd_sync(priv,
HostCmd_CMD_802_11_KEY_MATERIAL,
HostCmd_ACT_GEN_SET,
!KEY_INFO_ENABLED, encrypt_key);
else
ret = mwifiex_send_cmd_sync(priv,
HostCmd_CMD_802_11_KEY_MATERIAL,
HostCmd_ACT_GEN_SET,
KEY_INFO_ENABLED, encrypt_key);
return ret;
}
/*
* IOCTL request handler to set/get network keys.
*
* This is a generic key handling function which supports WEP, WPA
* and WAPI.
*/
static int
mwifiex_sec_ioctl_encrypt_key(struct mwifiex_private *priv,
struct mwifiex_ds_encrypt_key *encrypt_key)
{
int status;
if (encrypt_key->is_wapi_key)
status = mwifiex_sec_ioctl_set_wapi_key(priv, encrypt_key);
else if (encrypt_key->key_len > WLAN_KEY_LEN_WEP104)
status = mwifiex_sec_ioctl_set_wpa_key(priv, encrypt_key);
else
status = mwifiex_sec_ioctl_set_wep_key(priv, encrypt_key);
return status;
}
/*
* This function returns the driver version.
*/
int
mwifiex_drv_get_driver_version(struct mwifiex_adapter *adapter, char *version,
int max_len)
{
union {
u32 l;
u8 c[4];
} ver;
char fw_ver[32];
ver.l = adapter->fw_release_number;
sprintf(fw_ver, "%u.%u.%u.p%u", ver.c[2], ver.c[1], ver.c[0], ver.c[3]);
snprintf(version, max_len, driver_version, fw_ver);
dev_dbg(adapter->dev, "info: MWIFIEX VERSION: %s\n", version);
return 0;
}
/*
* Sends IOCTL request to get signal information.
*
* This function allocates the IOCTL request buffer, fills it
* with requisite parameters and calls the IOCTL handler.
*/
int mwifiex_get_signal_info(struct mwifiex_private *priv,
struct mwifiex_ds_get_signal *signal)
{
int status;
signal->selector = ALL_RSSI_INFO_MASK;
/* Signal info can be obtained only if connected */
if (!priv->media_connected) {
dev_dbg(priv->adapter->dev,
"info: Can not get signal in disconnected state\n");
return -1;
}
status = mwifiex_send_cmd_sync(priv, HostCmd_CMD_RSSI_INFO,
HostCmd_ACT_GEN_GET, 0, signal);
if (!status) {
if (signal->selector & BCN_RSSI_AVG_MASK)
priv->qual_level = signal->bcn_rssi_avg;
if (signal->selector & BCN_NF_AVG_MASK)
priv->qual_noise = signal->bcn_nf_avg;
}
return status;
}
/*
* Sends IOCTL request to set encoding parameters.
*
* This function allocates the IOCTL request buffer, fills it
* with requisite parameters and calls the IOCTL handler.
*/
int mwifiex_set_encode(struct mwifiex_private *priv, const u8 *key,
int key_len, u8 key_index, int disable)
{
struct mwifiex_ds_encrypt_key encrypt_key;
memset(&encrypt_key, 0, sizeof(struct mwifiex_ds_encrypt_key));
encrypt_key.key_len = key_len;
if (!disable) {
encrypt_key.key_index = key_index;
if (key_len)
memcpy(encrypt_key.key_material, key, key_len);
} else {
encrypt_key.key_disable = true;
}
return mwifiex_sec_ioctl_encrypt_key(priv, &encrypt_key);
}
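/*
 * Hedged usage sketch (the key bytes are hypothetical): program a
 * 40-bit WEP key at index 0 through the helper above, then disable
 * keying again.
 */
static inline int example_wep_cycle(struct mwifiex_private *priv)
{
	static const u8 wep40[5] = { 0x01, 0x02, 0x03, 0x04, 0x05 };
	int ret;

	ret = mwifiex_set_encode(priv, wep40, sizeof(wep40), 0, 0);
	if (ret)
		return ret;
	return mwifiex_set_encode(priv, NULL, 0, 0, 1);	/* disable */
}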
/*
* Sends IOCTL request to get extended version.
*
* This function allocates the IOCTL request buffer, fills it
* with requisite parameters and calls the IOCTL handler.
*/
int
mwifiex_get_ver_ext(struct mwifiex_private *priv)
{
struct mwifiex_ver_ext ver_ext;
memset(&ver_ext, 0, sizeof(struct mwifiex_ver_ext));
if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_VERSION_EXT,
HostCmd_ACT_GEN_GET, 0, &ver_ext))
return -1;
return 0;
}
/*
* Sends IOCTL request to get statistics information.
*
* This function allocates the IOCTL request buffer, fills it
* with requisite parameters and calls the IOCTL handler.
*/
int
mwifiex_get_stats_info(struct mwifiex_private *priv,
struct mwifiex_ds_get_stats *log)
{
return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_GET_LOG,
HostCmd_ACT_GEN_GET, 0, log);
}
/*
* IOCTL request handler to read/write register.
*
* This function prepares the correct firmware command and
* issues it.
*
* Access to the following registers are supported -
* - MAC
* - BBP
* - RF
* - PMIC
* - CAU
*/
static int mwifiex_reg_mem_ioctl_reg_rw(struct mwifiex_private *priv,
struct mwifiex_ds_reg_rw *reg_rw,
u16 action)
{
u16 cmd_no;
switch (le32_to_cpu(reg_rw->type)) {
case MWIFIEX_REG_MAC:
cmd_no = HostCmd_CMD_MAC_REG_ACCESS;
break;
case MWIFIEX_REG_BBP:
cmd_no = HostCmd_CMD_BBP_REG_ACCESS;
break;
case MWIFIEX_REG_RF:
cmd_no = HostCmd_CMD_RF_REG_ACCESS;
break;
case MWIFIEX_REG_PMIC:
cmd_no = HostCmd_CMD_PMIC_REG_ACCESS;
break;
case MWIFIEX_REG_CAU:
cmd_no = HostCmd_CMD_CAU_REG_ACCESS;
break;
default:
return -1;
}
return mwifiex_send_cmd_sync(priv, cmd_no, action, 0, reg_rw);
}
/*
* Sends IOCTL request to write to a register.
*
* This function allocates the IOCTL request buffer, fills it
* with requisite parameters and calls the IOCTL handler.
*/
int
mwifiex_reg_write(struct mwifiex_private *priv, u32 reg_type,
u32 reg_offset, u32 reg_value)
{
struct mwifiex_ds_reg_rw reg_rw;
reg_rw.type = cpu_to_le32(reg_type);
reg_rw.offset = cpu_to_le32(reg_offset);
reg_rw.value = cpu_to_le32(reg_value);
return mwifiex_reg_mem_ioctl_reg_rw(priv, &reg_rw, HostCmd_ACT_GEN_SET);
}
/*
* Sends IOCTL request to read from a register.
*
* This function allocates the IOCTL request buffer, fills it
* with requisite parameters and calls the IOCTL handler.
*/
int
mwifiex_reg_read(struct mwifiex_private *priv, u32 reg_type,
u32 reg_offset, u32 *value)
{
int ret;
struct mwifiex_ds_reg_rw reg_rw;
reg_rw.type = cpu_to_le32(reg_type);
reg_rw.offset = cpu_to_le32(reg_offset);
ret = mwifiex_reg_mem_ioctl_reg_rw(priv, &reg_rw, HostCmd_ACT_GEN_GET);
if (ret)
goto done;
*value = le32_to_cpu(reg_rw.value);
done:
return ret;
}
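/*
 * Hedged usage sketch (the register offset and bit are hypothetical):
 * a read-modify-write of a MAC register via the two helpers above.
 */
static inline int example_set_mac_bit(struct mwifiex_private *priv)
{
	u32 val;
	int ret;

	ret = mwifiex_reg_read(priv, MWIFIEX_REG_MAC, 0x40, &val);
	if (ret)
		return ret;
	return mwifiex_reg_write(priv, MWIFIEX_REG_MAC, 0x40, val | 0x1);
}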
/*
* Sends IOCTL request to read from EEPROM.
*
* This function allocates the IOCTL request buffer, fills it
* with requisite parameters and calls the IOCTL handler.
*/
int
mwifiex_eeprom_read(struct mwifiex_private *priv, u16 offset, u16 bytes,
u8 *value)
{
int ret;
struct mwifiex_ds_read_eeprom rd_eeprom;
rd_eeprom.offset = cpu_to_le16((u16) offset);
rd_eeprom.byte_count = cpu_to_le16((u16) bytes);
/* Send request to firmware */
ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_EEPROM_ACCESS,
HostCmd_ACT_GEN_GET, 0, &rd_eeprom);
if (!ret)
memcpy(value, rd_eeprom.value, MAX_EEPROM_DATA);
return ret;
}
/*
* This function sets a generic IE. In addition to generic IE, it can
* also handle WPA, WPA2 and WAPI IEs.
*/
static int
mwifiex_set_gen_ie_helper(struct mwifiex_private *priv, u8 *ie_data_ptr,
u16 ie_len)
{
int ret = 0;
struct ieee_types_vendor_header *pvendor_ie;
const u8 wpa_oui[] = { 0x00, 0x50, 0xf2, 0x01 };
const u8 wps_oui[] = { 0x00, 0x50, 0xf2, 0x04 };
/* If the passed length is zero, reset the buffer */
if (!ie_len) {
priv->gen_ie_buf_len = 0;
priv->wps.session_enable = false;
return 0;
} else if (!ie_data_ptr) {
return -1;
}
pvendor_ie = (struct ieee_types_vendor_header *) ie_data_ptr;
/* Test to see if it is a WPA IE, if not, then it is a gen IE */
if (((pvendor_ie->element_id == WLAN_EID_WPA) &&
(!memcmp(pvendor_ie->oui, wpa_oui, sizeof(wpa_oui)))) ||
(pvendor_ie->element_id == WLAN_EID_RSN)) {
/* IE is a WPA/WPA2 IE so call set_wpa function */
ret = mwifiex_set_wpa_ie_helper(priv, ie_data_ptr, ie_len);
priv->wps.session_enable = false;
return ret;
} else if (pvendor_ie->element_id == WLAN_EID_BSS_AC_ACCESS_DELAY) {
/* IE is a WAPI IE so call set_wapi function */
ret = mwifiex_set_wapi_ie(priv, ie_data_ptr, ie_len);
return ret;
}
/*
* Verify that the passed length is not larger than the
* available space remaining in the buffer
*/
if (ie_len < (sizeof(priv->gen_ie_buf) - priv->gen_ie_buf_len)) {
/* Test to see if it is a WPS IE, if so, enable
* wps session flag
*/
pvendor_ie = (struct ieee_types_vendor_header *) ie_data_ptr;
if ((pvendor_ie->element_id == WLAN_EID_VENDOR_SPECIFIC) &&
(!memcmp(pvendor_ie->oui, wps_oui, sizeof(wps_oui)))) {
priv->wps.session_enable = true;
dev_dbg(priv->adapter->dev,
"info: WPS Session Enabled.\n");
}
/* Append the passed data to the end of gen_ie_buf */
memcpy(priv->gen_ie_buf + priv->gen_ie_buf_len, ie_data_ptr,
ie_len);
/* Increment the stored buffer length by the
size passed */
priv->gen_ie_buf_len += ie_len;
} else {
/* Passed data does not fit in the remaining
buffer space */
ret = -1;
}
/* Return 0, or -1 for error case */
return ret;
}
/*
* IOCTL request handler to set/get generic IE.
*
* In addition to various generic IEs, this function can also be
* used to set the ARP filter.
*/
static int mwifiex_misc_ioctl_gen_ie(struct mwifiex_private *priv,
struct mwifiex_ds_misc_gen_ie *gen_ie,
u16 action)
{
struct mwifiex_adapter *adapter = priv->adapter;
switch (gen_ie->type) {
case MWIFIEX_IE_TYPE_GEN_IE:
if (action == HostCmd_ACT_GEN_GET) {
gen_ie->len = priv->wpa_ie_len;
memcpy(gen_ie->ie_data, priv->wpa_ie, gen_ie->len);
} else {
mwifiex_set_gen_ie_helper(priv, gen_ie->ie_data,
(u16) gen_ie->len);
}
break;
case MWIFIEX_IE_TYPE_ARP_FILTER:
memset(adapter->arp_filter, 0, sizeof(adapter->arp_filter));
if (gen_ie->len > ARP_FILTER_MAX_BUF_SIZE) {
adapter->arp_filter_size = 0;
dev_err(adapter->dev, "invalid ARP filter size\n");
return -1;
} else {
memcpy(adapter->arp_filter, gen_ie->ie_data,
gen_ie->len);
adapter->arp_filter_size = gen_ie->len;
}
break;
default:
dev_err(adapter->dev, "invalid IE type\n");
return -1;
}
return 0;
}
/*
* Sends IOCTL request to set a generic IE.
*
* This function allocates the IOCTL request buffer, fills it
* with requisite parameters and calls the IOCTL handler.
*/
int
mwifiex_set_gen_ie(struct mwifiex_private *priv, u8 *ie, int ie_len)
{
struct mwifiex_ds_misc_gen_ie gen_ie;
if (ie_len > IEEE_MAX_IE_SIZE)
return -EFAULT;
gen_ie.type = MWIFIEX_IE_TYPE_GEN_IE;
gen_ie.len = ie_len;
memcpy(gen_ie.ie_data, ie, ie_len);
if (mwifiex_misc_ioctl_gen_ie(priv, &gen_ie, HostCmd_ACT_GEN_SET))
return -EFAULT;
return 0;
}
| gpl-2.0 |
ajopanoor/hydra | arch/x86/vdso/vdso32-setup.c | 1206 | 2524 | /*
* (C) Copyright 2002 Linus Torvalds
* Portions based on the vdso-randomization code from exec-shield:
* Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar
*
* This file contains the needed initializations to support sysenter.
*/
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/mm_types.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/vdso.h>
#ifdef CONFIG_COMPAT_VDSO
#define VDSO_DEFAULT 0
#else
#define VDSO_DEFAULT 1
#endif
/*
* Should the kernel map a VDSO page into processes and pass its
* address down to glibc upon exec()?
*/
unsigned int __read_mostly vdso32_enabled = VDSO_DEFAULT;
static int __init vdso32_setup(char *s)
{
vdso32_enabled = simple_strtoul(s, NULL, 0);
if (vdso32_enabled > 1)
pr_warn("vdso32 values other than 0 and 1 are no longer allowed; vdso disabled\n");
return 1;
}
/*
* For consistency, the argument vdso32=[012] affects the 32-bit vDSO
* behavior on both 64-bit and 32-bit kernels.
* On 32-bit kernels, vdso=[012] means the same thing.
*/
__setup("vdso32=", vdso32_setup);
#ifdef CONFIG_X86_32
__setup_param("vdso=", vdso_setup, vdso32_setup, 0);
#endif
#ifdef CONFIG_X86_64
#define vdso32_sysenter() (boot_cpu_has(X86_FEATURE_SYSENTER32))
#define vdso32_syscall() (boot_cpu_has(X86_FEATURE_SYSCALL32))
#else /* CONFIG_X86_32 */
#define vdso32_sysenter() (boot_cpu_has(X86_FEATURE_SEP))
#define vdso32_syscall() (0)
#endif /* CONFIG_X86_64 */
#if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
const struct vdso_image *selected_vdso32;
#endif
int __init sysenter_setup(void)
{
#ifdef CONFIG_COMPAT
if (vdso32_syscall())
selected_vdso32 = &vdso_image_32_syscall;
else
#endif
if (vdso32_sysenter())
selected_vdso32 = &vdso_image_32_sysenter;
else
selected_vdso32 = &vdso_image_32_int80;
init_vdso_image(selected_vdso32);
return 0;
}
#ifdef CONFIG_X86_64
subsys_initcall(sysenter_setup);
#ifdef CONFIG_SYSCTL
/* Register vsyscall32 into the ABI table */
#include <linux/sysctl.h>
static struct ctl_table abi_table2[] = {
{
.procname = "vsyscall32",
.data = &vdso32_enabled,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
},
{}
};
static struct ctl_table abi_root_table2[] = {
{
.procname = "abi",
.mode = 0555,
.child = abi_table2
},
{}
};
static __init int ia32_binfmt_init(void)
{
register_sysctl_table(abi_root_table2);
return 0;
}
__initcall(ia32_binfmt_init);
#endif /* CONFIG_SYSCTL */
#endif /* CONFIG_X86_64 */
| gpl-2.0 |
JFCM121CAKE/android_kernel_samsung_jf | arch/mips/mm/gup.c | 1718 | 7698 | /*
* Lockless get_user_pages_fast for MIPS
*
* Copyright (C) 2008 Nick Piggin
* Copyright (C) 2008 Novell Inc.
* Copyright (C) 2011 Ralf Baechle
*/
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/hugetlb.h>
#include <asm/pgtable.h>
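/*
 * On 32-bit CPUs with 64-bit PTEs the two halves cannot be loaded
 * atomically; gup_get_pte() therefore re-reads the low word after the
 * high word and retries if a concurrent update tore the pair.
 */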
static inline pte_t gup_get_pte(pte_t *ptep)
{
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
pte_t pte;
retry:
pte.pte_low = ptep->pte_low;
smp_rmb();
pte.pte_high = ptep->pte_high;
smp_rmb();
if (unlikely(pte.pte_low != ptep->pte_low))
goto retry;
return pte;
#else
return ACCESS_ONCE(*ptep);
#endif
}
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
int write, struct page **pages, int *nr)
{
pte_t *ptep = pte_offset_map(&pmd, addr);
do {
pte_t pte = gup_get_pte(ptep);
struct page *page;
if (!pte_present(pte) ||
pte_special(pte) || (write && !pte_write(pte))) {
pte_unmap(ptep);
return 0;
}
VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
page = pte_page(pte);
get_page(page);
SetPageReferenced(page);
pages[*nr] = page;
(*nr)++;
} while (ptep++, addr += PAGE_SIZE, addr != end);
pte_unmap(ptep - 1);
return 1;
}
static inline void get_head_page_multiple(struct page *page, int nr)
{
VM_BUG_ON(page != compound_head(page));
VM_BUG_ON(page_count(page) == 0);
atomic_add(nr, &page->_count);
SetPageReferenced(page);
}
static int gup_huge_pmd(pmd_t pmd, unsigned long addr, unsigned long end,
int write, struct page **pages, int *nr)
{
pte_t pte = *(pte_t *)&pmd;
struct page *head, *page;
int refs;
if (write && !pte_write(pte))
return 0;
/* hugepages are never "special" */
VM_BUG_ON(pte_special(pte));
VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
refs = 0;
head = pte_page(pte);
page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
do {
VM_BUG_ON(compound_head(page) != head);
pages[*nr] = page;
if (PageTail(page))
get_huge_page_tail(page);
(*nr)++;
page++;
refs++;
} while (addr += PAGE_SIZE, addr != end);
get_head_page_multiple(head, refs);
return 1;
}
static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
int write, struct page **pages, int *nr)
{
unsigned long next;
pmd_t *pmdp;
pmdp = pmd_offset(&pud, addr);
do {
pmd_t pmd = *pmdp;
next = pmd_addr_end(addr, end);
/*
* The pmd_trans_splitting() check below explains why
* pmdp_splitting_flush has to flush the tlb, to stop
* this gup-fast code from running while we set the
* splitting bit in the pmd. Returning zero will take
* the slow path that will call wait_split_huge_page()
* if the pmd is still in splitting state. gup-fast
* can't because it has irq disabled and
* wait_split_huge_page() would never return as the
* tlb flush IPI wouldn't run.
*/
if (pmd_none(pmd) || pmd_trans_splitting(pmd))
return 0;
if (unlikely(pmd_huge(pmd))) {
if (!gup_huge_pmd(pmd, addr, next, write, pages, nr))
return 0;
} else {
if (!gup_pte_range(pmd, addr, next, write, pages, nr))
return 0;
}
} while (pmdp++, addr = next, addr != end);
return 1;
}
static int gup_huge_pud(pud_t pud, unsigned long addr, unsigned long end,
int write, struct page **pages, int *nr)
{
pte_t pte = *(pte_t *)&pud;
struct page *head, *page;
int refs;
if (write && !pte_write(pte))
return 0;
/* hugepages are never "special" */
VM_BUG_ON(pte_special(pte));
VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
refs = 0;
head = pte_page(pte);
page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
do {
VM_BUG_ON(compound_head(page) != head);
pages[*nr] = page;
if (PageTail(page))
get_huge_page_tail(page);
(*nr)++;
page++;
refs++;
} while (addr += PAGE_SIZE, addr != end);
get_head_page_multiple(head, refs);
return 1;
}
static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
int write, struct page **pages, int *nr)
{
unsigned long next;
pud_t *pudp;
pudp = pud_offset(&pgd, addr);
do {
pud_t pud = *pudp;
next = pud_addr_end(addr, end);
if (pud_none(pud))
return 0;
if (unlikely(pud_huge(pud))) {
if (!gup_huge_pud(pud, addr, next, write, pages, nr))
return 0;
} else {
if (!gup_pmd_range(pud, addr, next, write, pages, nr))
return 0;
}
} while (pudp++, addr = next, addr != end);
return 1;
}
/*
* Like get_user_pages_fast() except its IRQ-safe in that it won't fall
* back to the regular GUP.
*/
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages)
{
struct mm_struct *mm = current->mm;
unsigned long addr, len, end;
unsigned long next;
unsigned long flags;
pgd_t *pgdp;
int nr = 0;
start &= PAGE_MASK;
addr = start;
len = (unsigned long) nr_pages << PAGE_SHIFT;
end = start + len;
if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
(void __user *)start, len)))
return 0;
/*
* XXX: batch / limit 'nr', to avoid large irq off latency
* needs some instrumenting to determine the common sizes used by
* important workloads (eg. DB2), and whether limiting the batch
* size will decrease performance.
*
* It seems like we're in the clear for the moment. Direct-IO is
* the main guy that batches up lots of get_user_pages, and even
* they are limited to 64-at-a-time which is not so many.
*/
/*
* This doesn't prevent pagetable teardown, but does prevent
* the pagetables and pages from being freed.
*
* So long as we atomically load page table pointers versus teardown,
* we can follow the address down to the page and take a ref on it.
*/
local_irq_save(flags);
pgdp = pgd_offset(mm, addr);
do {
pgd_t pgd = *pgdp;
next = pgd_addr_end(addr, end);
if (pgd_none(pgd))
break;
if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
break;
} while (pgdp++, addr = next, addr != end);
local_irq_restore(flags);
return nr;
}
/**
* get_user_pages_fast() - pin user pages in memory
* @start: starting user address
* @nr_pages: number of pages from start to pin
* @write: whether pages will be written to
* @pages: array that receives pointers to the pages pinned.
* Should be at least nr_pages long.
*
* Attempt to pin user pages in memory without taking mm->mmap_sem.
* If not successful, it will fall back to taking the lock and
* calling get_user_pages().
*
* Returns number of pages pinned. This may be fewer than the number
* requested. If nr_pages is 0 or negative, returns 0. If no pages
* were pinned, returns -errno.
*/
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages)
{
struct mm_struct *mm = current->mm;
unsigned long addr, len, end;
unsigned long next;
pgd_t *pgdp;
int ret, nr = 0;
start &= PAGE_MASK;
addr = start;
len = (unsigned long) nr_pages << PAGE_SHIFT;
end = start + len;
if (end < start)
goto slow_irqon;
/* XXX: batch / limit 'nr' */
local_irq_disable();
pgdp = pgd_offset(mm, addr);
do {
pgd_t pgd = *pgdp;
next = pgd_addr_end(addr, end);
if (pgd_none(pgd))
goto slow;
if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
goto slow;
} while (pgdp++, addr = next, addr != end);
local_irq_enable();
VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
return nr;
slow:
local_irq_enable();
slow_irqon:
/* Try to get the remaining pages with get_user_pages */
start += nr << PAGE_SHIFT;
pages += nr;
down_read(&mm->mmap_sem);
ret = get_user_pages(current, mm, start,
(end - start) >> PAGE_SHIFT,
write, 0, pages, NULL);
up_read(&mm->mmap_sem);
/* Have to be a bit careful with return values */
if (nr > 0) {
if (ret < 0)
ret = nr;
else
ret += nr;
}
return ret;
}
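/*
 * Hedged caller sketch (the address and page count are hypothetical):
 * pin a few user pages for direct access, then drop the references
 * with put_page() once done.
 */
static inline int example_pin_user_buf(unsigned long uaddr)
{
	struct page *pages[4];
	int i, n;

	n = get_user_pages_fast(uaddr, 4, 1, pages);	/* 1 == writable */
	for (i = 0; i < n; i++)
		put_page(pages[i]);
	return n;
}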
| gpl-2.0 |
maniacx/android_kernel_htcleo-3.0 | drivers/mmc/host/dw_mmc.c | 1718 | 44377 | /*
* Synopsys DesignWare Multimedia Card Interface driver
* (Based on NXP driver for lpc 31xx)
*
* Copyright (C) 2009 NXP Semiconductors
* Copyright (C) 2009, 2010 Imagination Technologies Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/dw_mmc.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
#include "dw_mmc.h"
/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DTO | SDMMC_INT_DCRC | \
SDMMC_INT_HTO | SDMMC_INT_SBE | \
SDMMC_INT_EBE)
#define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
SDMMC_INT_RESP_ERR)
#define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
#define DW_MCI_SEND_STATUS 1
#define DW_MCI_RECV_STATUS 2
#define DW_MCI_DMA_THRESHOLD 16
#ifdef CONFIG_MMC_DW_IDMAC
struct idmac_desc {
u32 des0; /* Control Descriptor */
#define IDMAC_DES0_DIC BIT(1)
#define IDMAC_DES0_LD BIT(2)
#define IDMAC_DES0_FD BIT(3)
#define IDMAC_DES0_CH BIT(4)
#define IDMAC_DES0_ER BIT(5)
#define IDMAC_DES0_CES BIT(30)
#define IDMAC_DES0_OWN BIT(31)
u32 des1; /* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
((d)->des1 = ((d)->des1 & 0x03ffc000) | ((s) & 0x3fff))
u32 des2; /* buffer 1 physical address */
u32 des3; /* buffer 2 physical address */
};
#endif /* CONFIG_MMC_DW_IDMAC */
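/*
 * Informational note on the descriptor layout above: the buffer-1 byte
 * count in des1 occupies bits [13:0], so a single descriptor covers at
 * most 0x3fff bytes; e.g. IDMAC_SET_BUFFER1_SIZE(desc, 4096) masks 4096
 * into the low 14 bits and preserves the buffer-2 size in bits [25:14].
 */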
/**
* struct dw_mci_slot - MMC slot state
* @mmc: The mmc_host representing this slot.
* @host: The MMC controller this slot is using.
* @ctype: Card type for this slot.
* @mrq: mmc_request currently being processed or waiting to be
* processed, or NULL when the slot is idle.
* @queue_node: List node for placing this node in the @queue list of
* &struct dw_mci.
* @clock: Clock rate configured by set_ios(). Protected by host->lock.
* @flags: Random state bits associated with the slot.
* @id: Number of this slot.
* @last_detect_state: Most recently observed card detect state.
*/
struct dw_mci_slot {
struct mmc_host *mmc;
struct dw_mci *host;
u32 ctype;
struct mmc_request *mrq;
struct list_head queue_node;
unsigned int clock;
unsigned long flags;
#define DW_MMC_CARD_PRESENT 0
#define DW_MMC_CARD_NEED_INIT 1
int id;
int last_detect_state;
};
#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
{
struct dw_mci_slot *slot = s->private;
struct mmc_request *mrq;
struct mmc_command *cmd;
struct mmc_command *stop;
struct mmc_data *data;
/* Make sure we get a consistent snapshot */
spin_lock_bh(&slot->host->lock);
mrq = slot->mrq;
if (mrq) {
cmd = mrq->cmd;
data = mrq->data;
stop = mrq->stop;
if (cmd)
seq_printf(s,
"CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
cmd->opcode, cmd->arg, cmd->flags,
cmd->resp[0], cmd->resp[1], cmd->resp[2],
cmd->resp[3], cmd->error);
if (data)
seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
data->bytes_xfered, data->blocks,
data->blksz, data->flags, data->error);
if (stop)
seq_printf(s,
"CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
stop->opcode, stop->arg, stop->flags,
stop->resp[0], stop->resp[1], stop->resp[2],
stop->resp[3], stop->error);
}
spin_unlock_bh(&slot->host->lock);
return 0;
}
static int dw_mci_req_open(struct inode *inode, struct file *file)
{
return single_open(file, dw_mci_req_show, inode->i_private);
}
static const struct file_operations dw_mci_req_fops = {
.owner = THIS_MODULE,
.open = dw_mci_req_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int dw_mci_regs_show(struct seq_file *s, void *v)
{
seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
return 0;
}
static int dw_mci_regs_open(struct inode *inode, struct file *file)
{
return single_open(file, dw_mci_regs_show, inode->i_private);
}
static const struct file_operations dw_mci_regs_fops = {
.owner = THIS_MODULE,
.open = dw_mci_regs_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
struct mmc_host *mmc = slot->mmc;
struct dw_mci *host = slot->host;
struct dentry *root;
struct dentry *node;
root = mmc->debugfs_root;
if (!root)
return;
node = debugfs_create_file("regs", S_IRUSR, root, host,
&dw_mci_regs_fops);
if (!node)
goto err;
node = debugfs_create_file("req", S_IRUSR, root, slot,
&dw_mci_req_fops);
if (!node)
goto err;
node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
if (!node)
goto err;
node = debugfs_create_x32("pending_events", S_IRUSR, root,
(u32 *)&host->pending_events);
if (!node)
goto err;
node = debugfs_create_x32("completed_events", S_IRUSR, root,
(u32 *)&host->completed_events);
if (!node)
goto err;
return;
err:
dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
#endif /* defined(CONFIG_DEBUG_FS) */
static void dw_mci_set_timeout(struct dw_mci *host)
{
/* timeout (maximum) */
mci_writel(host, TMOUT, 0xffffffff);
}
static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
struct mmc_data *data;
u32 cmdr;
cmd->error = -EINPROGRESS;
cmdr = cmd->opcode;
if (cmdr == MMC_STOP_TRANSMISSION)
cmdr |= SDMMC_CMD_STOP;
else
cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
if (cmd->flags & MMC_RSP_PRESENT) {
/* We expect a response, so set this bit */
cmdr |= SDMMC_CMD_RESP_EXP;
if (cmd->flags & MMC_RSP_136)
cmdr |= SDMMC_CMD_RESP_LONG;
}
if (cmd->flags & MMC_RSP_CRC)
cmdr |= SDMMC_CMD_RESP_CRC;
data = cmd->data;
if (data) {
cmdr |= SDMMC_CMD_DAT_EXP;
if (data->flags & MMC_DATA_STREAM)
cmdr |= SDMMC_CMD_STRM_MODE;
if (data->flags & MMC_DATA_WRITE)
cmdr |= SDMMC_CMD_DAT_WR;
}
return cmdr;
}
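/*
 * Illustrative example (values assumed, not from the driver): for a
 * single-block read such as CMD17, cmd->flags carries MMC_RSP_PRESENT
 * and MMC_RSP_CRC but not MMC_RSP_136, and cmd->data is a read, so the
 * function above returns
 *
 *   17 | SDMMC_CMD_PRV_DAT_WAIT | SDMMC_CMD_RESP_EXP |
 *   SDMMC_CMD_RESP_CRC | SDMMC_CMD_DAT_EXP
 *
 * i.e. wait for previous data, expect a short CRC-checked response,
 * and expect read data.
 */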
static void dw_mci_start_command(struct dw_mci *host,
struct mmc_command *cmd, u32 cmd_flags)
{
host->cmd = cmd;
dev_vdbg(&host->pdev->dev,
"start command: ARGR=0x%08x CMDR=0x%08x\n",
cmd->arg, cmd_flags);
mci_writel(host, CMDARG, cmd->arg);
wmb();
mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
}
static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
{
dw_mci_start_command(host, data->stop, host->stop_cmdr);
}
/* DMA interface functions */
static void dw_mci_stop_dma(struct dw_mci *host)
{
if (host->use_dma) {
host->dma_ops->stop(host);
host->dma_ops->cleanup(host);
} else {
/* Data transfer was stopped by the interrupt handler */
set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
}
#ifdef CONFIG_MMC_DW_IDMAC
static void dw_mci_dma_cleanup(struct dw_mci *host)
{
struct mmc_data *data = host->data;
if (data)
dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len,
((data->flags & MMC_DATA_WRITE)
? DMA_TO_DEVICE : DMA_FROM_DEVICE));
}
static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
u32 temp;
/* Disable and reset the IDMAC interface */
temp = mci_readl(host, CTRL);
temp &= ~SDMMC_CTRL_USE_IDMAC;
temp |= SDMMC_CTRL_DMA_RESET;
mci_writel(host, CTRL, temp);
/* Stop the IDMAC running */
temp = mci_readl(host, BMOD);
temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
mci_writel(host, BMOD, temp);
}
static void dw_mci_idmac_complete_dma(struct dw_mci *host)
{
struct mmc_data *data = host->data;
dev_vdbg(&host->pdev->dev, "DMA complete\n");
host->dma_ops->cleanup(host);
/*
* If the card was removed, data will be NULL. No point in trying to
* send the stop command or waiting for NBUSY in this case.
*/
if (data) {
set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
tasklet_schedule(&host->tasklet);
}
}
static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
unsigned int sg_len)
{
int i;
struct idmac_desc *desc = host->sg_cpu;
for (i = 0; i < sg_len; i++, desc++) {
unsigned int length = sg_dma_len(&data->sg[i]);
u32 mem_addr = sg_dma_address(&data->sg[i]);
/* Set the OWN bit and disable interrupts for this descriptor */
desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
/* Buffer length */
IDMAC_SET_BUFFER1_SIZE(desc, length);
/* Physical address to DMA to/from */
desc->des2 = mem_addr;
}
/* Set first descriptor */
desc = host->sg_cpu;
desc->des0 |= IDMAC_DES0_FD;
/* Set last descriptor */
desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
desc->des0 |= IDMAC_DES0_LD;
wmb();
}
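/*
 * Illustrative example: with sg_len == 2 the loop above leaves
 *
 *   desc[0].des0 = OWN | DIC | CH | FD  (first, chained, completion
 *                                        interrupt suppressed)
 *   desc[1].des0 = OWN | LD             (last; CH and DIC cleared so
 *                                        the IDMAC interrupts here)
 *
 * with each descriptor's des1 holding the segment length and des2 the
 * segment's DMA address.
 */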
static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
u32 temp;
dw_mci_translate_sglist(host, host->data, sg_len);
/* Select IDMAC interface */
temp = mci_readl(host, CTRL);
temp |= SDMMC_CTRL_USE_IDMAC;
mci_writel(host, CTRL, temp);
wmb();
/* Enable the IDMAC */
temp = mci_readl(host, BMOD);
temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
mci_writel(host, BMOD, temp);
/* Start it running */
mci_writel(host, PLDMND, 1);
}
static int dw_mci_idmac_init(struct dw_mci *host)
{
struct idmac_desc *p;
int i;
/* Number of descriptors in the ring buffer */
host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
/* Forward link the descriptor list */
for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
/* Set the last descriptor as the end-of-ring descriptor */
p->des3 = host->sg_dma;
p->des0 = IDMAC_DES0_ER;
/* Mask out interrupts - get Tx & Rx complete only */
mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
SDMMC_IDMAC_INT_TI);
/* Set the descriptor base address */
mci_writel(host, DBADDR, host->sg_dma);
return 0;
}
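/*
 * Illustrative sizing (assuming a 4 KiB PAGE_SIZE and the 16-byte
 * struct idmac_desc above): ring_size = 4096 / 16 = 256 descriptors,
 * each des3 pointing at the DMA address of the next descriptor, and the
 * final des3 wrapping back to host->sg_dma with IDMAC_DES0_ER set.
 */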
static struct dw_mci_dma_ops dw_mci_idmac_ops = {
.init = dw_mci_idmac_init,
.start = dw_mci_idmac_start_dma,
.stop = dw_mci_idmac_stop_dma,
.complete = dw_mci_idmac_complete_dma,
.cleanup = dw_mci_dma_cleanup,
};
#endif /* CONFIG_MMC_DW_IDMAC */
static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
struct scatterlist *sg;
unsigned int i, direction, sg_len;
u32 temp;
/* If we don't have a channel, we can't do DMA */
if (!host->use_dma)
return -ENODEV;
/*
* We don't do DMA on "complex" transfers, i.e. with
* non-word-aligned buffers or lengths. Also, we don't bother
* with all the DMA setup overhead for short transfers.
*/
if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
return -EINVAL;
if (data->blksz & 3)
return -EINVAL;
for_each_sg(data->sg, sg, data->sg_len, i) {
if (sg->offset & 3 || sg->length & 3)
return -EINVAL;
}
if (data->flags & MMC_DATA_READ)
direction = DMA_FROM_DEVICE;
else
direction = DMA_TO_DEVICE;
sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len,
direction);
dev_vdbg(&host->pdev->dev,
"sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
(unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
sg_len);
/* Enable the DMA interface */
temp = mci_readl(host, CTRL);
temp |= SDMMC_CTRL_DMA_ENABLE;
mci_writel(host, CTRL, temp);
/* Disable RX/TX IRQs, let DMA handle it */
temp = mci_readl(host, INTMASK);
temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
mci_writel(host, INTMASK, temp);
host->dma_ops->start(host, sg_len);
return 0;
}
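/*
 * Illustrative example: a 512-byte single-block read from a scatterlist
 * whose entries are 4-byte aligned passes all three checks above
 * (provided DW_MCI_DMA_THRESHOLD is at most 512) and is handed to the
 * DMA engine; a transfer with a misaligned sg->offset returns -EINVAL
 * and falls back to PIO in dw_mci_submit_data() below.
 */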
static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
u32 temp;
data->error = -EINPROGRESS;
WARN_ON(host->data);
host->sg = NULL;
host->data = data;
if (dw_mci_submit_data_dma(host, data)) {
host->sg = data->sg;
host->pio_offset = 0;
if (data->flags & MMC_DATA_READ)
host->dir_status = DW_MCI_RECV_STATUS;
else
host->dir_status = DW_MCI_SEND_STATUS;
temp = mci_readl(host, INTMASK);
temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
mci_writel(host, INTMASK, temp);
temp = mci_readl(host, CTRL);
temp &= ~SDMMC_CTRL_DMA_ENABLE;
mci_writel(host, CTRL, temp);
}
}
static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
struct dw_mci *host = slot->host;
unsigned long timeout = jiffies + msecs_to_jiffies(500);
unsigned int cmd_status = 0;
mci_writel(host, CMDARG, arg);
wmb();
mci_writel(host, CMD, SDMMC_CMD_START | cmd);
while (time_before(jiffies, timeout)) {
cmd_status = mci_readl(host, CMD);
if (!(cmd_status & SDMMC_CMD_START))
return;
}
dev_err(&slot->mmc->class_dev,
"Timeout sending command (cmd %#x arg %#x status %#x)\n",
cmd, arg, cmd_status);
}
static void dw_mci_setup_bus(struct dw_mci_slot *slot)
{
struct dw_mci *host = slot->host;
u32 div;
if (slot->clock != host->current_speed) {
if (host->bus_hz % slot->clock)
/*
* move the + 1 after the divide to prevent
* over-clocking the card.
*/
div = ((host->bus_hz / slot->clock) >> 1) + 1;
else
div = (host->bus_hz / slot->clock) >> 1;
dev_info(&slot->mmc->class_dev,
"Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ"
" div = %d)\n", slot->id, host->bus_hz, slot->clock,
div ? ((host->bus_hz / div) >> 1) : host->bus_hz, div);
/* disable clock */
mci_writel(host, CLKENA, 0);
mci_writel(host, CLKSRC, 0);
/* inform CIU */
mci_send_cmd(slot,
SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
/* set clock to desired speed */
mci_writel(host, CLKDIV, div);
/* inform CIU */
mci_send_cmd(slot,
SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
/* enable clock */
mci_writel(host, CLKENA, SDMMC_CLKEN_ENABLE |
SDMMC_CLKEN_LOW_PWR);
/* inform CIU */
mci_send_cmd(slot,
SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
host->current_speed = slot->clock;
}
/* Set the current slot bus width */
mci_writel(host, CTYPE, slot->ctype);
}
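/*
 * Worked divider example (illustrative values): with host->bus_hz =
 * 50000000 and slot->clock = 400000, 50000000 % 400000 is non-zero, so
 * div = ((50000000 / 400000) >> 1) + 1 = 63 and the actual card clock
 * is 50000000 / (2 * 63), roughly 396.8 kHz. Rounding the divider up
 * keeps the card at or below the requested rate.
 */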
static void dw_mci_start_request(struct dw_mci *host,
struct dw_mci_slot *slot)
{
struct mmc_request *mrq;
struct mmc_command *cmd;
struct mmc_data *data;
u32 cmdflags;
mrq = slot->mrq;
if (host->pdata->select_slot)
host->pdata->select_slot(slot->id);
/* Slot specific timing and width adjustment */
dw_mci_setup_bus(slot);
host->cur_slot = slot;
host->mrq = mrq;
host->pending_events = 0;
host->completed_events = 0;
host->data_status = 0;
data = mrq->data;
if (data) {
dw_mci_set_timeout(host);
mci_writel(host, BYTCNT, data->blksz*data->blocks);
mci_writel(host, BLKSIZ, data->blksz);
}
cmd = mrq->cmd;
cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
/* this is the first command, send the initialization clock */
if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
cmdflags |= SDMMC_CMD_INIT;
if (data) {
dw_mci_submit_data(host, data);
wmb();
}
dw_mci_start_command(host, cmd, cmdflags);
if (mrq->stop)
host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
}
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
struct mmc_request *mrq)
{
dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
host->state);
spin_lock_bh(&host->lock);
slot->mrq = mrq;
if (host->state == STATE_IDLE) {
host->state = STATE_SENDING_CMD;
dw_mci_start_request(host, slot);
} else {
list_add_tail(&slot->queue_node, &host->queue);
}
spin_unlock_bh(&host->lock);
}
static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
WARN_ON(slot->mrq);
if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
mrq->cmd->error = -ENOMEDIUM;
mmc_request_done(mmc, mrq);
return;
}
/* We don't support multiple blocks of weird lengths. */
dw_mci_queue_request(host, slot, mrq);
}
static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct dw_mci_slot *slot = mmc_priv(mmc);
u32 regs;
/* set default 1 bit mode */
slot->ctype = SDMMC_CTYPE_1BIT;
switch (ios->bus_width) {
case MMC_BUS_WIDTH_1:
slot->ctype = SDMMC_CTYPE_1BIT;
break;
case MMC_BUS_WIDTH_4:
slot->ctype = SDMMC_CTYPE_4BIT;
break;
case MMC_BUS_WIDTH_8:
slot->ctype = SDMMC_CTYPE_8BIT;
break;
}
/* DDR mode set */
if (ios->ddr) {
regs = mci_readl(slot->host, UHS_REG);
regs |= (0x1 << slot->id) << 16;
mci_writel(slot->host, UHS_REG, regs);
}
if (ios->clock) {
/*
* Use mirror of ios->clock to prevent race with mmc
* core ios update when finding the minimum.
*/
slot->clock = ios->clock;
}
switch (ios->power_mode) {
case MMC_POWER_UP:
set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
break;
default:
break;
}
}
static int dw_mci_get_ro(struct mmc_host *mmc)
{
int read_only;
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci_board *brd = slot->host->pdata;
/* Use platform get_ro function, else try on board write protect */
if (brd->get_ro)
read_only = brd->get_ro(slot->id);
else
read_only =
mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
dev_dbg(&mmc->class_dev, "card is %s\n",
read_only ? "read-only" : "read-write");
return read_only;
}
static int dw_mci_get_cd(struct mmc_host *mmc)
{
int present;
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci_board *brd = slot->host->pdata;
/* Use platform get_cd function, else try onboard card detect */
if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
present = 1;
else if (brd->get_cd)
present = !brd->get_cd(slot->id);
else
present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
== 0 ? 1 : 0;
if (present)
dev_dbg(&mmc->class_dev, "card is present\n");
else
dev_dbg(&mmc->class_dev, "card is not present\n");
return present;
}
static const struct mmc_host_ops dw_mci_ops = {
.request = dw_mci_request,
.set_ios = dw_mci_set_ios,
.get_ro = dw_mci_get_ro,
.get_cd = dw_mci_get_cd,
};
static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
__releases(&host->lock)
__acquires(&host->lock)
{
struct dw_mci_slot *slot;
struct mmc_host *prev_mmc = host->cur_slot->mmc;
WARN_ON(host->cmd || host->data);
host->cur_slot->mrq = NULL;
host->mrq = NULL;
if (!list_empty(&host->queue)) {
slot = list_entry(host->queue.next,
struct dw_mci_slot, queue_node);
list_del(&slot->queue_node);
dev_vdbg(&host->pdev->dev, "list not empty: %s is next\n",
mmc_hostname(slot->mmc));
host->state = STATE_SENDING_CMD;
dw_mci_start_request(host, slot);
} else {
dev_vdbg(&host->pdev->dev, "list empty\n");
host->state = STATE_IDLE;
}
spin_unlock(&host->lock);
mmc_request_done(prev_mmc, mrq);
spin_lock(&host->lock);
}
static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
u32 status = host->cmd_status;
host->cmd_status = 0;
/* Read the response from the card (up to 16 bytes) */
if (cmd->flags & MMC_RSP_PRESENT) {
if (cmd->flags & MMC_RSP_136) {
cmd->resp[3] = mci_readl(host, RESP0);
cmd->resp[2] = mci_readl(host, RESP1);
cmd->resp[1] = mci_readl(host, RESP2);
cmd->resp[0] = mci_readl(host, RESP3);
} else {
cmd->resp[0] = mci_readl(host, RESP0);
cmd->resp[1] = 0;
cmd->resp[2] = 0;
cmd->resp[3] = 0;
}
}
if (status & SDMMC_INT_RTO)
cmd->error = -ETIMEDOUT;
else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
cmd->error = -EILSEQ;
else if (status & SDMMC_INT_RESP_ERR)
cmd->error = -EIO;
else
cmd->error = 0;
if (cmd->error) {
/* newer ip versions need a delay between retries */
if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
mdelay(20);
if (cmd->data) {
host->data = NULL;
dw_mci_stop_dma(host);
}
}
}
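/*
 * Note on the response ordering above: for a 136-bit response (e.g. R2,
 * used for CID/CSD) the controller's RESP0 register holds the least
 * significant word, so it lands in resp[3]; the mmc core expects
 * resp[0] to hold the most significant word, which comes from RESP3.
 */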
static void dw_mci_tasklet_func(unsigned long priv)
{
struct dw_mci *host = (struct dw_mci *)priv;
struct mmc_data *data;
struct mmc_command *cmd;
enum dw_mci_state state;
enum dw_mci_state prev_state;
u32 status;
spin_lock(&host->lock);
state = host->state;
data = host->data;
do {
prev_state = state;
switch (state) {
case STATE_IDLE:
break;
case STATE_SENDING_CMD:
if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
&host->pending_events))
break;
cmd = host->cmd;
host->cmd = NULL;
set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
dw_mci_command_complete(host, host->mrq->cmd);
if (!host->mrq->data || cmd->error) {
dw_mci_request_end(host, host->mrq);
goto unlock;
}
prev_state = state = STATE_SENDING_DATA;
/* fall through */
case STATE_SENDING_DATA:
if (test_and_clear_bit(EVENT_DATA_ERROR,
&host->pending_events)) {
dw_mci_stop_dma(host);
if (data->stop)
send_stop_cmd(host, data);
state = STATE_DATA_ERROR;
break;
}
if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
&host->pending_events))
break;
set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
prev_state = state = STATE_DATA_BUSY;
/* fall through */
case STATE_DATA_BUSY:
if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
&host->pending_events))
break;
host->data = NULL;
set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
status = host->data_status;
if (status & DW_MCI_DATA_ERROR_FLAGS) {
if (status & SDMMC_INT_DTO) {
dev_err(&host->pdev->dev,
"data timeout error\n");
data->error = -ETIMEDOUT;
} else if (status & SDMMC_INT_DCRC) {
dev_err(&host->pdev->dev,
"data CRC error\n");
data->error = -EILSEQ;
} else {
dev_err(&host->pdev->dev,
"data FIFO error "
"(status=%08x)\n",
status);
data->error = -EIO;
}
} else {
data->bytes_xfered = data->blocks * data->blksz;
data->error = 0;
}
if (!data->stop) {
dw_mci_request_end(host, host->mrq);
goto unlock;
}
prev_state = state = STATE_SENDING_STOP;
if (!data->error)
send_stop_cmd(host, data);
/* fall through */
case STATE_SENDING_STOP:
if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
&host->pending_events))
break;
host->cmd = NULL;
dw_mci_command_complete(host, host->mrq->stop);
dw_mci_request_end(host, host->mrq);
goto unlock;
case STATE_DATA_ERROR:
if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
&host->pending_events))
break;
state = STATE_DATA_BUSY;
break;
}
} while (state != prev_state);
host->state = state;
unlock:
spin_unlock(&host->lock);
}
static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
{
u16 *pdata = (u16 *)buf;
WARN_ON(cnt % 2 != 0);
cnt = cnt >> 1;
while (cnt > 0) {
mci_writew(host, DATA, *pdata++);
cnt--;
}
}
static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
{
u16 *pdata = (u16 *)buf;
WARN_ON(cnt % 2 != 0);
cnt = cnt >> 1;
while (cnt > 0) {
*pdata++ = mci_readw(host, DATA);
cnt--;
}
}
static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{
u32 *pdata = (u32 *)buf;
WARN_ON(cnt % 4 != 0);
WARN_ON((unsigned long)pdata & 0x3);
cnt = cnt >> 2;
while (cnt > 0) {
mci_writel(host, DATA, *pdata++);
cnt--;
}
}
static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
u32 *pdata = (u32 *)buf;
WARN_ON(cnt % 4 != 0);
WARN_ON((unsigned long)pdata & 0x3);
cnt = cnt >> 2;
while (cnt > 0) {
*pdata++ = mci_readl(host, DATA);
cnt--;
}
}
static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
u64 *pdata = (u64 *)buf;
WARN_ON(cnt % 8 != 0);
cnt = cnt >> 3;
while (cnt > 0) {
mci_writeq(host, DATA, *pdata++);
cnt--;
}
}
static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
{
u64 *pdata = (u64 *)buf;
WARN_ON(cnt % 8 != 0);
cnt = cnt >> 3;
while (cnt > 0) {
*pdata++ = mci_readq(host, DATA);
cnt--;
}
}
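/*
 * These push/pull helpers are paired with host->data_shift (1, 2 or 3),
 * chosen in dw_mci_probe() from the HCON data-width field, so that
 * "FIFO item count << data_shift" converts SDMMC_GET_FCNT() values into
 * bytes: e.g. 8 items in a 32-bit FIFO is 8 << 2 = 32 bytes.
 */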
static void dw_mci_read_data_pio(struct dw_mci *host)
{
struct scatterlist *sg = host->sg;
void *buf = sg_virt(sg);
unsigned int offset = host->pio_offset;
struct mmc_data *data = host->data;
int shift = host->data_shift;
u32 status;
unsigned int nbytes = 0, len;
do {
len = SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift;
if (offset + len <= sg->length) {
host->pull_data(host, (void *)(buf + offset), len);
offset += len;
nbytes += len;
if (offset == sg->length) {
flush_dcache_page(sg_page(sg));
host->sg = sg = sg_next(sg);
if (!sg)
goto done;
offset = 0;
buf = sg_virt(sg);
}
} else {
unsigned int remaining = sg->length - offset;
host->pull_data(host, (void *)(buf + offset),
remaining);
nbytes += remaining;
flush_dcache_page(sg_page(sg));
host->sg = sg = sg_next(sg);
if (!sg)
goto done;
offset = len - remaining;
buf = sg_virt(sg);
host->pull_data(host, buf, offset);
nbytes += offset;
}
status = mci_readl(host, MINTSTS);
mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
if (status & DW_MCI_DATA_ERROR_FLAGS) {
host->data_status = status;
data->bytes_xfered += nbytes;
smp_wmb();
set_bit(EVENT_DATA_ERROR, &host->pending_events);
tasklet_schedule(&host->tasklet);
return;
}
} while (status & SDMMC_INT_RXDR); /* if RXDR is ready, read again */
len = SDMMC_GET_FCNT(mci_readl(host, STATUS));
host->pio_offset = offset;
data->bytes_xfered += nbytes;
return;
done:
data->bytes_xfered += nbytes;
smp_wmb();
set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
static void dw_mci_write_data_pio(struct dw_mci *host)
{
struct scatterlist *sg = host->sg;
void *buf = sg_virt(sg);
unsigned int offset = host->pio_offset;
struct mmc_data *data = host->data;
int shift = host->data_shift;
u32 status;
unsigned int nbytes = 0, len;
do {
len = SDMMC_FIFO_SZ -
(SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift);
if (offset + len <= sg->length) {
host->push_data(host, (void *)(buf + offset), len);
offset += len;
nbytes += len;
if (offset == sg->length) {
host->sg = sg = sg_next(sg);
if (!sg)
goto done;
offset = 0;
buf = sg_virt(sg);
}
} else {
unsigned int remaining = sg->length - offset;
host->push_data(host, (void *)(buf + offset),
remaining);
nbytes += remaining;
host->sg = sg = sg_next(sg);
if (!sg)
goto done;
offset = len - remaining;
buf = sg_virt(sg);
host->push_data(host, (void *)buf, offset);
nbytes += offset;
}
status = mci_readl(host, MINTSTS);
mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
if (status & DW_MCI_DATA_ERROR_FLAGS) {
host->data_status = status;
data->bytes_xfered += nbytes;
smp_wmb();
set_bit(EVENT_DATA_ERROR, &host->pending_events);
tasklet_schedule(&host->tasklet);
return;
}
} while (status & SDMMC_INT_TXDR); /* if TXDR is set, write again */
host->pio_offset = offset;
data->bytes_xfered += nbytes;
return;
done:
data->bytes_xfered += nbytes;
smp_wmb();
set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
if (!host->cmd_status)
host->cmd_status = status;
smp_wmb();
set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
tasklet_schedule(&host->tasklet);
}
static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
struct dw_mci *host = dev_id;
u32 status, pending;
unsigned int pass_count = 0;
do {
status = mci_readl(host, RINTSTS);
pending = mci_readl(host, MINTSTS); /* read-only mask reg */
/*
* DTO fix - version 2.10a and below, and only if internal DMA
* is configured.
*/
if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
if (!pending &&
((mci_readl(host, STATUS) >> 17) & 0x1fff))
pending |= SDMMC_INT_DATA_OVER;
}
if (!pending)
break;
if (pending & DW_MCI_CMD_ERROR_FLAGS) {
mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
host->cmd_status = status;
smp_wmb();
set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
tasklet_schedule(&host->tasklet);
}
if (pending & DW_MCI_DATA_ERROR_FLAGS) {
/* if there is an error report DATA_ERROR */
mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
host->data_status = status;
smp_wmb();
set_bit(EVENT_DATA_ERROR, &host->pending_events);
tasklet_schedule(&host->tasklet);
}
if (pending & SDMMC_INT_DATA_OVER) {
mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
if (!host->data_status)
host->data_status = status;
smp_wmb();
if (host->dir_status == DW_MCI_RECV_STATUS) {
if (host->sg != NULL)
dw_mci_read_data_pio(host);
}
set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
tasklet_schedule(&host->tasklet);
}
if (pending & SDMMC_INT_RXDR) {
mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
if (host->sg)
dw_mci_read_data_pio(host);
}
if (pending & SDMMC_INT_TXDR) {
mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
if (host->sg)
dw_mci_write_data_pio(host);
}
if (pending & SDMMC_INT_CMD_DONE) {
mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
dw_mci_cmd_interrupt(host, status);
}
if (pending & SDMMC_INT_CD) {
mci_writel(host, RINTSTS, SDMMC_INT_CD);
tasklet_schedule(&host->card_tasklet);
}
} while (pass_count++ < 5);
#ifdef CONFIG_MMC_DW_IDMAC
/* Handle DMA interrupts */
pending = mci_readl(host, IDSTS);
if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
host->dma_ops->complete(host);
}
#endif
return IRQ_HANDLED;
}
static void dw_mci_tasklet_card(unsigned long data)
{
struct dw_mci *host = (struct dw_mci *)data;
int i;
for (i = 0; i < host->num_slots; i++) {
struct dw_mci_slot *slot = host->slot[i];
struct mmc_host *mmc = slot->mmc;
struct mmc_request *mrq;
int present;
u32 ctrl;
present = dw_mci_get_cd(mmc);
while (present != slot->last_detect_state) {
spin_lock(&host->lock);
dev_dbg(&slot->mmc->class_dev, "card %s\n",
present ? "inserted" : "removed");
/* Card change detected */
slot->last_detect_state = present;
/* Power up slot */
if (present != 0) {
if (host->pdata->setpower)
host->pdata->setpower(slot->id,
mmc->ocr_avail);
set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
}
/* Clean up queue if present */
mrq = slot->mrq;
if (mrq) {
if (mrq == host->mrq) {
host->data = NULL;
host->cmd = NULL;
switch (host->state) {
case STATE_IDLE:
break;
case STATE_SENDING_CMD:
mrq->cmd->error = -ENOMEDIUM;
if (!mrq->data)
break;
/* fall through */
case STATE_SENDING_DATA:
mrq->data->error = -ENOMEDIUM;
dw_mci_stop_dma(host);
break;
case STATE_DATA_BUSY:
case STATE_DATA_ERROR:
if (mrq->data->error == -EINPROGRESS)
mrq->data->error = -ENOMEDIUM;
if (!mrq->stop)
break;
/* fall through */
case STATE_SENDING_STOP:
mrq->stop->error = -ENOMEDIUM;
break;
}
dw_mci_request_end(host, mrq);
} else {
list_del(&slot->queue_node);
mrq->cmd->error = -ENOMEDIUM;
if (mrq->data)
mrq->data->error = -ENOMEDIUM;
if (mrq->stop)
mrq->stop->error = -ENOMEDIUM;
spin_unlock(&host->lock);
mmc_request_done(slot->mmc, mrq);
spin_lock(&host->lock);
}
}
/* Power down slot */
if (present == 0) {
if (host->pdata->setpower)
host->pdata->setpower(slot->id, 0);
clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
/*
* Clear down the FIFO - doing so generates a
* block interrupt, so set the scatter-gather
* pointer to NULL first.
*/
host->sg = NULL;
ctrl = mci_readl(host, CTRL);
ctrl |= SDMMC_CTRL_FIFO_RESET;
mci_writel(host, CTRL, ctrl);
#ifdef CONFIG_MMC_DW_IDMAC
ctrl = mci_readl(host, BMOD);
ctrl |= 0x01; /* Software reset of DMA */
mci_writel(host, BMOD, ctrl);
#endif
}
spin_unlock(&host->lock);
present = dw_mci_get_cd(mmc);
}
mmc_detect_change(slot->mmc,
msecs_to_jiffies(host->pdata->detect_delay_ms));
}
}
static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
{
struct mmc_host *mmc;
struct dw_mci_slot *slot;
mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), &host->pdev->dev);
if (!mmc)
return -ENOMEM;
slot = mmc_priv(mmc);
slot->id = id;
slot->mmc = mmc;
slot->host = host;
mmc->ops = &dw_mci_ops;
mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510);
mmc->f_max = host->bus_hz;
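/*
 * Illustrative note: 510 above is 2 * 255. CLKDIV holds an 8-bit
 * divider and the CIU divides by twice its value, so bus_hz / 510 is
 * the slowest clock this host can generate.
 */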
if (host->pdata->get_ocr)
mmc->ocr_avail = host->pdata->get_ocr(id);
else
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
/*
* Start with slot power disabled, it will be enabled when a card
* is detected.
*/
if (host->pdata->setpower)
host->pdata->setpower(id, 0);
if (host->pdata->caps)
mmc->caps = host->pdata->caps;
else
mmc->caps = 0;
if (host->pdata->get_bus_wd)
if (host->pdata->get_bus_wd(slot->id) >= 4)
mmc->caps |= MMC_CAP_4_BIT_DATA;
if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED)
mmc->caps |= MMC_CAP_SD_HIGHSPEED;
#ifdef CONFIG_MMC_DW_IDMAC
mmc->max_segs = host->ring_size;
mmc->max_blk_size = 65536;
mmc->max_blk_count = host->ring_size;
mmc->max_seg_size = 0x1000;
mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
#else
if (host->pdata->blk_settings) {
mmc->max_segs = host->pdata->blk_settings->max_segs;
mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
mmc->max_req_size = host->pdata->blk_settings->max_req_size;
mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
} else {
/* Useful defaults if platform data is unset. */
mmc->max_segs = 64;
mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
mmc->max_blk_count = 512;
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
mmc->max_seg_size = mmc->max_req_size;
}
#endif /* CONFIG_MMC_DW_IDMAC */
host->vmmc = regulator_get(mmc_dev(mmc), "vmmc");
if (IS_ERR(host->vmmc)) {
printk(KERN_INFO "%s: no vmmc regulator found\n", mmc_hostname(mmc));
host->vmmc = NULL;
} else
regulator_enable(host->vmmc);
if (dw_mci_get_cd(mmc))
set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
else
clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
host->slot[id] = slot;
mmc_add_host(mmc);
#if defined(CONFIG_DEBUG_FS)
dw_mci_init_debugfs(slot);
#endif
/* Card initially undetected */
slot->last_detect_state = 0;
/*
* Card may have been plugged in prior to boot so we
* need to run the detect tasklet
*/
tasklet_schedule(&host->card_tasklet);
return 0;
}
static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
{
/* Shutdown detect IRQ */
if (slot->host->pdata->exit)
slot->host->pdata->exit(id);
/* Debugfs stuff is cleaned up by mmc core */
mmc_remove_host(slot->mmc);
slot->host->slot[id] = NULL;
mmc_free_host(slot->mmc);
}
static void dw_mci_init_dma(struct dw_mci *host)
{
/* Alloc memory for sg translation */
host->sg_cpu = dma_alloc_coherent(&host->pdev->dev, PAGE_SIZE,
&host->sg_dma, GFP_KERNEL);
if (!host->sg_cpu) {
dev_err(&host->pdev->dev, "%s: could not alloc DMA memory\n",
__func__);
goto no_dma;
}
/* Determine which DMA interface to use */
#ifdef CONFIG_MMC_DW_IDMAC
host->dma_ops = &dw_mci_idmac_ops;
dev_info(&host->pdev->dev, "Using internal DMA controller.\n");
#endif
if (!host->dma_ops)
goto no_dma;
if (host->dma_ops->init) {
if (host->dma_ops->init(host)) {
dev_err(&host->pdev->dev, "%s: Unable to initialize "
"DMA Controller.\n", __func__);
goto no_dma;
}
} else {
dev_err(&host->pdev->dev, "DMA initialization not found.\n");
goto no_dma;
}
host->use_dma = 1;
return;
no_dma:
dev_info(&host->pdev->dev, "Using PIO mode.\n");
host->use_dma = 0;
return;
}
static bool mci_wait_reset(struct device *dev, struct dw_mci *host)
{
unsigned long timeout = jiffies + msecs_to_jiffies(500);
unsigned int ctrl;
mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
SDMMC_CTRL_DMA_RESET));
/* wait till resets clear */
do {
ctrl = mci_readl(host, CTRL);
if (!(ctrl & (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
SDMMC_CTRL_DMA_RESET)))
return true;
} while (time_before(jiffies, timeout));
dev_err(dev, "Timeout resetting block (ctrl %#x)\n", ctrl);
return false;
}
static int dw_mci_probe(struct platform_device *pdev)
{
struct dw_mci *host;
struct resource *regs;
struct dw_mci_board *pdata;
int irq, ret, i, width;
u32 fifo_size;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs)
return -ENXIO;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
host = kzalloc(sizeof(struct dw_mci), GFP_KERNEL);
if (!host)
return -ENOMEM;
host->pdev = pdev;
host->pdata = pdata = pdev->dev.platform_data;
if (!pdata || !pdata->init) {
dev_err(&pdev->dev,
"Platform data must supply init function\n");
ret = -ENODEV;
goto err_freehost;
}
if (!pdata->select_slot && pdata->num_slots > 1) {
dev_err(&pdev->dev,
"Platform data must supply select_slot function\n");
ret = -ENODEV;
goto err_freehost;
}
if (!pdata->bus_hz) {
dev_err(&pdev->dev,
"Platform data must supply bus speed\n");
ret = -ENODEV;
goto err_freehost;
}
host->bus_hz = pdata->bus_hz;
host->quirks = pdata->quirks;
spin_lock_init(&host->lock);
INIT_LIST_HEAD(&host->queue);
ret = -ENOMEM;
host->regs = ioremap(regs->start, regs->end - regs->start + 1);
if (!host->regs)
goto err_freehost;
host->dma_ops = pdata->dma_ops;
dw_mci_init_dma(host);
/*
* Get the host data width - this assumes that HCON has been set with
* the correct values.
*/
i = (mci_readl(host, HCON) >> 7) & 0x7;
if (!i) {
host->push_data = dw_mci_push_data16;
host->pull_data = dw_mci_pull_data16;
width = 16;
host->data_shift = 1;
} else if (i == 2) {
host->push_data = dw_mci_push_data64;
host->pull_data = dw_mci_pull_data64;
width = 64;
host->data_shift = 3;
} else {
/* Check for a reserved value, and warn if it is */
WARN((i != 1),
"HCON reports a reserved host data width!\n"
"Defaulting to 32-bit access.\n");
host->push_data = dw_mci_push_data32;
host->pull_data = dw_mci_pull_data32;
width = 32;
host->data_shift = 2;
}
/* Reset all blocks */
if (!mci_wait_reset(&pdev->dev, host)) {
ret = -ENODEV;
goto err_dmaunmap;
}
/* Clear the interrupts for the host controller */
mci_writel(host, RINTSTS, 0xFFFFFFFF);
mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
/* Put in max timeout */
mci_writel(host, TMOUT, 0xFFFFFFFF);
/*
* FIFO threshold settings:
*   RX Mark  = fifo_size / 2 - 1
*   TX Mark  = fifo_size / 2
*   DMA Size = 8
*/
fifo_size = mci_readl(host, FIFOTH);
fifo_size = (fifo_size >> 16) & 0x7ff;
host->fifoth_val = ((0x2 << 28) | ((fifo_size/2 - 1) << 16) |
((fifo_size/2) << 0));
mci_writel(host, FIFOTH, host->fifoth_val);
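/*
 * Worked example (illustrative, following the code's assumption that
 * bits 26:16 of the FIFOTH reset value encode the FIFO depth): with
 * fifo_size = 32, fifoth_val becomes (0x2 << 28) | (15 << 16) | 16,
 * i.e. DMA multiple-transaction size 8, RX watermark 15, TX
 * watermark 16.
 */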
/* disable clock to CIU */
mci_writel(host, CLKENA, 0);
mci_writel(host, CLKSRC, 0);
tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
tasklet_init(&host->card_tasklet,
dw_mci_tasklet_card, (unsigned long)host);
ret = request_irq(irq, dw_mci_interrupt, 0, "dw-mci", host);
if (ret)
goto err_dmaunmap;
platform_set_drvdata(pdev, host);
if (host->pdata->num_slots)
host->num_slots = host->pdata->num_slots;
else
host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
/* We need at least one slot to succeed */
for (i = 0; i < host->num_slots; i++) {
ret = dw_mci_init_slot(host, i);
if (ret) {
ret = -ENODEV;
goto err_init_slot;
}
}
/*
* Enable interrupts for command done, data over, data empty, card det,
* receive ready and error such as transmit, receive timeout, crc error
*/
mci_writel(host, RINTSTS, 0xFFFFFFFF);
mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
SDMMC_INT_TXDR | SDMMC_INT_RXDR |
DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
dev_info(&pdev->dev, "DW MMC controller at irq %d, "
"%d bit host data width\n", irq, width);
if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
dev_info(&pdev->dev, "Internal DMAC interrupt fix enabled.\n");
return 0;
err_init_slot:
/* De-init any initialized slots */
while (i >= 0) {
if (host->slot[i])
dw_mci_cleanup_slot(host->slot[i], i);
i--;
}
free_irq(irq, host);
err_dmaunmap:
if (host->use_dma && host->dma_ops->exit)
host->dma_ops->exit(host);
dma_free_coherent(&host->pdev->dev, PAGE_SIZE,
host->sg_cpu, host->sg_dma);
iounmap(host->regs);
if (host->vmmc) {
regulator_disable(host->vmmc);
regulator_put(host->vmmc);
}
err_freehost:
kfree(host);
return ret;
}
static int __exit dw_mci_remove(struct platform_device *pdev)
{
struct dw_mci *host = platform_get_drvdata(pdev);
int i;
mci_writel(host, RINTSTS, 0xFFFFFFFF);
mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
platform_set_drvdata(pdev, NULL);
for (i = 0; i < host->num_slots; i++) {
dev_dbg(&pdev->dev, "remove slot %d\n", i);
if (host->slot[i])
dw_mci_cleanup_slot(host->slot[i], i);
}
/* disable clock to CIU */
mci_writel(host, CLKENA, 0);
mci_writel(host, CLKSRC, 0);
free_irq(platform_get_irq(pdev, 0), host);
dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
if (host->use_dma && host->dma_ops->exit)
host->dma_ops->exit(host);
if (host->vmmc) {
regulator_disable(host->vmmc);
regulator_put(host->vmmc);
}
iounmap(host->regs);
kfree(host);
return 0;
}
#ifdef CONFIG_PM
/*
* TODO: we should probably disable the clock to the card in the suspend path.
*/
static int dw_mci_suspend(struct platform_device *pdev, pm_message_t mesg)
{
int i, ret;
struct dw_mci *host = platform_get_drvdata(pdev);
for (i = 0; i < host->num_slots; i++) {
struct dw_mci_slot *slot = host->slot[i];
if (!slot)
continue;
ret = mmc_suspend_host(slot->mmc);
if (ret < 0) {
while (--i >= 0) {
slot = host->slot[i];
if (slot)
mmc_resume_host(host->slot[i]->mmc);
}
return ret;
}
}
if (host->vmmc)
regulator_disable(host->vmmc);
return 0;
}
static int dw_mci_resume(struct platform_device *pdev)
{
int i, ret;
struct dw_mci *host = platform_get_drvdata(pdev);
if (host->vmmc)
regulator_enable(host->vmmc);
if (host->dma_ops->init)
host->dma_ops->init(host);
if (!mci_wait_reset(&pdev->dev, host)) {
ret = -ENODEV;
return ret;
}
/* Restore the old value at FIFOTH register */
mci_writel(host, FIFOTH, host->fifoth_val);
mci_writel(host, RINTSTS, 0xFFFFFFFF);
mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
SDMMC_INT_TXDR | SDMMC_INT_RXDR |
DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
for (i = 0; i < host->num_slots; i++) {
struct dw_mci_slot *slot = host->slot[i];
if (!slot)
continue;
ret = mmc_resume_host(host->slot[i]->mmc);
if (ret < 0)
return ret;
}
return 0;
}
#else
#define dw_mci_suspend NULL
#define dw_mci_resume NULL
#endif /* CONFIG_PM */
static struct platform_driver dw_mci_driver = {
.remove = __exit_p(dw_mci_remove),
.suspend = dw_mci_suspend,
.resume = dw_mci_resume,
.driver = {
.name = "dw_mmc",
},
};
static int __init dw_mci_init(void)
{
return platform_driver_probe(&dw_mci_driver, dw_mci_probe);
}
static void __exit dw_mci_exit(void)
{
platform_driver_unregister(&dw_mci_driver);
}
module_init(dw_mci_init);
module_exit(dw_mci_exit);
MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
MODULE_AUTHOR("NXP Semiconductor VietNam");
MODULE_AUTHOR("Imagination Technologies Ltd");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
androidbftab1/bf-kernel | drivers/input/joystick/interact.c | 2230 | 7963 | /*
* Copyright (c) 2001 Vojtech Pavlik
*
* Based on the work of:
* Toby Deshane
*/
/*
* InterAct digital gamepad/joystick driver for Linux
*/
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Should you need to contact me, the author, you can do so either by
* e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
* Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/gameport.h>
#include <linux/input.h>
#include <linux/jiffies.h>
#define DRIVER_DESC "InterAct digital joystick driver"
MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
#define INTERACT_MAX_START 600 /* 400 us */
#define INTERACT_MAX_STROBE 60 /* 40 us */
#define INTERACT_MAX_LENGTH 32 /* 32 bits */
#define INTERACT_TYPE_HHFX 0 /* HammerHead/FX */
#define INTERACT_TYPE_PP8D 1 /* ProPad 8 */
struct interact {
struct gameport *gameport;
struct input_dev *dev;
int bads;
int reads;
unsigned char type;
unsigned char length;
char phys[32];
};
static short interact_abs_hhfx[] =
{ ABS_RX, ABS_RY, ABS_X, ABS_Y, ABS_HAT0X, ABS_HAT0Y, -1 };
static short interact_abs_pp8d[] =
{ ABS_X, ABS_Y, -1 };
static short interact_btn_hhfx[] =
{ BTN_TR, BTN_X, BTN_Y, BTN_Z, BTN_A, BTN_B, BTN_C, BTN_TL, BTN_TL2, BTN_TR2, BTN_MODE, BTN_SELECT, -1 };
static short interact_btn_pp8d[] =
{ BTN_C, BTN_TL, BTN_TR, BTN_A, BTN_B, BTN_Y, BTN_Z, BTN_X, -1 };
struct interact_type {
int id;
short *abs;
short *btn;
char *name;
unsigned char length;
unsigned char b8;
};
static struct interact_type interact_type[] = {
{ 0x6202, interact_abs_hhfx, interact_btn_hhfx, "InterAct HammerHead/FX", 32, 4 },
{ 0x53f8, interact_abs_pp8d, interact_btn_pp8d, "InterAct ProPad 8 Digital", 16, 0 },
{ 0 }};
/*
* interact_read_packet() reads InterAct joystick data.
*/
static int interact_read_packet(struct gameport *gameport, int length, u32 *data)
{
unsigned long flags;
unsigned char u, v;
unsigned int t, s;
int i;
i = 0;
data[0] = data[1] = data[2] = 0;
t = gameport_time(gameport, INTERACT_MAX_START);
s = gameport_time(gameport, INTERACT_MAX_STROBE);
local_irq_save(flags);
gameport_trigger(gameport);
v = gameport_read(gameport);
while (t > 0 && i < length) {
t--;
u = v; v = gameport_read(gameport);
if (v & ~u & 0x40) {
data[0] = (data[0] << 1) | ((v >> 4) & 1);
data[1] = (data[1] << 1) | ((v >> 5) & 1);
data[2] = (data[2] << 1) | ((v >> 7) & 1);
i++;
t = s;
}
}
local_irq_restore(flags);
return i;
}
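/*
 * Illustrative decode of the loop above: the pad streams three data
 * lines on gameport status bits 4, 5 and 7, clocked by rising edges on
 * bit 6; each clock shifts one bit into data[0], data[1] and data[2]
 * respectively, so a 32-clock burst yields three 32-bit words.
 */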
/*
* interact_poll() reads and analyzes InterAct joystick data.
*/
static void interact_poll(struct gameport *gameport)
{
struct interact *interact = gameport_get_drvdata(gameport);
struct input_dev *dev = interact->dev;
u32 data[3];
int i;
interact->reads++;
if (interact_read_packet(interact->gameport, interact->length, data) < interact->length) {
interact->bads++;
} else {
for (i = 0; i < 3; i++)
data[i] <<= INTERACT_MAX_LENGTH - interact->length;
switch (interact->type) {
case INTERACT_TYPE_HHFX:
for (i = 0; i < 4; i++)
input_report_abs(dev, interact_abs_hhfx[i], (data[i & 1] >> ((i >> 1) << 3)) & 0xff);
for (i = 0; i < 2; i++)
input_report_abs(dev, ABS_HAT0Y - i,
((data[1] >> ((i << 1) + 17)) & 1) - ((data[1] >> ((i << 1) + 16)) & 1));
for (i = 0; i < 8; i++)
input_report_key(dev, interact_btn_hhfx[i], (data[0] >> (i + 16)) & 1);
for (i = 0; i < 4; i++)
input_report_key(dev, interact_btn_hhfx[i + 8], (data[1] >> (i + 20)) & 1);
break;
case INTERACT_TYPE_PP8D:
for (i = 0; i < 2; i++)
input_report_abs(dev, interact_abs_pp8d[i],
((data[0] >> ((i << 1) + 20)) & 1) - ((data[0] >> ((i << 1) + 21)) & 1));
for (i = 0; i < 8; i++)
input_report_key(dev, interact_btn_pp8d[i], (data[1] >> (i + 16)) & 1);
break;
}
}
input_sync(dev);
}
/*
* interact_open() is a callback from the input open routine.
*/
static int interact_open(struct input_dev *dev)
{
struct interact *interact = input_get_drvdata(dev);
gameport_start_polling(interact->gameport);
return 0;
}
/*
* interact_close() is a callback from the input close routine.
*/
static void interact_close(struct input_dev *dev)
{
struct interact *interact = input_get_drvdata(dev);
gameport_stop_polling(interact->gameport);
}
/*
* interact_connect() probes for InterAct joysticks.
*/
static int interact_connect(struct gameport *gameport, struct gameport_driver *drv)
{
struct interact *interact;
struct input_dev *input_dev;
__u32 data[3];
int i, t;
int err;
interact = kzalloc(sizeof(struct interact), GFP_KERNEL);
input_dev = input_allocate_device();
if (!interact || !input_dev) {
err = -ENOMEM;
goto fail1;
}
interact->gameport = gameport;
interact->dev = input_dev;
gameport_set_drvdata(gameport, interact);
err = gameport_open(gameport, drv, GAMEPORT_MODE_RAW);
if (err)
goto fail1;
i = interact_read_packet(gameport, INTERACT_MAX_LENGTH * 2, data);
if (i != 32 || (data[0] >> 24) != 0x0c || (data[1] >> 24) != 0x02) {
err = -ENODEV;
goto fail2;
}
for (i = 0; interact_type[i].length; i++)
if (interact_type[i].id == (data[2] >> 16))
break;
if (!interact_type[i].length) {
printk(KERN_WARNING "interact.c: Unknown joystick on %s. [len %d d0 %08x d1 %08x i2 %08x]\n",
gameport->phys, i, data[0], data[1], data[2]);
err = -ENODEV;
goto fail2;
}
gameport_set_poll_handler(gameport, interact_poll);
gameport_set_poll_interval(gameport, 20);
snprintf(interact->phys, sizeof(interact->phys), "%s/input0", gameport->phys);
interact->type = i;
interact->length = interact_type[i].length;
input_dev->name = interact_type[i].name;
input_dev->phys = interact->phys;
input_dev->id.bustype = BUS_GAMEPORT;
input_dev->id.vendor = GAMEPORT_ID_VENDOR_INTERACT;
input_dev->id.product = interact_type[i].id;
input_dev->id.version = 0x0100;
input_dev->dev.parent = &gameport->dev;
input_set_drvdata(input_dev, interact);
input_dev->open = interact_open;
input_dev->close = interact_close;
input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
for (i = 0; (t = interact_type[interact->type].abs[i]) >= 0; i++) {
if (i < interact_type[interact->type].b8)
input_set_abs_params(input_dev, t, 0, 255, 0, 0);
else
input_set_abs_params(input_dev, t, -1, 1, 0, 0);
}
for (i = 0; (t = interact_type[interact->type].btn[i]) >= 0; i++)
__set_bit(t, input_dev->keybit);
err = input_register_device(interact->dev);
if (err)
goto fail2;
return 0;
fail2: gameport_close(gameport);
fail1: gameport_set_drvdata(gameport, NULL);
input_free_device(input_dev);
kfree(interact);
return err;
}
static void interact_disconnect(struct gameport *gameport)
{
struct interact *interact = gameport_get_drvdata(gameport);
input_unregister_device(interact->dev);
gameport_close(gameport);
gameport_set_drvdata(gameport, NULL);
kfree(interact);
}
static struct gameport_driver interact_drv = {
.driver = {
.name = "interact",
},
.description = DRIVER_DESC,
.connect = interact_connect,
.disconnect = interact_disconnect,
};
module_gameport_driver(interact_drv);
| gpl-2.0 |
omnirom/android_kernel_oppo_msm8916 | arch/x86/platform/olpc/olpc-xo15-sci.c | 2230 | 5599 | /*
* Support for OLPC XO-1.5 System Control Interrupts (SCI)
*
* Copyright (C) 2009-2010 One Laptop per Child
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/power_supply.h>
#include <linux/olpc-ec.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <asm/olpc.h>
#define DRV_NAME "olpc-xo15-sci"
#define PFX DRV_NAME ": "
#define XO15_SCI_CLASS DRV_NAME
#define XO15_SCI_DEVICE_NAME "OLPC XO-1.5 SCI"
static unsigned long xo15_sci_gpe;
static bool lid_wake_on_close;
/*
* The normal ACPI LID wakeup behavior is wake-on-open, but not
* wake-on-close. This is implemented as standard by the XO-1.5 DSDT.
*
* We provide here a sysfs attribute that will additionally enable
* wake-on-close behavior. This is useful (e.g.) when we opportunistically
* suspend with the display running; if the lid is then closed, we want to
* wake up to turn the display off.
*
* This is controlled through a custom method in the XO-1.5 DSDT.
*/
static int set_lid_wake_behavior(bool wake_on_close)
{
struct acpi_object_list arg_list;
union acpi_object arg;
acpi_status status;
arg_list.count = 1;
arg_list.pointer = &arg;
arg.type = ACPI_TYPE_INTEGER;
arg.integer.value = wake_on_close;
status = acpi_evaluate_object(NULL, "\\_SB.PCI0.LID.LIDW", &arg_list, NULL);
if (ACPI_FAILURE(status)) {
pr_warning(PFX "failed to set lid behavior\n");
return 1;
}
lid_wake_on_close = wake_on_close;
return 0;
}
static ssize_t
lid_wake_on_close_show(struct kobject *s, struct kobj_attribute *attr, char *buf)
{
return sprintf(buf, "%u\n", lid_wake_on_close);
}
static ssize_t lid_wake_on_close_store(struct kobject *s,
struct kobj_attribute *attr,
const char *buf, size_t n)
{
unsigned int val;
if (sscanf(buf, "%u", &val) != 1)
return -EINVAL;
set_lid_wake_behavior(!!val);
return n;
}
static struct kobj_attribute lid_wake_on_close_attr =
__ATTR(lid_wake_on_close, 0644,
lid_wake_on_close_show,
lid_wake_on_close_store);
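/*
 * Illustrative usage (the exact sysfs path depends on how ACPI
 * enumerates the device):
 *
 *   echo 1 > /sys/bus/acpi/devices/XO15EC:00/lid_wake_on_close
 *
 * enables wake-on-close; writing 0 restores the default wake-on-open
 * only behavior.
 */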
static void battery_status_changed(void)
{
struct power_supply *psy = power_supply_get_by_name("olpc-battery");
if (psy) {
power_supply_changed(psy);
put_device(psy->dev);
}
}
static void ac_status_changed(void)
{
struct power_supply *psy = power_supply_get_by_name("olpc-ac");
if (psy) {
power_supply_changed(psy);
put_device(psy->dev);
}
}
static void process_sci_queue(void)
{
u16 data;
int r;
do {
r = olpc_ec_sci_query(&data);
if (r || !data)
break;
pr_debug(PFX "SCI 0x%x received\n", data);
switch (data) {
case EC_SCI_SRC_BATERR:
case EC_SCI_SRC_BATSOC:
case EC_SCI_SRC_BATTERY:
case EC_SCI_SRC_BATCRIT:
battery_status_changed();
break;
case EC_SCI_SRC_ACPWR:
ac_status_changed();
break;
}
} while (data);
if (r)
pr_err(PFX "Failed to clear SCI queue");
}
static void process_sci_queue_work(struct work_struct *work)
{
process_sci_queue();
}
static DECLARE_WORK(sci_work, process_sci_queue_work);
static u32 xo15_sci_gpe_handler(acpi_handle gpe_device, u32 gpe, void *context)
{
schedule_work(&sci_work);
return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE;
}
static int xo15_sci_add(struct acpi_device *device)
{
unsigned long long tmp;
acpi_status status;
int r;
if (!device)
return -EINVAL;
strcpy(acpi_device_name(device), XO15_SCI_DEVICE_NAME);
strcpy(acpi_device_class(device), XO15_SCI_CLASS);
/* Get GPE bit assignment (EC events). */
status = acpi_evaluate_integer(device->handle, "_GPE", NULL, &tmp);
if (ACPI_FAILURE(status))
return -EINVAL;
xo15_sci_gpe = tmp;
status = acpi_install_gpe_handler(NULL, xo15_sci_gpe,
ACPI_GPE_EDGE_TRIGGERED,
xo15_sci_gpe_handler, device);
if (ACPI_FAILURE(status))
return -ENODEV;
dev_info(&device->dev, "Initialized, GPE = 0x%lx\n", xo15_sci_gpe);
r = sysfs_create_file(&device->dev.kobj, &lid_wake_on_close_attr.attr);
if (r)
goto err_sysfs;
/* Flush queue, and enable all SCI events */
process_sci_queue();
olpc_ec_mask_write(EC_SCI_SRC_ALL);
acpi_enable_gpe(NULL, xo15_sci_gpe);
/* Enable wake-on-EC */
if (device->wakeup.flags.valid)
device_init_wakeup(&device->dev, true);
return 0;
err_sysfs:
acpi_remove_gpe_handler(NULL, xo15_sci_gpe, xo15_sci_gpe_handler);
cancel_work_sync(&sci_work);
return r;
}
static int xo15_sci_remove(struct acpi_device *device)
{
acpi_disable_gpe(NULL, xo15_sci_gpe);
acpi_remove_gpe_handler(NULL, xo15_sci_gpe, xo15_sci_gpe_handler);
cancel_work_sync(&sci_work);
sysfs_remove_file(&device->dev.kobj, &lid_wake_on_close_attr.attr);
return 0;
}
static int xo15_sci_resume(struct device *dev)
{
/* Enable all EC events */
olpc_ec_mask_write(EC_SCI_SRC_ALL);
/* Power/battery status might have changed */
battery_status_changed();
ac_status_changed();
return 0;
}
static SIMPLE_DEV_PM_OPS(xo15_sci_pm, NULL, xo15_sci_resume);
static const struct acpi_device_id xo15_sci_device_ids[] = {
{"XO15EC", 0},
{"", 0},
};
static struct acpi_driver xo15_sci_drv = {
.name = DRV_NAME,
.class = XO15_SCI_CLASS,
.ids = xo15_sci_device_ids,
.ops = {
.add = xo15_sci_add,
.remove = xo15_sci_remove,
},
.drv.pm = &xo15_sci_pm,
};
static int __init xo15_sci_init(void)
{
return acpi_bus_register_driver(&xo15_sci_drv);
}
device_initcall(xo15_sci_init);
| gpl-2.0 |
pranav01/kernel_sprout | drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c | 2486 | 8220 | /*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#include "ixgbe.h"
#include "ixgbe_type.h"
#include "ixgbe_dcb.h"
#include "ixgbe_dcb_82598.h"
/**
* ixgbe_dcb_config_rx_arbiter_82598 - Config Rx data arbiter
* @hw: pointer to hardware structure
* @refill: refill credits per traffic class
* @max: max credits per traffic class
* @prio_type: priority type per traffic class
*
* Configure Rx Data Arbiter and credits for each traffic class.
*/
s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
u16 *refill,
u16 *max,
u8 *prio_type)
{
u32 reg = 0;
u32 credit_refill = 0;
u32 credit_max = 0;
u8 i = 0;
reg = IXGBE_READ_REG(hw, IXGBE_RUPPBMR) | IXGBE_RUPPBMR_MQA;
IXGBE_WRITE_REG(hw, IXGBE_RUPPBMR, reg);
reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
/* Enable Arbiter */
reg &= ~IXGBE_RMCS_ARBDIS;
/* Enable Receive Recycle within the BWG */
reg |= IXGBE_RMCS_RRM;
/* Enable Deficit Fixed Priority arbitration*/
reg |= IXGBE_RMCS_DFP;
IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
/* Configure traffic class credits and priority */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
credit_refill = refill[i];
credit_max = max[i];
reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT);
if (prio_type[i] == prio_link)
reg |= IXGBE_RT2CR_LSP;
IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg);
}
reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
reg |= IXGBE_RDRXCTL_RDMTS_1_2;
reg |= IXGBE_RDRXCTL_MPBEN;
reg |= IXGBE_RDRXCTL_MCEN;
IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
reg = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
/* Make sure there are enough descriptors before arbitration */
reg &= ~IXGBE_RXCTRL_DMBYPS;
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg);
return 0;
}
/**
* ixgbe_dcb_config_tx_desc_arbiter_82598 - Config Tx Desc. arbiter
* @hw: pointer to hardware structure
* @refill: refill credits per traffic class
* @max: max credits per traffic class
* @bwg_id: bandwidth group id per traffic class
* @prio_type: priority type per traffic class
*
* Configure Tx Descriptor Arbiter and credits for each traffic class.
*/
s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
u16 *refill,
u16 *max,
u8 *bwg_id,
u8 *prio_type)
{
u32 reg, max_credits;
u8 i;
reg = IXGBE_READ_REG(hw, IXGBE_DPMCS);
/* Enable arbiter */
reg &= ~IXGBE_DPMCS_ARBDIS;
reg |= IXGBE_DPMCS_TSOEF;
/* Configure Max TSO packet size 34KB including payload and headers */
reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT);
IXGBE_WRITE_REG(hw, IXGBE_DPMCS, reg);
/* Configure traffic class credits and priority */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
max_credits = max[i];
reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT;
reg |= refill[i];
reg |= (u32)(bwg_id[i]) << IXGBE_TDTQ2TCCR_BWG_SHIFT;
if (prio_type[i] == prio_group)
reg |= IXGBE_TDTQ2TCCR_GSP;
if (prio_type[i] == prio_link)
reg |= IXGBE_TDTQ2TCCR_LSP;
IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg);
}
return 0;
}
/**
* ixgbe_dcb_config_tx_data_arbiter_82598 - Config Tx data arbiter
* @hw: pointer to hardware structure
* @refill: refill credits per traffic class
* @max: max credits per traffic class
* @bwg_id: bandwidth group id per traffic class
* @prio_type: priority type per traffic class
*
* Configure Tx Data Arbiter and credits for each traffic class.
*/
s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
u16 *refill,
u16 *max,
u8 *bwg_id,
u8 *prio_type)
{
u32 reg;
u8 i;
reg = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
/* Enable Data Plane Arbiter */
reg &= ~IXGBE_PDPMCS_ARBDIS;
/* Enable DFP and Transmit Recycle Mode */
reg |= (IXGBE_PDPMCS_TPPAC | IXGBE_PDPMCS_TRM);
IXGBE_WRITE_REG(hw, IXGBE_PDPMCS, reg);
/* Configure traffic class credits and priority */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
reg = refill[i];
reg |= (u32)(max[i]) << IXGBE_TDPT2TCCR_MCL_SHIFT;
reg |= (u32)(bwg_id[i]) << IXGBE_TDPT2TCCR_BWG_SHIFT;
if (prio_type[i] == prio_group)
reg |= IXGBE_TDPT2TCCR_GSP;
if (prio_type[i] == prio_link)
reg |= IXGBE_TDPT2TCCR_LSP;
IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg);
}
/* Enable Tx packet buffer division */
reg = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
reg |= IXGBE_DTXCTL_ENDBUBD;
IXGBE_WRITE_REG(hw, IXGBE_DTXCTL, reg);
return 0;
}
/**
* ixgbe_dcb_config_pfc_82598 - Config priority flow control
* @hw: pointer to hardware structure
* @pfc_en: bitmap of traffic classes with priority flow control enabled
*
* Configure Priority Flow Control for each traffic class.
*/
s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
{
u32 fcrtl, reg;
u8 i;
/* Enable Transmit Priority Flow Control */
reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
reg &= ~IXGBE_RMCS_TFCE_802_3X;
reg |= IXGBE_RMCS_TFCE_PRIORITY;
IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
/* Enable Receive Priority Flow Control */
reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
reg &= ~(IXGBE_FCTRL_RPFCE | IXGBE_FCTRL_RFCE);
if (pfc_en)
reg |= IXGBE_FCTRL_RPFCE;
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);
fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
/* Configure PFC Tx thresholds per TC */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
if (!(pfc_en & (1 << i))) {
IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
continue;
}
reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
}
/* Configure pause time */
reg = hw->fc.pause_time * 0x00010001;
for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
/* Configure flow control refresh threshold value */
IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
return 0;
}
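/*
 * Illustrative example for the pause-time replication above: each FCTTV
 * register holds two 16-bit pause values, one per traffic class, so
 * multiplying by 0x00010001 copies hw->fc.pause_time into both halves;
 * with pause_time = 0xFFFF every FCTTV word becomes 0xFFFFFFFF.
 */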
/**
* ixgbe_dcb_config_tc_stats_82598 - Configure traffic class statistics
* @hw: pointer to hardware structure
*
* Configure queue statistics registers, all queues belonging to same traffic
* class uses a single set of queue statistics counters.
*/
static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
{
u32 reg = 0;
u8 i = 0;
u8 j = 0;
/* Receive Queues stats setting - 8 queues per statistics reg */
for (i = 0, j = 0; i < 15 && j < 8; i = i + 2, j++) {
reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i));
reg |= ((0x1010101) * j);
IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i + 1));
reg |= ((0x1010101) * j);
IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i + 1), reg);
}
/* Transmit Queues stats setting - 4 queues per statistics reg */
for (i = 0; i < 8; i++) {
reg = IXGBE_READ_REG(hw, IXGBE_TQSMR(i));
reg |= ((0x1010101) * i);
IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i), reg);
}
return 0;
}
/**
* ixgbe_dcb_hw_config_82598 - Config and enable DCB
* @hw: pointer to hardware structure
* @pfc_en: bitmap of traffic classes with priority flow control enabled
* @refill: refill credits per traffic class
* @max: max credits per traffic class
* @bwg_id: bandwidth group id per traffic class
* @prio_type: priority type per traffic class
*
* Configure dcb settings and enable dcb mode.
*/
s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
u16 *max, u8 *bwg_id, u8 *prio_type)
{
ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, prio_type);
ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max,
bwg_id, prio_type);
ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max,
bwg_id, prio_type);
ixgbe_dcb_config_pfc_82598(hw, pfc_en);
ixgbe_dcb_config_tc_stats_82598(hw);
return 0;
}
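/*
 * A minimal, hypothetical call site (names assumed, not from the original
 * source): enable PFC on TC0/TC1 with previously computed per-TC credit
 * arrays:
 *
 *	ixgbe_dcb_hw_config_82598(hw, 0x03, refill, max, bwg_id, prio_type);
 */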
| gpl-2.0 |
Sohamlad7/kernel | arch/ia64/kernel/ptrace.c | 2742 | 57987 | /*
* Kernel support for the ptrace() and syscall tracing interfaces.
*
* Copyright (C) 1999-2005 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 2006 Intel Co
* 2006-08-12 - IA64 Native Utrace implementation support added by
* Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
*
* Derived from the x86 and Alpha versions.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/regset.h>
#include <linux/elf.h>
#include <linux/tracehook.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace_offsets.h>
#include <asm/rse.h>
#include <asm/uaccess.h>
#include <asm/unwind.h>
#ifdef CONFIG_PERFMON
#include <asm/perfmon.h>
#endif
#include "entry.h"
/*
* Bits in the PSR that we allow ptrace() to change:
* be, up, ac, mfl, mfh (the user mask; five bits total)
* db (debug breakpoint fault; one bit)
* id (instruction debug fault disable; one bit)
* dd (data debug fault disable; one bit)
* ri (restart instruction; two bits)
* is (instruction set; one bit)
*/
#define IPSR_MASK (IA64_PSR_UM | IA64_PSR_DB | IA64_PSR_IS \
| IA64_PSR_ID | IA64_PSR_DD | IA64_PSR_RI)
#define MASK(nbits) ((1UL << (nbits)) - 1) /* mask with NBITS bits set */
#define PFM_MASK MASK(38)
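/*
 * For example, MASK(3) expands to ((1UL << 3) - 1) == 0x7.  PFM_MASK ==
 * MASK(38) thus selects the low 38 bits of cr.ifs, i.e. the current
 * frame marker (cfm) fields compared and spliced in access_elf_areg().
 */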
#define PTRACE_DEBUG 0
#if PTRACE_DEBUG
# define dprintk(format...) printk(format)
# define inline
#else
# define dprintk(format...)
#endif
/* Return TRUE if PT was created due to kernel-entry via a system-call. */
static inline int
in_syscall (struct pt_regs *pt)
{
return (long) pt->cr_ifs >= 0;
}
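/*
 * Bit 63 of cr.ifs is the "valid" bit: it is set on interruption entry
 * but left clear when the kernel is entered via a system call (see also
 * convert_to_non_syscall(), which sets it explicitly).  For example,
 * cr_ifs == 0x0000000000000005 (sof = 5) makes in_syscall() return true,
 * while 0x8000000000000005 makes it return false.
 */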
/*
* Collect the NaT bits for r1-r31 from scratch_unat and return a NaT
* bitset where bit i is set iff the NaT bit of register i is set.
*/
unsigned long
ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat)
{
# define GET_BITS(first, last, unat) \
({ \
unsigned long bit = ia64_unat_pos(&pt->r##first); \
unsigned long nbits = (last - first + 1); \
unsigned long mask = MASK(nbits) << first; \
unsigned long dist; \
if (bit < first) \
dist = 64 + bit - first; \
else \
dist = bit - first; \
ia64_rotr(unat, dist) & mask; \
})
unsigned long val;
/*
* Registers that are stored consecutively in struct pt_regs
* can be handled in parallel. If the register order in
* struct_pt_regs changes, this code MUST be updated.
*/
val = GET_BITS( 1, 1, scratch_unat);
val |= GET_BITS( 2, 3, scratch_unat);
val |= GET_BITS(12, 13, scratch_unat);
val |= GET_BITS(14, 14, scratch_unat);
val |= GET_BITS(15, 15, scratch_unat);
val |= GET_BITS( 8, 11, scratch_unat);
val |= GET_BITS(16, 31, scratch_unat);
return val;
# undef GET_BITS
}
/*
* Set the NaT bits for the scratch registers according to NAT and
* return the resulting unat (assuming the scratch registers are
* stored in PT).
*/
unsigned long
ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat)
{
# define PUT_BITS(first, last, nat) \
({ \
unsigned long bit = ia64_unat_pos(&pt->r##first); \
unsigned long nbits = (last - first + 1); \
unsigned long mask = MASK(nbits) << first; \
long dist; \
if (bit < first) \
dist = 64 + bit - first; \
else \
dist = bit - first; \
ia64_rotl(nat & mask, dist); \
})
unsigned long scratch_unat;
/*
* Registers that are stored consecutively in struct pt_regs
* can be handled in parallel. If the register order in
* struct_pt_regs changes, this code MUST be updated.
*/
scratch_unat = PUT_BITS( 1, 1, nat);
scratch_unat |= PUT_BITS( 2, 3, nat);
scratch_unat |= PUT_BITS(12, 13, nat);
scratch_unat |= PUT_BITS(14, 14, nat);
scratch_unat |= PUT_BITS(15, 15, nat);
scratch_unat |= PUT_BITS( 8, 11, nat);
scratch_unat |= PUT_BITS(16, 31, nat);
return scratch_unat;
# undef PUT_BITS
}
#define IA64_MLX_TEMPLATE 0x2
#define IA64_MOVL_OPCODE 6
void
ia64_increment_ip (struct pt_regs *regs)
{
unsigned long w0, ri = ia64_psr(regs)->ri + 1;
if (ri > 2) {
ri = 0;
regs->cr_iip += 16;
} else if (ri == 2) {
get_user(w0, (char __user *) regs->cr_iip + 0);
if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
/*
* rfi'ing to slot 2 of an MLX bundle causes
* an illegal operation fault. We don't want
* that to happen...
*/
ri = 0;
regs->cr_iip += 16;
}
}
ia64_psr(regs)->ri = ri;
}
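/*
 * IA-64 instructions are packed in 16-byte bundles of three slots;
 * cr.iip addresses the bundle and psr.ri selects the slot.  Stepping
 * past slot 2 therefore advances cr.iip by 16 bytes and wraps psr.ri
 * to 0: e.g. (iip = 0x4000, ri = 2) increments to (iip = 0x4010,
 * ri = 0), unless slot 2 belongs to an MLX bundle (handled above).
 */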
void
ia64_decrement_ip (struct pt_regs *regs)
{
unsigned long w0, ri = ia64_psr(regs)->ri - 1;
if (ia64_psr(regs)->ri == 0) {
regs->cr_iip -= 16;
ri = 2;
get_user(w0, (char __user *) regs->cr_iip + 0);
if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
/*
* rfi'ing to slot 2 of an MLX bundle causes
* an illegal operation fault. We don't want
* that to happen...
*/
ri = 1;
}
}
ia64_psr(regs)->ri = ri;
}
/*
* This routine is used to read the RNaT bits that are stored on the
* kernel backing store.  Since, in general, the alignments of the user
* and kernel backing stores differ, this is not completely trivial.  In
* essence, we need to construct the user RNAT based on up to two
* kernel RNAT values and/or the RNAT value saved in the child's
* pt_regs.
*
* user rbs
*
* +--------+ <-- lowest address
* | slot62 |
* +--------+
* | rnat | 0x....1f8
* +--------+
* | slot00 | \
* +--------+ |
* | slot01 | > child_regs->ar_rnat
* +--------+ |
* | slot02 | / kernel rbs
* +--------+ +--------+
* <- child_regs->ar_bspstore | slot61 | <-- krbs
* +- - - - + +--------+
* | slot62 |
* +- - - - + +--------+
* | rnat |
* +- - - - + +--------+
* vrnat | slot00 |
* +- - - - + +--------+
* = =
* +--------+
* | slot00 | \
* +--------+ |
* | slot01 | > child_stack->ar_rnat
* +--------+ |
* | slot02 | /
* +--------+
* <--- child_stack->ar_bspstore
*
* The way to think of this code is as follows: bit 0 in the user rnat
* corresponds to some bit N (0 <= N <= 62) in one of the kernel rnat
* values.  The kernel rnat value holding this bit is stored in
* variable rnat0.  rnat1 is loaded with the kernel rnat value that
* forms the upper bits of the user rnat value.
*
* Boundary cases:
*
* o when reading the rnat "below" the first rnat slot on the kernel
* backing store, rnat0/rnat1 are set to 0 and the low order bits are
* merged in from pt->ar_rnat.
*
* o when reading the rnat "above" the last rnat slot on the kernel
* backing store, rnat0/rnat1 get their values from sw->ar_rnat.
*/
static unsigned long
get_rnat (struct task_struct *task, struct switch_stack *sw,
unsigned long *krbs, unsigned long *urnat_addr,
unsigned long *urbs_end)
{
unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr;
unsigned long umask = 0, mask, m;
unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
long num_regs, nbits;
struct pt_regs *pt;
pt = task_pt_regs(task);
kbsp = (unsigned long *) sw->ar_bspstore;
ubspstore = (unsigned long *) pt->ar_bspstore;
if (urbs_end < urnat_addr)
nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_end);
else
nbits = 63;
mask = MASK(nbits);
/*
* First, figure out which bit number slot 0 in user-land maps
* to in the kernel rnat. Do this by figuring out how many
* register slots we're beyond the user's backingstore and
* then computing the equivalent address in kernel space.
*/
num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
shift = ia64_rse_slot_num(slot0_kaddr);
rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
rnat0_kaddr = rnat1_kaddr - 64;
if (ubspstore + 63 > urnat_addr) {
/* some bits need to be merged in from pt->ar_rnat */
umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
urnat = (pt->ar_rnat & umask);
mask &= ~umask;
if (!mask)
return urnat;
}
m = mask << shift;
if (rnat0_kaddr >= kbsp)
rnat0 = sw->ar_rnat;
else if (rnat0_kaddr > krbs)
rnat0 = *rnat0_kaddr;
urnat |= (rnat0 & m) >> shift;
m = mask >> (63 - shift);
if (rnat1_kaddr >= kbsp)
rnat1 = sw->ar_rnat;
else if (rnat1_kaddr > krbs)
rnat1 = *rnat1_kaddr;
urnat |= (rnat1 & m) << (63 - shift);
return urnat;
}
/*
* The reverse of get_rnat.
*/
static void
put_rnat (struct task_struct *task, struct switch_stack *sw,
unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat,
unsigned long *urbs_end)
{
unsigned long rnat0 = 0, rnat1 = 0, *slot0_kaddr, umask = 0, mask, m;
unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
long num_regs, nbits;
struct pt_regs *pt;
unsigned long cfm, *urbs_kargs;
pt = task_pt_regs(task);
kbsp = (unsigned long *) sw->ar_bspstore;
ubspstore = (unsigned long *) pt->ar_bspstore;
urbs_kargs = urbs_end;
if (in_syscall(pt)) {
/*
* If entered via syscall, don't allow the user to set RNaT bits
* for syscall args.
*/
cfm = pt->cr_ifs;
urbs_kargs = ia64_rse_skip_regs(urbs_end, -(cfm & 0x7f));
}
if (urbs_kargs >= urnat_addr)
nbits = 63;
else {
if ((urnat_addr - 63) >= urbs_kargs)
return;
nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_kargs);
}
mask = MASK(nbits);
/*
* First, figure out which bit number slot 0 in user-land maps
* to in the kernel rnat. Do this by figuring out how many
* register slots we're beyond the user's backingstore and
* then computing the equivalent address in kernel space.
*/
num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
shift = ia64_rse_slot_num(slot0_kaddr);
rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
rnat0_kaddr = rnat1_kaddr - 64;
if (ubspstore + 63 > urnat_addr) {
/* some bits need to be placed in pt->ar_rnat: */
umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
pt->ar_rnat = (pt->ar_rnat & ~umask) | (urnat & umask);
mask &= ~umask;
if (!mask)
return;
}
/*
* Note: Section 11.1 of the EAS guarantees that bit 63 of an
* rnat slot is ignored, so we don't have to clear it here.
*/
rnat0 = (urnat << shift);
m = mask << shift;
if (rnat0_kaddr >= kbsp)
sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat0 & m);
else if (rnat0_kaddr > krbs)
*rnat0_kaddr = ((*rnat0_kaddr & ~m) | (rnat0 & m));
rnat1 = (urnat >> (63 - shift));
m = mask >> (63 - shift);
if (rnat1_kaddr >= kbsp)
sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat1 & m);
else if (rnat1_kaddr > krbs)
*rnat1_kaddr = ((*rnat1_kaddr & ~m) | (rnat1 & m));
}
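/*
 * Illustrative note: ia64_rse_rnat_addr() rounds an RBS address up to
 * the RNaT collection slot of its 0x200-byte group (offset 0x1f8), so
 * with bspstore == 0x9f40 and urbs_end == 0x9f40, on_kernel_rbs()
 * below accepts addresses in [0x9f40, 0x9ff8].
 */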
static inline int
on_kernel_rbs (unsigned long addr, unsigned long bspstore,
unsigned long urbs_end)
{
unsigned long *rnat_addr = ia64_rse_rnat_addr((unsigned long *)
urbs_end);
return (addr >= bspstore && addr <= (unsigned long) rnat_addr);
}
/*
* Read a word from the user-level backing store of task CHILD. ADDR
* is the user-level address to read the word from, VAL a pointer to
* the return value, and USER_BSP gives the end of the user-level
* backing store (i.e., it's the address that would be in ar.bsp after
* the user executed a "cover" instruction).
*
* This routine takes care of accessing the kernel register backing
* store for those registers that got spilled there. It also takes
* care of calculating the appropriate RNaT collection words.
*/
long
ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
unsigned long user_rbs_end, unsigned long addr, long *val)
{
unsigned long *bspstore, *krbs, regnum, *laddr, *urbs_end, *rnat_addr;
struct pt_regs *child_regs;
size_t copied;
long ret;
urbs_end = (long *) user_rbs_end;
laddr = (unsigned long *) addr;
child_regs = task_pt_regs(child);
bspstore = (unsigned long *) child_regs->ar_bspstore;
krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
if (on_kernel_rbs(addr, (unsigned long) bspstore,
(unsigned long) urbs_end))
{
/*
* Attempt to read the RBS in an area that's actually
* on the kernel RBS => read the corresponding bits in
* the kernel RBS.
*/
rnat_addr = ia64_rse_rnat_addr(laddr);
ret = get_rnat(child, child_stack, krbs, rnat_addr, urbs_end);
if (laddr == rnat_addr) {
/* return NaT collection word itself */
*val = ret;
return 0;
}
if (((1UL << ia64_rse_slot_num(laddr)) & ret) != 0) {
/*
* It is implementation dependent whether the
* data portion of a NaT value gets saved on a
* st8.spill or RSE spill (e.g., see EAS 2.6,
* 4.4.4.6 Register Spill and Fill). To get
* consistent behavior across all possible
* IA-64 implementations, we return zero in
* this case.
*/
*val = 0;
return 0;
}
if (laddr < urbs_end) {
/*
* The desired word is on the kernel RBS and
* is not a NaT.
*/
regnum = ia64_rse_num_regs(bspstore, laddr);
*val = *ia64_rse_skip_regs(krbs, regnum);
return 0;
}
}
copied = access_process_vm(child, addr, &ret, sizeof(ret), 0);
if (copied != sizeof(ret))
return -EIO;
*val = ret;
return 0;
}
long
ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
unsigned long user_rbs_end, unsigned long addr, long val)
{
unsigned long *bspstore, *krbs, regnum, *laddr;
unsigned long *urbs_end = (long *) user_rbs_end;
struct pt_regs *child_regs;
laddr = (unsigned long *) addr;
child_regs = task_pt_regs(child);
bspstore = (unsigned long *) child_regs->ar_bspstore;
krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
if (on_kernel_rbs(addr, (unsigned long) bspstore,
(unsigned long) urbs_end))
{
/*
* Attempt to write the RBS in an area that's actually
* on the kernel RBS => write the corresponding bits
* in the kernel RBS.
*/
if (ia64_rse_is_rnat_slot(laddr))
put_rnat(child, child_stack, krbs, laddr, val,
urbs_end);
else {
if (laddr < urbs_end) {
regnum = ia64_rse_num_regs(bspstore, laddr);
*ia64_rse_skip_regs(krbs, regnum) = val;
}
}
} else if (access_process_vm(child, addr, &val, sizeof(val), 1)
!= sizeof(val))
return -EIO;
return 0;
}
/*
* Calculate the address of the end of the user-level register backing
* store. This is the address that would have been stored in ar.bsp
* if the user had executed a "cover" instruction right before
* entering the kernel. If CFMP is not NULL, it is used to return the
* "current frame mask" that was active at the time the kernel was
* entered.
*/
unsigned long
ia64_get_user_rbs_end (struct task_struct *child, struct pt_regs *pt,
unsigned long *cfmp)
{
unsigned long *krbs, *bspstore, cfm = pt->cr_ifs;
long ndirty;
krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
bspstore = (unsigned long *) pt->ar_bspstore;
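/*
 * pt->loadrs holds the dirty-partition byte count shifted left by 16
 * (the ar.rsc.loadrs bit position), so (pt->loadrs >> 19) is that byte
 * count divided by 8, i.e. the number of dirty 64-bit slots on the
 * kernel RBS; ia64_rse_num_regs() then accounts for intervening RNaT
 * collection slots.  E.g. loadrs == (24 << 16) denotes three slots.
 */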
ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
if (in_syscall(pt))
ndirty += (cfm & 0x7f);
else
cfm &= ~(1UL << 63); /* clear valid bit */
if (cfmp)
*cfmp = cfm;
return (unsigned long) ia64_rse_skip_regs(bspstore, ndirty);
}
/*
* Synchronize (i.e, write) the RSE backing store living in kernel
* space to the VM of the CHILD task. SW and PT are the pointers to
* the switch_stack and pt_regs structures, respectively.
* USER_RBS_END is the user-level address at which the backing store
* ends.
*/
long
ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
unsigned long user_rbs_start, unsigned long user_rbs_end)
{
unsigned long addr, val;
long ret;
/* now copy word for word from kernel rbs to user rbs: */
for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
ret = ia64_peek(child, sw, user_rbs_end, addr, &val);
if (ret < 0)
return ret;
if (access_process_vm(child, addr, &val, sizeof(val), 1)
!= sizeof(val))
return -EIO;
}
return 0;
}
static long
ia64_sync_kernel_rbs (struct task_struct *child, struct switch_stack *sw,
unsigned long user_rbs_start, unsigned long user_rbs_end)
{
unsigned long addr, val;
long ret;
/* now copy word for word from user rbs to kernel rbs: */
for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
if (access_process_vm(child, addr, &val, sizeof(val), 0)
!= sizeof(val))
return -EIO;
ret = ia64_poke(child, sw, user_rbs_end, addr, val);
if (ret < 0)
return ret;
}
return 0;
}
typedef long (*syncfunc_t)(struct task_struct *, struct switch_stack *,
unsigned long, unsigned long);
static void do_sync_rbs(struct unw_frame_info *info, void *arg)
{
struct pt_regs *pt;
unsigned long urbs_end;
syncfunc_t fn = arg;
if (unw_unwind_to_user(info) < 0)
return;
pt = task_pt_regs(info->task);
urbs_end = ia64_get_user_rbs_end(info->task, pt, NULL);
fn(info->task, info->sw, pt->ar_bspstore, urbs_end);
}
/*
* When a thread is stopped (ptraced), the debugger might change the thread's
* user stack (by writing its memory directly), and we must keep the RSE state
* stored in the kernel from overriding the user stack (user space's RSE
* contents are newer than the kernel's in that case).  To work around this,
* we copy the kernel RSE to the user RSE before the task is stopped, so the
* user RSE holds updated data.  We then copy the user RSE back to the kernel
* after the task is resumed from the traced stop, and the kernel will use the
* newer RSE state to return to user mode.  TIF_RESTORE_RSE is the flag that
* indicates we need to synchronize the user RSE to the kernel.
*/
void ia64_ptrace_stop(void)
{
if (test_and_set_tsk_thread_flag(current, TIF_RESTORE_RSE))
return;
set_notify_resume(current);
unw_init_running(do_sync_rbs, ia64_sync_user_rbs);
}
/*
* This is called to read back the register backing store.
*/
void ia64_sync_krbs(void)
{
clear_tsk_thread_flag(current, TIF_RESTORE_RSE);
unw_init_running(do_sync_rbs, ia64_sync_kernel_rbs);
}
/*
* After PTRACE_ATTACH, a thread's register backing store area in user
* space is assumed to contain correct data whenever the thread is
* stopped. arch_ptrace_stop takes care of this on tracing stops.
* But if the child was already stopped for job control when we attach
* to it, then it might not ever get into ptrace_stop by the time we
* want to examine the user memory containing the RBS.
*/
void
ptrace_attach_sync_user_rbs (struct task_struct *child)
{
int stopped = 0;
struct unw_frame_info info;
/*
* If the child is in TASK_STOPPED, we need to change that to
* TASK_TRACED momentarily while we operate on it. This ensures
* that the child won't be woken up and return to user mode while
* we are doing the sync. (It can only be woken up for SIGKILL.)
*/
read_lock(&tasklist_lock);
if (child->sighand) {
spin_lock_irq(&child->sighand->siglock);
if (child->state == TASK_STOPPED &&
!test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) {
set_notify_resume(child);
child->state = TASK_TRACED;
stopped = 1;
}
spin_unlock_irq(&child->sighand->siglock);
}
read_unlock(&tasklist_lock);
if (!stopped)
return;
unw_init_from_blocked_task(&info, child);
do_sync_rbs(&info, ia64_sync_user_rbs);
/*
* Now move the child back into TASK_STOPPED if it should be in a
* job control stop, so that SIGCONT can be used to wake it up.
*/
read_lock(&tasklist_lock);
if (child->sighand) {
spin_lock_irq(&child->sighand->siglock);
if (child->state == TASK_TRACED &&
(child->signal->flags & SIGNAL_STOP_STOPPED)) {
child->state = TASK_STOPPED;
}
spin_unlock_irq(&child->sighand->siglock);
}
read_unlock(&tasklist_lock);
}
/*
* Write f32-f127 back to task->thread.fph if it has been modified.
*/
inline void
ia64_flush_fph (struct task_struct *task)
{
struct ia64_psr *psr = ia64_psr(task_pt_regs(task));
/*
* Prevent migrating this task while
* we're fiddling with the FPU state
*/
preempt_disable();
if (ia64_is_local_fpu_owner(task) && psr->mfh) {
psr->mfh = 0;
task->thread.flags |= IA64_THREAD_FPH_VALID;
ia64_save_fpu(&task->thread.fph[0]);
}
preempt_enable();
}
/*
* Sync the fph state of the task so that it can be manipulated
* through thread.fph. If necessary, f32-f127 are written back to
* thread.fph or, if the fph state hasn't been used before, thread.fph
* is cleared to zeroes. Also, access to f32-f127 is disabled to
* ensure that the task picks up the state from thread.fph when it
* executes again.
*/
void
ia64_sync_fph (struct task_struct *task)
{
struct ia64_psr *psr = ia64_psr(task_pt_regs(task));
ia64_flush_fph(task);
if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
task->thread.flags |= IA64_THREAD_FPH_VALID;
memset(&task->thread.fph, 0, sizeof(task->thread.fph));
}
ia64_drop_fpu(task);
psr->dfh = 1;
}
/*
* Change the machine-state of CHILD such that it will return via the normal
* kernel exit-path, rather than the syscall-exit path.
*/
static void
convert_to_non_syscall (struct task_struct *child, struct pt_regs *pt,
unsigned long cfm)
{
struct unw_frame_info info, prev_info;
unsigned long ip, sp, pr;
unw_init_from_blocked_task(&info, child);
while (1) {
prev_info = info;
if (unw_unwind(&info) < 0)
return;
unw_get_sp(&info, &sp);
if ((long)((unsigned long)child + IA64_STK_OFFSET - sp)
< IA64_PT_REGS_SIZE) {
dprintk("ptrace.%s: ran off the top of the kernel "
"stack\n", __func__);
return;
}
if (unw_get_pr (&prev_info, &pr) < 0) {
unw_get_rp(&prev_info, &ip);
dprintk("ptrace.%s: failed to read "
"predicate register (ip=0x%lx)\n",
__func__, ip);
return;
}
if (unw_is_intr_frame(&info)
&& (pr & (1UL << PRED_USER_STACK)))
break;
}
/*
* Note: at the time of this call, the target task is blocked
* in notify_resume_user() and by clearing PRED_LEAVE_SYSCALL
* (aka, "pLvSys") we redirect execution from
* .work_pending_syscall_end to .work_processed_kernel.
*/
unw_get_pr(&prev_info, &pr);
pr &= ~((1UL << PRED_SYSCALL) | (1UL << PRED_LEAVE_SYSCALL));
pr |= (1UL << PRED_NON_SYSCALL);
unw_set_pr(&prev_info, pr);
pt->cr_ifs = (1UL << 63) | cfm;
/*
* Clear the memory that is NOT written on syscall-entry to
* ensure we do not leak kernel-state to user when execution
* resumes.
*/
pt->r2 = 0;
pt->r3 = 0;
pt->r14 = 0;
memset(&pt->r16, 0, 16*8); /* clear r16-r31 */
memset(&pt->f6, 0, 6*16); /* clear f6-f11 */
pt->b7 = 0;
pt->ar_ccv = 0;
pt->ar_csd = 0;
pt->ar_ssd = 0;
}
static int
access_nat_bits (struct task_struct *child, struct pt_regs *pt,
struct unw_frame_info *info,
unsigned long *data, int write_access)
{
unsigned long regnum, nat_bits, scratch_unat, dummy = 0;
char nat = 0;
if (write_access) {
nat_bits = *data;
scratch_unat = ia64_put_scratch_nat_bits(pt, nat_bits);
if (unw_set_ar(info, UNW_AR_UNAT, scratch_unat) < 0) {
dprintk("ptrace: failed to set ar.unat\n");
return -1;
}
for (regnum = 4; regnum <= 7; ++regnum) {
unw_get_gr(info, regnum, &dummy, &nat);
unw_set_gr(info, regnum, dummy,
(nat_bits >> regnum) & 1);
}
} else {
if (unw_get_ar(info, UNW_AR_UNAT, &scratch_unat) < 0) {
dprintk("ptrace: failed to read ar.unat\n");
return -1;
}
nat_bits = ia64_get_scratch_nat_bits(pt, scratch_unat);
for (regnum = 4; regnum <= 7; ++regnum) {
unw_get_gr(info, regnum, &dummy, &nat);
nat_bits |= (nat != 0) << regnum;
}
*data = nat_bits;
}
return 0;
}
static int
access_uarea (struct task_struct *child, unsigned long addr,
unsigned long *data, int write_access);
static long
ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
{
unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val;
struct unw_frame_info info;
struct ia64_fpreg fpval;
struct switch_stack *sw;
struct pt_regs *pt;
long ret, retval = 0;
char nat = 0;
int i;
if (!access_ok(VERIFY_WRITE, ppr, sizeof(struct pt_all_user_regs)))
return -EIO;
pt = task_pt_regs(child);
sw = (struct switch_stack *) (child->thread.ksp + 16);
unw_init_from_blocked_task(&info, child);
if (unw_unwind_to_user(&info) < 0) {
return -EIO;
}
if (((unsigned long) ppr & 0x7) != 0) {
dprintk("ptrace:unaligned register address %p\n", ppr);
return -EIO;
}
if (access_uarea(child, PT_CR_IPSR, &psr, 0) < 0
|| access_uarea(child, PT_AR_EC, &ec, 0) < 0
|| access_uarea(child, PT_AR_LC, &lc, 0) < 0
|| access_uarea(child, PT_AR_RNAT, &rnat, 0) < 0
|| access_uarea(child, PT_AR_BSP, &bsp, 0) < 0
|| access_uarea(child, PT_CFM, &cfm, 0)
|| access_uarea(child, PT_NAT_BITS, &nat_bits, 0))
return -EIO;
/* control regs */
retval |= __put_user(pt->cr_iip, &ppr->cr_iip);
retval |= __put_user(psr, &ppr->cr_ipsr);
/* app regs */
retval |= __put_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
retval |= __put_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
retval |= __put_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
retval |= __put_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
retval |= __put_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
retval |= __put_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);
retval |= __put_user(ec, &ppr->ar[PT_AUR_EC]);
retval |= __put_user(lc, &ppr->ar[PT_AUR_LC]);
retval |= __put_user(rnat, &ppr->ar[PT_AUR_RNAT]);
retval |= __put_user(bsp, &ppr->ar[PT_AUR_BSP]);
retval |= __put_user(cfm, &ppr->cfm);
/* gr1-gr3 */
retval |= __copy_to_user(&ppr->gr[1], &pt->r1, sizeof(long));
retval |= __copy_to_user(&ppr->gr[2], &pt->r2, sizeof(long) *2);
/* gr4-gr7 */
for (i = 4; i < 8; i++) {
if (unw_access_gr(&info, i, &val, &nat, 0) < 0)
return -EIO;
retval |= __put_user(val, &ppr->gr[i]);
}
/* gr8-gr11 */
retval |= __copy_to_user(&ppr->gr[8], &pt->r8, sizeof(long) * 4);
/* gr12-gr15 */
retval |= __copy_to_user(&ppr->gr[12], &pt->r12, sizeof(long) * 2);
retval |= __copy_to_user(&ppr->gr[14], &pt->r14, sizeof(long));
retval |= __copy_to_user(&ppr->gr[15], &pt->r15, sizeof(long));
/* gr16-gr31 */
retval |= __copy_to_user(&ppr->gr[16], &pt->r16, sizeof(long) * 16);
/* b0 */
retval |= __put_user(pt->b0, &ppr->br[0]);
/* b1-b5 */
for (i = 1; i < 6; i++) {
if (unw_access_br(&info, i, &val, 0) < 0)
return -EIO;
retval |= __put_user(val, &ppr->br[i]);
}
/* b6-b7 */
retval |= __put_user(pt->b6, &ppr->br[6]);
retval |= __put_user(pt->b7, &ppr->br[7]);
/* fr2-fr5 */
for (i = 2; i < 6; i++) {
if (unw_get_fr(&info, i, &fpval) < 0)
return -EIO;
retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
}
/* fr6-fr11 */
retval |= __copy_to_user(&ppr->fr[6], &pt->f6,
sizeof(struct ia64_fpreg) * 6);
/* fp scratch regs(12-15) */
retval |= __copy_to_user(&ppr->fr[12], &sw->f12,
sizeof(struct ia64_fpreg) * 4);
/* fr16-fr31 */
for (i = 16; i < 32; i++) {
if (unw_get_fr(&info, i, &fpval) < 0)
return -EIO;
retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
}
/* fph */
ia64_flush_fph(child);
retval |= __copy_to_user(&ppr->fr[32], &child->thread.fph,
sizeof(ppr->fr[32]) * 96);
/* preds */
retval |= __put_user(pt->pr, &ppr->pr);
/* nat bits */
retval |= __put_user(nat_bits, &ppr->nat);
ret = retval ? -EIO : 0;
return ret;
}
static long
ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
{
unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
struct unw_frame_info info;
struct switch_stack *sw;
struct ia64_fpreg fpval;
struct pt_regs *pt;
long ret, retval = 0;
int i;
memset(&fpval, 0, sizeof(fpval));
if (!access_ok(VERIFY_READ, ppr, sizeof(struct pt_all_user_regs)))
return -EIO;
pt = task_pt_regs(child);
sw = (struct switch_stack *) (child->thread.ksp + 16);
unw_init_from_blocked_task(&info, child);
if (unw_unwind_to_user(&info) < 0) {
return -EIO;
}
if (((unsigned long) ppr & 0x7) != 0) {
dprintk("ptrace:unaligned register address %p\n", ppr);
return -EIO;
}
/* control regs */
retval |= __get_user(pt->cr_iip, &ppr->cr_iip);
retval |= __get_user(psr, &ppr->cr_ipsr);
/* app regs */
retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
retval |= __get_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);
retval |= __get_user(ec, &ppr->ar[PT_AUR_EC]);
retval |= __get_user(lc, &ppr->ar[PT_AUR_LC]);
retval |= __get_user(rnat, &ppr->ar[PT_AUR_RNAT]);
retval |= __get_user(bsp, &ppr->ar[PT_AUR_BSP]);
retval |= __get_user(cfm, &ppr->cfm);
/* gr1-gr3 */
retval |= __copy_from_user(&pt->r1, &ppr->gr[1], sizeof(long));
retval |= __copy_from_user(&pt->r2, &ppr->gr[2], sizeof(long) * 2);
/* gr4-gr7 */
for (i = 4; i < 8; i++) {
retval |= __get_user(val, &ppr->gr[i]);
/* NaT bit will be set via PT_NAT_BITS: */
if (unw_set_gr(&info, i, val, 0) < 0)
return -EIO;
}
/* gr8-gr11 */
retval |= __copy_from_user(&pt->r8, &ppr->gr[8], sizeof(long) * 4);
/* gr12-gr15 */
retval |= __copy_from_user(&pt->r12, &ppr->gr[12], sizeof(long) * 2);
retval |= __copy_from_user(&pt->r14, &ppr->gr[14], sizeof(long));
retval |= __copy_from_user(&pt->r15, &ppr->gr[15], sizeof(long));
/* gr16-gr31 */
retval |= __copy_from_user(&pt->r16, &ppr->gr[16], sizeof(long) * 16);
/* b0 */
retval |= __get_user(pt->b0, &ppr->br[0]);
/* b1-b5 */
for (i = 1; i < 6; i++) {
retval |= __get_user(val, &ppr->br[i]);
unw_set_br(&info, i, val);
}
/* b6-b7 */
retval |= __get_user(pt->b6, &ppr->br[6]);
retval |= __get_user(pt->b7, &ppr->br[7]);
/* fr2-fr5 */
for (i = 2; i < 6; i++) {
retval |= __copy_from_user(&fpval, &ppr->fr[i], sizeof(fpval));
if (unw_set_fr(&info, i, fpval) < 0)
return -EIO;
}
/* fr6-fr11 */
retval |= __copy_from_user(&pt->f6, &ppr->fr[6],
sizeof(ppr->fr[6]) * 6);
/* fp scratch regs(12-15) */
retval |= __copy_from_user(&sw->f12, &ppr->fr[12],
sizeof(ppr->fr[12]) * 4);
/* fr16-fr31 */
for (i = 16; i < 32; i++) {
retval |= __copy_from_user(&fpval, &ppr->fr[i],
sizeof(fpval));
if (unw_set_fr(&info, i, fpval) < 0)
return -EIO;
}
/* fph */
ia64_sync_fph(child);
retval |= __copy_from_user(&child->thread.fph, &ppr->fr[32],
sizeof(ppr->fr[32]) * 96);
/* preds */
retval |= __get_user(pt->pr, &ppr->pr);
/* nat bits */
retval |= __get_user(nat_bits, &ppr->nat);
retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
retval |= access_uarea(child, PT_AR_EC, &ec, 1);
retval |= access_uarea(child, PT_AR_LC, &lc, 1);
retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
retval |= access_uarea(child, PT_AR_BSP, &bsp, 1);
retval |= access_uarea(child, PT_CFM, &cfm, 1);
retval |= access_uarea(child, PT_NAT_BITS, &nat_bits, 1);
ret = retval ? -EIO : 0;
return ret;
}
void
user_enable_single_step (struct task_struct *child)
{
struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
set_tsk_thread_flag(child, TIF_SINGLESTEP);
child_psr->ss = 1;
}
void
user_enable_block_step (struct task_struct *child)
{
struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
set_tsk_thread_flag(child, TIF_SINGLESTEP);
child_psr->tb = 1;
}
void
user_disable_single_step (struct task_struct *child)
{
struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
/* make sure the single step/taken-branch trap bits are not set: */
clear_tsk_thread_flag(child, TIF_SINGLESTEP);
child_psr->ss = 0;
child_psr->tb = 0;
}
/*
* Called by kernel/ptrace.c when detaching..
*
* Make sure the single step bit is not set.
*/
void
ptrace_disable (struct task_struct *child)
{
user_disable_single_step(child);
}
long
arch_ptrace (struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
switch (request) {
case PTRACE_PEEKTEXT:
case PTRACE_PEEKDATA:
/* read word at location addr */
if (access_process_vm(child, addr, &data, sizeof(data), 0)
!= sizeof(data))
return -EIO;
/* ensure return value is not mistaken for error code */
force_successful_syscall_return();
return data;
/* PTRACE_POKETEXT and PTRACE_POKEDATA are handled
* by the generic ptrace_request().
*/
case PTRACE_PEEKUSR:
/* read the word at addr in the USER area */
if (access_uarea(child, addr, &data, 0) < 0)
return -EIO;
/* ensure return value is not mistaken for error code */
force_successful_syscall_return();
return data;
case PTRACE_POKEUSR:
/* write the word at addr in the USER area */
if (access_uarea(child, addr, &data, 1) < 0)
return -EIO;
return 0;
case PTRACE_OLD_GETSIGINFO:
/* for backwards-compatibility */
return ptrace_request(child, PTRACE_GETSIGINFO, addr, data);
case PTRACE_OLD_SETSIGINFO:
/* for backwards-compatibility */
return ptrace_request(child, PTRACE_SETSIGINFO, addr, data);
case PTRACE_GETREGS:
return ptrace_getregs(child,
(struct pt_all_user_regs __user *) data);
case PTRACE_SETREGS:
return ptrace_setregs(child,
(struct pt_all_user_regs __user *) data);
default:
return ptrace_request(child, request, addr, data);
}
}
/* "asmlinkage" so the input arguments are preserved... */
asmlinkage long
syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
long arg4, long arg5, long arg6, long arg7,
struct pt_regs regs)
{
if (test_thread_flag(TIF_SYSCALL_TRACE))
if (tracehook_report_syscall_entry(&regs))
return -ENOSYS;
/* copy user rbs to kernel rbs */
if (test_thread_flag(TIF_RESTORE_RSE))
ia64_sync_krbs();
audit_syscall_entry(AUDIT_ARCH_IA64, regs.r15, arg0, arg1, arg2, arg3);
return 0;
}
/* "asmlinkage" so the input arguments are preserved... */
asmlinkage void
syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
long arg4, long arg5, long arg6, long arg7,
struct pt_regs regs)
{
int step;
audit_syscall_exit(&regs);
step = test_thread_flag(TIF_SINGLESTEP);
if (step || test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(&regs, step);
/* copy user rbs to kernel rbs */
if (test_thread_flag(TIF_RESTORE_RSE))
ia64_sync_krbs();
}
/* Utrace implementation starts here */
struct regset_get {
void *kbuf;
void __user *ubuf;
};
struct regset_set {
const void *kbuf;
const void __user *ubuf;
};
struct regset_getset {
struct task_struct *target;
const struct user_regset *regset;
union {
struct regset_get get;
struct regset_set set;
} u;
unsigned int pos;
unsigned int count;
int ret;
};
static int
access_elf_gpreg(struct task_struct *target, struct unw_frame_info *info,
unsigned long addr, unsigned long *data, int write_access)
{
struct pt_regs *pt;
unsigned long *ptr = NULL;
int ret;
char nat = 0;
pt = task_pt_regs(target);
switch (addr) {
case ELF_GR_OFFSET(1):
ptr = &pt->r1;
break;
case ELF_GR_OFFSET(2):
case ELF_GR_OFFSET(3):
ptr = (void *)&pt->r2 + (addr - ELF_GR_OFFSET(2));
break;
case ELF_GR_OFFSET(4) ... ELF_GR_OFFSET(7):
if (write_access) {
/* read NaT bit first: */
unsigned long dummy;
ret = unw_get_gr(info, addr/8, &dummy, &nat);
if (ret < 0)
return ret;
}
return unw_access_gr(info, addr/8, data, &nat, write_access);
case ELF_GR_OFFSET(8) ... ELF_GR_OFFSET(11):
ptr = (void *)&pt->r8 + addr - ELF_GR_OFFSET(8);
break;
case ELF_GR_OFFSET(12):
case ELF_GR_OFFSET(13):
ptr = (void *)&pt->r12 + addr - ELF_GR_OFFSET(12);
break;
case ELF_GR_OFFSET(14):
ptr = &pt->r14;
break;
case ELF_GR_OFFSET(15):
ptr = &pt->r15;
}
if (write_access)
*ptr = *data;
else
*data = *ptr;
return 0;
}
static int
access_elf_breg(struct task_struct *target, struct unw_frame_info *info,
unsigned long addr, unsigned long *data, int write_access)
{
struct pt_regs *pt;
unsigned long *ptr = NULL;
pt = task_pt_regs(target);
switch (addr) {
case ELF_BR_OFFSET(0):
ptr = &pt->b0;
break;
case ELF_BR_OFFSET(1) ... ELF_BR_OFFSET(5):
return unw_access_br(info, (addr - ELF_BR_OFFSET(0))/8,
data, write_access);
case ELF_BR_OFFSET(6):
ptr = &pt->b6;
break;
case ELF_BR_OFFSET(7):
ptr = &pt->b7;
}
if (write_access)
*ptr = *data;
else
*data = *ptr;
return 0;
}
static int
access_elf_areg(struct task_struct *target, struct unw_frame_info *info,
unsigned long addr, unsigned long *data, int write_access)
{
struct pt_regs *pt;
unsigned long cfm, urbs_end;
unsigned long *ptr = NULL;
pt = task_pt_regs(target);
if (addr >= ELF_AR_RSC_OFFSET && addr <= ELF_AR_SSD_OFFSET) {
switch (addr) {
case ELF_AR_RSC_OFFSET:
/* force PL3 */
if (write_access)
pt->ar_rsc = *data | (3 << 2);
else
*data = pt->ar_rsc;
return 0;
case ELF_AR_BSP_OFFSET:
/*
* By convention, we use PT_AR_BSP to refer to
* the end of the user-level backing store.
* Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof)
* to get the real value of ar.bsp at the time
* the kernel was entered.
*
* Furthermore, when changing the contents of
* PT_AR_BSP (or PT_CFM) while the task is
* blocked in a system call, convert the state
* so that the non-system-call exit
* path is used. This ensures that the proper
* state will be picked up when resuming
* execution. However, it *also* means that
* once we write PT_AR_BSP/PT_CFM, it won't be
* possible to modify the syscall arguments of
* the pending system call any longer. This
* shouldn't be an issue because modifying
* PT_AR_BSP/PT_CFM generally implies that
* we're either abandoning the pending system
* call or deferring its re-execution
* (e.g., due to GDB doing an inferior
* function call).
*/
urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
if (write_access) {
if (*data != urbs_end) {
if (in_syscall(pt))
convert_to_non_syscall(target,
pt,
cfm);
/*
* Simulate user-level write
* of ar.bsp:
*/
pt->loadrs = 0;
pt->ar_bspstore = *data;
}
} else
*data = urbs_end;
return 0;
case ELF_AR_BSPSTORE_OFFSET:
ptr = &pt->ar_bspstore;
break;
case ELF_AR_RNAT_OFFSET:
ptr = &pt->ar_rnat;
break;
case ELF_AR_CCV_OFFSET:
ptr = &pt->ar_ccv;
break;
case ELF_AR_UNAT_OFFSET:
ptr = &pt->ar_unat;
break;
case ELF_AR_FPSR_OFFSET:
ptr = &pt->ar_fpsr;
break;
case ELF_AR_PFS_OFFSET:
ptr = &pt->ar_pfs;
break;
case ELF_AR_LC_OFFSET:
return unw_access_ar(info, UNW_AR_LC, data,
write_access);
case ELF_AR_EC_OFFSET:
return unw_access_ar(info, UNW_AR_EC, data,
write_access);
case ELF_AR_CSD_OFFSET:
ptr = &pt->ar_csd;
break;
case ELF_AR_SSD_OFFSET:
ptr = &pt->ar_ssd;
}
} else if (addr >= ELF_CR_IIP_OFFSET && addr <= ELF_CR_IPSR_OFFSET) {
switch (addr) {
case ELF_CR_IIP_OFFSET:
ptr = &pt->cr_iip;
break;
case ELF_CFM_OFFSET:
urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
if (write_access) {
if (((cfm ^ *data) & PFM_MASK) != 0) {
if (in_syscall(pt))
convert_to_non_syscall(target,
pt,
cfm);
pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK)
| (*data & PFM_MASK));
}
} else
*data = cfm;
return 0;
case ELF_CR_IPSR_OFFSET:
if (write_access) {
unsigned long tmp = *data;
/* psr.ri==3 is a reserved value: SDM 2:25 */
if ((tmp & IA64_PSR_RI) == IA64_PSR_RI)
tmp &= ~IA64_PSR_RI;
pt->cr_ipsr = ((tmp & IPSR_MASK)
| (pt->cr_ipsr & ~IPSR_MASK));
} else
*data = (pt->cr_ipsr & IPSR_MASK);
return 0;
}
} else if (addr == ELF_NAT_OFFSET)
return access_nat_bits(target, pt, info,
data, write_access);
else if (addr == ELF_PR_OFFSET)
ptr = &pt->pr;
else
return -1;
if (write_access)
*ptr = *data;
else
*data = *ptr;
return 0;
}
static int
access_elf_reg(struct task_struct *target, struct unw_frame_info *info,
unsigned long addr, unsigned long *data, int write_access)
{
if (addr >= ELF_GR_OFFSET(1) && addr <= ELF_GR_OFFSET(15))
return access_elf_gpreg(target, info, addr, data, write_access);
else if (addr >= ELF_BR_OFFSET(0) && addr <= ELF_BR_OFFSET(7))
return access_elf_breg(target, info, addr, data, write_access);
else
return access_elf_areg(target, info, addr, data, write_access);
}
void do_gpregs_get(struct unw_frame_info *info, void *arg)
{
struct pt_regs *pt;
struct regset_getset *dst = arg;
elf_greg_t tmp[16];
unsigned int i, index, min_copy;
if (unw_unwind_to_user(info) < 0)
return;
/*
* coredump format:
* r0-r31
* NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
* predicate registers (p0-p63)
* b0-b7
* ip cfm user-mask
* ar.rsc ar.bsp ar.bspstore ar.rnat
* ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
*/
/* Skip r0 */
if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count,
&dst->u.get.kbuf,
&dst->u.get.ubuf,
0, ELF_GR_OFFSET(1));
if (dst->ret || dst->count == 0)
return;
}
/* gr1 - gr15 */
if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
min_copy = ELF_GR_OFFSET(16) > (dst->pos + dst->count) ?
(dst->pos + dst->count) : ELF_GR_OFFSET(16);
for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
index++)
if (access_elf_reg(dst->target, info, i,
&tmp[index], 0) < 0) {
dst->ret = -EIO;
return;
}
dst->ret = user_regset_copyout(&dst->pos, &dst->count,
&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
if (dst->ret || dst->count == 0)
return;
}
/* r16-r31 */
if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
pt = task_pt_regs(dst->target);
dst->ret = user_regset_copyout(&dst->pos, &dst->count,
&dst->u.get.kbuf, &dst->u.get.ubuf, &pt->r16,
ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
if (dst->ret || dst->count == 0)
return;
}
/* nat, pr, b0 - b7 */
if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
min_copy = ELF_CR_IIP_OFFSET > (dst->pos + dst->count) ?
(dst->pos + dst->count) : ELF_CR_IIP_OFFSET;
for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
index++)
if (access_elf_reg(dst->target, info, i,
&tmp[index], 0) < 0) {
dst->ret = -EIO;
return;
}
dst->ret = user_regset_copyout(&dst->pos, &dst->count,
&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
if (dst->ret || dst->count == 0)
return;
}
/* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
* ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
*/
if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
min_copy = ELF_AR_END_OFFSET > (dst->pos + dst->count) ?
(dst->pos + dst->count) : ELF_AR_END_OFFSET;
for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
index++)
if (access_elf_reg(dst->target, info, i,
&tmp[index], 0) < 0) {
dst->ret = -EIO;
return;
}
dst->ret = user_regset_copyout(&dst->pos, &dst->count,
&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
}
}
void do_gpregs_set(struct unw_frame_info *info, void *arg)
{
struct pt_regs *pt;
struct regset_getset *dst = arg;
elf_greg_t tmp[16];
unsigned int i, index;
if (unw_unwind_to_user(info) < 0)
return;
/* Skip r0 */
if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
&dst->u.set.kbuf,
&dst->u.set.ubuf,
0, ELF_GR_OFFSET(1));
if (dst->ret || dst->count == 0)
return;
}
/* gr1-gr15 */
if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
i = dst->pos;
index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
dst->ret = user_regset_copyin(&dst->pos, &dst->count,
&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
if (dst->ret)
return;
for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
if (access_elf_reg(dst->target, info, i,
&tmp[index], 1) < 0) {
dst->ret = -EIO;
return;
}
if (dst->count == 0)
return;
}
/* gr16-gr31 */
if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
pt = task_pt_regs(dst->target);
dst->ret = user_regset_copyin(&dst->pos, &dst->count,
&dst->u.set.kbuf, &dst->u.set.ubuf, &pt->r16,
ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
if (dst->ret || dst->count == 0)
return;
}
/* nat, pr, b0 - b7 */
if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
i = dst->pos;
index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
dst->ret = user_regset_copyin(&dst->pos, &dst->count,
&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
if (dst->ret)
return;
for (; i < dst->pos; i += sizeof(elf_greg_t), index++)
if (access_elf_reg(dst->target, info, i,
&tmp[index], 1) < 0) {
dst->ret = -EIO;
return;
}
if (dst->count == 0)
return;
}
/* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
* ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
*/
if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
i = dst->pos;
index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
dst->ret = user_regset_copyin(&dst->pos, &dst->count,
&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
if (dst->ret)
return;
for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
if (access_elf_reg(dst->target, info, i,
&tmp[index], 1) < 0) {
dst->ret = -EIO;
return;
}
}
}
#define ELF_FP_OFFSET(i) ((i) * sizeof(elf_fpreg_t))
void do_fpregs_get(struct unw_frame_info *info, void *arg)
{
struct regset_getset *dst = arg;
struct task_struct *task = dst->target;
elf_fpreg_t tmp[30];
int index, min_copy, i;
if (unw_unwind_to_user(info) < 0)
return;
/* Skip pos 0 and 1 */
if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count,
&dst->u.get.kbuf,
&dst->u.get.ubuf,
0, ELF_FP_OFFSET(2));
if (dst->count == 0 || dst->ret)
return;
}
/* fr2-fr31 */
if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
index = (dst->pos - ELF_FP_OFFSET(2)) / sizeof(elf_fpreg_t);
min_copy = min(((unsigned int)ELF_FP_OFFSET(32)),
dst->pos + dst->count);
for (i = dst->pos; i < min_copy; i += sizeof(elf_fpreg_t),
index++)
if (unw_get_fr(info, i / sizeof(elf_fpreg_t),
&tmp[index])) {
dst->ret = -EIO;
return;
}
dst->ret = user_regset_copyout(&dst->pos, &dst->count,
&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
if (dst->count == 0 || dst->ret)
return;
}
/* fph */
if (dst->count > 0) {
ia64_flush_fph(dst->target);
if (task->thread.flags & IA64_THREAD_FPH_VALID)
dst->ret = user_regset_copyout(
&dst->pos, &dst->count,
&dst->u.get.kbuf, &dst->u.get.ubuf,
&dst->target->thread.fph,
ELF_FP_OFFSET(32), -1);
else
/* Zero fill instead. */
dst->ret = user_regset_copyout_zero(
&dst->pos, &dst->count,
&dst->u.get.kbuf, &dst->u.get.ubuf,
ELF_FP_OFFSET(32), -1);
}
}
void do_fpregs_set(struct unw_frame_info *info, void *arg)
{
struct regset_getset *dst = arg;
elf_fpreg_t fpreg, tmp[30];
int index, start, end;
if (unw_unwind_to_user(info) < 0)
return;
/* Skip pos 0 and 1 */
if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
&dst->u.set.kbuf,
&dst->u.set.ubuf,
0, ELF_FP_OFFSET(2));
if (dst->count == 0 || dst->ret)
return;
}
/* fr2-fr31 */
if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
start = dst->pos;
end = min(((unsigned int)ELF_FP_OFFSET(32)),
dst->pos + dst->count);
dst->ret = user_regset_copyin(&dst->pos, &dst->count,
&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
if (dst->ret)
return;
if (start & 0xF) { /* only write high part */
if (unw_get_fr(info, start / sizeof(elf_fpreg_t),
&fpreg)) {
dst->ret = -EIO;
return;
}
tmp[start / sizeof(elf_fpreg_t) - 2].u.bits[0]
= fpreg.u.bits[0];
start &= ~0xFUL;
}
if (end & 0xF) { /* only write low part */
if (unw_get_fr(info, end / sizeof(elf_fpreg_t),
&fpreg)) {
dst->ret = -EIO;
return;
}
tmp[end / sizeof(elf_fpreg_t) - 2].u.bits[1]
= fpreg.u.bits[1];
end = (end + 0xF) & ~0xFUL;
}
for ( ; start < end ; start += sizeof(elf_fpreg_t)) {
index = start / sizeof(elf_fpreg_t);
if (unw_set_fr(info, index, tmp[index - 2])) {
dst->ret = -EIO;
return;
}
}
if (dst->ret || dst->count == 0)
return;
}
/* fph */
if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(128)) {
ia64_sync_fph(dst->target);
dst->ret = user_regset_copyin(&dst->pos, &dst->count,
&dst->u.set.kbuf,
&dst->u.set.ubuf,
&dst->target->thread.fph,
ELF_FP_OFFSET(32), -1);
}
}
static int
do_regset_call(void (*call)(struct unw_frame_info *, void *),
struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct regset_getset info = { .target = target, .regset = regset,
.pos = pos, .count = count,
.u.set = { .kbuf = kbuf, .ubuf = ubuf },
.ret = 0 };
if (target == current)
unw_init_running(call, &info);
else {
struct unw_frame_info ufi;
memset(&ufi, 0, sizeof(ufi));
unw_init_from_blocked_task(&ufi, target);
(*call)(&ufi, &info);
}
return info.ret;
}
static int
gpregs_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
return do_regset_call(do_gpregs_get, target, regset, pos, count,
kbuf, ubuf);
}
static int gpregs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
return do_regset_call(do_gpregs_set, target, regset, pos, count,
kbuf, ubuf);
}
static void do_gpregs_writeback(struct unw_frame_info *info, void *arg)
{
do_sync_rbs(info, ia64_sync_user_rbs);
}
/*
* This is called to write back the register backing store.
* ptrace does this before it stops, so that a tracer reading the user
* memory after the thread stops will get the current register data.
*/
static int
gpregs_writeback(struct task_struct *target,
const struct user_regset *regset,
int now)
{
if (test_and_set_tsk_thread_flag(target, TIF_RESTORE_RSE))
return 0;
set_notify_resume(target);
return do_regset_call(do_gpregs_writeback, target, regset, 0, 0,
NULL, NULL);
}
static int
fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
return (target->thread.flags & IA64_THREAD_FPH_VALID) ? 128 : 32;
}
static int fpregs_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
return do_regset_call(do_fpregs_get, target, regset, pos, count,
kbuf, ubuf);
}
static int fpregs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
return do_regset_call(do_fpregs_set, target, regset, pos, count,
kbuf, ubuf);
}
static int
access_uarea(struct task_struct *child, unsigned long addr,
unsigned long *data, int write_access)
{
unsigned int pos = -1; /* an invalid value */
int ret;
unsigned long *ptr, regnum;
if ((addr & 0x7) != 0) {
dprintk("ptrace: unaligned register address 0x%lx\n", addr);
return -1;
}
if ((addr >= PT_NAT_BITS + 8 && addr < PT_F2) ||
(addr >= PT_R7 + 8 && addr < PT_B1) ||
(addr >= PT_AR_LC + 8 && addr < PT_CR_IPSR) ||
(addr >= PT_AR_SSD + 8 && addr < PT_DBR)) {
dprintk("ptrace: rejecting access to register "
"address 0x%lx\n", addr);
return -1;
}
switch (addr) {
case PT_F32 ... (PT_F127 + 15):
pos = addr - PT_F32 + ELF_FP_OFFSET(32);
break;
case PT_F2 ... (PT_F5 + 15):
pos = addr - PT_F2 + ELF_FP_OFFSET(2);
break;
case PT_F10 ... (PT_F31 + 15):
pos = addr - PT_F10 + ELF_FP_OFFSET(10);
break;
case PT_F6 ... (PT_F9 + 15):
pos = addr - PT_F6 + ELF_FP_OFFSET(6);
break;
}
if (pos != -1) {
if (write_access)
ret = fpregs_set(child, NULL, pos,
sizeof(unsigned long), data, NULL);
else
ret = fpregs_get(child, NULL, pos,
sizeof(unsigned long), data, NULL);
if (ret != 0)
return -1;
return 0;
}
switch (addr) {
case PT_NAT_BITS:
pos = ELF_NAT_OFFSET;
break;
case PT_R4 ... PT_R7:
pos = addr - PT_R4 + ELF_GR_OFFSET(4);
break;
case PT_B1 ... PT_B5:
pos = addr - PT_B1 + ELF_BR_OFFSET(1);
break;
case PT_AR_EC:
pos = ELF_AR_EC_OFFSET;
break;
case PT_AR_LC:
pos = ELF_AR_LC_OFFSET;
break;
case PT_CR_IPSR:
pos = ELF_CR_IPSR_OFFSET;
break;
case PT_CR_IIP:
pos = ELF_CR_IIP_OFFSET;
break;
case PT_CFM:
pos = ELF_CFM_OFFSET;
break;
case PT_AR_UNAT:
pos = ELF_AR_UNAT_OFFSET;
break;
case PT_AR_PFS:
pos = ELF_AR_PFS_OFFSET;
break;
case PT_AR_RSC:
pos = ELF_AR_RSC_OFFSET;
break;
case PT_AR_RNAT:
pos = ELF_AR_RNAT_OFFSET;
break;
case PT_AR_BSPSTORE:
pos = ELF_AR_BSPSTORE_OFFSET;
break;
case PT_PR:
pos = ELF_PR_OFFSET;
break;
case PT_B6:
pos = ELF_BR_OFFSET(6);
break;
case PT_AR_BSP:
pos = ELF_AR_BSP_OFFSET;
break;
case PT_R1 ... PT_R3:
pos = addr - PT_R1 + ELF_GR_OFFSET(1);
break;
case PT_R12 ... PT_R15:
pos = addr - PT_R12 + ELF_GR_OFFSET(12);
break;
case PT_R8 ... PT_R11:
pos = addr - PT_R8 + ELF_GR_OFFSET(8);
break;
case PT_R16 ... PT_R31:
pos = addr - PT_R16 + ELF_GR_OFFSET(16);
break;
case PT_AR_CCV:
pos = ELF_AR_CCV_OFFSET;
break;
case PT_AR_FPSR:
pos = ELF_AR_FPSR_OFFSET;
break;
case PT_B0:
pos = ELF_BR_OFFSET(0);
break;
case PT_B7:
pos = ELF_BR_OFFSET(7);
break;
case PT_AR_CSD:
pos = ELF_AR_CSD_OFFSET;
break;
case PT_AR_SSD:
pos = ELF_AR_SSD_OFFSET;
break;
}
if (pos != -1) {
if (write_access)
ret = gpregs_set(child, NULL, pos,
sizeof(unsigned long), data, NULL);
else
ret = gpregs_get(child, NULL, pos,
sizeof(unsigned long), data, NULL);
if (ret != 0)
return -1;
return 0;
}
/* access debug registers */
if (addr >= PT_IBR) {
regnum = (addr - PT_IBR) >> 3;
ptr = &child->thread.ibr[0];
} else {
regnum = (addr - PT_DBR) >> 3;
ptr = &child->thread.dbr[0];
}
if (regnum >= 8) {
dprintk("ptrace: rejecting access to register "
"address 0x%lx\n", addr);
return -1;
}
#ifdef CONFIG_PERFMON
/*
* Check if debug registers are used by perfmon. This
* test must be done once we know that we can do the
* operation, i.e. the arguments are all valid, but
* before we start modifying the state.
*
* Perfmon needs to keep a count of how many processes
* are trying to modify the debug registers for system
* wide monitoring sessions.
*
* We also include read access here, because they may
* cause the PMU-installed debug register state
* (dbr[], ibr[]) to be reset. The two arrays are also
* used by perfmon, but we do not use
* IA64_THREAD_DBG_VALID. The registers are restored
* by the PMU context switch code.
*/
if (pfm_use_debug_registers(child))
return -1;
#endif
if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
child->thread.flags |= IA64_THREAD_DBG_VALID;
memset(child->thread.dbr, 0,
sizeof(child->thread.dbr));
memset(child->thread.ibr, 0,
sizeof(child->thread.ibr));
}
ptr += regnum;
if ((regnum & 1) && write_access) {
/* don't let the user set kernel-level breakpoints: */
*ptr = *data & ~(7UL << 56);
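/*
 * Bits 56-59 of the odd (control) register are the plm0-plm3
 * privilege-level-match enables; clearing bits 56-58 leaves only
 * plm3 set, so the breakpoint can match at user level (pl3) only.
 */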
return 0;
}
if (write_access)
*ptr = *data;
else
*data = *ptr;
return 0;
}
static const struct user_regset native_regsets[] = {
{
.core_note_type = NT_PRSTATUS,
.n = ELF_NGREG,
.size = sizeof(elf_greg_t), .align = sizeof(elf_greg_t),
.get = gpregs_get, .set = gpregs_set,
.writeback = gpregs_writeback
},
{
.core_note_type = NT_PRFPREG,
.n = ELF_NFPREG,
.size = sizeof(elf_fpreg_t), .align = sizeof(elf_fpreg_t),
.get = fpregs_get, .set = fpregs_set, .active = fpregs_active
},
};
static const struct user_regset_view user_ia64_view = {
.name = "ia64",
.e_machine = EM_IA_64,
.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
};
const struct user_regset_view *task_user_regset_view(struct task_struct *tsk)
{
return &user_ia64_view;
}
struct syscall_get_set_args {
unsigned int i;
unsigned int n;
unsigned long *args;
struct pt_regs *regs;
int rw;
};
static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data)
{
struct syscall_get_set_args *args = data;
struct pt_regs *pt = args->regs;
unsigned long *krbs, cfm, ndirty;
int i, count;
if (unw_unwind_to_user(info) < 0)
return;
cfm = pt->cr_ifs;
krbs = (unsigned long *)info->task + IA64_RBS_OFFSET/8;
ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
count = 0;
if (in_syscall(pt))
count = min_t(int, args->n, cfm & 0x7f);
for (i = 0; i < count; i++) {
if (args->rw)
*ia64_rse_skip_regs(krbs, ndirty + i + args->i) =
args->args[i];
else
args->args[i] = *ia64_rse_skip_regs(krbs,
ndirty + i + args->i);
}
if (!args->rw) {
while (i < args->n) {
args->args[i] = 0;
i++;
}
}
}
void ia64_syscall_get_set_arguments(struct task_struct *task,
struct pt_regs *regs, unsigned int i, unsigned int n,
unsigned long *args, int rw)
{
struct syscall_get_set_args data = {
.i = i,
.n = n,
.args = args,
.regs = regs,
.rw = rw,
};
if (task == current)
unw_init_running(syscall_get_set_args_cb, &data);
else {
struct unw_frame_info ufi;
memset(&ufi, 0, sizeof(ufi));
unw_init_from_blocked_task(&ufi, task);
syscall_get_set_args_cb(&ufi, &data);
}
}
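/*
 * A minimal, hypothetical caller (not from the original source): read
 * the first three syscall arguments of a stopped tracee by passing
 * rw == 0:
 *
 *	unsigned long args[3];
 *	ia64_syscall_get_set_arguments(task, task_pt_regs(task),
 *				       0, 3, args, 0);
 */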
| gpl-2.0 |
dumtara/android_kernel_yu_msm8916 | arch/ia64/kernel/ptrace.c | 2742 | 57987 | /*
* Kernel support for the ptrace() and syscall tracing interfaces.
*
* Copyright (C) 1999-2005 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 2006 Intel Co
* 2006-08-12 - IA64 Native Utrace implementation support added by
* Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
*
* Derived from the x86 and Alpha versions.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/regset.h>
#include <linux/elf.h>
#include <linux/tracehook.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace_offsets.h>
#include <asm/rse.h>
#include <asm/uaccess.h>
#include <asm/unwind.h>
#ifdef CONFIG_PERFMON
#include <asm/perfmon.h>
#endif
#include "entry.h"
/*
* Bits in the PSR that we allow ptrace() to change:
* be, up, ac, mfl, mfh (the user mask; five bits total)
* db (debug breakpoint fault; one bit)
* id (instruction debug fault disable; one bit)
* dd (data debug fault disable; one bit)
* ri (restart instruction; two bits)
* is (instruction set; one bit)
*/
#define IPSR_MASK (IA64_PSR_UM | IA64_PSR_DB | IA64_PSR_IS \
| IA64_PSR_ID | IA64_PSR_DD | IA64_PSR_RI)
#define MASK(nbits) ((1UL << (nbits)) - 1) /* mask with NBITS bits set */
#define PFM_MASK MASK(38)
#define PTRACE_DEBUG 0
#if PTRACE_DEBUG
# define dprintk(format...) printk(format)
# define inline
#else
# define dprintk(format...)
#endif
/* Return TRUE if PT was created due to kernel-entry via a system-call. */
static inline int
in_syscall (struct pt_regs *pt)
{
return (long) pt->cr_ifs >= 0;
}
/*
* Collect the NaT bits for r1-r31 from scratch_unat and return a NaT
* bitset where bit i is set iff the NaT bit of register i is set.
*/
unsigned long
ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat)
{
# define GET_BITS(first, last, unat) \
({ \
unsigned long bit = ia64_unat_pos(&pt->r##first); \
unsigned long nbits = (last - first + 1); \
unsigned long mask = MASK(nbits) << first; \
unsigned long dist; \
if (bit < first) \
dist = 64 + bit - first; \
else \
dist = bit - first; \
ia64_rotr(unat, dist) & mask; \
})
unsigned long val;
/*
* Registers that are stored consecutively in struct pt_regs
* can be handled in parallel. If the register order in
* struct_pt_regs changes, this code MUST be updated.
*/
val = GET_BITS( 1, 1, scratch_unat);
val |= GET_BITS( 2, 3, scratch_unat);
val |= GET_BITS(12, 13, scratch_unat);
val |= GET_BITS(14, 14, scratch_unat);
val |= GET_BITS(15, 15, scratch_unat);
val |= GET_BITS( 8, 11, scratch_unat);
val |= GET_BITS(16, 31, scratch_unat);
return val;
# undef GET_BITS
}
/*
* Set the NaT bits for the scratch registers according to NAT and
* return the resulting unat (assuming the scratch registers are
* stored in PT).
*/
unsigned long
ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat)
{
# define PUT_BITS(first, last, nat) \
({ \
unsigned long bit = ia64_unat_pos(&pt->r##first); \
unsigned long nbits = (last - first + 1); \
unsigned long mask = MASK(nbits) << first; \
long dist; \
if (bit < first) \
dist = 64 + bit - first; \
else \
dist = bit - first; \
ia64_rotl(nat & mask, dist); \
})
unsigned long scratch_unat;
/*
* Registers that are stored consecutively in struct pt_regs
* can be handled in parallel. If the register order in
* struct_pt_regs changes, this code MUST be updated.
*/
scratch_unat = PUT_BITS( 1, 1, nat);
scratch_unat |= PUT_BITS( 2, 3, nat);
scratch_unat |= PUT_BITS(12, 13, nat);
scratch_unat |= PUT_BITS(14, 14, nat);
scratch_unat |= PUT_BITS(15, 15, nat);
scratch_unat |= PUT_BITS( 8, 11, nat);
scratch_unat |= PUT_BITS(16, 31, nat);
return scratch_unat;
# undef PUT_BITS
}
#define IA64_MLX_TEMPLATE 0x2
#define IA64_MOVL_OPCODE 6
void
ia64_increment_ip (struct pt_regs *regs)
{
unsigned long w0, ri = ia64_psr(regs)->ri + 1;
if (ri > 2) {
ri = 0;
regs->cr_iip += 16;
} else if (ri == 2) {
get_user(w0, (char __user *) regs->cr_iip + 0);
if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
/*
* rfi'ing to slot 2 of an MLX bundle causes
* an illegal operation fault. We don't want
* that to happen...
*/
ri = 0;
regs->cr_iip += 16;
}
}
ia64_psr(regs)->ri = ri;
}
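/*
 * Editor's illustrative sketch (not part of the original file): a minimal
 * model of the slot-advance rule implemented above. IA-64 bundles are 16
 * bytes wide with three instruction slots (ri = 0..2); an MLX-template
 * bundle uses slot 2 for the upper immediate of a movl, so there is no
 * executable slot 2 to advance to.
 */
static inline void example_advance_ip(unsigned long *iip, unsigned long *ri,
				      int bundle_is_mlx)
{
	++*ri;
	if (*ri > 2 || (*ri == 2 && bundle_is_mlx)) {
		*ri = 0;	/* wrap to slot 0 of ... */
		*iip += 16;	/* ... the next bundle */
	}
}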
void
ia64_decrement_ip (struct pt_regs *regs)
{
unsigned long w0, ri = ia64_psr(regs)->ri - 1;
if (ia64_psr(regs)->ri == 0) {
regs->cr_iip -= 16;
ri = 2;
get_user(w0, (char __user *) regs->cr_iip + 0);
if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
/*
* rfi'ing to slot 2 of an MLX bundle causes
* an illegal operation fault. We don't want
* that to happen...
*/
ri = 1;
}
}
ia64_psr(regs)->ri = ri;
}
/*
* This routine is used to read the rnat bits that are stored on the
* kernel backing store. Since, in general, the alignments of the user
* and kernel backing stores differ, this is not completely trivial. In
* essence, we need to construct the user RNAT based on up to two
* kernel RNAT values and/or the RNAT value saved in the child's
* pt_regs.
*
* user rbs
*
* +--------+ <-- lowest address
* | slot62 |
* +--------+
* | rnat | 0x....1f8
* +--------+
* | slot00 | \
* +--------+ |
* | slot01 | > child_regs->ar_rnat
* +--------+ |
* | slot02 | / kernel rbs
* +--------+ +--------+
* <- child_regs->ar_bspstore | slot61 | <-- krbs
* +- - - - + +--------+
* | slot62 |
* +- - - - + +--------+
* | rnat |
* +- - - - + +--------+
* vrnat | slot00 |
* +- - - - + +--------+
* = =
* +--------+
* | slot00 | \
* +--------+ |
* | slot01 | > child_stack->ar_rnat
* +--------+ |
* | slot02 | /
* +--------+
* <--- child_stack->ar_bspstore
*
* The way to think of this code is as follows: bit 0 in the user rnat
* corresponds to some bit N (0 <= N <= 62) in one of the kernel rnat
* values. The kernel rnat value holding this bit is stored in
* variable rnat0. rnat1 is loaded with the kernel rnat value that
* forms the upper bits of the user rnat value.
*
* Boundary cases:
*
* o when reading the rnat "below" the first rnat slot on the kernel
* backing store, rnat0/rnat1 are set to 0 and the low order bits are
* merged in from pt->ar_rnat.
*
* o when reading the rnat "above" the last rnat slot on the kernel
* backing store, rnat0/rnat1 gets its value from sw->ar_rnat.
*/
static unsigned long
get_rnat (struct task_struct *task, struct switch_stack *sw,
unsigned long *krbs, unsigned long *urnat_addr,
unsigned long *urbs_end)
{
unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr;
unsigned long umask = 0, mask, m;
unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
long num_regs, nbits;
struct pt_regs *pt;
pt = task_pt_regs(task);
kbsp = (unsigned long *) sw->ar_bspstore;
ubspstore = (unsigned long *) pt->ar_bspstore;
if (urbs_end < urnat_addr)
nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_end);
else
nbits = 63;
mask = MASK(nbits);
/*
* First, figure out which bit number slot 0 in user-land maps
* to in the kernel rnat. Do this by figuring out how many
* register slots we're beyond the user's backingstore and
* then computing the equivalent address in kernel space.
*/
num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
shift = ia64_rse_slot_num(slot0_kaddr);
rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
rnat0_kaddr = rnat1_kaddr - 64;
if (ubspstore + 63 > urnat_addr) {
/* some bits need to be merged in from pt->ar_rnat */
umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
urnat = (pt->ar_rnat & umask);
mask &= ~umask;
if (!mask)
return urnat;
}
m = mask << shift;
if (rnat0_kaddr >= kbsp)
rnat0 = sw->ar_rnat;
else if (rnat0_kaddr > krbs)
rnat0 = *rnat0_kaddr;
urnat |= (rnat0 & m) >> shift;
m = mask >> (63 - shift);
if (rnat1_kaddr >= kbsp)
rnat1 = sw->ar_rnat;
else if (rnat1_kaddr > krbs)
rnat1 = *rnat1_kaddr;
urnat |= (rnat1 & m) << (63 - shift);
return urnat;
}
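/*
 * Editor's illustrative sketch (assumption: the ia64_rse_* helpers behave
 * as in asm/rse.h): a worked example of the slot arithmetic that
 * get_rnat()/put_rnat() rely on. Every 64th slot of a backing store is an
 * RNaT collection, selected purely by bits 3..8 of the address (hence the
 * "0x....1f8" note in the diagram above).
 */
static inline void example_rse_arithmetic(void)
{
	unsigned long *slot = (unsigned long *) 0xa000000000000100UL;

	/* bits 3..8 give the slot number: (0x100 >> 3) & 0x3f == 0x20 */
	BUG_ON(ia64_rse_slot_num(slot) != 0x20);

	/*
	 * The RNaT collection covering this slot sits at slot 0x3f of the
	 * same 64-slot group, i.e. at address 0xa0000000000001f8.
	 */
	BUG_ON(ia64_rse_rnat_addr(slot) !=
	       (unsigned long *) 0xa0000000000001f8UL);
}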
/*
* The reverse of get_rnat.
*/
static void
put_rnat (struct task_struct *task, struct switch_stack *sw,
unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat,
unsigned long *urbs_end)
{
unsigned long rnat0 = 0, rnat1 = 0, *slot0_kaddr, umask = 0, mask, m;
unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
long num_regs, nbits;
struct pt_regs *pt;
unsigned long cfm, *urbs_kargs;
pt = task_pt_regs(task);
kbsp = (unsigned long *) sw->ar_bspstore;
ubspstore = (unsigned long *) pt->ar_bspstore;
urbs_kargs = urbs_end;
if (in_syscall(pt)) {
/*
* If entered via syscall, don't allow user to set rnat bits
* for syscall args.
*/
cfm = pt->cr_ifs;
urbs_kargs = ia64_rse_skip_regs(urbs_end, -(cfm & 0x7f));
}
if (urbs_kargs >= urnat_addr)
nbits = 63;
else {
if ((urnat_addr - 63) >= urbs_kargs)
return;
nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_kargs);
}
mask = MASK(nbits);
/*
* First, figure out which bit number slot 0 in user-land maps
* to in the kernel rnat. Do this by figuring out how many
* register slots we're beyond the user's backingstore and
* then computing the equivalent address in kernel space.
*/
num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
shift = ia64_rse_slot_num(slot0_kaddr);
rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
rnat0_kaddr = rnat1_kaddr - 64;
if (ubspstore + 63 > urnat_addr) {
/* some bits need to be placed in pt->ar_rnat: */
umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
pt->ar_rnat = (pt->ar_rnat & ~umask) | (urnat & umask);
mask &= ~umask;
if (!mask)
return;
}
/*
* Note: Section 11.1 of the EAS guarantees that bit 63 of an
* rnat slot is ignored, so we don't have to clear it here.
*/
rnat0 = (urnat << shift);
m = mask << shift;
if (rnat0_kaddr >= kbsp)
sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat0 & m);
else if (rnat0_kaddr > krbs)
*rnat0_kaddr = ((*rnat0_kaddr & ~m) | (rnat0 & m));
rnat1 = (urnat >> (63 - shift));
m = mask >> (63 - shift);
if (rnat1_kaddr >= kbsp)
sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat1 & m);
else if (rnat1_kaddr > krbs)
*rnat1_kaddr = ((*rnat1_kaddr & ~m) | (rnat1 & m));
}
static inline int
on_kernel_rbs (unsigned long addr, unsigned long bspstore,
unsigned long urbs_end)
{
unsigned long *rnat_addr = ia64_rse_rnat_addr((unsigned long *)
urbs_end);
return (addr >= bspstore && addr <= (unsigned long) rnat_addr);
}
/*
* Read a word from the user-level backing store of task CHILD. ADDR
* is the user-level address to read the word from, VAL a pointer to
* the return value, and USER_BSP gives the end of the user-level
* backing store (i.e., it's the address that would be in ar.bsp after
* the user executed a "cover" instruction).
*
* This routine takes care of accessing the kernel register backing
* store for those registers that got spilled there. It also takes
* care of calculating the appropriate RNaT collection words.
*/
long
ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
unsigned long user_rbs_end, unsigned long addr, long *val)
{
unsigned long *bspstore, *krbs, regnum, *laddr, *urbs_end, *rnat_addr;
struct pt_regs *child_regs;
size_t copied;
long ret;
urbs_end = (long *) user_rbs_end;
laddr = (unsigned long *) addr;
child_regs = task_pt_regs(child);
bspstore = (unsigned long *) child_regs->ar_bspstore;
krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
if (on_kernel_rbs(addr, (unsigned long) bspstore,
(unsigned long) urbs_end))
{
/*
* Attempt to read the RBS in an area that's actually
* on the kernel RBS => read the corresponding bits in
* the kernel RBS.
*/
rnat_addr = ia64_rse_rnat_addr(laddr);
ret = get_rnat(child, child_stack, krbs, rnat_addr, urbs_end);
if (laddr == rnat_addr) {
/* return NaT collection word itself */
*val = ret;
return 0;
}
if (((1UL << ia64_rse_slot_num(laddr)) & ret) != 0) {
/*
* It is implementation dependent whether the
* data portion of a NaT value gets saved on a
* st8.spill or RSE spill (e.g., see EAS 2.6,
* 4.4.4.6 Register Spill and Fill). To get
* consistent behavior across all possible
* IA-64 implementations, we return zero in
* this case.
*/
*val = 0;
return 0;
}
if (laddr < urbs_end) {
/*
* The desired word is on the kernel RBS and
* is not a NaT.
*/
regnum = ia64_rse_num_regs(bspstore, laddr);
*val = *ia64_rse_skip_regs(krbs, regnum);
return 0;
}
}
copied = access_process_vm(child, addr, &ret, sizeof(ret), 0);
if (copied != sizeof(ret))
return -EIO;
*val = ret;
return 0;
}
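/*
 * Editor's usage sketch (hypothetical caller, hedged): how a ptrace path
 * might read one word of the traced child's register backing store via
 * ia64_peek(), transparently covering the part that still lives on the
 * kernel RBS. Error and value returns are conflated here for brevity;
 * real callers keep them separate.
 */
static long example_peek_rbs_word(struct task_struct *child,
				  struct switch_stack *child_stack,
				  unsigned long urbs_end, unsigned long addr)
{
	long val;

	if (ia64_peek(child, child_stack, urbs_end, addr, &val) < 0)
		return -EIO;	/* short access_process_vm() reads also map here */
	return val;
}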
long
ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
unsigned long user_rbs_end, unsigned long addr, long val)
{
unsigned long *bspstore, *krbs, regnum, *laddr;
unsigned long *urbs_end = (long *) user_rbs_end;
struct pt_regs *child_regs;
laddr = (unsigned long *) addr;
child_regs = task_pt_regs(child);
bspstore = (unsigned long *) child_regs->ar_bspstore;
krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
if (on_kernel_rbs(addr, (unsigned long) bspstore,
(unsigned long) urbs_end))
{
/*
* Attempt to write the RBS in an area that's actually
* on the kernel RBS => write the corresponding bits
* in the kernel RBS.
*/
if (ia64_rse_is_rnat_slot(laddr))
put_rnat(child, child_stack, krbs, laddr, val,
urbs_end);
else {
if (laddr < urbs_end) {
regnum = ia64_rse_num_regs(bspstore, laddr);
*ia64_rse_skip_regs(krbs, regnum) = val;
}
}
} else if (access_process_vm(child, addr, &val, sizeof(val), 1)
!= sizeof(val))
return -EIO;
return 0;
}
/*
* Calculate the address of the end of the user-level register backing
* store. This is the address that would have been stored in ar.bsp
* if the user had executed a "cover" instruction right before
* entering the kernel. If CFMP is not NULL, it is used to return the
* "current frame mask" that was active at the time the kernel was
* entered.
*/
unsigned long
ia64_get_user_rbs_end (struct task_struct *child, struct pt_regs *pt,
unsigned long *cfmp)
{
unsigned long *krbs, *bspstore, cfm = pt->cr_ifs;
long ndirty;
krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
bspstore = (unsigned long *) pt->ar_bspstore;
ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
if (in_syscall(pt))
ndirty += (cfm & 0x7f);
else
cfm &= ~(1UL << 63); /* clear valid bit */
if (cfmp)
*cfmp = cfm;
return (unsigned long) ia64_rse_skip_regs(bspstore, ndirty);
}
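/*
 * Editor's worked example (hedged): pt->loadrs keeps the ar.rsc.loadrs
 * field in bits 16 and up, counting dirty backing-store bytes, so the
 * ">> 19" above (16 to isolate the field, 3 more for bytes -> 8-byte
 * slots) yields the number of dirty slots including RNaT collections;
 * ia64_rse_num_regs() then discounts the collections to give ndirty
 * actual stacked registers. E.g. loadrs == (0x88 << 16) means 0x88 dirty
 * bytes == 17 slots, of which up to one may be an RNaT collection.
 */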
/*
* Synchronize (i.e, write) the RSE backing store living in kernel
* space to the VM of the CHILD task. SW and PT are the pointers to
* the switch_stack and pt_regs structures, respectively.
* USER_RBS_END is the user-level address at which the backing store
* ends.
*/
long
ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
unsigned long user_rbs_start, unsigned long user_rbs_end)
{
unsigned long addr, val;
long ret;
/* now copy word for word from kernel rbs to user rbs: */
for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
ret = ia64_peek(child, sw, user_rbs_end, addr, &val);
if (ret < 0)
return ret;
if (access_process_vm(child, addr, &val, sizeof(val), 1)
!= sizeof(val))
return -EIO;
}
return 0;
}
static long
ia64_sync_kernel_rbs (struct task_struct *child, struct switch_stack *sw,
unsigned long user_rbs_start, unsigned long user_rbs_end)
{
unsigned long addr, val;
long ret;
/* now copy word for word from user rbs to kernel rbs: */
for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
if (access_process_vm(child, addr, &val, sizeof(val), 0)
!= sizeof(val))
return -EIO;
ret = ia64_poke(child, sw, user_rbs_end, addr, val);
if (ret < 0)
return ret;
}
return 0;
}
typedef long (*syncfunc_t)(struct task_struct *, struct switch_stack *,
unsigned long, unsigned long);
static void do_sync_rbs(struct unw_frame_info *info, void *arg)
{
struct pt_regs *pt;
unsigned long urbs_end;
syncfunc_t fn = arg;
if (unw_unwind_to_user(info) < 0)
return;
pt = task_pt_regs(info->task);
urbs_end = ia64_get_user_rbs_end(info->task, pt, NULL);
fn(info->task, info->sw, pt->ar_bspstore, urbs_end);
}
/*
* When a thread is stopped (ptraced), the debugger might change the thread's
* user stack (by writing its memory directly), and we must prevent the RSE
* state stored in the kernel from overriding the user stack (whose RSE data
* is newer than the kernel's in that case). To work around the issue, we copy
* the kernel RSE to the user RSE before the task is stopped, so the user RSE
* has up-to-date data; we then copy the user RSE back to the kernel after the
* task is resumed from the traced stop, and the kernel uses the newer RSE to
* return to user mode. TIF_RESTORE_RSE is the flag indicating that we need to
* synchronize the user RSE back to the kernel.
*/
void ia64_ptrace_stop(void)
{
if (test_and_set_tsk_thread_flag(current, TIF_RESTORE_RSE))
return;
set_notify_resume(current);
unw_init_running(do_sync_rbs, ia64_sync_user_rbs);
}
/*
* This is called to read back the register backing store.
*/
void ia64_sync_krbs(void)
{
clear_tsk_thread_flag(current, TIF_RESTORE_RSE);
unw_init_running(do_sync_rbs, ia64_sync_kernel_rbs);
}
/*
* After PTRACE_ATTACH, a thread's register backing store area in user
* space is assumed to contain correct data whenever the thread is
* stopped. arch_ptrace_stop takes care of this on tracing stops.
* But if the child was already stopped for job control when we attach
* to it, then it might not ever get into ptrace_stop by the time we
* want to examine the user memory containing the RBS.
*/
void
ptrace_attach_sync_user_rbs (struct task_struct *child)
{
int stopped = 0;
struct unw_frame_info info;
/*
* If the child is in TASK_STOPPED, we need to change that to
* TASK_TRACED momentarily while we operate on it. This ensures
* that the child won't be woken up and return to user mode while
* we are doing the sync. (It can only be woken up for SIGKILL.)
*/
read_lock(&tasklist_lock);
if (child->sighand) {
spin_lock_irq(&child->sighand->siglock);
if (child->state == TASK_STOPPED &&
!test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) {
set_notify_resume(child);
child->state = TASK_TRACED;
stopped = 1;
}
spin_unlock_irq(&child->sighand->siglock);
}
read_unlock(&tasklist_lock);
if (!stopped)
return;
unw_init_from_blocked_task(&info, child);
do_sync_rbs(&info, ia64_sync_user_rbs);
/*
* Now move the child back into TASK_STOPPED if it should be in a
* job control stop, so that SIGCONT can be used to wake it up.
*/
read_lock(&tasklist_lock);
if (child->sighand) {
spin_lock_irq(&child->sighand->siglock);
if (child->state == TASK_TRACED &&
(child->signal->flags & SIGNAL_STOP_STOPPED)) {
child->state = TASK_STOPPED;
}
spin_unlock_irq(&child->sighand->siglock);
}
read_unlock(&tasklist_lock);
}
/*
* Write f32-f127 back to task->thread.fph if it has been modified.
*/
inline void
ia64_flush_fph (struct task_struct *task)
{
struct ia64_psr *psr = ia64_psr(task_pt_regs(task));
/*
* Prevent migrating this task while
* we're fiddling with the FPU state
*/
preempt_disable();
if (ia64_is_local_fpu_owner(task) && psr->mfh) {
psr->mfh = 0;
task->thread.flags |= IA64_THREAD_FPH_VALID;
ia64_save_fpu(&task->thread.fph[0]);
}
preempt_enable();
}
/*
* Sync the fph state of the task so that it can be manipulated
* through thread.fph. If necessary, f32-f127 are written back to
* thread.fph or, if the fph state hasn't been used before, thread.fph
* is cleared to zeroes. Also, access to f32-f127 is disabled to
* ensure that the task picks up the state from thread.fph when it
* executes again.
*/
void
ia64_sync_fph (struct task_struct *task)
{
struct ia64_psr *psr = ia64_psr(task_pt_regs(task));
ia64_flush_fph(task);
if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
task->thread.flags |= IA64_THREAD_FPH_VALID;
memset(&task->thread.fph, 0, sizeof(task->thread.fph));
}
ia64_drop_fpu(task);
psr->dfh = 1;
}
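/*
 * Editor's usage sketch (hedged): the pattern ptrace_setregs() uses below
 * for the high floating-point partition -- synchronize fph into
 * thread.fph first, then edit thread.fph directly; the task picks the
 * state back up when it next runs. The range check is assumed to be done
 * by the caller.
 */
static void example_poke_fph(struct task_struct *child, int regnum,
			     struct ia64_fpreg val)
{
	ia64_sync_fph(child);			/* f32-f127 now live in thread.fph */
	child->thread.fph[regnum - 32] = val;	/* regnum in 32..127 */
}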
/*
* Change the machine-state of CHILD such that it will return via the normal
* kernel exit-path, rather than the syscall-exit path.
*/
static void
convert_to_non_syscall (struct task_struct *child, struct pt_regs *pt,
unsigned long cfm)
{
struct unw_frame_info info, prev_info;
unsigned long ip, sp, pr;
unw_init_from_blocked_task(&info, child);
while (1) {
prev_info = info;
if (unw_unwind(&info) < 0)
return;
unw_get_sp(&info, &sp);
if ((long)((unsigned long)child + IA64_STK_OFFSET - sp)
< IA64_PT_REGS_SIZE) {
dprintk("ptrace.%s: ran off the top of the kernel "
"stack\n", __func__);
return;
}
if (unw_get_pr (&prev_info, &pr) < 0) {
unw_get_rp(&prev_info, &ip);
dprintk("ptrace.%s: failed to read "
"predicate register (ip=0x%lx)\n",
__func__, ip);
return;
}
if (unw_is_intr_frame(&info)
&& (pr & (1UL << PRED_USER_STACK)))
break;
}
/*
* Note: at the time of this call, the target task is blocked
* in notify_resume_user() and by clearing PRED_LEAVE_SYSCALL
* (aka, "pLvSys") we redirect execution from
* .work_pending_syscall_end to .work_processed_kernel.
*/
unw_get_pr(&prev_info, &pr);
pr &= ~((1UL << PRED_SYSCALL) | (1UL << PRED_LEAVE_SYSCALL));
pr |= (1UL << PRED_NON_SYSCALL);
unw_set_pr(&prev_info, pr);
pt->cr_ifs = (1UL << 63) | cfm;
/*
* Clear the memory that is NOT written on syscall-entry to
* ensure we do not leak kernel-state to user when execution
* resumes.
*/
pt->r2 = 0;
pt->r3 = 0;
pt->r14 = 0;
memset(&pt->r16, 0, 16*8); /* clear r16-r31 */
memset(&pt->f6, 0, 6*16); /* clear f6-f11 */
pt->b7 = 0;
pt->ar_ccv = 0;
pt->ar_csd = 0;
pt->ar_ssd = 0;
}
static int
access_nat_bits (struct task_struct *child, struct pt_regs *pt,
struct unw_frame_info *info,
unsigned long *data, int write_access)
{
unsigned long regnum, nat_bits, scratch_unat, dummy = 0;
char nat = 0;
if (write_access) {
nat_bits = *data;
scratch_unat = ia64_put_scratch_nat_bits(pt, nat_bits);
if (unw_set_ar(info, UNW_AR_UNAT, scratch_unat) < 0) {
dprintk("ptrace: failed to set ar.unat\n");
return -1;
}
for (regnum = 4; regnum <= 7; ++regnum) {
unw_get_gr(info, regnum, &dummy, &nat);
unw_set_gr(info, regnum, dummy,
(nat_bits >> regnum) & 1);
}
} else {
if (unw_get_ar(info, UNW_AR_UNAT, &scratch_unat) < 0) {
dprintk("ptrace: failed to read ar.unat\n");
return -1;
}
nat_bits = ia64_get_scratch_nat_bits(pt, scratch_unat);
for (regnum = 4; regnum <= 7; ++regnum) {
unw_get_gr(info, regnum, &dummy, &nat);
nat_bits |= (nat != 0) << regnum;
}
*data = nat_bits;
}
return 0;
}
static int
access_uarea (struct task_struct *child, unsigned long addr,
unsigned long *data, int write_access);
static long
ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
{
unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val;
struct unw_frame_info info;
struct ia64_fpreg fpval;
struct switch_stack *sw;
struct pt_regs *pt;
long ret, retval = 0;
char nat = 0;
int i;
if (!access_ok(VERIFY_WRITE, ppr, sizeof(struct pt_all_user_regs)))
return -EIO;
pt = task_pt_regs(child);
sw = (struct switch_stack *) (child->thread.ksp + 16);
unw_init_from_blocked_task(&info, child);
if (unw_unwind_to_user(&info) < 0) {
return -EIO;
}
if (((unsigned long) ppr & 0x7) != 0) {
dprintk("ptrace:unaligned register address %p\n", ppr);
return -EIO;
}
if (access_uarea(child, PT_CR_IPSR, &psr, 0) < 0
|| access_uarea(child, PT_AR_EC, &ec, 0) < 0
|| access_uarea(child, PT_AR_LC, &lc, 0) < 0
|| access_uarea(child, PT_AR_RNAT, &rnat, 0) < 0
|| access_uarea(child, PT_AR_BSP, &bsp, 0) < 0
|| access_uarea(child, PT_CFM, &cfm, 0)
|| access_uarea(child, PT_NAT_BITS, &nat_bits, 0))
return -EIO;
/* control regs */
retval |= __put_user(pt->cr_iip, &ppr->cr_iip);
retval |= __put_user(psr, &ppr->cr_ipsr);
/* app regs */
retval |= __put_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
retval |= __put_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
retval |= __put_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
retval |= __put_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
retval |= __put_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
retval |= __put_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);
retval |= __put_user(ec, &ppr->ar[PT_AUR_EC]);
retval |= __put_user(lc, &ppr->ar[PT_AUR_LC]);
retval |= __put_user(rnat, &ppr->ar[PT_AUR_RNAT]);
retval |= __put_user(bsp, &ppr->ar[PT_AUR_BSP]);
retval |= __put_user(cfm, &ppr->cfm);
/* gr1-gr3 */
retval |= __copy_to_user(&ppr->gr[1], &pt->r1, sizeof(long));
retval |= __copy_to_user(&ppr->gr[2], &pt->r2, sizeof(long) *2);
/* gr4-gr7 */
for (i = 4; i < 8; i++) {
if (unw_access_gr(&info, i, &val, &nat, 0) < 0)
return -EIO;
retval |= __put_user(val, &ppr->gr[i]);
}
/* gr8-gr11 */
retval |= __copy_to_user(&ppr->gr[8], &pt->r8, sizeof(long) * 4);
/* gr12-gr15 */
retval |= __copy_to_user(&ppr->gr[12], &pt->r12, sizeof(long) * 2);
retval |= __copy_to_user(&ppr->gr[14], &pt->r14, sizeof(long));
retval |= __copy_to_user(&ppr->gr[15], &pt->r15, sizeof(long));
/* gr16-gr31 */
retval |= __copy_to_user(&ppr->gr[16], &pt->r16, sizeof(long) * 16);
/* b0 */
retval |= __put_user(pt->b0, &ppr->br[0]);
/* b1-b5 */
for (i = 1; i < 6; i++) {
if (unw_access_br(&info, i, &val, 0) < 0)
return -EIO;
__put_user(val, &ppr->br[i]);
}
/* b6-b7 */
retval |= __put_user(pt->b6, &ppr->br[6]);
retval |= __put_user(pt->b7, &ppr->br[7]);
/* fr2-fr5 */
for (i = 2; i < 6; i++) {
if (unw_get_fr(&info, i, &fpval) < 0)
return -EIO;
retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
}
/* fr6-fr11 */
retval |= __copy_to_user(&ppr->fr[6], &pt->f6,
sizeof(struct ia64_fpreg) * 6);
/* fp scratch regs(12-15) */
retval |= __copy_to_user(&ppr->fr[12], &sw->f12,
sizeof(struct ia64_fpreg) * 4);
/* fr16-fr31 */
for (i = 16; i < 32; i++) {
if (unw_get_fr(&info, i, &fpval) < 0)
return -EIO;
retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
}
/* fph */
ia64_flush_fph(child);
retval |= __copy_to_user(&ppr->fr[32], &child->thread.fph,
sizeof(ppr->fr[32]) * 96);
/* preds */
retval |= __put_user(pt->pr, &ppr->pr);
/* nat bits */
retval |= __put_user(nat_bits, &ppr->nat);
ret = retval ? -EIO : 0;
return ret;
}
static long
ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
{
unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
struct unw_frame_info info;
struct switch_stack *sw;
struct ia64_fpreg fpval;
struct pt_regs *pt;
long ret, retval = 0;
int i;
memset(&fpval, 0, sizeof(fpval));
if (!access_ok(VERIFY_READ, ppr, sizeof(struct pt_all_user_regs)))
return -EIO;
pt = task_pt_regs(child);
sw = (struct switch_stack *) (child->thread.ksp + 16);
unw_init_from_blocked_task(&info, child);
if (unw_unwind_to_user(&info) < 0) {
return -EIO;
}
if (((unsigned long) ppr & 0x7) != 0) {
dprintk("ptrace:unaligned register address %p\n", ppr);
return -EIO;
}
/* control regs */
retval |= __get_user(pt->cr_iip, &ppr->cr_iip);
retval |= __get_user(psr, &ppr->cr_ipsr);
/* app regs */
retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
retval |= __get_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);
retval |= __get_user(ec, &ppr->ar[PT_AUR_EC]);
retval |= __get_user(lc, &ppr->ar[PT_AUR_LC]);
retval |= __get_user(rnat, &ppr->ar[PT_AUR_RNAT]);
retval |= __get_user(bsp, &ppr->ar[PT_AUR_BSP]);
retval |= __get_user(cfm, &ppr->cfm);
/* gr1-gr3 */
retval |= __copy_from_user(&pt->r1, &ppr->gr[1], sizeof(long));
retval |= __copy_from_user(&pt->r2, &ppr->gr[2], sizeof(long) * 2);
/* gr4-gr7 */
for (i = 4; i < 8; i++) {
retval |= __get_user(val, &ppr->gr[i]);
/* NaT bit will be set via PT_NAT_BITS: */
if (unw_set_gr(&info, i, val, 0) < 0)
return -EIO;
}
/* gr8-gr11 */
retval |= __copy_from_user(&pt->r8, &ppr->gr[8], sizeof(long) * 4);
/* gr12-gr15 */
retval |= __copy_from_user(&pt->r12, &ppr->gr[12], sizeof(long) * 2);
retval |= __copy_from_user(&pt->r14, &ppr->gr[14], sizeof(long));
retval |= __copy_from_user(&pt->r15, &ppr->gr[15], sizeof(long));
/* gr16-gr31 */
retval |= __copy_from_user(&pt->r16, &ppr->gr[16], sizeof(long) * 16);
/* b0 */
retval |= __get_user(pt->b0, &ppr->br[0]);
/* b1-b5 */
for (i = 1; i < 6; i++) {
retval |= __get_user(val, &ppr->br[i]);
unw_set_br(&info, i, val);
}
/* b6-b7 */
retval |= __get_user(pt->b6, &ppr->br[6]);
retval |= __get_user(pt->b7, &ppr->br[7]);
/* fr2-fr5 */
for (i = 2; i < 6; i++) {
retval |= __copy_from_user(&fpval, &ppr->fr[i], sizeof(fpval));
if (unw_set_fr(&info, i, fpval) < 0)
return -EIO;
}
/* fr6-fr11 */
retval |= __copy_from_user(&pt->f6, &ppr->fr[6],
sizeof(ppr->fr[6]) * 6);
/* fp scratch regs(12-15) */
retval |= __copy_from_user(&sw->f12, &ppr->fr[12],
sizeof(ppr->fr[12]) * 4);
/* fr16-fr31 */
for (i = 16; i < 32; i++) {
retval |= __copy_from_user(&fpval, &ppr->fr[i],
sizeof(fpval));
if (unw_set_fr(&info, i, fpval) < 0)
return -EIO;
}
/* fph */
ia64_sync_fph(child);
retval |= __copy_from_user(&child->thread.fph, &ppr->fr[32],
sizeof(ppr->fr[32]) * 96);
/* preds */
retval |= __get_user(pt->pr, &ppr->pr);
/* nat bits */
retval |= __get_user(nat_bits, &ppr->nat);
retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
retval |= access_uarea(child, PT_AR_EC, &ec, 1);
retval |= access_uarea(child, PT_AR_LC, &lc, 1);
retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
retval |= access_uarea(child, PT_AR_BSP, &bsp, 1);
retval |= access_uarea(child, PT_CFM, &cfm, 1);
retval |= access_uarea(child, PT_NAT_BITS, &nat_bits, 1);
ret = retval ? -EIO : 0;
return ret;
}
void
user_enable_single_step (struct task_struct *child)
{
struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
set_tsk_thread_flag(child, TIF_SINGLESTEP);
child_psr->ss = 1;
}
void
user_enable_block_step (struct task_struct *child)
{
struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
set_tsk_thread_flag(child, TIF_SINGLESTEP);
child_psr->tb = 1;
}
void
user_disable_single_step (struct task_struct *child)
{
struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
/* make sure the single step/taken-branch trap bits are not set: */
clear_tsk_thread_flag(child, TIF_SINGLESTEP);
child_psr->ss = 0;
child_psr->tb = 0;
}
/*
* Called by kernel/ptrace.c when detaching..
*
* Make sure the single step bit is not set.
*/
void
ptrace_disable (struct task_struct *child)
{
user_disable_single_step(child);
}
long
arch_ptrace (struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
switch (request) {
case PTRACE_PEEKTEXT:
case PTRACE_PEEKDATA:
/* read word at location addr */
if (access_process_vm(child, addr, &data, sizeof(data), 0)
!= sizeof(data))
return -EIO;
/* ensure return value is not mistaken for error code */
force_successful_syscall_return();
return data;
/* PTRACE_POKETEXT and PTRACE_POKEDATA are handled
* by the generic ptrace_request().
*/
case PTRACE_PEEKUSR:
/* read the word at addr in the USER area */
if (access_uarea(child, addr, &data, 0) < 0)
return -EIO;
/* ensure return value is not mistaken for error code */
force_successful_syscall_return();
return data;
case PTRACE_POKEUSR:
/* write the word at addr in the USER area */
if (access_uarea(child, addr, &data, 1) < 0)
return -EIO;
return 0;
case PTRACE_OLD_GETSIGINFO:
/* for backwards-compatibility */
return ptrace_request(child, PTRACE_GETSIGINFO, addr, data);
case PTRACE_OLD_SETSIGINFO:
/* for backwards-compatibility */
return ptrace_request(child, PTRACE_SETSIGINFO, addr, data);
case PTRACE_GETREGS:
return ptrace_getregs(child,
(struct pt_all_user_regs __user *) data);
case PTRACE_SETREGS:
return ptrace_setregs(child,
(struct pt_all_user_regs __user *) data);
default:
return ptrace_request(child, request, addr, data);
}
}
/* "asmlinkage" so the input arguments are preserved... */
asmlinkage long
syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
long arg4, long arg5, long arg6, long arg7,
struct pt_regs regs)
{
if (test_thread_flag(TIF_SYSCALL_TRACE))
if (tracehook_report_syscall_entry(&regs))
return -ENOSYS;
/* copy user rbs to kernel rbs */
if (test_thread_flag(TIF_RESTORE_RSE))
ia64_sync_krbs();
audit_syscall_entry(AUDIT_ARCH_IA64, regs.r15, arg0, arg1, arg2, arg3);
return 0;
}
/* "asmlinkage" so the input arguments are preserved... */
asmlinkage void
syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
long arg4, long arg5, long arg6, long arg7,
struct pt_regs regs)
{
int step;
audit_syscall_exit(&regs);
step = test_thread_flag(TIF_SINGLESTEP);
if (step || test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(&regs, step);
/* copy user rbs to kernel rbs */
if (test_thread_flag(TIF_RESTORE_RSE))
ia64_sync_krbs();
}
/* Utrace implementation starts here */
struct regset_get {
void *kbuf;
void __user *ubuf;
};
struct regset_set {
const void *kbuf;
const void __user *ubuf;
};
struct regset_getset {
struct task_struct *target;
const struct user_regset *regset;
union {
struct regset_get get;
struct regset_set set;
} u;
unsigned int pos;
unsigned int count;
int ret;
};
static int
access_elf_gpreg(struct task_struct *target, struct unw_frame_info *info,
unsigned long addr, unsigned long *data, int write_access)
{
struct pt_regs *pt;
unsigned long *ptr = NULL;
int ret;
char nat = 0;
pt = task_pt_regs(target);
switch (addr) {
case ELF_GR_OFFSET(1):
ptr = &pt->r1;
break;
case ELF_GR_OFFSET(2):
case ELF_GR_OFFSET(3):
ptr = (void *)&pt->r2 + (addr - ELF_GR_OFFSET(2));
break;
case ELF_GR_OFFSET(4) ... ELF_GR_OFFSET(7):
if (write_access) {
/* read NaT bit first: */
unsigned long dummy;
ret = unw_get_gr(info, addr/8, &dummy, &nat);
if (ret < 0)
return ret;
}
return unw_access_gr(info, addr/8, data, &nat, write_access);
case ELF_GR_OFFSET(8) ... ELF_GR_OFFSET(11):
ptr = (void *)&pt->r8 + addr - ELF_GR_OFFSET(8);
break;
case ELF_GR_OFFSET(12):
case ELF_GR_OFFSET(13):
ptr = (void *)&pt->r12 + addr - ELF_GR_OFFSET(12);
break;
case ELF_GR_OFFSET(14):
ptr = &pt->r14;
break;
case ELF_GR_OFFSET(15):
ptr = &pt->r15;
}
if (write_access)
*ptr = *data;
else
*data = *ptr;
return 0;
}
static int
access_elf_breg(struct task_struct *target, struct unw_frame_info *info,
unsigned long addr, unsigned long *data, int write_access)
{
struct pt_regs *pt;
unsigned long *ptr = NULL;
pt = task_pt_regs(target);
switch (addr) {
case ELF_BR_OFFSET(0):
ptr = &pt->b0;
break;
case ELF_BR_OFFSET(1) ... ELF_BR_OFFSET(5):
return unw_access_br(info, (addr - ELF_BR_OFFSET(0))/8,
data, write_access);
case ELF_BR_OFFSET(6):
ptr = &pt->b6;
break;
case ELF_BR_OFFSET(7):
ptr = &pt->b7;
}
if (write_access)
*ptr = *data;
else
*data = *ptr;
return 0;
}
static int
access_elf_areg(struct task_struct *target, struct unw_frame_info *info,
unsigned long addr, unsigned long *data, int write_access)
{
struct pt_regs *pt;
unsigned long cfm, urbs_end;
unsigned long *ptr = NULL;
pt = task_pt_regs(target);
if (addr >= ELF_AR_RSC_OFFSET && addr <= ELF_AR_SSD_OFFSET) {
switch (addr) {
case ELF_AR_RSC_OFFSET:
/* force PL3 */
if (write_access)
pt->ar_rsc = *data | (3 << 2);
else
*data = pt->ar_rsc;
return 0;
case ELF_AR_BSP_OFFSET:
/*
* By convention, we use PT_AR_BSP to refer to
* the end of the user-level backing store.
* Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof)
* to get the real value of ar.bsp at the time
* the kernel was entered.
*
* Furthermore, when changing the contents of
* PT_AR_BSP (or PT_CFM) while the task is
* blocked in a system call, convert the state
* so that the non-system-call exit
* path is used. This ensures that the proper
* state will be picked up when resuming
* execution. However, it *also* means that
* once we write PT_AR_BSP/PT_CFM, it won't be
* possible to modify the syscall arguments of
* the pending system call any longer. This
* shouldn't be an issue because modifying
* PT_AR_BSP/PT_CFM generally implies that
* we're either abandoning the pending system
* call or that we defer its re-execution
* (e.g., due to GDB doing an inferior
* function call).
*/
urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
if (write_access) {
if (*data != urbs_end) {
if (in_syscall(pt))
convert_to_non_syscall(target,
pt,
cfm);
/*
* Simulate user-level write
* of ar.bsp:
*/
pt->loadrs = 0;
pt->ar_bspstore = *data;
}
} else
*data = urbs_end;
return 0;
case ELF_AR_BSPSTORE_OFFSET:
ptr = &pt->ar_bspstore;
break;
case ELF_AR_RNAT_OFFSET:
ptr = &pt->ar_rnat;
break;
case ELF_AR_CCV_OFFSET:
ptr = &pt->ar_ccv;
break;
case ELF_AR_UNAT_OFFSET:
ptr = &pt->ar_unat;
break;
case ELF_AR_FPSR_OFFSET:
ptr = &pt->ar_fpsr;
break;
case ELF_AR_PFS_OFFSET:
ptr = &pt->ar_pfs;
break;
case ELF_AR_LC_OFFSET:
return unw_access_ar(info, UNW_AR_LC, data,
write_access);
case ELF_AR_EC_OFFSET:
return unw_access_ar(info, UNW_AR_EC, data,
write_access);
case ELF_AR_CSD_OFFSET:
ptr = &pt->ar_csd;
break;
case ELF_AR_SSD_OFFSET:
ptr = &pt->ar_ssd;
}
} else if (addr >= ELF_CR_IIP_OFFSET && addr <= ELF_CR_IPSR_OFFSET) {
switch (addr) {
case ELF_CR_IIP_OFFSET:
ptr = &pt->cr_iip;
break;
case ELF_CFM_OFFSET:
urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
if (write_access) {
if (((cfm ^ *data) & PFM_MASK) != 0) {
if (in_syscall(pt))
convert_to_non_syscall(target,
pt,
cfm);
pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK)
| (*data & PFM_MASK));
}
} else
*data = cfm;
return 0;
case ELF_CR_IPSR_OFFSET:
if (write_access) {
unsigned long tmp = *data;
/* psr.ri==3 is a reserved value: SDM 2:25 */
if ((tmp & IA64_PSR_RI) == IA64_PSR_RI)
tmp &= ~IA64_PSR_RI;
pt->cr_ipsr = ((tmp & IPSR_MASK)
| (pt->cr_ipsr & ~IPSR_MASK));
} else
*data = (pt->cr_ipsr & IPSR_MASK);
return 0;
}
} else if (addr == ELF_NAT_OFFSET)
return access_nat_bits(target, pt, info,
data, write_access);
else if (addr == ELF_PR_OFFSET)
ptr = &pt->pr;
else
return -1;
if (write_access)
*ptr = *data;
else
*data = *ptr;
return 0;
}
static int
access_elf_reg(struct task_struct *target, struct unw_frame_info *info,
unsigned long addr, unsigned long *data, int write_access)
{
if (addr >= ELF_GR_OFFSET(1) && addr <= ELF_GR_OFFSET(15))
return access_elf_gpreg(target, info, addr, data, write_access);
else if (addr >= ELF_BR_OFFSET(0) && addr <= ELF_BR_OFFSET(7))
return access_elf_breg(target, info, addr, data, write_access);
else
return access_elf_areg(target, info, addr, data, write_access);
}
void do_gpregs_get(struct unw_frame_info *info, void *arg)
{
struct pt_regs *pt;
struct regset_getset *dst = arg;
elf_greg_t tmp[16];
unsigned int i, index, min_copy;
if (unw_unwind_to_user(info) < 0)
return;
/*
* coredump format:
* r0-r31
* NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
* predicate registers (p0-p63)
* b0-b7
* ip cfm user-mask
* ar.rsc ar.bsp ar.bspstore ar.rnat
* ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
*/
/* Skip r0 */
if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count,
&dst->u.get.kbuf,
&dst->u.get.ubuf,
0, ELF_GR_OFFSET(1));
if (dst->ret || dst->count == 0)
return;
}
/* gr1 - gr15 */
if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
min_copy = ELF_GR_OFFSET(16) > (dst->pos + dst->count) ?
(dst->pos + dst->count) : ELF_GR_OFFSET(16);
for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
index++)
if (access_elf_reg(dst->target, info, i,
&tmp[index], 0) < 0) {
dst->ret = -EIO;
return;
}
dst->ret = user_regset_copyout(&dst->pos, &dst->count,
&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
if (dst->ret || dst->count == 0)
return;
}
/* r16-r31 */
if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
pt = task_pt_regs(dst->target);
dst->ret = user_regset_copyout(&dst->pos, &dst->count,
&dst->u.get.kbuf, &dst->u.get.ubuf, &pt->r16,
ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
if (dst->ret || dst->count == 0)
return;
}
/* nat, pr, b0 - b7 */
if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
min_copy = ELF_CR_IIP_OFFSET > (dst->pos + dst->count) ?
(dst->pos + dst->count) : ELF_CR_IIP_OFFSET;
for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
index++)
if (access_elf_reg(dst->target, info, i,
&tmp[index], 0) < 0) {
dst->ret = -EIO;
return;
}
dst->ret = user_regset_copyout(&dst->pos, &dst->count,
&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
if (dst->ret || dst->count == 0)
return;
}
/* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
* ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
*/
if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
min_copy = ELF_AR_END_OFFSET > (dst->pos + dst->count) ?
(dst->pos + dst->count) : ELF_AR_END_OFFSET;
for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
index++)
if (access_elf_reg(dst->target, info, i,
&tmp[index], 0) < 0) {
dst->ret = -EIO;
return;
}
dst->ret = user_regset_copyout(&dst->pos, &dst->count,
&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
}
}
void do_gpregs_set(struct unw_frame_info *info, void *arg)
{
struct pt_regs *pt;
struct regset_getset *dst = arg;
elf_greg_t tmp[16];
unsigned int i, index;
if (unw_unwind_to_user(info) < 0)
return;
/* Skip r0 */
if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
&dst->u.set.kbuf,
&dst->u.set.ubuf,
0, ELF_GR_OFFSET(1));
if (dst->ret || dst->count == 0)
return;
}
/* gr1-gr15 */
if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
i = dst->pos;
index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
dst->ret = user_regset_copyin(&dst->pos, &dst->count,
&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
if (dst->ret)
return;
for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
if (access_elf_reg(dst->target, info, i,
&tmp[index], 1) < 0) {
dst->ret = -EIO;
return;
}
if (dst->count == 0)
return;
}
/* gr16-gr31 */
if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
pt = task_pt_regs(dst->target);
dst->ret = user_regset_copyin(&dst->pos, &dst->count,
&dst->u.set.kbuf, &dst->u.set.ubuf, &pt->r16,
ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
if (dst->ret || dst->count == 0)
return;
}
/* nat, pr, b0 - b7 */
if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
i = dst->pos;
index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
dst->ret = user_regset_copyin(&dst->pos, &dst->count,
&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
if (dst->ret)
return;
for (; i < dst->pos; i += sizeof(elf_greg_t), index++)
if (access_elf_reg(dst->target, info, i,
&tmp[index], 1) < 0) {
dst->ret = -EIO;
return;
}
if (dst->count == 0)
return;
}
/* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
* ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
*/
if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
i = dst->pos;
index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
dst->ret = user_regset_copyin(&dst->pos, &dst->count,
&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
if (dst->ret)
return;
for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
if (access_elf_reg(dst->target, info, i,
&tmp[index], 1) < 0) {
dst->ret = -EIO;
return;
}
}
}
#define ELF_FP_OFFSET(i) (i * sizeof(elf_fpreg_t))
void do_fpregs_get(struct unw_frame_info *info, void *arg)
{
struct regset_getset *dst = arg;
struct task_struct *task = dst->target;
elf_fpreg_t tmp[30];
int index, min_copy, i;
if (unw_unwind_to_user(info) < 0)
return;
/* Skip pos 0 and 1 */
if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count,
&dst->u.get.kbuf,
&dst->u.get.ubuf,
0, ELF_FP_OFFSET(2));
if (dst->count == 0 || dst->ret)
return;
}
/* fr2-fr31 */
if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
index = (dst->pos - ELF_FP_OFFSET(2)) / sizeof(elf_fpreg_t);
min_copy = min(((unsigned int)ELF_FP_OFFSET(32)),
dst->pos + dst->count);
for (i = dst->pos; i < min_copy; i += sizeof(elf_fpreg_t),
index++)
if (unw_get_fr(info, i / sizeof(elf_fpreg_t),
&tmp[index])) {
dst->ret = -EIO;
return;
}
dst->ret = user_regset_copyout(&dst->pos, &dst->count,
&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
if (dst->count == 0 || dst->ret)
return;
}
/* fph */
if (dst->count > 0) {
ia64_flush_fph(dst->target);
if (task->thread.flags & IA64_THREAD_FPH_VALID)
dst->ret = user_regset_copyout(
&dst->pos, &dst->count,
&dst->u.get.kbuf, &dst->u.get.ubuf,
&dst->target->thread.fph,
ELF_FP_OFFSET(32), -1);
else
/* Zero fill instead. */
dst->ret = user_regset_copyout_zero(
&dst->pos, &dst->count,
&dst->u.get.kbuf, &dst->u.get.ubuf,
ELF_FP_OFFSET(32), -1);
}
}
void do_fpregs_set(struct unw_frame_info *info, void *arg)
{
struct regset_getset *dst = arg;
elf_fpreg_t fpreg, tmp[30];
int index, start, end;
if (unw_unwind_to_user(info) < 0)
return;
/* Skip pos 0 and 1 */
if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
&dst->u.set.kbuf,
&dst->u.set.ubuf,
0, ELF_FP_OFFSET(2));
if (dst->count == 0 || dst->ret)
return;
}
/* fr2-fr31 */
if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
start = dst->pos;
end = min(((unsigned int)ELF_FP_OFFSET(32)),
dst->pos + dst->count);
dst->ret = user_regset_copyin(&dst->pos, &dst->count,
&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
if (dst->ret)
return;
if (start & 0xF) { /* only write high part */
if (unw_get_fr(info, start / sizeof(elf_fpreg_t),
&fpreg)) {
dst->ret = -EIO;
return;
}
tmp[start / sizeof(elf_fpreg_t) - 2].u.bits[0]
= fpreg.u.bits[0];
start &= ~0xFUL;
}
if (end & 0xF) { /* only write low part */
if (unw_get_fr(info, end / sizeof(elf_fpreg_t),
&fpreg)) {
dst->ret = -EIO;
return;
}
tmp[end / sizeof(elf_fpreg_t) - 2].u.bits[1]
= fpreg.u.bits[1];
end = (end + 0xF) & ~0xFUL;
}
for ( ; start < end ; start += sizeof(elf_fpreg_t)) {
index = start / sizeof(elf_fpreg_t);
if (unw_set_fr(info, index, tmp[index - 2])) {
dst->ret = -EIO;
return;
}
}
if (dst->ret || dst->count == 0)
return;
}
/* fph */
if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(128)) {
ia64_sync_fph(dst->target);
dst->ret = user_regset_copyin(&dst->pos, &dst->count,
&dst->u.set.kbuf,
&dst->u.set.ubuf,
&dst->target->thread.fph,
ELF_FP_OFFSET(32), -1);
}
}
static int
do_regset_call(void (*call)(struct unw_frame_info *, void *),
struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct regset_getset info = { .target = target, .regset = regset,
.pos = pos, .count = count,
.u.set = { .kbuf = kbuf, .ubuf = ubuf },
.ret = 0 };
if (target == current)
unw_init_running(call, &info);
else {
struct unw_frame_info ufi;
memset(&ufi, 0, sizeof(ufi));
unw_init_from_blocked_task(&ufi, target);
(*call)(&ufi, &info);
}
return info.ret;
}
static int
gpregs_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
return do_regset_call(do_gpregs_get, target, regset, pos, count,
kbuf, ubuf);
}
static int gpregs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
return do_regset_call(do_gpregs_set, target, regset, pos, count,
kbuf, ubuf);
}
static void do_gpregs_writeback(struct unw_frame_info *info, void *arg)
{
do_sync_rbs(info, ia64_sync_user_rbs);
}
/*
* This is called to write back the register backing store.
* ptrace does this before it stops, so that a tracer reading the user
* memory after the thread stops will get the current register data.
*/
static int
gpregs_writeback(struct task_struct *target,
const struct user_regset *regset,
int now)
{
if (test_and_set_tsk_thread_flag(target, TIF_RESTORE_RSE))
return 0;
set_notify_resume(target);
return do_regset_call(do_gpregs_writeback, target, regset, 0, 0,
NULL, NULL);
}
static int
fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
return (target->thread.flags & IA64_THREAD_FPH_VALID) ? 128 : 32;
}
static int fpregs_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
return do_regset_call(do_fpregs_get, target, regset, pos, count,
kbuf, ubuf);
}
static int fpregs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
return do_regset_call(do_fpregs_set, target, regset, pos, count,
kbuf, ubuf);
}
static int
access_uarea(struct task_struct *child, unsigned long addr,
unsigned long *data, int write_access)
{
unsigned int pos = -1; /* an invalid value */
int ret;
unsigned long *ptr, regnum;
if ((addr & 0x7) != 0) {
dprintk("ptrace: unaligned register address 0x%lx\n", addr);
return -1;
}
if ((addr >= PT_NAT_BITS + 8 && addr < PT_F2) ||
(addr >= PT_R7 + 8 && addr < PT_B1) ||
(addr >= PT_AR_LC + 8 && addr < PT_CR_IPSR) ||
(addr >= PT_AR_SSD + 8 && addr < PT_DBR)) {
dprintk("ptrace: rejecting access to register "
"address 0x%lx\n", addr);
return -1;
}
switch (addr) {
case PT_F32 ... (PT_F127 + 15):
pos = addr - PT_F32 + ELF_FP_OFFSET(32);
break;
case PT_F2 ... (PT_F5 + 15):
pos = addr - PT_F2 + ELF_FP_OFFSET(2);
break;
case PT_F10 ... (PT_F31 + 15):
pos = addr - PT_F10 + ELF_FP_OFFSET(10);
break;
case PT_F6 ... (PT_F9 + 15):
pos = addr - PT_F6 + ELF_FP_OFFSET(6);
break;
}
if (pos != -1) {
if (write_access)
ret = fpregs_set(child, NULL, pos,
sizeof(unsigned long), data, NULL);
else
ret = fpregs_get(child, NULL, pos,
sizeof(unsigned long), data, NULL);
if (ret != 0)
return -1;
return 0;
}
switch (addr) {
case PT_NAT_BITS:
pos = ELF_NAT_OFFSET;
break;
case PT_R4 ... PT_R7:
pos = addr - PT_R4 + ELF_GR_OFFSET(4);
break;
case PT_B1 ... PT_B5:
pos = addr - PT_B1 + ELF_BR_OFFSET(1);
break;
case PT_AR_EC:
pos = ELF_AR_EC_OFFSET;
break;
case PT_AR_LC:
pos = ELF_AR_LC_OFFSET;
break;
case PT_CR_IPSR:
pos = ELF_CR_IPSR_OFFSET;
break;
case PT_CR_IIP:
pos = ELF_CR_IIP_OFFSET;
break;
case PT_CFM:
pos = ELF_CFM_OFFSET;
break;
case PT_AR_UNAT:
pos = ELF_AR_UNAT_OFFSET;
break;
case PT_AR_PFS:
pos = ELF_AR_PFS_OFFSET;
break;
case PT_AR_RSC:
pos = ELF_AR_RSC_OFFSET;
break;
case PT_AR_RNAT:
pos = ELF_AR_RNAT_OFFSET;
break;
case PT_AR_BSPSTORE:
pos = ELF_AR_BSPSTORE_OFFSET;
break;
case PT_PR:
pos = ELF_PR_OFFSET;
break;
case PT_B6:
pos = ELF_BR_OFFSET(6);
break;
case PT_AR_BSP:
pos = ELF_AR_BSP_OFFSET;
break;
case PT_R1 ... PT_R3:
pos = addr - PT_R1 + ELF_GR_OFFSET(1);
break;
case PT_R12 ... PT_R15:
pos = addr - PT_R12 + ELF_GR_OFFSET(12);
break;
case PT_R8 ... PT_R11:
pos = addr - PT_R8 + ELF_GR_OFFSET(8);
break;
case PT_R16 ... PT_R31:
pos = addr - PT_R16 + ELF_GR_OFFSET(16);
break;
case PT_AR_CCV:
pos = ELF_AR_CCV_OFFSET;
break;
case PT_AR_FPSR:
pos = ELF_AR_FPSR_OFFSET;
break;
case PT_B0:
pos = ELF_BR_OFFSET(0);
break;
case PT_B7:
pos = ELF_BR_OFFSET(7);
break;
case PT_AR_CSD:
pos = ELF_AR_CSD_OFFSET;
break;
case PT_AR_SSD:
pos = ELF_AR_SSD_OFFSET;
break;
}
if (pos != -1) {
if (write_access)
ret = gpregs_set(child, NULL, pos,
sizeof(unsigned long), data, NULL);
else
ret = gpregs_get(child, NULL, pos,
sizeof(unsigned long), data, NULL);
if (ret != 0)
return -1;
return 0;
}
/* access debug registers */
if (addr >= PT_IBR) {
regnum = (addr - PT_IBR) >> 3;
ptr = &child->thread.ibr[0];
} else {
regnum = (addr - PT_DBR) >> 3;
ptr = &child->thread.dbr[0];
}
if (regnum >= 8) {
dprintk("ptrace: rejecting access to register "
"address 0x%lx\n", addr);
return -1;
}
#ifdef CONFIG_PERFMON
/*
* Check if debug registers are used by perfmon. This
* test must be done once we know that we can do the
* operation, i.e. the arguments are all valid, but
* before we start modifying the state.
*
* Perfmon needs to keep a count of how many processes
* are trying to modify the debug registers for system
* wide monitoring sessions.
*
* We also include read access here, because reads may
* cause the PMU-installed debug register state
* (dbr[], ibr[]) to be reset. The two arrays are also
* used by perfmon, but we do not use
* IA64_THREAD_DBG_VALID. The registers are restored
* by the PMU context switch code.
*/
if (pfm_use_debug_registers(child))
return -1;
#endif
if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
child->thread.flags |= IA64_THREAD_DBG_VALID;
memset(child->thread.dbr, 0,
sizeof(child->thread.dbr));
memset(child->thread.ibr, 0,
sizeof(child->thread.ibr));
}
ptr += regnum;
if ((regnum & 1) && write_access) {
/* don't let the user set kernel-level breakpoints: */
*ptr = *data & ~(7UL << 56);
return 0;
}
if (write_access)
*ptr = *data;
else
*data = *ptr;
return 0;
}
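/*
 * Editor's worked example (hedged, per the IA-64 SDM): bits 56-59 of an
 * ibr/dbr control word form the privilege-level mask plm0..plm3. The
 * "& ~(7UL << 56)" in access_uarea() above strips plm0-plm2 so a traced
 * task can only arm breakpoints that fire at user level. Hypothetical
 * control word:
 *
 *	0x0f00000000001000 & ~(7UL << 56) == 0x0800000000001000
 *
 * i.e. only bit 59, the user-level enable, survives in the mask field.
 */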
static const struct user_regset native_regsets[] = {
{
.core_note_type = NT_PRSTATUS,
.n = ELF_NGREG,
.size = sizeof(elf_greg_t), .align = sizeof(elf_greg_t),
.get = gpregs_get, .set = gpregs_set,
.writeback = gpregs_writeback
},
{
.core_note_type = NT_PRFPREG,
.n = ELF_NFPREG,
.size = sizeof(elf_fpreg_t), .align = sizeof(elf_fpreg_t),
.get = fpregs_get, .set = fpregs_set, .active = fpregs_active
},
};
static const struct user_regset_view user_ia64_view = {
.name = "ia64",
.e_machine = EM_IA_64,
.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
};
const struct user_regset_view *task_user_regset_view(struct task_struct *tsk)
{
return &user_ia64_view;
}
struct syscall_get_set_args {
unsigned int i;
unsigned int n;
unsigned long *args;
struct pt_regs *regs;
int rw;
};
static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data)
{
struct syscall_get_set_args *args = data;
struct pt_regs *pt = args->regs;
unsigned long *krbs, cfm, ndirty;
int i, count;
if (unw_unwind_to_user(info) < 0)
return;
cfm = pt->cr_ifs;
krbs = (unsigned long *)info->task + IA64_RBS_OFFSET/8;
ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
count = 0;
if (in_syscall(pt))
count = min_t(int, args->n, cfm & 0x7f);
for (i = 0; i < count; i++) {
if (args->rw)
*ia64_rse_skip_regs(krbs, ndirty + i + args->i) =
args->args[i];
else
args->args[i] = *ia64_rse_skip_regs(krbs,
ndirty + i + args->i);
}
if (!args->rw) {
while (i < args->n) {
args->args[i] = 0;
i++;
}
}
}
void ia64_syscall_get_set_arguments(struct task_struct *task,
struct pt_regs *regs, unsigned int i, unsigned int n,
unsigned long *args, int rw)
{
struct syscall_get_set_args data = {
.i = i,
.n = n,
.args = args,
.regs = regs,
.rw = rw,
};
if (task == current)
unw_init_running(syscall_get_set_args_cb, &data);
else {
struct unw_frame_info ufi;
memset(&ufi, 0, sizeof(ufi));
unw_init_from_blocked_task(&ufi, task);
syscall_get_set_args_cb(&ufi, &data);
}
}
| gpl-2.0 |
EPDCenter/android_kernel_bq_qc | drivers/xen/xenfs/privcmd.c | 3254 | 8414 | /******************************************************************************
* privcmd.c
*
* Interface to privileged domain-0 commands.
*
* Copyright (c) 2002-2004, K A Fraser, B Dragovic
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <xen/xen.h>
#include <xen/privcmd.h>
#include <xen/interface/xen.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#ifndef HAVE_ARCH_PRIVCMD_MMAP
static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
#endif
static long privcmd_ioctl_hypercall(void __user *udata)
{
struct privcmd_hypercall hypercall;
long ret;
if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
return -EFAULT;
ret = privcmd_call(hypercall.op,
hypercall.arg[0], hypercall.arg[1],
hypercall.arg[2], hypercall.arg[3],
hypercall.arg[4]);
return ret;
}
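/*
 * Editor's usage sketch (hypothetical userspace caller, hedged): how a
 * dom0 tool issues a hypercall through this ioctl. The device node path,
 * header location and argument values are assumptions, not taken from
 * this file.
 */
#if 0	/* userspace example, not kernel code */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <xen/sys/privcmd.h>	/* assumed header location */

static long example_hypercall(unsigned long long op, unsigned long long arg0)
{
	struct privcmd_hypercall call = { .op = op, .arg = { arg0 } };
	int fd = open("/proc/xen/privcmd", O_RDWR);	/* assumed node */
	long ret;

	if (fd < 0)
		return -1;
	ret = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
	close(fd);
	return ret;
}
#endif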
static void free_page_list(struct list_head *pages)
{
struct page *p, *n;
list_for_each_entry_safe(p, n, pages, lru)
__free_page(p);
INIT_LIST_HEAD(pages);
}
/*
* Given an array of items in userspace, return a list of pages
* containing the data. If copying fails, either because of memory
* allocation failure or a problem reading user memory, return an
* error code; it's up to the caller to dispose of any partial list.
*/
static int gather_array(struct list_head *pagelist,
unsigned nelem, size_t size,
void __user *data)
{
unsigned pageidx;
void *pagedata;
int ret;
if (size > PAGE_SIZE)
return 0;
pageidx = PAGE_SIZE;
pagedata = NULL; /* quiet, gcc */
while (nelem--) {
if (pageidx > PAGE_SIZE-size) {
struct page *page = alloc_page(GFP_KERNEL);
ret = -ENOMEM;
if (page == NULL)
goto fail;
pagedata = page_address(page);
list_add_tail(&page->lru, pagelist);
pageidx = 0;
}
ret = -EFAULT;
if (copy_from_user(pagedata + pageidx, data, size))
goto fail;
data += size;
pageidx += size;
}
ret = 0;
fail:
return ret;
}
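/*
 * Editor's usage sketch (hedged): gather_array() and free_page_list()
 * are used as a pair; the caller owns the page list even on failure, as
 * privcmd_ioctl_mmap() below demonstrates.
 */
static int example_gather(unsigned nelem, xen_pfn_t __user *uarr)
{
	LIST_HEAD(pagelist);
	int rc = gather_array(&pagelist, nelem, sizeof(xen_pfn_t), uarr);

	/* ... traverse_pages(...) would consume the list here ... */
	free_page_list(&pagelist);	/* dispose of full or partial list */
	return rc;
}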
/*
* Call function "fn" on each element of the array fragmented
* over a list of pages.
*/
static int traverse_pages(unsigned nelem, size_t size,
struct list_head *pos,
int (*fn)(void *data, void *state),
void *state)
{
void *pagedata;
unsigned pageidx;
int ret = 0;
BUG_ON(size > PAGE_SIZE);
pageidx = PAGE_SIZE;
pagedata = NULL; /* hush, gcc */
while (nelem--) {
if (pageidx > PAGE_SIZE-size) {
struct page *page;
pos = pos->next;
page = list_entry(pos, struct page, lru);
pagedata = page_address(page);
pageidx = 0;
}
ret = (*fn)(pagedata + pageidx, state);
if (ret)
break;
pageidx += size;
}
return ret;
}
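/*
 * Editor's usage sketch (hedged): a trivial traverse_pages() callback.
 * The real callers below (mmap_mfn_range, mmap_batch_fn) follow the same
 * shape: "data" points at one fixed-size element inside a gathered page,
 * and "state" threads the caller's context through the walk.
 */
static int example_count_nonzero(void *data, void *state)
{
	xen_pfn_t *mfnp = data;
	unsigned long *count = state;

	if (*mfnp)
		(*count)++;
	return 0;	/* a non-zero return would stop the traversal */
}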
struct mmap_mfn_state {
unsigned long va;
struct vm_area_struct *vma;
domid_t domain;
};
static int mmap_mfn_range(void *data, void *state)
{
struct privcmd_mmap_entry *msg = data;
struct mmap_mfn_state *st = state;
struct vm_area_struct *vma = st->vma;
int rc;
/* Do not allow range to wrap the address space. */
if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
return -EINVAL;
/* Range chunks must be contiguous in va space. */
if ((msg->va != st->va) ||
((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
return -EINVAL;
rc = xen_remap_domain_mfn_range(vma,
msg->va & PAGE_MASK,
msg->mfn, msg->npages,
vma->vm_page_prot,
st->domain);
if (rc < 0)
return rc;
st->va += msg->npages << PAGE_SHIFT;
return 0;
}
static long privcmd_ioctl_mmap(void __user *udata)
{
struct privcmd_mmap mmapcmd;
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
int rc;
LIST_HEAD(pagelist);
struct mmap_mfn_state state;
if (!xen_initial_domain())
return -EPERM;
if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
return -EFAULT;
rc = gather_array(&pagelist,
mmapcmd.num, sizeof(struct privcmd_mmap_entry),
mmapcmd.entry);
if (rc || list_empty(&pagelist))
goto out;
down_write(&mm->mmap_sem);
{
struct page *page = list_first_entry(&pagelist,
struct page, lru);
struct privcmd_mmap_entry *msg = page_address(page);
vma = find_vma(mm, msg->va);
rc = -EINVAL;
if (!vma || (msg->va != vma->vm_start) ||
!privcmd_enforce_singleshot_mapping(vma))
goto out_up;
}
state.va = vma->vm_start;
state.vma = vma;
state.domain = mmapcmd.dom;
rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
&pagelist,
mmap_mfn_range, &state);
out_up:
up_write(&mm->mmap_sem);
out:
free_page_list(&pagelist);
return rc;
}
struct mmap_batch_state {
domid_t domain;
unsigned long va;
struct vm_area_struct *vma;
int err;
xen_pfn_t __user *user;
};
static int mmap_batch_fn(void *data, void *state)
{
xen_pfn_t *mfnp = data;
struct mmap_batch_state *st = state;
if (xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
st->vma->vm_page_prot, st->domain) < 0) {
*mfnp |= 0xf0000000U;
st->err++;
}
st->va += PAGE_SIZE;
return 0;
}
static int mmap_return_errors(void *data, void *state)
{
xen_pfn_t *mfnp = data;
struct mmap_batch_state *st = state;
return put_user(*mfnp, st->user++);
}
static struct vm_operations_struct privcmd_vm_ops;
static long privcmd_ioctl_mmap_batch(void __user *udata)
{
int ret;
struct privcmd_mmapbatch m;
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long nr_pages;
LIST_HEAD(pagelist);
struct mmap_batch_state state;
if (!xen_initial_domain())
return -EPERM;
if (copy_from_user(&m, udata, sizeof(m)))
return -EFAULT;
nr_pages = m.num;
if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
return -EINVAL;
ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t),
m.arr);
if (ret || list_empty(&pagelist))
goto out;
down_write(&mm->mmap_sem);
vma = find_vma(mm, m.addr);
ret = -EINVAL;
if (!vma ||
vma->vm_ops != &privcmd_vm_ops ||
(m.addr != vma->vm_start) ||
((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) ||
!privcmd_enforce_singleshot_mapping(vma)) {
up_write(&mm->mmap_sem);
goto out;
}
state.domain = m.dom;
state.vma = vma;
state.va = m.addr;
state.err = 0;
ret = traverse_pages(m.num, sizeof(xen_pfn_t),
&pagelist, mmap_batch_fn, &state);
up_write(&mm->mmap_sem);
if (state.err > 0) {
state.user = m.arr;
ret = traverse_pages(m.num, sizeof(xen_pfn_t),
&pagelist,
mmap_return_errors, &state);
}
out:
free_page_list(&pagelist);
return ret;
}
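/*
* Illustrative userspace sketch (an assumption, not part of this
* driver): mapping one foreign frame from dom0. The header and device
* node paths are typical Xen-tools locations, not quotes from this
* file, and the domid/mfn values are hypothetical.
*/
#if 0 /* example only -- build as a separate userspace tool */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <xen/privcmd.h> /* IOCTL_PRIVCMD_MMAPBATCH et al. */
int main(void)
{
int fd = open("/dev/xen/privcmd", O_RDWR);
xen_pfn_t mfn = 0x1234; /* hypothetical machine frame */
void *addr = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
struct privcmd_mmapbatch batch = {
.num = 1,
.dom = 1, /* hypothetical domid */
.addr = (unsigned long)addr,
.arr = &mfn,
};
if (ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH, &batch) < 0)
perror("IOCTL_PRIVCMD_MMAPBATCH");
else if (mfn & 0xf0000000U) /* error tag set by mmap_batch_fn() */
fprintf(stderr, "frame failed to map\n");
return 0;
}
#endif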
static long privcmd_ioctl(struct file *file,
unsigned int cmd, unsigned long data)
{
int ret = -ENOSYS;
void __user *udata = (void __user *) data;
switch (cmd) {
case IOCTL_PRIVCMD_HYPERCALL:
ret = privcmd_ioctl_hypercall(udata);
break;
case IOCTL_PRIVCMD_MMAP:
ret = privcmd_ioctl_mmap(udata);
break;
case IOCTL_PRIVCMD_MMAPBATCH:
ret = privcmd_ioctl_mmap_batch(udata);
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
#ifndef HAVE_ARCH_PRIVCMD_MMAP
static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
vma, vma->vm_start, vma->vm_end,
vmf->pgoff, vmf->virtual_address);
return VM_FAULT_SIGBUS;
}
static struct vm_operations_struct privcmd_vm_ops = {
.fault = privcmd_fault
};
static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
/* Unsupported for auto-translate guests. */
if (xen_feature(XENFEAT_auto_translated_physmap))
return -ENOSYS;
/* DONTCOPY is essential for Xen because copy_page_range doesn't know
* how to recreate these mappings */
vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY | VM_PFNMAP;
vma->vm_ops = &privcmd_vm_ops;
vma->vm_private_data = NULL;
return 0;
}
static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
{
return (xchg(&vma->vm_private_data, (void *)1) == NULL);
}
#endif
const struct file_operations privcmd_file_ops = {
.unlocked_ioctl = privcmd_ioctl,
.mmap = privcmd_mmap,
};
| gpl-2.0 |
MotoX-2015/android_kernel_motorola_msm8992 | arch/blackfin/mach-bf561/coreb.c | 11958 | 1902 | /* Load firmware into Core B on a BF561
*
* Copyright 2004-2009 Analog Devices Inc.
* Licensed under the GPL-2 or later.
*/
/* The Core B reset func requires code in the application that is loaded into
* Core B. In order to reset, the application needs to install an interrupt
* handler for Supplemental Interrupt 0, that sets RETI to 0xff600000 and
* writes bit 11 of SICB_SYSCR when bit 5 of SICA_SYSCR is 0. This causes Core
* B to stall when Supplemental Interrupt 0 is set, and will reset PC to
* 0xff600000 when COREB_SRAM_INIT is cleared.
*/
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#define CMD_COREB_START _IO('b', 0)
#define CMD_COREB_STOP _IO('b', 1)
#define CMD_COREB_RESET _IO('b', 2)
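/*
* Illustrative userspace sketch (an assumption, not part of this
* driver): starting Core B through the misc device registered below.
* The device node name follows the miscdevice name "coreb".
*/
#if 0 /* example only -- build as a separate userspace tool */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#define CMD_COREB_START _IO('b', 0) /* mirrors the define above */
int main(void)
{
int fd = open("/dev/coreb", O_RDWR);
if (fd < 0) {
perror("open /dev/coreb");
return 1;
}
if (ioctl(fd, CMD_COREB_START) < 0)
perror("CMD_COREB_START");
close(fd);
return 0;
}
#endif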
static long
coreb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
int ret = 0;
switch (cmd) {
case CMD_COREB_START:
bfin_write_SYSCR(bfin_read_SYSCR() & ~0x0020);
break;
case CMD_COREB_STOP:
bfin_write_SYSCR(bfin_read_SYSCR() | 0x0020);
bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | 0x0080);
break;
case CMD_COREB_RESET:
bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | 0x0080);
break;
default:
ret = -EINVAL;
break;
}
CSYNC();
return ret;
}
static const struct file_operations coreb_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = coreb_ioctl,
.llseek = noop_llseek,
};
static struct miscdevice coreb_dev = {
.minor = MISC_DYNAMIC_MINOR,
.name = "coreb",
.fops = &coreb_fops,
};
static int __init bf561_coreb_init(void)
{
return misc_register(&coreb_dev);
}
module_init(bf561_coreb_init);
static void __exit bf561_coreb_exit(void)
{
misc_deregister(&coreb_dev);
}
module_exit(bf561_coreb_exit);
MODULE_AUTHOR("Bas Vermeulen <bvermeul@blackstar.xs4all.nl>");
MODULE_DESCRIPTION("BF561 Core B Support");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Windeal/Linux-2.6.32.64 | drivers/hwmon/lm75.c | 695 | 10482 | /*
lm75.c - Part of lm_sensors, Linux kernel modules for hardware
monitoring
Copyright (c) 1998, 1999 Frodo Looijaard <frodol@dds.nl>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include "lm75.h"
/*
* This driver handles the LM75 and compatible digital temperature sensors.
* Only types which are _not_ listed in I2C_CLIENT_INSMOD_*() need to be
* listed here. We start at 9 since I2C_CLIENT_INSMOD_*() currently allow
* definition of up to 8 chip types (plus zero).
*/
enum lm75_type { /* keep sorted in alphabetical order */
ds1775 = 9,
ds75,
/* lm75 -- in I2C_CLIENT_INSMOD_1() */
lm75a,
max6625,
max6626,
mcp980x,
stds75,
tcn75,
tmp100,
tmp101,
tmp175,
tmp275,
tmp75,
};
/* Addresses scanned */
static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c,
0x4d, 0x4e, 0x4f, I2C_CLIENT_END };
/* Insmod parameters */
I2C_CLIENT_INSMOD_1(lm75);
/* The LM75 registers */
#define LM75_REG_CONF 0x01
static const u8 LM75_REG_TEMP[3] = {
0x00, /* input */
0x03, /* max */
0x02, /* hyst */
};
/* Each client has this additional data */
struct lm75_data {
struct device *hwmon_dev;
struct mutex update_lock;
u8 orig_conf;
char valid; /* !=0 if registers are valid */
unsigned long last_updated; /* In jiffies */
u16 temp[3]; /* Register values,
0 = input
1 = max
2 = hyst */
};
static int lm75_read_value(struct i2c_client *client, u8 reg);
static int lm75_write_value(struct i2c_client *client, u8 reg, u16 value);
static struct lm75_data *lm75_update_device(struct device *dev);
/*-----------------------------------------------------------------------*/
/* sysfs attributes for hwmon */
static ssize_t show_temp(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct lm75_data *data = lm75_update_device(dev);
return sprintf(buf, "%d\n",
LM75_TEMP_FROM_REG(data->temp[attr->index]));
}
static ssize_t set_temp(struct device *dev, struct device_attribute *da,
const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct i2c_client *client = to_i2c_client(dev);
struct lm75_data *data = i2c_get_clientdata(client);
int nr = attr->index;
long temp = simple_strtol(buf, NULL, 10);
mutex_lock(&data->update_lock);
data->temp[nr] = LM75_TEMP_TO_REG(temp);
lm75_write_value(client, LM75_REG_TEMP[nr], data->temp[nr]);
mutex_unlock(&data->update_lock);
return count;
}
static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO,
show_temp, set_temp, 1);
static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IWUSR | S_IRUGO,
show_temp, set_temp, 2);
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
static struct attribute *lm75_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
&sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
NULL
};
static const struct attribute_group lm75_group = {
.attrs = lm75_attributes,
};
/*-----------------------------------------------------------------------*/
/* device probe and removal */
static int
lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
struct lm75_data *data;
int status;
u8 set_mask, clr_mask;
int new;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA))
return -EIO;
data = kzalloc(sizeof(struct lm75_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
/* Set to LM75 resolution (9 bits, 1/2 degree C) and range.
* Then tweak to be more precise when appropriate.
*/
set_mask = 0;
clr_mask = (1 << 0) /* continuous conversions */
| (1 << 6) | (1 << 5); /* 9-bit mode */
/* configure as specified */
status = lm75_read_value(client, LM75_REG_CONF);
if (status < 0) {
dev_dbg(&client->dev, "Can't read config? %d\n", status);
goto exit_free;
}
data->orig_conf = status;
new = status & ~clr_mask;
new |= set_mask;
if (status != new)
lm75_write_value(client, LM75_REG_CONF, new);
dev_dbg(&client->dev, "Config %02x\n", new);
/* Register sysfs hooks */
status = sysfs_create_group(&client->dev.kobj, &lm75_group);
if (status)
goto exit_free;
data->hwmon_dev = hwmon_device_register(&client->dev);
if (IS_ERR(data->hwmon_dev)) {
status = PTR_ERR(data->hwmon_dev);
goto exit_remove;
}
dev_info(&client->dev, "%s: sensor '%s'\n",
dev_name(data->hwmon_dev), client->name);
return 0;
exit_remove:
sysfs_remove_group(&client->dev.kobj, &lm75_group);
exit_free:
i2c_set_clientdata(client, NULL);
kfree(data);
return status;
}
static int lm75_remove(struct i2c_client *client)
{
struct lm75_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &lm75_group);
lm75_write_value(client, LM75_REG_CONF, data->orig_conf);
i2c_set_clientdata(client, NULL);
kfree(data);
return 0;
}
static const struct i2c_device_id lm75_ids[] = {
{ "ds1775", ds1775, },
{ "ds75", ds75, },
{ "lm75", lm75, },
{ "lm75a", lm75a, },
{ "max6625", max6625, },
{ "max6626", max6626, },
{ "mcp980x", mcp980x, },
{ "stds75", stds75, },
{ "tcn75", tcn75, },
{ "tmp100", tmp100, },
{ "tmp101", tmp101, },
{ "tmp175", tmp175, },
{ "tmp275", tmp275, },
{ "tmp75", tmp75, },
{ /* LIST END */ }
};
MODULE_DEVICE_TABLE(i2c, lm75_ids);
/* Return 0 if detection is successful, -ENODEV otherwise */
static int lm75_detect(struct i2c_client *new_client, int kind,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = new_client->adapter;
int i;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
I2C_FUNC_SMBUS_WORD_DATA))
return -ENODEV;
/* Now, we do the remaining detection. There is no identification-
dedicated register so we have to rely on several tricks:
unused bits, registers cycling over 8-address boundaries,
addresses 0x04-0x07 returning the last read value.
The cycling+unused addresses combination is not tested,
since it would significantly slow the detection down and would
hardly add any value. */
if (kind < 0) {
int cur, conf, hyst, os;
/* Unused addresses */
cur = i2c_smbus_read_word_data(new_client, 0);
conf = i2c_smbus_read_byte_data(new_client, 1);
hyst = i2c_smbus_read_word_data(new_client, 2);
if (i2c_smbus_read_word_data(new_client, 4) != hyst
|| i2c_smbus_read_word_data(new_client, 5) != hyst
|| i2c_smbus_read_word_data(new_client, 6) != hyst
|| i2c_smbus_read_word_data(new_client, 7) != hyst)
return -ENODEV;
os = i2c_smbus_read_word_data(new_client, 3);
if (i2c_smbus_read_word_data(new_client, 4) != os
|| i2c_smbus_read_word_data(new_client, 5) != os
|| i2c_smbus_read_word_data(new_client, 6) != os
|| i2c_smbus_read_word_data(new_client, 7) != os)
return -ENODEV;
/* Unused bits */
if (conf & 0xe0)
return -ENODEV;
/* Addresses cycling */
for (i = 8; i < 0xff; i += 8)
if (i2c_smbus_read_byte_data(new_client, i + 1) != conf
|| i2c_smbus_read_word_data(new_client, i + 2) != hyst
|| i2c_smbus_read_word_data(new_client, i + 3) != os)
return -ENODEV;
}
/* NOTE: we treat "force=..." and "force_lm75=..." the same.
* Only new-style driver binding distinguishes chip types.
*/
strlcpy(info->type, "lm75", I2C_NAME_SIZE);
return 0;
}
static struct i2c_driver lm75_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "lm75",
},
.probe = lm75_probe,
.remove = lm75_remove,
.id_table = lm75_ids,
.detect = lm75_detect,
.address_data = &addr_data,
};
/*-----------------------------------------------------------------------*/
/* register access */
/* All registers are word-sized, except for the configuration register.
LM75 uses a high-byte first convention, which is exactly opposite to
the SMBus standard. */
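/*
* Worked example (illustrative): the chip reports 25.0 degrees C as the
* byte pair 0x19 0x00. i2c_smbus_read_word_data() assembles that
* low-byte first as 0x0019, and swab16() restores the register value
* 0x1900 that LM75_TEMP_FROM_REG() expects.
*/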
static int lm75_read_value(struct i2c_client *client, u8 reg)
{
int value;
if (reg == LM75_REG_CONF)
return i2c_smbus_read_byte_data(client, reg);
value = i2c_smbus_read_word_data(client, reg);
return (value < 0) ? value : swab16(value);
}
static int lm75_write_value(struct i2c_client *client, u8 reg, u16 value)
{
if (reg == LM75_REG_CONF)
return i2c_smbus_write_byte_data(client, reg, value);
else
return i2c_smbus_write_word_data(client, reg, swab16(value));
}
static struct lm75_data *lm75_update_device(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct lm75_data *data = i2c_get_clientdata(client);
mutex_lock(&data->update_lock);
if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
|| !data->valid) {
int i;
dev_dbg(&client->dev, "Starting lm75 update\n");
for (i = 0; i < ARRAY_SIZE(data->temp); i++) {
int status;
status = lm75_read_value(client, LM75_REG_TEMP[i]);
if (status < 0)
dev_dbg(&client->dev, "reg %d, err %d\n",
LM75_REG_TEMP[i], status);
else
data->temp[i] = status;
}
data->last_updated = jiffies;
data->valid = 1;
}
mutex_unlock(&data->update_lock);
return data;
}
/*-----------------------------------------------------------------------*/
/* module glue */
static int __init sensors_lm75_init(void)
{
return i2c_add_driver(&lm75_driver);
}
static void __exit sensors_lm75_exit(void)
{
i2c_del_driver(&lm75_driver);
}
MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>");
MODULE_DESCRIPTION("LM75 driver");
MODULE_LICENSE("GPL");
module_init(sensors_lm75_init);
module_exit(sensors_lm75_exit);
| gpl-2.0 |
bas-t/media_tree | arch/blackfin/kernel/setup.c | 1207 | 40323 | /*
* Copyright 2004-2010 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
#include <linux/delay.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/tty.h>
#include <linux/pfn.h>
#ifdef CONFIG_MTD_UCLINUX
#include <linux/mtd/map.h>
#include <linux/ext2_fs.h>
#include <uapi/linux/cramfs_fs.h>
#include <linux/romfs_fs.h>
#endif
#include <asm/cplb.h>
#include <asm/cacheflush.h>
#include <asm/blackfin.h>
#include <asm/cplbinit.h>
#include <asm/clocks.h>
#include <asm/div64.h>
#include <asm/cpu.h>
#include <asm/fixed_code.h>
#include <asm/early_printk.h>
#include <asm/irq_handler.h>
#include <asm/pda.h>
#ifdef CONFIG_BF60x
#include <mach/pm.h>
#endif
#ifdef CONFIG_SCB_PRIORITY
#include <asm/scb.h>
#endif
u16 _bfin_swrst;
EXPORT_SYMBOL(_bfin_swrst);
unsigned long memory_start, memory_end, physical_mem_end;
unsigned long _rambase, _ramstart, _ramend;
unsigned long reserved_mem_dcache_on;
unsigned long reserved_mem_icache_on;
EXPORT_SYMBOL(memory_start);
EXPORT_SYMBOL(memory_end);
EXPORT_SYMBOL(physical_mem_end);
EXPORT_SYMBOL(_ramend);
EXPORT_SYMBOL(reserved_mem_dcache_on);
#ifdef CONFIG_MTD_UCLINUX
extern struct map_info uclinux_ram_map;
unsigned long memory_mtd_end, memory_mtd_start, mtd_size;
EXPORT_SYMBOL(memory_mtd_end);
EXPORT_SYMBOL(memory_mtd_start);
EXPORT_SYMBOL(mtd_size);
#endif
char __initdata command_line[COMMAND_LINE_SIZE];
struct blackfin_initial_pda __initdata initial_pda;
/* boot memmap, for parsing "memmap=" */
#define BFIN_MEMMAP_MAX 128 /* number of entries in bfin_memmap */
#define BFIN_MEMMAP_RAM 1
#define BFIN_MEMMAP_RESERVED 2
static struct bfin_memmap {
int nr_map;
struct bfin_memmap_entry {
unsigned long long addr; /* start of memory segment */
unsigned long long size;
unsigned long type;
} map[BFIN_MEMMAP_MAX];
} bfin_memmap __initdata;
/* for memmap sanitization */
struct change_member {
struct bfin_memmap_entry *pentry; /* pointer to original entry */
unsigned long long addr; /* address for this change point */
};
static struct change_member change_point_list[2*BFIN_MEMMAP_MAX] __initdata;
static struct change_member *change_point[2*BFIN_MEMMAP_MAX] __initdata;
static struct bfin_memmap_entry *overlap_list[BFIN_MEMMAP_MAX] __initdata;
static struct bfin_memmap_entry new_map[BFIN_MEMMAP_MAX] __initdata;
DEFINE_PER_CPU(struct blackfin_cpudata, cpu_data);
static int early_init_clkin_hz(char *buf);
#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
void __init generate_cplb_tables(void)
{
unsigned int cpu;
generate_cplb_tables_all();
/* Generate per-CPU I&D CPLB tables */
for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
generate_cplb_tables_cpu(cpu);
}
#endif
void bfin_setup_caches(unsigned int cpu)
{
#ifdef CONFIG_BFIN_ICACHE
bfin_icache_init(icplb_tbl[cpu]);
#endif
#ifdef CONFIG_BFIN_DCACHE
bfin_dcache_init(dcplb_tbl[cpu]);
#endif
bfin_setup_cpudata(cpu);
/*
* In cache coherence emulation mode, we need to have the
* D-cache enabled before running any atomic operation which
* might involve cache invalidation (i.e. spinlock, rwlock).
* So printk's are deferred until then.
*/
#ifdef CONFIG_BFIN_ICACHE
printk(KERN_INFO "Instruction Cache Enabled for CPU%u\n", cpu);
printk(KERN_INFO " External memory:"
# ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
" cacheable"
# else
" uncacheable"
# endif
" in instruction cache\n");
if (L2_LENGTH)
printk(KERN_INFO " L2 SRAM :"
# ifdef CONFIG_BFIN_L2_ICACHEABLE
" cacheable"
# else
" uncacheable"
# endif
" in instruction cache\n");
#else
printk(KERN_INFO "Instruction Cache Disabled for CPU%u\n", cpu);
#endif
#ifdef CONFIG_BFIN_DCACHE
printk(KERN_INFO "Data Cache Enabled for CPU%u\n", cpu);
printk(KERN_INFO " External memory:"
# if defined CONFIG_BFIN_EXTMEM_WRITEBACK
" cacheable (write-back)"
# elif defined CONFIG_BFIN_EXTMEM_WRITETHROUGH
" cacheable (write-through)"
# else
" uncacheable"
# endif
" in data cache\n");
if (L2_LENGTH)
printk(KERN_INFO " L2 SRAM :"
# if defined CONFIG_BFIN_L2_WRITEBACK
" cacheable (write-back)"
# elif defined CONFIG_BFIN_L2_WRITETHROUGH
" cacheable (write-through)"
# else
" uncacheable"
# endif
" in data cache\n");
#else
printk(KERN_INFO "Data Cache Disabled for CPU%u\n", cpu);
#endif
}
void bfin_setup_cpudata(unsigned int cpu)
{
struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu);
cpudata->imemctl = bfin_read_IMEM_CONTROL();
cpudata->dmemctl = bfin_read_DMEM_CONTROL();
}
void __init bfin_cache_init(void)
{
#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
generate_cplb_tables();
#endif
bfin_setup_caches(0);
}
void __init bfin_relocate_l1_mem(void)
{
unsigned long text_l1_len = (unsigned long)_text_l1_len;
unsigned long data_l1_len = (unsigned long)_data_l1_len;
unsigned long data_b_l1_len = (unsigned long)_data_b_l1_len;
unsigned long l2_len = (unsigned long)_l2_len;
early_shadow_stamp();
/*
* due to the ALIGN(4) in the arch/blackfin/kernel/vmlinux.lds.S
* we know that everything about l1 text/data is nice and aligned,
* so copy by 4 byte chunks, and don't worry about overlapping
* src/dest.
*
* We can't use the dma_memcpy functions, since they can call
* scheduler functions which might be in L1 :( and core writes
* into L1 instruction cause bad access errors, so we are stuck,
* we are required to use DMA, but can't use the common dma
* functions. We can't use memcpy either - since that might be
* going to be in the relocated L1
*/
blackfin_dma_early_init();
/* if necessary, copy L1 text to L1 instruction SRAM */
if (L1_CODE_LENGTH && text_l1_len)
early_dma_memcpy(_stext_l1, _text_l1_lma, text_l1_len);
/* if necessary, copy L1 data to L1 data bank A SRAM */
if (L1_DATA_A_LENGTH && data_l1_len)
early_dma_memcpy(_sdata_l1, _data_l1_lma, data_l1_len);
/* if necessary, copy L1 data B to L1 data bank B SRAM */
if (L1_DATA_B_LENGTH && data_b_l1_len)
early_dma_memcpy(_sdata_b_l1, _data_b_l1_lma, data_b_l1_len);
early_dma_memcpy_done();
#if defined(CONFIG_SMP) && defined(CONFIG_ICACHE_FLUSH_L1)
blackfin_iflush_l1_entry[0] = (unsigned long)blackfin_icache_flush_range_l1;
#endif
/* if necessary, copy L2 text/data to L2 SRAM */
if (L2_LENGTH && l2_len)
memcpy(_stext_l2, _l2_lma, l2_len);
}
#ifdef CONFIG_SMP
void __init bfin_relocate_coreb_l1_mem(void)
{
unsigned long text_l1_len = (unsigned long)_text_l1_len;
unsigned long data_l1_len = (unsigned long)_data_l1_len;
unsigned long data_b_l1_len = (unsigned long)_data_b_l1_len;
blackfin_dma_early_init();
/* if necessary, copy L1 text to L1 instruction SRAM */
if (L1_CODE_LENGTH && text_l1_len)
early_dma_memcpy((void *)COREB_L1_CODE_START, _text_l1_lma,
text_l1_len);
/* if necessary, copy L1 data to L1 data bank A SRAM */
if (L1_DATA_A_LENGTH && data_l1_len)
early_dma_memcpy((void *)COREB_L1_DATA_A_START, _data_l1_lma,
data_l1_len);
/* if necessary, copy L1 data B to L1 data bank B SRAM */
if (L1_DATA_B_LENGTH && data_b_l1_len)
early_dma_memcpy((void *)COREB_L1_DATA_B_START, _data_b_l1_lma,
data_b_l1_len);
early_dma_memcpy_done();
#ifdef CONFIG_ICACHE_FLUSH_L1
blackfin_iflush_l1_entry[1] = (unsigned long)blackfin_icache_flush_range_l1 -
(unsigned long)_stext_l1 + COREB_L1_CODE_START;
#endif
}
#endif
#ifdef CONFIG_ROMKERNEL
void __init bfin_relocate_xip_data(void)
{
early_shadow_stamp();
memcpy(_sdata, _data_lma, (unsigned long)_data_len - THREAD_SIZE + sizeof(struct thread_info));
memcpy(_sinitdata, _init_data_lma, (unsigned long)_init_data_len);
}
#endif
/* add_memory_region to memmap */
static void __init add_memory_region(unsigned long long start,
unsigned long long size, int type)
{
int i;
i = bfin_memmap.nr_map;
if (i == BFIN_MEMMAP_MAX) {
printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
return;
}
bfin_memmap.map[i].addr = start;
bfin_memmap.map[i].size = size;
bfin_memmap.map[i].type = type;
bfin_memmap.nr_map++;
}
/*
* Sanitize the boot memmap, removing overlaps.
*/
static int __init sanitize_memmap(struct bfin_memmap_entry *map, int *pnr_map)
{
struct change_member *change_tmp;
unsigned long current_type, last_type;
unsigned long long last_addr;
int chgidx, still_changing;
int overlap_entries;
int new_entry;
int old_nr, new_nr, chg_nr;
int i;
/*
Visually we're performing the following (1,2,3,4 = memory types)
Sample memory map (w/overlaps):
____22__________________
______________________4_
____1111________________
_44_____________________
11111111________________
____________________33__
___________44___________
__________33333_________
______________22________
___________________2222_
_________111111111______
_____________________11_
_________________4______
Sanitized equivalent (no overlap):
1_______________________
_44_____________________
___1____________________
____22__________________
______11________________
_________1______________
__________3_____________
___________44___________
_____________33_________
_______________2________
________________1_______
_________________4______
___________________2____
____________________33__
______________________4_
*/
/* if there's only one memory region, don't bother */
if (*pnr_map < 2)
return -1;
old_nr = *pnr_map;
/* bail out if we find any unreasonable addresses in memmap */
for (i = 0; i < old_nr; i++)
if (map[i].addr + map[i].size < map[i].addr)
return -1;
/* create pointers for initial change-point information (for sorting) */
for (i = 0; i < 2*old_nr; i++)
change_point[i] = &change_point_list[i];
/* record all known change-points (starting and ending addresses),
omitting those that are for empty memory regions */
chgidx = 0;
for (i = 0; i < old_nr; i++) {
if (map[i].size != 0) {
change_point[chgidx]->addr = map[i].addr;
change_point[chgidx++]->pentry = &map[i];
change_point[chgidx]->addr = map[i].addr + map[i].size;
change_point[chgidx++]->pentry = &map[i];
}
}
chg_nr = chgidx; /* true number of change-points */
/* sort change-point list by memory addresses (low -> high) */
still_changing = 1;
while (still_changing) {
still_changing = 0;
for (i = 1; i < chg_nr; i++) {
/* if <current_addr> > <last_addr>, swap */
/* or, if current=<start_addr> & last=<end_addr>, swap */
if ((change_point[i]->addr < change_point[i-1]->addr) ||
((change_point[i]->addr == change_point[i-1]->addr) &&
(change_point[i]->addr == change_point[i]->pentry->addr) &&
(change_point[i-1]->addr != change_point[i-1]->pentry->addr))
) {
change_tmp = change_point[i];
change_point[i] = change_point[i-1];
change_point[i-1] = change_tmp;
still_changing = 1;
}
}
}
/* create a new memmap, removing overlaps */
overlap_entries = 0; /* number of entries in the overlap table */
new_entry = 0; /* index for creating new memmap entries */
last_type = 0; /* start with undefined memory type */
last_addr = 0; /* start with 0 as last starting address */
/* loop through change-points, determining effect on the new memmap */
for (chgidx = 0; chgidx < chg_nr; chgidx++) {
/* keep track of all overlapping memmap entries */
if (change_point[chgidx]->addr == change_point[chgidx]->pentry->addr) {
/* add map entry to overlap list (> 1 entry implies an overlap) */
overlap_list[overlap_entries++] = change_point[chgidx]->pentry;
} else {
/* remove entry from list (order independent, so swap with last) */
for (i = 0; i < overlap_entries; i++) {
if (overlap_list[i] == change_point[chgidx]->pentry)
overlap_list[i] = overlap_list[overlap_entries-1];
}
overlap_entries--;
}
/* if there are overlapping entries, decide which "type" to use */
/* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
current_type = 0;
for (i = 0; i < overlap_entries; i++)
if (overlap_list[i]->type > current_type)
current_type = overlap_list[i]->type;
/* continue building up new memmap based on this information */
if (current_type != last_type) {
if (last_type != 0) {
new_map[new_entry].size =
change_point[chgidx]->addr - last_addr;
/* move forward only if the new size was non-zero */
if (new_map[new_entry].size != 0)
if (++new_entry >= BFIN_MEMMAP_MAX)
break; /* no more space left for new entries */
}
if (current_type != 0) {
new_map[new_entry].addr = change_point[chgidx]->addr;
new_map[new_entry].type = current_type;
last_addr = change_point[chgidx]->addr;
}
last_type = current_type;
}
}
new_nr = new_entry; /* retain count for new entries */
/* copy new mapping into original location */
memcpy(map, new_map, new_nr*sizeof(struct bfin_memmap_entry));
*pnr_map = new_nr;
return 0;
}
static void __init print_memory_map(char *who)
{
int i;
for (i = 0; i < bfin_memmap.nr_map; i++) {
printk(KERN_DEBUG " %s: %016Lx - %016Lx ", who,
bfin_memmap.map[i].addr,
bfin_memmap.map[i].addr + bfin_memmap.map[i].size);
switch (bfin_memmap.map[i].type) {
case BFIN_MEMMAP_RAM:
printk(KERN_CONT "(usable)\n");
break;
case BFIN_MEMMAP_RESERVED:
printk(KERN_CONT "(reserved)\n");
break;
default:
printk(KERN_CONT "type %lu\n", bfin_memmap.map[i].type);
break;
}
}
}
static __init int parse_memmap(char *arg)
{
unsigned long long start_at, mem_size;
if (!arg)
return -EINVAL;
mem_size = memparse(arg, &arg);
if (*arg == '@') {
start_at = memparse(arg+1, &arg);
add_memory_region(start_at, mem_size, BFIN_MEMMAP_RAM);
} else if (*arg == '$') {
start_at = memparse(arg+1, &arg);
add_memory_region(start_at, mem_size, BFIN_MEMMAP_RESERVED);
}
return 0;
}
/*
* Initial parsing of the command line. Currently, we support:
* - Controlling the linux memory size: mem=xxx[KMG]
* - Controlling the physical memory size: max_mem=xxx[KMG][$][#]
* $ -> reserved memory is dcacheable
* # -> reserved memory is icacheable
* - "memmap=XXX[KkmM][@][$]XXX[KkmM]" defines a memory region
* @ from <start> to <start>+<mem>, type RAM
* $ from <start> to <start>+<mem>, type RESERVED
*/
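/*
* Example (illustrative): booting with "mem=32M memmap=4M$28M" caps
* kernel-managed memory at 32MB and reserves the 4MB at 28MB; the
* clauses are picked out by parse_cmdline_early() below, which hands
* memmap= off to parse_memmap() above.
*/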
static __init void parse_cmdline_early(char *cmdline_p)
{
char c = ' ', *to = cmdline_p;
unsigned int memsize;
for (;;) {
if (c == ' ') {
if (!memcmp(to, "mem=", 4)) {
to += 4;
memsize = memparse(to, &to);
if (memsize)
_ramend = memsize;
} else if (!memcmp(to, "max_mem=", 8)) {
to += 8;
memsize = memparse(to, &to);
if (memsize) {
physical_mem_end = memsize;
if (*to != ' ') {
if (*to == '$'
|| *(to + 1) == '$')
reserved_mem_dcache_on = 1;
if (*to == '#'
|| *(to + 1) == '#')
reserved_mem_icache_on = 1;
}
}
} else if (!memcmp(to, "clkin_hz=", 9)) {
to += 9;
early_init_clkin_hz(to);
#ifdef CONFIG_EARLY_PRINTK
} else if (!memcmp(to, "earlyprintk=", 12)) {
to += 12;
setup_early_printk(to);
#endif
} else if (!memcmp(to, "memmap=", 7)) {
to += 7;
parse_memmap(to);
}
}
c = *(to++);
if (!c)
break;
}
}
/*
* Setup memory defaults from user config.
* The physical memory layout looks like:
*
* [_rambase, _ramstart]: kernel image
* [memory_start, memory_end]: dynamic memory managed by kernel
* [memory_end, _ramend]: reserved memory
* [memory_mtd_start(memory_end),
* memory_mtd_start + mtd_size]: rootfs (if any)
* [_ramend - DMA_UNCACHED_REGION,
* _ramend]: uncached DMA region
* [_ramend, physical_mem_end]: memory not managed by kernel
*/
static __init void memory_setup(void)
{
#ifdef CONFIG_MTD_UCLINUX
unsigned long mtd_phys = 0;
#endif
unsigned long max_mem;
_rambase = CONFIG_BOOT_LOAD;
_ramstart = (unsigned long)_end;
if (DMA_UNCACHED_REGION > (_ramend - _ramstart)) {
console_init();
panic("DMA region exceeds memory limit: %lu.",
_ramend - _ramstart);
}
max_mem = memory_end = _ramend - DMA_UNCACHED_REGION;
#if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263)
/* Due to a Hardware Anomaly we need to limit the size of usable
* instruction memory to max 60MB, 56 if HUNT_FOR_ZERO is on
* 05000263 - Hardware loop corrupted when taking an ICPLB exception
*/
# if (defined(CONFIG_DEBUG_HUNT_FOR_ZERO))
if (max_mem >= 56 * 1024 * 1024)
max_mem = 56 * 1024 * 1024;
# else
if (max_mem >= 60 * 1024 * 1024)
max_mem = 60 * 1024 * 1024;
# endif /* CONFIG_DEBUG_HUNT_FOR_ZERO */
#endif /* ANOMALY_05000263 */
#ifdef CONFIG_MPU
/* Round up to multiple of 4MB */
memory_start = (_ramstart + 0x3fffff) & ~0x3fffff;
#else
memory_start = PAGE_ALIGN(_ramstart);
#endif
#if defined(CONFIG_MTD_UCLINUX)
/* generic memory mapped MTD driver */
memory_mtd_end = memory_end;
mtd_phys = _ramstart;
mtd_size = PAGE_ALIGN(*((unsigned long *)(mtd_phys + 8)));
# if defined(CONFIG_EXT2_FS) || defined(CONFIG_EXT3_FS)
if (*((unsigned short *)(mtd_phys + 0x438)) == EXT2_SUPER_MAGIC)
mtd_size =
PAGE_ALIGN(*((unsigned long *)(mtd_phys + 0x404)) << 10);
# endif
# if defined(CONFIG_CRAMFS)
if (*((unsigned long *)(mtd_phys)) == CRAMFS_MAGIC)
mtd_size = PAGE_ALIGN(*((unsigned long *)(mtd_phys + 0x4)));
# endif
# if defined(CONFIG_ROMFS_FS)
if (((unsigned long *)mtd_phys)[0] == ROMSB_WORD0
&& ((unsigned long *)mtd_phys)[1] == ROMSB_WORD1) {
mtd_size =
PAGE_ALIGN(be32_to_cpu(((unsigned long *)mtd_phys)[2]));
/* ROM_FS is XIP, so if we found it, we need to limit memory */
if (memory_end > max_mem) {
pr_info("Limiting kernel memory to %liMB due to anomaly 05000263\n",
(max_mem - CONFIG_PHY_RAM_BASE_ADDRESS) >> 20);
memory_end = max_mem;
}
}
# endif /* CONFIG_ROMFS_FS */
/* Since the default MTD_UCLINUX has no magic number, we just blindly
* read 8 past the end of the kernel's image, and look at it.
* When no image is attached, mtd_size is set to a random number.
* Do some basic sanity checks before operating on things.
*/
if (mtd_size == 0 || memory_end <= mtd_size) {
pr_emerg("Could not find valid ram mtd attached.\n");
} else {
memory_end -= mtd_size;
/* Relocate MTD image to the top of memory after the uncached memory area */
uclinux_ram_map.phys = memory_mtd_start = memory_end;
uclinux_ram_map.size = mtd_size;
pr_info("Found mtd parition at 0x%p, (len=0x%lx), moving to 0x%p\n",
_end, mtd_size, (void *)memory_mtd_start);
dma_memcpy((void *)uclinux_ram_map.phys, _end, uclinux_ram_map.size);
}
#endif /* CONFIG_MTD_UCLINUX */
/* We need to limit memory, since everything could have a text section
* of userspace in it, and expose anomaly 05000263. If the anomaly
* doesn't exist, or we don't need to - then don't.
*/
if (memory_end > max_mem) {
pr_info("Limiting kernel memory to %liMB due to anomaly 05000263\n",
(max_mem - CONFIG_PHY_RAM_BASE_ADDRESS) >> 20);
memory_end = max_mem;
}
#ifdef CONFIG_MPU
#if defined(CONFIG_ROMFS_ON_MTD) && defined(CONFIG_MTD_ROM)
page_mask_nelts = (((_ramend + ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE -
ASYNC_BANK0_BASE) >> PAGE_SHIFT) + 31) / 32;
#else
page_mask_nelts = ((_ramend >> PAGE_SHIFT) + 31) / 32;
#endif
page_mask_order = get_order(3 * page_mask_nelts * sizeof(long));
#endif
init_mm.start_code = (unsigned long)_stext;
init_mm.end_code = (unsigned long)_etext;
init_mm.end_data = (unsigned long)_edata;
init_mm.brk = (unsigned long)0;
printk(KERN_INFO "Board Memory: %ldMB\n", (physical_mem_end - CONFIG_PHY_RAM_BASE_ADDRESS) >> 20);
printk(KERN_INFO "Kernel Managed Memory: %ldMB\n", (_ramend - CONFIG_PHY_RAM_BASE_ADDRESS) >> 20);
printk(KERN_INFO "Memory map:\n"
" fixedcode = 0x%p-0x%p\n"
" text = 0x%p-0x%p\n"
" rodata = 0x%p-0x%p\n"
" bss = 0x%p-0x%p\n"
" data = 0x%p-0x%p\n"
" stack = 0x%p-0x%p\n"
" init = 0x%p-0x%p\n"
" available = 0x%p-0x%p\n"
#ifdef CONFIG_MTD_UCLINUX
" rootfs = 0x%p-0x%p\n"
#endif
#if DMA_UNCACHED_REGION > 0
" DMA Zone = 0x%p-0x%p\n"
#endif
, (void *)FIXED_CODE_START, (void *)FIXED_CODE_END,
_stext, _etext,
__start_rodata, __end_rodata,
__bss_start, __bss_stop,
_sdata, _edata,
(void *)&init_thread_union,
(void *)((int)(&init_thread_union) + THREAD_SIZE),
__init_begin, __init_end,
(void *)_ramstart, (void *)memory_end
#ifdef CONFIG_MTD_UCLINUX
, (void *)memory_mtd_start, (void *)(memory_mtd_start + mtd_size)
#endif
#if DMA_UNCACHED_REGION > 0
, (void *)(_ramend - DMA_UNCACHED_REGION), (void *)(_ramend)
#endif
);
}
/*
* Find the lowest, highest page frame number we have available
*/
void __init find_min_max_pfn(void)
{
int i;
max_pfn = 0;
min_low_pfn = PFN_DOWN(memory_end);
for (i = 0; i < bfin_memmap.nr_map; i++) {
unsigned long start, end;
/* RAM? */
if (bfin_memmap.map[i].type != BFIN_MEMMAP_RAM)
continue;
start = PFN_UP(bfin_memmap.map[i].addr);
end = PFN_DOWN(bfin_memmap.map[i].addr +
bfin_memmap.map[i].size);
if (start >= end)
continue;
if (end > max_pfn)
max_pfn = end;
if (start < min_low_pfn)
min_low_pfn = start;
}
}
static __init void setup_bootmem_allocator(void)
{
int bootmap_size;
int i;
unsigned long start_pfn, end_pfn;
unsigned long curr_pfn, last_pfn, size;
/* mark memory between memory_start and memory_end usable */
add_memory_region(memory_start,
memory_end - memory_start, BFIN_MEMMAP_RAM);
/* sanity check for overlap */
sanitize_memmap(bfin_memmap.map, &bfin_memmap.nr_map);
print_memory_map("boot memmap");
/* initialize globals in linux/bootmem.h */
find_min_max_pfn();
/* pfn of the last usable page frame */
if (max_pfn > memory_end >> PAGE_SHIFT)
max_pfn = memory_end >> PAGE_SHIFT;
/* pfn of last page frame directly mapped by kernel */
max_low_pfn = max_pfn;
/* pfn of the first usable page frame after kernel image*/
if (min_low_pfn < memory_start >> PAGE_SHIFT)
min_low_pfn = memory_start >> PAGE_SHIFT;
start_pfn = CONFIG_PHY_RAM_BASE_ADDRESS >> PAGE_SHIFT;
end_pfn = memory_end >> PAGE_SHIFT;
/*
* give all the memory to the bootmap allocator, tell it to put the
* boot mem_map at the start of memory.
*/
bootmap_size = init_bootmem_node(NODE_DATA(0),
memory_start >> PAGE_SHIFT, /* map goes here */
start_pfn, end_pfn);
/* register the memmap regions with the bootmem allocator */
for (i = 0; i < bfin_memmap.nr_map; i++) {
/*
* Reserve usable memory
*/
if (bfin_memmap.map[i].type != BFIN_MEMMAP_RAM)
continue;
/*
* We are rounding up the start address of usable memory:
*/
curr_pfn = PFN_UP(bfin_memmap.map[i].addr);
if (curr_pfn >= end_pfn)
continue;
/*
* ... and at the end of the usable range downwards:
*/
last_pfn = PFN_DOWN(bfin_memmap.map[i].addr +
bfin_memmap.map[i].size);
if (last_pfn > end_pfn)
last_pfn = end_pfn;
/*
* .. finally, did all the rounding and playing
* around just make the area go away?
*/
if (last_pfn <= curr_pfn)
continue;
size = last_pfn - curr_pfn;
free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
}
/* reserve memory before memory_start, including bootmap */
reserve_bootmem(CONFIG_PHY_RAM_BASE_ADDRESS,
memory_start + bootmap_size + PAGE_SIZE - 1 - CONFIG_PHY_RAM_BASE_ADDRESS,
BOOTMEM_DEFAULT);
}
#define EBSZ_TO_MEG(ebsz) \
({ \
int meg = 0; \
switch (ebsz & 0xf) { \
case 0x1: meg = 16; break; \
case 0x3: meg = 32; break; \
case 0x5: meg = 64; break; \
case 0x7: meg = 128; break; \
case 0x9: meg = 256; break; \
case 0xb: meg = 512; break; \
} \
meg; \
})
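/*
* Worked example (illustrative): a BF561 EBIU_SDBCTL of 0x05050505 has
* ebsz nibble 0x5 (64MB) in each of its four bank fields, so
* get_mem_size() below reports 4 x 64 = 256MB.
*/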
static inline int __init get_mem_size(void)
{
#if defined(EBIU_SDBCTL)
# if defined(BF561_FAMILY)
int ret = 0;
u32 sdbctl = bfin_read_EBIU_SDBCTL();
ret += EBSZ_TO_MEG(sdbctl >> 0);
ret += EBSZ_TO_MEG(sdbctl >> 8);
ret += EBSZ_TO_MEG(sdbctl >> 16);
ret += EBSZ_TO_MEG(sdbctl >> 24);
return ret;
# else
return EBSZ_TO_MEG(bfin_read_EBIU_SDBCTL());
# endif
#elif defined(EBIU_DDRCTL1)
u32 ddrctl = bfin_read_EBIU_DDRCTL1();
int ret = 0;
switch (ddrctl & 0xc0000) {
case DEVSZ_64:
ret = 64 / 8;
break;
case DEVSZ_128:
ret = 128 / 8;
break;
case DEVSZ_256:
ret = 256 / 8;
break;
case DEVSZ_512:
ret = 512 / 8;
break;
}
switch (ddrctl & 0x30000) {
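/* intentional fall-through: each halving of device width doubles the chip count */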
case DEVWD_4:
ret *= 2;
case DEVWD_8:
ret *= 2;
case DEVWD_16:
break;
}
if ((ddrctl & 0xc000) == 0x4000)
ret *= 2;
return ret;
#elif defined(CONFIG_BF60x)
u32 ddrctl = bfin_read_DMC0_CFG();
int ret;
switch (ddrctl & 0xf00) {
case DEVSZ_64:
ret = 64 / 8;
break;
case DEVSZ_128:
ret = 128 / 8;
break;
case DEVSZ_256:
ret = 256 / 8;
break;
case DEVSZ_512:
ret = 512 / 8;
break;
case DEVSZ_1G:
ret = 1024 / 8;
break;
case DEVSZ_2G:
ret = 2048 / 8;
break;
}
return ret;
#endif
BUG();
}
__attribute__((weak))
void __init native_machine_early_platform_add_devices(void)
{
}
#ifdef CONFIG_BF60x
static inline u_long bfin_get_clk(char *name)
{
struct clk *clk;
u_long clk_rate;
clk = clk_get(NULL, name);
if (IS_ERR(clk))
return 0;
clk_rate = clk_get_rate(clk);
clk_put(clk);
return clk_rate;
}
#endif
void __init setup_arch(char **cmdline_p)
{
u32 mmr;
unsigned long sclk, cclk;
native_machine_early_platform_add_devices();
enable_shadow_console();
/* Check to make sure we are running on the right processor */
mmr = bfin_cpuid();
if (unlikely(CPUID != bfin_cpuid()))
printk(KERN_ERR "ERROR: Not running on ADSP-%s: unknown CPUID 0x%04x Rev 0.%d\n",
CPU, bfin_cpuid(), bfin_revid());
#ifdef CONFIG_DUMMY_CONSOLE
conswitchp = &dummy_con;
#endif
#if defined(CONFIG_CMDLINE_BOOL)
strncpy(&command_line[0], CONFIG_CMDLINE, sizeof(command_line));
command_line[sizeof(command_line) - 1] = 0;
#endif
/* Keep a copy of command line */
*cmdline_p = &command_line[0];
memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
boot_command_line[COMMAND_LINE_SIZE - 1] = '\0';
memset(&bfin_memmap, 0, sizeof(bfin_memmap));
#ifdef CONFIG_BF60x
/* The clock framework must be initialized before early command-line parsing */
clk_init();
#endif
/* If the user does not specify things on the command line, use
* what the bootloader set things up as
*/
physical_mem_end = 0;
parse_cmdline_early(&command_line[0]);
if (_ramend == 0)
_ramend = get_mem_size() * 1024 * 1024;
if (physical_mem_end == 0)
physical_mem_end = _ramend;
memory_setup();
#ifndef CONFIG_BF60x
/* Initialize Async memory banks */
bfin_write_EBIU_AMBCTL0(AMBCTL0VAL);
bfin_write_EBIU_AMBCTL1(AMBCTL1VAL);
bfin_write_EBIU_AMGCTL(AMGCTLVAL);
#ifdef CONFIG_EBIU_MBSCTLVAL
bfin_write_EBIU_MBSCTL(CONFIG_EBIU_MBSCTLVAL);
bfin_write_EBIU_MODE(CONFIG_EBIU_MODEVAL);
bfin_write_EBIU_FCTL(CONFIG_EBIU_FCTLVAL);
#endif
#endif
#ifdef CONFIG_BFIN_HYSTERESIS_CONTROL
bfin_write_PORTF_HYSTERESIS(HYST_PORTF_0_15);
bfin_write_PORTG_HYSTERESIS(HYST_PORTG_0_15);
bfin_write_PORTH_HYSTERESIS(HYST_PORTH_0_15);
bfin_write_MISCPORT_HYSTERESIS((bfin_read_MISCPORT_HYSTERESIS() &
~HYST_NONEGPIO_MASK) | HYST_NONEGPIO);
#endif
cclk = get_cclk();
sclk = get_sclk();
if ((ANOMALY_05000273 || ANOMALY_05000274) && (cclk >> 1) < sclk)
panic("ANOMALY 05000273 or 05000274: CCLK must be >= 2*SCLK");
#ifdef BF561_FAMILY
if (ANOMALY_05000266) {
bfin_read_IMDMA_D0_IRQ_STATUS();
bfin_read_IMDMA_D1_IRQ_STATUS();
}
#endif
mmr = bfin_read_TBUFCTL();
printk(KERN_INFO "Hardware Trace %s and %sabled\n",
(mmr & 0x1) ? "active" : "off",
(mmr & 0x2) ? "en" : "dis");
#ifndef CONFIG_BF60x
mmr = bfin_read_SYSCR();
printk(KERN_INFO "Boot Mode: %i\n", mmr & 0xF);
/* Newer parts mirror SWRST bits in SYSCR */
#if defined(CONFIG_BF53x) || defined(CONFIG_BF561) || \
defined(CONFIG_BF538) || defined(CONFIG_BF539)
_bfin_swrst = bfin_read_SWRST();
#else
/* Clear boot mode field */
_bfin_swrst = mmr & ~0xf;
#endif
#ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT
bfin_write_SWRST(_bfin_swrst & ~DOUBLE_FAULT);
#endif
#ifdef CONFIG_DEBUG_DOUBLEFAULT_RESET
bfin_write_SWRST(_bfin_swrst | DOUBLE_FAULT);
#endif
#ifdef CONFIG_SMP
if (_bfin_swrst & SWRST_DBL_FAULT_A) {
#else
if (_bfin_swrst & RESET_DOUBLE) {
#endif
printk(KERN_EMERG "Recovering from DOUBLE FAULT event\n");
#ifdef CONFIG_DEBUG_DOUBLEFAULT
/* We assume the crashing kernel and the current symbol table match */
printk(KERN_EMERG " While handling exception (EXCAUSE = %#x) at %pF\n",
initial_pda.seqstat_doublefault & SEQSTAT_EXCAUSE,
initial_pda.retx_doublefault);
printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %pF\n",
initial_pda.dcplb_doublefault_addr);
printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %pF\n",
initial_pda.icplb_doublefault_addr);
#endif
printk(KERN_NOTICE " The instruction at %pF caused a double exception\n",
initial_pda.retx);
} else if (_bfin_swrst & RESET_WDOG)
printk(KERN_INFO "Recovering from Watchdog event\n");
else if (_bfin_swrst & RESET_SOFTWARE)
printk(KERN_NOTICE "Reset caused by Software reset\n");
#endif
printk(KERN_INFO "Blackfin support (C) 2004-2010 Analog Devices, Inc.\n");
if (bfin_compiled_revid() == 0xffff)
printk(KERN_INFO "Compiled for ADSP-%s Rev any, running on 0.%d\n", CPU, bfin_revid());
else if (bfin_compiled_revid() == -1)
printk(KERN_INFO "Compiled for ADSP-%s Rev none\n", CPU);
else
printk(KERN_INFO "Compiled for ADSP-%s Rev 0.%d\n", CPU, bfin_compiled_revid());
if (likely(CPUID == bfin_cpuid())) {
if (bfin_revid() != bfin_compiled_revid()) {
if (bfin_compiled_revid() == -1)
printk(KERN_ERR "Warning: Compiled for Rev none, but running on Rev %d\n",
bfin_revid());
else if (bfin_compiled_revid() != 0xffff) {
printk(KERN_ERR "Warning: Compiled for Rev %d, but running on Rev %d\n",
bfin_compiled_revid(), bfin_revid());
if (bfin_compiled_revid() > bfin_revid())
panic("Error: you are missing anomaly workarounds for this rev");
}
}
if (bfin_revid() < CONFIG_BF_REV_MIN || bfin_revid() > CONFIG_BF_REV_MAX)
printk(KERN_ERR "Warning: Unsupported Chip Revision ADSP-%s Rev 0.%d detected\n",
CPU, bfin_revid());
}
printk(KERN_INFO "Blackfin Linux support by http://blackfin.uclinux.org/\n");
#ifdef CONFIG_BF60x
printk(KERN_INFO "Processor Speed: %lu MHz core clock, %lu MHz SCLk, %lu MHz SCLK0, %lu MHz SCLK1 and %lu MHz DCLK\n",
cclk / 1000000, bfin_get_clk("SYSCLK") / 1000000, get_sclk0() / 1000000, get_sclk1() / 1000000, get_dclk() / 1000000);
#else
printk(KERN_INFO "Processor Speed: %lu MHz core clock and %lu MHz System Clock\n",
cclk / 1000000, sclk / 1000000);
#endif
setup_bootmem_allocator();
paging_init();
/* Copy atomic sequences to their fixed location, and sanity check that
these locations are the ones that we advertise to userspace. */
memcpy((void *)FIXED_CODE_START, &fixed_code_start,
FIXED_CODE_END - FIXED_CODE_START);
BUG_ON((char *)&sigreturn_stub - (char *)&fixed_code_start
!= SIGRETURN_STUB - FIXED_CODE_START);
BUG_ON((char *)&atomic_xchg32 - (char *)&fixed_code_start
!= ATOMIC_XCHG32 - FIXED_CODE_START);
BUG_ON((char *)&atomic_cas32 - (char *)&fixed_code_start
!= ATOMIC_CAS32 - FIXED_CODE_START);
BUG_ON((char *)&atomic_add32 - (char *)&fixed_code_start
!= ATOMIC_ADD32 - FIXED_CODE_START);
BUG_ON((char *)&atomic_sub32 - (char *)&fixed_code_start
!= ATOMIC_SUB32 - FIXED_CODE_START);
BUG_ON((char *)&atomic_ior32 - (char *)&fixed_code_start
!= ATOMIC_IOR32 - FIXED_CODE_START);
BUG_ON((char *)&atomic_and32 - (char *)&fixed_code_start
!= ATOMIC_AND32 - FIXED_CODE_START);
BUG_ON((char *)&atomic_xor32 - (char *)&fixed_code_start
!= ATOMIC_XOR32 - FIXED_CODE_START);
BUG_ON((char *)&safe_user_instruction - (char *)&fixed_code_start
!= SAFE_USER_INSTRUCTION - FIXED_CODE_START);
#ifdef CONFIG_SMP
platform_init_cpus();
#endif
init_exception_vectors();
bfin_cache_init(); /* Initialize caches for the boot CPU */
#ifdef CONFIG_SCB_PRIORITY
init_scb();
#endif
}
static int __init topology_init(void)
{
unsigned int cpu;
for_each_possible_cpu(cpu) {
register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu);
}
return 0;
}
subsys_initcall(topology_init);
/* Get the input clock frequency */
static u_long cached_clkin_hz = CONFIG_CLKIN_HZ;
#ifndef CONFIG_BF60x
static u_long get_clkin_hz(void)
{
return cached_clkin_hz;
}
#endif
static int __init early_init_clkin_hz(char *buf)
{
cached_clkin_hz = simple_strtoul(buf, NULL, 0);
#ifdef BFIN_KERNEL_CLOCK
if (cached_clkin_hz != CONFIG_CLKIN_HZ)
panic("cannot change clkin_hz when reprogramming clocks");
#endif
return 1;
}
early_param("clkin_hz=", early_init_clkin_hz);
#ifndef CONFIG_BF60x
/* Get the voltage input multiplier */
static u_long get_vco(void)
{
static u_long cached_vco;
u_long msel, pll_ctl;
/* The assumption here is that VCO never changes at runtime.
* If, someday, we support that, then we'll have to change this.
*/
if (cached_vco)
return cached_vco;
pll_ctl = bfin_read_PLL_CTL();
msel = (pll_ctl >> 9) & 0x3F;
if (0 == msel)
msel = 64;
cached_vco = get_clkin_hz();
cached_vco >>= (1 & pll_ctl); /* DF bit */
cached_vco *= msel;
return cached_vco;
}
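/*
* Worked example (illustrative): with CLKIN = 25MHz, DF = 0 and
* MSEL = 20, get_vco() returns 25MHz * 20 = 500MHz.
*/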
#endif
/* Get the Core clock */
u_long get_cclk(void)
{
#ifdef CONFIG_BF60x
return bfin_get_clk("CCLK");
#else
static u_long cached_cclk_pll_div, cached_cclk;
u_long csel, ssel;
if (bfin_read_PLL_STAT() & 0x1)
return get_clkin_hz();
ssel = bfin_read_PLL_DIV();
if (ssel == cached_cclk_pll_div)
return cached_cclk;
else
cached_cclk_pll_div = ssel;
csel = ((ssel >> 4) & 0x03);
ssel &= 0xf;
if (ssel && ssel < (1 << csel)) /* SCLK > CCLK */
cached_cclk = get_vco() / ssel;
else
cached_cclk = get_vco() >> csel;
return cached_cclk;
#endif
}
EXPORT_SYMBOL(get_cclk);
#ifdef CONFIG_BF60x
/* Get the bf60x clock of SCLK0 domain */
u_long get_sclk0(void)
{
return bfin_get_clk("SCLK0");
}
EXPORT_SYMBOL(get_sclk0);
/* Get the bf60x clock of SCLK1 domain */
u_long get_sclk1(void)
{
return bfin_get_clk("SCLK1");
}
EXPORT_SYMBOL(get_sclk1);
/* Get the bf60x DRAM clock */
u_long get_dclk(void)
{
return bfin_get_clk("DCLK");
}
EXPORT_SYMBOL(get_dclk);
#endif
/* Get the default system clock */
u_long get_sclk(void)
{
#ifdef CONFIG_BF60x
return get_sclk0();
#else
static u_long cached_sclk;
u_long ssel;
/* The assumption here is that SCLK never changes at runtime.
* If, someday, we support that, then we'll have to change this.
*/
if (cached_sclk)
return cached_sclk;
if (bfin_read_PLL_STAT() & 0x1)
return get_clkin_hz();
ssel = bfin_read_PLL_DIV() & 0xf;
if (0 == ssel) {
printk(KERN_WARNING "Invalid System Clock\n");
ssel = 1;
}
cached_sclk = get_vco() / ssel;
return cached_sclk;
#endif
}
EXPORT_SYMBOL(get_sclk);
unsigned long sclk_to_usecs(unsigned long sclk)
{
u64 tmp = USEC_PER_SEC * (u64)sclk;
do_div(tmp, get_sclk());
return tmp;
}
EXPORT_SYMBOL(sclk_to_usecs);
unsigned long usecs_to_sclk(unsigned long usecs)
{
u64 tmp = get_sclk() * (u64)usecs;
do_div(tmp, USEC_PER_SEC);
return tmp;
}
EXPORT_SYMBOL(usecs_to_sclk);
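/*
* Worked example (illustrative): at a 133MHz SCLK, sclk_to_usecs(1330)
* returns 10 and usecs_to_sclk(10) returns 1330.
*/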
/*
* Get CPU information for use by the procfs.
*/
static int show_cpuinfo(struct seq_file *m, void *v)
{
char *cpu, *mmu, *fpu, *vendor, *cache;
uint32_t revid;
int cpu_num = *(unsigned int *)v;
u_long sclk, cclk;
u_int icache_size = BFIN_ICACHESIZE / 1024, dcache_size = 0, dsup_banks = 0;
struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu_num);
cpu = CPU;
mmu = "none";
fpu = "none";
revid = bfin_revid();
sclk = get_sclk();
cclk = get_cclk();
switch (bfin_read_CHIPID() & CHIPID_MANUFACTURE) {
case 0xca:
vendor = "Analog Devices";
break;
default:
vendor = "unknown";
break;
}
seq_printf(m, "processor\t: %d\n" "vendor_id\t: %s\n", cpu_num, vendor);
if (CPUID == bfin_cpuid())
seq_printf(m, "cpu family\t: 0x%04x\n", CPUID);
else
seq_printf(m, "cpu family\t: Compiled for:0x%04x, running on:0x%04x\n",
CPUID, bfin_cpuid());
seq_printf(m, "model name\t: ADSP-%s %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n"
"stepping\t: %d ",
cpu, cclk/1000000, sclk/1000000,
#ifdef CONFIG_MPU
"mpu on",
#else
"mpu off",
#endif
revid);
if (bfin_revid() != bfin_compiled_revid()) {
if (bfin_compiled_revid() == -1)
seq_printf(m, "(Compiled for Rev none)");
else if (bfin_compiled_revid() == 0xffff)
seq_printf(m, "(Compiled for Rev any)");
else
seq_printf(m, "(Compiled for Rev %d)", bfin_compiled_revid());
}
seq_printf(m, "\ncpu MHz\t\t: %lu.%06lu/%lu.%06lu\n",
cclk/1000000, cclk%1000000,
sclk/1000000, sclk%1000000);
seq_printf(m, "bogomips\t: %lu.%02lu\n"
"Calibration\t: %lu loops\n",
(loops_per_jiffy * HZ) / 500000,
((loops_per_jiffy * HZ) / 5000) % 100,
(loops_per_jiffy * HZ));
/* Check cache configuration */
switch (cpudata->dmemctl & (1 << DMC0_P | 1 << DMC1_P)) {
case ACACHE_BSRAM:
cache = "dbank-A/B\t: cache/sram";
dcache_size = 16;
dsup_banks = 1;
break;
case ACACHE_BCACHE:
cache = "dbank-A/B\t: cache/cache";
dcache_size = 32;
dsup_banks = 2;
break;
case ASRAM_BSRAM:
cache = "dbank-A/B\t: sram/sram";
dcache_size = 0;
dsup_banks = 0;
break;
default:
cache = "unknown";
dcache_size = 0;
dsup_banks = 0;
break;
}
/* Is it turned on? */
if ((cpudata->dmemctl & (ENDCPLB | DMC_ENABLE)) != (ENDCPLB | DMC_ENABLE))
dcache_size = 0;
if ((cpudata->imemctl & (IMC | ENICPLB)) != (IMC | ENICPLB))
icache_size = 0;
seq_printf(m, "cache size\t: %d KB(L1 icache) "
"%d KB(L1 dcache) %d KB(L2 cache)\n",
icache_size, dcache_size, 0);
seq_printf(m, "%s\n", cache);
seq_printf(m, "external memory\t: "
#if defined(CONFIG_BFIN_EXTMEM_ICACHEABLE)
"cacheable"
#else
"uncacheable"
#endif
" in instruction cache\n");
seq_printf(m, "external memory\t: "
#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK)
"cacheable (write-back)"
#elif defined(CONFIG_BFIN_EXTMEM_WRITETHROUGH)
"cacheable (write-through)"
#else
"uncacheable"
#endif
" in data cache\n");
if (icache_size)
seq_printf(m, "icache setup\t: %d Sub-banks/%d Ways, %d Lines/Way\n",
BFIN_ISUBBANKS, BFIN_IWAYS, BFIN_ILINES);
else
seq_printf(m, "icache setup\t: off\n");
seq_printf(m,
"dcache setup\t: %d Super-banks/%d Sub-banks/%d Ways, %d Lines/Way\n",
dsup_banks, BFIN_DSUBBANKS, BFIN_DWAYS,
BFIN_DLINES);
#ifdef __ARCH_SYNC_CORE_DCACHE
seq_printf(m, "dcache flushes\t: %lu\n", dcache_invld_count[cpu_num]);
#endif
#ifdef __ARCH_SYNC_CORE_ICACHE
seq_printf(m, "icache flushes\t: %lu\n", icache_invld_count[cpu_num]);
#endif
seq_printf(m, "\n");
if (cpu_num != num_possible_cpus() - 1)
return 0;
if (L2_LENGTH) {
seq_printf(m, "L2 SRAM\t\t: %dKB\n", L2_LENGTH/0x400);
seq_printf(m, "L2 SRAM\t\t: "
#if defined(CONFIG_BFIN_L2_ICACHEABLE)
"cacheable"
#else
"uncacheable"
#endif
" in instruction cache\n");
seq_printf(m, "L2 SRAM\t\t: "
#if defined(CONFIG_BFIN_L2_WRITEBACK)
"cacheable (write-back)"
#elif defined(CONFIG_BFIN_L2_WRITETHROUGH)
"cacheable (write-through)"
#else
"uncacheable"
#endif
" in data cache\n");
}
seq_printf(m, "board name\t: %s\n", bfin_board_name);
seq_printf(m, "board memory\t: %ld kB (0x%08lx -> 0x%08lx)\n",
physical_mem_end >> 10, 0ul, physical_mem_end);
seq_printf(m, "kernel memory\t: %d kB (0x%08lx -> 0x%08lx)\n",
((int)memory_end - (int)_rambase) >> 10,
_rambase, memory_end);
return 0;
}
static void *c_start(struct seq_file *m, loff_t *pos)
{
if (*pos == 0)
*pos = cpumask_first(cpu_online_mask);
if (*pos >= num_online_cpus())
return NULL;
return pos;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
*pos = cpumask_next(*pos, cpu_online_mask);
return c_start(m, pos);
}
static void c_stop(struct seq_file *m, void *v)
{
}
const struct seq_operations cpuinfo_op = {
.start = c_start,
.next = c_next,
.stop = c_stop,
.show = show_cpuinfo,
};
void __init cmdline_init(const char *r0)
{
early_shadow_stamp();
if (r0)
strlcpy(command_line, r0, COMMAND_LINE_SIZE);
}
| gpl-2.0 |
GameTheory-/android_kernel_lge_l1m | drivers/parisc/sba_iommu.c | 4535 | 59220 | /*
** System Bus Adapter (SBA) I/O MMU manager
**
** (c) Copyright 2000-2004 Grant Grundler <grundler @ parisc-linux x org>
** (c) Copyright 2004 Naresh Kumar Inna <knaresh at india x hp x com>
** (c) Copyright 2000-2004 Hewlett-Packard Company
**
** Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code)
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
**
**
** This module initializes the IOC (I/O Controller) found on B1000/C3000/
** J5000/J7000/N-class/L-class machines and their successors.
**
** FIXME: add DMA hint support programming in both sba and lba modules.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/dma.h> /* for DMA_CHUNK_SIZE */
#include <asm/hardware.h> /* for register_parisc_driver() stuff */
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <asm/ropes.h>
#include <asm/mckinley.h> /* for proc_mckinley_root */
#include <asm/runway.h> /* for proc_runway_root */
#include <asm/pdc.h> /* for PDC_MODEL_* */
#include <asm/pdcpat.h> /* for is_pdc_pat() */
#include <asm/parisc-device.h>
#define MODULE_NAME "SBA"
/*
** The number of debug flags is a clue - this code is fragile.
** Don't even think about messing with it unless you have
** plenty of 710's to sacrifice to the computer gods. :^)
*/
#undef DEBUG_SBA_INIT
#undef DEBUG_SBA_RUN
#undef DEBUG_SBA_RUN_SG
#undef DEBUG_SBA_RESOURCE
#undef ASSERT_PDIR_SANITY
#undef DEBUG_LARGE_SG_ENTRIES
#undef DEBUG_DMB_TRAP
#ifdef DEBUG_SBA_INIT
#define DBG_INIT(x...) printk(x)
#else
#define DBG_INIT(x...)
#endif
#ifdef DEBUG_SBA_RUN
#define DBG_RUN(x...) printk(x)
#else
#define DBG_RUN(x...)
#endif
#ifdef DEBUG_SBA_RUN_SG
#define DBG_RUN_SG(x...) printk(x)
#else
#define DBG_RUN_SG(x...)
#endif
#ifdef DEBUG_SBA_RESOURCE
#define DBG_RES(x...) printk(x)
#else
#define DBG_RES(x...)
#endif
#define SBA_INLINE __inline__
#define DEFAULT_DMA_HINT_REG 0
struct sba_device *sba_list;
EXPORT_SYMBOL_GPL(sba_list);
static unsigned long ioc_needs_fdc = 0;
/* global count of IOMMUs in the system */
static unsigned int global_ioc_cnt = 0;
/* PA8700 (Piranha 2.2) bug workaround */
static unsigned long piranha_bad_128k = 0;
/* Looks nice and keeps the compiler happy */
#define SBA_DEV(d) ((struct sba_device *) (d))
#ifdef CONFIG_AGP_PARISC
#define SBA_AGP_SUPPORT
#endif /*CONFIG_AGP_PARISC*/
#ifdef SBA_AGP_SUPPORT
static int sba_reserve_agpgart = 1;
module_param(sba_reserve_agpgart, int, 0444);
MODULE_PARM_DESC(sba_reserve_agpgart, "Reserve half of IO pdir as AGPGART");
#endif
/************************************
** SBA register read and write support
**
** BE WARNED: register writes are posted.
** (i.e. follow writes which must reach HW with a read)
**
** Superdome (in particular, REO) allows only 64-bit CSR accesses.
*/
#define READ_REG32(addr) readl(addr)
#define READ_REG64(addr) readq(addr)
#define WRITE_REG32(val, addr) writel((val), (addr))
#define WRITE_REG64(val, addr) writeq((val), (addr))
#ifdef CONFIG_64BIT
#define READ_REG(addr) READ_REG64(addr)
#define WRITE_REG(value, addr) WRITE_REG64(value, addr)
#else
#define READ_REG(addr) READ_REG32(addr)
#define WRITE_REG(value, addr) WRITE_REG32(value, addr)
#endif
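/*
* Illustrative idiom (an assumption about usage, not a quote from this
* file): since writes are posted, code that must know a write reached
* the IOC follows it with a read of the same register:
*
* WRITE_REG(val, ioc_hpa + IOC_IBASE); // ioc_hpa is hypothetical here
* READ_REG(ioc_hpa + IOC_IBASE); // forces the posted write out
*/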
#ifdef DEBUG_SBA_INIT
/* NOTE: When CONFIG_64BIT isn't defined, READ_REG64() is two 32-bit reads */
/**
* sba_dump_ranges - debugging only - print ranges assigned to this IOA
* @hpa: base address of the sba
*
* Print the MMIO and IO Port address ranges forwarded by an Astro/Ike/RIO
* IO Adapter (aka Bus Converter).
*/
static void
sba_dump_ranges(void __iomem *hpa)
{
DBG_INIT("SBA at 0x%p\n", hpa);
DBG_INIT("IOS_DIST_BASE : %Lx\n", READ_REG64(hpa+IOS_DIST_BASE));
DBG_INIT("IOS_DIST_MASK : %Lx\n", READ_REG64(hpa+IOS_DIST_MASK));
DBG_INIT("IOS_DIST_ROUTE : %Lx\n", READ_REG64(hpa+IOS_DIST_ROUTE));
DBG_INIT("\n");
DBG_INIT("IOS_DIRECT_BASE : %Lx\n", READ_REG64(hpa+IOS_DIRECT_BASE));
DBG_INIT("IOS_DIRECT_MASK : %Lx\n", READ_REG64(hpa+IOS_DIRECT_MASK));
DBG_INIT("IOS_DIRECT_ROUTE: %Lx\n", READ_REG64(hpa+IOS_DIRECT_ROUTE));
}
/**
* sba_dump_tlb - debugging only - print IOMMU operating parameters
* @hpa: base address of the IOMMU
*
* Print the size/location of the IO MMU PDIR.
*/
static void sba_dump_tlb(void __iomem *hpa)
{
DBG_INIT("IO TLB at 0x%p\n", hpa);
DBG_INIT("IOC_IBASE : 0x%Lx\n", READ_REG64(hpa+IOC_IBASE));
DBG_INIT("IOC_IMASK : 0x%Lx\n", READ_REG64(hpa+IOC_IMASK));
DBG_INIT("IOC_TCNFG : 0x%Lx\n", READ_REG64(hpa+IOC_TCNFG));
DBG_INIT("IOC_PDIR_BASE: 0x%Lx\n", READ_REG64(hpa+IOC_PDIR_BASE));
DBG_INIT("\n");
}
#else
#define sba_dump_ranges(x)
#define sba_dump_tlb(x)
#endif /* DEBUG_SBA_INIT */
#ifdef ASSERT_PDIR_SANITY
/**
* sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry
* @ioc: IO MMU structure which owns the pdir we are interested in.
* @msg: text to print on the output line.
* @pide: pdir index.
*
* Print one entry of the IO MMU PDIR in human readable form.
*/
static void
sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
{
/* start printing from lowest pde in rval */
u64 *ptr = &(ioc->pdir_base[pide & ~(BITS_PER_LONG - 1)]);
unsigned long *rptr = (unsigned long *) &(ioc->res_map[(pide >>3) & ~(sizeof(unsigned long) - 1)]);
uint rcnt;
printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n",
msg,
rptr, pide & (BITS_PER_LONG - 1), *rptr);
rcnt = 0;
while (rcnt < BITS_PER_LONG) {
printk(KERN_DEBUG "%s %2d %p %016Lx\n",
(rcnt == (pide & (BITS_PER_LONG - 1)))
? " -->" : " ",
rcnt, ptr, *ptr );
rcnt++;
ptr++;
}
printk(KERN_DEBUG "%s", msg);
}
/**
* sba_check_pdir - debugging only - consistency checker
* @ioc: IO MMU structure which owns the pdir we are interested in.
* @msg: text to print on the output line.
*
* Verify the resource map and pdir state is consistent
*/
static int
sba_check_pdir(struct ioc *ioc, char *msg)
{
u32 *rptr_end = (u32 *) &(ioc->res_map[ioc->res_size]);
u32 *rptr = (u32 *) ioc->res_map; /* resource map ptr */
u64 *pptr = ioc->pdir_base; /* pdir ptr */
uint pide = 0;
while (rptr < rptr_end) {
u32 rval = *rptr;
int rcnt = 32; /* number of bits we might check */
while (rcnt) {
/* Get last byte and highest bit from that */
u32 pde = ((u32) (((char *)pptr)[7])) << 24;
if ((rval ^ pde) & 0x80000000)
{
/*
** BUMMER! -- res_map != pdir --
** Dump rval and matching pdir entries
*/
sba_dump_pdir_entry(ioc, msg, pide);
return(1);
}
rcnt--;
rval <<= 1; /* try the next bit */
pptr++;
pide++;
}
rptr++; /* look at next word of res_map */
}
/* It'd be nice if we always got here :^) */
return 0;
}
/**
* sba_dump_sg - debugging only - print Scatter-Gather list
* @ioc: IO MMU structure which owns the pdir we are interested in.
* @startsg: head of the SG list
* @nents: number of entries in SG list
*
* print the SG list so we can verify it's correct by hand.
*/
static void
sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
{
while (nents-- > 0) {
printk(KERN_DEBUG " %d : %08lx/%05x %p/%05x\n",
nents,
(unsigned long) sg_dma_address(startsg),
sg_dma_len(startsg),
sg_virt_addr(startsg), startsg->length);
startsg++;
}
}
#endif /* ASSERT_PDIR_SANITY */
/**************************************************************
*
* I/O Pdir Resource Management
*
* Bits set in the resource map are in use.
* Each bit can represent a number of pages.
* LSbs represent lower addresses (IOVA's).
*
***************************************************************/
#define PAGES_PER_RANGE 1 /* could increase this to 4 or 8 if needed */
/* Convert from IOVP to IOVA and vice versa. */
#ifdef ZX1_SUPPORT
/* Pluto (aka ZX1) boxes need to set or clear the ibase bits appropriately */
#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((ioc->ibase) | (iovp) | (offset))
#define SBA_IOVP(ioc,iova) ((iova) & (ioc)->iovp_mask)
#else
/* only support Astro and ancestors. Saves a few cycles in key places */
#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((iovp) | (offset))
#define SBA_IOVP(ioc,iova) (iova)
#endif
#define PDIR_INDEX(iovp) ((iovp)>>IOVP_SHIFT)
#define RESMAP_MASK(n) (~0UL << (BITS_PER_LONG - (n)))
#define RESMAP_IDX_MASK (sizeof(unsigned long) - 1)
static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
unsigned int bitshiftcnt)
{
return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
+ bitshiftcnt;
}
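/*
** Worked example of the resource-map arithmetic above (illustrative
** sketch only, not part of the driver; assumes BITS_PER_LONG == 64,
** IOVP_SHIFT == 12 and a hypothetical pide of 70).
*/
#if 0
static void example_resmap_math(struct ioc *ioc)
{
/* pide 70 lives in bit 6 of the second 64-bit word of res_map */
unsigned long *word = (unsigned long *)
&ioc->res_map[(70 >> 3) & ~(sizeof(unsigned long) - 1)];
unsigned int bit = 70 & (BITS_PER_LONG - 1); /* == 6 */
/* ptr_to_pide() inverts the mapping: byte offset * 8 + bit */
BUG_ON(ptr_to_pide(ioc, word, bit) != 70);
/* pide 70 corresponds to IOVP 70 << IOVP_SHIFT == 0x46000 */
BUG_ON(PDIR_INDEX(70UL << IOVP_SHIFT) != 70);
}
#endif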
/**
* sba_search_bitmap - find free space in IO PDIR resource bitmap
* @ioc: IO MMU structure which owns the pdir we are interested in.
* @bits_wanted: number of entries we need.
*
* Find consecutive free bits in resource bitmap.
* Each bit represents one entry in the IO Pdir.
* Cool perf optimization: search for log2(size) bits at a time.
*/
static SBA_INLINE unsigned long
sba_search_bitmap(struct ioc *ioc, struct device *dev,
unsigned long bits_wanted)
{
unsigned long *res_ptr = ioc->res_hint;
unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
unsigned long pide = ~0UL, tpide;
unsigned long boundary_size;
unsigned long shift;
int ret;
boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
1ULL << IOVP_SHIFT) >> IOVP_SHIFT;
#if defined(ZX1_SUPPORT)
BUG_ON(ioc->ibase & ~IOVP_MASK);
shift = ioc->ibase >> IOVP_SHIFT;
#else
shift = 0;
#endif
if (bits_wanted > (BITS_PER_LONG/2)) {
/* Search word at a time - no mask needed */
for(; res_ptr < res_end; ++res_ptr) {
tpide = ptr_to_pide(ioc, res_ptr, 0);
ret = iommu_is_span_boundary(tpide, bits_wanted,
shift,
boundary_size);
if ((*res_ptr == 0) && !ret) {
*res_ptr = RESMAP_MASK(bits_wanted);
pide = tpide;
break;
}
}
/* point to the next word on next pass */
res_ptr++;
ioc->res_bitshift = 0;
} else {
/*
** Search the resource bit map on well-aligned values.
** "o" is the alignment.
** We need the alignment to invalidate I/O TLB using
** SBA HW features in the unmap path.
*/
unsigned long o = 1 << get_order(bits_wanted << PAGE_SHIFT);
uint bitshiftcnt = ALIGN(ioc->res_bitshift, o);
unsigned long mask;
if (bitshiftcnt >= BITS_PER_LONG) {
bitshiftcnt = 0;
res_ptr++;
}
mask = RESMAP_MASK(bits_wanted) >> bitshiftcnt;
DBG_RES("%s() o %ld %p", __func__, o, res_ptr);
while(res_ptr < res_end)
{
DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr);
WARN_ON(mask == 0);
tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
ret = iommu_is_span_boundary(tpide, bits_wanted,
shift,
boundary_size);
if ((((*res_ptr) & mask) == 0) && !ret) {
*res_ptr |= mask; /* mark resources busy! */
pide = tpide;
break;
}
mask >>= o;
bitshiftcnt += o;
if (mask == 0) {
mask = RESMAP_MASK(bits_wanted);
bitshiftcnt=0;
res_ptr++;
}
}
/* look in the same word on the next pass */
ioc->res_bitshift = bitshiftcnt + bits_wanted;
}
/* wrapped ? */
if (res_end <= res_ptr) {
ioc->res_hint = (unsigned long *) ioc->res_map;
ioc->res_bitshift = 0;
} else {
ioc->res_hint = res_ptr;
}
return (pide);
}
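/*
** Sketch of the aligned-mask walk performed above (illustrative only,
** hypothetical stand-alone code). For bits_wanted == 4 the alignment
** "o" is 4, so a 4-bit mask is tried at bit offsets 0, 4, 8, ...
** within each resource word before advancing to the next word.
*/
#if 0
static void example_mask_walk(void)
{
unsigned long mask = RESMAP_MASK(4); /* top 4 bits of the word */
unsigned long o = 1 << get_order(4 << PAGE_SHIFT); /* == 4 */
unsigned int off;
for (off = 0; mask != 0; off += o, mask >>= o)
printk(KERN_DEBUG "try mask 0x%lx at bit %u\n", mask, off);
}
#endif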
/**
* sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap
* @ioc: IO MMU structure which owns the pdir we are interested in.
* @size: number of bytes to create a mapping for
*
* Given a size, find consecutive unmarked bits, then mark them in the
* resource bit map.
*/
static int
sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
{
unsigned int pages_needed = size >> IOVP_SHIFT;
#ifdef SBA_COLLECT_STATS
unsigned long cr_start = mfctl(16);
#endif
unsigned long pide;
pide = sba_search_bitmap(ioc, dev, pages_needed);
if (pide >= (ioc->res_size << 3)) {
pide = sba_search_bitmap(ioc, dev, pages_needed);
if (pide >= (ioc->res_size << 3))
panic("%s: I/O MMU @ %p is out of mapping resources\n",
__FILE__, ioc->ioc_hpa);
}
#ifdef ASSERT_PDIR_SANITY
/* verify the first enable bit is clear */
if(0x00 != ((u8 *) ioc->pdir_base)[pide*sizeof(u64) + 7]) {
sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
}
#endif
DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
__func__, size, pages_needed, pide,
(uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
ioc->res_bitshift );
#ifdef SBA_COLLECT_STATS
{
unsigned long cr_end = mfctl(16);
unsigned long tmp = cr_end - cr_start;
/* check for roll over */
cr_start = (cr_end < cr_start) ? -(tmp) : (tmp);
}
ioc->avg_search[ioc->avg_idx++] = cr_start;
ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;
ioc->used_pages += pages_needed;
#endif
return (pide);
}
/**
* sba_free_range - unmark bits in IO PDIR resource bitmap
* @ioc: IO MMU structure which owns the pdir we are interested in.
* @iova: IO virtual address which was previously allocated.
* @size: number of bytes the mapping covers
*
* clear bits in the ioc's resource map
*/
static SBA_INLINE void
sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
{
unsigned long iovp = SBA_IOVP(ioc, iova);
unsigned int pide = PDIR_INDEX(iovp);
unsigned int ridx = pide >> 3; /* convert bit to byte address */
unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);
int bits_not_wanted = size >> IOVP_SHIFT;
/* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */
unsigned long m = RESMAP_MASK(bits_not_wanted) >> (pide & (BITS_PER_LONG - 1));
DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n",
__func__, (uint) iova, size,
bits_not_wanted, m, pide, res_ptr, *res_ptr);
#ifdef SBA_COLLECT_STATS
ioc->used_pages -= bits_not_wanted;
#endif
*res_ptr &= ~m;
}
/**************************************************************
*
* "Dynamic DMA Mapping" support (aka "Coherent I/O")
*
***************************************************************/
#ifdef SBA_HINT_SUPPORT
#define SBA_DMA_HINT(ioc, val) ((val) << (ioc)->hint_shift_pdir)
#endif
typedef unsigned long space_t;
#define KERNEL_SPACE 0
/**
* sba_io_pdir_entry - fill in one IO PDIR entry
* @pdir_ptr: pointer to IO PDIR entry
* @sid: process Space ID - currently only support KERNEL_SPACE
* @vba: Virtual CPU address of buffer to map
* @hint: DMA hint set to use for this mapping
*
* SBA Mapping Routine
*
* Given a virtual address (vba, arg2) and space id, (sid, arg1)
* sba_io_pdir_entry() loads the I/O PDIR entry pointed to by
* pdir_ptr (arg0).
* Using the bass-ackwards HP bit numbering, each IO Pdir entry
* for Astro/Ike looks like:
*
*
* 0 19 51 55 63
* +-+---------------------+----------------------------------+----+--------+
* |V| U | PPN[43:12] | U | VI |
* +-+---------------------+----------------------------------+----+--------+
*
* Pluto is basically identical, but supports fewer physical address bits:
*
* 0 23 51 55 63
* +-+------------------------+-------------------------------+----+--------+
* |V| U | PPN[39:12] | U | VI |
* +-+------------------------+-------------------------------+----+--------+
*
* V == Valid Bit (Most Significant Bit is bit 0)
* U == Unused
* PPN == Physical Page Number
* VI == Virtual Index (aka Coherent Index)
*
* LPA instruction output is put into PPN field.
* LCI (Load Coherence Index) instruction provides the "VI" bits.
*
* We pre-swap the bytes since PCX-W is Big Endian and the
* IOMMU uses little endian for the pdir.
*/
static void SBA_INLINE
sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
unsigned long hint)
{
u64 pa; /* physical address */
register unsigned ci; /* coherent index */
pa = virt_to_phys(vba);
pa &= IOVP_MASK;
mtsp(sid,1);
asm("lci 0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
pa |= (ci >> 12) & 0xff; /* move CI (8 bits) into lowest byte */
pa |= SBA_PDIR_VALID_BIT; /* set "valid" bit */
*pdir_ptr = cpu_to_le64(pa); /* swap and store into I/O Pdir */
/*
* If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit set
* (bit #61, big endian), we have to flush and sync every time
* IO-PDIR is changed in Ike/Astro.
*/
if (ioc_needs_fdc)
asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));
}
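/*
** Sketch of how one entry comes together (illustrative only; the
** pa/ci values below are hypothetical). It mirrors the Astro/Ike
** layout pictured above: valid bit in the MSB, PPN in the middle,
** coherent-index byte at the bottom, stored little-endian.
*/
#if 0
static u64 example_pdir_entry(void)
{
u64 pa = 0x12345000ULL & IOVP_MASK; /* page-aligned physical addr */
unsigned ci = 0xab000; /* hypothetical LCI result */
pa |= (ci >> 12) & 0xff; /* CI (8 bits) into lowest byte */
pa |= SBA_PDIR_VALID_BIT; /* bit 0 (HP numbering) == valid */
return cpu_to_le64(pa); /* IOMMU expects little endian */
}
#endif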
/**
* sba_mark_invalid - invalidate one or more IO PDIR entries
* @ioc: IO MMU structure which owns the pdir we are interested in.
* @iova: IO Virtual Address mapped earlier
* @byte_cnt: number of bytes this mapping covers.
*
* Mark the IO PDIR entry(ies) as Invalid and invalidate the
* corresponding IO TLB entry. The Ike PCOM (Purge Command Register)
* is used to purge stale entries in the IO TLB when unmapping entries.
*
* The PCOM register supports purging of multiple pages, with a minimum
* of 1 page and a maximum of 2GB. Hardware requires the address be
* aligned to the size of the range being purged. The size of the range
* must be a power of 2. The "Cool perf optimization" in the
* allocation routine helps keep that true.
*/
static SBA_INLINE void
sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
u32 iovp = (u32) SBA_IOVP(ioc,iova);
u64 *pdir_ptr = &ioc->pdir_base[PDIR_INDEX(iovp)];
#ifdef ASSERT_PDIR_SANITY
/* Assert first pdir entry is set.
**
** Even though this is a big-endian machine, the entries
** in the iopdir are little endian. That's why we look at
** the byte at +7 instead of at +0.
*/
if (0x80 != (((u8 *) pdir_ptr)[7])) {
sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp));
}
#endif
if (byte_cnt > IOVP_SIZE)
{
#if 0
unsigned long entries_per_cacheline = ioc_needs_fdc ?
L1_CACHE_ALIGN(((unsigned long) pdir_ptr))
- (unsigned long) pdir_ptr
: 262144;
#endif
/* set "size" field for PCOM */
iovp |= get_order(byte_cnt) + PAGE_SHIFT;
do {
/* clear I/O Pdir entry "valid" bit first */
((u8 *) pdir_ptr)[7] = 0;
if (ioc_needs_fdc) {
asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));
#if 0
entries_per_cacheline = L1_CACHE_SHIFT - 3;
#endif
}
pdir_ptr++;
byte_cnt -= IOVP_SIZE;
} while (byte_cnt > IOVP_SIZE);
} else
iovp |= IOVP_SHIFT; /* set "size" field for PCOM */
/*
** clear I/O PDIR entry "valid" bit.
** We have to R/M/W the cacheline regardless how much of the
** pdir entry that we clobber.
** The rest of the entry would be useful for debugging if we
** could dump core on HPMC.
*/
((u8 *) pdir_ptr)[7] = 0;
if (ioc_needs_fdc)
asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));
WRITE_REG( SBA_IOVA(ioc, iovp, 0, 0), ioc->ioc_hpa+IOC_PCOM);
}
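/*
** Sketch of the PCOM encoding used above (illustrative only, with
** hypothetical values; assumes 4K IO pages). The purge address must
** be aligned to the range size and the low bits carry log2(size).
*/
#if 0
static void example_pcom_purge(struct ioc *ioc)
{
u32 iovp = 0x40000; /* aligned to the 16KB range below */
size_t byte_cnt = 4 * IOVP_SIZE; /* 16KB == 4 IO pages */
iovp |= get_order(byte_cnt) + PAGE_SHIFT; /* log2(16K) == 14 */
WRITE_REG(SBA_IOVA(ioc, iovp, 0, 0), ioc->ioc_hpa + IOC_PCOM);
}
#endif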
/**
* sba_dma_supported - PCI driver can query DMA support
* @dev: instance of PCI owned by the driver that's asking
* @mask: number of address bits this PCI device can handle
*
* See Documentation/PCI/PCI-DMA-mapping.txt
*/
static int sba_dma_supported( struct device *dev, u64 mask)
{
struct ioc *ioc;
if (dev == NULL) {
printk(KERN_ERR MODULE_NAME ": EISA/ISA/et al not supported\n");
BUG();
return(0);
}
/* Documentation/PCI/PCI-DMA-mapping.txt tells drivers to try 64-bit
* first, then fall back to 32-bit if that fails.
* We are just "encouraging" 32-bit DMA masks here since we can
* never allow IOMMU bypass unless we add special support for ZX1.
*/
if (mask > ~0U)
return 0;
ioc = GET_IOC(dev);
/*
* check if mask is >= the current max IO Virt Address.
* The max IO Virt address will *always* be < 30 bits.
*/
return((int)(mask >= (ioc->ibase - 1 +
(ioc->pdir_size / sizeof(u64) * IOVP_SIZE) )));
}
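/*
** Driver-side sketch of the mask negotiation described above
** (illustrative only; the probe function and device are
** hypothetical). A 64-bit mask is refused here, so well-behaved
** drivers fall back to 32-bit:
*/
#if 0
static int example_set_dma_mask(struct pci_dev *pdev)
{
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
return -EIO; /* neither mask accepted */
return 0;
}
#endif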
/**
* sba_map_single - map one buffer and return IOVA for DMA
* @dev: instance of PCI owned by the driver that's asking.
* @addr: driver buffer to map.
* @size: number of bytes to map in driver buffer.
* @direction: R/W or both.
*
* See Documentation/PCI/PCI-DMA-mapping.txt
*/
static dma_addr_t
sba_map_single(struct device *dev, void *addr, size_t size,
enum dma_data_direction direction)
{
struct ioc *ioc;
unsigned long flags;
dma_addr_t iovp;
dma_addr_t offset;
u64 *pdir_start;
int pide;
ioc = GET_IOC(dev);
/* save offset bits */
offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;
/* round up to nearest IOVP_SIZE */
size = (size + offset + ~IOVP_MASK) & IOVP_MASK;
spin_lock_irqsave(&ioc->res_lock, flags);
#ifdef ASSERT_PDIR_SANITY
sba_check_pdir(ioc,"Check before sba_map_single()");
#endif
#ifdef SBA_COLLECT_STATS
ioc->msingle_calls++;
ioc->msingle_pages += size >> IOVP_SHIFT;
#endif
pide = sba_alloc_range(ioc, dev, size);
iovp = (dma_addr_t) pide << IOVP_SHIFT;
DBG_RUN("%s() 0x%p -> 0x%lx\n",
__func__, addr, (long) iovp | offset);
pdir_start = &(ioc->pdir_base[pide]);
while (size > 0) {
sba_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long) addr, 0);
DBG_RUN(" pdir 0x%p %02x%02x%02x%02x%02x%02x%02x%02x\n",
pdir_start,
(u8) (((u8 *) pdir_start)[7]),
(u8) (((u8 *) pdir_start)[6]),
(u8) (((u8 *) pdir_start)[5]),
(u8) (((u8 *) pdir_start)[4]),
(u8) (((u8 *) pdir_start)[3]),
(u8) (((u8 *) pdir_start)[2]),
(u8) (((u8 *) pdir_start)[1]),
(u8) (((u8 *) pdir_start)[0])
);
addr += IOVP_SIZE;
size -= IOVP_SIZE;
pdir_start++;
}
/* force FDC ops in io_pdir_entry() to be visible to IOMMU */
if (ioc_needs_fdc)
asm volatile("sync" : : );
#ifdef ASSERT_PDIR_SANITY
sba_check_pdir(ioc,"Check after sba_map_single()");
#endif
spin_unlock_irqrestore(&ioc->res_lock, flags);
/* form complete address */
return SBA_IOVA(ioc, iovp, offset, DEFAULT_DMA_HINT_REG);
}
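/*
** Usage sketch (illustrative only; dev/buf/len are hypothetical).
** Drivers never call sba_map_single() directly; they use the generic
** DMA API, which dispatches through the hppa_dma_ops table that is
** registered further below.
*/
#if 0
static void example_dma_map(struct device *dev, void *buf, size_t len)
{
dma_addr_t iova = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
/* ... program the device with iova, wait for completion ... */
dma_unmap_single(dev, iova, len, DMA_TO_DEVICE);
}
#endif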
/**
* sba_unmap_single - unmap one IOVA and free resources
* @dev: instance of PCI owned by the driver that's asking.
* @iova: IOVA of driver buffer previously mapped.
* @size: number of bytes mapped in driver buffer.
* @direction: R/W or both.
*
* See Documentation/PCI/PCI-DMA-mapping.txt
*/
static void
sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
enum dma_data_direction direction)
{
struct ioc *ioc;
#if DELAYED_RESOURCE_CNT > 0
struct sba_dma_pair *d;
#endif
unsigned long flags;
dma_addr_t offset;
DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);
ioc = GET_IOC(dev);
offset = iova & ~IOVP_MASK;
iova ^= offset; /* clear offset bits */
size += offset;
size = ALIGN(size, IOVP_SIZE);
spin_lock_irqsave(&ioc->res_lock, flags);
#ifdef SBA_COLLECT_STATS
ioc->usingle_calls++;
ioc->usingle_pages += size >> IOVP_SHIFT;
#endif
sba_mark_invalid(ioc, iova, size);
#if DELAYED_RESOURCE_CNT > 0
/* Delaying when we re-use an IO Pdir entry reduces the number
* of MMIO reads needed to flush writes to the PCOM register.
*/
d = &(ioc->saved[ioc->saved_cnt]);
d->iova = iova;
d->size = size;
if (++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT) {
int cnt = ioc->saved_cnt;
while (cnt--) {
sba_free_range(ioc, d->iova, d->size);
d--;
}
ioc->saved_cnt = 0;
READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
}
#else /* DELAYED_RESOURCE_CNT == 0 */
sba_free_range(ioc, iova, size);
/* If fdc's were issued, force fdc's to be visible now */
if (ioc_needs_fdc)
asm volatile("sync" : : );
READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
#endif /* DELAYED_RESOURCE_CNT == 0 */
spin_unlock_irqrestore(&ioc->res_lock, flags);
/* XXX REVISIT for 2.5 Linux - need syncdma for zero-copy support.
** For Astro based systems this isn't a big deal WRT performance.
** As long as 2.4 kernels copyin/copyout data from/to userspace,
** we don't need the syncdma. The issue here is I/O MMU cachelines
** are *not* coherent in all cases. May be hwrev dependent.
** Need to investigate more.
asm volatile("syncdma");
*/
}
/**
* sba_alloc_consistent - allocate/map shared mem for DMA
* @hwdev: instance of PCI owned by the driver that's asking.
* @size: number of bytes mapped in driver buffer.
* @dma_handle: IOVA of new buffer.
*
* See Documentation/PCI/PCI-DMA-mapping.txt
*/
static void *sba_alloc_consistent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp)
{
void *ret;
if (!hwdev) {
/* only support PCI */
*dma_handle = 0;
return NULL;
}
ret = (void *) __get_free_pages(gfp, get_order(size));
if (ret) {
memset(ret, 0, size);
*dma_handle = sba_map_single(hwdev, ret, size, 0);
}
return ret;
}
/**
* sba_free_consistent - free/unmap shared mem for DMA
* @hwdev: instance of PCI owned by the driver that's asking.
* @size: number of bytes mapped in driver buffer.
* @vaddr: virtual address of the "consistent" buffer.
* @dma_handle: IO virtual address of the "consistent" buffer.
*
* See Documentation/PCI/PCI-DMA-mapping.txt
*/
static void
sba_free_consistent(struct device *hwdev, size_t size, void *vaddr,
dma_addr_t dma_handle)
{
sba_unmap_single(hwdev, dma_handle, size, 0);
free_pages((unsigned long) vaddr, get_order(size));
}
/*
** Since 0 is a valid pdir_base index value, can't use that
** to determine if a value is valid or not. Use a flag to indicate
** the SG list entry contains a valid pdir index.
*/
#define PIDE_FLAG 0x80000000UL
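/*
** Sketch of the PIDE_FLAG convention (illustrative only, hypothetical
** helper). During SG coalescing, sg_dma_address() temporarily holds
** a tagged pdir index rather than a finished bus address, so that a
** legitimate index of 0 remains distinguishable from "no index".
*/
#if 0
static void example_pide_flag(struct scatterlist *sg, unsigned long pide)
{
sg_dma_address(sg) = PIDE_FLAG | (pide << IOVP_SHIFT);
if (sg_dma_address(sg) & PIDE_FLAG)
sg_dma_address(sg) &= ~PIDE_FLAG; /* strip tag before use */
}
#endif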
#ifdef SBA_COLLECT_STATS
#define IOMMU_MAP_STATS
#endif
#include "iommu-helpers.h"
#ifdef DEBUG_LARGE_SG_ENTRIES
int dump_run_sg = 0;
#endif
/**
* sba_map_sg - map Scatter/Gather list
* @dev: instance of PCI owned by the driver that's asking.
* @sglist: array of buffer/length pairs
* @nents: number of entries in list
* @direction: R/W or both.
*
* See Documentation/PCI/PCI-DMA-mapping.txt
*/
static int
sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
enum dma_data_direction direction)
{
struct ioc *ioc;
int coalesced, filled = 0;
unsigned long flags;
DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
ioc = GET_IOC(dev);
/* Fast path single entry scatterlists. */
if (nents == 1) {
sg_dma_address(sglist) = sba_map_single(dev,
(void *)sg_virt_addr(sglist),
sglist->length, direction);
sg_dma_len(sglist) = sglist->length;
return 1;
}
spin_lock_irqsave(&ioc->res_lock, flags);
#ifdef ASSERT_PDIR_SANITY
if (sba_check_pdir(ioc,"Check before sba_map_sg()"))
{
sba_dump_sg(ioc, sglist, nents);
panic("Check before sba_map_sg()");
}
#endif
#ifdef SBA_COLLECT_STATS
ioc->msg_calls++;
#endif
/*
** First coalesce the chunks and allocate I/O pdir space
**
** If this is one DMA stream, we can properly map using the
** correct virtual address associated with each DMA page.
** w/o this association, we wouldn't have coherent DMA!
** Access to the virtual address is what forces a two pass algorithm.
*/
coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents, sba_alloc_range);
/*
** Program the I/O Pdir
**
** map the virtual addresses to the I/O Pdir
** o dma_address will contain the pdir index
** o dma_len will contain the number of bytes to map
** o address contains the virtual address.
*/
filled = iommu_fill_pdir(ioc, sglist, nents, 0, sba_io_pdir_entry);
/* force FDC ops in io_pdir_entry() to be visible to IOMMU */
if (ioc_needs_fdc)
asm volatile("sync" : : );
#ifdef ASSERT_PDIR_SANITY
if (sba_check_pdir(ioc,"Check after sba_map_sg()"))
{
sba_dump_sg(ioc, sglist, nents);
panic("Check after sba_map_sg()\n");
}
#endif
spin_unlock_irqrestore(&ioc->res_lock, flags);
DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);
return filled;
}
/**
* sba_unmap_sg - unmap Scatter/Gather list
* @dev: instance of PCI owned by the driver that's asking.
* @sglist: array of buffer/length pairs
* @nents: number of entries in list
* @direction: R/W or both.
*
* See Documentation/PCI/PCI-DMA-mapping.txt
*/
static void
sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
enum dma_data_direction direction)
{
struct ioc *ioc;
#ifdef ASSERT_PDIR_SANITY
unsigned long flags;
#endif
DBG_RUN_SG("%s() START %d entries, %p,%x\n",
__func__, nents, sg_virt_addr(sglist), sglist->length);
ioc = GET_IOC(dev);
#ifdef SBA_COLLECT_STATS
ioc->usg_calls++;
#endif
#ifdef ASSERT_PDIR_SANITY
spin_lock_irqsave(&ioc->res_lock, flags);
sba_check_pdir(ioc,"Check before sba_unmap_sg()");
spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif
while (sg_dma_len(sglist) && nents--) {
sba_unmap_single(dev, sg_dma_address(sglist), sg_dma_len(sglist), direction);
#ifdef SBA_COLLECT_STATS
ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT;
ioc->usingle_calls--; /* kluge since call is unmap_sg() */
#endif
++sglist;
}
DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
#ifdef ASSERT_PDIR_SANITY
spin_lock_irqsave(&ioc->res_lock, flags);
sba_check_pdir(ioc,"Check after sba_unmap_sg()");
spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif
}
static struct hppa_dma_ops sba_ops = {
.dma_supported = sba_dma_supported,
.alloc_consistent = sba_alloc_consistent,
.alloc_noncoherent = sba_alloc_consistent,
.free_consistent = sba_free_consistent,
.map_single = sba_map_single,
.unmap_single = sba_unmap_single,
.map_sg = sba_map_sg,
.unmap_sg = sba_unmap_sg,
.dma_sync_single_for_cpu = NULL,
.dma_sync_single_for_device = NULL,
.dma_sync_sg_for_cpu = NULL,
.dma_sync_sg_for_device = NULL,
};
/**************************************************************************
**
** SBA PAT PDC support
**
** o call pdc_pat_cell_module()
** o store ranges in PCI "resource" structures
**
**************************************************************************/
static void
sba_get_pat_resources(struct sba_device *sba_dev)
{
#if 0
/*
** TODO/REVISIT/FIXME: support for directed ranges requires calls to
** PAT PDC to program the SBA/LBA directed range registers...this
** burden may fall on the LBA code since it directly supports the
** PCI subsystem. It's not clear yet. - ggg
*/
PAT_MOD(mod)->mod_info.mod_pages = PAT_GET_MOD_PAGES(temp);
FIXME : ???
PAT_MOD(mod)->mod_info.dvi = PAT_GET_DVI(temp);
Tells where the dvi bits are located in the address.
PAT_MOD(mod)->mod_info.ioc = PAT_GET_IOC(temp);
FIXME : ???
#endif
}
/**************************************************************
*
* Initialization and claim
*
***************************************************************/
#define PIRANHA_ADDR_MASK 0x00160000UL /* bit 17,18,20 */
#define PIRANHA_ADDR_VAL 0x00060000UL /* bit 17,18 on */
static void *
sba_alloc_pdir(unsigned int pdir_size)
{
unsigned long pdir_base;
unsigned long pdir_order = get_order(pdir_size);
pdir_base = __get_free_pages(GFP_KERNEL, pdir_order);
if (NULL == (void *) pdir_base) {
panic("%s() could not allocate I/O Page Table\n",
__func__);
}
/* If this is not PA8700 (PCX-W2)
** OR newer than ver 2.2
** OR in a system that doesn't need VINDEX bits from SBA,
**
** then we aren't exposed to the HW bug.
*/
if ( ((boot_cpu_data.pdc.cpuid >> 5) & 0x7f) != 0x13
|| (boot_cpu_data.pdc.versions > 0x202)
|| (boot_cpu_data.pdc.capabilities & 0x08L) )
return (void *) pdir_base;
/*
* PA8700 (PCX-W2, aka piranha) silent data corruption fix
*
* An interaction between PA8700 CPU (Ver 2.2 or older) and
* Ike/Astro can cause silent data corruption. This is only
* a problem if the I/O PDIR is located in memory such that
* (little-endian) bits 17 and 18 are on and bit 20 is off.
*
* Since the max IO Pdir size is 2MB, by cleverly allocating the
* right physical address, we can either avoid (IOPDIR <= 1MB)
* or minimize (2MB IO Pdir) the problem if we restrict the
* IO Pdir to a maximum size of 2MB-128K (1920K).
*
* Because we always allocate 2^N sized IO pdirs, either of the
* "bad" regions will be the last 128K if at all. That's easy
* to test for.
*
*/
if (pdir_order <= (19-12)) {
if (((virt_to_phys(pdir_base)+pdir_size-1) & PIRANHA_ADDR_MASK) == PIRANHA_ADDR_VAL) {
/* allocate a new one on 512k alignment */
unsigned long new_pdir = __get_free_pages(GFP_KERNEL, (19-12));
/* release original */
free_pages(pdir_base, pdir_order);
pdir_base = new_pdir;
/* release excess */
while (pdir_order < (19-12)) {
new_pdir += pdir_size;
free_pages(new_pdir, pdir_order);
pdir_order +=1;
pdir_size <<=1;
}
}
} else {
/*
** 1MB or 2MB Pdir
** Needs to be aligned on an "odd" 1MB boundary.
*/
unsigned long new_pdir = __get_free_pages(GFP_KERNEL, pdir_order+1); /* 2 or 4MB */
/* release original */
free_pages( pdir_base, pdir_order);
/* release first 1MB */
free_pages(new_pdir, 20-12);
pdir_base = new_pdir + 1024*1024;
if (pdir_order > (20-12)) {
/*
** 2MB Pdir.
**
** Flag tells init_bitmap() to mark bad 128k as used
** and to reduce the size by 128k.
*/
piranha_bad_128k = 1;
new_pdir += 3*1024*1024;
/* release last 1MB */
free_pages(new_pdir, 20-12);
/* release unusable 128KB */
free_pages(new_pdir - 128*1024 , 17-12);
pdir_size -= 128*1024;
}
}
memset((void *) pdir_base, 0, pdir_size);
return (void *) pdir_base;
}
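/*
** Worked example of the Piranha address test above (illustrative
** only; the address is hypothetical). An IO pdir whose last byte
** falls where bits 17 and 18 are set but bit 20 is clear matches the
** bad region:
*/
#if 0
static int example_piranha_check(unsigned long phys_end)
{
/* e.g. phys_end == 0x00060000: bits 17+18 set, bit 20 clear */
return (phys_end & PIRANHA_ADDR_MASK) == PIRANHA_ADDR_VAL;
}
#endif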
struct ibase_data_struct {
struct ioc *ioc;
int ioc_num;
};
static int setup_ibase_imask_callback(struct device *dev, void *data)
{
/* lba_set_iregs() is in drivers/parisc/lba_pci.c */
extern void lba_set_iregs(struct parisc_device *, u32, u32);
struct parisc_device *lba = to_parisc_device(dev);
struct ibase_data_struct *ibd = data;
int rope_num = (lba->hpa.start >> 13) & 0xf;
if (rope_num >> 3 == ibd->ioc_num)
lba_set_iregs(lba, ibd->ioc->ibase, ibd->ioc->imask);
return 0;
}
/* setup Mercury or Elroy IBASE/IMASK registers. */
static void
setup_ibase_imask(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
{
struct ibase_data_struct ibase_data = {
.ioc = ioc,
.ioc_num = ioc_num,
};
device_for_each_child(&sba->dev, &ibase_data,
setup_ibase_imask_callback);
}
#ifdef SBA_AGP_SUPPORT
static int
sba_ioc_find_quicksilver(struct device *dev, void *data)
{
int *agp_found = data;
struct parisc_device *lba = to_parisc_device(dev);
if (IS_QUICKSILVER(lba))
*agp_found = 1;
return 0;
}
#endif
static void
sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
{
u32 iova_space_mask;
u32 iova_space_size;
int iov_order, tcnfg;
#ifdef SBA_AGP_SUPPORT
int agp_found = 0;
#endif
/*
** Firmware programs the base and size of a "safe IOVA space"
** (one that doesn't overlap memory or LMMIO space) in the
** IBASE and IMASK registers.
*/
ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE);
iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1;
if ((ioc->ibase < 0xfed00000UL) && ((ioc->ibase + iova_space_size) > 0xfee00000UL)) {
printk("WARNING: IOV space overlaps local config and interrupt message, truncating\n");
iova_space_size /= 2;
}
/*
** iov_order is always based on a 1GB IOVA space since we want to
** turn on the other half for AGP GART.
*/
iov_order = get_order(iova_space_size >> (IOVP_SHIFT - PAGE_SHIFT));
ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64);
DBG_INIT("%s() hpa 0x%p IOV %dMB (%d bits)\n",
__func__, ioc->ioc_hpa, iova_space_size >> 20,
iov_order + PAGE_SHIFT);
ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
get_order(ioc->pdir_size));
if (!ioc->pdir_base)
panic("Couldn't allocate I/O Page Table\n");
memset(ioc->pdir_base, 0, ioc->pdir_size);
DBG_INIT("%s() pdir %p size %x\n",
__func__, ioc->pdir_base, ioc->pdir_size);
#ifdef SBA_HINT_SUPPORT
ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));
DBG_INIT(" hint_shift_pdir %x hint_mask_pdir %lx\n",
ioc->hint_shift_pdir, ioc->hint_mask_pdir);
#endif
WARN_ON((((unsigned long) ioc->pdir_base) & PAGE_MASK) != (unsigned long) ioc->pdir_base);
WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
/* build IMASK for IOC and Elroy */
iova_space_mask = 0xffffffff;
iova_space_mask <<= (iov_order + PAGE_SHIFT);
ioc->imask = iova_space_mask;
#ifdef ZX1_SUPPORT
ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
#endif
sba_dump_tlb(ioc->ioc_hpa);
setup_ibase_imask(sba, ioc, ioc_num);
WRITE_REG(ioc->imask, ioc->ioc_hpa + IOC_IMASK);
#ifdef CONFIG_64BIT
/*
** Setting the upper bits makes checking for bypass addresses
** a little faster later on.
*/
ioc->imask |= 0xFFFFFFFF00000000UL;
#endif
/* Set I/O PDIR Page size to system page size */
switch (PAGE_SHIFT) {
case 12: tcnfg = 0; break; /* 4K */
case 13: tcnfg = 1; break; /* 8K */
case 14: tcnfg = 2; break; /* 16K */
case 16: tcnfg = 3; break; /* 64K */
default:
panic(__FILE__ "Unsupported system page size %d",
1 << PAGE_SHIFT);
break;
}
WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);
/*
** Program the IOC's ibase and enable IOVA translation
** Bit zero == enable bit.
*/
WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);
/*
** Clear I/O TLB of any possible entries.
** (Yes. This is a bit paranoid...but so what)
*/
WRITE_REG(ioc->ibase | 31, ioc->ioc_hpa + IOC_PCOM);
#ifdef SBA_AGP_SUPPORT
/*
** If an AGP device is present, only use half of the IOV space
** for PCI DMA. Unfortunately we can't know ahead of time
** whether GART support will actually be used, for now we
** can just key on any AGP device found in the system.
** We program the next pdir index after we stop w/ a key for
** the GART code to handshake on.
*/
device_for_each_child(&sba->dev, &agp_found, sba_ioc_find_quicksilver);
if (agp_found && sba_reserve_agpgart) {
printk(KERN_INFO "%s: reserving %dMb of IOVA space for agpgart\n",
__func__, (iova_space_size/2) >> 20);
ioc->pdir_size /= 2;
ioc->pdir_base[PDIR_INDEX(iova_space_size/2)] = SBA_AGPGART_COOKIE;
}
#endif /*SBA_AGP_SUPPORT*/
}
static void
sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
{
u32 iova_space_size, iova_space_mask;
unsigned int pdir_size, iov_order;
/*
** Determine IOVA Space size from memory size.
**
** Ideally, PCI drivers would register the maximum number
** of DMA they can have outstanding for each device they
** own. Next best thing would be to guess how much DMA
** can be outstanding based on PCI Class/sub-class. Both
** methods still require some "extra" to support PCI
** Hot-Plug/Removal of PCI cards. (aka PCI OLARD).
**
** While we have a 32-bit "IOVA" space, the top two bits are used
** for DMA hints - ergo only 30 bits max.
*/
iova_space_size = (u32) (totalram_pages/global_ioc_cnt);
/* limit IOVA space size to 1MB-1GB */
if (iova_space_size < (1 << (20 - PAGE_SHIFT))) {
iova_space_size = 1 << (20 - PAGE_SHIFT);
}
else if (iova_space_size > (1 << (30 - PAGE_SHIFT))) {
iova_space_size = 1 << (30 - PAGE_SHIFT);
}
/*
** iova space must be log2() in size.
** thus, pdir/res_map will also be log2().
** PIRANHA BUG: Exception is when IO Pdir is 2MB (gets reduced)
*/
iov_order = get_order(iova_space_size << PAGE_SHIFT);
/* iova_space_size is now bytes, not pages */
iova_space_size = 1 << (iov_order + PAGE_SHIFT);
ioc->pdir_size = pdir_size = (iova_space_size/IOVP_SIZE) * sizeof(u64);
DBG_INIT("%s() hpa 0x%lx mem %ldMB IOV %dMB (%d bits)\n",
__func__,
ioc->ioc_hpa,
(unsigned long) totalram_pages >> (20 - PAGE_SHIFT),
iova_space_size>>20,
iov_order + PAGE_SHIFT);
ioc->pdir_base = sba_alloc_pdir(pdir_size);
DBG_INIT("%s() pdir %p size %x\n",
__func__, ioc->pdir_base, pdir_size);
#ifdef SBA_HINT_SUPPORT
/* FIXME : DMA HINTs not used */
ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));
DBG_INIT(" hint_shift_pdir %x hint_mask_pdir %lx\n",
ioc->hint_shift_pdir, ioc->hint_mask_pdir);
#endif
WRITE_REG64(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
/* build IMASK for IOC and Elroy */
iova_space_mask = 0xffffffff;
iova_space_mask <<= (iov_order + PAGE_SHIFT);
/*
** On C3000 w/512MB mem, HP-UX 10.20 reports:
** ibase=0, imask=0xFE000000, size=0x2000000.
*/
ioc->ibase = 0;
ioc->imask = iova_space_mask; /* save it */
#ifdef ZX1_SUPPORT
ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
#endif
DBG_INIT("%s() IOV base 0x%lx mask 0x%0lx\n",
__func__, ioc->ibase, ioc->imask);
/*
** FIXME: Hint registers are programmed with default hint
** values during boot, so hints should be sane even if we
** can't reprogram them the way drivers want.
*/
setup_ibase_imask(sba, ioc, ioc_num);
/*
** Program the IOC's ibase and enable IOVA translation
*/
WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa+IOC_IBASE);
WRITE_REG(ioc->imask, ioc->ioc_hpa+IOC_IMASK);
/* Set I/O PDIR Page size to 4K */
WRITE_REG(0, ioc->ioc_hpa+IOC_TCNFG);
/*
** Clear I/O TLB of any possible entries.
** (Yes. This is a bit paranoid...but so what)
*/
WRITE_REG(0 | 31, ioc->ioc_hpa+IOC_PCOM);
ioc->ibase = 0; /* used by SBA_IOVA and related macros */
DBG_INIT("%s() DONE\n", __func__);
}
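/*
** Worked example of the sizing above (illustrative only; assumes 4K
** pages, 512MB of RAM and a single IOC). totalram_pages is then
** 0x20000, which is already a power of two, so the IOVA space stays
** 512MB and the pdir costs 1MB (512MB / 4K pages * 8 bytes/entry).
*/
#if 0
static void example_iova_sizing(void)
{
u32 iova_space_size = 0x20000; /* 512MB expressed in 4K pages */
unsigned int iov_order = get_order(iova_space_size << PAGE_SHIFT);
iova_space_size = 1 << (iov_order + PAGE_SHIFT); /* bytes: 512MB */
printk(KERN_DEBUG "pdir would be %lu bytes\n",
(unsigned long) (iova_space_size / IOVP_SIZE) * sizeof(u64));
}
#endif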
/**************************************************************************
**
** SBA initialization code (HW and SW)
**
** o identify SBA chip itself
** o initialize SBA chip modes (HardFail)
** o FIXME: initialize DMA hints for reasonable defaults
**
**************************************************************************/
static void __iomem *ioc_remap(struct sba_device *sba_dev, unsigned int offset)
{
return ioremap_nocache(sba_dev->dev->hpa.start + offset, SBA_FUNC_SIZE);
}
static void sba_hw_init(struct sba_device *sba_dev)
{
int i;
int num_ioc;
u64 ioc_ctl;
if (!is_pdc_pat()) {
/* Shutdown the USB controller on Astro-based workstations.
** Once we reprogram the IOMMU, the next DMA performed by
** USB will HPMC the box. USB is only enabled if a
** keyboard is present and found.
**
** With serial console, j6k v5.0 firmware says:
** mem_kbd hpa 0xfee003f8 sba 0x0 pad 0x0 cl_class 0x7
**
** FIXME: Using GFX+USB console at power up but direct
** linux to serial console is still broken.
** USB could generate DMA so we must reset USB.
** The proper sequence would be:
** o block console output
** o reset USB device
** o reprogram serial port
** o unblock console output
*/
if (PAGE0->mem_kbd.cl_class == CL_KEYBD) {
pdc_io_reset_devices();
}
}
#if 0
printk("sba_hw_init(): mem_boot 0x%x 0x%x 0x%x 0x%x\n", PAGE0->mem_boot.hpa,
PAGE0->mem_boot.spa, PAGE0->mem_boot.pad, PAGE0->mem_boot.cl_class);
/*
** Need to deal with DMA from LAN.
** Maybe use page zero boot device as a handle to talk
** to PDC about which device to shutdown.
**
** Netbooting, j6k v5.0 firmware says:
** mem_boot hpa 0xf4008000 sba 0x0 pad 0x0 cl_class 0x1002
** ARGH! invalid class.
*/
if ((PAGE0->mem_boot.cl_class != CL_RANDOM)
&& (PAGE0->mem_boot.cl_class != CL_SEQU)) {
pdc_io_reset();
}
#endif
if (!IS_PLUTO(sba_dev->dev)) {
ioc_ctl = READ_REG(sba_dev->sba_hpa+IOC_CTRL);
DBG_INIT("%s() hpa 0x%lx ioc_ctl 0x%Lx ->",
__func__, sba_dev->sba_hpa, ioc_ctl);
ioc_ctl &= ~(IOC_CTRL_RM | IOC_CTRL_NC | IOC_CTRL_CE);
ioc_ctl |= IOC_CTRL_DD | IOC_CTRL_D4 | IOC_CTRL_TC;
/* j6700 v1.6 firmware sets 0x294f */
/* A500 firmware sets 0x4d */
WRITE_REG(ioc_ctl, sba_dev->sba_hpa+IOC_CTRL);
#ifdef DEBUG_SBA_INIT
ioc_ctl = READ_REG64(sba_dev->sba_hpa+IOC_CTRL);
DBG_INIT(" 0x%Lx\n", ioc_ctl);
#endif
} /* if !PLUTO */
if (IS_ASTRO(sba_dev->dev)) {
int err;
sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, ASTRO_IOC_OFFSET);
num_ioc = 1;
sba_dev->chip_resv.name = "Astro Intr Ack";
sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfef00000UL;
sba_dev->chip_resv.end = PCI_F_EXTEND | (0xff000000UL - 1) ;
err = request_resource(&iomem_resource, &(sba_dev->chip_resv));
BUG_ON(err < 0);
} else if (IS_PLUTO(sba_dev->dev)) {
int err;
sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, PLUTO_IOC_OFFSET);
num_ioc = 1;
sba_dev->chip_resv.name = "Pluto Intr/PIOP/VGA";
sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfee00000UL;
sba_dev->chip_resv.end = PCI_F_EXTEND | (0xff200000UL - 1);
err = request_resource(&iomem_resource, &(sba_dev->chip_resv));
WARN_ON(err < 0);
sba_dev->iommu_resv.name = "IOVA Space";
sba_dev->iommu_resv.start = 0x40000000UL;
sba_dev->iommu_resv.end = 0x50000000UL - 1;
err = request_resource(&iomem_resource, &(sba_dev->iommu_resv));
WARN_ON(err < 0);
} else {
/* IKE, REO */
sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(0));
sba_dev->ioc[1].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(1));
num_ioc = 2;
/* TODO - LOOKUP Ike/Stretch chipset mem map */
}
/* XXX: What about Reo Grande? */
sba_dev->num_ioc = num_ioc;
for (i = 0; i < num_ioc; i++) {
void __iomem *ioc_hpa = sba_dev->ioc[i].ioc_hpa;
unsigned int j;
for (j=0; j < sizeof(u64) * ROPES_PER_IOC; j+=sizeof(u64)) {
/*
* Clear ROPE(N)_CONFIG AO bit.
* Disables "NT Ordering" (~= !"Relaxed Ordering")
* Overrides bit 1 in DMA Hint Sets.
* Improves netperf UDP_STREAM by ~10% for bcm5701.
*/
if (IS_PLUTO(sba_dev->dev)) {
void __iomem *rope_cfg;
unsigned long cfg_val;
rope_cfg = ioc_hpa + IOC_ROPE0_CFG + j;
cfg_val = READ_REG(rope_cfg);
cfg_val &= ~IOC_ROPE_AO;
WRITE_REG(cfg_val, rope_cfg);
}
/*
** Make sure the box crashes on rope errors.
*/
WRITE_REG(HF_ENABLE, ioc_hpa + ROPE0_CTL + j);
}
/* flush out the last writes */
READ_REG(sba_dev->ioc[i].ioc_hpa + ROPE7_CTL);
DBG_INIT(" ioc[%d] ROPE_CFG 0x%Lx ROPE_DBG 0x%Lx\n",
i,
READ_REG(sba_dev->ioc[i].ioc_hpa + 0x40),
READ_REG(sba_dev->ioc[i].ioc_hpa + 0x50)
);
DBG_INIT(" STATUS_CONTROL 0x%Lx FLUSH_CTRL 0x%Lx\n",
READ_REG(sba_dev->ioc[i].ioc_hpa + 0x108),
READ_REG(sba_dev->ioc[i].ioc_hpa + 0x400)
);
if (IS_PLUTO(sba_dev->dev)) {
sba_ioc_init_pluto(sba_dev->dev, &(sba_dev->ioc[i]), i);
} else {
sba_ioc_init(sba_dev->dev, &(sba_dev->ioc[i]), i);
}
}
}
static void
sba_common_init(struct sba_device *sba_dev)
{
int i;
/* add this one to the head of the list (order doesn't matter)
** This will be useful for debugging - especially if we get coredumps
*/
sba_dev->next = sba_list;
sba_list = sba_dev;
for(i=0; i< sba_dev->num_ioc; i++) {
int res_size;
#ifdef DEBUG_DMB_TRAP
extern void iterate_pages(unsigned long , unsigned long ,
void (*)(pte_t * , unsigned long),
unsigned long );
void set_data_memory_break(pte_t * , unsigned long);
#endif
/* resource map size dictated by pdir_size */
res_size = sba_dev->ioc[i].pdir_size/sizeof(u64); /* entries */
/* Second part of PIRANHA BUG */
if (piranha_bad_128k) {
res_size -= (128*1024)/sizeof(u64);
}
res_size >>= 3; /* convert bit count to byte count */
DBG_INIT("%s() res_size 0x%x\n",
__func__, res_size);
sba_dev->ioc[i].res_size = res_size;
sba_dev->ioc[i].res_map = (char *) __get_free_pages(GFP_KERNEL, get_order(res_size));
#ifdef DEBUG_DMB_TRAP
iterate_pages( sba_dev->ioc[i].res_map, res_size,
set_data_memory_break, 0);
#endif
if (NULL == sba_dev->ioc[i].res_map)
{
panic("%s:%s() could not allocate resource map\n",
__FILE__, __func__ );
}
memset(sba_dev->ioc[i].res_map, 0, res_size);
/* next available IOVP - circular search */
sba_dev->ioc[i].res_hint = (unsigned long *)
&(sba_dev->ioc[i].res_map[L1_CACHE_BYTES]);
#ifdef ASSERT_PDIR_SANITY
/* Mark first bit busy - ie no IOVA 0 */
sba_dev->ioc[i].res_map[0] = 0x80;
sba_dev->ioc[i].pdir_base[0] = 0xeeffc0addbba0080ULL;
#endif
/* Third (and last) part of PIRANHA BUG */
if (piranha_bad_128k) {
/* region from +1408K to +1536K is unusable. */
int idx_start = (1408*1024/sizeof(u64)) >> 3;
int idx_end = (1536*1024/sizeof(u64)) >> 3;
long *p_start = (long *) &(sba_dev->ioc[i].res_map[idx_start]);
long *p_end = (long *) &(sba_dev->ioc[i].res_map[idx_end]);
/* mark that part of the io pdir busy */
while (p_start < p_end)
*p_start++ = -1;
}
#ifdef DEBUG_DMB_TRAP
iterate_pages( sba_dev->ioc[i].res_map, res_size,
set_data_memory_break, 0);
iterate_pages( sba_dev->ioc[i].pdir_base, sba_dev->ioc[i].pdir_size,
set_data_memory_break, 0);
#endif
DBG_INIT("%s() %d res_map %x %p\n",
__func__, i, res_size, sba_dev->ioc[i].res_map);
}
spin_lock_init(&sba_dev->sba_lock);
ioc_needs_fdc = boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC;
#ifdef DEBUG_SBA_INIT
/*
* If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit set
* (bit #61, big endian), we have to flush and sync every time
* IO-PDIR is changed in Ike/Astro.
*/
if (ioc_needs_fdc) {
printk(KERN_INFO MODULE_NAME " FDC/SYNC required.\n");
} else {
printk(KERN_INFO MODULE_NAME " IOC has cache coherent PDIR.\n");
}
#endif
}
#ifdef CONFIG_PROC_FS
static int sba_proc_info(struct seq_file *m, void *p)
{
struct sba_device *sba_dev = sba_list;
struct ioc *ioc = &sba_dev->ioc[0]; /* FIXME: Multi-IOC support! */
int total_pages = (int) (ioc->res_size << 3); /* 8 bits per byte */
#ifdef SBA_COLLECT_STATS
unsigned long avg = 0, min, max;
#endif
int i, len = 0;
len += seq_printf(m, "%s rev %d.%d\n",
sba_dev->name,
(sba_dev->hw_rev & 0x7) + 1,
(sba_dev->hw_rev & 0x18) >> 3
);
len += seq_printf(m, "IO PDIR size : %d bytes (%d entries)\n",
(int) ((ioc->res_size << 3) * sizeof(u64)), /* 8 bits/byte */
total_pages);
len += seq_printf(m, "Resource bitmap : %d bytes (%d pages)\n",
ioc->res_size, ioc->res_size << 3); /* 8 bits per byte */
len += seq_printf(m, "LMMIO_BASE/MASK/ROUTE %08x %08x %08x\n",
READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_BASE),
READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_MASK),
READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_ROUTE)
);
for (i=0; i<4; i++)
len += seq_printf(m, "DIR%d_BASE/MASK/ROUTE %08x %08x %08x\n", i,
READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_BASE + i*0x18),
READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_MASK + i*0x18),
READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_ROUTE + i*0x18)
);
#ifdef SBA_COLLECT_STATS
len += seq_printf(m, "IO PDIR entries : %ld free %ld used (%d%%)\n",
total_pages - ioc->used_pages, ioc->used_pages,
(int) (ioc->used_pages * 100 / total_pages));
min = max = ioc->avg_search[0];
for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
avg += ioc->avg_search[i];
if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
}
avg /= SBA_SEARCH_SAMPLE;
len += seq_printf(m, " Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
min, avg, max);
len += seq_printf(m, "pci_map_single(): %12ld calls %12ld pages (avg %d/1000)\n",
ioc->msingle_calls, ioc->msingle_pages,
(int) ((ioc->msingle_pages * 1000)/ioc->msingle_calls));
/* KLUGE - unmap_sg calls unmap_single for each mapped page */
min = ioc->usingle_calls;
max = ioc->usingle_pages - ioc->usg_pages;
len += seq_printf(m, "pci_unmap_single: %12ld calls %12ld pages (avg %d/1000)\n",
min, max, (int) ((max * 1000)/min));
len += seq_printf(m, "pci_map_sg() : %12ld calls %12ld pages (avg %d/1000)\n",
ioc->msg_calls, ioc->msg_pages,
(int) ((ioc->msg_pages * 1000)/ioc->msg_calls));
len += seq_printf(m, "pci_unmap_sg() : %12ld calls %12ld pages (avg %d/1000)\n",
ioc->usg_calls, ioc->usg_pages,
(int) ((ioc->usg_pages * 1000)/ioc->usg_calls));
#endif
return 0;
}
static int
sba_proc_open(struct inode *i, struct file *f)
{
return single_open(f, &sba_proc_info, NULL);
}
static const struct file_operations sba_proc_fops = {
.owner = THIS_MODULE,
.open = sba_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int
sba_proc_bitmap_info(struct seq_file *m, void *p)
{
struct sba_device *sba_dev = sba_list;
struct ioc *ioc = &sba_dev->ioc[0]; /* FIXME: Multi-IOC support! */
unsigned int *res_ptr = (unsigned int *)ioc->res_map;
int i, len = 0;
for (i = 0; i < (ioc->res_size/sizeof(unsigned int)); ++i, ++res_ptr) {
if ((i & 7) == 0)
len += seq_printf(m, "\n ");
len += seq_printf(m, " %08x", *res_ptr);
}
len += seq_printf(m, "\n");
return 0;
}
static int
sba_proc_bitmap_open(struct inode *i, struct file *f)
{
return single_open(f, &sba_proc_bitmap_info, NULL);
}
static const struct file_operations sba_proc_bitmap_fops = {
.owner = THIS_MODULE,
.open = sba_proc_bitmap_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
#endif /* CONFIG_PROC_FS */
static struct parisc_device_id sba_tbl[] = {
{ HPHW_IOA, HVERSION_REV_ANY_ID, ASTRO_RUNWAY_PORT, 0xb },
{ HPHW_BCPORT, HVERSION_REV_ANY_ID, IKE_MERCED_PORT, 0xc },
{ HPHW_BCPORT, HVERSION_REV_ANY_ID, REO_MERCED_PORT, 0xc },
{ HPHW_BCPORT, HVERSION_REV_ANY_ID, REOG_MERCED_PORT, 0xc },
{ HPHW_IOA, HVERSION_REV_ANY_ID, PLUTO_MCKINLEY_PORT, 0xc },
{ 0, }
};
static int sba_driver_callback(struct parisc_device *);
static struct parisc_driver sba_driver = {
.name = MODULE_NAME,
.id_table = sba_tbl,
.probe = sba_driver_callback,
};
/*
** Determine if sba should claim this chip (return 0) or not (nonzero).
** If so, initialize the chip and tell other partners in crime they
** have work to do.
*/
static int sba_driver_callback(struct parisc_device *dev)
{
struct sba_device *sba_dev;
u32 func_class;
int i;
char *version;
void __iomem *sba_addr = ioremap_nocache(dev->hpa.start, SBA_FUNC_SIZE);
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *root;
#endif
sba_dump_ranges(sba_addr);
/* Read HW Rev First */
func_class = READ_REG(sba_addr + SBA_FCLASS);
if (IS_ASTRO(dev)) {
unsigned long fclass;
static char astro_rev[]="Astro ?.?";
/* Astro is broken...Read HW Rev First */
fclass = READ_REG(sba_addr);
astro_rev[6] = '1' + (char) (fclass & 0x7);
astro_rev[8] = '0' + (char) ((fclass & 0x18) >> 3);
version = astro_rev;
} else if (IS_IKE(dev)) {
static char ike_rev[] = "Ike rev ?";
ike_rev[8] = '0' + (char) (func_class & 0xff);
version = ike_rev;
} else if (IS_PLUTO(dev)) {
static char pluto_rev[]="Pluto ?.?";
pluto_rev[6] = '0' + (char) ((func_class & 0xf0) >> 4);
pluto_rev[8] = '0' + (char) (func_class & 0x0f);
version = pluto_rev;
} else {
static char reo_rev[] = "REO rev ?";
reo_rev[8] = '0' + (char) (func_class & 0xff);
version = reo_rev;
}
if (!global_ioc_cnt) {
global_ioc_cnt = count_parisc_driver(&sba_driver);
/* Astro and Pluto have one IOC per SBA */
if (!IS_ASTRO(dev) && !IS_PLUTO(dev))
global_ioc_cnt *= 2;
}
printk(KERN_INFO "%s found %s at 0x%llx\n",
MODULE_NAME, version, (unsigned long long)dev->hpa.start);
sba_dev = kzalloc(sizeof(struct sba_device), GFP_KERNEL);
if (!sba_dev) {
printk(KERN_ERR MODULE_NAME " - couldn't alloc sba_device\n");
return -ENOMEM;
}
parisc_set_drvdata(dev, sba_dev);
for(i=0; i<MAX_IOC; i++)
spin_lock_init(&(sba_dev->ioc[i].res_lock));
sba_dev->dev = dev;
sba_dev->hw_rev = func_class;
sba_dev->name = dev->name;
sba_dev->sba_hpa = sba_addr;
sba_get_pat_resources(sba_dev);
sba_hw_init(sba_dev);
sba_common_init(sba_dev);
hppa_dma_ops = &sba_ops;
#ifdef CONFIG_PROC_FS
switch (dev->id.hversion) {
case PLUTO_MCKINLEY_PORT:
root = proc_mckinley_root;
break;
case ASTRO_RUNWAY_PORT:
case IKE_MERCED_PORT:
default:
root = proc_runway_root;
break;
}
proc_create("sba_iommu", 0, root, &sba_proc_fops);
proc_create("sba_iommu-bitmap", 0, root, &sba_proc_bitmap_fops);
#endif
parisc_has_iommu();
return 0;
}
/*
** One time initialization to let the world know the SBA was found.
** This is the only routine which is NOT static.
** Must be called exactly once before pci_init().
*/
void __init sba_init(void)
{
register_parisc_driver(&sba_driver);
}
/**
* sba_get_iommu - Assign the iommu pointer for the pci bus controller.
* @pci_hba: The parisc device.
*
* Returns the appropriate IOMMU data for the given parisc PCI controller.
* This is cached and used later for PCI DMA Mapping.
*/
void * sba_get_iommu(struct parisc_device *pci_hba)
{
struct parisc_device *sba_dev = parisc_parent(pci_hba);
struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
char t = sba_dev->id.hw_type;
int iocnum = (pci_hba->hw_path >> 3); /* rope # */
WARN_ON((t != HPHW_IOA) && (t != HPHW_BCPORT));
return &(sba->ioc[iocnum]);
}
/**
* sba_directed_lmmio - return first directed LMMIO range routed to rope
* @pci_hba: The parisc device.
* @r: resource PCI host controller wants start/end fields assigned.
*
* For the given parisc PCI controller, determine if any direct ranges
* are routed down the corresponding rope.
*/
void sba_directed_lmmio(struct parisc_device *pci_hba, struct resource *r)
{
struct parisc_device *sba_dev = parisc_parent(pci_hba);
struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
char t = sba_dev->id.hw_type;
int i;
int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1)); /* rope # */
BUG_ON((t!=HPHW_IOA) && (t!=HPHW_BCPORT));
r->start = r->end = 0;
/* Astro has 4 directed ranges. Not sure about Ike/Pluto/et al */
for (i=0; i<4; i++) {
int base, size;
void __iomem *reg = sba->sba_hpa + i*0x18;
base = READ_REG32(reg + LMMIO_DIRECT0_BASE);
if ((base & 1) == 0)
continue; /* not enabled */
size = READ_REG32(reg + LMMIO_DIRECT0_ROUTE);
if ((size & (ROPES_PER_IOC-1)) != rope)
continue; /* directed down different rope */
r->start = (base & ~1UL) | PCI_F_EXTEND;
size = ~ READ_REG32(reg + LMMIO_DIRECT0_MASK);
r->end = r->start + size;
r->flags = IORESOURCE_MEM;
}
}
/**
* sba_distributed_lmmio - return portion of distributed LMMIO range
* @pci_hba: The parisc device.
* @r: resource PCI host controller wants start/end fields assigned.
*
* For the given parisc PCI controller, return portion of distributed LMMIO
* range. The distributed LMMIO is always present and it's just a question
* of the base address and size of the range.
*/
void sba_distributed_lmmio(struct parisc_device *pci_hba, struct resource *r )
{
struct parisc_device *sba_dev = parisc_parent(pci_hba);
struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
char t = sba_dev->id.hw_type;
int base, size;
int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1)); /* rope # */
BUG_ON((t!=HPHW_IOA) && (t!=HPHW_BCPORT));
r->start = r->end = 0;
base = READ_REG32(sba->sba_hpa + LMMIO_DIST_BASE);
if ((base & 1) == 0) {
BUG(); /* Gah! Distr Range wasn't enabled! */
return;
}
r->start = (base & ~1UL) | PCI_F_EXTEND;
size = (~READ_REG32(sba->sba_hpa + LMMIO_DIST_MASK)) / ROPES_PER_IOC;
r->start += rope * (size + 1); /* adjust base for this rope */
r->end = r->start + size;
r->flags = IORESOURCE_MEM;
}
| gpl-2.0 |
draekko/android_kernel_lg_hammerhead-neobuddy89 | drivers/base/soc.c | 4791 | 4246 | /*
* Copyright (C) ST-Ericsson SA 2011
*
* Author: Lee Jones <lee.jones@linaro.org> for ST-Ericsson.
* License terms: GNU General Public License (GPL), version 2
*/
#include <linux/sysfs.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/sys_soc.h>
#include <linux/err.h>
static DEFINE_IDA(soc_ida);
static DEFINE_SPINLOCK(soc_lock);
static ssize_t soc_info_get(struct device *dev,
struct device_attribute *attr,
char *buf);
struct soc_device {
struct device dev;
struct soc_device_attribute *attr;
int soc_dev_num;
};
static struct bus_type soc_bus_type = {
.name = "soc",
};
static DEVICE_ATTR(machine, S_IRUGO, soc_info_get, NULL);
static DEVICE_ATTR(family, S_IRUGO, soc_info_get, NULL);
static DEVICE_ATTR(soc_id, S_IRUGO, soc_info_get, NULL);
static DEVICE_ATTR(revision, S_IRUGO, soc_info_get, NULL);
struct device *soc_device_to_device(struct soc_device *soc_dev)
{
return &soc_dev->dev;
}
static mode_t soc_attribute_mode(struct kobject *kobj,
struct attribute *attr,
int index)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
if ((attr == &dev_attr_machine.attr)
&& (soc_dev->attr->machine != NULL))
return attr->mode;
if ((attr == &dev_attr_family.attr)
&& (soc_dev->attr->family != NULL))
return attr->mode;
if ((attr == &dev_attr_revision.attr)
&& (soc_dev->attr->revision != NULL))
return attr->mode;
if ((attr == &dev_attr_soc_id.attr)
&& (soc_dev->attr->soc_id != NULL))
return attr->mode;
/* Unknown or unfilled attribute. */
return 0;
}
static ssize_t soc_info_get(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
if (attr == &dev_attr_machine)
return sprintf(buf, "%s\n", soc_dev->attr->machine);
if (attr == &dev_attr_family)
return sprintf(buf, "%s\n", soc_dev->attr->family);
if (attr == &dev_attr_revision)
return sprintf(buf, "%s\n", soc_dev->attr->revision);
if (attr == &dev_attr_soc_id)
return sprintf(buf, "%s\n", soc_dev->attr->soc_id);
return -EINVAL;
}
static struct attribute *soc_attr[] = {
&dev_attr_machine.attr,
&dev_attr_family.attr,
&dev_attr_soc_id.attr,
&dev_attr_revision.attr,
NULL,
};
static const struct attribute_group soc_attr_group = {
.attrs = soc_attr,
.is_visible = soc_attribute_mode,
};
static const struct attribute_group *soc_attr_groups[] = {
&soc_attr_group,
NULL,
};
static void soc_release(struct device *dev)
{
struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
kfree(soc_dev);
}
struct soc_device *soc_device_register(struct soc_device_attribute *soc_dev_attr)
{
struct soc_device *soc_dev;
int ret;
soc_dev = kzalloc(sizeof(*soc_dev), GFP_KERNEL);
if (!soc_dev) {
ret = -ENOMEM;
goto out1;
}
/* Fetch a unique (reclaimable) SOC ID. */
do {
if (!ida_pre_get(&soc_ida, GFP_KERNEL)) {
ret = -ENOMEM;
goto out2;
}
spin_lock(&soc_lock);
ret = ida_get_new(&soc_ida, &soc_dev->soc_dev_num);
spin_unlock(&soc_lock);
} while (ret == -EAGAIN);
if (ret)
goto out2;
soc_dev->attr = soc_dev_attr;
soc_dev->dev.bus = &soc_bus_type;
soc_dev->dev.groups = soc_attr_groups;
soc_dev->dev.release = soc_release;
dev_set_name(&soc_dev->dev, "soc%d", soc_dev->soc_dev_num);
ret = device_register(&soc_dev->dev);
if (ret)
goto out3;
return soc_dev;
out3:
ida_remove(&soc_ida, soc_dev->soc_dev_num);
out2:
kfree(soc_dev);
out1:
return ERR_PTR(ret);
}
/* Ensure soc_dev->attr is freed prior to calling soc_device_unregister. */
void soc_device_unregister(struct soc_device *soc_dev)
{
ida_remove(&soc_ida, soc_dev->soc_dev_num);
device_unregister(&soc_dev->dev);
}
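/*
 * Usage sketch (illustrative only; the strings and init hook are
 * hypothetical). A platform fills in a soc_device_attribute and
 * registers it early in boot; the matching sysfs files then appear
 * under /sys/devices/socN/.
 */
#if 0
static int __init example_soc_init(void)
{
struct soc_device_attribute *attr;
struct soc_device *soc_dev;
attr = kzalloc(sizeof(*attr), GFP_KERNEL);
if (!attr)
return -ENOMEM;
attr->machine = "Example Board";
attr->family = "Example SoC Family";
attr->revision = "1.0";
soc_dev = soc_device_register(attr);
if (IS_ERR(soc_dev)) {
kfree(attr);
return PTR_ERR(soc_dev);
}
return 0;
}
#endif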
static int __init soc_bus_register(void)
{
return bus_register(&soc_bus_type);
}
core_initcall(soc_bus_register);
static void __exit soc_bus_unregister(void)
{
ida_destroy(&soc_ida);
bus_unregister(&soc_bus_type);
}
module_exit(soc_bus_unregister);
| gpl-2.0 |
proxuser/kartal | sound/soc/omap/omap-mcbsp.c | 4791 | 22333 | /*
* omap-mcbsp.c -- OMAP ALSA SoC DAI driver using McBSP port
*
* Copyright (C) 2008 Nokia Corporation
*
* Contact: Jarkko Nikula <jarkko.nikula@bitmer.com>
* Peter Ujfalusi <peter.ujfalusi@ti.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/initval.h>
#include <sound/soc.h>
#include <plat/dma.h>
#include <plat/mcbsp.h>
#include "mcbsp.h"
#include "omap-mcbsp.h"
#include "omap-pcm.h"
#define OMAP_MCBSP_RATES (SNDRV_PCM_RATE_8000_96000)
#define OMAP_MCBSP_SOC_SINGLE_S16_EXT(xname, xmin, xmax, \
xhandler_get, xhandler_put) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
.info = omap_mcbsp_st_info_volsw, \
.get = xhandler_get, .put = xhandler_put, \
.private_value = (unsigned long) &(struct soc_mixer_control) \
{.min = xmin, .max = xmax} }
enum {
OMAP_MCBSP_WORD_8 = 0,
OMAP_MCBSP_WORD_12,
OMAP_MCBSP_WORD_16,
OMAP_MCBSP_WORD_20,
OMAP_MCBSP_WORD_24,
OMAP_MCBSP_WORD_32,
};
/*
* Stream DMA parameters. DMA request line and port address are set runtime
* since they are different between OMAP1 and later OMAPs
*/
static void omap_mcbsp_set_threshold(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);
struct omap_pcm_dma_data *dma_data;
int words;
dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
/* TODO: Currently, MODE_ELEMENT == MODE_FRAME */
if (mcbsp->dma_op_mode == MCBSP_DMA_MODE_THRESHOLD) {
/*
* Configure the McBSP threshold based on either the
* packet_size, when the sDMA is in packet mode, or
* on the period size otherwise.
*/
if (dma_data->packet_size)
words = dma_data->packet_size;
else
words = snd_pcm_lib_period_bytes(substream) /
(mcbsp->wlen / 8);
} else {
words = 1;
}
/* Configure McBSP internal buffer usage */
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
omap_mcbsp_set_tx_threshold(mcbsp, words);
else
omap_mcbsp_set_rx_threshold(mcbsp, words);
}
static int omap_mcbsp_hwrule_min_buffersize(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
struct snd_interval *buffer_size = hw_param_interval(params,
SNDRV_PCM_HW_PARAM_BUFFER_SIZE);
struct snd_interval *channels = hw_param_interval(params,
SNDRV_PCM_HW_PARAM_CHANNELS);
struct omap_mcbsp *mcbsp = rule->private;
struct snd_interval frames;
int size;
snd_interval_any(&frames);
size = mcbsp->pdata->buffer_size;
frames.min = size / channels->min;
frames.integer = 1;
return snd_interval_refine(buffer_size, &frames);
}
static int omap_mcbsp_dai_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *cpu_dai)
{
struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);
int err = 0;
if (!cpu_dai->active)
err = omap_mcbsp_request(mcbsp);
/*
* OMAP3 McBSP FIFO is word structured.
* McBSP2 has 1024 + 256 = 1280 word long buffer,
* McBSP1,3,4,5 has 128 word long buffer
* This means that the size of the FIFO depends on the sample format.
* For example on McBSP3:
* 16bit samples: size is 128 * 2 = 256 bytes
* 32bit samples: size is 128 * 4 = 512 bytes
* It is simpler to place constraint for buffer and period based on
* channels.
* McBSP3 as example again (16 or 32 bit samples):
* 1 channel (mono): size is 128 frames (128 words)
* 2 channels (stereo): size is 128 / 2 = 64 frames (2 * 64 words)
* 4 channels: size is 128 / 4 = 32 frames (4 * 32 words)
*/
if (mcbsp->pdata->buffer_size) {
/*
* Rule for the buffer size. We should not allow a
* buffer smaller than the FIFO size, to avoid underruns.
*/
snd_pcm_hw_rule_add(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
omap_mcbsp_hwrule_min_buffersize,
mcbsp,
SNDRV_PCM_HW_PARAM_CHANNELS, -1);
/* Make sure that the period size is always even */
snd_pcm_hw_constraint_step(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 2);
}
return err;
}
static void omap_mcbsp_dai_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *cpu_dai)
{
struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);
if (!cpu_dai->active) {
omap_mcbsp_free(mcbsp);
mcbsp->configured = 0;
}
}
static int omap_mcbsp_dai_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_dai *cpu_dai)
{
struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);
int err = 0, play = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
mcbsp->active++;
omap_mcbsp_start(mcbsp, play, !play);
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
omap_mcbsp_stop(mcbsp, play, !play);
mcbsp->active--;
break;
default:
err = -EINVAL;
}
return err;
}
static snd_pcm_sframes_t omap_mcbsp_dai_delay(
struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);
u16 fifo_use;
snd_pcm_sframes_t delay;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
fifo_use = omap_mcbsp_get_tx_delay(mcbsp);
else
fifo_use = omap_mcbsp_get_rx_delay(mcbsp);
/*
* Divide the used locations with the channel count to get the
* FIFO usage in samples (don't care about partial samples in the
* buffer).
*/
delay = fifo_use / substream->runtime->channels;
return delay;
}
static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *cpu_dai)
{
struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);
struct omap_mcbsp_reg_cfg *regs = &mcbsp->cfg_regs;
struct omap_pcm_dma_data *dma_data;
int wlen, channels, wpf, sync_mode = OMAP_DMA_SYNC_ELEMENT;
int pkt_size = 0;
unsigned int format, div, framesize, master;
dma_data = &mcbsp->dma_data[substream->stream];
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
dma_data->data_type = OMAP_DMA_DATA_TYPE_S16;
wlen = 16;
break;
case SNDRV_PCM_FORMAT_S32_LE:
dma_data->data_type = OMAP_DMA_DATA_TYPE_S32;
wlen = 32;
break;
default:
return -EINVAL;
}
if (mcbsp->pdata->buffer_size) {
dma_data->set_threshold = omap_mcbsp_set_threshold;
/* TODO: Currently, MODE_ELEMENT == MODE_FRAME */
if (mcbsp->dma_op_mode == MCBSP_DMA_MODE_THRESHOLD) {
int period_words, max_thrsh;
period_words = params_period_bytes(params) / (wlen / 8);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
max_thrsh = mcbsp->max_tx_thres;
else
max_thrsh = mcbsp->max_rx_thres;
/*
* If the period contains no more words than the maximum
* threshold, we use the original threshold mode setup:
* McBSP threshold = sDMA frame size = period_size
* Otherwise we switch to sDMA packet mode:
* McBSP threshold = sDMA packet size
* sDMA frame size = period size
*/
if (period_words > max_thrsh) {
int divider = 0;
/*
* Look for the biggest threshold value, which
* divides the period size evenly.
*/
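/*
* Illustrative numbers (not from any datasheet):
* period_words = 1000, max_thrsh = 128 starts the
* search at divider = 8, and since 1000 % 8 == 0
* the packet size below becomes 125 words.
*/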
divider = period_words / max_thrsh;
if (period_words % max_thrsh)
divider++;
while (period_words % divider &&
divider < period_words)
divider++;
if (divider == period_words)
return -EINVAL;
pkt_size = period_words / divider;
sync_mode = OMAP_DMA_SYNC_PACKET;
} else {
sync_mode = OMAP_DMA_SYNC_FRAME;
}
}
}
dma_data->sync_mode = sync_mode;
dma_data->packet_size = pkt_size;
snd_soc_dai_set_dma_data(cpu_dai, substream, dma_data);
if (mcbsp->configured) {
/* McBSP already configured by another stream */
return 0;
}
regs->rcr2 &= ~(RPHASE | RFRLEN2(0x7f) | RWDLEN2(7));
regs->xcr2 &= ~(RPHASE | XFRLEN2(0x7f) | XWDLEN2(7));
regs->rcr1 &= ~(RFRLEN1(0x7f) | RWDLEN1(7));
regs->xcr1 &= ~(XFRLEN1(0x7f) | XWDLEN1(7));
format = mcbsp->fmt & SND_SOC_DAIFMT_FORMAT_MASK;
wpf = channels = params_channels(params);
if (channels == 2 && (format == SND_SOC_DAIFMT_I2S ||
format == SND_SOC_DAIFMT_LEFT_J)) {
/* Use dual-phase frames */
regs->rcr2 |= RPHASE;
regs->xcr2 |= XPHASE;
/* Set 1 word per (McBSP) frame for phase1 and phase2 */
wpf--;
regs->rcr2 |= RFRLEN2(wpf - 1);
regs->xcr2 |= XFRLEN2(wpf - 1);
}
regs->rcr1 |= RFRLEN1(wpf - 1);
regs->xcr1 |= XFRLEN1(wpf - 1);
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
/* Set word lengths */
regs->rcr2 |= RWDLEN2(OMAP_MCBSP_WORD_16);
regs->rcr1 |= RWDLEN1(OMAP_MCBSP_WORD_16);
regs->xcr2 |= XWDLEN2(OMAP_MCBSP_WORD_16);
regs->xcr1 |= XWDLEN1(OMAP_MCBSP_WORD_16);
break;
case SNDRV_PCM_FORMAT_S32_LE:
/* Set word lengths */
regs->rcr2 |= RWDLEN2(OMAP_MCBSP_WORD_32);
regs->rcr1 |= RWDLEN1(OMAP_MCBSP_WORD_32);
regs->xcr2 |= XWDLEN2(OMAP_MCBSP_WORD_32);
regs->xcr1 |= XWDLEN1(OMAP_MCBSP_WORD_32);
break;
default:
/* Unsupported PCM format */
return -EINVAL;
}
/* In McBSP master modes, FRAME (i.e. sample rate) is generated
* by _counting_ BCLKs. Calculate frame size in BCLKs */
master = mcbsp->fmt & SND_SOC_DAIFMT_MASTER_MASK;
if (master == SND_SOC_DAIFMT_CBS_CFS) {
div = mcbsp->clk_div ? mcbsp->clk_div : 1;
framesize = (mcbsp->in_freq / div) / params_rate(params);
if (framesize < wlen * channels) {
printk(KERN_ERR "%s: not enough bandwidth for desired rate and "
"channels\n", __func__);
return -EINVAL;
}
} else
framesize = wlen * channels;
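/*
* Illustrative numbers for the master case: in_freq = 12.288 MHz
* with div = 4 and a 48 kHz rate gives framesize =
* 3072000 / 48000 = 64 BCLKs per frame, i.e. exactly two 32-bit
* slots.
*/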
/* Set FS period and length in terms of bit clock periods */
regs->srgr2 &= ~FPER(0xfff);
regs->srgr1 &= ~FWID(0xff);
switch (format) {
case SND_SOC_DAIFMT_I2S:
case SND_SOC_DAIFMT_LEFT_J:
regs->srgr2 |= FPER(framesize - 1);
regs->srgr1 |= FWID((framesize >> 1) - 1);
break;
case SND_SOC_DAIFMT_DSP_A:
case SND_SOC_DAIFMT_DSP_B:
regs->srgr2 |= FPER(framesize - 1);
regs->srgr1 |= FWID(0);
break;
}
omap_mcbsp_config(mcbsp, &mcbsp->cfg_regs);
mcbsp->wlen = wlen;
mcbsp->configured = 1;
return 0;
}
/*
* This must be called before _set_clkdiv and _set_sysclk since McBSP register
* cache is initialized here
*/
static int omap_mcbsp_dai_set_dai_fmt(struct snd_soc_dai *cpu_dai,
unsigned int fmt)
{
struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);
struct omap_mcbsp_reg_cfg *regs = &mcbsp->cfg_regs;
bool inv_fs = false;
if (mcbsp->configured)
return 0;
mcbsp->fmt = fmt;
memset(regs, 0, sizeof(*regs));
/* Generic McBSP register settings */
regs->spcr2 |= XINTM(3) | FREE;
regs->spcr1 |= RINTM(3);
/* RFIG and XFIG are not defined in 34xx */
if (!cpu_is_omap34xx() && !cpu_is_omap44xx()) {
regs->rcr2 |= RFIG;
regs->xcr2 |= XFIG;
}
if (cpu_is_omap2430() || cpu_is_omap34xx() || cpu_is_omap44xx()) {
regs->xccr = DXENDLY(1) | XDMAEN | XDISABLE;
regs->rccr = RFULL_CYCLE | RDMAEN | RDISABLE;
}
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
/* 1-bit data delay */
regs->rcr2 |= RDATDLY(1);
regs->xcr2 |= XDATDLY(1);
break;
case SND_SOC_DAIFMT_LEFT_J:
/* 0-bit data delay */
regs->rcr2 |= RDATDLY(0);
regs->xcr2 |= XDATDLY(0);
regs->spcr1 |= RJUST(2);
/* Invert FS polarity configuration */
inv_fs = true;
break;
case SND_SOC_DAIFMT_DSP_A:
/* 1-bit data delay */
regs->rcr2 |= RDATDLY(1);
regs->xcr2 |= XDATDLY(1);
/* Invert FS polarity configuration */
inv_fs = true;
break;
case SND_SOC_DAIFMT_DSP_B:
/* 0-bit data delay */
regs->rcr2 |= RDATDLY(0);
regs->xcr2 |= XDATDLY(0);
/* Invert FS polarity configuration */
inv_fs = true;
break;
default:
/* Unsupported data format */
return -EINVAL;
}
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBS_CFS:
/* McBSP master. Set FS and bit clocks as outputs */
regs->pcr0 |= FSXM | FSRM |
CLKXM | CLKRM;
/* Sample rate generator drives the FS */
regs->srgr2 |= FSGM;
break;
case SND_SOC_DAIFMT_CBM_CFM:
/* McBSP slave */
break;
default:
/* Unsupported master/slave configuration */
return -EINVAL;
}
/* Set bit clock (CLKX/CLKR) and FS polarities */
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
case SND_SOC_DAIFMT_NB_NF:
/*
* Normal BCLK + FS.
* FS active low. TX data driven on falling edge of bit clock
* and RX data sampled on rising edge of bit clock.
*/
regs->pcr0 |= FSXP | FSRP |
CLKXP | CLKRP;
break;
case SND_SOC_DAIFMT_NB_IF:
regs->pcr0 |= CLKXP | CLKRP;
break;
case SND_SOC_DAIFMT_IB_NF:
regs->pcr0 |= FSXP | FSRP;
break;
case SND_SOC_DAIFMT_IB_IF:
break;
default:
return -EINVAL;
}
if (inv_fs)
regs->pcr0 ^= FSXP | FSRP;
return 0;
}
static int omap_mcbsp_dai_set_clkdiv(struct snd_soc_dai *cpu_dai,
int div_id, int div)
{
struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);
struct omap_mcbsp_reg_cfg *regs = &mcbsp->cfg_regs;
if (div_id != OMAP_MCBSP_CLKGDV)
return -ENODEV;
mcbsp->clk_div = div;
regs->srgr1 &= ~CLKGDV(0xff);
regs->srgr1 |= CLKGDV(div - 1);
return 0;
}
static int omap_mcbsp_dai_set_dai_sysclk(struct snd_soc_dai *cpu_dai,
int clk_id, unsigned int freq,
int dir)
{
struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);
struct omap_mcbsp_reg_cfg *regs = &mcbsp->cfg_regs;
int err = 0;
if (mcbsp->active) {
if (freq == mcbsp->in_freq)
return 0;
else
return -EBUSY;
}
if (clk_id == OMAP_MCBSP_SYSCLK_CLK ||
clk_id == OMAP_MCBSP_SYSCLK_CLKS_FCLK ||
clk_id == OMAP_MCBSP_SYSCLK_CLKS_EXT ||
clk_id == OMAP_MCBSP_SYSCLK_CLKX_EXT ||
clk_id == OMAP_MCBSP_SYSCLK_CLKR_EXT) {
mcbsp->in_freq = freq;
regs->srgr2 &= ~CLKSM;
regs->pcr0 &= ~SCLKME;
} else if (cpu_class_is_omap1()) {
/*
* McBSP CLKR/FSR signal muxing functions are only available on
* OMAP2 or newer versions
*/
return -EINVAL;
}
switch (clk_id) {
case OMAP_MCBSP_SYSCLK_CLK:
regs->srgr2 |= CLKSM;
break;
case OMAP_MCBSP_SYSCLK_CLKS_FCLK:
if (cpu_class_is_omap1()) {
err = -EINVAL;
break;
}
err = omap2_mcbsp_set_clks_src(mcbsp,
MCBSP_CLKS_PRCM_SRC);
break;
case OMAP_MCBSP_SYSCLK_CLKS_EXT:
if (cpu_class_is_omap1()) {
err = 0;
break;
}
err = omap2_mcbsp_set_clks_src(mcbsp,
MCBSP_CLKS_PAD_SRC);
break;
case OMAP_MCBSP_SYSCLK_CLKX_EXT:
regs->srgr2 |= CLKSM;
/* fallthrough: CLKX_EXT also needs SCLKME set below */
case OMAP_MCBSP_SYSCLK_CLKR_EXT:
regs->pcr0 |= SCLKME;
break;
case OMAP_MCBSP_CLKR_SRC_CLKR:
err = omap_mcbsp_6pin_src_mux(mcbsp, CLKR_SRC_CLKR);
break;
case OMAP_MCBSP_CLKR_SRC_CLKX:
err = omap_mcbsp_6pin_src_mux(mcbsp, CLKR_SRC_CLKX);
break;
case OMAP_MCBSP_FSR_SRC_FSR:
err = omap_mcbsp_6pin_src_mux(mcbsp, FSR_SRC_FSR);
break;
case OMAP_MCBSP_FSR_SRC_FSX:
err = omap_mcbsp_6pin_src_mux(mcbsp, FSR_SRC_FSX);
break;
default:
err = -ENODEV;
}
return err;
}
static const struct snd_soc_dai_ops mcbsp_dai_ops = {
.startup = omap_mcbsp_dai_startup,
.shutdown = omap_mcbsp_dai_shutdown,
.trigger = omap_mcbsp_dai_trigger,
.delay = omap_mcbsp_dai_delay,
.hw_params = omap_mcbsp_dai_hw_params,
.set_fmt = omap_mcbsp_dai_set_dai_fmt,
.set_clkdiv = omap_mcbsp_dai_set_clkdiv,
.set_sysclk = omap_mcbsp_dai_set_dai_sysclk,
};
static int omap_mcbsp_probe(struct snd_soc_dai *dai)
{
struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(dai);
pm_runtime_enable(mcbsp->dev);
return 0;
}
static int omap_mcbsp_remove(struct snd_soc_dai *dai)
{
struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(dai);
pm_runtime_disable(mcbsp->dev);
return 0;
}
static struct snd_soc_dai_driver omap_mcbsp_dai = {
.probe = omap_mcbsp_probe,
.remove = omap_mcbsp_remove,
.playback = {
.channels_min = 1,
.channels_max = 16,
.rates = OMAP_MCBSP_RATES,
.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE,
},
.capture = {
.channels_min = 1,
.channels_max = 16,
.rates = OMAP_MCBSP_RATES,
.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE,
},
.ops = &mcbsp_dai_ops,
};
static int omap_mcbsp_st_info_volsw(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
int max = mc->max;
int min = mc->min;
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = 1;
uinfo->value.integer.min = min;
uinfo->value.integer.max = max;
return 0;
}
#define OMAP_MCBSP_ST_SET_CHANNEL_VOLUME(channel) \
static int \
omap_mcbsp_set_st_ch##channel##_volume(struct snd_kcontrol *kc, \
struct snd_ctl_elem_value *uc) \
{ \
struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kc); \
struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai); \
struct soc_mixer_control *mc = \
(struct soc_mixer_control *)kc->private_value; \
int max = mc->max; \
int min = mc->min; \
int val = uc->value.integer.value[0]; \
\
if (val < min || val > max) \
return -EINVAL; \
\
/* OMAP McBSP implementation uses index values 0..4 */ \
return omap_st_set_chgain(mcbsp, channel, val); \
}
#define OMAP_MCBSP_ST_GET_CHANNEL_VOLUME(channel) \
static int \
omap_mcbsp_get_st_ch##channel##_volume(struct snd_kcontrol *kc, \
struct snd_ctl_elem_value *uc) \
{ \
struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kc); \
struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai); \
s16 chgain; \
\
if (omap_st_get_chgain(mcbsp, channel, &chgain)) \
return -EAGAIN; \
\
uc->value.integer.value[0] = chgain; \
return 0; \
}
OMAP_MCBSP_ST_SET_CHANNEL_VOLUME(0)
OMAP_MCBSP_ST_SET_CHANNEL_VOLUME(1)
OMAP_MCBSP_ST_GET_CHANNEL_VOLUME(0)
OMAP_MCBSP_ST_GET_CHANNEL_VOLUME(1)
static int omap_mcbsp_st_put_mode(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kcontrol);
struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);
u8 value = ucontrol->value.integer.value[0];
if (value == omap_st_is_enabled(mcbsp))
return 0;
if (value)
omap_st_enable(mcbsp);
else
omap_st_disable(mcbsp);
return 1;
}
static int omap_mcbsp_st_get_mode(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kcontrol);
struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);
ucontrol->value.integer.value[0] = omap_st_is_enabled(mcbsp);
return 0;
}
static const struct snd_kcontrol_new omap_mcbsp2_st_controls[] = {
SOC_SINGLE_EXT("McBSP2 Sidetone Switch", 1, 0, 1, 0,
omap_mcbsp_st_get_mode, omap_mcbsp_st_put_mode),
OMAP_MCBSP_SOC_SINGLE_S16_EXT("McBSP2 Sidetone Channel 0 Volume",
-32768, 32767,
omap_mcbsp_get_st_ch0_volume,
omap_mcbsp_set_st_ch0_volume),
OMAP_MCBSP_SOC_SINGLE_S16_EXT("McBSP2 Sidetone Channel 1 Volume",
-32768, 32767,
omap_mcbsp_get_st_ch1_volume,
omap_mcbsp_set_st_ch1_volume),
};
static const struct snd_kcontrol_new omap_mcbsp3_st_controls[] = {
SOC_SINGLE_EXT("McBSP3 Sidetone Switch", 2, 0, 1, 0,
omap_mcbsp_st_get_mode, omap_mcbsp_st_put_mode),
OMAP_MCBSP_SOC_SINGLE_S16_EXT("McBSP3 Sidetone Channel 0 Volume",
-32768, 32767,
omap_mcbsp_get_st_ch0_volume,
omap_mcbsp_set_st_ch0_volume),
OMAP_MCBSP_SOC_SINGLE_S16_EXT("McBSP3 Sidetone Channel 1 Volume",
-32768, 32767,
omap_mcbsp_get_st_ch1_volume,
omap_mcbsp_set_st_ch1_volume),
};
int omap_mcbsp_st_add_controls(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);
if (!mcbsp->st_data)
return -ENODEV;
switch (cpu_dai->id) {
case 2: /* McBSP 2 */
return snd_soc_add_dai_controls(cpu_dai,
omap_mcbsp2_st_controls,
ARRAY_SIZE(omap_mcbsp2_st_controls));
case 3: /* McBSP 3 */
return snd_soc_add_dai_controls(cpu_dai,
omap_mcbsp3_st_controls,
ARRAY_SIZE(omap_mcbsp3_st_controls));
default:
break;
}
return -EINVAL;
}
EXPORT_SYMBOL_GPL(omap_mcbsp_st_add_controls);
static __devinit int asoc_mcbsp_probe(struct platform_device *pdev)
{
struct omap_mcbsp_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct omap_mcbsp *mcbsp;
int ret;
if (!pdata) {
dev_err(&pdev->dev, "missing platform data.\n");
return -EINVAL;
}
mcbsp = devm_kzalloc(&pdev->dev, sizeof(struct omap_mcbsp), GFP_KERNEL);
if (!mcbsp)
return -ENOMEM;
mcbsp->id = pdev->id;
mcbsp->pdata = pdata;
mcbsp->dev = &pdev->dev;
platform_set_drvdata(pdev, mcbsp);
ret = omap_mcbsp_init(pdev);
if (!ret)
return snd_soc_register_dai(&pdev->dev, &omap_mcbsp_dai);
return ret;
}
static int __devexit asoc_mcbsp_remove(struct platform_device *pdev)
{
struct omap_mcbsp *mcbsp = platform_get_drvdata(pdev);
snd_soc_unregister_dai(&pdev->dev);
if (mcbsp->pdata->ops && mcbsp->pdata->ops->free)
mcbsp->pdata->ops->free(mcbsp->id);
omap_mcbsp_sysfs_remove(mcbsp);
clk_put(mcbsp->fclk);
platform_set_drvdata(pdev, NULL);
return 0;
}
static struct platform_driver asoc_mcbsp_driver = {
.driver = {
.name = "omap-mcbsp",
.owner = THIS_MODULE,
},
.probe = asoc_mcbsp_probe,
.remove = __devexit_p(asoc_mcbsp_remove),
};
module_platform_driver(asoc_mcbsp_driver);
MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@bitmer.com>");
MODULE_DESCRIPTION("OMAP I2S SoC Interface");
MODULE_LICENSE("GPL");
| gpl-2.0 |
deepsrd/android_kernel_nx507j | drivers/media/dvb/dvb-usb/dibusb-mc.c | 9399 | 5106 | /* DVB USB compliant linux driver for mobile DVB-T USB devices based on
* reference designs made by DiBcom (http://www.dibcom.fr/) (DiB3000M-C/P)
*
* Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
*
* based on GPL code from DiBcom, which has
* Copyright (C) 2004 Amaury Demol for DiBcom (ademol@dibcom.fr)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation, version 2.
*
* see Documentation/dvb/README.dvb-usb for more information
*/
#include "dibusb.h"
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
/* USB Driver stuff */
static struct dvb_usb_device_properties dibusb_mc_properties;
static int dibusb_mc_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
return dvb_usb_device_init(intf, &dibusb_mc_properties, THIS_MODULE,
NULL, adapter_nr);
}
/*
* Do not change the order of the ID table: the device description
* list below refers to its entries by index (cold/warm pairs).
*/
static struct usb_device_id dibusb_dib3000mc_table [] = {
/* 00 */ { USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_MOD3001_COLD) },
/* 01 */ { USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_MOD3001_WARM) },
/* 02 */ { USB_DEVICE(USB_VID_ULTIMA_ELECTRONIC, USB_PID_ULTIMA_TVBOX_USB2_COLD) },
/* 03 */ { USB_DEVICE(USB_VID_ULTIMA_ELECTRONIC, USB_PID_ULTIMA_TVBOX_USB2_WARM) }, /* ( ? ) */
/* 04 */ { USB_DEVICE(USB_VID_LITEON, USB_PID_LITEON_DVB_T_COLD) },
/* 05 */ { USB_DEVICE(USB_VID_LITEON, USB_PID_LITEON_DVB_T_WARM) },
/* 06 */ { USB_DEVICE(USB_VID_EMPIA, USB_PID_DIGIVOX_MINI_SL_COLD) },
/* 07 */ { USB_DEVICE(USB_VID_EMPIA, USB_PID_DIGIVOX_MINI_SL_WARM) },
/* 08 */ { USB_DEVICE(USB_VID_GRANDTEC, USB_PID_GRANDTEC_DVBT_USB2_COLD) },
/* 09 */ { USB_DEVICE(USB_VID_GRANDTEC, USB_PID_GRANDTEC_DVBT_USB2_WARM) },
/* 10 */ { USB_DEVICE(USB_VID_ULTIMA_ELECTRONIC, USB_PID_ARTEC_T14_COLD) },
/* 11 */ { USB_DEVICE(USB_VID_ULTIMA_ELECTRONIC, USB_PID_ARTEC_T14_WARM) },
/* 12 */ { USB_DEVICE(USB_VID_LEADTEK, USB_PID_WINFAST_DTV_DONGLE_COLD) },
/* 13 */ { USB_DEVICE(USB_VID_LEADTEK, USB_PID_WINFAST_DTV_DONGLE_WARM) },
/* 14 */ { USB_DEVICE(USB_VID_HUMAX_COEX, USB_PID_DVB_T_USB_STICK_HIGH_SPEED_COLD) },
/* 15 */ { USB_DEVICE(USB_VID_HUMAX_COEX, USB_PID_DVB_T_USB_STICK_HIGH_SPEED_WARM) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE (usb, dibusb_dib3000mc_table);
static struct dvb_usb_device_properties dibusb_mc_properties = {
.caps = DVB_USB_IS_AN_I2C_ADAPTER,
.usb_ctrl = CYPRESS_FX2,
.firmware = "dvb-usb-dibusb-6.0.0.8.fw",
.num_adapters = 1,
.adapter = {
{
.num_frontends = 1,
.fe = {{
.caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
.pid_filter_count = 32,
.streaming_ctrl = dibusb2_0_streaming_ctrl,
.pid_filter = dibusb_pid_filter,
.pid_filter_ctrl = dibusb_pid_filter_ctrl,
.frontend_attach = dibusb_dib3000mc_frontend_attach,
.tuner_attach = dibusb_dib3000mc_tuner_attach,
/* parameter for the MPEG2-data transfer */
.stream = {
.type = USB_BULK,
.count = 8,
.endpoint = 0x06,
.u = {
.bulk = {
.buffersize = 4096,
}
}
},
}},
.size_of_priv = sizeof(struct dibusb_state),
}
},
.power_ctrl = dibusb2_0_power_ctrl,
.rc.legacy = {
.rc_interval = DEFAULT_RC_INTERVAL,
.rc_map_table = rc_map_dibusb_table,
.rc_map_size = 111, /* FIXME */
.rc_query = dibusb_rc_query,
},
.i2c_algo = &dibusb_i2c_algo,
.generic_bulk_ctrl_endpoint = 0x01,
.num_device_descs = 8,
.devices = {
{ "DiBcom USB2.0 DVB-T reference design (MOD3000P)",
{ &dibusb_dib3000mc_table[0], NULL },
{ &dibusb_dib3000mc_table[1], NULL },
},
{ "Artec T1 USB2.0 TVBOX (please check the warm ID)",
{ &dibusb_dib3000mc_table[2], NULL },
{ &dibusb_dib3000mc_table[3], NULL },
},
{ "LITE-ON USB2.0 DVB-T Tuner",
/* Also rebranded as Intuix S800, Toshiba */
{ &dibusb_dib3000mc_table[4], NULL },
{ &dibusb_dib3000mc_table[5], NULL },
},
{ "MSI Digivox Mini SL",
{ &dibusb_dib3000mc_table[6], NULL },
{ &dibusb_dib3000mc_table[7], NULL },
},
{ "GRAND - USB2.0 DVB-T adapter",
{ &dibusb_dib3000mc_table[8], NULL },
{ &dibusb_dib3000mc_table[9], NULL },
},
{ "Artec T14 - USB2.0 DVB-T",
{ &dibusb_dib3000mc_table[10], NULL },
{ &dibusb_dib3000mc_table[11], NULL },
},
{ "Leadtek - USB2.0 Winfast DTV dongle",
{ &dibusb_dib3000mc_table[12], NULL },
{ &dibusb_dib3000mc_table[13], NULL },
},
{ "Humax/Coex DVB-T USB Stick 2.0 High Speed",
{ &dibusb_dib3000mc_table[14], NULL },
{ &dibusb_dib3000mc_table[15], NULL },
},
{ NULL },
}
};
static struct usb_driver dibusb_mc_driver = {
.name = "dvb_usb_dibusb_mc",
.probe = dibusb_mc_probe,
.disconnect = dvb_usb_device_exit,
.id_table = dibusb_dib3000mc_table,
};
module_usb_driver(dibusb_mc_driver);
MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
MODULE_DESCRIPTION("Driver for DiBcom USB2.0 DVB-T (DiB3000M-C/P based) devices");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
| gpl-2.0 |
lnfamous/Kernel_CyanogenMod9_Pico | arch/sh/boards/mach-highlander/irq-r7780mp.c | 13239 | 1941 | /*
* Renesas Solutions Highlander R7780MP Support.
*
* Copyright (C) 2002 Atom Create Engineering Co., Ltd.
* Copyright (C) 2006 Paul Mundt
* Copyright (C) 2007 Magnus Damm
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <mach/highlander.h>
enum {
UNUSED = 0,
/* board specific interrupt sources */
CF, /* Compact Flash */
TP, /* Touch panel */
SCIF1, /* FPGA SCIF1 */
SCIF0, /* FPGA SCIF0 */
SMBUS, /* SMBUS */
RTC, /* RTC Alarm */
AX88796, /* Ethernet controller */
PSW, /* Push Switch */
/* external bus connector */
EXT1, EXT2, EXT4, EXT5, EXT6,
};
static struct intc_vect vectors[] __initdata = {
INTC_IRQ(CF, IRQ_CF),
INTC_IRQ(TP, IRQ_TP),
INTC_IRQ(SCIF1, IRQ_SCIF1),
INTC_IRQ(SCIF0, IRQ_SCIF0),
INTC_IRQ(SMBUS, IRQ_SMBUS),
INTC_IRQ(RTC, IRQ_RTC),
INTC_IRQ(AX88796, IRQ_AX88796),
INTC_IRQ(PSW, IRQ_PSW),
INTC_IRQ(EXT1, IRQ_EXT1), INTC_IRQ(EXT2, IRQ_EXT2),
INTC_IRQ(EXT4, IRQ_EXT4), INTC_IRQ(EXT5, IRQ_EXT5),
INTC_IRQ(EXT6, IRQ_EXT6),
};
static struct intc_mask_reg mask_registers[] __initdata = {
{ 0xa4000000, 0, 16, /* IRLMSK */
{ SCIF0, SCIF1, RTC, 0, CF, 0, TP, SMBUS,
0, EXT6, EXT5, EXT4, EXT2, EXT1, PSW, AX88796 } },
};
static unsigned char irl2irq[HL_NR_IRL] __initdata = {
0, IRQ_CF, IRQ_TP, IRQ_SCIF1,
IRQ_SCIF0, IRQ_SMBUS, IRQ_RTC, IRQ_EXT6,
IRQ_EXT5, IRQ_EXT4, IRQ_EXT2, IRQ_EXT1,
0, IRQ_AX88796, IRQ_PSW,
};
static DECLARE_INTC_DESC(intc_desc, "r7780mp", vectors,
NULL, mask_registers, NULL, NULL);
unsigned char * __init highlander_plat_irq_setup(void)
{
if ((__raw_readw(0xa4000700) & 0xf000) == 0x2000) {
printk(KERN_INFO "Using r7780mp interrupt controller.\n");
register_intc_controller(&intc_desc);
return irl2irq;
}
return NULL;
}
| gpl-2.0 |
turl/zeppelin_kernel | drivers/video/vfb.c | 184 | 14563 | /*
* linux/drivers/video/vfb.c -- Virtual frame buffer device
*
* Copyright (C) 2002 James Simmons
*
* Copyright (C) 1997 Geert Uytterhoeven
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/fb.h>
#include <linux/init.h>
/*
* RAM we reserve for the frame buffer. This defines the maximum screen
* size
*
* The default can be overridden if the driver is compiled as a module
*/
#define VIDEOMEMSIZE (1*1024*1024) /* 1 MB */
static void *videomemory;
static u_long videomemorysize = VIDEOMEMSIZE;
module_param(videomemorysize, ulong, 0);
/**********************************************************************
*
* Memory management
*
**********************************************************************/
static void *rvmalloc(unsigned long size)
{
void *mem;
unsigned long adr;
size = PAGE_ALIGN(size);
mem = vmalloc_32(size);
if (!mem)
return NULL;
memset(mem, 0, size); /* Clear the ram out, no junk to the user */
adr = (unsigned long) mem;
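/*
* Mark each page reserved so it is never swapped out and so that
* remap_pfn_range() in vfb_mmap() will accept the mapping.
*/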
while (size > 0) {
SetPageReserved(vmalloc_to_page((void *)adr));
adr += PAGE_SIZE;
size -= PAGE_SIZE;
}
return mem;
}
static void rvfree(void *mem, unsigned long size)
{
unsigned long adr;
if (!mem)
return;
adr = (unsigned long) mem;
while ((long) size > 0) {
ClearPageReserved(vmalloc_to_page((void *)adr));
adr += PAGE_SIZE;
size -= PAGE_SIZE;
}
vfree(mem);
}
static struct fb_var_screeninfo vfb_default __initdata = {
.xres = 640,
.yres = 480,
.xres_virtual = 640,
.yres_virtual = 480,
.bits_per_pixel = 8,
.red = { 0, 8, 0 },
.green = { 0, 8, 0 },
.blue = { 0, 8, 0 },
.activate = FB_ACTIVATE_TEST,
.height = -1,
.width = -1,
.pixclock = 20000,
.left_margin = 64,
.right_margin = 64,
.upper_margin = 32,
.lower_margin = 32,
.hsync_len = 64,
.vsync_len = 2,
.vmode = FB_VMODE_NONINTERLACED,
};
static struct fb_fix_screeninfo vfb_fix __initdata = {
.id = "Virtual FB",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_PSEUDOCOLOR,
.xpanstep = 1,
.ypanstep = 1,
.ywrapstep = 1,
.accel = FB_ACCEL_NONE,
};
static bool vfb_enable __initdata = 0; /* disabled by default */
module_param(vfb_enable, bool, 0);
static int vfb_check_var(struct fb_var_screeninfo *var,
struct fb_info *info);
static int vfb_set_par(struct fb_info *info);
static int vfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
u_int transp, struct fb_info *info);
static int vfb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info);
static int vfb_mmap(struct fb_info *info,
struct vm_area_struct *vma);
static struct fb_ops vfb_ops = {
.fb_read = fb_sys_read,
.fb_write = fb_sys_write,
.fb_check_var = vfb_check_var,
.fb_set_par = vfb_set_par,
.fb_setcolreg = vfb_setcolreg,
.fb_pan_display = vfb_pan_display,
.fb_fillrect = sys_fillrect,
.fb_copyarea = sys_copyarea,
.fb_imageblit = sys_imageblit,
.fb_mmap = vfb_mmap,
};
/*
* Internal routines
*/
static u_long get_line_length(int xres_virtual, int bpp)
{
u_long length;
/* round the line up to a 32-bit boundary, then convert bits to bytes */
length = xres_virtual * bpp;
length = (length + 31) & ~31;
length >>= 3;
return length;
}
/*
* Setting the video mode has been split into two parts.
* The first part, xxxfb_check_var, must not write anything
* to hardware; it should only verify and adjust var.
* This means it doesn't alter par but it does use hardware
* data from it to check this var.
*/
static int vfb_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
u_long line_length;
/*
* FB_VMODE_CONUPDATE and FB_VMODE_SMOOTH_XPAN are equal,
* as FB_VMODE_SMOOTH_XPAN is only used internally
*/
if (var->vmode & FB_VMODE_CONUPDATE) {
var->vmode |= FB_VMODE_YWRAP;
var->xoffset = info->var.xoffset;
var->yoffset = info->var.yoffset;
}
/*
* Some very basic checks
*/
if (!var->xres)
var->xres = 1;
if (!var->yres)
var->yres = 1;
if (var->xres > var->xres_virtual)
var->xres_virtual = var->xres;
if (var->yres > var->yres_virtual)
var->yres_virtual = var->yres;
if (var->bits_per_pixel <= 1)
var->bits_per_pixel = 1;
else if (var->bits_per_pixel <= 8)
var->bits_per_pixel = 8;
else if (var->bits_per_pixel <= 16)
var->bits_per_pixel = 16;
else if (var->bits_per_pixel <= 24)
var->bits_per_pixel = 24;
else if (var->bits_per_pixel <= 32)
var->bits_per_pixel = 32;
else
return -EINVAL;
if (var->xres_virtual < var->xoffset + var->xres)
var->xres_virtual = var->xoffset + var->xres;
if (var->yres_virtual < var->yoffset + var->yres)
var->yres_virtual = var->yoffset + var->yres;
/*
* Memory limit
*/
line_length =
get_line_length(var->xres_virtual, var->bits_per_pixel);
if (line_length * var->yres_virtual > videomemorysize)
return -ENOMEM;
/*
* Now that we have checked it, we alter var. The reason is that the
* video mode passed in might not work but slight changes to it might
* make it work. This way we let the user know what is acceptable.
*/
switch (var->bits_per_pixel) {
case 1:
case 8:
var->red.offset = 0;
var->red.length = 8;
var->green.offset = 0;
var->green.length = 8;
var->blue.offset = 0;
var->blue.length = 8;
var->transp.offset = 0;
var->transp.length = 0;
break;
case 16: /* RGBA 5551 */
if (var->transp.length) {
var->red.offset = 0;
var->red.length = 5;
var->green.offset = 5;
var->green.length = 5;
var->blue.offset = 10;
var->blue.length = 5;
var->transp.offset = 15;
var->transp.length = 1;
} else { /* RGB 565 */
var->red.offset = 0;
var->red.length = 5;
var->green.offset = 5;
var->green.length = 6;
var->blue.offset = 11;
var->blue.length = 5;
var->transp.offset = 0;
var->transp.length = 0;
}
break;
case 24: /* RGB 888 */
var->red.offset = 0;
var->red.length = 8;
var->green.offset = 8;
var->green.length = 8;
var->blue.offset = 16;
var->blue.length = 8;
var->transp.offset = 0;
var->transp.length = 0;
break;
case 32: /* RGBA 8888 */
var->red.offset = 0;
var->red.length = 8;
var->green.offset = 8;
var->green.length = 8;
var->blue.offset = 16;
var->blue.length = 8;
var->transp.offset = 24;
var->transp.length = 8;
break;
}
var->red.msb_right = 0;
var->green.msb_right = 0;
var->blue.msb_right = 0;
var->transp.msb_right = 0;
return 0;
}
/* This routine actually sets the video mode. It's in here where we
* update the hardware state, info->par and fix, which can be affected
* by the change in par. For this driver it doesn't do much.
*/
static int vfb_set_par(struct fb_info *info)
{
info->fix.line_length = get_line_length(info->var.xres_virtual,
info->var.bits_per_pixel);
return 0;
}
/*
* Set a single color register. The values supplied are already
* rounded down to the hardware's capabilities (according to the
* entries in the var structure). Return != 0 for invalid regno.
*/
static int vfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
u_int transp, struct fb_info *info)
{
if (regno >= 256) /* no. of hw registers */
return 1;
/*
* Program hardware... do anything you want with transp
*/
/* grayscale works only partially under directcolor */
if (info->var.grayscale) {
/* grayscale = 0.30*R + 0.59*G + 0.11*B (77, 151 and 28 out of 256) */
red = green = blue =
(red * 77 + green * 151 + blue * 28) >> 8;
}
/* Directcolor:
* var->{color}.offset contains start of bitfield
* var->{color}.length contains length of bitfield
* {hardwarespecific} contains width of RAMDAC
* cmap[X] is programmed to (X << red.offset) | (X << green.offset) | (X << blue.offset)
* RAMDAC[X] is programmed to (red, green, blue)
*
* Pseudocolor:
* uses offset = 0 and length = RAMDAC register width.
* var->{color}.offset is 0
* var->{color}.length contains width of the DAC
* cmap is not used
* RAMDAC[X] is programmed to (red, green, blue)
* Truecolor:
* does not use DAC. Usually 3 are present.
* var->{color}.offset contains start of bitfield
* var->{color}.length contains length of bitfield
* cmap is programmed to (red << red.offset) | (green << green.offset) |
* (blue << blue.offset) | (transp << transp.offset)
* RAMDAC does not exist
*/
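/*
* Scale a 16-bit colour component down to 'width' bits with rounding:
* (val * (2^width - 1) + 0x7FFF) >> 16.
*/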
#define CNVT_TOHW(val,width) ((((val)<<(width))+0x7FFF-(val))>>16)
switch (info->fix.visual) {
case FB_VISUAL_TRUECOLOR:
case FB_VISUAL_PSEUDOCOLOR:
red = CNVT_TOHW(red, info->var.red.length);
green = CNVT_TOHW(green, info->var.green.length);
blue = CNVT_TOHW(blue, info->var.blue.length);
transp = CNVT_TOHW(transp, info->var.transp.length);
break;
case FB_VISUAL_DIRECTCOLOR:
red = CNVT_TOHW(red, 8); /* expect 8 bit DAC */
green = CNVT_TOHW(green, 8);
blue = CNVT_TOHW(blue, 8);
/* hey, there is a bug in transp handling... */
transp = CNVT_TOHW(transp, 8);
break;
}
#undef CNVT_TOHW
/* Truecolor has hardware independent palette */
if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
u32 v;
if (regno >= 16)
return 1;
v = (red << info->var.red.offset) |
(green << info->var.green.offset) |
(blue << info->var.blue.offset) |
(transp << info->var.transp.offset);
switch (info->var.bits_per_pixel) {
case 8:
break;
case 16:
((u32 *) (info->pseudo_palette))[regno] = v;
break;
case 24:
case 32:
((u32 *) (info->pseudo_palette))[regno] = v;
break;
}
return 0;
}
return 0;
}
/*
* Pan or Wrap the Display
*
* This call looks only at xoffset, yoffset and the FB_VMODE_YWRAP flag
*/
static int vfb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
if (var->vmode & FB_VMODE_YWRAP) {
if (var->yoffset < 0
|| var->yoffset >= info->var.yres_virtual
|| var->xoffset)
return -EINVAL;
} else {
if (var->xoffset + var->xres > info->var.xres_virtual ||
var->yoffset + var->yres > info->var.yres_virtual)
return -EINVAL;
}
info->var.xoffset = var->xoffset;
info->var.yoffset = var->yoffset;
if (var->vmode & FB_VMODE_YWRAP)
info->var.vmode |= FB_VMODE_YWRAP;
else
info->var.vmode &= ~FB_VMODE_YWRAP;
return 0;
}
/*
* Most drivers don't need their own mmap function
*/
static int vfb_mmap(struct fb_info *info,
struct vm_area_struct *vma)
{
unsigned long start = vma->vm_start;
unsigned long size = vma->vm_end - vma->vm_start;
unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
unsigned long page, pos;
if (offset + size > info->fix.smem_len) {
return -EINVAL;
}
pos = (unsigned long)info->fix.smem_start + offset;
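/*
* vmalloc()ed memory is only virtually contiguous, so hand it to
* userspace one page at a time via the underlying page frames.
*/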
while (size > 0) {
page = vmalloc_to_pfn((void *)pos);
if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED)) {
return -EAGAIN;
}
start += PAGE_SIZE;
pos += PAGE_SIZE;
if (size > PAGE_SIZE)
size -= PAGE_SIZE;
else
size = 0;
}
vma->vm_flags |= VM_RESERVED; /* avoid swapping out this VMA */
return 0;
}
#ifndef MODULE
/*
* The virtual framebuffer driver is only enabled if explicitly
* requested by passing 'video=vfb:' (or any actual options).
*/
static int __init vfb_setup(char *options)
{
char *this_opt;
vfb_enable = 0;
if (!options)
return 1;
vfb_enable = 1;
if (!*options)
return 1;
while ((this_opt = strsep(&options, ",")) != NULL) {
if (!*this_opt)
continue;
/* Test disable for backwards compatibility */
if (!strcmp(this_opt, "disable"))
vfb_enable = 0;
}
return 1;
}
#endif /* MODULE */
/*
* Initialisation
*/
static int __init vfb_probe(struct platform_device *dev)
{
struct fb_info *info;
int retval = -ENOMEM;
/*
* For real video cards we use ioremap.
*/
if (!(videomemory = rvmalloc(videomemorysize)))
return retval;
/*
* VFB must clear memory to prevent kernel info
* leakage into userspace
* VGA-based drivers MUST NOT clear memory if
* they want to be able to take over vgacon
*/
memset(videomemory, 0, videomemorysize);
info = framebuffer_alloc(sizeof(u32) * 256, &dev->dev);
if (!info)
goto err;
info->screen_base = (char __iomem *)videomemory;
info->fbops = &vfb_ops;
retval = fb_find_mode(&info->var, info, NULL,
NULL, 0, NULL, 8);
if (!retval || (retval == 4))
info->var = vfb_default;
vfb_fix.smem_start = (unsigned long) videomemory;
vfb_fix.smem_len = videomemorysize;
info->fix = vfb_fix;
info->pseudo_palette = info->par;
info->par = NULL;
info->flags = FBINFO_FLAG_DEFAULT;
retval = fb_alloc_cmap(&info->cmap, 256, 0);
if (retval < 0)
goto err1;
retval = register_framebuffer(info);
if (retval < 0)
goto err2;
platform_set_drvdata(dev, info);
printk(KERN_INFO
"fb%d: Virtual frame buffer device, using %ldK of video memory\n",
info->node, videomemorysize >> 10);
return 0;
err2:
fb_dealloc_cmap(&info->cmap);
err1:
framebuffer_release(info);
err:
rvfree(videomemory, videomemorysize);
return retval;
}
static int vfb_remove(struct platform_device *dev)
{
struct fb_info *info = platform_get_drvdata(dev);
if (info) {
unregister_framebuffer(info);
rvfree(videomemory, videomemorysize);
framebuffer_release(info);
}
return 0;
}
static struct platform_driver vfb_driver = {
.probe = vfb_probe,
.remove = vfb_remove,
.driver = {
.name = "vfb",
},
};
static struct platform_device *vfb_device;
static int __init vfb_init(void)
{
int ret = 0;
#ifndef MODULE
char *option = NULL;
if (fb_get_options("vfb", &option))
return -ENODEV;
vfb_setup(option);
#endif
if (!vfb_enable)
return -ENXIO;
ret = platform_driver_register(&vfb_driver);
if (!ret) {
vfb_device = platform_device_alloc("vfb", 0);
if (vfb_device)
ret = platform_device_add(vfb_device);
else
ret = -ENOMEM;
if (ret) {
platform_device_put(vfb_device);
platform_driver_unregister(&vfb_driver);
}
}
return ret;
}
module_init(vfb_init);
#ifdef MODULE
static void __exit vfb_exit(void)
{
platform_device_unregister(vfb_device);
platform_driver_unregister(&vfb_driver);
}
module_exit(vfb_exit);
MODULE_LICENSE("GPL");
#endif /* MODULE */
| gpl-2.0 |
DerArtem/android-tegra-2.6.36-hc-dev | fs/hpfs/inode.c | 184 | 8671 | /*
* linux/fs/hpfs/inode.c
*
* Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
*
* inode VFS functions
*/
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include "hpfs_fn.h"
void hpfs_init_inode(struct inode *i)
{
struct super_block *sb = i->i_sb;
struct hpfs_inode_info *hpfs_inode = hpfs_i(i);
i->i_uid = hpfs_sb(sb)->sb_uid;
i->i_gid = hpfs_sb(sb)->sb_gid;
i->i_mode = hpfs_sb(sb)->sb_mode;
hpfs_inode->i_conv = hpfs_sb(sb)->sb_conv;
i->i_size = -1;
i->i_blocks = -1;
hpfs_inode->i_dno = 0;
hpfs_inode->i_n_secs = 0;
hpfs_inode->i_file_sec = 0;
hpfs_inode->i_disk_sec = 0;
hpfs_inode->i_dpos = 0;
hpfs_inode->i_dsubdno = 0;
hpfs_inode->i_ea_mode = 0;
hpfs_inode->i_ea_uid = 0;
hpfs_inode->i_ea_gid = 0;
hpfs_inode->i_ea_size = 0;
hpfs_inode->i_rddir_off = NULL;
hpfs_inode->i_dirty = 0;
i->i_ctime.tv_sec = i->i_ctime.tv_nsec = 0;
i->i_mtime.tv_sec = i->i_mtime.tv_nsec = 0;
i->i_atime.tv_sec = i->i_atime.tv_nsec = 0;
}
void hpfs_read_inode(struct inode *i)
{
struct buffer_head *bh;
struct fnode *fnode;
struct super_block *sb = i->i_sb;
struct hpfs_inode_info *hpfs_inode = hpfs_i(i);
void *ea;
int ea_size;
if (!(fnode = hpfs_map_fnode(sb, i->i_ino, &bh))) {
/*i->i_mode |= S_IFREG;
i->i_mode &= ~0111;
i->i_op = &hpfs_file_iops;
i->i_fop = &hpfs_file_ops;
i->i_nlink = 0;*/
make_bad_inode(i);
return;
}
if (hpfs_sb(i->i_sb)->sb_eas) {
if ((ea = hpfs_get_ea(i->i_sb, fnode, "UID", &ea_size))) {
if (ea_size == 2) {
i->i_uid = le16_to_cpu(*(__le16*)ea);
hpfs_inode->i_ea_uid = 1;
}
kfree(ea);
}
if ((ea = hpfs_get_ea(i->i_sb, fnode, "GID", &ea_size))) {
if (ea_size == 2) {
i->i_gid = le16_to_cpu(*(__le16*)ea);
hpfs_inode->i_ea_gid = 1;
}
kfree(ea);
}
if ((ea = hpfs_get_ea(i->i_sb, fnode, "SYMLINK", &ea_size))) {
kfree(ea);
i->i_mode = S_IFLNK | 0777;
i->i_op = &page_symlink_inode_operations;
i->i_data.a_ops = &hpfs_symlink_aops;
i->i_nlink = 1;
i->i_size = ea_size;
i->i_blocks = 1;
brelse(bh);
return;
}
if ((ea = hpfs_get_ea(i->i_sb, fnode, "MODE", &ea_size))) {
int rdev = 0;
umode_t mode = hpfs_sb(sb)->sb_mode;
if (ea_size == 2) {
mode = le16_to_cpu(*(__le16*)ea);
hpfs_inode->i_ea_mode = 1;
}
kfree(ea);
i->i_mode = mode;
if (S_ISBLK(mode) || S_ISCHR(mode)) {
if ((ea = hpfs_get_ea(i->i_sb, fnode, "DEV", &ea_size))) {
if (ea_size == 4)
rdev = le32_to_cpu(*(__le32*)ea);
kfree(ea);
}
}
if (S_ISBLK(mode) || S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
brelse(bh);
i->i_nlink = 1;
i->i_size = 0;
i->i_blocks = 1;
init_special_inode(i, mode,
new_decode_dev(rdev));
return;
}
}
}
if (fnode->dirflag) {
int n_dnodes, n_subdirs;
i->i_mode |= S_IFDIR;
i->i_op = &hpfs_dir_iops;
i->i_fop = &hpfs_dir_ops;
hpfs_inode->i_parent_dir = fnode->up;
hpfs_inode->i_dno = fnode->u.external[0].disk_secno;
if (hpfs_sb(sb)->sb_chk >= 2) {
struct buffer_head *bh0;
if (hpfs_map_fnode(sb, hpfs_inode->i_parent_dir, &bh0)) brelse(bh0);
}
n_dnodes = 0; n_subdirs = 0;
hpfs_count_dnodes(i->i_sb, hpfs_inode->i_dno, &n_dnodes, &n_subdirs, NULL);
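/* a dnode is 2 KiB, i.e. four 512-byte blocks */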
i->i_blocks = 4 * n_dnodes;
i->i_size = 2048 * n_dnodes;
i->i_nlink = 2 + n_subdirs;
} else {
i->i_mode |= S_IFREG;
if (!hpfs_inode->i_ea_mode) i->i_mode &= ~0111;
i->i_op = &hpfs_file_iops;
i->i_fop = &hpfs_file_ops;
i->i_nlink = 1;
i->i_size = fnode->file_size;
i->i_blocks = ((i->i_size + 511) >> 9) + 1;
i->i_data.a_ops = &hpfs_aops;
hpfs_i(i)->mmu_private = i->i_size;
}
brelse(bh);
}
static void hpfs_write_inode_ea(struct inode *i, struct fnode *fnode)
{
struct hpfs_inode_info *hpfs_inode = hpfs_i(i);
/*if (fnode->acl_size_l || fnode->acl_size_s) {
Some unknown structures like ACL may be in fnode,
we'd better not overwrite them
hpfs_error(i->i_sb, "fnode %08x has some unknown HPFS386 structures", i->i_ino);
} else*/ if (hpfs_sb(i->i_sb)->sb_eas >= 2) {
__le32 ea;
if ((i->i_uid != hpfs_sb(i->i_sb)->sb_uid) || hpfs_inode->i_ea_uid) {
ea = cpu_to_le32(i->i_uid);
hpfs_set_ea(i, fnode, "UID", (char*)&ea, 2);
hpfs_inode->i_ea_uid = 1;
}
if ((i->i_gid != hpfs_sb(i->i_sb)->sb_gid) || hpfs_inode->i_ea_gid) {
ea = cpu_to_le32(i->i_gid);
hpfs_set_ea(i, fnode, "GID", (char *)&ea, 2);
hpfs_inode->i_ea_gid = 1;
}
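/*
* Write a MODE EA only when the mode differs from both defaults
* derived from sb_mode (the writable and the read-only variant),
* or when a MODE EA already exists on disk.
*/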
if (!S_ISLNK(i->i_mode))
if ((i->i_mode != ((hpfs_sb(i->i_sb)->sb_mode & ~(S_ISDIR(i->i_mode) ? 0 : 0111))
| (S_ISDIR(i->i_mode) ? S_IFDIR : S_IFREG))
&& i->i_mode != ((hpfs_sb(i->i_sb)->sb_mode & ~(S_ISDIR(i->i_mode) ? 0222 : 0333))
| (S_ISDIR(i->i_mode) ? S_IFDIR : S_IFREG))) || hpfs_inode->i_ea_mode) {
ea = cpu_to_le32(i->i_mode);
/* sick, but legal */
hpfs_set_ea(i, fnode, "MODE", (char *)&ea, 2);
hpfs_inode->i_ea_mode = 1;
}
if (S_ISBLK(i->i_mode) || S_ISCHR(i->i_mode)) {
ea = cpu_to_le32(new_encode_dev(i->i_rdev));
hpfs_set_ea(i, fnode, "DEV", (char *)&ea, 4);
}
}
}
void hpfs_write_inode(struct inode *i)
{
struct hpfs_inode_info *hpfs_inode = hpfs_i(i);
struct inode *parent;
if (i->i_ino == hpfs_sb(i->i_sb)->sb_root) return;
if (hpfs_inode->i_rddir_off && !atomic_read(&i->i_count)) {
if (*hpfs_inode->i_rddir_off) printk("HPFS: write_inode: some position still there\n");
kfree(hpfs_inode->i_rddir_off);
hpfs_inode->i_rddir_off = NULL;
}
mutex_lock(&hpfs_inode->i_parent_mutex);
if (!i->i_nlink) {
mutex_unlock(&hpfs_inode->i_parent_mutex);
return;
}
parent = iget_locked(i->i_sb, hpfs_inode->i_parent_dir);
if (parent) {
hpfs_inode->i_dirty = 0;
if (parent->i_state & I_NEW) {
hpfs_init_inode(parent);
hpfs_read_inode(parent);
unlock_new_inode(parent);
}
mutex_lock(&hpfs_inode->i_mutex);
hpfs_write_inode_nolock(i);
mutex_unlock(&hpfs_inode->i_mutex);
iput(parent);
} else {
mark_inode_dirty(i);
}
mutex_unlock(&hpfs_inode->i_parent_mutex);
}
void hpfs_write_inode_nolock(struct inode *i)
{
struct hpfs_inode_info *hpfs_inode = hpfs_i(i);
struct buffer_head *bh;
struct fnode *fnode;
struct quad_buffer_head qbh;
struct hpfs_dirent *de;
if (i->i_ino == hpfs_sb(i->i_sb)->sb_root) return;
if (!(fnode = hpfs_map_fnode(i->i_sb, i->i_ino, &bh))) return;
if (i->i_ino != hpfs_sb(i->i_sb)->sb_root && i->i_nlink) {
if (!(de = map_fnode_dirent(i->i_sb, i->i_ino, fnode, &qbh))) {
brelse(bh);
return;
}
} else de = NULL;
if (S_ISREG(i->i_mode)) {
fnode->file_size = i->i_size;
if (de) de->file_size = i->i_size;
} else if (S_ISDIR(i->i_mode)) {
fnode->file_size = 0;
if (de) de->file_size = 0;
}
hpfs_write_inode_ea(i, fnode);
if (de) {
de->write_date = gmt_to_local(i->i_sb, i->i_mtime.tv_sec);
de->read_date = gmt_to_local(i->i_sb, i->i_atime.tv_sec);
de->creation_date = gmt_to_local(i->i_sb, i->i_ctime.tv_sec);
de->read_only = !(i->i_mode & 0222);
de->ea_size = hpfs_inode->i_ea_size;
hpfs_mark_4buffers_dirty(&qbh);
hpfs_brelse4(&qbh);
}
if (S_ISDIR(i->i_mode)) {
if ((de = map_dirent(i, hpfs_inode->i_dno, "\001\001", 2, NULL, &qbh))) {
de->write_date = gmt_to_local(i->i_sb, i->i_mtime.tv_sec);
de->read_date = gmt_to_local(i->i_sb, i->i_atime.tv_sec);
de->creation_date = gmt_to_local(i->i_sb, i->i_ctime.tv_sec);
de->read_only = !(i->i_mode & 0222);
de->ea_size = /*hpfs_inode->i_ea_size*/0;
de->file_size = 0;
hpfs_mark_4buffers_dirty(&qbh);
hpfs_brelse4(&qbh);
} else
hpfs_error(i->i_sb,
"directory %08lx doesn't have '.' entry",
(unsigned long)i->i_ino);
}
mark_buffer_dirty(bh);
brelse(bh);
}
int hpfs_setattr(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = dentry->d_inode;
int error = -EINVAL;
lock_kernel();
if (inode->i_ino == hpfs_sb(inode->i_sb)->sb_root)
goto out_unlock;
if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size)
goto out_unlock;
error = inode_change_ok(inode, attr);
if (error)
goto out_unlock;
if ((attr->ia_valid & ATTR_SIZE) &&
attr->ia_size != i_size_read(inode)) {
error = vmtruncate(inode, attr->ia_size);
if (error)
return error;
}
setattr_copy(inode, attr);
mark_inode_dirty(inode);
hpfs_write_inode(inode);
out_unlock:
unlock_kernel();
return error;
}
void hpfs_write_if_changed(struct inode *inode)
{
struct hpfs_inode_info *hpfs_inode = hpfs_i(inode);
if (hpfs_inode->i_dirty)
hpfs_write_inode(inode);
}
void hpfs_evict_inode(struct inode *inode)
{
truncate_inode_pages(&inode->i_data, 0);
end_writeback(inode);
if (!inode->i_nlink) {
lock_kernel();
hpfs_remove_fnode(inode->i_sb, inode->i_ino);
unlock_kernel();
}
}
| gpl-2.0 |
alpscale/linux | arch/arm/mach-s3c64xx/mach-smartq5.c | 440 | 3623 | /*
* linux/arch/arm/mach-s3c64xx/mach-smartq5.c
*
* Copyright (C) 2010 Maurus Cuelenaere
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/fb.h>
#include <linux/gpio.h>
#include <linux/gpio_keys.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/leds.h>
#include <linux/platform_device.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <video/samsung_fimd.h>
#include <mach/map.h>
#include <mach/regs-gpio.h>
#include <mach/gpio-samsung.h>
#include <plat/cpu.h>
#include <plat/devs.h>
#include <plat/fb.h>
#include <plat/gpio-cfg.h>
#include <plat/samsung-time.h>
#include "common.h"
#include "mach-smartq.h"
static struct gpio_led smartq5_leds[] = {
{
.name = "smartq5:green",
.active_low = 1,
.gpio = S3C64XX_GPN(8),
},
{
.name = "smartq5:red",
.active_low = 1,
.gpio = S3C64XX_GPN(9),
},
};
static struct gpio_led_platform_data smartq5_led_data = {
.num_leds = ARRAY_SIZE(smartq5_leds),
.leds = smartq5_leds,
};
static struct platform_device smartq5_leds_device = {
.name = "leds-gpio",
.id = -1,
.dev.platform_data = &smartq5_led_data,
};
/* Labels according to the SmartQ manual */
static struct gpio_keys_button smartq5_buttons[] = {
{
.gpio = S3C64XX_GPL(14),
.code = KEY_POWER,
.desc = "Power",
.active_low = 1,
.debounce_interval = 5,
.type = EV_KEY,
},
{
.gpio = S3C64XX_GPN(2),
.code = KEY_KPMINUS,
.desc = "Minus",
.active_low = 1,
.debounce_interval = 5,
.type = EV_KEY,
},
{
.gpio = S3C64XX_GPN(12),
.code = KEY_KPPLUS,
.desc = "Plus",
.active_low = 1,
.debounce_interval = 5,
.type = EV_KEY,
},
{
.gpio = S3C64XX_GPN(15),
.code = KEY_ENTER,
.desc = "Move",
.active_low = 1,
.debounce_interval = 5,
.type = EV_KEY,
},
};
static struct gpio_keys_platform_data smartq5_buttons_data = {
.buttons = smartq5_buttons,
.nbuttons = ARRAY_SIZE(smartq5_buttons),
};
static struct platform_device smartq5_buttons_device = {
.name = "gpio-keys",
.id = 0,
.num_resources = 0,
.dev = {
.platform_data = &smartq5_buttons_data,
}
};
static struct s3c_fb_pd_win smartq5_fb_win0 = {
.max_bpp = 32,
.default_bpp = 16,
.xres = 800,
.yres = 480,
};
static struct fb_videomode smartq5_lcd_timing = {
.left_margin = 216,
.right_margin = 40,
.upper_margin = 35,
.lower_margin = 10,
.hsync_len = 1,
.vsync_len = 1,
.xres = 800,
.yres = 480,
.refresh = 80,
};
static struct s3c_fb_platdata smartq5_lcd_pdata __initdata = {
.setup_gpio = s3c64xx_fb_gpio_setup_24bpp,
.vtiming = &smartq5_lcd_timing,
.win[0] = &smartq5_fb_win0,
.vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
.vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC |
VIDCON1_INV_VDEN,
};
static struct platform_device *smartq5_devices[] __initdata = {
&smartq5_leds_device,
&smartq5_buttons_device,
};
static void __init smartq5_machine_init(void)
{
s3c_fb_set_platdata(&smartq5_lcd_pdata);
smartq_machine_init();
platform_add_devices(smartq5_devices, ARRAY_SIZE(smartq5_devices));
}
MACHINE_START(SMARTQ5, "SmartQ 5")
/* Maintainer: Maurus Cuelenaere <mcuelenaere AT gmail DOT com> */
.atag_offset = 0x100,
.init_irq = s3c6410_init_irq,
.map_io = smartq_map_io,
.init_machine = smartq5_machine_init,
.init_late = s3c64xx_init_late,
.init_time = samsung_timer_init,
.restart = s3c64xx_restart,
MACHINE_END
| gpl-2.0 |
roguesyko/the_reaper_shamu | drivers/hid/hid-lenovo-tpkbd.c | 1976 | 13068 | /*
* HID driver for Lenovo ThinkPad USB Keyboard with TrackPoint
*
* Copyright (c) 2012 Bernhard Seibold
*/
/*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
#include <linux/module.h>
#include <linux/sysfs.h>
#include <linux/device.h>
#include <linux/usb.h>
#include <linux/hid.h>
#include <linux/input.h>
#include <linux/leds.h>
#include "usbhid/usbhid.h"
#include "hid-ids.h"
/* This is only used for the trackpoint part of the driver, hence _tp */
struct tpkbd_data_pointer {
int led_state;
struct led_classdev led_mute;
struct led_classdev led_micmute;
int press_to_select;
int dragging;
int release_to_select;
int select_right;
int sensitivity;
int press_speed;
};
#define map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, EV_KEY, (c))
static int tpkbd_input_mapping(struct hid_device *hdev,
struct hid_input *hi, struct hid_field *field,
struct hid_usage *usage, unsigned long **bit, int *max)
{
struct usbhid_device *uhdev;
uhdev = (struct usbhid_device *) hdev->driver_data;
if (uhdev->ifnum == 1 && usage->hid == (HID_UP_BUTTON | 0x0010)) {
map_key_clear(KEY_MICMUTE);
return 1;
}
return 0;
}
#undef map_key_clear
static int tpkbd_features_set(struct hid_device *hdev)
{
struct hid_report *report;
struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev);
report = hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[4];
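/* each setting appears to use an enable/disable bit pair in this report */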
report->field[0]->value[0] = data_pointer->press_to_select ? 0x01 : 0x02;
report->field[0]->value[0] |= data_pointer->dragging ? 0x04 : 0x08;
report->field[0]->value[0] |= data_pointer->release_to_select ? 0x10 : 0x20;
report->field[0]->value[0] |= data_pointer->select_right ? 0x80 : 0x40;
report->field[1]->value[0] = 0x03; /* unknown setting, imitate the Windows driver */
report->field[2]->value[0] = data_pointer->sensitivity;
report->field[3]->value[0] = data_pointer->press_speed;
hid_hw_request(hdev, report, HID_REQ_SET_REPORT);
return 0;
}
static ssize_t pointer_press_to_select_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct hid_device *hdev = container_of(dev, struct hid_device, dev);
struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev);
return snprintf(buf, PAGE_SIZE, "%u\n", data_pointer->press_to_select);
}
static ssize_t pointer_press_to_select_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
struct hid_device *hdev = container_of(dev, struct hid_device, dev);
struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev);
int value;
if (kstrtoint(buf, 10, &value))
return -EINVAL;
if (value < 0 || value > 1)
return -EINVAL;
data_pointer->press_to_select = value;
tpkbd_features_set(hdev);
return count;
}
static ssize_t pointer_dragging_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct hid_device *hdev = container_of(dev, struct hid_device, dev);
struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev);
return snprintf(buf, PAGE_SIZE, "%u\n", data_pointer->dragging);
}
static ssize_t pointer_dragging_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
struct hid_device *hdev = container_of(dev, struct hid_device, dev);
struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev);
int value;
if (kstrtoint(buf, 10, &value))
return -EINVAL;
if (value < 0 || value > 1)
return -EINVAL;
data_pointer->dragging = value;
tpkbd_features_set(hdev);
return count;
}
static ssize_t pointer_release_to_select_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct hid_device *hdev = container_of(dev, struct hid_device, dev);
struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev);
return snprintf(buf, PAGE_SIZE, "%u\n", data_pointer->release_to_select);
}
static ssize_t pointer_release_to_select_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
struct hid_device *hdev = container_of(dev, struct hid_device, dev);
struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev);
int value;
if (kstrtoint(buf, 10, &value))
return -EINVAL;
if (value < 0 || value > 1)
return -EINVAL;
data_pointer->release_to_select = value;
tpkbd_features_set(hdev);
return count;
}
static ssize_t pointer_select_right_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct hid_device *hdev = container_of(dev, struct hid_device, dev);
struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev);
return snprintf(buf, PAGE_SIZE, "%u\n", data_pointer->select_right);
}
static ssize_t pointer_select_right_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
struct hid_device *hdev = container_of(dev, struct hid_device, dev);
struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev);
int value;
if (kstrtoint(buf, 10, &value))
return -EINVAL;
if (value < 0 || value > 1)
return -EINVAL;
data_pointer->select_right = value;
tpkbd_features_set(hdev);
return count;
}
static ssize_t pointer_sensitivity_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct hid_device *hdev = container_of(dev, struct hid_device, dev);
struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev);
return snprintf(buf, PAGE_SIZE, "%u\n",
data_pointer->sensitivity);
}
static ssize_t pointer_sensitivity_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
struct hid_device *hdev = container_of(dev, struct hid_device, dev);
struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev);
int value;
if (kstrtoint(buf, 10, &value) || value < 1 || value > 255)
return -EINVAL;
data_pointer->sensitivity = value;
tpkbd_features_set(hdev);
return count;
}
static ssize_t pointer_press_speed_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct hid_device *hdev = container_of(dev, struct hid_device, dev);
struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev);
return snprintf(buf, PAGE_SIZE, "%u\n",
data_pointer->press_speed);
}
static ssize_t pointer_press_speed_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
struct hid_device *hdev = container_of(dev, struct hid_device, dev);
struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev);
int value;
if (kstrtoint(buf, 10, &value) || value < 1 || value > 255)
return -EINVAL;
data_pointer->press_speed = value;
tpkbd_features_set(hdev);
return count;
}
static struct device_attribute dev_attr_pointer_press_to_select =
__ATTR(press_to_select, S_IWUSR | S_IRUGO,
pointer_press_to_select_show,
pointer_press_to_select_store);
static struct device_attribute dev_attr_pointer_dragging =
__ATTR(dragging, S_IWUSR | S_IRUGO,
pointer_dragging_show,
pointer_dragging_store);
static struct device_attribute dev_attr_pointer_release_to_select =
__ATTR(release_to_select, S_IWUSR | S_IRUGO,
pointer_release_to_select_show,
pointer_release_to_select_store);
static struct device_attribute dev_attr_pointer_select_right =
__ATTR(select_right, S_IWUSR | S_IRUGO,
pointer_select_right_show,
pointer_select_right_store);
static struct device_attribute dev_attr_pointer_sensitivity =
__ATTR(sensitivity, S_IWUSR | S_IRUGO,
pointer_sensitivity_show,
pointer_sensitivity_store);
static struct device_attribute dev_attr_pointer_press_speed =
__ATTR(press_speed, S_IWUSR | S_IRUGO,
pointer_press_speed_show,
pointer_press_speed_store);
static struct attribute *tpkbd_attributes_pointer[] = {
&dev_attr_pointer_press_to_select.attr,
&dev_attr_pointer_dragging.attr,
&dev_attr_pointer_release_to_select.attr,
&dev_attr_pointer_select_right.attr,
&dev_attr_pointer_sensitivity.attr,
&dev_attr_pointer_press_speed.attr,
NULL
};
static const struct attribute_group tpkbd_attr_group_pointer = {
.attrs = tpkbd_attributes_pointer,
};
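#if 0
/*
 * Illustrative userspace sketch, not part of the driver: once the group
 * above is registered, the attributes appear in the HID device's sysfs
 * directory. The device path below is a made-up example -- the real one
 * depends on the bus topology.
 */
#include <stdio.h>
int main(void)
{
FILE *f = fopen("/sys/bus/hid/devices/0003:17EF:6009.0001/press_to_select", "w");
if (!f)
return 1;
fputs("1", f); /* the store callbacks accept only "0" or "1" */
return fclose(f) ? 1 : 0;
}
#endif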
static enum led_brightness tpkbd_led_brightness_get(
struct led_classdev *led_cdev)
{
struct device *dev = led_cdev->dev->parent;
struct hid_device *hdev = container_of(dev, struct hid_device, dev);
struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev);
int led_nr = 0;
if (led_cdev == &data_pointer->led_micmute)
led_nr = 1;
return data_pointer->led_state & (1 << led_nr)
? LED_FULL
: LED_OFF;
}
static void tpkbd_led_brightness_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
struct device *dev = led_cdev->dev->parent;
struct hid_device *hdev = container_of(dev, struct hid_device, dev);
struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev);
struct hid_report *report;
int led_nr = 0;
if (led_cdev == &data_pointer->led_micmute)
led_nr = 1;
if (value == LED_OFF)
data_pointer->led_state &= ~(1 << led_nr);
else
data_pointer->led_state |= 1 << led_nr;
report = hdev->report_enum[HID_OUTPUT_REPORT].report_id_hash[3];
report->field[0]->value[0] = (data_pointer->led_state >> 0) & 1;
report->field[0]->value[1] = (data_pointer->led_state >> 1) & 1;
hid_hw_request(hdev, report, HID_REQ_SET_REPORT);
}
static int tpkbd_probe_tp(struct hid_device *hdev)
{
struct device *dev = &hdev->dev;
struct tpkbd_data_pointer *data_pointer;
size_t name_sz = strlen(dev_name(dev)) + 16;
char *name_mute, *name_micmute;
int i, ret;
/* Validate required reports. */
for (i = 0; i < 4; i++) {
if (!hid_validate_values(hdev, HID_FEATURE_REPORT, 4, i, 1))
return -ENODEV;
}
if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, 3, 0, 2))
return -ENODEV;
if (sysfs_create_group(&hdev->dev.kobj,
&tpkbd_attr_group_pointer)) {
hid_warn(hdev, "Could not create sysfs group\n");
}
data_pointer = kzalloc(sizeof(struct tpkbd_data_pointer), GFP_KERNEL);
if (data_pointer == NULL) {
hid_err(hdev, "Could not allocate memory for driver data\n");
return -ENOMEM;
}
/* set the same default values as the Windows driver */
data_pointer->sensitivity = 0xa0;
data_pointer->press_speed = 0x38;
name_mute = kzalloc(name_sz, GFP_KERNEL);
if (name_mute == NULL) {
hid_err(hdev, "Could not allocate memory for led data\n");
ret = -ENOMEM;
goto err;
}
snprintf(name_mute, name_sz, "%s:amber:mute", dev_name(dev));
name_micmute = kzalloc(name_sz, GFP_KERNEL);
if (name_micmute == NULL) {
hid_err(hdev, "Could not allocate memory for led data\n");
ret = -ENOMEM;
goto err2;
}
snprintf(name_micmute, name_sz, "%s:amber:micmute", dev_name(dev));
hid_set_drvdata(hdev, data_pointer);
data_pointer->led_mute.name = name_mute;
data_pointer->led_mute.brightness_get = tpkbd_led_brightness_get;
data_pointer->led_mute.brightness_set = tpkbd_led_brightness_set;
data_pointer->led_mute.dev = dev;
led_classdev_register(dev, &data_pointer->led_mute);
data_pointer->led_micmute.name = name_micmute;
data_pointer->led_micmute.brightness_get = tpkbd_led_brightness_get;
data_pointer->led_micmute.brightness_set = tpkbd_led_brightness_set;
data_pointer->led_micmute.dev = dev;
led_classdev_register(dev, &data_pointer->led_micmute);
tpkbd_features_set(hdev);
return 0;
err2:
kfree(name_mute);
err:
kfree(data_pointer);
return ret;
}
static int tpkbd_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
int ret;
struct usbhid_device *uhdev;
ret = hid_parse(hdev);
if (ret) {
hid_err(hdev, "hid_parse failed\n");
goto err;
}
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (ret) {
hid_err(hdev, "hid_hw_start failed\n");
goto err;
}
uhdev = (struct usbhid_device *) hdev->driver_data;
if (uhdev->ifnum == 1) {
ret = tpkbd_probe_tp(hdev);
if (ret)
goto err_hid;
}
return 0;
err_hid:
hid_hw_stop(hdev);
err:
return ret;
}
static void tpkbd_remove_tp(struct hid_device *hdev)
{
struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev);
sysfs_remove_group(&hdev->dev.kobj,
&tpkbd_attr_group_pointer);
led_classdev_unregister(&data_pointer->led_micmute);
led_classdev_unregister(&data_pointer->led_mute);
hid_set_drvdata(hdev, NULL);
kfree(data_pointer->led_micmute.name);
kfree(data_pointer->led_mute.name);
kfree(data_pointer);
}
static void tpkbd_remove(struct hid_device *hdev)
{
struct usbhid_device *uhdev;
uhdev = (struct usbhid_device *) hdev->driver_data;
if (uhdev->ifnum == 1)
tpkbd_remove_tp(hdev);
hid_hw_stop(hdev);
}
static const struct hid_device_id tpkbd_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) },
{ }
};
MODULE_DEVICE_TABLE(hid, tpkbd_devices);
static struct hid_driver tpkbd_driver = {
.name = "lenovo_tpkbd",
.id_table = tpkbd_devices,
.input_mapping = tpkbd_input_mapping,
.probe = tpkbd_probe,
.remove = tpkbd_remove,
};
module_hid_driver(tpkbd_driver);
MODULE_LICENSE("GPL");
| gpl-2.0 |
shminer/android_kernel_flounder | drivers/mtd/ubi/vmt.c | 2232 | 23531 | /*
* Copyright (c) International Business Machines Corp., 2006
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Artem Bityutskiy (Битюцкий Артём)
*/
/*
* This file contains implementation of volume creation, deletion, updating and
* resizing.
*/
#include <linux/err.h>
#include <linux/math64.h>
#include <linux/slab.h>
#include <linux/export.h>
#include "ubi.h"
static int self_check_volumes(struct ubi_device *ubi);
static ssize_t vol_attribute_show(struct device *dev,
struct device_attribute *attr, char *buf);
/* Device attributes corresponding to files in '/<sysfs>/class/ubi/ubiX_Y' */
static struct device_attribute attr_vol_reserved_ebs =
__ATTR(reserved_ebs, S_IRUGO, vol_attribute_show, NULL);
static struct device_attribute attr_vol_type =
__ATTR(type, S_IRUGO, vol_attribute_show, NULL);
static struct device_attribute attr_vol_name =
__ATTR(name, S_IRUGO, vol_attribute_show, NULL);
static struct device_attribute attr_vol_corrupted =
__ATTR(corrupted, S_IRUGO, vol_attribute_show, NULL);
static struct device_attribute attr_vol_alignment =
__ATTR(alignment, S_IRUGO, vol_attribute_show, NULL);
static struct device_attribute attr_vol_usable_eb_size =
__ATTR(usable_eb_size, S_IRUGO, vol_attribute_show, NULL);
static struct device_attribute attr_vol_data_bytes =
__ATTR(data_bytes, S_IRUGO, vol_attribute_show, NULL);
static struct device_attribute attr_vol_upd_marker =
__ATTR(upd_marker, S_IRUGO, vol_attribute_show, NULL);
/*
* "Show" method for files in '/<sysfs>/class/ubi/ubiX_Y/'.
*
* Consider a situation:
* A. process 1 opens a sysfs file related to volume Y, say
* /<sysfs>/class/ubi/ubiX_Y/reserved_ebs;
* B. process 2 removes volume Y;
* C. process 1 starts reading the /<sysfs>/class/ubi/ubiX_Y/reserved_ebs file;
*
* In this situation, this function will return %-ENODEV because it will find
* out that the volume was removed from the @ubi->volumes array.
*/
static ssize_t vol_attribute_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret;
struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev);
struct ubi_device *ubi;
ubi = ubi_get_device(vol->ubi->ubi_num);
if (!ubi)
return -ENODEV;
spin_lock(&ubi->volumes_lock);
if (!ubi->volumes[vol->vol_id]) {
spin_unlock(&ubi->volumes_lock);
ubi_put_device(ubi);
return -ENODEV;
}
/* Take a reference to prevent volume removal */
vol->ref_count += 1;
spin_unlock(&ubi->volumes_lock);
if (attr == &attr_vol_reserved_ebs)
ret = sprintf(buf, "%d\n", vol->reserved_pebs);
else if (attr == &attr_vol_type) {
const char *tp;
if (vol->vol_type == UBI_DYNAMIC_VOLUME)
tp = "dynamic";
else
tp = "static";
ret = sprintf(buf, "%s\n", tp);
} else if (attr == &attr_vol_name)
ret = sprintf(buf, "%s\n", vol->name);
else if (attr == &attr_vol_corrupted)
ret = sprintf(buf, "%d\n", vol->corrupted);
else if (attr == &attr_vol_alignment)
ret = sprintf(buf, "%d\n", vol->alignment);
else if (attr == &attr_vol_usable_eb_size)
ret = sprintf(buf, "%d\n", vol->usable_leb_size);
else if (attr == &attr_vol_data_bytes)
ret = sprintf(buf, "%lld\n", vol->used_bytes);
else if (attr == &attr_vol_upd_marker)
ret = sprintf(buf, "%d\n", vol->upd_marker);
else
/* This must be a bug */
ret = -EINVAL;
/* We've done the operation, drop volume and UBI device references */
spin_lock(&ubi->volumes_lock);
vol->ref_count -= 1;
ubi_assert(vol->ref_count >= 0);
spin_unlock(&ubi->volumes_lock);
ubi_put_device(ubi);
return ret;
}
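#if 0
/*
 * Illustrative sketch, not part of this file: the guard pattern described
 * above, reduced to its skeleton. Holding ref_count elevated keeps the
 * volume alive; ubi_remove_volume() bails out with -EBUSY while
 * ref_count > 1.
 */
static int example_guarded_access(struct ubi_device *ubi, int vol_id)
{
struct ubi_volume *vol;
spin_lock(&ubi->volumes_lock);
vol = ubi->volumes[vol_id];
if (!vol) {
spin_unlock(&ubi->volumes_lock);
return -ENODEV;
}
vol->ref_count += 1;
spin_unlock(&ubi->volumes_lock);
/* ... safe to read vol fields here ... */
spin_lock(&ubi->volumes_lock);
vol->ref_count -= 1;
spin_unlock(&ubi->volumes_lock);
return 0;
}
#endif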
/* Release method for volume devices */
static void vol_release(struct device *dev)
{
struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev);
kfree(vol->eba_tbl);
kfree(vol);
}
/**
* volume_sysfs_init - initialize sysfs for new volume.
* @ubi: UBI device description object
* @vol: volume description object
*
* This function returns zero in case of success and a negative error code in
* case of failure.
*
* Note, this function does not free allocated resources in case of failure -
* the caller does it. Cleaning up here would end up calling release(), and
* the caller, which also frees the volume, would then oops.
*/
static int volume_sysfs_init(struct ubi_device *ubi, struct ubi_volume *vol)
{
int err;
err = device_create_file(&vol->dev, &attr_vol_reserved_ebs);
if (err)
return err;
err = device_create_file(&vol->dev, &attr_vol_type);
if (err)
return err;
err = device_create_file(&vol->dev, &attr_vol_name);
if (err)
return err;
err = device_create_file(&vol->dev, &attr_vol_corrupted);
if (err)
return err;
err = device_create_file(&vol->dev, &attr_vol_alignment);
if (err)
return err;
err = device_create_file(&vol->dev, &attr_vol_usable_eb_size);
if (err)
return err;
err = device_create_file(&vol->dev, &attr_vol_data_bytes);
if (err)
return err;
err = device_create_file(&vol->dev, &attr_vol_upd_marker);
return err;
}
/**
* volume_sysfs_close - close sysfs for a volume.
* @vol: volume description object
*/
static void volume_sysfs_close(struct ubi_volume *vol)
{
device_remove_file(&vol->dev, &attr_vol_upd_marker);
device_remove_file(&vol->dev, &attr_vol_data_bytes);
device_remove_file(&vol->dev, &attr_vol_usable_eb_size);
device_remove_file(&vol->dev, &attr_vol_alignment);
device_remove_file(&vol->dev, &attr_vol_corrupted);
device_remove_file(&vol->dev, &attr_vol_name);
device_remove_file(&vol->dev, &attr_vol_type);
device_remove_file(&vol->dev, &attr_vol_reserved_ebs);
device_unregister(&vol->dev);
}
/**
* ubi_create_volume - create volume.
* @ubi: UBI device description object
* @req: volume creation request
*
* This function creates the volume described by @req. If @req->vol_id is
* %UBI_VOL_NUM_AUTO, this function automatically assigns an ID to the new volume
* and saves it in @req->vol_id. Returns zero in case of success and a negative
* error code in case of failure. Note, the caller has to have the
* @ubi->device_mutex locked.
*/
int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
{
int i, err, vol_id = req->vol_id, do_free = 1;
struct ubi_volume *vol;
struct ubi_vtbl_record vtbl_rec;
dev_t dev;
if (ubi->ro_mode)
return -EROFS;
vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL);
if (!vol)
return -ENOMEM;
spin_lock(&ubi->volumes_lock);
if (vol_id == UBI_VOL_NUM_AUTO) {
/* Find unused volume ID */
dbg_gen("search for vacant volume ID");
for (i = 0; i < ubi->vtbl_slots; i++)
if (!ubi->volumes[i]) {
vol_id = i;
break;
}
if (vol_id == UBI_VOL_NUM_AUTO) {
ubi_err("out of volume IDs");
err = -ENFILE;
goto out_unlock;
}
req->vol_id = vol_id;
}
dbg_gen("create device %d, volume %d, %llu bytes, type %d, name %s",
ubi->ubi_num, vol_id, (unsigned long long)req->bytes,
(int)req->vol_type, req->name);
/* Ensure that this volume does not exist */
err = -EEXIST;
if (ubi->volumes[vol_id]) {
ubi_err("volume %d already exists", vol_id);
goto out_unlock;
}
/* Ensure that the name is unique */
for (i = 0; i < ubi->vtbl_slots; i++)
if (ubi->volumes[i] &&
ubi->volumes[i]->name_len == req->name_len &&
!strcmp(ubi->volumes[i]->name, req->name)) {
ubi_err("volume \"%s\" exists (ID %d)", req->name, i);
goto out_unlock;
}
/* Calculate how many eraseblocks are requested */
vol->usable_leb_size = ubi->leb_size - ubi->leb_size % req->alignment;
vol->reserved_pebs += div_u64(req->bytes + vol->usable_leb_size - 1,
vol->usable_leb_size);
/* Reserve physical eraseblocks */
if (vol->reserved_pebs > ubi->avail_pebs) {
ubi_err("not enough PEBs, only %d available", ubi->avail_pebs);
if (ubi->corr_peb_count)
ubi_err("%d PEBs are corrupted and not used",
ubi->corr_peb_count);
err = -ENOSPC;
goto out_unlock;
}
ubi->avail_pebs -= vol->reserved_pebs;
ubi->rsvd_pebs += vol->reserved_pebs;
spin_unlock(&ubi->volumes_lock);
vol->vol_id = vol_id;
vol->alignment = req->alignment;
vol->data_pad = ubi->leb_size % vol->alignment;
vol->vol_type = req->vol_type;
vol->name_len = req->name_len;
memcpy(vol->name, req->name, vol->name_len);
vol->ubi = ubi;
/*
* Finish all pending erases because there may be some LEBs belonging
* to the same volume ID.
*/
err = ubi_wl_flush(ubi, vol_id, UBI_ALL);
if (err)
goto out_acc;
vol->eba_tbl = kmalloc(vol->reserved_pebs * sizeof(int), GFP_KERNEL);
if (!vol->eba_tbl) {
err = -ENOMEM;
goto out_acc;
}
for (i = 0; i < vol->reserved_pebs; i++)
vol->eba_tbl[i] = UBI_LEB_UNMAPPED;
if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
vol->used_ebs = vol->reserved_pebs;
vol->last_eb_bytes = vol->usable_leb_size;
vol->used_bytes =
(long long)vol->used_ebs * vol->usable_leb_size;
} else {
vol->used_ebs = div_u64_rem(vol->used_bytes,
vol->usable_leb_size,
&vol->last_eb_bytes);
if (vol->last_eb_bytes != 0)
vol->used_ebs += 1;
else
vol->last_eb_bytes = vol->usable_leb_size;
}
/* Register character device for the volume */
cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
vol->cdev.owner = THIS_MODULE;
dev = MKDEV(MAJOR(ubi->cdev.dev), vol_id + 1);
err = cdev_add(&vol->cdev, dev, 1);
if (err) {
ubi_err("cannot add character device");
goto out_mapping;
}
vol->dev.release = vol_release;
vol->dev.parent = &ubi->dev;
vol->dev.devt = dev;
vol->dev.class = ubi_class;
dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
err = device_register(&vol->dev);
if (err) {
ubi_err("cannot register device");
goto out_cdev;
}
err = volume_sysfs_init(ubi, vol);
if (err)
goto out_sysfs;
/* Fill volume table record */
memset(&vtbl_rec, 0, sizeof(struct ubi_vtbl_record));
vtbl_rec.reserved_pebs = cpu_to_be32(vol->reserved_pebs);
vtbl_rec.alignment = cpu_to_be32(vol->alignment);
vtbl_rec.data_pad = cpu_to_be32(vol->data_pad);
vtbl_rec.name_len = cpu_to_be16(vol->name_len);
if (vol->vol_type == UBI_DYNAMIC_VOLUME)
vtbl_rec.vol_type = UBI_VID_DYNAMIC;
else
vtbl_rec.vol_type = UBI_VID_STATIC;
memcpy(vtbl_rec.name, vol->name, vol->name_len);
err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
if (err)
goto out_sysfs;
spin_lock(&ubi->volumes_lock);
ubi->volumes[vol_id] = vol;
ubi->vol_count += 1;
spin_unlock(&ubi->volumes_lock);
ubi_volume_notify(ubi, vol, UBI_VOLUME_ADDED);
self_check_volumes(ubi);
return err;
out_sysfs:
/*
* We have registered our device, we should not free the volume
* description object in this function in case of an error - it is
* freed by the release function.
*
* Get device reference to prevent the release function from being
* called just after sysfs has been closed.
*/
do_free = 0;
get_device(&vol->dev);
volume_sysfs_close(vol);
out_cdev:
cdev_del(&vol->cdev);
out_mapping:
if (do_free)
kfree(vol->eba_tbl);
out_acc:
spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs -= vol->reserved_pebs;
ubi->avail_pebs += vol->reserved_pebs;
out_unlock:
spin_unlock(&ubi->volumes_lock);
if (do_free)
kfree(vol);
else
put_device(&vol->dev);
ubi_err("cannot create volume %d, error %d", vol_id, err);
return err;
}
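#if 0
/*
 * Illustrative in-kernel usage sketch, not part of this file: creating a
 * 1 MiB dynamic volume with an automatically assigned ID. The size and
 * name are made-up values.
 */
static int example_mkvol(struct ubi_device *ubi)
{
struct ubi_mkvol_req req = {
.vol_id = UBI_VOL_NUM_AUTO, /* let UBI pick a free ID */
.alignment = 1,
.bytes = 1024 * 1024,
.vol_type = UBI_DYNAMIC_VOLUME,
.name_len = 4,
.name = "test",
};
int err;
mutex_lock(&ubi->device_mutex);
err = ubi_create_volume(ubi, &req);
mutex_unlock(&ubi->device_mutex);
/* on success, req.vol_id now holds the assigned ID */
return err;
}
#endif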
/**
* ubi_remove_volume - remove volume.
* @desc: volume descriptor
* @no_vtbl: do not change volume table if not zero
*
* This function removes volume described by @desc. The volume has to be opened
* in "exclusive" mode. Returns zero in case of success and a negative error
* code in case of failure. The caller has to have the @ubi->device_mutex
* locked.
*/
int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl)
{
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
int i, err, vol_id = vol->vol_id, reserved_pebs = vol->reserved_pebs;
dbg_gen("remove device %d, volume %d", ubi->ubi_num, vol_id);
ubi_assert(desc->mode == UBI_EXCLUSIVE);
ubi_assert(vol == ubi->volumes[vol_id]);
if (ubi->ro_mode)
return -EROFS;
spin_lock(&ubi->volumes_lock);
if (vol->ref_count > 1) {
/*
* The volume is busy, probably someone is reading one of its
* sysfs files.
*/
err = -EBUSY;
goto out_unlock;
}
ubi->volumes[vol_id] = NULL;
spin_unlock(&ubi->volumes_lock);
if (!no_vtbl) {
err = ubi_change_vtbl_record(ubi, vol_id, NULL);
if (err)
goto out_err;
}
for (i = 0; i < vol->reserved_pebs; i++) {
err = ubi_eba_unmap_leb(ubi, vol, i);
if (err)
goto out_err;
}
cdev_del(&vol->cdev);
volume_sysfs_close(vol);
spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs -= reserved_pebs;
ubi->avail_pebs += reserved_pebs;
ubi_update_reserved(ubi);
ubi->vol_count -= 1;
spin_unlock(&ubi->volumes_lock);
ubi_volume_notify(ubi, vol, UBI_VOLUME_REMOVED);
if (!no_vtbl)
self_check_volumes(ubi);
return err;
out_err:
ubi_err("cannot remove volume %d, error %d", vol_id, err);
spin_lock(&ubi->volumes_lock);
ubi->volumes[vol_id] = vol;
out_unlock:
spin_unlock(&ubi->volumes_lock);
return err;
}
/**
* ubi_resize_volume - re-size volume.
* @desc: volume descriptor
* @reserved_pebs: new size in physical eraseblocks
*
* This function re-sizes the volume and returns zero in case of success, and a
* negative error code in case of failure. The caller has to have the
* @ubi->device_mutex locked.
*/
int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
{
int i, err, pebs, *new_mapping;
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
struct ubi_vtbl_record vtbl_rec;
int vol_id = vol->vol_id;
if (ubi->ro_mode)
return -EROFS;
dbg_gen("re-size device %d, volume %d to from %d to %d PEBs",
ubi->ubi_num, vol_id, vol->reserved_pebs, reserved_pebs);
if (vol->vol_type == UBI_STATIC_VOLUME &&
reserved_pebs < vol->used_ebs) {
ubi_err("too small size %d, %d LEBs contain data",
reserved_pebs, vol->used_ebs);
return -EINVAL;
}
/* If the size is the same, we have nothing to do */
if (reserved_pebs == vol->reserved_pebs)
return 0;
new_mapping = kmalloc(reserved_pebs * sizeof(int), GFP_KERNEL);
if (!new_mapping)
return -ENOMEM;
for (i = 0; i < reserved_pebs; i++)
new_mapping[i] = UBI_LEB_UNMAPPED;
spin_lock(&ubi->volumes_lock);
if (vol->ref_count > 1) {
spin_unlock(&ubi->volumes_lock);
err = -EBUSY;
goto out_free;
}
spin_unlock(&ubi->volumes_lock);
/* Reserve physical eraseblocks */
pebs = reserved_pebs - vol->reserved_pebs;
if (pebs > 0) {
spin_lock(&ubi->volumes_lock);
if (pebs > ubi->avail_pebs) {
ubi_err("not enough PEBs: requested %d, available %d",
pebs, ubi->avail_pebs);
if (ubi->corr_peb_count)
ubi_err("%d PEBs are corrupted and not used",
ubi->corr_peb_count);
spin_unlock(&ubi->volumes_lock);
err = -ENOSPC;
goto out_free;
}
ubi->avail_pebs -= pebs;
ubi->rsvd_pebs += pebs;
for (i = 0; i < vol->reserved_pebs; i++)
new_mapping[i] = vol->eba_tbl[i];
kfree(vol->eba_tbl);
vol->eba_tbl = new_mapping;
spin_unlock(&ubi->volumes_lock);
}
/* Change volume table record */
vtbl_rec = ubi->vtbl[vol_id];
vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs);
err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
if (err)
goto out_acc;
if (pebs < 0) {
for (i = 0; i < -pebs; i++) {
err = ubi_eba_unmap_leb(ubi, vol, reserved_pebs + i);
if (err)
goto out_acc;
}
spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs += pebs;
ubi->avail_pebs -= pebs;
ubi_update_reserved(ubi);
for (i = 0; i < reserved_pebs; i++)
new_mapping[i] = vol->eba_tbl[i];
kfree(vol->eba_tbl);
vol->eba_tbl = new_mapping;
spin_unlock(&ubi->volumes_lock);
}
vol->reserved_pebs = reserved_pebs;
if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
vol->used_ebs = reserved_pebs;
vol->last_eb_bytes = vol->usable_leb_size;
vol->used_bytes =
(long long)vol->used_ebs * vol->usable_leb_size;
}
ubi_volume_notify(ubi, vol, UBI_VOLUME_RESIZED);
self_check_volumes(ubi);
return err;
out_acc:
if (pebs > 0) {
spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs -= pebs;
ubi->avail_pebs += pebs;
spin_unlock(&ubi->volumes_lock);
}
out_free:
kfree(new_mapping);
return err;
}
/**
* ubi_rename_volumes - re-name UBI volumes.
* @ubi: UBI device description object
* @rename_list: list of &struct ubi_rename_entry objects
*
* This function re-names or removes volumes specified in the re-name list.
* Returns zero in case of success and a negative error code in case of
* failure.
*/
int ubi_rename_volumes(struct ubi_device *ubi, struct list_head *rename_list)
{
int err;
struct ubi_rename_entry *re;
err = ubi_vtbl_rename_volumes(ubi, rename_list);
if (err)
return err;
list_for_each_entry(re, rename_list, list) {
if (re->remove) {
err = ubi_remove_volume(re->desc, 1);
if (err)
break;
} else {
struct ubi_volume *vol = re->desc->vol;
spin_lock(&ubi->volumes_lock);
vol->name_len = re->new_name_len;
memcpy(vol->name, re->new_name, re->new_name_len + 1);
spin_unlock(&ubi->volumes_lock);
ubi_volume_notify(ubi, vol, UBI_VOLUME_RENAMED);
}
}
if (!err)
self_check_volumes(ubi);
return err;
}
/**
* ubi_add_volume - add volume.
* @ubi: UBI device description object
* @vol: volume description object
*
* This function adds an existing volume and initializes all its data
* structures. Returns zero in case of success and a negative error code in
* case of failure.
*/
int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
{
int err, vol_id = vol->vol_id;
dev_t dev;
dbg_gen("add volume %d", vol_id);
/* Register character device for the volume */
cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
vol->cdev.owner = THIS_MODULE;
dev = MKDEV(MAJOR(ubi->cdev.dev), vol->vol_id + 1);
err = cdev_add(&vol->cdev, dev, 1);
if (err) {
ubi_err("cannot add character device for volume %d, error %d",
vol_id, err);
return err;
}
vol->dev.release = vol_release;
vol->dev.parent = &ubi->dev;
vol->dev.devt = dev;
vol->dev.class = ubi_class;
dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
err = device_register(&vol->dev);
if (err)
goto out_cdev;
err = volume_sysfs_init(ubi, vol);
if (err) {
cdev_del(&vol->cdev);
volume_sysfs_close(vol);
return err;
}
self_check_volumes(ubi);
return err;
out_cdev:
cdev_del(&vol->cdev);
return err;
}
/**
* ubi_free_volume - free volume.
* @ubi: UBI device description object
* @vol: volume description object
*
* This function frees all resources for volume @vol but does not remove it.
* Used only when the UBI device is detached.
*/
void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol)
{
dbg_gen("free volume %d", vol->vol_id);
ubi->volumes[vol->vol_id] = NULL;
cdev_del(&vol->cdev);
volume_sysfs_close(vol);
}
/**
* self_check_volume - check volume information.
* @ubi: UBI device description object
* @vol_id: volume ID
*
* Returns zero if the volume is all right and a negative error code if not.
*/
static int self_check_volume(struct ubi_device *ubi, int vol_id)
{
int idx = vol_id2idx(ubi, vol_id);
int reserved_pebs, alignment, data_pad, vol_type, name_len, upd_marker;
const struct ubi_volume *vol;
long long n;
const char *name;
spin_lock(&ubi->volumes_lock);
reserved_pebs = be32_to_cpu(ubi->vtbl[vol_id].reserved_pebs);
vol = ubi->volumes[idx];
if (!vol) {
if (reserved_pebs) {
ubi_err("no volume info, but volume exists");
goto fail;
}
spin_unlock(&ubi->volumes_lock);
return 0;
}
if (vol->reserved_pebs < 0 || vol->alignment < 0 || vol->data_pad < 0 ||
vol->name_len < 0) {
ubi_err("negative values");
goto fail;
}
if (vol->alignment > ubi->leb_size || vol->alignment == 0) {
ubi_err("bad alignment");
goto fail;
}
n = vol->alignment & (ubi->min_io_size - 1);
if (vol->alignment != 1 && n) {
ubi_err("alignment is not multiple of min I/O unit");
goto fail;
}
n = ubi->leb_size % vol->alignment;
if (vol->data_pad != n) {
ubi_err("bad data_pad, has to be %lld", n);
goto fail;
}
if (vol->vol_type != UBI_DYNAMIC_VOLUME &&
vol->vol_type != UBI_STATIC_VOLUME) {
ubi_err("bad vol_type");
goto fail;
}
if (vol->upd_marker && vol->corrupted) {
ubi_err("update marker and corrupted simultaneously");
goto fail;
}
if (vol->reserved_pebs > ubi->good_peb_count) {
ubi_err("too large reserved_pebs");
goto fail;
}
n = ubi->leb_size - vol->data_pad;
if (vol->usable_leb_size != ubi->leb_size - vol->data_pad) {
ubi_err("bad usable_leb_size, has to be %lld", n);
goto fail;
}
if (vol->name_len > UBI_VOL_NAME_MAX) {
ubi_err("too long volume name, max is %d", UBI_VOL_NAME_MAX);
goto fail;
}
n = strnlen(vol->name, vol->name_len + 1);
if (n != vol->name_len) {
ubi_err("bad name_len %lld", n);
goto fail;
}
n = (long long)vol->used_ebs * vol->usable_leb_size;
if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
if (vol->corrupted) {
ubi_err("corrupted dynamic volume");
goto fail;
}
if (vol->used_ebs != vol->reserved_pebs) {
ubi_err("bad used_ebs");
goto fail;
}
if (vol->last_eb_bytes != vol->usable_leb_size) {
ubi_err("bad last_eb_bytes");
goto fail;
}
if (vol->used_bytes != n) {
ubi_err("bad used_bytes");
goto fail;
}
} else {
if (vol->used_ebs < 0 || vol->used_ebs > vol->reserved_pebs) {
ubi_err("bad used_ebs");
goto fail;
}
if (vol->last_eb_bytes < 0 ||
vol->last_eb_bytes > vol->usable_leb_size) {
ubi_err("bad last_eb_bytes");
goto fail;
}
if (vol->used_bytes < 0 || vol->used_bytes > n ||
vol->used_bytes < n - vol->usable_leb_size) {
ubi_err("bad used_bytes");
goto fail;
}
}
alignment = be32_to_cpu(ubi->vtbl[vol_id].alignment);
data_pad = be32_to_cpu(ubi->vtbl[vol_id].data_pad);
name_len = be16_to_cpu(ubi->vtbl[vol_id].name_len);
upd_marker = ubi->vtbl[vol_id].upd_marker;
name = &ubi->vtbl[vol_id].name[0];
if (ubi->vtbl[vol_id].vol_type == UBI_VID_DYNAMIC)
vol_type = UBI_DYNAMIC_VOLUME;
else
vol_type = UBI_STATIC_VOLUME;
if (alignment != vol->alignment || data_pad != vol->data_pad ||
upd_marker != vol->upd_marker || vol_type != vol->vol_type ||
name_len != vol->name_len || strncmp(name, vol->name, name_len)) {
ubi_err("volume info is different");
goto fail;
}
spin_unlock(&ubi->volumes_lock);
return 0;
fail:
ubi_err("self-check failed for volume %d", vol_id);
if (vol)
ubi_dump_vol_info(vol);
ubi_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id);
dump_stack();
spin_unlock(&ubi->volumes_lock);
return -EINVAL;
}
/**
* self_check_volumes - check information about all volumes.
* @ubi: UBI device description object
*
* Returns zero if the volumes are all right and a negative error code if not.
*/
static int self_check_volumes(struct ubi_device *ubi)
{
int i, err = 0;
if (!ubi_dbg_chk_gen(ubi))
return 0;
for (i = 0; i < ubi->vtbl_slots; i++) {
err = self_check_volume(ubi, i);
if (err)
break;
}
return err;
}
| gpl-2.0 |
Tesla-Redux-Devices/JuiceD-N6-Kernel | arch/mips/lasat/picvue.c | 2232 | 4377 | /*
* Picvue PVC160206 display driver
*
* Brian Murphy <brian@murphy.dk>
*
*/
#include <linux/kernel.h>
#include <linux/delay.h>
#include <asm/bootinfo.h>
#include <asm/lasat/lasat.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/string.h>
#include "picvue.h"
#define PVC_BUSY 0x80
#define PVC_NLINES 2
#define PVC_DISPMEM 80
#define PVC_LINELEN (PVC_DISPMEM / PVC_NLINES)
struct pvc_defs *picvue;
static void pvc_reg_write(u32 val)
{
*picvue->reg = val;
}
static u32 pvc_reg_read(void)
{
u32 tmp = *picvue->reg;
return tmp;
}
static void pvc_write_byte(u32 data, u8 byte)
{
data |= picvue->e;
pvc_reg_write(data);
data &= ~picvue->data_mask;
data |= byte << picvue->data_shift;
pvc_reg_write(data);
ndelay(220);
pvc_reg_write(data & ~picvue->e);
ndelay(220);
}
static u8 pvc_read_byte(u32 data)
{
u8 byte;
data |= picvue->e;
pvc_reg_write(data);
ndelay(220);
byte = (pvc_reg_read() & picvue->data_mask) >> picvue->data_shift;
data &= ~picvue->e;
pvc_reg_write(data);
ndelay(220);
return byte;
}
static u8 pvc_read_data(void)
{
u32 data = pvc_reg_read();
u8 byte;
data |= picvue->rw;
data &= ~picvue->rs;
pvc_reg_write(data);
ndelay(40);
byte = pvc_read_byte(data);
data |= picvue->rs;
pvc_reg_write(data);
return byte;
}
#define TIMEOUT 1000
static int pvc_wait(void)
{
int i = TIMEOUT;
int err = 0;
while ((pvc_read_data() & PVC_BUSY) && i)
i--;
if (i == 0)
err = -ETIME;
return err;
}
#define MODE_INST 0
#define MODE_DATA 1
static void pvc_write(u8 byte, int mode)
{
u32 data = pvc_reg_read();
data &= ~picvue->rw;
if (mode == MODE_DATA)
data |= picvue->rs;
else
data &= ~picvue->rs;
pvc_reg_write(data);
ndelay(40);
pvc_write_byte(data, byte);
if (mode == MODE_DATA)
data &= ~picvue->rs;
else
data |= picvue->rs;
pvc_reg_write(data);
pvc_wait();
}
void pvc_write_string(const unsigned char *str, u8 addr, int line)
{
int i = 0;
if (line > 0 && (PVC_NLINES > 1))
addr += 0x40 * line;
pvc_write(0x80 | addr, MODE_INST);
while (*str != 0 && i < PVC_LINELEN) {
pvc_write(*str++, MODE_DATA);
i++;
}
}
void pvc_write_string_centered(const unsigned char *str, int line)
{
int len = strlen(str);
u8 addr;
if (len > PVC_VISIBLE_CHARS)
addr = 0;
else
addr = (PVC_VISIBLE_CHARS - len) / 2;
pvc_write_string(str, addr, line);
}
void pvc_dump_string(const unsigned char *str)
{
int len = strlen(str);
pvc_write_string(str, 0, 0);
if (len > PVC_VISIBLE_CHARS)
pvc_write_string(&str[PVC_VISIBLE_CHARS], 0, 1);
}
#define BM_SIZE 8
#define MAX_PROGRAMMABLE_CHARS 8
int pvc_program_cg(int charnum, u8 bitmap[BM_SIZE])
{
int i;
int addr;
if (charnum > MAX_PROGRAMMABLE_CHARS)
return -ENOENT;
addr = charnum * 8;
pvc_write(0x40 | addr, MODE_INST);
for (i = 0; i < BM_SIZE; i++)
pvc_write(bitmap[i], MODE_DATA);
return 0;
}
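#if 0
/*
 * Illustrative sketch, not part of the driver: programming a made-up
 * 5x8 up-arrow glyph into CG RAM slot 0. Each byte is one pixel row,
 * low five bits used. On HD44780-style displays, writing data byte 0
 * afterwards should show the new character at the cursor.
 */
static void example_up_arrow(void)
{
u8 arrow[BM_SIZE] = {
0x04, 0x0e, 0x1f, 0x04, 0x04, 0x04, 0x04, 0x00
};
pvc_program_cg(0, arrow);
pvc_write(0, MODE_DATA);
}
#endif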
#define FUNC_SET_CMD 0x20
#define EIGHT_BYTE (1 << 4)
#define FOUR_BYTE 0
#define TWO_LINES (1 << 3)
#define ONE_LINE 0
#define LARGE_FONT (1 << 2)
#define SMALL_FONT 0
static void pvc_funcset(u8 cmd)
{
pvc_write(FUNC_SET_CMD | (cmd & (EIGHT_BYTE|TWO_LINES|LARGE_FONT)),
MODE_INST);
}
#define ENTRYMODE_CMD 0x4
#define AUTO_INC (1 << 1)
#define AUTO_DEC 0
#define CURSOR_FOLLOWS_DISP (1 << 0)
static void pvc_entrymode(u8 cmd)
{
pvc_write(ENTRYMODE_CMD | (cmd & (AUTO_INC|CURSOR_FOLLOWS_DISP)),
MODE_INST);
}
#define DISP_CNT_CMD 0x08
#define DISP_OFF 0
#define DISP_ON (1 << 2)
#define CUR_ON (1 << 1)
#define CUR_BLINK (1 << 0)
void pvc_dispcnt(u8 cmd)
{
pvc_write(DISP_CNT_CMD | (cmd & (DISP_ON|CUR_ON|CUR_BLINK)), MODE_INST);
}
#define MOVE_CMD 0x10
#define DISPLAY (1 << 3)
#define CURSOR 0
#define RIGHT (1 << 2)
#define LEFT 0
void pvc_move(u8 cmd)
{
pvc_write(MOVE_CMD | (cmd & (DISPLAY|RIGHT)), MODE_INST);
}
#define CLEAR_CMD 0x1
void pvc_clear(void)
{
pvc_write(CLEAR_CMD, MODE_INST);
}
#define HOME_CMD 0x2
void pvc_home(void)
{
pvc_write(HOME_CMD, MODE_INST);
}
int pvc_init(void)
{
u8 cmd = EIGHT_BYTE;
if (PVC_NLINES == 2)
cmd |= (SMALL_FONT|TWO_LINES);
else
cmd |= (LARGE_FONT|ONE_LINE);
pvc_funcset(cmd);
pvc_dispcnt(DISP_ON);
pvc_entrymode(AUTO_INC);
pvc_clear();
pvc_write_string_centered("Display", 0);
pvc_write_string_centered("Initialized", 1);
return 0;
}
module_init(pvc_init);
MODULE_LICENSE("GPL");
| gpl-2.0 |
omnirom/android_kernel_samsung_smdk4412 | drivers/net/ppp_async.c | 2232 | 24193 | /*
* PPP async serial channel driver for Linux.
*
* Copyright 1999 Paul Mackerras.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* This driver provides the encapsulation and framing for sending
* and receiving PPP frames over async serial lines. It relies on
* the generic PPP layer to give it frames to send and to process
* received frames. It implements the PPP line discipline.
*
* Part of the code in this driver was inspired by the old async-only
* PPP driver, written by Michael Callahan and Al Longyear, and
* subsequently hacked by Paul Mackerras.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/tty.h>
#include <linux/netdevice.h>
#include <linux/poll.h>
#include <linux/crc-ccitt.h>
#include <linux/ppp_defs.h>
#include <linux/if_ppp.h>
#include <linux/ppp_channel.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
#include <asm/uaccess.h>
#include <asm/string.h>
#define PPP_VERSION "2.4.2"
#define OBUFSIZE 4096
/* Structure for storing local state. */
struct asyncppp {
struct tty_struct *tty;
unsigned int flags;
unsigned int state;
unsigned int rbits;
int mru;
spinlock_t xmit_lock;
spinlock_t recv_lock;
unsigned long xmit_flags;
u32 xaccm[8];
u32 raccm;
unsigned int bytes_sent;
unsigned int bytes_rcvd;
struct sk_buff *tpkt;
int tpkt_pos;
u16 tfcs;
unsigned char *optr;
unsigned char *olim;
unsigned long last_xmit;
struct sk_buff *rpkt;
int lcp_fcs;
struct sk_buff_head rqueue;
struct tasklet_struct tsk;
atomic_t refcnt;
struct semaphore dead_sem;
struct ppp_channel chan; /* interface to generic ppp layer */
unsigned char obuf[OBUFSIZE];
};
/* Bit numbers in xmit_flags */
#define XMIT_WAKEUP 0
#define XMIT_FULL 1
#define XMIT_BUSY 2
/* State bits */
#define SC_TOSS 1
#define SC_ESCAPE 2
#define SC_PREV_ERROR 4
/* Bits in rbits */
#define SC_RCV_BITS (SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)
static int flag_time = HZ;
module_param(flag_time, int, 0);
MODULE_PARM_DESC(flag_time, "ppp_async: interval between flagged packets (in clock ticks)");
MODULE_LICENSE("GPL");
MODULE_ALIAS_LDISC(N_PPP);
/*
* Prototypes.
*/
static int ppp_async_encode(struct asyncppp *ap);
static int ppp_async_send(struct ppp_channel *chan, struct sk_buff *skb);
static int ppp_async_push(struct asyncppp *ap);
static void ppp_async_flush_output(struct asyncppp *ap);
static void ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
char *flags, int count);
static int ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd,
unsigned long arg);
static void ppp_async_process(unsigned long arg);
static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
int len, int inbound);
static const struct ppp_channel_ops async_ops = {
.start_xmit = ppp_async_send,
.ioctl = ppp_async_ioctl,
};
/*
* Routines implementing the PPP line discipline.
*/
/*
* We have a potential race on dereferencing tty->disc_data,
* because the tty layer provides no locking at all - thus one
* cpu could be running ppp_asynctty_receive while another
* calls ppp_asynctty_close, which zeroes tty->disc_data and
* frees the memory that ppp_asynctty_receive is using. The best
* way to fix this is to use a rwlock in the tty struct, but for now
* we use a single global rwlock for all ttys in ppp line discipline.
*
* FIXME: this is no longer true. The _close path for the ldisc is
* now guaranteed to be sane.
*/
static DEFINE_RWLOCK(disc_data_lock);
static struct asyncppp *ap_get(struct tty_struct *tty)
{
struct asyncppp *ap;
read_lock(&disc_data_lock);
ap = tty->disc_data;
if (ap != NULL)
atomic_inc(&ap->refcnt);
read_unlock(&disc_data_lock);
return ap;
}
static void ap_put(struct asyncppp *ap)
{
if (atomic_dec_and_test(&ap->refcnt))
up(&ap->dead_sem);
}
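#if 0
/*
 * Illustrative sketch, not part of the driver: the lifetime rule described
 * above. Every path that touches tty->disc_data brackets the access with
 * ap_get()/ap_put(); the final ap_put() wakes the closer sleeping on
 * dead_sem in ppp_asynctty_close().
 */
static void example_use(struct tty_struct *tty)
{
struct asyncppp *ap = ap_get(tty); /* NULL once close has run */
if (!ap)
return;
/* ... safe to dereference ap here ... */
ap_put(ap); /* may release a closer waiting on dead_sem */
}
#endif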
/*
* Called when a tty is put into PPP line discipline. Called in process
* context.
*/
static int
ppp_asynctty_open(struct tty_struct *tty)
{
struct asyncppp *ap;
int err;
int speed;
if (tty->ops->write == NULL)
return -EOPNOTSUPP;
err = -ENOMEM;
ap = kzalloc(sizeof(*ap), GFP_KERNEL);
if (!ap)
goto out;
/* initialize the asyncppp structure */
ap->tty = tty;
ap->mru = PPP_MRU;
spin_lock_init(&ap->xmit_lock);
spin_lock_init(&ap->recv_lock);
ap->xaccm[0] = ~0U;
ap->xaccm[3] = 0x60000000U;
ap->raccm = ~0U;
ap->optr = ap->obuf;
ap->olim = ap->obuf;
ap->lcp_fcs = -1;
skb_queue_head_init(&ap->rqueue);
tasklet_init(&ap->tsk, ppp_async_process, (unsigned long) ap);
atomic_set(&ap->refcnt, 1);
sema_init(&ap->dead_sem, 0);
ap->chan.private = ap;
ap->chan.ops = &async_ops;
ap->chan.mtu = PPP_MRU;
speed = tty_get_baud_rate(tty);
ap->chan.speed = speed;
err = ppp_register_channel(&ap->chan);
if (err)
goto out_free;
tty->disc_data = ap;
tty->receive_room = 65536;
return 0;
out_free:
kfree(ap);
out:
return err;
}
/*
* Called when the tty is put into another line discipline
* or it hangs up. We have to wait for any cpu currently
* executing in any of the other ppp_asynctty_* routines to
* finish before we can call ppp_unregister_channel and free
* the asyncppp struct. This routine must be called from
* process context, not interrupt or softirq context.
*/
static void
ppp_asynctty_close(struct tty_struct *tty)
{
struct asyncppp *ap;
write_lock_irq(&disc_data_lock);
ap = tty->disc_data;
tty->disc_data = NULL;
write_unlock_irq(&disc_data_lock);
if (!ap)
return;
/*
* We have now ensured that nobody can start using ap from now
* on, but we have to wait for all existing users to finish.
* Note that ppp_unregister_channel ensures that no calls to
* our channel ops (i.e. ppp_async_send/ioctl) are in progress
* by the time it returns.
*/
if (!atomic_dec_and_test(&ap->refcnt))
down(&ap->dead_sem);
tasklet_kill(&ap->tsk);
ppp_unregister_channel(&ap->chan);
kfree_skb(ap->rpkt);
skb_queue_purge(&ap->rqueue);
kfree_skb(ap->tpkt);
kfree(ap);
}
/*
* Called on tty hangup in process context.
*
* Wait for I/O to driver to complete and unregister PPP channel.
* This is already done by the close routine, so just call that.
*/
static int ppp_asynctty_hangup(struct tty_struct *tty)
{
ppp_asynctty_close(tty);
return 0;
}
/*
* Read does nothing - no data is ever available this way.
* Pppd reads and writes packets via /dev/ppp instead.
*/
static ssize_t
ppp_asynctty_read(struct tty_struct *tty, struct file *file,
unsigned char __user *buf, size_t count)
{
return -EAGAIN;
}
/*
* Write on the tty does nothing, the packets all come in
* from the ppp generic stuff.
*/
static ssize_t
ppp_asynctty_write(struct tty_struct *tty, struct file *file,
const unsigned char *buf, size_t count)
{
return -EAGAIN;
}
/*
* Called in process context only. May be re-entered by multiple
* ioctl calling threads.
*/
static int
ppp_asynctty_ioctl(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg)
{
struct asyncppp *ap = ap_get(tty);
int err, val;
int __user *p = (int __user *)arg;
if (!ap)
return -ENXIO;
err = -EFAULT;
switch (cmd) {
case PPPIOCGCHAN:
err = -EFAULT;
if (put_user(ppp_channel_index(&ap->chan), p))
break;
err = 0;
break;
case PPPIOCGUNIT:
err = -EFAULT;
if (put_user(ppp_unit_number(&ap->chan), p))
break;
err = 0;
break;
case TCFLSH:
/* flush our buffers and the serial port's buffer */
if (arg == TCIOFLUSH || arg == TCOFLUSH)
ppp_async_flush_output(ap);
err = tty_perform_flush(tty, arg);
break;
case FIONREAD:
val = 0;
if (put_user(val, p))
break;
err = 0;
break;
default:
/* Try the various mode ioctls */
err = tty_mode_ioctl(tty, file, cmd, arg);
}
ap_put(ap);
return err;
}
/* No kernel lock - fine */
static unsigned int
ppp_asynctty_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
{
return 0;
}
/* May sleep, don't call from interrupt level or with interrupts disabled */
static void
ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
char *cflags, int count)
{
struct asyncppp *ap = ap_get(tty);
unsigned long flags;
if (!ap)
return;
spin_lock_irqsave(&ap->recv_lock, flags);
ppp_async_input(ap, buf, cflags, count);
spin_unlock_irqrestore(&ap->recv_lock, flags);
if (!skb_queue_empty(&ap->rqueue))
tasklet_schedule(&ap->tsk);
ap_put(ap);
tty_unthrottle(tty);
}
static void
ppp_asynctty_wakeup(struct tty_struct *tty)
{
struct asyncppp *ap = ap_get(tty);
clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
if (!ap)
return;
set_bit(XMIT_WAKEUP, &ap->xmit_flags);
tasklet_schedule(&ap->tsk);
ap_put(ap);
}
static struct tty_ldisc_ops ppp_ldisc = {
.owner = THIS_MODULE,
.magic = TTY_LDISC_MAGIC,
.name = "ppp",
.open = ppp_asynctty_open,
.close = ppp_asynctty_close,
.hangup = ppp_asynctty_hangup,
.read = ppp_asynctty_read,
.write = ppp_asynctty_write,
.ioctl = ppp_asynctty_ioctl,
.poll = ppp_asynctty_poll,
.receive_buf = ppp_asynctty_receive,
.write_wakeup = ppp_asynctty_wakeup,
};
static int __init
ppp_async_init(void)
{
int err;
err = tty_register_ldisc(N_PPP, &ppp_ldisc);
if (err != 0)
printk(KERN_ERR "PPP_async: error %d registering line disc.\n",
err);
return err;
}
/*
* The following routines provide the PPP channel interface.
*/
static int
ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
{
struct asyncppp *ap = chan->private;
void __user *argp = (void __user *)arg;
int __user *p = argp;
int err, val;
u32 accm[8];
err = -EFAULT;
switch (cmd) {
case PPPIOCGFLAGS:
val = ap->flags | ap->rbits;
if (put_user(val, p))
break;
err = 0;
break;
case PPPIOCSFLAGS:
if (get_user(val, p))
break;
ap->flags = val & ~SC_RCV_BITS;
spin_lock_irq(&ap->recv_lock);
ap->rbits = val & SC_RCV_BITS;
spin_unlock_irq(&ap->recv_lock);
err = 0;
break;
case PPPIOCGASYNCMAP:
if (put_user(ap->xaccm[0], (u32 __user *)argp))
break;
err = 0;
break;
case PPPIOCSASYNCMAP:
if (get_user(ap->xaccm[0], (u32 __user *)argp))
break;
err = 0;
break;
case PPPIOCGRASYNCMAP:
if (put_user(ap->raccm, (u32 __user *)argp))
break;
err = 0;
break;
case PPPIOCSRASYNCMAP:
if (get_user(ap->raccm, (u32 __user *)argp))
break;
err = 0;
break;
case PPPIOCGXASYNCMAP:
if (copy_to_user(argp, ap->xaccm, sizeof(ap->xaccm)))
break;
err = 0;
break;
case PPPIOCSXASYNCMAP:
if (copy_from_user(accm, argp, sizeof(accm)))
break;
accm[2] &= ~0x40000000U; /* can't escape 0x5e */
accm[3] |= 0x60000000U; /* must escape 0x7d, 0x7e */
memcpy(ap->xaccm, accm, sizeof(ap->xaccm));
err = 0;
break;
case PPPIOCGMRU:
if (put_user(ap->mru, p))
break;
err = 0;
break;
case PPPIOCSMRU:
if (get_user(val, p))
break;
if (val < PPP_MRU)
val = PPP_MRU;
ap->mru = val;
err = 0;
break;
default:
err = -ENOTTY;
}
return err;
}
/*
* This is called at softirq level to deliver received packets
* to the ppp_generic code, and to tell the ppp_generic code
* if we can accept more output now.
*/
static void ppp_async_process(unsigned long arg)
{
struct asyncppp *ap = (struct asyncppp *) arg;
struct sk_buff *skb;
/* process received packets */
while ((skb = skb_dequeue(&ap->rqueue)) != NULL) {
if (skb->cb[0])
ppp_input_error(&ap->chan, 0);
ppp_input(&ap->chan, skb);
}
/* try to push more stuff out */
if (test_bit(XMIT_WAKEUP, &ap->xmit_flags) && ppp_async_push(ap))
ppp_output_wakeup(&ap->chan);
}
/*
* Procedures for encapsulation and framing.
*/
/*
* Procedure to encode the data for async serial transmission.
* Does octet stuffing (escaping), puts the address/control bytes
* on if A/C compression is disabled, and does protocol compression.
* Assumes ap->tpkt != 0 on entry.
* Returns 1 if we finished the current frame, 0 otherwise.
*/
#define PUT_BYTE(ap, buf, c, islcp) do { \
if ((islcp && c < 0x20) || (ap->xaccm[c >> 5] & (1 << (c & 0x1f)))) {\
*buf++ = PPP_ESCAPE; \
*buf++ = c ^ PPP_TRANS; \
} else \
*buf++ = c; \
} while (0)
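#if 0
/*
 * Illustrative sketch, not part of the driver: the escaping rule from
 * PUT_BYTE written as a plain function over a caller-supplied transmit
 * ACCM. A byte whose bit is set in the 256-bit map (or any control
 * character while an LCP exchange is in progress) goes out as PPP_ESCAPE
 * followed by the byte XORed with PPP_TRANS (0x20); returns the number
 * of output bytes.
 */
static int example_stuff_byte(unsigned char *buf, unsigned char c,
const u32 xaccm[8], int islcp)
{
if ((islcp && c < 0x20) || (xaccm[c >> 5] & (1 << (c & 0x1f)))) {
buf[0] = PPP_ESCAPE;
buf[1] = c ^ PPP_TRANS;
return 2;
}
buf[0] = c;
return 1;
}
#endif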
static int
ppp_async_encode(struct asyncppp *ap)
{
int fcs, i, count, c, proto;
unsigned char *buf, *buflim;
unsigned char *data;
int islcp;
buf = ap->obuf;
ap->olim = buf;
ap->optr = buf;
i = ap->tpkt_pos;
data = ap->tpkt->data;
count = ap->tpkt->len;
fcs = ap->tfcs;
proto = get_unaligned_be16(data);
/*
* LCP packets with code values between 1 (configure-request)
* and 7 (code-reject) must be sent as though no options
* had been negotiated.
*/
islcp = proto == PPP_LCP && 1 <= data[2] && data[2] <= 7;
if (i == 0) {
if (islcp)
async_lcp_peek(ap, data, count, 0);
/*
* Start of a new packet - insert the leading FLAG
* character if necessary.
*/
if (islcp || flag_time == 0 ||
time_after_eq(jiffies, ap->last_xmit + flag_time))
*buf++ = PPP_FLAG;
ap->last_xmit = jiffies;
fcs = PPP_INITFCS;
/*
* Put in the address/control bytes if necessary
*/
if ((ap->flags & SC_COMP_AC) == 0 || islcp) {
PUT_BYTE(ap, buf, 0xff, islcp);
fcs = PPP_FCS(fcs, 0xff);
PUT_BYTE(ap, buf, 0x03, islcp);
fcs = PPP_FCS(fcs, 0x03);
}
}
/*
* Once we put in the last byte, we need to put in the FCS
* and closing flag, so make sure there is at least 7 bytes
* of free space in the output buffer.
*/
buflim = ap->obuf + OBUFSIZE - 6;
while (i < count && buf < buflim) {
c = data[i++];
if (i == 1 && c == 0 && (ap->flags & SC_COMP_PROT))
continue; /* compress protocol field */
fcs = PPP_FCS(fcs, c);
PUT_BYTE(ap, buf, c, islcp);
}
if (i < count) {
/*
* Remember where we are up to in this packet.
*/
ap->olim = buf;
ap->tpkt_pos = i;
ap->tfcs = fcs;
return 0;
}
/*
* We have finished the packet. Add the FCS and flag.
*/
fcs = ~fcs;
c = fcs & 0xff;
PUT_BYTE(ap, buf, c, islcp);
c = (fcs >> 8) & 0xff;
PUT_BYTE(ap, buf, c, islcp);
*buf++ = PPP_FLAG;
ap->olim = buf;
kfree_skb(ap->tpkt);
ap->tpkt = NULL;
return 1;
}
/*
* Transmit-side routines.
*/
/*
* Send a packet to the peer over an async tty line.
* Returns 1 iff the packet was accepted.
* If the packet was not accepted, we will call ppp_output_wakeup
* at some later time.
*/
static int
ppp_async_send(struct ppp_channel *chan, struct sk_buff *skb)
{
struct asyncppp *ap = chan->private;
ppp_async_push(ap);
if (test_and_set_bit(XMIT_FULL, &ap->xmit_flags))
return 0; /* already full */
ap->tpkt = skb;
ap->tpkt_pos = 0;
ppp_async_push(ap);
return 1;
}
/*
* Push as much data as possible out to the tty.
*/
static int
ppp_async_push(struct asyncppp *ap)
{
int avail, sent, done = 0;
struct tty_struct *tty = ap->tty;
int tty_stuffed = 0;
/*
* We can get called recursively here if the tty write
* function calls our wakeup function. This can happen
* for example on a pty with both the master and slave
* set to PPP line discipline.
* We use the XMIT_BUSY bit to detect this and get out,
* leaving the XMIT_WAKEUP bit set to tell the other
* instance that it may be able to write more now.
*/
if (test_and_set_bit(XMIT_BUSY, &ap->xmit_flags))
return 0;
spin_lock_bh(&ap->xmit_lock);
for (;;) {
if (test_and_clear_bit(XMIT_WAKEUP, &ap->xmit_flags))
tty_stuffed = 0;
if (!tty_stuffed && ap->optr < ap->olim) {
avail = ap->olim - ap->optr;
set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
sent = tty->ops->write(tty, ap->optr, avail);
if (sent < 0)
goto flush; /* error, e.g. loss of CD */
ap->optr += sent;
if (sent < avail)
tty_stuffed = 1;
continue;
}
if (ap->optr >= ap->olim && ap->tpkt) {
if (ppp_async_encode(ap)) {
/* finished processing ap->tpkt */
clear_bit(XMIT_FULL, &ap->xmit_flags);
done = 1;
}
continue;
}
/*
* We haven't made any progress this time around.
* Clear XMIT_BUSY to let other callers in, but
* after doing so we have to check if anyone set
* XMIT_WAKEUP since we last checked it. If they
* did, we should try again to set XMIT_BUSY and go
* around again in case XMIT_BUSY was still set when
* the other caller tried.
*/
clear_bit(XMIT_BUSY, &ap->xmit_flags);
/* any more work to do? if not, exit the loop */
if (!(test_bit(XMIT_WAKEUP, &ap->xmit_flags) ||
(!tty_stuffed && ap->tpkt)))
break;
/* more work to do, see if we can do it now */
if (test_and_set_bit(XMIT_BUSY, &ap->xmit_flags))
break;
}
spin_unlock_bh(&ap->xmit_lock);
return done;
flush:
clear_bit(XMIT_BUSY, &ap->xmit_flags);
if (ap->tpkt) {
kfree_skb(ap->tpkt);
ap->tpkt = NULL;
clear_bit(XMIT_FULL, &ap->xmit_flags);
done = 1;
}
ap->optr = ap->olim;
spin_unlock_bh(&ap->xmit_lock);
return done;
}
/*
* Flush output from our internal buffers.
* Called for the TCFLSH ioctl. Can be entered in parallel
* but this is covered by the xmit_lock.
*/
static void
ppp_async_flush_output(struct asyncppp *ap)
{
int done = 0;
spin_lock_bh(&ap->xmit_lock);
ap->optr = ap->olim;
if (ap->tpkt != NULL) {
kfree_skb(ap->tpkt);
ap->tpkt = NULL;
clear_bit(XMIT_FULL, &ap->xmit_flags);
done = 1;
}
spin_unlock_bh(&ap->xmit_lock);
if (done)
ppp_output_wakeup(&ap->chan);
}
/*
* Receive-side routines.
*/
/* see how many ordinary chars there are at the start of buf */
static inline int
scan_ordinary(struct asyncppp *ap, const unsigned char *buf, int count)
{
int i, c;
for (i = 0; i < count; ++i) {
c = buf[i];
if (c == PPP_ESCAPE || c == PPP_FLAG ||
(c < 0x20 && (ap->raccm & (1 << c)) != 0))
break;
}
return i;
}
/* called when a flag is seen - do end-of-packet processing */
static void
process_input_packet(struct asyncppp *ap)
{
struct sk_buff *skb;
unsigned char *p;
unsigned int len, fcs, proto;
skb = ap->rpkt;
if (ap->state & (SC_TOSS | SC_ESCAPE))
goto err;
if (skb == NULL)
return; /* 0-length packet */
/* check the FCS */
p = skb->data;
len = skb->len;
if (len < 3)
goto err; /* too short */
fcs = PPP_INITFCS;
for (; len > 0; --len)
fcs = PPP_FCS(fcs, *p++);
if (fcs != PPP_GOODFCS)
goto err; /* bad FCS */
skb_trim(skb, skb->len - 2);
/* check for address/control and protocol compression */
p = skb->data;
if (p[0] == PPP_ALLSTATIONS) {
/* chop off address/control */
if (p[1] != PPP_UI || skb->len < 3)
goto err;
p = skb_pull(skb, 2);
}
proto = p[0];
if (proto & 1) {
/* protocol is compressed */
skb_push(skb, 1)[0] = 0;
} else {
if (skb->len < 2)
goto err;
proto = (proto << 8) + p[1];
if (proto == PPP_LCP)
async_lcp_peek(ap, p, skb->len, 1);
}
/* queue the frame to be processed */
skb->cb[0] = ap->state;
skb_queue_tail(&ap->rqueue, skb);
ap->rpkt = NULL;
ap->state = 0;
return;
err:
/* frame had an error, remember that, reset SC_TOSS & SC_ESCAPE */
ap->state = SC_PREV_ERROR;
if (skb) {
/* make skb appear as freshly allocated */
skb_trim(skb, 0);
skb_reserve(skb, -skb_headroom(skb));
}
}
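/*
 * For reference, an uncompressed LCP frame as it reaches this point, with
 * a made-up ID and no options: ff 03 c0 21 01 01 00 04 xx xx -- that is,
 * address ff (PPP_ALLSTATIONS) and control 03 (PPP_UI), which get chopped
 * off above; protocol c021 (LCP), which is never sent compressed; LCP
 * code 01 (configure-request), ID 01, length 0004; and the two FCS bytes
 * xx xx that skb_trim() removes once checked.
 */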
/* Called when the tty driver has data for us. Runs parallel with the
other ldisc functions but will not be re-entered */
static void
ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
char *flags, int count)
{
struct sk_buff *skb;
int c, i, j, n, s, f;
unsigned char *sp;
/* update bits used for 8-bit cleanness detection */
if (~ap->rbits & SC_RCV_BITS) {
s = 0;
for (i = 0; i < count; ++i) {
c = buf[i];
if (flags && flags[i] != 0)
continue;
s |= (c & 0x80)? SC_RCV_B7_1: SC_RCV_B7_0;
c = ((c >> 4) ^ c) & 0xf;
s |= (0x6996 & (1 << c))? SC_RCV_ODDP: SC_RCV_EVNP;
}
ap->rbits |= s;
}
while (count > 0) {
/* scan through and see how many chars we can do in bulk */
if ((ap->state & SC_ESCAPE) && buf[0] == PPP_ESCAPE)
n = 1;
else
n = scan_ordinary(ap, buf, count);
f = 0;
if (flags && (ap->state & SC_TOSS) == 0) {
/* check the flags to see if any char had an error */
for (j = 0; j < n; ++j)
if ((f = flags[j]) != 0)
break;
}
if (f != 0) {
/* start tossing */
ap->state |= SC_TOSS;
} else if (n > 0 && (ap->state & SC_TOSS) == 0) {
/* stuff the chars in the skb */
skb = ap->rpkt;
if (!skb) {
skb = dev_alloc_skb(ap->mru + PPP_HDRLEN + 2);
if (!skb)
goto nomem;
ap->rpkt = skb;
}
if (skb->len == 0) {
/* Try to get the payload 4-byte aligned.
* This should match the
* PPP_ALLSTATIONS/PPP_UI/compressed tests in
* process_input_packet, but we do not have
* enough chars here to test buf[1] and buf[2].
*/
if (buf[0] != PPP_ALLSTATIONS)
skb_reserve(skb, 2 + (buf[0] & 1));
}
if (n > skb_tailroom(skb)) {
/* packet overflowed MRU */
ap->state |= SC_TOSS;
} else {
sp = skb_put(skb, n);
memcpy(sp, buf, n);
if (ap->state & SC_ESCAPE) {
sp[0] ^= PPP_TRANS;
ap->state &= ~SC_ESCAPE;
}
}
}
if (n >= count)
break;
c = buf[n];
if (flags != NULL && flags[n] != 0) {
ap->state |= SC_TOSS;
} else if (c == PPP_FLAG) {
process_input_packet(ap);
} else if (c == PPP_ESCAPE) {
ap->state |= SC_ESCAPE;
} else if (I_IXON(ap->tty)) {
if (c == START_CHAR(ap->tty))
start_tty(ap->tty);
else if (c == STOP_CHAR(ap->tty))
stop_tty(ap->tty);
}
/* otherwise it's a char in the recv ACCM */
++n;
buf += n;
if (flags)
flags += n;
count -= n;
}
return;
nomem:
printk(KERN_ERR "PPPasync: no memory (input pkt)\n");
ap->state |= SC_TOSS;
}
/*
* We look at LCP frames going past so that we can notice
* and react to the LCP configure-ack from the peer.
* In the situation where the peer has been sent a configure-ack
* already, LCP is up once it has sent its configure-ack
* so the immediately following packet can be sent with the
* configured LCP options. This allows us to process the following
* packet correctly without pppd needing to respond quickly.
*
* We only respond to the received configure-ack if we have just
* sent a configure-request, and the configure-ack contains the
* same data (this is checked using a 16-bit crc of the data).
*/
#define CONFREQ 1 /* LCP code field values */
#define CONFACK 2
#define LCP_MRU 1 /* LCP option numbers */
#define LCP_ASYNCMAP 2
static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
int len, int inbound)
{
int dlen, fcs, i, code;
u32 val;
data += 2; /* skip protocol bytes */
len -= 2;
if (len < 4) /* 4 = code, ID, length */
return;
code = data[0];
if (code != CONFACK && code != CONFREQ)
return;
dlen = get_unaligned_be16(data + 2);
if (len < dlen)
return; /* packet got truncated or length is bogus */
if (code == (inbound? CONFACK: CONFREQ)) {
/*
* sent confreq or received confack:
* calculate the crc of the data from the ID field on.
*/
fcs = PPP_INITFCS;
for (i = 1; i < dlen; ++i)
fcs = PPP_FCS(fcs, data[i]);
if (!inbound) {
/* outbound confreq - remember the crc for later */
ap->lcp_fcs = fcs;
return;
}
/* received confack, check the crc */
fcs ^= ap->lcp_fcs;
ap->lcp_fcs = -1;
if (fcs != 0)
return;
} else if (inbound)
return; /* not interested in received confreq */
/* process the options in the confack */
data += 4;
dlen -= 4;
/* data[0] is code, data[1] is length */
while (dlen >= 2 && dlen >= data[1] && data[1] >= 2) {
switch (data[0]) {
case LCP_MRU:
val = get_unaligned_be16(data + 2);
if (inbound)
ap->mru = val;
else
ap->chan.mtu = val;
break;
case LCP_ASYNCMAP:
val = get_unaligned_be32(data + 2);
if (inbound)
ap->raccm = val;
else
ap->xaccm[0] = val;
break;
}
dlen -= data[1];
data += data[1];
}
}
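#if 0
/*
 * Illustrative sketch, not part of the driver: the "16-bit crc" used
 * above is the standard PPP FCS, run from the ID field onwards. A
 * received confack matches our last confreq iff the two values XOR to
 * zero, which is exactly the test in async_lcp_peek.
 */
static int example_lcp_match(const unsigned char *data, int dlen,
int saved_fcs)
{
int fcs = PPP_INITFCS;
int i;
for (i = 1; i < dlen; ++i) /* skip the code byte */
fcs = PPP_FCS(fcs, data[i]);
return (fcs ^ saved_fcs) == 0;
}
#endif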
static void __exit ppp_async_cleanup(void)
{
if (tty_unregister_ldisc(N_PPP) != 0)
printk(KERN_ERR "failed to unregister PPP line discipline\n");
}
module_init(ppp_async_init);
module_exit(ppp_async_cleanup);
| gpl-2.0 |
Validus-Kernel/kernel_oneplus2 | arch/mips/pci/ops-msc.c | 2232 | 3973 | /*
* Copyright (C) 1999, 2000, 2004, 2005 MIPS Technologies, Inc.
* All rights reserved.
* Authors: Carsten Langgaard <carstenl@mips.com>
* Maciej W. Rozycki <macro@mips.com>
* Copyright (C) 2005 Ralf Baechle (ralf@linux-mips.org)
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* MIPS boards specific PCI support.
*
*/
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/mips-boards/msc01_pci.h>
#define PCI_ACCESS_READ 0
#define PCI_ACCESS_WRITE 1
/*
* PCI configuration cycle AD bus definition
*/
/* Type 0 */
#define PCI_CFG_TYPE0_REG_SHF 0
#define PCI_CFG_TYPE0_FUNC_SHF 8
/* Type 1 */
#define PCI_CFG_TYPE1_REG_SHF 0
#define PCI_CFG_TYPE1_FUNC_SHF 8
#define PCI_CFG_TYPE1_DEV_SHF 11
#define PCI_CFG_TYPE1_BUS_SHF 16
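/*
 * Illustrative example (not from the original source): a type 1 cycle
 * to bus 1, device 2, function 0, register 0x10 would place
 * (1 << 16) | (2 << 11) | (0 << 8) | 0x10 = 0x11010 on the AD bus.
 */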
static int msc_pcibios_config_access(unsigned char access_type,
struct pci_bus *bus, unsigned int devfn, int where, u32 * data)
{
unsigned char busnum = bus->number;
u32 intr;
/* Clear status register bits. */
MSC_WRITE(MSC01_PCI_INTSTAT,
(MSC01_PCI_INTCFG_MA_BIT | MSC01_PCI_INTCFG_TA_BIT));
MSC_WRITE(MSC01_PCI_CFGADDR,
((busnum << MSC01_PCI_CFGADDR_BNUM_SHF) |
(PCI_SLOT(devfn) << MSC01_PCI_CFGADDR_DNUM_SHF) |
(PCI_FUNC(devfn) << MSC01_PCI_CFGADDR_FNUM_SHF) |
((where / 4) << MSC01_PCI_CFGADDR_RNUM_SHF)));
/* Perform access */
if (access_type == PCI_ACCESS_WRITE)
MSC_WRITE(MSC01_PCI_CFGDATA, *data);
else
MSC_READ(MSC01_PCI_CFGDATA, *data);
/* Detect Master/Target abort */
MSC_READ(MSC01_PCI_INTSTAT, intr);
if (intr & (MSC01_PCI_INTCFG_MA_BIT | MSC01_PCI_INTCFG_TA_BIT)) {
/* Error occurred */
/* Clear bits */
MSC_WRITE(MSC01_PCI_INTSTAT,
(MSC01_PCI_INTCFG_MA_BIT | MSC01_PCI_INTCFG_TA_BIT));
return -1;
}
return 0;
}
/*
* We can't address 8 and 16 bit words directly. Instead we have to
* read/write a 32-bit word and mask/modify the data we actually want.
*/
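/*
 * Worked example (illustrative, not from the original source): a
 * one-byte read at config offset 0x0e fetches the aligned 32-bit word
 * at 0x0c and extracts bits [23:16]:
 *
 *	shift = (0x0e & 3) << 3 = 16
 *	*val  = (data >> 16) & 0xff
 *
 * Writes do the inverse: clear the lane with ~(0xff << shift), then
 * OR in (val << shift), as msc_pcibios_write() does below.
 */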
static int msc_pcibios_read(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 * val)
{
u32 data = 0;
if ((size == 2) && (where & 1))
return PCIBIOS_BAD_REGISTER_NUMBER;
else if ((size == 4) && (where & 3))
return PCIBIOS_BAD_REGISTER_NUMBER;
if (msc_pcibios_config_access(PCI_ACCESS_READ, bus, devfn, where,
&data))
return -1;
if (size == 1)
*val = (data >> ((where & 3) << 3)) & 0xff;
else if (size == 2)
*val = (data >> ((where & 3) << 3)) & 0xffff;
else
*val = data;
return PCIBIOS_SUCCESSFUL;
}
static int msc_pcibios_write(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 val)
{
u32 data = 0;
if ((size == 2) && (where & 1))
return PCIBIOS_BAD_REGISTER_NUMBER;
else if ((size == 4) && (where & 3))
return PCIBIOS_BAD_REGISTER_NUMBER;
if (size == 4)
data = val;
else {
if (msc_pcibios_config_access(PCI_ACCESS_READ, bus, devfn,
where, &data))
return -1;
if (size == 1)
data = (data & ~(0xff << ((where & 3) << 3))) |
(val << ((where & 3) << 3));
else if (size == 2)
data = (data & ~(0xffff << ((where & 3) << 3))) |
(val << ((where & 3) << 3));
}
if (msc_pcibios_config_access(PCI_ACCESS_WRITE, bus, devfn, where,
&data))
return -1;
return PCIBIOS_SUCCESSFUL;
}
struct pci_ops msc_pci_ops = {
.read = msc_pcibios_read,
.write = msc_pcibios_write
};
| gpl-2.0 |
sagar846/kernel_athene | arch/mips/cavium-octeon/executive/cvmx-helper.c | 2232 | 34481 | /***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/*
*
* Helper functions for common, but complicated tasks.
*
*/
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-config.h>
#include <asm/octeon/cvmx-fpa.h>
#include <asm/octeon/cvmx-pip.h>
#include <asm/octeon/cvmx-pko.h>
#include <asm/octeon/cvmx-ipd.h>
#include <asm/octeon/cvmx-spi.h>
#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-helper-board.h>
#include <asm/octeon/cvmx-pip-defs.h>
#include <asm/octeon/cvmx-smix-defs.h>
#include <asm/octeon/cvmx-asxx-defs.h>
/**
* cvmx_override_pko_queue_priority(int ipd_port, uint64_t
* priorities[16]) is a function pointer. It is meant to allow
* customization of the PKO queue priorities based on the port
* number. Users should set this pointer to a function before
* calling any cvmx-helper operations.
*/
void (*cvmx_override_pko_queue_priority) (int pko_port,
uint64_t priorities[16]);
/**
* cvmx_override_ipd_port_setup(int ipd_port) is a function
* pointer. It is meant to allow customization of the IPD port
* setup before packet input/output comes online. It is called
* after cvmx-helper does the default IPD configuration, but
* before IPD is enabled. Users should set this pointer to a
* function before calling any cvmx-helper operations.
*/
void (*cvmx_override_ipd_port_setup) (int ipd_port);
/* Port count per interface */
static int interface_port_count[4] = { 0, 0, 0, 0 };
/* Port last configured link info index by IPD/PKO port */
static cvmx_helper_link_info_t
port_link_info[CVMX_PIP_NUM_INPUT_PORTS];
/**
* Return the number of interfaces the chip has. Each interface
* may have multiple ports. Most chips support three interfaces,
* but the CN56XX and CN52XX are exceptions. These support
* four interfaces.
*
* Returns Number of interfaces on chip
*/
int cvmx_helper_get_number_of_interfaces(void)
{
if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX))
return 4;
else
return 3;
}
/**
* Return the number of ports on an interface. Depending on the
* chip and configuration, this can be 1-16. A value of 0
* specifies that the interface doesn't exist or isn't usable.
*
* @interface: Interface to get the port count for
*
* Returns Number of ports on interface. Can be Zero.
*/
int cvmx_helper_ports_on_interface(int interface)
{
return interface_port_count[interface];
}
/**
* Get the operating mode of an interface. Depending on the Octeon
* chip and configuration, this function returns an enumeration
* of the type of packet I/O supported by an interface.
*
* @interface: Interface to probe
*
* Returns Mode of the interface. Unknown or unsupported interfaces return
* DISABLED.
*/
cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int interface)
{
union cvmx_gmxx_inf_mode mode;
if (interface == 2)
return CVMX_HELPER_INTERFACE_MODE_NPI;
if (interface == 3) {
if (OCTEON_IS_MODEL(OCTEON_CN56XX)
|| OCTEON_IS_MODEL(OCTEON_CN52XX))
return CVMX_HELPER_INTERFACE_MODE_LOOP;
else
return CVMX_HELPER_INTERFACE_MODE_DISABLED;
}
if (interface == 0
&& cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CN3005_EVB_HS5
&& cvmx_sysinfo_get()->board_rev_major == 1) {
/*
* Lie about interface type of CN3005 board. This
* board has a switch on port 1 like the other
* evaluation boards, but it is connected over RGMII
* instead of GMII. Report GMII mode so that the
* speed is forced to 1 Gbit full duplex. Other than
* some initial configuration (which does not use the
* output of this function) there is no difference in
* setup between GMII and RGMII modes.
*/
return CVMX_HELPER_INTERFACE_MODE_GMII;
}
/* Interface 1 is always disabled on CN31XX and CN30XX */
if ((interface == 1)
&& (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN30XX)
|| OCTEON_IS_MODEL(OCTEON_CN50XX)
|| OCTEON_IS_MODEL(OCTEON_CN52XX)))
return CVMX_HELPER_INTERFACE_MODE_DISABLED;
mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
switch (mode.cn56xx.mode) {
case 0:
return CVMX_HELPER_INTERFACE_MODE_DISABLED;
case 1:
return CVMX_HELPER_INTERFACE_MODE_XAUI;
case 2:
return CVMX_HELPER_INTERFACE_MODE_SGMII;
case 3:
return CVMX_HELPER_INTERFACE_MODE_PICMG;
default:
return CVMX_HELPER_INTERFACE_MODE_DISABLED;
}
} else {
if (!mode.s.en)
return CVMX_HELPER_INTERFACE_MODE_DISABLED;
if (mode.s.type) {
if (OCTEON_IS_MODEL(OCTEON_CN38XX)
|| OCTEON_IS_MODEL(OCTEON_CN58XX))
return CVMX_HELPER_INTERFACE_MODE_SPI;
else
return CVMX_HELPER_INTERFACE_MODE_GMII;
} else
return CVMX_HELPER_INTERFACE_MODE_RGMII;
}
}
/**
* Configure the IPD/PIP tagging and QoS options for a specific
* port. This function determines the POW work queue entry
* contents for a port. The setup performed here is controlled by
* the defines in executive-config.h.
*
* @ipd_port: Port to configure. This follows the IPD numbering, not the
* per interface numbering
*
* Returns Zero on success, negative on failure
*/
static int __cvmx_helper_port_setup_ipd(int ipd_port)
{
union cvmx_pip_prt_cfgx port_config;
union cvmx_pip_prt_tagx tag_config;
port_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
tag_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_TAGX(ipd_port));
/* Have each port go to a different POW queue */
port_config.s.qos = ipd_port & 0x7;
/* Process the headers and place the IP header in the work queue */
port_config.s.mode = CVMX_HELPER_INPUT_PORT_SKIP_MODE;
tag_config.s.ip6_src_flag = CVMX_HELPER_INPUT_TAG_IPV6_SRC_IP;
tag_config.s.ip6_dst_flag = CVMX_HELPER_INPUT_TAG_IPV6_DST_IP;
tag_config.s.ip6_sprt_flag = CVMX_HELPER_INPUT_TAG_IPV6_SRC_PORT;
tag_config.s.ip6_dprt_flag = CVMX_HELPER_INPUT_TAG_IPV6_DST_PORT;
tag_config.s.ip6_nxth_flag = CVMX_HELPER_INPUT_TAG_IPV6_NEXT_HEADER;
tag_config.s.ip4_src_flag = CVMX_HELPER_INPUT_TAG_IPV4_SRC_IP;
tag_config.s.ip4_dst_flag = CVMX_HELPER_INPUT_TAG_IPV4_DST_IP;
tag_config.s.ip4_sprt_flag = CVMX_HELPER_INPUT_TAG_IPV4_SRC_PORT;
tag_config.s.ip4_dprt_flag = CVMX_HELPER_INPUT_TAG_IPV4_DST_PORT;
tag_config.s.ip4_pctl_flag = CVMX_HELPER_INPUT_TAG_IPV4_PROTOCOL;
tag_config.s.inc_prt_flag = CVMX_HELPER_INPUT_TAG_INPUT_PORT;
tag_config.s.tcp6_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
tag_config.s.tcp4_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
tag_config.s.ip6_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
tag_config.s.ip4_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
tag_config.s.non_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
/* Put all packets in group 0. Other groups can be used by the app */
tag_config.s.grp = 0;
cvmx_pip_config_port(ipd_port, port_config, tag_config);
/* Give the user a chance to override our setting for each port */
if (cvmx_override_ipd_port_setup)
cvmx_override_ipd_port_setup(ipd_port);
return 0;
}
/**
* This function sets the interface_port_count[interface] correctly,
* without modifying any hardware configuration. Hardware setup of
* the ports will be performed later.
*
* @interface: Interface to probe
*
* Returns Zero on success, negative on failure
*/
int cvmx_helper_interface_enumerate(int interface)
{
switch (cvmx_helper_interface_get_mode(interface)) {
/* These types don't support ports to IPD/PKO */
case CVMX_HELPER_INTERFACE_MODE_DISABLED:
case CVMX_HELPER_INTERFACE_MODE_PCIE:
interface_port_count[interface] = 0;
break;
/* XAUI is a single high speed port */
case CVMX_HELPER_INTERFACE_MODE_XAUI:
interface_port_count[interface] =
__cvmx_helper_xaui_enumerate(interface);
break;
/*
* RGMII/GMII/MII are all treated about the same. Most
* functions refer to these ports as RGMII.
*/
case CVMX_HELPER_INTERFACE_MODE_RGMII:
case CVMX_HELPER_INTERFACE_MODE_GMII:
interface_port_count[interface] =
__cvmx_helper_rgmii_enumerate(interface);
break;
/*
* SPI4 can have 1-16 ports depending on the device at
* the other end.
*/
case CVMX_HELPER_INTERFACE_MODE_SPI:
interface_port_count[interface] =
__cvmx_helper_spi_enumerate(interface);
break;
/*
* SGMII can have 1-4 ports depending on how many are
* hooked up.
*/
case CVMX_HELPER_INTERFACE_MODE_SGMII:
case CVMX_HELPER_INTERFACE_MODE_PICMG:
interface_port_count[interface] =
__cvmx_helper_sgmii_enumerate(interface);
break;
/* PCI target Network Packet Interface */
case CVMX_HELPER_INTERFACE_MODE_NPI:
interface_port_count[interface] =
__cvmx_helper_npi_enumerate(interface);
break;
/*
* Special loopback only ports. These are not the same
* as other ports in loopback mode.
*/
case CVMX_HELPER_INTERFACE_MODE_LOOP:
interface_port_count[interface] =
__cvmx_helper_loop_enumerate(interface);
break;
}
interface_port_count[interface] =
__cvmx_helper_board_interface_probe(interface,
interface_port_count
[interface]);
/* Make sure all global variables propagate to other cores */
CVMX_SYNCWS;
return 0;
}
/**
* This function probes an interface to determine the actual
* number of hardware ports connected to it. It doesn't setup the
* ports or enable them. The main goal here is to set the global
* interface_port_count[interface] correctly. Hardware setup of the
* ports will be performed later.
*
* @interface: Interface to probe
*
* Returns Zero on success, negative on failure
*/
int cvmx_helper_interface_probe(int interface)
{
cvmx_helper_interface_enumerate(interface);
/* At this stage in the game we don't want packets to be moving yet.
The following probe calls should perform hardware setup
needed to determine port counts. Receive must still be disabled */
switch (cvmx_helper_interface_get_mode(interface)) {
/* These types don't support ports to IPD/PKO */
case CVMX_HELPER_INTERFACE_MODE_DISABLED:
case CVMX_HELPER_INTERFACE_MODE_PCIE:
break;
/* XAUI is a single high speed port */
case CVMX_HELPER_INTERFACE_MODE_XAUI:
__cvmx_helper_xaui_probe(interface);
break;
/*
* RGMII/GMII/MII are all treated about the same. Most
* functions refer to these ports as RGMII.
*/
case CVMX_HELPER_INTERFACE_MODE_RGMII:
case CVMX_HELPER_INTERFACE_MODE_GMII:
__cvmx_helper_rgmii_probe(interface);
break;
/*
* SPI4 can have 1-16 ports depending on the device at
* the other end.
*/
case CVMX_HELPER_INTERFACE_MODE_SPI:
__cvmx_helper_spi_probe(interface);
break;
/*
* SGMII can have 1-4 ports depending on how many are
* hooked up.
*/
case CVMX_HELPER_INTERFACE_MODE_SGMII:
case CVMX_HELPER_INTERFACE_MODE_PICMG:
__cvmx_helper_sgmii_probe(interface);
break;
/* PCI target Network Packet Interface */
case CVMX_HELPER_INTERFACE_MODE_NPI:
__cvmx_helper_npi_probe(interface);
break;
/*
* Special loopback only ports. These are not the same
* as other ports in loopback mode.
*/
case CVMX_HELPER_INTERFACE_MODE_LOOP:
__cvmx_helper_loop_probe(interface);
break;
}
/* Make sure all global variables propagate to other cores */
CVMX_SYNCWS;
return 0;
}
/**
* Setup the IPD/PIP for the ports on an interface. Packet
* classification and tagging are set for every port on the
* interface. The number of ports on the interface must already
* have been probed.
*
* @interface: Interface to setup IPD/PIP for
*
* Returns Zero on success, negative on failure
*/
static int __cvmx_helper_interface_setup_ipd(int interface)
{
int ipd_port = cvmx_helper_get_ipd_port(interface, 0);
int num_ports = interface_port_count[interface];
while (num_ports--) {
__cvmx_helper_port_setup_ipd(ipd_port);
ipd_port++;
}
return 0;
}
/**
* Setup global setting for IPD/PIP not related to a specific
* interface or port. This must be called before IPD is enabled.
*
* Returns Zero on success, negative on failure.
*/
static int __cvmx_helper_global_setup_ipd(void)
{
/* Setup the global packet input options */
cvmx_ipd_config(CVMX_FPA_PACKET_POOL_SIZE / 8,
CVMX_HELPER_FIRST_MBUFF_SKIP / 8,
CVMX_HELPER_NOT_FIRST_MBUFF_SKIP / 8,
/* The +8 is to account for the next ptr */
(CVMX_HELPER_FIRST_MBUFF_SKIP + 8) / 128,
/* The +8 is to account for the next ptr */
(CVMX_HELPER_NOT_FIRST_MBUFF_SKIP + 8) / 128,
CVMX_FPA_WQE_POOL,
CVMX_IPD_OPC_MODE_STT,
CVMX_HELPER_ENABLE_BACK_PRESSURE);
return 0;
}
/**
* Setup the PKO for the ports on an interface. The number of
* queues per port and the priority of each PKO output queue
* is set here. PKO must be disabled when this function is called.
*
* @interface: Interface to setup PKO for
*
* Returns Zero on success, negative on failure
*/
static int __cvmx_helper_interface_setup_pko(int interface)
{
/*
* Each packet output queue has an associated priority. The
* higher the priority, the more often it can send a packet. A
* priority of 8 means it can send in all 8 rounds of
* contention. We're going to make each queue one less than
* the last. The vector of priorities has been extended to
* support CN5xxx CPUs, where up to 16 queues can be
* associated to a port. To keep backward compatibility we
* don't change the initial 8 priorities and replicate them in
* the second half. With per-core PKO queues (PKO lockless
* operation) all queues have the same priority.
*/
uint64_t priorities[16] =
{ 8, 7, 6, 5, 4, 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, 1 };
/*
* Setup the IPD/PIP and PKO for the ports discovered
* above. Here packet classification, tagging and output
* priorities are set.
*/
int ipd_port = cvmx_helper_get_ipd_port(interface, 0);
int num_ports = interface_port_count[interface];
while (num_ports--) {
/*
* Give the user a chance to override the per queue
* priorities.
*/
if (cvmx_override_pko_queue_priority)
cvmx_override_pko_queue_priority(ipd_port, priorities);
cvmx_pko_config_port(ipd_port,
cvmx_pko_get_base_queue_per_core(ipd_port,
0),
cvmx_pko_get_num_queues(ipd_port),
priorities);
ipd_port++;
}
return 0;
}
/**
* Setup global setting for PKO not related to a specific
* interface or port. This must be called before PKO is enabled.
*
* Returns Zero on success, negative on failure.
*/
static int __cvmx_helper_global_setup_pko(void)
{
/*
* Disable tagwait FAU timeout. This needs to be done before
* anyone might start packet output using tags.
*/
union cvmx_iob_fau_timeout fau_to;
fau_to.u64 = 0;
fau_to.s.tout_val = 0xfff;
fau_to.s.tout_enb = 0;
cvmx_write_csr(CVMX_IOB_FAU_TIMEOUT, fau_to.u64);
return 0;
}
/**
* Setup global backpressure setting.
*
* Returns Zero on success, negative on failure
*/
static int __cvmx_helper_global_setup_backpressure(void)
{
#if CVMX_HELPER_DISABLE_RGMII_BACKPRESSURE
/* Disable backpressure if configured to do so */
/* Disable backpressure (pause frame) generation */
int num_interfaces = cvmx_helper_get_number_of_interfaces();
int interface;
for (interface = 0; interface < num_interfaces; interface++) {
switch (cvmx_helper_interface_get_mode(interface)) {
case CVMX_HELPER_INTERFACE_MODE_DISABLED:
case CVMX_HELPER_INTERFACE_MODE_PCIE:
case CVMX_HELPER_INTERFACE_MODE_NPI:
case CVMX_HELPER_INTERFACE_MODE_LOOP:
case CVMX_HELPER_INTERFACE_MODE_XAUI:
break;
case CVMX_HELPER_INTERFACE_MODE_RGMII:
case CVMX_HELPER_INTERFACE_MODE_GMII:
case CVMX_HELPER_INTERFACE_MODE_SPI:
case CVMX_HELPER_INTERFACE_MODE_SGMII:
case CVMX_HELPER_INTERFACE_MODE_PICMG:
cvmx_gmx_set_backpressure_override(interface, 0xf);
break;
}
}
#endif
return 0;
}
/**
* Enable packet input/output from the hardware. This function is
* called after all internal setup is complete and IPD is enabled.
* After this function completes, packets will be accepted from the
* hardware ports. PKO should still be disabled to make sure packets
* aren't sent out partially setup hardware.
*
* @interface: Interface to enable
*
* Returns Zero on success, negative on failure
*/
static int __cvmx_helper_packet_hardware_enable(int interface)
{
int result = 0;
switch (cvmx_helper_interface_get_mode(interface)) {
/* These types don't support ports to IPD/PKO */
case CVMX_HELPER_INTERFACE_MODE_DISABLED:
case CVMX_HELPER_INTERFACE_MODE_PCIE:
/* Nothing to do */
break;
/* XAUI is a single high speed port */
case CVMX_HELPER_INTERFACE_MODE_XAUI:
result = __cvmx_helper_xaui_enable(interface);
break;
/*
* RGMII/GMII/MII are all treated about the same. Most
* functions refer to these ports as RGMII
*/
case CVMX_HELPER_INTERFACE_MODE_RGMII:
case CVMX_HELPER_INTERFACE_MODE_GMII:
result = __cvmx_helper_rgmii_enable(interface);
break;
/*
* SPI4 can have 1-16 ports depending on the device at
* the other end
*/
case CVMX_HELPER_INTERFACE_MODE_SPI:
result = __cvmx_helper_spi_enable(interface);
break;
/*
* SGMII can have 1-4 ports depending on how many are
* hooked up
*/
case CVMX_HELPER_INTERFACE_MODE_SGMII:
case CVMX_HELPER_INTERFACE_MODE_PICMG:
result = __cvmx_helper_sgmii_enable(interface);
break;
/* PCI target Network Packet Interface */
case CVMX_HELPER_INTERFACE_MODE_NPI:
result = __cvmx_helper_npi_enable(interface);
break;
/*
* Special loopback only ports. These are not the same
* as other ports in loopback mode
*/
case CVMX_HELPER_INTERFACE_MODE_LOOP:
result = __cvmx_helper_loop_enable(interface);
break;
}
result |= __cvmx_helper_board_hardware_enable(interface);
return result;
}
/**
* Function to adjust internal IPD pointer alignments
*
* Returns 0 on success
* !0 on failure
*/
int __cvmx_helper_errata_fix_ipd_ptr_alignment(void)
{
#define FIX_IPD_FIRST_BUFF_PAYLOAD_BYTES \
(CVMX_FPA_PACKET_POOL_SIZE-8-CVMX_HELPER_FIRST_MBUFF_SKIP)
#define FIX_IPD_NON_FIRST_BUFF_PAYLOAD_BYTES \
(CVMX_FPA_PACKET_POOL_SIZE-8-CVMX_HELPER_NOT_FIRST_MBUFF_SKIP)
#define FIX_IPD_OUTPORT 0
/* Ports 0-15 are interface 0, 16-31 are interface 1 */
#define INTERFACE(port) (port >> 4)
#define INDEX(port) (port & 0xf)
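/*
 * Example (illustrative only): port 19 gives INTERFACE(19) == 1 and
 * INDEX(19) == 3; FIX_IPD_OUTPORT 0 is interface 0, index 0.
 */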
uint64_t *p64;
cvmx_pko_command_word0_t pko_command;
union cvmx_buf_ptr g_buffer, pkt_buffer;
cvmx_wqe_t *work;
int size, num_segs = 0, wqe_pcnt, pkt_pcnt;
union cvmx_gmxx_prtx_cfg gmx_cfg;
int retry_cnt;
int retry_loop_cnt;
int i;
cvmx_helper_link_info_t link_info;
/* Save values for restore at end */
uint64_t prtx_cfg =
cvmx_read_csr(CVMX_GMXX_PRTX_CFG
(INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)));
uint64_t tx_ptr_en =
cvmx_read_csr(CVMX_ASXX_TX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)));
uint64_t rx_ptr_en =
cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)));
uint64_t rxx_jabber =
cvmx_read_csr(CVMX_GMXX_RXX_JABBER
(INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)));
uint64_t frame_max =
cvmx_read_csr(CVMX_GMXX_RXX_FRM_MAX
(INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)));
/* Configure port to gig FDX as required for loopback mode */
cvmx_helper_rgmii_internal_loopback(FIX_IPD_OUTPORT);
/*
* Disable reception on all ports so if traffic is present it
* will not interfere.
*/
cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)), 0);
cvmx_wait(100000000ull);
for (retry_loop_cnt = 0; retry_loop_cnt < 10; retry_loop_cnt++) {
retry_cnt = 100000;
wqe_pcnt = cvmx_read_csr(CVMX_IPD_PTR_COUNT);
pkt_pcnt = (wqe_pcnt >> 7) & 0x7f;
wqe_pcnt &= 0x7f;
num_segs = (2 + pkt_pcnt - wqe_pcnt) & 3;
if (num_segs == 0)
goto fix_ipd_exit;
num_segs += 1;
size =
FIX_IPD_FIRST_BUFF_PAYLOAD_BYTES +
((num_segs - 1) * FIX_IPD_NON_FIRST_BUFF_PAYLOAD_BYTES) -
(FIX_IPD_NON_FIRST_BUFF_PAYLOAD_BYTES / 2);
cvmx_write_csr(CVMX_ASXX_PRT_LOOP(INTERFACE(FIX_IPD_OUTPORT)),
1 << INDEX(FIX_IPD_OUTPORT));
CVMX_SYNC;
g_buffer.u64 = 0;
g_buffer.s.addr =
cvmx_ptr_to_phys(cvmx_fpa_alloc(CVMX_FPA_WQE_POOL));
if (g_buffer.s.addr == 0) {
cvmx_dprintf("WARNING: FIX_IPD_PTR_ALIGNMENT "
"buffer allocation failure.\n");
goto fix_ipd_exit;
}
g_buffer.s.pool = CVMX_FPA_WQE_POOL;
g_buffer.s.size = num_segs;
pkt_buffer.u64 = 0;
pkt_buffer.s.addr =
cvmx_ptr_to_phys(cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL));
if (pkt_buffer.s.addr == 0) {
cvmx_dprintf("WARNING: FIX_IPD_PTR_ALIGNMENT "
"buffer allocation failure.\n");
goto fix_ipd_exit;
}
pkt_buffer.s.i = 1;
pkt_buffer.s.pool = CVMX_FPA_PACKET_POOL;
pkt_buffer.s.size = FIX_IPD_FIRST_BUFF_PAYLOAD_BYTES;
p64 = (uint64_t *) cvmx_phys_to_ptr(pkt_buffer.s.addr);
p64[0] = 0xffffffffffff0000ull;
p64[1] = 0x08004510ull;
p64[2] = ((uint64_t) (size - 14) << 48) | 0x5ae740004000ull;
p64[3] = 0x3a5fc0a81073c0a8ull;
for (i = 0; i < num_segs; i++) {
if (i > 0)
pkt_buffer.s.size =
FIX_IPD_NON_FIRST_BUFF_PAYLOAD_BYTES;
if (i == (num_segs - 1))
pkt_buffer.s.i = 0;
*(uint64_t *) cvmx_phys_to_ptr(g_buffer.s.addr +
8 * i) = pkt_buffer.u64;
}
/* Build the PKO command */
pko_command.u64 = 0;
pko_command.s.segs = num_segs;
pko_command.s.total_bytes = size;
pko_command.s.dontfree = 0;
pko_command.s.gather = 1;
gmx_cfg.u64 =
cvmx_read_csr(CVMX_GMXX_PRTX_CFG
(INDEX(FIX_IPD_OUTPORT),
INTERFACE(FIX_IPD_OUTPORT)));
gmx_cfg.s.en = 1;
cvmx_write_csr(CVMX_GMXX_PRTX_CFG
(INDEX(FIX_IPD_OUTPORT),
INTERFACE(FIX_IPD_OUTPORT)), gmx_cfg.u64);
cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)),
1 << INDEX(FIX_IPD_OUTPORT));
cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)),
1 << INDEX(FIX_IPD_OUTPORT));
cvmx_write_csr(CVMX_GMXX_RXX_JABBER
(INDEX(FIX_IPD_OUTPORT),
INTERFACE(FIX_IPD_OUTPORT)), 65392 - 14 - 4);
cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX
(INDEX(FIX_IPD_OUTPORT),
INTERFACE(FIX_IPD_OUTPORT)), 65392 - 14 - 4);
cvmx_pko_send_packet_prepare(FIX_IPD_OUTPORT,
cvmx_pko_get_base_queue
(FIX_IPD_OUTPORT),
CVMX_PKO_LOCK_CMD_QUEUE);
cvmx_pko_send_packet_finish(FIX_IPD_OUTPORT,
cvmx_pko_get_base_queue
(FIX_IPD_OUTPORT), pko_command,
g_buffer, CVMX_PKO_LOCK_CMD_QUEUE);
CVMX_SYNC;
do {
work = cvmx_pow_work_request_sync(CVMX_POW_WAIT);
retry_cnt--;
} while ((work == NULL) && (retry_cnt > 0));
if (!retry_cnt)
cvmx_dprintf("WARNING: FIX_IPD_PTR_ALIGNMENT "
"get_work() timeout occurred.\n");
/* Free packet */
if (work)
cvmx_helper_free_packet_data(work);
}
fix_ipd_exit:
/* Return CSR configs to saved values */
cvmx_write_csr(CVMX_GMXX_PRTX_CFG
(INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)),
prtx_cfg);
cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)),
tx_ptr_en);
cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)),
rx_ptr_en);
cvmx_write_csr(CVMX_GMXX_RXX_JABBER
(INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)),
rxx_jabber);
cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX
(INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)),
frame_max);
cvmx_write_csr(CVMX_ASXX_PRT_LOOP(INTERFACE(FIX_IPD_OUTPORT)), 0);
/* Set link to down so autonegotiation will set it up again */
link_info.u64 = 0;
cvmx_helper_link_set(FIX_IPD_OUTPORT, link_info);
/*
* Bring the link back up as autonegotiation is not done in
* user applications.
*/
cvmx_helper_link_autoconf(FIX_IPD_OUTPORT);
CVMX_SYNC;
if (num_segs)
cvmx_dprintf("WARNING: FIX_IPD_PTR_ALIGNMENT failed.\n");
return !!num_segs;
}
/**
* Called after all internal packet IO paths are setup. This
* function enables IPD/PIP and begins packet input and output.
*
* Returns Zero on success, negative on failure
*/
int cvmx_helper_ipd_and_packet_input_enable(void)
{
int num_interfaces;
int interface;
/* Enable IPD */
cvmx_ipd_enable();
/*
* Time to enable hardware ports packet input and output. Note
* that at this point IPD/PIP must be fully functional and PKO
* must be disabled
*/
num_interfaces = cvmx_helper_get_number_of_interfaces();
for (interface = 0; interface < num_interfaces; interface++) {
if (cvmx_helper_ports_on_interface(interface) > 0)
__cvmx_helper_packet_hardware_enable(interface);
}
/* Finally enable PKO now that the entire path is up and running */
cvmx_pko_enable();
if ((OCTEON_IS_MODEL(OCTEON_CN31XX_PASS1)
|| OCTEON_IS_MODEL(OCTEON_CN30XX_PASS1))
&& (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM))
__cvmx_helper_errata_fix_ipd_ptr_alignment();
return 0;
}
/**
* Initialize the PIP, IPD, and PKO hardware to support
* simple priority based queues for the ethernet ports. Each
* port is configured with a number of priority queues based
* on CVMX_PKO_QUEUES_PER_PORT_* where each queue is lower
* priority than the previous.
*
* Returns Zero on success, non-zero on failure
*/
int cvmx_helper_initialize_packet_io_global(void)
{
int result = 0;
int interface;
union cvmx_l2c_cfg l2c_cfg;
union cvmx_smix_en smix_en;
const int num_interfaces = cvmx_helper_get_number_of_interfaces();
/*
* CN52XX pass 1: Due to a bug in 2nd order CDR, it needs to
* be disabled.
*/
if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_0))
__cvmx_helper_errata_qlm_disable_2nd_order_cdr(1);
/*
* Tell L2 to give the IOB statically higher priority compared
* to the cores. This avoids conditions where IO blocks might
* be starved under very high L2 loads.
*/
l2c_cfg.u64 = cvmx_read_csr(CVMX_L2C_CFG);
l2c_cfg.s.lrf_arb_mode = 0;
l2c_cfg.s.rfb_arb_mode = 0;
cvmx_write_csr(CVMX_L2C_CFG, l2c_cfg.u64);
/* Make sure SMI/MDIO is enabled so we can query PHYs */
smix_en.u64 = cvmx_read_csr(CVMX_SMIX_EN(0));
if (!smix_en.s.en) {
smix_en.s.en = 1;
cvmx_write_csr(CVMX_SMIX_EN(0), smix_en.u64);
}
/* Newer chips actually have two SMI/MDIO interfaces */
if (!OCTEON_IS_MODEL(OCTEON_CN3XXX) &&
!OCTEON_IS_MODEL(OCTEON_CN58XX) &&
!OCTEON_IS_MODEL(OCTEON_CN50XX)) {
smix_en.u64 = cvmx_read_csr(CVMX_SMIX_EN(1));
if (!smix_en.s.en) {
smix_en.s.en = 1;
cvmx_write_csr(CVMX_SMIX_EN(1), smix_en.u64);
}
}
cvmx_pko_initialize_global();
for (interface = 0; interface < num_interfaces; interface++) {
result |= cvmx_helper_interface_probe(interface);
if (cvmx_helper_ports_on_interface(interface) > 0)
cvmx_dprintf("Interface %d has %d ports (%s)\n",
interface,
cvmx_helper_ports_on_interface(interface),
cvmx_helper_interface_mode_to_string
(cvmx_helper_interface_get_mode
(interface)));
result |= __cvmx_helper_interface_setup_ipd(interface);
result |= __cvmx_helper_interface_setup_pko(interface);
}
result |= __cvmx_helper_global_setup_ipd();
result |= __cvmx_helper_global_setup_pko();
/* Enable any flow control and backpressure */
result |= __cvmx_helper_global_setup_backpressure();
#if CVMX_HELPER_ENABLE_IPD
result |= cvmx_helper_ipd_and_packet_input_enable();
#endif
return result;
}
/**
* Does core local initialization for packet io
*
* Returns Zero on success, non-zero on failure
*/
int cvmx_helper_initialize_packet_io_local(void)
{
return cvmx_pko_initialize_local();
}
/**
* Auto configure an IPD/PKO port link state and speed. This
* function basically does the equivalent of:
* cvmx_helper_link_set(ipd_port, cvmx_helper_link_get(ipd_port));
*
* @ipd_port: IPD/PKO port to auto configure
*
* Returns Link state after configure
*/
cvmx_helper_link_info_t cvmx_helper_link_autoconf(int ipd_port)
{
cvmx_helper_link_info_t link_info;
int interface = cvmx_helper_get_interface_num(ipd_port);
int index = cvmx_helper_get_interface_index_num(ipd_port);
if (index >= cvmx_helper_ports_on_interface(interface)) {
link_info.u64 = 0;
return link_info;
}
link_info = cvmx_helper_link_get(ipd_port);
if (link_info.u64 == port_link_info[ipd_port].u64)
return link_info;
/* If we fail to set the link speed, port_link_info will not change */
cvmx_helper_link_set(ipd_port, link_info);
/*
* port_link_info should be the current value, which will be
* different than expected if cvmx_helper_link_set() failed.
*/
return port_link_info[ipd_port];
}
/**
* Return the link state of an IPD/PKO port as returned by
* auto negotiation. The result of this function may not match
* Octeon's link config if auto negotiation has changed since
* the last call to cvmx_helper_link_set().
*
* @ipd_port: IPD/PKO port to query
*
* Returns Link state
*/
cvmx_helper_link_info_t cvmx_helper_link_get(int ipd_port)
{
cvmx_helper_link_info_t result;
int interface = cvmx_helper_get_interface_num(ipd_port);
int index = cvmx_helper_get_interface_index_num(ipd_port);
/* The default result will be a down link unless the code below
changes it */
result.u64 = 0;
if (index >= cvmx_helper_ports_on_interface(interface))
return result;
switch (cvmx_helper_interface_get_mode(interface)) {
case CVMX_HELPER_INTERFACE_MODE_DISABLED:
case CVMX_HELPER_INTERFACE_MODE_PCIE:
/* Network links are not supported */
break;
case CVMX_HELPER_INTERFACE_MODE_XAUI:
result = __cvmx_helper_xaui_link_get(ipd_port);
break;
case CVMX_HELPER_INTERFACE_MODE_GMII:
if (index == 0)
result = __cvmx_helper_rgmii_link_get(ipd_port);
else {
result.s.full_duplex = 1;
result.s.link_up = 1;
result.s.speed = 1000;
}
break;
case CVMX_HELPER_INTERFACE_MODE_RGMII:
result = __cvmx_helper_rgmii_link_get(ipd_port);
break;
case CVMX_HELPER_INTERFACE_MODE_SPI:
result = __cvmx_helper_spi_link_get(ipd_port);
break;
case CVMX_HELPER_INTERFACE_MODE_SGMII:
case CVMX_HELPER_INTERFACE_MODE_PICMG:
result = __cvmx_helper_sgmii_link_get(ipd_port);
break;
case CVMX_HELPER_INTERFACE_MODE_NPI:
case CVMX_HELPER_INTERFACE_MODE_LOOP:
/* Network links are not supported */
break;
}
return result;
}
/**
* Configure an IPD/PKO port for the specified link state. This
* function does not influence auto negotiation at the PHY level.
* The passed link state must always match the link state returned
* by cvmx_helper_link_get(). It is normally best to use
* cvmx_helper_link_autoconf() instead.
*
* @ipd_port: IPD/PKO port to configure
* @link_info: The new link state
*
* Returns Zero on success, negative on failure
*/
int cvmx_helper_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
{
int result = -1;
int interface = cvmx_helper_get_interface_num(ipd_port);
int index = cvmx_helper_get_interface_index_num(ipd_port);
if (index >= cvmx_helper_ports_on_interface(interface))
return -1;
switch (cvmx_helper_interface_get_mode(interface)) {
case CVMX_HELPER_INTERFACE_MODE_DISABLED:
case CVMX_HELPER_INTERFACE_MODE_PCIE:
break;
case CVMX_HELPER_INTERFACE_MODE_XAUI:
result = __cvmx_helper_xaui_link_set(ipd_port, link_info);
break;
/*
* RGMII/GMII/MII are all treated about the same. Most
* functions refer to these ports as RGMII.
*/
case CVMX_HELPER_INTERFACE_MODE_RGMII:
case CVMX_HELPER_INTERFACE_MODE_GMII:
result = __cvmx_helper_rgmii_link_set(ipd_port, link_info);
break;
case CVMX_HELPER_INTERFACE_MODE_SPI:
result = __cvmx_helper_spi_link_set(ipd_port, link_info);
break;
case CVMX_HELPER_INTERFACE_MODE_SGMII:
case CVMX_HELPER_INTERFACE_MODE_PICMG:
result = __cvmx_helper_sgmii_link_set(ipd_port, link_info);
break;
case CVMX_HELPER_INTERFACE_MODE_NPI:
case CVMX_HELPER_INTERFACE_MODE_LOOP:
break;
}
/* Set the port_link_info here so that the link status is updated
no matter how cvmx_helper_link_set is called. We don't change
the value if link_set failed */
if (result == 0)
port_link_info[ipd_port].u64 = link_info.u64;
return result;
}
/**
* Configure a port for internal and/or external loopback. Internal loopback
* causes packets sent by the port to be received by Octeon. External loopback
* causes packets received from the wire to be sent out again.
*
* @ipd_port: IPD/PKO port to loopback.
* @enable_internal:
* Non zero if you want internal loopback
* @enable_external:
* Non zero if you want external loopback
*
* Returns Zero on success, negative on failure.
*/
int cvmx_helper_configure_loopback(int ipd_port, int enable_internal,
int enable_external)
{
int result = -1;
int interface = cvmx_helper_get_interface_num(ipd_port);
int index = cvmx_helper_get_interface_index_num(ipd_port);
if (index >= cvmx_helper_ports_on_interface(interface))
return -1;
switch (cvmx_helper_interface_get_mode(interface)) {
case CVMX_HELPER_INTERFACE_MODE_DISABLED:
case CVMX_HELPER_INTERFACE_MODE_PCIE:
case CVMX_HELPER_INTERFACE_MODE_SPI:
case CVMX_HELPER_INTERFACE_MODE_NPI:
case CVMX_HELPER_INTERFACE_MODE_LOOP:
break;
case CVMX_HELPER_INTERFACE_MODE_XAUI:
result =
__cvmx_helper_xaui_configure_loopback(ipd_port,
enable_internal,
enable_external);
break;
case CVMX_HELPER_INTERFACE_MODE_RGMII:
case CVMX_HELPER_INTERFACE_MODE_GMII:
result =
__cvmx_helper_rgmii_configure_loopback(ipd_port,
enable_internal,
enable_external);
break;
case CVMX_HELPER_INTERFACE_MODE_SGMII:
case CVMX_HELPER_INTERFACE_MODE_PICMG:
result =
__cvmx_helper_sgmii_configure_loopback(ipd_port,
enable_internal,
enable_external);
break;
}
return result;
}
| gpl-2.0 |
LorDClockaN/shooter-ics | mm/cleancache.c | 3000 | 7026 | /*
* Cleancache frontend
*
* This code provides the generic "frontend" layer to call a matching
* "backend" driver implementation of cleancache. See
* Documentation/vm/cleancache.txt for more information.
*
* Copyright (C) 2009-2010 Oracle Corp. All rights reserved.
* Author: Dan Magenheimer
*
* This work is licensed under the terms of the GNU GPL, version 2.
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/exportfs.h>
#include <linux/mm.h>
#include <linux/cleancache.h>
/*
* This global enablement flag may be read thousands of times per second
* by cleancache_get/put/flush even on systems where cleancache_ops
* is not claimed (e.g. cleancache is config'ed on but remains
* disabled), so is preferred to the slower alternative: a function
* call that checks a non-global.
*/
int cleancache_enabled;
EXPORT_SYMBOL(cleancache_enabled);
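/*
 * A minimal sketch, modelled on the inline wrappers in
 * <linux/cleancache.h> (shown here for illustration only), of how hot
 * paths consult this flag before paying for a real function call:
 *
 *	static inline int cleancache_get_page(struct page *page)
 *	{
 *		int ret = -1;
 *
 *		if (cleancache_enabled)
 *			ret = __cleancache_get_page(page);
 *		return ret;
 *	}
 */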
/*
* cleancache_ops is set by cleancache_ops_register to contain the pointers
* to the cleancache "backend" implementation functions.
*/
static struct cleancache_ops cleancache_ops;
/* useful stats available in /sys/kernel/mm/cleancache */
static unsigned long cleancache_succ_gets;
static unsigned long cleancache_failed_gets;
static unsigned long cleancache_puts;
static unsigned long cleancache_flushes;
/*
* register operations for cleancache, returning previous thus allowing
* detection of multiple backends and possible nesting
*/
struct cleancache_ops cleancache_register_ops(struct cleancache_ops *ops)
{
struct cleancache_ops old = cleancache_ops;
cleancache_ops = *ops;
cleancache_enabled = 1;
return old;
}
EXPORT_SYMBOL(cleancache_register_ops);
/* Called by a cleancache-enabled filesystem at time of mount */
void __cleancache_init_fs(struct super_block *sb)
{
sb->cleancache_poolid = (*cleancache_ops.init_fs)(PAGE_SIZE);
}
EXPORT_SYMBOL(__cleancache_init_fs);
/* Called by a cleancache-enabled clustered filesystem at time of mount */
void __cleancache_init_shared_fs(char *uuid, struct super_block *sb)
{
sb->cleancache_poolid =
(*cleancache_ops.init_shared_fs)(uuid, PAGE_SIZE);
}
EXPORT_SYMBOL(__cleancache_init_shared_fs);
/*
* If the filesystem uses exportable filehandles, use the filehandle as
* the key, else use the inode number.
*/
static int cleancache_get_key(struct inode *inode,
struct cleancache_filekey *key)
{
int (*fhfn)(struct dentry *, __u32 *fh, int *, int);
int len = 0, maxlen = CLEANCACHE_KEY_MAX;
struct super_block *sb = inode->i_sb;
key->u.ino = inode->i_ino;
if (sb->s_export_op != NULL) {
fhfn = sb->s_export_op->encode_fh;
if (fhfn) {
struct dentry d;
d.d_inode = inode;
len = (*fhfn)(&d, &key->u.fh[0], &maxlen, 0);
if (len <= 0 || len == 255)
return -1;
if (maxlen > CLEANCACHE_KEY_MAX)
return -1;
}
}
return 0;
}
/*
* "Get" data from cleancache associated with the poolid/inode/index
* that were specified when the data was put to cleancache and, if
* successful, use it to fill the specified page with data and return 0.
* The pageframe is unchanged and -1 is returned if the get fails.
* Page must be locked by caller.
*/
int __cleancache_get_page(struct page *page)
{
int ret = -1;
int pool_id;
struct cleancache_filekey key = { .u.key = { 0 } };
VM_BUG_ON(!PageLocked(page));
pool_id = page->mapping->host->i_sb->cleancache_poolid;
if (pool_id < 0)
goto out;
if (cleancache_get_key(page->mapping->host, &key) < 0)
goto out;
ret = (*cleancache_ops.get_page)(pool_id, key, page->index, page);
if (ret == 0)
cleancache_succ_gets++;
else
cleancache_failed_gets++;
out:
return ret;
}
EXPORT_SYMBOL(__cleancache_get_page);
/*
* "Put" data from a page to cleancache and associate it with the
* (previously-obtained per-filesystem) poolid and the page's
* inode and page index. Page must be locked. Note that a put_page
* always "succeeds", though a subsequent get_page may succeed or fail.
*/
void __cleancache_put_page(struct page *page)
{
int pool_id;
struct cleancache_filekey key = { .u.key = { 0 } };
VM_BUG_ON(!PageLocked(page));
pool_id = page->mapping->host->i_sb->cleancache_poolid;
if (pool_id >= 0 &&
cleancache_get_key(page->mapping->host, &key) >= 0) {
(*cleancache_ops.put_page)(pool_id, key, page->index, page);
cleancache_puts++;
}
}
EXPORT_SYMBOL(__cleancache_put_page);
/*
* Flush any data from cleancache associated with the poolid and the
* page's inode and page index so that a subsequent "get" will fail.
*/
void __cleancache_flush_page(struct address_space *mapping, struct page *page)
{
/* careful... page->mapping is NULL sometimes when this is called */
int pool_id = mapping->host->i_sb->cleancache_poolid;
struct cleancache_filekey key = { .u.key = { 0 } };
if (pool_id >= 0) {
VM_BUG_ON(!PageLocked(page));
if (cleancache_get_key(mapping->host, &key) >= 0) {
(*cleancache_ops.flush_page)(pool_id, key, page->index);
cleancache_flushes++;
}
}
}
EXPORT_SYMBOL(__cleancache_flush_page);
/*
* Flush all data from cleancache associated with the poolid and the
* mappings's inode so that all subsequent gets to this poolid/inode
* will fail.
*/
void __cleancache_flush_inode(struct address_space *mapping)
{
int pool_id = mapping->host->i_sb->cleancache_poolid;
struct cleancache_filekey key = { .u.key = { 0 } };
if (pool_id >= 0 && cleancache_get_key(mapping->host, &key) >= 0)
(*cleancache_ops.flush_inode)(pool_id, key);
}
EXPORT_SYMBOL(__cleancache_flush_inode);
/*
* Called by any cleancache-enabled filesystem at time of unmount;
* note that pool_id is surrendered and may be returned by a subsequent
* cleancache_init_fs or cleancache_init_shared_fs
*/
void __cleancache_flush_fs(struct super_block *sb)
{
if (sb->cleancache_poolid >= 0) {
int old_poolid = sb->cleancache_poolid;
sb->cleancache_poolid = -1;
(*cleancache_ops.flush_fs)(old_poolid);
}
}
EXPORT_SYMBOL(__cleancache_flush_fs);
#ifdef CONFIG_SYSFS
/* see Documentation/ABI/xxx/sysfs-kernel-mm-cleancache */
#define CLEANCACHE_SYSFS_RO(_name) \
static ssize_t cleancache_##_name##_show(struct kobject *kobj, \
struct kobj_attribute *attr, char *buf) \
{ \
return sprintf(buf, "%lu\n", cleancache_##_name); \
} \
static struct kobj_attribute cleancache_##_name##_attr = { \
.attr = { .name = __stringify(_name), .mode = 0444 }, \
.show = cleancache_##_name##_show, \
}
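/*
 * For illustration: CLEANCACHE_SYSFS_RO(puts) expands to a read-only
 * "puts" attribute whose show() method prints cleancache_puts, exposed
 * below as /sys/kernel/mm/cleancache/puts.
 */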
CLEANCACHE_SYSFS_RO(succ_gets);
CLEANCACHE_SYSFS_RO(failed_gets);
CLEANCACHE_SYSFS_RO(puts);
CLEANCACHE_SYSFS_RO(flushes);
static struct attribute *cleancache_attrs[] = {
&cleancache_succ_gets_attr.attr,
&cleancache_failed_gets_attr.attr,
&cleancache_puts_attr.attr,
&cleancache_flushes_attr.attr,
NULL,
};
static struct attribute_group cleancache_attr_group = {
.attrs = cleancache_attrs,
.name = "cleancache",
};
#endif /* CONFIG_SYSFS */
static int __init init_cleancache(void)
{
#ifdef CONFIG_SYSFS
int err;
err = sysfs_create_group(mm_kobj, &cleancache_attr_group);
#endif /* CONFIG_SYSFS */
return 0;
}
module_init(init_cleancache)
| gpl-2.0 |
Galaxy-J5/android_kernel_samsung_j5nlte | arch/mips/sgi-ip22/ip28-berr.c | 3000 | 14864 | /*
* ip28-berr.c: Bus error handling.
*
* Copyright (C) 2002, 2003 Ladislav Michl (ladis@linux-mips.org)
* Copyright (C) 2005 Peter Fuerst (pf@net.alphadv.de) - IP28
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <asm/addrspace.h>
#include <asm/traps.h>
#include <asm/branch.h>
#include <asm/irq_regs.h>
#include <asm/sgi/mc.h>
#include <asm/sgi/hpc3.h>
#include <asm/sgi/ioc.h>
#include <asm/sgi/ip22.h>
#include <asm/r4kcache.h>
#include <asm/uaccess.h>
#include <asm/bootinfo.h>
static unsigned int count_be_is_fixup;
static unsigned int count_be_handler;
static unsigned int count_be_interrupt;
static int debug_be_interrupt;
static unsigned int cpu_err_stat; /* Status reg for CPU */
static unsigned int gio_err_stat; /* Status reg for GIO */
static unsigned int cpu_err_addr; /* Error address reg for CPU */
static unsigned int gio_err_addr; /* Error address reg for GIO */
static unsigned int extio_stat;
static unsigned int hpc3_berr_stat; /* Bus error interrupt status */
struct hpc3_stat {
unsigned long addr;
unsigned int ctrl;
unsigned int cbp;
unsigned int ndptr;
};
static struct {
struct hpc3_stat pbdma[8];
struct hpc3_stat scsi[2];
struct hpc3_stat ethrx, ethtx;
} hpc3;
static struct {
unsigned long err_addr;
struct {
u32 lo;
u32 hi;
} tags[1][2], tagd[4][2], tagi[4][2]; /* Way 0/1 */
} cache_tags;
static inline void save_cache_tags(unsigned busaddr)
{
unsigned long addr = CAC_BASE | busaddr;
int i;
cache_tags.err_addr = addr;
/*
* Starting with a bus-address, save secondary cache (indexed by
* PA[23..18:7..6]) tags first.
*/
addr &= ~1L;
#define tag cache_tags.tags[0]
cache_op(Index_Load_Tag_S, addr);
tag[0].lo = read_c0_taglo(); /* PA[35:18], VA[13:12] */
tag[0].hi = read_c0_taghi(); /* PA[39:36] */
cache_op(Index_Load_Tag_S, addr | 1L);
tag[1].lo = read_c0_taglo(); /* PA[35:18], VA[13:12] */
tag[1].hi = read_c0_taghi(); /* PA[39:36] */
#undef tag
/*
* Save all primary data cache (indexed by VA[13:5]) tags which
* might fit to this bus-address, knowing that VA[11:0] == PA[11:0].
* Saving all tags and evaluating them later is easier and safer
* than relying on VA[13:12] from the secondary cache tags to pick
* matching primary tags here already.
*/
addr &= (0xffL << 56) | ((1 << 12) - 1);
#define tag cache_tags.tagd[i]
for (i = 0; i < 4; ++i, addr += (1 << 12)) {
cache_op(Index_Load_Tag_D, addr);
tag[0].lo = read_c0_taglo(); /* PA[35:12] */
tag[0].hi = read_c0_taghi(); /* PA[39:36] */
cache_op(Index_Load_Tag_D, addr | 1L);
tag[1].lo = read_c0_taglo(); /* PA[35:12] */
tag[1].hi = read_c0_taghi(); /* PA[39:36] */
}
#undef tag
/*
* Save primary instruction cache (indexed by VA[13:6]) tags
* the same way.
*/
addr &= (0xffL << 56) | ((1 << 12) - 1);
#define tag cache_tags.tagi[i]
for (i = 0; i < 4; ++i, addr += (1 << 12)) {
cache_op(Index_Load_Tag_I, addr);
tag[0].lo = read_c0_taglo(); /* PA[35:12] */
tag[0].hi = read_c0_taghi(); /* PA[39:36] */
cache_op(Index_Load_Tag_I, addr | 1L);
tag[1].lo = read_c0_taglo(); /* PA[35:12] */
tag[1].hi = read_c0_taghi(); /* PA[39:36] */
}
#undef tag
}
#define GIO_ERRMASK 0xff00
#define CPU_ERRMASK 0x3f00
static void save_and_clear_buserr(void)
{
int i;
/* save status registers */
cpu_err_addr = sgimc->cerr;
cpu_err_stat = sgimc->cstat;
gio_err_addr = sgimc->gerr;
gio_err_stat = sgimc->gstat;
extio_stat = sgioc->extio;
hpc3_berr_stat = hpc3c0->bestat;
hpc3.scsi[0].addr = (unsigned long)&hpc3c0->scsi_chan0;
hpc3.scsi[0].ctrl = hpc3c0->scsi_chan0.ctrl; /* HPC3_SCTRL_ACTIVE ? */
hpc3.scsi[0].cbp = hpc3c0->scsi_chan0.cbptr;
hpc3.scsi[0].ndptr = hpc3c0->scsi_chan0.ndptr;
hpc3.scsi[1].addr = (unsigned long)&hpc3c0->scsi_chan1;
hpc3.scsi[1].ctrl = hpc3c0->scsi_chan1.ctrl; /* HPC3_SCTRL_ACTIVE ? */
hpc3.scsi[1].cbp = hpc3c0->scsi_chan1.cbptr;
hpc3.scsi[1].ndptr = hpc3c0->scsi_chan1.ndptr;
hpc3.ethrx.addr = (unsigned long)&hpc3c0->ethregs.rx_cbptr;
hpc3.ethrx.ctrl = hpc3c0->ethregs.rx_ctrl; /* HPC3_ERXCTRL_ACTIVE ? */
hpc3.ethrx.cbp = hpc3c0->ethregs.rx_cbptr;
hpc3.ethrx.ndptr = hpc3c0->ethregs.rx_ndptr;
hpc3.ethtx.addr = (unsigned long)&hpc3c0->ethregs.tx_cbptr;
hpc3.ethtx.ctrl = hpc3c0->ethregs.tx_ctrl; /* HPC3_ETXCTRL_ACTIVE ? */
hpc3.ethtx.cbp = hpc3c0->ethregs.tx_cbptr;
hpc3.ethtx.ndptr = hpc3c0->ethregs.tx_ndptr;
for (i = 0; i < 8; ++i) {
/* HPC3_PDMACTRL_ISACT ? */
hpc3.pbdma[i].addr = (unsigned long)&hpc3c0->pbdma[i];
hpc3.pbdma[i].ctrl = hpc3c0->pbdma[i].pbdma_ctrl;
hpc3.pbdma[i].cbp = hpc3c0->pbdma[i].pbdma_bptr;
hpc3.pbdma[i].ndptr = hpc3c0->pbdma[i].pbdma_dptr;
}
i = 0;
if (gio_err_stat & CPU_ERRMASK)
i = gio_err_addr;
if (cpu_err_stat & CPU_ERRMASK)
i = cpu_err_addr;
save_cache_tags(i);
sgimc->cstat = sgimc->gstat = 0;
}
static void print_cache_tags(void)
{
u32 scb, scw;
int i;
printk(KERN_ERR "Cache tags @ %08x:\n", (unsigned)cache_tags.err_addr);
/* PA[31:12] shifted to PTag0 (PA[35:12]) format */
scw = (cache_tags.err_addr >> 4) & 0x0fffff00;
scb = cache_tags.err_addr & ((1 << 12) - 1) & ~((1 << 5) - 1);
for (i = 0; i < 4; ++i) { /* for each possible VA[13:12] value */
if ((cache_tags.tagd[i][0].lo & 0x0fffff00) != scw &&
(cache_tags.tagd[i][1].lo & 0x0fffff00) != scw)
continue;
printk(KERN_ERR
"D: 0: %08x %08x, 1: %08x %08x (VA[13:5] %04x)\n",
cache_tags.tagd[i][0].hi, cache_tags.tagd[i][0].lo,
cache_tags.tagd[i][1].hi, cache_tags.tagd[i][1].lo,
scb | (1 << 12)*i);
}
scb = cache_tags.err_addr & ((1 << 12) - 1) & ~((1 << 6) - 1);
for (i = 0; i < 4; ++i) { /* for each possible VA[13:12] value */
if ((cache_tags.tagi[i][0].lo & 0x0fffff00) != scw &&
(cache_tags.tagi[i][1].lo & 0x0fffff00) != scw)
continue;
printk(KERN_ERR
"I: 0: %08x %08x, 1: %08x %08x (VA[13:6] %04x)\n",
cache_tags.tagi[i][0].hi, cache_tags.tagi[i][0].lo,
cache_tags.tagi[i][1].hi, cache_tags.tagi[i][1].lo,
scb | (1 << 12)*i);
}
i = read_c0_config();
scb = i & (1 << 13) ? 7:6; /* scblksize = 2^[7..6] */
scw = ((i >> 16) & 7) + 19 - 1; /* scwaysize = 2^[24..19] / 2 */
i = ((1 << scw) - 1) & ~((1 << scb) - 1);
printk(KERN_ERR "S: 0: %08x %08x, 1: %08x %08x (PA[%u:%u] %05x)\n",
cache_tags.tags[0][0].hi, cache_tags.tags[0][0].lo,
cache_tags.tags[0][1].hi, cache_tags.tags[0][1].lo,
scw-1, scb, i & (unsigned)cache_tags.err_addr);
}
static inline const char *cause_excode_text(int cause)
{
static const char *txt[32] =
{ "Interrupt",
"TLB modification",
"TLB (load or instruction fetch)",
"TLB (store)",
"Address error (load or instruction fetch)",
"Address error (store)",
"Bus error (instruction fetch)",
"Bus error (data: load or store)",
"Syscall",
"Breakpoint",
"Reserved instruction",
"Coprocessor unusable",
"Arithmetic Overflow",
"Trap",
"14",
"Floating-Point",
"16", "17", "18", "19", "20", "21", "22",
"Watch Hi/Lo",
"24", "25", "26", "27", "28", "29", "30", "31",
};
return txt[(cause & 0x7c) >> 2];
}
static void print_buserr(const struct pt_regs *regs)
{
const int field = 2 * sizeof(unsigned long);
int error = 0;
if (extio_stat & EXTIO_MC_BUSERR) {
printk(KERN_ERR "MC Bus Error\n");
error |= 1;
}
if (extio_stat & EXTIO_HPC3_BUSERR) {
printk(KERN_ERR "HPC3 Bus Error 0x%x:<id=0x%x,%s,lane=0x%x>\n",
hpc3_berr_stat,
(hpc3_berr_stat & HPC3_BESTAT_PIDMASK) >>
HPC3_BESTAT_PIDSHIFT,
(hpc3_berr_stat & HPC3_BESTAT_CTYPE) ? "PIO" : "DMA",
hpc3_berr_stat & HPC3_BESTAT_BLMASK);
error |= 2;
}
if (extio_stat & EXTIO_EISA_BUSERR) {
printk(KERN_ERR "EISA Bus Error\n");
error |= 4;
}
if (cpu_err_stat & CPU_ERRMASK) {
printk(KERN_ERR "CPU error 0x%x<%s%s%s%s%s%s> @ 0x%08x\n",
cpu_err_stat,
cpu_err_stat & SGIMC_CSTAT_RD ? "RD " : "",
cpu_err_stat & SGIMC_CSTAT_PAR ? "PAR " : "",
cpu_err_stat & SGIMC_CSTAT_ADDR ? "ADDR " : "",
cpu_err_stat & SGIMC_CSTAT_SYSAD_PAR ? "SYSAD " : "",
cpu_err_stat & SGIMC_CSTAT_SYSCMD_PAR ? "SYSCMD " : "",
cpu_err_stat & SGIMC_CSTAT_BAD_DATA ? "BAD_DATA " : "",
cpu_err_addr);
error |= 8;
}
if (gio_err_stat & GIO_ERRMASK) {
printk(KERN_ERR "GIO error 0x%x:<%s%s%s%s%s%s%s%s> @ 0x%08x\n",
gio_err_stat,
gio_err_stat & SGIMC_GSTAT_RD ? "RD " : "",
gio_err_stat & SGIMC_GSTAT_WR ? "WR " : "",
gio_err_stat & SGIMC_GSTAT_TIME ? "TIME " : "",
gio_err_stat & SGIMC_GSTAT_PROM ? "PROM " : "",
gio_err_stat & SGIMC_GSTAT_ADDR ? "ADDR " : "",
gio_err_stat & SGIMC_GSTAT_BC ? "BC " : "",
gio_err_stat & SGIMC_GSTAT_PIO_RD ? "PIO_RD " : "",
gio_err_stat & SGIMC_GSTAT_PIO_WR ? "PIO_WR " : "",
gio_err_addr);
error |= 16;
}
if (!error)
printk(KERN_ERR "MC: Hmm, didn't find any error condition.\n");
else {
printk(KERN_ERR "CP0: config %08x, "
"MC: cpuctrl0/1: %08x/%05x, giopar: %04x\n"
"MC: cpu/gio_memacc: %08x/%05x, memcfg0/1: %08x/%08x\n",
read_c0_config(),
sgimc->cpuctrl0, sgimc->cpuctrl1, sgimc->giopar,
sgimc->cmacc, sgimc->gmacc,
sgimc->mconfig0, sgimc->mconfig1);
print_cache_tags();
}
printk(KERN_ALERT "%s, epc == %0*lx, ra == %0*lx\n",
cause_excode_text(regs->cp0_cause),
field, regs->cp0_epc, field, regs->regs[31]);
}
/*
* Check whether MC's (virtual) DMA address caused the bus error.
* See "Virtual DMA Specification", Draft 1.5, Feb 13 1992, SGI
*/
static int addr_is_ram(unsigned long addr, unsigned sz)
{
int i;
for (i = 0; i < boot_mem_map.nr_map; i++) {
unsigned long a = boot_mem_map.map[i].addr;
if (a <= addr && addr+sz <= a+boot_mem_map.map[i].size)
return 1;
}
return 0;
}
static int check_microtlb(u32 hi, u32 lo, unsigned long vaddr)
{
/* This is likely rather similar to correct code ;-) */
vaddr &= 0x7fffffff; /* Doc. states that top bit is ignored */
/* If tlb-entry is valid and VPN-high (bits [30:21] ?) matches... */
if ((lo & 2) && (vaddr >> 21) == ((hi<<1) >> 22)) {
u32 ctl = sgimc->dma_ctrl;
if (ctl & 1) {
unsigned int pgsz = (ctl & 2) ? 14:12; /* 16k:4k */
/* PTEIndex is VPN-low (bits [22:14]/[20:12] ?) */
unsigned long pte = (lo >> 6) << 12; /* PTEBase */
pte += 8*((vaddr >> pgsz) & 0x1ff);
if (addr_is_ram(pte, 8)) {
/*
* Note: Since DMA hardware does look up
* translation on its own, this PTE *must*
* match the TLB/EntryLo-register format !
*/
unsigned long a = *(unsigned long *)
PHYS_TO_XKSEG_UNCACHED(pte);
a = (a & 0x3f) << 6; /* PFN */
a += vaddr & ((1 << pgsz) - 1);
return (cpu_err_addr == a);
}
}
}
return 0;
}
static int check_vdma_memaddr(void)
{
if (cpu_err_stat & CPU_ERRMASK) {
u32 a = sgimc->maddronly;
if (!(sgimc->dma_ctrl & 0x100)) /* Xlate-bit clear ? */
return (cpu_err_addr == a);
if (check_microtlb(sgimc->dtlb_hi0, sgimc->dtlb_lo0, a) ||
check_microtlb(sgimc->dtlb_hi1, sgimc->dtlb_lo1, a) ||
check_microtlb(sgimc->dtlb_hi2, sgimc->dtlb_lo2, a) ||
check_microtlb(sgimc->dtlb_hi3, sgimc->dtlb_lo3, a))
return 1;
}
return 0;
}
static int check_vdma_gioaddr(void)
{
if (gio_err_stat & GIO_ERRMASK) {
u32 a = sgimc->gio_dma_trans;
a = (sgimc->gmaddronly & ~a) | (sgimc->gio_dma_sbits & a);
return (gio_err_addr == a);
}
return 0;
}
/*
* MC sends an interrupt whenever bus or parity errors occur. In addition,
* if the error happened during a CPU read, it also asserts the bus error
* pin on the R4K. Code in the bus error handler saves the MC bus error
* registers and then clears the interrupt when this happens.
*/
static int ip28_be_interrupt(const struct pt_regs *regs)
{
int i;
save_and_clear_buserr();
/*
* Try to find out, whether we got here by a mispredicted speculative
* load/store operation. If so, it's not fatal, we can go on.
*/
/* Any cause other than "Interrupt" (ExcCode 0) is fatal. */
if (regs->cp0_cause & CAUSEF_EXCCODE)
goto mips_be_fatal;
/* Any cause other than "Bus error interrupt" (IP6) is weird. */
if ((regs->cp0_cause & CAUSEF_IP6) != CAUSEF_IP6)
goto mips_be_fatal;
if (extio_stat & (EXTIO_HPC3_BUSERR | EXTIO_EISA_BUSERR))
goto mips_be_fatal;
/* Any state other than "Memory bus error" is fatal. */
if (cpu_err_stat & CPU_ERRMASK & ~SGIMC_CSTAT_ADDR)
goto mips_be_fatal;
/* GIO errors other than timeouts are fatal */
if (gio_err_stat & GIO_ERRMASK & ~SGIMC_GSTAT_TIME)
goto mips_be_fatal;
/*
* Now we have an asynchronous bus error, speculatively or DMA caused.
* Need to search all DMA descriptors for the error address.
*/
for (i = 0; i < sizeof(hpc3)/sizeof(struct hpc3_stat); ++i) {
struct hpc3_stat *hp = (struct hpc3_stat *)&hpc3 + i;
if ((cpu_err_stat & CPU_ERRMASK) &&
(cpu_err_addr == hp->ndptr || cpu_err_addr == hp->cbp))
break;
if ((gio_err_stat & GIO_ERRMASK) &&
(gio_err_addr == hp->ndptr || gio_err_addr == hp->cbp))
break;
}
if (i < sizeof(hpc3)/sizeof(struct hpc3_stat)) {
struct hpc3_stat *hp = (struct hpc3_stat *)&hpc3 + i;
printk(KERN_ERR "at DMA addresses: HPC3 @ %08lx:"
" ctl %08x, ndp %08x, cbp %08x\n",
CPHYSADDR(hp->addr), hp->ctrl, hp->ndptr, hp->cbp);
goto mips_be_fatal;
}
/* Check MC's virtual DMA stuff. */
if (check_vdma_memaddr()) {
printk(KERN_ERR "at GIO DMA: mem address 0x%08x.\n",
sgimc->maddronly);
goto mips_be_fatal;
}
if (check_vdma_gioaddr()) {
printk(KERN_ERR "at GIO DMA: gio address 0x%08x.\n",
sgimc->gmaddronly);
goto mips_be_fatal;
}
/* A speculative bus error... */
if (debug_be_interrupt) {
print_buserr(regs);
printk(KERN_ERR "discarded!\n");
}
return MIPS_BE_DISCARD;
mips_be_fatal:
print_buserr(regs);
return MIPS_BE_FATAL;
}
void ip22_be_interrupt(int irq)
{
struct pt_regs *regs = get_irq_regs();
count_be_interrupt++;
if (ip28_be_interrupt(regs) != MIPS_BE_DISCARD) {
/* Assume it would be too dangerous to continue ... */
die_if_kernel("Oops", regs);
force_sig(SIGBUS, current);
} else if (debug_be_interrupt)
show_regs((struct pt_regs *)regs);
}
static int ip28_be_handler(struct pt_regs *regs, int is_fixup)
{
/*
* We arrive here only in the unusual case of do_be() invocation,
* i.e. by a bus error exception without a bus error interrupt.
*/
if (is_fixup) {
count_be_is_fixup++;
save_and_clear_buserr();
return MIPS_BE_FIXUP;
}
count_be_handler++;
return ip28_be_interrupt(regs);
}
void __init ip22_be_init(void)
{
board_be_handler = ip28_be_handler;
}
int ip28_show_be_info(struct seq_file *m)
{
seq_printf(m, "IP28 be fixups\t\t: %u\n", count_be_is_fixup);
seq_printf(m, "IP28 be interrupts\t: %u\n", count_be_interrupt);
seq_printf(m, "IP28 be handler\t\t: %u\n", count_be_handler);
return 0;
}
static int __init debug_be_setup(char *str)
{
debug_be_interrupt++;
return 1;
}
__setup("ip28_debug_be", debug_be_setup);
| gpl-2.0 |